diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 785257142d..88d163d67c 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -11,7 +11,7 @@ For overall help on editing and submitting pull requests, visit: https://kubernetes.io/docs/contribute/start/#improve-existing-content - Use the default base branch, “master”, if you're documenting existing + Use the default base branch, “main”, if you're documenting existing features in the English localization. If you're working on a different localization (not English), see diff --git a/Makefile b/Makefile index bcae221430..a103f4e58e 100644 --- a/Makefile +++ b/Makefile @@ -6,8 +6,9 @@ NETLIFY_FUNC = $(NODE_BIN)/netlify-lambda # but this can be overridden when calling make, e.g. # CONTAINER_ENGINE=podman make container-image CONTAINER_ENGINE ?= docker +IMAGE_REGISTRY ?= gcr.io/k8s-staging-sig-docs IMAGE_VERSION=$(shell scripts/hash-files.sh Dockerfile Makefile | cut -c 1-12) -CONTAINER_IMAGE = kubernetes-hugo:v$(HUGO_VERSION)-$(IMAGE_VERSION) +CONTAINER_IMAGE = $(IMAGE_REGISTRY)/k8s-website-hugo:v$(HUGO_VERSION)-$(IMAGE_VERSION) CONTAINER_RUN = $(CONTAINER_ENGINE) run --rm --interactive --tty --volume $(CURDIR):/src CCRED=\033[0;31m @@ -95,4 +96,4 @@ clean-api-reference: ## Clean all directories in API reference directory, preser api-reference: clean-api-reference ## Build the API reference pages. 
go needed cd api-ref-generator/gen-resourcesdocs && \ - go run cmd/main.go kwebsite --config-dir config/v1.21/ --file api/v1.21/swagger.json --output-dir ../../content/en/docs/reference/kubernetes-api --templates templates + go run cmd/main.go kwebsite --config-dir ../../api-ref-assets/config/ --file ../../api-ref-assets/api/swagger.json --output-dir ../../content/en/docs/reference/kubernetes-api --templates ../../api-ref-assets/templates diff --git a/OWNERS b/OWNERS index 9b12305b4b..8e4e14f60c 100644 --- a/OWNERS +++ b/OWNERS @@ -8,7 +8,9 @@ approvers: emeritus_approvers: # - chenopis, commented out to disable PR assignments +# - irvifa, commented out to disable PR assignments # - jaredbhatti, commented out to disable PR assignments +# - kbarnard10, commented out to disable PR assignments # - steveperry-53, commented out to disable PR assignments - stewart-yu # - zacharysarah, commented out to disable PR assignments diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 2feb3cdb2d..462728069d 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -1,12 +1,8 @@ aliases: sig-docs-blog-owners: # Approvers for blog content - - castrojo - - kbarnard10 - onlydole - mrbobbytables sig-docs-blog-reviewers: # Reviewers for blog content - - castrojo - - kbarnard10 - mrbobbytables - onlydole - sftim @@ -22,31 +18,25 @@ aliases: - annajung - bradtopol - celestehorgan - - irvifa - jimangel - - kbarnard10 + - jlbutler - kbhawkey - onlydole - pi-victor - reylejano - savitharaghunathan - sftim - - steveperry-53 - tengqm - - zparnold sig-docs-en-reviews: # PR reviews for English content - bradtopol - celestehorgan - daminisatya - jimangel - - kbarnard10 - kbhawkey - onlydole - rajeshdeshpande02 - sftim - - steveperry-53 - tengqm - - zparnold sig-docs-es-owners: # Admins for Spanish content - raelga - electrocucaracha @@ -94,7 +84,6 @@ aliases: - danninov - girikuncoro - habibrosyad - - irvifa - phanama - wahyuoi sig-docs-id-reviews: # PR reviews for Indonesian content @@ -102,7 +91,6 @@ 
aliases: - danninov - girikuncoro - habibrosyad - - irvifa - phanama - wahyuoi sig-docs-it-owners: # Admins for Italian content @@ -138,14 +126,14 @@ aliases: - ClaudiaJKang - gochist - ianychoi - - seokho-son - - ysyukr + - jihoon-seo + - jmyung - pjhwa + - seokho-son - yoonian + - ysyukr sig-docs-leads: # Website chairs and tech leads - - irvifa - jimangel - - kbarnard10 - kbhawkey - onlydole - sftim @@ -163,8 +151,10 @@ aliases: # zhangxiaoyu-zidif sig-docs-zh-reviews: # PR reviews for Chinese content - chenrui333 + - chenxuc - howieyuen - idealhack + - mengjiao-liu - pigletfly - SataQiu - tanjunchen @@ -235,10 +225,12 @@ aliases: - parispittman # authoritative source: https://git.k8s.io/sig-release/OWNERS_ALIASES sig-release-leads: + - cpanato # SIG Technical Lead - hasheddan # SIG Technical Lead - jeremyrickard # SIG Technical Lead - justaugustus # SIG Chair - LappleApple # SIG Program Manager + - puerco # SIG Technical Lead - saschagrunert # SIG Chair release-engineering-approvers: - cpanato # Release Manager @@ -250,6 +242,7 @@ aliases: release-engineering-reviewers: - ameukam # Release Manager Associate - jimangel # Release Manager Associate + - markyjackson-taulia # Release Manager Associate - mkorbi # Release Manager Associate - palnabarun # Release Manager Associate - onlydole # Release Manager Associate diff --git a/README-ja.md b/README-ja.md index 49d0dd1bad..91e624c610 100644 --- a/README-ja.md +++ b/README-ja.md @@ -1,6 +1,6 @@ # Kubernetesのドキュメント -[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub 
release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) このリポジトリには、[KubernetesのWebサイトとドキュメント](https://kubernetes.io/)をビルドするために必要な全アセットが格納されています。貢献に興味を持っていただきありがとうございます! diff --git a/README-ko.md b/README-ko.md index c4038212c6..c3e1068b2e 100644 --- a/README-ko.md +++ b/README-ko.md @@ -1,6 +1,6 @@ # 쿠버네티스 문서화 -[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) 이 저장소에는 [쿠버네티스 웹사이트 및 문서](https://kubernetes.io/)를 빌드하는 데 필요한 자산이 포함되어 있습니다. 기여해주셔서 감사합니다! 
diff --git a/README-pl.md b/README-pl.md index 5426aef445..06bde04303 100644 --- a/README-pl.md +++ b/README-pl.md @@ -1,6 +1,6 @@ # Dokumentacja projektu Kubernetes -[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) W tym repozytorium znajdziesz wszystko, czego potrzebujesz do zbudowania [strony internetowej Kubernetesa wraz z dokumentacją](https://kubernetes.io/). Bardzo nam miło, że chcesz wziąć udział w jej współtworzeniu! @@ -18,7 +18,7 @@ Aby móc skorzystać z tego repozytorium, musisz lokalnie zainstalować: - [npm](https://www.npmjs.com/) - [Go](https://golang.org/) - [Hugo (Extended version)](https://gohugo.io/) -- Środowisko obsługi kontenerów, np. [Docker-a](https://www.docker.com/). +- Środowisko obsługi kontenerów, np. [Dockera](https://www.docker.com/). Przed rozpoczęciem zainstaluj niezbędne zależności. Sklonuj repozytorium i przejdź do odpowiedniego katalogu: @@ -43,7 +43,9 @@ make container-image make container-serve ``` -Aby obejrzeć zawartość serwisu otwórz w przeglądarce adres http://localhost:1313. Po każdej zmianie plików źródłowych, Hugo automatycznie aktualizuje stronę i odświeża jej widok w przeglądarce. +Jeśli widzisz błędy, prawdopodobnie kontener z Hugo nie dysponuje wystarczającymi zasobami. 
Aby rozwiązać ten problem, zwiększ ilość dostępnych zasobów CPU i pamięci dla Dockera na Twojej maszynie ([MacOSX](https://docs.docker.com/docker-for-mac/#resources) i [Windows](https://docs.docker.com/docker-for-windows/#resources)). + +Aby obejrzeć zawartość serwisu, otwórz w przeglądarce adres http://localhost:1313. Po każdej zmianie plików źródłowych, Hugo automatycznie aktualizuje stronę i odświeża jej widok w przeglądarce. ## Jak uruchomić lokalną kopię strony przy pomocy Hugo? diff --git a/README-pt.md b/README-pt.md index e27bf544d1..d856bf7b42 100644 --- a/README-pt.md +++ b/README-pt.md @@ -1,6 +1,6 @@ # A documentação do Kubernetes -[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) Bem-vindos! Este repositório contém todos os recursos necessários para criar o [website e documentação do Kubernetes](https://kubernetes.io/). Estamos muito satisfeitos por você querer contribuir! 
diff --git a/README-ru.md b/README-ru.md index 348f92a82e..82eb96689d 100644 --- a/README-ru.md +++ b/README-ru.md @@ -1,6 +1,6 @@ # Документация по Kubernetes -[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) Данный репозиторий содержит все необходимые файлы для сборки [сайта Kubernetes и документации](https://kubernetes.io/). Мы благодарим вас за желание внести свой вклад! diff --git a/README-uk.md b/README-uk.md index 2872b406d1..c1605b0c85 100644 --- a/README-uk.md +++ b/README-uk.md @@ -1,7 +1,7 @@ # Документація Kubernetes -[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) Вітаємо! В цьому репозиторії міститься все необхідне для роботи над [сайтом і документацією Kubernetes](https://kubernetes.io/). Ми щасливі, що ви хочете зробити свій внесок! 
diff --git a/README-zh.md b/README-zh.md index ef259ef2d0..6c594b3934 100644 --- a/README-zh.md +++ b/README-zh.md @@ -4,7 +4,7 @@ # The Kubernetes documentation --> -[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) + +{{if .ApiVersion}}`apiVersion: {{.ApiVersion}}`{{end}} + +{{if .Import}}`import "{{.Import}}"`{{end}} + +{{range .Sections}} +{{.Description | replace "<" "\\<" }} + +
+{{range .Fields}} +{{ "" | indent .Indent | indent .Indent}}- {{.Name}}{{if .Value}}: {{.Value}}{{end}} +{{if .Description}} +{{.Description | replace "<" "\\<" | indent 2 | indent .Indent | indent .Indent}} +{{- end}} +{{if .TypeDefinition}} +{{ "" | indent .Indent | indent .Indent}} +{{.TypeDefinition | indent 2 | indent .Indent | indent .Indent}} +{{end}} +{{- end}}{{/* range .Fields */}} + +{{range .FieldCategories}} +### {{.Name}} {#{{"-" | regexReplaceAll "[^a-zA-Z0-9]+" .Name }}}{{/* explicitly set fragment to keep capitalization */}} + +{{range .Fields}} +{{ "" | indent .Indent | indent .Indent}}- {{.Name}}{{if .Value}}: {{.Value}}{{end}} +{{if .Description}} +{{.Description | replace "<" "\\<" | indent 2 | indent .Indent | indent .Indent}} +{{- end}} +{{if .TypeDefinition}} +{{ "" | indent .Indent | indent .Indent}} +{{.TypeDefinition | indent 2 | indent .Indent | indent .Indent}} +{{end}} +{{- end}}{{/* range .Fields */}} + +{{- end}}{{/* range .FieldCategories */}} + +{{range .Operations}} + +### `{{.Verb}}` {{.Title}} + +#### HTTP Request + +{{.RequestMethod}} {{.RequestPath}} + +#### Parameters + +{{range .Parameters}} +- {{.Title}} + +{{.Description | indent 2}} + +{{end}}{{/* range .Parameters */}} + +#### Response + +{{range .Responses}} +{{.Code}}{{if .Type}} ({{.Type}}){{end}}: {{.Description}} +{{end}}{{/* range .Responses */}} + +{{- end}}{{/* range .Operations */}} +{{- end}}{{/* range .Sections */}} diff --git a/api-ref-assets/templates/chapter.tmpl b/api-ref-assets/templates/chapter.tmpl new file mode 100644 index 0000000000..9fb80e2209 --- /dev/null +++ b/api-ref-assets/templates/chapter.tmpl @@ -0,0 +1,85 @@ +--- +api_metadata: + apiVersion: "{{.ApiVersion}}" + import: "{{.Import}}" + kind: "{{.Kind}}" +content_type: "api_reference" +description: "{{.Metadata.Description}}" +title: "{{.Metadata.Title}}" +weight: {{.Metadata.Weight}} +auto_generated: true +--- + + + +{{if .ApiVersion}}`apiVersion: {{.ApiVersion}}`{{end}} + +{{if 
.Import}}`import "{{.Import}}"`{{end}} + +{{range .Sections}} +## {{.Name}} {#{{"-" | regexReplaceAll "[^a-zA-Z0-9]+" .Name }}}{{/* explicitly set fragment to keep capitalization */}} + +{{.Description | replace "<" "\\<" }} + +
+{{range .Fields}} +{{ "" | indent .Indent | indent .Indent}}- {{.Name}}{{if .Value}}: {{.Value}}{{end}} +{{if .Description}} +{{.Description | replace "<" "\\<" | indent 2 | indent .Indent | indent .Indent}} +{{- end}} +{{if .TypeDefinition}} +{{ "" | indent .Indent | indent .Indent}} +{{.TypeDefinition | indent 2 | indent .Indent | indent .Indent}} +{{end}} +{{- end}}{{/* range .Fields */}} + +{{range .FieldCategories}} +### {{.Name}} + +{{range .Fields}} +{{ "" | indent .Indent | indent .Indent}}- {{.Name}}{{if .Value}}: {{.Value}}{{end}} +{{if .Description}} +{{.Description | replace "<" "\\<" | indent 2 | indent .Indent | indent .Indent}} +{{- end}} +{{if .TypeDefinition}} +{{ "" | indent .Indent | indent .Indent}} +{{.TypeDefinition | indent 2 | indent .Indent | indent .Indent}} +{{end}} +{{- end}}{{/* range .Fields */}} + +{{- end}}{{/* range .FieldCategories */}} + +{{range .Operations}} + +### `{{.Verb}}` {{.Title}} + +#### HTTP Request + +{{.RequestMethod}} {{.RequestPath}} + +#### Parameters + +{{range .Parameters}} +- {{.Title}} + +{{.Description | indent 2}} + +{{end}}{{/* range .Parameters */}} + +#### Response + +{{range .Responses}} +{{.Code}}{{if .Type}} ({{.Type}}){{end}}: {{.Description}} +{{end}}{{/* range .Responses */}} + +{{- end}}{{/* range .Operations */}} +{{- end}}{{/* range .Sections */}} diff --git a/api-ref-assets/templates/part-index.tmpl b/api-ref-assets/templates/part-index.tmpl new file mode 100644 index 0000000000..36833ee9bb --- /dev/null +++ b/api-ref-assets/templates/part-index.tmpl @@ -0,0 +1,17 @@ +--- +title: "{{.Title}}" +weight: {{.Weight}} +auto_generated: true +--- + + + diff --git a/api-ref-generator b/api-ref-generator index 78e64febda..55bce68622 160000 --- a/api-ref-generator +++ b/api-ref-generator @@ -1 +1 @@ -Subproject commit 78e64febda1b53cafc79979c5978b42162cea276 +Subproject commit 55bce686224caba37f93e1e1eb53c0c9fc104ed4 diff --git a/archetypes/blog-post.md b/archetypes/blog-post.md new file mode 100644 index 
0000000000..acad021c68 --- /dev/null +++ b/archetypes/blog-post.md @@ -0,0 +1,61 @@ +--- +layout: blog +title: "{{ replace .Name "-" " " | title }}" +date: {{ .Date }} +draft: true +slug: +--- + +**Author:** (), () + + + +Replace this first line of your content with one to three sentences that summarize the blog post. + +## This is a section heading + +To help the reader, organize your content into sections that contain about three to six paragraphs. + +If you're documenting commands, separate the commands from the outputs, like this: + +1. Verify that the Secret exists by running the following command: + + ```shell + kubectl get secrets + ``` + + The response should be like this: + + ```shell + NAME TYPE DATA AGE + mysql-pass-c57bb4t7mf Opaque 1 9s + ``` + +You're free to create any sections you like. Below are a few common patterns we see at the end of blog posts. + +## What’s next? + +This optional section describes the future of the thing you've just described in the post. + +## How can I learn more? + +This optional section provides links to more information. Please avoid promoting and over-represent your organization. + +## How do I get involved? + +An optional section that links to resources for readers to get involved, and acknowledgments of individual contributors, such as: + +* [The name of a channel on Slack, #a-channel](https://.slack.com/messages/) + +* [A link to a "contribute" page with more information](). + +* Acknowledgements and thanks to the contributors. ([](https://github.com/)) who did X, Y, and Z. + +* Those interested in getting involved with the design and development of , join the [](https://github.com/project/community/tree/master/). We’re rapidly growing and always welcome new contributors. 
diff --git a/cloudbuild.yaml b/cloudbuild.yaml new file mode 100644 index 0000000000..61b5adc5f4 --- /dev/null +++ b/cloudbuild.yaml @@ -0,0 +1,25 @@ +# See https://cloud.google.com/cloud-build/docs/build-config + +# this must be specified in seconds. If omitted, defaults to 600s (10 mins) +timeout: 1200s +# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF, +# or any new substitutions added in the future. +options: + substitution_option: ALLOW_LOOSE +steps: + # It's fine to bump the tag to a recent version, as needed + - name: "gcr.io/k8s-testimages/gcb-docker-gcloud:v20190906-745fed4" + entrypoint: make + env: + - DOCKER_CLI_EXPERIMENTAL=enabled + - TAG=$_GIT_TAG + - BASE_REF=$_PULL_BASE_REF + args: + - container-image +substitutions: + # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and + # can be used as a substitution + _GIT_TAG: "12345" + # _PULL_BASE_REF will contain the ref that was pushed to to trigger this build - + # a branch like 'master' or 'release-0.2', or a tag like 'v0.2'. 
+ _PULL_BASE_REF: "master" diff --git a/config.toml b/config.toml index e1ef9db014..eba5852abb 100644 --- a/config.toml +++ b/config.toml @@ -138,12 +138,12 @@ time_format_default = "January 02, 2006 at 3:04 PM PST" description = "Production-Grade Container Orchestration" showedit = true -latest = "v1.21" +latest = "v1.22" -fullversion = "v1.21.0" -version = "v1.21" -githubbranch = "master" -docsbranch = "master" +fullversion = "v1.22.0" +version = "v1.22" +githubbranch = "main" +docsbranch = "main" deprecated = false currentUrl = "https://kubernetes.io/docs/home/" nextUrl = "https://kubernetes-io-vnext-staging.netlify.com/" @@ -178,45 +178,46 @@ js = [ ] [[params.versions]] -fullversion = "v1.21.0" -version = "v1.21" -githubbranch = "v1.21.0" -docsbranch = "master" +fullversion = "v1.22.0" +version = "v1.22" +githubbranch = "v1.22.0" +docsbranch = "main" url = "https://kubernetes.io" [[params.versions]] -fullversion = "v1.20.5" +fullversion = "v1.21.4" +version = "v1.21" +githubbranch = "v1.21.4" +docsbranch = "release-1.21" +url = "https://v1-21.docs.kubernetes.io" + +[[params.versions]] +fullversion = "v1.20.10" version = "v1.20" -githubbranch = "v1.20.5" +githubbranch = "v1.20.10" docsbranch = "release-1.20" url = "https://v1-20.docs.kubernetes.io" [[params.versions]] -fullversion = "v1.19.9" +fullversion = "v1.19.14" version = "v1.19" -githubbranch = "v1.19.9" +githubbranch = "v1.19.14" docsbranch = "release-1.19" url = "https://v1-19.docs.kubernetes.io" [[params.versions]] -fullversion = "v1.18.17" +fullversion = "v1.18.20" version = "v1.18" -githubbranch = "v1.18.17" +githubbranch = "v1.18.20" docsbranch = "release-1.18" url = "https://v1-18.docs.kubernetes.io" -[[params.versions]] -fullversion = "v1.17.17" -version = "v1.17" -githubbranch = "v1.17.17" -docsbranch = "release-1.17" -url = "https://v1-17.docs.kubernetes.io" - - # User interface configuration [params.ui] # Enable to show the side bar menu in its compact state. 
sidebar_menu_compact = false +# https://github.com/gohugoio/hugo/issues/8918#issuecomment-903314696 +sidebar_cache_limit = 1 # Set to true to disable breadcrumb navigation. breadcrumb_disable = false # Set to true to hide the sidebar search box (the top nav search box will still be displayed if search is enabled) diff --git a/content/de/_index.html b/content/de/_index.html index 838552b5c4..78d3b5e003 100644 --- a/content/de/_index.html +++ b/content/de/_index.html @@ -9,7 +9,7 @@ cid: home {{% blocks/feature image="flower" %}} ### [Kubernetes (K8s)]({{< relref "/docs/concepts/overview/what-is-kubernetes" >}}) ist ein Open-Source-System zur Automatisierung der Bereitstellung, Skalierung und Verwaltung von containerisierten Anwendungen. -Es gruppiert Container, aus denen sich eine Anwendung zusammensetzt, in logische Einheiten, um die Verwaltung und Erkennung zu erleichtern. Kubernetes baut auf [15 Jahre Erfahrung in Bewältigung von Produktions-Workloads bei Google] (http://queue.acm.org/detail.cfm?id=2898444), kombiniert mit Best-of-Breed-Ideen und Praktiken aus der Community. +Es gruppiert Container, aus denen sich eine Anwendung zusammensetzt, in logische Einheiten, um die Verwaltung und Erkennung zu erleichtern. Kubernetes baut auf [15 Jahre Erfahrung in Bewältigung von Produktions-Workloads bei Google](http://queue.acm.org/detail.cfm?id=2898444), kombiniert mit Best-of-Breed-Ideen und Praktiken aus der Community. 
{{% /blocks/feature %}} {{% blocks/feature image="scalable" %}} @@ -57,4 +57,4 @@ Kubernetes ist Open Source und bietet Dir die Freiheit, die Infrastruktur vor Or {{< blocks/kubernetes-features >}} -{{< blocks/case-studies >}} \ No newline at end of file +{{< blocks/case-studies >}} diff --git a/content/de/docs/concepts/cluster-administration/addons.md b/content/de/docs/concepts/cluster-administration/addons.md index f5eedeb59b..abf15e453f 100644 --- a/content/de/docs/concepts/cluster-administration/addons.md +++ b/content/de/docs/concepts/cluster-administration/addons.md @@ -26,7 +26,7 @@ Die Add-Ons in den einzelnen Kategorien sind alphabetisch sortiert - Die Reihenf * [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) ermöglicht das nahtlose Verbinden von Kubernetes mit einer Reihe an CNI-Plugins wie z.B. Calico, Canal, Flannel, Romana, oder Weave. * [Contiv](http://contiv.github.io) bietet konfigurierbares Networking (Native L3 auf BGP, Overlay mit vxlan, Klassisches L2, Cisco-SDN/ACI) für verschiedene Anwendungszwecke und auch umfangreiches Policy-Framework. Das Contiv-Projekt ist vollständig [Open Source](http://github.com/contiv). Der [installer](http://github.com/contiv/install) bietet sowohl kubeadm als auch nicht-kubeadm basierte Installationen. * [Contrail](http://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/), basierend auf [Tungsten Fabric](https://tungsten.io), ist eine Open Source, multi-Cloud Netzwerkvirtualisierungs- und Policy-Management Plattform. Contrail und Tungsten Fabric sind mit Orechstratoren wie z.B. Kubernetes, OpenShift, OpenStack und Mesos integriert und bieten Isolationsmodi für Virtuelle Maschinen, Container (bzw. Pods) und Bare Metal workloads. -* [Flannel](https://github.com/coreos/flannel/blob/master/Documentation/kubernetes.md) ist ein Overlay-Network-Provider der mit Kubernetes genutzt werden kann. 
+* [Flannel](https://github.com/flannel-io/flannel#deploying-flannel-manually) ist ein Overlay-Network-Provider der mit Kubernetes genutzt werden kann. * [Knitter](https://github.com/ZTE/Knitter/) ist eine Network-Lösung die Mehrfach-Network in Kubernetes ermöglicht. * [Multus](https://github.com/Intel-Corp/multus-cni) ist ein Multi-Plugin für Mehrfachnetzwerk-Unterstützung um alle CNI-Plugins (z.B. Calico, Cilium, Contiv, Flannel), zusätzlich zu SRIOV-, DPDK-, OVS-DPDK- und VPP-Basierten Workloads in Kubernetes zu unterstützen. * [NSX-T](https://docs.vmware.com/en/VMware-NSX-T/2.0/nsxt_20_ncp_kubernetes.pdf) Container Plug-in (NCP) bietet eine Integration zwischen VMware NSX-T und einem Orchestator wie z.B. Kubernetes. Außerdem bietet es eine Integration zwischen NSX-T und Containerbasierten CaaS/PaaS-Plattformen wie z.B. Pivotal Container Service (PKS) und OpenShift. diff --git a/content/de/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html b/content/de/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html index 7a5fe0ce4f..4b7d5ddaae 100644 --- a/content/de/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html +++ b/content/de/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/de/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html b/content/de/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html index 8c74aafd78..b8eae305f3 100644 --- a/content/de/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html +++ b/content/de/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/de/docs/tutorials/kubernetes-basics/explore/explore-intro.html b/content/de/docs/tutorials/kubernetes-basics/explore/explore-intro.html index f220ff5eb7..5e64134a44 100644 --- a/content/de/docs/tutorials/kubernetes-basics/explore/explore-intro.html +++ 
b/content/de/docs/tutorials/kubernetes-basics/explore/explore-intro.html @@ -5,7 +5,7 @@ weight: 10 - + diff --git a/content/de/docs/tutorials/kubernetes-basics/expose/expose-interactive.html b/content/de/docs/tutorials/kubernetes-basics/expose/expose-interactive.html index ab5b880397..5b4c1a4ae8 100644 --- a/content/de/docs/tutorials/kubernetes-basics/expose/expose-interactive.html +++ b/content/de/docs/tutorials/kubernetes-basics/expose/expose-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/de/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/de/docs/tutorials/kubernetes-basics/expose/expose-intro.html index 07e76654a4..ce0f9caaae 100644 --- a/content/de/docs/tutorials/kubernetes-basics/expose/expose-intro.html +++ b/content/de/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -5,7 +5,7 @@ weight: 10 - + diff --git a/content/de/docs/tutorials/kubernetes-basics/update/update-interactive.html b/content/de/docs/tutorials/kubernetes-basics/update/update-interactive.html index 448ddc81b9..086b90d6b7 100644 --- a/content/de/docs/tutorials/kubernetes-basics/update/update-interactive.html +++ b/content/de/docs/tutorials/kubernetes-basics/update/update-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/de/docs/tutorials/kubernetes-basics/update/update-intro.html b/content/de/docs/tutorials/kubernetes-basics/update/update-intro.html index 61ee05d662..74e3e40982 100644 --- a/content/de/docs/tutorials/kubernetes-basics/update/update-intro.html +++ b/content/de/docs/tutorials/kubernetes-basics/update/update-intro.html @@ -5,7 +5,7 @@ weight: 10 - + diff --git a/content/en/_index.html b/content/en/_index.html index 2abc22985c..db4c966102 100644 --- a/content/en/_index.html +++ b/content/en/_index.html @@ -48,7 +48,7 @@ Kubernetes is open source giving you the freedom to take advantage of on-premise


- Revisit KubeCon EU 2021 + Attend KubeCon Europe on May 17-20, 2022
diff --git a/content/en/blog/_posts/2016-08-00-Kubernetes-Namespaces-Use-Cases-Insights.md b/content/en/blog/_posts/2016-08-00-Kubernetes-Namespaces-Use-Cases-Insights.md index 7b05c1f74c..896f2c5f84 100644 --- a/content/en/blog/_posts/2016-08-00-Kubernetes-Namespaces-Use-Cases-Insights.md +++ b/content/en/blog/_posts/2016-08-00-Kubernetes-Namespaces-Use-Cases-Insights.md @@ -125,7 +125,7 @@ You may wish to, but you cannot create a hierarchy of namespaces. Namespaces can -Namespaces are easy to create and use but it’s also easy to deploy code inadvertently into the wrong namespace. Good DevOps hygiene suggests documenting and automating processes where possible and this will help. The other way to avoid using the wrong namespace is to set a [kubectl context](/docs/user-guide/kubectl/kubectl_config_set-context/).  +Namespaces are easy to create and use but it’s also easy to deploy code inadvertently into the wrong namespace. Good DevOps hygiene suggests documenting and automating processes where possible and this will help. The other way to avoid using the wrong namespace is to set a [kubectl context](/docs/reference/generated/kubectl/kubectl-commands#-em-set-context-em-).  
diff --git a/content/en/blog/_posts/2016-11-00-Visualize-Kubelet-Performance-With-Node-Dashboard.md b/content/en/blog/_posts/2016-11-00-Visualize-Kubelet-Performance-With-Node-Dashboard.md index bdb43b49b9..548c91e7b7 100644 --- a/content/en/blog/_posts/2016-11-00-Visualize-Kubelet-Performance-With-Node-Dashboard.md +++ b/content/en/blog/_posts/2016-11-00-Visualize-Kubelet-Performance-With-Node-Dashboard.md @@ -5,6 +5,11 @@ slug: visualize-kubelet-performance-with-node-dashboard url: /blog/2016/11/Visualize-Kubelet-Performance-With-Node-Dashboard --- +_Since this article was published, the Node Performance Dashboard was retired and is no longer available._ + +_This retirement happened in early 2019, as part of the_ `kubernetes/contrib` +_[repository deprecation](https://github.com/kubernetes-retired/contrib/issues/3007)_. + In Kubernetes 1.4, we introduced a new node performance analysis tool, called the _node performance dashboard_, to visualize and explore the behavior of the Kubelet in much richer details. This new feature will make it easy to understand and improve code performance for Kubelet developers, and lets cluster maintainer set configuration according to provided Service Level Objectives (SLOs). **Background** diff --git a/content/en/blog/_posts/2016-12-00-Statefulset-Run-Scale-Stateful-Applications-In-Kubernetes.md b/content/en/blog/_posts/2016-12-00-Statefulset-Run-Scale-Stateful-Applications-In-Kubernetes.md index 515a3aa195..6ce3bf0044 100644 --- a/content/en/blog/_posts/2016-12-00-Statefulset-Run-Scale-Stateful-Applications-In-Kubernetes.md +++ b/content/en/blog/_posts/2016-12-00-Statefulset-Run-Scale-Stateful-Applications-In-Kubernetes.md @@ -37,7 +37,7 @@ If you run your storage application on high-end hardware or extra-large instance [ZooKeeper](https://zookeeper.apache.org/doc/current/) is an interesting use case for StatefulSet for two reasons. 
First, it demonstrates that StatefulSet can be used to run a distributed, strongly consistent storage application on Kubernetes. Second, it's a prerequisite for running workloads like [Apache Hadoop](http://hadoop.apache.org/) and [Apache Kakfa](https://kafka.apache.org/) on Kubernetes. An [in-depth tutorial](/docs/tutorials/stateful-application/zookeeper/) on deploying a ZooKeeper ensemble on Kubernetes is available in the Kubernetes documentation, and we’ll outline a few of the key features below. **Creating a ZooKeeper Ensemble** -Creating an ensemble is as simple as using [kubectl create](/docs/user-guide/kubectl/kubectl_create/) to generate the objects stored in the manifest. +Creating an ensemble is as simple as using [kubectl create](/docs/reference/generated/kubectl/kubectl-commands#create) to generate the objects stored in the manifest. ``` @@ -297,7 +297,7 @@ zk-0 0/1 Terminating 0 15m -You can use [kubectl apply](/docs/user-guide/kubectl/kubectl_apply/) to recreate the zk StatefulSet and redeploy the ensemble. +You can use [kubectl apply](/docs/reference/generated/kubectl/kubectl-commands#apply) to recreate the zk StatefulSet and redeploy the ensemble. diff --git a/content/en/blog/_posts/2018-04-13-local-persistent-volumes-beta.md b/content/en/blog/_posts/2018-04-13-local-persistent-volumes-beta.md index 71a0fa26d9..a7cabde710 100644 --- a/content/en/blog/_posts/2018-04-13-local-persistent-volumes-beta.md +++ b/content/en/blog/_posts/2018-04-13-local-persistent-volumes-beta.md @@ -140,7 +140,7 @@ The local persistent volume beta feature is not complete by far. Some notable en ## Complementary features -[Pod priority and preemption](/docs/concepts/configuration/pod-priority-preemption/) is another Kubernetes feature that is complementary to local persistent volumes. When your application uses local storage, it must be scheduled to the specific node where the local volume resides. 
You can give your local storage workload high priority so if that node ran out of room to run your workload, Kubernetes can preempt lower priority workloads to make room for it. +[Pod priority and preemption](/docs/concepts/scheduling-eviction/pod-priority-preemption/) is another Kubernetes feature that is complementary to local persistent volumes. When your application uses local storage, it must be scheduled to the specific node where the local volume resides. You can give your local storage workload high priority so if that node ran out of room to run your workload, Kubernetes can preempt lower priority workloads to make room for it. [Pod disruption budget](/docs/concepts/workloads/pods/disruptions/) is also very important for those workloads that must maintain quorum. Setting a disruption budget for your workload ensures that it does not drop below quorum due to voluntary disruption events, such as node drains during upgrade. diff --git a/content/en/blog/_posts/2018-07-16-kubernetes-1-11-release-interview.md b/content/en/blog/_posts/2018-07-16-kubernetes-1-11-release-interview.md index 4326924029..25758bc213 100644 --- a/content/en/blog/_posts/2018-07-16-kubernetes-1-11-release-interview.md +++ b/content/en/blog/_posts/2018-07-16-kubernetes-1-11-release-interview.md @@ -94,7 +94,7 @@ JOSH BERKUS: That goes into release notes. I mean, keep in mind that one of the However, stuff happens, and we do occasionally have to do those. And so far, our main way to identify that to people actually is in the release notes. If you look at [the current release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#no-really-you-must-do-this-before-you-upgrade), there are actually two things in there right now that are sort of breaking changes. 
-One of them is the bit with [priority and preemption](/docs/concepts/configuration/pod-priority-preemption/) in that preemption being on by default now allows badly behaved users of the system to cause trouble in new ways. I'd actually have to look at the release notes to see what the second one was... +One of them is the bit with [priority and preemption](/docs/concepts/scheduling-eviction/pod-priority-preemption/) in that preemption being on by default now allows badly behaved users of the system to cause trouble in new ways. I'd actually have to look at the release notes to see what the second one was... TIM PEPPER: The [JSON capitalization case sensitivity](https://github.com/kubernetes/kubernetes/issues/64612). diff --git a/content/en/blog/_posts/2018-08-03-make-kubernetes-production-grade-anywhere.md b/content/en/blog/_posts/2018-08-03-make-kubernetes-production-grade-anywhere.md index a28196d568..329b2c4de7 100644 --- a/content/en/blog/_posts/2018-08-03-make-kubernetes-production-grade-anywhere.md +++ b/content/en/blog/_posts/2018-08-03-make-kubernetes-production-grade-anywhere.md @@ -104,7 +104,7 @@ Master and Worker nodes should be protected from overload and resource exhaustio Resource consumption by the control plane will correlate with the number of pods and the pod churn rate. Very large and very small clusters will benefit from non-default [settings](/docs/reference/command-line-tools-reference/kube-apiserver/) of kube-apiserver request throttling and memory. Having these too high can lead to request limit exceeded and out of memory errors. -On worker nodes, [Node Allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/) should be configured based on a reasonable supportable workload density at each node. Namespaces can be created to subdivide the worker node cluster into multiple virtual clusters with resource CPU and memory [quotas](/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/). 
Kubelet handling of [out of resource](/docs/tasks/administer-cluster/out-of-resource/) conditions can be configured. +On worker nodes, [Node Allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/) should be configured based on a reasonable supportable workload density at each node. Namespaces can be created to subdivide the worker node cluster into multiple virtual clusters with resource CPU and memory [quotas](/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/). Kubelet handling of [out of resource](/docs/concepts/scheduling-eviction/node-pressure-eviction/) conditions can be configured. ## Security @@ -166,7 +166,7 @@ Some critical state is held outside etcd. Certificates, container images, and ot * Cloud provider specific account and configuration data ## Considerations for your production workloads -Anti-affinity specifications can be used to split clustered services across backing hosts, but at this time the settings are used only when the pod is scheduled. This means that Kubernetes can restart a failed node of your clustered application, but does not have a native mechanism to rebalance after a fail back. This is a topic worthy of a separate blog, but supplemental logic might be useful to achieve optimal workload placements after host or worker node recoveries or expansions. The [Pod Priority and Preemption feature](/docs/concepts/configuration/pod-priority-preemption/) can be used to specify a preferred triage in the event of resource shortages caused by failures or bursting workloads. +Anti-affinity specifications can be used to split clustered services across backing hosts, but at this time the settings are used only when the pod is scheduled. This means that Kubernetes can restart a failed node of your clustered application, but does not have a native mechanism to rebalance after a fail back. 
This is a topic worthy of a separate blog, but supplemental logic might be useful to achieve optimal workload placements after host or worker node recoveries or expansions. The [Pod Priority and Preemption feature](/docs/concepts/scheduling-eviction/pod-priority-preemption/) can be used to specify a preferred triage in the event of resource shortages caused by failures or bursting workloads. For stateful services, external attached volume mounts are the standard Kubernetes recommendation for a non-clustered service (e.g., a typical SQL database). At this time Kubernetes managed snapshots of these external volumes is in the category of a [roadmap feature request](https://docs.google.com/presentation/d/1dgxfnroRAu0aF67s-_bmeWpkM1h2LCxe6lB1l1oS0EQ/edit#slide=id.g3ca07c98c2_0_47), likely to align with the Container Storage Interface (CSI) integration. Thus performing backups of such a service would involve application specific, in-pod activity that is beyond the scope of this document. While awaiting better Kubernetes support for a snapshot and backup workflow, running your database service in a VM rather than a container, and exposing it to your Kubernetes workload may be worth considering. diff --git a/content/en/blog/_posts/2019-04-16-pod-priority-and-preemption-in-kubernetes.md b/content/en/blog/_posts/2019-04-16-pod-priority-and-preemption-in-kubernetes.md index 49516da96a..88907e3e4d 100644 --- a/content/en/blog/_posts/2019-04-16-pod-priority-and-preemption-in-kubernetes.md +++ b/content/en/blog/_posts/2019-04-16-pod-priority-and-preemption-in-kubernetes.md @@ -8,7 +8,7 @@ date: 2019-04-16 Kubernetes is well-known for running scalable workloads. It scales your workloads based on their resource usage. When a workload is scaled up, more instances of the application get created. When the application is critical for your product, you want to make sure that these new instances are scheduled even when your cluster is under resource pressure. 
One obvious solution to this problem is to over-provision your cluster resources to have some amount of slack resources available for scale-up situations. This approach often works, but costs more as you would have to pay for the resources that are idle most of the time. -[Pod priority and preemption](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) is a scheduler feature made generally available in Kubernetes 1.14 that allows you to achieve high levels of scheduling confidence for your critical workloads without overprovisioning your clusters. It also provides a way to improve resource utilization in your clusters without sacrificing the reliability of your essential workloads. +[Pod priority and preemption](/docs/concepts/scheduling-eviction/pod-priority-preemption/) is a scheduler feature made generally available in Kubernetes 1.14 that allows you to achieve high levels of scheduling confidence for your critical workloads without overprovisioning your clusters. It also provides a way to improve resource utilization in your clusters without sacrificing the reliability of your essential workloads. ## Guaranteed scheduling with controlled cost diff --git a/content/en/blog/_posts/2020-05-27-An-Introduction-to-the-K8s-Infrastructure-Working-Group.md b/content/en/blog/_posts/2020-05-27-An-Introduction-to-the-K8s-Infrastructure-Working-Group.md index f2a74914d8..efd5e196f7 100644 --- a/content/en/blog/_posts/2020-05-27-An-Introduction-to-the-K8s-Infrastructure-Working-Group.md +++ b/content/en/blog/_posts/2020-05-27-An-Introduction-to-the-K8s-Infrastructure-Working-Group.md @@ -55,7 +55,7 @@ The team has made progress in the last few months that is well worth celebrating - The K8s-Infrastructure Working Group released an automated billing report that they start every meeting off by reviewing as a group. 
- DNS for k8s.io and kubernetes.io are also fully [community-owned](https://groups.google.com/g/kubernetes-dev/c/LZTYJorGh7c/m/u-ydk-yNEgAJ), with community members able to [file issues](https://github.com/kubernetes/k8s.io/issues/new?assignees=&labels=wg%2Fk8s-infra&template=dns-request.md&title=DNS+REQUEST%3A+%3Cyour-dns-record%3E) to manage records. -- The container registry [k8s.gcr.io](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io) is also fully community-owned and available for all Kubernetes subprojects to use. +- The container registry [k8s.gcr.io](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io) is also fully community-owned and available for all Kubernetes subprojects to use. - The Kubernetes [publishing-bot](https://github.com/kubernetes/publishing-bot) responsible for keeping k8s.io/kubernetes/staging repositories published to their own top-level repos (For example: [kubernetes/api](https://github.com/kubernetes/api)) runs on a community-owned cluster. - The gcsweb.k8s.io service used to provide anonymous access to GCS buckets for kubernetes artifacts runs on a community-owned cluster. - There is also an automated process of promoting all our container images. This includes a fully documented infrastructure, managed by the Kubernetes community, with automated processes for provisioning permissions. diff --git a/content/en/blog/_posts/2021-04-22-gateway-api/index.md b/content/en/blog/_posts/2021-04-22-gateway-api/index.md index a7d54ad645..d9c798a5b1 100644 --- a/content/en/blog/_posts/2021-04-22-gateway-api/index.md +++ b/content/en/blog/_posts/2021-04-22-gateway-api/index.md @@ -186,7 +186,7 @@ metadata: ### Role Oriented Design -When you put it all together, you have a single load balancing infrastructure that can be safely shared by multiple teams. The Gateway API not only a more expressive API for advanced routing, but is also a role-oriented API, designed for multi-tenant infrastructure. 
Its extensibility ensures that it will evolve for future use-cases while preserving portability. Ultimately these characteristics will allow Gateway API to adapt to different organizational models and implementations well into the future. +When you put it all together, you have a single load balancing infrastructure that can be safely shared by multiple teams. The Gateway API is not only a more expressive API for advanced routing, but is also a role-oriented API, designed for multi-tenant infrastructure. Its extensibility ensures that it will evolve for future use-cases while preserving portability. Ultimately these characteristics will allow the Gateway API to adapt to different organizational models and implementations well into the future. ### Try it out and get involved @@ -194,4 +194,4 @@ There are many resources to check out to learn more. * Check out the [user guides](https://gateway-api.sigs.k8s.io/guides/getting-started/) to see what use-cases can be addressed. * Try out one of the [existing Gateway controllers ](https://gateway-api.sigs.k8s.io/references/implementations/) -* Or [get involved](https://gateway-api.sigs.k8s.io/contributing/community/) and help design and influence the future of Kubernetes service networking! \ No newline at end of file +* Or [get involved](https://gateway-api.sigs.k8s.io/contributing/community/) and help design and influence the future of Kubernetes service networking! 
diff --git a/content/en/blog/_posts/2021-06-21-writing-a-controller-for-pod-labels.md b/content/en/blog/_posts/2021-06-21-writing-a-controller-for-pod-labels.md new file mode 100644 index 0000000000..ec3934ad7d --- /dev/null +++ b/content/en/blog/_posts/2021-06-21-writing-a-controller-for-pod-labels.md @@ -0,0 +1,467 @@ +--- +layout: blog +title: "Writing a Controller for Pod Labels" +date: 2021-06-21 +slug: writing-a-controller-for-pod-labels +--- + +**Authors**: Arthur Busser (Padok) + +[Operators][what-is-an-operator] are proving to be an excellent solution to +running stateful distributed applications in Kubernetes. Open source tools like +the [Operator SDK][operator-sdk] provide ways to build reliable and maintainable +operators, making it easier to extend Kubernetes and implement custom +scheduling. + +Kubernetes operators run complex software inside your cluster. The open source +community has already built [many operators][operatorhub] for distributed +applications like Prometheus, Elasticsearch, or Argo CD. Even outside of +open source, operators can help to bring new functionality to your Kubernetes +cluster. + +An operator is a set of [custom resources][custom-resource-definitions] and a +set of [controllers][controllers]. A controller watches for changes to specific +resources in the Kubernetes API and reacts by creating, updating, or deleting +resources. + +The Operator SDK is best suited for building fully-featured operators. +Nonetheless, you can use it to write a single controller. This post will walk +you through writing a Kubernetes controller in Go that will add a `pod-name` +label to pods that have a specific annotation. + +## Why do we need a controller for this? + +I recently worked on a project where we needed to create a Service that routed +traffic to a specific Pod in a ReplicaSet. The problem is that a Service can +only select pods by label, and all pods in a ReplicaSet have the same labels. 
+There are two ways to solve this problem: + +1. Create a Service without a selector and manage the Endpoints or + EndpointSlices for that Service directly. We would need to write a custom + controller to insert our Pod's IP address into those resources. +2. Add a label to the Pod with a unique value. We could then use this label in + our Service's selector. Again, we would need to write a custom controller to + add this label. + +A controller is a control loop that tracks one or more Kubernetes resource +types. The controller from option n°2 above only needs to track pods, which +makes it simpler to implement. This is the option we are going to walk through +by writing a Kubernetes controller that adds a `pod-name` label to our pods. + +StatefulSets [do this natively][statefulset-pod-name-label] by adding a +`pod-name` label to each Pod in the set. But what if we don't want to or can't +use StatefulSets? + +We rarely create pods directly; most often, we use a Deployment, ReplicaSet, or +another high-level resource. We can specify labels to add to each Pod in the +PodSpec, but not with dynamic values, so no way to replicate a StatefulSet's +`pod-name` label. + +We tried using a [mutating admission webhook][mutating-admission-webhook]. When +anyone creates a Pod, the webhook patches the Pod with a label containing the +Pod's name. Disappointingly, this does not work: not all pods have a name before +being created. For instance, when the ReplicaSet controller creates a Pod, it +sends a `namePrefix` to the Kubernetes API server and not a `name`. The API +server generates a unique name before persisting the new Pod to etcd, but only +after calling our admission webhook. So in most cases, we can't know a Pod's +name with a mutating webhook. + +Once a Pod exists in the Kubernetes API, it is mostly immutable, but we can +still add a label. 
We can even do so from the command line: + +```bash +kubectl label pod my-pod my-label-key=my-label-value +``` + +We need to watch for changes to any pods in the Kubernetes API and add the label +we want. Rather than do this manually, we are going to write a controller that +does it for us. + +## Bootstrapping a controller with the Operator SDK + +A controller is a reconciliation loop that reads the desired state of a resource +from the Kubernetes API and takes action to bring the cluster's actual state +closer to the desired state. + +In order to write this controller as quickly as possible, we are going to use +the Operator SDK. If you don't have it installed, follow the +[official documentation][operator-sdk-installation]. + +```terminal +$ operator-sdk version +operator-sdk version: "v1.4.2", commit: "4b083393be65589358b3e0416573df04f4ae8d9b", kubernetes version: "v1.19.4", go version: "go1.15.8", GOOS: "darwin", GOARCH: "amd64" +``` + +Let's create a new directory to write our controller in: + +```bash +mkdir label-operator && cd label-operator +``` + +Next, let's initialize a new operator, to which we will add a single controller. +To do this, you will need to specify a domain and a repository. The domain +serves as a prefix for the group your custom Kubernetes resources will belong +to. Because we are not going to be defining custom resources, the domain does +not matter. The repository is going to be the name of the Go module we are going +to write. By convention, this is the repository where you will be storing your +code. + +As an example, here is the command I ran: + +```bash +# Feel free to change the domain and repo values. +operator-sdk init --domain=padok.fr --repo=github.com/busser/label-operator +``` + +Next, we need to create a new controller. This controller will handle pods and +not a custom resource, so no need to generate the resource code.
Let's run this +command to scaffold the code we need: + +```bash +operator-sdk create api --group=core --version=v1 --kind=Pod --controller=true --resource=false +``` + +We now have a new file: `controllers/pod_controller.go`. This file contains a +`PodReconciler` type with two methods that we need to implement. The first is +`Reconcile`, and it looks like this for now: + +```go +func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = r.Log.WithValues("pod", req.NamespacedName) + + // your logic here + + return ctrl.Result{}, nil +} +``` + +The `Reconcile` method is called whenever a Pod is created, updated, or deleted. +The name and namespace of the Pod are in the `ctrl.Request` the method receives +as a parameter. + +The second method is `SetupWithManager` and for now it looks like this: + +```go +func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + // Uncomment the following line adding a pointer to an instance of the controlled resource as an argument + // For(). + Complete(r) +} +``` + +The `SetupWithManager` method is called when the operator starts. It serves to +tell the operator framework what types our `PodReconciler` needs to watch. To +use the same `Pod` type used by Kubernetes internally, we need to import some of +its code. All of the Kubernetes source code is open source, so you can import +any part you like in your own Go code. You can find a complete list of available +packages in the Kubernetes source code or [here on pkg.go.dev][pkg-go-dev]. To +use pods, we need the `k8s.io/api/core/v1` package. + +```go +package controllers + +import ( + // other imports... + corev1 "k8s.io/api/core/v1" + // other imports... +) +``` + +Lets use the `Pod` type in `SetupWithManager` to tell the operator framework we +want to watch pods: + +```go +func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). 
+ For(&corev1.Pod{}). + Complete(r) +} +``` + +Before moving on, we should set the RBAC permissions our controller needs. Above +the `Reconcile` method, we have some default permissions: + +```go +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=pods/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=update +``` + +We don't need all of those. Our controller will never interact with a Pod's +status or its finalizers. It only needs to read and update pods. Lets remove the +unnecessary permissions and keep only what we need: + +```go +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;update;patch +``` + +We are now ready to write our controller's reconciliation logic. + +## Implementing reconciliation + +Here is what we want our `Reconcile` method to do: + +1. Use the Pod's name and namespace from the `ctrl.Request` to fetch the Pod + from the Kubernetes API. +2. If the Pod has an `add-pod-name-label` annotation, add a `pod-name` label to + the Pod; if the annotation is missing, don't add the label. +3. Update the Pod in the Kubernetes API to persist the changes made. + +Lets define some constants for the annotation and label: + +```go +const ( + addPodNameLabelAnnotation = "padok.fr/add-pod-name-label" + podNameLabel = "padok.fr/pod-name" +) +``` + +The first step in our reconciliation function is to fetch the Pod we are working +on from the Kubernetes API: + +```go +// Reconcile handles a reconciliation request for a Pod. +// If the Pod has the addPodNameLabelAnnotation annotation, then Reconcile +// will make sure the podNameLabel label is present with the correct value. +// If the annotation is absent, then Reconcile will make sure the label is too. 
+func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("pod", req.NamespacedName) + + /* + Step 0: Fetch the Pod from the Kubernetes API. + */ + + var pod corev1.Pod + if err := r.Get(ctx, req.NamespacedName, &pod); err != nil { + log.Error(err, "unable to fetch Pod") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} +``` + +Our `Reconcile` method will be called when a Pod is created, updated, or +deleted. In the deletion case, our call to `r.Get` will return a specific error. +Let's import the package that defines this error: + +```go +package controllers + +import ( + // other imports... + apierrors "k8s.io/apimachinery/pkg/api/errors" + // other imports... +) +``` + +We can now handle this specific error and — since our controller does not care +about deleted pods — explicitly ignore it: + +```go + /* + Step 0: Fetch the Pod from the Kubernetes API. + */ + + var pod corev1.Pod + if err := r.Get(ctx, req.NamespacedName, &pod); err != nil { + if apierrors.IsNotFound(err) { + // we'll ignore not-found errors, since we can get them on deleted requests. + return ctrl.Result{}, nil + } + log.Error(err, "unable to fetch Pod") + return ctrl.Result{}, err + } +``` + +Next, lets edit our Pod so that our dynamic label is present if and only if our +annotation is present: + +```go + /* + Step 1: Add or remove the label. + */ + + labelShouldBePresent := pod.Annotations[addPodNameLabelAnnotation] == "true" + labelIsPresent := pod.Labels[podNameLabel] == pod.Name + + if labelShouldBePresent == labelIsPresent { + // The desired state and actual state of the Pod are the same. + // No further action is required by the operator at this moment. + log.Info("no update required") + return ctrl.Result{}, nil + } + + if labelShouldBePresent { + // If the label should be set but is not, set it. 
+ if pod.Labels == nil { + pod.Labels = make(map[string]string) + } + pod.Labels[podNameLabel] = pod.Name + log.Info("adding label") + } else { + // If the label should not be set but is, remove it. + delete(pod.Labels, podNameLabel) + log.Info("removing label") + } +``` + +Finally, let's push our updated Pod to the Kubernetes API: + +```go + /* + Step 2: Update the Pod in the Kubernetes API. + */ + + if err := r.Update(ctx, &pod); err != nil { + log.Error(err, "unable to update Pod") + return ctrl.Result{}, err + } +``` + +When writing our updated Pod to the Kubernetes API, there is a risk that the Pod +has been updated or deleted since we first read it. When writing a Kubernetes +controller, we should keep in mind that we are not the only actors in the +cluster. When this happens, the best thing to do is start the reconciliation +from scratch, by requeuing the event. Lets do exactly that: + +```go + /* + Step 2: Update the Pod in the Kubernetes API. + */ + + if err := r.Update(ctx, &pod); err != nil { + if apierrors.IsConflict(err) { + // The Pod has been updated since we read it. + // Requeue the Pod to try to reconciliate again. + return ctrl.Result{Requeue: true}, nil + } + if apierrors.IsNotFound(err) { + // The Pod has been deleted since we read it. + // Requeue the Pod to try to reconciliate again. + return ctrl.Result{Requeue: true}, nil + } + log.Error(err, "unable to update Pod") + return ctrl.Result{}, err + } +``` + +Let's remember to return successfully at the end of the method: + +```go + return ctrl.Result{}, nil +} +``` + +And that's it! We are now ready to run the controller on our cluster. + +## Run the controller on your cluster + +To run our controller on your cluster, we need to run the operator. For that, +all you will need is `kubectl`. If you don't have a Kubernetes cluster at hand, +I recommend you start one locally with [KinD (Kubernetes in Docker)][kind]. 
+ +All it takes to run the operator from your machine is this command: + +```bash +make run +``` + +After a few seconds, you should see the operator's logs. Notice that our +controller's `Reconcile` method was called for all pods already running in the +cluster. + +Let's keep the operator running and, in another terminal, create a new Pod: + +```bash +kubectl run --image=nginx my-nginx +``` + +The operator should quickly print some logs, indicating that it reacted to the +Pod's creation and subsequent changes in status: + +```text +INFO controllers.Pod no update required {"pod": "default/my-nginx"} +INFO controllers.Pod no update required {"pod": "default/my-nginx"} +INFO controllers.Pod no update required {"pod": "default/my-nginx"} +INFO controllers.Pod no update required {"pod": "default/my-nginx"} +``` + +Lets check the Pod's labels: + +```terminal +$ kubectl get pod my-nginx --show-labels +NAME READY STATUS RESTARTS AGE LABELS +my-nginx 1/1 Running 0 11m run=my-nginx +``` + +Let's add an annotation to the Pod so that our controller knows to add our +dynamic label to it: + +```bash +kubectl annotate pod my-nginx padok.fr/add-pod-name-label=true +``` + +Notice that the controller immediately reacted and produced a new line in its +logs: + +```text +INFO controllers.Pod adding label {"pod": "default/my-nginx"} +``` + +```terminal +$ kubectl get pod my-nginx --show-labels +NAME READY STATUS RESTARTS AGE LABELS +my-nginx 1/1 Running 0 13m padok.fr/pod-name=my-nginx,run=my-nginx +``` + +Bravo! You just successfully wrote a Kubernetes controller capable of adding +labels with dynamic values to resources in your cluster. + +Controllers and operators, both big and small, can be an important part of your +Kubernetes journey. Writing operators is easier now than it has ever been. The +possibilities are endless. + +## What next? + +If you want to go further, I recommend starting by deploying your controller or +operator inside a cluster. 
The `Makefile` generated by the Operator SDK will do +most of the work. + +When deploying an operator to production, it is always a good idea to implement +robust testing. The first step in that direction is to write unit tests. +[This documentation][operator-sdk-testing] will guide you in writing tests for +your operator. I wrote tests for the operator we just wrote; you can find all of +my code in [this GitHub repository][github-repo]. + +## How to learn more? + +The [Operator SDK documentation][operator-sdk-docs] goes into detail on how you +can go further and implement more complex operators. + +When modeling a more complex use-case, a single controller acting on built-in +Kubernetes types may not be enough. You may need to build a more complex +operator with [Custom Resource Definitions (CRDs)][custom-resource-definitions] +and multiple controllers. The Operator SDK is a great tool to help you do this. + +If you want to discuss building an operator, join the [#kubernetes-operator][slack-channel] +channel in the [Kubernetes Slack workspace][slack-workspace]! 
+ + + +[controllers]: https://kubernetes.io/docs/concepts/architecture/controller/ +[custom-resource-definitions]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/ +[kind]: https://kind.sigs.k8s.io/docs/user/quick-start/#installation +[github-repo]: https://github.com/busser/label-operator +[mutating-admission-webhook]: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook +[operator-sdk]: https://sdk.operatorframework.io/ +[operator-sdk-docs]: https://sdk.operatorframework.io/docs/ +[operator-sdk-installation]: https://sdk.operatorframework.io/docs/installation/ +[operator-sdk-testing]: https://sdk.operatorframework.io/docs/building-operators/golang/testing/ +[operatorhub]: https://operatorhub.io/ +[pkg-go-dev]: https://pkg.go.dev/k8s.io/api +[slack-channel]: https://kubernetes.slack.com/messages/kubernetes-operators +[slack-workspace]: https://slack.k8s.io/ +[statefulset-pod-name-label]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-name-label +[what-is-an-operator]: https://kubernetes.io/docs/concepts/extend-kubernetes/operator/ diff --git a/content/en/blog/_posts/2021-06-28-announcing-kubernetes-community-group-annual-reports/index.md b/content/en/blog/_posts/2021-06-28-announcing-kubernetes-community-group-annual-reports/index.md new file mode 100644 index 0000000000..e31484abdf --- /dev/null +++ b/content/en/blog/_posts/2021-06-28-announcing-kubernetes-community-group-annual-reports/index.md @@ -0,0 +1,49 @@ +--- +layout: blog +title: "Announcing Kubernetes Community Group Annual Reports" +description: > + Introducing brand new Kubernetes Community Group Annual Reports for + Special Interest Groups and Working Groups. 
+date: 2021-06-28T10:00:00-08:00 +slug: Announcing-Kubernetes-Community-Group-Annual-Reports +--- + +**Authors:** Divya Mohan + +{{< figure src="k8s_annual_report_2020.svg" alt="Community annual report 2020" link="https://www.cncf.io/reports/kubernetes-community-annual-report-2020/" >}} + +Given the growth and scale of the Kubernetes project, the existing reporting mechanisms were proving to be inadequate and challenging. +Kubernetes is a large open source project. With over 100000 commits just to the main k/kubernetes repository, hundreds of other code +repositories in the project, and thousands of contributors, there's a lot going on. In fact, there are 37 contributor groups at the time of +writing. We also value all forms of contribution and not just code changes. + +With that context in mind, the challenge of reporting on all this activity was a call to action for exploring better options. Therefore +inspired by the Apache Software Foundation’s [open guide to PMC Reporting](https://www.apache.org/foundation/board/reporting) and the +[CNCF project Annual Reporting](https://www.cncf.io/cncf-annual-report-2020/), the Kubernetes project is proud to announce the +**Kubernetes Community Group Annual Reports for Special Interest Groups (SIGs) and Working Groups (WGs)**. In its flagship edition, +the [2020 Summary report](https://www.cncf.io/reports/kubernetes-community-annual-report-2020/) focuses on bettering the +Kubernetes ecosystem by assessing and promoting the healthiness of the groups within the upstream community. + +Previously, the mechanisms for the Kubernetes project overall to report on groups and their activities were +[devstats](https://k8s.devstats.cncf.io/), GitHub data, issues, to measure the healthiness of a given UG/WG/SIG/Committee. As a +project spanning several diverse communities, it was essential to have something that captured the human side of things. 
With 50,000+ +contributors, it’s easy to assume that the project has enough help and this report surfaces more information than /help-wanted and +/good-first-issue for end users. This is how we sustain the project. Paraphrasing one of the Steering Committee members, +[Paris Pittman](https://github.com/parispittman), “There was a requirement for tighter feedback loops - ones that involved more than just +GitHub data and issues. Given that Kubernetes, as a project, has grown in scale and number of contributors over the years, we have +outgrown the existing reporting mechanisms." + +The existing communication channels between the Steering committee members and the folks leading the groups and committees were also required +to be made as open and as bi-directional as possible. Towards achieving this very purpose, every group and committee has been assigned a +liaison from among the steering committee members for kick off, help, or guidance needed throughout the process. According to +[Davanum Srinivas a.k.a. dims](https://github.com/dims), “... That was one of the main motivations behind this report. People (leading the +groups/committees) know that they can reach out to us and there’s a vehicle for them to reach out to us… This is our way of setting up a +two-way feedback for them." The progress on these action items would be updated and tracked on the monthly Steering Committee meetings +ensuring that this is not a one-off activity. Quoting [Nikhita Raghunath](https://github.com/nikhita), one of the Steering Committee members, +“... Once we have a base, the liaisons will work with these groups to ensure that the problems are resolved. When we have a report next year, +we’ll have a look at the progress made and how we could still do better. 
But the idea is definitely to not stop at the report.” + +With this report, we hope to empower our end user communities with information that they can use to identify ways in which they can support +the project as well as a sneak peek into the roadmap for upcoming features. As a community, we thrive on feedback and would love to hear your +views about the report. You can get in touch with the [Steering Committee](https://github.com/kubernetes/steering#contact) via +[Slack](https://kubernetes.slack.com/messages/steering-committee) or via the [mailing list](mailto:steering@kubernetes.io). diff --git a/content/en/blog/_posts/2021-06-28-announcing-kubernetes-community-group-annual-reports/k8s_annual_report_2020.svg b/content/en/blog/_posts/2021-06-28-announcing-kubernetes-community-group-annual-reports/k8s_annual_report_2020.svg new file mode 100644 index 0000000000..179201d13b --- /dev/null +++ b/content/en/blog/_posts/2021-06-28-announcing-kubernetes-community-group-annual-reports/k8s_annual_report_2020.svg @@ -0,0 +1,16130 @@ + + + + diff --git a/content/en/blog/_posts/2021-07-14-upcoming-changes-in-kubernetes-1-22/index.md b/content/en/blog/_posts/2021-07-14-upcoming-changes-in-kubernetes-1-22/index.md new file mode 100644 index 0000000000..6759bf4975 --- /dev/null +++ b/content/en/blog/_posts/2021-07-14-upcoming-changes-in-kubernetes-1-22/index.md @@ -0,0 +1,276 @@ +--- +layout: blog +title: "Kubernetes API and Feature Removals In 1.22: Here’s What You Need To Know" +date: 2021-07-14 +slug: upcoming-changes-in-kubernetes-1-22 +--- + +**Authors**: Krishna Kilari (Amazon Web Services), Tim Bannister (The Scale Factory) + +As the Kubernetes API evolves, APIs are periodically reorganized or upgraded. +When APIs evolve, the old APIs they replace are deprecated, and eventually removed. +See [Kubernetes API removals](#kubernetes-api-removals) to read more about Kubernetes' +policy on removing APIs. + +We want to make sure you're aware of some upcoming removals. 
These are +beta APIs that you can use in current, supported Kubernetes versions, +and they are already deprecated. The reason for all of these removals +is that they have been superseded by a newer, stable (“GA”) API. + +Kubernetes 1.22, due for release in August 2021, will remove a number of deprecated +APIs. +_Update_: +[Kubernetes 1.22: Reaching New Peaks](/blog/2021/08/04/kubernetes-1-22-release-announcement/) +has details on the v1.22 release. + +## API removals for Kubernetes v1.22 {#api-changes} + +The **v1.22** release will stop serving the API versions we've listed immediately below. +These are all beta APIs that were previously deprecated in favor of newer and more stable +API versions. + + +* Beta versions of the `ValidatingWebhookConfiguration` and `MutatingWebhookConfiguration` API (the **admissionregistration.k8s.io/v1beta1** API versions) +* The beta `CustomResourceDefinition` API (**apiextensions.k8s.io/v1beta1**) +* The beta `APIService` API (**apiregistration.k8s.io/v1beta1**) +* The beta `TokenReview` API (**authentication.k8s.io/v1beta1**) +* Beta API versions of `SubjectAccessReview`, `LocalSubjectAccessReview`, `SelfSubjectAccessReview` (API versions from **authorization.k8s.io/v1beta1**) +* The beta `CertificateSigningRequest` API (**certificates.k8s.io/v1beta1**) +* The beta `Lease` API (**coordination.k8s.io/v1beta1**) +* All beta `Ingress` APIs (the **extensions/v1beta1** and **networking.k8s.io/v1beta1** API versions) + +The Kubernetes documentation covers these +[API removals for v1.22](/docs/reference/using-api/deprecation-guide/#v1-22) and explains +how each of those APIs change between beta and stable. + +## What to do + +We're going to run through each of the resources that are affected by these removals +and explain the steps you'll need to take. 
+ +`Ingress` +: Migrate to use the **networking.k8s.io/v1** + [Ingress](/docs/reference/kubernetes-api/service-resources/ingress-v1/) API, + [available since v1.19](/blog/2020/08/26/kubernetes-release-1.19-accentuate-the-paw-sitive/#ingress-graduates-to-general-availability). + The related API [IngressClass](/docs/reference/kubernetes-api/service-resources/ingress-class-v1/) + is designed to complement the [Ingress](/docs/concepts/services-networking/ingress/) + concept, allowing you to configure multiple kinds of Ingress within one cluster. + If you're currently using the deprecated + [`kubernetes.io/ingress.class`](https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetes-io-ingress-class-deprecated) + annotation, plan to switch to using the `.spec.ingressClassName` field instead. + On any cluster running Kubernetes v1.19 or later, you can use the v1 API to + retrieve or update existing Ingress objects, even if they were created using an + older API version. + + When you convert an Ingress to the v1 API, you should review each rule in that Ingress. + Older Ingresses use the legacy `ImplementationSpecific` path type. Instead of `ImplementationSpecific`, switch [path matching](/docs/concepts/services-networking/ingress/#path-types) to either `Prefix` or `Exact`. One of the benefits of moving to these alternative path types is that it becomes easier to migrate between different Ingress classes. + + **ⓘ** As well as upgrading _your_ own use of the Ingress API as a client, make sure that + every ingress controller that you use is compatible with the v1 Ingress API. + Read [Ingress Prerequisites](/docs/concepts/services-networking/ingress/#prerequisites) + for more context about Ingress and ingress controllers. 
+ +`ValidatingWebhookConfiguration` and `MutatingWebhookConfiguration` +: Migrate to use the **admissionregistration.k8s.io/v1** API versions of + [ValidatingWebhookConfiguration](/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1/) + and [MutatingWebhookConfiguration](/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1/), + available since v1.16. + You can use the v1 API to retrieve or update existing objects, even if they were created using an older API version. + +`CustomResourceDefinition` +: Migrate to use the [CustomResourceDefinition](/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1/) + **apiextensions.k8s.io/v1** API, available since v1.16. + You can use the v1 API to retrieve or update existing objects, even if they were created + using an older API version. If you defined any custom resources in your cluster, those + are still served after you upgrade. + + If you're using external CustomResourceDefinitions, you can use + [`kubectl convert`](#kubectl-convert) to translate existing manifests to use the newer API. + Because there are some functional differences between beta and stable CustomResourceDefinitions, + our advice is to test out each one to make sure it works how you expect after the upgrade. + +`APIService` +: Migrate to use the **apiregistration.k8s.io/v1** [APIService](/docs/reference/kubernetes-api/cluster-resources/api-service-v1/) + API, available since v1.10. + You can use the v1 API to retrieve or update existing objects, even if they were created using an older API version. + If you already have API aggregation using an APIService object, this aggregation continues + to work after you upgrade. + +`TokenReview` +: Migrate to use the **authentication.k8s.io/v1** [TokenReview](/docs/reference/kubernetes-api/authentication-resources/token-review-v1/) + API, available since v1.10. 
+ + As well as serving this API via HTTP, the Kubernetes API server uses the same format to + [send](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication) + TokenReviews to webhooks. The v1.22 release continues to use the v1beta1 API for TokenReviews + sent to webhooks by default. See [Looking ahead](#looking-ahead) for some specific tips about + switching to the stable API. + +`SubjectAccessReview`, `SelfSubjectAccessReview` and `LocalSubjectAccessReview` +: Migrate to use the **authorization.k8s.io/v1** versions of those + [authorization APIs](/docs/reference/kubernetes-api/authorization-resources/), available since v1.6. + +`CertificateSigningRequest` +: Migrate to use the **certificates.k8s.io/v1** + [CertificateSigningRequest](/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1/) + API, available since v1.19. + You can use the v1 API to retrieve or update existing objects, even if they were created + using an older API version. Existing issued certificates retain their validity when you upgrade. + +`Lease` +: Migrate to use the **coordination.k8s.io/v1** [Lease](/docs/reference/kubernetes-api/cluster-resources/lease-v1/) + API, available since v1.14. + You can use the v1 API to retrieve or update existing objects, even if they were created + using an older API version. + +### `kubectl convert` + +There is a plugin to `kubectl` that provides the `kubectl convert` subcommand. +It's an official plugin that you can download as part of Kubernetes. +See [Download Kubernetes](/releases/download/) for more details. + +You can use `kubectl convert` to update manifest files to use a different API +version. For example, if you have a manifest in source control that uses the beta +Ingress API, you can check that definition out, +and run +`kubectl convert -f <file> --output-version <group>/<version>`. +You can use the `kubectl convert` command to automatically convert an +existing manifest. 
+ +For example, to convert an older Ingress definition to +`networking.k8s.io/v1`, you can run: +```bash +kubectl convert -f ./legacy-ingress.yaml --output-version networking.k8s.io/v1 +``` + +The automatic conversion uses a similar technique to how the Kubernetes control plane +updates objects that were originally created using an older API version. Because it's +a mechanical conversion, you might need to go in and change the manifest to adjust +defaults etc. + +### Rehearse for the upgrade + +If you manage your cluster's API server component, you can try out these API +removals before you upgrade to Kubernetes v1.22. + +To do that, add the following to the kube-apiserver command line arguments: + +`--runtime-config=admissionregistration.k8s.io/v1beta1=false,apiextensions.k8s.io/v1beta1=false,apiregistration.k8s.io/v1beta1=false,authentication.k8s.io/v1beta1=false,authorization.k8s.io/v1beta1=false,certificates.k8s.io/v1beta1=false,coordination.k8s.io/v1beta1=false,extensions/v1beta1/ingresses=false,networking.k8s.io/v1beta1=false` + +(as a side effect, this also turns off v1beta1 of EndpointSlice - watch out for +that when you're testing). + +Once you've switched all the kube-apiservers in your cluster to use that setting, +those beta APIs are removed. You can test that API clients (`kubectl`, deployment +tools, custom controllers etc) still work how you expect, and you can revert if +you need to without having to plan a more disruptive downgrade. + + + +### Advice for software authors + +Maybe you're reading this because you're a developer of an addon or other +component that integrates with Kubernetes? + +If you develop an Ingress controller, webhook authenticator, an API aggregation, or +any other tool that relies on these deprecated APIs, you should already have started +to switch your software over. 
+ +You can use the tips in +[Rehearse for the upgrade](#rehearse-for-the-upgrade) to run your own Kubernetes +cluster that only uses the new APIs, and make sure that your code works OK. +For your documentation, make sure readers are aware of any steps they should take +for the Kubernetes v1.22 upgrade. + +Where possible, give your users a hand to adopt the new APIs early - perhaps in a +test environment - so they can give you feedback about any problems. + +There are some [more deprecations](#looking-ahead) coming in Kubernetes v1.25, +so plan to have those covered too. + +## Kubernetes API removals + +Here's some background about why Kubernetes removes some APIs, and also a promise +about _stable_ APIs in Kubernetes. + +Kubernetes follows a defined +[deprecation policy](/docs/reference/using-api/deprecation-policy/) for its +features, including the Kubernetes API. That policy allows for replacing stable +(“GA”) APIs from Kubernetes. Importantly, this policy means that a stable API can only +be deprecated when a newer stable version of that same API is available. + +That stability guarantee matters: if you're using a stable Kubernetes API, there +won't ever be a new version released that forces you to switch to an alpha or beta +feature. + +Earlier stages are different. Alpha features are under test and potentially +incomplete. Almost always, alpha features are disabled by default. +Kubernetes releases can and do remove alpha features that haven't worked out. + +After alpha, comes beta. These features are typically enabled by default; if the +testing works out, the feature can graduate to stable. If not, it might need +a redesign. + +Last year, Kubernetes officially +[adopted](/blog/2020/08/21/moving-forward-from-beta/#avoiding-permanent-beta) +a policy for APIs that have reached their beta phase: + +> For Kubernetes REST APIs, when a new feature's API reaches beta, that starts +> a countdown. 
The beta-quality API now has three releases … +> to either: +> +> * reach GA, and deprecate the beta, or +> * have a new beta version (and deprecate the previous beta). + +_At the time of that article, three Kubernetes releases equated to roughly nine +calendar months. Later that same month, Kubernetes +adopted a new +release cadence of three releases per calendar year, so the countdown period is +now roughly twelve calendar months._ + +Whether an API removal is because of a beta feature graduating to stable, or +because that API hasn't proved successful, Kubernetes will continue to remove +APIs by following its deprecation policy and making sure that migration options +are documented. + +### Looking ahead + +There's a setting that's relevant if you use webhook authentication checks. +A future Kubernetes release will switch to sending TokenReview objects +to webhooks using the `authentication.k8s.io/v1` API by default. At the moment, +the default is to send `authentication.k8s.io/v1beta1` TokenReviews to webhooks, +and that's still the default for Kubernetes v1.22. +However, you can switch over to the stable API right now if you want: +add `--authentication-token-webhook-version=v1` to the command line options for +the kube-apiserver, and check that webhooks for authentication still work how you +expected. + +Once you're happy it works OK, you can leave the `--authentication-token-webhook-version=v1` +option set across your control plane. + +The **v1.25** release that's planned for next year will stop serving beta versions of +several Kubernetes APIs that are stable right now and have been for some time. +The same v1.25 release will **remove** PodSecurityPolicy, which is deprecated and won't +graduate to stable. See +[PodSecurityPolicy Deprecation: Past, Present, and Future](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/) +for more information. 
+ +The official [list of API removals](/docs/reference/using-api/deprecation-guide/#v1-25) +planned for Kubernetes 1.25 is: + +* The beta `CronJob` API (**batch/v1beta1**) +* The beta `EndpointSlice` API (**networking.k8s.io/v1beta1**) +* The beta `PodDisruptionBudget` API (**policy/v1beta1**) +* The beta `PodSecurityPolicy` API (**policy/v1beta1**) + +## Want to know more? + +Deprecations are announced in the Kubernetes release notes. You can see the announcements +of pending deprecations in the release notes for +[1.19](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md#deprecations), +[1.20](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.20.md#deprecation), +and [1.21](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.21.md#deprecation). + +For information on the process of deprecation and removal, check out the official Kubernetes +[deprecation policy](/docs/reference/using-api/deprecation-policy/#deprecating-parts-of-the-api) +document. diff --git a/content/en/blog/_posts/2021-07-15-SIG-Usability-Spotlight.md b/content/en/blog/_posts/2021-07-15-SIG-Usability-Spotlight.md new file mode 100644 index 0000000000..43488fc11f --- /dev/null +++ b/content/en/blog/_posts/2021-07-15-SIG-Usability-Spotlight.md @@ -0,0 +1,65 @@ +--- +layout: blog +title: "Spotlight on SIG Usability" +date: 2021-07-15 +slug: sig-usability-spotlight-2021 +--- + +**Author:** Kunal Kushwaha, Civo + +## Introduction + +Are you interested in learning about what [SIG Usability](https://github.com/kubernetes/community/tree/master/sig-usability) does and how you can get involved? Well, you're at the right place. SIG Usability is all about making Kubernetes more accessible to new folks, and its main activity is conducting user research for the community. 
In this blog, we have summarized our conversation with [Gaby Moreno](https://twitter.com/morengab), who walks us through the various aspects of being a part of the SIG and shares some insights about how others can get involved. + +Gaby is a co-lead for SIG Usability. She works as a Product Designer at IBM and enjoys working on the user experience of open, hybrid cloud technologies like Kubernetes, OpenShift, Terraform, and Cloud Foundry. + +## A summary of our conversation + +### Q. Could you tell us a little about what SIG Usability does? + +A. SIG Usability at a high level started because there was no dedicated user experience team for Kubernetes. The extent of SIG Usability is focussed on the end-client ease of use of the Kubernetes project. The main activity is user research for the community, which includes speaking to Kubernetes users. + +This covers points like user experience and accessibility. The objectives of the SIG are to guarantee that the Kubernetes project is maximally usable by people of a wide range of foundations and capacities, such as incorporating internationalization and ensuring the openness of documentation. + +### Q. Why should new and existing contributors consider joining SIG Usability? + +A. There are plenty of territories where new contributors can begin. For example: +- User research projects, where people can help understand the usability of the end-user experiences, including error messages, end-to-end tasks, etc. +- Accessibility guidelines for Kubernetes community artifacts, examples include: internationalization of documentation, color choices for people with color blindness, ensuring compatibility with screen reader technology, user interface design for core components with user interfaces, and more. + +### Q. What do you do to help new contributors get started? + +A. New contributors can get started by shadowing one of the user interviews, going through user interview transcripts, analyzing them, and designing surveys. 
+ +SIG Usability is also open to new project ideas. If you have an idea, we’ll do what we can to support it. There are regular SIG Meetings where people can ask their questions live. These meetings are also recorded for those who may not be able to attend. As always, you can reach out to us on Slack as well. + +### Q. What does the survey include? + +A. In simple terms, the survey gathers information about how people use Kubernetes, such as trends in learning to deploy a new system, error messages they receive, and workflows. + +One of our goals is to standardize the responses accordingly. The ultimate goal is to analyze survey responses for important user stories whose needs aren't being met. + +### Q. Are there any particular skills you’d like to recruit for? What skills are contributors to SIG Usability likely to learn? + +A. Although contributing to SIG Usability does not have any pre-requisites as such, experience with user research, qualitative research, or prior experience with how to conduct an interview would be great plus points. Quantitative research, like survey design and screening, is also helpful and something that we expect contributors to learn. + +### Q. What are you getting positive feedback on, and what’s coming up next for SIG Usability? + +A. We have had new members joining and coming to monthly meetings regularly and showing interests in becoming a contributor and helping the community. We have also had a lot of people reach out to us via Slack showcasing their interest in the SIG. + +Currently, we are focused on finishing the study mentioned in our [talk](https://www.youtube.com/watch?v=Byn0N_ZstE0), also our project for this year. We are always happy to have new contributors join us. + +### Q: Any closing thoughts/resources you’d like to share? + +A. We love meeting new contributors and assisting them in investigating different Kubernetes project spaces. 
We will work with and team up with other SIGs to facilitate engaging with end-users, running studies, and help them integrate accessible design practices into their development practices. + +Here are some resources for you to get started: +- [GitHub](https://github.com/kubernetes/community/tree/master/sig-usability) +- [Mailing list](https://groups.google.com/g/kubernetes-sig-usability) +- [Open Community Issues/PRs](https://github.com/kubernetes/community/labels/sig%2Fusability) +- [Slack](https://slack.k8s.io/) +- [Slack channel #sig-usability](https://kubernetes.slack.com/archives/CLC5EF63T) + +## Wrap Up + +SIG Usability hosted a [KubeCon talk](https://www.youtube.com/watch?v=Byn0N_ZstE0) about studying Kubernetes users' experiences. The talk focuses on updates to the user study projects, understanding who is using Kubernetes, what they are trying to achieve, how the project is addressing their needs, and where we need to improve the project and the client experience. Join the SIG's update to find out about the most recent research results, what the plans are for the forthcoming year, and how to get involved in the upstream usability team as a contributor! diff --git a/content/en/blog/_posts/2021-07-20-Kubernetes-Release-Cadence/index.md b/content/en/blog/_posts/2021-07-20-Kubernetes-Release-Cadence/index.md new file mode 100644 index 0000000000..444b99a934 --- /dev/null +++ b/content/en/blog/_posts/2021-07-20-Kubernetes-Release-Cadence/index.md @@ -0,0 +1,83 @@ +--- +layout: blog +title: "Kubernetes Release Cadence Change: Here’s What You Need To Know" +date: 2021-07-20 +slug: new-kubernetes-release-cadence +--- + +**Authors**: Celeste Horgan, Adolfo García Veytia, James Laverack, Jeremy Rickard + +On April 23, 2021, the Release Team merged a Kubernetes Enhancement Proposal (KEP) changing the Kubernetes release cycle from four releases a year (once a quarter) to three releases a year. 
+ +This blog post provides a high level overview about what this means for the Kubernetes community's contributors and maintainers. + +## What's changing and when + +Starting with the [Kubernetes 1.22 release](https://github.com/kubernetes/sig-release/tree/master/releases/release-1.22), a lightweight policy will drive the creation of each release schedule. This policy states: + +* The first Kubernetes release of a calendar year should start at the second or third + week of January to provide people more time for contributors coming back from the + end of year holidays. +* The last Kubernetes release of a calendar year should be finished by the middle of + December. +* A Kubernetes release cycle has a length of approximately 15 weeks. +* The week of KubeCon + CloudNativeCon is not considered a 'working week' for SIG Release. The Release Team will not hold meetings or make decisions in this period. +* An explicit SIG Release break of at least two weeks between each cycle will + be enforced. + +As a result, Kubernetes will follow a three releases per year cadence. Kubernetes 1.23 will be the final release of the 2021 calendar year. 
This new policy results in a very predictable release schedule, allowing us to forecast upcoming release dates: + + +*Proposed Kubernetes Release Schedule for the remainder of 2021* + +| Week Number in Year | Release Number | Release Week | Note | +| -------- | -------- | -------- | -------- | +| 35 | 1.23 | 1 (August 23) | | +| 50 | 1.23 | 16 (December 07) | KubeCon + CloudNativeCon NA Break (Oct 11-15) | + +*Proposed Kubernetes Release Schedule for 2022* + +| Week Number in Year | Release Number | Release Week | Note | +| -------- | -------- | -------- | -------- | +| 1 | 1.24 | 1 (January 03) | | +| 15 | 1.24 | 15 (April 12) | | +| 17 | 1.25 | 1 (April 26) | KubeCon + CloudNativeCon EU likely to occur | +| 32 | 1.25 | 15 (August 09) | | +| 34 | 1.26 | 1 (August 22) | KubeCon + CloudNativeCon NA likely to occur | +| 49 | 1.26 | 14 (December 06) | | + +These proposed dates reflect only the start and end dates, and they are subject to change. The Release Team will select dates for enhancement freeze, code freeze, and other milestones at the start of each release. For more information on these milestones, please refer to the [release phases](https://www.k8s.dev/resources/release/#phases) documentation. Feedback from prior releases will feed into this process. + +## What this means for end users + +The major change end users will experience is a slower release cadence and a slower rate of enhancement graduation. Kubernetes release artifacts, release notes, and all other aspects of any given release will stay the same. + +Prior to this change an enhancement could graduate from alpha to stable in 9 months. With the change in cadence, this will stretch to 12 months. Additionally, graduation of features over the last few releases has in some part been driven by release team activities. + +With fewer releases, users can expect to see the rate of feature graduation slow. 
Users can also expect releases to contain a larger number of enhancements that they need to be aware of during upgrades. However, with fewer releases to consume per year, it's intended that end user organizations will spend less time on upgrades and gain more time on supporting their Kubernetes clusters. It also means that Kubernetes releases are in support for a slightly longer period of time, so bug fixes and security patches will be available for releases for a longer period of time. + + +## What this means for Kubernetes contributors + +With a lower release cadence, contributors have more time for project enhancements, feature development, planning, and testing. A slower release cadence also provides more room for maintaining their mental health, preparing for events like KubeCon + CloudNativeCon or work on downstream integrations. + + +## Why we decided to change the release cadence + +The Kubernetes 1.19 cycle was far longer than usual. SIG Release extended it to lessen the burden on both Kubernetes contributors and end users due to the COVID-19 pandemic. Following this extended release, the Kubernetes 1.20 release became the third, and final, release for 2020. + +As the Kubernetes project matures, the number of enhancements per cycle grows, along with the burden on contributors and the Release Engineering team. Downstream consumers and integrators also face increased challenges keeping up with [ever more feature-packed releases](https://kubernetes.io/blog/2021/04/08/kubernetes-1-21-release-announcement/). A wider project adoption means the complexity of supporting a rapidly evolving platform affects a bigger downstream chain of consumers. 
+ +Changing the release cadence from four to three releases per year balances a variety of factors for stakeholders: while it's not strictly an LTS policy, consumers and integrators will get longer support terms for each minor version as the extended release cycles lead to the [previous three releases being supported](https://kubernetes.io/blog/2020/08/31/kubernetes-1-19-feature-one-year-support/) for a longer period. Contributors get more time to [mature enhancements](https://www.cncf.io/blog/2021/04/12/enhancing-the-kubernetes-enhancements-process/) and [get them ready for production](https://github.com/kubernetes/community/blob/master/sig-architecture/production-readiness.md). + +Finally, the management overhead for SIG Release and the Release Engineering team diminishes allowing the team to spend more time on improving the quality of the software releases and the tooling that drives them. + +## How you can help + +Join the [discussion](https://github.com/kubernetes/sig-release/discussions/1566) about communicating future release dates and be sure to be on the lookout for post release surveys. 
+ +## Where you can find out more + +- Read the KEP [here](https://github.com/kubernetes/enhancements/tree/master/keps/sig-release/2572-release-cadence) +- Join the [kubernetes-dev](https://groups.google.com/g/kubernetes-dev) mailing list +- Join [Kubernetes Slack](https://slack.k8s.io) and follow the #announcements channel diff --git a/content/en/blog/_posts/2021-07-26-update-with-ingress-nginx.md b/content/en/blog/_posts/2021-07-26-update-with-ingress-nginx.md new file mode 100644 index 0000000000..761b0b0575 --- /dev/null +++ b/content/en/blog/_posts/2021-07-26-update-with-ingress-nginx.md @@ -0,0 +1,71 @@ +--- +layout: blog +title: 'Updating NGINX-Ingress to use the stable Ingress API' +date: 2021-07-26 +slug: update-with-ingress-nginx +--- + +**Authors:** James Strong, Ricardo Katz + +With all Kubernetes APIs, there is a process for creating, maintaining, and +ultimately deprecating them once they become GA. The networking.k8s.io API group is no +different. The upcoming Kubernetes 1.22 release will remove several deprecated APIs +that are relevant to networking: + +- the `networking.k8s.io/v1beta1` API version of [IngressClass](/docs/concepts/services-networking/ingress/#ingress-class) +- all beta versions of [Ingress](/docs/concepts/services-networking/ingress/): `extensions/v1beta1` and `networking.k8s.io/v1beta1` + +On a v1.22 Kubernetes cluster, you'll be able to access Ingress and IngressClass +objects through the stable (v1) APIs, but access via their beta APIs won't be possible. +This change has been +in discussion since +[2017](https://github.com/kubernetes/kubernetes/issues/43214), +[2019](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) with +1.16 Kubernetes API deprecations, and most recently in +KEP-1453: +[Graduate Ingress API to GA](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/1453-ingress-api#122). 
+ +During community meetings, the networking Special Interest Group has decided to continue +supporting Kubernetes versions older than 1.22 with Ingress-NGINX version 0.47.0. +Support for Ingress-NGINX will continue for six months after Kubernetes 1.22 +is released. Any additional bug fixes and CVEs for Ingress-NGINX will be +addressed on an as-needed basis. + +The Ingress-NGINX project will have separate branches and releases to +support this model, mirroring the Kubernetes project process. Future +releases of the Ingress-NGINX project will track and support the latest +versions of Kubernetes. + +{{< table caption="Ingress NGINX supported version with Kubernetes Versions" >}} +Kubernetes version | Ingress-NGINX version | Notes +:-------------------|:----------------------|:------------ +v1.22 | v1.0.0-alpha.2 | New features, plus bug fixes. +v1.21 | v0.47.x | Bugfixes only, and just for security issues or crashes. No end-of-support date announced. +v1.20 | v0.47.x | Bugfixes only, and just for security issues or crashes. No end-of-support date announced. +v1.19 | v0.47.x | Bugfixes only, and just for security issues or crashes. Fixes only provided until 6 months after Kubernetes v1.22.0 is released. +{{< /table >}} + +Because of the updates in Kubernetes 1.22, **v0.47.0** will not work with +Kubernetes 1.22. + +# What you need to do + +The team is currently in the process of upgrading ingress-nginx to support +the v1 migration; you can track the progress +[here](https://github.com/kubernetes/ingress-nginx/pull/7156). +We're not making feature improvements to `ingress-nginx` until after the support for +Ingress v1 is complete. 
+ +In the meantime, to ensure no compatibility issues: + +* Update to the latest version of Ingress-NGINX; currently + [v0.47.0](https://github.com/kubernetes/ingress-nginx/releases/tag/controller-v0.47.0) +* After Kubernetes 1.22 is released, ensure you are using the latest version of + Ingress-NGINX that supports the stable APIs for Ingress and IngressClass. +* Test Ingress-NGINX version v1.0.0-alpha.2 with cluster versions >= 1.19 + and report any issues to the project's GitHub page. + +The community’s feedback and support in this effort are welcome. The +Ingress-NGINX Sub-project regularly holds community meetings where we discuss +this and other issues facing the project. For more information on the sub-project, +please see [SIG Network](https://github.com/kubernetes/community/tree/master/sig-network). diff --git a/content/en/blog/_posts/2021-07-29-kubernetes-1-21-release-interview.md b/content/en/blog/_posts/2021-07-29-kubernetes-1-21-release-interview.md new file mode 100644 index 0000000000..25de412b81 --- /dev/null +++ b/content/en/blog/_posts/2021-07-29-kubernetes-1-21-release-interview.md @@ -0,0 +1,231 @@ +--- +layout: blog +title: "Roorkee robots, releases and racing: the Kubernetes 1.21 release interview" +date: 2021-07-29 +--- + +**Author**: Craig Box (Google) + +With Kubernetes 1.22 due out next week, now is a great time to look back on 1.21. The release team for that version was led by [Nabarun Pal](https://twitter.com/theonlynabarun) from VMware. + +Back in April I [interviewed Nabarun](https://kubernetespodcast.com/episode/146-kubernetes-1.21/) on the weekly [Kubernetes Podcast from Google](https://kubernetespodcast.com/); the latest in a series of release lead conversations that started back with 1.11, not long after the show started back in 2018. + +In these interviews we learn a little about the release, but also about the process behind it, and the story behind the person chosen to lead it. 
Getting to know a community member is my favourite part of the show each week, and so I encourage you to [subscribe wherever you get your podcasts](https://kubernetespodcast.com/subscribe/). With a release coming next week, you can probably guess what our next topic will be! + +*This transcript has been edited and condensed for clarity.* + +--- + +**CRAIG BOX: You have a Bachelor of Technology in Metallurgical and Materials Engineering. How are we doing at turning lead into gold?** + +NABARUN PAL: Well, last I checked, we have yet to find the philosopher's stone! + +**CRAIG BOX: One of the more important parts of the process?** + +NABARUN PAL: We're not doing that well in terms of getting alchemists up and running. There is some improvement in nuclear technology, where you can turn lead into gold, but I would guess buying gold would be much more efficient. + +**CRAIG BOX: Or Bitcoin? It depends what you want to do with the gold.** + +NABARUN PAL: Yeah, seeing the increasing prices of Bitcoin, you'd probably prefer to bet on that. But, don't take this as a suggestion. I'm not a registered investment advisor, and I don't give investment advice! + +**CRAIG BOX: But you are, of course, a trained materials engineer. How did you get into that line of education?** + +NABARUN PAL: We had a graded and equated exam structure, where you sit a single exam, and then based on your performance in that exam, you can try any of the universities which take those scores into account. I went to the Indian Institute of Technology, Roorkee. + +Materials engineering interested me a lot. I had a passion for computer science since childhood, but I also liked material science, so I wanted to explore that field. I did a lot of exploration around material science and metallurgy in my freshman and sophomore years, but then computing, since it was a passion, crept into the picture. + +**CRAIG BOX: Let's dig in there a little bit. 
What did computing look like during your childhood?** + +NABARUN PAL: It was a very interesting journey. I started exploring computers back when I was seven or eight. For my first programming language, if you call it a programming language, I explored LOGO. + +You have a turtle on the screen, and you issue commands to it, like move forward or rotate or pen up or pen down. You basically draw geometric figures. I could visually see how I could draw a square and how I could draw a triangle. It was an interesting journey after that. I learned BASIC, then went to some amount of HTML, JavaScript. + +**CRAIG BOX: It's interesting to me because Logo and BASIC were probably my first two programming languages, but I think there was probably quite a gap in terms of when HTML became a thing after those two! Did your love of computing always lead you down the path towards programming, or were you interested as a child in using computers for games or application software? What led you specifically into programming?** + +NABARUN PAL: Programming came in late. Not just in computing, but in life, I'm curious with things. When my parents got me my first computer, I was curious. I was like, "how does this operating system work?" What even is running it? Using a television and using a computer is a different experience, but usability is kind of the same thing. The HCI device for a television is a remote, whereas with a computer, I had a keyboard and a mouse. I used to tinker with the box and reinstall operating systems. + +We used to get magazines back then. They used to bundle OpenSuse or Debian, and I used to install them. It was an interesting experience, 15 years back, how Linux used to be. I have been a tinkerer all around, and that's what eventually led me to programming. + +**CRAIG BOX: With an interest in both the physical and ethereal aspects of technology, you did a lot of robotics challenges during university. 
That's something that I am not surprised to hear from someone who has a background in Logo, to be honest. There's Mindstorms, and a lot of other technology that is based around robotics that a lot of LOGO people got into. How was that something that came about for you?** + +NABARUN PAL: When I joined my university, apart from studying materials, one of the things they used to really encourage was to get involved in a lot of extracurricular activities. One which interested me was robotics. I joined [my college robotics team](https://github.com/marsiitr) and participated in a lot of challenges. + +Predominantly, we used to participate in this competition called [ABU Robocon](https://en.wikipedia.org/wiki/ABU_Robocon), which is an event conducted by the Asia-Pacific Broadcasting Union. What they used to do was, every year, one of the participating countries in the contest would provide a problem statement. For example, one year, they asked us to build a badminton-playing robot. They asked us to build a rugby playing robot or a Frisbee thrower, and there are some interesting problem statements around the challenge: you can't do this. You can't do that. Weight has to be like this. Dimensions have to be like that. + +I got involved in that, and most of my time at university, I used to spend there. Material science became kind of a backburner for me, and my hobby became my full time thing. + +**CRAIG BOX: And you were not only involved there in terms of the project and contributions to it, but you got involved as a secretary of the team, effectively, doing a lot of the organization, which is a thread that will come up as we speak about Kubernetes.** + +NABARUN PAL: Over the course of time, when I gained more knowledge into how the team works, it became very natural that I graduated up the ladder and then managed juniors. I became the joint secretary of the robotics club in our college. 
This was more of a broad, engaging role in evangelizing robotics at the university, to promote events, to help students to see the value in learning robotics - what you gain out of that mechanically or electronically, or how do you develop your logic by programming robots. + +**CRAIG BOX: Your first job after graduation was working at a company called Algoshelf, but you were also an intern there while you were at school?** + +NABARUN PAL: Algoshelf was known as Rorodata when I joined them as an intern. This was also an interesting opportunity for me in the sense that I was always interested in writing programs which people would use. One of the things that I did there was build an open source Function as a Service framework, if I may call it that - it was mostly turning Python functions into web servers without even writing any code. The interesting bit there was that it was targeted toward data scientists, and not towards programmers. We had to understand the pain of data scientists, that they had to learn a lot of programming in order to even deploy their machine learning models, and we wanted to solve that problem. + +They offered me a job after my internship, and I kept on working for them after I graduated from university. There, I got introduced to Kubernetes, so we pivoted into a product structure where the very same thing I told you, the Functions as a Service thing, could be deployed in Kubernetes. I was exploring Kubernetes to use it as a scalable platform. Instead of managing pets, we wanted to manage cattle, as in, we wanted to have a very highly distributed architecture. + +**CRAIG BOX: Not actual cattle. I've been to India. There are a lot of cows around.** + +NABARUN PAL: Yeah, not actual cattle. That is a bit tough. + +**CRAIG BOX: When Algoshelf were looking at picking up Kubernetes, what was the evaluation process like? Were you looking at other tools at the time? 
Or had enough time passed that Kubernetes was clearly the platform that everyone was going to use?** + +NABARUN PAL: Algoshelf was a natural evolution. Before Kubernetes, we used to deploy everything on a single big AWS server, using systemd. Everything was a systemd service, and everything was deployed using Fabric. Fabric is a Python package which essentially is like Ansible, but much leaner, as it does not have all the shims and things that Ansible has. + +Then we asked "what if we need to scale out to different machines?" Kubernetes was in the hype. We hopped onto the hype train to see whether Kubernetes was worth it for us. And that's where my journey started, exploring the ecosystem, exploring the community. How can we improve the community in essence? + +**CRAIG BOX: A couple of times now you've mentioned as you've grown in a role, becoming part of the organization and the arranging of the group. You've talked about working in Python. You had submitted some talks to Pycon India. And I understand you're now a tech lead for that conference. What does the tech community look like in India and how do you describe your involvement in it?** + +NABARUN PAL: My involvement with the community began when I was at university. When I was working as an intern at Algoshelf, I was introduced to this-- I never knew about PyCon India, or tech conferences in general. + +The person that I was working with just asked me, like hey, did you submit a talk to PyCon India? It's very useful, the library that we were making. So I [submitted a talk](https://www.nabarun.in/talk/2017/pyconindia/#1) to PyCon India in 2017. Eventually the talk got selected. That was not my first speaking opportunity, it was my second. I also spoke at PyData Delhi on a similar thing that I worked on in my internship. + +It has been a journey since then. 
I talked about the same thing at FOSSASIA Summit in Singapore, and got really involved with the Python community because it was what I used to work on back then. + +After giving all those talks at conferences, I got also introduced to this amazing group called [dgplug](https://dgplug.org/), which is an acronym for the Durgapur Linux Users Group. It is a group started in-- I don't remember the exact year, but it was around 12 to 13 years back, by someone called Kushal Das, with the ideology of [training students into being better open source contributors](https://foss.training/). + +I liked the idea and got involved in teaching last year. It is not limited to students. Professionals can also join in. It's about making anyone better at upstream contributions, making things sustainable. I started training people on Vim, on how to use text editors, so they are more efficient and productive. In general life, text editors are a really good tool. + +The other thing was the shell. How do you navigate around the Linux shell and command line? That has been a fun experience. + +**CRAIG BOX: It's very interesting to think about that, because my own involvement with a Linux User Group was probably around the year 2000. And back then we were teaching people how to install things-- Linux on CD was kinda new at that point in time. There was a lot more of, what is this new thing and how do we get involved? When the internet took off around that time, all of that stuff moved online - you no longer needed to go meet a group of people in a room to talk about Linux. And I haven't really given much thought to the concept of a LUG since then, but it's great to see it having turned into something that's now about contributing, rather than just about how you get things going for yourself.** + +NABARUN PAL: Exactly. So as I mentioned earlier, my journey into Linux was installing SUSE from DVDs that came bundled with magazines. 
Back then it was a pain installing things because you did not get any instructions. There has certainly been a paradigm shift now. People are more open to reading instructions online, downloading ISOs, and then just installing them. So we really don't need to do that as part of LUGs. + +We have shifted more towards enabling people to contribute to whichever project that they use. For example, if you're using Fedora, contribute to Fedora; make things better. It's just about giving back to the community in any way possible. + +**CRAIG BOX: You're also involved in the [Kubernetes Bangalore meetup group](https://www.meetup.com/Bangalore-Kubernetes-Meetup/). Does that group have a similar mentality?** + +NABARUN PAL: The Kubernetes Bangalore meetup group is essentially focused towards spreading the knowledge of Kubernetes and the aligned products in the ecosystem, whatever there is in the Cloud Native Landscape, in various ways. For example, to evangelize about using them in your company or how people use them in existing ways. + +So a few months back in February, we did something like a [Kubernetes contributor workshop](https://www.youtube.com/watch?v=FgsXbHBRYIc). It was one of its kind in India. It was the first one if I recall correctly. We got a lot of traction and community members interested in contributing to Kubernetes and a lot of other projects. And this is becoming a really valuable thing. + +I'm not much involved in the organization of the group. There are really great people already organizing it. I keep on being around and attending the meetups and trying to answer any questions if people have any. + +**CRAIG BOX: One way that it is possible to contribute to the Kubernetes ecosystem is through the release process. You've [written a blog](https://blog.naba.run/posts/release-enhancements-journey/) which talks about your journey through that. It started in Kubernetes 1.17, where you took a shadow role for that release. 
Tell me about what it was like to first take that plunge.** + +NABARUN PAL: Taking the plunge was a big step, I would say. It should not have been that way. After getting into the team, I saw that it is really encouraged that you should just apply to the team - but then write truthfully about yourself. What do you want? Write your passionate goal, why you want to be in the team. + +So even right now the shadow applications are open for the next release. I wanted to give that a small shoutout. If you want to contribute to the Kubernetes release team, please do apply. The form is pretty simple. You just need to say why do you want to contribute to the release team. + +**CRAIG BOX: What was your answer to that question?** + +NABARUN PAL: It was a bit tricky. I have this philosophy of contributing to projects that I use in my day-to-day life. I use a lot of open source projects daily, and I started contributing to Kubernetes primarily because I was using the Kubernetes Python client. That was one of my first contributions. + +When I was contributing to that, I explored the release team and it interested me a lot, particularly how interesting and varied the mechanics of releasing Kubernetes are. For most software projects, it's usually whenever you decide that you have made meaningful progress in terms of features, you release it. But Kubernetes is not like that. We follow a regular release cadence. And all those aspects really interested me. I actually applied for the first time in Kubernetes 1.16, but got rejected. + +But I still applied to Kubernetes 1.17, and I got into the enhancements team. That team was led by [MrBobbyTables, Bob Killen](https://kubernetespodcast.com/episode/126-research-steering-honking/), back then, and [Jeremy Rickard](https://kubernetespodcast.com/episode/131-kubernetes-1.20/) was one of my co-shadows in the team. I shadowed enhancements again. Then I led enhancements in 1.19. 
I then shadowed the lead in 1.20 and eventually led the 1.21 team. That's what my journey has been. + +My suggestion to people is don't be afraid of failure. Even if you don't get selected, it's perfectly fine. You can still contribute to the release team. Just hop on the release calls, raise your hand, and introduce yourself. + +**CRAIG BOX: Between the 1.20 and 1.21 releases, you moved to work on the upstream contribution team at VMware. I've noticed that VMware is hiring a lot of great upstream contributors at the moment. Is this something that [Stephen Augustus](https://kubernetespodcast.com/episode/130-kubecon-na-2020/) had his fingerprints all over? Is there something in the water?** + +NABARUN PAL: A lot of people have fingerprints on this process. Stephen certainly had his fingerprints on it, I would say. We are expanding the team of upstream contributors primarily because the product that we are working for is based on Kubernetes. It helps us a lot in driving processes upstream and helping out the community as a whole, because everyone then gets enabled and benefits from what we contribute to the community. + +**CRAIG BOX: I understand that the Tanzu team is being built out in India at the moment, but I guess you probably haven't been able to meet them in person yet?** + +NABARUN PAL: Yes and no. I did not meet any of them after joining VMware, but I met a lot of my teammates, before I joined VMware, at KubeCons. For example, I met Nikhita, I met Dims, I met Stephen at KubeCon. I am yet to meet other members of the team and I'm really excited to catch up with them once everything comes out of lockdown and we go back to our normal lives. + +**CRAIG BOX: Yes, everyone that I speak to who has changed jobs in the pandemic says it's a very odd experience, just nothing really being different. And the same perhaps for people who are working on open source moving companies as well. 
They're doing the same thing, perhaps just for a different employer.** + +NABARUN PAL: As we say in the community, see you in another Slack in some time. + +**CRAIG BOX: We now turn to the recent release of Kubernetes 1.21. First of all, congratulations on that.** + +NABARUN PAL: Thank you. + +**CRAIG BOX: [The announcement](https://kubernetes.io/blog/2021/04/08/kubernetes-1-21-release-announcement/) says the release consists of 51 enhancements, 13 graduating to stable, 16 moving to beta, 20 entering alpha, and then two features that have been deprecated. How would you summarize this release?** + +NABARUN PAL: One of the big points for this release is that it is the largest release of all time. + +**CRAIG BOX: Really?** + +NABARUN PAL: Yep. 1.20 was the largest release back then, but 1.21 got more enhancements, primarily due to a lot of changes that we did to the process. + +In the 1.21 release cycle, we did a few things differently compared to other release cycles-- for example, in the enhancement process. An enhancement, in the Kubernetes context, is basically a feature proposal. You will hear the terminology [Kubernetes Enhancement Proposals](https://github.com/kubernetes/enhancements/blob/master/keps/README.md), or KEP, a lot in the community. An enhancement is a broad thing encapsulated in a specific document. + +**CRAIG BOX: I like to think of it as a thing that's worth having a heading in the release notes.** + +NABARUN PAL: Indeed. Until the 1.20 release cycle, what we used to do was-- the release team has a vertical called enhancements. The enhancements team members used to ping each of the enhancement issues and ask whether they want to be part of the release cycle or not. The authors would decide, or talk to their SIG, and then come back with the answer, as to whether they wanted to be part of the cycle. 
+ +In this release, what we did was we eliminated that process and asked the SIGs proactively to discuss amongst themselves, what they wanted to pitch in for this release cycle. What set of features did they want to graduate this release? They may introduce things in alpha, graduate things to beta or stable, or they may also deprecate features. + +What this did was promote a lot of async processes, and at the same time, give power back to the community. The community decides what they want in the release and then comes back collectively. It also reduces a lot of stress on the release team who previously had to ask people consistently what they wanted to pitch in for the release. You now have a deadline. You discuss amongst your SIG what your roadmap is and what it looks like for the near future. Maybe this release, and the next two. And you put all of those answers into a Google spreadsheet. Spreadsheets are still a thing. + +**CRAIG BOX: The Kubernetes ecosystem runs entirely on Google Spreadsheets.** + +NABARUN PAL: It does, and a lot of Google Docs for meeting notes! We did a lot of process improvements, which essentially led to a better release. This release cycle we had 13 enhancements graduating to stable, 16 which moved to beta, and 20 enhancements which were net new features into the ecosystem, and came in as alpha. + +Along with that are features set for deprecation. One of them was PodSecurityPolicy. That has been a point of discussion in the Kubernetes user base and we also published [a blog post about it](https://kubernetes.io/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/). All credit to SIG Security who have been on top of things as to find a replacement for PodSecurityPolicy even before this release cycle ended, so that they could at least have a proposal of what will happen next. + +**CRAIG BOX: Let's talk about some old things and some new things. You mentioned PodSecurityPolicy there. 
That's a thing that's been around a long time and is being deprecated. Two things that have been around a long time and that are now being promoted to stable are CronJobs and PodDisruptionBudgets, both of which were introduced in Kubernetes 1.4, which came out in 2016. Why do you think it took so long for them both to go stable?** + +NABARUN PAL: I might not have a definitive answer to your question. One of the things that I feel is they might be already so good that nobody saw that they were beta features, and just kept on using them. + +One of the things that I noticed when reading for the CronJobs graduation from beta to stable was the new controller. Users might not see this, but there has been a drastic change in the CronJob controller v2. What it essentially does is goes from a poll-based method of checking what users have defined as CronJobs to a queue architecture, which is the modern method of defining controllers. That has been one of the really good improvements in the case of CronJobs. Instead of the controller working in O(N) time, you now have constant time complexity. + +**CRAIG BOX: A lot of these features that have been in beta for a long time, like you say, people have an expectation that they are complete. With PodSecurityPolicy, it's being deprecated, which is allowed because it's a feature that never made it out of beta. But how do you think people will react to it going away? And does that say something about the need for the process to make sure that features don't just languish in beta forever, which has been introduced recently?** + +NABARUN PAL: That's true. One of the driving factors, when contributors are thinking of graduating beta features has been the ["prevention of perma-beta" KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-architecture/1635-prevent-permabeta/README.md). 
Back in 1.19 we [introduced this process](https://kubernetes.io/blog/2020/08/21/moving-forward-from-beta/) where each of the beta resources were marked for deprecation and removal in a certain time frame-- three releases for deprecation and another release for removal. That's also a motivating factor for eventually rethinking as to how beta resources work for us in the community. That is also very effective, I would say. + +**CRAIG BOX: Do remember that Gmail was in beta for eight years.** + +NABARUN PAL: I did not know that! + +**CRAIG BOX: Nothing in Kubernetes is quite that old yet, but we'll get there. Of the 20 new enhancements, do you have a favorite or any that you'd like to call out?** + +NABARUN PAL: There are two specific features in 1.21 that I'm really interested in, and are coming as net new features. One of them is the [persistent volume health monitor](https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/1432-volume-health-monitor), which gives the users the capability to actually see whether the backing volumes, which power persistent volumes in Kubernetes, are deleted or not. For example, the volumes may get deleted due to an inadvertent event, or they may get corrupted. That information is basically surfaced out as a field so that the user can leverage it in any way. + +The other feature is the proposal for [adding headers with the command name to kubectl requests](https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers). We have always set the user-agent information when doing those kind of requests, but the proposal is to add what command the user put in so that we can enable more telemetry, and cluster administrators can determine the usage patterns of how people are using the cluster. I'm really excited about these kind of features coming into play. + +**CRAIG BOX: You're the first release lead from the Asia-Pacific region, or more accurately, outside of the US and Europe. 
Most meetings in the Kubernetes ecosystem are traditionally in the window of overlap between the US and Europe, in the morning in California and the evening here in the UK. What's it been like to work outside of the time zones that the community had previously been operating in?** + +NABARUN PAL: It has been a fun and a challenging proposition, I would say. In the last two-ish years that I have been contributing to Kubernetes, the community has also transformed from a lot of early morning Pacific calls to more towards async processes. For example, we in the release team have transformed our processes so we don't do updates in the calls anymore. What we do is ask for updates ahead of time, and then in the call, we just discuss things which need to be discussed synchronously in the team. + +We leverage the meetings right now more for discussions. But we also don't come to decisions in those discussions, because if any stakeholder is not present on the call, it puts them at a disadvantage. We are trying to talk more on Slack, publicly, or talk on mailing lists. That's where most of the discussion should happen, and also to gain lazy consensus. What I mean by lazy consensus is come up with a pre-decision kind of thing, but then also invite feedback from the broader community about what people would like them to see about that specific thing being discussed. This is where we as a community are also transforming a lot, but there is a lot more headroom to grow. + +The release team also started to have EU/APAC burndown meetings. In addition to having one meeting focused towards the US and European time zones, we also do a meeting which is more suited towards European and Asia-Pacific time zones. One of the driving factors for those decisions was that the release team is seeing a lot of participation from a variety of time zones. To give you one metric, we had release team members this cycle from UTC+8 all through UTC-8 - 16 hours of span. 
It's really difficult to accommodate all of those zones in a single meeting. And it's not just those 16 hours of span - what about the other eight hours? + +**CRAIG BOX: Yeah, you're missing New Zealand. You could add another 5 hours of span right there.** + +NABARUN PAL: Exactly. So we will always miss people in meetings, and that's why we should also innovate more, have different kinds of meetings. But that also may not be very sustainable in the future. Will people attend duplicate meetings? Will people follow both of the meetings? More meetings is one of the solutions. + +The other solution is you have threaded discussions on some medium, be it Slack or be it a mailing list. Then, people can just pitch in whenever it is work time for them. Then, at the end of the day, a 24-hour rolling period, you digest it, and then push it out as meeting notes. That's what the Contributor Experience Special Interest Group is doing - shout-out to them for moving to that process. I may be wrong here, but I think once every two weeks, they do async updates on Slack. And that is a really nice thing to have, improving variety of geographies that people can contribute from. + +**CRAIG BOX: Once you've put everything together that you hope to be in your release, you create a release candidate build. How do you motivate people to test those?** + +NABARUN PAL: That's a very interesting question. It is difficult for us to motivate people into trying out these candidates. It's mostly people who are passionate about Kubernetes who try out the release candidates and see for themselves what the bugs are. I remember [Dims tweeting out a call](https://twitter.com/dims/status/1377272238420934656) that if somebody tries out the release candidate and finds a good bug or caveat, they could get a callout in the KubeCon keynote. That's one of the incentives - if you want to be called out in a KubeCon keynote, please try our release candidates. 
+ +**CRAIG BOX: Or get a new pair of Kubernetes socks?** + +NABARUN PAL: We would love to give out goodies to people who try out our release candidates and find bugs. For example, if you want the brand new release team logo as a sticker, just hit me up. If you find a bug in a 1.22 release candidate, I would love to be able to send you some coupon codes for the store. Don't quote me on this, but do reach out. + +**CRAIG BOX: Now the release is out, is it time for you to put your feet up? What more things do you have to do, and how do you feel about the path ahead for yourself?** + +NABARUN PAL: I was discussing this with the team yesterday. Even after the release, we had kind of a water-cooler conversation. I just pasted in a Zoom link to all the release team members and said, hey, do you want to chat? One of the things that I realized that I'm really missing is the daily burndowns right now. I will be around in the release team and the SIG Release meetings, helping out the new lead in transitioning. And even my job, right now, is not over. I'm working with Taylor, who is the emeritus advisor for 1.21, on figuring out some of the mechanics for the next release cycle. I'm also documenting what all we did as part of the process and as part of the process changes, and making sure the next release cycle is up and running. + +**CRAIG BOX: We've done a lot of these release lead interviews now, and there's a question which we always like to ask, which is, what will you write down in the transition envelope? Savitha Raghunathan is the release lead for 1.22. What is the advice that you will pass on to her?** + +NABARUN PAL: Three words-- **Do, Delegate, and Defer**. Categorize things into those three buckets as to what you should do right away, what you need to defer, and things that you can delegate to your shadows or other release team members. That's one of the mantras that works really well when leading a team. 
It is not just in the context of the release team, but it's in the context of managing any team. + +The other bit is **over-communicate**. No amount of communication is enough. What I've realized is the community is always willing to help you. One of the big examples that I can give is the day before release was supposed to happen, we were seeing a lot of test failures, and then one of the community members had an idea-- why don't you just send an email? I was like, "that sounds good. We can send an email mentioning all the flakes and call out for help to the broader Kubernetes developer community." And eventually, once we sent out the email, lots of people came in to help us in de-flaking the tests and trying to find out the root cause as to why those tests were failing so often. Big shout out to Antonio and all the SIG Network folks who came to pitch in. + +No matter how many names I mention, it will never be enough. A lot of people, even outside the release team, have helped us a lot with this release. And that's where the release theme comes in - **Power to the Community**. I'm really stoked by how this community behaves and how people are willing to help you all the time. It's not about what they're telling you to do, but it's what they're also interested in, they're passionate about. + +**CRAIG BOX: One of the things you're passionate about is Formula One. Do you think Lewis Hamilton is going to take it away this year?** + +NABARUN PAL: It's a fair probability that Lewis will win the title this year as well. + +**CRAIG BOX: Which would take him to eight all time career wins. And thus-- [he's currently tied with Michael Schumacher](https://www.nytimes.com/2020/11/15/sports/autoracing/lewis-hamilton-schumacher-formula-one-record.html)-- would pull him ahead.** + +NABARUN PAL: Yes. Michael Schumacher was my first favorite F1 driver, I would say. It feels a bit heartbreaking to see someone break Michael's record. 
+ +**CRAIG BOX: How do you feel about [Michael Schumacher's son joining the contest?](https://www.formula1.com/en/latest/article.breaking-mick-schumacher-to-race-for-haas-in-2021-as-famous-surname-returns.66XTVfSt80GrZe91lvWVwJ.html)** + +NABARUN PAL: I feel good. Mick Schumacher is in the fray right now. And I wish we could see him, in a few years, in a Ferrari. The Schumacher family back to Ferrari would be really great to see. But then, my fan favorite has always been McLaren, partly because I like the chemistry of Lando and Carlos over the last two years. It was heartbreaking to see Carlos go to Ferrari. But then we have Lando and Daniel Ricciardo in the team. They're also fun people. + +--- + +_[Nabarun Pal](https://twitter.com/theonlynabarun) is on the Tanzu team at VMware and served as the Kubernetes 1.21 release team lead._ + +_You can find the [Kubernetes Podcast from Google](http://www.kubernetespodcast.com/) at [@KubernetesPod](https://twitter.com/KubernetesPod) on Twitter, and you can [subscribe](https://kubernetespodcast.com/subscribe/) so you never miss an episode._ diff --git a/content/en/blog/_posts/2021-08-04-kubernetes-release-1.22.md b/content/en/blog/_posts/2021-08-04-kubernetes-release-1.22.md new file mode 100644 index 0000000000..9a196f7fba --- /dev/null +++ b/content/en/blog/_posts/2021-08-04-kubernetes-release-1.22.md @@ -0,0 +1,157 @@ +--- +layout: blog +title: 'Kubernetes 1.22: Reaching New Peaks' +date: 2021-08-04 +slug: kubernetes-1-22-release-announcement +--- + +**Authors:** [Kubernetes 1.22 Release Team](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.22/release-team.md) + +We’re pleased to announce the release of Kubernetes 1.22, the second release of 2021! + +This release consists of 53 enhancements: 13 enhancements have graduated to stable, 24 enhancements are moving to beta, and 16 enhancements are entering alpha. Also, three features have been deprecated. 
+ +In April of this year, the Kubernetes release cadence was officially changed from four to three releases yearly. This is the first longer-cycle release related to that change. As the Kubernetes project matures, the number of enhancements per cycle grows. This means more work, from version to version, for the contributor community and Release Engineering team, and it can put pressure on the end-user community to stay up-to-date with releases containing increasingly more features. + +Changing the release cadence from four to three releases yearly balances many aspects of the project, both in how contributions and releases are managed, and also in the community's ability to plan for upgrades and stay up to date. + +You can read more in the official blog post [Kubernetes Release Cadence Change: Here’s What You Need To Know](https://kubernetes.io/blog/2021/07/20/new-kubernetes-release-cadence/). + + +## Major Themes + +### Server-side Apply graduates to GA + +[Server-side Apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/) is a new field ownership and object merge algorithm running on the Kubernetes API server. Server-side Apply helps users and controllers manage their resources via declarative configurations. It allows them to create and/or modify their objects declaratively, simply by sending their fully specified intent. After being in beta for a couple releases, Server-side Apply is now generally available. + +### External credential providers now stable + +Support for Kubernetes client [credential plugins](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins) has been in beta since 1.11, and with the release of Kubernetes 1.22 now graduates to stable. The GA feature set includes improved support for plugins that provide interactive login flows, as well as a number of bug fixes. 
Aspiring plugin authors can look at [sample-exec-plugin](https://github.com/ankeesler/sample-exec-plugin) to get started. + +### etcd moves to 3.5.0 + +Kubernetes' default backend storage, etcd, has a new release: 3.5.0. The new release comes with improvements to the security, performance, monitoring, and developer experience. There are numerous bug fixes and some critical new features like the migration to structured logging and built-in log rotation. The release comes with a detailed future roadmap to implement a solution to traffic overload. You can read a full and detailed list of changes in the [3.5.0 release announcement](https://etcd.io/blog/2021/announcing-etcd-3.5/). + +### Quality of Service for memory resources + +Originally, Kubernetes used the v1 cgroups API. With that design, the QoS class for a `Pod` only applied to CPU resources (such as `cpu_shares`). As an alpha feature, Kubernetes v1.22 can now use the cgroups v2 API to control memory allocation and isolation. This feature is designed to improve workload and node availability when there is contention for memory resources, and to improve the predictability of container lifecycle. + +### Node system swap support + +Every system administrator or Kubernetes user has been in the same boat regarding setting up and using Kubernetes: disable swap space. With the release of Kubernetes 1.22, alpha support is available to run nodes with swap memory. This change lets administrators opt in to configuring swap on Linux nodes, treating a portion of block storage as additional virtual memory. + +### Windows enhancements and capabilities + +Continuing to support the growing developer community, SIG Windows has released their [Development Environment](https://github.com/kubernetes-sigs/sig-windows-dev-tools/). These new tools support multiple CNI providers and can run on multiple platforms. 
There is also a new way to run bleeding-edge Windows features from scratch by compiling the Windows kubelet and kube-proxy, then using them along with daily builds of other Kubernetes components. + +CSI support for Windows nodes moves to GA in the 1.22 release. In Kubernetes v1.22, Windows privileged containers are an alpha feature. To allow using CSI storage on Windows nodes, [CSIProxy](https://github.com/kubernetes-csi/csi-proxy) enables CSI node plugins to be deployed as unprivileged pods, using the proxy to perform privileged storage operations on the node. + +### Default profiles for seccomp + +An alpha feature for default seccomp profiles has been added to the kubelet, along with a new command line flag and configuration. When in use, this new feature provides cluster-wide seccomp defaults, using the `RuntimeDefault` seccomp profile rather than `Unconfined` by default. This enhances the default security of the Kubernetes Deployment. Security administrators will now sleep better knowing that workloads are more secure by default. To learn more about the feature, please refer to the official [seccomp tutorial](https://kubernetes.io/docs/tutorials/clusters/seccomp/#enable-the-use-of-runtimedefault-as-the-default-seccomp-profile-for-all-workloads). + +### More secure control plane with kubeadm + +A new alpha feature allows running the `kubeadm` control plane components as non-root users. This is a long requested security measure in `kubeadm`. To try it you must enable the `kubeadm` specific RootlessControlPlane feature gate. When you deploy a cluster using this alpha feature, your control plane runs with lower privileges. + +For `kubeadm`, Kubernetes 1.22 also brings a new [v1beta3 configuration API](/docs/reference/config-api/kubeadm-config.v1beta3/). This iteration adds some long requested features and deprecates some existing ones. The v1beta3 version is now the preferred API version; the v1beta2 API also remains available and is not yet deprecated. 
+ +## Major Changes + +### Removal of several deprecated beta APIs + +A number of deprecated beta APIs have been removed in 1.22 in favor of the GA version of those same APIs. All existing objects can be interacted with via stable APIs. This removal includes beta versions of the `Ingress`, `IngressClass`, `Lease`, `APIService`, `ValidatingWebhookConfiguration`, `MutatingWebhookConfiguration`, `CustomResourceDefinition`, `TokenReview`, `SubjectAccessReview`, and `CertificateSigningRequest` APIs. + +For the full list, check out the [Deprecated API Migration Guide](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-22) as well as the blog post [Kubernetes API and Feature Removals In 1.22: Here’s What You Need To Know](https://blog.k8s.io/2021/07/14/upcoming-changes-in-kubernetes-1-22/). + +### API changes and improvements for ephemeral containers + +The API used to create [Ephemeral Containers](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/) changes in 1.22. The Ephemeral Containers feature is alpha and disabled by default, and the new API does not work with clients that attempt to use the old API. + +For stable features, the kubectl tool follows the Kubernetes [version skew policy](https://kubernetes.io/releases/version-skew-policy/); however, kubectl v1.21 and older do not support the new API for ephemeral containers. If you plan to use `kubectl debug` to create ephemeral containers, and your cluster is running Kubernetes v1.22, you cannot do so with kubectl v1.21 or earlier. Please update kubectl to 1.22 if you wish to use `kubectl debug` with a mix of cluster versions. 
+ +## Other Updates + +### Graduated to Stable + +* [Bound Service Account Token Volumes](https://github.com/kubernetes/enhancements/issues/542) +* [CSI Service Account Token](https://github.com/kubernetes/enhancements/issues/2047) +* [Windows Support for CSI Plugins](https://github.com/kubernetes/enhancements/issues/1122) +* [Warning mechanism for deprecated API use](https://github.com/kubernetes/enhancements/issues/1693) +* [PodDisruptionBudget Eviction](https://github.com/kubernetes/enhancements/issues/85) + +### Notable Feature Updates + +* A new [PodSecurity admission](https://github.com/kubernetes/enhancements/issues/2579) alpha feature is introduced, intended as a replacement for PodSecurityPolicy +* [The Memory Manager](https://github.com/kubernetes/enhancements/issues/1769) moves to beta +* A new alpha feature to enable [API Server Tracing](https://github.com/kubernetes/enhancements/issues/647) +* A new v1beta3 version of the [kubeadm configuration](https://github.com/kubernetes/enhancements/issues/970) format +* [Generic data populators](https://github.com/kubernetes/enhancements/issues/1495) for PersistentVolumes are now available in alpha +* The Kubernetes control plane will now always use the [CronJobs v2 controller](https://github.com/kubernetes/enhancements/issues/19) +* As an alpha feature, all Kubernetes node components (including the kubelet, kube-proxy, and container runtime) can be [run as a non-root user](https://github.com/kubernetes/enhancements/issues/2033) + +# Release notes + +You can check out the full details of the 1.22 release in the [release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.22.md). + +# Availability of release + +Kubernetes 1.22 is [available for download](https://kubernetes.io/releases/download/) and also [on the GitHub project](https://github.com/kubernetes/kubernetes/releases/tag/v1.22.0). + +There are some great resources out there for getting started with Kubernetes. 
You can check out some [interactive tutorials](https://kubernetes.io/docs/tutorials/) on the main Kubernetes site, or run a local cluster on your machine using Docker containers with [kind](https://kind.sigs.k8s.io). If you’d like to try building a cluster from scratch, check out the [Kubernetes the Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) tutorial by Kelsey Hightower. + +# Release Team + +This release was made possible by a very dedicated group of individuals, who came together as a team to deliver technical content, documentation, code, and a host of other components that go into every Kubernetes release. + +A huge thank you to the release lead Savitha Raghunathan for leading us through a successful release cycle, and to everyone else on the release team for supporting each other, and working so hard to deliver the 1.22 release for the community. + +We would also like to take this opportunity to remember Peeyush Gupta, a member of our team that we lost earlier this year. Peeyush was actively involved in SIG ContribEx and the Kubernetes Release Team, most recently serving as the 1.22 Communications lead. His contributions and efforts will continue to reflect in the community he helped build. A [CNCF memorial](https://github.com/cncf/memorials/blob/main/peeyush-gupta.md) page has been created where thoughts and memories can be shared by the community. + +# Release Logo + +![Kubernetes 1.22 Release Logo](/images/blog/2021-08-04-kubernetes-release-1.22/kubernetes-1.22.png) + +Amidst the ongoing pandemic, natural disasters, and ever-present shadow of burnout, the 1.22 release of Kubernetes includes 53 enhancements. This makes it the largest release to date. This accomplishment was only made possible due to the hard-working and passionate Release Team members and the amazing contributors of the Kubernetes ecosystem. The release logo is our reminder to keep reaching for new milestones and setting new records. 
And it is dedicated to all the Release Team members, hikers, and stargazers! + +The logo is designed by [Boris Zotkin](https://www.instagram.com/boris.z.man/). Boris is a Mac/Linux Administrator at the MathWorks. He enjoys simple things in life and loves spending time with his family. This tech-savvy individual is always up for a challenge and happy to help a friend! + +# User Highlights + +- In May, the CNCF welcomed 27 new organizations across the globe as members of the diverse cloud native ecosystem. These new [members](https://www.cncf.io/announcements/2021/05/05/27-new-members-join-the-cloud-native-computing-foundation/) will participate in CNCF events, including the upcoming [KubeCon + CloudNativeCon NA in Los Angeles](https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/) from October 12 – 15, 2021. +- The CNCF granted Spotify the [Top End User Award](https://www.cncf.io/announcements/2021/05/05/cloud-native-computing-foundation-grants-spotify-the-top-end-user-award/) during [KubeCon + CloudNativeCon EU – Virtual 2021](https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/). + +# Project Velocity + +The [CNCF K8s DevStats project](https://k8s.devstats.cncf.io/) aggregates a number of interesting data points related to the velocity of Kubernetes and various sub-projects. This includes everything from individual contributions to the number of companies that are contributing, and is an illustration of the depth and breadth of effort that goes into evolving this ecosystem. 
+ +In the v1.22 release cycle, which ran for 15 weeks (April 26 to August 4), we saw contributions from [1063 companies](https://k8s.devstats.cncf.io/d/9/companies-table?orgId=1&var-period_name=v1.21.0%20-%20now&var-metric=contributions) and [2054 individuals](https://k8s.devstats.cncf.io/d/66/developer-activity-counts-by-companies?orgId=1&var-period_name=v1.21.0%20-%20now&var-metric=contributions&var-repogroup_name=Kubernetes&var-country_name=All&var-companies=All). + +# Ecosystem Updates + +- [KubeCon + CloudNativeCon Europe 2021](https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/) was held in May, the third such event to be virtual. All talks are [now available on-demand](https://www.youtube.com/playlist?list=PLj6h78yzYM2MqBm19mRz9SYLsw4kfQBrC) for anyone that would like to catch up! +- [Spring Term LFX Program](https://www.cncf.io/blog/2021/07/13/spring-term-lfx-program-largest-graduating-class-with-28-successful-cncf-interns) had the largest graduating class with 28 successful CNCF interns! +- CNCF launched [livestreaming on Twitch](https://www.cncf.io/blog/2021/06/03/cloud-native-community-goes-live-with-10-shows-on-twitch/) at the beginning of the year targeting definitive interactive media experience for anyone wanting to learn, grow, and collaborate with others in the Cloud Native community from anywhere in the world. + +# Event Updates + +- [KubeCon + CloudNativeCon North America 2021](https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/) will take place in Los Angeles, October 12 – 15, 2021! You can find more information about the conference and registration on the event site. +- [Kubernetes Community Days](https://community.cncf.io/kubernetes-community-days/about-kcd/) has upcoming events scheduled in Italy, the UK, and in Washington DC. 
+ +# Upcoming release webinar + +Join members of the Kubernetes 1.22 release team on October 5, 2021 to learn about the major features of this release, as well as deprecations and removals to help plan for upgrades. For more information and registration, visit the [event page](https://community.cncf.io/events/details/cncf-cncf-online-programs-presents-cncf-live-webinar-kubernetes-122-release/) on the CNCF Online Programs site. + +# Get Involved + +If you’re interested in contributing to the Kubernetes community, Special Interest Groups (SIGs) are a great starting point. Many of them may align with your interests! If there are things you’d like to share with the community, you can join the weekly community meeting, or use any of the following channels: + +* Find out more about contributing to Kubernetes at the [Kubernetes Contributors](https://www.kubernetes.dev/) website. +* Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates +* Join the community discussion on [Discuss](https://discuss.kubernetes.io/) +* Join the community on [Slack](http://slack.k8s.io/) +* Share your Kubernetes [story](https://docs.google.com/a/linuxfoundation.org/forms/d/e/1FAIpQLScuI7Ye3VQHQTwBASrgkjQDSS5TP0g3AXfFhwSM9YpHgxRKFA/viewform) +* Read more about what’s happening with Kubernetes on the [blog](https://kubernetes.io/blog/) +* Learn more about the [Kubernetes Release Team](https://github.com/kubernetes/sig-release/tree/master/release-team) + + diff --git a/content/en/blog/_posts/2021-08-06-server-side-apply-ga.md b/content/en/blog/_posts/2021-08-06-server-side-apply-ga.md new file mode 100644 index 0000000000..eca57a561e --- /dev/null +++ b/content/en/blog/_posts/2021-08-06-server-side-apply-ga.md @@ -0,0 +1,177 @@ +--- +layout: blog +title: "Kubernetes 1.22: Server Side Apply moves to GA" +date: 2021-08-06 +slug: server-side-apply-ga +--- + +**Authors:** Jeffrey Ying, Google & Joe Betz, Google + +Server-side Apply (SSA) has been promoted to GA in 
the Kubernetes v1.22 release. The GA milestone means you can depend on the feature and its API, without fear of future backwards-incompatible changes. GA features are protected by the Kubernetes [deprecation policy](/docs/reference/using-api/deprecation-policy/). + +## What is Server-side Apply? + +Server-side Apply helps users and controllers manage their resources through declarative configurations. Server-side Apply replaces the client side apply feature implemented by “kubectl apply” with a server-side implementation, permitting use by tools/clients other than kubectl. Server-side Apply is a new merging algorithm, as well as tracking of field ownership, running on the Kubernetes api-server. Server-side Apply enables new features like conflict detection, so the system knows when two actors are trying to edit the same field. Refer to the [Server-side Apply Documentation](/docs/reference/using-api/server-side-apply/) and [Beta 2 release announcement](https://kubernetes.io/blog/2020/04/01/kubernetes-1.18-feature-server-side-apply-beta-2/) for more information. + +## What’s new since Beta? + +Since the [Beta 2 release](https://kubernetes.io/blog/2020/04/01/kubernetes-1.18-feature-server-side-apply-beta-2/) subresources support has been added, and both client-go and Kubebuilder have added comprehensive support for Server-side Apply. This completes the Server-side Apply functionality required to make controller development practical. + +### Support for subresources + +Server-side Apply now fully supports subresources like `status` and `scale`. This is particularly important for [controllers](/docs/concepts/architecture/controller/), which are often responsible for writing to subresources. + +## Server-side Apply support in client-go + +Previously, Server-side Apply could only be called from the client-go typed client using the `Patch` function, with `PatchType` set to `ApplyPatchType`. 
Now, `Apply` functions are included in the client to allow for a more direct and typesafe way of calling Server-side Apply. Each `Apply` function takes an "apply configuration" type as an argument, which is a structured representation of an Apply request. For example: + +```go +import ( + ... + v1ac "k8s.io/client-go/applyconfigurations/autoscaling/v1" +) + +hpaApplyConfig := v1ac.HorizontalPodAutoscaler(autoscalerName, ns). + WithSpec(v1ac.HorizontalPodAutoscalerSpec(). + WithMinReplicas(0) + ) + +return hpav1client.Apply(ctx, hpaApplyConfig, metav1.ApplyOptions{FieldManager: "mycontroller", Force: true}) +``` + +Note in this example that `HorizontalPodAutoscaler` is imported from an "applyconfigurations" package. Each "apply configuration" type represents the same Kubernetes object kind as the corresponding go struct, but where all fields are pointers to make them optional, allowing apply requests to be accurately represented. For example, when the apply configuration in the above example is marshalled to YAML, it produces: + +```yaml +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: myHPA + namespace: myNamespace +spec: + minReplicas: 0 +``` + +To understand why this is needed, the above YAML cannot be produced by the v1.HorizontalPodAutoscaler go struct. 
Take for example: + +```go +hpa := v1.HorizontalPodAutoscaler{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "autoscaling/v1", + Kind: "HorizontalPodAutoscaler", + }, + ObjectMeta: ObjectMeta{ + Namespace: ns, + Name: autoscalerName, + }, + Spec: v1.HorizontalPodAutoscalerSpec{ + MinReplicas: pointer.Int32Ptr(0), + }, +} +``` + +The above code attempts to declare the same apply configuration as shown in the previous examples, but when marshalled to YAML, produces: + +```yaml +kind: HorizontalPodAutoscaler +apiVersion: autoscaling/v1 +metadata: + name: myHPA + namespace: myNamespace + creationTimestamp: null +spec: + scaleTargetRef: + kind: "" + name: "" + minReplicas: 0 + maxReplicas: 0 +``` + +Which, among other things, contains `spec.maxReplicas` set to `0`. This is almost certainly not what the caller intended (the intended apply configuration says nothing about the `maxReplicas` field), and could have serious consequences on a production system: it directs the autoscaler to downscale to zero pods. The problem here originates from the fact that the go structs contain required fields that are zero valued if not set explicitly. The go structs work as intended for create and update operations, but are fundamentally incompatible with apply, which is why we have introduced the generated "apply configuration" types. + +The "apply configurations" also have convenience `With` functions that make it easier to build apply requests. This allows developers to set fields without having to deal with the fact that all the fields in the "apply configuration" types are pointers, and are inconvenient to set using go. For example `MinReplicas: &0` is not legal go code, so without the `With` functions, developers would work around this problem by using a library, e.g. `MinReplicas: pointer.Int32Ptr(0)`, but string enumerations like `corev1.Protocol` are still a problem since they cannot be supported by a general purpose library. 
In addition to the convenience, the `With` functions also isolate developers from the underlying representation, which makes it safer for the underlying representation to be changed to support additional features in the future. + +## Using Server-side Apply in a controller + +You can use the new support for Server-side Apply no matter how you implemented your controller. However, the new client-go support makes it easier to use Server-side Apply in controllers. + +When authoring new controllers to use Server-side Apply, a good approach is to have the controller recreate the apply configuration for an object each time it reconciles that object. This ensures that the controller fully reconciles all the fields that it is responsible for. Controllers typically should unconditionally set all the fields they own by setting `Force: true` in the `ApplyOptions`. Controllers must also provide a `FieldManager` name that is unique to the reconciliation loop that apply is called from. + +When upgrading existing controllers to use Server-side Apply the same approach often works well--migrate the controllers to recreate the apply configuration each time it reconciles any object. Unfortunately, the controller might have multiple code paths that update different parts of an object depending on various conditions. Migrating a controller like this to Server-side Apply can be risky because if the controller forgets to include any fields in an apply configuration that were included in a previous apply request, a field can be accidentally deleted. To ease this type of migration, client-go apply support provides a way to replace any controller reconciliation code that performs a "read/modify-in-place/update" (or patch) workflow with an "extract/modify-in-place/apply" workflow. 
Here's an example of the new workflow: + +```go +fieldMgr := "my-field-manager" +deploymentClient := clientset.AppsV1().Deployments("default") + +// read, could also be read from a shared informer +deployment, err := deploymentClient.Get(ctx, "example-deployment", metav1.GetOptions{}) +if err != nil { + // handle error +} + +// extract +deploymentApplyConfig, err := appsv1ac.ExtractDeployment(deployment, fieldMgr) +if err != nil { + // handle error +} + +// modify-in-place +deploymentApplyConfig.Spec.Template.Spec.WithContainers(corev1ac.Container(). + WithName("modify-slice"). + WithImage("nginx:1.14.2"), +) + +// apply +applied, err := deploymentClient.Apply(ctx, deploymentApplyConfig, metav1.ApplyOptions{FieldManager: fieldMgr}) +``` + +For developers using Custom Resource Definitions (CRDs), the Kubebuilder apply support will provide the same capabilities. Documentation will be included in the Kubebuilder book when available. + +## Server-side Apply and CustomResourceDefinitions + +It is strongly recommended that all [Custom Resource Definitions](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) (CRDs) have a schema. CRDs without a schema are treated as unstructured data by Server-side Apply. Keys are treated as fields in a struct and lists are assumed to be atomic. + +CRDs that specify a schema are able to specify additional annotations in the schema. Please refer to the documentation for the full list of available annotations. + +New annotations since beta: + +**Defaulting:** Values for fields that appliers do not express explicit interest in should be defaulted. This prevents an applier from unintentionally owning a defaulted field that might cause conflicts with other appliers. If unspecified, the default value is nil or the nil equivalent for the corresponding type. + +- Usage: see the [CRD Defaulting](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#defaulting) documentation for more details. 
+- Golang: `+default=` +- OpenAPI extension: `default: ` + + +Atomic for maps and structs: + +**Maps:** By default maps are granular. A different manager is able to manage each map entry. They can also be configured to be atomic such that a single manager owns the entire map. + +- Usage: Refer to [Merge Strategy](/docs/reference/using-api/server-side-apply/#merge-strategy) for a more detailed overview +- Golang: `+mapType=granular/atomic` +- OpenAPI extension: `x-kubernetes-map-type: granular/atomic` + +**Structs:** By default structs are granular and a separate applier may own each field. For certain kinds of structs, atomicity may be desired. This is most commonly seen in small coordinate-like structs such as Field/Object/Namespace Selectors, Object References, RGB values, Endpoints (Protocol/Port pairs), etc. + +- Usage: Refer to [Merge Strategy](/docs/reference/using-api/server-side-apply/#merge-strategy) for a more detailed overview +- Golang: `+structType=granular/atomic` +- OpenAPI extension: `x-kubernetes-map-type: granular/atomic` + +## What's Next? + +After Server Side Apply, the next focus for the API Expression working-group is around improving the expressiveness and size of the published Kubernetes API schema. To see the full list of items we are working on, please join our working group and refer to the work items document. + +## How to get involved? + +The working-group for apply is [wg-api-expression](https://github.com/kubernetes/community/tree/master/wg-api-expression). It is available on slack [#wg-api-expression](https://kubernetes.slack.com/archives/C0123CNN8F3), through the [mailing list](https://groups.google.com/g/kubernetes-wg-api-expression) and we also meet every other Tuesday at 9.30 PT on Zoom.
+ +We would also like to use the opportunity to thank the hard work of all the contributors involved in making this promotion to GA possible: + +- Andrea Nodari +- Antoine Pelisse +- Daniel Smith +- Jeffrey Ying +- Jenny Buckley +- Joe Betz +- Julian Modesto +- Kevin Delgado +- Kevin Wiesmüller +- Maria Ntalla diff --git a/content/en/blog/_posts/2021-08-09-alpha-swap-support.md b/content/en/blog/_posts/2021-08-09-alpha-swap-support.md new file mode 100644 index 0000000000..2d7f562c27 --- /dev/null +++ b/content/en/blog/_posts/2021-08-09-alpha-swap-support.md @@ -0,0 +1,142 @@ +--- +layout: blog +title: 'New in Kubernetes v1.22: alpha support for using swap memory' +date: 2021-08-09 +slug: run-nodes-with-swap-alpha +--- + +**Author:** Elana Hashman (Red Hat) + +The 1.22 release introduced alpha support for configuring swap memory usage for +Kubernetes workloads on a per-node basis. + +In prior releases, Kubernetes did not support the use of swap memory on Linux, +as it is difficult to provide guarantees and account for pod memory utilization +when swap is involved. As part of Kubernetes' earlier design, swap support was +considered out of scope, and a kubelet would by default fail to start if swap +was detected on a node. + +However, there are a number of [use cases](https://github.com/kubernetes/enhancements/blob/9d127347773ad19894ca488ee04f1cd3af5774fc/keps/sig-node/2400-node-swap/README.md#user-stories) +that would benefit from Kubernetes nodes supporting swap, including improved +node stability, better support for applications with high memory overhead but +smaller working sets, the use of memory-constrained devices, and memory +flexibility. 
+ +Hence, over the past two releases, [SIG Node](https://github.com/kubernetes/community/tree/master/sig-node#readme) has +been working to gather appropriate use cases and feedback, and propose a design +for adding swap support to nodes in a controlled, predictable manner so that +Kubernetes users can perform testing and provide data to continue building +cluster capabilities on top of swap. The alpha graduation of swap memory +support for nodes is our first milestone towards this goal! + +## How does it work? + +There are a number of possible ways that one could envision swap use on a node. +To keep the scope manageable for this initial implementation, when swap is +already provisioned and available on a node, [we have proposed](https://github.com/kubernetes/enhancements/blob/9d127347773ad19894ca488ee04f1cd3af5774fc/keps/sig-node/2400-node-swap/README.md#proposal) +the kubelet should be able to be configured such that: + +- It can start with swap on. +- It will direct the Container Runtime Interface to allocate zero swap memory + to Kubernetes workloads by default. +- You can configure the kubelet to specify swap utilization for the entire + node. + +Swap configuration on a node is exposed to a cluster admin via the +[`memorySwap` in the KubeletConfiguration](/docs/reference/config-api/kubelet-config.v1beta1/). +As a cluster administrator, you can specify the node's behaviour in the +presence of swap memory by setting `memorySwap.swapBehavior`. + +This is possible through the addition of a `memory_swap_limit_in_bytes` field +to the container runtime interface (CRI). The kubelet's config will control how +much swap memory the kubelet instructs the container runtime to allocate to +each container via the CRI. The container runtime will then write the swap +settings to the container level cgroup. + +## How do I use it? 
+ +On a node where swap memory is already provisioned, Kubernetes use of swap on a +node can be enabled by enabling the `NodeSwap` feature gate on the kubelet, and +disabling the `failSwapOn` [configuration setting](/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) +or the `--fail-swap-on` command line flag. + +You can also optionally configure `memorySwap.swapBehavior` in order to +specify how a node will use swap memory. For example, + +```yaml +memorySwap: + swapBehavior: LimitedSwap +``` + +The available configuration options for `swapBehavior` are: + +- `LimitedSwap` (default): Kubernetes workloads are limited in how much swap + they can use. Workloads on the node not managed by Kubernetes can still swap. +- `UnlimitedSwap`: Kubernetes workloads can use as much swap memory as they + request, up to the system limit. + +If configuration for `memorySwap` is not specified and the feature gate is +enabled, by default the kubelet will apply the same behaviour as the +`LimitedSwap` setting. + +The behaviour of the `LimitedSwap` setting depends if the node is running with +v1 or v2 of control groups (also known as "cgroups"): + +- **cgroups v1:** Kubernetes workloads can use any combination of memory and + swap, up to the pod's memory limit, if set. +- **cgroups v2:** Kubernetes workloads cannot use swap memory. + +### Caveats + +Having swap available on a system reduces predictability. Swap's performance is +worse than regular memory, sometimes by many orders of magnitude, which can +cause unexpected performance regressions. Furthermore, swap changes a system's +behaviour under memory pressure, and applications cannot directly control what +portions of their memory usage are swapped out. 
Since enabling swap permits +greater memory usage for workloads in Kubernetes that cannot be predictably +accounted for, it also increases the risk of noisy neighbours and unexpected +packing configurations, as the scheduler cannot account for swap memory usage. + +The performance of a node with swap memory enabled depends on the underlying +physical storage. When swap memory is in use, performance will be significantly +worse in an I/O operations per second (IOPS) constrained environment, such as a +cloud VM with I/O throttling, when compared to faster storage mediums like +solid-state drives or NVMe. + +Hence, we do not recommend the use of swap for certain performance-constrained +workloads or environments. Cluster administrators and developers should +benchmark their nodes and applications before using swap in production +scenarios, and [we need your help](#how-do-i-get-involved) with that! + +## Looking ahead + +The Kubernetes 1.22 release introduces alpha support for swap memory on nodes, +and we will continue to work towards beta graduation in the 1.23 release. This +will include: + +* Adding support for controlling swap consumption at the Pod level via cgroups. + * This will include the ability to set a system-reserved quantity of swap + from what kubelet detects on the host. +* Determining a set of metrics for node QoS in order to evaluate the + performance and stability of nodes with and without swap enabled. +* Collecting feedback from test user cases. + * We will consider introducing new configuration modes for swap, such as a + node-wide swap limit for workloads. + +## How can I learn more? + +You can review the current [documentation](https://kubernetes.io/docs/concepts/architecture/nodes/#swap-memory) +on the Kubernetes website. 
+ +For more information, and to assist with testing and provide feedback, please +see [KEP-2400](https://github.com/kubernetes/enhancements/issues/2400) and its +[design proposal](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2400-node-swap/README.md). + +## How do I get involved? + +Your feedback is always welcome! SIG Node [meets regularly](https://github.com/kubernetes/community/tree/master/sig-node#meetings) +and [can be reached](https://github.com/kubernetes/community/tree/master/sig-node#contact) +via [Slack](https://slack.k8s.io/) (channel **#sig-node**), or the SIG's +[mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-node). +Feel free to reach out to me, Elana Hashman (**@ehashman** on Slack and GitHub) +if you'd like to help. diff --git a/content/en/blog/_posts/2021-08-09-csi-windows-support-with-csi-proxy-reaches-ga.md b/content/en/blog/_posts/2021-08-09-csi-windows-support-with-csi-proxy-reaches-ga.md new file mode 100644 index 0000000000..23d1c983ff --- /dev/null +++ b/content/en/blog/_posts/2021-08-09-csi-windows-support-with-csi-proxy-reaches-ga.md @@ -0,0 +1,76 @@ +--- +layout: blog +title: 'Kubernetes 1.22: CSI Windows Support (with CSI Proxy) reaches GA' +date: 2021-08-09 +slug: csi-windows-support-with-csi-proxy-reaches-ga +--- + +**Authors:** Mauricio Poppe (Google), Jing Xu (Google), and Deep Debroy (Apple) + +*The stable version of CSI Proxy for Windows has been released alongside Kubernetes 1.22. CSI Proxy enables CSI Drivers running on Windows nodes to perform privileged storage operations.* + +## Background + +Container Storage Interface (CSI) for Kubernetes went GA in the Kubernetes 1.13 release. CSI has become the standard for exposing block and file storage to containerized workloads on Container Orchestration systems (COs) like Kubernetes. It enables third-party storage providers to write and deploy plugins without the need to alter the core Kubernetes codebase. 
Legacy in-tree drivers are deprecated and new storage features are introduced in CSI, therefore it is important to get CSI Drivers to work on Windows. + +A CSI Driver in Kubernetes has two main components: a controller plugin which runs in the control plane and a node plugin which runs on every node. + +- The controller plugin generally does not need direct access to the host and can perform all its operations through the Kubernetes API and external control plane services. + +- The node plugin, however, requires direct access to the host for making block devices and/or file systems available to the Kubernetes kubelet. Due to the missing capability of running privileged operations from containers on Windows nodes [CSI Proxy was introduced as alpha in Kubernetes 1.18](https://kubernetes.io/blog/2020/04/03/kubernetes-1-18-feature-windows-csi-support-alpha/) as a way to enable containers to perform privileged storage operations. This enables containerized CSI Drivers to run on Windows nodes. + +## What's CSI Proxy and how do CSI drivers interact with it? + +When a workload that uses persistent volumes is scheduled, it'll go through a sequence of steps defined in the [CSI Spec](https://github.com/container-storage-interface/spec/blob/master/spec.md). First, the workload will be scheduled to run on a node. Then the controller component of a CSI Driver will attach the persistent volume to the node. Finally the node component of a CSI Driver will mount the persistent volume on the node. + +The node component of a CSI Driver needs to run on Windows nodes to support Windows workloads. Various privileged operations like scanning of disk devices, mounting of file systems, etc. cannot be done from a containerized application running on Windows nodes yet ([Windows HostProcess containers](https://github.com/kubernetes/enhancements/issues/1981) introduced in Kubernetes 1.22 as alpha enable functionalities that require host access like the operations mentioned before). 
However, we can perform these operations through a binary (CSI Proxy) that's pre-installed on the Windows nodes. CSI Proxy has a client-server architecture and allows CSI drivers to issue privileged storage operations through a gRPC interface exposed over named pipes created during the startup of CSI Proxy. + +![CSI Proxy Architecture](/images/blog/2021-08-09-csi-windows-support-with-csi-proxy-reaches-ga/csi-proxy.png) + +## CSI Proxy reaches GA + +The CSI Proxy development team has worked closely with storage vendors, many of whom started integrating CSI Proxy into their CSI Drivers and provided feedback as early as the CSI Proxy design proposal. This cooperation uncovered use cases where additional APIs were needed, found bugs, and identified areas for documentation improvement. + +The CSI Proxy design [KEP](https://github.com/kubernetes/enhancements/pull/2737) has been updated to reflect the current CSI Proxy architecture. Additional [development documentation](https://github.com/kubernetes-csi/csi-proxy/blob/master/docs/DEVELOPMENT.md) is included for contributors interested in helping with new features or bug fixes. + +Before we reached GA we wanted to make sure that our API is simple and consistent. We went through an extensive API review of the v1beta API groups where we made sure that the CSI Proxy API methods and messages are consistent with the naming conventions defined in the [CSI Spec](https://github.com/container-storage-interface/spec/blob/master/spec.md). As part of this effort we're graduating the [Disk](https://github.com/kubernetes-csi/csi-proxy/blob/master/docs/apis/disk_v1.md), [Filesystem](https://github.com/kubernetes-csi/csi-proxy/blob/master/docs/apis/filesystem_v1.md), [SMB](https://github.com/kubernetes-csi/csi-proxy/blob/master/docs/apis/smb_v1.md) and [Volume](https://github.com/kubernetes-csi/csi-proxy/blob/master/docs/apis/volume_v1.md) API groups to v1.
+ +Additional Windows system APIs to get information from the Windows nodes and support to mount iSCSI targets in Windows nodes, are available as alpha APIs in the [System API](https://github.com/kubernetes-csi/csi-proxy/tree/v1.0.0/client/api/system/v1alpha1) and the [iSCSI API](https://github.com/kubernetes-csi/csi-proxy/tree/v1.0.0/client/api/iscsi/v1alpha2). These APIs will continue to be improved before we graduate them to v1. + +CSI Proxy v1 is compatible with all the previous v1betaX releases. The GA `csi-proxy.exe` binary can handle requests from v1betaX clients thanks to the autogenerated conversion layer that transforms any versioned client request to a version-agnostic request that the server can process. Several [integration tests](https://github.com/kubernetes-csi/csi-proxy/tree/v1.0.0/integrationtests) were added for all the API versions of the API groups that are graduating to v1 to ensure that CSI Proxy is backwards compatible. + +Version drift between CSI Proxy and the CSI Drivers that interact with it was also carefully considered. A [connection fallback mechanism](https://github.com/kubernetes-csi/csi-proxy/pull/124) has been provided for CSI Drivers to handle multiple versions of CSI Proxy for a smooth upgrade to v1. This allows CSI Drivers, like the GCE PD CSI Driver, [to recognize which version of the CSI Proxy binary is running](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/pull/738) and handle multiple versions of the CSI Proxy binary deployed on the node. + +CSI Proxy v1 is already being used by many CSI Drivers, including the [AWS EBS CSI Driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/966), [Azure Disk CSI Driver](https://github.com/kubernetes-sigs/azuredisk-csi-driver/pull/919), [GCE PD CSI Driver](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/pull/738), and [SMB CSI Driver](https://github.com/kubernetes-csi/csi-driver-smb/pull/319). 
+ +## Future plans + +We're very excited for the future of CSI Proxy. With the upcoming [Windows HostProcess containers](https://github.com/kubernetes/enhancements/issues/1981), we are considering converting the CSI Proxy into a library consumed by CSI Drivers in addition to the current client/server design. This will allow us to iterate faster on new features because the `csi-proxy.exe` binary will no longer be needed. + +## How to get involved? + +This project, like all of Kubernetes, is the result of hard work by many contributors from diverse backgrounds working together. Those interested in getting involved with the design and development of CSI Proxy, or any part of the Kubernetes Storage system, may join the Kubernetes Storage Special Interest Group (SIG). We’re rapidly growing and always welcome new contributors. + +For those interested in more details about CSI support in Windows please reach out in the [#csi-windows](https://kubernetes.slack.com/messages/csi-windows) Kubernetes slack channel. + +## Acknowledgments + +CSI-Proxy received many contributions from members of the Kubernetes community.
We thank all of the people that contributed to CSI Proxy with design reviews, bug reports, bug fixes, and for their continuous support in reaching this milestone: + +- [Andy Zhang](https://github.com/andyzhangx) +- [Dan Ilan](https://github.com/jmpfar) +- [Deep Debroy](https://github.com/ddebroy) +- [Humble Devassy Chirammal](https://github.com/humblec) +- [Jing Xu](https://github.com/jingxu97) +- [Jean Rougé](https://github.com/wk8) +- [Jordan Liggitt](https://github.com/liggitt) +- [Kalya Subramanian](https://github.com/ksubrmnn) +- [Krishnakumar R](https://github.com/kkmsft) +- [Manuel Tellez](https://github.com/manueltellez) +- [Mark Rossetti](https://github.com/marosset) +- [Mauricio Poppe](https://github.com/mauriciopoppe) +- [Matthew Wong](https://github.com/wongma7) +- [Michelle Au](https://github.com/msau42) +- [Patrick Lang](https://github.com/PatrickLang) +- [Saad Ali](https://github.com/saad-ali) +- [Yuju Hong](https://github.com/yujuhong) \ No newline at end of file diff --git a/content/en/blog/_posts/2021-08-11-memory-manager-moves-to-beta.md b/content/en/blog/_posts/2021-08-11-memory-manager-moves-to-beta.md new file mode 100644 index 0000000000..0eeb8bde83 --- /dev/null +++ b/content/en/blog/_posts/2021-08-11-memory-manager-moves-to-beta.md @@ -0,0 +1,144 @@ +--- +layout: blog +title: "Kubernetes Memory Manager moves to beta" +date: 2021-08-11 +slug: kubernetes-1-22-feature-memory-manager-moves-to-beta +--- + +**Authors:** Artyom Lukianov (Red Hat), Cezary Zukowski (Samsung) + +The blog post explains some of the internals of the _Memory manager_, a beta feature +of Kubernetes 1.22. In Kubernetes, the Memory Manager is a +[kubelet](https://kubernetes.io/docs/concepts/overview/components/#kubelet) subcomponent. +The memory manager provides guaranteed memory (and hugepages) +allocation for pods in the `Guaranteed` [QoS class](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#qos-classes). + +This blog post covers: + +1.
[Why do you need it?](#Why-do-you-need-it?) +2. [The internal details of how the **MemoryManager** works](#How-does-it-work?) +3. [Current limitations of the **MemoryManager**](#Current-limitations) +4. [Future work for the **MemoryManager**](#Future-work-for-the-Memory-Manager) + +## Why do you need it? + +Some Kubernetes workloads run on nodes with +[non-uniform memory access](https://en.wikipedia.org/wiki/Non-uniform_memory_access) (NUMA). +Suppose you have NUMA nodes in your cluster. In that case, you'll know about the potential for extra latency when +compute resources need to access memory on the different NUMA locality. + +To get the best performance and latency for your workload, container CPUs, +peripheral devices, and memory should all be aligned to the same NUMA +locality. +Before Kubernetes v1.22, the kubelet already provided a set of managers to +align CPUs and PCI devices, but you did not have a way to align memory. +The Linux kernel was able to make best-effort attempts to allocate +memory for tasks from the same NUMA node where the container is +executing, but without any guarantee about that placement. + +## How does it work? + +The memory manager is doing two main things: +- provides the topology hint to the Topology Manager +- allocates the memory for containers and updates the state + +The overall sequence of the Memory Manager under the Kubelet + +![MemoryManagerDiagram](/images/blog/2021-08-11-memory-manager-moves-to-beta/MemoryManagerDiagram.svg "MemoryManagerDiagram") + +During the Admission phase: + +1. When first handling a new pod, the kubelet calls the TopologyManager's `Admit()` method. +2. The Topology Manager calls `GetTopologyHints()` for every hint provider including the Memory Manager. +3. The Memory Manager calculates all possible NUMA nodes combinations for every container inside the pod and returns hints to the Topology Manager. +4.
The Topology Manager calls `Allocate()` for every hint provider including the Memory Manager. +5. The Memory Manager allocates the memory under the state according to the hint that the Topology Manager chose. + +During Pod creation: + +1. The kubelet calls `PreCreateContainer()`. +2. For each container, the Memory Manager looks up the NUMA nodes where it allocated the + memory for the container and then returns that information to the kubelet. +3. The kubelet creates the container, via CRI, using a container specification + that incorporates information from the Memory Manager. + +### Let's talk about the configuration + +By default, the Memory Manager runs with the `None` policy, meaning it will just +relax and not do anything. To make use of the Memory Manager, you should set +two command line options for the kubelet: + +- `--memory-manager-policy=Static` +- `--reserved-memory="<numaNodeID>:<resourceName>=<quantity>"` + +The value for `--memory-manager-policy` is straightforward: `Static`. Deciding what to specify for `--reserved-memory` takes more thought. To configure it correctly, you should follow two main rules: + +- The amount of reserved memory for the `memory` resource must be greater than zero. +- The amount of reserved memory for the resource type must be equal + to [NodeAllocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) + (`kube-reserved + system-reserved + eviction-hard`) for the resource. + You can read more about memory reservations in [Reserve Compute Resources for System Daemons](/docs/tasks/administer-cluster/reserve-compute-resources/). + +![Reserved memory](/images/blog/2021-08-11-memory-manager-moves-to-beta/ReservedMemory.svg) + +## Current limitations + +The 1.22 release and promotion to beta brings along enhancements and fixes, but the Memory Manager still has several limitations. + +### Single vs Cross NUMA node allocation + +The NUMA node can not have both single and cross NUMA node allocations.
When the container memory is pinned to two or more NUMA nodes, we can not know from which NUMA node the container will consume the memory. + +![Single vs Cross NUMA allocation](/images/blog/2021-08-11-memory-manager-moves-to-beta/SingleCrossNUMAAllocation.svg "SingleCrossNUMAAllocation") + +1. The `container1` started on the NUMA node 0 and requests *5Gi* of the memory but currently is consuming only *3Gi* of the memory. +2. For container2 the memory request is 10Gi, and no single NUMA node can satisfy it. +3. The `container2` consumes *3.5Gi* of the memory from the NUMA node 0, but once the `container1` will require more memory, it will not have it, and the kernel will kill one of the containers with the *OOM* error. + +To prevent such issues, the Memory Manager will fail the admission of the `container2` until the machine has two NUMA nodes without a single NUMA node allocation. + +### Works only for Guaranteed pods + +The Memory Manager can not guarantee memory allocation for Burstable pods, +also when the Burstable pod has specified equal memory limit and request. + +Let's assume you have two Burstable pods: `pod1` has containers with +equal memory request and limits, and `pod2` has containers only with a +memory request set. You want to guarantee memory allocation for the `pod1`. +To the Linux kernel, processes in either pod have the same *OOM score*, +once the kernel finds that it does not have enough memory, it can kill +processes that belong to pod `pod1`. + +### Memory fragmentation + +The sequence of Pods and containers that start and stop can fragment the memory on NUMA nodes. +The alpha implementation of the Memory Manager does not have any mechanism to balance pods and defragment memory back. + +## Future work for the Memory Manager + +We do not want to stop with the current state of the Memory Manager and are looking to +make improvements, including in the following areas. 
+ +### Make the Memory Manager allocation algorithm smarter + +The current algorithm ignores distances between NUMA nodes during the +calculation of the allocation. If same-node placement isn't available, we can still +provide better performance compared to the current implementation, by changing the +Memory Manager to prefer the closest NUMA nodes for cross-node allocation. + +### Reduce the number of admission errors + +The default Kubernetes scheduler is not aware of the node's NUMA topology, and it can be a reason for many admission errors during the pod start. +We're hoping to add a KEP (Kubernetes Enhancement Proposal) to cover improvements in this area. +Follow [Topology aware scheduler plugin in kube-scheduler](https://github.com/kubernetes/enhancements/issues/2044) to see how this idea progresses. + + +## Conclusion +With the promotion of the Memory Manager to beta in 1.22, we encourage everyone to give it a try and look forward to any feedback you may have. While there are still several limitations, we have a set of enhancements planned to address them and look forward to providing you with many new features in upcoming releases. +If you have ideas for additional enhancements or a desire for certain features, please let us know. The team is always open to suggestions to enhance and improve the Memory Manager. +We hope you have found this blog informative and helpful! Let us know if you have any questions or comments. 
+ +You can contact us via: +- The Kubernetes [#sig-node ](https://kubernetes.slack.com/messages/sig-node) + channel in Slack (visit https://slack.k8s.io/ for an invitation if you need one) +- The SIG Node mailing list, [kubernetes-sig-node@googlegroups.com](https://groups.google.com/g/kubernetes-sig-node) diff --git a/content/en/blog/_posts/2021-08-16-support-for-hostprocess-containers/hostprocess-architecture.png b/content/en/blog/_posts/2021-08-16-support-for-hostprocess-containers/hostprocess-architecture.png new file mode 100755 index 0000000000..b28bfcf808 Binary files /dev/null and b/content/en/blog/_posts/2021-08-16-support-for-hostprocess-containers/hostprocess-architecture.png differ diff --git a/content/en/blog/_posts/2021-08-16-support-for-hostprocess-containers/index.md b/content/en/blog/_posts/2021-08-16-support-for-hostprocess-containers/index.md new file mode 100644 index 0000000000..5475640e3b --- /dev/null +++ b/content/en/blog/_posts/2021-08-16-support-for-hostprocess-containers/index.md @@ -0,0 +1,79 @@ +--- +layout: blog +title: 'Alpha in v1.22: Windows HostProcess Containers' +date: 2021-08-16 +slug: windows-hostprocess-containers +--- + +**Authors:** Brandon Smith (Microsoft) + +Kubernetes v1.22 introduced a new alpha feature for clusters that +include Windows nodes: HostProcess containers. + +HostProcess containers aim to extend the Windows container model to enable a wider +range of Kubernetes cluster management scenarios. HostProcess containers run +directly on the host and maintain behavior and access similar to that of a regular +process. With HostProcess containers, users can package and distribute management +operations and functionalities that require host access while retaining versioning +and deployment methods provided by containers. This allows Windows containers to +be used for a variety of device plugin, storage, and networking management scenarios +in Kubernetes. 
With this comes the enablement of host network mode—allowing +HostProcess containers to be created within the host's network namespace instead of +their own. HostProcess containers can also be built on top of existing Windows server +2019 (or later) base images, managed through the Windows container runtime, and run +as any user that is available on or in the domain of the host machine. + +Linux privileged containers are currently used for a variety of key scenarios in +Kubernetes, including kube-proxy (via kubeadm), storage, and networking scenarios. +Support for these scenarios in Windows previously required workarounds via proxies +or other implementations. Using HostProcess containers, cluster operators no longer +need to log onto and individually configure each Windows node for administrative +tasks and management of Windows services. Operators can now utilize the container +model to deploy management logic to as many clusters as needed with ease. + +## How does it work? + +Windows HostProcess containers are implemented with Windows _Job Objects_, a break from the +previous container model using server silos. Job objects are components of the Windows OS which offer the ability to +manage a group of processes as a group (a.k.a. _jobs_) and assign resource constraints to the +group as a whole. Job objects are specific to the Windows OS and are not associated with the Kubernetes [Job API](https://kubernetes.io/docs/concepts/workloads/controllers/job/). They have no process or file system isolation, +enabling the privileged payload to view and edit the host file system with the +correct permissions, among other host resources. The init process, and any processes +it launches or that are explicitly launched by the user, are all assigned to the +job object of that container. When the init process exits or is signaled to exit, +all the processes in the job will be signaled to exit, the job handle will be +closed and the storage will be unmounted. 
+ +HostProcess and Linux privileged containers enable similar scenarios but differ +greatly in their implementation (hence the naming difference). HostProcess containers +have their own pod security policies. Those used to configure Linux privileged +containers **do not** apply. Enabling privileged access to a Windows host is a +fundamentally different process than with Linux so the configuration and +capabilities of each differ significantly. Below is a diagram detailing the +overall architecture of Windows HostProcess containers: + +{{< figure src="hostprocess-architecture.png" alt="HostProcess Architecture" >}} + +## How do I use it? + +HostProcess containers can be run from within a +[HostProcess Pod](/docs/tasks/configure-pod-container/create-hostprocess-pod). +With the feature enabled on Kubernetes version 1.22, a containerd container runtime of +1.5.4 or higher, and the latest version of hcsshim, deploying a pod spec with the +[correct HostProcess configuration](/docs/tasks/configure-pod-container/create-hostprocess-pod/#before-you-begin) +will enable you to run HostProcess containers. To get started with running +Windows containers see the general guidance for [Windows in Kubernetes](/docs/setup/production-environment/windows/) + +## How can I learn more? + +- Work through [Create a Windows HostProcess Pod](/docs/tasks/configure-pod-container/create-hostprocess-pod/) + +- Read about Kubernetes [Pod Security Standards](/docs/concepts/security/pod-security-standards/) + +- Read the enhancement proposal [Windows Privileged Containers and Host Networking Mode](https://github.com/kubernetes/enhancements/tree/master/keps/sig-windows/1981-windows-privileged-container-support) (KEP-1981) + +## How do I get involved? + +HostProcess containers are in active development. SIG Windows welcomes suggestions from the community. +Get involved with [SIG Windows](https://github.com/kubernetes/community/tree/master/sig-windows) +to contribute! 
diff --git a/content/en/blog/_posts/2021-08-25-seccomp-default.md b/content/en/blog/_posts/2021-08-25-seccomp-default.md new file mode 100644 index 0000000000..c38b7fdee1 --- /dev/null +++ b/content/en/blog/_posts/2021-08-25-seccomp-default.md @@ -0,0 +1,267 @@ +--- +layout: blog +title: "Enable seccomp for all workloads with a new v1.22 alpha feature" +date: 2021-08-25 +slug: seccomp-default +--- + +**Author:** Sascha Grunert, Red Hat + +This blog post is about a new Kubernetes feature introduced in v1.22, which adds +an additional security layer on top of the existing seccomp support. Seccomp is +a security mechanism for Linux processes to filter system calls (syscalls) based +on a set of defined rules. Applying seccomp profiles to containerized workloads +is one of the key tasks when it comes to enhancing the security of the +application deployment. Developers, site reliability engineers and +infrastructure administrators have to work hand in hand to create, distribute +and maintain the profiles over the application's life-cycle. + +You can use the [`securityContext`][seccontext] field of Pods and their +containers to adjust security related configurations of the +workload. Kubernetes introduced dedicated [seccomp related API +fields][seccontext] in this `SecurityContext` with the [graduation of seccomp to +General Availability (GA)][ga] in v1.19.0.
This enhancement allowed an easier +way to specify if the whole pod or a specific container should run as: + +[seccontext]: /docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1 +[ga]: https://kubernetes.io/blog/2020/08/26/kubernetes-release-1.19-accentuate-the-paw-sitive/#graduated-to-stable + +- `Unconfined`: seccomp will not be enabled +- `RuntimeDefault`: the container runtime's default profile will be used +- `Localhost`: a node local profile will be applied, which is being referenced + by a relative path to the seccomp profile root (`/seccomp`) + of the kubelet + +With the graduation of seccomp, nothing has changed from an overall security +perspective, because `Unconfined` is still the default. This is totally fine if +you consider this from the upgrade path and backwards compatibility perspective of +Kubernetes releases. But it also means that it is more likely that a workload +runs without seccomp at all, which should be fixed in the long term. + +## `SeccompDefault` to the rescue + +Kubernetes v1.22.0 introduces a new kubelet [feature gate][gate] +`SeccompDefault`, which has been added in `alpha` state as every other new +feature. This means that it is disabled by default and can be enabled manually +for every single Kubernetes node. + +[gate]: /docs/reference/command-line-tools-reference/feature-gates + +What does the feature do? Well, it just changes the default seccomp profile from +`Unconfined` to `RuntimeDefault`. If not specified differently in the pod +manifest, then the feature will add a higher set of security constraints by +using the default profile of the container runtime. These profiles may differ +between runtimes like [CRI-O][crio] or [containerd][ctrd]. They also differ depending +on the hardware architecture in use. But generally speaking, those default profiles +allow a common amount of syscalls while blocking the more dangerous ones, which +are unlikely or unsafe to be used in a containerized application.
+ +[crio]: https://github.com/cri-o/cri-o/blob/fe30d62/vendor/github.com/containers/common/pkg/seccomp/default_linux.go#L45 +[ctrd]: https://github.com/containerd/containerd/blob/e1445df/contrib/seccomp/seccomp_default.go#L51 + +### Enabling the feature + +Two kubelet configuration changes have to be made to enable the feature: + +1. **Enable the feature** gate by setting the `SeccompDefault=true` via the command + line (`--feature-gates`) or the [kubelet configuration][kubelet] file. +2. **Turn on the feature** by enabling the feature by adding the + `--seccomp-default` command line flag or via the [kubelet + configuration][kubelet] file (`seccompDefault: true`). + +[kubelet]: /docs/tasks/administer-cluster/kubelet-config-file + +The kubelet will error on startup if only one of the above steps have been done. + +### Trying it out + +If the feature is enabled on a node, then you can create a new workload like +this: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-pod +spec: + containers: + - name: test-container + image: nginx:1.21 +``` + +Now it is possible to inspect the used seccomp profile by using +[`crictl`][crictl] while investigating the containers [runtime +specification][rspec]: + +[crictl]: https://github.com/kubernetes-sigs/cri-tools +[rspec]: https://github.com/opencontainers/runtime-spec/blob/0c021c1/config-linux.md#seccomp + +```bash +CONTAINER_ID=$(sudo crictl ps -q --name=test-container) +sudo crictl inspect $CONTAINER_ID | jq .info.runtimeSpec.linux.seccomp +``` + +```yaml +{ + "defaultAction": "SCMP_ACT_ERRNO", + "architectures": ["SCMP_ARCH_X86_64", "SCMP_ARCH_X86", "SCMP_ARCH_X32"], + "syscalls": [ + { + "names": ["_llseek", "_newselect", "accept", …, "write", "writev"], + "action": "SCMP_ACT_ALLOW" + }, + … + ] +} +``` + +You can see that the lower level container runtime ([CRI-O][crio-home] and +[runc][runc] in our case), successfully applied the default seccomp profile. 
+This profile denies all syscalls per default, while allowing commonly used ones +like [`accept`][accept] or [`write`][write]. + +[crio-home]: https://github.com/cri-o/cri-o +[runc]: https://github.com/opencontainers/runc +[accept]: https://man7.org/linux/man-pages/man2/accept.2.html +[write]: https://man7.org/linux/man-pages/man2/write.2.html + +Please note that the feature will not influence any Kubernetes API for now. +Therefore, it is not possible to retrieve the used seccomp profile via `kubectl` +`get` or `describe` if the [`SeccompProfile`][api] field is unset within the +`SecurityContext`. + +[api]: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1 + +The feature also works when using multiple containers within a pod, for example +if you create a pod like this: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-pod +spec: + containers: + - name: test-container-nginx + image: nginx:1.21 + securityContext: + seccompProfile: + type: Unconfined + - name: test-container-redis + image: redis:6.2 +``` + +then you should see that the `test-container-nginx` runs without a seccomp profile: + +```bash +sudo crictl inspect $(sudo crictl ps -q --name=test-container-nginx) | + jq '.info.runtimeSpec.linux.seccomp == null' +true +``` + +Whereas the container `test-container-redis` runs with `RuntimeDefault`: + +```bash +sudo crictl inspect $(sudo crictl ps -q --name=test-container-redis) | + jq '.info.runtimeSpec.linux.seccomp != null' +true +``` + +The same applies to the pod itself, which also runs with the default profile: + +```bash +sudo crictl inspectp (sudo crictl pods -q --name test-pod) | + jq '.info.runtimeSpec.linux.seccomp != null' +true +``` + +### Upgrade strategy + +It is recommended to enable the feature in multiple steps, whereas different +risks and mitigations exist for each one. 
+ +#### Feature gate enabling + +Enabling the feature gate at the kubelet level will not turn on the feature, but +will make it possible by using the `SeccompDefault` kubelet configuration or the +`--seccomp-default` CLI flag. This can be done by an administrator for the whole +cluster or only a set of nodes. + +#### Testing the Application + +If you're trying this within a dedicated test environment, you have to ensure +that the application code does not trigger syscalls blocked by the +`RuntimeDefault` profile before enabling the feature on a node. This can be done +by: + +- _Recommended_: Analyzing the code (manually or by running the application with + [strace][strace]) for any executed syscalls which may be blocked by the + default profiles. If that's the case, then you can override the default by + explicitly setting the pod or container to run as `Unconfined`. Alternatively, + you can create a custom seccomp profile (see optional step below) based on + the default by adding the additional syscalls to the + `"action": "SCMP_ACT_ALLOW"` section. + +- _Recommended_: Manually set the profile to the target workload and use a + rolling upgrade to deploy into production. Rollback the deployment if the + application does not work as intended. + +- _Optional_: Run the application against an end-to-end test suite to trigger + all relevant code paths with `RuntimeDefault` enabled. If a test fails, use + the same mitigation as mentioned above. + +- _Optional_: Create a custom seccomp profile based on the default and change + its default action from `SCMP_ACT_ERRNO` to `SCMP_ACT_LOG`. This means that + the seccomp filter for unknown syscalls will have no effect on the application + at all, but the system logs will now indicate which syscalls may be blocked. + This requires at least a Kernel version 4.14 as well as a recent [runc][runc] + release.
Monitor the application host's audit logs (defaults to + `/var/log/audit/audit.log`) or syslog entries (defaults to `/var/log/syslog`) + for syscalls via `type=SECCOMP` (for audit) or `type=1326` (for syslog). + Compare the syscall ID with those [listed in the Linux Kernel + sources][syscalls] and add them to the custom profile. Be aware that custom + audit policies may lead to missing syscalls, depending on the configuration + of auditd. + +- _Optional_: Use cluster additions like the [Security Profiles Operator][spo] + for profiling the application via its [log enrichment][logs] capabilities or + recording a profile by using its [recording feature][rec]. This makes the + above mentioned manual log investigation obsolete. + +[syscalls]: https://github.com/torvalds/linux/blob/7bb7f2a/arch/x86/entry/syscalls/syscall_64.tbl +[spo]: https://github.com/kubernetes-sigs/security-profiles-operator +[logs]: https://github.com/kubernetes-sigs/security-profiles-operator/blob/c90ef3a/installation-usage.md#using-the-log-enricher +[rec]: https://github.com/kubernetes-sigs/security-profiles-operator/blob/c90ef3a/installation-usage.md#record-profiles-from-workloads-with-profilerecordings +[strace]: https://man7.org/linux/man-pages/man1/strace.1.html + +#### Deploying the modified application + +Based on the outcome of the application tests, it may be required to change the +application deployment by either specifying `Unconfined` or a custom seccomp +profile. This is not the case if the application works as intended with +`RuntimeDefault`. + +#### Enable the kubelet configuration + +If everything went well, then the feature is ready to be enabled by the kubelet +configuration or its corresponding CLI flag. This should be done on a per-node +basis to reduce the overall risk of missing a syscall during the investigations +when running the application tests.
If it's possible to monitor audit logs +within the cluster, then it's recommended to do this for eventually missed +seccomp events. If the application works as intended then the feature can be +enabled for further nodes within the cluster. + +## Conclusion + +Thank you for reading this blog post! I hope you enjoyed seeing how the usage of +seccomp profiles has evolved in Kubernetes over the past releases as much +as I do. On your own cluster, change the default seccomp profile to +`RuntimeDefault` (using this new feature) and see the security benefits, and, of +course, feel free to reach out any time for feedback or questions. + +--- + +_Editor's note: If you have any questions or feedback about this blog post, feel +free to reach out via the [Kubernetes slack in #sig-node][slack]._ + +[slack]: https://kubernetes.slack.com/messages/sig-node diff --git a/content/en/blog/_posts/2021-08-27-minreadysecond-statefulsets.md b/content/en/blog/_posts/2021-08-27-minreadysecond-statefulsets.md new file mode 100644 index 0000000000..390665950e --- /dev/null +++ b/content/en/blog/_posts/2021-08-27-minreadysecond-statefulsets.md @@ -0,0 +1,48 @@ +--- +layout: blog +title: 'Minimum Ready Seconds for StatefulSets' +date: 2021-08-27 +slug: minreadyseconds-statefulsets +--- + +**Authors:** Ravi Gudimetla (Red Hat), Maciej Szulik (Red Hat) + +This blog describes the notion of Availability for `StatefulSet` workloads, and a new alpha feature in Kubernetes 1.22 which adds `minReadySeconds` configuration for `StatefulSets`. + +## What problems does this solve? + +Prior to the Kubernetes 1.22 release, once a `StatefulSet` `Pod` is in the `Ready` state it is considered `Available` to receive traffic. For some of the `StatefulSet` workloads, it may not be the case. For example, for a workload like Prometheus with multiple instances of Alertmanager, a `Pod` should be considered `Available` only when Alertmanager's state transfer is complete, not when the `Pod` is in `Ready` state.
Since `minReadySeconds` adds a buffer, the state transfer may be complete before the `Pod` becomes `Available`. While this is not a foolproof way of identifying if the state transfer is complete or not, it gives a way to the end user to express their intention of waiting for some time before the `Pod` is considered `Available` and it is ready to serve requests. + +Another case where `minReadySeconds` helps is when using `LoadBalancer` `Services` with cloud providers. Since `minReadySeconds` adds latency after a `Pod` is `Ready`, it provides buffer time to prevent killing pods in rotation before new pods show up. Imagine a load balancer in unhappy path taking 10-15s to propagate. If you have 2 replicas then, you'd kill the second replica only after the first one is up but in reality, first replica cannot be seen because it is not yet ready to serve requests. + +So, in general, the notion of `Availability` in `StatefulSets` is pretty useful and this feature helps in solving the above problems. This is a feature that already exists for `Deployments` and `DaemonSets` and we now have it for `StatefulSets` too to give users a consistent workload experience. + + +## How does it work? + +The statefulSet controller watches for both `StatefulSets` and the `Pods` associated with them. When the feature gate associated with this feature is enabled, the statefulSet controller identifies how long a particular `Pod` associated with a `StatefulSet` has been in the `Running` state. + +If this value is greater than or equal to the time specified by the end user in `.spec.minReadySeconds` field, the statefulSet controller updates a field called `availableReplicas` in the `StatefulSet`'s status subresource to include this `Pod`. The `status.availableReplicas` in `StatefulSet`'s status is an integer field which tracks the number of pods that are `Available`. + +## How do I use it?
+ +You are required to prepare the following things in order to try out the feature: + + - Download and install kubectl v1.22.0 or later + - Switch on the feature gate with the command line flag `--feature-gates=StatefulSetMinReadySeconds=true` on `kube-apiserver` and `kube-controller-manager` + +After successfully starting `kube-apiserver` and `kube-controller-manager`, you will see `AvailableReplicas` in the status and `minReadySeconds` of spec (with a default value of 0). + +Specify a value for `minReadySeconds` for any StatefulSet and you can check if `Pods` are available or not by checking `AvailableReplicas` field using: +`kubectl get statefulset/<name-of-the-statefulset> -o yaml` + +## How can I learn more? + +- Read the KEP: [minReadySeconds for StatefulSets](https://github.com/kubernetes/enhancements/tree/master/keps/sig-apps/2599-minreadyseconds-for-statefulsets#readme) +- Read the documentation: [Minimum ready seconds](/docs/concepts/workloads/controllers/statefulset/#minimum-ready-seconds) for StatefulSet +- Review the [API definition](/docs/reference/kubernetes-api/workload-resources/stateful-set-v1/) for StatefulSet + +## How do I get involved? + +Please reach out to us in the [#sig-apps](https://kubernetes.slack.com/archives/C18NZM5K9) channel on Slack (visit https://slack.k8s.io/ for an invitation if you need one), or on the SIG Apps mailing list: kubernetes-sig-apps@googlegroups.com + diff --git a/content/en/blog/_posts/2021-08-30-volume-populators-alpha.md b/content/en/blog/_posts/2021-08-30-volume-populators-alpha.md new file mode 100644 index 0000000000..4f3a408584 --- /dev/null +++ b/content/en/blog/_posts/2021-08-30-volume-populators-alpha.md @@ -0,0 +1,219 @@ +--- +layout: blog +title: "Kubernetes 1.22: A New Design for Volume Populators" +date: 2021-08-30 +slug: volume-populators-redesigned +--- + +**Authors:** +Ben Swartzlander (NetApp) + +Kubernetes v1.22, released earlier this month, introduced a redesigned approach for volume +populators.
Originally implemented +in v1.18, the API suffered from backwards compatibility issues. Kubernetes v1.22 includes a new API +field called `dataSourceRef` that fixes these problems. + +## Data sources + +Earlier Kubernetes releases already added a `dataSource` field into the +[PersistentVolumeClaim](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) API, +used for cloning volumes and creating volumes from snapshots. You could use the `dataSource` field when +creating a new PVC, referencing either an existing PVC or a VolumeSnapshot in the same namespace. +That also modified the normal provisioning process so that instead of yielding an empty volume, the +new PVC contained the same data as either the cloned PVC or the cloned VolumeSnapshot. + +Volume populators embrace the same design idea, but extend it to any type of object, as long +as there exists a [custom resource](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) +to define the data source, and a populator controller to implement the logic. Initially, +the `dataSource` field was directly extended to allow arbitrary objects, if the `AnyVolumeDataSource` +feature gate was enabled on a cluster. That change unfortunately caused backwards compatibility +problems, and so the new `dataSourceRef` field was born. + +In v1.22 if the `AnyVolumeDataSource` feature gate is enabled, the `dataSourceRef` field is +added, which behaves similarly to the `dataSource` field except that it allows arbitrary +objects to be specified. The API server ensures that the two fields always have the same +contents, and neither of them is mutable. The difference is that at creation time +`dataSource` allows only PVCs or VolumeSnapshots, and ignores all other values, while +`dataSourceRef` allows most types of objects, and in the few cases it doesn't allow an +object (core objects other than PVCs) a validation error occurs.
+ +When this API change graduates to stable, we intend to deprecate using `dataSource` and recommend +using the `dataSourceRef` field for all use cases. +In the v1.22 release, `dataSourceRef` is available (as an alpha feature) specifically for cases +where you want to use custom volume populators. + +## Using populators + +Every volume populator must have one or more CRDs that it supports. Administrators may +install the CRD and the populator controller, and then PVCs with a `dataSourceRef` that specifies +a CR of the type that the populator supports will be handled by the populator controller +instead of the CSI driver directly. + +Underneath the covers, the CSI driver is still invoked to create an empty volume, which +the populator controller fills with the appropriate data. The PVC doesn't bind to the PV +until it's fully populated, so it's safe to define a whole application manifest including +pod and PVC specs and the pods won't begin running until everything is ready, just as if +the PVC was a clone of another PVC or VolumeSnapshot. + +## How it works + +PVCs with data sources are still noticed by the external-provisioner sidecar for the +related storage class (assuming a CSI provisioner is used), but because the sidecar +doesn't understand the data source kind, it doesn't do anything. The populator controller +is also watching for PVCs with data sources of a kind that it understands and when it +sees one, it creates a temporary PVC of the same size, volume mode, storage class, +and even on the same topology (if topology is used) as the original PVC. The populator +controller creates a worker pod that attaches to the volume and writes the necessary +data to it, then detaches from the volume and the populator controller rebinds the PV +from the temporary PVC to the original PVC.
+ +## Trying it out + +The following things are required to use volume populators: +* Enable the `AnyVolumeDataSource` feature gate +* Install a CRD for the specific data source / populator +* Install the populator controller itself + +Populator controllers may use the [lib-volume-populator](https://github.com/kubernetes-csi/lib-volume-populator) +library to do most of the Kubernetes API level work. Individual populators only need to +provide logic for actually writing data into the volume based on a particular CR +instance. This library provides a sample populator implementation. + +These optional components improve user experience: +* Install the VolumePopulator CRD +* Create a VolumePopulator custom resource for each specific data source +* Install the [volume data source validator](https://github.com/kubernetes-csi/volume-data-source-validator) + controller (alpha) + +The purpose of these components is to generate warning events on PVCs with data sources +for which there is no populator. + +## Putting it all together + +To see how this works, you can install the sample "hello" populator and try it +out. + +First install the volume-data-source-validator controller. + +```terminal +kubectl apply -f https://github.com/kubernetes-csi/volume-data-source-validator/blob/master/deploy/kubernetes/rbac-data-source-validator.yaml +kubectl apply -f https://github.com/kubernetes-csi/volume-data-source-validator/blob/master/deploy/kubernetes/setup-data-source-validator.yaml +``` + +Next install the example populator. + +```terminal +kubectl apply -f https://github.com/kubernetes-csi/lib-volume-populator/blob/master/example/hello-populator/crd.yaml +kubectl apply -f https://github.com/kubernetes-csi/lib-volume-populator/blob/master/example/hello-populator/deploy.yaml +``` + +Create an instance of the `Hello` CR, with some text.
+ +```yaml +apiVersion: hello.k8s.io/v1alpha1 +kind: Hello +metadata: + name: example-hello +spec: + fileName: example.txt + fileContents: Hello, world! +``` + +Create a PVC that refers to that CR as its data source. + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: example-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Mi + dataSourceRef: + apiGroup: hello.k8s.io + kind: Hello + name: example-hello + volumeMode: Filesystem +``` + +Next, run a job that reads the file in the PVC. + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: example-job +spec: + template: + spec: + containers: + - name: example-container + image: busybox:latest + command: + - cat + - /mnt/example.txt + volumeMounts: + - name: vol + mountPath: /mnt + restartPolicy: Never + volumes: + - name: vol + persistentVolumeClaim: + claimName: example-pvc +``` + +Wait for the job to complete (including all of its dependencies). + +```terminal +kubectl wait --for=condition=Complete job/example-job +``` + +And last examine the log from the job. + +```terminal +kubectl logs job/example-job +Hello, world! +``` + +Note that the volume already contained a text file with the string contents from +the CR. This is only the simplest example. Actual populators can set up the volume +to contain arbitrary contents. + +## How to write your own volume populator + +Developers interested in writing new poplators are encouraged to use the +[lib-volume-populator](https://github.com/kubernetes-csi/lib-volume-populator) library +and to only supply a small controller wrapper around the library, and a pod image +capable of attaching to volumes and writing the appropriate data to the volume. 
+ +Individual populators can be extremely generic such that they work with every type +of PVC, or they can do vendor specific things to rapidly fill a volume with data +if the volume was provisioned by a specific CSI driver from the same vendor, for +example, by communicating directly with the storage for that volume. + +## The future + +As this feature is still in alpha, we expect to update the out of tree controllers +with more tests and documentation. The community plans to eventually re-implement +the populator library as a sidecar, for ease of operations. + +We hope to see some official community-supported populators for some widely-shared +use cases. Also, we expect that volume populators will be used by backup vendors +as a way to "restore" backups to volumes, and possibly a standardized API to do +this will evolve. + +## How can I learn more? + +The enhancement proposal, +[Volume Populators](https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/1495-volume-populators), includes lots of detail about the history and technical implementation +of this feature. + +[Volume populators and data sources](/docs/concepts/storage/persistent-volumes/#volume-populators-and-data-sources), within the documentation topic about persistent volumes, +explains how to use this feature in your cluster. + +Please get involved by joining the Kubernetes storage SIG to help us enhance this +feature. There are a lot of good ideas already and we'd be thrilled to have more! 
+ diff --git a/content/en/blog/_posts/2021-09-03-api-server-tracing.md b/content/en/blog/_posts/2021-09-03-api-server-tracing.md new file mode 100644 index 0000000000..fc98a68d23 --- /dev/null +++ b/content/en/blog/_posts/2021-09-03-api-server-tracing.md @@ -0,0 +1,67 @@ +--- +layout: blog +title: 'Alpha in Kubernetes v1.22: API Server Tracing' +date: 2021-09-03 +slug: api-server-tracing +--- + +**Authors:** David Ashpole (Google) + +In distributed systems, it can be hard to figure out where problems are. You grep through one component's logs just to discover that the source of your problem is in another component. You search there only to discover that you need to enable debug logs to figure out what really went wrong... And it goes on. The more complex the path your request takes, the harder it is to answer questions about where it went. I've personally spent many hours doing this dance with a variety of Kubernetes components. Distributed tracing is a tool which is designed to help in these situations, and the Kubernetes API Server is, perhaps, the most important Kubernetes component to be able to debug. At Kubernetes' Sig Instrumentation, our mission is to make it easier to understand what's going on in your cluster, and we are happy to announce that distributed tracing in the Kubernetes API Server reached alpha in 1.22. + +## What is Tracing? + +Distributed tracing links together a bunch of super-detailed information from multiple different sources, and structures that telemetry into a single tree for that request. Unlike logging, which limits the quantity of data ingested by using log levels, tracing collects all of the details and uses sampling to collect only a small percentage of requests. This means that once you have a trace which demonstrates an issue, you should have all the information you need to root-cause the problem--no grepping for object UID required! My favorite aspect, though, is how useful the visualizations of traces are. 
Even if you don't understand the inner workings of the API Server, or don't have a clue what an etcd "Transaction" is, I'd wager you (yes, you!) could tell me roughly what the order of events was, and which components were involved in the request. If some step takes a long time, it is easy to tell where the problem is. + +## Why OpenTelemetry? + +It's important that Kubernetes works well for everyone, regardless of who manages your infrastructure, or which vendors you choose to integrate with. That is particularly true for Kubernetes' integrations with telemetry solutions. OpenTelemetry, being a CNCF project, shares these core values, and is creating exactly what we need in Kubernetes: A set of open standards for Tracing client library APIs and a standard trace format. By using OpenTelemetry, we can ensure users have the freedom to choose their backend, and ensure vendors have a level playing field. The timing couldn't be better: the OpenTelemetry golang API and SDK are very close to their 1.0 release, and will soon offer backwards-compatibility for these open standards. + +## Why instrument the API Server? + +The Kubernetes API Server is a great candidate for tracing for a few reasons: + +* It follows the standard "RPC" model (serve a request by making requests to downstream components), which makes it easy to instrument. +* Users are latency-sensitive: If a request takes more than 10 seconds to complete, many clients will time-out. +* It has a complex service topology: A single request could require consulting a dozen webhooks, or involve multiple requests to etcd. + +## Trying out APIServer Tracing with a webhook + +### Enabling API Server Tracing + +1. Enable the APIServerTracing [feature-gate](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/). + +2. 
Set our configuration for tracing by pointing the `--tracing-config-file` flag on the kube-apiserver at our config file, which contains: + +```yaml +apiVersion: apiserver.config.k8s.io/v1alpha1 +kind: TracingConfiguration +# 1% sampling rate +samplingRatePerMillion: 10000 +``` + +### Enabling Etcd Tracing + +Add `--experimental-enable-distributed-tracing`, `--experimental-distributed-tracing-address=0.0.0.0:4317`, `--experimental-distributed-tracing-service-name=etcd` flags to etcd to enable tracing. Note that this traces every request, so it will probably generate a lot of traces if you enable it. + +### Example Trace: List Nodes + +I could've used any trace backend, but decided to use Jaeger, since it is one of the most popular open-source tracing projects. I deployed [the Jaeger All-in-one container](https://hub.docker.com/r/jaegertracing/all-in-one) in my cluster, deployed [the OpenTelemetry collector](https://github.com/open-telemetry/opentelemetry-collector) on my control-plane node ([example](https://github.com/dashpole/dashpole_demos/tree/master/otel/controlplane)), and captured traces like this one: + +![Jaeger screenshot showing API server and etcd trace](/images/blog/2021-09-03-api-server-tracing/example-trace-1.png "Jaeger screenshot showing API server and etcd trace") + +The teal lines are from the API Server, and includes it serving a request to `/api/v1/nodes`, and issuing a grpc `Range` RPC to ETCD. The yellow-ish line is from ETCD handling the `Range` RPC. + +### Example Trace: Create Pod with Mutating Webhook + +I instrumented the [example webhook](https://github.com/kubernetes-sigs/controller-runtime/tree/master/examples/builtins) with OpenTelemetry (I had to [patch](https://github.com/dashpole/controller-runtime/commit/85fdda7ba03dd2c22ef62c1a3dbdf5aa651f90da) controller-runtime, but it makes a neat demo), and routed traces to Jaeger as well. 
I collected traces like this one: + +![Jaeger screenshot showing API server, admission webhook, and etcd trace](/images/blog/2021-09-03-api-server-tracing/example-trace-2.png "Jaeger screenshot showing API server, admission webhook, and etcd trace") + +Compared with the previous trace, there are two new spans: A teal span from the API Server making a request to the admission webhook, and a brown span from the admission webhook serving the request. Even if you didn't instrument your webhook, you would still get the span from the API Server making the request to the webhook. + +## Get involved! + +As this is our first attempt at adding distributed tracing to a Kubernetes component, there is probably a lot we can improve! If my struggles resonated with you, or if you just want to try out the latest Kubernetes has to offer, please give the feature a try and open issues with any problem you encountered and ways you think the feature could be improved. + +This is just the very beginning of what we can do with distributed tracing in Kubernetes. If there are other components you think would benefit from distributed tracing, or want to help bring API Server Tracing to GA, join sig-instrumentation at our [regular meetings](https://github.com/kubernetes/community/tree/master/sig-instrumentation#instrumentation-special-interest-group) and get involved! 
diff --git a/content/en/blog/_posts/image01.png b/content/en/blog/_posts/image01.png deleted file mode 100644 index 91e8856139..0000000000 Binary files a/content/en/blog/_posts/image01.png and /dev/null differ diff --git a/content/en/blog/_posts/image02.png b/content/en/blog/_posts/image02.png deleted file mode 100644 index dfd14d7cdc..0000000000 Binary files a/content/en/blog/_posts/image02.png and /dev/null differ diff --git a/content/en/blog/_posts/image03.png b/content/en/blog/_posts/image03.png deleted file mode 100644 index 443a6f2d67..0000000000 Binary files a/content/en/blog/_posts/image03.png and /dev/null differ diff --git a/content/en/blog/_posts/image04.png b/content/en/blog/_posts/image04.png deleted file mode 100644 index e107adc88b..0000000000 Binary files a/content/en/blog/_posts/image04.png and /dev/null differ diff --git a/content/en/blog/_posts/image05.png b/content/en/blog/_posts/image05.png deleted file mode 100644 index 6d80447d09..0000000000 Binary files a/content/en/blog/_posts/image05.png and /dev/null differ diff --git a/content/en/blog/_posts/image06.png b/content/en/blog/_posts/image06.png deleted file mode 100644 index d40b2eb0b6..0000000000 Binary files a/content/en/blog/_posts/image06.png and /dev/null differ diff --git a/content/en/blog/_posts/image07.png b/content/en/blog/_posts/image07.png deleted file mode 100644 index fc3976040f..0000000000 Binary files a/content/en/blog/_posts/image07.png and /dev/null differ diff --git a/content/en/community/_index.html b/content/en/community/_index.html index 5b65292ea7..b41323c69e 100644 --- a/content/en/community/_index.html +++ b/content/en/community/_index.html @@ -13,7 +13,7 @@ cid: community

The Kubernetes community -- users, contributors, and the culture we've built together -- is one of the biggest reasons for the meteoric rise of this open source project. Our culture and values continue to grow and change as the project itself grows and changes. We all work together toward constant improvement of the project and the ways we work on it. -

We are the people who file issues and pull requests, attend SIG meetings, Kubernetes meetups, and KubeCon, advocate for it's adoption and innovation, run kubectl get pods, and contribute in a thousand other vital ways. Read on to learn how you can get involved and become part of this amazing community.

+

We are the people who file issues and pull requests, attend SIG meetings, Kubernetes meetups, and KubeCon, advocate for its adoption and innovation, run kubectl get pods, and contribute in a thousand other vital ways. Read on to learn how you can get involved and become part of this amazing community.


diff --git a/content/en/docs/concepts/architecture/_index.md b/content/en/docs/concepts/architecture/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/concepts/architecture/cloud-controller.md b/content/en/docs/concepts/architecture/cloud-controller.md index 9b64289e82..229cc489f9 100644 --- a/content/en/docs/concepts/architecture/cloud-controller.md +++ b/content/en/docs/concepts/architecture/cloud-controller.md @@ -210,7 +210,7 @@ To upgrade a HA control plane to use the cloud controller manager, see [Migrate Want to know how to implement your own cloud controller manager, or extend an existing project? -The cloud controller manager uses Go interfaces to allow implementations from any cloud to be plugged in. Specifically, it uses the `CloudProvider` interface defined in [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.17/cloud.go#L42-L62) from [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider). +The cloud controller manager uses Go interfaces to allow implementations from any cloud to be plugged in. Specifically, it uses the `CloudProvider` interface defined in [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.21/cloud.go#L42-L69) from [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider). The implementation of the shared controllers highlighted in this document (Node, Route, and Service), and some scaffolding along with the shared cloudprovider interface, is part of the Kubernetes core. Implementations specific to cloud providers are outside the core of Kubernetes and implement the `CloudProvider` interface. 
diff --git a/content/en/docs/concepts/architecture/controller.md b/content/en/docs/concepts/architecture/controller.md index 711cf38363..9912c53bf8 100644 --- a/content/en/docs/concepts/architecture/controller.md +++ b/content/en/docs/concepts/architecture/controller.md @@ -159,11 +159,12 @@ You can run your own controller as a set of Pods, or externally to Kubernetes. What fits best will depend on what that particular controller does. - - ## {{% heading "whatsnext" %}} * Read about the [Kubernetes control plane](/docs/concepts/overview/components/#control-plane-components) * Discover some of the basic [Kubernetes objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/) * Learn more about the [Kubernetes API](/docs/concepts/overview/kubernetes-api/) -* If you want to write your own controller, see [Extension Patterns](/docs/concepts/extend-kubernetes/extend-cluster/#extension-patterns) in Extending Kubernetes. +* If you want to write your own controller, see + [Extension Patterns](/docs/concepts/extend-kubernetes/#extension-patterns) + in Extending Kubernetes. 
+ diff --git a/content/en/docs/concepts/architecture/garbage-collection.md b/content/en/docs/concepts/architecture/garbage-collection.md new file mode 100644 index 0000000000..7c70675fff --- /dev/null +++ b/content/en/docs/concepts/architecture/garbage-collection.md @@ -0,0 +1,182 @@ +--- +title: Garbage Collection +content_type: concept +weight: 50 +--- + + +{{}} This +allows the clean up of resources like the following: + + * [Failed pods](/docs/concepts/workloads/pods/pod-lifecycle/#pod-garbage-collection) + * [Completed Jobs](/docs/concepts/workloads/controllers/ttlafterfinished/) + * [Objects without owner references](#owners-dependents) + * [Unused containers and container images](#containers-images) + * [Dynamically provisioned PersistentVolumes with a StorageClass reclaim policy of Delete](/docs/concepts/storage/persistent-volumes/#delete) + * [Stale or expired CertificateSigningRequests (CSRs)](/reference/access-authn-authz/certificate-signing-requests/#request-signing-process) + * {{}} deleted in the following scenarios: + * On a cloud when the cluster uses a [cloud controller manager](/docs/concepts/architecture/cloud-controller/) + * On-premises when the cluster uses an addon similar to a cloud controller + manager + * [Node Lease objects](/docs/concepts/architecture/nodes/#heartbeats) + +## Owners and dependents {#owners-dependents} + +Many objects in Kubernetes link to each other through [*owner references*](/docs/concepts/overview/working-with-objects/owners-dependents/). +Owner references tell the control plane which objects are dependent on others. +Kubernetes uses owner references to give the control plane, and other API +clients, the opportunity to clean up related resources before deleting an +object. In most cases, Kubernetes manages owner references automatically. + +Ownership is different from the [labels and selectors](/docs/concepts/overview/working-with-objects/labels/) +mechanism that some resources also use. 
For example, consider a +{{}} that creates +`EndpointSlice` objects. The Service uses *labels* to allow the control plane to +determine which `EndpointSlice` objects are used for that Service. In addition +to the labels, each `EndpointSlice` that is managed on behalf of a Service has +an owner reference. Owner references help different parts of Kubernetes avoid +interfering with objects they don’t control. + +{{< note >}} +Cross-namespace owner references are disallowed by design. +Namespaced dependents can specify cluster-scoped or namespaced owners. +A namespaced owner **must** exist in the same namespace as the dependent. +If it does not, the owner reference is treated as absent, and the dependent +is subject to deletion once all owners are verified absent. + +Cluster-scoped dependents can only specify cluster-scoped owners. +In v1.20+, if a cluster-scoped dependent specifies a namespaced kind as an owner, +it is treated as having an unresolvable owner reference, and is not able to be garbage collected. + +In v1.20+, if the garbage collector detects an invalid cross-namespace `ownerReference`, +or a cluster-scoped dependent with an `ownerReference` referencing a namespaced kind, a warning Event +with a reason of `OwnerRefInvalidNamespace` and an `involvedObject` of the invalid dependent is reported. +You can check for that kind of Event by running +`kubectl get events -A --field-selector=reason=OwnerRefInvalidNamespace`. +{{< /note >}} + +## Cascading deletion {#cascading-deletion} + +Kubernetes checks for and deletes objects that no longer have owner +references, like the pods left behind when you delete a ReplicaSet. When you +delete an object, you can control whether Kubernetes deletes the object's +dependents automatically, in a process called *cascading deletion*. 
There are +two types of cascading deletion, as follows: + + * Foreground cascading deletion + * Background cascading deletion + +You can also control how and when garbage collection deletes resources that have +owner references using Kubernetes {{}}. + +### Foreground cascading deletion {#foreground-deletion} + +In foreground cascading deletion, the owner object you're deleting first enters +a *deletion in progress* state. In this state, the following happens to the +owner object: + + * The Kubernetes API server sets the object's `metadata.deletionTimestamp` + field to the time the object was marked for deletion. + * The Kubernetes API server also sets the `metadata.finalizers` field to + `foregroundDeletion`. + * The object remains visible through the Kubernetes API until the deletion + process is complete. + +After the owner object enters the deletion in progress state, the controller +deletes the dependents. After deleting all the dependent objects, the controller +deletes the owner object. At this point, the object is no longer visible in the +Kubernetes API. + +During foreground cascading deletion, the only dependents that block owner +deletion are those that have the `ownerReference.blockOwnerDeletion=true` field. +See [Use foreground cascading deletion](/docs/tasks/administer-cluster/use-cascading-deletion/#use-foreground-cascading-deletion) +to learn more. + +### Background cascading deletion {#background-deletion} + +In background cascading deletion, the Kubernetes API server deletes the owner +object immediately and the controller cleans up the dependent objects in +the background. By default, Kubernetes uses background cascading deletion unless +you manually use foreground deletion or choose to orphan the dependent objects. + +See [Use background cascading deletion](/docs/tasks/administer-cluster/use-cascading-deletion/#use-background-cascading-deletion) +to learn more. 
+ +### Orphaned dependents + +When Kubernetes deletes an owner object, the dependents left behind are called +*orphan* objects. By default, Kubernetes deletes dependent objects. To learn how +to override this behaviour, see [Delete owner objects and orphan dependents](/docs/tasks/administer-cluster/use-cascading-deletion/#set-orphan-deletion-policy). + +## Garbage collection of unused containers and images {#containers-images} + +The {{}} performs garbage +collection on unused images every five minutes and on unused containers every +minute. You should avoid using external garbage collection tools, as these can +break the kubelet behavior and remove containers that should exist. + +To configure options for unused container and image garbage collection, tune the +kubelet using a [configuration file](/docs/tasks/administer-cluster/kubelet-config-file/) +and change the parameters related to garbage collection using the +[`KubeletConfiguration`](/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) +resource type. + +### Container image lifecycle + +Kubernetes manages the lifecycle of all images through its *image manager*, +which is part of the kubelet, with the cooperation of cadvisor. The kubelet +considers the following disk usage limits when making garbage collection +decisions: + + * `HighThresholdPercent` + * `LowThresholdPercent` + +Disk usage above the configured `HighThresholdPercent` value triggers garbage +collection, which deletes images in order based on the last time they were used, +starting with the oldest first. The kubelet deletes images +until disk usage reaches the `LowThresholdPercent` value. + +### Container image garbage collection {#container-image-garbage-collection} + +The kubelet garbage collects unused containers based on the following variables, +which you can define: + + * `MinAge`: the minimum age at which the kubelet can garbage collect a + container. Disable by setting to `0`. 
+ * `MaxPerPodContainer`: the maximum number of dead containers each Pod + can have. Disable by setting to less than `0`. + * `MaxContainers`: the maximum number of dead containers the cluster can have. + Disable by setting to less than `0`. + +In addition to these variables, the kubelet garbage collects unidentified and +deleted containers, typically starting with the oldest first. + +`MaxPerPodContainer` and `MaxContainers` may potentially conflict with each other +in situations where retaining the maximum number of containers per Pod +(`MaxPerPodContainer`) would go outside the allowable total of global dead +containers (`MaxContainers`). In this situation, the kubelet adjusts +`MaxPerPodContainer` to address the conflict. A worst-case scenario would be to +downgrade `MaxPerPodContainer` to `1` and evict the oldest containers. +Additionally, containers owned by pods that have been deleted are removed once +they are older than `MinAge`. + +{{< note >}} +The kubelet only garbage collects the containers it manages. +{{< /note >}} + +## Configuring garbage collection {#configuring-gc} + +You can tune garbage collection of resources by configuring options specific to +the controllers managing those resources. The following pages show you how to +configure garbage collection: + + * [Configuring cascading deletion of Kubernetes objects](/docs/tasks/administer-cluster/use-cascading-deletion/) + * [Configuring cleanup of finished Jobs](/docs/concepts/workloads/controllers/ttlafterfinished/) + + + +## {{% heading "whatsnext" %}} + +* Learn more about [ownership of Kubernetes objects](/docs/concepts/overview/working-with-objects/owners-dependents/). +* Learn more about Kubernetes [finalizers](/docs/concepts/overview/working-with-objects/finalizers/). +* Learn about the [TTL controller](/docs/concepts/workloads/controllers/ttlafterfinished/) (beta) that cleans up finished Jobs.
\ No newline at end of file diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md index c583098693..1d4f6455b7 100644 --- a/content/en/docs/concepts/architecture/nodes.md +++ b/content/en/docs/concepts/architecture/nodes.md @@ -14,7 +14,7 @@ A node may be a virtual or physical machine, depending on the cluster. Each node is managed by the {{< glossary_tooltip text="control plane" term_id="control-plane" >}} and contains the services necessary to run -{{< glossary_tooltip text="Pods" term_id="pod" >}} +{{< glossary_tooltip text="Pods" term_id="pod" >}}. Typically you have several nodes in a cluster; in a learning or resource-limited environment, you might have only one node. @@ -122,6 +122,9 @@ To mark a Node unschedulable, run: kubectl cordon $NODENAME ``` +See [Safely Drain a Node](/docs/tasks/administer-cluster/safely-drain-node/) +for more details. + {{< note >}} Pods that are part of a {{< glossary_tooltip term_id="daemonset" >}} tolerate being run on an unschedulable Node. DaemonSets typically provide node-local services @@ -162,8 +165,8 @@ The `conditions` field describes the status of all `Running` nodes. 
Examples of | Node Condition | Description | |----------------------|-------------| | `Ready` | `True` if the node is healthy and ready to accept pods, `False` if the node is not healthy and is not accepting pods, and `Unknown` if the node controller has not heard from the node in the last `node-monitor-grace-period` (default is 40 seconds) | -| `DiskPressure` | `True` if pressure exists on the disk size--that is, if the disk capacity is low; otherwise `False` | -| `MemoryPressure` | `True` if pressure exists on the node memory--that is, if the node memory is low; otherwise `False` | +| `DiskPressure` | `True` if pressure exists on the disk size—that is, if the disk capacity is low; otherwise `False` | +| `MemoryPressure` | `True` if pressure exists on the node memory—that is, if the node memory is low; otherwise `False` | | `PIDPressure` | `True` if pressure exists on the processes—that is, if there are too many processes on the node; otherwise `False` | | `NetworkUnavailable` | `True` if the network for the node is not correctly configured, otherwise `False` | {{< /table >}} @@ -174,7 +177,8 @@ If you use command-line tools to print details of a cordoned Node, the Condition cordoned nodes are marked Unschedulable in their spec. {{< /note >}} -The node condition is represented as a JSON object. For example, the following structure describes a healthy node: +In the Kubernetes API, a node's condition is represented as part of the `.status` +of the Node resource. For example, the following JSON structure describes a healthy node: ```json "conditions": [ @@ -189,7 +193,17 @@ The node condition is represented as a JSON object. For example, the following s ] ``` -If the Status of the Ready condition remains `Unknown` or `False` for longer than the `pod-eviction-timeout` (an argument passed to the {{< glossary_tooltip text="kube-controller-manager" term_id="kube-controller-manager" >}}), then all the Pods on the node are scheduled for deletion by the node controller. 
The default eviction timeout duration is **five minutes**. In some cases when the node is unreachable, the API server is unable to communicate with the kubelet on the node. The decision to delete the pods cannot be communicated to the kubelet until communication with the API server is re-established. In the meantime, the pods that are scheduled for deletion may continue to run on the partitioned node. +If the `status` of the Ready condition remains `Unknown` or `False` for longer +than the `pod-eviction-timeout` (an argument passed to the +{{< glossary_tooltip text="kube-controller-manager" term_id="kube-controller-manager" +>}}), then the [node controller](#node-controller) triggers +{{< glossary_tooltip text="API-initiated eviction" term_id="api-eviction" >}} +for all Pods assigned to that node. The default eviction timeout duration is +**five minutes**. +In some cases when the node is unreachable, the API server is unable to communicate +with the kubelet on the node. The decision to delete the pods cannot be communicated to +the kubelet until communication with the API server is re-established. In the meantime, +the pods that are scheduled for deletion may continue to run on the partitioned node. The node controller does not force delete pods until it is confirmed that they have stopped running in the cluster. You can see the pods that might be running on an unreachable node as @@ -199,10 +213,12 @@ may need to delete the node object by hand. Deleting the node object from Kubern all the Pod objects running on the node to be deleted from the API server and frees up their names. -The node lifecycle controller automatically creates -[taints](/docs/concepts/scheduling-eviction/taint-and-toleration/) that represent conditions. +When problems occur on nodes, the Kubernetes control plane automatically creates +[taints](/docs/concepts/scheduling-eviction/taint-and-toleration/) that match the conditions +affecting the node. 
The scheduler takes the Node's taints into consideration when assigning a Pod to a Node. -Pods can also have tolerations which let them tolerate a Node's taints. +Pods can also have {{< glossary_tooltip text="tolerations" term_id="toleration" >}} that let +them run on a Node even though it has a specific taint. See [Taint Nodes by Condition](/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-nodes-by-condition) for more details. @@ -222,10 +238,43 @@ on a Node. ### Info -Describes general information about the node, such as kernel version, Kubernetes version (kubelet and kube-proxy version), Docker version (if used), and OS name. -This information is gathered by Kubelet from the node. +Describes general information about the node, such as kernel version, Kubernetes +version (kubelet and kube-proxy version), container runtime details, and which +operating system the node uses. +The kubelet gathers this information from the node and publishes it into +the Kubernetes API. -### Node controller +## Heartbeats + +Heartbeats, sent by Kubernetes nodes, help your cluster determine the +availability of each node, and to take action when failures are detected. + +For nodes there are two forms of heartbeats: + +* updates to the `.status` of a Node +* [Lease](/docs/reference/kubernetes-api/cluster-resources/lease-v1/) objects + within the `kube-node-lease` + {{< glossary_tooltip term_id="namespace" text="namespace">}}. + Each Node has an associated Lease object. + +Compared to updates to `.status` of a Node, a Lease is a lightweight resource. +Using Leases for heartbeats reduces the performance impact of these updates +for large clusters. + +The kubelet is responsible for creating and updating the `.status` of Nodes, +and for updating their related Leases. + +- The kubelet updates the node's `.status` either when there is change in status + or if there has been no update for a configured interval. 
The default interval + for `.status` updates to Nodes is 5 minutes, which is much longer than the 40 + second default timeout for unreachable nodes. +- The kubelet creates and then updates its Lease object every 10 seconds + (the default update interval). Lease updates occur independently from + updates to the Node's `.status`. If the Lease update fails, the kubelet retries, + using exponential backoff that starts at 200 milliseconds and capped at 7 seconds. + + +## Node controller The node {{< glossary_tooltip text="controller" term_id="controller" >}} is a Kubernetes control plane component that manages various aspects of nodes. @@ -241,39 +290,18 @@ controller deletes the node from its list of nodes. The third is monitoring the nodes' health. The node controller is responsible for: -- Updating the NodeReady condition of NodeStatus to ConditionUnknown when a node - becomes unreachable, as the node controller stops receiving heartbeats for some - reason such as the node being down. -- Evicting all the pods from the node using graceful termination if - the node continues to be unreachable. The default timeouts are 40s to start - reporting ConditionUnknown and 5m after that to start evicting pods. +- In the case that a node becomes unreachable, updating the NodeReady condition + of within the Node's `.status`. In this case the node controller sets the + NodeReady condition to `ConditionUnknown`. +- If a node remains unreachable: triggering + [API-initiated eviction](/docs/concepts/scheduling-eviction/api-eviction/) + for all of the Pods on the unreachable node. By default, the node controller + waits 5 minutes between marking the node as `ConditionUnknown` and submitting + the first eviction request. The node controller checks the state of each node every `--node-monitor-period` seconds. -#### Heartbeats - -Heartbeats, sent by Kubernetes nodes, help determine the availability of a node. 
- -There are two forms of heartbeats: updates of `NodeStatus` and the -[Lease object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#lease-v1-coordination-k8s-io). -Each Node has an associated Lease object in the `kube-node-lease` -{{< glossary_tooltip term_id="namespace" text="namespace">}}. -Lease is a lightweight resource, which improves the performance -of the node heartbeats as the cluster scales. - -The kubelet is responsible for creating and updating the `NodeStatus` and -a Lease object. - -- The kubelet updates the `NodeStatus` either when there is change in status - or if there has been no update for a configured interval. The default interval - for `NodeStatus` updates is 5 minutes, which is much longer than the 40 second default - timeout for unreachable nodes. -- The kubelet creates and then updates its Lease object every 10 seconds - (the default update interval). Lease updates occur independently from the - `NodeStatus` updates. If the Lease update fails, the kubelet retries with - exponential backoff starting at 200 milliseconds and capped at 7 seconds. - -#### Reliability +### Rate limits on eviction In most cases, the node controller limits the eviction rate to `--node-eviction-rate` (default 0.1) per second, meaning it won't evict pods @@ -281,9 +309,9 @@ from more than 1 node per 10 seconds. The node eviction behavior changes when a node in a given availability zone becomes unhealthy. The node controller checks what percentage of nodes in the zone -are unhealthy (NodeReady condition is ConditionUnknown or ConditionFalse) at +are unhealthy (NodeReady condition is `ConditionUnknown` or `ConditionFalse`) at the same time: -- If the fraction of unhealthy nodes is at least `--unhealthy-zone-threshold` +- If the fraction of unhealthy nodes is at least `--unhealthy-zone-threshold` (default 0.55), then the eviction rate is reduced. - If the cluster is small (i.e. 
has less than or equal to `--large-cluster-size-threshold` nodes - default 50), then evictions are stopped. @@ -293,15 +321,17 @@ the same time: The reason these policies are implemented per availability zone is because one availability zone might become partitioned from the master while the others remain connected. If your cluster does not span multiple cloud provider availability zones, -then there is only one availability zone (i.e. the whole cluster). +then the eviction mechanism does not take per-zone unavailability into account. A key reason for spreading your nodes across availability zones is so that the workload can be shifted to healthy zones when one entire zone goes down. Therefore, if all nodes in a zone are unhealthy, then the node controller evicts at the normal rate of `--node-eviction-rate`. The corner case is when all zones are -completely unhealthy (i.e. there are no healthy nodes in the cluster). In such a -case, the node controller assumes that there is some problem with master -connectivity and stops all evictions until some connectivity is restored. +completely unhealthy (none of the nodes in the cluster are healthy). In such a +case, the node controller assumes that there is some problem with connectivity +between the control plane and the nodes, and doesn't perform any evictions. +(If there has been an outage and some nodes reappear, the node controller does +evict pods from the remaining nodes that are unhealthy or unreachable). The node controller is also responsible for evicting pods running on nodes with `NoExecute` taints, unless those pods tolerate that taint. @@ -309,7 +339,7 @@ The node controller also adds {{< glossary_tooltip text="taints" term_id="taint" corresponding to node problems like node unreachable or not ready. This means that the scheduler won't place Pods onto unhealthy nodes. 
-### Node capacity +## Resource capacity tracking {#node-capacity} Node objects track information about the Node's resource capacity: for example, the amount of memory available and the number of CPUs. @@ -377,6 +407,64 @@ For example, if `ShutdownGracePeriod=30s`, and for gracefully terminating normal pods, and the last 10 seconds would be reserved for terminating [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical). +{{< note >}} +When pods are evicted during the graceful node shutdown, they are marked as failed. +Running `kubectl get pods` shows the status of the evicted pods as `Shutdown`. +And `kubectl describe pod` indicates that the pod was evicted because of node shutdown: + +``` +Status: Failed +Reason: Shutdown +Message: Node is shutting, evicting pods +``` + +Failed pod objects will be preserved until explicitly deleted or [cleaned up by the GC](/docs/concepts/workloads/pods/pod-lifecycle/#pod-garbage-collection). +This is a change of behavior compared to abrupt node termination. +{{< /note >}} + +## Swap memory management {#swap-memory} + +{{< feature-state state="alpha" for_k8s_version="v1.22" >}} + +Prior to Kubernetes 1.22, nodes did not support the use of swap memory, and a +kubelet would by default fail to start if swap was detected on a node. In 1.22 +onwards, swap memory support can be enabled on a per-node basis. + +To enable swap on a node, the `NodeSwap` feature gate must be enabled on +the kubelet, and the `--fail-swap-on` command line flag or `failSwapOn` +[configuration setting](/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) +must be set to false. + +A user can also optionally configure `memorySwap.swapBehavior` in order to +specify how a node will use swap memory.
For example, + +```yaml +memorySwap: + swapBehavior: LimitedSwap +``` + +The available configuration options for `swapBehavior` are: + +- `LimitedSwap`: Kubernetes workloads are limited in how much swap they can + use. Workloads on the node not managed by Kubernetes can still swap. +- `UnlimitedSwap`: Kubernetes workloads can use as much swap memory as they + request, up to the system limit. + +If configuration for `memorySwap` is not specified and the feature gate is +enabled, by default the kubelet will apply the same behaviour as the +`LimitedSwap` setting. + +The behaviour of the `LimitedSwap` setting depends if the node is running with +v1 or v2 of control groups (also known as "cgroups"): + +- **cgroupsv1:** Kubernetes workloads can use any combination of memory and + swap, up to the pod's memory limit, if set. +- **cgroupsv2:** Kubernetes workloads cannot use swap memory. + +For more information, and to assist with testing and provide feedback, please +see [KEP-2400](https://github.com/kubernetes/enhancements/issues/2400) and its +[design proposal](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2400-node-swap/README.md). + ## {{% heading "whatsnext" %}} * Learn about the [components](/docs/concepts/overview/components/#node-components) that make up a node. diff --git a/content/en/docs/concepts/cluster-administration/flow-control.md b/content/en/docs/concepts/cluster-administration/flow-control.md index 71eb8106e5..46f0a1eadc 100644 --- a/content/en/docs/concepts/cluster-administration/flow-control.md +++ b/content/en/docs/concepts/cluster-administration/flow-control.md @@ -33,8 +33,6 @@ the `--max-requests-inflight` flag without the API Priority and Fairness feature enabled. {{< /caution >}} - - ## Enabling/Disabling API Priority and Fairness @@ -65,6 +63,7 @@ The command-line flag `--enable-priority-and-fairness=false` will disable the API Priority and Fairness feature, even if other flags have enabled it. 
## Concepts + There are several distinct features involved in the API Priority and Fairness feature. Incoming requests are classified by attributes of the request using _FlowSchemas_, and assigned to priority levels. Priority levels add a degree of @@ -75,12 +74,13 @@ each other, and allows for requests to be queued to prevent bursty traffic from causing failed requests when the average load is acceptably low. ### Priority Levels -Without APF enabled, overall concurrency in -the API server is limited by the `kube-apiserver` flags -`--max-requests-inflight` and `--max-mutating-requests-inflight`. With APF -enabled, the concurrency limits defined by these flags are summed and then the sum is divided up -among a configurable set of _priority levels_. Each incoming request is assigned -to a single priority level, and each priority level will only dispatch as many + +Without APF enabled, overall concurrency in the API server is limited by the +`kube-apiserver` flags `--max-requests-inflight` and +`--max-mutating-requests-inflight`. With APF enabled, the concurrency limits +defined by these flags are summed and then the sum is divided up among a +configurable set of _priority levels_. Each incoming request is assigned to a +single priority level, and each priority level will only dispatch as many concurrent requests as its configuration allows. The default configuration, for example, includes separate priority levels for @@ -90,6 +90,7 @@ requests cannot prevent leader election or actions by the built-in controllers from succeeding. ### Queuing + Even within a priority level there may be a large number of distinct sources of traffic. In an overload situation, it is valuable to prevent one stream of requests from starving others (in particular, in the relatively common case of a @@ -114,15 +115,18 @@ independent flows will all make progress when total traffic exceeds capacity), tolerance for bursty traffic, and the added latency induced by queuing. 
### Exempt requests + Some requests are considered sufficiently important that they are not subject to any of the limitations imposed by this feature. These exemptions prevent an improperly-configured flow control configuration from totally disabling an API server. ## Defaults + The Priority and Fairness feature ships with a suggested configuration that should suffice for experimentation; if your cluster is likely to -experience heavy load then you should consider what configuration will work best. The suggested configuration groups requests into five priority +experience heavy load then you should consider what configuration will work +best. The suggested configuration groups requests into five priority classes: * The `system` priority level is for requests from the `system:nodes` group, @@ -180,19 +184,18 @@ If you add the following additional FlowSchema, this exempts those requests from rate limiting. {{< caution >}} - Making this change also allows any hostile party to then send health-check requests that match this FlowSchema, at any volume they like. If you have a web traffic filter or similar external security mechanism to protect your cluster's API server from general internet traffic, you can configure rules to block any health check requests that originate from outside your cluster. - {{< /caution >}} {{< codenew file="priority-and-fairness/health-for-strangers.yaml" >}} ## Resources + The flow control API involves two kinds of resources. [PriorityLevelConfigurations](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io) define the available isolation classes, the share of the available concurrency @@ -204,6 +207,7 @@ of the same API group, and it has the same Kinds with the same syntax and semantics. ### PriorityLevelConfiguration + A PriorityLevelConfiguration represents a single isolation class. 
Each PriorityLevelConfiguration has an independent limit on the number of outstanding requests, and limitations on the number of queued requests. @@ -217,6 +221,7 @@ server by restarting `kube-apiserver` with a different value for `--max-requests-inflight` (or `--max-mutating-requests-inflight`), and all PriorityLevelConfigurations will see their maximum allowed concurrency go up (or down) by the same fraction. + {{< caution >}} With the Priority and Fairness feature enabled, the total concurrency limit for the server is set to the sum of `--max-requests-inflight` and @@ -235,8 +240,8 @@ above the threshold will be queued, with the shuffle sharding and fair queuing t to balance progress between request flows. The queuing configuration allows tuning the fair queuing algorithm for a -priority level. Details of the algorithm can be read in the [enhancement -proposal](#whats-next), but in short: +priority level. Details of the algorithm can be read in the +[enhancement proposal](#whats-next), but in short: * Increasing `queues` reduces the rate of collisions between different flows, at the cost of increased memory usage. A value of 1 here effectively disables the @@ -249,15 +254,15 @@ proposal](#whats-next), but in short: * Changing `handSize` allows you to adjust the probability of collisions between different flows and the overall concurrency available to a single flow in an overload situation. - {{< note >}} - A larger `handSize` makes it less likely for two individual flows to collide - (and therefore for one to be able to starve the other), but more likely that - a small number of flows can dominate the apiserver. A larger `handSize` also - potentially increases the amount of latency that a single high-traffic flow - can cause. The maximum number of queued requests possible from a - single flow is `handSize * queueLengthLimit`. 
- {{< /note >}} + {{< note >}} + A larger `handSize` makes it less likely for two individual flows to collide + (and therefore for one to be able to starve the other), but more likely that + a small number of flows can dominate the apiserver. A larger `handSize` also + potentially increases the amount of latency that a single high-traffic flow + can cause. The maximum number of queued requests possible from a + single flow is `handSize * queueLengthLimit`. + {{< /note >}} Following is a table showing an interesting collection of shuffle sharding configurations, showing for each the probability that a @@ -319,6 +324,7 @@ considered part of a single flow. The correct choice for a given FlowSchema depends on the resource and your particular environment. ## Diagnostics + Every HTTP response from an API server with the priority and fairness feature enabled has two extra headers: `X-Kubernetes-PF-FlowSchema-UID` and `X-Kubernetes-PF-PriorityLevel-UID`, noting the flow schema that matched the request @@ -356,13 +362,14 @@ poorly-behaved workloads that may be harming system health. matched the request), `priority_level` (indicating the one to which the request was assigned), and `reason`. The `reason` label will be have one of the following values: - * `queue-full`, indicating that too many requests were already - queued, - * `concurrency-limit`, indicating that the - PriorityLevelConfiguration is configured to reject rather than - queue excess requests, or - * `time-out`, indicating that the request was still in the queue - when its queuing time limit expired. + + * `queue-full`, indicating that too many requests were already + queued, + * `concurrency-limit`, indicating that the + PriorityLevelConfiguration is configured to reject rather than + queue excess requests, or + * `time-out`, indicating that the request was still in the queue + when its queuing time limit expired. 
* `apiserver_flowcontrol_dispatched_requests_total` is a counter vector (cumulative since server start) of requests that began @@ -405,6 +412,10 @@ poorly-behaved workloads that may be harming system health. queue) requests, broken down by the labels `priority_level` and `flow_schema`. +* `apiserver_flowcontrol_request_concurrency_in_use` is a gauge vector + holding the instantaneous number of occupied seats, broken down by + the labels `priority_level` and `flow_schema`. + * `apiserver_flowcontrol_priority_level_request_count_samples` is a histogram vector of observations of the then-current number of requests broken down by the labels `phase` (which takes on the @@ -430,14 +441,15 @@ poorly-behaved workloads that may be harming system health. sample to its histogram, reporting the length of the queue immediately after the request was added. Note that this produces different statistics than an unbiased survey would. - {{< note >}} - An outlier value in a histogram here means it is likely that a single flow - (i.e., requests by one user or for one namespace, depending on - configuration) is flooding the API server, and being throttled. By contrast, - if one priority level's histogram shows that all queues for that priority - level are longer than those for other priority levels, it may be appropriate - to increase that PriorityLevelConfiguration's concurrency shares. - {{< /note >}} + + {{< note >}} + An outlier value in a histogram here means it is likely that a single flow + (i.e., requests by one user or for one namespace, depending on + configuration) is flooding the API server, and being throttled. By contrast, + if one priority level's histogram shows that all queues for that priority + level are longer than those for other priority levels, it may be appropriate + to increase that PriorityLevelConfiguration's concurrency shares. 
+ {{< /note >}} * `apiserver_flowcontrol_request_concurrency_limit` is a gauge vector holding the computed concurrency limit (based on the API server's @@ -450,12 +462,13 @@ poorly-behaved workloads that may be harming system health. `priority_level` (indicating the one to which the request was assigned), and `execute` (indicating whether the request started executing). - {{< note >}} - Since each FlowSchema always assigns requests to a single - PriorityLevelConfiguration, you can add the histograms for all the - FlowSchemas for one priority level to get the effective histogram for - requests assigned to that priority level. - {{< /note >}} + + {{< note >}} + Since each FlowSchema always assigns requests to a single + PriorityLevelConfiguration, you can add the histograms for all the + FlowSchemas for one priority level to get the effective histogram for + requests assigned to that priority level. + {{< /note >}} * `apiserver_flowcontrol_request_execution_seconds` is a histogram vector of how long requests took to actually execute, broken down by @@ -465,14 +478,19 @@ poorly-behaved workloads that may be harming system health. ### Debug endpoints -When you enable the API Priority and Fairness feature, the kube-apiserver serves the following additional paths at its HTTP[S] ports. +When you enable the API Priority and Fairness feature, the `kube-apiserver` +serves the following additional paths at its HTTP[S] ports. + +- `/debug/api_priority_and_fairness/dump_priority_levels` - a listing of + all the priority levels and the current state of each. You can fetch like this: -- `/debug/api_priority_and_fairness/dump_priority_levels` - a listing of all the priority levels and the current state of each. 
You can fetch like this: ```shell kubectl get --raw /debug/api_priority_and_fairness/dump_priority_levels ``` + The output is similar to this: - ``` + + ```none PriorityLevelName, ActiveQueues, IsIdle, IsQuiescing, WaitingRequests, ExecutingRequests, workload-low, 0, true, false, 0, 0, global-default, 0, true, false, 0, 0, @@ -483,12 +501,16 @@ When you enable the API Priority and Fairness feature, the kube-apiserver serves workload-high, 0, true, false, 0, 0, ``` -- `/debug/api_priority_and_fairness/dump_queues` - a listing of all the queues and their current state. You can fetch like this: +- `/debug/api_priority_and_fairness/dump_queues` - a listing of all the + queues and their current state. You can fetch like this: + ```shell kubectl get --raw /debug/api_priority_and_fairness/dump_queues ``` + The output is similar to this: - ``` + + ```none PriorityLevelName, Index, PendingRequests, ExecutingRequests, VirtualStart, workload-high, 0, 0, 0, 0.0000, workload-high, 1, 0, 0, 0.0000, @@ -498,25 +520,33 @@ When you enable the API Priority and Fairness feature, the kube-apiserver serves leader-election, 15, 0, 0, 0.0000, ``` -- `/debug/api_priority_and_fairness/dump_requests` - a listing of all the requests that are currently waiting in a queue. You can fetch like this: +- `/debug/api_priority_and_fairness/dump_requests` - a listing of all the requests + that are currently waiting in a queue. You can fetch like this: + ```shell kubectl get --raw /debug/api_priority_and_fairness/dump_requests ``` + The output is similar to this: - ``` + + ```none PriorityLevelName, FlowSchemaName, QueueIndex, RequestIndexInQueue, FlowDistingsher, ArriveTime, exempt, , , , , , system, system-nodes, 12, 0, system:node:127.0.0.1, 2020-07-23T15:26:57.179170694Z, ``` - In addition to the queued requests, the output includes one phantom line for each priority level that is exempt from limitation. 
+ In addition to the queued requests, the output includes one phantom line + for each priority level that is exempt from limitation. You can get a more detailed listing with a command like this: + ```shell kubectl get --raw '/debug/api_priority_and_fairness/dump_requests?includeRequestDetails=1' ``` + The output is similar to this: - ``` + + ```none PriorityLevelName, FlowSchemaName, QueueIndex, RequestIndexInQueue, FlowDistingsher, ArriveTime, UserName, Verb, APIPath, Namespace, Name, APIVersion, Resource, SubResource, system, system-nodes, 12, 0, system:node:127.0.0.1, 2020-07-23T15:31:03.583823404Z, system:node:127.0.0.1, create, /api/v1/namespaces/scaletest/configmaps, system, system-nodes, 12, 1, system:node:127.0.0.1, 2020-07-23T15:31:03.594555947Z, system:node:127.0.0.1, create, /api/v1/namespaces/scaletest/configmaps, @@ -528,4 +558,4 @@ When you enable the API Priority and Fairness feature, the kube-apiserver serves For background information on design details for API priority and fairness, see the [enhancement proposal](https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/1040-priority-and-fairness). You can make suggestions and feature requests via [SIG API Machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery) -or the feature's [slack channel](http://kubernetes.slack.com/messages/api-priority-and-fairness). +or the feature's [slack channel](https://kubernetes.slack.com/messages/api-priority-and-fairness). 
diff --git a/content/en/docs/concepts/cluster-administration/kubelet-garbage-collection.md b/content/en/docs/concepts/cluster-administration/kubelet-garbage-collection.md deleted file mode 100644 index ea51a566ac..0000000000 --- a/content/en/docs/concepts/cluster-administration/kubelet-garbage-collection.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -reviewers: -title: Garbage collection for container images -content_type: concept -weight: 70 ---- - - - -Garbage collection is a helpful function of kubelet that will clean up unused [images](/docs/concepts/containers/#container-images) and unused [containers](/docs/concepts/containers/). Kubelet will perform garbage collection for containers every minute and garbage collection for images every five minutes. - -External garbage collection tools are not recommended as these tools can potentially break the behavior of kubelet by removing containers expected to exist. - - - - - - -## Image Collection - -Kubernetes manages lifecycle of all images through imageManager, with the cooperation -of cadvisor. - -The policy for garbage collecting images takes two factors into consideration: -`HighThresholdPercent` and `LowThresholdPercent`. Disk usage above the high threshold -will trigger garbage collection. The garbage collection will delete least recently used images until the low -threshold has been met. - -## Container Collection - -The policy for garbage collecting containers considers three user-defined variables. `MinAge` is the minimum age at which a container can be garbage collected. `MaxPerPodContainer` is the maximum number of dead containers every single -pod (UID, container name) pair is allowed to have. `MaxContainers` is the maximum number of total dead containers. These variables can be individually disabled by setting `MinAge` to zero and setting `MaxPerPodContainer` and `MaxContainers` respectively to less than zero. 
- -Kubelet will act on containers that are unidentified, deleted, or outside of the boundaries set by the previously mentioned flags. The oldest containers will generally be removed first. `MaxPerPodContainer` and `MaxContainer` may potentially conflict with each other in situations where retaining the maximum number of containers per pod (`MaxPerPodContainer`) would go outside the allowable range of global dead containers (`MaxContainers`). `MaxPerPodContainer` would be adjusted in this situation: A worst case scenario would be to downgrade `MaxPerPodContainer` to 1 and evict the oldest containers. Additionally, containers owned by pods that have been deleted are removed once they are older than `MinAge`. - -Containers that are not managed by kubelet are not subject to container garbage collection. - -## User Configuration - -You can adjust the following thresholds to tune image garbage collection with the following kubelet flags : - -1. `image-gc-high-threshold`, the percent of disk usage which triggers image garbage collection. -Default is 85%. -2. `image-gc-low-threshold`, the percent of disk usage to which image garbage collection attempts -to free. Default is 80%. - -You can customize the garbage collection policy through the following kubelet flags: - -1. `minimum-container-ttl-duration`, minimum age for a finished container before it is -garbage collected. Default is 0 minute, which means every finished container will be garbage collected. -2. `maximum-dead-containers-per-container`, maximum number of old instances to be retained -per container. Default is 1. -3. `maximum-dead-containers`, maximum number of old instances of containers to retain globally. -Default is -1, which means there is no global limit. - -Containers can potentially be garbage collected before their usefulness has expired. These containers -can contain logs and other data that can be useful for troubleshooting. 
A sufficiently large value for -`maximum-dead-containers-per-container` is highly recommended to allow at least 1 dead container to be -retained per expected container. A larger value for `maximum-dead-containers` is also recommended for a -similar reason. -See [this issue](https://github.com/kubernetes/kubernetes/issues/13287) for more details. - - -## Deprecation - -Some kubelet Garbage Collection features in this doc will be replaced by kubelet eviction in the future. - -Including: - -| Existing Flag | New Flag | Rationale | -| ------------- | -------- | --------- | -| `--image-gc-high-threshold` | `--eviction-hard` or `--eviction-soft` | existing eviction signals can trigger image garbage collection | -| `--image-gc-low-threshold` | `--eviction-minimum-reclaim` | eviction reclaims achieve the same behavior | -| `--maximum-dead-containers` | | deprecated once old logs are stored outside of container's context | -| `--maximum-dead-containers-per-container` | | deprecated once old logs are stored outside of container's context | -| `--minimum-container-ttl-duration` | | deprecated once old logs are stored outside of container's context | -| `--low-diskspace-threshold-mb` | `--eviction-hard` or `eviction-soft` | eviction generalizes disk thresholds to other resources | -| `--outofdisk-transition-frequency` | `--eviction-pressure-transition-period` | eviction generalizes disk pressure transition to other resources | - - - -## {{% heading "whatsnext" %}} - - -See [Configuring Out Of Resource Handling](/docs/tasks/administer-cluster/out-of-resource/) for more details. - diff --git a/content/en/docs/concepts/cluster-administration/logging.md b/content/en/docs/concepts/cluster-administration/logging.md index e75fdea4a5..1bf057f23e 100644 --- a/content/en/docs/concepts/cluster-administration/logging.md +++ b/content/en/docs/concepts/cluster-administration/logging.md @@ -81,10 +81,13 @@ rotate an application's logs automatically. 
As an example, you can find detailed information about how `kube-up.sh` sets up logging for COS image on GCP in the corresponding -[`configure-helper` script](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh). +[`configure-helper` script](https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/gci/configure-helper.sh). -When using a **CRI container runtime**, the kubelet is responsible for rotating the logs and managing the logging directory structure. The kubelet -sends this information to the CRI container runtime and the runtime writes the container logs to the given location. The two kubelet flags `container-log-max-size` and `container-log-max-files` can be used to configure the maximum size for each log file and the maximum number of files allowed for each container respectively. +When using a **CRI container runtime**, the kubelet is responsible for rotating the logs and managing the logging directory structure. +The kubelet sends this information to the CRI container runtime and the runtime writes the container logs to the given location. +The two kubelet parameters [`containerLogMaxSize` and `containerLogMaxFiles`](/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) +in [kubelet config file](/docs/tasks/administer-cluster/kubelet-config-file/) +can be used to configure the maximum size for each log file and the maximum number of files allowed for each container respectively. 
When you run [`kubectl logs`](/docs/reference/generated/kubectl/kubectl-commands#logs) as in the basic logging example, the kubelet on the node handles the request and diff --git a/content/en/docs/concepts/cluster-administration/manage-deployment.md b/content/en/docs/concepts/cluster-administration/manage-deployment.md index f51911116d..4d98cf820c 100644 --- a/content/en/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/en/docs/concepts/cluster-administration/manage-deployment.md @@ -50,7 +50,7 @@ It is a recommended practice to put resources related to the same microservice o A URL can also be specified as a configuration source, which is handy for deploying directly from configuration files checked into GitHub: ```shell -kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/application/nginx/nginx-deployment.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/application/nginx/nginx-deployment.yaml ``` ```shell @@ -160,7 +160,7 @@ If you're interested in learning more about `kubectl`, go ahead and read [kubect The examples we've used so far apply at most a single label to any resource. There are many scenarios where multiple labels should be used to distinguish sets from one another. -For instance, different applications would use different values for the `app` label, but a multi-tier application, such as the [guestbook example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/), would additionally need to distinguish each tier. The frontend could carry the following labels: +For instance, different applications would use different values for the `app` label, but a multi-tier application, such as the [guestbook example](https://github.com/kubernetes/examples/tree/master/guestbook/), would additionally need to distinguish each tier. 
The frontend could carry the following labels: ```yaml labels: diff --git a/content/en/docs/concepts/cluster-administration/system-traces.md b/content/en/docs/concepts/cluster-administration/system-traces.md new file mode 100644 index 0000000000..f324604b16 --- /dev/null +++ b/content/en/docs/concepts/cluster-administration/system-traces.md @@ -0,0 +1,89 @@ +--- +title: Traces For Kubernetes System Components +reviewers: +- logicalhan +- lilic +content_type: concept +weight: 60 +--- + + + +{{< feature-state for_k8s_version="v1.22" state="alpha" >}} + +System component traces record the latency of and relationships between operations in the cluster. + +Kubernetes components emit traces using the +[OpenTelemetry Protocol](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#opentelemetry-protocol-specification) +with the gRPC exporter and can be collected and routed to tracing backends using an +[OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector#-opentelemetry-collector). + + + +## Trace Collection + +For a complete guide to collecting traces and using the collector, see +[Getting Started with the OpenTelemetry Collector](https://opentelemetry.io/docs/collector/getting-started/). +However, there are a few things to note that are specific to Kubernetes components. + +By default, Kubernetes components export traces using the grpc exporter for OTLP on the +[IANA OpenTelemetry port](https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=opentelemetry), 4317. 
+As an example, if the collector is running as a sidecar to a Kubernetes component, +the following receiver configuration will collect spans and log them to standard output: + +```yaml +receivers: + otlp: + protocols: + grpc: +exporters: + # Replace this exporter with the exporter for your backend + logging: + logLevel: debug +service: + pipelines: + traces: + receivers: [otlp] + exporters: [logging] +``` + +## Component traces + +### kube-apiserver traces + +The kube-apiserver generates spans for incoming HTTP requests, and for outgoing requests +to webhooks, etcd, and re-entrant requests. It propagates the +[W3C Trace Context](https://www.w3.org/TR/trace-context/) with outgoing requests +but does not make use of the trace context attached to incoming requests, +as the kube-apiserver is often a public endpoint. + +#### Enabling tracing in the kube-apiserver + +To enable tracing, enable the `APIServerTracing` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +on the kube-apiserver. Also, provide the kube-apiserver with a tracing configuration file +with `--tracing-config-file=`. This is an example config that records +spans for 1 in 10000 requests, and uses the default OpenTelemetry endpoint: + +```yaml +apiVersion: apiserver.config.k8s.io/v1alpha1 +kind: TracingConfiguration +# default value +#endpoint: localhost:4317 +samplingRatePerMillion: 100 +``` + +For more information about the `TracingConfiguration` struct, see +[API server config API (v1alpha1)](/docs/reference/config-api/apiserver-config.v1alpha1/#apiserver-k8s-io-v1alpha1-TracingConfiguration). + +## Stability + +Tracing instrumentation is still under active development, and may change +in a variety of ways. This includes span names, attached attributes, +instrumented endpoints, etc. Until this feature graduates to stable, +there are no guarantees of backwards compatibility for tracing instrumentation.
+ +## {{% heading "whatsnext" %}} + +* Read about [Getting Started with the OpenTelemetry Collector](https://opentelemetry.io/docs/collector/getting-started/) + diff --git a/content/en/docs/concepts/configuration/_index.md b/content/en/docs/concepts/configuration/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/concepts/configuration/configmap.md b/content/en/docs/concepts/configuration/configmap.md index cb98bf7439..47ecaedba6 100644 --- a/content/en/docs/concepts/configuration/configmap.md +++ b/content/en/docs/concepts/configuration/configmap.md @@ -61,6 +61,11 @@ You can write a Pod `spec` that refers to a ConfigMap and configures the contain in that Pod based on the data in the ConfigMap. The Pod and the ConfigMap must be in the same {{< glossary_tooltip text="namespace" term_id="namespace" >}}. +{{< note >}} +The `spec` of a {{< glossary_tooltip text="static Pod" term_id="static-pod" >}} cannot refer to a ConfigMap +or any other API objects. +{{< /note >}} + Here's an example ConfigMap that has some keys with single values, and other keys where the value looks like a fragment of a configuration format. diff --git a/content/en/docs/concepts/configuration/manage-resources-containers.md b/content/en/docs/concepts/configuration/manage-resources-containers.md index ee4669641c..cbcfff67ef 100644 --- a/content/en/docs/concepts/configuration/manage-resources-containers.md +++ b/content/en/docs/concepts/configuration/manage-resources-containers.md @@ -115,7 +115,7 @@ CPU is always requested as an absolute quantity, never as a relative quantity; Limits and requests for `memory` are measured in bytes. You can express memory as a plain integer or as a fixed-point number using one of these suffixes: -E, P, T, G, M, K. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, +E, P, T, G, M, k. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. 
For example, the following represent roughly the same value: ```shell @@ -181,8 +181,9 @@ When using Docker: flag in the `docker run` command. - The `spec.containers[].resources.limits.cpu` is converted to its millicore value and - multiplied by 100. The resulting value is the total amount of CPU time that a container can use - every 100ms. A container cannot use more than its share of CPU time during this interval. + multiplied by 100. The resulting value is the total amount of CPU time in microseconds + that a container can use every 100ms. A container cannot use more than its share of + CPU time during this interval. {{< note >}} The default quota period is 100ms. The minimum resolution of CPU quota is 1ms. @@ -337,6 +338,9 @@ spec: ephemeral-storage: "2Gi" limits: ephemeral-storage: "4Gi" + volumeMounts: + - name: ephemeral + mountPath: "/tmp" - name: log-aggregator image: images.my-company.example/log-aggregator:v6 resources: @@ -344,6 +348,12 @@ spec: ephemeral-storage: "2Gi" limits: ephemeral-storage: "4Gi" + volumeMounts: + - name: ephemeral + mountPath: "/tmp" + volumes: + - name: ephemeral + emptyDir: {} ``` ### How Pods with ephemeral-storage requests are scheduled diff --git a/content/en/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/en/docs/concepts/configuration/organize-cluster-access-kubeconfig.md index df767bbc3e..b27fcdee61 100644 --- a/content/en/docs/concepts/configuration/organize-cluster-access-kubeconfig.md +++ b/content/en/docs/concepts/configuration/organize-cluster-access-kubeconfig.md @@ -17,6 +17,11 @@ a *kubeconfig file*. This is a generic way of referring to configuration files. It does not mean that there is a file named `kubeconfig`. {{< /note >}} +{{< warning >}} +Only use kubeconfig files from trusted sources. Using a specially-crafted kubeconfig file could result in malicious code execution or file exposure. 
+If you must use an untrusted kubeconfig file, inspect it carefully first, much as you would a shell script. +{{< /warning>}} + By default, `kubectl` looks for a file named `config` in the `$HOME/.kube` directory. You can specify other kubeconfig files by setting the `KUBECONFIG` environment variable or by setting the @@ -154,4 +159,3 @@ are stored absolutely. - diff --git a/content/en/docs/concepts/configuration/overview.md b/content/en/docs/concepts/configuration/overview.md index 25cfb2e7f1..36eebe3abc 100644 --- a/content/en/docs/concepts/configuration/overview.md +++ b/content/en/docs/concepts/configuration/overview.md @@ -21,7 +21,7 @@ This is a living document. If you think of something that is not on this list bu - Write your configuration files using YAML rather than JSON. Though these formats can be used interchangeably in almost all scenarios, YAML tends to be more user-friendly. -- Group related objects into a single file whenever it makes sense. One file is often easier to manage than several. See the [guestbook-all-in-one.yaml](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/all-in-one/guestbook-all-in-one.yaml) file as an example of this syntax. +- Group related objects into a single file whenever it makes sense. One file is often easier to manage than several. See the [guestbook-all-in-one.yaml](https://github.com/kubernetes/examples/tree/master/guestbook/all-in-one/guestbook-all-in-one.yaml) file as an example of this syntax. - Note also that many `kubectl` commands can be called on a directory. For example, you can call `kubectl apply` on a directory of config files. @@ -63,7 +63,7 @@ DNS server watches the Kubernetes API for new `Services` and creates a set of DN ## Using Labels -- Define and use [labels](/docs/concepts/overview/working-with-objects/labels/) that identify __semantic attributes__ of your application or Deployment, such as `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`. 
You can use these labels to select the appropriate Pods for other resources; for example, a Service that selects all `tier: frontend` Pods, or all `phase: test` components of `app: myapp`. See the [guestbook](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) app for examples of this approach. +- Define and use [labels](/docs/concepts/overview/working-with-objects/labels/) that identify __semantic attributes__ of your application or Deployment, such as `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`. You can use these labels to select the appropriate Pods for other resources; for example, a Service that selects all `tier: frontend` Pods, or all `phase: test` components of `app: myapp`. See the [guestbook](https://github.com/kubernetes/examples/tree/master/guestbook/) app for examples of this approach. A Service can be made to span multiple Deployments by omitting release-specific labels from its selector. When you need to update a running service without downtime, use a [Deployment](/docs/concepts/workloads/controllers/deployment/). @@ -73,32 +73,6 @@ A desired state of an object is described by a Deployment, and if changes to tha - You can manipulate labels for debugging. Because Kubernetes controllers (such as ReplicaSet) and Services match to Pods using selector labels, removing the relevant labels from a Pod will stop it from being considered by a controller or from being served traffic by a Service. If you remove the labels of an existing Pod, its controller will create a new Pod to take its place. This is a useful way to debug a previously "live" Pod in a "quarantine" environment. To interactively remove or add labels, use [`kubectl label`](/docs/reference/generated/kubectl/kubectl-commands#label). 
-## Container Images - -The [imagePullPolicy](/docs/concepts/containers/images/#updating-images) and the tag of the image affect when the [kubelet](/docs/reference/command-line-tools-reference/kubelet/) attempts to pull the specified image. - -- `imagePullPolicy: IfNotPresent`: the image is pulled only if it is not already present locally. - -- `imagePullPolicy: Always`: every time the kubelet launches a container, the kubelet queries the container image registry to resolve the name to an image digest. If the kubelet has a container image with that exact digest cached locally, the kubelet uses its cached image; otherwise, the kubelet downloads (pulls) the image with the resolved digest, and uses that image to launch the container. - -- `imagePullPolicy` is omitted and either the image tag is `:latest` or it is omitted: `imagePullPolicy` is automatically set to `Always`. Note that this will _not_ be updated to `IfNotPresent` if the tag changes value. - -- `imagePullPolicy` is omitted and the image tag is present but not `:latest`: `imagePullPolicy` is automatically set to `IfNotPresent`. Note that this will _not_ be updated to `Always` if the tag is later removed or changed to `:latest`. - -- `imagePullPolicy: Never`: the image is assumed to exist locally. No attempt is made to pull the image. - -{{< note >}} -To make sure the container always uses the same version of the image, you can specify its [digest](https://docs.docker.com/engine/reference/commandline/pull/#pull-an-image-by-digest-immutable-identifier); replace `:` with `@` (for example, `image@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2`). The digest uniquely identifies a specific version of the image, so it is never updated by Kubernetes unless you change the digest value. 
-{{< /note >}} - -{{< note >}} -You should avoid using the `:latest` tag when deploying containers in production as it is harder to track which version of the image is running and more difficult to roll back properly. -{{< /note >}} - -{{< note >}} -The caching semantics of the underlying image provider make even `imagePullPolicy: Always` efficient, as long as the registry is reliably accessible. With Docker, for example, if the image already exists, the pull attempt is fast because all image layers are cached and no image download is needed. -{{< /note >}} - ## Using kubectl - Use `kubectl apply -f `. This looks for Kubernetes configuration in all `.yaml`, `.yml`, and `.json` files in `` and passes it to `apply`. diff --git a/content/en/docs/concepts/configuration/secret.md b/content/en/docs/concepts/configuration/secret.md index 48ac53ed47..e1c7dfb79b 100644 --- a/content/en/docs/concepts/configuration/secret.md +++ b/content/en/docs/concepts/configuration/secret.md @@ -12,26 +12,33 @@ weight: 30 -Kubernetes Secrets let you store and manage sensitive information, such -as passwords, OAuth tokens, and ssh keys. Storing confidential information in a Secret -is safer and more flexible than putting it verbatim in a -{{< glossary_tooltip term_id="pod" >}} definition or in a -{{< glossary_tooltip text="container image" term_id="image" >}}. -See [Secrets design document](https://git.k8s.io/community/contributors/design-proposals/auth/secrets.md) for more information. - A Secret is an object that contains a small amount of sensitive data such as a password, a token, or a key. Such information might otherwise be put in a -Pod specification or in an image. Users can create Secrets and the system -also creates some Secrets. +{{< glossary_tooltip term_id="pod" >}} specification or in a +{{< glossary_tooltip text="container image" term_id="image" >}}. Using a +Secret means that you don't need to include confidential data in your +application code. 
+ +Because Secrets can be created independently of the Pods that use them, there +is less risk of the Secret (and its data) being exposed during the workflow of +creating, viewing, and editing Pods. Kubernetes, and applications that run in +your cluster, can also take additional precautions with Secrets, such as +avoiding writing confidential data to nonvolatile storage. + +Secrets are similar to {{< glossary_tooltip text="ConfigMaps" term_id="configmap" >}} +but are specifically intended to hold confidential data. {{< caution >}} -Kubernetes Secrets are, by default, stored as unencrypted base64-encoded -strings. By default they can be retrieved - as plain text - by anyone with API -access, or anyone with access to Kubernetes' underlying data store, etcd. In -order to safely use Secrets, it is recommended you (at a minimum): +Kubernetes Secrets are, by default, stored unencrypted in the API server's underlying data store (etcd). Anyone with API access can retrieve or modify a Secret, and so can anyone with access to etcd. +Additionally, anyone who is authorized to create a Pod in a namespace can use that access to read any Secret in that namespace; this includes indirect access such as the ability to create a Deployment. + +In order to safely use Secrets, take at least the following steps: 1. [Enable Encryption at Rest](/docs/tasks/administer-cluster/encrypt-data/) for Secrets. -2. [Enable or configure RBAC rules](/docs/reference/access-authn-authz/authorization/) that restrict reading and writing the Secret. Be aware that secrets can be obtained implicitly by anyone with the permission to create a Pod. +2. Enable or configure [RBAC rules](/docs/reference/access-authn-authz/authorization/) that + restrict reading data in Secrets (including via indirect means). +3. Where appropriate, also use mechanisms such as RBAC to limit which principals are allowed to create new Secrets or replace existing ones. 
+ {{< /caution >}} @@ -47,6 +54,10 @@ A Secret can be used with a Pod in three ways: - As [container environment variable](#using-secrets-as-environment-variables). - By the [kubelet when pulling images](#using-imagepullsecrets) for the Pod. +The Kubernetes control plane also uses Secrets; for example, +[bootstrap token Secrets](#bootstrap-token-secrets) are a mechanism to +help automate node registration. + The name of a Secret object must be a valid [DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). You can specify the `data` and/or the `stringData` field when creating a @@ -64,9 +75,9 @@ precedence. ## Types of Secret {#secret-types} When creating a Secret, you can specify its type using the `type` field of -the [`Secret`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core) -resource, or certain equivalent `kubectl` command line flags (if available). -The Secret type is used to facilitate programmatic handling of the Secret data. +a Secret resource, or certain equivalent `kubectl` command line flags (if available). +The `type` of a Secret is used to facilitate programmatic handling of different +kinds of confidential data. Kubernetes provides several builtin types for some common usage scenarios. These types vary in terms of the validations performed and the constraints @@ -822,7 +833,10 @@ are obtained from the API server. This includes any Pods created using `kubectl`, or indirectly via a replication controller. It does not include Pods created as a result of the kubelet `--manifest-url` flag, its `--config` flag, or its REST API (these are -not common ways to create Pods.) +not common ways to create Pods). +The `spec` of a {{< glossary_tooltip text="static Pod" term_id="static-pod" >}} cannot refer to a Secret +or any other API objects. + Secrets must be created before they are consumed in Pods as environment variables unless they are marked as optional. 
References to secrets that do @@ -1164,7 +1178,7 @@ limit access using [authorization policies]( Secrets often hold values that span a spectrum of importance, many of which can cause escalations within Kubernetes (e.g. service account tokens) and to external systems. Even if an individual app can reason about the power of the -secrets it expects to interact with, other apps within the same namespace can +Secrets it expects to interact with, other apps within the same namespace can render those assumptions invalid. For these reasons `watch` and `list` requests for secrets within a namespace are @@ -1235,15 +1249,10 @@ for secret data, so that the secrets are not stored in the clear into {{< glossa - A user who can create a Pod that uses a secret can also see the value of that secret. Even if the API server policy does not allow that user to read the Secret, the user could run a Pod which exposes the secret. - - Currently, anyone with root permission on any node can read _any_ secret from the API server, - by impersonating the kubelet. It is a planned feature to only send secrets to - nodes that actually require them, to restrict the impact of a root exploit on a - single node. 
- ## {{% heading "whatsnext" %}} - Learn how to [manage Secret using `kubectl`](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) - Learn how to [manage Secret using config file](/docs/tasks/configmap-secret/managing-secret-using-config-file/) - Learn how to [manage Secret using kustomize](/docs/tasks/configmap-secret/managing-secret-using-kustomize/) - +- Read the [API reference](/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1/) for `Secret` diff --git a/content/en/docs/concepts/containers/container-environment.md b/content/en/docs/concepts/containers/container-environment.md index a1eba4d96d..3c4c153927 100644 --- a/content/en/docs/concepts/containers/container-environment.md +++ b/content/en/docs/concepts/containers/container-environment.md @@ -52,7 +52,7 @@ FOO_SERVICE_PORT= ``` Services have dedicated IP addresses and are available to the Container via DNS, -if [DNS addon](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/) is enabled.  +if [DNS addon](https://releases.k8s.io/{{< param "fullversion" >}}/cluster/addons/dns/) is enabled.  diff --git a/content/en/docs/concepts/containers/images.md b/content/en/docs/concepts/containers/images.md index 1cd678e4a8..9300561e46 100644 --- a/content/en/docs/concepts/containers/images.md +++ b/content/en/docs/concepts/containers/images.md @@ -39,14 +39,6 @@ There are additional rules about where you can place the separator characters (`_`, `-`, and `.`) inside an image tag. If you don't specify a tag, Kubernetes assumes you mean the tag `latest`. -{{< caution >}} -You should avoid using the `latest` tag when deploying containers in production, -as it is harder to track which version of the image is running and more difficult -to roll back to a working version. - -Instead, specify a meaningful tag such as `v1.42.0`. 
-{{< /caution >}} - ## Updating images When you first create a {{< glossary_tooltip text="Deployment" term_id="deployment" >}}, @@ -57,13 +49,68 @@ specified. This policy causes the {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} to skip pulling an image if it already exists. -If you would like to always force a pull, you can do one of the following: +### Image pull policy -- set the `imagePullPolicy` of the container to `Always`. -- omit the `imagePullPolicy` and use `:latest` as the tag for the image to use; - Kubernetes will set the policy to `Always`. -- omit the `imagePullPolicy` and the tag for the image to use. -- enable the [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) admission controller. +The `imagePullPolicy` for a container and the tag of the image affect when the +[kubelet](/docs/reference/command-line-tools-reference/kubelet/) attempts to pull (download) the specified image. + +Here's a list of the values you can set for `imagePullPolicy` and the effects +these values have: + +`IfNotPresent` +: the image is pulled only if it is not already present locally. + +`Always` +: every time the kubelet launches a container, the kubelet queries the container + image registry to resolve the name to an image + [digest](https://docs.docker.com/engine/reference/commandline/pull/#pull-an-image-by-digest-immutable-identifier). If the kubelet has a + container image with that exact digest cached locally, the kubelet uses its cached + image; otherwise, the kubelet pulls the image with the resolved digest, + and uses that image to launch the container. + +`Never` +: the kubelet does not try fetching the image. If the image is somehow already present + locally, the kubelet attempts to start the container; otherwise, startup fails. + See [pre-pulled images](#pre-pulled-images) for more details. 
+ +The caching semantics of the underlying image provider make even +`imagePullPolicy: Always` efficient, as long as the registry is reliably accessible. +Your container runtime can notice that the image layers already exist on the node +so that they don't need to be downloaded again. + +{{< note >}} +You should avoid using the `:latest` tag when deploying containers in production as +it is harder to track which version of the image is running and more difficult to +roll back properly. + +Instead, specify a meaningful tag such as `v1.42.0`. +{{< /note >}} + +To make sure the Pod always uses the same version of a container image, you can specify +the image's digest; +replace `:` with `@` +(for example, `image@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2`). + +When using image tags, if the image registry were to change the code that the tag on that image represents, you might end up with a mix of Pods running the old and new code. An image digest uniquely identifies a specific version of the image, so Kubernetes runs the same code every time it starts a container with that image name and digest specified. Specifying an image fixes the code that you run so that a change at the registry cannot lead to that mix of versions. + +There are third-party [admission controllers](/docs/reference/access-authn-authz/admission-controllers/) +that mutate Pods (and pod templates) when they are created, so that the +running workload is defined based on an image digest rather than a tag. +That might be useful if you want to make sure that all your workload is +running the same code no matter what tag changes happen at the registry. 
+ +#### Default image pull policy {#imagepullpolicy-defaulting} + +When you (or a controller) submit a new Pod to the API server, your cluster sets the +`imagePullPolicy` field when specific conditions are met: + +- if you omit the `imagePullPolicy` field, and the tag for the container image is + `:latest`, `imagePullPolicy` is automatically set to `Always`; +- if you omit the `imagePullPolicy` field, and you don't specify the tag for the + container image, `imagePullPolicy` is automatically set to `Always`; +- if you omit the `imagePullPolicy` field, and you specify the tag for the + container image that isn't `:latest`, the `imagePullPolicy` is automatically set to + `IfNotPresent`. {{< note >}} The value of `imagePullPolicy` of the container is always set when the object is @@ -75,7 +122,31 @@ For example, if you create a Deployment with an image whose tag is _not_ the pull policy of any object after its initial creation. {{< /note >}} -When `imagePullPolicy` is defined without a specific value, it is also set to `Always`. +#### Required image pull + +If you would like to always force a pull, you can do one of the following: + +- Set the `imagePullPolicy` of the container to `Always`. +- Omit the `imagePullPolicy` and use `:latest` as the tag for the image to use; + Kubernetes will set the policy to `Always` when you submit the Pod. +- Omit the `imagePullPolicy` and the tag for the image to use; + Kubernetes will set the policy to `Always` when you submit the Pod. +- Enable the [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) admission controller. + + +### ImagePullBackOff + +When a kubelet starts creating containers for a Pod using a container runtime, +it is possible that the container is in [Waiting](/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting)
+ +The status `ImagePullBackOff` means that a container could not start because Kubernetes +could not pull a container image (for reasons such as invalid image name, or pulling +from a private registry without `imagePullSecret`). The `BackOff` part indicates +that Kubernetes will keep trying to pull the image, with an increasing back-off delay. + +Kubernetes raises the delay between each attempt until it reaches a compiled-in limit, +which is 300 seconds (5 minutes). ## Multi-architecture images with image indexes @@ -314,6 +385,8 @@ common use cases and suggested solutions. If you need access to multiple registries, you can create one secret for each registry. Kubelet will merge any `imagePullSecrets` into a single virtual `.docker/config.json` + ## {{% heading "whatsnext" %}} -* Read the [OCI Image Manifest Specification](https://github.com/opencontainers/image-spec/blob/master/manifest.md) +* Read the [OCI Image Manifest Specification](https://github.com/opencontainers/image-spec/blob/master/manifest.md). +* Learn about [container image garbage collection](/docs/concepts/architecture/garbage-collection/#container-image-garbage-collection). diff --git a/content/en/docs/concepts/containers/runtime-class.md b/content/en/docs/concepts/containers/runtime-class.md index 6af609636e..96858d32af 100644 --- a/content/en/docs/concepts/containers/runtime-class.md +++ b/content/en/docs/concepts/containers/runtime-class.md @@ -51,7 +51,7 @@ heterogeneous node configurations, see [Scheduling](#scheduling) below. {{< /note >}} The configurations have a corresponding `handler` name, referenced by the RuntimeClass. The -handler must be a valid DNS 1123 label (alpha-numeric + `-` characters). +handler must be a valid [DNS label name](/docs/concepts/overview/working-with-objects/names/#dns-label-names). ### 2. Create the corresponding RuntimeClass resources @@ -118,7 +118,7 @@ Runtime handlers are configured through containerd's configuration at `/etc/containerd/config.toml`. 
Valid handlers are configured under the runtimes section: ``` -[plugins.cri.containerd.runtimes.${HANDLER_NAME}] +[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.${HANDLER_NAME}] ``` See containerd's config documentation for more details: @@ -135,7 +135,7 @@ table](https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md#crioruntim runtime_path = "${PATH_TO_BINARY}" ``` -See CRI-O's [config documentation](https://raw.githubusercontent.com/cri-o/cri-o/9f11d1d/docs/crio.conf.5.md) for more details. +See CRI-O's [config documentation](https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md) for more details. ## Scheduling @@ -179,4 +179,4 @@ are accounted for in Kubernetes. - [RuntimeClass Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md) - [RuntimeClass Scheduling Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md#runtimeclass-scheduling) - Read about the [Pod Overhead](/docs/concepts/scheduling-eviction/pod-overhead/) concept -- [PodOverhead Feature Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md) +- [PodOverhead Feature Design](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/688-pod-overhead) diff --git a/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md b/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md index d9fe184f85..f9ab8647e3 100644 --- a/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md +++ b/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md @@ -1,5 +1,5 @@ --- -title: Extending the Kubernetes API with the aggregation layer +title: Kubernetes API Aggregation Layer reviewers: - lavalamp - cheftako @@ -34,7 +34,7 @@ If your extension API server cannot achieve that latency requirement, consider m * To get the aggregator working in 
your environment, [configure the aggregation layer](/docs/tasks/extend-kubernetes/configure-aggregation-layer/). * Then, [setup an extension api-server](/docs/tasks/extend-kubernetes/setup-extension-api-server/) to work with the aggregation layer. -* Also, learn how to [extend the Kubernetes API using Custom Resource Definitions](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/). -* Read the specification for [APIService](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#apiservice-v1-apiregistration-k8s-io) +* Read about [APIService](/docs/reference/kubernetes-api/cluster-resources/api-service-v1/) in the API reference +Alternatively: learn how to [extend the Kubernetes API using Custom Resource Definitions](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/). diff --git a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index f37a71f278..3d72f279b6 100644 --- a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -167,7 +167,7 @@ CRDs are easier to create than Aggregated APIs. | CRDs | Aggregated API | | --------------------------- | -------------- | -| Do not require programming. Users can choose any language for a CRD controller. | Requires programming in Go and building binary and image. | +| Do not require programming. Users can choose any language for a CRD controller. | Requires programming and building binary and image. | | No additional service to run; CRDs are handled by API server. | An additional service to create and that could fail. | | No ongoing support once the CRD is created. Any bug fixes are picked up as part of normal Kubernetes Master upgrades. | May need to periodically pickup bug fixes from upstream and rebuild and update the Aggregated API server. 
| | No need to handle multiple versions of your API; for example, when you control the client for this resource, you can upgrade it in sync with the API. | You need to handle multiple versions of your API; for example, when developing an extension to share with the world. | diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index ae96bb7551..868d8d56e8 100644 --- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -199,7 +199,7 @@ service PodResourcesLister { The `List` endpoint provides information on resources of running pods, with details such as the id of exclusively allocated CPUs, device id as it was reported by device plugins and id of -the NUMA node where these devices are allocated. +the NUMA node where these devices are allocated. Also, for NUMA-based machines, it contains the information about memory and hugepages reserved for a container. ```gRPC // ListPodResourcesResponse is the response returned by List function @@ -219,6 +219,14 @@ message ContainerResources { string name = 1; repeated ContainerDevices devices = 2; repeated int64 cpu_ids = 3; + repeated ContainerMemory memory = 4; +} + +// ContainerMemory contains information about memory and hugepages assigned to a container +message ContainerMemory { + string memory_type = 1; + uint64 size = 2; + TopologyInfo topology = 3; } // Topology describes hardware topology of the resource @@ -247,6 +255,7 @@ It provides more information than kubelet exports to APIServer. 
message AllocatableResourcesResponse { repeated ContainerDevices devices = 1; repeated int64 cpu_ids = 2; + repeated ContainerMemory memory = 3; } ``` diff --git a/content/en/docs/concepts/extend-kubernetes/operator.md b/content/en/docs/concepts/extend-kubernetes/operator.md index feb40163fc..72fe12f1e7 100644 --- a/content/en/docs/concepts/extend-kubernetes/operator.md +++ b/content/en/docs/concepts/extend-kubernetes/operator.md @@ -51,8 +51,7 @@ Some of the things that you can use an operator to automate include: * choosing a leader for a distributed application without an internal member election process -What might an Operator look like in more detail? Here's an example in more -detail: +What might an Operator look like in more detail? Here's an example: 1. A custom resource named SampleDB, that you can configure into the cluster. 2. A Deployment that makes sure a Pod is running that contains the @@ -115,8 +114,9 @@ Operator. * [Charmed Operator Framework](https://juju.is/) * [kubebuilder](https://book.kubebuilder.io/) +* [KubeOps](https://buehler.github.io/dotnet-operator-sdk/) (.NET operator SDK) * [KUDO](https://kudo.dev/) (Kubernetes Universal Declarative Operator) -* [Metacontroller](https://metacontroller.app/) along with WebHooks that +* [Metacontroller](https://metacontroller.github.io/metacontroller/intro.html) along with WebHooks that you implement yourself * [Operator Framework](https://operatorframework.io) * [shell-operator](https://github.com/flant/shell-operator) @@ -124,6 +124,7 @@ Operator. ## {{% heading "whatsnext" %}} +* Read the {{< glossary_tooltip text="CNCF" term_id="cncf" >}} [Operator White Paper](https://github.com/cncf/tag-app-delivery/blob/eece8f7307f2970f46f100f51932db106db46968/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md). 
* Learn more about [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) * Find ready-made operators on [OperatorHub.io](https://operatorhub.io/) to suit your use case * [Publish](https://operatorhub.io/) your operator for other people to use diff --git a/content/en/docs/concepts/overview/_index.md b/content/en/docs/concepts/overview/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/concepts/overview/components.md b/content/en/docs/concepts/overview/components.md index 61cd2b0d30..6c5c0eefa1 100644 --- a/content/en/docs/concepts/overview/components.md +++ b/content/en/docs/concepts/overview/components.md @@ -16,7 +16,7 @@ card: When you deploy Kubernetes, you get a cluster. {{< glossary_definition term_id="cluster" length="all" prepend="A Kubernetes cluster consists of">}} -This document outlines the various components you need to have +This document outlines the various components you need to have for a complete and working Kubernetes cluster. Here's the diagram of a Kubernetes cluster with all the components tied together. diff --git a/content/en/docs/concepts/overview/what-is-kubernetes.md b/content/en/docs/concepts/overview/what-is-kubernetes.md index 1ace280139..d72f1beb48 100644 --- a/content/en/docs/concepts/overview/what-is-kubernetes.md +++ b/content/en/docs/concepts/overview/what-is-kubernetes.md @@ -45,7 +45,7 @@ Containers have become popular because they provide extra benefits, such as: * Agile application creation and deployment: increased ease and efficiency of container image creation compared to VM image use. * Continuous development, integration, and deployment: provides for reliable and frequent container image build and deployment with quick and efficient rollbacks (due to image immutability). * Dev and Ops separation of concerns: create application container images at build/release time rather than deployment time, thereby decoupling applications from infrastructure. 
-* Observability not only surfaces OS-level information and metrics, but also application health and other signals. +* Observability: not only surfaces OS-level information and metrics, but also application health and other signals. * Environmental consistency across development, testing, and production: Runs the same on a laptop as it does in the cloud. * Cloud and OS distribution portability: Runs on Ubuntu, RHEL, CoreOS, on-premises, on major public clouds, and anywhere else. * Application-centric management: Raises the level of abstraction from running an OS on virtual hardware to running an application on an OS using logical resources. diff --git a/content/en/docs/concepts/overview/working-with-objects/_index.md b/content/en/docs/concepts/overview/working-with-objects/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/concepts/overview/working-with-objects/annotations.md b/content/en/docs/concepts/overview/working-with-objects/annotations.md index fe31841612..f09820bc32 100644 --- a/content/en/docs/concepts/overview/working-with-objects/annotations.md +++ b/content/en/docs/concepts/overview/working-with-objects/annotations.md @@ -30,6 +30,11 @@ Annotations, like labels, are key/value maps: } ``` +{{< note >}} +The keys and the values in the map must be strings. In other words, you cannot use +numeric, boolean, list or other types for either the keys or the values. +{{< /note >}} + Here are some examples of information that could be recorded in annotations: * Fields managed by a declarative configuration layer.
Attaching these fields diff --git a/content/en/docs/concepts/overview/working-with-objects/field-selectors.md b/content/en/docs/concepts/overview/working-with-objects/field-selectors.md index 45a81e9035..a65cb54798 100644 --- a/content/en/docs/concepts/overview/working-with-objects/field-selectors.md +++ b/content/en/docs/concepts/overview/working-with-objects/field-selectors.md @@ -48,7 +48,7 @@ kubectl get pods --field-selector=status.phase!=Running,spec.restartPolicy=Alway ## Multiple resource types -You use field selectors across multiple resource types. This `kubectl` command selects all Statefulsets and Services that are not in the `default` namespace: +You can use field selectors across multiple resource types. This `kubectl` command selects all Statefulsets and Services that are not in the `default` namespace: ```shell kubectl get statefulsets,services --all-namespaces --field-selector metadata.namespace!=default diff --git a/content/en/docs/concepts/overview/working-with-objects/finalizers.md b/content/en/docs/concepts/overview/working-with-objects/finalizers.md new file mode 100644 index 0000000000..9516b935c9 --- /dev/null +++ b/content/en/docs/concepts/overview/working-with-objects/finalizers.md @@ -0,0 +1,80 @@ +--- +title: Finalizers +content_type: concept +weight: 60 +--- + + + +{{< glossary_definition term_id="finalizer" length="long" >}} + +You can use finalizers to control {{< glossary_tooltip text="garbage collection" term_id="garbage-collection" >}} +of resources by alerting {{< glossary_tooltip text="controllers" term_id="controller" >}} to perform specific cleanup tasks before +deleting the target resource. + +Finalizers don't usually specify the code to execute. Instead, they are +typically lists of keys on a specific resource similar to annotations. +Kubernetes specifies some finalizers automatically, but you can also specify +your own. + +## How finalizers work + +When you create a resource using a manifest file, you can specify finalizers in +the `metadata.finalizers` field.
When you attempt to delete the resource, the +controller that manages it notices the values in the `finalizers` field and does +the following: + + * Modifies the object to add a `metadata.deletionTimestamp` field with the + time you started the deletion. + * Marks the object as read-only until its `metadata.finalizers` field is empty. + +The controller then attempts to satisfy the requirements of the finalizers +specified for that resource. Each time a finalizer condition is satisfied, the +controller removes that key from the resource's `finalizers` field. When the +field is empty, garbage collection continues. You can also use finalizers to +prevent deletion of unmanaged resources. + +A common example of a finalizer is `kubernetes.io/pv-protection`, which prevents +accidental deletion of `PersistentVolume` objects. When a `PersistentVolume` +object is in use by a Pod, Kubernetes adds the `pv-protection` finalizer. If you +try to delete the `PersistentVolume`, it enters a `Terminating` status, but the +controller can't delete it because the finalizer exists. When the Pod stops +using the `PersistentVolume`, Kubernetes clears the `pv-protection` finalizer, +and the controller deletes the volume. + +## Owner references, labels, and finalizers {#owners-labels-finalizers} + +Like {{< glossary_tooltip text="labels" term_id="label" >}}, [owner references](/docs/concepts/overview/working-with-objects/owners-dependents/) +describe the relationships between objects in Kubernetes, but are used for a +different purpose. When a +{{< glossary_tooltip text="controller" term_id="controller" >}} manages objects +like Pods, it uses labels to track changes to groups of related objects. For +example, when a {{< glossary_tooltip text="Job" term_id="job" >}} creates one or +more Pods, the Job controller applies labels to those pods and tracks changes to +any Pods in the cluster with the same label. + +The Job controller also adds *owner references* to those Pods, pointing at the +Job that created the Pods.
If you delete the Job while these Pods are running, +Kubernetes uses the owner references (not labels) to determine which Pods in the +cluster need cleanup. + +Kubernetes also processes finalizers when it identifies owner references on a +resource targeted for deletion. + +In some situations, finalizers can block the deletion of dependent objects, +which can cause the targeted owner object to remain in a read-only state for +longer than expected without being fully deleted. In these situations, you +should check finalizers and owner references on the target owner and dependent +objects to troubleshoot the cause. + +{{}} +In cases where objects are stuck in a deleting state, try to avoid manually +removing finalizers to allow deletion to continue. Finalizers are usually added +to resources for a reason, so forcefully removing them can lead to issues in +your cluster. +{{}} + +## {{% heading "whatsnext" %}} + +* Read [Using Finalizers to Control Deletion](/blog/2021/05/14/using-finalizers-to-control-deletion/) + on the Kubernetes blog. diff --git a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md index 716955ca06..c763b40e05 100644 --- a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -81,12 +81,11 @@ In the `.yaml` file for the Kubernetes object you want to create, you'll need to * `metadata` - Data that helps uniquely identify the object, including a `name` string, `UID`, and optional `namespace` * `spec` - What state you desire for the object -The precise format of the object `spec` is different for every Kubernetes object, and contains nested fields specific to that object. 
The [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) can help you find the spec format for all of the objects you can create using Kubernetes. -For example, the `spec` format for a Pod can be found in -[PodSpec v1 core](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core), -and the `spec` format for a Deployment can be found in -[DeploymentSpec v1 apps](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#deploymentspec-v1-apps). +The precise format of the object `spec` is different for every Kubernetes object, and contains nested fields specific to that object. The [Kubernetes API Reference](https://kubernetes.io/docs/reference/kubernetes-api/) can help you find the spec format for all of the objects you can create using Kubernetes. +For example, the reference for Pod details the [`spec` field](/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec) +for a Pod in the API, and the reference for Deployment details the [`spec` field](/docs/reference/kubernetes-api/workload-resources/deployment-v1/#DeploymentSpec) for Deployments. +In those API reference pages you'll see mention of PodSpec and DeploymentSpec. These names are implementation details of the Golang code that Kubernetes uses to implement its API. ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/concepts/overview/working-with-objects/labels.md b/content/en/docs/concepts/overview/working-with-objects/labels.md index 25eb5da66e..fe590402ae 100644 --- a/content/en/docs/concepts/overview/working-with-objects/labels.md +++ b/content/en/docs/concepts/overview/working-with-objects/labels.md @@ -42,7 +42,7 @@ Example labels: * `"partition" : "customerA"`, `"partition" : "customerB"` * `"track" : "daily"`, `"track" : "weekly"` -These are examples of commonly used labels; you are free to develop your own conventions. Keep in mind that label Key must be unique for a given object. 
+These are examples of [commonly used labels](/docs/concepts/overview/working-with-objects/common-labels/); you are free to develop your own conventions. Keep in mind that label Key must be unique for a given object. ## Syntax and character set @@ -50,7 +50,7 @@ _Labels_ are key/value pairs. Valid label keys have two segments: an optional pr If the prefix is omitted, the label Key is presumed to be private to the user. Automated system components (e.g. `kube-scheduler`, `kube-controller-manager`, `kube-apiserver`, `kubectl`, or other third-party automation) which add labels to end-user objects must specify a prefix. -The `kubernetes.io/` and `k8s.io/` prefixes are reserved for Kubernetes core components. +The `kubernetes.io/` and `k8s.io/` prefixes are [reserved](/docs/reference/labels-annotations-taints/) for Kubernetes core components. Valid label value: * must be 63 characters or less (can be empty), diff --git a/content/en/docs/concepts/overview/working-with-objects/names.md b/content/en/docs/concepts/overview/working-with-objects/names.md index 8e74eb5c0b..9bafb1584c 100644 --- a/content/en/docs/concepts/overview/working-with-objects/names.md +++ b/content/en/docs/concepts/overview/working-with-objects/names.md @@ -28,7 +28,7 @@ For non-unique user-provided attributes, Kubernetes provides [labels](/docs/conc In cases when objects represent a physical entity, like a Node representing a physical host, when the host is re-created under the same name without deleting and re-creating the Node, Kubernetes treats the new host as the old one, which may lead to inconsistencies. {{< /note >}} -Below are three types of commonly used name constraints for resources. +Below are four types of commonly used name constraints for resources. 
### DNS Subdomain Names @@ -41,7 +41,7 @@ This means the name must: - start with an alphanumeric character - end with an alphanumeric character -### DNS Label Names +### RFC 1123 Label Names {#dns-label-names} Some resource types require their names to follow the DNS label standard as defined in [RFC 1123](https://tools.ietf.org/html/rfc1123). @@ -52,6 +52,17 @@ This means the name must: - start with an alphanumeric character - end with an alphanumeric character +### RFC 1035 Label Names + +Some resource types require their names to follow the DNS +label standard as defined in [RFC 1035](https://tools.ietf.org/html/rfc1035). +This means the name must: + +- contain at most 63 characters +- contain only lowercase alphanumeric characters or '-' +- start with an alphabetic character +- end with an alphanumeric character + ### Path Segment Names Some resource types require their names to be able to be safely encoded as a diff --git a/content/en/docs/concepts/overview/working-with-objects/namespaces.md b/content/en/docs/concepts/overview/working-with-objects/namespaces.md index 45f454516c..6664a2ad4c 100644 --- a/content/en/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/en/docs/concepts/overview/working-with-objects/namespaces.md @@ -62,7 +62,10 @@ Kubernetes starts with four initial namespaces: * `default` The default namespace for objects with no other namespace * `kube-system` The namespace for objects created by the Kubernetes system * `kube-public` This namespace is created automatically and is readable by all users (including those not authenticated). This namespace is mostly reserved for cluster usage, in case that some resources should be visible and readable publicly throughout the whole cluster. The public aspect of this namespace is only a convention, not a requirement. - * `kube-node-lease` This namespace for the lease objects associated with each node which improves the performance of the node heartbeats as the cluster scales. 
+ * `kube-node-lease` This namespace holds [Lease](/docs/reference/kubernetes-api/cluster-resources/lease-v1/) + objects associated with each node. Node leases allow the kubelet to send + [heartbeats](/docs/concepts/architecture/nodes/#heartbeats) so that the control plane + can detect node failure. ### Setting the namespace for a request diff --git a/content/en/docs/concepts/overview/working-with-objects/owners-dependents.md b/content/en/docs/concepts/overview/working-with-objects/owners-dependents.md new file mode 100644 index 0000000000..ea40c3b3a3 --- /dev/null +++ b/content/en/docs/concepts/overview/working-with-objects/owners-dependents.md @@ -0,0 +1,89 @@ +--- +title: Owners and Dependents +content_type: concept +weight: 60 +--- + + + +In Kubernetes, some objects are *owners* of other objects. For example, a +{{}} is the owner of a set of Pods. These owned objects are *dependents* +of their owner. + +Ownership is different from the [labels and selectors](/docs/concepts/overview/working-with-objects/labels/) +mechanism that some resources also use. For example, consider a Service that +creates `EndpointSlice` objects. The Service uses labels to allow the control plane to +determine which `EndpointSlice` objects are used for that Service. In addition +to the labels, each `EndpointSlice` that is managed on behalf of a Service has +an owner reference. Owner references help different parts of Kubernetes avoid +interfering with objects they don’t control. + +## Owner references in object specifications + +Dependent objects have a `metadata.ownerReferences` field that references their +owner object. A valid owner reference consists of the object name and a UID +within the same namespace as the dependent object. Kubernetes sets the value of +this field automatically for objects that are dependents of other objects like +ReplicaSets, DaemonSets, Deployments, Jobs and CronJobs, and ReplicationControllers. 
+You can also configure these relationships manually by changing the value of +this field. However, you usually don't need to and can allow Kubernetes to +automatically manage the relationships. + +Dependent objects also have an `ownerReferences.blockOwnerDeletion` field that +takes a boolean value and controls whether specific dependents can block garbage +collection from deleting their owner object. Kubernetes automatically sets this +field to `true` if a {{}} +(for example, the Deployment controller) sets the value of the +`metadata.ownerReferences` field. You can also set the value of the +`blockOwnerDeletion` field manually to control which dependents block garbage +collection. + +A Kubernetes admission controller controls user access to change this field for +dependent resources, based on the delete permissions of the owner. This control +prevents unauthorized users from delaying owner object deletion. + +{{< note >}} +Cross-namespace owner references are disallowed by design. +Namespaced dependents can specify cluster-scoped or namespaced owners. +A namespaced owner **must** exist in the same namespace as the dependent. +If it does not, the owner reference is treated as absent, and the dependent +is subject to deletion once all owners are verified absent. + +Cluster-scoped dependents can only specify cluster-scoped owners. +In v1.20+, if a cluster-scoped dependent specifies a namespaced kind as an owner, +it is treated as having an unresolvable owner reference, and is not able to be garbage collected. + +In v1.20+, if the garbage collector detects an invalid cross-namespace `ownerReference`, +or a cluster-scoped dependent with an `ownerReference` referencing a namespaced kind, a warning Event +with a reason of `OwnerRefInvalidNamespace` and an `involvedObject` of the invalid dependent is reported. +You can check for that kind of Event by running +`kubectl get events -A --field-selector=reason=OwnerRefInvalidNamespace`. 
+{{< /note >}} + +## Ownership and finalizers + +When you tell Kubernetes to delete a resource, the API server allows the +managing controller to process any [finalizer rules](/docs/concepts/overview/working-with-objects/finalizers/) +for the resource. {{}} +prevent accidental deletion of resources your cluster may still need to function +correctly. For example, if you try to delete a `PersistentVolume` that is still +in use by a Pod, the deletion does not happen immediately because the +`PersistentVolume` has the `kubernetes.io/pv-protection` finalizer on it. +Instead, the volume remains in the `Terminating` status until Kubernetes clears +the finalizer, which only happens after the `PersistentVolume` is no longer +bound to a Pod. + +Kubernetes also adds finalizers to an owner resource when you use either +[foreground or orphan cascading deletion](/docs/concepts/architecture/garbage-collection/#cascading-deletion). +In foreground deletion, it adds the `foreground` finalizer so that the +controller must delete dependent resources that also have +`ownerReferences.blockOwnerDeletion=true` before it deletes the owner. If you +specify an orphan deletion policy, Kubernetes adds the `orphan` finalizer so +that the controller ignores dependent resources after it deletes the owner +object. + +## {{% heading "whatsnext" %}} + +* Learn more about [Kubernetes finalizers](/docs/concepts/overview/working-with-objects/finalizers/). +* Learn about [garbage collection](/docs/concepts/architecture/garbage-collection). +* Read the API reference for [object metadata](/docs/reference/kubernetes-api/common-definitions/object-meta/#System). 
\ No newline at end of file diff --git a/content/en/docs/concepts/policy/_index.md b/content/en/docs/concepts/policy/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/concepts/policy/pid-limiting.md b/content/en/docs/concepts/policy/pid-limiting.md index 6d173bc845..1e03ccf375 100644 --- a/content/en/docs/concepts/policy/pid-limiting.md +++ b/content/en/docs/concepts/policy/pid-limiting.md @@ -10,7 +10,8 @@ weight: 40 {{< feature-state for_k8s_version="v1.20" state="stable" >}} -Kubernetes allow you to limit the number of process IDs (PIDs) that a {{< glossary_tooltip term_id="Pod" text="Pod" >}} can use. +Kubernetes allows you to limit the number of process IDs (PIDs) that a +{{< glossary_tooltip term_id="Pod" text="Pod" >}} can use. You can also reserve a number of allocatable PIDs for each {{< glossary_tooltip term_id="node" text="node" >}} for use by the operating system and daemons (rather than by Pods). @@ -84,7 +85,9 @@ gate](/docs/reference/command-line-tools-reference/feature-gates/) Kubernetes allows you to limit the number of processes running in a Pod. You specify this limit at the node level, rather than configuring it as a resource limit for a particular Pod. Each Node can have a different PID limit. -To configure the limit, you can specify the command line parameter `--pod-max-pids` to the kubelet, or set `PodPidsLimit` in the kubelet [configuration file](/docs/tasks/administer-cluster/kubelet-config-file/). +To configure the limit, you can specify the command line parameter `--pod-max-pids` +to the kubelet, or set `PodPidsLimit` in the kubelet +[configuration file](/docs/tasks/administer-cluster/kubelet-config-file/). 
{{< note >}} Before Kubernetes version 1.20, PID resource limiting for Pods required enabling @@ -95,9 +98,12 @@ the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) ## PID based eviction You can configure kubelet to start terminating a Pod when it is misbehaving and consuming abnormal amount of resources. -This feature is called eviction. You can [Configure Out of Resource Handling](/docs/tasks/administer-cluster/out-of-resource) for various eviction signals. +This feature is called eviction. You can +[Configure Out of Resource Handling](/docs/concepts/scheduling-eviction/node-pressure-eviction/) +for various eviction signals. Use `pid.available` eviction signal to configure the threshold for number of PIDs used by Pod. -You can set soft and hard eviction policies. However, even with the hard eviction policy, if the number of PIDs growing very fast, +You can set soft and hard eviction policies. +However, even with the hard eviction policy, if the number of PIDs is growing very fast, node can still get into unstable state by hitting the node PIDs limit. Eviction signal value is calculated periodically and does NOT enforce the limit. @@ -112,6 +118,7 @@ when one Pod is misbehaving. ## {{% heading "whatsnext" %}} - Refer to the [PID Limiting enhancement document](https://github.com/kubernetes/enhancements/blob/097b4d8276bc9564e56adf72505d43ce9bc5e9e8/keps/sig-node/20190129-pid-limiting.md) for more information. -- For historical context, read [Process ID Limiting for Stability Improvements in Kubernetes 1.14](/blog/2019/04/15/process-id-limiting-for-stability-improvements-in-kubernetes-1.14/). +- For historical context, read + [Process ID Limiting for Stability Improvements in Kubernetes 1.14](/blog/2019/04/15/process-id-limiting-for-stability-improvements-in-kubernetes-1.14/). - Read [Managing Resources for Containers](/docs/concepts/configuration/manage-resources-containers/). 
-- Learn how to [Configure Out of Resource Handling](/docs/tasks/administer-cluster/out-of-resource). +- Learn how to [Configure Out of Resource Handling](/docs/concepts/scheduling-eviction/node-pressure-eviction/). diff --git a/content/en/docs/concepts/policy/pod-security-policy.md b/content/en/docs/concepts/policy/pod-security-policy.md index f0884c3dea..36172faba5 100644 --- a/content/en/docs/concepts/policy/pod-security-policy.md +++ b/content/en/docs/concepts/policy/pod-security-policy.md @@ -11,7 +11,8 @@ weight: 30 {{< feature-state for_k8s_version="v1.21" state="deprecated" >}} -PodSecurityPolicy is deprecated as of Kubernetes v1.21, and will be removed in v1.25. +PodSecurityPolicy is deprecated as of Kubernetes v1.21, and will be removed in v1.25. For more information on the deprecation, +see [PodSecurityPolicy Deprecation: Past, Present, and Future](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/). Pod Security Policies enable fine-grained authorization of pod creation and updates. @@ -48,13 +49,12 @@ administrator to control the following: ## Enabling Pod Security Policies -Pod security policy control is implemented as an optional (but recommended) -[admission -controller](/docs/reference/access-authn-authz/admission-controllers/#podsecuritypolicy). PodSecurityPolicies -are enforced by [enabling the admission +Pod security policy control is implemented as an optional [admission +controller](/docs/reference/access-authn-authz/admission-controllers/#podsecuritypolicy). +PodSecurityPolicies are enforced by [enabling the admission controller](/docs/reference/access-authn-authz/admission-controllers/#how-do-i-turn-on-an-admission-control-plug-in), -but doing so without authorizing any policies **will prevent any pods from being -created** in the cluster. +but doing so without authorizing any policies **will prevent any pods from being created** in the +cluster. 
Since the pod security policy API (`policy/v1beta1/podsecuritypolicy`) is enabled independently of the admission controller, for existing clusters it is @@ -110,7 +110,11 @@ roleRef: name: apiGroup: rbac.authorization.k8s.io subjects: -# Authorize specific service accounts: +# Authorize all service accounts in a namespace (recommended): +- kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:serviceaccounts: +# Authorize specific service accounts (not recommended): - kind: ServiceAccount name: namespace: @@ -139,6 +143,40 @@ Examples](/docs/reference/access-authn-authz/rbac#role-binding-examples). For a complete example of authorizing a PodSecurityPolicy, see [below](#example). +### Recommended Practice + +PodSecurityPolicy is being replaced by a new, simplified `PodSecurity` {{< glossary_tooltip +text="admission controller" term_id="admission-controller" >}}. For more details on this change, see +[PodSecurityPolicy Deprecation: Past, Present, and +Future](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/). Follow these +guidelines to simplify migration from PodSecurityPolicy to the new admission controller: + +1. Limit your PodSecurityPolicies to the policies defined by the [Pod Security Standards](/docs/concepts/security/pod-security-standards): + - {{< example file="policy/privileged-psp.yaml" >}}Privileged{{< /example >}} + - {{< example file="policy/baseline-psp.yaml" >}}Baseline{{< /example >}} + - {{< example file="policy/restricted-psp.yaml" >}}Restricted{{< /example >}} + +2. Only bind PSPs to entire namespaces, by using the `system:serviceaccounts:` group + (where `` is the target namespace). For example: + + ```yaml + apiVersion: rbac.authorization.k8s.io/v1 + # This cluster role binding allows all pods in the "development" namespace to use the baseline PSP. 
+ kind: ClusterRoleBinding + metadata: + name: psp-baseline-namespaces + roleRef: + kind: ClusterRole + name: psp-baseline + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: Group + name: system:serviceaccounts:development + apiGroup: rbac.authorization.k8s.io + - kind: Group + name: system:serviceaccounts:canary + apiGroup: rbac.authorization.k8s.io + ``` ### Troubleshooting @@ -464,12 +502,12 @@ allowed prefix, and a `readOnly` field indicating it must be mounted read-only. For example: ```yaml -allowedHostPaths: - # This allows "/foo", "/foo/", "/foo/bar" etc., but - # disallows "/fool", "/etc/foo" etc. - # "/foo/../" is never valid. - - pathPrefix: "/foo" - readOnly: true # only allow read-only mounts + allowedHostPaths: + # This allows "/foo", "/foo/", "/foo/bar" etc., but + # disallows "/fool", "/etc/foo" etc. + # "/foo/../" is never valid. + - pathPrefix: "/foo" + readOnly: true # only allow read-only mounts ``` {{< warning >}}There are many ways a container with unrestricted access to the host @@ -661,8 +699,10 @@ Refer to the [Sysctl documentation]( ## {{% heading "whatsnext" %}} +- See [PodSecurityPolicy Deprecation: Past, Present, and + Future](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/) to learn about + the future of pod security policy. + - See [Pod Security Standards](/docs/concepts/security/pod-security-standards/) for policy recommendations. - Refer to [Pod Security Policy Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritypolicy-v1beta1-policy) for the api details. - - diff --git a/content/en/docs/concepts/policy/resource-quotas.md b/content/en/docs/concepts/policy/resource-quotas.md index 1d0e9d4ecd..d31efd09bc 100644 --- a/content/en/docs/concepts/policy/resource-quotas.md +++ b/content/en/docs/concepts/policy/resource-quotas.md @@ -57,8 +57,9 @@ Neither contention nor changes to quota will affect already created resources. 
## Enabling Resource Quota -Resource Quota support is enabled by default for many Kubernetes distributions. It is -enabled when the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}} `--enable-admission-plugins=` flag has `ResourceQuota` as +Resource Quota support is enabled by default for many Kubernetes distributions. It is +enabled when the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}} +`--enable-admission-plugins=` flag has `ResourceQuota` as one of its arguments. A resource quota is enforced in a particular namespace when there is a @@ -66,7 +67,9 @@ ResourceQuota in that namespace. ## Compute Resource Quota -You can limit the total sum of [compute resources](/docs/concepts/configuration/manage-resources-containers/) that can be requested in a given namespace. +You can limit the total sum of +[compute resources](/docs/concepts/configuration/manage-resources-containers/) +that can be requested in a given namespace. The following resource types are supported: @@ -125,7 +128,9 @@ In release 1.8, quota support for local ephemeral storage is added as an alpha f | `ephemeral-storage` | Same as `requests.ephemeral-storage`. | {{< note >}} -When using a CRI container runtime, container logs will count against the ephemeral storage quota. This can result in the unexpected eviction of pods that have exhausted their storage quotas. Refer to [Logging Architecture](/docs/concepts/cluster-administration/logging/) for details. +When using a CRI container runtime, container logs will count against the ephemeral storage quota. +This can result in the unexpected eviction of pods that have exhausted their storage quotas. +Refer to [Logging Architecture](/docs/concepts/cluster-administration/logging/) for details. 
{{< /note >}} ## Object Count Quota @@ -192,7 +197,7 @@ Resources specified on the quota outside of the allowed set results in a validat | `NotTerminating` | Match pods where `.spec.activeDeadlineSeconds is nil` | | `BestEffort` | Match pods that have best effort quality of service. | | `NotBestEffort` | Match pods that do not have best effort quality of service. | -| `PriorityClass` | Match pods that references the specified [priority class](/docs/concepts/configuration/pod-priority-preemption). | +| `PriorityClass` | Match pods that reference the specified [priority class](/docs/concepts/scheduling-eviction/pod-priority-preemption). | | `CrossNamespacePodAffinity` | Match pods that have cross-namespace pod [(anti)affinity terms](/docs/concepts/scheduling-eviction/assign-pod-node). | The `BestEffort` scope restricts a quota to tracking the following resource: @@ -248,13 +253,14 @@ specified. {{< feature-state for_k8s_version="v1.17" state="stable" >}} -Pods can be created at a specific [priority](/docs/concepts/configuration/pod-priority-preemption/#pod-priority). +Pods can be created at a specific [priority](/docs/concepts/scheduling-eviction/pod-priority-preemption/#pod-priority). You can control a pod's consumption of system resources based on a pod's priority, by using the `scopeSelector` field in the quota spec. A quota is matched and consumed only if `scopeSelector` in the quota spec selects the pod. 
-When quota is scoped for priority class using `scopeSelector` field, quota object is restricted to track only following resources: +When quota is scoped for priority class using `scopeSelector` field, quota object +is restricted to track only following resources: * `pods` * `cpu` @@ -436,7 +442,7 @@ pods 0 10 ### Cross-namespace Pod Affinity Quota -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} +{{< feature-state for_k8s_version="v1.22" state="beta" >}} Operators can use `CrossNamespacePodAffinity` quota scope to limit which namespaces are allowed to have pods with affinity terms that cross namespaces. Specifically, it controls which pods are allowed @@ -487,7 +493,7 @@ With the above configuration, pods can use `namespaces` and `namespaceSelector` if the namespace where they are created have a resource quota object with `CrossNamespaceAffinity` scope and a hard limit greater than or equal to the number of pods using those fields. -This feature is alpha and disabled by default. You can enable it by setting the +This feature is beta and enabled by default. You can disable it using the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) `PodAffinityNamespaceSelector` in both kube-apiserver and kube-scheduler. 
@@ -554,7 +560,7 @@ kubectl create -f ./object-counts.yaml --namespace=myspace kubectl get quota --namespace=myspace ``` -``` +```none NAME AGE compute-resources 30s object-counts 32s @@ -564,7 +570,7 @@ object-counts 32s kubectl describe quota compute-resources --namespace=myspace ``` -``` +```none Name: compute-resources Namespace: myspace Resource Used Hard @@ -580,7 +586,7 @@ requests.nvidia.com/gpu 0 4 kubectl describe quota object-counts --namespace=myspace ``` -``` +```none Name: object-counts Namespace: myspace Resource Used Hard @@ -677,10 +683,10 @@ Then, create a resource quota object in the `kube-system` namespace: {{< codenew file="policy/priority-class-resourcequota.yaml" >}} ```shell -$ kubectl apply -f https://k8s.io/examples/policy/priority-class-resourcequota.yaml -n kube-system +kubectl apply -f https://k8s.io/examples/policy/priority-class-resourcequota.yaml -n kube-system ``` -``` +```none resourcequota/pods-cluster-services created ``` diff --git a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md index 3c779dda79..9216ec2ff9 100644 --- a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -271,14 +271,14 @@ All `matchExpressions` associated with `requiredDuringSchedulingIgnoredDuringExe must be satisfied for the pod to be scheduled onto a node. #### Namespace selector -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} +{{< feature-state for_k8s_version="v1.22" state="beta" >}} Users can also select matching namespaces using `namespaceSelector`, which is a label query over the set of namespaces. The affinity term is applied to the union of the namespaces selected by `namespaceSelector` and the ones listed in the `namespaces` field. 
Note that an empty `namespaceSelector` ({}) matches all namespaces, while a null or empty `namespaces` list and null `namespaceSelector` means "this pod's namespace". -This feature is alpha and disabled by default. You can enable it by setting the +This feature is beta and enabled by default. You can disable it via the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) `PodAffinityNamespaceSelector` in both kube-apiserver and kube-scheduler. diff --git a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md index 52c8fd417e..916f050513 100644 --- a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -47,7 +47,7 @@ functions to score the feasible Nodes and picks a Node with the highest score among the feasible ones to run the Pod. The scheduler then notifies the API server about this decision in a process called _binding_. -Factors that need taken into account for scheduling decisions include +Factors that need to be taken into account for scheduling decisions include individual and collective resource requirements, hardware / software / policy constraints, affinity and anti-affinity specifications, data locality, inter-workload interference, and so on. 
@@ -85,7 +85,7 @@ of the scheduler: * Read about [scheduler performance tuning](/docs/concepts/scheduling-eviction/scheduler-perf-tuning/) * Read about [Pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/) * Read the [reference documentation](/docs/reference/command-line-tools-reference/kube-scheduler/) for kube-scheduler -* Read the [kube-scheduler config (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) reference +* Read the [kube-scheduler config (v1beta2)](/docs/reference/config-api/kube-scheduler-config.v1beta2/) reference * Learn about [configuring multiple schedulers](/docs/tasks/extend-kubernetes/configure-multiple-schedulers/) * Learn about [topology management policies](/docs/tasks/administer-cluster/topology-manager/) * Learn about [Pod Overhead](/docs/concepts/scheduling-eviction/pod-overhead/) diff --git a/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md b/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md index f2ae086783..e832d1d48c 100644 --- a/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md +++ b/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md @@ -193,7 +193,7 @@ resources based on the filesystems on the node. If the node has a dedicated `imagefs` filesystem for container runtimes to use, the kubelet does the following: - * If the `nodefs` filesystem meets the eviction threshlds, the kubelet garbage collects + * If the `nodefs` filesystem meets the eviction thresholds, the kubelet garbage collects dead pods and containers. * If the `imagefs` filesystem meets the eviction thresholds, the kubelet deletes all unused images. @@ -214,7 +214,7 @@ signal below the threshold, the kubelet begins to evict end-user pods. The kubelet uses the following parameters to determine pod eviction order: 1. Whether the pod's resource usage exceeds requests -1. 
[Pod Priority](/docs/concepts/configuration/pod-priority-preemption/) +1. [Pod Priority](/docs/concepts/scheduling-eviction/pod-priority-preemption/) 1. The pod's resource usage relative to requests As a result, kubelet ranks and evicts pods in the following order: diff --git a/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md b/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md index 112e244f46..fff925b6c5 100644 --- a/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md +++ b/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md @@ -252,12 +252,12 @@ Even so, the answer to the preceding question must be yes. If the answer is no, the Node is not considered for preemption. {{< /note >}} -If a pending Pod has inter-pod affinity to one or more of the lower-priority -Pods on the Node, the inter-Pod affinity rule cannot be satisfied in the absence -of those lower-priority Pods. In this case, the scheduler does not preempt any -Pods on the Node. Instead, it looks for another Node. The scheduler might find a -suitable Node or it might not. There is no guarantee that the pending Pod can be -scheduled. +If a pending Pod has inter-pod {{< glossary_tooltip text="affinity" term_id="affinity" >}} +to one or more of the lower-priority Pods on the Node, the inter-Pod affinity +rule cannot be satisfied in the absence of those lower-priority Pods. In this case, +the scheduler does not preempt any Pods on the Node. Instead, it looks for another +Node. The scheduler might find a suitable Node or it might not. There is no +guarantee that the pending Pod can be scheduled. Our recommended solution for this problem is to create inter-Pod affinity only towards equal or higher priority Pods. @@ -353,7 +353,7 @@ the removal of the lowest priority Pods is not sufficient to allow the scheduler to schedule the preemptor Pod, or if the lowest priority Pods are protected by `PodDisruptionBudget`. 
-The kubelet uses Priority to determine pod order for [out-of-resource eviction](/docs/tasks/administer-cluster/out-of-resource/). +The kubelet uses Priority to determine pod order for [node-pressure eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/). You can use the QoS class to estimate the order in which pods are most likely to get evicted. The kubelet ranks pods for eviction based on the following factors: @@ -361,10 +361,10 @@ to get evicted. The kubelet ranks pods for eviction based on the following facto 1. Pod Priority 1. Amount of resource usage relative to requests -See [evicting end-user pods](/docs/tasks/administer-cluster/out-of-resource/#evicting-end-user-pods) +See [Pod selection for kubelet eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/#pod-selection-for-kubelet-eviction) for more details. -kubelet out-of-resource eviction does not evict Pods when their +kubelet node-pressure eviction does not evict Pods when their usage does not exceed their requests. If a Pod with lower priority is not exceeding its requests, it won't be evicted. Another Pod with higher priority that exceeds its requests may be evicted. 
diff --git a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md index e294537c4b..17a426e35b 100644 --- a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md +++ b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md @@ -92,9 +92,9 @@ shape: ``` yaml resources: - - name: CPU + - name: cpu weight: 1 - - name: Memory + - name: memory weight: 1 ``` @@ -104,9 +104,9 @@ It can be used to add extended resources as follows: resources: - name: intel.com/foo weight: 5 - - name: CPU + - name: cpu weight: 3 - - name: Memory + - name: memory weight: 1 ``` @@ -123,16 +123,16 @@ Requested resources: ``` intel.com/foo : 2 -Memory: 256MB -CPU: 2 +memory: 256MB +cpu: 2 ``` Resource weights: ``` intel.com/foo : 5 -Memory: 1 -CPU: 3 +memory: 1 +cpu: 3 ``` FunctionShapePoint {{0, 0}, {100, 10}} @@ -142,13 +142,13 @@ Node 1 spec: ``` Available: intel.com/foo: 4 - Memory: 1 GB - CPU: 8 + memory: 1 GB + cpu: 8 Used: intel.com/foo: 1 - Memory: 256MB - CPU: 1 + memory: 256MB + cpu: 1 ``` Node score: @@ -161,13 +161,13 @@ intel.com/foo = resourceScoringFunction((2+1),4) = rawScoringFunction(75) = 7 # floor(75/10) -Memory = resourceScoringFunction((256+256),1024) +memory = resourceScoringFunction((256+256),1024) = (100 -((1024-512)*100/1024)) = 50 # requested + used = 50% * available = rawScoringFunction(50) = 5 # floor(50/10) -CPU = resourceScoringFunction((2+1),8) +cpu = resourceScoringFunction((2+1),8) = (100 -((8-3)*100/8)) = 37.5 # requested + used = 37.5% * available = rawScoringFunction(37.5) @@ -182,12 +182,12 @@ Node 2 spec: ``` Available: intel.com/foo: 8 - Memory: 1GB - CPU: 8 + memory: 1GB + cpu: 8 Used: intel.com/foo: 2 - Memory: 512MB - CPU: 6 + memory: 512MB + cpu: 6 ``` Node score: @@ -200,13 +200,13 @@ intel.com/foo = resourceScoringFunction((2+2),8) = rawScoringFunction(50) = 5 -Memory = resourceScoringFunction((256+512),1024) +memory = 
resourceScoringFunction((256+512),1024) = (100 -((1024-768)*100/1024)) = 75 = rawScoringFunction(75) = 7 -CPU = resourceScoringFunction((2+6),8) +cpu = resourceScoringFunction((2+6),8) = (100 -((8-8)*100/8)) = 100 = rawScoringFunction(100) diff --git a/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md b/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md index b110dc63e5..5894398c9b 100644 --- a/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md +++ b/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md @@ -43,7 +43,7 @@ If you set `percentageOfNodesToScore` above 100, kube-scheduler acts as if you had set a value of 100. To change the value, edit the -[kube-scheduler configuration file](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +[kube-scheduler configuration file](/docs/reference/config-api/kube-scheduler-config.v1beta2/) and then restart the scheduler. In many cases, the configuration file can be found at `/etc/kubernetes/config/kube-scheduler.yaml`. @@ -161,5 +161,5 @@ After going over all the Nodes, it goes back to Node 1. ## {{% heading "whatsnext" %}} -* Check the [kube-scheduler configuration reference (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +* Check the [kube-scheduler configuration reference (v1beta2)](/docs/reference/config-api/kube-scheduler-config.v1beta2/) diff --git a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md index 3be7adf430..e08052c017 100644 --- a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md +++ b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md @@ -8,7 +8,7 @@ weight: 90 -{{< feature-state for_k8s_version="v1.15" state="alpha" >}} +{{< feature-state for_k8s_version="v1.19" state="stable" >}} The scheduling framework is a pluggable architecture for the Kubernetes scheduler. 
It adds a new set of "plugin" APIs to the existing scheduler. Plugins are compiled into the scheduler. The APIs allow most scheduling features to be implemented as plugins, while keeping the
diff --git a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md
index 946e858a02..030f28e7d1 100644
--- a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md
+++ b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md
@@ -10,7 +10,7 @@ weight: 40
-[_Node affinity_](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity),
+[_Node affinity_](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity)
 is a property of {{< glossary_tooltip text="Pods" term_id="pod" >}} that *attracts* them to a set of {{< glossary_tooltip text="nodes" term_id="node" >}} (either as a preference or a hard requirement). _Taints_ are the opposite -- they allow a node to repel a set of pods.
@@ -266,9 +266,23 @@ This ensures that DaemonSet pods are never evicted due to these problems.
 ## Taint Nodes by Condition
-The node lifecycle controller automatically creates taints corresponding to
-Node conditions with `NoSchedule` effect.
-Similarly the scheduler does not check Node conditions; instead the scheduler checks taints. This assures that Node conditions don't affect what's scheduled onto the Node. The user can choose to ignore some of the Node's problems (represented as Node conditions) by adding appropriate Pod tolerations.
+The control plane, using the node {{< glossary_tooltip text="controller" term_id="kube-controller-manager" >}},
+automatically creates taints with a `NoSchedule` effect for [node conditions](/docs/concepts/scheduling-eviction/node-pressure-eviction/#node-conditions).
+
+The scheduler checks taints, not node conditions, when it makes scheduling
+decisions. This ensures that node conditions don't directly affect scheduling.
+For example, if the `DiskPressure` node condition is active, the control plane +adds the `node.kubernetes.io/disk-pressure` taint and does not schedule new pods +onto the affected node. If the `MemoryPressure` node condition is active, the +control plane adds the `node.kubernetes.io/memory-pressure` taint. + +You can ignore node conditions for newly created pods by adding the corresponding +Pod tolerations. The control plane also adds the `node.kubernetes.io/memory-pressure` +toleration on pods that have a {{< glossary_tooltip text="QoS class" term_id="qos-class" >}} +other than `BestEffort`. This is because Kubernetes treats pods in the `Guaranteed` +or `Burstable` QoS classes (even pods with no memory request set) as if they are +able to cope with memory pressure, while new `BestEffort` pods are not scheduled +onto the affected node. The DaemonSet controller automatically adds the following `NoSchedule` tolerations to all daemons, to prevent DaemonSets from breaking. @@ -282,10 +296,9 @@ tolerations to all daemons, to prevent DaemonSets from breaking. Adding these tolerations ensures backward compatibility. You can also add arbitrary tolerations to DaemonSets. 
- ## {{% heading "whatsnext" %}} -* Read about [out of resource handling](/docs/tasks/administer-cluster/out-of-resource/) and how you can configure it -* Read about [pod priority](/docs/concepts/configuration/pod-priority-preemption/) +* Read about [Node-pressure Eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/) and how you can configure it +* Read about [Pod Priority](/docs/concepts/scheduling-eviction/pod-priority-preemption/) diff --git a/content/en/docs/concepts/security/controlling-access.md b/content/en/docs/concepts/security/controlling-access.md index 9d6c2b9617..1a0c93d8cf 100644 --- a/content/en/docs/concepts/security/controlling-access.md +++ b/content/en/docs/concepts/security/controlling-access.md @@ -142,7 +142,7 @@ By default, the Kubernetes API server serves HTTP on 2 ports: - is intended for testing and bootstrap, and for other components of the master node (scheduler, controller-manager) to talk to the API - no TLS - - default is port 8080, change with `--insecure-port` flag. + - default is port 8080 - default IP is localhost, change with `--insecure-bind-address` flag. - request **bypasses** authentication and authorization modules. - request handled by admission control module(s). diff --git a/content/en/docs/concepts/security/overview.md b/content/en/docs/concepts/security/overview.md index b23a07c79a..ce75653dfd 100644 --- a/content/en/docs/concepts/security/overview.md +++ b/content/en/docs/concepts/security/overview.md @@ -2,8 +2,10 @@ reviewers: - zparnold title: Overview of Cloud Native Security +description: > + A model for thinking about Kubernetes security in the context of Cloud Native security. 
content_type: concept -weight: 10 +weight: 1 --- diff --git a/content/en/docs/concepts/security/pod-security-admission.md b/content/en/docs/concepts/security/pod-security-admission.md new file mode 100644 index 0000000000..a1c87767c9 --- /dev/null +++ b/content/en/docs/concepts/security/pod-security-admission.md @@ -0,0 +1,145 @@ +--- +reviewers: +- tallclair +- liggitt +title: Pod Security Admission +description: > + An overview of the Pod Security Admission Controller, which can enforce the Pod Security + Standards. +content_type: concept +weight: 20 +min-kubernetes-server-version: v1.22 +--- + + + +{{< feature-state for_k8s_version="v1.22" state="alpha" >}} + +The Kubernetes [Pod Security Standards](/docs/concepts/security/pod-security-standards/) define +different isolation levels for Pods. These standards let you define how you want to restrict the +behavior of pods in a clear, consistent fashion. + +As an Alpha feature, Kubernetes offers a built-in _Pod Security_ {{< glossary_tooltip +text="admission controller" term_id="admission-controller" >}}, the successor +to [PodSecurityPolicies](/docs/concepts/policy/pod-security-policy/). Pod security restrictions +are applied at the {{< glossary_tooltip text="namespace" term_id="namespace" >}} level when pods +are created. + +{{< note >}} +The PodSecurityPolicy API is deprecated and will be +[removed](/docs/reference/using-api/deprecation-guide/#v1-25) from Kubernetes in v1.25. +{{< /note >}} + + + +## Enabling the Alpha feature + +Setting pod security controls by namespace is an alpha feature. You must enable the `PodSecurity` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) in order to use it. 
+ +```shell +--feature-gates="...,PodSecurity=true" +``` + +## Pod Security levels + +Pod Security admission places requirements on a Pod's [Security +Context](/docs/tasks/configure-pod-container/security-context/) and other related fields according +to the three levels defined by the [Pod Security +Standards](/docs/concepts/security/pod-security-standards): `privileged`, `baseline`, and +`restricted`. Refer to the [Pod Security Standards](/docs/concepts/security/pod-security-standards) +page for an in-depth look at those requirements. + +## Pod Security Admission labels for namespaces + +Provided that you have enabled this feature, you can configure namespaces to define the admission +control mode you want to use for pod security in each namespace. Kubernetes defines a set of +{{< glossary_tooltip term_id="label" text="labels" >}} that you can set to define which of the +predefined Pod Security Standard levels you want to use for a namespace. The label you select +defines what action the {{< glossary_tooltip text="control plane" term_id="control-plane" >}} +takes if a potential violation is detected: + +{{< table caption="Pod Security Admission modes" >}} +Mode | Description +:---------|:------------ +**enforce** | Policy violations will cause the pod to be rejected. +**audit** | Policy violations will trigger the addition of an audit annotation to the event recorded in the [audit log](/docs/tasks/debug-application-cluster/audit/), but are otherwise allowed. +**warn** | Policy violations will trigger a user-facing warning, but are otherwise allowed. +{{< /table >}} + +A namespace can configure any or all modes, or even set a different level for different modes. + +For each mode, there are two labels that determine the policy used: + +```yaml +# The per-mode level label indicates which policy level to apply for the mode. +# +# MODE must be one of `enforce`, `audit`, or `warn`. +# LEVEL must be one of `privileged`, `baseline`, or `restricted`. 
+pod-security.kubernetes.io/<MODE>: <LEVEL>
+
+# Optional: per-mode version label that can be used to pin the policy to the
+# version that shipped with a given Kubernetes minor version (for example v{{< skew latestVersion >}}).
+#
+# MODE must be one of `enforce`, `audit`, or `warn`.
+# VERSION must be a valid Kubernetes minor version, or `latest`.
+pod-security.kubernetes.io/<MODE>-version: <VERSION>
+```
+
+Check out [Enforce Pod Security Standards with Namespace Labels](/docs/tasks/configure-pod-container/enforce-standards-namespace-labels) to see example usage.
+
+## Workload resources and Pod templates
+
+Pods are often created indirectly, by creating a [workload
+object](/docs/concepts/workloads/controllers/) such as a {{< glossary_tooltip
+term_id="deployment" >}} or {{< glossary_tooltip term_id="job">}}. The workload object defines a
+_Pod template_ and a {{< glossary_tooltip term_id="controller" text="controller" >}} for the
+workload resource creates Pods based on that template. To help catch violations early, both the
+audit and warning modes are applied to the workload resources. However, enforce mode is **not**
+applied to workload resources, only to the resulting pod objects.
+
+## Exemptions
+
+You can define _exemptions_ from pod security enforcement in order to allow the creation of pods that
+would have otherwise been prohibited due to the policy associated with a given namespace.
+Exemptions can be statically configured in the
+[Admission Controller configuration](/docs/tasks/configure-pod-container/enforce-standards-admission-controller/#configure-the-admission-controller).
+
+Exemptions must be explicitly enumerated. Requests meeting exemption criteria are _ignored_ by the
+Admission Controller (all `enforce`, `audit` and `warn` behaviors are skipped). Exemption dimensions include:
+
+- **Usernames:** requests from users with an exempt authenticated (or impersonated) username are
+  ignored.
+- **RuntimeClassNames:** pods and [workload resources](#workload-resources-and-pod-templates) specifying an exempt runtime class name are + ignored. +- **Namespaces:** pods and [workload resources](#workload-resources-and-pod-templates) in an exempt namespace are ignored. + +{{< caution >}} + +Most pods are created by a controller in response to a [workload +resource](#workload-resources-and-pod-templates), meaning that exempting an end user will only +exempt them from enforcement when creating pods directly, but not when creating a workload resource. +Controller service accounts (such as `system:serviceaccount:kube-system:replicaset-controller`) +should generally not be exempted, as doing so would implicitly exempt any user that can create the +corresponding workload resource. + +{{< /caution >}} + +Updates to the following pod fields are exempt from policy checks, meaning that if a pod update +request only changes these fields, it will not be denied even if the pod is in violation of the +current policy level: + +- Any metadata updates **except** changes to the seccomp or AppArmor annotations: + - `seccomp.security.alpha.kubernetes.io/pod` (deprecated) + - `container.seccomp.security.alpha.kubernetes.io/*` (deprecated) + - `container.apparmor.security.beta.kubernetes.io/*` +- Valid updates to `.spec.activeDeadlineSeconds` +- Valid updates to `.spec.tolerations` + +## {{% heading "whatsnext" %}} + +- [Pod Security Standards](/docs/concepts/security/pod-security-standards) +- [Enforcing Pod Security Standards](/docs/setup/best-practices/enforcing-pod-security-standards) +- [Enforce Pod Security Standards by Configuring the Built-in Admission Controller](/docs/tasks/configure-pod-container/enforce-standards-admission-controller) +- [Enforce Pod Security Standards with Namespace Labels](/docs/tasks/configure-pod-container/enforce-standards-namespace-labels) +- [Migrate from PodSecurityPolicy to the Built-In PodSecurity Admission 
Controller](/docs/tasks/configure-pod-container/migrate-from-psp) diff --git a/content/en/docs/concepts/security/pod-security-standards.md b/content/en/docs/concepts/security/pod-security-standards.md index 32635d6747..f3b43344bf 100644 --- a/content/en/docs/concepts/security/pod-security-standards.md +++ b/content/en/docs/concepts/security/pod-security-standards.md @@ -2,59 +2,52 @@ reviewers: - tallclair title: Pod Security Standards +description: > + A detailed look at the different policy levels defined in the Pod Security Standards. content_type: concept weight: 10 --- -Security settings for Pods are typically applied by using [security -contexts](/docs/tasks/configure-pod-container/security-context/). Security Contexts allow for the -definition of privilege and access controls on a per-Pod basis. - -The enforcement and policy-based definition of cluster requirements of security contexts has -previously been achieved using [Pod Security Policy](/docs/concepts/policy/pod-security-policy/). A -_Pod Security Policy_ is a cluster-level resource that controls security sensitive aspects of the -Pod specification. - -However, numerous means of policy enforcement have arisen that augment or replace the use of -PodSecurityPolicy. The intent of this page is to detail recommended Pod security profiles, decoupled -from any specific instantiation. - +The Pod Security Standards define three different _policies_ to broadly cover the security +spectrum. These policies are _cumulative_ and range from highly-permissive to highly-restrictive. +This guide outlines the requirements of each policy. +| Profile | Description | +| ------ | ----------- | +| Privileged | Unrestricted policy, providing the widest possible level of permissions. This policy allows for known privilege escalations. | +| Baseline | Minimally restrictive policy which prevents known privilege escalations. Allows the default (minimally specified) Pod configuration. 
| +| Restricted | Heavily restricted policy, following current Pod hardening best practices. | -## Policy Types - -There is an immediate need for base policy definitions to broadly cover the security spectrum. These -should range from highly restricted to highly flexible: - -- **_Privileged_** - Unrestricted policy, providing the widest possible level of permissions. This - policy allows for known privilege escalations. -- **_Baseline_** - Minimally restrictive policy while preventing known privilege - escalations. Allows the default (minimally specified) Pod configuration. -- **_Restricted_** - Heavily restricted policy, following current Pod hardening best practices. - -## Policies +## Profile Details ### Privileged -The Privileged policy is purposely-open, and entirely unrestricted. This type of policy is typically -aimed at system- and infrastructure-level workloads managed by privileged, trusted users. +**The _Privileged_ policy is purposely-open, and entirely unrestricted.** This type of policy is +typically aimed at system- and infrastructure-level workloads managed by privileged, trusted users. -The privileged policy is defined by an absence of restrictions. For allow-by-default enforcement -mechanisms (such as gatekeeper), the privileged profile may be an absence of applied constraints -rather than an instantiated policy. In contrast, for a deny-by-default mechanism (such as Pod -Security Policy) the privileged policy should enable all controls (disable all restrictions). +The Privileged policy is defined by an absence of restrictions. For allow-by-default enforcement +mechanisms (such as gatekeeper), the Privileged policy may be an absence of applied constraints +rather than an instantiated profile. In contrast, for a deny-by-default mechanism (such as Pod +Security Policy) the Privileged policy should enable all controls (disable all restrictions). 
### Baseline -The Baseline policy is aimed at ease of adoption for common containerized workloads while -preventing known privilege escalations. This policy is targeted at application operators and +**The _Baseline_ policy is aimed at ease of adoption for common containerized workloads while +preventing known privilege escalations.** This policy is targeted at application operators and developers of non-critical applications. The following listed controls should be enforced/disallowed: +{{< note >}} +In this table, wildcards (`*`) indicate all elements in a list. For example, +`spec.containers[*].securityContext` refers to the Security Context object for _all defined +containers_. If any of the listed containers fails to meet the requirements, the entire pod will +fail validation. +{{< /note >}} + @@ -63,112 +56,223 @@ enforced/disallowed: - + - + - + - + - + - + - + - + - + + + + + + + + + @@ -176,10 +280,17 @@ enforced/disallowed: ### Restricted -The Restricted policy is aimed at enforcing current Pod hardening best practices, at the expense of -some compatibility. It is targeted at operators and developers of security-critical applications, as -well as lower-trust users.The following listed controls should be enforced/disallowed: +**The _Restricted_ policy is aimed at enforcing current Pod hardening best practices, at the +expense of some compatibility.** It is targeted at operators and developers of security-critical +applications, as well as lower-trust users. The following listed controls should be +enforced/disallowed: +{{< note >}} +In this table, wildcards (`*`) indicate all elements in a list. For example, +`spec.containers[*].securityContext` refers to the Security Context object for _all defined +containers_. If any of the listed containers fails to meet the requirements, the entire pod will +fail validation. +{{< /note >}}
Baseline policy specification
Policy
Host NamespacesHostProcess - Sharing the host namespaces must be disallowed.
-
Restricted Fields:
- spec.hostNetwork
- spec.hostPID
- spec.hostIPC
-
Allowed Values: false
+

Windows pods offer the ability to run HostProcess containers which enables privileged access to the Windows node. Privileged access to the host is disallowed in the baseline policy. HostProcess pods are an alpha feature as of Kubernetes v1.22.

+

Restricted Fields

+
    +
  • spec.securityContext.windowsOptions.hostProcess
  • +
  • spec.containers[*].securityContext.windowsOptions.hostProcess
  • +
  • spec.initContainers[*].securityContext.windowsOptions.hostProcess
  • +
  • spec.ephemeralContainers[*].securityContext.windowsOptions.hostProcess
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
  • false
  • +
Privileged ContainersHost Namespaces - Privileged Pods disable most security mechanisms and must be disallowed.
-
Restricted Fields:
- spec.containers[*].securityContext.privileged
- spec.initContainers[*].securityContext.privileged
-
Allowed Values: false, undefined/nil
+

Sharing the host namespaces must be disallowed.

+

Restricted Fields

+
    +
  • spec.hostNetwork
  • +
  • spec.hostPID
  • +
  • spec.hostIPC
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
  • false
  • +
CapabilitiesPrivileged Containers - Adding additional capabilities beyond the default set must be disallowed.
-
Restricted Fields:
- spec.containers[*].securityContext.capabilities.add
- spec.initContainers[*].securityContext.capabilities.add
-
Allowed Values: empty (or restricted to a known list)
+

Privileged Pods disable most security mechanisms and must be disallowed.

+

Restricted Fields

+
    +
  • spec.containers[*].securityContext.privileged
  • +
  • spec.initContainers[*].securityContext.privileged
  • +
  • spec.ephemeralContainers[*].securityContext.privileged
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
  • false
  • +
HostPath VolumesCapabilities - HostPath volumes must be forbidden.
-
Restricted Fields:
- spec.volumes[*].hostPath
-
Allowed Values: undefined/nil
+

Adding additional capabilities beyond those listed below must be disallowed.

+

Restricted Fields

+
    +
  • spec.containers[*].securityContext.capabilities.add
  • +
  • spec.initContainers[*].securityContext.capabilities.add
  • +
  • spec.ephemeralContainers[*].securityContext.capabilities.add
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
  • AUDIT_WRITE
  • +
  • CHOWN
  • +
  • DAC_OVERRIDE
  • +
  • FOWNER
  • +
  • FSETID
  • +
  • KILL
  • +
  • MKNOD
  • +
  • NET_BIND_SERVICE
  • +
  • SETFCAP
  • +
  • SETGID
  • +
  • SETPCAP
  • +
  • SETUID
  • +
  • SYS_CHROOT
  • +
Host PortsHostPath Volumes - HostPorts should be disallowed, or at minimum restricted to a known list.
-
Restricted Fields:
- spec.containers[*].ports[*].hostPort
- spec.initContainers[*].ports[*].hostPort
-
Allowed Values: 0, undefined (or restricted to a known list)
+

HostPath volumes must be forbidden.

+

Restricted Fields

+
    +
  • spec.volumes[*].hostPath
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
AppArmorHost Ports - On supported hosts, the 'runtime/default' AppArmor profile is applied by default. - The baseline policy should prevent overriding or disabling the default AppArmor - profile, or restrict overrides to an allowed set of profiles.
-
Restricted Fields:
- metadata.annotations['container.apparmor.security.beta.kubernetes.io/*']
-
Allowed Values: 'runtime/default', undefined
+

HostPorts should be disallowed, or at minimum restricted to a known list.

+

Restricted Fields

+
    +
  • spec.containers[*].ports[*].hostPort
  • +
  • spec.initContainers[*].ports[*].hostPort
  • +
  • spec.ephemeralContainers[*].ports[*].hostPort
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
  • Known list
  • +
  • 0
  • +
SELinuxAppArmor - Setting the SELinux type is restricted, and setting a custom SELinux user or role option is forbidden.
-
Restricted Fields:
- spec.securityContext.seLinuxOptions.type
- spec.containers[*].securityContext.seLinuxOptions.type
- spec.initContainers[*].securityContext.seLinuxOptions.type
-
Allowed Values:
- undefined/empty
- container_t
- container_init_t
- container_kvm_t
-
Restricted Fields:
- spec.securityContext.seLinuxOptions.user
- spec.containers[*].securityContext.seLinuxOptions.user
- spec.initContainers[*].securityContext.seLinuxOptions.user
- spec.securityContext.seLinuxOptions.role
- spec.containers[*].securityContext.seLinuxOptions.role
- spec.initContainers[*].securityContext.seLinuxOptions.role
-
Allowed Values: undefined/empty
+

On supported hosts, the runtime/default AppArmor profile is applied by default. The baseline policy should prevent overriding or disabling the default AppArmor profile, or restrict overrides to an allowed set of profiles.

+

Restricted Fields

+
    +
  • metadata.annotations["container.apparmor.security.beta.kubernetes.io/*"]
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
  • runtime/default
  • +
  • localhost/*
  • +
/proc Mount TypeSELinux - The default /proc masks are set up to reduce attack surface, and should be required.
-
Restricted Fields:
- spec.containers[*].securityContext.procMount
- spec.initContainers[*].securityContext.procMount
-
Allowed Values: undefined/nil, 'Default'
+

Setting the SELinux type is restricted, and setting a custom SELinux user or role option is forbidden.

+

Restricted Fields

+
    +
  • spec.securityContext.seLinuxOptions.type
  • +
  • spec.containers[*].securityContext.seLinuxOptions.type
  • +
  • spec.initContainers[*].securityContext.seLinuxOptions.type
  • +
  • spec.ephemeralContainers[*].securityContext.seLinuxOptions.type
  • +
+

Allowed Values

+
    +
  • Undefined/""
  • +
  • container_t
  • +
  • container_init_t
  • +
  • container_kvm_t
  • +
+
+

Restricted Fields

+
    +
  • spec.securityContext.seLinuxOptions.user
  • +
  • spec.containers[*].securityContext.seLinuxOptions.user
  • +
  • spec.initContainers[*].securityContext.seLinuxOptions.user
  • +
  • spec.ephemeralContainers[*].securityContext.seLinuxOptions.user
  • +
  • spec.securityContext.seLinuxOptions.role
  • +
  • spec.containers[*].securityContext.seLinuxOptions.role
  • +
  • spec.initContainers[*].securityContext.seLinuxOptions.role
  • +
  • spec.ephemeralContainers[*].securityContext.seLinuxOptions.role
  • +
+

Allowed Values

+
    +
  • Undefined/""
  • +
Sysctls/proc Mount Type - Sysctls can disable security mechanisms or affect all containers on a host, and should be disallowed except for an allowed "safe" subset. - A sysctl is considered safe if it is namespaced in the container or the Pod, and it is isolated from other Pods or processes on the same Node.
-
Restricted Fields:
- spec.securityContext.sysctls
-
Allowed Values:
- kernel.shm_rmid_forced
- net.ipv4.ip_local_port_range
- net.ipv4.tcp_syncookies
- net.ipv4.ping_group_range
- undefined/empty
+

The default /proc masks are set up to reduce attack surface, and should be required.

+

Restricted Fields

+
    +
  • spec.containers[*].securityContext.procMount
  • +
  • spec.initContainers[*].securityContext.procMount
  • +
  • spec.ephemeralContainers[*].securityContext.procMount
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
  • Default
  • +
+
Seccomp +

Seccomp profile must not be explicitly set to Unconfined.

+

Restricted Fields

+
    +
  • spec.securityContext.seccompProfile.type
  • +
  • spec.containers[*].securityContext.seccompProfile.type
  • +
  • spec.initContainers[*].securityContext.seccompProfile.type
  • +
  • spec.ephemeralContainers[*].securityContext.seccompProfile.type
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
  • RuntimeDefault
  • +
  • Localhost
  • +
+
Sysctls +

Sysctls can disable security mechanisms or affect all containers on a host, and should be disallowed except for an allowed "safe" subset. A sysctl is considered safe if it is namespaced in the container or the Pod, and it is isolated from other Pods or processes on the same Node.

+

Restricted Fields

+
    +
  • spec.securityContext.sysctls[*].name
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
  • kernel.shm_rmid_forced
  • +
  • net.ipv4.ip_local_port_range
  • +
  • net.ipv4.ip_unprivileged_port_start
  • +
  • net.ipv4.tcp_syncookies
  • +
  • net.ipv4.ping_group_range
  • +
@@ -192,81 +303,149 @@ well as lower-trust users.The following listed controls should be enforced/disal - + - + - + - + - + + + + + @@ -281,11 +460,17 @@ mechanism. As mechanisms mature, they will be defined below on a per-policy basis. The methods of enforcement of individual policies are not defined here. -[**PodSecurityPolicy**](/docs/concepts/policy/pod-security-policy/) +[**Pod Security Admission Controller**](/docs/concepts/security/pod-security-admission/) -- [Privileged](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/privileged-psp.yaml) -- [Baseline](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/baseline-psp.yaml) -- [Restricted](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/restricted-psp.yaml) +- {{< example file="security/podsecurity-privileged.yaml" >}}Privileged namespace{{< /example >}} +- {{< example file="security/podsecurity-baseline.yaml" >}}Baseline namespace{{< /example >}} +- {{< example file="security/podsecurity-restricted.yaml" >}}Restricted namespace{{< /example >}} + +[**PodSecurityPolicy**](/docs/concepts/profile/pod-security-profile/) (Deprecated) + +- {{< example file="policy/privileged-psp.yaml" >}}Privileged{{< /example >}} +- {{< example file="policy/baseline-psp.yaml" >}}Baseline{{< /example >}} +- {{< example file="policy/restricted-psp.yaml" >}}Restricted{{< /example >}} ## FAQ @@ -299,26 +484,40 @@ policies in this space need to be defined on a case-by-case basis. SIG Auth may reconsider this position in the future, should a clear need for other profiles arise. -### What's the difference between a security policy and a security context? +### What's the difference between a security profile and a security context? [Security Contexts](/docs/tasks/configure-pod-container/security-context/) configure Pods and Containers at runtime. 
Security contexts are defined as part of the Pod and container specifications in the Pod manifest, and represent parameters to the container runtime. -Security policies are control plane mechanisms to enforce specific settings in the Security Context, -as well as other parameters outside the Security Context. As of February 2020, the current native -solution for enforcing these security policies is [Pod Security -Policy](/docs/concepts/policy/pod-security-policy/) - a mechanism for centrally enforcing security -policy on Pods across a cluster. Other alternatives for enforcing security policy are being -developed in the Kubernetes ecosystem, such as [OPA -Gatekeeper](https://github.com/open-policy-agent/gatekeeper). +Security profiles are control plane mechanisms to enforce specific settings in the Security Context, +as well as other related parameters outside the Security Context. As of July 2021, +[Pod Security Policies](/docs/concepts/profile/pod-security-profile/) are deprecated in favor of the +built-in [Pod Security Admission Controller](/docs/concepts/security/pod-security-admission/). + +{{% thirdparty-content %}} + +Other alternatives for enforcing security profiles are being developed in the Kubernetes +ecosystem, such as: +- [OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper). +- [Kubewarden](https://github.com/kubewarden). +- [Kyverno](https://kyverno.io/policies/pod-security/). ### What profiles should I apply to my Windows Pods? Windows in Kubernetes has some limitations and differentiators from standard Linux-based -workloads. Specifically, the Pod SecurityContext fields [have no effect on +workloads. Specifically, many of the Pod SecurityContext fields [have no effect on Windows](/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#v1-podsecuritycontext). As -such, no standardized Pod Security profiles currently exists. +such, no standardized Pod Security profiles currently exist. 
+ +If you apply the restricted profile for a Windows pod, this **may** have an impact on the pod +at runtime. The restricted profile requires enforcing Linux-specific restrictions (such as seccomp +profile, and disallowing privilege escalation). If the kubelet and / or its container runtime ignore +these Linux-specific values, then the Windows pod should still work normally within the restricted +profile. However, the lack of enforcement means that there is no additional restriction, for Pods +that use Windows containers, compared to the baseline profile. + +The use of the HostProcess flag to create a HostProcess pod should only be done in alignment with the privileged policy. Creation of a Windows HostProcess pod is blocked under the baseline and restricted policies, so any HostProcess pod should be considered privileged. ### What about sandboxed Pods? @@ -331,6 +530,4 @@ restrict privileged permissions is lessened when the workload is isolated from t kernel. This allows for workloads requiring heightened permissions to still be isolated. Additionally, the protection of sandboxed workloads is highly dependent on the method of -sandboxing. As such, no single recommended policy is recommended for all sandboxed workloads. - - +sandboxing. As such, no single recommended profile is recommended for all sandboxed workloads. 
diff --git a/content/en/docs/concepts/services-networking/_index.md b/content/en/docs/concepts/services-networking/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/concepts/services-networking/connect-applications-service.md b/content/en/docs/concepts/services-networking/connect-applications-service.md index 14bc98101f..89d2daddb2 100644 --- a/content/en/docs/concepts/services-networking/connect-applications-service.md +++ b/content/en/docs/concepts/services-networking/connect-applications-service.md @@ -133,7 +133,7 @@ about the [service proxy](/docs/concepts/services-networking/service/#virtual-ip Kubernetes supports 2 primary modes of finding a Service - environment variables and DNS. The former works out of the box while the latter requires the -[CoreDNS cluster addon](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/coredns). +[CoreDNS cluster addon](https://releases.k8s.io/{{< param "fullversion" >}}/cluster/addons/dns/coredns). {{< note >}} If the service environment variables are not desired (because possible clashing with expected program ones, too many variables to process, only using DNS, etc) you can disable this mode by setting the `enableServiceLinks` @@ -231,7 +231,7 @@ Till now we have only accessed the nginx server from within the cluster. Before * An nginx server configured to use the certificates * A [secret](/docs/concepts/configuration/secret/) that makes the certificates accessible to pods -You can acquire all these from the [nginx https example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/https-nginx/). This requires having go and make tools installed. If you don't want to install those, then follow the manual steps later. In short: +You can acquire all these from the [nginx https example](https://github.com/kubernetes/examples/tree/master/staging/https-nginx/). This requires having go and make tools installed. 
If you don't want to install those, then follow the manual steps later. In short: ```shell make keys KEY=/tmp/nginx.key CERT=/tmp/nginx.crt @@ -303,7 +303,7 @@ Now modify your nginx replicas to start an https server using the certificate in Noteworthy points about the nginx-secure-app manifest: - It contains both Deployment and Service specification in the same file. -- The [nginx server](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/https-nginx/default.conf) +- The [nginx server](https://github.com/kubernetes/examples/tree/master/staging/https-nginx/default.conf) serves HTTP traffic on port 80 and HTTPS traffic on 443, and nginx Service exposes both ports. - Each container has access to the keys through a volume mounted at `/etc/nginx/ssl`. diff --git a/content/en/docs/concepts/services-networking/dns-pod-service.md b/content/en/docs/concepts/services-networking/dns-pod-service.md index 2888064c2e..f43eeff22b 100644 --- a/content/en/docs/concepts/services-networking/dns-pod-service.md +++ b/content/en/docs/concepts/services-networking/dns-pod-service.md @@ -7,6 +7,7 @@ content_type: concept weight: 20 --- + Kubernetes creates DNS records for services and pods. You can contact services with consistent DNS names instead of IP addresses. @@ -49,7 +50,7 @@ options ndots:5 ``` In summary, a pod in the _test_ namespace can successfully resolve either -`data.prod` or `data.prod.cluster.local`. +`data.prod` or `data.prod.svc.cluster.local`. ### DNS Records @@ -196,7 +197,7 @@ record unless `publishNotReadyAddresses=True` is set on the Service. ### Pod's setHostnameAsFQDN field {#pod-sethostnameasfqdn-field} -{{< feature-state for_k8s_version="v1.20" state="beta" >}} +{{< feature-state for_k8s_version="v1.22" state="stable" >}} When a Pod is configured to have fully qualified domain name (FQDN), its hostname is the short hostname. 
For example, if you have a Pod with the fully qualified domain name `busybox-1.default-subdomain.my-namespace.svc.cluster-domain.example`, then by default the `hostname` command inside that Pod returns `busybox-1` and the `hostname --fqdn` command returns the FQDN. @@ -261,6 +262,8 @@ spec: ### Pod's DNS Config {#pod-dns-config} +{{< feature-state for_k8s_version="v1.14" state="stable" >}} + Pod's DNS Config allows users more control on the DNS settings for a Pod. The `dnsConfig` field is optional and it can work with any `dnsPolicy` settings. @@ -310,6 +313,17 @@ search default.svc.cluster-domain.example svc.cluster-domain.example cluster-dom options ndots:5 ``` +#### Expanded DNS Configuration + +{{< feature-state for_k8s_version="1.22" state="alpha" >}} + +By default, for Pod's DNS Config, Kubernetes allows at most 6 search domains and +a list of search domains of up to 256 characters. + +If the feature gate `ExpandedDNSConfig` is enabled for the kube-apiserver and +the kubelet, it is allowed for Kubernetes to have at most 32 search domains and +a list of search domains of up to 2048 characters. + ### Feature availability The availability of Pod DNS Config and DNS Policy "`None`" is shown as below. @@ -321,7 +335,6 @@ The availability of Pod DNS Config and DNS Policy "`None`" is shown as below. | 1.9 | Alpha | - ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/concepts/services-networking/endpoint-slices.md b/content/en/docs/concepts/services-networking/endpoint-slices.md index fdcbd0ed50..da8d09d9d5 100644 --- a/content/en/docs/concepts/services-networking/endpoint-slices.md +++ b/content/en/docs/concepts/services-networking/endpoint-slices.md @@ -249,5 +249,4 @@ implementation in `kube-proxy`. 
## {{% heading "whatsnext" %}} -* Learn about [Enabling EndpointSlices](/docs/tasks/administer-cluster/enabling-endpointslices) * Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) diff --git a/content/en/docs/concepts/services-networking/ingress-controllers.md b/content/en/docs/concepts/services-networking/ingress-controllers.md index d0405a060d..0ee1d53ef9 100644 --- a/content/en/docs/concepts/services-networking/ingress-controllers.md +++ b/content/en/docs/concepts/services-networking/ingress-controllers.md @@ -32,6 +32,7 @@ Kubernetes as a project supports and maintains [AWS](https://github.com/kubernet Citrix Application Delivery Controller. * [Contour](https://projectcontour.io/) is an [Envoy](https://www.envoyproxy.io/) based ingress controller. * [EnRoute](https://getenroute.io/) is an [Envoy](https://www.envoyproxy.io) based API gateway that can run as an ingress controller. +* [Easegress IngressController](https://github.com/megaease/easegress/blob/main/doc/ingresscontroller.md) is an [Easegress](https://megaease.com/easegress/) based API gateway that can run as an ingress controller. * F5 BIG-IP [Container Ingress Services for Kubernetes](https://clouddocs.f5.com/containers/latest/userguide/kubernetes/) lets you use an Ingress to configure F5 BIG-IP virtual servers. * [Gloo](https://gloo.solo.io) is an open-source ingress controller based on [Envoy](https://www.envoyproxy.io), diff --git a/content/en/docs/concepts/services-networking/ingress.md b/content/en/docs/concepts/services-networking/ingress.md index de4e665af1..6879b998db 100644 --- a/content/en/docs/concepts/services-networking/ingress.md +++ b/content/en/docs/concepts/services-networking/ingress.md @@ -224,7 +224,7 @@ reference additional implementation-specific configuration for this class. 
#### Namespace-scoped parameters -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} +{{< feature-state for_k8s_version="v1.22" state="beta" >}} `Parameters` field has a `scope` and `namespace` field that can be used to reference a namespace-specific resource for configuration of an Ingress class. @@ -232,6 +232,11 @@ reference a namespace-specific resource for configuration of an Ingress class. resource. Setting `Scope` to `Namespace` and setting the `Namespace` field will reference a parameters resource in a specific namespace: +Namespace-scoped parameters avoid the need for a cluster-scoped CustomResourceDefinition +for a parameters resource. This further avoids RBAC-related resources +that would otherwise be required to grant permissions to cluster-scoped +resources. + {{< codenew file="service/networking/namespaced-params.yaml" >}} ### Deprecated annotation diff --git a/content/en/docs/concepts/services-networking/network-policies.md b/content/en/docs/concepts/services-networking/network-policies.md index 2c9ed4a90e..e3f3a203b7 100644 --- a/content/en/docs/concepts/services-networking/network-policies.md +++ b/content/en/docs/concepts/services-networking/network-policies.md @@ -154,6 +154,7 @@ contains two elements in the `from` array, and allows connections from Pods in t When in doubt, use `kubectl describe` to see how Kubernetes has interpreted the policy. + __ipBlock__: This selects particular IP CIDR ranges to allow as ingress sources or egress destinations. These should be cluster-external IPs, since Pod IPs are ephemeral and unpredictable. 
Cluster ingress and egress mechanisms often require rewriting the source or destination IP @@ -223,7 +224,7 @@ You must be using a {{< glossary_tooltip text="CNI" term_id="cni" >}} plugin tha ## Targeting a range of Ports -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} +{{< feature-state for_k8s_version="v1.22" state="beta" >}} When writing a NetworkPolicy, you can target a range of ports instead of a single port. @@ -251,10 +252,15 @@ spec: endPort: 32768 ``` -The above rule allows any Pod with label `db` on the namespace `default` to communicate with any IP within the range `10.0.0.0/24` over TCP, provided that the target port is between the range 32000 and 32768. +The above rule allows any Pod with label `role=db` on the namespace `default` to communicate +with any IP within the range `10.0.0.0/24` over TCP, provided that the target +port is between the range 32000 and 32768. The following restrictions apply when using this field: -* As an alpha feature, this is disabled by default. To enable the `endPort` field at a cluster level, you (or your cluster administrator) need to enable the `NetworkPolicyEndPort` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for the API server with `--feature-gates=NetworkPolicyEndPort=true,…`. +* As a beta feature, this is enabled by default. To disable the `endPort` field +at a cluster level, you (or your cluster administrator) need to disable the +`NetworkPolicyEndPort` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +for the API server with `--feature-gates=NetworkPolicyEndPort=false,…`. * The `endPort` field must be equal than or greater to the `port` field. * `endPort` can only be defined if `port` is also defined. * Both ports must be numeric. 
@@ -262,6 +268,9 @@ The following restrictions apply when using this field: {{< note >}} Your cluster must be using a {{< glossary_tooltip text="CNI" term_id="cni" >}} plugin that supports the `endPort` field in NetworkPolicy specifications. +If your [network plugin](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) +does not support the `endPort` field and you specify a NetworkPolicy with that, +the policy will be applied only for the single `port` field. {{< /note >}} ## Targeting a Namespace by its name diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md index 2c9e6e8996..ee1c7514da 100644 --- a/content/en/docs/concepts/services-networking/service.md +++ b/content/en/docs/concepts/services-networking/service.md @@ -72,7 +72,7 @@ A Service in Kubernetes is a REST object, similar to a Pod. Like all of the REST objects, you can `POST` a Service definition to the API server to create a new instance. The name of a Service object must be a valid -[DNS label name](/docs/concepts/overview/working-with-objects/names#dns-label-names). +[RFC 1035 label name](/docs/concepts/overview/working-with-objects/names#rfc-1035-label-names). For example, suppose you have a set of Pods where each listens on TCP port 9376 and contains a label `app=MyApp`: @@ -188,9 +188,10 @@ selectors and uses DNS names instead. For more information, see the [ExternalName](#externalname) section later in this document. ### Over Capacity Endpoints -If an Endpoints resource has more than 1000 endpoints then a Kubernetes v1.21 (or later) -cluster annotates that Endpoints with `endpoints.kubernetes.io/over-capacity: warning`. -This annotation indicates that the affected Endpoints object is over capacity. +If an Endpoints resource has more than 1000 endpoints then a Kubernetes v1.22 (or later) +cluster annotates that Endpoints with `endpoints.kubernetes.io/over-capacity: truncated`. 
+This annotation indicates that the affected Endpoints object is over capacity and that +the endpoints controller has truncated the number of endpoints to 1000. ### EndpointSlices @@ -215,7 +216,7 @@ each Service port. The value of this field is mirrored by the corresponding Endpoints and EndpointSlice objects. This field follows standard Kubernetes label syntax. Values should either be -[IANA standard service names](http://www.iana.org/assignments/service-names) or +[IANA standard service names](https://www.iana.org/assignments/service-names) or domain prefixed names such as `mycompany.com/my-custom-protocol`. ## Virtual IPs and service proxies @@ -241,9 +242,25 @@ There are a few reasons for using proxying for Services: on the DNS records could impose a high load on DNS that then becomes difficult to manage. +Later in this page you can read about various kube-proxy implementations work. Overall, +you should note that, when running `kube-proxy`, kernel level rules may be +modified (for example, iptables rules might get created), which won't get cleaned up, +in some cases until you reboot. Thus, running kube-proxy is something that should +only be done by an administrator which understands the consequences of having a +low level, privileged network proxying service on a computer. Although the `kube-proxy` +executable supports a `cleanup` function, this function is not an official feature and +thus is only available to use as-is. + +### Configuration + +Note that the kube-proxy starts up in different modes, which are determined by its configuration. +- The kube-proxy's configuration is done via a ConfigMap, and the ConfigMap for kube-proxy effectively deprecates the behaviour for almost all of the flags for the kube-proxy. +- The ConfigMap for the kube-proxy does not support live reloading of configuration. +- The ConfigMap parameters for the kube-proxy cannot all be validated and verified on startup. 
For example, if your operating system doesn't allow you to run iptables commands, the standard kernel kube-proxy implementation will not work. Likewise, if you have an operating system which doesn't support `netsh`, it will not run in Windows userspace mode. + ### User space proxy mode {#proxy-mode-userspace} -In this mode, kube-proxy watches the Kubernetes control plane for the addition and +In this (legacy) mode, kube-proxy watches the Kubernetes control plane for the addition and removal of Service and Endpoint objects. For each Service it opens a port (randomly chosen) on the local node. Any connections to this "proxy port" are proxied to one of the Service's backend Pods (as reported via @@ -384,6 +401,40 @@ The IP address that you choose must be a valid IPv4 or IPv6 address from within If you try to create a Service with an invalid clusterIP address value, the API server will return a 422 HTTP status code to indicate that there's a problem. +## Traffic policies + +### External traffic policy + +You can set the `spec.externalTrafficPolicy` field to control how traffic from external sources is routed. +Valid values are `Cluster` and `Local`. Set the field to `Cluster` to route external traffic to all ready endpoints +and `Local` to only route to ready node-local endpoints. If the traffic policy is `Local` and there are are no node-local +endpoints, the kube-proxy does not forward any traffic for the relevant Service. + +{{< note >}} +{{< feature-state for_k8s_version="v1.22" state="alpha" >}} +If you enable the `ProxyTerminatingEndpoints` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +`ProxyTerminatingEndpoints` for the kube-proxy, the kube-proxy checks if the node +has local endpoints and whether or not all the local endpoints are marked as terminating. +If there are local endpoints and **all** of those are terminating, then the kube-proxy ignores +any external traffic policy of `Local`. 
Instead, whilst the node-local endpoints remain as all +terminating, the kube-proxy forwards traffic for that Service to healthy endpoints elsewhere, +as if the external traffic policy were set to `Cluster`. +This forwarding behavior for terminating endpoints exists to allow external load balancers to +gracefully drain connections that are backed by `NodePort` Services, even when the health check +node port starts to fail. Otherwise, traffic can be lost between the time a node is still in the node pool of a load +balancer and traffic is being dropped during the termination period of a pod. +{{< /note >}} + +### Internal traffic policy + +{{< feature-state for_k8s_version="v1.22" state="beta" >}} + +You can set the `spec.internalTrafficPolicy` field to control how traffic from internal sources is routed. +Valid values are `Cluster` and `Local`. Set the field to `Cluster` to route internal traffic to all ready endpoints +and `Local` to only route to ready node-local endpoints. If the traffic policy is `Local` and there are no node-local +endpoints, traffic is dropped by kube-proxy. + ## Discovering services Kubernetes supports 2 primary modes of finding a Service - environment @@ -394,7 +445,7 @@ variables and DNS. When a Pod is run on a Node, the kubelet adds a set of environment variables for each active Service. It supports both [Docker links compatible](https://docs.docker.com/userguide/dockerlinks/) variables (see -[makeLinkVariables](https://releases.k8s.io/{{< param "githubbranch" >}}/pkg/kubelet/envvars/envvars.go#L49)) +[makeLinkVariables](https://releases.k8s.io/{{< param "fullversion" >}}/pkg/kubelet/envvars/envvars.go#L49)) and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, where the Service name is upper-cased and dashes are converted to underscores. @@ -642,12 +693,12 @@ You must enable the `ServiceLBNodePortControl` feature gate to use this field. 
#### Specifying class of load balancer implementation {#load-balancer-class} -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} +{{< feature-state for_k8s_version="v1.22" state="beta" >}} -Starting in v1.21, you can optionally specify the class of a load balancer implementation for -`LoadBalancer` type of Service by setting the field `spec.loadBalancerClass`. +`spec.loadBalancerClass` enables you to use a load balancer implementation other than the cloud provider default. This feature is available from v1.21, you must enable the `ServiceLoadBalancerClass` feature gate to use this field in v1.21, and the feature gate is enabled by default from v1.22 onwards. By default, `spec.loadBalancerClass` is `nil` and a `LoadBalancer` type of Service uses -the cloud provider's default load balancer implementation. +the cloud provider's default load balancer implementation if the cluster is configured with +a cloud provider using the `--cloud-provider` component flag. If `spec.loadBalancerClass` is specified, it is assumed that a load balancer implementation that matches the specified class is watching for Services. Any default load balancer implementation (for example, the one provided by @@ -657,7 +708,6 @@ Once set, it cannot be changed. The value of `spec.loadBalancerClass` must be a label-style identifier, with an optional prefix such as "`internal-vip`" or "`example.com/internal-vip`". Unprefixed names are reserved for end-users. -You must enable the `ServiceLoadBalancerClass` feature gate to use this field. 
#### Internal load balancer diff --git a/content/en/docs/concepts/storage/_index.md b/content/en/docs/concepts/storage/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/concepts/storage/persistent-volumes.md b/content/en/docs/concepts/storage/persistent-volumes.md index f45d17ff54..a029ceaeda 100644 --- a/content/en/docs/concepts/storage/persistent-volumes.md +++ b/content/en/docs/concepts/storage/persistent-volumes.md @@ -314,12 +314,9 @@ PersistentVolume types are implemented as plugins. Kubernetes currently supports * [`azureDisk`](/docs/concepts/storage/volumes/#azuredisk) - Azure Disk * [`azureFile`](/docs/concepts/storage/volumes/#azurefile) - Azure File * [`cephfs`](/docs/concepts/storage/volumes/#cephfs) - CephFS volume -* [`cinder`](/docs/concepts/storage/volumes/#cinder) - Cinder (OpenStack block storage) - (**deprecated**) * [`csi`](/docs/concepts/storage/volumes/#csi) - Container Storage Interface (CSI) * [`fc`](/docs/concepts/storage/volumes/#fc) - Fibre Channel (FC) storage * [`flexVolume`](/docs/concepts/storage/volumes/#flexVolume) - FlexVolume -* [`flocker`](/docs/concepts/storage/volumes/#flocker) - Flocker storage * [`gcePersistentDisk`](/docs/concepts/storage/volumes/#gcepersistentdisk) - GCE Persistent Disk * [`glusterfs`](/docs/concepts/storage/volumes/#glusterfs) - Glusterfs volume * [`hostPath`](/docs/concepts/storage/volumes/#hostpath) - HostPath volume @@ -329,17 +326,28 @@ PersistentVolume types are implemented as plugins. Kubernetes currently supports * [`local`](/docs/concepts/storage/volumes/#local) - local storage devices mounted on nodes. * [`nfs`](/docs/concepts/storage/volumes/#nfs) - Network File System (NFS) storage -* `photonPersistentDisk` - Photon controller persistent disk. - (This volume type no longer works since the removal of the corresponding - cloud provider.) 
* [`portworxVolume`](/docs/concepts/storage/volumes/#portworxvolume) - Portworx volume -* [`quobyte`](/docs/concepts/storage/volumes/#quobyte) - Quobyte volume * [`rbd`](/docs/concepts/storage/volumes/#rbd) - Rados Block Device (RBD) volume -* [`scaleIO`](/docs/concepts/storage/volumes/#scaleio) - ScaleIO volume - (**deprecated**) -* [`storageos`](/docs/concepts/storage/volumes/#storageos) - StorageOS volume * [`vsphereVolume`](/docs/concepts/storage/volumes/#vspherevolume) - vSphere VMDK volume +The following types of PersistentVolume are deprecated. This means that support is still available but will be removed in a future Kubernetes release. + +* [`cinder`](/docs/concepts/storage/volumes/#cinder) - Cinder (OpenStack block storage) + (**deprecated** in v1.18) +* [`flocker`](/docs/concepts/storage/volumes/#flocker) - Flocker storage + (**deprecated** in v1.22) +* [`quobyte`](/docs/concepts/storage/volumes/#quobyte) - Quobyte volume + (**deprecated** in v1.22) +* [`storageos`](/docs/concepts/storage/volumes/#storageos) - StorageOS volume + (**deprecated** in v1.22) + +Older versions of Kubernetes also supported the following in-tree PersistentVolume types: + +* `photonPersistentDisk` - Photon controller persistent disk. + (**not available** after v1.15) +* [`scaleIO`](/docs/concepts/storage/volumes/#scaleio) - ScaleIO volume + (**not available** after v1.21) + ## Persistent Volumes Each PV contains a spec and status, which is the specification and status of the volume. @@ -407,38 +415,40 @@ The access modes are: * ReadWriteOnce -- the volume can be mounted as read-write by a single node * ReadOnlyMany -- the volume can be mounted read-only by many nodes * ReadWriteMany -- the volume can be mounted as read-write by many nodes +* ReadWriteOncePod -- the volume can be mounted as read-write by a single Pod. + This is only supported for CSI volumes and Kubernetes version 1.22+. 
In the CLI, the access modes are abbreviated to: * RWO - ReadWriteOnce * ROX - ReadOnlyMany * RWX - ReadWriteMany +* RWOP - ReadWriteOncePod > __Important!__ A volume can only be mounted using one access mode at a time, even if it supports many. For example, a GCEPersistentDisk can be mounted as ReadWriteOnce by a single node or ReadOnlyMany by many nodes, but not at the same time. -| Volume Plugin | ReadWriteOnce | ReadOnlyMany | ReadWriteMany| -| :--- | :---: | :---: | :---: | -| AWSElasticBlockStore | ✓ | - | - | -| AzureFile | ✓ | ✓ | ✓ | -| AzureDisk | ✓ | - | - | -| CephFS | ✓ | ✓ | ✓ | -| Cinder | ✓ | - | - | -| CSI | depends on the driver | depends on the driver | depends on the driver | -| FC | ✓ | ✓ | - | -| FlexVolume | ✓ | ✓ | depends on the driver | -| Flocker | ✓ | - | - | -| GCEPersistentDisk | ✓ | ✓ | - | -| Glusterfs | ✓ | ✓ | ✓ | -| HostPath | ✓ | - | - | -| iSCSI | ✓ | ✓ | - | -| Quobyte | ✓ | ✓ | ✓ | -| NFS | ✓ | ✓ | ✓ | -| RBD | ✓ | ✓ | - | -| VsphereVolume | ✓ | - | - (works when Pods are collocated) | -| PortworxVolume | ✓ | - | ✓ | -| ScaleIO | ✓ | ✓ | - | -| StorageOS | ✓ | - | - | +| Volume Plugin | ReadWriteOnce | ReadOnlyMany | ReadWriteMany | ReadWriteOncePod | +| :--- | :---: | :---: | :---: | - | +| AWSElasticBlockStore | ✓ | - | - | - | +| AzureFile | ✓ | ✓ | ✓ | - | +| AzureDisk | ✓ | - | - | - | +| CephFS | ✓ | ✓ | ✓ | - | +| Cinder | ✓ | - | - | - | +| CSI | depends on the driver | depends on the driver | depends on the driver | depends on the driver | +| FC | ✓ | ✓ | - | - | +| FlexVolume | ✓ | ✓ | depends on the driver | - | +| Flocker | ✓ | - | - | - | +| GCEPersistentDisk | ✓ | ✓ | - | - | +| Glusterfs | ✓ | ✓ | ✓ | - | +| HostPath | ✓ | - | - | - | +| iSCSI | ✓ | ✓ | - | - | +| Quobyte | ✓ | ✓ | ✓ | - | +| NFS | ✓ | ✓ | ✓ | - | +| RBD | ✓ | ✓ | - | - | +| VsphereVolume | ✓ | - | - (works when Pods are collocated) | - | +| PortworxVolume | ✓ | - | ✓ | - | - | +| StorageOS | ✓ | - | - | - | ### Class @@ -499,7 +509,7 @@ it will 
become fully deprecated in a future Kubernetes release. For most volume types, you do not need to set this field. It is automatically populated for [AWS EBS](/docs/concepts/storage/volumes/#awselasticblockstore), [GCE PD](/docs/concepts/storage/volumes/#gcepersistentdisk) and [Azure Disk](/docs/concepts/storage/volumes/#azuredisk) volume block types. You need to explicitly set this for [local](/docs/concepts/storage/volumes/#local) volumes. {{< /note >}} -A PV can specify [node affinity](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volumenodeaffinity-v1-core) to define constraints that limit what nodes this volume can be accessed from. Pods that use a PV will only be scheduled to nodes that are selected by the node affinity. +A PV can specify node affinity to define constraints that limit what nodes this volume can be accessed from. Pods that use a PV will only be scheduled to nodes that are selected by the node affinity. To specify node affinity, set `nodeAffinity` in the `.spec` of a PV. The [PersistentVolume](/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1/#PersistentVolumeSpec) API reference has more details on this field. ### Phase @@ -785,6 +795,82 @@ spec: storage: 10Gi ``` +## Volume populators and data sources + +{{< feature-state for_k8s_version="v1.22" state="alpha" >}} + +{{< note >}} +Kubernetes supports custom volume populators; this alpha feature was introduced +in Kubernetes 1.18. Kubernetes 1.22 reimplemented the mechanism with a redesigned API. +Check that you are reading the version of the Kubernetes documentation that matches your +cluster. {{% version-check %}} +To use custom volume populators, you must enable the `AnyVolumeDataSource` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for +the kube-apiserver and kube-controller-manager. +{{< /note >}} + +Volume populators take advantage of a PVC spec field called `dataSourceRef`. 
Unlike the +`dataSource` field, which can only contain either a reference to another PersistentVolumeClaim +or to a VolumeSnapshot, the `dataSourceRef` field can contain a reference to any object in the +same namespace, except for core objects other than PVCs. For clusters that have the feature +gate enabled, use of the `dataSourceRef` is preferred over `dataSource`. + +## Data source references + +The `dataSourceRef` field behaves almost the same as the `dataSource` field. If either one is +specified while the other is not, the API server will give both fields the same value. Neither +field can be changed after creation, and attempting to specify different values for the two +fields will result in a validation error. Therefore the two fields will always have the same +contents. + +There are two differences between the `dataSourceRef` field and the `dataSource` field that +users should be aware of: +* The `dataSource` field ignores invalid values (as if the field was blank) while the + `dataSourceRef` field never ignores values and will cause an error if an invalid value is + used. Invalid values are any core object (objects with no apiGroup) except for PVCs. +* The `dataSourceRef` field may contain different types of objects, while the `dataSource` field + only allows PVCs and VolumeSnapshots. + +Users should always use `dataSourceRef` on clusters that have the feature gate enabled, and +fall back to `dataSource` on clusters that do not. It is not necessary to look at both fields +under any circumstance. The duplicated values with slightly different semantics exist only for +backwards compatibility. In particular, a mixture of older and newer controllers are able to +interoperate because the fields are the same. + +### Using volume populators + +Volume populators are {{< glossary_tooltip text="controllers" term_id="controller" >}} that can +create non-empty volumes, where the contents of the volume are determined by a Custom Resource. 
+Users create a populated volume by referring to a Custom Resource using the `dataSourceRef` field: + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: populated-pvc +spec: + dataSourceRef: + name: example-name + kind: ExampleDataSource + apiGroup: example.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi +``` + +Because volume populators are external components, attempts to create a PVC that uses one +can fail if not all the correct components are installed. External controllers should generate +events on the PVC to provide feedback on the status of the creation, including warnings if +the PVC cannot be created due to some missing component. + +You can install the alpha [volume data source validator](https://github.com/kubernetes-csi/volume-data-source-validator) +controller into your cluster. That controller generates warning Events on a PVC in the case that no populator +is registered to handle that kind of data source. When a suitable populator is installed for a PVC, it's the +responsibility of that populator controller to report Events that relate to volume creation and issues during +the process. + ## Writing Portable Configuration If you're writing configuration templates or examples that run on a wide range of clusters @@ -811,16 +897,15 @@ and need persistent storage, it is recommended that you use the following patter or the cluster has no storage system (in which case the user cannot deploy config requiring PVCs). - ## {{% heading "whatsnext" %}} - +## {{% heading "whatsnext" %}} * Learn more about [Creating a PersistentVolume](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolume). * Learn more about [Creating a PersistentVolumeClaim](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim). 
* Read the [Persistent Storage design document](https://git.k8s.io/community/contributors/design-proposals/storage/persistent-storage.md). -### Reference +### API references {#reference} -* [PersistentVolume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolume-v1-core) -* [PersistentVolumeSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumespec-v1-core) -* [PersistentVolumeClaim](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core) -* [PersistentVolumeClaimSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaimspec-v1-core) +Read about the APIs described in this page: + +* [`PersistentVolume`](/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1/) +* [`PersistentVolumeClaim`](/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/) diff --git a/content/en/docs/concepts/storage/storage-classes.md b/content/en/docs/concepts/storage/storage-classes.md index 5e6851d94a..36615098c6 100644 --- a/content/en/docs/concepts/storage/storage-classes.md +++ b/content/en/docs/concepts/storage/storage-classes.md @@ -76,7 +76,7 @@ for provisioning PVs. This field must be specified. | Glusterfs | ✓ | [Glusterfs](#glusterfs) | | iSCSI | - | - | | Quobyte | ✓ | [Quobyte](#quobyte) | -| NFS | - | - | +| NFS | - | [NFS](#nfs) | | RBD | ✓ | [Ceph RBD](#ceph-rbd) | | VsphereVolume | ✓ | [vSphere](#vsphere) | | PortworxVolume | ✓ | [Portworx Volume](#portworx-volume) | @@ -189,7 +189,7 @@ and pre-created PVs, but you'll need to look at the documentation for a specific to see its supported topology keys and examples. {{< note >}} - If you choose to use `waitForFirstConsumer`, do not use `nodeName` in the Pod spec + If you choose to use `WaitForFirstConsumer`, do not use `nodeName` in the Pod spec to specify node affinity. 
If `nodeName` is used in this case, the scheduler will be bypassed and PVC will remain in `pending` state. Instead, you can use node selector for hostname in this case as shown below. @@ -423,6 +423,29 @@ parameters: `gluster-dynamic-`. The dynamic endpoint and service are automatically deleted when the persistent volume claim is deleted. +### NFS + +```yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: example-nfs +provisioner: example.com/external-nfs +parameters: + server: nfs-server.example.com + path: /share + readOnly: false +``` + +* `server`: Server is the hostname or IP address of the NFS server. +* `path`: Path that is exported by the NFS server. +* `readOnly`: A flag indicating whether the storage will be mounted as read only (default false). + +Kubernetes doesn't include an internal NFS provisioner. You need to use an external provisioner to create a StorageClass for NFS. +Here are some examples: +* [NFS Ganesha server and external provisioner](https://github.com/kubernetes-sigs/nfs-ganesha-server-and-external-provisioner) +* [NFS subdir external provisioner](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner) + ### OpenStack Cinder ```yaml @@ -578,6 +601,12 @@ parameters: ### Quobyte +{{< feature-state for_k8s_version="v1.22" state="deprecated" >}} + +The Quobyte in-tree storage plugin is deprecated, an +[example](https://github.com/quobyte/quobyte-csi/blob/master/example/StorageClass.yaml) +`StorageClass` for the out-of-tree Quobyte plugin can be found at the Quobyte CSI repository. + ```yaml apiVersion: storage.k8s.io/v1 kind: StorageClass @@ -658,11 +687,11 @@ metadata: provisioner: kubernetes.io/azure-disk parameters: storageaccounttype: Standard_LRS - kind: Shared + kind: managed ``` * `storageaccounttype`: Azure storage account Sku tier. Default is empty. -* `kind`: Possible values are `shared` (default), `dedicated`, and `managed`. 
+* `kind`: Possible values are `shared`, `dedicated`, and `managed` (default). When `kind` is `shared`, all unmanaged disks are created in a few shared storage accounts in the same resource group as the cluster. When `kind` is `dedicated`, a new dedicated storage account will be created for the new diff --git a/content/en/docs/concepts/storage/volumes.md b/content/en/docs/concepts/storage/volumes.md index d693e057ef..56694dee66 100644 --- a/content/en/docs/concepts/storage/volumes.md +++ b/content/en/docs/concepts/storage/volumes.md @@ -124,13 +124,13 @@ beta features must be enabled. {{< feature-state for_k8s_version="v1.17" state="alpha" >}} To disable the `awsElasticBlockStore` storage plugin from being loaded by the controller manager -and the kubelet, set the `CSIMigrationAWSComplete` flag to `true`. This feature requires the `ebs.csi.aws.com` Container Storage Interface (CSI) driver installed on all worker nodes. +and the kubelet, set the `InTreePluginAWSUnregister` flag to `true`. ### azureDisk {#azuredisk} The `azureDisk` volume type mounts a Microsoft Azure [Data Disk](https://docs.microsoft.com/en-us/azure/aks/csi-storage-drivers) into a pod. -For more details, see the [`azureDisk` volume plugin](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/azure_disk/README.md). +For more details, see the [`azureDisk` volume plugin](https://github.com/kubernetes/examples/tree/master/staging/volumes/azure_disk/README.md). #### azureDisk CSI migration @@ -148,7 +148,7 @@ features must be enabled. The `azureFile` volume type mounts a Microsoft Azure File volume (SMB 2.1 and 3.0) into a pod. -For more details, see the [`azureFile` volume plugin](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/azure_file/README.md). +For more details, see the [`azureFile` volume plugin](https://github.com/kubernetes/examples/tree/master/staging/volumes/azure_file/README.md). 
#### azureFile CSI migration @@ -176,7 +176,7 @@ writers simultaneously. You must have your own Ceph server running with the share exported before you can use it. {{< /note >}} -See the [CephFS example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/volumes/cephfs/) for more details. +See the [CephFS example](https://github.com/kubernetes/examples/tree/master/volumes/cephfs/) for more details. ### cinder @@ -347,7 +347,7 @@ You must configure FC SAN Zoning to allocate and mask those LUNs (volumes) to th beforehand so that Kubernetes hosts can access them. {{< /note >}} -See the [fibre channel example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/fibre_channel) for more details. +See the [fibre channel example](https://github.com/kubernetes/examples/tree/master/staging/volumes/fibre_channel) for more details. ### flocker (deprecated) {#flocker} @@ -365,7 +365,7 @@ can be shared between pods as required. You must have your own Flocker installation running before you can use it. {{< /note >}} -See the [Flocker example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/flocker) for more details. +See the [Flocker example](https://github.com/kubernetes/examples/tree/master/staging/volumes/flocker) for more details. ### gcePersistentDisk @@ -462,7 +462,8 @@ spec: required: nodeSelectorTerms: - matchExpressions: - - key: failure-domain.beta.kubernetes.io/zone + # failure-domain.beta.kubernetes.io/zone should be used prior to 1.21 + - key: topology.kubernetes.io/zone operator: In values: - us-central1-a @@ -480,6 +481,13 @@ Driver](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-drive must be installed on the cluster and the `CSIMigration` and `CSIMigrationGCE` beta features must be enabled. 
+#### GCE CSI migration complete + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +To disable the `gcePersistentDisk` storage plugin from being loaded by the controller manager +and the kubelet, set the `InTreePluginGCEUnregister` flag to `true`. + ### gitRepo (deprecated) {#gitrepo} {{< warning >}} @@ -525,10 +533,19 @@ simultaneously. You must have your own GlusterFS installation running before you can use it. {{< /note >}} -See the [GlusterFS example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/volumes/glusterfs) for more details. +See the [GlusterFS example](https://github.com/kubernetes/examples/tree/master/volumes/glusterfs) for more details. ### hostPath {#hostpath} +{{< warning >}} +HostPath volumes present many security risks, and it is a best practice to avoid the use of +HostPaths when possible. When a HostPath volume must be used, it should be scoped to only the +required file or directory, and mounted as ReadOnly. + +If restricting HostPath access to specific directories through AdmissionPolicy, `volumeMounts` MUST +be required to use `readOnly` mounts for the policy to be effective. +{{< /warning >}} + A `hostPath` volume mounts a file or directory from the host node's filesystem into your Pod. This is not something that most Pods will need, but it offers a powerful escape hatch for some applications. @@ -558,6 +575,9 @@ The supported values for field `type` are: Watch out when using this type of volume, because: +* HostPaths can expose privileged system credentials (such as for the Kubelet) or privileged APIs + (such as container runtime socket), which can be used for container escape or to attack other + parts of the cluster. * Pods with identical configuration (such as created from a PodTemplate) may behave differently on different nodes due to different files on the nodes * The files or directories created on the underlying hosts are only writable by root. 
You @@ -641,7 +661,7 @@ and then serve it in parallel from as many Pods as you need. Unfortunately, iSCSI volumes can only be mounted by a single consumer in read-write mode. Simultaneous writers are not allowed. -See the [iSCSI example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/volumes/iscsi) for more details. +See the [iSCSI example](https://github.com/kubernetes/examples/tree/master/volumes/iscsi) for more details. ### local @@ -729,7 +749,7 @@ writers simultaneously. You must have your own NFS server running with the share exported before you can use it. {{< /note >}} -See the [NFS example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/nfs) for more details. +See the [NFS example](https://github.com/kubernetes/examples/tree/master/staging/volumes/nfs) for more details. ### persistentVolumeClaim {#persistentvolumeclaim} @@ -777,7 +797,7 @@ Make sure you have an existing PortworxVolume with name `pxvol` before using it in the Pod. {{< /note >}} -For more details, see the [Portworx volume](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/portworx/README.md) examples. +For more details, see the [Portworx volume](https://github.com/kubernetes/examples/tree/master/staging/volumes/portworx/README.md) examples. ### projected @@ -791,7 +811,7 @@ Currently, the following types of volume sources can be projected: * `serviceAccountToken` All sources are required to be in the same namespace as the Pod. For more details, -see the [all-in-one volume design document](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/node/all-in-one-volume.md). +see the [all-in-one volume design document](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/all-in-one-volume.md). 
#### Example configuration with a secret, a downwardAPI, and a configMap {#example-configuration-secret-downwardapi-configmap} @@ -919,7 +939,7 @@ A container using a projected volume source as a [`subPath`](#using-subpath) vol receive updates for those volume sources. {{< /note >}} -### quobyte +### quobyte (deprecated) {#quobyte} A `quobyte` volume allows an existing [Quobyte](https://www.quobyte.com) volume to be mounted into your Pod. @@ -952,52 +972,9 @@ and then serve it in parallel from as many pods as you need. Unfortunately, RBD volumes can only be mounted by a single consumer in read-write mode. Simultaneous writers are not allowed. -See the [RBD example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/volumes/rbd) +See the [RBD example](https://github.com/kubernetes/examples/tree/master/volumes/rbd) for more details. -### scaleIO (deprecated) {#scaleio} - -ScaleIO is a software-based storage platform that uses existing hardware to -create clusters of scalable shared block networked storage. The `scaleIO` volume -plugin allows deployed pods to access existing ScaleIO -volumes. For information about dynamically provisioning new volumes for -persistent volume claims, see -[ScaleIO persistent volumes](/docs/concepts/storage/persistent-volumes/#scaleio). - -{{< note >}} -You must have an existing ScaleIO cluster already setup and -running with the volumes created before you can use them. 
-{{< /note >}} - -The following example is a Pod configuration with ScaleIO: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: pod-0 -spec: - containers: - - image: k8s.gcr.io/test-webserver - name: pod-0 - volumeMounts: - - mountPath: /test-pd - name: vol-0 - volumes: - - name: vol-0 - scaleIO: - gateway: https://localhost:443/api - system: scaleio - protectionDomain: sd0 - storagePool: sp1 - volumeName: vol-0 - secretRef: - name: sio-secret - fsType: xfs -``` - -For further details, see the [ScaleIO](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/scaleio) examples. - ### secret A `secret` volume is used to pass sensitive information, such as passwords, to @@ -1017,7 +994,7 @@ receive Secret updates. For more details, see [Configuring Secrets](/docs/concepts/configuration/secret/). -### storageOS {#storageos} +### storageOS (deprecated) {#storageos} A `storageos` volume allows an existing [StorageOS](https://www.storageos.com) volume to mount into your Pod. @@ -1165,7 +1142,7 @@ but new volumes created by the vSphere CSI driver will not be honoring these par {{< feature-state for_k8s_version="v1.19" state="beta" >}} -To turn off the `vsphereVolume` plugin from being loaded by the controller manager and the kubelet, you need to set this feature flag to `true`. You must install a `csi.vsphere.vmware.com` {{< glossary_tooltip text="CSI" term_id="csi" >}} driver on all worker nodes. +To turn off the `vsphereVolume` plugin from being loaded by the controller manager and the kubelet, you need to set `InTreePluginvSphereUnregister` feature flag to `true`. You must install a `csi.vsphere.vmware.com` {{< glossary_tooltip text="CSI" term_id="csi" >}} driver on all worker nodes. 
## Using subPath {#using-subpath} diff --git a/content/en/docs/concepts/workloads/controllers/_index.md b/content/en/docs/concepts/workloads/controllers/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/concepts/workloads/controllers/cron-jobs.md b/content/en/docs/concepts/workloads/controllers/cron-jobs.md index 7127924411..c6cd4d1336 100644 --- a/content/en/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/en/docs/concepts/workloads/controllers/cron-jobs.md @@ -17,6 +17,8 @@ A _CronJob_ creates {{< glossary_tooltip term_id="job" text="Jobs" >}} on a repe One CronJob object is like one line of a _crontab_ (cron table) file. It runs a job periodically on a given schedule, written in [Cron](https://en.wikipedia.org/wiki/Cron) format. +In addition, the CronJob schedule supports timezone handling, you can specify the timezone by adding "CRON_TZ= +This role does not allow write access to resource quota or to the namespace itself. +This role also does not allow write access to Endpoints in clusters created +using Kubernetes v1.22+. More information is available in the +["Write Access for Endpoints" section](#write-access-for-endpoints). @@ -695,7 +701,9 @@ This role does not allow write access to resource quota or to the namespace itse This role does not allow viewing or modifying roles or role bindings. However, this role allows accessing Secrets and running Pods as any ServiceAccount in the namespace, so it can be used to gain the API access levels of any ServiceAccount in -the namespace. +the namespace. This role also does not allow write access to Endpoints in +clusters created using Kubernetes v1.22+. More information is available in the +["Write Access for Endpoints" section](#write-access-for-endpoints). 
@@ -1185,6 +1193,24 @@ In order from most secure to least secure, the approaches are: --group=system:serviceaccounts ``` +## Write access for Endpoints + +Kubernetes clusters created before Kubernetes v1.22 include write access to +Endpoints in the aggregated "edit" and "admin" roles. As a mitigation for +[CVE-2021-25740](https://github.com/kubernetes/kubernetes/issues/103675), this +access is not part of the aggregated roles in clusters that you create using +Kubernetes v1.22 or later. + +Existing clusters that have been upgraded to Kubernetes v1.22 will not be +subject to this change. The [CVE +announcement](https://github.com/kubernetes/kubernetes/issues/103675) includes +guidance for restricting this access in existing clusters. + +If you want new clusters to retain this level of access in the aggregated roles, +you can create the following ClusterRole: + +{{< codenew file="access/endpoints-aggregated.yaml" >}} + ## Upgrading from ABAC Clusters that originally ran older Kubernetes versions often used diff --git a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md b/content/en/docs/reference/access-authn-authz/service-accounts-admin.md index 0d4ecff08c..f40cc7d00c 100644 --- a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md +++ b/content/en/docs/reference/access-authn-authz/service-accounts-admin.md @@ -62,10 +62,9 @@ It acts synchronously to modify pods as they are created or updated. When this p #### Bound Service Account Token Volume -{{< feature-state for_k8s_version="v1.21" state="beta" >}} +{{< feature-state for_k8s_version="v1.22" state="stable" >}} -When the `BoundServiceAccountTokenVolume` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled, the service account admission controller will -add the following projected volume instead of a Secret-based volume for the non-expiring service account token created by Token Controller. 
+The ServiceAccount admission controller will add the following projected volume instead of a Secret-based volume for the non-expiring service account token created by Token Controller. ```yaml - name: kube-api-access- @@ -96,10 +95,6 @@ This projected volume consists of three sources: See more details about [projected volumes](/docs/tasks/configure-pod-container/configure-projected-volume-storage/). -You can manually migrate a Secret-based service account volume to a projected volume when -the `BoundServiceAccountTokenVolume` feature gate is not enabled by adding the above -projected volume to the pod spec. - ### Token Controller TokenController runs as part of `kube-controller-manager`. It acts asynchronously. It: diff --git a/content/en/docs/reference/access-authn-authz/webhook.md b/content/en/docs/reference/access-authn-authz/webhook.md index 69be02ff76..d0ee79f4cf 100644 --- a/content/en/docs/reference/access-authn-authz/webhook.md +++ b/content/en/docs/reference/access-authn-authz/webhook.md @@ -172,5 +172,5 @@ Access to other non-resource paths can be disallowed without restricting access to the REST api. For further documentation refer to the authorization.v1beta1 API objects and -[webhook.go](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go). +[webhook.go](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go). 
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md
index 99e3094c18..d639029659 100644
--- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md
@@ -2,6 +2,9 @@
title: Feature Gates
weight: 10
content_type: concept
+card:
+  name: reference
+  weight: 60
---
@@ -25,7 +28,7 @@ Use `-h` flag to see a full set of feature gates for all components.
To set feature gates for a component, such as kubelet, use the `--feature-gates` flag assigned to a list of feature pairs:
```shell
---feature-gates="...,DynamicKubeletConfig=true"
+--feature-gates="...,GracefulNodeShutdown=true"
```
The following tables are a summary of the feature gates that you can set on
@@ -55,64 +58,59 @@ different Kubernetes components.
| `APIResponseCompression` | `false` | Alpha | 1.7 | 1.15 |
| `APIResponseCompression` | `true` | Beta | 1.16 | |
| `APIServerIdentity` | `false` | Alpha | 1.20 | |
+| `APIServerTracing` | `false` | Alpha | 1.22 | |
| `AllowInsecureBackendProxy` | `true` | Beta | 1.17 | |
| `AnyVolumeDataSource` | `false` | Alpha | 1.18 | |
| `AppArmor` | `true` | Beta | 1.4 | |
-| `BalanceAttachedNodeVolumes` | `false` | Alpha | 1.11 | |
-| `BoundServiceAccountTokenVolume` | `false` | Alpha | 1.13 | 1.20 |
-| `BoundServiceAccountTokenVolume` | `true` | Beta | 1.21 | |
| `CPUManager` | `false` | Alpha | 1.8 | 1.9 |
| `CPUManager` | `true` | Beta | 1.10 | |
+| `CPUManagerPolicyOptions` | `false` | Alpha | 1.22 | |
| `CSIInlineVolume` | `false` | Alpha | 1.15 | 1.15 |
| `CSIInlineVolume` | `true` | Beta | 1.16 | - |
| `CSIMigration` | `false` | Alpha | 1.14 | 1.16 |
| `CSIMigration` | `true` | Beta | 1.17 | |
| `CSIMigrationAWS` | `false` | Alpha | 1.14 | |
| `CSIMigrationAWS` | `false` | Beta | 1.17 | |
-| 
`CSIMigrationAWSComplete` | `false` | Alpha | 1.17 | | | `CSIMigrationAzureDisk` | `false` | Alpha | 1.15 | 1.18 | | `CSIMigrationAzureDisk` | `false` | Beta | 1.19 | | -| `CSIMigrationAzureDiskComplete` | `false` | Alpha | 1.17 | | | `CSIMigrationAzureFile` | `false` | Alpha | 1.15 | 1.19 | | `CSIMigrationAzureFile` | `false` | Beta | 1.21 | | -| `CSIMigrationAzureFileComplete` | `false` | Alpha | 1.17 | | | `CSIMigrationGCE` | `false` | Alpha | 1.14 | 1.16 | | `CSIMigrationGCE` | `false` | Beta | 1.17 | | -| `CSIMigrationGCEComplete` | `false` | Alpha | 1.17 | | | `CSIMigrationOpenStack` | `false` | Alpha | 1.14 | 1.17 | | `CSIMigrationOpenStack` | `true` | Beta | 1.18 | | -| `CSIMigrationOpenStackComplete` | `false` | Alpha | 1.17 | | | `CSIMigrationvSphere` | `false` | Beta | 1.19 | | -| `CSIMigrationvSphereComplete` | `false` | Beta | 1.19 | | -| `CSIServiceAccountToken` | `false` | Alpha | 1.20 | 1.20 | -| `CSIServiceAccountToken` | `true` | Beta | 1.21 | | | `CSIStorageCapacity` | `false` | Alpha | 1.19 | 1.20 | | `CSIStorageCapacity` | `true` | Beta | 1.21 | | | `CSIVolumeFSGroupPolicy` | `false` | Alpha | 1.19 | 1.19 | | `CSIVolumeFSGroupPolicy` | `true` | Beta | 1.20 | | | `CSIVolumeHealth` | `false` | Alpha | 1.21 | | +| `CSRDuration` | `true` | Beta | 1.22 | | | `ConfigurableFSGroupPolicy` | `false` | Alpha | 1.18 | 1.19 | | `ConfigurableFSGroupPolicy` | `true` | Beta | 1.20 | | -| `CronJobControllerV2` | `false` | Alpha | 1.20 | 1.20 | -| `CronJobControllerV2` | `true` | Beta | 1.21 | | +| `ControllerManagerLeaderMigration` | `false` | Alpha | 1.21 | 1.21 | +| `ControllerManagerLeaderMigration` | `true` | Beta | 1.22 | | | `CustomCPUCFSQuotaPeriod` | `false` | Alpha | 1.12 | | +| `DaemonSetUpdateSurge` | `false` | Alpha | 1.21 | 1.21 | +| `DaemonSetUpdateSurge` | `true` | Beta | 1.22 | | | `DefaultPodTopologySpread` | `false` | Alpha | 1.19 | 1.19 | | `DefaultPodTopologySpread` | `true` | Beta | 1.20 | | +| `DelegateFSGroupToCSIDriver` | `false` | 
Alpha | 1.22 | | | `DevicePlugins` | `false` | Alpha | 1.8 | 1.9 | | `DevicePlugins` | `true` | Beta | 1.10 | | | `DisableAcceleratorUsageMetrics` | `false` | Alpha | 1.19 | 1.19 | | `DisableAcceleratorUsageMetrics` | `true` | Beta | 1.20 | | +| `DisableCloudProviders` | `false` | Alpha | 1.22 | | | `DownwardAPIHugePages` | `false` | Alpha | 1.20 | 1.20 | | `DownwardAPIHugePages` | `false` | Beta | 1.21 | | -| `DynamicKubeletConfig` | `false` | Alpha | 1.4 | 1.10 | -| `DynamicKubeletConfig` | `true` | Beta | 1.11 | | -| `EfficientWatchResumption` | `false` | Alpha | 1.20 | | -| `EndpointSliceProxying` | `false` | Alpha | 1.18 | 1.18 | -| `EndpointSliceProxying` | `true` | Beta | 1.19 | | -| `EndpointSliceTerminatingCondition` | `false` | Alpha | 1.20 | | +| `EfficientWatchResumption` | `false` | Alpha | 1.20 | 1.20 | +| `EfficientWatchResumption` | `true` | Beta | 1.21 | | +| `EndpointSliceTerminatingCondition` | `false` | Alpha | 1.20 | 1.21 | +| `EndpointSliceTerminatingCondition` | `true` | Beta | 1.22 | | | `EphemeralContainers` | `false` | Alpha | 1.16 | | | `ExpandCSIVolumes` | `false` | Alpha | 1.14 | 1.15 | | `ExpandCSIVolumes` | `true` | Beta | 1.16 | | +| `ExpandedDNSConfig` | `false` | Alpha | 1.22 | | | `ExpandInUsePersistentVolumes` | `false` | Alpha | 1.11 | 1.14 | | `ExpandInUsePersistentVolumes` | `true` | Beta | 1.15 | | | `ExpandPersistentVolumes` | `false` | Alpha | 1.8 | 1.10 | @@ -124,69 +123,83 @@ different Kubernetes components. 
| `GracefulNodeShutdown` | `true` | Beta | 1.21 | |
| `HPAContainerMetrics` | `false` | Alpha | 1.20 | |
| `HPAScaleToZero` | `false` | Alpha | 1.16 | |
-| `HugePageStorageMediumSize` | `false` | Alpha | 1.18 | 1.18 |
-| `HugePageStorageMediumSize` | `true` | Beta | 1.19 | |
-| `IndexedJob` | `false` | Alpha | 1.21 | |
-| `IngressClassNamespacedParams` | `false` | Alpha | 1.21 | |
+| `IndexedJob` | `false` | Alpha | 1.21 | 1.21 |
+| `IndexedJob` | `true` | Beta | 1.22 | |
+| `IngressClassNamespacedParams` | `false` | Alpha | 1.21 | 1.21 |
+| `IngressClassNamespacedParams` | `true` | Beta | 1.22 | |
+| `InTreePluginAWSUnregister` | `false` | Alpha | 1.21 | |
+| `InTreePluginAzureDiskUnregister` | `false` | Alpha | 1.21 | |
+| `InTreePluginAzureFileUnregister` | `false` | Alpha | 1.21 | |
+| `InTreePluginGCEUnregister` | `false` | Alpha | 1.21 | |
+| `InTreePluginOpenStackUnregister` | `false` | Alpha | 1.21 | |
+| `InTreePluginvSphereUnregister` | `false` | Alpha | 1.21 | |
| `IPv6DualStack` | `false` | Alpha | 1.15 | 1.20 |
| `IPv6DualStack` | `true` | Beta | 1.21 | |
+| `JobTrackingWithFinalizers` | `false` | Alpha | 1.22 | |
| `KubeletCredentialProviders` | `false` | Alpha | 1.20 | |
-| `LegacyNodeRoleBehavior` | `false` | Alpha | 1.16 | 1.18 |
-| `LegacyNodeRoleBehavior` | `true` | Beta | 1.19 | 1.20 |
| `LocalStorageCapacityIsolation` | `false` | Alpha | 1.7 | 1.9 |
| `LocalStorageCapacityIsolation` | `true` | Beta | 1.10 | |
| `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | Alpha | 1.15 | |
-| `LogarithmicScaleDown` | `false` | Alpha | 1.21 | |
+| `LogarithmicScaleDown` | `false` | Alpha | 1.21 | 1.21 |
+| `LogarithmicScaleDown` | `true` | Beta | 1.22 | |
+| `KubeletInUserNamespace` | `false` | Alpha | 1.22 | |
| `KubeletPodResourcesGetAllocatable` | `false` | Alpha | 1.21 | |
+| `MemoryManager` | `false` | Alpha | 1.21 | 1.21 |
+| `MemoryManager` | `true` | Beta | 1.22 | |
+| `MemoryQoS`
| `false` | Alpha | 1.22 | | | `MixedProtocolLBService` | `false` | Alpha | 1.20 | | -| `NamespaceDefaultLabelName` | `true` | Beta | 1.21 | | -| `NetworkPolicyEndPort` | `false` | Alpha | 1.21 | | -| `NodeDisruptionExclusion` | `false` | Alpha | 1.16 | 1.18 | -| `NodeDisruptionExclusion` | `true` | Beta | 1.19 | 1.20 | +| `NetworkPolicyEndPort` | `false` | Alpha | 1.21 | 1.21 | +| `NetworkPolicyEndPort` | `true` | Beta | 1.22 | | +| `NodeSwap` | `false` | Alpha | 1.22 | | | `NonPreemptingPriority` | `false` | Alpha | 1.15 | 1.18 | | `NonPreemptingPriority` | `true` | Beta | 1.19 | | -| `PodDeletionCost` | `false` | Alpha | 1.21 | | -| `PodAffinityNamespaceSelector` | `false` | Alpha | 1.21 | | +| `PodDeletionCost` | `false` | Alpha | 1.21 | 1.21 | +| `PodDeletionCost` | `true` | Beta | 1.22 | | +| `PodAffinityNamespaceSelector` | `false` | Alpha | 1.21 | 1.21 | +| `PodAffinityNamespaceSelector` | `true` | Beta | 1.22 | | | `PodOverhead` | `false` | Alpha | 1.16 | 1.17 | -| `PodOverhead` | `true` | Beta | 1.18 | | -| `ProbeTerminationGracePeriod` | `false` | Alpha | 1.21 | | +| `PodOverhead` | `true` | Beta | 1.18 | | +| `PodSecurity` | `false` | Alpha | 1.22 | | +| `PreferNominatedNode` | `false` | Alpha | 1.21 | 1.21 | +| `PreferNominatedNode` | `true` | Beta | 1.22 | | +| `ProbeTerminationGracePeriod` | `false` | Alpha | 1.21 | 1.21 | +| `ProbeTerminationGracePeriod` | `false` | Beta | 1.22 | | | `ProcMountType` | `false` | Alpha | 1.12 | | +| `ProxyTerminatingEndpoints` | `false` | Alpha | 1.22 | | | `QOSReserved` | `false` | Alpha | 1.11 | | -| `RemainingItemCount` | `false` | Alpha | 1.15 | | +| `ReadWriteOncePod` | `false` | Alpha | 1.22 | | +| `RemainingItemCount` | `false` | Alpha | 1.15 | 1.15 | +| `RemainingItemCount` | `true` | Beta | 1.16 | | | `RemoveSelfLink` | `false` | Alpha | 1.16 | 1.19 | | `RemoveSelfLink` | `true` | Beta | 1.20 | | | `RotateKubeletServerCertificate` | `false` | Alpha | 1.7 | 1.11 | | `RotateKubeletServerCertificate` | `true` | 
Beta | 1.12 | | -| `RunAsGroup` | `true` | Beta | 1.14 | | -| `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 | -| `ServerSideApply` | `true` | Beta | 1.16 | | -| `ServiceInternalTrafficPolicy` | `false` | Alpha | 1.21 | | -| `ServiceLBNodePortControl` | `false` | Alpha | 1.20 | | -| `ServiceLoadBalancerClass` | `false` | Alpha | 1.21 | | -| `ServiceNodeExclusion` | `false` | Alpha | 1.8 | 1.18 | -| `ServiceNodeExclusion` | `true` | Beta | 1.19 | 1.20 | -| `ServiceTopology` | `false` | Alpha | 1.17 | | -| `SetHostnameAsFQDN` | `false` | Alpha | 1.19 | 1.19 | -| `SetHostnameAsFQDN` | `true` | Beta | 1.20 | | -| `SizeMemoryBackedVolumes` | `false` | Alpha | 1.20 | | +| `SeccompDefault` | `false` | Alpha | 1.22 | | +| `ServiceInternalTrafficPolicy` | `false` | Alpha | 1.21 | 1.21 | +| `ServiceInternalTrafficPolicy` | `true` | Beta | 1.22 | | +| `ServiceLBNodePortControl` | `false` | Alpha | 1.20 | 1.21 | +| `ServiceLBNodePortControl` | `true` | Beta | 1.22 | | +| `ServiceLoadBalancerClass` | `false` | Alpha | 1.21 | 1.21 | +| `ServiceLoadBalancerClass` | `true` | Beta | 1.22 | | +| `SizeMemoryBackedVolumes` | `false` | Alpha | 1.20 | 1.21 | +| `SizeMemoryBackedVolumes` | `true` | Beta | 1.22 | | +| `StatefulSetMinReadySeconds` | `false` | Alpha | 1.22 | | | `StorageVersionAPI` | `false` | Alpha | 1.20 | | | `StorageVersionHash` | `false` | Alpha | 1.14 | 1.14 | | `StorageVersionHash` | `true` | Beta | 1.15 | | -| `SuspendJob` | `false` | Alpha | 1.21 | | +| `SuspendJob` | `false` | Alpha | 1.21 | 1.21 | +| `SuspendJob` | `true` | Beta | 1.22 | | | `TTLAfterFinished` | `false` | Alpha | 1.12 | 1.20 | | `TTLAfterFinished` | `true` | Beta | 1.21 | | | `TopologyAwareHints` | `false` | Alpha | 1.21 | | | `TopologyManager` | `false` | Alpha | 1.16 | 1.17 | | `TopologyManager` | `true` | Beta | 1.18 | | -| `ValidateProxyRedirects` | `false` | Alpha | 1.12 | 1.13 | -| `ValidateProxyRedirects` | `true` | Beta | 1.14 | | | `VolumeCapacityPriority` | `false` | Alpha | 1.21 | - 
| -| `WarningHeaders` | `true` | Beta | 1.19 | | | `WinDSR` | `false` | Alpha | 1.14 | | | `WinOverlay` | `false` | Alpha | 1.14 | 1.19 | | `WinOverlay` | `true` | Beta | 1.20 | | -| `WindowsEndpointSliceProxying` | `false` | Alpha | 1.19 | 1.20 | -| `WindowsEndpointSliceProxying` | `true` | Beta | 1.21 | | +| `WindowsHostProcessContainers` | `false` | Alpha | 1.22 | | {{< /table >}} ### Feature gates for graduated or deprecated features @@ -204,9 +217,17 @@ different Kubernetes components. | `AffinityInAnnotations` | - | Deprecated | 1.8 | - | | `AllowExtTrafficLocalEndpoints` | `false` | Beta | 1.4 | 1.6 | | `AllowExtTrafficLocalEndpoints` | `true` | GA | 1.7 | - | +| `AttachVolumeLimit` | `false` | Alpha | 1.11 | 1.11 | +| `AttachVolumeLimit` | `true` | Beta | 1.12 | 1.16 | +| `AttachVolumeLimit` | `true` | GA | 1.17 | - | +| `BalanceAttachedNodeVolumes` | `false` | Alpha | 1.11 | 1.21 | +| `BalanceAttachedNodeVolumes` | `false` | Deprecated | 1.22 | | | `BlockVolume` | `false` | Alpha | 1.9 | 1.12 | | `BlockVolume` | `true` | Beta | 1.13 | 1.17 | | `BlockVolume` | `true` | GA | 1.18 | - | +| `BoundServiceAccountTokenVolume` | `false` | Alpha | 1.13 | 1.20 | +| `BoundServiceAccountTokenVolume` | `true` | Beta | 1.21 | 1.21 | +| `BoundServiceAccountTokenVolume` | `true` | GA | 1.22 | - | | `CRIContainerLogRotation` | `false` | Alpha | 1.10 | 1.10 | | `CRIContainerLogRotation` | `true` | Beta | 1.11 | 1.20 | | `CRIContainerLogRotation` | `true` | GA | 1.21 | - | @@ -216,15 +237,30 @@ different Kubernetes components. 
| `CSIDriverRegistry` | `false` | Alpha | 1.12 | 1.13 | | `CSIDriverRegistry` | `true` | Beta | 1.14 | 1.17 | | `CSIDriverRegistry` | `true` | GA | 1.18 | | +| `CSIMigrationAWSComplete` | `false` | Alpha | 1.17 | 1.20 | +| `CSIMigrationAWSComplete` | - | Deprecated | 1.21 | - | +| `CSIMigrationAzureDiskComplete` | `false` | Alpha | 1.17 | 1.20 | +| `CSIMigrationAzureDiskComplete` | - | Deprecated | 1.21 | - | +| `CSIMigrationAzureFileComplete` | `false` | Alpha | 1.17 | 1.20 | +| `CSIMigrationAzureFileComplete` | - | Deprecated | 1.21 | - | +| `CSIMigrationGCEComplete` | `false` | Alpha | 1.17 | 1.20 | +| `CSIMigrationGCEComplete` | - | Deprecated | 1.21 | - | +| `CSIMigrationOpenStackComplete` | `false` | Alpha | 1.17 | 1.20 | +| `CSIMigrationOpenStackComplete` | - | Deprecated | 1.21 | - | +| `CSIMigrationvSphereComplete` | `false` | Beta | 1.19 | 1.21 | +| `CSIMigrationvSphereComplete` | - | Deprecated | 1.22 | - | | `CSINodeInfo` | `false` | Alpha | 1.12 | 1.13 | | `CSINodeInfo` | `true` | Beta | 1.14 | 1.16 | | `CSINodeInfo` | `true` | GA | 1.17 | | -| `AttachVolumeLimit` | `false` | Alpha | 1.11 | 1.11 | -| `AttachVolumeLimit` | `true` | Beta | 1.12 | 1.16 | -| `AttachVolumeLimit` | `true` | GA | 1.17 | - | | `CSIPersistentVolume` | `false` | Alpha | 1.9 | 1.9 | | `CSIPersistentVolume` | `true` | Beta | 1.10 | 1.12 | | `CSIPersistentVolume` | `true` | GA | 1.13 | - | +| `CSIServiceAccountToken` | `false` | Alpha | 1.20 | 1.20 | +| `CSIServiceAccountToken` | `true` | Beta | 1.21 | 1.21 | +| `CSIServiceAccountToken` | `true` | GA | 1.22 | | +| `CronJobControllerV2` | `false` | Alpha | 1.20 | 1.20 | +| `CronJobControllerV2` | `true` | Beta | 1.21 | 1.21 | +| `CronJobControllerV2` | `true` | GA | 1.22 | - | | `CustomPodDNS` | `false` | Alpha | 1.9 | 1.9 | | `CustomPodDNS` | `true` | Beta| 1.10 | 1.13 | | `CustomPodDNS` | `true` | GA | 1.14 | - | @@ -248,8 +284,14 @@ different Kubernetes components. 
| `DryRun` | `true` | GA | 1.19 | - |
| `DynamicAuditing` | `false` | Alpha | 1.13 | 1.18 |
| `DynamicAuditing` | - | Deprecated | 1.19 | - |
+| `DynamicKubeletConfig` | `false` | Alpha | 1.4 | 1.10 |
+| `DynamicKubeletConfig` | `true` | Beta | 1.11 | 1.21 |
+| `DynamicKubeletConfig` | `false` | Deprecated | 1.22 | - |
| `DynamicProvisioningScheduling` | `false` | Alpha | 1.11 | 1.11 |
| `DynamicProvisioningScheduling` | - | Deprecated| 1.12 | - |
| `DynamicVolumeProvisioning` | `true` | Alpha | 1.3 | 1.7 |
| `DynamicVolumeProvisioning` | `true` | GA | 1.8 | - |
| `EnableAggregatedDiscoveryTimeout` | `true` | Deprecated | 1.16 | - |
@@ -261,6 +303,9 @@ different Kubernetes components.
| `EndpointSlice` | `true` | GA | 1.21 | - |
| `EndpointSliceNodeName` | `false` | Alpha | 1.20 | 1.20 |
| `EndpointSliceNodeName` | `true` | GA | 1.21 | - |
+| `EndpointSliceProxying` | `false` | Alpha | 1.18 | 1.18 |
+| `EndpointSliceProxying` | `true` | Beta | 1.19 | 1.21 |
+| `EndpointSliceProxying` | `true` | GA | 1.22 | - |
| `ExperimentalCriticalPodAnnotation` | `false` | Alpha | 1.5 | 1.12 |
| `ExperimentalCriticalPodAnnotation` | `false` | Deprecated | 1.13 | - |
| `EvenPodsSpread` | `false` | Alpha | 1.16 | 1.17 |
@@ -270,9 +315,15 @@ different Kubernetes components.
| `ExternalPolicyForExternalIP` | `true` | GA | 1.18 | - |
| `GCERegionalPersistentDisk` | `true` | Beta | 1.10 | 1.12 |
| `GCERegionalPersistentDisk` | `true` | GA | 1.13 | - |
| `HugePages` | `false` | Alpha | 1.8 | 1.9 |
| `HugePages` | `true` | Beta| 1.10 | 1.13 |
| `HugePages` | `true` | GA | 1.14 | - |
+| `HugePageStorageMediumSize` | `false` | Alpha | 1.18 | 1.18 |
+| `HugePageStorageMediumSize` | `true` | Beta | 1.19 | 1.21 |
+| `HugePageStorageMediumSize` | `true` | GA | 1.22 | - |
| `HyperVContainer` | `false` | Alpha | 1.10 | 1.19 |
| `HyperVContainer` | `false` | Deprecated | 1.20 | - |
| `ImmutableEphemeralVolumes` | `false` | Alpha | 1.18 | 1.18 |
@@ -288,16 +339,22 @@ different Kubernetes components.
| `KubeletPodResources` | `false` | Alpha | 1.13 | 1.14 |
| `KubeletPodResources` | `true` | Beta | 1.15 | |
| `KubeletPodResources` | `true` | GA | 1.20 | |
+| `LegacyNodeRoleBehavior` | `false` | Alpha | 1.16 | 1.18 |
+| `LegacyNodeRoleBehavior` | `true` | Beta | 1.19 | 1.20 |
| `LegacyNodeRoleBehavior` | `false` | GA | 1.21 | - |
| `MountContainers` | `false` | Alpha | 1.9 | 1.16 |
| `MountContainers` | `false` | Deprecated | 1.17 | - |
| `MountPropagation` | `false` | Alpha | 1.8 | 1.9 |
| `MountPropagation` | `true` | Beta | 1.10 | 1.11 |
| `MountPropagation` | `true` | GA | 1.12 | - |
+| `NodeDisruptionExclusion` | `false` | Alpha | 1.16 | 1.18 |
+| `NodeDisruptionExclusion` | `true` | Beta | 1.19 | 1.20 |
| `NodeDisruptionExclusion` | `true` | GA | 1.21 | - |
| `NodeLease` | `false` | Alpha | 1.12 | 1.13 |
| `NodeLease` | `true` | Beta | 1.14 | 1.16 |
| `NodeLease` | `true` | GA | 1.17 | - |
+| `NamespaceDefaultLabelName` | `true` | Beta | 1.21 | 1.21 |
+| `NamespaceDefaultLabelName` | `true` | GA | 1.22 | - |
| `PVCProtection` | `false` | Alpha | 1.9 | 1.9 |
| 
`PVCProtection` | - | Deprecated | 1.10 | - | | `PersistentLocalVolumes` | `false` | Alpha | 1.7 | 1.9 | @@ -327,15 +384,23 @@ different Kubernetes components. | `RootCAConfigMap` | `true` | GA | 1.21 | - | | `RotateKubeletClientCertificate` | `true` | Beta | 1.8 | 1.18 | | `RotateKubeletClientCertificate` | `true` | GA | 1.19 | - | +| `RunAsGroup` | `true` | Beta | 1.14 | 1.20 | +| `RunAsGroup` | `true` | GA | 1.21 | - | | `RuntimeClass` | `false` | Alpha | 1.12 | 1.13 | | `RuntimeClass` | `true` | Beta | 1.14 | 1.19 | | `RuntimeClass` | `true` | GA | 1.20 | - | -| `ScheduleDaemonSetPods` | `false` | Alpha | 1.11 | 1.11 | -| `ScheduleDaemonSetPods` | `true` | Beta | 1.12 | 1.16 | -| `ScheduleDaemonSetPods` | `true` | GA | 1.17 | - | | `SCTPSupport` | `false` | Alpha | 1.12 | 1.18 | | `SCTPSupport` | `true` | Beta | 1.19 | 1.19 | | `SCTPSupport` | `true` | GA | 1.20 | - | +| `ScheduleDaemonSetPods` | `false` | Alpha | 1.11 | 1.11 | +| `ScheduleDaemonSetPods` | `true` | Beta | 1.12 | 1.16 | +| `ScheduleDaemonSetPods` | `true` | GA | 1.17 | - | +| `SelectorIndex` | `false` | Alpha | 1.18 | 1.18 | +| `SelectorIndex` | `true` | Beta | 1.19 | 1.19 | +| `SelectorIndex` | `true` | GA | 1.20 | - | +| `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 | +| `ServerSideApply` | `true` | Beta | 1.16 | 1.21 | +| `ServerSideApply` | `true` | GA | 1.22 | - | | `ServiceAccountIssuerDiscovery` | `false` | Alpha | 1.18 | 1.19 | | `ServiceAccountIssuerDiscovery` | `true` | Beta | 1.20 | 1.20 | | `ServiceAccountIssuerDiscovery` | `true` | GA | 1.21 | - | @@ -345,15 +410,23 @@ different Kubernetes components. 
| `ServiceLoadBalancerFinalizer` | `false` | Alpha | 1.15 | 1.15 | | `ServiceLoadBalancerFinalizer` | `true` | Beta | 1.16 | 1.16 | | `ServiceLoadBalancerFinalizer` | `true` | GA | 1.17 | - | +| `ServiceNodeExclusion` | `false` | Alpha | 1.8 | 1.18 | +| `ServiceNodeExclusion` | `true` | Beta | 1.19 | 1.20 | | `ServiceNodeExclusion` | `true` | GA | 1.21 | - | +| `ServiceTopology` | `false` | Alpha | 1.17 | 1.19 | +| `ServiceTopology` | `false` | Deprecated | 1.20 | - | +| `SetHostnameAsFQDN` | `false` | Alpha | 1.19 | 1.19 | +| `SetHostnameAsFQDN` | `true` | Beta | 1.20 | 1.21 | +| `SetHostnameAsFQDN` | `true` | GA | 1.22 | - | | `StartupProbe` | `false` | Alpha | 1.16 | 1.17 | | `StartupProbe` | `true` | Beta | 1.18 | 1.19 | | `StartupProbe` | `true` | GA | 1.20 | - | | `StorageObjectInUseProtection` | `true` | Beta | 1.10 | 1.10 | | `StorageObjectInUseProtection` | `true` | GA | 1.11 | - | | `StreamingProxyRedirects` | `false` | Beta | 1.5 | 1.5 | -| `StreamingProxyRedirects` | `true` | Beta | 1.6 | 1.18 | -| `StreamingProxyRedirects` | - | GA | 1.19 | - | +| `StreamingProxyRedirects` | `true` | Beta | 1.6 | 1.17 | +| `StreamingProxyRedirects` | `true` | Deprecated | 1.18 | 1.21 | +| `StreamingProxyRedirects` | `false` | Deprecated | 1.22 | - | | `SupportIPVSProxyMode` | `false` | Alpha | 1.8 | 1.8 | | `SupportIPVSProxyMode` | `false` | Beta | 1.9 | 1.9 | | `SupportIPVSProxyMode` | `true` | Beta | 1.10 | 1.10 | @@ -378,6 +451,9 @@ different Kubernetes components. 
| `TokenRequestProjection` | `false` | Alpha | 1.11 | 1.11 |
| `TokenRequestProjection` | `true` | Beta | 1.12 | 1.19 |
| `TokenRequestProjection` | `true` | GA | 1.20 | - |
+| `ValidateProxyRedirects` | `false` | Alpha | 1.12 | 1.13 |
+| `ValidateProxyRedirects` | `true` | Beta | 1.14 | 1.21 |
+| `ValidateProxyRedirects` | `true` | Deprecated | 1.22 | - |
| `VolumePVCDataSource` | `false` | Alpha | 1.15 | 1.15 |
| `VolumePVCDataSource` | `true` | Beta | 1.16 | 1.17 |
| `VolumePVCDataSource` | `true` | GA | 1.18 | - |
@@ -391,12 +467,17 @@ different Kubernetes components.
| `VolumeSubpathEnvExpansion` | `false` | Alpha | 1.14 | 1.14 |
| `VolumeSubpathEnvExpansion` | `true` | Beta | 1.15 | 1.16 |
| `VolumeSubpathEnvExpansion` | `true` | GA | 1.17 | - |
+| `WarningHeaders` | `true` | Beta | 1.19 | 1.21 |
+| `WarningHeaders` | `true` | GA | 1.22 | - |
| `WatchBookmark` | `false` | Alpha | 1.15 | 1.15 |
| `WatchBookmark` | `true` | Beta | 1.16 | 1.16 |
| `WatchBookmark` | `true` | GA | 1.17 | - |
+| `WindowsEndpointSliceProxying` | `false` | Alpha | 1.19 | 1.20 |
+| `WindowsEndpointSliceProxying` | `true` | Beta | 1.21 | 1.21 |
+| `WindowsEndpointSliceProxying` | `true` | GA | 1.22 | - |
| `WindowsGMSA` | `false` | Alpha | 1.14 | 1.15 |
| `WindowsGMSA` | `true` | Beta | 1.16 | 1.17 |
| `WindowsGMSA` | `true` | GA | 1.18 | - |
| `WindowsRunAsUserName` | `false` | Alpha | 1.16 | 1.16 |
| `WindowsRunAsUserName` | `true` | Beta | 1.17 | 1.17 |
| `WindowsRunAsUserName` | `true` | GA | 1.18 | - |
@@ -451,6 +533,7 @@ Each feature gate is designed for enabling/disabling a specific feature:
  prioritization and fairness at each server. (Renamed from `RequestManagement`)
- `APIResponseCompression`: Compress the API responses for `LIST` or `GET` requests.
- `APIServerIdentity`: Assign each API server an ID in a cluster.
+- `APIServerTracing`: Add support for distributed tracing in the API server.
- `Accelerators`: Enable Nvidia GPU support when using Docker - `AdvancedAuditing`: Enable [advanced auditing](/docs/tasks/debug-application-cluster/audit/#advanced-audit) - `AffinityInAnnotations`: Enable setting @@ -477,8 +560,14 @@ Each feature gate is designed for enabling/disabling a specific feature: extended tokens by starting `kube-apiserver` with flag `--service-account-extend-token-expiration=false`. Check [Bound Service Account Tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md) for more details. +- `ControllerManagerLeaderMigration`: Enables Leader Migration for + [kube-controller-manager](/docs/tasks/administer-cluster/controller-manager-leader-migration/#initial-leader-migration-configuration) and + [cloud-controller-manager](/docs/tasks/administer-cluster/controller-manager-leader-migration/#deploy-cloud-controller-manager) which allows a cluster operator to live migrate + controllers from the kube-controller-manager into an external controller-manager + (e.g. the cloud-controller-manager) in an HA cluster without downtime. - `CPUManager`: Enable container level CPU affinity support, see [CPU Management Policies](/docs/tasks/administer-cluster/cpu-management-policies/). +- `CPUManagerPolicyOptions`: Allow fine-tuning of CPUManager policies. - `CRIContainerLogRotation`: Enable container log rotation for CRI container runtime. The default max size of a log file is 10MB and the default max number of log files allowed for a container is 5. These values can be configured in the kubelet config. See the [logging at node level](/docs/concepts/cluster-administration/logging/#logging-at-the-node-level) documentation for more details. @@ -498,7 +587,9 @@ Each feature gate is designed for enabling/disabling a specific feature: kubelet and volume controllers and enables shims and translation logic to route volume operations from the AWS-EBS in-tree plugin to EBS CSI plugin. 
Requires CSIMigration and CSIMigrationAWS feature flags enabled and EBS CSI - plugin installed and configured on all nodes in the cluster. + plugin installed and configured on all nodes in the cluster. This flag has + been deprecated in favor of the `InTreePluginAWSUnregister` feature flag + which prevents the registration of in-tree EBS plugin. - `CSIMigrationAzureDisk`: Enables shims and translation logic to route volume operations from the Azure-Disk in-tree plugin to AzureDisk CSI plugin. Supports falling back to in-tree AzureDisk plugin if a node does not have @@ -509,7 +600,8 @@ Each feature gate is designed for enabling/disabling a specific feature: logic to route volume operations from the Azure-Disk in-tree plugin to AzureDisk CSI plugin. Requires CSIMigration and CSIMigrationAzureDisk feature flags enabled and AzureDisk CSI plugin installed and configured on all nodes - in the cluster. + in the cluster. This flag has been deprecated in favor of the + `InTreePluginAzureDiskUnregister` feature flag which prevents the registration of in-tree AzureDisk plugin. - `CSIMigrationAzureFile`: Enables shims and translation logic to route volume operations from the Azure-File in-tree plugin to AzureFile CSI plugin. Supports falling back to in-tree AzureFile plugin if a node does not have @@ -520,7 +612,9 @@ Each feature gate is designed for enabling/disabling a specific feature: logic to route volume operations from the Azure-File in-tree plugin to AzureFile CSI plugin. Requires CSIMigration and CSIMigrationAzureFile feature flags enabled and AzureFile CSI plugin installed and configured on all nodes - in the cluster. + in the cluster. This flag has been deprecated in favor of the + `InTreePluginAzureFileUnregister` feature flag which prevents the registration + of in-tree AzureFile plugin. - `CSIMigrationGCE`: Enables shims and translation logic to route volume operations from the GCE-PD in-tree plugin to PD CSI plugin. 
Supports falling back to in-tree GCE plugin if a node does not have PD CSI plugin installed and @@ -529,7 +623,8 @@ Each feature gate is designed for enabling/disabling a specific feature: kubelet and volume controllers and enables shims and translation logic to route volume operations from the GCE-PD in-tree plugin to PD CSI plugin. Requires CSIMigration and CSIMigrationGCE feature flags enabled and PD CSI - plugin installed and configured on all nodes in the cluster. + plugin installed and configured on all nodes in the cluster. This flag has + been deprecated in favor of the `InTreePluginGCEUnregister` feature flag which prevents the registration of in-tree GCE PD plugin. - `CSIMigrationOpenStack`: Enables shims and translation logic to route volume operations from the Cinder in-tree plugin to Cinder CSI plugin. Supports falling back to in-tree Cinder plugin if a node does not have Cinder CSI @@ -538,7 +633,8 @@ Each feature gate is designed for enabling/disabling a specific feature: kubelet and volume controllers and enables shims and translation logic to route volume operations from the Cinder in-tree plugin to Cinder CSI plugin. Requires CSIMigration and CSIMigrationOpenStack feature flags enabled and Cinder - CSI plugin installed and configured on all nodes in the cluster. + CSI plugin installed and configured on all nodes in the cluster. This flag has + been deprecated in favor of the `InTreePluginOpenStackUnregister` feature flag which prevents the registration of in-tree openstack cinder plugin. - `CSIMigrationvSphere`: Enables shims and translation logic to route volume operations from the vSphere in-tree plugin to vSphere CSI plugin. Supports falling back to in-tree vSphere plugin if a node does not have vSphere @@ -547,7 +643,8 @@ Each feature gate is designed for enabling/disabling a specific feature: and volume controllers and enables shims and translation logic to route volume operations from the vSphere in-tree plugin to vSphere CSI plugin. 
Requires CSIMigration and CSIMigrationvSphere feature flags enabled and vSphere CSI plugin installed and - configured on all nodes in the cluster. + configured on all nodes in the cluster. This flag has been deprecated in favor + of the `InTreePluginvSphereUnregister` feature flag which prevents the registration of in-tree vsphere plugin. - `CSINodeInfo`: Enable all logic related to the CSINodeInfo API object in csi.storage.k8s.io. - `CSIPersistentVolume`: Enable discovering and mounting volumes provisioned through a [CSI (Container Storage Interface)](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/container-storage-interface.md) @@ -563,14 +660,17 @@ Each feature gate is designed for enabling/disabling a specific feature: This field controls whether volumes created by a CSIDriver support volume ownership and permission modifications when these volumes are mounted. - `CSIVolumeHealth`: Enable support for CSI volume health monitoring on node. +- `CSRDuration`: Allows clients to request a duration for certificates issued + via the Kubernetes CSR API. - `ConfigurableFSGroupPolicy`: Allows user to configure volume permission change policy for fsGroups when mounting a volume in a Pod. See [Configure volume permission and ownership change policy for Pods](/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods) for more details. +- `ControllerManagerLeaderMigration`: Enables leader migration for + `kube-controller-manager` and `cloud-controller-manager`. - `CronJobControllerV2`: Use an alternative implementation of the {{< glossary_tooltip text="CronJob" term_id="cronjob" >}} controller. Otherwise, version 1 of the same controller is selected. - The version 2 controller provides experimental performance improvements. - `CustomCPUCFSQuotaPeriod`: Enable nodes to change `cpuCFSQuotaPeriod` in [kubelet config](/docs/tasks/administer-cluster/kubelet-config-file/). 
- `CustomPodDNS`: Enable customizing the DNS settings for a Pod using its `dnsConfig` property. @@ -584,12 +684,20 @@ Each feature gate is designed for enabling/disabling a specific feature: [CustomResourceDefinition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/). - `CustomResourceWebhookConversion`: Enable webhook-based conversion on resources created from [CustomResourceDefinition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/). +- `DaemonSetUpdateSurge`: Enables the DaemonSet workloads to maintain + availability during update per node. - `DefaultPodTopologySpread`: Enables the use of `PodTopologySpread` scheduling plugin to do [default spreading](/docs/concepts/workloads/pods/pod-topology-spread-constraints/#internal-default-constraints). +- `DelegateFSGroupToCSIDriver`: If supported by the CSI driver, delegates the + role of applying `fsGroup` from a Pod's `securityContext` to the driver by + passing `fsGroup` through the NodeStageVolume and NodePublishVolume CSI calls. - `DevicePlugins`: Enable the [device-plugins](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) based resource provisioning on nodes. - `DisableAcceleratorUsageMetrics`: [Disable accelerator metrics collected by the kubelet](/docs/concepts/cluster-administration/system-metrics/#disable-accelerator-metrics). +- `DisableCloudProviders`: Disables any functionality in `kube-apiserver`, + `kube-controller-manager` and `kubelet` related to the `--cloud-provider` + component flag. - `DownwardAPIHugePages`: Enables usage of hugepages in [downward API](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information). - `DryRun`: Enable server-side [dry run](/docs/reference/using-api/api-concepts/#dry-run) requests @@ -628,6 +736,9 @@ Each feature gate is designed for enabling/disabling a specific feature: now-corrected fault where Kubernetes ignored exec probe timeouts. 
See [readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). - `ExpandCSIVolumes`: Enable the expanding of CSI volumes. +- `ExpandedDNSConfig`: Enable kubelet and kube-apiserver to allow more DNS + search paths and longer list of DNS search paths. See + [Expanded DNS Configuration](/docs/concepts/services-networking/dns-pod-service/#expanded-dns-configuration). - `ExpandInUsePersistentVolumes`: Enable expanding in-use PVCs. See [Resizing an in-use PersistentVolumeClaim](/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim). - `ExpandPersistentVolumes`: Enable the expanding of persistent volumes. See @@ -664,6 +775,18 @@ Each feature gate is designed for enabling/disabling a specific feature: for Windows containers. - `ImmutableEphemeralVolumes`: Allows for marking individual Secrets and ConfigMaps as immutable for better safety and performance. +- `InTreePluginAWSUnregister`: Stops registering the aws-ebs in-tree plugin in kubelet + and volume controllers. +- `InTreePluginAzureDiskUnregister`: Stops registering the azuredisk in-tree plugin in kubelet + and volume controllers. +- `InTreePluginAzureFileUnregister`: Stops registering the azurefile in-tree plugin in kubelet + and volume controllers. +- `InTreePluginGCEUnregister`: Stops registering the gce-pd in-tree plugin in kubelet + and volume controllers. +- `InTreePluginOpenStackUnregister`: Stops registering the OpenStack cinder in-tree plugin in kubelet + and volume controllers. +- `InTreePluginvSphereUnregister`: Stops registering the vSphere in-tree plugin in kubelet + and volume controllers. - `IndexedJob`: Allows the [Job](/docs/concepts/workloads/controllers/job/) controller to manage Pod completions per completion index. 
- `IngressClassNamespacedParams`: Allow namespace-scoped parameters reference in @@ -673,11 +796,17 @@ Each feature gate is designed for enabling/disabling a specific feature: Initializers admission plugin. - `IPv6DualStack`: Enable [dual stack](/docs/concepts/services-networking/dual-stack/) support for IPv6. +- `JobTrackingWithFinalizers`: Enables tracking [Job](/docs/concepts/workloads/controllers/job) + completions without relying on Pods remaining in the cluster indefinitely. + The Job controller uses Pod finalizers and a field in the Job status to keep + track of the finished Pods to count towards completion. - `KubeletConfigFile`: Enable loading kubelet configuration from a file specified using a config file. See [setting kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file/) for more details. - `KubeletCredentialProviders`: Enable kubelet exec credential providers for image pull credentials. +- `KubeletInUserNamespace`: Enables support for running kubelet in a {{}}. + See [Running Kubernetes Node Components as a Non-root User](/docs/tasks/administer-cluster/kubelet-in-userns/). - `KubeletPluginsWatcher`: Enable probe-based plugin watcher utility to enable kubelet to discover plugins such as [CSI volume drivers](/docs/concepts/storage/volumes/#csi). - `KubeletPodResources`: Enable the kubelet's pod resources gRPC endpoint. See @@ -702,6 +831,9 @@ Each feature gate is designed for enabling/disabling a specific feature: filesystem walk for better performance and accuracy. - `LogarithmicScaleDown`: Enable semi-random selection of pods to evict on controller scaledown based on logarithmic bucketing of pod timestamps. +- `MemoryManager`: Allows setting memory affinity for a container based on + NUMA topology. +- `MemoryQoS`: Enable memory protection and usage throttle on pod / container using cgroup v2 memory controller. 
- `MixedProtocolLBService`: Enable using different protocols in the same `LoadBalancer` type Service instance. - `MountContainers`: Enable using utility containers on host as the volume mounter. @@ -713,6 +845,9 @@ Each feature gate is designed for enabling/disabling a specific feature: - `NodeDisruptionExclusion`: Enable use of the Node label `node.kubernetes.io/exclude-disruption` which prevents nodes from being evacuated during zone failures. - `NodeLease`: Enable the new Lease API to report node heartbeats, which could be used as a node health signal. +- `NodeSwap`: Enable the kubelet to allocate swap memory for Kubernetes workloads on a node. + Must be used with `KubeletConfiguration.failSwapOn` set to false. + For more details, please see [swap memory](/docs/concepts/architecture/nodes/#swap-memory) - `NonPreemptingPriority`: Enable `preemptionPolicy` field for PriorityClass and Pod. - `PVCProtection`: Enable the prevention of a PersistentVolumeClaim (PVC) from being deleted when it is still used by any Pod. @@ -726,21 +861,29 @@ Each feature gate is designed for enabling/disabling a specific feature: - `PodOverhead`: Enable the [PodOverhead](/docs/concepts/scheduling-eviction/pod-overhead/) feature to account for pod overheads. - `PodPriority`: Enable the descheduling and preemption of Pods based on their - [priorities](/docs/concepts/configuration/pod-priority-preemption/). + [priorities](/docs/concepts/scheduling-eviction/pod-priority-preemption/). - `PodReadinessGates`: Enable the setting of `PodReadinessGate` field for extending Pod readiness evaluation. See [Pod readiness gate](/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate) for more details. +- `PodSecurity`: Enables the `PodSecurity` admission plugin. - `PodShareProcessNamespace`: Enable the setting of `shareProcessNamespace` in a Pod for sharing a single process namespace between containers running in a pod. 
More details can be found in [Share Process Namespace between Containers in a Pod](/docs/tasks/configure-pod-container/share-process-namespace/). +- `PreferNominatedNode`: This flag tells the scheduler whether the nominated + nodes will be checked first before looping through all the other nodes in + the cluster. - `ProbeTerminationGracePeriod`: Enable [setting probe-level `terminationGracePeriodSeconds`](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#probe-level-terminationgraceperiodseconds) on pods. See the [enhancement proposal](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2238-liveness-probe-grace-period) for more details. - `ProcMountType`: Enables control over the type proc mounts for containers by setting the `procMount` field of a SecurityContext. +- `ProxyTerminatingEndpoints`: Enable the kube-proxy to handle terminating + endpoints when `ExternalTrafficPolicy=Local`. - `QOSReserved`: Allows resource reservations at the QoS level preventing pods at lower QoS levels from bursting into resources requested at higher QoS levels (memory only for now). +- `ReadWriteOncePod`: Enables the usage of `ReadWriteOncePod` PersistentVolume + access mode. - `RemainingItemCount`: Allow the API servers to show a count of remaining items in the response to a [chunking list request](/docs/reference/using-api/api-concepts/#retrieving-large-results-sets-in-chunks). @@ -772,6 +915,10 @@ Each feature gate is designed for enabling/disabling a specific feature: instead of the DaemonSet controller. - `SCTPSupport`: Enables the _SCTP_ `protocol` value in Pod, Service, Endpoints, EndpointSlice, and NetworkPolicy definitions. +- `SeccompDefault`: Enables the use of `RuntimeDefault` as the default seccomp profile for all workloads. + The seccomp profile is specified in the `securityContext` of a Pod and/or a Container. 
+- `SelectorIndex`: Allows label and field based indexes in API server watch + cache to accelerate list operations. - `ServerSideApply`: Enables the [Sever Side Apply (SSA)](/docs/reference/using-api/server-side-apply/) feature on the API Server. - `ServiceAccountIssuerDiscovery`: Enable OIDC discovery endpoints (issuer and @@ -799,6 +946,8 @@ Each feature gate is designed for enabling/disabling a specific feature: - `StartupProbe`: Enable the [startup](/docs/concepts/workloads/pods/pod-lifecycle/#when-should-you-use-a-startup-probe) probe in the kubelet. +- `StatefulSetMinReadySeconds`: Allows `minReadySeconds` to be respected by + the StatefulSet controller. - `StorageObjectInUseProtection`: Postpone the deletion of PersistentVolume or PersistentVolumeClaim objects if they are still being used. - `StorageVersionAPI`: Enable the @@ -859,15 +1008,16 @@ Each feature gate is designed for enabling/disabling a specific feature: - `WatchBookmark`: Enable support for watch bookmark events. - `WinDSR`: Allows kube-proxy to create DSR loadbalancers for Windows. - `WinOverlay`: Allows kube-proxy to run in overlay mode for Windows. -- `WindowsGMSA`: Enables passing of GMSA credential specs from pods to container runtimes. -- `WindowsRunAsUserName` : Enable support for running applications in Windows containers - with as a non-default user. See - [Configuring RunAsUserName](/docs/tasks/configure-pod-container/configure-runasusername) - for more details. - `WindowsEndpointSliceProxying`: When enabled, kube-proxy running on Windows will use EndpointSlices as the primary data source instead of Endpoints, enabling scalability and performance improvements. See [Enabling Endpoint Slices](/docs/tasks/administer-cluster/enabling-endpointslices/). +- `WindowsGMSA`: Enables passing of GMSA credential specs from pods to container runtimes. +- `WindowsHostProcessContainers`: Enables support for Windows HostProcess containers. 
+- `WindowsRunAsUserName` : Enable support for running applications in Windows containers + with as a non-default user. See + [Configuring RunAsUserName](/docs/tasks/configure-pod-container/configure-runasusername) + for more details. ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md index 9a824fd834..77b354dc70 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md @@ -477,7 +477,7 @@ kube-apiserver [flags] - + @@ -498,7 +498,7 @@ kube-apiserver [flags] - + @@ -638,7 +638,7 @@ kube-apiserver [flags] - + @@ -771,7 +771,7 @@ kube-apiserver [flags] - + @@ -799,14 +799,14 @@ kube-apiserver [flags] - + - + @@ -985,10 +985,10 @@ kube-apiserver [flags] - + - + @@ -1138,6 +1138,13 @@ kube-apiserver [flags] + + + + + + + diff --git a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md index 29e7b1ec8e..a8389c69f0 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md @@ -208,7 +208,7 @@ kube-controller-manager [flags] - + @@ -474,7 +474,7 @@ kube-controller-manager [flags] - + @@ -663,7 +663,7 @@ kube-controller-manager [flags] - + diff --git a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md index dc236b02e9..3306668093 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md @@ -179,7 +179,7 @@ kube-proxy [flags] - + diff --git 
a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md index 45d8cae73a..621bac8aa2 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md @@ -27,7 +27,7 @@ each Pod in the scheduling queue according to constraints and available resources. The scheduler then ranks each valid Node and binds the Pod to a suitable Node. Multiple different schedulers may be used within a cluster; kube-scheduler is the reference implementation. -See [scheduling](/docs/concepts/scheduling-eviction/) +See [scheduling](https://kubernetes.io/docs/concepts/scheduling-eviction/) for more information about scheduling and the kube-scheduler component. ``` @@ -51,19 +51,12 @@ kube-scheduler [flags] - + - - - - - - - @@ -166,11 +159,11 @@ kube-scheduler [flags] - + - + @@ -194,14 +187,7 @@ kube-scheduler [flags] - - - - - - - - + @@ -219,21 +205,21 @@ kube-scheduler [flags] - + - + - + @@ -247,63 +233,63 @@ kube-scheduler [flags] - + - + - + - + - + - + - + - + - + @@ -348,7 +334,7 @@ kube-scheduler [flags] - + @@ -390,32 +376,32 @@ kube-scheduler [flags] - + - + - + - + - + @@ -456,13 +442,6 @@ kube-scheduler [flags] - - - - - - - diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md b/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md index 1b1142913f..5d2458079e 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md @@ -10,7 +10,7 @@ content_type: concept -In a Kubernetes cluster, the components on the worker nodes - kubelet and kube-proxy - need to communicate with Kubernetes master components, specifically kube-apiserver. 
+In a Kubernetes cluster, the components on the worker nodes - kubelet and kube-proxy - need to communicate with Kubernetes control plane components, specifically kube-apiserver. In order to ensure that communication is kept private, not interfered with, and ensure that each component of the cluster is talking to another trusted component, we strongly recommend using client TLS certificates on nodes. @@ -18,7 +18,7 @@ The normal process of bootstrapping these components, especially worker nodes th can be a challenging process as it is often outside of the scope of Kubernetes and requires significant additional work. This in turn, can make it challenging to initialize or scale a cluster. -In order to simplify the process, beginning in version 1.4, Kubernetes introduced a certificate request and signing API to simplify the process. The proposal can be +In order to simplify the process, beginning in version 1.4, Kubernetes introduced a certificate request and signing API. The proposal can be found [here](https://github.com/kubernetes/kubernetes/pull/20439). This document describes the process of node initialization, how to set up TLS client certificate bootstrapping for @@ -44,7 +44,7 @@ Note that the above process depends upon: All of the following are responsibilities of whoever sets up and manages the cluster: 1. Creating the CA key and certificate -2. Distributing the CA certificate to the master nodes, where kube-apiserver is running +2. Distributing the CA certificate to the control plane nodes, where kube-apiserver is running 3. Creating a key and certificate for each kubelet; strongly recommended to have a unique one, with a unique CN, for each kubelet 4. Signing the kubelet certificate using the CA key 5. Distributing the kubelet key and signed certificate to the specific node on which the kubelet is running @@ -90,9 +90,9 @@ In addition, you need your Kubernetes Certificate Authority (CA). 
## Certificate Authority As without bootstrapping, you will need a Certificate Authority (CA) key and certificate. As without bootstrapping, these will be used -to sign the kubelet certificate. As before, it is your responsibility to distribute them to master nodes. +to sign the kubelet certificate. As before, it is your responsibility to distribute them to control plane nodes. -For the purposes of this document, we will assume these have been distributed to master nodes at `/var/lib/kubernetes/ca.pem` (certificate) and `/var/lib/kubernetes/ca-key.pem` (key). +For the purposes of this document, we will assume these have been distributed to control plane nodes at `/var/lib/kubernetes/ca.pem` (certificate) and `/var/lib/kubernetes/ca-key.pem` (key). We will refer to these as "Kubernetes CA certificate and key". All Kubernetes components that use these certificates - kubelet, kube-apiserver, kube-controller-manager - assume the key and certificate to be PEM-encoded. @@ -167,7 +167,7 @@ If you want to use bootstrap tokens, you must enable it on kube-apiserver with t #### Token authentication file -kube-apiserver has an ability to accept tokens as authentication. +kube-apiserver has the ability to accept tokens as authentication. These tokens are arbitrary but should represent at least 128 bits of entropy derived from a secure random number generator (such as `/dev/urandom` on most modern Linux systems). There are multiple ways you can generate a token. For example: @@ -234,7 +234,7 @@ In order for the controller-manager to sign certificates, it needs the following ### Access to key and certificate -As described earlier, you need to create a Kubernetes CA key and certificate, and distribute it to the master nodes. +As described earlier, you need to create a Kubernetes CA key and certificate, and distribute it to the control plane nodes. These will be used by the controller-manager to sign the kubelet certificates. 
Since these signed certificates will, in turn, be used by the kubelet to authenticate as a regular kubelet to kube-apiserver, it is important that the CA @@ -319,7 +319,7 @@ collection. ## kubelet configuration -Finally, with the master nodes properly set up and all of the necessary authentication and authorization in place, we can configure the kubelet. +Finally, with the control plane nodes properly set up and all of the necessary authentication and authorization in place, we can configure the kubelet. The kubelet requires the following configuration to bootstrap: diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet.md b/content/en/docs/reference/command-line-tools-reference/kubelet.md index 66eb5785de..0531f0847a 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet.md @@ -6,31 +6,33 @@ weight: 28 ## {{% heading "synopsis" %}} - -The kubelet is the primary "node agent" that runs on each -node. It can register the node with the apiserver using one of: the hostname; a flag to override the hostname; or specific logic for a cloud provider. +The kubelet is the primary "node agent" that runs on each node. It can +register the node with the apiserver using one of: the hostname; a flag to +override the hostname; or specific logic for a cloud provider. The kubelet works in terms of a PodSpec. A PodSpec is a YAML or JSON object -that describes a pod. The kubelet takes a set of PodSpecs that are provided through various mechanisms (primarily through the apiserver) and ensures that the containers described in those PodSpecs are running and healthy. The kubelet doesn't manage containers which were not created by Kubernetes. +that describes a pod. The kubelet takes a set of PodSpecs that are provided +through various mechanisms (primarily through the apiserver) and ensures that +the containers described in those PodSpecs are running and healthy. 
The +kubelet doesn't manage containers which were not created by Kubernetes. -Other than from a PodSpec from the apiserver, there are three ways that a container manifest can be provided to the Kubelet. +Other than from a PodSpec from the apiserver, there are three ways that a +container manifest can be provided to the Kubelet. -File: Path passed as a flag on the command line. Files under this path will be monitored periodically for updates. The monitoring period is 20s by default and is configurable via a flag. - -HTTP endpoint: HTTP endpoint passed as a parameter on the command line. This endpoint is checked every 20 seconds (also configurable with a flag). - -HTTP server: The kubelet can also listen for HTTP and respond to a simple API (underspec'd currently) to submit a new manifest. +- File: Path passed as a flag on the command line. Files under this path will be + monitored periodically for updates. The monitoring period is 20s by default + and is configurable via a flag. +- HTTP endpoint: HTTP endpoint passed as a parameter on the command line. This + endpoint is checked every 20 seconds (also configurable with a flag). +- HTTP server: The kubelet can also listen for HTTP and respond to a simple API + (underspec'd currently) to submit a new manifest. ``` kubelet [flags] ``` - - - ## {{% heading "options" %}} -
Restricted policy specification
Everything from the baseline profile.
Volume TypesVolume Types - In addition to restricting HostPath volumes, the restricted profile limits usage of non-core volume types to those defined through PersistentVolumes.
-
Restricted Fields:
- spec.volumes[*].hostPath
- spec.volumes[*].gcePersistentDisk
- spec.volumes[*].awsElasticBlockStore
- spec.volumes[*].gitRepo
- spec.volumes[*].nfs
- spec.volumes[*].iscsi
- spec.volumes[*].glusterfs
- spec.volumes[*].rbd
- spec.volumes[*].flexVolume
- spec.volumes[*].cinder
- spec.volumes[*].cephFS
- spec.volumes[*].flocker
- spec.volumes[*].fc
- spec.volumes[*].azureFile
- spec.volumes[*].vsphereVolume
- spec.volumes[*].quobyte
- spec.volumes[*].azureDisk
- spec.volumes[*].portworxVolume
- spec.volumes[*].scaleIO
- spec.volumes[*].storageos
- spec.volumes[*].csi
-
Allowed Values: undefined/nil
+

In addition to restricting HostPath volumes, the restricted policy limits usage of non-core volume types to those defined through PersistentVolumes.

+

Restricted Fields

+
    +
  • spec.volumes[*].hostPath
  • +
  • spec.volumes[*].gcePersistentDisk
  • +
  • spec.volumes[*].awsElasticBlockStore
  • +
  • spec.volumes[*].gitRepo
  • +
  • spec.volumes[*].nfs
  • +
  • spec.volumes[*].iscsi
  • +
  • spec.volumes[*].glusterfs
  • +
  • spec.volumes[*].rbd
  • +
  • spec.volumes[*].flexVolume
  • +
  • spec.volumes[*].cinder
  • +
  • spec.volumes[*].cephfs
  • +
  • spec.volumes[*].flocker
  • +
  • spec.volumes[*].fc
  • +
  • spec.volumes[*].azureFile
  • +
  • spec.volumes[*].vsphereVolume
  • +
  • spec.volumes[*].quobyte
  • +
  • spec.volumes[*].azureDisk
  • +
  • spec.volumes[*].portworxVolume
  • +
  • spec.volumes[*].scaleIO
  • +
  • spec.volumes[*].storageos
  • +
  • spec.volumes[*].photonPersistentDisk
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
Privilege EscalationPrivilege Escalation (v1.8+) - Privilege escalation (such as via set-user-ID or set-group-ID file mode) should not be allowed.
-
Restricted Fields:
- spec.containers[*].securityContext.allowPrivilegeEscalation
- spec.initContainers[*].securityContext.allowPrivilegeEscalation
-
Allowed Values: false
+

Privilege escalation (such as via set-user-ID or set-group-ID file mode) should not be allowed.

+

Restricted Fields

+
    +
  • spec.containers[*].securityContext.allowPrivilegeEscalation
  • +
  • spec.initContainers[*].securityContext.allowPrivilegeEscalation
  • +
  • spec.ephemeralContainers[*].securityContext.allowPrivilegeEscalation
  • +
+

Allowed Values

+
    +
  • false
  • +
Running as Non-rootRunning as Non-root - Containers must be required to run as non-root users.
-
Restricted Fields:
- spec.securityContext.runAsNonRoot
- spec.containers[*].securityContext.runAsNonRoot
- spec.initContainers[*].securityContext.runAsNonRoot
-
Allowed Values: true
+

Containers must be required to run as non-root users.

+

Restricted Fields

+
    +
  • spec.securityContext.runAsNonRoot
  • +
  • spec.containers[*].securityContext.runAsNonRoot
  • +
  • spec.initContainers[*].securityContext.runAsNonRoot
  • +
  • spec.ephemeralContainers[*].securityContext.runAsNonRoot
  • +
+

Allowed Values

+
    +
  • true
  • +
+ + The container fields may be undefined/nil if the pod-level + spec.securityContext.runAsNonRoot is set to true. +
Non-root groups (optional)Non-root groups (optional) - Containers should be forbidden from running with a root primary or supplementary GID.
-
Restricted Fields:
- spec.securityContext.runAsGroup
- spec.securityContext.supplementalGroups[*]
- spec.securityContext.fsGroup
- spec.containers[*].securityContext.runAsGroup
- spec.initContainers[*].securityContext.runAsGroup
-
Allowed Values:
- non-zero
- undefined / nil (except for `*.runAsGroup`)
+

Containers should be forbidden from running with a root primary or supplementary GID.

+

Restricted Fields

+
    +
  • spec.securityContext.runAsGroup
  • +
  • spec.securityContext.supplementalGroups[*]
  • +
  • spec.securityContext.fsGroup
  • +
  • spec.containers[*].securityContext.runAsGroup
  • +
  • spec.initContainers[*].securityContext.runAsGroup
  • +
  • spec.ephemeralContainers[*].securityContext.runAsGroup
  • +
+

Allowed Values

+
    +
  • Undefined/nil (except for *.runAsGroup)
  • +
  • Non-zero
  • +
Seccomp (v1.19+)

Seccomp profile must be explicitly set to one of the allowed values. Both the Unconfined profile and the absence of a profile are prohibited.

+

Restricted Fields

+
    +
  • spec.securityContext.seccompProfile.type
  • +
  • spec.containers[*].securityContext.seccompProfile.type
  • +
  • spec.initContainers[*].securityContext.seccompProfile.type
  • +
  • spec.ephemeralContainers[*].securityContext.seccompProfile.type
  • +
+

Allowed Values

+
    +
  • RuntimeDefault
  • +
  • Localhost
  • +
The container fields may be undefined/nil if the pod-level spec.securityContext.seccompProfile.type field is set appropriately. Conversely, the pod-level field may be undefined/nil if all container-level fields are set.
Capabilities (v1.22+)
- The RuntimeDefault seccomp profile must be required, or allow specific additional profiles.
-
Restricted Fields:
- spec.securityContext.seccompProfile.type
- spec.containers[*].securityContext.seccompProfile
- spec.initContainers[*].securityContext.seccompProfile
-
Allowed Values:
- 'runtime/default'
- undefined / nil
+

Containers must drop ALL capabilities, and are only permitted to add back the NET_BIND_SERVICE capability.

+

Restricted Fields

+
    +
  • spec.containers[*].securityContext.capabilities.drop
  • +
  • spec.initContainers[*].securityContext.capabilities.drop
  • +
  • spec.ephemeralContainers[*].securityContext.capabilities.drop
  • +
+

Allowed Values

+
    +
  • Any list of capabilities that includes ALL
  • +
+
+

Restricted Fields

+
    +
  • spec.containers[*].securityContext.capabilities.add
  • +
  • spec.initContainers[*].securityContext.capabilities.add
  • +
  • spec.ephemeralContainers[*].securityContext.capabilities.add
  • +
+

Allowed Values

+
    +
  • Undefined/nil
  • +
  • NET_BIND_SERVICE
  • +
admin (no aggregation label) — Allows admin access, intended to be granted within a namespace using a RoleBinding. If used in a RoleBinding, allows read/write access to most resources in a namespace, including the ability to create roles and role bindings within the namespace. This role does not allow write access to resource quota or to the namespace itself.
edit
view--disable-admission-plugins strings

admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.

admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, PodSecurity, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurity, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.

--enable-admission-plugins strings

admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.

admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, PodSecurity, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurity, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.

--feature-gates <comma-separated 'key=True|False' pairs>

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (BETA - default=true)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (ALPHA - default=false)
CronJobControllerV2=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (BETA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (ALPHA - default=false)
IngressClassNamespacedParams=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (ALPHA - default=false)
MemoryManager=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NamespaceDefaultLabelName=true|false (BETA - default=true)
NetworkPolicyEndPort=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (ALPHA - default=false)
PodDeletionCost=true|false (ALPHA - default=false)
PodOverhead=true|false (BETA - default=true)
PreferNominatedNode=true|false (ALPHA - default=false)
ProbeTerminationGracePeriod=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceInternalTrafficPolicy=true|false (ALPHA - default=false)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceLoadBalancerClass=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (ALPHA - default=false)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (BETA - default=true)

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
APIServerTracing=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CPUManagerPolicyOptions=true|false (ALPHA - default=false)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
CSRDuration=true|false (BETA - default=true)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (BETA - default=true)
DefaultPodTopologySpread=true|false (BETA - default=true)
DelegateFSGroupToCSIDriver=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DisableCloudProviders=true|false (ALPHA - default=false)
DownwardAPIHugePages=true|false (BETA - default=false)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (BETA - default=true)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExpandedDNSConfig=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (BETA - default=true)
IngressClassNamespacedParams=true|false (BETA - default=true)
JobTrackingWithFinalizers=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletInUserNamespace=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (BETA - default=true)
MemoryManager=true|false (BETA - default=true)
MemoryQoS=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NetworkPolicyEndPort=true|false (BETA - default=true)
NodeSwap=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (BETA - default=true)
PodDeletionCost=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
PodSecurity=true|false (ALPHA - default=false)
PreferNominatedNode=true|false (BETA - default=true)
ProbeTerminationGracePeriod=true|false (BETA - default=false)
ProcMountType=true|false (ALPHA - default=false)
ProxyTerminatingEndpoints=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ReadWriteOncePod=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
SeccompDefault=true|false (ALPHA - default=false)
ServiceInternalTrafficPolicy=true|false (BETA - default=true)
ServiceLBNodePortControl=true|false (BETA - default=true)
ServiceLoadBalancerClass=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (BETA - default=true)
StatefulSetMinReadySeconds=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (BETA - default=true)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsHostProcessContainers=true|false (ALPHA - default=false)

--logging-format string     Default: "text"

Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --one-output, --skip-headers, --skip-log-headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

Sets the log format. Permitted formats: "text".
Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --one-output, --skip-headers, --skip-log-headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

--max-mutating-requests-inflight int     Default: 200

The maximum number of mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit.

This and --max-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of mutating requests in flight, or a zero value disables the limit completely.

--max-requests-inflight int     Default: 400

The maximum number of non-mutating requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit.

This and --max-mutating-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of non-mutating requests in flight, or a zero value disables the limit completely.

--service-account-issuer string--service-account-issuer strings

Identifier of the service account token issuer. The issuer will assert this identifier in "iss" claim of issued tokens. This value is a string or URI. If this option is not a valid URI per the OpenID Discovery 1.0 spec, the ServiceAccountIssuerDiscovery feature will remain disabled, even if the feature gate is set to true. It is highly recommended that this value comply with the OpenID spec: https://openid.net/specs/openid-connect-discovery-1_0.html. In practice, this means that service-account-issuer must be an https URL. It is also highly recommended that this URL be capable of serving OpenID discovery documents at {service-account-issuer}/.well-known/openid-configuration.

Identifier of the service account token issuer. The issuer will assert this identifier in "iss" claim of issued tokens. This value is a string or URI. If this option is not a valid URI per the OpenID Discovery 1.0 spec, the ServiceAccountIssuerDiscovery feature will remain disabled, even if the feature gate is set to true. It is highly recommended that this value comply with the OpenID spec: https://openid.net/specs/openid-connect-discovery-1_0.html. In practice, this means that service-account-issuer must be an https URL. It is also highly recommended that this URL be capable of serving OpenID discovery documents at {service-account-issuer}/.well-known/openid-configuration. When this flag is specified multiple times, the first is used to generate tokens and all are used to determine which issuers are accepted.

If set, the file that will be used to secure the secure port of the API server via token authentication.

--tracing-config-file string

File with apiserver tracing configuration.

-v, --v int
--cluster-signing-duration duration     Default: 8760h0m0s

The length of duration signed certificates will be given.

The max length of duration signed certificates will be given. Individual CSRs may request shorter certs by setting spec.expirationSeconds.

--feature-gates <comma-separated 'key=True|False' pairs>

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (BETA - default=true)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (ALPHA - default=false)
CronJobControllerV2=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (BETA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (ALPHA - default=false)
IngressClassNamespacedParams=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (ALPHA - default=false)
MemoryManager=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NamespaceDefaultLabelName=true|false (BETA - default=true)
NetworkPolicyEndPort=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (ALPHA - default=false)
PodDeletionCost=true|false (ALPHA - default=false)
PodOverhead=true|false (BETA - default=true)
PreferNominatedNode=true|false (ALPHA - default=false)
ProbeTerminationGracePeriod=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceInternalTrafficPolicy=true|false (ALPHA - default=false)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceLoadBalancerClass=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (ALPHA - default=false)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (BETA - default=true)

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
APIServerTracing=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CPUManagerPolicyOptions=true|false (ALPHA - default=false)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
CSRDuration=true|false (BETA - default=true)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (BETA - default=true)
DefaultPodTopologySpread=true|false (BETA - default=true)
DelegateFSGroupToCSIDriver=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DisableCloudProviders=true|false (ALPHA - default=false)
DownwardAPIHugePages=true|false (BETA - default=false)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (BETA - default=true)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExpandedDNSConfig=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (BETA - default=true)
IngressClassNamespacedParams=true|false (BETA - default=true)
JobTrackingWithFinalizers=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletInUserNamespace=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (BETA - default=true)
MemoryManager=true|false (BETA - default=true)
MemoryQoS=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NetworkPolicyEndPort=true|false (BETA - default=true)
NodeSwap=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (BETA - default=true)
PodDeletionCost=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
PodSecurity=true|false (ALPHA - default=false)
PreferNominatedNode=true|false (BETA - default=true)
ProbeTerminationGracePeriod=true|false (BETA - default=false)
ProcMountType=true|false (ALPHA - default=false)
ProxyTerminatingEndpoints=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ReadWriteOncePod=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
SeccompDefault=true|false (ALPHA - default=false)
ServiceInternalTrafficPolicy=true|false (BETA - default=true)
ServiceLBNodePortControl=true|false (BETA - default=true)
ServiceLoadBalancerClass=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (BETA - default=true)
StatefulSetMinReadySeconds=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (BETA - default=true)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsHostProcessContainers=true|false (ALPHA - default=false)

--logging-format string     Default: "text"

Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --one-output, --skip-headers, --skip-log-headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

Sets the log format. Permitted formats: "text".
Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --one-output, --skip-headers, --skip-log-headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

--feature-gates <comma-separated 'key=True|False' pairs>

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (BETA - default=true)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (ALPHA - default=false)
CronJobControllerV2=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (BETA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (ALPHA - default=false)
IngressClassNamespacedParams=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (ALPHA - default=false)
MemoryManager=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NamespaceDefaultLabelName=true|false (BETA - default=true)
NetworkPolicyEndPort=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (ALPHA - default=false)
PodDeletionCost=true|false (ALPHA - default=false)
PodOverhead=true|false (BETA - default=true)
PreferNominatedNode=true|false (ALPHA - default=false)
ProbeTerminationGracePeriod=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceInternalTrafficPolicy=true|false (ALPHA - default=false)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceLoadBalancerClass=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (ALPHA - default=false)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (BETA - default=true)

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
APIServerTracing=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CPUManagerPolicyOptions=true|false (ALPHA - default=false)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
CSRDuration=true|false (BETA - default=true)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (BETA - default=true)
DefaultPodTopologySpread=true|false (BETA - default=true)
DelegateFSGroupToCSIDriver=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DisableCloudProviders=true|false (ALPHA - default=false)
DownwardAPIHugePages=true|false (BETA - default=false)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (BETA - default=true)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExpandedDNSConfig=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (BETA - default=true)
IngressClassNamespacedParams=true|false (BETA - default=true)
JobTrackingWithFinalizers=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletInUserNamespace=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (BETA - default=true)
MemoryManager=true|false (BETA - default=true)
MemoryQoS=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NetworkPolicyEndPort=true|false (BETA - default=true)
NodeSwap=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (BETA - default=true)
PodDeletionCost=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
PodSecurity=true|false (ALPHA - default=false)
PreferNominatedNode=true|false (BETA - default=true)
ProbeTerminationGracePeriod=true|false (BETA - default=false)
ProcMountType=true|false (ALPHA - default=false)
ProxyTerminatingEndpoints=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ReadWriteOncePod=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
SeccompDefault=true|false (ALPHA - default=false)
ServiceInternalTrafficPolicy=true|false (BETA - default=true)
ServiceLBNodePortControl=true|false (BETA - default=true)
ServiceLoadBalancerClass=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (BETA - default=true)
StatefulSetMinReadySeconds=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (BETA - default=true)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsHostProcessContainers=true|false (ALPHA - default=false)

--address string     Default: "0.0.0.0"

DEPRECATED: the IP address on which to listen for the --port port (set to 0.0.0.0 or :: for listening in all interfaces and IP families). See --bind-address instead. This parameter is ignored if a config file is specified in --config.

--algorithm-provider string

DEPRECATED: the scheduling algorithm provider to use, this sets the default plugins for component config profiles. Choose one of: ClusterAutoscalerProvider | DefaultProvider

--allow-metric-labels stringToString     Default: []
--config string

The path to the configuration file. The following flags can overwrite fields in this file:
--algorithm-provider
--policy-config-file
--policy-configmap
--policy-configmap-namespace

The path to the configuration file. The following flags can overwrite fields in this file:
--policy-config-file
--policy-configmap
--policy-configmap-namespace

--contention-profiling     Default: true

DEPRECATED: enable lock contention profiling, if profiling is enabled. This parameter is ignored if a config file is specified in --config.

--feature-gates <comma-separated 'key=True|False' pairs>

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (BETA - default=true)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (ALPHA - default=false)
CronJobControllerV2=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (BETA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (ALPHA - default=false)
IngressClassNamespacedParams=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (ALPHA - default=false)
MemoryManager=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NamespaceDefaultLabelName=true|false (BETA - default=true)
NetworkPolicyEndPort=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (ALPHA - default=false)
PodDeletionCost=true|false (ALPHA - default=false)
PodOverhead=true|false (BETA - default=true)
PreferNominatedNode=true|false (ALPHA - default=false)
ProbeTerminationGracePeriod=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceInternalTrafficPolicy=true|false (ALPHA - default=false)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceLoadBalancerClass=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (ALPHA - default=false)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (BETA - default=true)

--hard-pod-affinity-symmetric-weight int32     Default: 1

DEPRECATED: RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule. Must be in the range 0-100. This parameter is ignored if a config file is specified in --config.

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
APIServerTracing=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CPUManagerPolicyOptions=true|false (ALPHA - default=false)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
CSRDuration=true|false (BETA - default=true)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (BETA - default=true)
DefaultPodTopologySpread=true|false (BETA - default=true)
DelegateFSGroupToCSIDriver=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DisableCloudProviders=true|false (ALPHA - default=false)
DownwardAPIHugePages=true|false (BETA - default=false)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (BETA - default=true)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExpandedDNSConfig=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (BETA - default=true)
IngressClassNamespacedParams=true|false (BETA - default=true)
JobTrackingWithFinalizers=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletInUserNamespace=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (BETA - default=true)
MemoryManager=true|false (BETA - default=true)
MemoryQoS=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NetworkPolicyEndPort=true|false (BETA - default=true)
NodeSwap=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (BETA - default=true)
PodDeletionCost=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
PodSecurity=true|false (ALPHA - default=false)
PreferNominatedNode=true|false (BETA - default=true)
ProbeTerminationGracePeriod=true|false (BETA - default=false)
ProcMountType=true|false (ALPHA - default=false)
ProxyTerminatingEndpoints=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ReadWriteOncePod=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
SeccompDefault=true|false (ALPHA - default=false)
ServiceInternalTrafficPolicy=true|false (BETA - default=true)
ServiceLBNodePortControl=true|false (BETA - default=true)
ServiceLoadBalancerClass=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (BETA - default=true)
StatefulSetMinReadySeconds=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (BETA - default=true)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsHostProcessContainers=true|false (ALPHA - default=false)

--kube-api-burst int32     Default: 100

DEPRECATED: burst to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.

--kube-api-content-type string     Default: "application/vnd.kubernetes.protobuf"

DEPRECATED: content type of requests sent to apiserver. This parameter is ignored if a config file is specified in --config.

--kube-api-qps float     Default: 50

DEPRECATED: QPS to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.

--leader-elect     Default: true

Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.

--leader-elect-lease-duration duration     Default: 15s

The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.

--leader-elect-renew-deadline duration     Default: 10s

The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.

--leader-elect-resource-lock string     Default: "leases"

The type of resource object that is used for locking during leader election. Supported options are 'endpoints', 'configmaps', 'leases', 'endpointsleases' and 'configmapsleases'.

--leader-elect-resource-name string     Default: "kube-scheduler"

The name of resource object that is used for locking during leader election.

--leader-elect-resource-namespace string     Default: "kube-system"

The namespace of resource object that is used for locking during leader election.

--leader-elect-retry-period duration     Default: 2s

The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.

--lock-object-name string     Default: "kube-scheduler"

DEPRECATED: define the name of the lock object. Will be removed in favor of leader-elect-resource-name. This parameter is ignored if a config file is specified in --config.

--lock-object-namespace string     Default: "kube-system"

DEPRECATED: define the namespace of the lock object. Will be removed in favor of leader-elect-resource-namespace. This parameter is ignored if a config file is specified in --config.

--logging-format string     Default: "text"

Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --one-output, --skip-headers, --skip-log-headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

Sets the log format. Permitted formats: "text".
Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --one-output, --skip-headers, --skip-log-headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

--policy-config-file string

DEPRECATED: file with scheduler policy configuration. This file is used if policy ConfigMap is not provided or --use-legacy-policy-config=true. Note: The scheduler will fail if this is combined with Plugin configs

DEPRECATED: file with scheduler policy configuration. This file is used if policy ConfigMap is not provided or --use-legacy-policy-config=true. Note: The predicates/priorities defined in this file will take precedence over any profiles defined in ComponentConfig.

--policy-configmap string

DEPRECATED: name of the ConfigMap object that contains scheduler's policy configuration. It must exist in the system namespace before scheduler initialization if --use-legacy-policy-config=false. The config must be provided as the value of an element in 'Data' map with the key='policy.cfg'. Note: The scheduler will fail if this is combined with Plugin configs

DEPRECATED: name of the ConfigMap object that contains scheduler's policy configuration. It must exist in the system namespace before scheduler initialization if --use-legacy-policy-config=false. The config must be provided as the value of an element in 'Data' map with the key='policy.cfg'. Note: The predicates/priorities defined in this file will take precedence over any profiles defined in ComponentConfig.

--policy-configmap-namespace string     Default: "kube-system"

DEPRECATED: the namespace where policy ConfigMap is located. The kube-system namespace will be used if this is not provided or is empty. Note: The scheduler will fail if this is combined with Plugin configs

DEPRECATED: the namespace where policy ConfigMap is located. The kube-system namespace will be used if this is not provided or is empty. Note: The predicates/priorities defined in this file will take precedence over any profiles defined in ComponentConfig.

--port int     Default: 10251

DEPRECATED: the port on which to serve HTTP insecurely without authentication and authorization. If 0, don't serve plain HTTP at all. See --secure-port instead. This parameter is ignored if a config file is specified in --config.

--profiling     Default: true

DEPRECATED: enable profiling via web interface host:port/debug/pprof/. This parameter is ignored if a config file is specified in --config.

List of request headers to inspect for usernames. X-Remote-User is common.

--scheduler-name string     Default: "default-scheduler"

DEPRECATED: name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's "spec.schedulerName". This parameter is ignored if a config file is specified in --config.

--secure-port int     Default: 10259
@@ -46,66 +48,66 @@ kubelet [flags] - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -119,49 +121,42 @@ kubelet [flags] - + - + - + - + - - + - + - - - - - - - - + - + @@ -182,35 +177,35 @@ kubelet [flags] - + - + - + - + - + - + - + - + @@ -224,28 +219,28 @@ kubelet [flags] - + - + - + - + - + - + - + @@ -253,133 +248,140 @@ kubelet [flags] - + - + - + - + - + - + - + - + - + - + - + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -390,176 +392,185 @@ kubelet [flags] - + - + - + - + - + - - + - + - + - + - + - +WindowsHostProcessContainers=true|false (ALPHA - default=false)
+(DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) - + - + - + - + - + - + - + @@ -573,21 +584,14 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
- + - + - - - - - - - - + @@ -608,42 +612,42 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
- + - + - + - + - + - + - + @@ -657,56 +661,56 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
- + - + - + - + - + - + - + - + - + - + @@ -717,10 +721,10 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
- + - + @@ -745,48 +749,48 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
- + - + - + - + - + - + - + - + - + @@ -796,176 +800,182 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
- + - + - + - + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -976,63 +986,63 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
- + - + - + - - - - - - - - + - + - + - + - + - + - + - + + + + + + + + @@ -1042,21 +1052,21 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
- + - + - + @@ -1067,39 +1077,46 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
- + - + - + - + + + + + + + - + - + - + - + @@ -1110,84 +1127,86 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
- + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -1205,26 +1224,25 @@ Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_R - + - + - + - + - + - +
--address ip     Default: 0.0.0.0 --address string     Default: 0.0.0.0
The IP address for the Kubelet to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces) (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)The IP address for the Kubelet to serve on (set to 0.0.0.0 or :: for listening in all interfaces and IP families) (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--allowed-unsafe-sysctls strings
Comma-separated whitelist of unsafe sysctls or unsafe sysctl patterns (ending in `*`). Use these at your own risk. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Comma-separated whitelist of unsafe sysctls or unsafe sysctl patterns (ending in *). Use these at your own risk. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--alsologtostderr
log to standard error as well as filesLog to standard error as well as files
--anonymous-auth     Default: true
Enables anonymous requests to the Kubelet server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of `system:anonymous`, and a group name of `system:unauthenticated`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Enables anonymous requests to the Kubelet server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--authentication-token-webhook
Use the `TokenReview` API to determine authentication for bearer tokens. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Use the TokenReview API to determine authentication for bearer tokens. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--authentication-token-webhook-cache-ttl duration     Default: `2m0s`--authentication-token-webhook-cache-ttl duration     Default: 2m0s
The duration to cache responses from the webhook token authenticator. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)The duration to cache responses from the webhook token authenticator. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--authorization-mode string
Authorization mode for Kubelet server. Valid options are `AlwaysAllow` or `Webhook`. `Webhook` mode uses the `SubjectAccessReview` API to determine authorization. (default "AlwaysAllow" when `--config` flag is not provided; "Webhook" when `--config` flag presents.) (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Authorization mode for Kubelet server. Valid options are AlwaysAllow or Webhook. Webhook mode uses the SubjectAccessReview API to determine authorization. Default AlwaysAllow when --config flag is not provided; Webhook when --config flag presents. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--authorization-webhook-cache-authorized-ttl duration     Default: `5m0s`--authorization-webhook-cache-authorized-ttl duration     Default: 5m0s
The duration to cache 'authorized' responses from the webhook authorizer. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)The duration to cache 'authorized' responses from the webhook authorizer. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--authorization-webhook-cache-unauthorized-ttl duration     Default: `30s`--authorization-webhook-cache-unauthorized-ttl duration     Default: 30s
The duration to cache 'unauthorized' responses from the webhook authorizer. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)The duration to cache 'unauthorized' responses from the webhook authorizer. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--bootstrap-kubeconfig string
Path to a kubeconfig file that will be used to get client certificate for kubelet. If the file specified by `--kubeconfig` does not exist, the bootstrap kubeconfig is used to request a client certificate from the API server. On success, a kubeconfig file referencing the generated client certificate and key is written to the path specified by `--kubeconfig`. The client certificate and key file will be stored in the directory pointed by `--cert-dir`.Path to a kubeconfig file that will be used to get client certificate for kubelet. If the file specified by --kubeconfig does not exist, the bootstrap kubeconfig is used to request a client certificate from the API server. On success, a kubeconfig file referencing the generated client certificate and key is written to the path specified by --kubeconfig. The client certificate and key file will be stored in the directory pointed by --cert-dir.
--cert-dir string     Default: `/var/lib/kubelet/pki`--cert-dir string     Default: /var/lib/kubelet/pki
The directory where the TLS certs are located. If `--tls-cert-file` and `--tls-private-key-file` are provided, this flag will be ignored.The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.
--cgroup-driver string     Default: `cgroupfs`--cgroup-driver string     Default: cgroupfs
Driver that the kubelet uses to manipulate cgroups on the host. Possible values: `cgroupfs`, `systemd`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) +Driver that the kubelet uses to manipulate cgroups on the host. Possible values: cgroupfs, systemd. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--cgroup-root string     Default: `''`--cgroup-root string     Default: ''
Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--cgroups-per-qos     Default: `true`--cgroups-per-qos     Default: true
Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--chaos-chance float
If > 0.0, introduce random client errors and latency. Intended for testing. (DEPRECATED: will be removed in a future version.)Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--client-ca-file string
If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--cluster-dns strings
Comma-separated list of DNS server IP address. This value is used for containers DNS server in case of Pods with "dnsPolicy=ClusterFirst". Note: all DNS servers appearing in the list MUST serve the same set of records otherwise name resolution within the cluster may not work correctly. There is no guarantee as to which DNS server may be contacted for name resolution. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Comma-separated list of DNS server IP address. This value is used for containers DNS server in case of Pods with "dnsPolicy=ClusterFirst".
Note: all DNS servers appearing in the list MUST serve the same set of records otherwise name resolution within the cluster may not work correctly. There is no guarantee as to which DNS server may be contacted for name resolution. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--cluster-domain string
Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--cni-bin-dir string     Default: `/opt/cni/bin`--cni-bin-dir string     Default: /opt/cni/bin
<Warning: Alpha feature> A comma-separated list of full paths of directories in which to search for CNI plugin binaries. This docker-specific flag only works when container-runtime is set to `docker`.A comma-separated list of full paths of directories in which to search for CNI plugin binaries. This docker-specific flag only works when container-runtime is set to docker. (DEPRECATED: will be removed along with dockershim.)
--cni-cache-dir string     Default: `/var/lib/cni/cache`--cni-cache-dir string     Default: /var/lib/cni/cache
<Warning: Alpha feature> The full path of the directory in which CNI should store cache files. This docker-specific flag only works when container-runtime is set to `docker`.The full path of the directory in which CNI should store cache files. This docker-specific flag only works when container-runtime is set to docker. (DEPRECATED: will be removed along with dockershim.)
--cni-conf-dir string     Default: `/etc/cni/net.d`--cni-conf-dir string     Default: /etc/cni/net.d
<Warning: Alpha feature> The full path of the directory in which to search for CNI config files. This docker-specific flag only works when container-runtime is set to `docker`.<Warning: Alpha feature> The full path of the directory in which to search for CNI config files. This docker-specific flag only works when container-runtime is set to docker. (DEPRECATED: will be removed along with dockershim.)
--container-log-max-files int32     Default: 5
Set the maximum number of container log files that can be present for a container. The number must be ≥ 2. This flag can only be used with `--container-runtime=remote`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)<Warning: Beta feature> Set the maximum number of container log files that can be present for a container. The number must be >= 2. This flag can only be used with --container-runtime=remote. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--container-log-max-size string     Default: `10Mi`--container-log-max-size string     Default: 10Mi
Set the maximum size (e.g. 10Mi) of container log file before it is rotated. This flag can only be used with `--container-runtime=remote`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)<Warning: Beta feature> Set the maximum size (e.g. 10Mi) of container log file before it is rotated. This flag can only be used with --container-runtime=remote. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--container-runtime string     Default: `docker`--container-runtime string     Default: docker
The container runtime to use. Possible values: `docker`, `remote`.The container runtime to use. Possible values: docker, remote.
--container-runtime-endpoint string     Default: `unix:///var/run/dockershim.sock`--container-runtime-endpoint string     Default: unix:///var/run/dockershim.sock
[Experimental] The endpoint of remote runtime service. Currently unix socket endpoint is supported on Linux, while npipe and tcp endpoints are supported on windows. Examples: `unix:///var/run/dockershim.sock`, `npipe:////./pipe/dockershim`.[Experimental] The endpoint of remote runtime service. Currently unix socket endpoint is supported on Linux, while npipe and tcp endpoints are supported on windows. Examples: unix:///var/run/dockershim.sock, npipe:////./pipe/dockershim.
--contention-profiling
Enable lock contention profiling, if profiling is enabled (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Enable lock contention profiling, if profiling is enabled (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--cpu-cfs-quota     Default: `true`--cpu-cfs-quota     Default: true
Enable CPU CFS quota enforcement for containers that specify CPU limits (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Enable CPU CFS quota enforcement for containers that specify CPU limits (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--cpu-cfs-quota-period duration     Default: `100ms`--cpu-cfs-quota-period duration     Default: 100ms
Sets CPU CFS quota period value, `cpu.cfs_period_us`, defaults to Linux Kernel default. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Sets CPU CFS quota period value, cpu.cfs_period_us, defaults to Linux Kernel default. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--cpu-manager-policy string     Default: `none`--cpu-manager-policy string     Default: none
CPU Manager policy to use. Possible values: `none`, `static`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)CPU Manager policy to use. Possible values: none, static. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--cpu-manager-reconcile-period duration     Default: `10s`--cpu-manager-policy-options strings
<Warning: Alpha feature> CPU Manager reconciliation period. Examples: `10s`, or `1m`. If not supplied, defaults to node status update frequency. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Comma-separated list of options to fine-tune the behavior of the selected CPU Manager policy. If not supplied, keep the default behaviour. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--docker-endpoint string     Default: `unix:///var/run/docker.sock`--cpu-manager-reconcile-period duration     Default: 10s
Use this for the `docker` endpoint to communicate with. This docker-specific flag only works when container-runtime is set to `docker`.<Warning: Alpha feature> CPU Manager reconciliation period. Examples: 10s, or 1m. If not supplied, defaults to node status update frequency. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--docker-endpoint string     Default: unix:///var/run/docker.sock
Use this for the docker endpoint to communicate with. This docker-specific flag only works when container-runtime is set to docker.
--dynamic-config-dir string
The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. The `DynamicKubeletConfig` feature gate must be enabled to pass this flag; this gate currently defaults to `true` because the feature is beta.The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. The DynamicKubeletConfig feature gate must be enabled to pass this flag. (DEPRECATED: Feature DynamicKubeletConfig is deprecated in 1.22 and will not move to GA. It is planned to be removed from Kubernetes in the version 1.23. Please use alternative ways to update kubelet configuration.)
--enable-controller-attach-detach     Default: `true`--enable-controller-attach-detach     Default: true
Enables the Attach/Detach controller to manage attachment/detachment of volumes scheduled to this node, and disables kubelet from executing any attach/detach operations.Enables the Attach/Detach controller to manage attachment/detachment of volumes scheduled to this node, and disables kubelet from executing any attach/detach operations. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--enable-debugging-handlers     Default: `true`--enable-debugging-handlers     Default: true
Enables server endpoints for log collection and local running of containers and commands. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Enables server endpoints for log collection and local running of containers and commands. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--enable-server     Default: `true`--enable-server     Default: true
Enable the Kubelet's server. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Enable the Kubelet's server. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--enforce-node-allocatable strings     Default: `pods`--enforce-node-allocatable strings     Default: pods
A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. Acceptable options are `none`, `pods`, `system-reserved`, and `kube-reserved`. If the latter two options are specified, `--system-reserved-cgroup` and `--kube-reserved-cgroup` must also be set, respectively. If `none` is specified, no additional options should be set. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. Acceptable options are none, pods, system-reserved, and kube-reserved. If the latter two options are specified, --system-reserved-cgroup and --kube-reserved-cgroup must also be set, respectively. If none is specified, no additional options should be set. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--event-burst int32     Default: 10
Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding `--event-qps`. Only used if `--event-qps` > 0. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding --event-qps. The number must be >= 0. If 0 will use default burst (10). (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--event-qps int32     Default: 5
If > `0`, limit event creations per second to this value. If `0`, unlimited. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)QPS to limit event creations. The number must be >= 0. If 0 will use default QPS (5). (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--eviction-hard mapStringString     Default: `imagefs.available<15%,memory.available<100Mi,nodefs.available<10%`--eviction-hard mapStringString     Default: imagefs.available<15%,memory.available<100Mi,nodefs.available<10%
A set of eviction thresholds (e.g. `memory.available<1Gi`) that if met would trigger a pod eviction. On a Linux node, the default value also includes `nodefs.inodesFree<5%`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)A set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a pod eviction. On a Linux node, the default value also includes nodefs.inodesFree<5%. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--eviction-max-pod-grace-period int32
Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. If negative, defer to pod specified value. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. If negative, defer to pod specified value. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--eviction-minimum-reclaim mapStringString
A set of minimum reclaims (e.g. `imagefs.available=2Gi`) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)A set of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--eviction-pressure-transition-period duration     Default: `5m0s`--eviction-pressure-transition-period duration     Default: 5m0s
Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--eviction-soft mapStringString
A set of eviction thresholds (e.g. `memory.available<1.5Gi`) that if met over a corresponding grace period would trigger a pod eviction. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)A set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a pod eviction. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--eviction-soft-grace-period mapStringString
A set of eviction grace periods (e.g. `memory.available=1m30s`) that correspond to how long a soft eviction threshold must hold before triggering a pod eviction. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--experimental-allocatable-ignore-eviction     Default: `false`
When set to `true`, hard eviction thresholds will be ignored while calculating node allocatable. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details. (DEPRECATED: will be removed in 1.23)
--experimental-bootstrap-kubeconfig string
DEPRECATED: Use `--bootstrap-kubeconfig`
--experimental-check-node-capabilities-before-mount
[Experimental] If set to `true`, the kubelet will check the underlying node for required components (binaries, etc.) before performing the mount (DEPRECATED: will be removed in 1.23, in favor of using CSI.)
--experimental-kernel-memcg-notification
If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling. This flag will be removed in 1.23. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--experimental-log-sanitization bool
[Experimental] When enabled, prevents logging of fields tagged as sensitive (passwords, keys, tokens). Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--experimental-mounter-path string     Default: `mount`
[Experimental] Path of mounter binary. Leave empty to use the default `mount`. (DEPRECATED: will be removed in 1.23, in favor of using CSI.)
--fail-swap-on     Default: `true`
Makes the Kubelet fail to start if swap is enabled on the node. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--feature-gates <A list of 'key=true/false' pairs>
A set of `key=value` pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
APIServerTracing=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CPUManagerPolicyOptions=true|false (ALPHA - default=false)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
CSRDuration=true|false (BETA - default=true)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (BETA - default=true)
DefaultPodTopologySpread=true|false (BETA - default=true)
DelegateFSGroupToCSIDriver=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DisableCloudProviders=true|false (ALPHA - default=false)
DownwardAPIHugePages=true|false (BETA - default=false)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (BETA - default=true)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExpandedDNSConfig=true|false (ALPHA - default=false)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (BETA - default=true)
IngressClassNamespacedParams=true|false (BETA - default=true)
JobTrackingWithFinalizers=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletInUserNamespace=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (BETA - default=true)
MemoryManager=true|false (BETA - default=true)
MemoryQoS=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NetworkPolicyEndPort=true|false (BETA - default=true)
NodeSwap=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (BETA - default=true)
PodDeletionCost=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
PodSecurity=true|false (ALPHA - default=false)
PreferNominatedNode=true|false (BETA - default=true)
ProbeTerminationGracePeriod=true|false (BETA - default=false)
ProcMountType=true|false (ALPHA - default=false)
ProxyTerminatingEndpoints=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
ReadWriteOncePod=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
SeccompDefault=true|false (ALPHA - default=false)
ServiceInternalTrafficPolicy=true|false (BETA - default=true)
ServiceLBNodePortControl=true|false (BETA - default=true)
ServiceLoadBalancerClass=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (BETA - default=true)
StatefulSetMinReadySeconds=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (BETA - default=true)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
(DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--file-check-frequency duration     Default: `20s`
Duration between checking config files for new data. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--hairpin-mode string     Default: `promiscuous-bridge`
How should the kubelet setup hairpin NAT. This allows endpoints of a Service to load balance back to themselves if they should try to access their own Service. Valid values are `promiscuous-bridge`, `hairpin-veth` and `none`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--healthz-bind-address string     Default: `127.0.0.1`
The IP address for the healthz server to serve on (set to `0.0.0.0` or `::` for listening on all interfaces and IP families). (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--healthz-port int32     Default: 10248
The port of the localhost healthz endpoint (set to `0` to disable). (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--hostname-override string
If non-empty, will use this string as identification instead of the actual hostname. If `--cloud-provider` is set, the cloud provider determines the name of the node (consult cloud provider documentation to determine if and how the hostname is used).
--housekeeping-interval duration     Default: `10s`
Interval between container housekeepings.
--http-check-frequency duration     Default: `20s`
Duration between checking HTTP for new data. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--image-gc-high-threshold int32     Default: 85
The percent of disk usage after which image garbage collection is always run. Values must be within the range [0, 100]. To disable image garbage collection, set to 100. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--image-gc-low-threshold int32     Default: 80
The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. Values must be within the range [0, 100] and should not be larger than that of `--image-gc-high-threshold`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--image-pull-progress-deadline duration     Default: `1m0s`
If no pulling progress is made before this deadline, the image pulling will be cancelled. This docker-specific flag only works when container-runtime is set to `docker`. (DEPRECATED: will be removed along with dockershim.)
--image-service-endpoint string
[Experimental] The endpoint of remote image service. If not specified, it will be the same with `--container-runtime-endpoint` by default. Currently UNIX socket endpoint is supported on Linux, while npipe and TCP endpoints are supported on Windows. Examples: `unix:///var/run/dockershim.sock`, `npipe:////./pipe/dockershim`
--iptables-drop-bit int32     Default: 15
The bit of the `fwmark` space to mark packets for dropping. Must be within the range [0, 31]. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--iptables-masquerade-bit int32     Default: 14
The bit of the `fwmark` space to mark packets for SNAT. Must be within the range [0, 31]. Please match this parameter with corresponding parameter in `kube-proxy`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--kernel-memcg-notification
If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--kube-api-burst int32     Default: 10
Burst to use while talking with kubernetes API server. The number must be >= 0. If 0 will use default burst (10). (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--kube-api-content-type string     Default: `application/vnd.kubernetes.protobuf`
Content type of requests sent to apiserver. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--kube-api-qps int32     Default: 5
QPS to use while talking with kubernetes API server. The number must be >= 0. If 0 will use default QPS (5). (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--kube-reserved mapStringString     Default: <None>
A set of `<resource name>=<resource quantity>` pairs (e.g. `cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid='100'`) that describe resources reserved for kubernetes system components. Currently `cpu`, `memory` and local `ephemeral-storage` for root file system are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--kube-reserved-cgroup string     Default: `''`
Absolute name of the top level cgroup that is used to manage kubernetes components for which compute resources were reserved via `--kube-reserved` flag. Ex. `/kube-reserved`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--kubeconfig string
Path to a kubeconfig file, specifying how to connect to the API server. Providing `--kubeconfig` enables API server mode, omitting `--kubeconfig` enables standalone mode.
--kubelet-cgroups string
Optional absolute name of cgroups to create and run the Kubelet in. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--log-backtrace-at <A string of format 'file:line'>     Default: `:0`
When logging hits line `file:N`, emit a stack trace.
--log-flush-frequency duration     Default: `5s`
Maximum number of seconds between log flushes.
--logging-format string     Default: `text`
Sets the log format. Permitted formats: `text`, `json`.
Non-default formats don't honor these flags: `--add-dir-header`, `--alsologtostderr`, `--log-backtrace-at`, `--log-dir`, `--log-file`, `--log-file-max-size`, `--logtostderr`, `--skip_headers`, `--skip_log_headers`, `--stderrthreshold`, `--log-flush-frequency`.
Non-default choices are currently alpha and subject to change without warning. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--logtostderr     Default: `true`
log to standard error instead of files.
--make-iptables-util-chains     Default: `true`
If true, kubelet will ensure `iptables` utility rules are present on host. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--manifest-url string
URL for accessing additional Pod specifications to run (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--manifest-url-header string
Comma-separated list of HTTP headers to use when accessing the URL provided to `--manifest-url`. Multiple headers with the same name will be added in the same order provided. This flag can be repeatedly invoked. For example: `--manifest-url-header 'a:hello,b:again,c:world' --manifest-url-header 'b:beautiful'` (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--master-service-namespace string     Default: `default`
The namespace from which the kubernetes master services should be injected into pods. (DEPRECATED: This flag will be removed in a future version.)
--max-open-files int     Default: 1000000
Number of files that can be opened by Kubelet process. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--max-pods int32     Default: 110
Number of Pods that can run on this Kubelet. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--maximum-dead-containers int32     Default: -1
Maximum number of old instances of containers to retain globally. Each container takes up some disk space. To disable, set to a negative number. (DEPRECATED: Use `--eviction-hard` or `--eviction-soft` instead. Will be removed in a future version.)
--maximum-dead-containers-per-container int32     Default: 1
Maximum number of old instances to retain per container. Each container takes up some disk space. (DEPRECATED: Use `--eviction-hard` or `--eviction-soft` instead. Will be removed in a future version.)
--memory-manager-policy string     Default: None
Memory Manager policy to use. Possible values: 'None', 'Static'. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--minimum-container-ttl-duration duration
Minimum age for a finished container before it is garbage collected. Examples: `300ms`, `10s` or `2h45m` (DEPRECATED: Use `--eviction-hard` or `--eviction-soft` instead. Will be removed in a future version.)
--minimum-image-ttl-duration duration     Default: `2m0s`
Minimum age for an unused image before it is garbage collected. Examples: `300ms`, `10s` or `2h45m`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--network-plugin string
The name of the network plugin to be invoked for various events in kubelet/pod lifecycle. This docker-specific flag only works when container-runtime is set to `docker`. (DEPRECATED: will be removed along with dockershim.)
--network-plugin-mtu int32
The MTU to be passed to the network plugin, to override the default. Set to `0` to use the default 1460 MTU. This docker-specific flag only works when container-runtime is set to `docker`. (DEPRECATED: will be removed along with dockershim.)
--node-ip string
IP address of the node. If set, kubelet will use this IP address for the nodeIP address (or comma-separated dual-stack IP addresses) of the node. If unset, kubelet will use the node's default IPv4 address, if any, or its default IPv6 address if it has no IPv4 addresses. You can pass '::' to make it prefer the default IPv6 address rather than the default IPv4 address.
--node-labels mapStringString
<Warning: Alpha feature> Labels to add when registering the node in the cluster. Labels must be `key=value` pairs separated by `,`. Labels in the `kubernetes.io` namespace must begin with an allowed prefix (`kubelet.kubernetes.io`, `node.kubernetes.io`) or be in the specifically allowed set (`beta.kubernetes.io/arch`, `beta.kubernetes.io/instance-type`, `beta.kubernetes.io/os`, `failure-domain.beta.kubernetes.io/region`, `failure-domain.beta.kubernetes.io/zone`, `kubernetes.io/arch`, `kubernetes.io/hostname`, `kubernetes.io/os`, `node.kubernetes.io/instance-type`, `topology.kubernetes.io/region`, `topology.kubernetes.io/zone`)
--node-status-max-images int32     Default: 50
The maximum number of images to report in `node.status.images`. If `-1` is specified, no cap will be applied. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--node-status-update-frequency duration     Default: `10s`
Specifies how often kubelet posts node status to master. Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod in Node controller. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--non-masquerade-cidr string     Default: `10.0.0.0/8`
Traffic to IPs outside this range will use IP masquerade. Set to `0.0.0.0/0` to never masquerade. (DEPRECATED: will be removed in a future version)
--one-output
If true, only write logs to their native severity level (vs also writing to each lower severity level).
--oom-score-adj int32     Default: -999
The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--pod-cidr string
The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master. For IPv6, the maximum number of IP's allocated is 65536 (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--pod-infra-container-image string     Default: `k8s.gcr.io/pause:3.5`
Specified image will not be pruned by the image garbage collector. When container-runtime is set to `docker`, all containers in each pod will use the network/IPC namespaces from this image. Other CRI implementations have their own configuration to set this image.
--pod-manifest-path string
Path to the directory containing static pod files to run, or the path to a single static pod file. Files starting with dots will be ignored. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--pod-max-pids int     Default: -1
Set the maximum number of processes per pod. If `-1`, the kubelet defaults to the node allocatable PID capacity. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--pods-per-core int32
Number of Pods per core that can run on this Kubelet. The total number of Pods on this Kubelet cannot exceed `--max-pods`, so `--max-pods` will be used if this calculation results in a larger number of Pods allowed on the Kubelet. A value of `0` disables this limit. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--port int32     Default: 10250
The port for the Kubelet to serve on. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--protect-kernel-defaults
Default kubelet behaviour for kernel tuning. If set, kubelet errors if any of kernel tunables is different than kubelet defaults. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--provider-id string
Unique identifier for identifying the node in a machine database, i.e cloud provider. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--qos-reserved mapStringString
<Warning: Alpha feature> A set of `<resource name>=<percentage>` (e.g. `memory=50%`) pairs that describe how pod resource requests are reserved at the QoS level. Currently only memory is supported. Requires the `QOSReserved` feature gate to be enabled. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--read-only-port int32     Default: 10255
The read-only port for the Kubelet to serve on with no authentication/authorization (set to `0` to disable). (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--register-node     Default: `true`
Register the node with the API server. If `--kubeconfig` is not provided, this flag is irrelevant, as the Kubelet won't have an API server to register with.
--register-schedulable     Default: `true`
Register the node as schedulable. Won't have any effect if `--register-node` is false. (DEPRECATED: will be removed in a future version)
--register-with-taints mapStringString
Register the node with the given list of taints (comma separated `<key>=<value>:<effect>`). No-op if `--register-node` is `false`.
--registry-burst int32     Default: 10
Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding `--registry-qps`. Only used if `--registry-qps` is greater than 0. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--registry-qps int32     Default: 5
If > 0, limit registry pull QPS to this value. If `0`, unlimited. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--reserved-cpus string
A comma-separated list of CPUs or CPU ranges that are reserved for system and kubernetes usage. This specific list will supersede cpu counts in `--system-reserved` and `--kube-reserved`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--reserved-memory string
A comma-separated list of memory reservations for NUMA nodes. (e.g. `--reserved-memory 0:memory=1Gi,hugepages-1M=2Gi --reserved-memory 1:memory=2Gi`). The total sum for each memory type should be equal to the sum of `--kube-reserved`, `--system-reserved` and `--eviction-threshold`. See https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/#reserved-memory-flag for more details. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--resolv-conf string     Default: `/etc/resolv.conf`
Resolver configuration file used as the basis for the container DNS resolution configuration. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--root-dir string     Default: `/var/lib/kubelet`
Directory path for managing kubelet files (volume mounts, etc).
--rotate-certificates
<Warning: Beta feature> Auto rotate the kubelet client certificates by requesting new certificates from the `kube-apiserver` when the certificate expiration approaches. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--rotate-server-certificates
Auto-request and rotate the kubelet serving certificates by requesting new certificates from the `kube-apiserver` when the certificate expiration approaches. Requires the `RotateKubeletServerCertificate` feature gate to be enabled, and approval of the submitted `CertificateSigningRequest` objects. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--runonce
If `true`, exit after spawning pods from local manifests or remote urls. Exclusive with `--enable-server` (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--runtime-request-timeout duration     Default: `2m0s`
Timeout of all runtime requests except long running request - `pull`, `logs`, `exec` and `attach`. When timeout exceeded, kubelet will cancel the request, throw out an error and retry later. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--seccomp-default
<Warning: Alpha feature> Enable the use of `RuntimeDefault` as the default seccomp profile for all workloads. The `SeccompDefault` feature gate must be enabled to allow this flag, which is disabled by default.
--seccomp-profile-root string     Default: `/var/lib/kubelet/seccomp`
<Warning: Alpha feature> Directory path for seccomp profiles. (DEPRECATED: will be removed in 1.23, in favor of using the `<root-dir>/seccomp` directory)
--serialize-image-pulls     Default: `true`
Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an `aufs` storage backend. Issue #10959 has more details. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--skip-headers
If `true`, avoid header prefixes in the log messages
--skip-log-headers
If `true`, avoid headers when opening log files
--streaming-connection-idle-timeout duration     Default: `4h0m0s`
Maximum time a streaming connection can be idle before the connection is automatically closed. `0` indicates no timeout. Example: `5m`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--sync-frequency duration     Default: `1m0s`
Max period between synchronizing running containers and config. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--system-cgroups string
Optional absolute name of cgroups in which to place all non-kernel processes that are not already inside a cgroup under `/`. Empty for no container. Rolling back the flag requires a reboot. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--system-reserved mapStringString     Default: `<none>`
A set of `<resource name>=<resource quantity>` (e.g. `cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid='100'`) pairs that describe resources reserved for non-kubernetes components. Currently only `cpu` and `memory` are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--system-reserved-cgroup string     Default: `''`
Absolute name of the top level cgroup that is used to manage non-kubernetes components for which compute resources were reserved via `--system-reserved` flag. Ex. `/system-reserved`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--tls-cert-file string
File containing x509 Certificate used for serving HTTPS (with intermediate certs, if any, concatenated after server cert). If `--tls-cert-file` and `--tls-private-key-file` are not provided, a self-signed certificate and key are generated for the public address and saved to the directory passed to `--cert-dir`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--tls-cipher-suites strings
Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--tls-min-version string
Minimum TLS version supported. Possible values: `VersionTLS10`, `VersionTLS11`, `VersionTLS12`, `VersionTLS13` (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--tls-private-key-file string
File containing x509 private key matching `--tls-cert-file`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)File containing x509 private key matching --tls-cert-file. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--topology-manager-policy string     Default: `none`--topology-manager-policy string     Default: 'none'
Topology Manager policy to use. Possible values: `none`, `best-effort`, `restricted`, `single-numa-node`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Topology Manager policy to use. Possible values: 'none', 'best-effort', 'restricted', 'single-numa-node'. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--topology-manager-scope string     Default: `container`--topology-manager-scope string     Default: container
Scope to which topology hints applied. Topology Manager collects hints from Hint Providers and applies them to defined scope to ensure the pod admission. Possible values: 'container' (default), 'pod'. (default "container") (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Scope to which topology hints applied. Topology Manager collects hints from Hint Providers and applies them to defined scope to ensure the pod admission. Possible values: 'container', 'pod'. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--vmodule moduleSpec--vmodule <A list of 'pattern=N' string>
Comma-separated list of `pattern=N` settings for file-filtered loggingComma-separated list of pattern=N settings for file-filtered logging
--volume-plugin-dir string     Default: `/usr/libexec/kubernetes/kubelet-plugins/volume/exec/`--volume-plugin-dir string     Default: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
The full path of the directory in which to search for additional third party volume plugins. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)The full path of the directory in which to search for additional third party volume plugins. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
--volume-stats-agg-period duration     Default: `1m0s`--volume-stats-agg-period duration     Default: 1m0s
Specifies interval for kubelet to calculate and cache the volume disk usage for all pods and volumes. To disable volume calculations, set to `0`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)Specifies interval for kubelet to calculate and cache the volume disk usage for all pods and volumes. To disable volume calculations, set to 0. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)
- diff --git a/content/en/docs/reference/config-api/apiserver-audit.v1.md b/content/en/docs/reference/config-api/apiserver-audit.v1.md index f0f36c2344..11df06bd8c 100644 --- a/content/en/docs/reference/config-api/apiserver-audit.v1.md +++ b/content/en/docs/reference/config-api/apiserver-audit.v1.md @@ -81,7 +81,7 @@ For non-resource requests, this is the lower-cased HTTP method. user [Required]
-authentication/v1.UserInfo +authentication/v1.UserInfo Authenticated user information. @@ -89,7 +89,7 @@ For non-resource requests, this is the lower-cased HTTP method. impersonatedUser
-authentication/v1.UserInfo +authentication/v1.UserInfo Impersonated user information. @@ -123,7 +123,7 @@ Does not apply for List-type requests, or non-resource requests. responseStatus
-meta/v1.Status +meta/v1.Status The response status, populated even when the ResponseObject is not a Status type. @@ -154,7 +154,7 @@ at Response Level. requestReceivedTimestamp
-meta/v1.MicroTime +meta/v1.MicroTime Time the request reached the apiserver. @@ -162,7 +162,7 @@ at Response Level. stageTimestamp
-meta/v1.MicroTime +meta/v1.MicroTime Time the request reached current audit stage. @@ -206,7 +206,7 @@ EventList is a list of audit Events. metadata
-meta/v1.ListMeta +meta/v1.ListMeta No description provided. @@ -252,7 +252,7 @@ categories are logged. metadata
-meta/v1.ObjectMeta +meta/v1.ObjectMeta ObjectMeta is included for interoperability with API infrastructure.Refer to the Kubernetes API documentation for the fields of the metadata field. @@ -303,7 +303,7 @@ PolicyList is a list of audit Policies. metadata
-meta/v1.ListMeta +meta/v1.ListMeta No description provided. diff --git a/content/en/docs/reference/config-api/apiserver-config.v1alpha1.md b/content/en/docs/reference/config-api/apiserver-config.v1alpha1.md new file mode 100644 index 0000000000..81702355a5 --- /dev/null +++ b/content/en/docs/reference/config-api/apiserver-config.v1alpha1.md @@ -0,0 +1,438 @@ +--- +title: kube-apiserver Configuration (v1alpha1) +content_type: tool-reference +package: apiserver.k8s.io/v1alpha1 +auto_generated: true +--- +Package v1alpha1 is the v1alpha1 version of the API. + +## Resource Types + + +- [AdmissionConfiguration](#apiserver-k8s-io-v1alpha1-AdmissionConfiguration) +- [EgressSelectorConfiguration](#apiserver-k8s-io-v1alpha1-EgressSelectorConfiguration) +- [TracingConfiguration](#apiserver-k8s-io-v1alpha1-TracingConfiguration) + + + + +## `AdmissionConfiguration` {#apiserver-k8s-io-v1alpha1-AdmissionConfiguration} + + + + + +AdmissionConfiguration provides versioned configuration for admission controllers. + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
apiserver.k8s.io/v1alpha1
kind
string
AdmissionConfiguration
plugins
+[]AdmissionPluginConfiguration +
+ Plugins allows specifying a configuration per admission control plugin.
+ + + +## `EgressSelectorConfiguration` {#apiserver-k8s-io-v1alpha1-EgressSelectorConfiguration} + + + + + +EgressSelectorConfiguration provides versioned configuration for egress selector clients. + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
apiserver.k8s.io/v1alpha1
kind
string
EgressSelectorConfiguration
egressSelections [Required]
+[]EgressSelection +
+ connectionServices contains a list of egress selection client configurations
+ + + +## `TracingConfiguration` {#apiserver-k8s-io-v1alpha1-TracingConfiguration} + + + + + +TracingConfiguration provides versioned configuration for tracing clients. + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
apiserver.k8s.io/v1alpha1
kind
string
TracingConfiguration
endpoint
+string +
+ Endpoint of the collector that's running on the control-plane node. +The APIServer uses the egressType ControlPlane when sending data to the collector. +The syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. +Defaults to the otlpgrpc default, localhost:4317 +The connection is insecure, and does not support TLS.
samplingRatePerMillion
+int32 +
+ SamplingRatePerMillion is the number of samples to collect per million spans. +Defaults to 0.
+ + + +## `AdmissionPluginConfiguration` {#apiserver-k8s-io-v1alpha1-AdmissionPluginConfiguration} + + + + +**Appears in:** + +- [AdmissionConfiguration](#apiserver-k8s-io-v1alpha1-AdmissionConfiguration) + + +AdmissionPluginConfiguration provides the configuration for a single plug-in. + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ Name is the name of the admission controller. +It must match the registered admission plugin name.
path
+string +
+ Path is the path to a configuration file that contains the plugin's +configuration
configuration
+k8s.io/apimachinery/pkg/runtime.Unknown +
+ Configuration is an embedded configuration object to be used as the plugin's +configuration. If present, it will be used instead of the path to the configuration file.
+ + + +## `Connection` {#apiserver-k8s-io-v1alpha1-Connection} + + + + +**Appears in:** + +- [EgressSelection](#apiserver-k8s-io-v1alpha1-EgressSelection) + + +Connection provides the configuration for a single egress selection client. + + + + + + + + + + + + + + + + + + +
FieldDescription
proxyProtocol [Required]
+ProtocolType +
+ Protocol is the protocol used to connect from client to the konnectivity server.
transport
+Transport +
+ Transport defines the transport configurations we use to dial to the konnectivity server. +This is required if ProxyProtocol is HTTPConnect or GRPC.
+ + + +## `EgressSelection` {#apiserver-k8s-io-v1alpha1-EgressSelection} + + + + +**Appears in:** + +- [EgressSelectorConfiguration](#apiserver-k8s-io-v1alpha1-EgressSelectorConfiguration) + + +EgressSelection provides the configuration for a single egress selection client. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ name is the name of the egress selection. +Currently supported values are "controlplane", "master", "etcd" and "cluster" +The "master" egress selector is deprecated in favor of "controlplane"
connection [Required]
+Connection +
+ connection is the exact information used to configure the egress selection
+ + + +## `ProtocolType` {#apiserver-k8s-io-v1alpha1-ProtocolType} + +(Alias of `string`) + + +**Appears in:** + +- [Connection](#apiserver-k8s-io-v1alpha1-Connection) + + +ProtocolType is a set of valid values for Connection.ProtocolType + + + + + +## `TCPTransport` {#apiserver-k8s-io-v1alpha1-TCPTransport} + + + + +**Appears in:** + +- [Transport](#apiserver-k8s-io-v1alpha1-Transport) + + +TCPTransport provides the information to connect to konnectivity server via TCP + + + + + + + + + + + + + + + + + + +
FieldDescription
url [Required]
+string +
+ URL is the location of the konnectivity server to connect to. +As an example it might be "https://127.0.0.1:8131"
tlsConfig
+TLSConfig +
+ TLSConfig is the config needed to use TLS when connecting to konnectivity server
+ + + +## `TLSConfig` {#apiserver-k8s-io-v1alpha1-TLSConfig} + + + + +**Appears in:** + +- [TCPTransport](#apiserver-k8s-io-v1alpha1-TCPTransport) + + +TLSConfig provides the authentication information to connect to konnectivity server +Only used with TCPTransport + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
caBundle
+string +
+ caBundle is the file location of the CA to be used to determine trust with the konnectivity server. +Must be absent/empty if TCPTransport.URL is prefixed with http:// +If absent while TCPTransport.URL is prefixed with https://, default to system trust roots.
clientKey
+string +
+ clientKey is the file location of the client key to be used in mtls handshakes with the konnectivity server. +Must be absent/empty if TCPTransport.URL is prefixed with http:// +Must be configured if TCPTransport.URL is prefixed with https://
clientCert
+string +
+ clientCert is the file location of the client certificate to be used in mtls handshakes with the konnectivity server. +Must be absent/empty if TCPTransport.URL is prefixed with http:// +Must be configured if TCPTransport.URL is prefixed with https://
+ + + +## `Transport` {#apiserver-k8s-io-v1alpha1-Transport} + + + + +**Appears in:** + +- [Connection](#apiserver-k8s-io-v1alpha1-Connection) + + +Transport defines the transport configurations we use to dial to the konnectivity server + + + + + + + + + + + + + + + + + + +
FieldDescription
tcp
+TCPTransport +
+ TCP is the TCP configuration for communicating with the konnectivity server via TCP +ProxyProtocol of GRPC is not supported with TCP transport at the moment +Requires at least one of TCP or UDS to be set
uds
+UDSTransport +
+ UDS is the UDS configuration for communicating with the konnectivity server via UDS +Requires at least one of TCP or UDS to be set
+ + + +## `UDSTransport` {#apiserver-k8s-io-v1alpha1-UDSTransport} + + + + +**Appears in:** + +- [Transport](#apiserver-k8s-io-v1alpha1-Transport) + + +UDSTransport provides the information to connect to konnectivity server via UDS + + + + + + + + + + + + + +
FieldDescription
udsName [Required]
+string +
+ UDSName is the name of the unix domain socket to connect to konnectivity server +This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket)
+ + diff --git a/content/en/docs/reference/config-api/client-authentication.v1beta1.md b/content/en/docs/reference/config-api/client-authentication.v1beta1.md index e78edd23f6..d018fb208f 100644 --- a/content/en/docs/reference/config-api/client-authentication.v1beta1.md +++ b/content/en/docs/reference/config-api/client-authentication.v1beta1.md @@ -187,6 +187,14 @@ ExecConfig.ProvideClusterInfo). +interactive [Required]
+bool + + + Interactive declares whether stdin has been passed to this exec plugin. + + + @@ -215,7 +223,7 @@ itself should at least be protected via file permissions. expirationTimestamp
-meta/v1.Time +meta/v1.Time ExpirationTimestamp indicates a time when the provided credentials expire. diff --git a/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md b/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md index 86315856b2..94209488fe 100644 --- a/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md +++ b/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md @@ -546,6 +546,10 @@ this always falls back to the userspace proxy. - [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerConfiguration) + +- [GenericControllerManagerConfiguration](#controllermanager-config-k8s-io-v1alpha1-GenericControllerManagerConfiguration) + ClientConnectionConfiguration contains details for constructing a client. @@ -597,5 +601,180 @@ client. + + + +## `DebuggingConfiguration` {#DebuggingConfiguration} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerConfiguration) + +- [GenericControllerManagerConfiguration](#controllermanager-config-k8s-io-v1alpha1-GenericControllerManagerConfiguration) + + +DebuggingConfiguration holds configuration for Debugging related features. + + + + + + + + + + + + + + + + + + +
FieldDescription
enableProfiling [Required]
+bool +
+ enableProfiling enables profiling via web interface host:port/debug/pprof/
enableContentionProfiling [Required]
+bool +
+ enableContentionProfiling enables lock contention profiling, if +enableProfiling is true.
+ +## `LeaderElectionConfiguration` {#LeaderElectionConfiguration} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerConfiguration) + +- [GenericControllerManagerConfiguration](#controllermanager-config-k8s-io-v1alpha1-GenericControllerManagerConfiguration) + + +LeaderElectionConfiguration defines the configuration of leader election +clients for components that can run with leader election enabled. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
leaderElect [Required]
+bool +
+ leaderElect enables a leader election client to gain leadership +before executing the main loop. Enable this when running replicated +components for high availability.
leaseDuration [Required]
+meta/v1.Duration +
+ leaseDuration is the duration that non-leader candidates will wait +after observing a leadership renewal until attempting to acquire +leadership of a led but unrenewed leader slot. This is effectively the +maximum duration that a leader can be stopped before it is replaced +by another candidate. This is only applicable if leader election is +enabled.
renewDeadline [Required]
+meta/v1.Duration +
+ renewDeadline is the interval between attempts by the acting master to +renew a leadership slot before it stops leading. This must be less +than or equal to the lease duration. This is only applicable if leader +election is enabled.
retryPeriod [Required]
+meta/v1.Duration +
+ retryPeriod is the duration the clients should wait between attempting +acquisition and renewal of a leadership. This is only applicable if +leader election is enabled.
resourceLock [Required]
+string +
+ resourceLock indicates the resource object type that will be used to lock +during leader election cycles.
resourceName [Required]
+string +
+ resourceName indicates the name of resource object that will be used to lock +during leader election cycles.
resourceNamespace [Required]
+string +
+ resourceNamespace indicates the namespace of resource object that will be used to lock +during leader election cycles.
+ +## `LoggingConfiguration` {#LoggingConfiguration} + + + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + +LoggingConfiguration contains logging options +Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. + + + + + + + + + + + + + + + + +
FieldDescription
format [Required]
+string +
+ Format Flag specifies the structure of log messages. +default value of format is `text`
sanitization [Required]
+bool +
+ [Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). +Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.
diff --git a/content/en/docs/reference/config-api/kube-scheduler-config.v1beta2.md b/content/en/docs/reference/config-api/kube-scheduler-config.v1beta2.md new file mode 100644 index 0000000000..1a28c03c88 --- /dev/null +++ b/content/en/docs/reference/config-api/kube-scheduler-config.v1beta2.md @@ -0,0 +1,2108 @@ +--- +title: kube-scheduler Configuration (v1beta2) +content_type: tool-reference +package: kubescheduler.config.k8s.io/v1beta2 +auto_generated: true +--- + + +## Resource Types + + +- [DefaultPreemptionArgs](#kubescheduler-config-k8s-io-v1beta2-DefaultPreemptionArgs) +- [InterPodAffinityArgs](#kubescheduler-config-k8s-io-v1beta2-InterPodAffinityArgs) +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerConfiguration) +- [NodeAffinityArgs](#kubescheduler-config-k8s-io-v1beta2-NodeAffinityArgs) +- [NodeResourcesBalancedAllocationArgs](#kubescheduler-config-k8s-io-v1beta2-NodeResourcesBalancedAllocationArgs) +- [NodeResourcesFitArgs](#kubescheduler-config-k8s-io-v1beta2-NodeResourcesFitArgs) +- [PodTopologySpreadArgs](#kubescheduler-config-k8s-io-v1beta2-PodTopologySpreadArgs) +- [VolumeBindingArgs](#kubescheduler-config-k8s-io-v1beta2-VolumeBindingArgs) +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) + + + +## `ClientConnectionConfiguration` {#ClientConnectionConfiguration} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerConfiguration) + + +ClientConnectionConfiguration contains details for constructing a client. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
kubeconfig [Required]
+string +
+ kubeconfig is the path to a KubeConfig file.
acceptContentTypes [Required]
+string +
+ acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the +default value of 'application/json'. This field will control all connections to the server used by a particular +client.
contentType [Required]
+string +
+ contentType is the content type used when sending data to the server from this client.
qps [Required]
+float32 +
+ qps controls the number of queries per second allowed for this connection.
burst [Required]
+int32 +
+ burst allows extra queries to accumulate when a client is exceeding its rate.
+ +## `DebuggingConfiguration` {#DebuggingConfiguration} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerConfiguration) + + +DebuggingConfiguration holds configuration for Debugging related features. + + + + + + + + + + + + + + + + + + +
FieldDescription
enableProfiling [Required]
+bool +
+ enableProfiling enables profiling via web interface host:port/debug/pprof/
enableContentionProfiling [Required]
+bool +
+ enableContentionProfiling enables lock contention profiling, if +enableProfiling is true.
+ +## `LeaderElectionConfiguration` {#LeaderElectionConfiguration} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerConfiguration) + + +LeaderElectionConfiguration defines the configuration of leader election +clients for components that can run with leader election enabled. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
leaderElect [Required]
+bool +
+ leaderElect enables a leader election client to gain leadership +before executing the main loop. Enable this when running replicated +components for high availability.
leaseDuration [Required]
+meta/v1.Duration +
+ leaseDuration is the duration that non-leader candidates will wait +after observing a leadership renewal until attempting to acquire +leadership of a led but unrenewed leader slot. This is effectively the +maximum duration that a leader can be stopped before it is replaced +by another candidate. This is only applicable if leader election is +enabled.
renewDeadline [Required]
+meta/v1.Duration +
+ renewDeadline is the interval between attempts by the acting master to +renew a leadership slot before it stops leading. This must be less +than or equal to the lease duration. This is only applicable if leader +election is enabled.
retryPeriod [Required]
+meta/v1.Duration +
+ retryPeriod is the duration the clients should wait between attempting +acquisition and renewal of a leadership. This is only applicable if +leader election is enabled.
resourceLock [Required]
+string +
+ resourceLock indicates the resource object type that will be used to lock +during leader election cycles.
resourceName [Required]
+string +
+ resourceName indicates the name of resource object that will be used to lock +during leader election cycles.
resourceNamespace [Required]
+string +
+ resourceNamespace indicates the namespace of resource object that will be used to lock +during leader election cycles.
+ +## `LoggingConfiguration` {#LoggingConfiguration} + + + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + +LoggingConfiguration contains logging options +Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. + + + + + + + + + + + + + + + + + + +
FieldDescription
format [Required]
+string +
+ Format Flag specifies the structure of log messages. +default value of format is `text`
sanitization [Required]
+bool +
+ [Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). +Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.
+ + + + +## `DefaultPreemptionArgs` {#kubescheduler-config-k8s-io-v1beta2-DefaultPreemptionArgs} + + + + + +DefaultPreemptionArgs holds arguments used to configure the +DefaultPreemption plugin. + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1beta2
kind
string
DefaultPreemptionArgs
minCandidateNodesPercentage [Required]
+int32 +
+ MinCandidateNodesPercentage is the minimum number of candidates to +shortlist when dry running preemption as a percentage of number of nodes. +Must be in the range [0, 100]. Defaults to 10% of the cluster size if +unspecified.
minCandidateNodesAbsolute [Required]
+int32 +
+ MinCandidateNodesAbsolute is the absolute minimum number of candidates to +shortlist. The likely number of candidates enumerated for dry running +preemption is given by the formula: +numCandidates = max(numNodes ∗ minCandidateNodesPercentage, minCandidateNodesAbsolute) +We say "likely" because there are other factors such as PDB violations +that play a role in the number of candidates shortlisted. Must be at least +0 nodes. Defaults to 100 nodes if unspecified.
+ + + +## `InterPodAffinityArgs` {#kubescheduler-config-k8s-io-v1beta2-InterPodAffinityArgs} + + + + + +InterPodAffinityArgs holds arguments used to configure the InterPodAffinity plugin. + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1beta2
kind
string
InterPodAffinityArgs
hardPodAffinityWeight [Required]
+int32 +
+ HardPodAffinityWeight is the scoring weight for existing pods with a +matching hard affinity to the incoming pod.
+ + + +## `KubeSchedulerConfiguration` {#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerConfiguration} + + + + + +KubeSchedulerConfiguration configures a scheduler + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1beta2
kind
string
KubeSchedulerConfiguration
parallelism [Required]
+int32 +
+ Parallelism defines the amount of parallelism in algorithms for scheduling Pods. Must be greater than 0. Defaults to 16
leaderElection [Required]
+LeaderElectionConfiguration +
+ LeaderElection defines the configuration of leader election client.
clientConnection [Required]
+ClientConnectionConfiguration +
+ ClientConnection specifies the kubeconfig file and client connection +settings for the proxy server to use when communicating with the apiserver.
healthzBindAddress [Required]
+string +
+ HealthzBindAddress is the IP address and port for the health check server to serve on, +defaulting to 0.0.0.0:10251
metricsBindAddress [Required]
+string +
+ MetricsBindAddress is the IP address and port for the metrics server to +serve on, defaulting to 0.0.0.0:10251.
DebuggingConfiguration [Required]
+DebuggingConfiguration +
(Members of DebuggingConfiguration are embedded into this type.) + DebuggingConfiguration holds configuration for Debugging related features +TODO: We might wanna make this a substruct like Debugging componentbaseconfigv1alpha1.DebuggingConfiguration
percentageOfNodesToScore [Required]
+int32 +
+ PercentageOfNodesToScore is the percentage of all nodes that once found feasible +for running a pod, the scheduler stops its search for more feasible nodes in +the cluster. This helps improve scheduler's performance. Scheduler always tries to find +at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. +Example: if the cluster size is 500 nodes and the value of this flag is 30, +then scheduler stops finding further feasible nodes once it finds 150 feasible ones. +When the value is 0, default percentage (5%--50% based on the size of the cluster) of the +nodes will be scored.
podInitialBackoffSeconds [Required]
+int64 +
+ PodInitialBackoffSeconds is the initial backoff for unschedulable pods. +If specified, it must be greater than 0. If this value is null, the default value (1s) +will be used.
podMaxBackoffSeconds [Required]
+int64 +
+ PodMaxBackoffSeconds is the max backoff for unschedulable pods. +If specified, it must be greater than podInitialBackoffSeconds. If this value is null, +the default value (10s) will be used.
profiles [Required]
+[]KubeSchedulerProfile +
+ Profiles are scheduling profiles that kube-scheduler supports. Pods can +choose to be scheduled under a particular profile by setting its associated +scheduler name. Pods that don't specify any scheduler name are scheduled +with the "default-scheduler" profile, if present here.
extenders [Required]
+[]Extender +
+ Extenders are the list of scheduler extenders, each holding the values of how to communicate +with the extender. These extenders are shared by all scheduler profiles.
+ + + +## `NodeAffinityArgs` {#kubescheduler-config-k8s-io-v1beta2-NodeAffinityArgs} + + + + + +NodeAffinityArgs holds arguments to configure the NodeAffinity plugin. + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1beta2
kind
string
NodeAffinityArgs
addedAffinity
+core/v1.NodeAffinity +
+ AddedAffinity is applied to all Pods additionally to the NodeAffinity +specified in the PodSpec. That is, Nodes need to satisfy AddedAffinity +AND .spec.NodeAffinity. AddedAffinity is empty by default (all Nodes +match). +When AddedAffinity is used, some Pods with affinity requirements that match +a specific Node (such as Daemonset Pods) might remain unschedulable.
+ + + +## `NodeResourcesBalancedAllocationArgs` {#kubescheduler-config-k8s-io-v1beta2-NodeResourcesBalancedAllocationArgs} + + + + + +NodeResourcesBalancedAllocationArgs holds arguments used to configure NodeResourcesBalancedAllocation plugin. + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1beta2
kind
string
NodeResourcesBalancedAllocationArgs
resources [Required]
+[]ResourceSpec +
+ Resources to be managed, the default is "cpu" and "memory" if not specified.
+ + + +## `NodeResourcesFitArgs` {#kubescheduler-config-k8s-io-v1beta2-NodeResourcesFitArgs} + + + + + +NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1beta2
kind
string
NodeResourcesFitArgs
ignoredResources [Required]
+[]string +
+ IgnoredResources is the list of resources that NodeResources fit filter +should ignore. This doesn't apply to scoring.
ignoredResourceGroups [Required]
+[]string +
+ IgnoredResourceGroups defines the list of resource groups that NodeResources fit filter should ignore. +e.g. if group is ["example.com"], it will ignore all resource names that begin +with "example.com", such as "example.com/aaa" and "example.com/bbb". +A resource group name can't contain '/'. This doesn't apply to scoring.
scoringStrategy [Required]
+ScoringStrategy +
+ ScoringStrategy selects the node resource scoring strategy. +The default strategy is LeastAllocated with an equal "cpu" and "memory" weight.
+ + + +## `PodTopologySpreadArgs` {#kubescheduler-config-k8s-io-v1beta2-PodTopologySpreadArgs} + + + + + +PodTopologySpreadArgs holds arguments used to configure the PodTopologySpread plugin. + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1beta2
kind
string
PodTopologySpreadArgs
defaultConstraints
+[]core/v1.TopologySpreadConstraint +
+ DefaultConstraints defines topology spread constraints to be applied to +Pods that don't define any in `pod.spec.topologySpreadConstraints`. +`.defaultConstraints[∗].labelSelectors` must be empty, as they are +deduced from the Pod's membership to Services, ReplicationControllers, +ReplicaSets or StatefulSets. +When not empty, .defaultingType must be "List".
defaultingType
+PodTopologySpreadConstraintsDefaulting +
+ DefaultingType determines how .defaultConstraints are deduced. Can be one +of "System" or "List". + +- "System": Use kubernetes defined constraints that spread Pods among + Nodes and Zones. +- "List": Use constraints defined in .defaultConstraints. + +Defaults to "List" if feature gate DefaultPodTopologySpread is disabled +and to "System" if enabled.
+ + + +## `VolumeBindingArgs` {#kubescheduler-config-k8s-io-v1beta2-VolumeBindingArgs} + + + + + +VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin. + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1beta2
kind
string
VolumeBindingArgs
bindTimeoutSeconds [Required]
+int64 +
+ BindTimeoutSeconds is the timeout in seconds in volume binding operation. +Value must be non-negative integer. The value zero indicates no waiting. +If this value is nil, the default value (600) will be used.
shape
+[]UtilizationShapePoint +
+ Shape specifies the points defining the score function shape, which is +used to score nodes based on the utilization of statically provisioned +PVs. The utilization is calculated by dividing the total requested +storage of the pod by the total capacity of feasible PVs on each node. +Each point contains utilization (ranges from 0 to 100) and its +associated score (ranges from 0 to 10). You can turn the priority by +specifying different scores for different utilization numbers. +The default shape points are: +1) 0 for 0 utilization +2) 10 for 100 utilization +All points must be sorted in increasing order by utilization.
+ + + +## `Extender` {#kubescheduler-config-k8s-io-v1beta2-Extender} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerConfiguration) + + +Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +it is assumed that the extender chose not to provide that extension. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
urlPrefix [Required]
+string +
+ URLPrefix at which the extender is available
filterVerb [Required]
+string +
+ Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender.
preemptVerb [Required]
+string +
+ Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender.
prioritizeVerb [Required]
+string +
+ Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender.
weight [Required]
+int64 +
+ The numeric multiplier for the node scores that the prioritize call generates. +The weight should be a positive integer
bindVerb [Required]
+string +
+ Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. +If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender +can implement this function.
enableHTTPS [Required]
+bool +
+ EnableHTTPS specifies whether https should be used to communicate with the extender
tlsConfig [Required]
+ExtenderTLSConfig +
+ TLSConfig specifies the transport layer security config
httpTimeout [Required]
+meta/v1.Duration +
+ HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize +timeout is ignored, k8s/other extenders priorities are used to select the node.
nodeCacheCapable [Required]
+bool +
+ NodeCacheCapable specifies that the extender is capable of caching node information, +so the scheduler should only send minimal information about the eligible nodes +assuming that the extender already cached full details of all nodes in the cluster
managedResources
+[]ExtenderManagedResource +
+ ManagedResources is a list of extended resources that are managed by +this extender. +- A pod will be sent to the extender on the Filter, Prioritize and Bind + (if the extender is the binder) phases iff the pod requests at least + one of the extended resources in this list. If empty or unspecified, + all pods will be sent to this extender. +- If IgnoredByScheduler is set to true for a resource, kube-scheduler + will skip checking the resource in predicates.
ignorable [Required]
+bool +
+ Ignorable specifies if the extender is ignorable, i.e. scheduling should not +fail when the extender returns an error or is not reachable.
+ + + +## `KubeSchedulerProfile` {#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerProfile} + + + + +**Appears in:** + +- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerConfiguration) + + +KubeSchedulerProfile is a scheduling profile. + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
schedulerName [Required]
+string +
+ SchedulerName is the name of the scheduler associated to this profile. +If SchedulerName matches with the pod's "spec.schedulerName", then the pod +is scheduled with this profile.
plugins [Required]
+Plugins +
+ Plugins specify the set of plugins that should be enabled or disabled. +Enabled plugins are the ones that should be enabled in addition to the +default plugins. Disabled plugins are any of the default plugins that +should be disabled. +When no enabled or disabled plugin is specified for an extension point, +default plugins for that extension point will be used if there is any. +If a QueueSort plugin is specified, the same QueueSort Plugin and +PluginConfig must be specified for all profiles.
pluginConfig [Required]
+[]PluginConfig +
+ PluginConfig is an optional set of custom plugin arguments for each plugin. +Omitting config args for a plugin is equivalent to using the default config +for that plugin.
+ + + +## `Plugin` {#kubescheduler-config-k8s-io-v1beta2-Plugin} + + + + +**Appears in:** + +- [PluginSet](#kubescheduler-config-k8s-io-v1beta2-PluginSet) + + +Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ Name defines the name of plugin
weight [Required]
+int32 +
+ Weight defines the weight of plugin, only used for Score plugins.
+ + + +## `PluginConfig` {#kubescheduler-config-k8s-io-v1beta2-PluginConfig} + + + + +**Appears in:** + +- [KubeSchedulerProfile](#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerProfile) + + +PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. +A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. +It is up to the plugin to process these Args. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ Name defines the name of plugin being configured
args [Required]
+k8s.io/apimachinery/pkg/runtime.RawExtension +
+ Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure.
+ + + +## `PluginSet` {#kubescheduler-config-k8s-io-v1beta2-PluginSet} + + + + +**Appears in:** + +- [Plugins](#kubescheduler-config-k8s-io-v1beta2-Plugins) + + +PluginSet specifies enabled and disabled plugins for an extension point. +If an array is empty, missing, or nil, default plugins at that extension point will be used. + + + + + + + + + + + + + + + + + + +
FieldDescription
enabled [Required]
+[]Plugin +
+ Enabled specifies plugins that should be enabled in addition to default plugins. +If the default plugin is also configured in the scheduler config file, the weight of plugin will +be overridden accordingly. +These are called after default plugins and in the same order specified here.
disabled [Required]
+[]Plugin +
+ Disabled specifies default plugins that should be disabled. +When all default plugins need to be disabled, an array containing only one "∗" should be provided.
+ + + +## `Plugins` {#kubescheduler-config-k8s-io-v1beta2-Plugins} + + + + +**Appears in:** + +- [KubeSchedulerProfile](#kubescheduler-config-k8s-io-v1beta2-KubeSchedulerProfile) + + +Plugins include multiple extension points. When specified, the list of plugins for +a particular extension point are the only ones enabled. If an extension point is +omitted from the config, then the default set of plugins is used for that extension point. +Enabled plugins are called in the order specified here, after default plugins. If they need to +be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
queueSort [Required]
+PluginSet +
+ QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue.
preFilter [Required]
+PluginSet +
+ PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework.
filter [Required]
+PluginSet +
+ Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod.
postFilter [Required]
+PluginSet +
+ PostFilter is a list of plugins that are invoked after filtering phase, no matter whether filtering succeeds or not.
preScore [Required]
+PluginSet +
+ PreScore is a list of plugins that are invoked before scoring.
score [Required]
+PluginSet +
+ Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase.
reserve [Required]
+PluginSet +
+ Reserve is a list of plugins invoked when reserving/unreserving resources +after a node is assigned to run the pod.
permit [Required]
+PluginSet +
+ Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod.
preBind [Required]
+PluginSet +
+ PreBind is a list of plugins that should be invoked before a pod is bound.
bind [Required]
+PluginSet +
+ Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework. +The scheduler call these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success.
postBind [Required]
+PluginSet +
+ PostBind is a list of plugins that should be invoked after a pod is successfully bound.
+ + + +## `PodTopologySpreadConstraintsDefaulting` {#kubescheduler-config-k8s-io-v1beta2-PodTopologySpreadConstraintsDefaulting} + +(Alias of `string`) + + +**Appears in:** + +- [PodTopologySpreadArgs](#kubescheduler-config-k8s-io-v1beta2-PodTopologySpreadArgs) + + +PodTopologySpreadConstraintsDefaulting defines how to set default constraints +for the PodTopologySpread plugin. + + + + + +## `RequestedToCapacityRatioParam` {#kubescheduler-config-k8s-io-v1beta2-RequestedToCapacityRatioParam} + + + + +**Appears in:** + +- [ScoringStrategy](#kubescheduler-config-k8s-io-v1beta2-ScoringStrategy) + + +RequestedToCapacityRatioParam define RequestedToCapacityRatio parameters + + + + + + + + + + + + + +
FieldDescription
shape [Required]
+[]UtilizationShapePoint +
+ Shape is a list of points defining the scoring function shape.
+ + + +## `ResourceSpec` {#kubescheduler-config-k8s-io-v1beta2-ResourceSpec} + + + + +**Appears in:** + +- [NodeResourcesBalancedAllocationArgs](#kubescheduler-config-k8s-io-v1beta2-NodeResourcesBalancedAllocationArgs) + +- [ScoringStrategy](#kubescheduler-config-k8s-io-v1beta2-ScoringStrategy) + + +ResourceSpec represents a single resource. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ Name of the resource.
weight [Required]
+int64 +
+ Weight of the resource.
+ + + +## `ScoringStrategy` {#kubescheduler-config-k8s-io-v1beta2-ScoringStrategy} + + + + +**Appears in:** + +- [NodeResourcesFitArgs](#kubescheduler-config-k8s-io-v1beta2-NodeResourcesFitArgs) + + +ScoringStrategy define ScoringStrategyType for node resource plugin + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
type [Required]
+ScoringStrategyType +
+ Type selects which strategy to run.
resources [Required]
+[]ResourceSpec +
+ Resources to consider when scoring. +The default resource set includes "cpu" and "memory" with an equal weight. +Allowed weights go from 1 to 100. +Weight defaults to 1 if not specified or explicitly set to 0.
requestedToCapacityRatio [Required]
+RequestedToCapacityRatioParam +
+ Arguments specific to RequestedToCapacityRatio strategy.
+ + + +## `ScoringStrategyType` {#kubescheduler-config-k8s-io-v1beta2-ScoringStrategyType} + +(Alias of `string`) + + +**Appears in:** + +- [ScoringStrategy](#kubescheduler-config-k8s-io-v1beta2-ScoringStrategy) + + +ScoringStrategyType the type of scoring strategy used in NodeResourcesFit plugin. + + + + + +## `UtilizationShapePoint` {#kubescheduler-config-k8s-io-v1beta2-UtilizationShapePoint} + + + + +**Appears in:** + +- [VolumeBindingArgs](#kubescheduler-config-k8s-io-v1beta2-VolumeBindingArgs) + +- [RequestedToCapacityRatioParam](#kubescheduler-config-k8s-io-v1beta2-RequestedToCapacityRatioParam) + + +UtilizationShapePoint represents single point of priority function shape. + + + + + + + + + + + + + + + + + + +
FieldDescription
utilization [Required]
+int32 +
+ Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
score [Required]
+int32 +
+ Score assigned to given utilization (y axis). Valid values are 0 to 10.
+ + + + + + +## `Policy` {#kubescheduler-config-k8s-io-v1-Policy} + + + + + +Policy describes a struct for a policy resource used in api. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubescheduler.config.k8s.io/v1
kind
string
Policy
predicates [Required]
+[]PredicatePolicy +
+ Holds the information to configure the fit predicate functions
priorities [Required]
+[]PriorityPolicy +
+ Holds the information to configure the priority functions
extenders [Required]
+[]LegacyExtender +
+ Holds the information to communicate with the extender(s)
hardPodAffinitySymmetricWeight [Required]
+int32 +
+ RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule +corresponding to every RequiredDuringScheduling affinity rule. +HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100.
alwaysCheckAllPredicates [Required]
+bool +
+ When AlwaysCheckAllPredicates is set to true, scheduler checks all +the configured predicates even after one or more of them fails. +When the flag is set to false, scheduler skips checking the rest +of the predicates after it finds one predicate that failed.
+ + + +## `ExtenderManagedResource` {#kubescheduler-config-k8s-io-v1-ExtenderManagedResource} + + + + +**Appears in:** + +- [Extender](#kubescheduler-config-k8s-io-v1beta2-Extender) + +- [LegacyExtender](#kubescheduler-config-k8s-io-v1-LegacyExtender) + + +ExtenderManagedResource describes the arguments of extended resources +managed by an extender. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ Name is the extended resource name.
ignoredByScheduler [Required]
+bool +
+ IgnoredByScheduler indicates whether kube-scheduler should ignore this +resource when applying predicates.
+ + + +## `ExtenderTLSConfig` {#kubescheduler-config-k8s-io-v1-ExtenderTLSConfig} + + + + +**Appears in:** + +- [Extender](#kubescheduler-config-k8s-io-v1beta2-Extender) + +- [LegacyExtender](#kubescheduler-config-k8s-io-v1-LegacyExtender) + + +ExtenderTLSConfig contains settings to enable TLS with extender + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
insecure [Required]
+bool +
+ Server should be accessed without verifying the TLS certificate. For testing only.
serverName [Required]
+string +
+ ServerName is passed to the server for SNI and is used in the client to check server +certificates against. If ServerName is empty, the hostname used to contact the +server is used.
certFile [Required]
+string +
+ Server requires TLS client certificate authentication
keyFile [Required]
+string +
+ Server requires TLS client certificate authentication
caFile [Required]
+string +
+ Trusted root certificates for server
certData [Required]
+[]byte +
+ CertData holds PEM-encoded bytes (typically read from a client certificate file). +CertData takes precedence over CertFile
keyData [Required]
+[]byte +
+ KeyData holds PEM-encoded bytes (typically read from a client certificate key file). +KeyData takes precedence over KeyFile
caData [Required]
+[]byte +
+ CAData holds PEM-encoded bytes (typically read from a root certificates bundle). +CAData takes precedence over CAFile
+ + + +## `LabelPreference` {#kubescheduler-config-k8s-io-v1-LabelPreference} + + + + +**Appears in:** + +- [PriorityArgument](#kubescheduler-config-k8s-io-v1-PriorityArgument) + + +LabelPreference holds the parameters that are used to configure the corresponding priority function + + + + + + + + + + + + + + + + + + +
FieldDescription
label [Required]
+string +
+ Used to identify node "groups"
presence [Required]
+bool +
+ This is a boolean flag +If true, higher priority is given to nodes that have the label +If false, higher priority is given to nodes that do not have the label
+ + + +## `LabelsPresence` {#kubescheduler-config-k8s-io-v1-LabelsPresence} + + + + +**Appears in:** + +- [PredicateArgument](#kubescheduler-config-k8s-io-v1-PredicateArgument) + + +LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. + + + + + + + + + + + + + + + + + + +
FieldDescription
labels [Required]
+[]string +
+ The list of labels that identify node "groups" +All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod
presence [Required]
+bool +
+ The boolean flag that indicates whether the labels should be present or absent from the node
+ + + +## `LegacyExtender` {#kubescheduler-config-k8s-io-v1-LegacyExtender} + + + + +**Appears in:** + +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) + + +LegacyExtender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +it is assumed that the extender chose not to provide that extension. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
urlPrefix [Required]
+string +
+ URLPrefix at which the extender is available
filterVerb [Required]
+string +
+ Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender.
preemptVerb [Required]
+string +
+ Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender.
prioritizeVerb [Required]
+string +
+ Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender.
weight [Required]
+int64 +
+ The numeric multiplier for the node scores that the prioritize call generates. +The weight should be a positive integer
bindVerb [Required]
+string +
+ Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. +If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender +can implement this function.
enableHttps [Required]
+bool +
+ EnableHTTPS specifies whether https should be used to communicate with the extender
tlsConfig [Required]
+ExtenderTLSConfig +
+ TLSConfig specifies the transport layer security config
httpTimeout [Required]
+time.Duration +
+ HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize +timeout is ignored, k8s/other extenders priorities are used to select the node.
nodeCacheCapable [Required]
+bool +
+ NodeCacheCapable specifies that the extender is capable of caching node information, +so the scheduler should only send minimal information about the eligible nodes +assuming that the extender already cached full details of all nodes in the cluster
managedResources
+[]ExtenderManagedResource +
+ ManagedResources is a list of extended resources that are managed by +this extender. +- A pod will be sent to the extender on the Filter, Prioritize and Bind + (if the extender is the binder) phases iff the pod requests at least + one of the extended resources in this list. If empty or unspecified, + all pods will be sent to this extender. +- If IgnoredByScheduler is set to true for a resource, kube-scheduler + will skip checking the resource in predicates.
ignorable [Required]
+bool +
+ Ignorable specifies if the extender is ignorable, i.e. scheduling should not +fail when the extender returns an error or is not reachable.
+ + + +## `PredicateArgument` {#kubescheduler-config-k8s-io-v1-PredicateArgument} + + + + +**Appears in:** + +- [PredicatePolicy](#kubescheduler-config-k8s-io-v1-PredicatePolicy) + + +PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration. +Only one of its members may be specified + + + + + + + + + + + + + + + + + + +
FieldDescription
serviceAffinity [Required]
+ServiceAffinity +
+ The predicate that provides affinity for pods belonging to a service +It uses a label to identify nodes that belong to the same "group"
labelsPresence [Required]
+LabelsPresence +
+ The predicate that checks whether a particular node has a certain label +defined or not, regardless of value
+ + + +## `PredicatePolicy` {#kubescheduler-config-k8s-io-v1-PredicatePolicy} + + + + +**Appears in:** + +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) + + +PredicatePolicy describes a struct of a predicate policy. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ Identifier of the predicate policy +For a custom predicate, the name can be user-defined +For the Kubernetes provided predicates, the name is the identifier of the pre-defined predicate
argument [Required]
+PredicateArgument +
+ Holds the parameters to configure the given predicate
+ + + +## `PriorityArgument` {#kubescheduler-config-k8s-io-v1-PriorityArgument} + + + + +**Appears in:** + +- [PriorityPolicy](#kubescheduler-config-k8s-io-v1-PriorityPolicy) + + +PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration. +Only one of its members may be specified + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
serviceAntiAffinity [Required]
+ServiceAntiAffinity +
+ The priority function that ensures a good spread (anti-affinity) for pods belonging to a service +It uses a label to identify nodes that belong to the same "group"
labelPreference [Required]
+LabelPreference +
+ The priority function that checks whether a particular node has a certain label +defined or not, regardless of value
requestedToCapacityRatioArguments [Required]
+RequestedToCapacityRatioArguments +
+ The RequestedToCapacityRatio priority function is parametrized with function shape.
+ + + +## `PriorityPolicy` {#kubescheduler-config-k8s-io-v1-PriorityPolicy} + + + + +**Appears in:** + +- [Policy](#kubescheduler-config-k8s-io-v1-Policy) + + +PriorityPolicy describes a struct of a priority policy. + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ Identifier of the priority policy +For a custom priority, the name can be user-defined +For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function
weight [Required]
+int64 +
+ The numeric multiplier for the node scores that the priority function generates +The weight should be non-zero and can be a positive or a negative integer
argument [Required]
+PriorityArgument +
+ Holds the parameters to configure the given priority function
+ + + +## `RequestedToCapacityRatioArguments` {#kubescheduler-config-k8s-io-v1-RequestedToCapacityRatioArguments} + + + + +**Appears in:** + +- [PriorityArgument](#kubescheduler-config-k8s-io-v1-PriorityArgument) + + +RequestedToCapacityRatioArguments holds arguments specific to RequestedToCapacityRatio priority function. + + + + + + + + + + + + + + + + + + +
FieldDescription
shape [Required]
+[]UtilizationShapePoint +
+ Array of point defining priority function shape.
resources [Required]
+[]ResourceSpec +
+ No description provided. +
+ + + +## `ResourceSpec` {#kubescheduler-config-k8s-io-v1-ResourceSpec} + + + + +**Appears in:** + +- [RequestedToCapacityRatioArguments](#kubescheduler-config-k8s-io-v1-RequestedToCapacityRatioArguments) + + +ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments. + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ Name of the resource to be managed by RequestedToCapacityRatio function.
weight [Required]
+int64 +
+ Weight of the resource.
+ + + +## `ServiceAffinity` {#kubescheduler-config-k8s-io-v1-ServiceAffinity} + + + + +**Appears in:** + +- [PredicateArgument](#kubescheduler-config-k8s-io-v1-PredicateArgument) + + +ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. + + + + + + + + + + + + + +
FieldDescription
labels [Required]
+[]string +
+ The list of labels that identify node "groups" +All of the labels should match for the node to be considered a fit for hosting the pod
+ + + +## `ServiceAntiAffinity` {#kubescheduler-config-k8s-io-v1-ServiceAntiAffinity} + + + + +**Appears in:** + +- [PriorityArgument](#kubescheduler-config-k8s-io-v1-PriorityArgument) + + +ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function + + + + + + + + + + + + + +
FieldDescription
label [Required]
+string +
+ Used to identify node "groups"
+ + + +## `UtilizationShapePoint` {#kubescheduler-config-k8s-io-v1-UtilizationShapePoint} + + + + +**Appears in:** + +- [RequestedToCapacityRatioArguments](#kubescheduler-config-k8s-io-v1-RequestedToCapacityRatioArguments) + + +UtilizationShapePoint represents single point of priority function shape. + + + + + + + + + + + + + + + + + + +
FieldDescription
utilization [Required]
+int32 +
+ Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
score [Required]
+int32 +
+ Score assigned to given utilization (y axis). Valid values are 0 to 10.
+ + diff --git a/content/en/docs/reference/config-api/kube-scheduler-policy-config.v1.md b/content/en/docs/reference/config-api/kube-scheduler-policy-config.v1.md index e694f7ecbc..8b6c0a9a24 100644 --- a/content/en/docs/reference/config-api/kube-scheduler-policy-config.v1.md +++ b/content/en/docs/reference/config-api/kube-scheduler-policy-config.v1.md @@ -89,7 +89,7 @@ of the predicates after it finds one predicate that failed. **Appears in:** -- [Extender](#kubescheduler-config-k8s-io-v1beta1-Extender) +- [Extender](#kubescheduler-config-k8s-io-v1beta2-Extender) - [LegacyExtender](#kubescheduler-config-k8s-io-v1-LegacyExtender) @@ -132,7 +132,7 @@ resource when applying predicates. **Appears in:** -- [Extender](#kubescheduler-config-k8s-io-v1beta1-Extender) +- [Extender](#kubescheduler-config-k8s-io-v1beta2-Extender) - [LegacyExtender](#kubescheduler-config-k8s-io-v1-LegacyExtender) diff --git a/content/en/docs/reference/config-api/kubeadm-config.v1beta2.md b/content/en/docs/reference/config-api/kubeadm-config.v1beta2.md new file mode 100644 index 0000000000..77595b4599 --- /dev/null +++ b/content/en/docs/reference/config-api/kubeadm-config.v1beta2.md @@ -0,0 +1,1489 @@ +--- +title: kubeadm Configuration (v1beta2) +content_type: tool-reference +package: kubeadm.k8s.io/v1beta2 +auto_generated: true +--- +Package v1beta2 defines the v1beta2 version of the kubeadm configuration file format. +This version improves on the v1beta1 format by fixing some minor issues and adding a few new fields. + +A list of changes since v1beta1: + +- `certificateKey" field is added to InitConfiguration and JoinConfiguration. +- "ignorePreflightErrors" field is added to the NodeRegistrationOptions. +- The JSON "omitempty" tag is used in a more places where appropriate. +- The JSON "omitempty" tag of the "taints" field (inside NodeRegistrationOptions) is removed. +See the Kubernetes 1.15 changelog for further details. 
+ +## Migration from old kubeadm config versions + +Please convert your v1beta1 configuration files to v1beta2 using the "kubeadm config migrate" command of kubeadm v1.15.x +(conversion from older releases of kubeadm config files requires older release of kubeadm as well e.g. + +- kubeadm v1.11 should be used to migrate v1alpha1 to v1alpha2; kubeadm v1.12 should be used to translate v1alpha2 to v1alpha3; +- kubeadm v1.13 or v1.14 should be used to translate v1alpha3 to v1beta1) + +Nevertheless, kubeadm v1.15.x will support reading from v1beta1 version of the kubeadm config file format. + +## Basics + +The preferred way to configure kubeadm is to pass an YAML configuration file with the --config option. Some of the +configuration options defined in the kubeadm config file are also available as command line flags, but only +the most common/simple use case are supported with this approach. + +A kubeadm config file could contain multiple configuration types separated using three dashes (“---”). + +kubeadm supports the following configuration types: + +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration + +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration + +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration + +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration + +apiVersion: kubeadm.k8s.io/v1beta2 +kind: JoinConfiguration +``` + +To print the defaults for "init" and "join" actions use the following commands: + +```shell +kubeadm config print init-defaults +kubeadm config print join-defaults +``` + +The list of configuration types that must be included in a configuration file depends by the action you are +performing (init or join) and by the configuration options you are going to use (defaults or advanced customization). 
+ +If some configuration types are not provided, or provided only partially, kubeadm will use default values; defaults +provided by kubeadm includes also enforcing consistency of values across components when required (e.g. +cluster-cidr flag on controller manager and clusterCIDR on kube-proxy). + +Users are always allowed to override default values, with the only exception of a small subset of setting with +relevance for security (e.g. enforce authorization-mode Node and RBAC on api server) + +If the user provides a configuration types that is not expected for the action you are performing, kubeadm will +ignore those types and print a warning. + +## Kubeadm init configuration types + +When executing kubeadm init with the `--config` option, the following configuration types could be used: +InitConfiguration, ClusterConfiguration, KubeProxyConfiguration, KubeletConfiguration, but only one +between InitConfiguration and ClusterConfiguration is mandatory. + +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +bootstrapTokens: + ... +nodeRegistration: + ... +``` + +The InitConfiguration type should be used to configure runtime settings, that in case of kubeadm init +are the configuration of the bootstrap token and all the setting which are specific to the node where kubeadm +is executed, including: + +- NodeRegistration, that holds fields that relate to registering the new node to the cluster; + use it to customize the node name, the CRI socket to use or any other settings that should apply to this + node only (e.g. the node ip). + +- LocalAPIEndpoint, that represents the endpoint of the instance of the API server to be deployed on this node; + use it e.g. to customize the API server advertise address. + + ```yaml + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration + networking: + ... + etcd: + ... + apiServer: + extraArgs: + ... + extraVolumes: + ... 
+ ``` + +The ClusterConfiguration type should be used to configure cluster-wide settings, +including settings for: + +- Networking, that holds configuration for the networking topology of the cluster; use it e.g. to customize + pod subnet or services subnet. +- Etcd configurations; use it e.g. to customize the local etcd or to configure the API server + for using an external etcd cluster. +- kube-apiserver, kube-scheduler, kube-controller-manager configurations; use it to customize control-plane + components by adding customized setting or overriding kubeadm default settings. + + ```yaml + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + kind: KubeProxyConfiguration + ... + ``` + +The KubeProxyConfiguration type should be used to change the configuration passed to kube-proxy instances deployed +in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. + +See https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/ or https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration +for kube proxy official documentation. + +```yaml +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +... +``` + +The KubeletConfiguration type should be used to change the configurations that will be passed to all kubelet instances +deployed in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. + +See https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/ or https://godoc.org/k8s.io/kubelet/config/v1beta1#KubeletConfiguration +for kubelet official documentation. + +Here is a fully populated example of a single YAML file containing multiple +configuration types to be used during a `kubeadm init` run. 
+ +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +bootstrapTokens: + - token: "9a08jv.c0izixklcxtmnze7" + description: "kubeadm bootstrap token" + ttl: "24h" + - token: "783bde.3f89s0fje9f38fhf" + description: "another bootstrap token" + usages: + - authentication + - signing + groups: + - system:bootstrappers:kubeadm:default-node-token +nodeRegistration: + name: "ec2-10-100-0-1" + criSocket: "/var/run/dockershim.sock" + taints: + - key: "kubeadmNode" + value: "master" + effect: "NoSchedule" + kubeletExtraArgs: + cgroup-driver: "cgroupfs" + ignorePreflightErrors: + - IsPrivilegedUser +localAPIEndpoint: + advertiseAddress: "10.100.0.1" + bindPort: 6443 +certificateKey: "e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204" +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +etcd: + # one of local or external + local: + imageRepository: "k8s.gcr.io" + imageTag: "3.2.24" + dataDir: "/var/lib/etcd" + extraArgs: + listen-client-urls: "http://10.100.0.1:2379" + serverCertSANs: + - "ec2-10-100-0-1.compute-1.amazonaws.com" + peerCertSANs: + - "10.100.0.1" + # external: + # endpoints: + # - "10.100.0.1:2379" + # - "10.100.0.2:2379" + # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" + # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" + # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key" + networking: + serviceSubnet: "10.96.0.0/12" + podSubnet: "10.100.0.1/24" + dnsDomain: "cluster.local" + kubernetesVersion: "v1.12.0" + controlPlaneEndpoint: "10.100.0.1:6443" + apiServer: + extraArgs: + authorization-mode: "Node,RBAC" + extraVolumes: + - name: "some-volume" + hostPath: "/etc/some-path" + mountPath: "/etc/some-pod-path" + readOnly: false + pathType: File + certSANs: + - "10.100.1.1" + - "ec2-10-100-0-1.compute-1.amazonaws.com" + timeoutForControlPlane: 4m0s + controllerManager: + extraArgs: + "node-cidr-mask-size": "20" + extraVolumes: + - name: "some-volume" + hostPath: "/etc/some-path" + mountPath: "/etc/some-pod-path" + 
readOnly: false + pathType: File + scheduler: + extraArgs: + address: "10.100.0.1" + extraVolumes: + - name: "some-volume" + hostPath: "/etc/some-path" + mountPath: "/etc/some-pod-path" + readOnly: false + pathType: File +certificatesDir: "/etc/kubernetes/pki" +imageRepository: "k8s.gcr.io" +useHyperKubeImage: false +clusterName: "example-cluster" +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# kubelet specific options here +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +# kube-proxy specific options here +``` + +## Kubeadm join configuration types + +When executing kubeadm join with the `--config` option, the JoinConfiguration type should be provided. + +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: JoinConfiguration +... +``` + +The JoinConfiguration type should be used to configure runtime settings, that in case of kubeadm join +are the discovery method used for accessing the cluster info and all the setting which are specific +to the node where kubeadm is executed, including: + +- NodeRegistration, that holds fields that relate to registering the new node to the cluster; + use it to customize the node name, the CRI socket to use or any other settings that should apply to this + node only (e.g. the node ip). + +- APIEndpoint, that represents the endpoint of the instance of the API server to be eventually deployed on this node. 
+ +## Resource Types + + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta2-ClusterConfiguration) +- [ClusterStatus](#kubeadm-k8s-io-v1beta2-ClusterStatus) +- [InitConfiguration](#kubeadm-k8s-io-v1beta2-InitConfiguration) +- [JoinConfiguration](#kubeadm-k8s-io-v1beta2-JoinConfiguration) + + + + +## `ClusterConfiguration` {#kubeadm-k8s-io-v1beta2-ClusterConfiguration} + + + + + +ClusterConfiguration contains cluster-wide configuration for a kubeadm cluster + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubeadm.k8s.io/v1beta2
kind
string
ClusterConfiguration
etcd [Required]
+Etcd +
+ `etcd` holds configuration for etcd.
networking [Required]
+Networking +
+ `networking` holds configuration for the networking topology of the cluster.
kubernetesVersion [Required]
+string +
+ `kubernetesVersion` is the target version of the control plane.
controlPlaneEndpoint [Required]
+string +
+ `controlPlaneEndpoint` sets a stable IP address or DNS name for the control plane; it +can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. +In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort +are used; in case the ControlPlaneEndpoint is specified but without a TCP port, +the BindPort is used. +Possible usages are: + +- In a cluster with more than one control plane instances, this field should be + assigned the address of the external load balancer in front of the + control plane instances. +- In environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane.
apiServer [Required]
+APIServer +
+ `apiServer` contains extra settings for the API server.
controllerManager [Required]
+ControlPlaneComponent +
+ `controllerManager` contains extra settings for the controller manager.
scheduler [Required]
+ControlPlaneComponent +
+ `scheduler` contains extra settings for the scheduler.
dns [Required]
+DNS +
+ `dns` defines the options for the DNS add-on.
certificatesDir [Required]
+string +
+ `certificatesDir` specifies where to store or look for all required certificates.
imageRepository [Required]
+string +
+ `imageRepository` sets the container registry to pull images from. +If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is +a CI build (kubernetes version starts with `ci/` or `ci-cross/`) +`gcr.io/k8s-staging-ci-images` will be used as a default for control plane +components and for kube-proxy, while `k8s.gcr.io` will be used for all the other images.
useHyperKubeImage [Required]
+bool +
+ `useHyperKubeImage` controls if hyperkube should be used for Kubernetes +components instead of their respective separate images +DEPRECATED: As hyperkube is itself deprecated, this field is too. It will +be removed in future kubeadm config versions, kubeadm will print multiple +warnings when this is set to true, and at some point it may become ignored.
featureGates [Required]
+map[string]bool +
+ Feature gates enabled by the user.
clusterName [Required]
+string +
+ The cluster name
+ + + +## `ClusterStatus` {#kubeadm-k8s-io-v1beta2-ClusterStatus} + + + + + +ClusterStatus contains the cluster status. The ClusterStatus will be stored in the kubeadm-config +ConfigMap in the cluster, and then updated by kubeadm when additional control plane instance joins or leaves the cluster. + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubeadm.k8s.io/v1beta2
kind
string
ClusterStatus
apiEndpoints [Required]
+map[string]github.com/tengqm/kubeconfig/config/kubeadm/v1beta2.APIEndpoint +
+ `apiEndpoints` currently available in the cluster, one for each control +plane/API server instance. The key of the map is the IP of the host's default interface
+ + + +## `InitConfiguration` {#kubeadm-k8s-io-v1beta2-InitConfiguration} + + + + + +InitConfiguration contains a list of elements that is specific "kubeadm init"-only runtime +information. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubeadm.k8s.io/v1beta2
kind
string
InitConfiguration
bootstrapTokens [Required]
+[]BootstrapToken +
+ `bootstrapTokens` is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. +This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature
nodeRegistration [Required]
+NodeRegistrationOptions +
+ `nodeRegistration` holds fields that relate to registering the new control-plane node to the cluster
localAPIEndpoint [Required]
+APIEndpoint +
+ `localAPIEndpoint` represents the endpoint of the API server instance that's deployed on this control plane node +In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint +is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This +configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible +on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process +fails you may set the desired value here.
certificateKey [Required]
+string +
+ `certificateKey` sets the key with which certificates and keys are encrypted prior to being uploaded in +a Secret in the cluster during the "uploadcerts" init phase.
+ + + +## `JoinConfiguration` {#kubeadm-k8s-io-v1beta2-JoinConfiguration} + + + + + +JoinConfiguration contains elements describing a particular node. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubeadm.k8s.io/v1beta2
kind
string
JoinConfiguration
nodeRegistration [Required]
+NodeRegistrationOptions +
+ `nodeRegistration` holds fields that relate to registering the new control-plane +node to the cluster
caCertPath [Required]
+string +
+ `caCertPath` is the path to the SSL certificate authority used to +secure communications between node and control-plane. +Defaults to "/etc/kubernetes/pki/ca.crt".
discovery [Required]
+Discovery +
+ `discovery` specifies the options for the kubelet to use during the TLS Bootstrap +process
controlPlane [Required]
+JoinControlPlane +
+ `controlPlane` defines the additional control plane instance to be deployed on the +joining node. If nil, no additional control plane instance will be deployed.
+ + + +## `APIEndpoint` {#kubeadm-k8s-io-v1beta2-APIEndpoint} + + + + +**Appears in:** + +- [ClusterStatus](#kubeadm-k8s-io-v1beta2-ClusterStatus) + +- [InitConfiguration](#kubeadm-k8s-io-v1beta2-InitConfiguration) + +- [JoinControlPlane](#kubeadm-k8s-io-v1beta2-JoinControlPlane) + + +APIEndpoint struct contains elements of API server instance deployed on a node. + + + + + + + + + + + + + + + + + + +
FieldDescription
advertiseAddress [Required]
+string +
+ `advertiseAddress` sets the IP address for the API server to advertise.
bindPort [Required]
+int32 +
+ `bindPort` sets the secure port for the API Server to bind to. Defaults to 6443.
+ + + +## `APIServer` {#kubeadm-k8s-io-v1beta2-APIServer} + + + + +**Appears in:** + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta2-ClusterConfiguration) + + +APIServer holds settings necessary for API server deployments in the cluster + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
ControlPlaneComponent [Required]
+ControlPlaneComponent +
(Members of ControlPlaneComponent are embedded into this type.) + No description provided. +
certSANs [Required]
+[]string +
+ `certSANs` sets extra Subject Alternative Names for the API Server signing cert.
timeoutForControlPlane [Required]
+meta/v1.Duration +
+ `timeoutForControlPlane` controls the timeout that we use for API server to appear
+ + + +## `BootstrapToken` {#kubeadm-k8s-io-v1beta2-BootstrapToken} + + + + +**Appears in:** + +- [InitConfiguration](#kubeadm-k8s-io-v1beta2-InitConfiguration) + + +BootstrapToken describes one bootstrap token, stored as a Secret in the cluster + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
token [Required]
+BootstrapTokenString +
+ `token` used for establishing bidirectional trust between nodes and control-planes. +Used for joining nodes in the cluster.
description [Required]
+string +
+ `description` sets a human-friendly message why this token exists and what it's used +for, so other administrators can know its purpose.
ttl [Required]
+meta/v1.Duration +
+ `ttl` defines the time to live for this token. Defaults to "24h". +`expires` and `ttl` are mutually exclusive.
expires [Required]
+meta/v1.Time +
+ `expires` specifies the timestamp when this token expires. Defaults to being set +dynamically at runtime based on the `ttl`. `expires` and `ttl` are mutually exclusive.
usages [Required]
+[]string +
+ `usages` describes the ways in which this token can be used. Can by default be used +for establishing bidirectional trust, but that can be changed here.
groups [Required]
+[]string +
+ `groups` specifies the extra groups that this token will authenticate as when/if +used for authentication
+ + + +## `BootstrapTokenDiscovery` {#kubeadm-k8s-io-v1beta2-BootstrapTokenDiscovery} + + + + +**Appears in:** + +- [Discovery](#kubeadm-k8s-io-v1beta2-Discovery) + + +BootstrapTokenDiscovery is used to set the options for bootstrap token based discovery + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
token [Required]
+string +
+ `token` is a token used to validate cluster information fetched from the control-plane.
apiServerEndpoint [Required]
+string +
+ `apiServerEndpoint` is an IP or domain name to the API server from which +information will be fetched.
caCertHashes [Required]
+[]string +
+ `caCertHashes` specifies a set of public key pins to verify when token-based +discovery is used. The root CA found during discovery must match one of these +values. Specifying an empty set disables root CA pinning, which can be unsafe. +Each hash is specified as `<type>:<value>`, where the only currently supported +type is "sha256". This is a hex-encoded SHA-256 hash of the Subject Public Key +Info (SPKI) object in DER-encoded ASN.1. These hashes can be calculated using, +for example, OpenSSL.
unsafeSkipCAVerification [Required]
+bool +
+ `unsafeSkipCAVerification` allows token-based discovery without CA verification +via `caCertHashes`. This can weaken the security of kubeadm since other nodes +can impersonate the control-plane.
+ + + +## `BootstrapTokenString` {#kubeadm-k8s-io-v1beta2-BootstrapTokenString} + + + + +**Appears in:** + +- [BootstrapToken](#kubeadm-k8s-io-v1beta2-BootstrapToken) + + +BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used +for both validation of the practically of the API server from a joining node's point +of view and as an authentication method for the node in the bootstrap phase of +"kubeadm join". This token is and should be short-lived + + + + + + + + + + + + + + + + + + +
FieldDescription
- [Required]
+string +
+ No description provided. +
- [Required]
+string +
+ No description provided. +
+ + + +## `ControlPlaneComponent` {#kubeadm-k8s-io-v1beta2-ControlPlaneComponent} + + + + +**Appears in:** + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta2-ClusterConfiguration) + +- [APIServer](#kubeadm-k8s-io-v1beta2-APIServer) + + +ControlPlaneComponent holds settings common to control plane component of the cluster + + + + + + + + + + + + + + + + + + +
FieldDescription
extraArgs [Required]
+map[string]string +
+ `extraArgs` is an extra set of flags to pass to the control plane component.
extraVolumes [Required]
+[]HostPathMount +
+ `extraVolumes` is an extra set of host volumes, mounted to the control plane component.
+ + + +## `DNS` {#kubeadm-k8s-io-v1beta2-DNS} + + + + +**Appears in:** + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta2-ClusterConfiguration) + + +DNS defines the DNS addon that should be used in the cluster + + + + + + + + + + + + + + + + + + +
FieldDescription
type [Required]
+DNSAddOnType +
+ `type` defines the DNS add-on to use.
ImageMeta [Required]
+ImageMeta +
(Members of ImageMeta are embedded into this type.) + `imageMeta` allows to customize the image used for the DNS.
+ + + +## `DNSAddOnType` {#kubeadm-k8s-io-v1beta2-DNSAddOnType} + +(Alias of `string`) + + +**Appears in:** + +- [DNS](#kubeadm-k8s-io-v1beta2-DNS) + + +DNSAddOnType defines string identifying DNS add-on types + + + + + +## `Discovery` {#kubeadm-k8s-io-v1beta2-Discovery} + + + + +**Appears in:** + +- [JoinConfiguration](#kubeadm-k8s-io-v1beta2-JoinConfiguration) + + +Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
bootstrapToken [Required]
+BootstrapTokenDiscovery +
+ `bootstrapToken` is used to set the options for bootstrap token based discovery. +`bootstrapToken` and `file` are mutually exclusive.
file [Required]
+FileDiscovery +
+ `file` specifies a file or URL to a kubeconfig file from which to load cluster information. +`bootstrapToken` and `file` are mutually exclusive.
tlsBootstrapToken [Required]
+string +
+ `tlsBootstrapToken` is a token used for TLS bootstrapping. +If `bootstrapToken` is set, this field is defaulted to `bootstrapToken.token`, +but can be overridden. +If `file` is set, this field **must be set** in case the KubeConfigFile does +not contain any other authentication information.
timeout [Required]
+meta/v1.Duration +
+ `timeout` modifies the discovery timeout.
+ + + +## `Etcd` {#kubeadm-k8s-io-v1beta2-Etcd} + + + + +**Appears in:** + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta2-ClusterConfiguration) + + +Etcd contains elements describing Etcd configuration. + + + + + + + + + + + + + + + + + + +
FieldDescription
local [Required]
+LocalEtcd +
+ `local` provides configuration knobs for configuring the local etcd instance. +`local` and `external` are mutually exclusive.
external [Required]
+ExternalEtcd +
+ `external` describes how to connect to an external etcd cluster. +`local` and `external` are mutually exclusive.
+ + + +## `ExternalEtcd` {#kubeadm-k8s-io-v1beta2-ExternalEtcd} + + + + +**Appears in:** + +- [Etcd](#kubeadm-k8s-io-v1beta2-Etcd) + + +ExternalEtcd describes an external etcd cluster. +Kubeadm has no knowledge of where certificate files live and they must be supplied. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
endpoints [Required]
+[]string +
+ `endpoints` are endpoints of etcd members. This field is required.
caFile [Required]
+string +
+ `caFile` is an SSL Certificate Authority file used to secure etcd communication. +Required if using a TLS connection.
certFile [Required]
+string +
+ `certFile` is an SSL certification file used to secure etcd communication. +Required if using a TLS connection.
keyFile [Required]
+string +
+ `keyFile` is an SSL key file used to secure etcd communication. +Required if using a TLS connection.
+ + + +## `FileDiscovery` {#kubeadm-k8s-io-v1beta2-FileDiscovery} + + + + +**Appears in:** + +- [Discovery](#kubeadm-k8s-io-v1beta2-Discovery) + + +FileDiscovery is used to specify a file or URL to a kubeconfig file from which to load cluster information + + + + + + + + + + + + + +
FieldDescription
kubeConfigPath [Required]
+string +
+ `kubeConfigPath` specifies the actual file path or URL to the kubeconfig file +from which to load cluster information
+ + + +## `HostPathMount` {#kubeadm-k8s-io-v1beta2-HostPathMount} + + + + +**Appears in:** + +- [ControlPlaneComponent](#kubeadm-k8s-io-v1beta2-ControlPlaneComponent) + + +HostPathMount contains elements describing volumes that are mounted from the host. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ `name` is the volume name inside the Pod template.
hostPath [Required]
+string +
+ `hostPath` is the path in the host that will be mounted inside the Pod.
mountPath [Required]
+string +
+ `mountPath` is the path inside the Pod where the `hostPath` volume is mounted.
readOnly [Required]
+bool +
+ `readOnly` controls write access to the volume.
pathType [Required]
+core/v1.HostPathType +
+ `pathType` is the type of the `hostPath` volume.
+ + + +## `ImageMeta` {#kubeadm-k8s-io-v1beta2-ImageMeta} + + + + +**Appears in:** + +- [DNS](#kubeadm-k8s-io-v1beta2-DNS) + +- [LocalEtcd](#kubeadm-k8s-io-v1beta2-LocalEtcd) + + +ImageMeta allows to customize the image used for components that are not +originated from the Kubernetes/Kubernetes release process + + + + + + + + + + + + + + + + + + +
FieldDescription
imageRepository [Required]
+string +
+ `imageRepository` sets the container registry to pull images from. +If not set, the ImageRepository defined in ClusterConfiguration will be used instead.
imageTag [Required]
+string +
+ `imageTag` allows to specify a tag for the image. +In case this value is set, kubeadm does not change automatically the +version of the above components during upgrades.
+ + + +## `JoinControlPlane` {#kubeadm-k8s-io-v1beta2-JoinControlPlane} + + + + +**Appears in:** + +- [JoinConfiguration](#kubeadm-k8s-io-v1beta2-JoinConfiguration) + + +JoinControlPlane contains elements describing an additional control plane instance to be deployed on the joining node. + + + + + + + + + + + + + + + + + + +
FieldDescription
localAPIEndpoint [Required]
+APIEndpoint +
+ `localAPIEndpoint` represents the endpoint of the API server instance to be deployed +on this node.
certificateKey [Required]
+string +
+ `certificateKey` is the key that is used for decryption of certificates after they +are downloaded from the secret upon joining a new control plane node. The +corresponding encryption key is in the InitConfiguration.
+ + + +## `LocalEtcd` {#kubeadm-k8s-io-v1beta2-LocalEtcd} + + + + +**Appears in:** + +- [Etcd](#kubeadm-k8s-io-v1beta2-Etcd) + + +LocalEtcd describes that kubeadm should run an etcd cluster locally + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
ImageMeta [Required]
+ImageMeta +
(Members of ImageMeta are embedded into this type.) + `ImageMeta` allows to customize the container used for etcd.
dataDir [Required]
+string +
+ `dataDir` is the directory etcd will place its data. +Defaults to "/var/lib/etcd".
extraArgs [Required]
+map[string]string +
+ `extraArgs` are extra arguments provided to the etcd binary +when run inside a static pod.
serverCertSANs [Required]
+[]string +
+ `serverCertSANs` sets extra Subject Alternative Names for the etcd server signing cert.
peerCertSANs [Required]
+[]string +
+ `peerCertSANs` sets extra Subject Alternative Names for the etcd peer signing cert.
+ + + +## `Networking` {#kubeadm-k8s-io-v1beta2-Networking} + + + + +**Appears in:** + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta2-ClusterConfiguration) + + +Networking contains elements describing cluster's networking configuration + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
serviceSubnet [Required]
+string +
+ `serviceSubnet` is the subnet used by k8s services. Defaults to "10.96.0.0/12".
podSubnet [Required]
+string +
+ `podSubnet` is the subnet used by Pods.
dnsDomain [Required]
+string +
+ `dnsDomain` is the DNS domain used by k8s services. Defaults to "cluster.local".
+ + + +## `NodeRegistrationOptions` {#kubeadm-k8s-io-v1beta2-NodeRegistrationOptions} + + + + +**Appears in:** + +- [InitConfiguration](#kubeadm-k8s-io-v1beta2-InitConfiguration) + +- [JoinConfiguration](#kubeadm-k8s-io-v1beta2-JoinConfiguration) + + +NodeRegistrationOptions holds fields that relate to registering a new control-plane or node to the cluster, either via "kubeadm init" or "kubeadm join" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ `name` is the `.metadata.name` field of the Node API object that will be created in this +`kubeadm init` or `kubeadm join` operation. +This field is also used in the CommonName field of the kubelet's client certificate to the +API server. Defaults to the hostname of the node if not provided.
criSocket [Required]
+string +
+ `criSocket` is used to retrieve container runtime info. This information will be +annotated to the Node API object, for later re-use.
taints [Required]
+[]core/v1.Taint +
+ `taints` specifies the taints the Node API object should be registered with. If +this field is unset, i.e. nil, in the `kubeadm init` process, it will be defaulted +to `['"node-role.kubernetes.io/master"=""']`. If you don't want to taint your +control-plane node, set this field to an empty list, i.e. `taints: []` in the YAML +file. This field is solely used for Node registration.
kubeletExtraArgs [Required]
+map[string]string +
+ `kubeletExtraArgs` passes through extra arguments to the kubelet. The arguments here +are passed to the kubelet command line via the environment file kubeadm writes at +runtime for the kubelet to source. This overrides the generic base-level +configuration in the "kubelet-config-1.X" ConfigMap. Flags have higher priority when +parsing. These values are local and specific to the node kubeadm is executing on.
ignorePreflightErrors [Required]
+[]string +
+ `ignorePreflightErrors` provides a slice of pre-flight errors to be ignored when +the current node is registered.
+ + diff --git a/content/en/docs/reference/config-api/kubeadm-config.v1beta3.md b/content/en/docs/reference/config-api/kubeadm-config.v1beta3.md new file mode 100644 index 0000000000..5f73e8b3a8 --- /dev/null +++ b/content/en/docs/reference/config-api/kubeadm-config.v1beta3.md @@ -0,0 +1,1535 @@ +--- +title: kubeadm Configuration (v1beta3) +content_type: tool-reference +package: kubeadm.k8s.io/v1beta3 +auto_generated: true +--- +Package v1beta3 defines the v1beta3 version of the kubeadm configuration file format. +This version improves on the v1beta2 format by fixing some minor issues and adding a few new fields. + +A list of changes since v1beta2: + +- The deprecated `ClusterConfiguration.useHyperKubeImage` field has been removed. + Kubeadm no longer supports the hyperkube image. +- The `ClusterConfiguration.dns.type` field has been removed since CoreDNS is the only supported + DNS server type by kubeadm. +- Include "datapolicy" tags on the fields that hold secrets. + This would result in the field values to be omitted when API structures are printed with klog. +- Add `InitConfiguration.skipPhases`, `JoinConfiguration.skipPhases` to allow skipping + a list of phases during kubeadm init/join command execution. +- Add `InitConfiguration.nodeRegistration.imagePullPolicy" and + `JoinConfiguration.nodeRegistration.imagePullPolicy` to allow specifying + the images pull policy during kubeadm "init" and "join". The value must be + one of "Always", "Never" or "IfNotPresent". "IfNotPresent" is the default, + which has been the existing behavior prior to this addition. +- Add `InitConfiguration.patches.directory`, `JoinConfiguration.patches.directory` + to allow the user to configure a directory from which to take patches for + components deployed by kubeadm. +- Move the `BootstrapToken∗` API and related utilities out of the "kubeadm" API group + to a new group "bootstraptoken". The kubeadm API version v1beta3 no longer contains + the `BootstrapToken∗` structures. 
+ +## Migration from old kubeadm config versions + +- kubeadm v1.15.x and newer can be used to migrate from the v1beta1 to v1beta2. +- kubeadm v1.22.x no longer supports v1beta1 and older APIs, but can be used to migrate v1beta2 to v1beta3. + +## Basics + +The preferred way to configure kubeadm is to pass an YAML configuration file with the --config option. Some of the +configuration options defined in the kubeadm config file are also available as command line flags, but only +the most common/simple use case are supported with this approach. + +A kubeadm config file could contain multiple configuration types separated using three dashes (“---”). + +kubeadm supports the following configuration types: + +```yaml +apiVersion: kubeadm.k8s.io/v1beta3 +kind: InitConfiguration +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: ClusterConfiguration +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: JoinConfiguration +``` + +To print the defaults for "init" and "join" actions use the following commands: + +```shell +kubeadm config print init-defaults +kubeadm config print join-defaults +``` + +The list of configuration types that must be included in a configuration file depends by the action you are +performing (init or join) and by the configuration options you are going to use (defaults or advanced customization). + +If some configuration types are not provided, or provided only partially, kubeadm will use default values; defaults +provided by kubeadm includes also enforcing consistency of values across components when required (e.g. +cluster-cidr flag on controller manager and clusterCIDR on kube-proxy). + +Users are always allowed to override default values, with the only exception of a small subset of setting with +relevance for security (e.g. 
enforce authorization-mode Node and RBAC on api server) + +If the user provides a configuration types that is not expected for the action you are performing, kubeadm will +ignore those types and print a warning. + +## Kubeadm init configuration types + +When executing kubeadm init with the `--config` option, the following configuration types could be used: +InitConfiguration, ClusterConfiguration, KubeProxyConfiguration, KubeletConfiguration, but only one +between InitConfiguration and ClusterConfiguration is mandatory. + +```yaml +apiVersion: kubeadm.k8s.io/v1beta3 +kind: InitConfiguration +bootstrapTokens: + ... +nodeRegistration: + ... +``` + +The InitConfiguration type should be used to configure runtime settings, that in case of kubeadm init +are the configuration of the bootstrap token and all the setting which are specific to the node where kubeadm +is executed, including: + +- NodeRegistration, that holds fields that relate to registering the new node to the cluster; + use it to customize the node name, the CRI socket to use or any other settings that should apply to this + node only (e.g. the node ip). + +- LocalAPIEndpoint, that represents the endpoint of the instance of the API server to be deployed on this node; + use it e.g. to customize the API server advertise address. + + ```yaml + apiVersion: kubeadm.k8s.io/v1beta3 + kind: ClusterConfiguration + networking: + ... + etcd: + ... + apiServer: + extraArgs: + ... + extraVolumes: + ... + ... + ``` + +The ClusterConfiguration type should be used to configure cluster-wide settings, +including settings for: + +- Networking, that holds configuration for the networking topology of the cluster; use it e.g. to customize + pod subnet or services subnet. +- Etcd configurations; use it e.g. to customize the local etcd or to configure the API server + for using an external etcd cluster. 
+- kube-apiserver, kube-scheduler, kube-controller-manager configurations; use it to customize control-plane + components by adding customized settings or overriding kubeadm default settings. + + ```yaml + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + kind: KubeProxyConfiguration + ... + ``` + +The KubeProxyConfiguration type should be used to change the configuration passed to kube-proxy instances deployed +in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. + +See https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/ or https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration +for the official kube-proxy documentation. + +```yaml +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +... +``` + +The KubeletConfiguration type should be used to change the configurations that will be passed to all kubelet instances +deployed in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. + +See https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/ or https://godoc.org/k8s.io/kubelet/config/v1beta1#KubeletConfiguration +for the official kubelet documentation. + +Here is a fully populated example of a single YAML file containing multiple +configuration types to be used during a `kubeadm init` run. 
+ +```yaml +apiVersion: kubeadm.k8s.io/v1beta3 +kind: InitConfiguration +bootstrapTokens: + - token: "9a08jv.c0izixklcxtmnze7" + description: "kubeadm bootstrap token" + ttl: "24h" + - token: "783bde.3f89s0fje9f38fhf" + description: "another bootstrap token" + usages: + - authentication + - signing + groups: + - system:bootstrappers:kubeadm:default-node-token +nodeRegistration: + name: "ec2-10-100-0-1" + criSocket: "/var/run/dockershim.sock" + taints: + - key: "kubeadmNode" + value: "master" + effect: "NoSchedule" + kubeletExtraArgs: + v: 4 + ignorePreflightErrors: + - IsPrivilegedUser + imagePullPolicy: "IfNotPresent" +localAPIEndpoint: + advertiseAddress: "10.100.0.1" + bindPort: 6443 +certificateKey: "e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204" +skipPhases: + - addon/kube-proxy +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: ClusterConfiguration +etcd: + # one of local or external + local: + imageRepository: "k8s.gcr.io" + imageTag: "3.2.24" + dataDir: "/var/lib/etcd" + extraArgs: + listen-client-urls: "http://10.100.0.1:2379" + serverCertSANs: + - "ec2-10-100-0-1.compute-1.amazonaws.com" + peerCertSANs: + - "10.100.0.1" + # external: + # endpoints: + # - "10.100.0.1:2379" + # - "10.100.0.2:2379" + # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" + # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" + # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key" +networking: + serviceSubnet: "10.96.0.0/12" + podSubnet: "10.100.0.1/24" + dnsDomain: "cluster.local" +kubernetesVersion: "v1.12.0" +controlPlaneEndpoint: "10.100.0.1:6443" +apiServer: + extraArgs: + authorization-mode: "Node,RBAC" + extraVolumes: + - name: "some-volume" + hostPath: "/etc/some-path" + mountPath: "/etc/some-pod-path" + readOnly: false + pathType: File + certSANs: + - "10.100.1.1" + - "ec2-10-100-0-1.compute-1.amazonaws.com" + timeoutForControlPlane: 4m0s +controllerManager: + extraArgs: + "node-cidr-mask-size": "20" + extraVolumes: + - name: "some-volume" + hostPath: "/etc/some-path" 
+ mountPath: "/etc/some-pod-path" + readOnly: false + pathType: File +scheduler: + extraArgs: + address: "10.100.0.1" + extraVolumes: + - name: "some-volume" + hostPath: "/etc/some-path" + mountPath: "/etc/some-pod-path" + readOnly: false + pathType: File +certificatesDir: "/etc/kubernetes/pki" +imageRepository: "k8s.gcr.io" +clusterName: "example-cluster" +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# kubelet specific options here +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +# kube-proxy specific options here +``` + +## Kubeadm join configuration types + +When executing kubeadm join with the `--config` option, the JoinConfiguration type should be provided. + +```yaml +apiVersion: kubeadm.k8s.io/v1beta3 +kind: JoinConfiguration +... +``` + +The JoinConfiguration type should be used to configure runtime settings, that in case of kubeadm join +are the discovery method used for accessing the cluster info and all the setting which are specific +to the node where kubeadm is executed, including: + +- NodeRegistration, that holds fields that relate to registering the new node to the cluster; + use it to customize the node name, the CRI socket to use or any other settings that should apply to this + node only (e.g. the node ip). +- APIEndpoint, that represents the endpoint of the instance of the API server to be eventually + deployed on this node. + +## Resource Types + + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta3-ClusterConfiguration) +- [InitConfiguration](#kubeadm-k8s-io-v1beta3-InitConfiguration) +- [JoinConfiguration](#kubeadm-k8s-io-v1beta3-JoinConfiguration) + + + + +## `ClusterConfiguration` {#kubeadm-k8s-io-v1beta3-ClusterConfiguration} + + + + + +ClusterConfiguration contains cluster-wide configuration for a kubeadm cluster + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubeadm.k8s.io/v1beta3
kind
string
ClusterConfiguration
etcd
+Etcd +
+ `etcd` holds configuration for etcd.
networking
+Networking +
+ `networking` holds configuration for the networking topology of the cluster.
kubernetesVersion
+string +
+ `kubernetesVersion` is the target version of the control plane.
controlPlaneEndpoint
+string +
+ `controlPlaneEndpoint` sets a stable IP address or DNS name for the control plane; it +can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. +In case the `controlPlaneEndpoint` is not specified, the `advertiseAddress` + `bindPort` +are used; in case the `controlPlaneEndpoint` is specified but without a TCP port, +the `bindPort` of the `localAPIEndpoint` is used. +Possible usages are: + +- In a cluster with more than one control plane instances, this field should be + assigned the address of the external load balancer in front of the + control plane instances. +- In environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane.
apiServer
+APIServer +
+ `apiServer` contains extra settings for the API server.
controllerManager
+ControlPlaneComponent +
+ `controllerManager` contains extra settings for the controller manager.
scheduler
+ControlPlaneComponent +
+ `scheduler` contains extra settings for the scheduler.
dns
+DNS +
+ `dns` defines the options for the DNS add-on.
certificatesDir
+string +
+ `certificatesDir` specifies where to store or look for all required certificates.
imageRepository
+string +
+ `imageRepository` sets the container registry to pull images from. +If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is +a CI build (kubernetes version starts with `ci/` or `ci-cross/`) +`gcr.io/k8s-staging-ci-images` will be used as a default for control plane +components and for kube-proxy, while `k8s.gcr.io` will be used for all the other images.
featureGates
+map[string]bool +
+ Feature gates enabled by the user.
clusterName
+string +
+ The cluster name.
+ + + +## `InitConfiguration` {#kubeadm-k8s-io-v1beta3-InitConfiguration} + + + + + +InitConfiguration contains a list of elements that is specific "kubeadm init"-only runtime +information. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubeadm.k8s.io/v1beta3
kind
string
InitConfiguration
bootstrapTokens
+[]BootstrapToken +
+ `bootstrapTokens` is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. +This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature.
nodeRegistration
+NodeRegistrationOptions +
+ `nodeRegistration` holds fields that relate to registering the new control-plane node to the cluster
localAPIEndpoint
+APIEndpoint +
+ `localAPIEndpoint` represents the endpoint of the API server instance that's deployed on this control plane node +In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint +is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This +configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible +on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process +fails you may set the desired value here.
certificateKey
+string +
+ `certificateKey` sets the key with which certificates and keys are encrypted prior to being uploaded in +a Secret in the cluster during the "uploadcerts" init phase.
skipPhases
+[]string +
+ `skipPhases` is a list of phases to skip during command execution. +The list of phases can be obtained with the `kubeadm init --help` command. +The flag `--skip-phases` takes precedence over this field.
patches
+Patches +
+ `patches` contains options related to applying patches to components deployed by kubeadm during +"kubeadm init".
+ + + +## `JoinConfiguration` {#kubeadm-k8s-io-v1beta3-JoinConfiguration} + + + + + +JoinConfiguration contains elements describing a particular node. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
kubeadm.k8s.io/v1beta3
kind
string
JoinConfiguration
nodeRegistration
+NodeRegistrationOptions +
+ `nodeRegistration` holds fields that relate to registering the new control-plane +node to the cluster
caCertPath
+string +
+ `caCertPath` is the path to the SSL certificate authority used to +secure comunications between node and control-plane. +Defaults to "/etc/kubernetes/pki/ca.crt".
discovery [Required]
+Discovery +
+ `discovery` specifies the options for the kubelet to use during the TLS Bootstrap process.
controlPlane
+JoinControlPlane +
+ `controlPlane` defines the additional control plane instance to be deployed on the +joining node. If nil, no additional control plane instance will be deployed.
skipPhases
+[]string +
+ `skipPhases` is a list of phases to skip during command execution. +The list of phases can be obtained with the `kubeadm join --help` command. +The flag `--skip-phases` takes precedence over this field.
patches
+Patches +
+ `patches` contains options related to applying patches to components deployed by kubeadm during +`kubeadm join`.
+ + + +## `APIEndpoint` {#kubeadm-k8s-io-v1beta3-APIEndpoint} + + + + +**Appears in:** + +- [InitConfiguration](#kubeadm-k8s-io-v1beta3-InitConfiguration) + +- [JoinControlPlane](#kubeadm-k8s-io-v1beta3-JoinControlPlane) + + +APIEndpoint struct contains elements of API server instance deployed on a node. + + + + + + + + + + + + + + + + + + +
FieldDescription
advertiseAddress
+string +
+ `advertiseAddress` sets the IP address for the API server to advertise.
bindPort
+int32 +
+ `bindPort` sets the secure port for the API Server to bind to. Defaults to 6443.
+ + + +## `APIServer` {#kubeadm-k8s-io-v1beta3-APIServer} + + + + +**Appears in:** + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta3-ClusterConfiguration) + + +APIServer holds settings necessary for API server deployments in the cluster + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
ControlPlaneComponent [Required]
+ControlPlaneComponent +
(Members of ControlPlaneComponent are embedded into this type.) + No description provided. +
certSANs
+[]string +
+ `certSANs` sets extra Subject Alternative Names for the API Server signing cert.
timeoutForControlPlane
+meta/v1.Duration +
+ `timeoutForControlPlane` controls the timeout that we use for API server to appear
+ + + +## `BootstrapTokenDiscovery` {#kubeadm-k8s-io-v1beta3-BootstrapTokenDiscovery} + + + + +**Appears in:** + +- [Discovery](#kubeadm-k8s-io-v1beta3-Discovery) + + +BootstrapTokenDiscovery is used to set the options for bootstrap token based discovery + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
token [Required]
+string +
+ `token` is a token used to validate cluster information fetched from the control-plane.
apiServerEndpoint
+string +
+ `apiServerEndpoint` is an IP or domain name to the API server from which +information will be fetched.
caCertHashes
+[]string +
+ CACertHashes specifies a set of public key pins to verify when token-based +discovery is used. The root CA found during discovery must match one of these +values. Specifying an empty set disables root CA pinning, which can be unsafe. +Each hash is specified as `:`, where the only currently supported +type is "sha256". This is a hex-encoded SHA-256 hash of the Subject Public Key +Info (SPKI) object in DER-encoded ASN.1. These hashes can be calculated using, +for example, OpenSSL.
unsafeSkipCAVerification
+bool +
+ `unsafeSkipCAVerification` allows token-based discovery without CA verification +via `caCertHashes`. This can weaken the security of kubeadm since other nodes +can impersonate the control-plane.
+ + + +## `ControlPlaneComponent` {#kubeadm-k8s-io-v1beta3-ControlPlaneComponent} + + + + +**Appears in:** + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta3-ClusterConfiguration) + +- [APIServer](#kubeadm-k8s-io-v1beta3-APIServer) + + +ControlPlaneComponent holds settings common to control plane component of the cluster + + + + + + + + + + + + + + + + + + +
FieldDescription
extraArgs
+map[string]string +
+ `extraArgs` is an extra set of flags to pass to the control plane component. +A key in this map is the flag name as it appears on the +command line except without leading dash(es).
extraVolumes
+[]HostPathMount +
+ `extraVolumes` is an extra set of host volumes, mounted to the control plane component.
+ + + +## `DNS` {#kubeadm-k8s-io-v1beta3-DNS} + + + + +**Appears in:** + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta3-ClusterConfiguration) + + +DNS defines the DNS addon that should be used in the cluster + + + + + + + + + + + + + +
FieldDescription
ImageMeta [Required]
+ImageMeta +
(Members of ImageMeta are embedded into this type.) + `imageMeta` allows to customize the image used for the DNS component.
+ + + +## `DNSAddOnType` {#kubeadm-k8s-io-v1beta3-DNSAddOnType} + +(Alias of `string`) + + + +DNSAddOnType defines string identifying DNS add-on types + + + + + +## `Discovery` {#kubeadm-k8s-io-v1beta3-Discovery} + + + + +**Appears in:** + +- [JoinConfiguration](#kubeadm-k8s-io-v1beta3-JoinConfiguration) + + +Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
bootstrapToken
+BootstrapTokenDiscovery +
+ `bootstrapToken` is used to set the options for bootstrap token based discovery. +`bootstrapToken` and `file` are mutually exclusive.
file
+FileDiscovery +
+ `file` specifies a file or URL to a kubeconfig file from which to load cluster information. +`bootstrapToken` and `file` are mutually exclusive.
tlsBootstrapToken
+string +
+ `tlsBootstrapToken` is a token used for TLS bootstrapping. +If `bootstrapToken` is set, this field is defaulted to `bootstrapToken.token`, +but can be overridden. +If `file` is set, this field ∗∗must be set∗∗ in case the KubeConfigFile does +not contain any other authentication information
timeout
+meta/v1.Duration +
+ `timeout` modifies the discovery timeout.
+ + + +## `Etcd` {#kubeadm-k8s-io-v1beta3-Etcd} + + + + +**Appears in:** + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta3-ClusterConfiguration) + + +Etcd contains elements describing Etcd configuration. + + + + + + + + + + + + + + + + + + +
FieldDescription
local
+LocalEtcd +
+ `local` provides configuration knobs for configuring the local etcd instance. +`local` and `external` are mutually exclusive.
external
+ExternalEtcd +
+ `external` describes how to connect to an external etcd cluster. +`local` and `external` are mutually exclusive.
+ + + +## `ExternalEtcd` {#kubeadm-k8s-io-v1beta3-ExternalEtcd} + + + + +**Appears in:** + +- [Etcd](#kubeadm-k8s-io-v1beta3-Etcd) + + +ExternalEtcd describes an external etcd cluster. +Kubeadm has no knowledge of where certificate files live and they must be supplied. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
endpoints [Required]
+[]string +
+ `endpoints` are endpoints of etcd members. This field is required.
caFile [Required]
+string +
+ `caFile` is an SSL Certificate Authority file used to secure etcd communication. +Required if using a TLS connection.
certFile [Required]
+string +
+ `certFile` is an SSL certification file used to secure etcd communication. +Required if using a TLS connection.
keyFile [Required]
+string +
+ `keyFile` is an SSL key file used to secure etcd communication. +Required if using a TLS connection.
+ + + +## `FileDiscovery` {#kubeadm-k8s-io-v1beta3-FileDiscovery} + + + + +**Appears in:** + +- [Discovery](#kubeadm-k8s-io-v1beta3-Discovery) + + +FileDiscovery is used to specify a file or URL to a kubeconfig file from which to load cluster information + + + + + + + + + + + + + +
FieldDescription
kubeConfigPath [Required]
+string +
+ `kubeConfigPath` specifies the actual file path or URL to the kubeconfig file +from which to load cluster information
+ + + +## `HostPathMount` {#kubeadm-k8s-io-v1beta3-HostPathMount} + + + + +**Appears in:** + +- [ControlPlaneComponent](#kubeadm-k8s-io-v1beta3-ControlPlaneComponent) + + +HostPathMount contains elements describing volumes that are mounted from the host. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+ `name` is the volume name inside the Pod template.
hostPath [Required]
+string +
+ `hostPath` is the path in the host that will be mounted inside the Pod.
mountPath [Required]
+string +
+ `mountPath` is the path inside the Pod where the `hostPath` volume is mounted.
readOnly
+bool +
+ `readOnly` controls write access to the volume.
pathType
+core/v1.HostPathType +
+ `pathType` is the type of the `hostPath` volume.
+ + + +## `ImageMeta` {#kubeadm-k8s-io-v1beta3-ImageMeta} + + + + +**Appears in:** + +- [DNS](#kubeadm-k8s-io-v1beta3-DNS) + +- [LocalEtcd](#kubeadm-k8s-io-v1beta3-LocalEtcd) + + +ImageMeta allows to customize the image used for components that are not +originated from the Kubernetes/Kubernetes release process + + + + + + + + + + + + + + + + + + +
FieldDescription
imageRepository
+string +
+ `imageRepository` sets the container registry to pull images from. +If not set, the ImageRepository defined in ClusterConfiguration will be used instead.
imageTag
+string +
+ `imageTag` allows to specify a tag for the image. +In case this value is set, kubeadm does not change automatically the +version of the above components during upgrades.
+ + + +## `JoinControlPlane` {#kubeadm-k8s-io-v1beta3-JoinControlPlane} + + + + +**Appears in:** + +- [JoinConfiguration](#kubeadm-k8s-io-v1beta3-JoinConfiguration) + + +JoinControlPlane contains elements describing an additional control plane instance to be deployed on the joining node. + + + + + + + + + + + + + + + + + + +
FieldDescription
localAPIEndpoint
+APIEndpoint +
+ `localAPIEndpoint` represents the endpoint of the API server instance to be deployed +on this node.
certificateKey
+string +
+ `certificateKey` is the key that is used for decryption of certificates after they +are downloaded from the secret upon joining a new control plane node. The +corresponding encryption key is in the InitConfiguration.
+ + + +## `LocalEtcd` {#kubeadm-k8s-io-v1beta3-LocalEtcd} + + + + +**Appears in:** + +- [Etcd](#kubeadm-k8s-io-v1beta3-Etcd) + + +LocalEtcd describes that kubeadm should run an etcd cluster locally + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
ImageMeta [Required]
+ImageMeta +
(Members of ImageMeta are embedded into this type.) + `ImageMeta` allows to customize the container used for etcd.
dataDir [Required]
+string +
+ `dataDir` is the directory etcd will place its data. +Defaults to "/var/lib/etcd".
extraArgs
+map[string]string +
+ `extraArgs` are extra arguments provided to the etcd binary +when run inside a static pod. +A key in this map is the flag name as it appears on the command line except +without leading dash(es).
serverCertSANs
+[]string +
+ `serverCertSANs` sets extra Subject Alternative Names for the etcd server signing cert.
peerCertSANs
+[]string +
+ `peerCertSANs` sets extra Subject Alternative Names for the etcd peer signing cert.
+ + + +## `Networking` {#kubeadm-k8s-io-v1beta3-Networking} + + + + +**Appears in:** + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta3-ClusterConfiguration) + + +Networking contains elements describing cluster's networking configuration + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
serviceSubnet
+string +
+ `serviceSubnet` is the subnet used by k8s services. Defaults to "10.96.0.0/12".
podSubnet
+string +
+ `podSubnet` is the subnet used by Pods.
dnsDomain
+string +
+ `dnsDomain` is the DNS domain used by k8s services. Defaults to "cluster.local".
+ + + +## `NodeRegistrationOptions` {#kubeadm-k8s-io-v1beta3-NodeRegistrationOptions} + + + + +**Appears in:** + +- [InitConfiguration](#kubeadm-k8s-io-v1beta3-InitConfiguration) + +- [JoinConfiguration](#kubeadm-k8s-io-v1beta3-JoinConfiguration) + + +NodeRegistrationOptions holds fields that relate to registering a new control-plane or node to the cluster, either via "kubeadm init" or "kubeadm join" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
name
+string +
+ `name` is the `.metadata.name` field of the Node API object that will be created in this +`kubeadm init` or `kubeadm join` operation. +This field is also used in the `CommonName` field of the kubelet's client certificate to the +API server. Defaults to the hostname of the node if not provided.
criSocket
+string +
+ `criSocket` is used to retrieve container runtime info. This information will be +annotated to the Node API object, for later re-use.
taints [Required]
+[]core/v1.Taint +
+ `taints` specifies the taints the Node API object should be registered with. If +this field is unset, i.e. nil, in the `kubeadm init` process, it will be defaulted +to `['"node-role.kubernetes.io/master"=""']`. If you don't want to taint your +control-plane node, set this field to an empty list, i.e. `taints: []` in the YAML +file. This field is solely used for Node registration.
kubeletExtraArgs
+map[string]string +
+ `kubeletExtraArgs` passes through extra arguments to the kubelet. The arguments here +are passed to the kubelet command line via the environment file kubeadm writes at +runtime for the kubelet to source. This overrides the generic base-level +configuration in the "kubelet-config-1.X" ConfigMap. Flags have higher priority when +parsing. These values are local and specific to the node kubeadm is executing on. +A key in this map is the flag name as it appears on the command line except without +leading dash(es).
ignorePreflightErrors
+[]string +
+ `ignorePreflightErrors` provides a slice of pre-flight errors to be ignored when +the current node is registered.
imagePullPolicy
+core/v1.PullPolicy +
+ `imagePullPolicy` specifies the policy for image pulling during `kubeadm init` and +`kubeadm join` operations. +The value of this field must be one of "Always", "IfNotPresent" or "Never". +If this field is unset kubeadm will default it to "IfNotPresent", or pull the required +images if not present on the host.
+ + + +## `Patches` {#kubeadm-k8s-io-v1beta3-Patches} + + + + +**Appears in:** + +- [InitConfiguration](#kubeadm-k8s-io-v1beta3-InitConfiguration) + +- [JoinConfiguration](#kubeadm-k8s-io-v1beta3-JoinConfiguration) + + +Patches contains options related to applying patches to components deployed by kubeadm. + + + + + + + + + + + + + +
FieldDescription
directory
+string +
+ `directory` is a path to a directory that contains files named +`target[suffix][+patchtype].extension`. +For example, `kube-apiserver0+merge.yaml` or just `etcd.json`. `target` can be one of +"kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". `patchtype` can be one +of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. +The default `patchtype` is "strategic". `extension` must be either "json" or "yaml". +`suffix` is an optional string that can be used to determine which patches are applied +first alpha-numerically.
+ + + + + +## `BootstrapToken` {#BootstrapToken} + + + + +**Appears in:** + +- [InitConfiguration](#kubeadm-k8s-io-v1beta3-InitConfiguration) + + +BootstrapToken describes one bootstrap token, stored as a Secret in the cluster + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
token [Required]
+BootstrapTokenString +
+ `token` is used for establishing bidirectional trust between nodes and control-planes. +Used for joining nodes in the cluster.
description
+string +
+ `description` sets a human-friendly message why this token exists and what it's used +for, so other administrators can know its purpose.
ttl
+meta/v1.Duration +
+ `ttl` defines the time to live for this token. Defaults to `24h`. +`expires` and `ttl` are mutually exclusive.
expires
+meta/v1.Time +
+ `expires` specifies the timestamp when this token expires. Defaults to being set +dynamically at runtime based on the `ttl`. `expires` and `ttl` are mutually exclusive.
usages
+[]string +
+ `usages` describes the ways in which this token can be used. Can by default be used +for establishing bidirectional trust, but that can be changed here.
groups
+[]string +
+ `groups` specifies the extra groups that this token will authenticate as when/if +used for authentication
+ +## `BootstrapTokenString` {#BootstrapTokenString} + + + + +**Appears in:** + +- [BootstrapToken](#BootstrapToken) + + +BootstrapTokenString is a token of the format `abcdef.abcdef0123456789` that is used +for both validation of the practically of the API server from a joining node's point +of view and as an authentication method for the node in the bootstrap phase of +"kubeadm join". This token is and should be short-lived. + + + + + + + + + + + + + + + + + + +
FieldDescription
- [Required]
+string +
+ No description provided. +
- [Required]
+string +
+ No description provided. +
diff --git a/content/en/docs/reference/config-api/kubelet-config.v1beta1.md b/content/en/docs/reference/config-api/kubelet-config.v1beta1.md index bee05b68db..261a6dd5f8 100644 --- a/content/en/docs/reference/config-api/kubelet-config.v1beta1.md +++ b/content/en/docs/reference/config-api/kubelet-config.v1beta1.md @@ -39,7 +39,8 @@ KubeletConfiguration contains the configuration for the Kubelet enableServer enables Kubelet's secured server. Note: Kubelet's insecure port is controlled by the readOnlyPort option. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Default: true @@ -51,7 +52,8 @@ Default: true staticPodPath is the path to the directory containing local (static) pods to run, or the path to a single static pod file. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that the set of static pods specified at the new path may be different than the ones the Kubelet initially started with, and this may disrupt your node. Default: "" @@ -64,7 +66,8 @@ Default: "" syncFrequency is the max period between synchronizing running containers and config. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that shortening this duration may have a negative performance impact, especially as the number of Pods on the node increases. Alternatively, increasing this duration will result in longer refresh times for ConfigMaps and Secrets. 
@@ -77,8 +80,9 @@ Default: "1m" fileCheckFrequency is the duration between checking config files for -new data -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +new data. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that shortening the duration will cause the Kubelet to reload local Static Pod configurations more frequently, which may have a negative performance impact. Default: "20s" @@ -89,8 +93,9 @@ Default: "20s" meta/v1.Duration - httpCheckFrequency is the duration between checking http for new data -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + httpCheckFrequency is the duration between checking http for new data. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that shortening the duration will cause the Kubelet to poll staticPodURL more frequently, which may have a negative performance impact. Default: "20s" @@ -101,8 +106,9 @@ Default: "20s" string - staticPodURL is the URL for accessing static pods to run -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + staticPodURL is the URL for accessing static pods to run. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that the set of static pods specified at the new URL may be different than the ones the Kubelet initially started with, and this may disrupt your node. Default: "" @@ -113,8 +119,9 @@ Default: "" map[string][]string - staticPodURLHeader is a map of slices with HTTP headers to use when accessing the podURL -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + staticPodURLHeader is a map of slices with HTTP headers to use when accessing the podURL. 
+If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt the ability to read the latest set of static pods from StaticPodURL. Default: nil @@ -126,7 +133,8 @@ Default: nil address is the IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces). -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Default: "0.0.0.0" @@ -137,7 +145,9 @@ Default: "0.0.0.0" port is the port for the Kubelet to serve on. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +The port number must be between 1 and 65535, inclusive. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Default: 10250 @@ -149,7 +159,10 @@ Default: 10250 readOnlyPort is the read-only port for the Kubelet to serve on with no authentication/authorization. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +The port number must be between 1 and 65535, inclusive. +Setting this field to 0 disables the read-only service. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Default: 0 (disabled) @@ -164,7 +177,8 @@ if any, concatenated after server cert). If tlsCertFile and tlsPrivateKeyFile are not provided, a self-signed certificate and key are generated for the public address and saved to the directory passed to the Kubelet's --cert-dir flag. 
-Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Default: "" @@ -174,8 +188,9 @@ Default: "" string - tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Default: "" @@ -185,9 +200,10 @@ Default: "" []string - TLSCipherSuites is the list of allowed cipher suites for the server. + tlsCipherSuites is the list of allowed cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Default: nil @@ -197,9 +213,10 @@ Default: nil string - TLSMinVersion is the minimum TLS version supported. + tlsMinVersion is the minimum TLS version supported. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Default: "" @@ -212,7 +229,8 @@ Default: "" rotateCertificates enables client certificate rotation. The Kubelet will request a new certificate from the certificates.k8s.io API. 
This requires an approver to approve the certificate signing requests. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that disabling it may disrupt the Kubelet's ability to authenticate with the API server after the current certificate expires. Default: false @@ -225,10 +243,11 @@ Default: false serverTLSBootstrap enables server certificate bootstrap. Instead of self signing a serving certificate, the Kubelet will request a certificate from -the certificates.k8s.io API. This requires an approver to approve the -certificate signing requests. The RotateKubeletServerCertificate feature -must be enabled. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +the 'certificates.k8s.io' API. This requires an approver to approve the +certificate signing requests (CSR). The RotateKubeletServerCertificate feature +must be enabled when setting this field. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that disabling it will stop the renewal of Kubelet server certificates, which can disrupt components that interact with the Kubelet server in the long term, due to certificate expiration. @@ -240,8 +259,9 @@ Default: false KubeletAuthentication - authentication specifies how requests to the Kubelet's server are authenticated -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + authentication specifies how requests to the Kubelet's server are authenticated. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. 
Defaults: anonymous: @@ -256,8 +276,9 @@ Defaults: KubeletAuthorization - authorization specifies how requests to the Kubelet's server are authorized -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + authorization specifies how requests to the Kubelet's server are authorized. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Defaults: mode: Webhook @@ -272,8 +293,10 @@ Defaults: registryPullQPS is the limit of registry pulls per second. -Set to 0 for no limit. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +The value must not be a negative number. +Setting it to 0 means no limit. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact scalability by changing the amount of traffic produced by image pulls. Default: 5 @@ -286,8 +309,10 @@ Default: 5 registryBurst is the maximum size of bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registryPullQPS. -Only used if registryPullQPS > 0. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +The value must not be a negative number. +Only used if registryPullQPS is greater than 0. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact scalability by changing the amount of traffic produced by image pulls. Default: 10 @@ -299,8 +324,9 @@ Default: 10 eventRecordQPS is the maximum event creations per second. If 0, there -is no limit enforced. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +is no limit enforced. The value cannot be a negative number. 
+If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact scalability by changing the amount of traffic produced by event creations. Default: 5 @@ -313,8 +339,10 @@ Default: 5 eventBurst is the maximum size of a burst of event creations, temporarily allows event creations to burst to this number, while still not exceeding -eventRecordQPS. Only used if eventRecordQPS > 0. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +eventRecordQPS. This field cannot be a negative number and it is only used +when eventRecordQPS > 0. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact scalability by changing the amount of traffic produced by event creations. Default: 10 @@ -328,7 +356,8 @@ Default: 10 enableDebuggingHandlers enables server endpoints for log access and local running of containers and commands, including the exec, attach, logs, and portforward features. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that disabling it may disrupt components that interact with the Kubelet server. Default: true @@ -339,7 +368,8 @@ Default: true enableContentionProfiling enables lock contention profiling, if enableDebuggingHandlers is true. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that enabling it may carry a performance impact. Default: false @@ -349,8 +379,10 @@ Default: false int32 - healthzPort is the port of the localhost healthz endpoint (set to 0 to disable) -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + healthzPort is the port of the localhost healthz endpoint (set to 0 to disable). 
+A valid number is between 1 and 65535. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that monitor Kubelet health. Default: 10248 @@ -360,8 +392,9 @@ Default: 10248 string - healthzBindAddress is the IP address for the healthz server to serve on -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + healthzBindAddress is the IP address for the healthz server to serve on. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that monitor Kubelet health. Default: "127.0.0.1" @@ -373,7 +406,8 @@ Default: "127.0.0.1" oomScoreAdj is The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact the stability of nodes under memory pressure. Default: -999 @@ -386,7 +420,7 @@ Default: -999 clusterDomain is the DNS domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains. -Dynamic Kubelet Config (beta): Dynamically updating this field is not recommended, +Dynamic Kubelet Config (deprecated): Dynamically updating this field is not recommended, as it should be kept in sync with the rest of the cluster. Default: "" @@ -399,7 +433,8 @@ Default: "" clusterDNS is a list of IP addresses for the cluster DNS server. If set, kubelet will configure all containers to use this for DNS resolution instead of the host's DNS servers. 
-Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that changes will only take effect on Pods created after the update. Draining the node is recommended before changing this field. Default: nil @@ -412,7 +447,8 @@ Default: nil streamingConnectionIdleTimeout is the maximum time a streaming connection can be idle before the connection is automatically closed. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact components that rely on infrequent updates over streaming connections to the Kubelet server. Default: "4h" @@ -428,7 +464,8 @@ status. If node lease feature is not enabled, it is also the frequency that kubelet posts node status to master. Note: When node lease feature is not enabled, be cautious when changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact node scalability, and also that the node controller's nodeMonitorGracePeriod must be set to N∗NodeStatusUpdateFrequency, where N is the number of retries before the node controller marks @@ -445,10 +482,10 @@ Default: "10s" status to master if node status does not change. Kubelet will ignore this frequency and post node status immediately if any change is detected. It is only used when node lease feature is enabled. nodeStatusReportFrequency's -default value is 1m. But if nodeStatusUpdateFrequency is set explicitly, +default value is 5m. 
But if nodeStatusUpdateFrequency is set explicitly, nodeStatusReportFrequency's default value will be set to nodeStatusUpdateFrequency for backward compatibility. -Default: "1m" +Default: "5m" @@ -462,8 +499,10 @@ health by having the Kubelet create and periodically renew a lease, named after in the kube-node-lease namespace. If the lease expires, the node can be considered unhealthy. The lease is currently renewed every 10s, per KEP-0009. In the future, the lease renewal interval may be set based on the lease duration. +The field value must be greater than 0. Requires the NodeLease feature gate to be enabled. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that decreasing the duration may reduce tolerance for issues that temporarily prevent the Kubelet from renewing the lease (e.g. a short-lived network issue). Default: 40 @@ -476,7 +515,8 @@ Default: 40 imageMinimumGCAge is the minimum age for an unused image before it is garbage collected. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may trigger or delay garbage collection, and may change the image overhead on the node. Default: "2m" @@ -488,9 +528,12 @@ Default: "2m" imageGCHighThresholdPercent is the percent of disk usage after which -image garbage collection is always run. The percent is calculated as -this field value out of 100. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +image garbage collection is always run. The percent is calculated by +dividing this field value by 100, so this field must be between 0 and +100, inclusive. When specified, the value must be greater than +imageGCLowThresholdPercent. 
+If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may trigger or delay garbage collection, and may change the image overhead on the node. Default: 85 @@ -503,8 +546,11 @@ Default: 85 imageGCLowThresholdPercent is the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage -collect to. The percent is calculated as this field value out of 100. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +collect to. The percent is calculated by dividing this field value by 100, +so the field value must be between 0 and 100, inclusive. When specified, the +value must be less than imageGCHighThresholdPercent. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may trigger or delay garbage collection, and may change the image overhead on the node. Default: 80 @@ -515,8 +561,10 @@ Default: 80 meta/v1.Duration - How frequently to calculate and cache volume disk usage for all pods -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + volumeStatsAggPeriod is the frequency for calculating and caching volume +disk usage for all pods. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that shortening the period may carry a performance impact. Default: "1m" @@ -527,7 +575,7 @@ Default: "1m" kubeletCgroups is the absolute name of cgroups to isolate the kubelet in -Dynamic Kubelet Config (beta): This field should not be updated without a full node +Dynamic Kubelet Config (deprecated): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. Default: "" @@ -540,7 +588,8 @@ Default: "" systemCgroups is absolute name of cgroups in which to place all non-kernel processes that are not already in a container. Empty for no container. 
Rolling back the flag requires a reboot. -Dynamic Kubelet Config (beta): This field should not be updated without a full node +The cgroupRoot must be specified if this field is not empty. +Dynamic Kubelet Config (deprecated): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. Default: "" @@ -552,7 +601,7 @@ Default: "" cgroupRoot is the root cgroup to use for pods. This is handled by the container runtime on a best effort basis. -Dynamic Kubelet Config (beta): This field should not be updated without a full node +Dynamic Kubelet Config (deprecated): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. Default: "" @@ -562,10 +611,10 @@ Default: "" bool - Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes -And all Burstable and BestEffort pods are brought up under their -specific top level QoS cgroup. -Dynamic Kubelet Config (beta): This field should not be updated without a full node + cgroupsPerQOS enable QoS based CGroup hierarchy: top level CGroups for QoS classes +and all Burstable and BestEffort Pods are brought up under their specific top level +QoS CGroup. +Dynamic Kubelet Config (deprecated): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. Default: true @@ -575,8 +624,9 @@ Default: true string - driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) -Dynamic Kubelet Config (beta): This field should not be updated without a full node + cgroupDriver is the driver kubelet uses to manipulate CGroups on the host (cgroupfs +or systemd). +Dynamic Kubelet Config (deprecated): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. 
Default: "cgroupfs" @@ -586,11 +636,24 @@ Default: "cgroupfs" string - CPUManagerPolicy is the name of the policy to use. + cpuManagerPolicy is the name of the policy to use. Requires the CPUManager feature gate to be enabled. +Dynamic Kubelet Config (deprecated): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: "None" + + + +cpuManagerPolicyOptions
+map[string]string + + + cpuManagerPolicyOptions is a set of key=value which allows to set extra options +to fine tune the behaviour of the cpu manager policies. +Requires both the "CPUManager" and "CPUManagerPolicyOptions" feature gates to be enabled. Dynamic Kubelet Config (beta): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. -Default: "none" +Default: nil @@ -598,21 +661,44 @@ Default: "none" meta/v1.Duration - CPU Manager reconciliation period. + cpuManagerReconcilePeriod is the reconciliation period for the CPU Manager. Requires the CPUManager feature gate to be enabled. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that shortening the period may carry a performance impact. Default: "10s" +memoryManagerPolicy
+string + + + memoryManagerPolicy is the name of the policy to use by memory manager. +Requires the MemoryManager feature gate to be enabled. +Dynamic Kubelet Config (deprecated): This field should not be updated without a full node +reboot. It is safest to keep this value the same as the local config. +Default: "none" + + + topologyManagerPolicy
string - TopologyManagerPolicy is the name of the policy to use. + topologyManagerPolicy is the name of the topology manager policy to use. +Valid values include: + +- `restricted`: kubelet only allows pods with optimal NUMA node alignment for + requested resources; +- `best-effort`: kubelet will favor pods with NUMA alignment of CPU and device + resources; +- `none`: kubelet has no knowledge of NUMA alignment of a pod's CPU and device resources. +- `single-numa-node`: kubelet only allows pods with a single NUMA alignment + of CPU and device resources. + Policies other than "none" require the TopologyManager feature gate to be enabled. -Dynamic Kubelet Config (beta): This field should not be updated without a full node +Dynamic Kubelet Config (deprecated): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. Default: "none" @@ -622,8 +708,12 @@ Default: "none" string - TopologyManagerScope represents the scope of topology hint generation -that topology manager requests and hint providers generate. + topologyManagerScope represents the scope of topology hint generation +that topology manager requests and hint providers generate. Valid values include: + +- `container`: topology policy is applied on a per-container basis. +- `pod`: topology policy is applied on a per-pod basis. + "pod" scope requires the TopologyManager feature gate to be enabled. Default: "container" @@ -638,7 +728,7 @@ the minimum percentage of a resource reserved for exclusive use by the guaranteed QoS tier. Currently supported resources: "memory" Requires the QOSReserved feature gate to be enabled. -Dynamic Kubelet Config (beta): This field should not be updated without a full node +Dynamic Kubelet Config (deprecated): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. 
Default: nil @@ -650,7 +740,8 @@ Default: nil runtimeRequestTimeout is the timeout for all runtime requests except long running requests - pull, logs, exec and attach. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Default: "2m" @@ -664,12 +755,15 @@ Default: "2m" bridge for hairpin packets. Setting this flag allows endpoints in a Service to loadbalance back to themselves if they should try to access their own Service. Values: - "promiscuous-bridge": make the container bridge promiscuous. - "hairpin-veth": set the hairpin flag on container veth interfaces. - "none": do nothing. -Generally, one must set --hairpin-mode=hairpin-veth to achieve hairpin NAT, + +- "promiscuous-bridge": make the container bridge promiscuous. +- "hairpin-veth": set the hairpin flag on container veth interfaces. +- "none": do nothing. + +Generally, one must set `--hairpin-mode=hairpin-veth` to achieve hairpin NAT, because promiscuous-bridge assumes the existence of a container bridge named cbr0. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may require a node reboot, depending on the network plugin. Default: "promiscuous-bridge" @@ -679,8 +773,10 @@ Default: "promiscuous-bridge" int32 - maxPods is the number of pods that can run on this Kubelet. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + maxPods is the maximum number of Pods that can run on this Kubelet. +The value must be a non-negative integer. 
+If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that changes may cause Pods to fail admission on Kubelet restart, and may change the value reported in Node.Status.Capacity[v1.ResourcePods], thus affecting future scheduling decisions. Increasing this value may also decrease performance, @@ -693,9 +789,9 @@ Default: 110 string - The CIDR to use for pod IP addresses, only used in standalone mode. -In cluster mode, this is obtained from the master. -Dynamic Kubelet Config (beta): This field should always be set to the empty default. + podCIDR is the CIDR to use for pod IP addresses, only used in standalone mode. +In cluster mode, this is obtained from the control plane. +Dynamic Kubelet Config (deprecated): This field should always be set to the empty default. It should only set for standalone Kubelets, which cannot use Dynamic Kubelet Config. Default: "" @@ -705,8 +801,9 @@ Default: "" int64 - PodPidsLimit is the maximum number of pids in any pod. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + podPidsLimit is the maximum number of PIDs in any pod. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that lowering it may prevent container processes from forking after the change. Default: -1 @@ -716,9 +813,10 @@ Default: -1 string - ResolverConfig is the resolver configuration file used as the basis + resolvConf is the resolver configuration file used as the basis for the container DNS resolution configuration. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that changes will only take effect on Pods created after the update. Draining the node is recommended before changing this field. 
Default: "/etc/resolv.conf" @@ -729,7 +827,7 @@ Default: "/etc/resolv.conf" bool - RunOnce causes the Kubelet to check the API server once for pods, + runOnce causes the Kubelet to check the API server once for pods, run those in addition to the pods specified by static pod files, and exit. Default: false @@ -741,7 +839,8 @@ Default: false cpuCFSQuota enables CPU CFS quota enforcement for containers that specify CPU limits. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that disabling it may reduce node stability. Default: true @@ -751,8 +850,11 @@ Default: true meta/v1.Duration - CPUCFSQuotaPeriod is the CPU CFS quota period value, cpu.cfs_period_us. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + cpuCFSQuotaPeriod is the CPU CFS quota period value, `cpu.cfs_period_us`. +The value must be between 1 us and 1 second, inclusive. +Requires the CustomCPUCFSQuotaPeriod feature gate to be enabled. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that limits set for containers will result in different cpu.cfs_quota settings. This will trigger container restarts on the node being reconfigured. Default: "100ms" @@ -763,9 +865,11 @@ Default: "100ms" int32 - nodeStatusMaxImages caps the number of images reported in Node.Status.Images. + nodeStatusMaxImages caps the number of images reported in Node.status.images. +The value must be greater than -2. Note: If -1 is specified, no cap will be applied. If 0 is specified, no image is returned. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that different values can be reported on node status. 
Default: 50 @@ -776,7 +880,9 @@ Default: 50 maxOpenFiles is Number of files that can be opened by Kubelet process. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +The value must be a non-negative number. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact the ability of the Kubelet to interact with the node's filesystem. Default: 1000000 @@ -787,7 +893,8 @@ Default: 1000000 contentType is contentType of requests sent to apiserver. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact the ability for the Kubelet to communicate with the API server. If the Kubelet loses contact with the API server due to a change to this field, the change cannot be reverted via dynamic Kubelet config. @@ -799,8 +906,9 @@ Default: "application/vnd.kubernetes.protobuf" int32 - kubeAPIQPS is the QPS to use while talking with kubernetes apiserver -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact scalability by changing the amount of traffic the Kubelet sends to the API server. Default: 5 @@ -811,8 +919,10 @@ Default: 5 int32 - kubeAPIBurst is the burst to allow while talking with kubernetes apiserver -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + kubeAPIBurst is the burst to allow while talking with kubernetes API server. +This field cannot be a negative number. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact scalability by changing the amount of traffic the Kubelet sends to the API server. 
Default: 10 @@ -827,7 +937,8 @@ Default: 10 at a time. We recommend ∗not∗ changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact the performance of image pulls. Default: true @@ -837,9 +948,11 @@ Default: true map[string]string - Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}. + evictionHard is a map of signal names to quantities that defines hard eviction +thresholds. For example: `{"memory.available": "300Mi"}`. To explicitly disable, pass a 0% or 100% threshold on an arbitrary resource. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may trigger or delay Pod evictions. Default: memory.available: "100Mi" @@ -853,9 +966,10 @@ Default: map[string]string - Map of signal names to quantities that defines soft eviction thresholds. -For example: {"memory.available": "300Mi"}. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + evictionSoft is a map of signal names to quantities that defines soft eviction thresholds. +For example: `{"memory.available": "300Mi"}`. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may trigger or delay Pod evictions, and may change the allocatable reported by the node. Default: nil @@ -866,9 +980,10 @@ Default: nil map[string]string - Map of signal names to quantities that defines grace periods for each soft eviction signal. -For example: {"memory.available": "30s"}. 
-Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + evictionSoftGracePeriod is a map of signal names to quantities that defines grace +periods for each soft eviction signal. For example: `{"memory.available": "30s"}`. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may trigger or delay Pod evictions. Default: nil @@ -878,8 +993,10 @@ Default: nil meta/v1.Duration - Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + evictionPressureTransitionPeriod is the duration for which the kubelet has to wait +before transitioning out of an eviction pressure condition. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that lowering it may decrease the stability of the node when the node is overcommitted. Default: "5m" @@ -889,13 +1006,14 @@ Default: "5m" int32 - Maximum allowed grace period (in seconds) to use when terminating pods in -response to a soft eviction threshold being met. This value effectively caps -the Pod's TerminationGracePeriodSeconds value during soft evictions. + evictionMaxPodGracePeriod is the maximum allowed grace period (in seconds) to use +when terminating pods in response to a soft eviction threshold being met. This value +effectively caps the Pod's terminationGracePeriodSeconds value during soft evictions. Note: Due to issue #64530, the behavior has a bug where this value currently just overrides the grace period during soft eviction, which can increase the grace period from what is set on the Pod. This bug will be fixed in a future release. 
-Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that lowering it decreases the amount of time Pods will have to gracefully clean up before being killed during a soft eviction. Default: 0 @@ -906,10 +1024,12 @@ Default: 0 map[string]string - Map of signal names to quantities that defines minimum reclaims, which describe the minimum -amount of a given resource the kubelet will reclaim when performing a pod eviction while -that resource is under pressure. For example: {"imagefs.available": "2Gi"} -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + evictionMinimumReclaim is a map of signal names to quantities that defines minimum reclaims, +which describe the minimum amount of a given resource the kubelet will reclaim when +performing a pod eviction while that resource is under pressure. +For example: `{"imagefs.available": "2Gi"}`. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may change how well eviction can manage resource pressure. Default: nil @@ -919,11 +1039,13 @@ Default: nil int32 - podsPerCore is the maximum number of pods per core. Cannot exceed MaxPods. -If 0, this field is ignored. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + podsPerCore is the maximum number of pods per core. Cannot exceed maxPods. +The value must be a non-negative integer. +If 0, there is no limit on the number of Pods. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that changes may cause Pods to fail admission on Kubelet restart, and may change -the value reported in Node.Status.Capacity[v1.ResourcePods], thus affecting +the value reported in `Node.status.capacity.pods`, thus affecting future scheduling decisions. 
Increasing this value may also decrease performance, as more Pods can be packed into a single node. Default: 0 @@ -936,8 +1058,9 @@ Default: 0 enableControllerAttachDetach enables the Attach/Detach controller to manage attachment/detachment of volumes scheduled to this node, and -disables kubelet from executing any attach/detach operations -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +disables kubelet from executing any attach/detach operations. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that changing which component is responsible for volume management on a live node may result in volumes refusing to detach if the node is not drained prior to the update, and if Pods are scheduled to the node before the @@ -954,7 +1077,8 @@ Default: true protectKernelDefaults, if true, causes the Kubelet to error if kernel flags are not as it expects. Otherwise the Kubelet will attempt to modify kernel flags to match its expectation. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that enabling it may cause the Kubelet to crash-loop if the Kernel is not configured as Kubelet expects. Default: false @@ -965,10 +1089,12 @@ Default: false bool - If true, Kubelet ensures a set of iptables rules are present on host. -These rules will serve as utility rules for various components, e.g. KubeProxy. -The rules will be created based on IPTablesMasqueradeBit and IPTablesDropBit. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + makeIPTablesUtilChains, if true, causes the Kubelet ensures a set of iptables rules +are present on host. +These rules will serve as utility rules for various components, e.g. kube-proxy. +The rules will be created based on iptablesMasqueradeBit and iptablesDropBit. 
+If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that disabling it will prevent the Kubelet from healing locally misconfigured iptables rules. Default: true @@ -978,11 +1104,12 @@ Default: true int32 - iptablesMasqueradeBit is the bit of the iptables fwmark space to mark for SNAT + iptablesMasqueradeBit is the bit of the iptables fwmark space to mark for SNAT. Values must be within the range [0, 31]. Must be different from other mark bits. Warning: Please match the value of the corresponding parameter in kube-proxy. -TODO: clean up IPTablesMasqueradeBit in kube-proxy -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +TODO: clean up IPTablesMasqueradeBit in kube-proxy. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it needs to be coordinated with other components, like kube-proxy, and the update will only be effective if MakeIPTablesUtilChains is enabled. Default: 14 @@ -995,7 +1122,8 @@ Default: 14 iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets. Values must be within the range [0, 31]. Must be different from other mark bits. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it needs to be coordinated with other components, like kube-proxy, and the update will only be effective if MakeIPTablesUtilChains is enabled. Default: 15 @@ -1006,10 +1134,11 @@ Default: 15 map[string]bool - featureGates is a map of feature names to bools that enable or disable alpha/experimental + featureGates is a map of feature names to bools that enable or disable experimental features. This field modifies piecemeal the built-in default values from "k8s.io/kubernetes/pkg/features/kube_features.go". 
-Dynamic Kubelet Config (beta): If dynamically updating this field, consider the +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider the documentation for the features you are enabling or disabling. While we encourage feature developers to make it possible to dynamically enable and disable features, some changes may require node reboots, and some @@ -1023,19 +1152,29 @@ Default: nil failSwapOn tells the Kubelet to fail to start if swap is enabled on the node. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that setting it to true will cause the Kubelet to crash-loop if swap is enabled. Default: true +memorySwap
+MemorySwapConfiguration + + + memorySwap configures swap memory available to container workloads. + + + containerLogMaxSize
string - A quantity defines the maximum size of the container log file before it is rotated. -For example: "5Mi" or "256Ki". -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + containerLogMaxSize is a quantity defining the maximum size of the container log +file before it is rotated. For example: "5Mi" or "256Ki". +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may trigger log rotation. Default: "10Mi" @@ -1045,8 +1184,10 @@ Default: "10Mi" int32 - Maximum number of container log files that can be present for a container. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + containerLogMaxFiles specifies the maximum number of container log files that can +be present for a container. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that lowering it may cause log files to be deleted. Default: 5 @@ -1056,8 +1197,13 @@ Default: 5 ResourceChangeDetectionStrategy - ConfigMapAndSecretChangeDetectionStrategy is a mode in which -config map and secret managers are running. + configMapAndSecretChangeDetectionStrategy is a mode in which ConfigMap and Secret +managers are running. Valid values include: + +- `Get`: kubelet fetches necessary objects directly from the API server; +- `Cache`: kubelet uses TTL cache for object fetched from the API server; +- `Watch`: kubelet uses watches to observe changes to objects that are in its interest. + Default: "Watch" @@ -1070,7 +1216,8 @@ Default: "Watch" pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. 
-Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may not be possible to increase the reserved resources, because this requires resizing cgroups. Always look for a NodeAllocatableEnforced event after updating this field to ensure that the update was successful. @@ -1082,11 +1229,13 @@ Default: nil map[string]string - A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs + kubeReserved is a set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for kubernetes system components. Currently cpu, memory and local storage for root file system are supported. -See http://kubernetes.io/docs/user-guide/compute-resources for more detail. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +for more details. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may not be possible to increase the reserved resources, because this requires resizing cgroups. Always look for a NodeAllocatableEnforced event after updating this field to ensure that the update was successful. @@ -1098,9 +1247,10 @@ Default: nil string - This ReservedSystemCPUs option specifies the cpu list reserved for the host level system threads and kubernetes related threads. -This provide a "static" CPU list rather than the "dynamic" list by system-reserved and kube-reserved. -This option overwrites CPUs provided by system-reserved and kube-reserved. + The reservedSystemCPUs option specifies the CPU list reserved for the host +level system threads and kubernetes related threads. This provide a "static" +CPU list rather than the "dynamic" list by systemReserved and kubeReserved. 
+This option does not support systemReservedCgroup or kubeReservedCgroup. @@ -1108,11 +1258,13 @@ This option overwrites CPUs provided by system-reserved and kube-reserved. string - The previous version for which you want to show hidden metrics. + showHiddenMetricsForVersion is the previous version for which you want to show +hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. -The format is ., e.g.: '1.16'. -The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, -rather than being surprised when they are permanently removed in the release after that. +The format is `.`, e.g.: `1.16`. +The purpose of this format is to make sure you have the opportunity to notice +if the next release hides additional metrics, rather than being surprised +when they are permanently removed in the release after that. Default: "" @@ -1121,9 +1273,11 @@ Default: "" string - This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons. -Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. -Dynamic Kubelet Config (beta): This field should not be updated without a full node + systemReservedCgroup helps the kubelet identify absolute name of top level CGroup used +to enforce `systemReserved` compute resource reservation for OS system daemons. +Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) +doc for more information. +Dynamic Kubelet Config (deprecated): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. 
Default: "" @@ -1133,9 +1287,11 @@ Default: "" string - This flag helps kubelet identify absolute name of top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons. -Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. -Dynamic Kubelet Config (beta): This field should not be updated without a full node + kubeReservedCgroup helps the kubelet identify absolute name of top level CGroup used +to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons. +Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) +doc for more information. +Dynamic Kubelet Config (deprecated): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. Default: "" @@ -1146,10 +1302,16 @@ Default: "" This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform. -This flag accepts a list of options. Acceptable options are `none`, `pods`, `system-reserved` & `kube-reserved`. +This flag accepts a list of options. Acceptable options are `none`, `pods`, +`system-reserved` and `kube-reserved`. If `none` is specified, no other options may be specified. -Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that +When `system-reserved` is in the list, systemReservedCgroup must be specified. +When `kube-reserved` is in the list, kubeReservedCgroup must be specified. +This field is supported only when `cgroupsPerQOS` is set to true. +Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) +for more information. 
+If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that removing enforcements may reduce the stability of the node. Alternatively, adding enforcements may reduce the stability of components which were using more than the reserved amount of resources; for example, enforcing kube-reserved may cause @@ -1163,9 +1325,9 @@ Default: ["pods"] []string - A comma separated whitelist of unsafe sysctls or sysctl patterns (ending in ∗). -Unsafe sysctl groups are kernel.shm∗, kernel.msg∗, kernel.sem, fs.mqueue.∗, and net.∗. -These sysctls are namespaced but not allowed by default. For example: "kernel.msg∗,net.ipv4.route.min_pmtu" + A comma separated whitelist of unsafe sysctls or sysctl patterns (ending in `∗`). +Unsafe sysctl groups are `kernel.shm∗`, `kernel.msg∗`, `kernel.sem`, `fs.mqueue.∗`, +and `net.∗`. For example: "`kernel.msg∗,net.ipv4.route.min_pmtu`" Default: [] @@ -1176,7 +1338,8 @@ Default: [] volumePluginDir is the full path of the directory in which to search for additional third party volume plugins. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that changing +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that changing the volumePluginDir may disrupt workloads relying on third party volume plugins. Default: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" @@ -1186,9 +1349,10 @@ Default: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" string - providerID, if set, sets the unique id of the instance that an external provider (i.e. cloudprovider) -can use to identify a specific node. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + providerID, if set, sets the unique ID of the instance that an external +provider (i.e. cloudprovider) can use to identify a specific node. 
+If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact the ability of the Kubelet to interact with cloud providers. Default: "" @@ -1198,9 +1362,11 @@ Default: "" bool - kernelMemcgNotification, if set, the kubelet will integrate with the kernel memcg notification -to determine if memory eviction thresholds are crossed rather than polling. -Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + kernelMemcgNotification, if set, instructs the kubelet to integrate with the +kernel memcg notification for determining if memory eviction thresholds are +exceeded rather than polling. +If DynamicKubeletConfig (deprecated; default off) is on, when +dynamically updating this field, consider that it may impact the way Kubelet interacts with the kernel. Default: false @@ -1210,9 +1376,10 @@ Default: false LoggingConfiguration - Logging specifies the options of logging. -Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. -Defaults: + logging specifies the options of logging. +Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) +for more information. +Default: Format: text @@ -1230,8 +1397,9 @@ Default: true meta/v1.Duration - ShutdownGracePeriod specifies the total duration that the node should delay the shutdown and total grace period for pod termination during a node shutdown. -Default: "30s" + shutdownGracePeriod specifies the total duration that the node should delay the +shutdown and total grace period for pod termination during a node shutdown. +Default: "0s" @@ -1239,9 +1407,81 @@ Default: "30s" meta/v1.Duration - ShutdownGracePeriodCriticalPods specifies the duration used to terminate critical pods during a node shutdown. This should be less than ShutdownGracePeriod. 
-For example, if ShutdownGracePeriod=30s, and ShutdownGracePeriodCriticalPods=10s, during a node shutdown the first 20 seconds would be reserved for gracefully terminating normal pods, and the last 10 seconds would be reserved for terminating critical pods. -Default: "10s" + shutdownGracePeriodCriticalPods specifies the duration used to terminate critical +pods during a node shutdown. This should be less than shutdownGracePeriod. +For example, if shutdownGracePeriod=30s, and shutdownGracePeriodCriticalPods=10s, +during a node shutdown the first 20 seconds would be reserved for gracefully +terminating normal pods, and the last 10 seconds would be reserved for terminating +critical pods. +Default: "0s" + + + +reservedMemory
+[]MemoryReservation + + + reservedMemory specifies a comma-separated list of memory reservations for NUMA nodes. +The parameter makes sense only in the context of the memory manager feature. +The memory manager will not allocate reserved memory for container workloads. +For example, if you have a NUMA0 with 10Gi of memory and the reservedMemory was +specified to reserve 1Gi of memory at NUMA0, the memory manager will assume that +only 9Gi is available for allocation. +You can specify a different amount of NUMA node and memory types. +You can omit this parameter entirely, but you should be aware that the amount of +reserved memory from all NUMA nodes should be equal to the amount of memory specified +by the [node allocatable](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable). +If at least one node allocatable parameter has a non-zero value, you will need +to specify at least one NUMA node. +Also, avoid specifying: + +1. Duplicates, the same NUMA node, and memory type, but with a different value. +2. zero limits for any memory type. +3. NUMA node IDs that do not exist on the machine. +4. memory types except for memory and hugepages- + +Default: nil + + + +enableProfilingHandler
+bool + + + enableProfilingHandler enables profiling via web interface host:port/debug/pprof/ +Default: true + + + +enableDebugFlagsHandler
+bool + + + enableDebugFlagsHandler enables flags endpoint via web interface host:port/debug/flags/v +Default: true + + + +seccompDefault
+bool + + + SeccompDefault enables the use of `RuntimeDefault` as the default seccomp profile for all workloads. +This requires the corresponding SeccompDefault feature gate to be enabled as well. +Default: false + + + +memoryThrottlingFactor
+float64 + + + MemoryThrottlingFactor specifies the factor multiplied by the memory limit or node allocatable memory +when setting the cgroupv2 memory.high value to enforce MemoryQoS. +Decreasing this factor will set lower high limit for container cgroups and put heavier reclaim pressure +while increasing will put less reclaim pressure. +See http://kep.k8s.io/2570 for more details. +Default: 0.8 @@ -1271,10 +1511,10 @@ It exists in the kubeletconfig API group because it is classified as a versioned source
-core/v1.NodeConfigSource +core/v1.NodeConfigSource - Source is the source that we are serializing + source is the source that we are serializing. @@ -1319,8 +1559,10 @@ hairpin packets. enabled allows anonymous requests to the kubelet server. -Requests that are not rejected by another authentication method are treated as anonymous requests. -Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. +Requests that are not rejected by another authentication method are treated as +anonymous requests. +Anonymous requests have a username of `system:anonymous`, and a group name of +`system:unauthenticated`. @@ -1351,7 +1593,7 @@ Anonymous requests have a username of system:anonymous, and a group name of syst KubeletX509Authentication - x509 contains settings related to x509 client certificate authentication + x509 contains settings related to x509 client certificate authentication. @@ -1359,7 +1601,7 @@ Anonymous requests have a username of system:anonymous, and a group name of syst KubeletWebhookAuthentication - webhook contains settings related to webhook bearer token authentication + webhook contains settings related to webhook bearer token authentication. @@ -1367,7 +1609,7 @@ Anonymous requests have a username of system:anonymous, and a group name of syst KubeletAnonymousAuthentication - anonymous contains settings related to anonymous authentication + anonymous contains settings related to anonymous authentication. @@ -1399,7 +1641,7 @@ Anonymous requests have a username of system:anonymous, and a group name of syst mode is the authorization mode to apply to requests to the kubelet server. -Valid values are AlwaysAllow and Webhook. +Valid values are `AlwaysAllow` and `Webhook`. Webhook mode uses the SubjectAccessReview API to determine authorization. @@ -1455,7 +1697,8 @@ Webhook mode uses the SubjectAccessReview API to determine authorization. 
bool - enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API + enabled allows bearer token authentication backed by the +tokenreviews.authentication.k8s.io API. @@ -1494,7 +1737,8 @@ Webhook mode uses the SubjectAccessReview API to determine authorization. meta/v1.Duration - cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer. + cacheAuthorizedTTL is the duration to cache 'authorized' responses from the +webhook authorizer. @@ -1502,7 +1746,8 @@ Webhook mode uses the SubjectAccessReview API to determine authorization. meta/v1.Duration - cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer. + cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from +the webhook authorizer. @@ -1533,8 +1778,9 @@ Webhook mode uses the SubjectAccessReview API to determine authorization. string - clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate -signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName, + clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request +presenting a client certificate signed by one of the authorities in the bundle +is authenticated with a username corresponding to the CommonName, and groups corresponding to the Organization in the client certificate. @@ -1544,6 +1790,80 @@ and groups corresponding to the Organization in the client certificate. +## `MemoryReservation` {#kubelet-config-k8s-io-v1beta1-MemoryReservation} + + + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + +MemoryReservation specifies the memory reservation of different types for each NUMA node + + + + + + + + + + + + + + + + + + +
FieldDescription
numaNode [Required]
+int32 +
+ No description provided. +
limits [Required]
+core/v1.ResourceList +
+ No description provided. +
+ + + +## `MemorySwapConfiguration` {#kubelet-config-k8s-io-v1beta1-MemorySwapConfiguration} + + + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + + + + + + + + + + + + + + + +
FieldDescription
swapBehavior
+string +
+ swapBehavior configures swap memory available to container workloads. May be one of +"", "LimitedSwap": workload combined memory and swap usage cannot exceed pod memory limit +"UnlimitedSwap": workloads can use unlimited swap, up to the allocatable limit.
+ + + ## `ResourceChangeDetectionStrategy` {#kubelet-config-k8s-io-v1beta1-ResourceChangeDetectionStrategy} (Alias of `string`) diff --git a/content/en/docs/reference/glossary/affinity.md b/content/en/docs/reference/glossary/affinity.md new file mode 100644 index 0000000000..e5cfc92ea2 --- /dev/null +++ b/content/en/docs/reference/glossary/affinity.md @@ -0,0 +1,22 @@ +--- +title: Affinity +id: affinity +date: 2019-01-11 +full_link: /docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity +short_description: > + Rules used by the scheduler to determine where to place pods +aka: +tags: +- fundamental +--- + +In Kubernetes, _affinity_ is a set of rules that give hints to the scheduler about where to place pods. + + +There are two kinds of affinity: +* [node affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity) +* [pod-to-pod affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity) + +The rules are defined using the Kubernetes {{< glossary_tooltip term_id="label" text="labels">}}, +and {{< glossary_tooltip term_id="selector" text="selectors">}} specified in {{< glossary_tooltip term_id="pod" text="pods" >}}, +and they can be either required or preferred, depending on how strictly you want the scheduler to enforce them. 
diff --git a/content/en/docs/reference/glossary/annotation.md b/content/en/docs/reference/glossary/annotation.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/api-eviction.md b/content/en/docs/reference/glossary/api-eviction.md index b13238c955..69fc9d9b0c 100644 --- a/content/en/docs/reference/glossary/api-eviction.md +++ b/content/en/docs/reference/glossary/api-eviction.md @@ -2,7 +2,7 @@ title: API-initiated eviction id: api-eviction date: 2021-04-27 -full_link: /docs/concepts/scheduling-eviction/pod-eviction/#api-eviction +full_link: /docs/concepts/scheduling-eviction/api-eviction/ short_description: > API-initiated eviction is the process by which you use the Eviction API to create an Eviction object that triggers graceful pod termination. diff --git a/content/en/docs/reference/glossary/application-architect.md b/content/en/docs/reference/glossary/application-architect.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/application-developer.md b/content/en/docs/reference/glossary/application-developer.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/approver.md b/content/en/docs/reference/glossary/approver.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/certificate.md b/content/en/docs/reference/glossary/certificate.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/cla.md b/content/en/docs/reference/glossary/cla.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/cloud-controller-manager.md b/content/en/docs/reference/glossary/cloud-controller-manager.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/cloud-provider.md b/content/en/docs/reference/glossary/cloud-provider.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/cluster-architect.md b/content/en/docs/reference/glossary/cluster-architect.md old 
mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/cluster-operator.md b/content/en/docs/reference/glossary/cluster-operator.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/cluster.md b/content/en/docs/reference/glossary/cluster.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/cncf.md b/content/en/docs/reference/glossary/cncf.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/code-contributor.md b/content/en/docs/reference/glossary/code-contributor.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/configmap.md b/content/en/docs/reference/glossary/configmap.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/container-env-variables.md b/content/en/docs/reference/glossary/container-env-variables.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/container.md b/content/en/docs/reference/glossary/container.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/contributor.md b/content/en/docs/reference/glossary/contributor.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/controller.md b/content/en/docs/reference/glossary/controller.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/cronjob.md b/content/en/docs/reference/glossary/cronjob.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/customresourcedefinition.md b/content/en/docs/reference/glossary/customresourcedefinition.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/daemonset.md b/content/en/docs/reference/glossary/daemonset.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/deployment.md b/content/en/docs/reference/glossary/deployment.md old mode 100755 new mode 100644 diff --git 
a/content/en/docs/reference/glossary/developer.md b/content/en/docs/reference/glossary/developer.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/docker.md b/content/en/docs/reference/glossary/docker.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/downstream.md b/content/en/docs/reference/glossary/downstream.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/dynamic-volume-provisioning.md b/content/en/docs/reference/glossary/dynamic-volume-provisioning.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/etcd.md b/content/en/docs/reference/glossary/etcd.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/eviction.md b/content/en/docs/reference/glossary/eviction.md new file mode 100644 index 0000000000..4437e43354 --- /dev/null +++ b/content/en/docs/reference/glossary/eviction.md @@ -0,0 +1,18 @@ +--- +title: Eviction +id: eviction +date: 2021-05-08 +full_link: /docs/concepts/scheduling-eviction/ +short_description: > + Process of terminating one or more Pods on Nodes +aka: +tags: +- operation +--- + +Eviction is the process of terminating one or more Pods on Nodes. + + +There are two kinds of eviction: +* [Node-pressure eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/) +* [API-initiated eviction](/docs/concepts/scheduling-eviction/api-eviction/) diff --git a/content/en/docs/reference/glossary/finalizer.md b/content/en/docs/reference/glossary/finalizer.md new file mode 100644 index 0000000000..c44386fbf3 --- /dev/null +++ b/content/en/docs/reference/glossary/finalizer.md @@ -0,0 +1,31 @@ +--- +title: Finalizer +id: finalizer +date: 2021-07-07 +full_link: /docs/concepts/overview/working-with-objects/finalizers/ +short_description: > + A namespaced key that tells Kubernetes to wait until specific conditions are met + before it fully deletes an object marked for deletion. 
+aka: +tags: +- fundamental +- operation +--- +Finalizers are namespaced keys that tell Kubernetes to wait until specific +conditions are met before it fully deletes resources marked for deletion. +Finalizers alert {{< glossary_tooltip text="controllers" term_id="controller" >}} +to clean up resources the deleted object owned. + + + +When you tell Kubernetes to delete an object that has finalizers specified for +it, the Kubernetes API marks the object for deletion, putting it into a +read-only state. The target object remains in a terminating state while the +control plane, or other components, take the actions defined by the finalizers. +After these actions are complete, the controller removes the relevant finalizers +from the target object. When the `metadata.finalizers` field is empty, +Kubernetes considers the deletion complete. + +You can use finalizers to control {{< glossary_tooltip text="garbage collection" term_id="garbage-collection" >}} +of resources. For example, you can define a finalizer to clean up related resources or +infrastructure before the controller deletes the target resource. \ No newline at end of file diff --git a/content/en/docs/reference/glossary/garbage-collection.md b/content/en/docs/reference/glossary/garbage-collection.md new file mode 100644 index 0000000000..ec2fe19af7 --- /dev/null +++ b/content/en/docs/reference/glossary/garbage-collection.md @@ -0,0 +1,24 @@ +--- +title: Garbage Collection +id: garbage-collection +date: 2021-07-07 +full_link: /docs/concepts/workloads/controllers/garbage-collection/ +short_description: > + A collective term for the various mechanisms Kubernetes uses to clean up cluster + resources. + +aka: +tags: +- fundamental +- operation +--- + Garbage collection is a collective term for the various mechanisms Kubernetes uses to clean up + cluster resources.
+ + + +Kubernetes uses garbage collection to clean up resources like [unused containers and images](/docs/concepts/workloads/controllers/garbage-collection/#containers-images), +[failed Pods](/docs/concepts/workloads/pods/pod-lifecycle/#pod-garbage-collection), +[objects owned by the targeted resource](/docs/concepts/overview/working-with-objects/owners-dependents/), +[completed Jobs](/docs/concepts/workloads/controllers/ttlafterfinished/), and resources +that have expired or failed. \ No newline at end of file diff --git a/content/en/docs/reference/glossary/helm-chart.md b/content/en/docs/reference/glossary/helm-chart.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/horizontal-pod-autoscaler.md b/content/en/docs/reference/glossary/horizontal-pod-autoscaler.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/image.md b/content/en/docs/reference/glossary/image.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/index.md b/content/en/docs/reference/glossary/index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/ingress.md b/content/en/docs/reference/glossary/ingress.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/init-container.md b/content/en/docs/reference/glossary/init-container.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/istio.md b/content/en/docs/reference/glossary/istio.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/job.md b/content/en/docs/reference/glossary/job.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/kops.md b/content/en/docs/reference/glossary/kops.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/kube-apiserver.md b/content/en/docs/reference/glossary/kube-apiserver.md old mode 100755 new mode 100644 diff --git 
a/content/en/docs/reference/glossary/kube-controller-manager.md b/content/en/docs/reference/glossary/kube-controller-manager.md old mode 100755 new mode 100644 index fa4205292c..78c22b32c7 --- a/content/en/docs/reference/glossary/kube-controller-manager.md +++ b/content/en/docs/reference/glossary/kube-controller-manager.md @@ -11,7 +11,7 @@ tags: - architecture - fundamental --- - Control Plane component that runs {{< glossary_tooltip text="controller" term_id="controller" >}} processes. + Control plane component that runs {{< glossary_tooltip text="controller" term_id="controller" >}} processes. diff --git a/content/en/docs/reference/glossary/kube-proxy.md b/content/en/docs/reference/glossary/kube-proxy.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/kube-scheduler.md b/content/en/docs/reference/glossary/kube-scheduler.md old mode 100755 new mode 100644 index a1a91a1527..96fc11a71d --- a/content/en/docs/reference/glossary/kube-scheduler.md +++ b/content/en/docs/reference/glossary/kube-scheduler.md @@ -2,7 +2,7 @@ title: kube-scheduler id: kube-scheduler date: 2018-04-12 -full_link: /docs/reference/generated/kube-scheduler/ +full_link: /docs/reference/command-line-tools-reference/kube-scheduler/ short_description: > Control plane component that watches for newly created pods with no assigned node, and selects a node for them to run on. 
diff --git a/content/en/docs/reference/glossary/kubeadm.md b/content/en/docs/reference/glossary/kubeadm.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/kubectl.md b/content/en/docs/reference/glossary/kubectl.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/kubelet.md b/content/en/docs/reference/glossary/kubelet.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/kubernetes-api.md b/content/en/docs/reference/glossary/kubernetes-api.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/label.md b/content/en/docs/reference/glossary/label.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/limitrange.md b/content/en/docs/reference/glossary/limitrange.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/managed-service.md b/content/en/docs/reference/glossary/managed-service.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/member.md b/content/en/docs/reference/glossary/member.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/minikube.md b/content/en/docs/reference/glossary/minikube.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/mirror-pod.md b/content/en/docs/reference/glossary/mirror-pod.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/name.md b/content/en/docs/reference/glossary/name.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/namespace.md b/content/en/docs/reference/glossary/namespace.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/network-policy.md b/content/en/docs/reference/glossary/network-policy.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/node.md b/content/en/docs/reference/glossary/node.md old mode 100755 new mode 100644 diff 
--git a/content/en/docs/reference/glossary/object.md b/content/en/docs/reference/glossary/object.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/operator-pattern.md b/content/en/docs/reference/glossary/operator-pattern.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/persistent-volume-claim.md b/content/en/docs/reference/glossary/persistent-volume-claim.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/persistent-volume.md b/content/en/docs/reference/glossary/persistent-volume.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/platform-developer.md b/content/en/docs/reference/glossary/platform-developer.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/pod-priority.md b/content/en/docs/reference/glossary/pod-priority.md index 994f8bc4d8..f0e0a0f1c6 100644 --- a/content/en/docs/reference/glossary/pod-priority.md +++ b/content/en/docs/reference/glossary/pod-priority.md @@ -2,7 +2,7 @@ title: Pod Priority id: pod-priority date: 2019-01-31 -full_link: /docs/concepts/configuration/pod-priority-preemption/#pod-priority +full_link: /docs/concepts/scheduling-eviction/pod-priority-preemption/#pod-priority short_description: > Pod Priority indicates the importance of a Pod relative to other Pods. @@ -14,4 +14,4 @@ tags: -[Pod Priority](/docs/concepts/configuration/pod-priority-preemption/#pod-priority) gives the ability to set scheduling priority of a Pod to be higher and lower than other Pods — an important feature for production clusters workload. +[Pod Priority](/docs/concepts/scheduling-eviction/pod-priority-preemption/#pod-priority) gives the ability to set scheduling priority of a Pod to be higher and lower than other Pods — an important feature for production clusters workload. 
diff --git a/content/en/docs/reference/glossary/pod-security-policy.md b/content/en/docs/reference/glossary/pod-security-policy.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/pod.md b/content/en/docs/reference/glossary/pod.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/preemption.md b/content/en/docs/reference/glossary/preemption.md index f27e36c66f..1a1f0e929a 100644 --- a/content/en/docs/reference/glossary/preemption.md +++ b/content/en/docs/reference/glossary/preemption.md @@ -2,7 +2,7 @@ title: Preemption id: preemption date: 2019-01-31 -full_link: /docs/concepts/configuration/pod-priority-preemption/#preemption +full_link: /docs/concepts/scheduling-eviction/pod-priority-preemption/#preemption short_description: > Preemption logic in Kubernetes helps a pending Pod to find a suitable Node by evicting low priority Pods existing on that Node. @@ -14,4 +14,4 @@ tags: -If a Pod cannot be scheduled, the scheduler tries to [preempt](/docs/concepts/configuration/pod-priority-preemption/#preemption) lower priority Pods to make scheduling of the pending Pod possible. +If a Pod cannot be scheduled, the scheduler tries to [preempt](/docs/concepts/scheduling-eviction/pod-priority-preemption/#preemption) lower priority Pods to make scheduling of the pending Pod possible. 
diff --git a/content/en/docs/reference/glossary/proxy.md b/content/en/docs/reference/glossary/proxy.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/qos-class.md b/content/en/docs/reference/glossary/qos-class.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/rbac.md b/content/en/docs/reference/glossary/rbac.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/replica-set.md b/content/en/docs/reference/glossary/replica-set.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/replication-controller.md b/content/en/docs/reference/glossary/replication-controller.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/resource-quota.md b/content/en/docs/reference/glossary/resource-quota.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/reviewer.md b/content/en/docs/reference/glossary/reviewer.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/secret.md b/content/en/docs/reference/glossary/secret.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/security-context.md b/content/en/docs/reference/glossary/security-context.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/selector.md b/content/en/docs/reference/glossary/selector.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/service-account.md b/content/en/docs/reference/glossary/service-account.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/service-broker.md b/content/en/docs/reference/glossary/service-broker.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/service-catalog.md b/content/en/docs/reference/glossary/service-catalog.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/service.md 
b/content/en/docs/reference/glossary/service.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/sig.md b/content/en/docs/reference/glossary/sig.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/statefulset.md b/content/en/docs/reference/glossary/statefulset.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/static-pod.md b/content/en/docs/reference/glossary/static-pod.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/storage-class.md b/content/en/docs/reference/glossary/storage-class.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/sysctl.md b/content/en/docs/reference/glossary/sysctl.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/uid.md b/content/en/docs/reference/glossary/uid.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/upstream.md b/content/en/docs/reference/glossary/upstream.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/userns.md b/content/en/docs/reference/glossary/userns.md new file mode 100644 index 0000000000..acfe754993 --- /dev/null +++ b/content/en/docs/reference/glossary/userns.md @@ -0,0 +1,28 @@ +--- +title: user namespace +id: userns +date: 2021-07-13 +full_link: https://man7.org/linux/man-pages/man7/user_namespaces.7.html +short_description: > + A Linux kernel feature to emulate superuser privilege for unprivileged users. + +aka: +tags: +- security +--- + +A kernel feature to emulate root. Used for "rootless containers". + + + +User namespaces are a Linux kernel feature that allows a non-root user to +emulate superuser ("root") privileges, +for example in order to run containers without being a superuser outside the container. + +User namespace is effective for mitigating damage of potential container break-out attacks. 
+ +In the context of user namespaces, the namespace is a Linux kernel feature, and not a +{{< glossary_tooltip text="namespace" term_id="namespace" >}} in the Kubernetes sense +of the term. + + diff --git a/content/en/docs/reference/glossary/volume-plugin.md b/content/en/docs/reference/glossary/volume-plugin.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/volume.md b/content/en/docs/reference/glossary/volume.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/glossary/wg.md b/content/en/docs/reference/glossary/wg.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/issues-security/security.md b/content/en/docs/reference/issues-security/security.md index 43b01a4172..7fe91a037a 100644 --- a/content/en/docs/reference/issues-security/security.md +++ b/content/en/docs/reference/issues-security/security.md @@ -29,7 +29,7 @@ To make a report, submit your vulnerability to the [Kubernetes bug bounty progra You can also email the private [security@kubernetes.io](mailto:security@kubernetes.io) list with the security details and the details expected for [all Kubernetes bug reports](https://git.k8s.io/kubernetes/.github/ISSUE_TEMPLATE/bug-report.md). -You may encrypt your email to this list using the GPG keys of the [Product Security Committee members](https://git.k8s.io/security/README.md#product-security-committee-psc). Encryption using GPG is NOT required to make a disclosure. +You may encrypt your email to this list using the GPG keys of the [Security Response Committee members](https://git.k8s.io/security/README.md#product-security-committee-psc). Encryption using GPG is NOT required to make a disclosure. ### When Should I Report a Vulnerability? @@ -47,13 +47,13 @@ You may encrypt your email to this list using the GPG keys of the [Product Secur ## Security Vulnerability Response -Each report is acknowledged and analyzed by Product Security Committee members within 3 working days. 
This will set off the [Security Release Process](https://git.k8s.io/security/security-release-process.md#disclosures). +Each report is acknowledged and analyzed by Security Response Committee members within 3 working days. This will set off the [Security Release Process](https://git.k8s.io/security/security-release-process.md#disclosures). -Any vulnerability information shared with Product Security Committee stays within Kubernetes project and will not be disseminated to other projects unless it is necessary to get the issue fixed. +Any vulnerability information shared with Security Response Committee stays within Kubernetes project and will not be disseminated to other projects unless it is necessary to get the issue fixed. As the security issue moves from triage, to identified fix, to release planning we will keep the reporter updated. ## Public Disclosure Timing -A public disclosure date is negotiated by the Kubernetes Product Security Committee and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to a few weeks. For a vulnerability with a straightforward mitigation, we expect report date to disclosure date to be on the order of 7 days. The Kubernetes Product Security Committee holds the final say when setting a disclosure date. +A public disclosure date is negotiated by the Kubernetes Security Response Committee and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. 
The timeframe for disclosure is from immediate (especially if it's already publicly known) to a few weeks. For a vulnerability with a straightforward mitigation, we expect report date to disclosure date to be on the order of 7 days. The Kubernetes Security Response Committee holds the final say when setting a disclosure date. diff --git a/content/en/docs/reference/kubectl/_index.md b/content/en/docs/reference/kubectl/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/kubectl/kubectl.md b/content/en/docs/reference/kubectl/kubectl.md index 2cb2db7f9f..1b855168e5 100644 --- a/content/en/docs/reference/kubectl/kubectl.md +++ b/content/en/docs/reference/kubectl/kubectl.md @@ -328,7 +328,31 @@ kubectl [flags] +## {{% heading "envvars" %}} + ++++ + + + + + + + + + + + + + + + + + +
KUBECONFIG
Path to the kubectl configuration ("kubeconfig") file. Default: "$HOME/.kube/config"
KUBECTL_COMMAND_HEADERS
When set to false, turns off extra HTTP headers detailing invoked kubectl command (Kubernetes version v1.22 or later)
## {{% heading "seealso" %}} diff --git a/content/en/docs/reference/kubectl/overview.md b/content/en/docs/reference/kubectl/overview.md index f8ec7e5603..611065eead 100644 --- a/content/en/docs/reference/kubectl/overview.md +++ b/content/en/docs/reference/kubectl/overview.md @@ -71,6 +71,32 @@ Flags that you specify from the command line override default values and any cor If you need help, run `kubectl help` from the terminal window. +## In-cluster authentication and namespace overrides + +By default `kubectl` will first determine if it is running within a pod, and thus in a cluster. It starts by checking for the `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` environment variables and the existence of a service account token file at `/var/run/secrets/kubernetes.io/serviceaccount/token`. If all three are found in-cluster authentication is assumed. + +To maintain backwards compatibility, if the `POD_NAMESPACE` environment variable is set during in-cluster authentication it will override the default namespace from the service account token. Any manifests or tools relying on namespace defaulting will be affected by this. + +**`POD_NAMESPACE` environment variable** + +If the `POD_NAMESPACE` environment variable is set, cli operations on namespaced resources will default to the variable value. For example, if the variable is set to `seattle`, `kubectl get pods` would return pods in the `seattle` namespace. This is because pods are a namespaced resource, and no namespace was provided in the command. Review the output of `kubectl api-resources` to determine if a resource is namespaced. + +Explicit use of `--namespace ` overrides this behavior.
+ +**How kubectl handles ServiceAccount tokens** + +If: +* there is Kubernetes service account token file mounted at + `/var/run/secrets/kubernetes.io/serviceaccount/token`, and +* the `KUBERNETES_SERVICE_HOST` environment variable is set, and +* the `KUBERNETES_SERVICE_PORT` environment variable is set, and +* you don't explicitly specify a namespace on the kubectl command line +then kubectl assumes it is running in your cluster. The kubectl tool looks up the +namespace of that ServiceAccount (this is the same as the namespace of the Pod) +and acts against that namespace. This is different from what happens outside of a +cluster; when kubectl runs outside a cluster and you don't specify a namespace, +the kubectl command acts against the `default` namespace. + ## Operations The following table includes short descriptions and the general syntax for all of the `kubectl` operations: @@ -89,7 +115,7 @@ Operation | Syntax | Description `cluster-info` | `kubectl cluster-info [flags]` | Display endpoint information about the master and services in the cluster. `completion` | `kubectl completion SHELL [options]` | Output shell completion code for the specified shell (bash or zsh). `config` | `kubectl config SUBCOMMAND [flags]` | Modifies kubeconfig files. See the individual subcommands for details. -`convert` | `kubectl convert -f FILENAME [options]` | Convert config files between different API versions. Both YAML and JSON formats are accepted. +`convert` | `kubectl convert -f FILENAME [options]` | Convert config files between different API versions. Both YAML and JSON formats are accepted. Note - requires `kubectl-convert` plugin to be installed. `cordon` | `kubectl cordon NODE [options]` | Mark node as unschedulable. `cp` | `kubectl cp [options]` | Copy files and directories to and from containers. `create` | `kubectl create -f FILENAME [flags]` | Create one or more resources from a file or stdin. 
diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md b/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md index 5cf56dd6e6..a02b668387 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md @@ -49,7 +49,7 @@ This API can be used to request client certificates to authenticate to kube-apis - **spec** (}}">CertificateSigningRequestSpec), required - spec contains the certificate request, and is immutable after creation. Only the request, signerName, and usages fields can be set on creation. Other fields are derived by Kubernetes and cannot be modified by users. + spec contains the certificate request, and is immutable after creation. Only the request, signerName, expirationSeconds, and usages fields can be set on creation. Other fields are derived by Kubernetes and cannot be modified by users. - **status** (}}">CertificateSigningRequestStatus) @@ -95,6 +95,23 @@ CertificateSigningRequestSpec contains the certificate request. 5. Expiration/certificate lifetime: whether it is fixed by the signer, configurable by the admin. 6. Whether or not requests for CA certificates are allowed. +- **expirationSeconds** (int32) + + expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration. + + The v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager. 
+ + Certificate signers may not honor this field for various reasons: + + 1. Old signer that is unaware of the field (such as the in-tree + implementations prior to v1.22) + 2. Signer whose configured maximum is shorter than the requested duration + 3. Signer whose configured minimum is longer than the requested duration + + The minimum valid value for expirationSeconds is 600, i.e. 10 minutes. + + As of v1.22, this field is beta and is controlled via the CSRDuration feature gate. + - **extra** (map[string][]string) extra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable. @@ -630,6 +647,8 @@ PATCH /apis/certificates.k8s.io/v1/certificatesigningrequests/{name} 200 (}}">CertificateSigningRequest): OK +201 (}}">CertificateSigningRequest): Created + 401: Unauthorized @@ -678,6 +697,8 @@ PATCH /apis/certificates.k8s.io/v1/certificatesigningrequests/{name}/approval 200 (}}">CertificateSigningRequest): OK +201 (}}">CertificateSigningRequest): Created + 401: Unauthorized @@ -726,6 +747,8 @@ PATCH /apis/certificates.k8s.io/v1/certificatesigningrequests/{name}/status 200 (}}">CertificateSigningRequest): OK +201 (}}">CertificateSigningRequest): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md b/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md index a83d44bbf9..250bc29d07 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md @@ -416,6 +416,8 @@ PATCH /api/v1/namespaces/{namespace}/serviceaccounts/{name} 200 (}}">ServiceAccount): OK +201 (}}">ServiceAccount): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md 
b/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md index b9ee5ab858..f215074e82 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md @@ -40,12 +40,15 @@ TokenRequest requests a token for a given service account. - **metadata** (}}">ObjectMeta) + Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **spec** (}}">TokenRequestSpec), required + Spec holds information about the request being evaluated - **status** (}}">TokenRequestStatus) + Status is filled in by the server and indicates whether the token can be authenticated. diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md b/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md index df71bf4e1a..8740fb27a4 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md @@ -40,6 +40,7 @@ TokenReview attempts to authenticate a token to a known user. Note: TokenReview - **metadata** (}}">ObjectMeta) + Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **spec** (}}">TokenReviewSpec), required diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md index ad6a0ff732..993148295b 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md @@ -351,6 +351,8 @@ PATCH /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/{name} 200 (}}">ClusterRoleBinding): OK +201 (}}">ClusterRoleBinding): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md index cc58c7804e..307d6ffff0 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md @@ -66,11 +66,11 @@ ClusterRole is a cluster level, logical grouping of PolicyRules that can be refe - **rules.resources** ([]string) - Resources is a list of resources this rule applies to. ResourceAll represents all resources. + Resources is a list of resources this rule applies to. '*' represents all resources. - **rules.verbs** ([]string), required - Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs. 
- **rules.resourceNames** ([]string) @@ -347,6 +347,8 @@ PATCH /apis/rbac.authorization.k8s.io/v1/clusterroles/{name} 200 (}}">ClusterRole): OK +201 (}}">ClusterRole): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md index a163bfa743..d1a61db7d6 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md @@ -40,6 +40,7 @@ LocalSubjectAccessReview checks whether or not a user or group can perform an ac - **metadata** (}}">ObjectMeta) + Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **spec** (}}">SubjectAccessReviewSpec), required diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md index bb847f3370..f02dcee05a 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md @@ -444,6 +444,8 @@ PATCH /apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/rolebindings/{na 200 (}}">RoleBinding): OK +201 (}}">RoleBinding): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md index 6c1f8b1fde..d96769ef24 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md @@ -55,11 +55,11 @@ Role is a namespaced, logical grouping of PolicyRules that can be 
referenced as - **rules.resources** ([]string) - Resources is a list of resources this rule applies to. ResourceAll represents all resources. + Resources is a list of resources this rule applies to. '*' represents all resources. - **rules.verbs** ([]string), required - Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs. - **rules.resourceNames** ([]string) @@ -429,6 +429,8 @@ PATCH /apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles/{name} 200 (}}">Role): OK +201 (}}">Role): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md index 430a4a953b..a8496aab72 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md @@ -40,6 +40,7 @@ SelfSubjectAccessReview checks whether or the current user can perform an action - **metadata** (}}">ObjectMeta) + Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **spec** (}}">SelfSubjectAccessReviewSpec), required diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md index 82ab54ec4f..f8d85dc23c 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md @@ -40,6 +40,7 @@ SelfSubjectRulesReview enumerates the set of actions the current user can perfor - **metadata** (}}">ObjectMeta) + Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **spec** (}}">SelfSubjectRulesReviewSpec), required @@ -105,7 +106,7 @@ SelfSubjectRulesReview enumerates the set of actions the current user can perfor ## SelfSubjectRulesReviewSpec {#SelfSubjectRulesReviewSpec} - +SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.
diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md index 5c8d23ea4d..cae105ba24 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md @@ -40,6 +40,7 @@ SubjectAccessReview checks whether or not a user or group can perform an action. - **metadata** (}}">ObjectMeta) + Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **spec** (}}">SubjectAccessReviewSpec), required diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md index 45f3629c39..9f06c7b3fe 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md @@ -40,6 +40,7 @@ APIService represents a server for a particular GroupVersion. Name must be "vers - **metadata** (}}">ObjectMeta) + Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **spec** (}}">APIServiceSpec) @@ -166,9 +167,11 @@ APIServiceList is a list of APIService objects. 
- **metadata** (}}">ListMeta) + Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **items** ([]}}">APIService), required + Items is the list of APIService @@ -486,6 +489,8 @@ PATCH /apis/apiregistration.k8s.io/v1/apiservices/{name} 200 (}}">APIService): OK +201 (}}">APIService): Created + 401: Unauthorized @@ -534,6 +539,8 @@ PATCH /apis/apiregistration.k8s.io/v1/apiservices/{name}/status 200 (}}">APIService): OK +201 (}}">APIService): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md index d01f3ee709..644496e7f0 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md @@ -492,6 +492,8 @@ PATCH /apis/events.k8s.io/v1/namespaces/{namespace}/events/{name} 200 (}}">Event): OK +201 (}}">Event): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md index 8329c6016b..0df4386eb1 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md @@ -106,10 +106,11 @@ FlowSchemaSpec describes how the FlowSchema's specification looks like. - **rules.subjects.kind** (string), required - Required + `kind` indicates which one of the other fields is non-empty. Required - **rules.subjects.group** (GroupSubject) + `group` matches based on user group name. *GroupSubject holds detailed information for group-kind subject.* @@ -120,6 +121,7 @@ FlowSchemaSpec describes how the FlowSchema's specification looks like. 
- **rules.subjects.serviceAccount** (ServiceAccountSubject) + `serviceAccount` matches ServiceAccounts. *ServiceAccountSubject holds detailed information for service-account-kind subject.* @@ -134,6 +136,7 @@ FlowSchemaSpec describes how the FlowSchema's specification looks like. - **rules.subjects.user** (UserSubject) + `user` matches based on username. *UserSubject holds detailed information for user-kind subject.* @@ -588,6 +591,8 @@ PATCH /apis/flowcontrol.apiserver.k8s.io/v1beta1/flowschemas/{name} 200 (}}">FlowSchema): OK +201 (}}">FlowSchema): Created + 401: Unauthorized @@ -636,6 +641,8 @@ PATCH /apis/flowcontrol.apiserver.k8s.io/v1beta1/flowschemas/{name}/status 200 (}}">FlowSchema): OK +201 (}}">FlowSchema): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md index 8f74401a59..4db3251991 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md @@ -442,6 +442,8 @@ PATCH /apis/coordination.k8s.io/v1/namespaces/{namespace}/leases/{name} 200 (}}">Lease): OK +201 (}}">Lease): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md index a05f7f4f26..8ae6934385 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md @@ -492,6 +492,8 @@ PATCH /api/v1/namespaces/{name} 200 (}}">Namespace): OK +201 (}}">Namespace): Created + 401: Unauthorized @@ -540,6 +542,8 @@ PATCH /api/v1/namespaces/{name}/status 200 (}}">Namespace): OK +201 (}}">Namespace): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md 
b/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md index 23764d9033..0046895782 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md @@ -62,17 +62,17 @@ NodeSpec describes the attributes that a node is created with. - **configSource** (NodeConfigSource) - If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field + Deprecated. If specified, the source of the node's configuration. The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration - *NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.* + *NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22* - **configSource.configMap** (ConfigMapNodeConfigSource) ConfigMap is a reference to a Node's ConfigMap - *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.* + *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration* - **configSource.configMap.kubeletConfigKey** (string), required @@ -226,14 +226,14 @@ NodeStatus is information about the current status of a node. Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error. - *NodeConfigSource specifies a source of node configuration. 
Exactly one subfield (excluding metadata) must be non-nil.* + *NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22* - **config.active.configMap** (ConfigMapNodeConfigSource) ConfigMap is a reference to a Node's ConfigMap - *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.* + *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration* - **config.active.configMap.kubeletConfigKey** (string), required @@ -260,14 +260,14 @@ NodeStatus is information about the current status of a node. Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned. - *NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.* + *NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22* - **config.assigned.configMap** (ConfigMapNodeConfigSource) ConfigMap is a reference to a Node's ConfigMap - *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.* + *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. 
This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration* - **config.assigned.configMap.kubeletConfigKey** (string), required @@ -298,14 +298,14 @@ NodeStatus is information about the current status of a node. LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future. - *NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.* + *NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22* - **config.lastKnownGood.configMap** (ConfigMapNodeConfigSource) ConfigMap is a reference to a Node's ConfigMap - *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.* + *ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration* - **config.lastKnownGood.configMap.kubeletConfigKey** (string), required @@ -352,7 +352,7 @@ NodeStatus is information about the current status of a node. 
*Describe a container image* - - **images.names** ([]string), required + - **images.names** ([]string) Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] @@ -770,6 +770,8 @@ PATCH /api/v1/nodes/{name} 200 (}}">Node): OK +201 (}}">Node): Created + 401: Unauthorized @@ -818,6 +820,8 @@ PATCH /api/v1/nodes/{name}/status 200 (}}">Node): OK +201 (}}">Node): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md index e7bd624556..eda105ab73 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md @@ -493,6 +493,8 @@ PATCH /apis/flowcontrol.apiserver.k8s.io/v1beta1/prioritylevelconfigurations/{na 200 (}}">PriorityLevelConfiguration): OK +201 (}}">PriorityLevelConfiguration): Created + 401: Unauthorized @@ -541,6 +543,8 @@ PATCH /apis/flowcontrol.apiserver.k8s.io/v1beta1/prioritylevelconfigurations/{na 200 (}}">PriorityLevelConfiguration): OK +201 (}}">PriorityLevelConfiguration): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md index b505277ccc..fad02bc731 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md @@ -366,6 +366,8 @@ PATCH /apis/node.k8s.io/v1/runtimeclasses/{name} 200 (}}">RuntimeClass): OK +201 (}}">RuntimeClass): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md 
b/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md index 81d66b38c7..40c7899adc 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md @@ -99,6 +99,10 @@ ObjectMeta is metadata that all persisted resources must have, which includes al Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'. + - **managedFields.subresource** (string) + + Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource. + - **managedFields.time** (Time) Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' diff --git a/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md b/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md index deb8164881..45a90e0411 100644 --- a/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md +++ b/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md @@ -28,7 +28,7 @@ guide. You can file document formatting bugs against the ## allowWatchBookmarks {#allowWatchBookmarks} -allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored. +allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md index be3ef5c8e4..774f12ae97 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md @@ -414,6 +414,8 @@ PATCH /api/v1/namespaces/{namespace}/configmaps/{name} 200 (}}">ConfigMap): OK +201 (}}">ConfigMap): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md index 22fcf194ee..db3a2a389b 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md @@ -64,9 +64,11 @@ CSIDriverSpec is the specification of a CSIDriver. - **fsGroupPolicy** (string) - Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate. + Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is beta, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate. This field is immutable. + + Defaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce. 
- **podInfoOnMount** (boolean) @@ -82,8 +84,6 @@ CSIDriverSpec is the specification of a CSIDriver. RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false. Note: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container. - - This is a beta feature and only available when the CSIServiceAccountToken feature is enabled. - **storageCapacity** (boolean) @@ -110,8 +110,6 @@ CSIDriverSpec is the specification of a CSIDriver. } Note: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically. - - This is a beta feature and only available when the CSIServiceAccountToken feature is enabled. *TokenRequest contains parameters of a service account token.* @@ -399,6 +397,8 @@ PATCH /apis/storage.k8s.io/v1/csidrivers/{name} 200 (}}">CSIDriver): OK +201 (}}">CSIDriver): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md index 343ab01135..5eb65b7e54 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md @@ -355,6 +355,8 @@ PATCH /apis/storage.k8s.io/v1/csinodes/{name} 200 (}}">CSINode): OK +201 (}}">CSINode): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-storage-capacity-v1beta1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-storage-capacity-v1beta1.md index cc915b8b15..08c6572f89 100644 --- 
a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-storage-capacity-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-storage-capacity-v1beta1.md @@ -436,6 +436,8 @@ PATCH /apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities/{ 200 (}}">CSIStorageCapacity): OK +201 (}}">CSIStorageCapacity): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md index f73ded9ff5..e1461951e8 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md @@ -102,7 +102,16 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and - **dataSource** (}}">TypedLocalObjectReference) - This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. + This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. 
+ +- **dataSourceRef** (}}">TypedLocalObjectReference) + + Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While DataSource ignores disallowed values (dropping them), DataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled. 
@@ -604,6 +613,8 @@ PATCH /api/v1/namespaces/{namespace}/persistentvolumeclaims/{name} 200 (}}">PersistentVolumeClaim): OK +201 (}}">PersistentVolumeClaim): Created + 401: Unauthorized @@ -657,6 +668,8 @@ PATCH /api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}/status 200 (}}">PersistentVolumeClaim): OK +201 (}}">PersistentVolumeClaim): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md index c5c68b19d7..86c689dc44 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md @@ -1189,6 +1189,8 @@ PATCH /api/v1/persistentvolumes/{name} 200 (}}">PersistentVolume): OK +201 (}}">PersistentVolume): Created + 401: Unauthorized @@ -1237,6 +1239,8 @@ PATCH /api/v1/persistentvolumes/{name}/status 200 (}}">PersistentVolume): OK +201 (}}">PersistentVolume): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md index 5e75c90b79..bb2710e4c8 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md @@ -418,6 +418,8 @@ PATCH /api/v1/namespaces/{namespace}/secrets/{name} 200 (}}">Secret): OK +201 (}}">Secret): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md index c00f36797d..e5cb3b4b3e 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md +++ 
b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md @@ -54,6 +54,8 @@ StorageClasses are non-namespaced; the name of the storage class according to et - **allowedTopologies** ([]TopologySelectorTerm) + *Atomic: will be replaced during a merge* + Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature. @@ -357,6 +359,8 @@ PATCH /apis/storage.k8s.io/v1/storageclasses/{name} 200 (}}">StorageClass): OK +201 (}}">StorageClass): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md index b9bc3ee831..332053b4a9 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md @@ -479,6 +479,8 @@ PATCH /apis/storage.k8s.io/v1/volumeattachments/{name} 200 (}}">VolumeAttachment): OK +201 (}}">VolumeAttachment): Created + 401: Unauthorized @@ -527,6 +529,8 @@ PATCH /apis/storage.k8s.io/v1/volumeattachments/{name}/status 200 (}}">VolumeAttachment): OK +201 (}}">VolumeAttachment): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md b/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md index ce88ef945d..89cb9687f6 100644 --- a/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md +++ b/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md @@ -40,6 +40,7 @@ CustomResourceDefinition represents a 
resource that should be exposed on the API - **metadata** (}}">ObjectMeta) + Standard object's metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **spec** (}}">CustomResourceDefinitionSpec), required @@ -590,6 +591,7 @@ CustomResourceDefinitionList is a list of CustomResourceDefinition objects. - **metadata** (}}">ListMeta) + Standard object's metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -907,6 +909,8 @@ PATCH /apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name} 200 (}}">CustomResourceDefinition): OK +201 (}}">CustomResourceDefinition): Created + 401: Unauthorized @@ -955,6 +959,8 @@ PATCH /apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}/status 200 (}}">CustomResourceDefinition): OK +201 (}}">CustomResourceDefinition): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md b/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md index e335c9fc2e..499daa6405 100644 --- a/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md +++ b/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md @@ -479,6 +479,8 @@ PATCH /apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations/{name} 200 (}}">MutatingWebhookConfiguration): OK +201 (}}">MutatingWebhookConfiguration): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md b/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md index a985763cb3..417f90402e 100644 --- a/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md +++ 
b/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md @@ -469,6 +469,8 @@ PATCH /apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations/{nam 200 (}}">ValidatingWebhookConfiguration): OK +201 (}}">ValidatingWebhookConfiguration): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/limit-range-v1.md b/content/en/docs/reference/kubernetes-api/policy-resources/limit-range-v1.md index 5d84379b6e..36d9ff42c4 100644 --- a/content/en/docs/reference/kubernetes-api/policy-resources/limit-range-v1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/limit-range-v1.md @@ -447,6 +447,8 @@ PATCH /api/v1/namespaces/{namespace}/limitranges/{name} 200 (}}">LimitRange): OK +201 (}}">LimitRange): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/network-policy-v1.md b/content/en/docs/reference/kubernetes-api/policy-resources/network-policy-v1.md index 6643b81bee..2ba558d7b1 100644 --- a/content/en/docs/reference/kubernetes-api/policy-resources/network-policy-v1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/network-policy-v1.md @@ -121,7 +121,7 @@ NetworkPolicySpec provides the specification of a NetworkPolicy - **ingress.ports.endPort** (int32) - If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Alpha state and should be enabled using the Feature Gate "NetworkPolicyEndPort". + If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. 
This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort". - **ingress.ports.protocol** (string) @@ -184,7 +184,7 @@ NetworkPolicySpec provides the specification of a NetworkPolicy - **egress.ports.endPort** (int32) - If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Alpha state and should be enabled using the Feature Gate "NetworkPolicyEndPort". + If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort". 
- **egress.ports.protocol** (string) @@ -550,6 +550,8 @@ PATCH /apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name} 200 (}}">NetworkPolicy): OK +201 (}}">NetworkPolicy): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1.md b/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1.md index 3b21024aeb..5b88652bb2 100644 --- a/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1.md @@ -604,6 +604,8 @@ PATCH /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name} 200 (}}">PodDisruptionBudget): OK +201 (}}">PodDisruptionBudget): Created + 401: Unauthorized @@ -657,6 +659,8 @@ PATCH /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}/status 200 (}}">PodDisruptionBudget): OK +201 (}}">PodDisruptionBudget): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/pod-security-policy-v1beta1.md b/content/en/docs/reference/kubernetes-api/policy-resources/pod-security-policy-v1beta1.md index b6050390e6..2f03cfa5bf 100644 --- a/content/en/docs/reference/kubernetes-api/policy-resources/pod-security-policy-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/pod-security-policy-v1beta1.md @@ -590,6 +590,8 @@ PATCH /apis/policy/v1beta1/podsecuritypolicies/{name} 200 (}}">PodSecurityPolicy): OK +201 (}}">PodSecurityPolicy): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/resource-quota-v1.md b/content/en/docs/reference/kubernetes-api/policy-resources/resource-quota-v1.md index 2f66235c0f..e71631863e 100644 --- a/content/en/docs/reference/kubernetes-api/policy-resources/resource-quota-v1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/resource-quota-v1.md @@ -555,6 +555,8 @@ PATCH 
/api/v1/namespaces/{namespace}/resourcequotas/{name} 200 (}}">ResourceQuota): OK +201 (}}">ResourceQuota): Created + 401: Unauthorized @@ -608,6 +610,8 @@ PATCH /api/v1/namespaces/{namespace}/resourcequotas/{name}/status 200 (}}">ResourceQuota): OK +201 (}}">ResourceQuota): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1.md index a405d50660..b602f0e728 100644 --- a/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1.md @@ -505,6 +505,8 @@ PATCH /apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices/{name} 200 (}}">EndpointSlice): OK +201 (}}">EndpointSlice): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/service-resources/endpoints-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/endpoints-v1.md index acc7d938f9..ccc385b983 100644 --- a/content/en/docs/reference/kubernetes-api/service-resources/endpoints-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/endpoints-v1.md @@ -134,7 +134,7 @@ Endpoints is a collection of endpoints that implement the actual service. Exampl - **subsets.ports.appProtocol** (string) - The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default. + The application protocol for this port. This field follows standard Kubernetes label syntax. 
Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. @@ -496,6 +496,8 @@ PATCH /api/v1/namespaces/{namespace}/endpoints/{name} 200 (}}">Endpoints): OK +201 (}}">Endpoints): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/service-resources/ingress-class-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/ingress-class-v1.md index c549ac2f83..335597af49 100644 --- a/content/en/docs/reference/kubernetes-api/service-resources/ingress-class-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/ingress-class-v1.md @@ -354,6 +354,8 @@ PATCH /apis/networking.k8s.io/v1/ingressclasses/{name} 200 (}}">IngressClass): OK +201 (}}">IngressClass): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/service-resources/ingress-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/ingress-v1.md index 00fe1bb617..7bdec1fcb2 100644 --- a/content/en/docs/reference/kubernetes-api/service-resources/ingress-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/ingress-v1.md @@ -107,11 +107,7 @@ IngressSpec describes the Ingress the user wishes to exist. Backend defines the referenced service endpoint to which the traffic will be forwarded to. - - **rules.http.paths.path** (string) - - Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional "path" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched. - - - **rules.http.paths.pathType** (string) + - **rules.http.paths.pathType** (string), required PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. 
* Prefix: Matches based on a URL path prefix split by '/'. Matching is done on a path element by element basis. A path element refers is the @@ -125,6 +121,10 @@ IngressSpec describes the Ingress the user wishes to exist. or treat it identically to Prefix or Exact path types. Implementations are required to support all path types. + - **rules.http.paths.path** (string) + + Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional "path" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value "Exact" or "Prefix". + - **tls** ([]IngressTLS) *Atomic: will be replaced during a merge* @@ -685,6 +685,8 @@ PATCH /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name} 200 (}}">Ingress): OK +201 (}}">Ingress): Created + 401: Unauthorized @@ -738,6 +740,8 @@ PATCH /apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}/status 200 (}}">Ingress): OK +201 (}}">Ingress): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/service-resources/service-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/service-v1.md index 7b9fd5f32c..c49b2607c1 100644 --- a/content/en/docs/reference/kubernetes-api/service-resources/service-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/service-v1.md @@ -96,11 +96,11 @@ ServiceSpec describes the attributes that a user creates on a service. - **ports.nodePort** (int32) - The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. 
changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport + The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - **ports.appProtocol** (string) - The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default. + The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. - **type** (string) @@ -144,7 +144,7 @@ ServiceSpec describes the attributes that a user creates on a service. - **loadBalancerSourceRanges** ([]string) - If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature." 
More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ + If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ - **loadBalancerClass** (string) @@ -188,13 +188,9 @@ ServiceSpec describes the attributes that a user creates on a service. timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && \<=86400(for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800(for 3 hours). -- **topologyKeys** ([]string) - - topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value "*" may be used to mean "any topology". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version. - - **allocateLoadBalancerNodePorts** (boolean) - allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is "true". 
It may be set to "false" if the cluster load-balancer does not rely on NodePorts. allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is "true". It may be set to "false" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature. @@ -733,6 +729,8 @@ PATCH /api/v1/namespaces/{namespace}/services/{name} 200 (}}">Service): OK +201 (}}">Service): Created + 401: Unauthorized @@ -786,6 +784,8 @@ PATCH /api/v1/namespaces/{namespace}/services/{name}/status 200 (}}">Service): OK +201 (}}">Service): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/controller-revision-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/controller-revision-v1.md index 23b324fb6e..bf3ffa7f12 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/controller-revision-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/controller-revision-v1.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "ControllerRevision implements an immutable snapshot of state data." 
title: "ControllerRevision" -weight: 8 +weight: 7 auto_generated: true --- @@ -440,6 +440,8 @@ PATCH /apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name} 200 (}}">ControllerRevision): OK +201 (}}">ControllerRevision): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1.md index a518d1f72a..3aa5ceb8be 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "CronJob represents the configuration of a single cron job." title: "CronJob" -weight: 11 +weight: 10 auto_generated: true --- @@ -572,6 +572,8 @@ PATCH /apis/batch/v1/namespaces/{namespace}/cronjobs/{name} 200 (}}">CronJob): OK +201 (}}">CronJob): Created + 401: Unauthorized @@ -625,6 +627,8 @@ PATCH /apis/batch/v1/namespaces/{namespace}/cronjobs/{name}/status 200 (}}">CronJob): OK +201 (}}">CronJob): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/daemon-set-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/daemon-set-v1.md index 2a313f533f..9d7eb6c24c 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/daemon-set-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/daemon-set-v1.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "DaemonSet represents the configuration of a daemon set." title: "DaemonSet" -weight: 9 +weight: 8 auto_generated: true --- @@ -92,14 +92,14 @@ DaemonSetSpec is the specification of a daemon set. - **updateStrategy.rollingUpdate.maxSurge** (IntOrString) - The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. 
Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate. + The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. 
If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is a beta field and is enabled/disabled by the DaemonSetUpdateSurge feature gate. *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - **updateStrategy.rollingUpdate.maxUnavailable** (IntOrString) - The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding down to a minimum of one. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update. + The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.
This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update. *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* @@ -629,6 +629,8 @@ PATCH /apis/apps/v1/namespaces/{namespace}/daemonsets/{name} 200 (}}">DaemonSet): OK +201 (}}">DaemonSet): Created + 401: Unauthorized @@ -682,6 +684,8 @@ PATCH /apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status 200 (}}">DaemonSet): OK +201 (}}">DaemonSet): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/deployment-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/deployment-v1.md index f304fd9239..c56bf76df7 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/deployment-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/deployment-v1.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "Deployment enables declarative updates for Pods and ReplicaSets." title: "Deployment" -weight: 6 +weight: 5 auto_generated: true --- @@ -40,7 +40,7 @@ Deployment enables declarative updates for Pods and ReplicaSets. - **metadata** (}}">ObjectMeta) - Standard object metadata. + Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **spec** (}}">DeploymentSpec) @@ -642,6 +642,8 @@ PATCH /apis/apps/v1/namespaces/{namespace}/deployments/{name} 200 (}}">Deployment): OK +201 (}}">Deployment): Created + 401: Unauthorized @@ -695,6 +697,8 @@ PATCH /apis/apps/v1/namespaces/{namespace}/deployments/{name}/status 200 (}}">Deployment): OK +201 (}}">Deployment): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-containers-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-containers-v1.md deleted file mode 100644 index 960fc9c8c4..0000000000 --- a/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-containers-v1.md +++ /dev/null @@ -1,621 +0,0 @@ ---- -api_metadata: - apiVersion: "v1" - import: "k8s.io/api/core/v1" - kind: "EphemeralContainers" -content_type: "api_reference" -description: "A list of ephemeral containers used with the Pod ephemeralcontainers subresource." -title: "EphemeralContainers" -weight: 2 -auto_generated: true ---- - - - -`apiVersion: v1` - -`import "k8s.io/api/core/v1"` - - -## EphemeralContainers {#EphemeralContainers} - -A list of ephemeral containers used with the Pod ephemeralcontainers subresource. - -
- -- **apiVersion**: v1 - - -- **kind**: EphemeralContainers - - -- **metadata** (}}">ObjectMeta) - - -- **ephemeralContainers** ([]}}">EphemeralContainer), required - - *Patch strategy: merge on key `name`* - - A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified. - - - - - -## EphemeralContainer {#EphemeralContainer} - -An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag. - -
- -- **name** (string), required - - Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. - -- **targetContainerName** (string) - - If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature. - - - -### Image - - -- **image** (string) - - Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - -- **imagePullPolicy** (string) - - Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - -### Entrypoint - - -- **command** ([]string) - - Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - -- **args** ([]string) - - Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - -- **workingDir** (string) - - Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. - -### Environment variables - - -- **env** ([]EnvVar) - - *Patch strategy: merge on key `name`* - - List of environment variables to set in the container. Cannot be updated. - - - *EnvVar represents an environment variable present in a Container.* - - - **env.name** (string), required - - Name of the environment variable. Must be a C_IDENTIFIER. - - - **env.value** (string) - - Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". - - - **env.valueFrom** (EnvVarSource) - - Source for the environment variable's value. Cannot be used if value is not empty. - - - *EnvVarSource represents a source for the value of an EnvVar.* - - - **env.valueFrom.configMapKeyRef** (ConfigMapKeySelector) - - Selects a key of a ConfigMap. - - - *Selects a key from a ConfigMap.* - - - **env.valueFrom.configMapKeyRef.key** (string), required - - The key to select. - - - **env.valueFrom.configMapKeyRef.name** (string) - - Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **env.valueFrom.configMapKeyRef.optional** (boolean) - - Specify whether the ConfigMap or its key must be defined - - - **env.valueFrom.fieldRef** (}}">ObjectFieldSelector) - - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\']`, `metadata.annotations['\']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - - - **env.valueFrom.resourceFieldRef** (}}">ResourceFieldSelector) - - Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - - - **env.valueFrom.secretKeyRef** (SecretKeySelector) - - Selects a key of a secret in the pod's namespace - - - *SecretKeySelector selects a key of a Secret.* - - - **env.valueFrom.secretKeyRef.key** (string), required - - The key of the secret to select from. Must be a valid secret key. - - - **env.valueFrom.secretKeyRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **env.valueFrom.secretKeyRef.optional** (boolean) - - Specify whether the Secret or its key must be defined - -- **envFrom** ([]EnvFromSource) - - List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
- - - *EnvFromSource represents the source of a set of ConfigMaps* - - - **envFrom.configMapRef** (ConfigMapEnvSource) - - The ConfigMap to select from - - - *ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. - - The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.* - - - **envFrom.configMapRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **envFrom.configMapRef.optional** (boolean) - - Specify whether the ConfigMap must be defined - - - **envFrom.prefix** (string) - - An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. - - - **envFrom.secretRef** (SecretEnvSource) - - The Secret to select from - - - *SecretEnvSource selects a Secret to populate the environment variables with. - - The contents of the target Secret's Data field will represent the key-value pairs as environment variables.* - - - **envFrom.secretRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **envFrom.secretRef.optional** (boolean) - - Specify whether the Secret must be defined - -### Volumes - - -- **volumeMounts** ([]VolumeMount) - - *Patch strategy: merge on key `mountPath`* - - Pod volumes to mount into the container's filesystem. Cannot be updated. - - - *VolumeMount describes a mounting of a Volume within a container.* - - - **volumeMounts.mountPath** (string), required - - Path within the container at which the volume should be mounted. Must not contain ':'. - - - **volumeMounts.name** (string), required - - This must match the Name of a Volume. - - - **volumeMounts.mountPropagation** (string) - - mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. 
- - - **volumeMounts.readOnly** (boolean) - - Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. - - - **volumeMounts.subPath** (string) - - Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). - - - **volumeMounts.subPathExpr** (string) - - Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. - -- **volumeDevices** ([]VolumeDevice) - - *Patch strategy: merge on key `devicePath`* - - volumeDevices is the list of block devices to be used by the container. - - - *volumeDevice describes a mapping of a raw block device within a container.* - - - **volumeDevices.devicePath** (string), required - - devicePath is the path inside of the container that the device will be mapped to. - - - **volumeDevices.name** (string), required - - name must match the name of a persistentVolumeClaim in the pod - -### Lifecycle - - -- **terminationMessagePath** (string) - - Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. - -- **terminationMessagePolicy** (string) - - Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. 
The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. - -### Debugging - - -- **stdin** (boolean) - - Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. - -- **stdinOnce** (boolean) - - Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false - -- **tty** (boolean) - - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. - -### Not allowed - - -- **ports** ([]ContainerPort) - - Ports are not allowed for ephemeral containers. - - - *ContainerPort represents a network port in a single container.* - - - **ports.containerPort** (int32), required - - Number of port to expose on the pod's IP address. This must be a valid port number, 0 \< x \< 65536. - - - **ports.hostIP** (string) - - What host IP to bind the external port to. - - - **ports.hostPort** (int32) - - Number of port to expose on the host. If specified, this must be a valid port number, 0 \< x \< 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. - - - **ports.name** (string) - - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. - - - **ports.protocol** (string) - - Protocol for port. 
Must be UDP, TCP, or SCTP. Defaults to "TCP". - -- **resources** (ResourceRequirements) - - Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. - - - *ResourceRequirements describes the compute resource requirements.* - - - **resources.limits** (map[string]}}">Quantity) - - Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - - - **resources.requests** (map[string]}}">Quantity) - - Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -- **lifecycle** (Lifecycle) - - Lifecycle is not allowed for ephemeral containers. - - - *Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.* - - - **lifecycle.postStart** (}}">Handler) - - PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - - - **lifecycle.preStop** (}}">Handler) - - PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. 
The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - -- **livenessProbe** (}}">Probe) - - Probes are not allowed for ephemeral containers. - -- **readinessProbe** (}}">Probe) - - Probes are not allowed for ephemeral containers. - -- **securityContext** (SecurityContext) - - SecurityContext is not allowed for ephemeral containers. - - - *SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.* - - - **securityContext.runAsUser** (int64) - - The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.runAsNonRoot** (boolean) - - Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.runAsGroup** (int64) - - The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.readOnlyRootFilesystem** (boolean) - - Whether this container has a read-only root filesystem. Default is false. - - - **securityContext.procMount** (string) - - procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. - - - **securityContext.privileged** (boolean) - - Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. - - - **securityContext.allowPrivilegeEscalation** (boolean) - - AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN - - - **securityContext.capabilities** (Capabilities) - - The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. - - - *Adds and removes POSIX capabilities from running containers.* - - - **securityContext.capabilities.add** ([]string) - - Added capabilities - - - **securityContext.capabilities.drop** ([]string) - - Removed capabilities - - - **securityContext.seccompProfile** (SeccompProfile) - - The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. - - - *SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.* - - - **securityContext.seccompProfile.type** (string), required - - type indicates which kind of seccomp profile will be applied. 
Valid options are: - - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. - - - **securityContext.seccompProfile.localhostProfile** (string) - - localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". - - - **securityContext.seLinuxOptions** (SELinuxOptions) - - The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - *SELinuxOptions are the labels to be applied to the container* - - - **securityContext.seLinuxOptions.level** (string) - - Level is SELinux level label that applies to the container. - - - **securityContext.seLinuxOptions.role** (string) - - Role is a SELinux role label that applies to the container. - - - **securityContext.seLinuxOptions.type** (string) - - Type is a SELinux type label that applies to the container. - - - **securityContext.seLinuxOptions.user** (string) - - User is a SELinux user label that applies to the container. - - - **securityContext.windowsOptions** (WindowsSecurityContextOptions) - - The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
- - - *WindowsSecurityContextOptions contain Windows-specific options and credentials.* - - - **securityContext.windowsOptions.gmsaCredentialSpec** (string) - - GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. - - - **securityContext.windowsOptions.gmsaCredentialSpecName** (string) - - GMSACredentialSpecName is the name of the GMSA credential spec to use. - - - **securityContext.windowsOptions.runAsUserName** (string) - - The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - -- **startupProbe** (}}">Probe) - - Probes are not allowed for ephemeral containers. - - - -## Operations {#Operations} - - - -
- - - - - - -### `get` read ephemeralcontainers of the specified Pod - -#### HTTP Request - -GET /api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers - -#### Parameters - - -- **name** (*in path*): string, required - - name of the EphemeralContainers - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">EphemeralContainers): OK - -401: Unauthorized - - -### `update` replace ephemeralcontainers of the specified Pod - -#### HTTP Request - -PUT /api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers - -#### Parameters - - -- **name** (*in path*): string, required - - name of the EphemeralContainers - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">EphemeralContainers, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">EphemeralContainers): OK - -201 (}}">EphemeralContainers): Created - -401: Unauthorized - - -### `patch` partially update ephemeralcontainers of the specified Pod - -#### HTTP Request - -PATCH /api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers - -#### Parameters - - -- **name** (*in path*): string, required - - name of the EphemeralContainers - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">EphemeralContainers): OK - -401: Unauthorized - diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v1.md 
b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v1.md index a62d79e4f7..d2da0c286a 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v1.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "configuration of a horizontal pod autoscaler." title: "HorizontalPodAutoscaler" -weight: 12 +weight: 11 auto_generated: true --- @@ -567,6 +567,8 @@ PATCH /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name 200 (}}">HorizontalPodAutoscaler): OK +201 (}}">HorizontalPodAutoscaler): Created + 401: Unauthorized @@ -620,6 +622,8 @@ PATCH /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name 200 (}}">HorizontalPodAutoscaler): OK +201 (}}">HorizontalPodAutoscaler): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2beta2.md b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2beta2.md index 9d326e2131..67894ab9c4 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2beta2.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2beta2.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified." 
title: "HorizontalPodAutoscaler v2beta2" -weight: 13 +weight: 12 auto_generated: true --- @@ -1119,6 +1119,8 @@ PATCH /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/ 200 (}}">HorizontalPodAutoscaler): OK +201 (}}">HorizontalPodAutoscaler): Created + 401: Unauthorized @@ -1172,6 +1174,8 @@ PATCH /apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/ 200 (}}">HorizontalPodAutoscaler): OK +201 (}}">HorizontalPodAutoscaler): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/job-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/job-v1.md index 4848a36d4d..0f61e522eb 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/job-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/job-v1.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "Job represents the configuration of a single job." title: "Job" -weight: 10 +weight: 9 auto_generated: true --- @@ -86,9 +86,9 @@ JobSpec describes how the job execution will look like. `NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other. - `Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. + `Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. 
When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`. - This field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job. + This field is beta-level. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job. - **backoffLimit** (int32) @@ -104,7 +104,9 @@ JobSpec describes how the job execution will look like. - **suspend** (boolean) - Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false. + Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false. 
+ + This field is beta-level, gated by SuspendJob feature flag (enabled by default). ### Selector @@ -196,6 +198,30 @@ JobStatus represents the current state of a Job. (brief) reason for the condition's last transition. +- **uncountedTerminatedPods** (UncountedTerminatedPods) + + UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters. + + The job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding + counter. + + This field is alpha-level. The job controller only makes use of this field when the feature gate PodTrackingWithFinalizers is enabled. Old jobs might not be tracked using this field, in which case the field remains null. + + + *UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.* + + - **uncountedTerminatedPods.failed** ([]string) + + *Set: unique values will be kept during a merge* + + Failed holds UIDs of failed Pods. + + - **uncountedTerminatedPods.succeeded** ([]string) + + *Set: unique values will be kept during a merge* + + Succeeded holds UIDs of succeeded Pods. 
+ @@ -639,6 +665,8 @@ PATCH /apis/batch/v1/namespaces/{namespace}/jobs/{name} 200 (}}">Job): OK +201 (}}">Job): Created + 401: Unauthorized @@ -692,6 +720,8 @@ PATCH /apis/batch/v1/namespaces/{namespace}/jobs/{name}/status 200 (}}">Job): OK +201 (}}">Job): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/pod-template-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/pod-template-v1.md index 9a0bbecab1..7e75ea07de 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/pod-template-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/pod-template-v1.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "PodTemplate describes a template for creating copies of a predefined pod." title: "PodTemplate" -weight: 3 +weight: 2 auto_generated: true --- @@ -424,6 +424,8 @@ PATCH /api/v1/namespaces/{namespace}/podtemplates/{name} 200 (}}">PodTemplate): OK +201 (}}">PodTemplate): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/pod-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/pod-v1.md index d16d9b0a85..8c166cfd9e 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/pod-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/pod-v1.md @@ -159,7 +159,7 @@ PodSpec is a description of a pod. - **runtimeClassName** (string) - RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14. 
+ RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class This is a beta feature as of Kubernetes v1.14. - **priorityClassName** (string) @@ -216,7 +216,7 @@ PodSpec is a description of a pod. - **readinessGates** ([]PodReadinessGate) - If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md + If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates *PodReadinessGate contains the reference to a pod condition* @@ -428,6 +428,10 @@ PodSpec is a description of a pod. GMSACredentialSpecName is the name of the GMSA credential spec to use. + - **securityContext.windowsOptions.hostProcess** (boolean) + + HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. 
+ - **securityContext.windowsOptions.runAsUserName** (string) The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. @@ -441,12 +445,12 @@ PodSpec is a description of a pod. - **overhead** (map[string]}}">Quantity) - Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. ### Alpha level -- **ephemeralContainers** ([]}}">EphemeralContainer) +- **ephemeralContainers** ([]}}">EphemeralContainer) *Patch strategy: merge on key `name`* @@ -489,11 +493,11 @@ A single application container that you want to run within a pod. - **command** ([]string) - Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - **args** ([]string) - Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - **workingDir** (string) @@ -551,7 +555,7 @@ A single application container that you want to run within a pod. - **env.value** (string) - Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". - **env.valueFrom** (EnvVarSource) @@ -765,7 +769,7 @@ A single application container that you want to run within a pod. - **securityContext** (SecurityContext) - Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ *SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.* @@ -868,6 +872,10 @@ A single application container that you want to run within a pod. GMSACredentialSpecName is the name of the GMSA credential spec to use. + - **securityContext.windowsOptions.hostProcess** (boolean) + + HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + - **securityContext.windowsOptions.runAsUserName** (string) The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. 
May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. @@ -889,6 +897,432 @@ A single application container that you want to run within a pod. +## EphemeralContainer {#EphemeralContainer} + +An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag. + +
+ +- **name** (string), required + + Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. + +- **targetContainerName** (string) + + If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature. + + + +### Image + + +- **image** (string) + + Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + +- **imagePullPolicy** (string) + + Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + +### Entrypoint + + +- **command** ([]string) + + Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + +- **args** ([]string) + + Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + +- **workingDir** (string) + + Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + +### Environment variables + + +- **env** ([]EnvVar) + + *Patch strategy: merge on key `name`* + + List of environment variables to set in the container. Cannot be updated. + + + *EnvVar represents an environment variable present in a Container.* + + - **env.name** (string), required + + Name of the environment variable. Must be a C_IDENTIFIER. + + - **env.value** (string) + + Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + + - **env.valueFrom** (EnvVarSource) + + Source for the environment variable's value. Cannot be used if value is not empty. + + + *EnvVarSource represents a source for the value of an EnvVar.* + + - **env.valueFrom.configMapKeyRef** (ConfigMapKeySelector) + + Selects a key of a ConfigMap. + + + *Selects a key from a ConfigMap.* + + - **env.valueFrom.configMapKeyRef.key** (string), required + + The key to select. + + - **env.valueFrom.configMapKeyRef.name** (string) + + Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + - **env.valueFrom.configMapKeyRef.optional** (boolean) + + Specify whether the ConfigMap or its key must be defined + + - **env.valueFrom.fieldRef** (}}">ObjectFieldSelector) + + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\']`, `metadata.annotations['\']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + + - **env.valueFrom.resourceFieldRef** (}}">ResourceFieldSelector) + + Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + + - **env.valueFrom.secretKeyRef** (SecretKeySelector) + + Selects a key of a secret in the pod's namespace + + + *SecretKeySelector selects a key of a Secret.* + + - **env.valueFrom.secretKeyRef.key** (string), required + + The key of the secret to select from. Must be a valid secret key. + + - **env.valueFrom.secretKeyRef.name** (string) + + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + - **env.valueFrom.secretKeyRef.optional** (boolean) + + Specify whether the Secret or its key must be defined + +- **envFrom** ([]EnvFromSource) + + List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
+ + + *EnvFromSource represents the source of a set of ConfigMaps* + + - **envFrom.configMapRef** (ConfigMapEnvSource) + + The ConfigMap to select from + + + *ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. + + The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.* + + - **envFrom.configMapRef.name** (string) + + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + - **envFrom.configMapRef.optional** (boolean) + + Specify whether the ConfigMap must be defined + + - **envFrom.prefix** (string) + + An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + + - **envFrom.secretRef** (SecretEnvSource) + + The Secret to select from + + + *SecretEnvSource selects a Secret to populate the environment variables with. + + The contents of the target Secret's Data field will represent the key-value pairs as environment variables.* + + - **envFrom.secretRef.name** (string) + + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + - **envFrom.secretRef.optional** (boolean) + + Specify whether the Secret must be defined + +### Volumes + + +- **volumeMounts** ([]VolumeMount) + + *Patch strategy: merge on key `mountPath`* + + Pod volumes to mount into the container's filesystem. Cannot be updated. + + + *VolumeMount describes a mounting of a Volume within a container.* + + - **volumeMounts.mountPath** (string), required + + Path within the container at which the volume should be mounted. Must not contain ':'. + + - **volumeMounts.name** (string), required + + This must match the Name of a Volume. + + - **volumeMounts.mountPropagation** (string) + + mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. 
+ + - **volumeMounts.readOnly** (boolean) + + Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + + - **volumeMounts.subPath** (string) + + Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + + - **volumeMounts.subPathExpr** (string) + + Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + +- **volumeDevices** ([]VolumeDevice) + + *Patch strategy: merge on key `devicePath`* + + volumeDevices is the list of block devices to be used by the container. + + + *volumeDevice describes a mapping of a raw block device within a container.* + + - **volumeDevices.devicePath** (string), required + + devicePath is the path inside of the container that the device will be mapped to. + + - **volumeDevices.name** (string), required + + name must match the name of a persistentVolumeClaim in the pod + +### Lifecycle + + +- **terminationMessagePath** (string) + + Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. + +- **terminationMessagePolicy** (string) + + Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. 
The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + +### Debugging + + +- **stdin** (boolean) + + Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + +- **stdinOnce** (boolean) + + Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false + +- **tty** (boolean) + + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + +### Not allowed + + +- **ports** ([]ContainerPort) + + Ports are not allowed for ephemeral containers. + + + *ContainerPort represents a network port in a single container.* + + - **ports.containerPort** (int32), required + + Number of port to expose on the pod's IP address. This must be a valid port number, 0 \< x \< 65536. + + - **ports.hostIP** (string) + + What host IP to bind the external port to. + + - **ports.hostPort** (int32) + + Number of port to expose on the host. If specified, this must be a valid port number, 0 \< x \< 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + + - **ports.name** (string) + + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + + - **ports.protocol** (string) + + Protocol for port.
Must be UDP, TCP, or SCTP. Defaults to "TCP". + +- **resources** (ResourceRequirements) + + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. + + + *ResourceRequirements describes the compute resource requirements.* + + - **resources.limits** (map[string]}}">Quantity) + + Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + + - **resources.requests** (map[string]}}">Quantity) + + Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + +- **lifecycle** (Lifecycle) + + Lifecycle is not allowed for ephemeral containers. + + + *Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.* + + - **lifecycle.postStart** (}}">Handler) + + PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + + - **lifecycle.preStop** (}}">Handler) + + PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. 
The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + +- **livenessProbe** (}}">Probe) + + Probes are not allowed for ephemeral containers. + +- **readinessProbe** (}}">Probe) + + Probes are not allowed for ephemeral containers. + +- **securityContext** (SecurityContext) + + Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + + + *SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.* + + - **securityContext.runAsUser** (int64) + + The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + + - **securityContext.runAsNonRoot** (boolean) + + Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + + - **securityContext.runAsGroup** (int64) + + The GID to run the entrypoint of the container process. Uses runtime default if unset.
May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + + - **securityContext.readOnlyRootFilesystem** (boolean) + + Whether this container has a read-only root filesystem. Default is false. + + - **securityContext.procMount** (string) + + procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + + - **securityContext.privileged** (boolean) + + Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + + - **securityContext.allowPrivilegeEscalation** (boolean) + + AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN + + - **securityContext.capabilities** (Capabilities) + + The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + + + *Adds and removes POSIX capabilities from running containers.* + + - **securityContext.capabilities.add** ([]string) + + Added capabilities + + - **securityContext.capabilities.drop** ([]string) + + Removed capabilities + + - **securityContext.seccompProfile** (SeccompProfile) + + The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. + + + *SeccompProfile defines a pod/container's seccomp profile settings. 
Only one profile source may be set.* + + - **securityContext.seccompProfile.type** (string), required + + type indicates which kind of seccomp profile will be applied. Valid options are: + + Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + + - **securityContext.seccompProfile.localhostProfile** (string) + + localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + + - **securityContext.seLinuxOptions** (SELinuxOptions) + + The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + + + *SELinuxOptions are the labels to be applied to the container* + + - **securityContext.seLinuxOptions.level** (string) + + Level is SELinux level label that applies to the container. + + - **securityContext.seLinuxOptions.role** (string) + + Role is a SELinux role label that applies to the container. + + - **securityContext.seLinuxOptions.type** (string) + + Type is a SELinux type label that applies to the container. + + - **securityContext.seLinuxOptions.user** (string) + + User is a SELinux user label that applies to the container. + + - **securityContext.windowsOptions** (WindowsSecurityContextOptions) + + The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ + + *WindowsSecurityContextOptions contain Windows-specific options and credentials.* + + - **securityContext.windowsOptions.gmsaCredentialSpec** (string) + + GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + + - **securityContext.windowsOptions.gmsaCredentialSpecName** (string) + + GMSACredentialSpecName is the name of the GMSA credential spec to use. + + - **securityContext.windowsOptions.hostProcess** (boolean) + + HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + + - **securityContext.windowsOptions.runAsUserName** (string) + + The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + +- **startupProbe** (}}">Probe) + + Probes are not allowed for ephemeral containers. + + + ## Handler {#Handler} Handler defines a specific action that should be taken @@ -1057,7 +1491,7 @@ Pod affinity is a group of inter pod affinity scheduling rules. - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector** (}}">LabelSelector) - A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces** ([]string) @@ -1084,7 +1518,7 @@ Pod affinity is a group of inter pod affinity scheduling rules. - **requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector** (}}">LabelSelector) - A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. 
- **requiredDuringSchedulingIgnoredDuringExecution.namespaces** ([]string) @@ -1124,7 +1558,7 @@ Pod anti affinity is a group of inter pod anti affinity scheduling rules. - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector** (}}">LabelSelector) - A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces** ([]string) @@ -1151,7 +1585,7 @@ Pod anti affinity is a group of inter pod anti affinity scheduling rules. - **requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector** (}}">LabelSelector) - A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. - **requiredDuringSchedulingIgnoredDuringExecution.namespaces** ([]string) @@ -1243,7 +1677,7 @@ Probe describes a health check to be performed against a container to determine - **terminationGracePeriodSeconds** (int64) - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate. + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
- **periodSeconds** (int32) @@ -1996,6 +2430,39 @@ GET /api/v1/namespaces/{namespace}/pods/{name} +#### Response + + +200 (}}">Pod): OK + +401: Unauthorized + + +### `get` read ephemeralcontainers of the specified Pod + +#### HTTP Request + +GET /api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers + +#### Parameters + + +- **name** (*in path*): string, required + + name of the Pod + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **pretty** (*in query*): string + + }}">pretty + + + #### Response @@ -2322,6 +2789,56 @@ PUT /api/v1/namespaces/{namespace}/pods/{name} +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldManager** (*in query*): string + + }}">fieldManager + + +- **pretty** (*in query*): string + + }}">pretty + + + +#### Response + + +200 (}}">Pod): OK + +201 (}}">Pod): Created + +401: Unauthorized + + +### `update` replace ephemeralcontainers of the specified Pod + +#### HTTP Request + +PUT /api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers + +#### Parameters + + +- **name** (*in path*): string, required + + name of the Pod + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">Pod, required + + + + - **dryRun** (*in query*): string }}">dryRun @@ -2448,6 +2965,63 @@ PATCH /api/v1/namespaces/{namespace}/pods/{name} 200 (}}">Pod): OK +201 (}}">Pod): Created + +401: Unauthorized + + +### `patch` partially update ephemeralcontainers of the specified Pod + +#### HTTP Request + +PATCH /api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers + +#### Parameters + + +- **name** (*in path*): string, required + + name of the Pod + + +- **namespace** (*in path*): string, required + + }}">namespace + + +- **body**: }}">Patch, required + + + + +- **dryRun** (*in query*): string + + }}">dryRun + + +- **fieldManager** (*in query*): string + + }}">fieldManager + + +- **force** (*in query*): boolean + + }}">force + + +- **pretty** (*in query*): string + + }}">pretty + + + 
+#### Response + + +200 (}}">Pod): OK + +201 (}}">Pod): Created + 401: Unauthorized @@ -2501,6 +3075,8 @@ PATCH /api/v1/namespaces/{namespace}/pods/{name}/status 200 (}}">Pod): OK +201 (}}">Pod): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/priority-class-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/priority-class-v1.md index 020c25c05a..cd96fe6790 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/priority-class-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/priority-class-v1.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "PriorityClass defines mapping from a priority class name to the priority integer value." title: "PriorityClass" -weight: 14 +weight: 13 auto_generated: true --- @@ -325,6 +325,8 @@ PATCH /apis/scheduling.k8s.io/v1/priorityclasses/{name} 200 (}}">PriorityClass): OK +201 (}}">PriorityClass): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/replica-set-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/replica-set-v1.md index 7a344128c8..f2c5f894af 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/replica-set-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/replica-set-v1.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "ReplicaSet ensures that a specified number of pod replicas are running at any given time." 
title: "ReplicaSet" -weight: 5 +weight: 4 auto_generated: true --- @@ -581,6 +581,8 @@ PATCH /apis/apps/v1/namespaces/{namespace}/replicasets/{name} 200 (}}">ReplicaSet): OK +201 (}}">ReplicaSet): Created + 401: Unauthorized @@ -634,6 +636,8 @@ PATCH /apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status 200 (}}">ReplicaSet): OK +201 (}}">ReplicaSet): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/replication-controller-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/replication-controller-v1.md index c14db8ece9..890897ecbb 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/replication-controller-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/replication-controller-v1.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "ReplicationController represents the configuration of a replication controller." title: "ReplicationController" -weight: 4 +weight: 3 auto_generated: true --- @@ -581,6 +581,8 @@ PATCH /api/v1/namespaces/{namespace}/replicationcontrollers/{name} 200 (}}">ReplicationController): OK +201 (}}">ReplicationController): Created + 401: Unauthorized @@ -634,6 +636,8 @@ PATCH /api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status 200 (}}">ReplicationController): OK +201 (}}">ReplicationController): Created + 401: Unauthorized diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/stateful-set-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/stateful-set-v1.md index ec097d7cce..6bc6da0e15 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/stateful-set-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/stateful-set-v1.md @@ -6,7 +6,7 @@ api_metadata: content_type: "api_reference" description: "StatefulSet represents a set of pods with consistent identities." 
title: "StatefulSet" -weight: 7 +weight: 6 auto_generated: true --- @@ -43,6 +43,7 @@ The StatefulSet guarantees that a given network identity will always map to the - **metadata** (}}">ObjectMeta) + Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **spec** (}}">StatefulSetSpec) @@ -112,6 +113,10 @@ A StatefulSetSpec is the specification of a StatefulSet. volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name. +- **minReadySeconds** (int32) + + Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. + @@ -138,6 +143,10 @@ StatefulSetStatus represents the current state of a StatefulSet. updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. +- **availableReplicas** (int32) + + Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. Remove omitempty when graduating to beta + - **collisionCount** (int32) collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. @@ -204,9 +213,11 @@ StatefulSetList is a collection of StatefulSets. 
- **metadata** (}}">ListMeta) + Standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - **items** ([]}}">StatefulSet), required + Items is the list of stateful sets. @@ -627,6 +638,8 @@ PATCH /apis/apps/v1/namespaces/{namespace}/statefulsets/{name} 200 (}}">StatefulSet): OK +201 (}}">StatefulSet): Created + 401: Unauthorized @@ -680,6 +693,8 @@ PATCH /apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status 200 (}}">StatefulSet): OK +201 (}}">StatefulSet): Created + 401: Unauthorized diff --git a/content/en/docs/reference/labels-annotations-taints.md b/content/en/docs/reference/labels-annotations-taints.md index 29f3a63a8c..07e6d19426 100644 --- a/content/en/docs/reference/labels-annotations-taints.md +++ b/content/en/docs/reference/labels-annotations-taints.md @@ -200,7 +200,7 @@ Used on: Service The kube-proxy has this label for custom proxy, which delegates service control to custom proxy. -## experimental.windows.kubernetes.io/isolation-type +## experimental.windows.kubernetes.io/isolation-type (deprecated) {#experimental-windows-kubernetes-io-isolation-type} Example: `experimental.windows.kubernetes.io/isolation-type: "hyperv"` @@ -210,6 +210,7 @@ The annotation is used to run Windows containers with Hyper-V isolation. To use {{< note >}} You can only set this annotation on Pods that have a single container. +Starting from v1.20, this annotation is deprecated. Experimental Hyper-V support was removed in 1.21. {{< /note >}} ## ingressclass.kubernetes.io/is-default-class @@ -262,11 +263,29 @@ The value of the annotation is the container name that is default for this Pod. 
## endpoints.kubernetes.io/over-capacity -Example: `endpoints.kubernetes.io/over-capacity:warning` +Example: `endpoints.kubernetes.io/over-capacity:truncated` Used on: Endpoints -In Kubernetes clusters v1.21 (or later), the Endpoints controller adds this annotation to an Endpoints resource if it has more than 1000 endpoints. The annotation indicates that the Endpoints resource is over capacity. +In Kubernetes clusters v1.22 (or later), the Endpoints controller adds this annotation to an Endpoints resource if it has more than 1000 endpoints. The annotation indicates that the Endpoints resource is over capacity and the number of endpoints has been truncated to 1000. + +## batch.kubernetes.io/job-tracking + +Example: `batch.kubernetes.io/job-tracking: ""` + +Used on: Jobs + +The presence of this annotation on a Job indicates that the control plane is +[tracking the Job status using finalizers](/docs/concepts/workloads/controllers/job/#job-tracking-with-finalizers). +You should **not** manually add or remove this annotation. + +## scheduler.alpha.kubernetes.io/preferAvoidPods (deprecated) {#scheduleralphakubernetesio-preferavoidpods} + +Used on: Nodes + +This annotation requires the [NodePreferAvoidPods scheduling plugin](/docs/reference/scheduling/config/#scheduling-plugins) +to be enabled. The plugin is deprecated since Kubernetes 1.22. +Use [Taints and Tolerations](/docs/concepts/scheduling-eviction/taint-and-toleration/) instead. **The taints listed below are always used on Nodes** @@ -323,3 +342,87 @@ Sets this taint on a node to mark it as unusable, when kubelet is started with t Example: `node.cloudprovider.kubernetes.io/shutdown:NoSchedule` If a Node is in a cloud provider specified shutdown state, the Node gets tainted accordingly with `node.cloudprovider.kubernetes.io/shutdown` and the taint effect of `NoSchedule`. 
+ +## pod-security.kubernetes.io/enforce + +Example: `pod-security.kubernetes.io/enforce: baseline` + +Used on: Namespace + +Value **must** be one of `privileged`, `baseline`, or `restricted` which correspond to +[Pod Security Standard](/docs/concepts/security/pod-security-standards) levels. Specifically, +the `enforce` label _prohibits_ the creation of any Pod in the labeled Namespace which does not meet +the requirements outlined in the indicated level. + +See [Enforcing Pod Security at the Namespace Level](/docs/concepts/security/pod-security-admission) +for more information. + +## pod-security.kubernetes.io/enforce-version + +Example: `pod-security.kubernetes.io/enforce-version: {{< skew latestVersion >}}` + +Used on: Namespace + +Value **must** be `latest` or a valid Kubernetes version in the format `v<MAJOR>.<MINOR>`. +This determines the version of the [Pod Security Standard](/docs/concepts/security/pod-security-standards) +policies to apply when validating a submitted Pod. + +See [Enforcing Pod Security at the Namespace Level](/docs/concepts/security/pod-security-admission) +for more information. + +## pod-security.kubernetes.io/audit + +Example: `pod-security.kubernetes.io/audit: baseline` + +Used on: Namespace + +Value **must** be one of `privileged`, `baseline`, or `restricted` which correspond to +[Pod Security Standard](/docs/concepts/security/pod-security-standards) levels. Specifically, +the `audit` label does not prevent the creation of a Pod in the labeled Namespace which does not meet +the requirements outlined in the indicated level, but adds an audit annotation to that Pod. + +See [Enforcing Pod Security at the Namespace Level](/docs/concepts/security/pod-security-admission) +for more information. + +## pod-security.kubernetes.io/audit-version + +Example: `pod-security.kubernetes.io/audit-version: {{< skew latestVersion >}}` + +Used on: Namespace + +Value **must** be `latest` or a valid Kubernetes version in the format `v<MAJOR>.<MINOR>`. 
+This determines the version of the [Pod Security Standard](/docs/concepts/security/pod-security-standards) +policies to apply when validating a submitted Pod. + +See [Enforcing Pod Security at the Namespace Level](/docs/concepts/security/pod-security-admission) +for more information. + +## pod-security.kubernetes.io/warn + +Example: `pod-security.kubernetes.io/warn: baseline` + +Used on: Namespace + +Value **must** be one of `privileged`, `baseline`, or `restricted` which correspond to +[Pod Security Standard](/docs/concepts/security/pod-security-standards) levels. Specifically, +the `warn` label does not prevent the creation of a Pod in the labeled Namespace which does not meet the +requirements outlined in the indicated level, but returns a warning to the user after doing so. +Note that warnings are also displayed when creating or updating objects that contain Pod templates, +such as Deployments, Jobs, StatefulSets, etc. + +See [Enforcing Pod Security at the Namespace Level](/docs/concepts/security/pod-security-admission) +for more information. + +## pod-security.kubernetes.io/warn-version + +Example: `pod-security.kubernetes.io/warn-version: {{< skew latestVersion >}}` + +Used on: Namespace + +Value **must** be `latest` or a valid Kubernetes version in the format `v<MAJOR>.<MINOR>`. +This determines the version of the [Pod Security Standard](/docs/concepts/security/pod-security-standards) +policies to apply when validating a submitted Pod. Note that warnings are also displayed when creating +or updating objects that contain Pod templates, such as Deployments, Jobs, StatefulSets, etc. + +See [Enforcing Pod Security at the Namespace Level](/docs/concepts/security/pod-security-admission) +for more information. 
\ No newline at end of file diff --git a/content/en/docs/reference/scheduling/config.md b/content/en/docs/reference/scheduling/config.md index 1e140e6300..e30ab2e133 100644 --- a/content/en/docs/reference/scheduling/config.md +++ b/content/en/docs/reference/scheduling/config.md @@ -15,18 +15,19 @@ file and passing its path as a command line argument. A scheduling Profile allows you to configure the different stages of scheduling in the {{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}}. -Each stage is exposed in a extension point. Plugins provide scheduling behaviors +Each stage is exposed in an extension point. Plugins provide scheduling behaviors by implementing one or more of these extension points. You can specify scheduling profiles by running `kube-scheduler --config `, -using the -[KubeSchedulerConfiguration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +using the +KubeSchedulerConfiguration ([v1beta1](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +or [v1beta2](/docs/reference/config-api/kube-scheduler-config.v1beta2/)) struct. A minimal configuration looks as follows: ```yaml -apiVersion: kubescheduler.config.k8s.io/v1beta1 +apiVersion: kubescheduler.config.k8s.io/v1beta2 kind: KubeSchedulerConfiguration clientConnection: kubeconfig: /etc/srv/kubernetes/kube-scheduler/kubeconfig @@ -48,38 +49,41 @@ You can configure a single instance of `kube-scheduler` to run Scheduling happens in a series of stages that are exposed through the following extension points: -1. `QueueSort`: These plugins provide an ordering function that is used to +1. `queueSort`: These plugins provide an ordering function that is used to sort pending Pods in the scheduling queue. Exactly one queue sort plugin may be enabled at a time. -1. `PreFilter`: These plugins are used to pre-process or check information +1. `preFilter`: These plugins are used to pre-process or check information about a Pod or the cluster before filtering. 
They can mark a pod as unschedulable. -1. `Filter`: These plugins are the equivalent of Predicates in a scheduling +1. `filter`: These plugins are the equivalent of Predicates in a scheduling Policy and are used to filter out nodes that can not run the Pod. Filters are called in the configured order. A pod is marked as unschedulable if no nodes pass all the filters. -1. `PreScore`: This is an informational extension point that can be used +1. `postFilter`: These plugins are called in their configured order when no + feasible nodes were found for the pod. If any `postFilter` plugin marks the + Pod _schedulable_, the remaining plugins are not called. +1. `preScore`: This is an informational extension point that can be used for doing pre-scoring work. -1. `Score`: These plugins provide a score to each node that has passed the +1. `score`: These plugins provide a score to each node that has passed the filtering phase. The scheduler will then select the node with the highest weighted scores sum. -1. `Reserve`: This is an informational extension point that notifies plugins +1. `reserve`: This is an informational extension point that notifies plugins when resources have been reserved for a given Pod. Plugins also implement an `Unreserve` call that gets called in the case of failure during or after `Reserve`. -1. `Permit`: These plugins can prevent or delay the binding of a Pod. -1. `PreBind`: These plugins perform any work required before a Pod is bound. -1. `Bind`: The plugins bind a Pod to a Node. Bind plugins are called in order +1. `permit`: These plugins can prevent or delay the binding of a Pod. +1. `preBind`: These plugins perform any work required before a Pod is bound. +1. `bind`: The plugins bind a Pod to a Node. `bind` plugins are called in order and once one has done the binding, the remaining plugins are skipped. At least one bind plugin is required. -1. `PostBind`: This is an informational extension point that is called after +1. 
`postBind`: This is an informational extension point that is called after a Pod has been bound. For each extension point, you could disable specific [default plugins](#scheduling-plugins) or enable your own. For example: ```yaml -apiVersion: kubescheduler.config.k8s.io/v1beta1 +apiVersion: kubescheduler.config.k8s.io/v1beta2 kind: KubeSchedulerConfiguration profiles: - plugins: @@ -99,106 +103,109 @@ desired. ### Scheduling plugins -1. `UnReserve`: This is an informational extension point that is called if - a Pod is rejected after being reserved and put on hold by a `Permit` plugin. - -## Scheduling plugins - The following plugins, enabled by default, implement one or more of these extension points: -- `SelectorSpread`: Favors spreading across nodes for Pods that belong to - {{< glossary_tooltip text="Services" term_id="service" >}}, - {{< glossary_tooltip text="ReplicaSets" term_id="replica-set" >}} and - {{< glossary_tooltip text="StatefulSets" term_id="statefulset" >}}. - Extension points: `PreScore`, `Score`. - `ImageLocality`: Favors nodes that already have the container images that the Pod runs. - Extension points: `Score`. + Extension points: `score`. - `TaintToleration`: Implements [taints and tolerations](/docs/concepts/scheduling-eviction/taint-and-toleration/). - Implements extension points: `Filter`, `Prescore`, `Score`. + Implements extension points: `filter`, `preScore`, `score`. - `NodeName`: Checks if a Pod spec node name matches the current node. - Extension points: `Filter`. + Extension points: `filter`. - `NodePorts`: Checks if a node has free ports for the requested Pod ports. - Extension points: `PreFilter`, `Filter`. + Extension points: `preFilter`, `filter`. - `NodePreferAvoidPods`: Scores nodes according to the node {{< glossary_tooltip text="annotation" term_id="annotation" >}} `scheduler.alpha.kubernetes.io/preferAvoidPods`. - Extension points: `Score`. + Extension points: `score`. 
- `NodeAffinity`: Implements [node selectors](/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) and [node affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity). - Extension points: `Filter`, `Score`. + Extension points: `filter`, `score`. - `PodTopologySpread`: Implements [Pod topology spread](/docs/concepts/workloads/pods/pod-topology-spread-constraints/). - Extension points: `PreFilter`, `Filter`, `PreScore`, `Score`. + Extension points: `preFilter`, `filter`, `preScore`, `score`. - `NodeUnschedulable`: Filters out nodes that have `.spec.unschedulable` set to true. - Extension points: `Filter`. + Extension points: `filter`. - `NodeResourcesFit`: Checks if the node has all the resources that the Pod is - requesting. - Extension points: `PreFilter`, `Filter`. + requesting. The score can use one of three strategies: `LeastAllocated` + (default), `MostAllocated` and `RequestedToCapacityRatio`. + Extension points: `preFilter`, `filter`, `score`. - `NodeResourcesBalancedAllocation`: Favors nodes that would obtain a more balanced resource usage if the Pod is scheduled there. - Extension points: `Score`. -- `NodeResourcesLeastAllocated`: Favors nodes that have a low allocation of - resources. - Extension points: `Score`. + Extension points: `score`. - `VolumeBinding`: Checks if the node has or if it can bind the requested {{< glossary_tooltip text="volumes" term_id="volume" >}}. - Extension points: `PreFilter`, `Filter`, `Reserve`, `PreBind`, `Score`. + Extension points: `preFilter`, `filter`, `reserve`, `preBind`, `score`. {{< note >}} - `Score` extension point is enabled when `VolumeCapacityPriority` feature is + `score` extension point is enabled when `VolumeCapacityPriority` feature is enabled. It prioritizes the smallest PVs that can fit the requested volume size. {{< /note >}} - `VolumeRestrictions`: Checks that volumes mounted in the node satisfy restrictions that are specific to the volume provider. 
- Extension points: `Filter`. + Extension points: `filter`. - `VolumeZone`: Checks that volumes requested satisfy any zone requirements they might have. - Extension points: `Filter`. + Extension points: `filter`. - `NodeVolumeLimits`: Checks that CSI volume limits can be satisfied for the node. - Extension points: `Filter`. + Extension points: `filter`. - `EBSLimits`: Checks that AWS EBS volume limits can be satisfied for the node. - Extension points: `Filter`. + Extension points: `filter`. - `GCEPDLimits`: Checks that GCP-PD volume limits can be satisfied for the node. - Extension points: `Filter`. + Extension points: `filter`. - `AzureDiskLimits`: Checks that Azure disk volume limits can be satisfied for the node. - Extension points: `Filter`. + Extension points: `filter`. - `InterPodAffinity`: Implements [inter-Pod affinity and anti-affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity). - Extension points: `PreFilter`, `Filter`, `PreScore`, `Score`. + Extension points: `preFilter`, `filter`, `preScore`, `score`. - `PrioritySort`: Provides the default priority based sorting. - Extension points: `QueueSort`. + Extension points: `queueSort`. - `DefaultBinder`: Provides the default binding mechanism. - Extension points: `Bind`. + Extension points: `bind`. - `DefaultPreemption`: Provides the default preemption mechanism. - Extension points: `PostFilter`. + Extension points: `postFilter`. You can also enable the following plugins, through the component config APIs, that are not enabled by default: +- `SelectorSpread`: Favors spreading across nodes for Pods that belong to + {{< glossary_tooltip text="Services" term_id="service" >}}, + {{< glossary_tooltip text="ReplicaSets" term_id="replica-set" >}} and + {{< glossary_tooltip text="StatefulSets" term_id="statefulset" >}}. + Extension points: `preScore`, `score`. 
+- `CinderLimits`: Checks that [OpenStack Cinder](https://docs.openstack.org/cinder/) + volume limits can be satisfied for the node. + Extension points: `filter`. + +The following plugins are deprecated and can only be enabled in a `v1beta1` +configuration: + +- `NodeResourcesLeastAllocated`: Favors nodes that have a low allocation of + resources. + Extension points: `score`. - `NodeResourcesMostAllocated`: Favors nodes that have a high allocation of resources. - Extension points: `Score`. + Extension points: `score`. - `RequestedToCapacityRatio`: Favor nodes according to a configured function of the allocated resources. - Extension points: `Score`. -- `CinderVolume`: Checks that OpenStack Cinder volume limits can be satisfied - for the node. - Extension points: `Filter`. + Extension points: `score`. - `NodeLabel`: Filters and / or scores a node according to configured {{< glossary_tooltip text="label(s)" term_id="label" >}}. - Extension points: `Filter`, `Score`. + Extension points: `filter`, `score`. - `ServiceAffinity`: Checks that Pods that belong to a {{< glossary_tooltip term_id="service" >}} fit in a set of nodes defined by configured labels. This plugin also favors spreading the Pods belonging to a Service across nodes. - Extension points: `PreFilter`, `Filter`, `Score`. + Extension points: `preFilter`, `filter`, `score`. +- `NodePreferAvoidPods`: Prioritizes nodes according to the node annotation + `scheduler.alpha.kubernetes.io/preferAvoidPods`. + Extension points: `score`. ### Multiple profiles @@ -211,7 +218,7 @@ profiles: one with the default plugins and one with all scoring plugins disabled. ```yaml -apiVersion: kubescheduler.config.k8s.io/v1beta1 +apiVersion: kubescheduler.config.k8s.io/v1beta2 kind: KubeSchedulerConfiguration profiles: - schedulerName: default-scheduler @@ -243,7 +250,7 @@ list. 
{{< /note >}} {{< note >}} -All profiles must use the same plugin in the QueueSort extension point and have +All profiles must use the same plugin in the `queueSort` extension point and have the same configuration parameters (if applicable). This is because the scheduler only has one pending pods queue. {{< /note >}} @@ -253,4 +260,5 @@ only has one pending pods queue. * Read the [kube-scheduler reference](/docs/reference/command-line-tools-reference/kube-scheduler/) * Learn about [scheduling](/docs/concepts/scheduling-eviction/kube-scheduler/) * Read the [kube-scheduler configuration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) reference +* Read the [kube-scheduler configuration (v1beta2)](/docs/reference/config-api/kube-scheduler-config.v1beta2/) reference diff --git a/content/en/docs/reference/scheduling/policies.md b/content/en/docs/reference/scheduling/policies.md index 1cd80273d9..99291c2b37 100644 --- a/content/en/docs/reference/scheduling/policies.md +++ b/content/en/docs/reference/scheduling/policies.md @@ -104,6 +104,6 @@ The following *priorities* implement scoring: * Learn about [scheduling](/docs/concepts/scheduling-eviction/kube-scheduler/) * Learn about [kube-scheduler Configuration](/docs/reference/scheduling/config/) -* Read the [kube-scheduler configuration reference (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1) +* Read the [kube-scheduler configuration reference (v1beta2)](/docs/reference/config-api/kube-scheduler-config.v1beta2) * Read the [kube-scheduler Policy reference (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/) diff --git a/content/en/docs/reference/setup-tools/kubeadm/_index.md b/content/en/docs/reference/setup-tools/kubeadm/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md deleted file mode 100644 index 
af458320a5..0000000000 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md +++ /dev/null @@ -1,61 +0,0 @@ - - - -Kubeadm experimental sub-commands - -### Synopsis - - -Kubeadm experimental sub-commands - -### Options - - ---- - - - - - - - - - - -
-h, --help

help for alpha

- - - -### Options inherited from parent commands - - ---- - - - - - - - - - - -
--rootfs string

[EXPERIMENTAL] The path to the 'real' host root filesystem.

- - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md deleted file mode 100644 index b678061bb0..0000000000 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md +++ /dev/null @@ -1,63 +0,0 @@ - - - -Kubeconfig file utilities - -### Synopsis - - -Kubeconfig file utilities. - -Alpha Disclaimer: this command is currently alpha. - -### Options - - ---- - - - - - - - - - - -
-h, --help

help for kubeconfig

- - - -### Options inherited from parent commands - - ---- - - - - - - - - - - -
--rootfs string

[EXPERIMENTAL] The path to the 'real' host root filesystem.

- - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md deleted file mode 100644 index de07cd0f7d..0000000000 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md +++ /dev/null @@ -1,102 +0,0 @@ - - - -Output a kubeconfig file for an additional user - -### Synopsis - - -Output a kubeconfig file for an additional user. - -Alpha Disclaimer: this command is currently alpha. - -``` -kubeadm alpha kubeconfig user [flags] -``` - -### Examples - -``` - # Output a kubeconfig file for an additional user named foo using a kubeadm config file bar - kubeadm alpha kubeconfig user --client-name=foo --config=bar -``` - -### Options - - ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--client-name string

The name of user. It will be used as the CN if client certificates are created

--config string

Path to a kubeadm configuration file.

-h, --help

help for user

--org strings

The orgnizations of the client certificate. It will be used as the O if client certificates are created

--token string

The token that should be used as the authentication mechanism for this kubeconfig, instead of client certificates

- - - -### Options inherited from parent commands - - ---- - - - - - - - - - - -
--rootfs string

[EXPERIMENTAL] The path to the 'real' host root filesystem.

- - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md index 2a41f2e58f..1abc7d9bac 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md @@ -17,7 +17,7 @@ Generate keys and certificate signing requests Generates keys and certificate signing requests (CSRs) for all the certificates required to run the control plane. This command also generates partial kubeconfig files with private key data in the "users > user > client-key-data" field, and for each kubeconfig file an accompanying ".csr" file is created. -This command is designed for use in [Kubeadm External CA Mode](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#external-ca-mode). It generates CSRs which you can then submit to your external certificate authority for signing. +This command is designed for use in [Kubeadm External CA Mode](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#external-ca-mode). It generates CSRs which you can then submit to your external certificate authority for signing. The PEM encoded signed certificates should then be saved alongside the key files, using ".crt" as the file extension, or in the case of kubeconfig files, the PEM encoded signed certificate should be base64 encoded and added to the kubeconfig file in the "users > user > client-certificate-data" field. 
@@ -29,7 +29,7 @@ kubeadm certs generate-csr [flags] ``` # The following command will generate keys and CSRs for all control-plane certificates and kubeconfig files: - kubeadm alpha certs generate-csr --kubeconfig-dir /tmp/etc-k8s --cert-dir /tmp/etc-k8s/pki + kubeadm certs generate-csr --kubeconfig-dir /tmp/etc-k8s --cert-dir /tmp/etc-k8s/pki ``` ### Options diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md index 2a81cee1d4..31192cf3f7 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md @@ -50,20 +50,6 @@ kubeadm certs renew admin.conf [flags]

Path to a kubeadm configuration file.

- ---csr-dir string - - -

The path to output the CSRs and private keys to

- - - ---csr-only - - -

Create CSRs instead of generating certificates

- - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md index b948adb65c..77ea6e45a1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md @@ -44,20 +44,6 @@ kubeadm certs renew all [flags]

Path to a kubeadm configuration file.

- ---csr-dir string - - -

The path to output the CSRs and private keys to

- - - ---csr-only - - -

Create CSRs instead of generating certificates

- - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md index cb8fe0d5f7..f95a51e1a7 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md @@ -50,20 +50,6 @@ kubeadm certs renew apiserver-etcd-client [flags]

Path to a kubeadm configuration file.

- ---csr-dir string - - -

The path to output the CSRs and private keys to

- - - ---csr-only - - -

Create CSRs instead of generating certificates

- - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md index 475e8c9f22..27ba374b9f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md @@ -50,20 +50,6 @@ kubeadm certs renew apiserver-kubelet-client [flags]

Path to a kubeadm configuration file.

- ---csr-dir string - - -

The path to output the CSRs and private keys to

- - - ---csr-only - - -

Create CSRs instead of generating certificates

- - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md index 750df89d83..7dc59c45d4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md @@ -50,20 +50,6 @@ kubeadm certs renew apiserver [flags]

Path to a kubeadm configuration file.

- ---csr-dir string - - -

The path to output the CSRs and private keys to

- - - ---csr-only - - -

Create CSRs instead of generating certificates

- - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md index b052fb3e54..4df1d8221c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md @@ -50,20 +50,6 @@ kubeadm certs renew controller-manager.conf [flags]

Path to a kubeadm configuration file.

- ---csr-dir string - - -

The path to output the CSRs and private keys to

- - - ---csr-only - - -

Create CSRs instead of generating certificates

- - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md index 252296e395..84d75bfd36 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md @@ -50,20 +50,6 @@ kubeadm certs renew etcd-healthcheck-client [flags]

Path to a kubeadm configuration file.

- ---csr-dir string - - -

The path to output the CSRs and private keys to

- - - ---csr-only - - -

Create CSRs instead of generating certificates

- - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md index f25b86fa15..60acaae1db 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md @@ -50,20 +50,6 @@ kubeadm certs renew etcd-peer [flags]

Path to a kubeadm configuration file.

- ---csr-dir string - - -

The path to output the CSRs and private keys to

- - - ---csr-only - - -

Create CSRs instead of generating certificates

- - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md index 059d0d9bbb..969157fe3e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md @@ -50,20 +50,6 @@ kubeadm certs renew etcd-server [flags]

Path to a kubeadm configuration file.

- ---csr-dir string - - -

The path to output the CSRs and private keys to

- - - ---csr-only - - -

Create CSRs instead of generating certificates

- - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md index d93fca8d46..3d9564e485 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md @@ -50,20 +50,6 @@ kubeadm certs renew front-proxy-client [flags]

Path to a kubeadm configuration file.

- ---csr-dir string - - -

The path to output the CSRs and private keys to

- - - ---csr-only - - -

Create CSRs instead of generating certificates

- - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md index 5d7ade453b..6c8d40dae3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md @@ -50,20 +50,6 @@ kubeadm certs renew scheduler.conf [flags]

Path to a kubeadm configuration file.

- ---csr-dir string - - -

The path to output the CSRs and private keys to

- - - ---csr-only - - -

Create CSRs instead of generating certificates

- - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md index 4634bd0a27..b7f3e05a8b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md @@ -55,7 +55,7 @@ kubeadm config images list [flags] --feature-gates string -

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
RootlessControlPlane=true|false (ALPHA - default=false)

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md index 840072d167..a44970a68a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md @@ -48,7 +48,7 @@ kubeadm config images pull [flags] --feature-gates string -

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
RootlessControlPlane=true|false (ALPHA - default=false)

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md index 5858bdb307..8aa2f6f1d2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md @@ -19,9 +19,9 @@ Read an older version of the kubeadm configuration API types from a file, and ou This command lets you convert configuration objects of older versions to the latest supported version, locally in the CLI tool without ever touching anything in the cluster. In this version of kubeadm, the following API versions are supported: -- kubeadm.k8s.io/v1beta2 +- kubeadm.k8s.io/v1beta3 -Further, kubeadm can only write out config of version "kubeadm.k8s.io/v1beta2", but read both types. +Further, kubeadm can only write out config of version "kubeadm.k8s.io/v1beta3", but read both types. So regardless of what version you pass to the --old-config parameter here, the API object will be read, deserialized, defaulted, converted, validated, and re-serialized when written to stdout or --new-config if specified. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md index 2f20d9d1ce..e8aa81abf6 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md @@ -17,7 +17,7 @@ Print configuration This command prints configurations for subcommands provided. 
-For details, see: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 +For details, see: https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#section-directories ``` kubeadm config print [flags] diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md index 4294cffe8b..62f4ca7e5b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md @@ -134,18 +134,11 @@ kubeadm init [flags]

Don't apply any changes; just output what would be done.

- ---experimental-patches string - - -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

- - --feature-gates string -

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
RootlessControlPlane=true|false (ALPHA - default=false)

@@ -183,6 +176,13 @@ kubeadm init [flags]

Specify the node name.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + --pod-network-cidr string diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md index 48ae42ca48..c30d45980c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md @@ -62,7 +62,7 @@ kubeadm init phase addon all [flags] --feature-gates string -

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
RootlessControlPlane=true|false (ALPHA - default=false)

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md index 68f0d0d025..3e4076a862 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md @@ -41,7 +41,7 @@ kubeadm init phase addon coredns [flags] --feature-gates string -

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
RootlessControlPlane=true|false (ALPHA - default=false)

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md index 4c8bed971a..3280fdc0eb 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md @@ -15,7 +15,7 @@ Generate the certificate the apiserver uses to access etcd ### Synopsis -Generate the certificate the apiserver uses to access etcd, and save them into apiserver-etcd-client.cert and apiserver-etcd-client.key files. +Generate the certificate the apiserver uses to access etcd, and save them into apiserver-etcd-client.crt and apiserver-etcd-client.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md index 814a9c15ff..f98f75def0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md @@ -15,7 +15,7 @@ Generate the certificate for the API server to connect to kubelet ### Synopsis -Generate the certificate for the API server to connect to kubelet, and save them into apiserver-kubelet-client.cert and apiserver-kubelet-client.key files. +Generate the certificate for the API server to connect to kubelet, and save them into apiserver-kubelet-client.crt and apiserver-kubelet-client.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md index fa2d46ab8e..afa192d3de 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md @@ -15,9 +15,7 @@ Generate the certificate for serving the Kubernetes API ### Synopsis -Generate the certificate for serving the Kubernetes API, and save them into apiserver.cert and apiserver.key files. - -Default SANs are kubernetes, kubernetes.default, kubernetes.default.svc, kubernetes.default.svc.cluster.local, 10.96.0.1, 127.0.0.1 +Generate the certificate for serving the Kubernetes API, and save them into apiserver.crt and apiserver.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md index d12b74f19f..b94061e8d4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md @@ -15,7 +15,7 @@ Generate the self-signed Kubernetes CA to provision identities for other Kuberne ### Synopsis -Generate the self-signed Kubernetes CA to provision identities for other Kubernetes components, and save them into ca.cert and ca.key files. +Generate the self-signed Kubernetes CA to provision identities for other Kubernetes components, and save them into ca.crt and ca.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md index 2cddb77ade..547601e364 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md @@ -15,7 +15,7 @@ Generate the self-signed CA to provision identities for etcd ### Synopsis -Generate the self-signed CA to provision identities for etcd, and save them into etcd/ca.cert and etcd/ca.key files. +Generate the self-signed CA to provision identities for etcd, and save them into etcd/ca.crt and etcd/ca.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md index 9876d5bce7..ea3755c786 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md @@ -15,7 +15,7 @@ Generate the certificate for liveness probes to healthcheck etcd ### Synopsis -Generate the certificate for liveness probes to healthcheck etcd, and save them into etcd/healthcheck-client.cert and etcd/healthcheck-client.key files. +Generate the certificate for liveness probes to healthcheck etcd, and save them into etcd/healthcheck-client.crt and etcd/healthcheck-client.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md index d86991f8f8..904b00a68f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md @@ -15,7 +15,7 @@ Generate the certificate for etcd nodes to communicate with each other ### Synopsis -Generate the certificate for etcd nodes to communicate with each other, and save them into etcd/peer.cert and etcd/peer.key files. +Generate the certificate for etcd nodes to communicate with each other, and save them into etcd/peer.crt and etcd/peer.key files. Default SANs are localhost, 127.0.0.1, 127.0.0.1, ::1 diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md index 213cf22d2f..4b8894075c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md @@ -15,7 +15,7 @@ Generate the certificate for serving etcd ### Synopsis -Generate the certificate for serving etcd, and save them into etcd/server.cert and etcd/server.key files. +Generate the certificate for serving etcd, and save them into etcd/server.crt and etcd/server.key files. 
Default SANs are localhost, 127.0.0.1, 127.0.0.1, ::1 diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md index c2d37be74f..8193d38fce 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md @@ -15,7 +15,7 @@ Generate the self-signed CA to provision identities for front proxy ### Synopsis -Generate the self-signed CA to provision identities for front proxy, and save them into front-proxy-ca.cert and front-proxy-ca.key files. +Generate the self-signed CA to provision identities for front proxy, and save them into front-proxy-ca.crt and front-proxy-ca.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md index 58a81fa7a2..d5cff5b662 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md @@ -15,7 +15,7 @@ Generate the certificate for the front proxy client ### Synopsis -Generate the certificate for the front proxy client, and save them into front-proxy-client.cert and front-proxy-client.key files. +Generate the certificate for the front proxy client, and save them into front-proxy-client.crt and front-proxy-client.key files. If both files already exist, kubeadm skips the generation step and existing files will be used. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md index 45fa4a29c4..6a53512cc4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md @@ -91,17 +91,17 @@ kubeadm init phase control-plane all [flags] ---experimental-patches string +--dry-run -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+

Don't apply any changes; just output what would be done.

--feature-gates string -

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
RootlessControlPlane=true|false (ALPHA - default=false)

@@ -125,6 +125,13 @@ kubeadm init phase control-plane all [flags]

Choose a specific Kubernetes version for the control plane.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + --pod-network-cidr string diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md index d073ed89f0..b46d5ea7c8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md @@ -73,17 +73,17 @@ kubeadm init phase control-plane apiserver [flags] ---experimental-patches string +--dry-run -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+

Don't apply any changes; just output what would be done.

--feature-gates string -

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
RootlessControlPlane=true|false (ALPHA - default=false)

@@ -107,6 +107,13 @@ kubeadm init phase control-plane apiserver [flags]

Choose a specific Kubernetes version for the control plane.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + --service-cidr string     Default: "10.96.0.0/12" diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md index 4a7f1e0fe0..48d36cb899 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md @@ -52,10 +52,10 @@ kubeadm init phase control-plane controller-manager [flags] ---experimental-patches string +--dry-run -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+

Don't apply any changes; just output what would be done.

@@ -79,6 +79,13 @@ kubeadm init phase control-plane controller-manager [flags]

Choose a specific Kubernetes version for the control plane.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + --pod-network-cidr string diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md index c8ccb8c37a..f726834229 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md @@ -45,10 +45,10 @@ kubeadm init phase control-plane scheduler [flags] ---experimental-patches string +--dry-run -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+

Don't apply any changes; just output what would be done.

@@ -72,6 +72,13 @@ kubeadm init phase control-plane scheduler [flags]

Choose a specific Kubernetes version for the control plane.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + --scheduler-extra-args <comma-separated 'key=value' pairs> diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md index 1e4e8fa22f..f5bc0a529b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md @@ -56,13 +56,6 @@ kubeadm init phase etcd local [flags]

Path to a kubeadm configuration file.

- ---experimental-patches string - - -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

- - -h, --help @@ -77,6 +70,13 @@ kubeadm init phase etcd local [flags]

Choose a container registry to pull control plane images from

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md index 3f39346c96..145f0bc340 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md @@ -69,7 +69,7 @@ control-plane-prepare Prepare the machine for serving a control plane kubelet-start Write kubelet settings, certificates and (re)start the kubelet control-plane-join Join a machine as a control plane instance /etcd Add a new local etcd member - /update-status Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap + /update-status Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap (DEPRECATED) /mark-control-plane Mark a node as a control-plane ``` @@ -157,13 +157,6 @@ kubeadm join [api-server-endpoint] [flags]

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

- ---experimental-patches string - - -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

- - -h, --help @@ -185,6 +178,13 @@ kubeadm join [api-server-endpoint] [flags]

Specify the node name.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + --skip-phases strings diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md index ed1753457a..7a3517652d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md @@ -65,6 +65,13 @@ kubeadm join phase control-plane-join all [flags]

Specify the node name.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md index 9990ce3dc1..c06ddaae40 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md @@ -51,13 +51,6 @@ kubeadm join phase control-plane-join etcd [flags]

Create a new control plane instance on this node

- ---experimental-patches string - - -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

- - -h, --help @@ -72,6 +65,13 @@ kubeadm join phase control-plane-join etcd [flags]

Specify the node name.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md index 10127f967f..af1aac985c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md @@ -10,12 +10,12 @@ guide. You can file document formatting bugs against the --> -Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap +Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap (DEPRECATED) ### Synopsis -Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap +Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap (DEPRECATED) ``` kubeadm join phase control-plane-join update-status [flags] diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md index 02864ace82..661edf597d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md @@ -93,13 +93,6 @@ kubeadm join phase control-plane-prepare all [api-server-endpoint] [flags]

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

- ---experimental-patches string - - -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

- - -h, --help @@ -114,6 +107,13 @@ kubeadm join phase control-plane-prepare all [api-server-endpoint] [flags]

Specify the node name.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + --tls-bootstrap-token string diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md index 820f499c41..c9084c6e55 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md @@ -58,13 +58,6 @@ kubeadm join phase control-plane-prepare control-plane [flags]

Create a new control plane instance on this node

- ---experimental-patches string - - -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

- - -h, --help @@ -72,6 +65,13 @@ kubeadm join phase control-plane-prepare control-plane [flags]

help for control-plane

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md index b678061bb0..55177462d6 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md @@ -17,8 +17,6 @@ Kubeconfig file utilities Kubeconfig file utilities. -Alpha Disclaimer: this command is currently alpha. - ### Options diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md index 8293ee2f27..89315e27b8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md @@ -17,8 +17,6 @@ Output a kubeconfig file for an additional user Output a kubeconfig file for an additional user. -Alpha Disclaimer: this command is currently alpha. - ``` kubeadm kubeconfig user [flags] ``` @@ -27,7 +25,7 @@ kubeadm kubeconfig user [flags] ``` # Output a kubeconfig file for an additional user named foo using a kubeadm config file bar - kubeadm alpha kubeconfig user --client-name=foo --config=bar + kubeadm kubeconfig user --client-name=foo --config=bar ``` ### Options @@ -74,6 +72,13 @@ kubeadm kubeconfig user [flags] + + + + + + +

The token that should be used as the authentication mechanism for this kubeconfig, instead of client certificates

--validity-period duration     Default: 8760h0m0s

The validity period of the client certificate. It is an offset from the current time.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md index a745cb8c9e..19bdbb417a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md @@ -20,7 +20,7 @@ Performs a best effort revert of changes made to this host by 'kubeadm init' or The "reset" command executes the following phases: ``` preflight Run reset pre-flight checks -update-cluster-status Remove this node from the ClusterStatus object. +update-cluster-status Remove this node from the ClusterStatus object (DEPRECATED). remove-etcd-member Remove a local etcd member. cleanup-node Run cleanup node. ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md index b73f736958..9d4b7af77f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md @@ -10,12 +10,12 @@ guide. You can file document formatting bugs against the --> -Remove this node from the ClusterStatus object. +Remove this node from the ClusterStatus object (DEPRECATED). ### Synopsis -Remove this node from the ClusterStatus object if the node is a control plane node. +Remove this node from the ClusterStatus object (DEPRECATED). 
``` kubeadm reset phase update-cluster-status [flags] diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md index d34e01da47..3add5a98c2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md @@ -72,18 +72,11 @@ kubeadm upgrade apply [version]

Perform the upgrade of etcd.

- ---experimental-patches string - - -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

- - --feature-gates string -

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
RootlessControlPlane=true|false (ALPHA - default=false)

@@ -114,6 +107,13 @@ kubeadm upgrade apply [version]

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + --print-config diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md index 5bd05a9822..a8a3138c88 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md @@ -59,13 +59,6 @@ kubeadm upgrade node [flags]

Perform the upgrade of etcd.

- ---experimental-patches string - - -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

- - -h, --help @@ -87,6 +80,13 @@ kubeadm upgrade node [flags]

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + --skip-phases strings diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md index 835eba6842..58a6a672e3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md @@ -51,13 +51,6 @@ kubeadm upgrade node phase control-plane [flags]

Perform the upgrade of etcd.

- ---experimental-patches string - - -

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

- - -h, --help @@ -72,6 +65,13 @@ kubeadm upgrade node phase control-plane [flags]

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

+ +--patches string + + +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

+ + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md index 7d16866b9a..c3cc133169 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md @@ -55,7 +55,7 @@ kubeadm upgrade plan [version] [flags] --feature-gates string -

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
RootlessControlPlane=true|false (ALPHA - default=false)

diff --git a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md index cc7cef2543..6222685845 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md +++ b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md @@ -298,26 +298,6 @@ Please note that: 2. in case of kubeadm is executed in the `--dry-run` mode, the etcd static Pod manifest is written in a temporary folder 3. Static Pod manifest generation for local etcd can be invoked individually with the [`kubeadm init phase etcd local`](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-etcd) command -### Optional Dynamic Kubelet Configuration - -To use this functionality call `kubeadm alpha kubelet config enable-dynamic`. It writes the kubelet init configuration -into `/var/lib/kubelet/config/init/kubelet` file. - -The init configuration is used for starting the kubelet on this specific node, providing an alternative for the kubelet drop-in file; -such configuration will be replaced by the kubelet base configuration as described in following steps. -See [set kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file) for additional information. - -Please note that: - -1. To make dynamic kubelet configuration work, flag `--dynamic-config-dir=/var/lib/kubelet/config/dynamic` should be specified - in `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` -1. The kubelet configuration can be changed by passing a `KubeletConfiguration` object to `kubeadm init` or `kubeadm join` by using - a configuration file `--config some-file.yaml`. The `KubeletConfiguration` object can be separated from other objects such - as `InitConfiguration` using the `---` separator. For more details have a look at the `kubeadm config print-default` command. 
- -For more details about the `KubeletConfiguration` struct, take a look at the -[`KubeletConfiguration` reference](/docs/reference/config-api/kubelet-config.v1beta1/). - ### Wait for the control plane to come up kubeadm waits (upto 4m0s) until `localhost:6443/healthz` (kube-apiserver liveness) returns `ok`. However in order to detect @@ -327,17 +307,6 @@ deadlock conditions, kubeadm fails fast if `localhost:10255/healthz` (kubelet li kubeadm relies on the kubelet to pull the control plane images and run them properly as static Pods. After the control plane is up, kubeadm completes the tasks described in following paragraphs. -### (optional) Write base kubelet configuration - -{{< feature-state for_k8s_version="v1.11" state="beta" >}} - -If kubeadm is invoked with `--feature-gates=DynamicKubeletConfig`: - -1. Write the kubelet base configuration into the `kubelet-base-config-v1.9` ConfigMap in the `kube-system` namespace -2. Creates RBAC rules for granting read access to that ConfigMap to all bootstrap tokens and all kubelet instances - (that is `system:bootstrappers:kubeadm:default-node-token` and `system:nodes` groups) -3. Enable the dynamic kubelet configuration feature for the initial control-plane node by pointing `Node.spec.configSource` to the newly-created ConfigMap - ### Save the kubeadm ClusterConfiguration in a ConfigMap for later reference kubeadm saves the configuration passed to `kubeadm init` in a ConfigMap named `kubeadm-config` under `kube-system` namespace. 
@@ -520,18 +489,3 @@ Please note that: - The temporary authentication resolve to a user member of `system:bootstrappers:kubeadm:default-node-token` group which was granted access to CSR api during the `kubeadm init` process - The automatic CSR approval is managed by the csrapprover controller, according with configuration done the `kubeadm init` process - -### (optional) Write init kubelet configuration - -{{< feature-state for_k8s_version="v1.11" state="beta" >}} - -If kubeadm is invoked with `--feature-gates=DynamicKubeletConfig`: - -1. Read the kubelet base configuration from the `kubelet-base-config-v1.x` ConfigMap in the `kube-system` namespace using the - Bootstrap Token credentials, and write it to disk as kubelet init configuration file `/var/lib/kubelet/config/init/kubelet` -2. As soon as kubelet starts with the Node's own credential (`/etc/kubernetes/kubelet.conf`), update current node configuration - specifying that the source for the node/kubelet configuration is the above ConfigMap. - -Please note that: - -1. To make dynamic kubelet configuration work, flag `--dynamic-config-dir=/var/lib/kubelet/config/dynamic` should be specified in `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md index 0b373ee423..76d9921be1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md @@ -10,7 +10,7 @@ weight: 50 During `kubeadm init`, kubeadm uploads the `ClusterConfiguration` object to your cluster in a ConfigMap called `kubeadm-config` in the `kube-system` namespace. This configuration is then read during -`kubeadm join`, `kubeadm reset` and `kubeadm upgrade`. To view this ConfigMap call `kubeadm config view`. +`kubeadm join`, `kubeadm reset` and `kubeadm upgrade`. 
You can use `kubeadm config print` to print the default configuration and `kubeadm config migrate` to convert your old configuration files to a newer version. `kubeadm config images list` and @@ -20,7 +20,7 @@ For more information navigate to [Using kubeadm init with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file) or [Using kubeadm join with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-join/#config-file). -You can also configure several kubelet-configuration options with `kubeadm init`. These options will be the same on any node in your cluster. +You can also configure several kubelet-configuration options with `kubeadm init`. These options will be the same on any node in your cluster. See [Configuring each kubelet in your cluster using kubeadm](/docs/setup/production-environment/tools/kubeadm/kubelet-integration/) for details. In Kubernetes v1.13.0 and later to list/pull kube-dns images instead of the CoreDNS image @@ -28,7 +28,7 @@ the `--config` method described [here](/docs/reference/setup-tools/kubeadm/kubea has to be used. -## kubeadm config print {#cmd-config-view} +## kubeadm config print {#cmd-config-print} {{< include "generated/kubeadm_config_print.md" >}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md index 2b6939bac6..3f9812b260 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md @@ -119,16 +119,16 @@ Use the following phase to configure bootstrap tokens. 
{{< tab name="bootstrap-token" include="generated/kubeadm_init_phase_bootstrap-token.md" />}} {{< /tabs >}} -## kubeadm init phase kubelet-finialize {#cmd-phase-kubelet-finalize-all} +## kubeadm init phase kubelet-finalize {#cmd-phase-kubelet-finalize-all} Use the following phase to update settings relevant to the kubelet after TLS bootstrap. You can use the `all` subcommand to run all `kubelet-finalize` phases. {{< tabs name="tab-kubelet-finalize" >}} -{{< tab name="kublet-finalize" include="generated/kubeadm_init_phase_kubelet-finalize.md" />}} -{{< tab name="kublet-finalize-all" include="generated/kubeadm_init_phase_kubelet-finalize_all.md" />}} -{{< tab name="kublet-finalize-cert-rotation" include="generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md" />}} +{{< tab name="kubelet-finalize" include="generated/kubeadm_init_phase_kubelet-finalize.md" />}} +{{< tab name="kubelet-finalize-all" include="generated/kubeadm_init_phase_kubelet-finalize_all.md" />}} +{{< tab name="kubelet-finalize-cert-rotation" include="generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md" />}} {{< /tabs >}} ## kubeadm init phase addon {#cmd-phase-addon} @@ -143,8 +143,8 @@ install them selectively. {{< tab name="kube-proxy" include="generated/kubeadm_init_phase_addon_kube-proxy.md" />}} {{< /tabs >}} -For more details on each field in the `v1beta2` configuration you can navigate to our -[API reference pages.] 
(https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) +For more details on each field in the `v1beta3` configuration you can navigate to our +[API reference pages.](/docs/reference/config-api/kubeadm-config.v1beta3/) ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md index 2260c2ca22..3779ec2fdf 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md @@ -104,6 +104,10 @@ sudo kubeadm init --skip-phases=control-plane,etcd --config=configfile.yaml What this example would do is write the manifest files for the control plane and etcd in `/etc/kubernetes/manifests` based on the configuration in `configfile.yaml`. This allows you to modify the files and then skip these phases using `--skip-phases`. By calling the last command you will create a control plane node with the custom manifest files. +{{< feature-state for_k8s_version="v1.22" state="beta" >}} + +Alternatively, you can use the `skipPhases` field under `InitConfiguration`. + ### Using kubeadm init with a configuration file {#config-file} {{< caution >}} @@ -122,8 +126,8 @@ The default configuration can be printed out using the If your configuration is not using the latest version it is **recommended** that you migrate using the [kubeadm config migrate](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. -For more information on the fields and usage of the configuration you can navigate to our API reference -page and pick a version from [the list](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#section-directories). +For more information on the fields and usage of the configuration you can navigate to our +[API reference page](/docs/reference/config-api/kubeadm-config.v1beta2/). 
### Adding kube-proxy parameters {#kube-proxy} @@ -142,7 +146,7 @@ For information about passing flags to control plane components see: By default, kubeadm pulls images from `k8s.gcr.io`. If the requested Kubernetes version is a CI label (such as `ci/latest`) -`gcr.io/kubernetes-ci-images` is used. +`gcr.io/k8s-staging-ci-images` is used. You can override this behavior by using [kubeadm with a configuration file](#config-file). Allowed customization are: diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md index 53ca4a789b..5ad349e66c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md @@ -66,6 +66,10 @@ For example: sudo kubeadm join --skip-phases=preflight --config=config.yaml ``` +{{< feature-state for_k8s_version="v1.22" state="beta" >}} + +Alternatively, you can use the `skipPhases` field in `JoinConfiguration`. + ### Discovering what cluster CA to trust The kubeadm discovery has several options, each with security tradeoffs. @@ -282,8 +286,8 @@ The default configuration can be printed out using the If your configuration is not using the latest version it is **recommended** that you migrate using the [kubeadm config migrate](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. -For more information on the fields and usage of the configuration you can navigate to our API reference -page and pick a version from [the list](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#pkg-subdirectories). +For more information on the fields and usage of the configuration you can navigate to our +[API reference](/docs/reference/config-api/kubeadm-config.v1beta2/). 
## {{% heading "whatsnext" %}} diff --git a/content/en/docs/reference/tools/_index.md b/content/en/docs/reference/tools/_index.md index 7194ab83bd..aa65af289b 100644 --- a/content/en/docs/reference/tools/_index.md +++ b/content/en/docs/reference/tools/_index.md @@ -8,7 +8,7 @@ no_list: true --- -Kubernetes contains several built-in tools to help you work with the Kubernetes system. +Kubernetes contains several built-in tools and external tools that are commonly used or relevant that may as well be seen as required for Kubernetes to function. @@ -26,8 +26,8 @@ to a Kubernetes cluster, troubleshoot them, and manage the cluster and its resou ## Helm -[`Kubernetes Helm`](https://github.com/kubernetes/helm) is a tool for managing packages of pre-configured -Kubernetes resources, aka Kubernetes charts. +[Helm](https://helm.sh/) is a tool for managing packages of pre-configured +Kubernetes resources. These packages are known as _Helm charts_. Use Helm to: diff --git a/content/en/docs/reference/using-api/api-concepts.md b/content/en/docs/reference/using-api/api-concepts.md index da41e7c4c9..913d3db42e 100644 --- a/content/en/docs/reference/using-api/api-concepts.md +++ b/content/en/docs/reference/using-api/api-concepts.md @@ -192,7 +192,92 @@ For example, if there are 1,253 pods on the cluster and the client wants to rece } ``` -Note that the `resourceVersion` of the list remains constant across each request, indicating the server is showing us a consistent snapshot of the pods. Pods that are created, updated, or deleted after version `10245` would not be shown unless the user makes a list request without the `continue` token. This allows clients to break large requests into smaller chunks and then perform a watch operation on the full set without missing any updates. +Note that the `resourceVersion` of the list remains constant across each request, +indicating the server is showing us a consistent snapshot of the pods. 
Pods that +are created, updated, or deleted after version `10245` would not be shown unless +the user makes a list request without the `continue` token. This allows clients +to break large requests into smaller chunks and then perform a watch operation +on the full set without missing any updates. + +`remainingItemCount` is the number of subsequent items in the list which are not +included in this list response. If the list request contained label or field selectors, +then the number of remaining items is unknown and the API server does not include +a `remainingItemCount` field in its response. If the list is complete (either +because it is not chunking or because this is the last chunk), then there are no +more remaining items and the API server does not include a `remainingItemCount` +field in its response. The intended use of the `remainingItemCount` is estimating +the size of a collection. + +## Lists + +There are dozens of list types (such as `PodList`, `ServiceList`, and `NodeList`) defined in the Kubernetes API. +You can get more information about each list type from the [Kubernetes API](/docs/reference/kubernetes-api/) documentation. + +When you query the API for a particular type, all items returned by that query are of that type. For example, when you +ask for a list of services, the list type is shown as `kind: ServiceList` and each item in that list represents a single Service. For example: + +```console + +GET /api/v1/services +--- +{ + "kind": "ServiceList", + "apiVersion": "v1", + "metadata": { + "resourceVersion": "2947301" + }, + "items": [ + { + "metadata": { + "name": "kubernetes", + "namespace": "default", +... + "metadata": { + "name": "kube-dns", + "namespace": "kube-system", +... +``` + +Some tools, such as `kubectl` provide another way to query the Kubernetes API. Because the output of `kubectl` might include multiple list types, the list of items is represented as `kind: List`. 
For example: + +```console + +$ kubectl get services -A -o yaml + +apiVersion: v1 +kind: List +metadata: + resourceVersion: "" + selfLink: "" +items: +- apiVersion: v1 + kind: Service + metadata: + creationTimestamp: "2021-06-03T14:54:12Z" + labels: + component: apiserver + provider: kubernetes + name: kubernetes + namespace: default +... +- apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + creationTimestamp: "2021-06-03T14:54:14Z" + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system +``` + +{{< note >}} +Keep in mind that the Kubernetes API does not have a `kind: List` type. `kind: List` is an internal mechanism type for lists of mixed resources and should not be depended upon. +{{< /note >}} ## Receiving resources as Tables diff --git a/content/en/docs/reference/using-api/client-libraries.md b/content/en/docs/reference/using-api/client-libraries.md index 9ec9f84c5d..eed8169909 100644 --- a/content/en/docs/reference/using-api/client-libraries.md +++ b/content/en/docs/reference/using-api/client-libraries.md @@ -66,7 +66,6 @@ their authors, not the Kubernetes team. 
| PHP | [github.com/maclof/kubernetes-client](https://github.com/maclof/kubernetes-client) | | PHP | [github.com/travisghansen/kubernetes-client-php](https://github.com/travisghansen/kubernetes-client-php) | | PHP | [github.com/renoki-co/php-k8s](https://github.com/renoki-co/php-k8s) | -| Python | [github.com/eldarion-gondor/pykube](https://github.com/eldarion-gondor/pykube) | | Python | [github.com/fiaas/k8s](https://github.com/fiaas/k8s) | | Python | [github.com/mnubo/kubernetes-py](https://github.com/mnubo/kubernetes-py) | | Python | [github.com/tomplus/kubernetes_asyncio](https://github.com/tomplus/kubernetes_asyncio) | diff --git a/content/en/docs/reference/using-api/deprecation-guide.md b/content/en/docs/reference/using-api/deprecation-guide.md old mode 100755 new mode 100644 index 73a4ae2a18..579e4c04ac --- a/content/en/docs/reference/using-api/deprecation-guide.md +++ b/content/en/docs/reference/using-api/deprecation-guide.md @@ -53,11 +53,11 @@ The **events.k8s.io/v1beta1** API version of Event will no longer be served in v * Notable changes in **events.k8s.io/v1**: * `type` is limited to `Normal` and `Warning` * `involvedObject` is renamed to `regarding` - * `action`, `reason`, `reportingComponent`, and `reportingInstance` are required when creating new **events.k8s.io/v1** Events + * `action`, `reason`, `reportingController`, and `reportingInstance` are required when creating new **events.k8s.io/v1** Events * use `eventTime` instead of the deprecated `firstTimestamp` field (which is renamed to `deprecatedFirstTimestamp` and not permitted in new **events.k8s.io/v1** Events) * use `series.lastObservedTime` instead of the deprecated `lastTimestamp` field (which is renamed to `deprecatedLastTimestamp` and not permitted in new **events.k8s.io/v1** Events) * use `series.count` instead of the deprecated `count` field (which is renamed to `deprecatedCount` and not permitted in new **events.k8s.io/v1** Events) - * use `reportingComponent` instead of the 
deprecated `source.component` field (which is renamed to `deprecatedSource.component` and not permitted in new **events.k8s.io/v1** Events) + * use `reportingController` instead of the deprecated `source.component` field (which is renamed to `deprecatedSource.component` and not permitted in new **events.k8s.io/v1** Events) * use `reportingInstance` instead of the deprecated `source.host` field (which is renamed to `deprecatedSource.host` and not permitted in new **events.k8s.io/v1** Events) #### PodDisruptionBudget {#poddisruptionbudget-v125} @@ -86,11 +86,11 @@ RuntimeClass in the **node.k8s.io/v1beta1** API version will no longer be served ### v1.22 -The **v1.22** release will stop serving the following deprecated API versions: +The **v1.22** release stopped serving the following deprecated API versions: #### Webhook resources {#webhook-resources-v122} -The **admissionregistration.k8s.io/v1beta1** API version of MutatingWebhookConfiguration and ValidatingWebhookConfiguration will no longer be served in v1.22. +The **admissionregistration.k8s.io/v1beta1** API version of MutatingWebhookConfiguration and ValidatingWebhookConfiguration is no longer served as of v1.22. * Migrate manifests and API clients to use the **admissionregistration.k8s.io/v1** API version, available since v1.16. * All existing persisted objects are accessible via the new APIs @@ -104,7 +104,7 @@ The **admissionregistration.k8s.io/v1beta1** API version of MutatingWebhookConfi #### CustomResourceDefinition {#customresourcedefinition-v122} -The **apiextensions.k8s.io/v1beta1** API version of CustomResourceDefinition will no longer be served in v1.22. +The **apiextensions.k8s.io/v1beta1** API version of CustomResourceDefinition is no longer served as of v1.22. * Migrate manifests and API clients to use the **apiextensions.k8s.io/v1** API version, available since v1.16. 
* All existing persisted objects are accessible via the new API @@ -122,7 +122,7 @@ The **apiextensions.k8s.io/v1beta1** API version of CustomResourceDefinition wil #### APIService {#apiservice-v122} -The **apiregistration.k8s.io/v1beta1** API version of APIService will no longer be served in v1.22. +The **apiregistration.k8s.io/v1beta1** API version of APIService is no longer served as of v1.22. * Migrate manifests and API clients to use the **apiregistration.k8s.io/v1** API version, available since v1.10. * All existing persisted objects are accessible via the new API @@ -130,14 +130,14 @@ The **apiregistration.k8s.io/v1beta1** API version of APIService will no longer #### TokenReview {#tokenreview-v122} -The **authentication.k8s.io/v1beta1** API version of TokenReview will no longer be served in v1.22. +The **authentication.k8s.io/v1beta1** API version of TokenReview is no longer served as of v1.22. * Migrate manifests and API clients to use the **authentication.k8s.io/v1** API version, available since v1.6. * No notable changes #### SubjectAccessReview resources {#subjectaccessreview-resources-v122} -The **authorization.k8s.io/v1beta1** API version of LocalSubjectAccessReview, SelfSubjectAccessReview, and SubjectAccessReview will no longer be served in v1.22. +The **authorization.k8s.io/v1beta1** API version of LocalSubjectAccessReview, SelfSubjectAccessReview, and SubjectAccessReview is no longer served as of v1.22. * Migrate manifests and API clients to use the **authorization.k8s.io/v1** API version, available since v1.6. * Notable changes: @@ -145,7 +145,7 @@ The **authorization.k8s.io/v1beta1** API version of LocalSubjectAccessReview, Se #### CertificateSigningRequest {#certificatesigningrequest-v122} -The **certificates.k8s.io/v1beta1** API version of CertificateSigningRequest will no longer be served in v1.22. +The **certificates.k8s.io/v1beta1** API version of CertificateSigningRequest is no longer served as of v1.22. 
* Migrate manifests and API clients to use the **certificates.k8s.io/v1** API version, available since v1.19. * All existing persisted objects are accessible via the new API @@ -160,7 +160,7 @@ The **certificates.k8s.io/v1beta1** API version of CertificateSigningRequest wil #### Lease {#lease-v122} -The **coordination.k8s.io/v1beta1** API version of Lease will no longer be served in v1.22. +The **coordination.k8s.io/v1beta1** API version of Lease is no longer served as of v1.22. * Migrate manifests and API clients to use the **coordination.k8s.io/v1** API version, available since v1.14. * All existing persisted objects are accessible via the new API @@ -168,7 +168,7 @@ The **coordination.k8s.io/v1beta1** API version of Lease will no longer be serve #### Ingress {#ingress-v122} -The **extensions/v1beta1** and **networking.k8s.io/v1beta1** API versions of Ingress will no longer be served in v1.22. +The **extensions/v1beta1** and **networking.k8s.io/v1beta1** API versions of Ingress are no longer served as of v1.22. * Migrate manifests and API clients to use the **networking.k8s.io/v1** API version, available since v1.19. * All existing persisted objects are accessible via the new API @@ -181,7 +181,7 @@ The **extensions/v1beta1** and **networking.k8s.io/v1beta1** API versions of Ing #### IngressClass {#ingressclass-v122} -The **networking.k8s.io/v1beta1** API version of IngressClass will no longer be served in v1.22. +The **networking.k8s.io/v1beta1** API version of IngressClass is no longer served as of v1.22. * Migrate manifests and API clients to use the **networking.k8s.io/v1** API version, available since v1.19. * All existing persisted objects are accessible via the new API @@ -189,7 +189,7 @@ The **networking.k8s.io/v1beta1** API version of IngressClass will no longer be #### RBAC resources {#rbac-resources-v122} -The **rbac.authorization.k8s.io/v1beta1** API version of ClusterRole, ClusterRoleBinding, Role, and RoleBinding will no longer be served in v1.22.
+The **rbac.authorization.k8s.io/v1beta1** API version of ClusterRole, ClusterRoleBinding, Role, and RoleBinding is no longer served as of v1.22. * Migrate manifests and API clients to use the **rbac.authorization.k8s.io/v1** API version, available since v1.8. * All existing persisted objects are accessible via the new APIs @@ -197,7 +197,7 @@ The **rbac.authorization.k8s.io/v1beta1** API version of ClusterRole, ClusterRol #### PriorityClass {#priorityclass-v122} -The **scheduling.k8s.io/v1beta1** API version of PriorityClass will no longer be served in v1.22. +The **scheduling.k8s.io/v1beta1** API version of PriorityClass is no longer served as of v1.22. * Migrate manifests and API clients to use the **scheduling.k8s.io/v1** API version, available since v1.14. * All existing persisted objects are accessible via the new API @@ -205,7 +205,7 @@ The **scheduling.k8s.io/v1beta1** API version of PriorityClass will no longer be #### Storage resources {#storage-resources-v122} -The **storage.k8s.io/v1beta1** API version of CSIDriver, CSINode, StorageClass, and VolumeAttachment will no longer be served in v1.22. +The **storage.k8s.io/v1beta1** API version of CSIDriver, CSINode, StorageClass, and VolumeAttachment is no longer served as of v1.22. * Migrate manifests and API clients to use the **storage.k8s.io/v1** API version * CSIDriver is available in **storage.k8s.io/v1** since v1.19. diff --git a/content/en/docs/reference/using-api/health-checks.md b/content/en/docs/reference/using-api/health-checks.md index 2c315505db..e4ce50f30d 100644 --- a/content/en/docs/reference/using-api/health-checks.md +++ b/content/en/docs/reference/using-api/health-checks.md @@ -18,14 +18,14 @@ The Kubernetes API server provides 3 API endpoints (`healthz`, `livez` and `read The `healthz` endpoint is deprecated (since Kubernetes v1.16), and you should use the more specific `livez` and `readyz` endpoints instead. 
The `livez` endpoint can be used with the `--livez-grace-period` [flag](/docs/reference/command-line-tools-reference/kube-apiserver) to specify the startup duration. For a graceful shutdown you can specify the `--shutdown-delay-duration` [flag](/docs/reference/command-line-tools-reference/kube-apiserver) with the `/readyz` endpoint. -Machines that check the `health`/`livez`/`readyz` of the API server should rely on the HTTP status code. +Machines that check the `healthz`/`livez`/`readyz` of the API server should rely on the HTTP status code. A status code `200` indicates the API server is `healthy`/`live`/`ready`, depending of the called endpoint. The more verbose options shown below are intended to be used by human operators to debug their cluster or specially the state of the API server. The following examples will show how you can interact with the health API endpoints. For all endpoints you can use the `verbose` parameter to print out the checks and their status. -This can be useful for a human operator to debug the current status of the Api server, it is not intended to be consumed by a machine: +This can be useful for a human operator to debug the current status of the API server, it is not intended to be consumed by a machine: ```shell curl -k https://localhost:6443/livez?verbose @@ -93,7 +93,7 @@ The output show that the `etcd` check is excluded: {{< feature-state state="alpha" >}} -Each individual health check exposes an http endpoint and could can be checked individually. +Each individual health check exposes an HTTP endpoint and can be checked individually. The schema for the individual health checks is `/livez/` where `livez` and `readyz` and be used to indicate if you want to check the liveness or the readiness of the API server. The `` path can be discovered using the `verbose` flag from above and take the path between `[+]` and `ok`.
These individual health checks should not be consumed by machines but can be helpful for a human operator to debug a system: diff --git a/content/en/docs/reference/using-api/server-side-apply.md b/content/en/docs/reference/using-api/server-side-apply.md index 3d88413b50..60acb02f7d 100644 --- a/content/en/docs/reference/using-api/server-side-apply.md +++ b/content/en/docs/reference/using-api/server-side-apply.md @@ -12,7 +12,7 @@ min-kubernetes-server-version: 1.16 -{{< feature-state for_k8s_version="v1.16" state="beta" >}} +{{< feature-state for_k8s_version="v1.22" state="stable" >}} ## Introduction diff --git a/content/en/docs/setup/_index.md b/content/en/docs/setup/_index.md index 59db384258..bb73375553 100644 --- a/content/en/docs/setup/_index.md +++ b/content/en/docs/setup/_index.md @@ -3,11 +3,11 @@ reviewers: - brendandburns - erictune - mikedanese -no_issue: true title: Getting started main_menu: true weight: 20 content_type: concept +no_list: true card: name: setup weight: 20 @@ -24,16 +24,40 @@ This section lists the different ways to set up and run Kubernetes. When you install Kubernetes, choose an installation type based on: ease of maintenance, security, control, available resources, and expertise required to operate and manage a cluster. -You can deploy a Kubernetes cluster on a local machine, cloud, on-prem datacenter, or choose a managed Kubernetes cluster. There are also custom solutions across a wide range of cloud providers, or bare metal environments. +You can [download Kubernetes](/releases/download/) to deploy a Kubernetes cluster +on a local machine, into the cloud, or for your own datacenter. + +If you don't want to manage a Kubernetes cluster yourself, you could pick a managed service, including +[certified platforms](/docs/setup/production-environment/turnkey-solutions/). +There are also other standardized and custom solutions across a wide range of cloud and +bare metal environments. 
## Learning environment -If you're learning Kubernetes, use the tools supported by the Kubernetes community, or tools in the ecosystem to set up a Kubernetes cluster on a local machine. +If you're learning Kubernetes, use the tools supported by the Kubernetes community, +or tools in the ecosystem to set up a Kubernetes cluster on a local machine. +See [Install tools](/docs/tasks/tools/). ## Production environment -When evaluating a solution for a production environment, consider which aspects of operating a Kubernetes cluster (or _abstractions_) you want to manage yourself or offload to a provider. +When evaluating a solution for a +[production environment](/docs/setup/production-environment/), consider which aspects of +operating a Kubernetes cluster (or _abstractions_) you want to manage yourself and which you +prefer to hand off to a provider. -[Kubernetes Partners](https://kubernetes.io/partners/#conformance) includes a list of [Certified Kubernetes](https://github.com/cncf/k8s-conformance/#certified-kubernetes) providers. +For a cluster you're managing yourself, the officially supported tool +for deploying Kubernetes is [kubeadm](/docs/setup/production-environment/tools/kubeadm/). + +## {{% heading "whatsnext" %}} + +- [Download Kubernetes](/releases/download/) +- Download and [install tools](/docs/tasks/tools/) including `kubectl` +- Select a [container runtime](/docs/setup/production-environment/container-runtimes/) for your new cluster +- Learn about [best practices](/docs/setup/best-practices/) for cluster setup + +Kubernetes is designed for its {{< glossary_tooltip term_id="control-plane" text="control plane" >}} to +run on Linux. Within your cluster you can run applications on Linux or other operating systems, including +Windows. 
+- Learn to [set up clusters with Windows nodes](/docs/setup/production-environment/windows/) diff --git a/content/en/docs/setup/best-practices/cluster-large.md b/content/en/docs/setup/best-practices/cluster-large.md index 30e8128a19..0f7fb0552e 100644 --- a/content/en/docs/setup/best-practices/cluster-large.md +++ b/content/en/docs/setup/best-practices/cluster-large.md @@ -12,7 +12,7 @@ or virtual machines) running Kubernetes agents, managed by the Kubernetes {{< param "version" >}} supports clusters with up to 5000 nodes. More specifically, Kubernetes is designed to accommodate configurations that meet *all* of the following criteria: -* No more than 100 pods per node +* No more than 110 pods per node * No more than 5000 nodes * No more than 150000 total pods * No more than 300000 total containers @@ -124,3 +124,6 @@ components, including cluster-critical addons. The [cluster autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#readme) integrates with a number of cloud providers to help you run the right number of nodes for the level of resource demand in your cluster. + +The [addon resizer](https://github.com/kubernetes/autoscaler/tree/master/addon-resizer#readme) +helps you in resizing the addons automatically as your cluster's scale changes. \ No newline at end of file diff --git a/content/en/docs/setup/best-practices/enforcing-pod-security-standards.md b/content/en/docs/setup/best-practices/enforcing-pod-security-standards.md new file mode 100644 index 0000000000..2b7ca6144e --- /dev/null +++ b/content/en/docs/setup/best-practices/enforcing-pod-security-standards.md @@ -0,0 +1,78 @@ +--- +reviewers: +- tallclair +- liggitt +title: Enforcing Pod Security Standards +weight: 40 +--- + + + +This page provides an overview of best practices when it comes to enforcing +[Pod Security Standards](/docs/concepts/security/pod-security-standards). 
+ + + +## Using the built-in Pod Security Admission Controller + +{{< feature-state for_k8s_version="v1.22" state="alpha" >}} + +The [Pod Security Admission Controller](/docs/reference/access-authn-authz/admission-controllers/#podsecurity) +intends to replace the deprecated PodSecurityPolicies. + +### Configure all cluster namespaces + +Namespaces that lack any configuration at all should be considered significant gaps in your cluster +security model. We recommend taking the time to analyze the types of workloads occurring in each +namespace, and by referencing the Pod Security Standards, decide on an appropriate level for +each of them. Unlabeled namespaces should only indicate that they've yet to be evaluated. + +In the scenario that all workloads in all namespaces have the same security requirements, +we provide an [example](/docs/concepts/security/pod-security-admission/#applying-to-all-namespaces) +that illustrates how the PodSecurity labels can be applied in bulk. + +### Embrace the principle of least privilege + +In an ideal world, every pod in every namespace would meet the requirements of the `restricted` +policy. However, this is not possible nor practical, as some workloads will require elevated +privileges for legitimate reasons. + +- Namespaces allowing `privileged` workloads should establish and enforce appropriate access controls. +- For workloads running in those permissive namespaces, maintain documentation about their unique + security requirements. If at all possible, consider how those requirements could be further + constrained. + +### Adopt a multi-mode strategy + +The `audit` and `warn` modes of the Pod Security Standards admission controller make it easy to +collect important security insights about your pods without breaking existing workloads. + +It is good practice to enable these modes for all namespaces, setting them to the _desired_ level +and version you would eventually like to `enforce`. 
The warnings and audit annotations generated in +this phase can guide you toward that state. If you expect workload authors to make changes to fit +within the desired level, enable the `warn` mode. If you expect to use audit logs to monitor/drive +changes to fit within the desired level, enable the `audit` mode. + +When you have the `enforce` mode set to your desired value, these modes can still be useful in a +few different ways: + +- By setting `warn` to the same level as `enforce`, clients will receive warnings when attempting + to create Pods (or resources that have Pod templates) that do not pass validation. This will help + them update those resources to become compliant. +- In Namespaces that pin `enforce` to a specific non-latest version, setting the `audit` and `warn` + modes to the same level as `enforce`, but to the `latest` version, gives visibility into settings + that were allowed by previous versions but are not allowed per current best practices. + +## Third-party alternatives + +{{% thirdparty-content %}} + +Other alternatives for enforcing security profiles are being developed in the Kubernetes +ecosystem: + +- [OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper). + +The decision to go with a _built-in_ solution (e.g. PodSecurity admission controller) versus a +third-party tool is entirely dependent on your own situation. When evaluating any solution, +trust of your supply chain is crucial. Ultimately, using _any_ of the aforementioned approaches +will be better than doing nothing. 
\ No newline at end of file diff --git a/content/en/docs/setup/learning-environment/_index.md b/content/en/docs/setup/learning-environment/_index.md index 672bbd69ed..6abebc3976 100644 --- a/content/en/docs/setup/learning-environment/_index.md +++ b/content/en/docs/setup/learning-environment/_index.md @@ -11,7 +11,7 @@ weight: 20 {{/* If you're localizing this page, you only need to copy the front matter */}} {{/* and add a redirect into "/static/_redirects", for YOUR localization. */}} --> - + + diff --git a/content/en/docs/setup/production-environment/_index.md b/content/en/docs/setup/production-environment/_index.md index fc99c31a7d..1611170ec9 100644 --- a/content/en/docs/setup/production-environment/_index.md +++ b/content/en/docs/setup/production-environment/_index.md @@ -9,7 +9,7 @@ no_list: true A production-quality Kubernetes cluster requires planning and preparation. If your Kubernetes cluster is to run critical workloads, it must be configured to be resilient. This page explains steps you can take to set up a production-ready cluster, -or to uprate an existing cluster for production use. +or to promote an existing cluster for production use. If you're already familiar with production setup and want the links, skip to [What's next](#what-s-next). diff --git a/content/en/docs/setup/production-environment/container-runtimes.md b/content/en/docs/setup/production-environment/container-runtimes.md index 188a02673a..fa72d04ced 100644 --- a/content/en/docs/setup/production-environment/container-runtimes.md +++ b/content/en/docs/setup/production-environment/container-runtimes.md @@ -57,6 +57,38 @@ If you have automation that makes it feasible, replace the node with another usi configuration, or reinstall it using automation. {{< /caution >}} +## Cgroup v2 + +Cgroup v2 is the next version of the cgroup Linux API. Differently than cgroup v1, there is a single +hierarchy instead of a different one for each controller. 
+ +The new version offers several improvements over cgroup v1, some of these improvements are: + +- cleaner and easier to use API +- safe sub-tree delegation to containers +- newer features like Pressure Stall Information + +Even if the kernel supports a hybrid configuration where some controllers are managed by cgroup v1 +and some others by cgroup v2, Kubernetes supports only the same cgroup version to manage all the +controllers. + +If systemd doesn't use cgroup v2 by default, you can configure the system to use it by adding +`systemd.unified_cgroup_hierarchy=1` to the kernel command line. + +```shell +# dnf install -y grubby && \ + sudo grubby \ + --update-kernel=ALL \ + --args="systemd.unified_cgroup_hierarchy=1" +``` + +To apply the configuration, it is necessary to reboot the node. + +There should not be any noticeable difference in the user experience when switching to cgroup v2, unless +users are accessing the cgroup file system directly, either on the node or from within the containers. + +In order to use it, cgroup v2 must be supported by the CRI runtime as well. + ### Migrating to the `systemd` driver in kubeadm managed clusters Follow this [Migration guide](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/) diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md b/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md index 8a9828a7ad..58086c1eef 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md @@ -1,79 +1,108 @@ --- reviewers: - sig-cluster-lifecycle -title: Customizing control plane configuration with kubeadm +title: Customizing components with the kubeadm API content_type: concept weight: 40 --- +This page covers how to customize the components that kubeadm deploys. 
For control plane components +you can use flags in the `ClusterConfiguration` structure or patches per-node. For the kubelet +and kube-proxy you can use `KubeletConfiguration` and `KubeProxyConfiguration`, accordingly. + +All of these options are possible via the kubeadm configuration API. +For more details on each field in the configuration you can navigate to our +[API reference pages](/docs/reference/config-api/kubeadm-config.v1beta3/). + +{{< note >}} +Customizing the CoreDNS deployment of kubeadm is currently not supported. You must manually +patch the `kube-system/coredns` {{< glossary_tooltip text="ConfigMap" term_id="configmap" >}} +and recreate the CoreDNS {{< glossary_tooltip text="Pods" term_id="pod" >}} after that. Alternatively, +you can skip the default CoreDNS deployment and deploy your own variant. +For more details on that see [Using init phases with kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm-init/#init-phases). +{{< /note >}} + + + {{< feature-state for_k8s_version="v1.12" state="stable" >}} -The kubeadm `ClusterConfiguration` object exposes the field `extraArgs` that can override the default flags passed to control plane -components such as the APIServer, ControllerManager and Scheduler. The components are defined using the following fields: +## Customizing the control plane with flags in `ClusterConfiguration` + +The kubeadm `ClusterConfiguration` object exposes a way for users to override the default +flags passed to control plane components such as the APIServer, ControllerManager, Scheduler and Etcd. +The components are defined using the following structures: - `apiServer` - `controllerManager` - `scheduler` +- `etcd` -The `extraArgs` field consist of `key: value` pairs. To override a flag for a control plane component: +These structures contain a common `extraArgs` field, that consists of `key: value` pairs. +To override a flag for a control plane component: -1. Add the appropriate fields to your configuration. -2. 
Add the flags to override to the field. +1. Add the appropriate `extraArgs` to your configuration. +2. Add flags to the `extraArgs` field. 3. Run `kubeadm init` with `--config `. -For more details on each field in the configuration you can navigate to our -[API reference pages](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#ClusterConfiguration). - {{< note >}} -You can generate a `ClusterConfiguration` object with default values by running `kubeadm config print init-defaults` and saving the output to a file of your choice. +You can generate a `ClusterConfiguration` object with default values by running `kubeadm config print init-defaults` +and saving the output to a file of your choice. {{< /note >}} +{{< note >}} +The `ClusterConfiguration` object is currently global in kubeadm clusters. This means that any flags that you add, +will apply to all instances of the same component on different nodes. To apply individual configuration per component +on different nodes you can use [patches](#patches). +{{< /note >}} +{{< note >}} +Duplicate flags (keys), or passing the same flag `--foo` multiple times, is currently not supported. +To workaround that you must use [patches](#patches). +{{< /note >}} - - -## APIServer flags +### APIServer flags For details, see the [reference documentation for kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/). Example usage: + ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration kubernetesVersion: v1.16.0 apiServer: extraArgs: - advertise-address: 192.168.0.103 anonymous-auth: "false" enable-admission-plugins: AlwaysPullImages,DefaultStorageClass audit-log-path: /home/johndoe/audit.log ``` -## ControllerManager flags +### ControllerManager flags For details, see the [reference documentation for kube-controller-manager](/docs/reference/command-line-tools-reference/kube-controller-manager/). 
Example usage: + ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration kubernetesVersion: v1.16.0 controllerManager: extraArgs: cluster-signing-key-file: /home/johndoe/keys/ca.key - bind-address: 0.0.0.0 deployment-controller-sync-period: "50" ``` -## Scheduler flags +### Scheduler flags For details, see the [reference documentation for kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/). Example usage: + ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration kubernetesVersion: v1.16.0 scheduler: @@ -87,4 +116,95 @@ scheduler: pathType: "File" ``` +### Etcd flags +For details, see the [etcd server documentation](https://etcd.io/docs/). + +Example usage: + +```yaml +apiVersion: kubeadm.k8s.io/v1beta3 +kind: ClusterConfiguration +etcd: + local: + extraArgs: + election-timeout: 1000 +``` + +## Customizing the control plane with patches {#patches} + +{{< feature-state for_k8s_version="v1.22" state="beta" >}} + +Kubeadm allows you to pass a directory with patch files to `InitConfiguration` and `JoinConfiguration` +on individual nodes. These patches can be used as the last customization step before the control +plane component manifests are written to disk. + +You can pass this file to `kubeadm init` with `--config `: + +```yaml +apiVersion: kubeadm.k8s.io/v1beta3 +kind: InitConfiguration +nodeRegistration: + patches: + directory: /home/user/somedir +``` + +{{< note >}} +For `kubeadm init` you can pass a file containing both a `ClusterConfiguration` and `InitConfiguration` +separated by `---`. +{{< /note >}} + +You can pass this file to `kubeadm join` with `--config `: + +```yaml +apiVersion: kubeadm.k8s.io/v1beta3 +kind: JoinConfiguration +nodeRegistration: + patches: + directory: /home/user/somedir +``` + +The directory must contain files named `target[suffix][+patchtype].extension`. 
+For example, `kube-apiserver0+merge.yaml` or just `etcd.json`. + +- `target` can be one of `kube-apiserver`, `kube-controller-manager`, `kube-scheduler` and `etcd`. +- `patchtype` can be one of `strategic`, `merge` or `json` and these must match the patching formats +[supported by kubectl](/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch). +The default `patchtype` is `strategic`. +- `extension` must be either `json` or `yaml`. +- `suffix` is an optional string that can be used to determine which patches are applied first +alpha-numerically. + +{{< note >}} +If you are using `kubeadm upgrade` to upgrade your kubeadm nodes you must again provide the same +patches, so that the customization is preserved after upgrade. To do that you can use the `--patches` +flag, which must point to the same directory. `kubeadm upgrade` currently does not support a configuration +API structure that can be used for the same purpose. +{{< /note >}} + +## Customizing the kubelet + +To customize the kubelet you can add a `KubeletConfiguration` next to the `ClusterConfiguration` or +`InitConfiguration` separated by `---` within the same configuration file. This file can then be passed to `kubeadm init`. + +{{< note >}} +kubeadm applies the same `KubeletConfiguration` to all nodes in the cluster. To apply node +specific settings you can use kubelet flags as overrides by passing them in the `nodeRegistration.kubeletExtraArgs` +field supported by both `InitConfiguration` and `JoinConfiguration`. Some kubelet flags are deprecated, +so check their status in the [kubelet reference documentation](/docs/reference/command-line-tools-reference/kubelet) +before using them. 
+{{< /note >}} + +For more details see [Configuring each kubelet in your cluster using kubeadm](/docs/setup/production-environment/tools/kubeadm/kubelet-integration) + +## Customizing kube-proxy + +To customize kube-proxy you can pass a `KubeProxyConfiguration` next to your `ClusterConfiguration` or +`InitConfiguration` to `kubeadm init` separated by `---`. + +For more details you can navigate to our [API reference pages](/docs/reference/config-api/kubeadm-config.v1beta3/). + +{{< note >}} +kubeadm deploys kube-proxy as a {{< glossary_tooltip text="DaemonSet" term_id="daemonset" >}}, which means +that the `KubeProxyConfiguration` would apply to all instances of kube-proxy in the cluster. +{{< /note >}} diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index 0a394ad022..0c7d176c5f 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -8,9 +8,12 @@ weight: 30 -Using `kubeadm`, you can create a minimum viable Kubernetes cluster that conforms to best practices. In fact, you can use `kubeadm` to set up a cluster that will pass the [Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification). -`kubeadm` also supports other cluster -lifecycle functions, such as [bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/) and cluster upgrades. + +Using `kubeadm`, you can create a minimum viable Kubernetes cluster that conforms to best practices. +In fact, you can use `kubeadm` to set up a cluster that will pass the +[Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification). 
+`kubeadm` also supports other cluster lifecycle functions, such as +[bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/) and cluster upgrades. The `kubeadm` tool is good if you need: @@ -42,7 +45,8 @@ To follow this guide, you need: You also need to use a version of `kubeadm` that can deploy the version of Kubernetes that you want to use in your new cluster. -[Kubernetes' version and version skew support policy](/docs/setup/release/version-skew-policy/#supported-versions) applies to `kubeadm` as well as to Kubernetes overall. +[Kubernetes' version and version skew support policy](/docs/setup/release/version-skew-policy/#supported-versions) +applies to `kubeadm` as well as to Kubernetes overall. Check that policy to learn about what versions of Kubernetes and `kubeadm` are supported. This page is written for Kubernetes {{< param "version" >}}. @@ -97,7 +101,8 @@ a provider-specific value. See [Installing a Pod network add-on](#pod-network). 1. (Optional) Since version 1.14, `kubeadm` tries to detect the container runtime on Linux by using a list of well known domain socket paths. To use different container runtime or if there are more than one installed on the provisioned node, specify the `--cri-socket` -argument to `kubeadm init`. See [Installing runtime](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-runtime). +argument to `kubeadm init`. See +[Installing a runtime](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-runtime). 1. (Optional) Unless otherwise specified, `kubeadm` uses the network interface associated with the default gateway to set the advertise address for this particular control-plane node's API server. To use a different network interface, specify the `--apiserver-advertise-address=` argument @@ -139,9 +144,12 @@ is not supported by kubeadm. For more information about `kubeadm init` arguments, see the [kubeadm reference guide](/docs/reference/setup-tools/kubeadm/). 
-To configure `kubeadm init` with a configuration file see [Using kubeadm init with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file). +To configure `kubeadm init` with a configuration file see +[Using kubeadm init with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file). -To customize control plane components, including optional IPv6 assignment to liveness probe for control plane components and etcd server, provide extra arguments to each component as documented in [custom arguments](/docs/setup/production-environment/tools/kubeadm/control-plane-flags/). +To customize control plane components, including optional IPv6 assignment to liveness probe +for control plane components and etcd server, provide extra arguments to each component as documented in +[custom arguments](/docs/setup/production-environment/tools/kubeadm/control-plane-flags/). To run `kubeadm init` again, you must first [tear down the cluster](#tear-down). @@ -292,11 +300,13 @@ The nodes are where your workloads (containers and Pods, etc) run. To add new no * SSH to the machine * Become root (e.g. `sudo su -`) +* [Install a runtime](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-runtime) + if needed * Run the command that was output by `kubeadm init`. For example: -```bash -kubeadm join --token : --discovery-token-ca-cert-hash sha256: -``` + ```bash + kubeadm join --token : --discovery-token-ca-cert-hash sha256: + ``` If you do not have the token, you can get it by running the following command on the control-plane node: @@ -415,7 +425,7 @@ and make sure that the node is empty, then deconfigure the node. 
Talking to the control-plane node with the appropriate credentials, run: ```bash -kubectl drain --delete-local-data --force --ignore-daemonsets +kubectl drain --delete-emptydir-data --force --ignore-daemonsets ``` Before removing the node, reset the state installed by `kubeadm`: diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/dual-stack-support.md b/content/en/docs/setup/production-environment/tools/kubeadm/dual-stack-support.md index 767787179c..39930cca37 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/dual-stack-support.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/dual-stack-support.md @@ -21,7 +21,7 @@ For each server that you want to use as a {{< glossary_tooltip text="node" term_ You need to have an IPv4 and and IPv6 address range to use. Cluster operators typically use private address ranges for IPv4. For IPv6, a cluster operator typically chooses a global -unicast address block from within `2000::/3`, using a range that is assigned to the operator. +unicast address block from within `2000::/3`, using a range that is assigned to the operator. You don't have to route the cluster's IP address ranges to the public internet. The size of the IP address allocations should be suitable for the number of Pods and @@ -30,7 +30,7 @@ Services that you are planning to run. {{< note >}} If you are upgrading an existing cluster then, by default, the `kubeadm upgrade` command changes the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) -`IPv6DualStack` to `true` if that is not already enabled. +`IPv6DualStack` to `true` if that is not already enabled. However, `kubeadm` does not support making modifications to the pod IP address range (“cluster CIDR”) nor to the cluster's Service address range (“Service CIDR”). 
{{< /note >}} @@ -45,11 +45,11 @@ similar to the following example: kubeadm init --pod-network-cidr=10.244.0.0/16,2001:db8:42:0::/56 --service-cidr=10.96.0.0/16,2001:db8:42:1::/112 ``` -To make things clearer, here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) `kubeadm-config.yaml` for the primary dual-stack control plane node. +To make things clearer, here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3) `kubeadm-config.yaml` for the primary dual-stack control plane node. ```yaml --- -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration featureGates: IPv6DualStack: true @@ -57,7 +57,7 @@ networking: podSubnet: 10.244.0.0/16,2001:db8:42:0::/56 serviceSubnet: 10.96.0.0/16,2001:db8:42:1::/112 --- -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: InitConfiguration localAPIEndpoint: advertiseAddress: "10.100.0.1" @@ -85,10 +85,10 @@ The `--apiserver-advertise-address` flag does not support dual-stack. Before joining a node, make sure that the node has IPv6 routable network interface and allows IPv6 forwarding. -Here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) `kubeadm-config.yaml` for joining a worker node to the cluster. +Here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3) `kubeadm-config.yaml` for joining a worker node to the cluster. 
```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: JoinConfiguration discovery: bootstrapToken: @@ -98,9 +98,9 @@ nodeRegistration: node-ip: 10.100.0.3,fd00:1:2:3::3 ``` -Also, here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) `kubeadm-config.yaml` for joining another control plane node to the cluster. +Also, here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3) `kubeadm-config.yaml` for joining another control plane node to the cluster. ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: JoinConfiguration controlPlane: localAPIEndpoint: @@ -124,7 +124,7 @@ kubeadm join --config=kubeadm-config.yaml ... ### Create a single-stack cluster {{< note >}} -Enabling the dual-stack feature doesn't mean that you need to use dual-stack addressing. +Enabling the dual-stack feature doesn't mean that you need to use dual-stack addressing. You can deploy a single-stack cluster that has the dual-stack networking feature enabled. {{< /note >}} @@ -134,10 +134,10 @@ In 1.21 the `IPv6DualStack` feature is Beta and the feature gate is defaulted to kubeadm init --feature-gates IPv6DualStack=false ``` -To make things more clear, here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) `kubeadm-config.yaml` for the single-stack control plane node. +To make things more clear, here is an example kubeadm [configuration file](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3) `kubeadm-config.yaml` for the single-stack control plane node. 
```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration featureGates: IPv6DualStack: false @@ -150,3 +150,4 @@ networking: * [Validate IPv4/IPv6 dual-stack](/docs/tasks/network/validate-dual-stack) networking * Read about [Dual-stack](/docs/concepts/services-networking/dual-stack/) cluster networking +* Learn more about the kubeadm [configuration format](/docs/reference/config-api/kubeadm-config.v1beta3/) diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md index e387e2d41c..5206529f5c 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md @@ -115,7 +115,7 @@ option. Your cluster requirements may need a different configuration. {{< note >}} The `kubeadm init` flags `--config` and `--certificate-key` cannot be mixed, therefore if you want - to use the [kubeadm configuration](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) + to use the [kubeadm configuration](/docs/reference/config-api/kubeadm-config.v1beta3/) you must add the `certificateKey` field in the appropriate config locations (under `InitConfiguration` and `JoinConfiguration: controlPlane`). {{< /note >}} @@ -230,7 +230,7 @@ in the kubeadm config file. 1. 
Create a file called `kubeadm-config.yaml` with the following contents: - apiVersion: kubeadm.k8s.io/v1beta2 + apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration kubernetesVersion: stable controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index c997156827..24c5d4383d 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -11,7 +11,7 @@ card: This page shows how to install the `kubeadm` toolbox. -For information how to create a cluster with kubeadm once you have performed this installation process, see the [Using kubeadm to Create a Cluster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) page. +For information on how to create a cluster with kubeadm once you have performed this installation process, see the [Using kubeadm to Create a Cluster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) page. 
@@ -240,8 +240,9 @@ Install CNI plugins (required for most pod network): ```bash CNI_VERSION="v0.8.2" +ARCH="amd64" sudo mkdir -p /opt/cni/bin -curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | sudo tar -C /opt/cni/bin -xz +curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz" | sudo tar -C /opt/cni/bin -xz ``` Define the directory to download command files @@ -260,15 +261,17 @@ Install crictl (required for kubeadm / Kubelet Container Runtime Interface (CRI) ```bash CRICTL_VERSION="v1.17.0" -curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | sudo tar -C $DOWNLOAD_DIR -xz +ARCH="amd64" +curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${ARCH}.tar.gz" | sudo tar -C $DOWNLOAD_DIR -xz ``` Install `kubeadm`, `kubelet`, `kubectl` and add a `kubelet` systemd service: ```bash RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" +ARCH="amd64" cd $DOWNLOAD_DIR -sudo curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} +sudo curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/${ARCH}/{kubeadm,kubelet,kubectl} sudo chmod +x {kubeadm,kubelet,kubectl} RELEASE_VERSION="v0.4.0" @@ -314,4 +317,3 @@ If you are running into difficulties with kubeadm, please consult our [troublesh ## {{% heading "whatsnext" %}} * [Using kubeadm to Create a Cluster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) - diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md index 
9e95a69f47..0319ee17fc 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md @@ -95,7 +95,7 @@ this example. HOST=${ETCDHOSTS[$i]} NAME=${NAMES[$i]} cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml - apiVersion: "kubeadm.k8s.io/v1beta2" + apiVersion: "kubeadm.k8s.io/v1beta3" kind: ClusterConfiguration etcd: local: diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md index 5de8afd20b..8baf5e9dd6 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md @@ -89,23 +89,11 @@ If you notice that `kubeadm init` hangs after printing out the following line: This may be caused by a number of problems. The most common are: - network connection problems. Check that your machine has full network connectivity before continuing. -- the default cgroup driver configuration for the kubelet differs from that used by Docker. - Check the system log file (e.g. `/var/log/message`) or examine the output from `journalctl -u kubelet`. If you see something like the following: - - ```shell - error: failed to run Kubelet: failed to create kubelet: - misconfiguration: kubelet cgroup driver: "systemd" is different from docker cgroup driver: "cgroupfs" - ``` - - There are two common ways to fix the cgroup driver problem: - - 1. Install Docker again following instructions - [here](/docs/setup/production-environment/container-runtimes/#docker). - - 1. 
Change the kubelet config to match the Docker cgroup driver manually, you can refer to - [Configure cgroup driver used by kubelet on control-plane node](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node) - -- control plane Docker containers are crashlooping or hanging. You can check this by running `docker ps` and investigating each container by running `docker logs`. +- the cgroup driver of the container runtime differs from that of the kubelet. To understand how to +configure it properly see [Configuring a cgroup driver](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/). +- control plane containers are crashlooping or hanging. You can check this by running `docker ps` +and investigating each container by running `docker logs`. For other container runtime see +[Debugging Kubernetes nodes with crictl](/docs/tasks/debug-application-cluster/crictl/). ## kubeadm blocks when removing managed containers @@ -175,7 +163,7 @@ services](/docs/concepts/services-networking/service/#nodeport) or use `HostNetw ## Pods are not accessible via their Service IP -- Many network add-ons do not yet enable [hairpin mode](/docs/tasks/debug-application-cluster/debug-service/#a-pod-cannot-reach-itself-via-service-ip) +- Many network add-ons do not yet enable [hairpin mode](/docs/tasks/debug-application-cluster/debug-service/#a-pod-fails-to-reach-itself-via-the-service-ip) which allows pods to access themselves via their Service IP. This is an issue related to [CNI](https://github.com/containernetworking/cni/issues/476). Please contact the network add-on provider to get the latest status of their support for hairpin mode. 
@@ -220,6 +208,25 @@ Unable to connect to the server: x509: certificate signed by unknown authority ( sudo chown $(id -u):$(id -g) $HOME/.kube/config ``` +## Kubelet client certificate rotation fails {#kubelet-client-cert} + +By default, kubeadm configures a kubelet with automatic rotation of client certificates by using the `/var/lib/kubelet/pki/kubelet-client-current.pem` symlink specified in `/etc/kubernetes/kubelet.conf`. +If this rotation process fails you might see errors such as `x509: certificate has expired or is not yet valid` +in kube-apiserver logs. To fix the issue you must follow these steps: + +1. Back up and delete `/etc/kubernetes/kubelet.conf` and `/var/lib/kubelet/pki/kubelet-client*` from the failed node. +1. From a working control plane node in the cluster that has `/etc/kubernetes/pki/ca.key` execute +`kubeadm kubeconfig user --org system:nodes --client-name system:node:$NODE > kubelet.conf`. +`$NODE` must be set to the name of the existing failed node in the cluster. +Modify the resulting `kubelet.conf` manually to adjust the cluster name and server endpoint, +or pass `kubeconfig user --config` (it accepts `InitConfiguration`). If your cluster does not have +the `ca.key` you must sign the embedded certificates in the `kubelet.conf` externally. +1. Copy this resulting `kubelet.conf` to `/etc/kubernetes/kubelet.conf` on the failed node. +1. Restart the kubelet (`systemctl restart kubelet`) on the failed node and wait for +`/var/lib/kubelet/pki/kubelet-client-current.pem` to be recreated. +1. Run `kubeadm init phase kubelet-finalize all` on the failed node. This will make the new +`kubelet.conf` file use `/var/lib/kubelet/pki/kubelet-client-current.pem` and will restart the kubelet. +1. Make sure the node becomes `Ready`. 
## Default NIC When using flannel as the pod network in Vagrant The following error might indicate that something was wrong in the pod network: @@ -251,7 +258,12 @@ Error from server: Get https://10.19.0.41:10250/containerLogs/default/mysql-ddc6 curl http://169.254.169.254/metadata/v1/interfaces/public/0/anchor_ipv4/address ``` - The workaround is to tell `kubelet` which IP to use using `--node-ip`. When using DigitalOcean, it can be the public one (assigned to `eth0`) or the private one (assigned to `eth1`) should you want to use the optional private network. The [`KubeletExtraArgs` section of the kubeadm `NodeRegistrationOptions` structure](https://github.com/kubernetes/kubernetes/blob/release-1.13/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go) can be used for this. + The workaround is to tell `kubelet` which IP to use using `--node-ip`. + When using DigitalOcean, it can be the public one (assigned to `eth0`) or + the private one (assigned to `eth1`) should you want to use the optional + private network. The `kubeletExtraArgs` section of the kubeadm + [`NodeRegistrationOptions` structure](/docs/reference/config-api/kubeadm-config.v1beta2/#kubeadm-k8s-io-v1beta2-NodeRegistrationOptions) + can be used for this. Then restart `kubelet`: @@ -324,7 +336,7 @@ Alternatively, you can try separating the `key=value` pairs like so: `--apiserver-extra-args "enable-admission-plugins=LimitRanger,enable-admission-plugins=NamespaceExists"` but this will result in the key `enable-admission-plugins` only having the value of `NamespaceExists`. -A known workaround is to use the kubeadm [configuration file](/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#apiserver-flags). +A known workaround is to use the kubeadm [configuration file](/docs/reference/config-api/kubeadm-config.v1beta2/). 
## kube-proxy scheduled before node is initialized by cloud-controller-manager @@ -348,23 +360,6 @@ kubectl -n kube-system patch ds kube-proxy -p='{ "spec": { "template": { "spec": The tracking issue for this problem is [here](https://github.com/kubernetes/kubeadm/issues/1027). -## The NodeRegistration.Taints field is omitted when marshalling kubeadm configuration - -*Note: This [issue](https://github.com/kubernetes/kubeadm/issues/1358) only applies to tools that marshal kubeadm types (e.g. to a YAML configuration file). It will be fixed in kubeadm API v1beta2.* - -By default, kubeadm applies the `node-role.kubernetes.io/master:NoSchedule` taint to control-plane nodes. -If you prefer kubeadm to not taint the control-plane node, and set `InitConfiguration.NodeRegistration.Taints` to an empty slice, -the field will be omitted when marshalling. When the field is omitted, kubeadm applies the default taint. - -There are at least two workarounds: - -1. Use the `node-role.kubernetes.io/master:PreferNoSchedule` taint instead of an empty slice. [Pods will get scheduled on masters](/docs/concepts/scheduling-eviction/taint-and-toleration/), unless other nodes have capacity. - -2. Remove the taint after kubeadm init exits: -```bash -kubectl taint nodes NODE_NAME node-role.kubernetes.io/master:NoSchedule- -``` - ## `/usr` is mounted read-only on nodes {#usr-mounted-read-only} On Linux distributions such as Fedora CoreOS or Flatcar Container Linux, the directory `/usr` is mounted as a read-only filesystem. @@ -374,19 +369,19 @@ Kubernetes components like the kubelet and kube-controller-manager use the defau for the feature to work. To workaround this issue you can configure the flex-volume directory using the kubeadm -[configuration file](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2). +[configuration file](/docs/reference/config-api/kubeadm-config.v1beta3/). 
On the primary control-plane Node (created using `kubeadm init`) pass the following file using `--config`: ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: InitConfiguration nodeRegistration: kubeletExtraArgs: volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/" --- -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration controllerManager: extraArgs: @@ -396,7 +391,7 @@ controllerManager: On joining Nodes: ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: JoinConfiguration nodeRegistration: kubeletExtraArgs: diff --git a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index 587b34b58f..c3757824c5 100644 --- a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -67,10 +67,9 @@ nodes and Linux containers on Linux nodes. | Kubernetes version | Windows Server LTSC releases | Windows Server SAC releases | | --- | --- | --- | --- | -| *Kubernetes v1.19* | Windows Server 2019 | Windows Server ver 1909, Windows Server ver 2004 | | *Kubernetes v1.20* | Windows Server 2019 | Windows Server ver 1909, Windows Server ver 2004 | | *Kubernetes v1.21* | Windows Server 2019 | Windows Server ver 2004, Windows Server ver 20H2 | - +| *Kubernetes v1.22* | Windows Server 2019 | Windows Server ver 2004, Windows Server ver 20H2 | Information on the different Windows Server servicing channels including their support models can be found at @@ -100,8 +99,14 @@ limitation and compatibility rules will change. #### Pause Image -Microsoft maintains a Windows pause infrastructure container at -`mcr.microsoft.com/oss/kubernetes/pause:3.4.1`. 
+Kubernetes maintains a multi-architecture image that includes support for Windows. +For Kubernetes v1.22 the recommended pause image is `k8s.gcr.io/pause:3.5`. +The [source code](https://github.com/kubernetes/kubernetes/tree/master/build/pause) +is available on GitHub. + +Microsoft maintains a multi-architecture image with Linux and Windows amd64 support at `mcr.microsoft.com/oss/kubernetes/pause:3.5`. +This image is built from the same source as the Kubernetes maintained image but all of the Windows binaries are [authenticode signed](https://docs.microsoft.com/en-us/windows-hardware/drivers/install/authenticode) by Microsoft. +The Microsoft maintained image is recommended for production environments when signed binaries are required. #### Compute @@ -236,7 +241,7 @@ deployed as powershell scripts on the host, support Windows nodes: ##### CSI Plugins -{{< feature-state for_k8s_version="v1.19" state="beta" >}} +{{< feature-state for_k8s_version="v1.22" state="stable" >}} Code associated with {{< glossary_tooltip text="CSI" term_id="csi" >}} plugins ship as out-of-tree scripts and binaries that are typically distributed as @@ -245,21 +250,15 @@ DaemonSets and StatefulSets. CSI plugins handle a wide range of volume management actions in Kubernetes: provisioning/de-provisioning/resizing of volumes, attaching/detaching of volumes to/from a Kubernetes node and mounting/dismounting a volume to/from individual containers in a pod, -backup/restore of persistent data using snapshots and cloning. CSI plugins -typically consist of node plugins (that run on each node as a DaemonSet) and -controller plugins. +backup/restore of persistent data using snapshots and cloning. -CSI node plugins (especially those associated with persistent volumes exposed -as either block devices or over a shared file-system) need to perform various -privileged operations like scanning of disk devices, mounting of file systems, -etc. These operations differ for each host operating system. 
For Linux worker -nodes, containerized CSI node plugins are typically deployed as privileged -containers. For Windows worker nodes, privileged operations for containerized -CSI node plugins is supported using -[csi-proxy](https://github.com/kubernetes-csi/csi-proxy), a community-managed, -stand-alone binary that needs to be pre-installed on each Windows node. Please -refer to the deployment guide of the CSI plugin you wish to deploy for further -details. +CSI plugins communicate with a CSI node plugin which performs the local storage operations. +On Windows nodes CSI node plugins typically call APIs exposed by the community-managed +[csi-proxy](https://github.com/kubernetes-csi/csi-proxy) which handles the local storage operations. + +Please refer to the deployment guide of the environment where you wish to deploy a Windows CSI plugin +for further details around installation. +You may also refer to the following [installation steps](https://github.com/kubernetes-csi/csi-proxy#installation). #### Networking @@ -1068,9 +1067,8 @@ contributors. 
Follow the instructions in the SIG-Windows Register kubelet.exe: ```powershell - # Microsoft releases the pause infrastructure container at mcr.microsoft.com/oss/kubernetes/pause:3.4.1 nssm install kubelet C:\k\kubelet.exe - nssm set kubelet AppParameters --hostname-override= --v=6 --pod-infra-container-image=mcr.microsoft.com/oss/kubernetes/pause:3.4.1 --resolv-conf="" --allow-privileged=true --enable-debugging-handlers --cluster-dns= --cluster-domain=cluster.local --kubeconfig=c:\k\config --hairpin-mode=promiscuous-bridge --image-pull-progress-deadline=20m --cgroups-per-qos=false --log-dir= --logtostderr=false --enforce-node-allocatable="" --network-plugin=cni --cni-bin-dir=c:\k\cni --cni-conf-dir=c:\k\cni\config + nssm set kubelet AppParameters --hostname-override= --v=6 --pod-infra-container-image=k8s.gcr.io/pause:3.5 --resolv-conf="" --allow-privileged=true --enable-debugging-handlers --cluster-dns= --cluster-domain=cluster.local --kubeconfig=c:\k\config --hairpin-mode=promiscuous-bridge --image-pull-progress-deadline=20m --cgroups-per-qos=false --log-dir= --logtostderr=false --enforce-node-allocatable="" --network-plugin=cni --cni-bin-dir=c:\k\cni --cni-conf-dir=c:\k\cni\config nssm set kubelet AppDirectory C:\k nssm start kubelet ``` @@ -1239,11 +1237,11 @@ contributors. Follow the instructions in the SIG-Windows * `kubectl port-forward` fails with "unable to do port forwarding: wincat not found" - This was implemented in Kubernetes 1.15 by including wincat.exe in the - pause infrastructure container `mcr.microsoft.com/oss/kubernetes/pause:3.4.1`. - Be sure to use these versions or newer ones. If you would like to build your - own pause infrastructure container be sure to include - [wincat](https://github.com/kubernetes-sigs/sig-windows-tools/tree/master/cmd/wincat). + Port forwarding support for Windows requires wincat.exe to be available in the + [pause infrastructure container](#pause-image). 
+ Ensure you are using a supported image that is compatible with your Windows OS version. + If you would like to build your own pause infrastructure container be sure to include + [wincat](https://github.com/kubernetes/kubernetes/tree/master/build/pause/windows/wincat). * My Kubernetes installation is failing because my Windows Server node is behind a proxy @@ -1265,10 +1263,8 @@ contributors. Follow the instructions in the SIG-Windows to accommodate worker containers crashing or restarting without losing any of the networking configuration. - The "pause" (infrastructure) image is hosted on Microsoft Container Registry - (MCR). You can access it using `mcr.microsoft.com/oss/kubernetes/pause:3.4.1`. - For more details, see the - [DOCKERFILE](https://github.com/kubernetes-sigs/windows-testing/blob/master/images/pause/Dockerfile). + Refer to the [pause image](#pause-image) section to find the recommended version + of the pause image. ### Further investigation @@ -1337,4 +1333,3 @@ guide is available [here](/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes/). We are also making investments in cluster API to ensure Windows nodes are properly provisioned. 
- diff --git a/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md b/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md index 7ddf5397ae..ec47f5637a 100644 --- a/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md +++ b/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md @@ -26,7 +26,7 @@ This guide walks you through the steps to configure and deploy a Windows contain ## Before you begin * Create a Kubernetes cluster that includes a -[master and a worker node running Windows Server](/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes) +control plane and a [worker node running Windows Server](/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes/) * It is important to note that creating and deploying services and workloads on Kubernetes behaves in much the same way for Linux and Windows containers. [Kubectl commands](/docs/reference/kubectl/overview/) to interface with the cluster are identical. @@ -105,15 +105,15 @@ the container port 80 is exposed directly to the service. 1. Check that the deployment succeeded. 
To verify: * Two containers per pod on the Windows node, use `docker ps` - * Two pods listed from the Linux master, use `kubectl get pods` - * Node-to-pod communication across the network, `curl` port 80 of your pod IPs from the Linux master + * Two pods listed from the Linux control plane node, use `kubectl get pods` + * Node-to-pod communication across the network, `curl` port 80 of your pod IPs from the Linux control plane node to check for a web server response * Pod-to-pod communication, ping between pods (and across hosts, if you have more than one Windows node) using docker exec or kubectl exec * Service-to-pod communication, `curl` the virtual service IP (seen under `kubectl get services`) - from the Linux master and from individual pods + from the Linux control plane node and from individual pods * Service discovery, `curl` the service name with the Kubernetes [default DNS suffix](/docs/concepts/services-networking/dns-pod-service/#services) - * Inbound connectivity, `curl` the NodePort from the Linux master or machines outside of the cluster + * Inbound connectivity, `curl` the NodePort from the Linux control plane node or machines outside of the cluster * Outbound connectivity, `curl` external IPs from inside the pod using kubectl exec {{< note >}} @@ -184,7 +184,7 @@ For example: `--register-with-taints='os=windows:NoSchedule'` By adding a taint to all Windows nodes, nothing will be scheduled on them (that includes existing Linux Pods). In order for a Windows Pod to be scheduled on a Windows node, -it would need both the nodeSelector to choose Windows, and the appropriate matching toleration. +it would need both the nodeSelector and the appropriate matching toleration to choose Windows. 
```yaml nodeSelector: diff --git a/content/en/docs/tasks/access-application-cluster/_index.md b/content/en/docs/tasks/access-application-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md b/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md index e32a6ba02c..8b79d7042f 100644 --- a/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md +++ b/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md @@ -7,7 +7,6 @@ card: weight: 40 --- - This page shows how to configure access to multiple clusters by using @@ -22,19 +21,21 @@ It does not mean that there is a file named `kubeconfig`. {{< /note >}} +{{< warning >}} +Only use kubeconfig files from trusted sources. Using a specially-crafted kubeconfig file could result in malicious code execution or file exposure. +If you must use an untrusted kubeconfig file, inspect it carefully first, much as you would a shell script. +{{< /warning>}} + ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} To check that {{< glossary_tooltip text="kubectl" term_id="kubectl" >}} is installed, run `kubectl version --client`. The kubectl version should be -[within one minor version](/docs/setup/release/version-skew-policy/#kubectl) of your +[within one minor version](/releases/version-skew-policy/#kubectl) of your cluster's API server. - - ## Define clusters, users, and contexts @@ -186,7 +187,7 @@ kubectl config --kubeconfig=config-demo view --minify The output shows configuration information associated with the `dev-frontend` context: -```shell +```yaml apiVersion: v1 clusters: - cluster: @@ -238,7 +239,6 @@ kubectl config --kubeconfig=config-demo use-context dev-storage View configuration associated with the new current context, `dev-storage`. 
- ```shell kubectl config --kubeconfig=config-demo view --minify ``` @@ -247,7 +247,7 @@ kubectl config --kubeconfig=config-demo view --minify In your `config-exercise` directory, create a file named `config-demo-2` with this content: -```shell +```yaml apiVersion: v1 kind: Config preferences: {} @@ -269,13 +269,17 @@ current value of your `KUBECONFIG` environment variable, so you can restore it l For example: ### Linux + ```shell export KUBECONFIG_SAVED=$KUBECONFIG ``` + ### Windows PowerShell -```shell + +```powershell $Env:KUBECONFIG_SAVED=$ENV:KUBECONFIG ``` + The `KUBECONFIG` environment variable is a list of paths to configuration files. The list is colon-delimited for Linux and Mac, and semicolon-delimited for Windows. If you have a `KUBECONFIG` environment variable, familiarize yourself with the configuration files @@ -284,11 +288,14 @@ in the list. Temporarily append two paths to your `KUBECONFIG` environment variable. For example: ### Linux + ```shell export KUBECONFIG=$KUBECONFIG:config-demo:config-demo-2 ``` + ### Windows PowerShell -```shell + +```powershell $Env:KUBECONFIG=("config-demo;config-demo-2") ``` @@ -303,7 +310,7 @@ environment variable. In particular, notice that the merged information has the `dev-ramp-up` context from the `config-demo-2` file and the three contexts from the `config-demo` file: -```shell +```yaml contexts: - context: cluster: development @@ -347,11 +354,14 @@ If you have a `$HOME/.kube/config` file, and it's not already listed in your For example: ### Linux + ```shell export KUBECONFIG=$KUBECONFIG:$HOME/.kube/config ``` + ### Windows Powershell -```shell + +```powershell $Env:KUBECONFIG="$Env:KUBECONFIG;$HOME\.kube\config" ``` @@ -367,23 +377,19 @@ kubectl config view Return your `KUBECONFIG` environment variable to its original value. For example:
### Linux + ```shell export KUBECONFIG=$KUBECONFIG_SAVED ``` + ### Windows PowerShell -```shell + +```powershell $Env:KUBECONFIG=$ENV:KUBECONFIG_SAVED ``` - - ## {{% heading "whatsnext" %}} - * [Organizing Cluster Access Using kubeconfig Files](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) * [kubectl config](/docs/reference/generated/kubectl/kubectl-commands#config) - - - - diff --git a/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md b/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md index 7dcc613232..23f6f91a2c 100644 --- a/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md +++ b/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md @@ -4,47 +4,44 @@ content_type: task weight: 80 --- - -This page shows how to create an External Load Balancer. +This page shows how to create an external load balancer. -{{< note >}} -This feature is only available for cloud providers or environments which support external load balancers. -{{< /note >}} - -When creating a service, you have the option of automatically creating a -cloud network load balancer. This provides an externally-accessible IP address -that sends traffic to the correct port on your cluster nodes +When creating a {{< glossary_tooltip text="Service" term_id="service" >}}, you have +the option of automatically creating a cloud load balancer. This provides an +externally-accessible IP address that sends traffic to the correct port on your cluster +nodes, _provided your cluster runs in a supported environment and is configured with the correct cloud load balancer provider package_. 
-For information on provisioning and using an Ingress resource that can give -services externally-reachable URLs, load balance the traffic, terminate SSL etc., -please check the [Ingress](/docs/concepts/services-networking/ingress/) +You can also use an {{< glossary_tooltip term_id="ingress" >}} in place of Service. +For more information, check the [Ingress](/docs/concepts/services-networking/ingress/) documentation. - - ## {{% heading "prerequisites" %}} -* {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} +{{< include "task-tutorial-prereqs.md" >}} +Your cluster must be running in a cloud or other environment that already has support +for configuring external load balancers. -## Configuration file +## Create a Service + +### Create a Service from a manifest To create an external load balancer, add the following line to your -[service configuration file](/docs/concepts/services-networking/service/#loadbalancer): +Service manifest: ```yaml type: LoadBalancer ``` -Your configuration file might look like: +Your manifest might then look like: ```yaml apiVersion: v1 @@ -60,19 +57,19 @@ spec: type: LoadBalancer ``` -## Using kubectl +### Create a Service using kubectl You can alternatively create the service with the `kubectl expose` command and its `--type=LoadBalancer` flag: ```bash -kubectl expose rc example --port=8765 --target-port=9376 \ +kubectl expose deployment example --port=8765 --target-port=9376 \ --name=example-service --type=LoadBalancer ``` -This command creates a new service using the same selectors as the referenced -resource (in the case of the example above, a replication controller named -`example`). +This command creates a new Service using the same selectors as the referenced +resource (in the case of the example above, a +{{< glossary_tooltip text="Deployment" term_id="deployment" >}} named `example`). 
For more information, including optional flags, refer to the [`kubectl expose` reference](/docs/reference/generated/kubectl/kubectl-commands/#expose). @@ -86,59 +83,63 @@ information through `kubectl`: kubectl describe services example-service ``` -which should produce output like this: +which should produce output similar to: -```bash - Name: example-service - Namespace: default - Labels: - Annotations: - Selector: app=example - Type: LoadBalancer - IP: 10.67.252.103 - LoadBalancer Ingress: 192.0.2.89 - Port: 80/TCP - NodePort: 32445/TCP - Endpoints: 10.64.0.4:80,10.64.1.5:80,10.64.2.4:80 - Session Affinity: None - Events: +``` +Name: example-service +Namespace: default +Labels: app=example +Annotations: +Selector: app=example +Type: LoadBalancer +IP Families: +IP: 10.3.22.96 +IPs: 10.3.22.96 +LoadBalancer Ingress: 192.0.2.89 +Port: 8765/TCP +TargetPort: 9376/TCP +NodePort: 30593/TCP +Endpoints: 172.17.0.3:9376 +Session Affinity: None +External Traffic Policy: Cluster +Events: ``` -The IP address is listed next to `LoadBalancer Ingress`. +The load balancer's IP address is listed next to `LoadBalancer Ingress`. {{< note >}} If you are running your service on Minikube, you can find the assigned IP address and port with: -{{< /note >}} ```bash minikube service example-service --url ``` +{{< /note >}} ## Preserving the client source IP -Due to the implementation of this feature, the source IP seen in the target -container is *not the original source IP* of the client. To enable -preservation of the client IP, the following fields can be configured in the -service spec (supported in GCE/Google Kubernetes Engine environments): +By default, the source IP seen in the target container is *not the original +source IP* of the client. 
To enable preservation of the client IP, the following +fields can be configured in the `.spec` of the Service: -* `service.spec.externalTrafficPolicy` - denotes if this Service desires to route -external traffic to node-local or cluster-wide endpoints. There are two available -options: Cluster (default) and Local. Cluster obscures the client source -IP and may cause a second hop to another node, but should have good overall -load-spreading. Local preserves the client source IP and avoids a second hop -for LoadBalancer and NodePort type services, but risks potentially imbalanced -traffic spreading. -* `service.spec.healthCheckNodePort` - specifies the health check node port -(numeric port number) for the service. If `healthCheckNodePort` isn't specified, -the service controller allocates a port from your cluster's NodePort range. You -can configure that range by setting an API server command line option, -`--service-node-port-range`. It will use the -user-specified `healthCheckNodePort` value if specified by the client. It only has an -effect when `type` is set to LoadBalancer and `externalTrafficPolicy` is set -to Local. +* `.spec.externalTrafficPolicy` - denotes if this Service desires to route + external traffic to node-local or cluster-wide endpoints. There are two available + options: `Cluster` (default) and `Local`. `Cluster` obscures the client source + IP and may cause a second hop to another node, but should have good overall + load-spreading. `Local` preserves the client source IP and avoids a second hop + for LoadBalancer and NodePort type Services, but risks potentially imbalanced + traffic spreading. +* `.spec.healthCheckNodePort` - specifies the health check node port + (numeric port number) for the service. If you don't specify + `healthCheckNodePort`, the service controller allocates a port from your + cluster's NodePort range. + You can configure that range by setting an API server command line option, + `--service-node-port-range`. 
The Service will use the user-specified + `healthCheckNodePort` value if you specify it, provided that the + Service `type` is set to LoadBalancer and `externalTrafficPolicy` is set + to `Local`. -Setting `externalTrafficPolicy` to Local in the Service configuration file -activates this feature. +Setting `externalTrafficPolicy` to Local in the Service manifest +activates this feature. For example: ```yaml apiVersion: v1 @@ -155,7 +156,20 @@ spec: type: LoadBalancer ``` -## Garbage Collecting Load Balancers +### Caveats and limitations when preserving source IPs + +Load balancing services from some cloud providers do not let you configure different weights for each target. + +With each target weighted equally in terms of sending traffic to Nodes, external +traffic is not equally load balanced across different Pods. The external load balancer +is unaware of the number of Pods on each node that are used as a target. + +Where `NumServicePods << NumNodes` or `NumServicePods >> NumNodes`, a fairly close-to-equal +distribution will be seen, even without weights. + +Internal pod to pod traffic should behave similarly to ClusterIP services, with equal probability across all pods. + +## Garbage collecting load balancers {{< feature-state for_k8s_version="v1.17" state="stable" >}} @@ -172,32 +186,18 @@ The finalizer will only be removed after the load balancer resource is cleaned u This prevents dangling load balancer resources even in corner cases such as the service controller crashing. -## External Load Balancer Providers +## External load balancer providers It is important to note that the datapath for this functionality is provided by a load balancer external to the Kubernetes cluster. When the Service `type` is set to LoadBalancer, Kubernetes provides functionality equivalent to `type` equals ClusterIP to pods -within the cluster and extends it by programming the (external to Kubernetes) load balancer with entries for the Kubernetes -pods. 
The Kubernetes service controller automates the creation of the external load balancer, health checks (if needed), -firewall rules (if needed) and retrieves the external IP allocated by the cloud provider and populates it in the service -object. - -## Caveats and Limitations when preserving source IPs - -GCE/AWS load balancers do not provide weights for their target pools. This was not an issue with the old LB -kube-proxy rules which would correctly balance across all endpoints. - -With the new functionality, the external traffic is not equally load balanced across pods, but rather -equally balanced at the node level (because GCE/AWS and other external LB implementations do not have the ability -for specifying the weight per node, they balance equally across all target nodes, disregarding the number of -pods on each node). - -We can, however, state that for NumServicePods << NumNodes or NumServicePods >> NumNodes, a fairly close-to-equal -distribution will be seen, even without weights. - -Once the external load balancers provide weights, this functionality can be added to the LB programming path. -*Future Work: No support for weights is provided for the 1.4 release, but may be added at a future date* - -Internal pod to pod traffic should behave similar to ClusterIP services, with equal probability across all pods. +within the cluster and extends it by programming the (external to Kubernetes) load balancer with entries for the nodes +hosting the relevant Kubernetes pods. The Kubernetes control plane automates the creation of the external load balancer, +health checks (if needed), and packet filtering rules (if needed). Once the cloud provider allocates an IP address for the load +balancer, the control plane looks up that external IP address and populates it into the Service object. 
+## {{% heading "whatsnext" %}} +* Read about [Service](/docs/concepts/services-networking/service/) +* Read about [Ingress](/docs/concepts/services-networking/ingress/) +* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) diff --git a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md index ce7d4d4dde..65249e8fc8 100644 --- a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md +++ b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md @@ -44,6 +44,7 @@ This page shows you how to set up a simple Ingress which routes requests to Serv 1. Verify that the NGINX Ingress controller is running + {{< tabs name="tab_with_md" >}} {{% tab name="minikube v1.19 or later" %}} ```shell @@ -83,6 +84,22 @@ storage-provisioner 1/1 Running 0 2m + ```shell + kubectl get pods -n ingress-nginx + ``` + + {{< note >}}This can take up to a minute.{{< /note >}} + + Output: + + ```shell + NAME READY STATUS RESTARTS AGE + ingress-nginx-admission-create-2tgrf 0/1 Completed 0 3m28s + ingress-nginx-admission-patch-68b98 0/1 Completed 0 3m28s + ingress-nginx-controller-59b45fb494-lzmw2 1/1 Running 0 3m28s + ``` + + ## Deploy a hello, world app 1. Create a Deployment using the following command: diff --git a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md index a0b4d78dab..ba8f7b1244 100644 --- a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md @@ -31,7 +31,7 @@ for database debugging. 1. 
Create a Deployment that runs MongoDB: ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-deployment.yaml + kubectl apply -f https://k8s.io/examples/application/mongodb/mongo-deployment.yaml ``` The output of a successful command verifies that the deployment was created: @@ -84,7 +84,7 @@ for database debugging. 2. Create a Service to expose MongoDB on the network: ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-service.yaml + kubectl apply -f https://k8s.io/examples/application/mongodb/mongo-service.yaml ``` The output of a successful command verifies that the Service was created: diff --git a/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md b/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md index 5c402e0304..0182bbad73 100644 --- a/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md +++ b/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md @@ -2,8 +2,9 @@ reviewers: - bryk - mikedanese -- rf232 -title: Web UI (Dashboard) +title: Deploy and Access the Kubernetes Dashboard +description: >- + Deploy the web UI (Kubernetes Dashboard) and access it. content_type: concept weight: 10 card: @@ -34,7 +35,7 @@ Dashboard also provides information on the state of Kubernetes resources in your The Dashboard UI is not deployed by default. 
To deploy it, run the following command: ``` -kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.2.0/aio/deploy/recommended.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml ``` ## Accessing the Dashboard UI @@ -49,7 +50,9 @@ The sample user created in the tutorial will have administrative privileges and {{< /warning >}} ### Command line proxy -You can access Dashboard using the kubectl command-line tool by running the following command: + +You can enable access to the Dashboard using the `kubectl` command-line tool, +by running the following command: ``` kubectl proxy @@ -60,7 +63,8 @@ Kubectl will make Dashboard available at [http://localhost:8001/api/v1/namespace The UI can _only_ be accessed from the machine where the command is executed. See `kubectl proxy --help` for more options. {{< note >}} -Kubeconfig Authentication method does NOT support external identity providers or x509 certificate-based authentication. +The kubeconfig authentication method does **not** support external identity providers +or X.509 certificate-based authentication. {{< /note >}} ## Welcome view @@ -75,7 +79,7 @@ In addition, you can view which system applications are running by default in th ## Deploying containerized applications Dashboard lets you create and deploy a containerized application as a Deployment and optional Service with a simple wizard. -You can either manually specify application details, or upload a YAML or JSON file containing application configuration. +You can either manually specify application details, or upload a YAML or JSON _manifest_ file containing application configuration. Click the **CREATE** button in the upper right corner of any page to begin. @@ -186,13 +190,14 @@ If needed, you can expand the **Advanced options** section where you can specify ### Uploading a YAML or JSON file Kubernetes supports declarative configuration. 
-In this style, all configuration is stored in YAML or JSON configuration files -using the Kubernetes [API](/docs/concepts/overview/kubernetes-api/) resource schemas. +In this style, all configuration is stored in manifests (YAML or JSON configuration files). +The manifests use Kubernetes [API](/docs/concepts/overview/kubernetes-api/) resource schemas. As an alternative to specifying application details in the deploy wizard, -you can define your application in YAML or JSON files, and upload the files using Dashboard. +you can define your application in one or more manifests, and upload the files using Dashboard. ## Using Dashboard + Following sections describe views of the Kubernetes Dashboard UI; what they provide and how can they be used. ### Navigation @@ -203,8 +208,9 @@ this can be changed using the namespace selector located in the navigation menu. Dashboard shows most Kubernetes object kinds and groups them in a few menu categories. -#### Admin Overview -For cluster and namespace administrators, Dashboard lists Nodes, Namespaces and Persistent Volumes and has detail views for them. +#### Admin overview + +For cluster and namespace administrators, Dashboard lists Nodes, Namespaces and PersistentVolumes and has detail views for them. Node list view contains CPU and memory usage metrics aggregated across all Nodes. The details view shows the metrics for a Node, its specification, status, allocated resources, events and pods running on the node. @@ -212,14 +218,14 @@ allocated resources, events and pods running on the node. #### Workloads Shows all applications running in the selected namespace. -The view lists applications by workload kind (e.g., Deployments, Replica Sets, Stateful Sets, etc.) +The view lists applications by workload kind (for example: Deployments, ReplicaSets, StatefulSets). and each workload kind can be viewed separately. 
The lists summarize actionable information about the workloads, -such as the number of ready pods for a Replica Set or current memory usage for a Pod. +such as the number of ready pods for a ReplicaSet or current memory usage for a Pod. Detail views for workloads show status and specification information and surface relationships between objects. -For example, Pods that Replica Set is controlling or New Replica Sets and Horizontal Pod Autoscalers for Deployments. +For example, Pods that ReplicaSet is controlling or new ReplicaSets and HorizontalPodAutoscalers for Deployments. #### Services @@ -230,9 +236,9 @@ internal endpoints for cluster connections and external endpoints for external u #### Storage -Storage view shows Persistent Volume Claim resources which are used by applications for storing data. +Storage view shows PersistentVolumeClaim resources which are used by applications for storing data. -#### Config Maps and Secrets +#### ConfigMaps and Secrets {#config-maps-and-secrets} Shows all Kubernetes resources that are used for live configuration of applications running in clusters. The view allows for editing and managing config objects and displays secrets hidden by default. 
diff --git a/content/en/docs/tasks/administer-cluster/_index.md b/content/en/docs/tasks/administer-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/administer-cluster/access-cluster-api.md b/content/en/docs/tasks/administer-cluster/access-cluster-api.md index 0275cadabf..827cb50f7c 100644 --- a/content/en/docs/tasks/administer-cluster/access-cluster-api.md +++ b/content/en/docs/tasks/administer-cluster/access-cluster-api.md @@ -30,7 +30,7 @@ Check the location and credentials that kubectl knows about with this command: kubectl config view ``` -Many of the [examples](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/) provide an introduction to using +Many of the [examples](https://github.com/kubernetes/examples/tree/master/) provide an introduction to using kubectl. Complete documentation is found in the [kubectl manual](/docs/reference/kubectl/overview/). ### Directly accessing the REST API diff --git a/content/en/docs/tasks/administer-cluster/certificates.md b/content/en/docs/tasks/administer-cluster/certificates.md index 6361b20d16..2338b0cdc7 100644 --- a/content/en/docs/tasks/administer-cluster/certificates.md +++ b/content/en/docs/tasks/administer-cluster/certificates.md @@ -116,6 +116,9 @@ manually through `easyrsa`, `openssl` or `cfssl`. openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key \ -CAcreateserial -out server.crt -days 10000 \ -extensions v3_ext -extfile csr.conf +1. View the certificate signing request: + + openssl req -noout -text -in ./server.csr 1. 
View the certificate: openssl x509 -noout -text -in ./server.crt diff --git a/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md b/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md index be7cbf2673..6a11b4f2d3 100644 --- a/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md +++ b/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md @@ -26,7 +26,7 @@ volume is automatically deleted when a user deletes the corresponding PersistentVolumeClaim. This automatic behavior might be inappropriate if the volume contains precious data. In that case, it is more appropriate to use the "Retain" policy. With the "Retain" policy, if a user deletes a PersistentVolumeClaim, -the corresponding PersistentVolume is not be deleted. Instead, it is moved to the +the corresponding PersistentVolume will not be deleted. Instead, it is moved to the Released phase, where all of its data can be manually recovered. ## Changing the reclaim policy of a PersistentVolume diff --git a/content/en/docs/tasks/administer-cluster/controller-manager-leader-migration.md b/content/en/docs/tasks/administer-cluster/controller-manager-leader-migration.md index 8b141f9da0..ddfa8e592e 100644 --- a/content/en/docs/tasks/administer-cluster/controller-manager-leader-migration.md +++ b/content/en/docs/tasks/administer-cluster/controller-manager-leader-migration.md @@ -10,7 +10,7 @@ content_type: task -{{< feature-state state="alpha" for_k8s_version="v1.21" >}} +{{< feature-state state="beta" for_k8s_version="v1.22" >}} {{< glossary_definition term_id="cloud-controller-manager" length="all" prepend="The cloud-controller-manager is">}} @@ -20,21 +20,21 @@ As part of the [cloud provider extraction effort](https://kubernetes.io/blog/201 Leader Migration provides a mechanism in which HA clusters can safely migrate "cloud specific" controllers between the `kube-controller-manager` and the `cloud-controller-manager` via a shared resource lock between the 
two components while upgrading the replicated control plane. For a single-node control plane, or if unavailability of controller managers can be tolerated during the upgrade, Leader Migration is not needed and this guide can be ignored. -Leader Migration is an alpha feature that is disabled by default and it requires `--enable-leader-migration` to be set on controller managers. It can be enabled by setting the feature gate `ControllerManagerLeaderMigration` plus `--enable-leader-migration` on `kube-controller-manager` or `cloud-controller-manager`. Leader Migration only applies during the upgrade and can be safely disabled or left enabled after the upgrade is complete. +Leader Migration can be enabled by setting `--enable-leader-migration` on `kube-controller-manager` or `cloud-controller-manager`. Leader Migration only applies during the upgrade and can be safely disabled or left enabled after the upgrade is complete. This guide walks you through the manual process of upgrading the control plane from `kube-controller-manager` with built-in cloud provider to running both `kube-controller-manager` and `cloud-controller-manager`. If you use a tool to administrator the cluster, please refer to the documentation of the tool and the cloud provider for more details. ## {{% heading "prerequisites" %}} -It is assumed that the control plane is running Kubernetes version N and to be upgraded to version N + 1. Although it is possible to migrate within the same version, ideally the migration should be performed as part of a upgrade so that changes of configuration can be aligned to releases. The exact versions of N and N + 1 depend on each cloud provider. For example, if a cloud provider builds a `cloud-controller-manager` to work with Kubernetes 1.22, then N can be 1.21 and N + 1 can be 1.22. +It is assumed that the control plane is running Kubernetes version N and to be upgraded to version N + 1. 
Although it is possible to migrate within the same version, ideally the migration should be performed as part of an upgrade so that changes of configuration can be aligned to each release. The exact versions of N and N + 1 depend on each cloud provider. For example, if a cloud provider builds a `cloud-controller-manager` to work with Kubernetes 1.22, then N can be 1.21 and N + 1 can be 1.22. The control plane nodes should run `kube-controller-manager` with Leader Election enabled through `--leader-elect=true`. As of version N, an in-tree cloud privider must be set with `--cloud-provider` flag and `cloud-controller-manager` should not yet be deployed. -The out-of-tree cloud provider must have built a `cloud-controller-manager` with Leader Migration implmentation. If the cloud provider imports `k8s.io/cloud-provider` and `k8s.io/controller-manager` of version v0.21.0 or later, Leader Migration will be avaliable. +The out-of-tree cloud provider must have built a `cloud-controller-manager` with Leader Migration implementation. If the cloud provider imports `k8s.io/cloud-provider` and `k8s.io/controller-manager` of version v0.21.0 or later, Leader Migration will be available. However, for version before v0.22.0, Leader Migration is alpha and requires feature gate `ControllerManagerLeaderMigration` to be enabled. This guide assumes that kubelet of each control plane node starts `kube-controller-manager` and `cloud-controller-manager` as static pods defined by their manifests. If the components run in a different setting, please adjust the steps accordingly. -For authorization, this guide assumes that the cluser uses RBAC. If another authorization mode grants permissions to `kube-controller-manager` and `cloud-controller-manager` components, please grant the needed access in a way that matches the mode. +For authorization, this guide assumes that the cluster uses RBAC. 
If another authorization mode grants permissions to `kube-controller-manager` and `cloud-controller-manager` components, please grant the needed access in a way that matches the mode. @@ -52,11 +52,13 @@ Do the same to the `system::leader-locking-cloud-controller-manager` role. ### Initial Leader Migration configuration -Leader Migration requires a configuration file representing the state of controller-to-manager assignment. At this moment, with in-tree cloud provider, `kube-controller-manager` runs `route`, `service`, and `cloud-node-lifecycle`. The following example configuration shows the assignment. +Leader Migration optionally takes a configuration file representing the state of controller-to-manager assignment. At this moment, with in-tree cloud provider, `kube-controller-manager` runs `route`, `service`, and `cloud-node-lifecycle`. The following example configuration shows the assignment. + +Leader Migration can be enabled without a configuration. Please see [Default Configuration](#default-configuration) for details. ```yaml kind: LeaderMigrationConfiguration -apiVersion: controllermanager.config.k8s.io/v1alpha1 +apiVersion: controllermanager.config.k8s.io/v1beta1 leaderName: cloud-provider-extraction-migration resourceLock: leases controllerLeaders: @@ -70,7 +72,6 @@ controllerLeaders: On each control plane node, save the content to `/etc/leadermigration.conf`, and update the manifest of `kube-controller-manager` so that the file is mounted inside the container at the same location. 
Also, update the same manifest to add the following arguments: -- `--feature-gates=ControllerManagerLeaderMigration=true` to enable Leader Migration which is an alpha feature - `--enable-leader-migration` to enable Leader Migration on the controller manager - `--leader-migration-config=/etc/leadermigration.conf` to set configuration file @@ -82,7 +83,7 @@ In version N + 1, the desired state of controller-to-manager assignment can be r ```yaml kind: LeaderMigrationConfiguration -apiVersion: controllermanager.config.k8s.io/v1alpha1 +apiVersion: controllermanager.config.k8s.io/v1beta1 leaderName: cloud-provider-extraction-migration resourceLock: leases controllerLeaders: @@ -113,6 +114,13 @@ Now that the control plane has been upgraded to run both `kube-controller-manage In a rolling manager, update manifest of `cloud-controller-manager` to unset both `--enable-leader-migration` and `--leader-migration-config=` flag, also remove the mount of `/etc/leadermigration.conf`, and finally remove `/etc/leadermigration.conf`. To re-enable Leader Migration, recreate the configuration file and add its mount and the flags that enable Leader Migration back to `cloud-controller-manager`. +### Default Configuration + +Starting Kubernetes 1.22, Leader Migration provides a default configuration suitable for the default controller-to-manager assignment. +The default configuration can be enabled by setting `--enable-leader-migration` but without `--leader-migration-config=`. + +For `kube-controller-manager` and `cloud-controller-manager`, if there are no flags that enable any in-tree cloud provider or change ownership of controllers, the default configuration can be used to avoid manual creation of the configuration file. 
+ ## {{% heading "whatsnext" %}} - Read the [Controller Manager Leader Migration](https://github.com/kubernetes/enhancements/tree/master/keps/sig-cloud-provider/2436-controller-manager-leader-migration) enhancement proposal diff --git a/content/en/docs/tasks/administer-cluster/cpu-management-policies.md b/content/en/docs/tasks/administer-cluster/cpu-management-policies.md index 5ffc40781a..9eb7d7febb 100644 --- a/content/en/docs/tasks/administer-cluster/cpu-management-policies.md +++ b/content/en/docs/tasks/administer-cluster/cpu-management-policies.md @@ -58,12 +58,16 @@ frequency is set through a new Kubelet configuration value `--cpu-manager-reconcile-period`. If not specified, it defaults to the same duration as `--node-status-update-frequency`. +The behavior of the static policy can be fine-tuned using the `--cpu-manager-policy-options` flag. +The flag takes a comma-separated list of `key=value` policy options. + ### None policy The `none` policy explicitly enables the existing default CPU affinity scheme, providing no affinity beyond what the OS scheduler does automatically.  Limits on CPU usage for -[Guaranteed pods](/docs/tasks/configure-pod-container/quality-service-pod/) +[Guaranteed pods](/docs/tasks/configure-pod-container/quality-service-pod/) and +[Burstable pods](/docs/tasks/configure-pod-container/quality-service-pod/) are enforced using CFS quota. ### Static policy @@ -212,4 +216,14 @@ and `requests` are set equal to `limits` when not explicitly specified. And the container's resource limit for the CPU resource is an integer greater than or equal to one. The `nginx` container is granted 2 exclusive CPUs. +#### Static policy options +If the `full-pcpus-only` policy option is specified, the static policy will always allocate full physical cores. +You can enable this option by adding `full-pcpus-only=true` to the CPUManager policy options. +By default, without this option, the static policy allocates CPUs using a topology-aware best-fit allocation.
+On SMT enabled systems, the policy can allocate individual virtual cores, which correspond to hardware threads. +This can lead to different containers sharing the same physical cores; this behaviour in turn contributes +to the [noisy neighbours problem](https://en.wikipedia.org/wiki/Cloud_computing_issues#Performance_interference_and_noisy_neighbors). +With the option enabled, the pod will be admitted by the kubelet only if the CPU request of all its containers +can be fulfilled by allocating full physical cores. +If the pod does not pass the admission, it will be put in Failed state with the message `SMTAlignmentError`. diff --git a/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md b/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md index 308b066651..bd2fb3684c 100644 --- a/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md +++ b/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md @@ -28,7 +28,7 @@ explains how to use `kubeadm` to migrate from `kube-dns`. DNS is a built-in Kubernetes service launched automatically using the _addon manager_ -[cluster add-on](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/README.md). +[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md). As of Kubernetes v1.12, CoreDNS is the recommended DNS Server, replacing kube-dns. If your cluster originally used kube-dns, you may still have `kube-dns` deployed rather than CoreDNS. diff --git a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md index faed4e1cb1..6bc41d2170 100644 --- a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md +++ b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md @@ -288,6 +288,13 @@ This should probably be implemented eventually. Linux's libc (a.k.a. glibc) has a limit for the DNS `nameserver` records to 3 by default. 
What's more, for the glibc versions which are older than glibc-2.17-222 ([the new versions update see this issue](https://access.redhat.com/solutions/58028)), the allowed number of DNS `search` records has been limited to 6 ([see this bug from 2005](https://bugzilla.redhat.com/show_bug.cgi?id=168253)). Kubernetes needs to consume 1 `nameserver` record and 3 `search` records. This means that if a local installation already uses 3 `nameserver`s or uses more than 3 `search`es while your glibc version is in the affected list, some of those settings will be lost. To work around the DNS `nameserver` records limit, the node can run `dnsmasq`, which will provide more `nameserver` entries. You can also use kubelet's `--resolv-conf` flag. To fix the DNS `search` records limit, consider upgrading your linux distribution or upgrading to an unaffected version of glibc. +{{< note >}} + +With [Expanded DNS Configuration](/docs/concepts/services-networking/dns-pod-service/#expanded-dns-configuration), +Kubernetes allows more DNS `search` records. + +{{< /note >}} + If you are using Alpine version 3.3 or earlier as your base image, DNS may not work properly due to a known issue with Alpine. Kubernetes [issue 30215](https://github.com/kubernetes/kubernetes/issues/30215) diff --git a/content/en/docs/tasks/administer-cluster/encrypt-data.md b/content/en/docs/tasks/administer-cluster/encrypt-data.md index 8499855bb0..66427133b2 100644 --- a/content/en/docs/tasks/administer-cluster/encrypt-data.md +++ b/content/en/docs/tasks/administer-cluster/encrypt-data.md @@ -157,7 +157,7 @@ program to retrieve the contents of your secret. 
kubectl describe secret secret1 -n default ``` - should match `mykey: bXlkYXRh`, mydata is encoded, check [decoding a secret](/docs/concepts/configuration/secret#decoding-a-secret) to + should match `mykey: bXlkYXRh`, mydata is encoded, check [decoding a secret](/docs/tasks/configmap-secret/managing-secret-using-kubectl/#decoding-secret) to completely decode the secret. diff --git a/content/en/docs/tasks/administer-cluster/highly-available-control-plane.md b/content/en/docs/tasks/administer-cluster/highly-available-control-plane.md index 339f48e41a..f50bfb01d9 100644 --- a/content/en/docs/tasks/administer-cluster/highly-available-control-plane.md +++ b/content/en/docs/tasks/administer-cluster/highly-available-control-plane.md @@ -10,7 +10,7 @@ aliases: [ '/docs/tasks/administer-cluster/highly-available-master/' ] {{< feature-state for_k8s_version="v1.5" state="alpha" >}} -You can replicate Kubernetes control plane nodes in `kube-up` or `kube-down` scripts for Google Compute Engine. +You can replicate Kubernetes control plane nodes in `kube-up` or `kube-down` scripts for Google Compute Engine. However, these scripts are not suitable for any sort of production use; they are widely used in the project's CI. This document describes how to use kube-up/down scripts to manage a highly available (HA) control plane and how HA control planes are implemented for use with GCE. @@ -156,14 +156,14 @@ and the IP address of the first replica will be promoted to IP address of load b Similarly, after removal of the penultimate control plane node, the load balancer will be removed and its IP address will be assigned to the last remaining replica. Please note that creation and removal of load balancer are complex operations and it may take some time (~20 minutes) for them to propagate.
-### Master service & kubelets +### Control plane service & kubelets Instead of trying to keep an up-to-date list of Kubernetes apiserver in the Kubernetes service, the system directs all traffic to the external IP: * in case of a single node control plane, the IP points to the control plane node, -* in case of an HA control plane, the IP points to the load balancer in-front of the masters. +* in case of an HA control plane, the IP points to the load balancer in-front of the control plane nodes. Similarly, the external IP will be used by kubelets to communicate with the control plane. diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/_index.md b/content/en/docs/tasks/administer-cluster/kubeadm/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md b/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md index 9d8c672dfe..56af353f89 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md @@ -163,7 +163,7 @@ Instructions to do so are available at [Install Docker Engine - Enterprise on Wi #### Install wins, kubelet, and kubeadm ```PowerShell -curl.exe -LO https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/download/PrepareNode.ps1 +curl.exe -LO https://raw.githubusercontent.com/kubernetes-sigs/sig-windows-tools/master/kubeadm/scripts/PrepareNode.ps1 .\PrepareNode.ps1 -KubernetesVersion {{< param "fullversion" >}} ``` @@ -206,7 +206,7 @@ If you're using a different interface rather than Ethernet (i.e. 
"Ethernet0 2") #### Install wins, kubelet, and kubeadm ```PowerShell -curl.exe -LO https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/download/PrepareNode.ps1 +curl.exe -LO https://raw.githubusercontent.com/kubernetes-sigs/sig-windows-tools/master/kubeadm/scripts/PrepareNode.ps1 .\PrepareNode.ps1 -KubernetesVersion {{< param "fullversion" >}} -ContainerRuntime containerD ``` diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver.md b/content/en/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver.md index 9ed45bd07f..31a9ff0e33 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver.md @@ -33,10 +33,8 @@ driver of the kubelet. {{< note >}} -{{< feature-state for_k8s_version="v1.21" state="stable" >}} - -If the user is not setting the `cgroupDriver` field under `KubeletConfiguration`, -`kubeadm init` will default it to `systemd`. +In v1.22, if the user is not setting the `cgroupDriver` field under `KubeletConfiguration`, +`kubeadm` will default it to `systemd`. 
{{< /note >}} A minimal example of configuring the field explicitly: @@ -44,7 +42,7 @@ A minimal example of configuring the field explicitly: ```yaml # kubeadm-config.yaml kind: ClusterConfiguration -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kubernetesVersion: v1.21.0 --- kind: KubeletConfiguration diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md index e706bf0267..3d4959b536 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md @@ -85,7 +85,11 @@ Additionally, kubeadm informs the user if the certificate is externally managed; {{< /warning >}} {{< note >}} -`kubelet.conf` is not included in the list above because kubeadm configures kubelet for automatic certificate renewal. +`kubelet.conf` is not included in the list above because kubeadm configures kubelet +for [automatic certificate renewal](/docs/tasks/tls/certificate-rotation/) +with rotatable certificates under `/var/lib/kubelet/pki`. +To repair an expired kubelet client certificate see +[Kubelet client certificate rotation fails](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/#kubelet-client-cert). {{< /note >}} {{< warning >}} @@ -122,7 +126,17 @@ command. In that case, you should explicitly set `--certificate-renewal=true`. You can renew your certificates manually at any time with the `kubeadm certs renew` command. -This command performs the renewal using CA (or front-proxy-CA) certificate and key stored in `/etc/kubernetes/pki`. +This command performs the renewal using CA (or front-proxy-CA) certificate and key stored in `/etc/kubernetes/pki`. + +After running the command you should restart the control plane Pods. This is required since +dynamic certificate reload is currently not supported for all components and certificates. 
+[Static Pods](/docs/tasks/configure-pod-container/static-pod/) are managed by the local kubelet +and not by the API Server, thus kubectl cannot be used to delete and restart them. +To restart a static Pod you can temporarily remove its manifest file from `/etc/kubernetes/manifests/` +and wait for 20 seconds (see the `fileCheckFrequency` value in [KubeletConfiguration struct](/docs/reference/config-api/kubelet-config.v1beta1/)). +The kubelet will terminate the Pod if it's no longer in the manifest directory. +You can then move the file back and after another `fileCheckFrequency` period, the kubelet will recreate +the Pod and the certificate renewal for the component can complete. {{< warning >}} If you are running an HA cluster, this command needs to be executed on all the control-plane nodes. @@ -142,7 +156,7 @@ The Kubernetes certificates normally reach their expiration date after one year. ## Renew certificates with the Kubernetes certificates API -This section provide more details about how to execute manual certificate renewal using the Kubernetes certificates API. +This section provides more details about how to execute manual certificate renewal using the Kubernetes certificates API. {{< caution >}} These are advanced topics for users who need to integrate their organization's certificate infrastructure into a kubeadm-built cluster. If the default kubeadm configuration satisfies your needs, you should let kubeadm manage certificates instead. @@ -157,10 +171,10 @@ The built-in signer is part of [`kube-controller-manager`](/docs/reference/comma To activate the built-in signer, you must pass the `--cluster-signing-cert-file` and `--cluster-signing-key-file` flags.
-If you're creating a new cluster, you can use a kubeadm [configuration file](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2): +If you're creating a new cluster, you can use a kubeadm [configuration file](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3): ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration controllerManager: extraArgs: @@ -219,7 +233,7 @@ To configure the kubelets in a new kubeadm cluster to obtain properly signed ser certificates you must pass the following minimal configuration to `kubeadm init`: ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/content/en/docs/tasks/administer-cluster/kubelet-config-file.md b/content/en/docs/tasks/administer-cluster/kubelet-config-file.md index b49c84220a..668f4532a5 100644 --- a/content/en/docs/tasks/administer-cluster/kubelet-config-file.md +++ b/content/en/docs/tasks/administer-cluster/kubelet-config-file.md @@ -30,17 +30,18 @@ Here is an example of what this file might look like: ``` apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +address: "192.168.0.8" +port: 20250 +serializeImagePulls: false evictionHard: memory.available: "200Mi" ``` -In the example, the Kubelet is configured to evict Pods when available memory drops below 200Mi. +In the example, the Kubelet is configured to serve on IP address 192.168.0.8 and port 20250, pull images in parallel, +and evict Pods when available memory drops below 200Mi. All other Kubelet configuration values are left at their built-in defaults, unless overridden by flags. Command line flags which target the same value as a config file will override that value. -For a trick to generate a configuration file from a live node, see -[Reconfigure a Node's Kubelet in a Live Cluster](/docs/tasks/administer-cluster/reconfigure-kubelet).
- ## Start a Kubelet process configured via the config file {{< note >}} @@ -65,12 +66,6 @@ In the above example, this version is `kubelet.config.k8s.io/v1beta1`. -## Relationship to Dynamic Kubelet Config -ated to Dynamic Kubelet Config -If you are using the [Dynamic Kubelet Configuration](/docs/tasks/administer-cluster/reconfigure-kubelet) -feature, the combination of configuration provided via `--config` and any flags which override these values -is considered the default "last known good" configuration by the automatic rollback mechanism. - ## {{% heading "whatsnext" %}} - Learn more about kubelet configuration by checking the diff --git a/content/en/docs/tasks/administer-cluster/kubelet-in-userns.md b/content/en/docs/tasks/administer-cluster/kubelet-in-userns.md new file mode 100644 index 0000000000..7b90c5cfd1 --- /dev/null +++ b/content/en/docs/tasks/administer-cluster/kubelet-in-userns.md @@ -0,0 +1,252 @@ +--- +title: Running Kubernetes Node Components as a Non-root User +content_type: task +min-kubernetes-server-version: 1.22 +--- + + + +{{< feature-state for_k8s_version="v1.22" state="alpha" >}} + +This document describes how to run Kubernetes Node components such as kubelet, CRI, OCI, and CNI +without root privileges, by using a {{< glossary_tooltip text="user namespace" term_id="userns" >}}. + +This technique is also known as _rootless mode_. + +{{< note >}} +This document describes how to run Kubernetes Node components (and hence pods) as a non-root user. + +If you are just looking for how to run a pod as a non-root user, see [SecurityContext](/docs/tasks/configure-pod-container/security-context/).
+{{< /note >}} + +## {{% heading "prerequisites" %}} + +{{% version-check %}} + +* [Enable Cgroup v2](https://rootlesscontaine.rs/getting-started/common/cgroup2/) +* [Enable systemd with user session](https://rootlesscontaine.rs/getting-started/common/login/) +* [Configure several sysctl values, depending on host Linux distribution](https://rootlesscontaine.rs/getting-started/common/sysctl/) +* [Ensure that your unprivileged user is listed in `/etc/subuid` and `/etc/subgid`](https://rootlesscontaine.rs/getting-started/common/subuid/) + +* `KubeletInUserNamespace` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) + + + +## Running Kubernetes inside Rootless Docker/Podman + +[kind](https://kind.sigs.k8s.io/) supports running Kubernetes inside a Rootless Docker or Rootless Podman. + +See [Running kind with Rootless Docker](https://kind.sigs.k8s.io/docs/user/rootless/). + + + +## Running Rootless Kubernetes directly on a host + +{{% thirdparty-content %}} + +### K3s + +[K3s](https://k3s.io/) experimentally supports rootless mode. + +See [Running K3s with Rootless mode](https://rancher.com/docs/k3s/latest/en/advanced/#running-k3s-with-rootless-mode-experimental) for the usage. + +### Usernetes +[Usernetes](https://github.com/rootless-containers/usernetes) is a reference distribution of Kubernetes that can be installed under `$HOME` directory without the root privilege. + +Usernetes supports both containerd and CRI-O as CRI runtimes. +Usernetes supports multi-node clusters using Flannel (VXLAN). + +See [the Usernetes repo](https://github.com/rootless-containers/usernetes) for the usage. + +## Manually deploy a node that runs the kubelet in a user namespace {#userns-the-hard-way} + +This section provides hints for running Kubernetes in a user namespace manually. + +{{< note >}} +This section is intended to be read by developers of Kubernetes distributions, not by end users. 
+{{< /note >}} + +### Creating a user namespace + +The first step is to create a {{< glossary_tooltip text="user namespace" term_id="userns" >}}. + +If you are trying to run Kubernetes in a user-namespaced container such as +Rootless Docker/Podman or LXC/LXD, you are all set, and you can go to the next subsection. + +Otherwise you have to create a user namespace by yourself, by calling `unshare(2)` with `CLONE_NEWUSER`. + +A user namespace can be also unshared by using command line tools such as: +- [RootlessKit](https://github.com/rootless-containers/rootlesskit) +- [become-root](https://github.com/giuseppe/become-root) +- [`unshare(1)`](https://man7.org/linux/man-pages/man1/unshare.1.html) + +After unsharing the user namespace, you will also have to unshare other namespaces such as mount namespace. + +You do *not* need to call `chroot()` nor `pivot_root()` after unsharing the mount namespace, +however, you have to mount writable filesystems on several directories *in* the namespace. + +At least, the following directories need to be writable *in* the namespace (not *outside* the namespace): + +- `/etc` +- `/run` +- `/var/logs` +- `/var/lib/kubelet` +- `/var/lib/cni` +- `/var/lib/containerd` (for containerd) +- `/var/lib/containers` (for CRI-O) + +### Creating a delegated cgroup tree + +In addition to the user namespace, you also need to have a writable cgroup tree with cgroup v2. + +{{< note >}} +Kubernetes support for running Node components in user namespaces requires cgroup v2. +Cgroup v1 is not supported. +{{< /note >}} + +If you are trying to run Kubernetes in Rootless Docker/Podman or LXC/LXD on a systemd-based host, you are all set. + +Otherwise you have to create a systemd unit with `Delegate=yes` property to delegate a cgroup tree with writable permission. 
+ +On your node, systemd must already be configured to allow delegation; for more details, see +[cgroup v2](https://rootlesscontaine.rs/getting-started/common/cgroup2/) in the Rootless +Containers documentation. + +### Configuring network +{{% thirdparty-content %}} + +The network namespace of the Node components has to have a non-loopback interface, which can be for example configured with +slirp4netns, VPNKit, or lxc-user-nic. + +The network namespaces of the Pods can be configured with regular CNI plugins. +For multi-node networking, Flannel (VXLAN, 8472/UDP) is known to work. + +Ports such as the kubelet port (10250/TCP) and `NodePort` service ports have to be exposed from the Node network namespace to +the host with an external port forwarder, such as RootlessKit, slirp4netns, or socat. + +You can use the port forwarder from K3s; see https://github.com/k3s-io/k3s/blob/v1.21.2+k3s1/pkg/rootlessports/controller.go + +### Configuring CRI + +The kubelet relies on a container runtime. You should deploy a container runtime such as containerd or CRI-O and ensure that it is running within the user namespace before the kubelet starts. + +{{< tabs name="cri" >}} +{{% tab name="containerd" %}} + +Running CRI plugin of containerd in a user namespace is supported since containerd 1.4. 
+ +Running containerd within a user namespace requires the following configuration: + +```toml +version = 2 + +[plugins."io.containerd.grpc.v1.cri"] +# Disable AppArmor + disable_apparmor = true +# Ignore an error during setting oom_score_adj + restrict_oom_score_adj = true +# Disable hugetlb cgroup v2 controller (because systemd does not support delegating hugetlb controller) + disable_hugetlb_controller = true + +[plugins."io.containerd.grpc.v1.cri".containerd] +# Using non-fuse overlayfs is also possible for kernel >= 5.11, but requires SELinux to be disabled + snapshotter = "fuse-overlayfs" + +[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] +# We use cgroupfs that is delegated by systemd, so we do not use SystemdCgroup driver +# (unless you run another systemd in the namespace) + SystemdCgroup = false +``` + +{{% /tab %}} +{{% tab name="CRI-O" %}} + +Running CRI-O in a user namespace is supported since CRI-O 1.22. + +CRI-O requires an environment variable `_CRIO_ROOTLESS=1` to be set. 
+ +The following configuration is also recommended: + +```toml +[crio] + storage_driver = "overlay" +# Using non-fuse overlayfs is also possible for kernel >= 5.11, but requires SELinux to be disabled + storage_option = ["overlay.mount_program=/usr/local/bin/fuse-overlayfs"] + +[crio.runtime] +# We use cgroupfs that is delegated by systemd, so we do not use "systemd" driver +# (unless you run another systemd in the namespace) + cgroup_manager = "cgroupfs" +``` + +{{% /tab %}} +{{< /tabs >}} + +### Configuring kubelet + +Running kubelet in a user namespace requires the following configuration: + +```yaml +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +featureGates: + KubeletInUserNamespace: true +# We use cgroupfs that is delegated by systemd, so we do not use "systemd" driver +# (unless you run another systemd in the namespace) +cgroupDriver: "cgroupfs" +``` + +When the `KubeletInUserNamespace` feature gate is enabled, kubelet ignores errors that may happen during setting the following sysctl values: +- `vm.overcommit_memory` +- `vm.panic_on_oom` +- `kernel.panic` +- `kernel.panic_on_oops` +- `kernel.keys.root_maxkeys` +- `kernel.keys.root_maxbytes`. + (these are sysctl values for the host, not for the containers). + +Within a user namespace, the kubelet also ignores any error raised from trying to open `/dev/kmsg`. +This feature gate also allows kube-proxy to ignore an error during setting `RLIMIT_NOFILE`. + +The `KubeletInUserNamespace` feature gate was introduced in Kubernetes v1.22 with "alpha" status. + +Running kubelet in a user namespace without using this feature gate is also possible by mounting a specially crafted proc filesystem, +but not officially supported. 
+ +### Configuring kube-proxy + +Running kube-proxy in a user namespace requires the following configuration: + +```yaml +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +mode: "iptables" # or "userspace" +conntrack: +# Skip setting sysctl value "net.netfilter.nf_conntrack_max" + maxPerCore: 0 +# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established" + tcpEstablishedTimeout: 0s +# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close" + tcpCloseWaitTimeout: 0s +``` + +## Caveats + +- Most of "non-local" volume drivers such as `nfs` and `iscsi` do not work. + Local volumes like `local`, `hostPath`, `emptyDir`, `configMap`, `secret`, and `downwardAPI` are known to work. + +- Some CNI plugins may not work. Flannel (VXLAN) is known to work. + +For more on this, see the [Caveats and Future work](https://rootlesscontaine.rs/caveats/) page +on the rootlesscontaine.rs website. + +## {{% heading "seealso" %}} +- [rootlesscontaine.rs](https://rootlesscontaine.rs/) +- [Rootless Containers 2020 (KubeCon NA 2020)](https://www.slideshare.net/AkihiroSuda/kubecon-na-2020-containerd-rootless-containers-2020) +- [Running kind with Rootless Docker](https://kind.sigs.k8s.io/docs/user/rootless/) +- [Usernetes](https://github.com/rootless-containers/usernetes) +- [Running K3s with rootless mode](https://rancher.com/docs/k3s/latest/en/advanced/#running-k3s-with-rootless-mode-experimental) +- [KEP-2033: Kubelet-in-UserNS (aka Rootless mode)](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2033-kubelet-in-userns-aka-rootless) diff --git a/content/en/docs/tasks/administer-cluster/memory-manager.md b/content/en/docs/tasks/administer-cluster/memory-manager.md index 60f2ded206..27a0ed2405 100644 --- a/content/en/docs/tasks/administer-cluster/memory-manager.md +++ b/content/en/docs/tasks/administer-cluster/memory-manager.md @@ -1,5 +1,5 @@ --- -title: Memory Manager +title: Utilizing the NUMA-aware Memory Manager reviewers: - 
klueska @@ -11,7 +11,7 @@ min-kubernetes-server-version: v1.21 -{{< feature-state state="alpha" for_k8s_version="v1.21" >}} +{{< feature-state state="beta" for_k8s_version="v1.22" >}} The Kubernetes *Memory Manager* enables the feature of guaranteed memory (and hugepages) allocation for pods in the `Guaranteed` {{< glossary_tooltip text="QoS class" term_id="qos-class" >}}. @@ -29,12 +29,14 @@ To align memory resources with other requested resources in a Pod Spec: - the CPU Manager should be enabled and proper CPU Manager policy should be configured on a Node. See [control CPU Management Policies](/docs/tasks/administer-cluster/cpu-management-policies/); - the Topology Manager should be enabled and proper Topology Manager policy should be configured on a Node. See [control Topology Management Policies](/docs/tasks/administer-cluster/topology-manager/). -Support for the Memory Manager requires `MemoryManager` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be enabled. +Starting from v1.22, the Memory Manager is enabled by default through `MemoryManager` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/). -That is, the `kubelet` must be started with the following flag: +Preceding v1.22, the `kubelet` must be started with the following flag: `--feature-gates=MemoryManager=true` +in order to enable the Memory Manager feature. + ## How Memory Manager Operates? The Memory Manager currently offers the guaranteed memory (and hugepages) allocation for Pods in Guaranteed QoS class. To immediately put the Memory Manager into operation follow the guidelines in the section [Memory Manager configuration](#memory-manager-configuration), and subsequently, prepare and deploy a `Guaranteed` pod as illustrated in the section [Placing a Pod in the Guaranteed QoS class](#placing-a-pod-in-the-guaranteed-qos-class). 
@@ -51,7 +53,7 @@ The Memory Manager updates the Node Map during the startup and runtime as follow This occurs once a node administrator employs `--reserved-memory` (section [Reserved memory flag](#reserved-memory-flag)). In this case, the Node Map becomes updated to reflect this reservation as illustrated in [Memory Manager KEP: Memory Maps at start-up (with examples)][5]. -The administrator must provide `--reserved-memory` flag when `static` policy is configured. +The administrator must provide `--reserved-memory` flag when `Static` policy is configured. ### Runtime @@ -61,7 +63,7 @@ Important topic in the context of Memory Manager operation is the management of ## Memory Manager configuration -Other Managers should be first pre-configured (section [Pre-configuration](#pre-configuration)). Next, the Memory Manger feature should be enabled (section [Enable the Memory Manager feature](#enable-the-memory-manager-feature)) and be run with `static` policy (section [static policy](#static-policy)). Optionally, some amount of memory can be reserved for system or kubelet processes to increase node stability (section [Reserved memory flag](#reserved-memory-flag)). +Other Managers should be first pre-configured (section [Pre-configuration](#pre-configuration)). Next, the Memory Manager feature should be enabled (section [Enable the Memory Manager feature](#enable-the-memory-manager-feature)) and be run with `Static` policy (section [Static policy](#static-policy)). Optionally, some amount of memory can be reserved for system or kubelet processes to increase node stability (section [Reserved memory flag](#reserved-memory-flag)). ### Policies @@ -69,21 +71,21 @@ Memory Manager supports two policies.
You can select a policy via a `kubelet` fl Two policies can be selected: -* `none` (default) -* `static` +* `None` (default) +* `Static` -#### none policy {#policy-none} +#### None policy {#policy-none} This is the default policy and does not affect the memory allocation in any way. It acts the same as if the Memory Manager is not present at all. -The `none` policy returns default topology hint. This special hint denotes that Hint Provider (Memory Manger in this case) has no preference for NUMA affinity with any resource. +The `None` policy returns default topology hint. This special hint denotes that Hint Provider (Memory Manager in this case) has no preference for NUMA affinity with any resource. -#### static policy {#policy-static} +#### Static policy {#policy-static} -In the case of the `Guaranteed` pod, the `static` Memory Manger policy returns topology hints relating to the set of NUMA nodes where the memory can be guaranteed, and reserves the memory through updating the internal [NodeMap][2] object. +In the case of the `Guaranteed` pod, the `Static` Memory Manager policy returns topology hints relating to the set of NUMA nodes where the memory can be guaranteed, and reserves the memory through updating the internal [NodeMap][2] object. -In the case of the `BestEffort` or `Burstable` pod, the `static` Memory Manager policy sends back the default topology hint as there is no request for the guaranteed memory, and does not reserve the memory in the internal [NodeMap][2] object. +In the case of the `BestEffort` or `Burstable` pod, the `Static` Memory Manager policy sends back the default topology hint as there is no request for the guaranteed memory, and does not reserve the memory in the internal [NodeMap][2] object.
### Reserved memory flag @@ -100,7 +102,7 @@ The Memory Manager will not use this reserved memory for the allocation of conta For example, if you have a NUMA node "NUMA0" with `10Gi` of memory available, and the `--reserved-memory` was specified to reserve `1Gi` of memory at "NUMA0", the Memory Manager assumes that only `9Gi` is available for containers. -You can omit this parameter, however, you should be aware that the quantity of reserved memory from all NUMA nodes should be equal to the quantity of memory specified by the [Node Allocatable feature](/docs/tasks/administer-cluster/reserve-compute-resources/). If at least one node allocatable parameter is non-zero, you will need to specify `--reserved-memory` for at least one NUMA node. In fact, `eviction-hard` threshold value is equal to `100Mi` by default, so if `static` policy is used, `--reserved-memory` is obligatory. +You can omit this parameter, however, you should be aware that the quantity of reserved memory from all NUMA nodes should be equal to the quantity of memory specified by the [Node Allocatable feature](/docs/tasks/administer-cluster/reserve-compute-resources/). If at least one node allocatable parameter is non-zero, you will need to specify `--reserved-memory` for at least one NUMA node. In fact, `eviction-hard` threshold value is equal to `100Mi` by default, so if `Static` policy is used, `--reserved-memory` is obligatory. Also, avoid the following configurations: 1. duplicates, i.e. 
the same NUMA node or memory type, but with a different value; @@ -152,7 +154,7 @@ Here is an example of a correct configuration: --feature-gates=MemoryManager=true --kube-reserved=cpu=4,memory=4Gi --system-reserved=cpu=1,memory=1Gi ---memory-manager-policy=static +--memory-manager-policy=Static --reserved-memory 0:memory=3Gi --reserved-memory 1:memory=2148Mi ``` Let us validate the configuration above: @@ -163,7 +165,7 @@ Let us validate the configuration above: ## Placing a Pod in the Guaranteed QoS class -If the selected policy is anything other than `none`, the Memory Manager identifies pods that are in the `Guaranteed` QoS class. The Memory Manager provides specific topology hints to the Topology Manager for each `Guaranteed` pod. For pods in a QoS class other than `Guaranteed`, the Memory Manager provides default topology hints to the Topology Manager. +If the selected policy is anything other than `None`, the Memory Manager identifies pods that are in the `Guaranteed` QoS class. The Memory Manager provides specific topology hints to the Topology Manager for each `Guaranteed` pod. For pods in a QoS class other than `Guaranteed`, the Memory Manager provides default topology hints to the Topology Manager. The following excerpts from pod manifests assign a pod to the `Guaranteed` QoS class. 
@@ -211,6 +213,10 @@ The following means can be used to troubleshoot the reason why a pod could not b - pod status - indicates topology affinity errors - system logs - include valuable information for debugging, e.g., about generated hints - state file - the dump of internal state of the Memory Manager (includes [Node Map and Memory Maps][2]) +- starting from v1.22, the [device plugin resource API](#device-plugin-resource-api) can be used + to retrieve information about the memory reserved for containers + + ### Pod status (TopologyAffinityError) {#TopologyAffinityError} @@ -270,7 +276,7 @@ spec: Next, let us log into the node where it was deployed and examine the state file in `/var/lib/kubelet/memory_manager_state`: ```json { - "policyName":"static", + "policyName":"Static", "machineState":{ "0":{ "numberOfAssignments":1, @@ -358,6 +364,10 @@ For example, the total amount of free "conventional" memory in the group can be The line `"systemReserved":3221225472` indicates that the administrator of this node reserved `3221225472` bytes (i.e. `3Gi`) to serve kubelet and system processes at NUMA node `0`, by using `--reserved-memory` flag. +### Device plugin resource API + +By employing the [API](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/), the information about reserved memory for each container can be retrieved, which is contained in protobuf `ContainerMemory` message. This information can be retrieved solely for pods in Guaranteed QoS class. 
+ ## {{% heading "whatsnext" %}} - [Memory Manager KEP: Design Overview][4] diff --git a/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/_index.md b/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents.md b/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents.md index dd813754e4..ddfefb6f49 100644 --- a/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents.md +++ b/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents.md @@ -78,3 +78,4 @@ telemetry agents on the node, make sure to check with the vendor of the agent wh We keep the work in progress version of migration instructions for various telemetry and security agent vendors in [Google doc](https://docs.google.com/document/d/1ZFi4uKit63ga5sxEiZblfb-c23lFhvy6RXVPikS8wf0/edit#). Please contact the vendor to get up to date instructions for migrating from dockershim. + diff --git a/content/en/docs/tasks/administer-cluster/namespaces.md b/content/en/docs/tasks/administer-cluster/namespaces.md index 2934e1c0f7..231de37e26 100644 --- a/content/en/docs/tasks/administer-cluster/namespaces.md +++ b/content/en/docs/tasks/administer-cluster/namespaces.md @@ -78,7 +78,8 @@ A namespace can be in one of two phases: * `Active` the namespace is in use * `Terminating` the namespace is being deleted, and can not be used for new objects -See the [design doc](https://git.k8s.io/community/contributors/design-proposals/architecture/namespaces.md#phases) for more details. +For more details, see [Namespace](/docs/reference/kubernetes-api/cluster-resources/namespace-v1/) +in the API reference. 
## Creating a new namespace @@ -313,7 +314,7 @@ across namespaces, you need to use the fully qualified domain name (FQDN). * Learn more about [setting the namespace preference](/docs/concepts/overview/working-with-objects/namespaces/#setting-the-namespace-preference). * Learn more about [setting the namespace for a request](/docs/concepts/overview/working-with-objects/namespaces/#setting-the-namespace-for-a-request) -* See [namespaces design](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/architecture/namespaces.md). +* See [namespaces design](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/namespaces.md). diff --git a/content/en/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md b/content/en/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md index 948893d3ea..74b8d2182e 100644 --- a/content/en/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md @@ -24,45 +24,59 @@ For background on Cilium, read the [Introduction to Cilium](https://docs.cilium. ## Deploying Cilium on Minikube for Basic Testing To get familiar with Cilium easily you can follow the -[Cilium Kubernetes Getting Started Guide](https://docs.cilium.io/en/stable/gettingstarted/minikube/) +[Cilium Kubernetes Getting Started Guide](https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/) to perform a basic DaemonSet installation of Cilium in minikube. 
-To start minikube, minimal version required is >= v1.3.1, run the with the +To start minikube, minimal version required is >= v1.5.2, run the with the following arguments: ```shell minikube version ``` ``` -minikube version: v1.3.1 +minikube version: v1.5.2 ``` ```shell -minikube start --network-plugin=cni --memory=4096 +minikube start --network-plugin=cni ``` -Mount the BPF filesystem: +For minikube you can install Cilium using its CLI tool. Cilium will +automatically detect the cluster configuration and will install the appropriate +components for a successful installation: ```shell -minikube ssh -- sudo mount bpffs -t bpf /sys/fs/bpf -``` - -For minikube you can deploy this simple ''all-in-one'' YAML file that includes -DaemonSet configurations for Cilium as well as appropriate RBAC settings: - -```shell -kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/install/kubernetes/quick-install.yaml +curl -LO https://github.com/cilium/cilium-cli/releases/latest/download/cilium-linux-amd64.tar.gz +sudo tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin +rm cilium-linux-amd64.tar.gz +cilium install ``` ``` -configmap/cilium-config created -serviceaccount/cilium created -serviceaccount/cilium-operator created -clusterrole.rbac.authorization.k8s.io/cilium created -clusterrole.rbac.authorization.k8s.io/cilium-operator created -clusterrolebinding.rbac.authorization.k8s.io/cilium created -clusterrolebinding.rbac.authorization.k8s.io/cilium-operator created -daemonset.apps/cilium create -deployment.apps/cilium-operator created +🔮 Auto-detected Kubernetes kind: minikube +✨ Running "minikube" validation checks +✅ Detected minikube version "1.20.0" +ℹ️ Cilium version not set, using default version "v1.10.0" +🔮 Auto-detected cluster name: minikube +🔮 Auto-detected IPAM mode: cluster-pool +🔮 Auto-detected datapath mode: tunnel +🔑 Generating CA... 
+2021/05/27 02:54:44 [INFO] generate received request +2021/05/27 02:54:44 [INFO] received CSR +2021/05/27 02:54:44 [INFO] generating key: ecdsa-256 +2021/05/27 02:54:44 [INFO] encoded CSR +2021/05/27 02:54:44 [INFO] signed certificate with serial number 48713764918856674401136471229482703021230538642 +🔑 Generating certificates for Hubble... +2021/05/27 02:54:44 [INFO] generate received request +2021/05/27 02:54:44 [INFO] received CSR +2021/05/27 02:54:44 [INFO] generating key: ecdsa-256 +2021/05/27 02:54:44 [INFO] encoded CSR +2021/05/27 02:54:44 [INFO] signed certificate with serial number 3514109734025784310086389188421560613333279574 +🚀 Creating Service accounts... +🚀 Creating Cluster roles... +🚀 Creating ConfigMap... +🚀 Creating Agent DaemonSet... +🚀 Creating Operator Deployment... +⌛ Waiting for Cilium to be installed... ``` The remainder of the Getting Started Guide explains how to enforce both L3/L4 @@ -85,14 +99,14 @@ Deploying a cluster with Cilium adds Pods to the `kube-system` namespace. To see this list of Pods run: ```shell -kubectl get pods --namespace=kube-system +kubectl get pods --namespace=kube-system -l k8s-app=cilium ``` You'll see a list of Pods similar to this: ```console -NAME READY STATUS RESTARTS AGE -cilium-6rxbd 1/1 Running 0 1m +NAME READY STATUS RESTARTS AGE +cilium-kkdhz 1/1 Running 0 3m23s ... 
``` diff --git a/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md b/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md index 4ec3b428d0..466910f30f 100644 --- a/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md +++ b/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md @@ -8,7 +8,14 @@ min-kubernetes-server-version: v1.11 --- -{{< feature-state for_k8s_version="v1.11" state="beta" >}} +{{< feature-state for_k8s_version="v1.22" state="deprecated" >}} + +{{< caution >}} +[Dynamic Kubelet Configuration](https://github.com/kubernetes/enhancements/issues/281) +feature is deprecated and should not be used. +Please switch to alternative means distributing configuration to the Nodes of your cluster. +{{< /caution >}} + [Dynamic Kubelet Configuration](https://github.com/kubernetes/enhancements/issues/281) allows you to change the configuration of each diff --git a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md index a5661263f2..d4cce14870 100644 --- a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md +++ b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md @@ -17,33 +17,27 @@ itself. Unless resources are set aside for these system daemons, pods and system daemons compete for resources and lead to resource starvation issues on the node. -The `kubelet` exposes a feature named `Node Allocatable` that helps to reserve +The `kubelet` exposes a feature named 'Node Allocatable' that helps to reserve compute resources for system daemons. Kubernetes recommends cluster -administrators to configure `Node Allocatable` based on their workload density +administrators to configure 'Node Allocatable' based on their workload density on each node. 
- - - ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} Your Kubernetes server must be at or later than version 1.17 to use the kubelet command line option `--reserved-cpus` to set an [explicitly reserved CPU list](#explicitly-reserved-cpu-list). - - ## Node Allocatable ![node capacity](/images/docs/node-capacity.svg) -`Allocatable` on a Kubernetes node is defined as the amount of compute resources +'Allocatable' on a Kubernetes node is defined as the amount of compute resources that are available for pods. The scheduler does not over-subscribe -`Allocatable`. `CPU`, `memory` and `ephemeral-storage` are supported as of now. +'Allocatable'. 'CPU', 'memory' and 'ephemeral-storage' are supported as of now. Node Allocatable is exposed as part of `v1.Node` object in the API and as part of `kubectl describe node` in the CLI. @@ -97,8 +91,7 @@ flag. It is recommended that the kubernetes system daemons are placed under a top level control group (`runtime.slice` on systemd machines for example). Each system daemon should ideally run within its own child control group. Refer to -[this -doc](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md#recommended-cgroups-setup) +[the design proposal](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md#recommended-cgroups-setup) for more details on recommended control group hierarchy. Note that Kubelet **does not** create `--kube-reserved-cgroup` if it doesn't @@ -109,7 +102,6 @@ exist. Kubelet will fail if an invalid cgroup is specified. - **Kubelet Flag**: `--system-reserved=[cpu=100m][,][memory=100Mi][,][ephemeral-storage=1Gi][,][pid=1000]` - **Kubelet Flag**: `--system-reserved-cgroup=` - `system-reserved` is meant to capture resource reservation for OS system daemons like `sshd`, `udev`, etc. 
`system-reserved` should reserve `memory` for the `kernel` too since `kernel` memory is not accounted to pods in Kubernetes at this time. @@ -127,13 +119,14 @@ kubelet flag. It is recommended that the OS system daemons are placed under a top level control group (`system.slice` on systemd machines for example). -Note that Kubelet **does not** create `--system-reserved-cgroup` if it doesn't -exist. Kubelet will fail if an invalid cgroup is specified. +Note that `kubelet` **does not** create `--system-reserved-cgroup` if it doesn't +exist. `kubelet` will fail if an invalid cgroup is specified. ### Explicitly Reserved CPU List + {{< feature-state for_k8s_version="v1.17" state="stable" >}} -- **Kubelet Flag**: `--reserved-cpus=0-3` +**Kubelet Flag**: `--reserved-cpus=0-3` `reserved-cpus` is meant to define an explicit CPU set for OS system daemons and kubernetes system daemons. `reserved-cpus` is for systems that do not intend to @@ -154,14 +147,15 @@ For example: in Centos, you can do this using the tuned toolset. ### Eviction Thresholds -- **Kubelet Flag**: `--eviction-hard=[memory.available<500Mi]` +**Kubelet Flag**: `--eviction-hard=[memory.available<500Mi]` Memory pressure at the node level leads to System OOMs which affects the entire node and all pods running on it. Nodes can go offline temporarily until memory has been reclaimed. To avoid (or reduce the probability of) system OOMs kubelet -provides [`Out of Resource`](/docs/tasks/administer-cluster/out-of-resource/) management. Evictions are +provides [out of resource](/docs/concepts/scheduling-eviction/node-pressure-eviction/) +management. Evictions are supported for `memory` and `ephemeral-storage` only. By reserving some memory via -`--eviction-hard` flag, the `kubelet` attempts to `evict` pods whenever memory +`--eviction-hard` flag, the `kubelet` attempts to evict pods whenever memory availability on the node drops below the reserved value. 
Hypothetically, if system daemons did not exist on a node, pods cannot use more than `capacity - eviction-hard`. For this reason, resources reserved for evictions are not @@ -169,17 +163,17 @@ available for pods. ### Enforcing Node Allocatable -- **Kubelet Flag**: `--enforce-node-allocatable=pods[,][system-reserved][,][kube-reserved]` +**Kubelet Flag**: `--enforce-node-allocatable=pods[,][system-reserved][,][kube-reserved]` -The scheduler treats `Allocatable` as the available `capacity` for pods. +The scheduler treats 'Allocatable' as the available `capacity` for pods. -`kubelet` enforce `Allocatable` across pods by default. Enforcement is performed +`kubelet` enforces 'Allocatable' across pods by default. Enforcement is performed by evicting pods whenever the overall usage across all pods exceeds -`Allocatable`. More details on eviction policy can be found -[here](/docs/tasks/administer-cluster/out-of-resource/#eviction-policy). This enforcement is controlled by +'Allocatable'. More details on eviction policy can be found +on the [node pressure eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/) +page. This enforcement is controlled by specifying `pods` value to the kubelet flag `--enforce-node-allocatable`. - Optionally, `kubelet` can be made to enforce `kube-reserved` and `system-reserved` by specifying `kube-reserved` & `system-reserved` values in the same flag. Note that to enforce `kube-reserved` or `system-reserved`, @@ -188,10 +182,10 @@ respectively. ## General Guidelines -System daemons are expected to be treated similar to `Guaranteed` pods. System +System daemons are expected to be treated similar to 'Guaranteed' pods. System daemons can burst within their bounding control groups and this behavior needs to be managed as part of kubernetes deployments. 
For example, `kubelet` should -have its own control group and share `Kube-reserved` resources with the +have its own control group and share `kube-reserved` resources with the container runtime. However, Kubelet cannot burst and use up all available Node resources if `kube-reserved` is enforced. @@ -200,9 +194,9 @@ to critical system services being CPU starved, OOM killed, or unable to fork on the node. The recommendation is to enforce `system-reserved` only if a user has profiled their nodes exhaustively to come up with precise estimates and is confident in their -ability to recover if any process in that group is oom_killed. +ability to recover if any process in that group is oom-killed. -* To begin with enforce `Allocatable` on `pods`. +* To begin with enforce 'Allocatable' on `pods`. * Once adequate monitoring and alerting is in place to track kube system daemons, attempt to enforce `kube-reserved` based on usage heuristics. * If absolutely necessary, enforce `system-reserved` over time. @@ -212,8 +206,6 @@ more features are added. Over time, kubernetes project will attempt to bring down utilization of node system daemons, but that is not a priority as of now. So expect a drop in `Allocatable` capacity in future releases. - - ## Example Scenario @@ -225,15 +217,15 @@ Here is an example to illustrate Node Allocatable computation: * `--system-reserved` is set to `cpu=500m,memory=1Gi,ephemeral-storage=1Gi` * `--eviction-hard` is set to `memory.available<500Mi,nodefs.available<10%` -Under this scenario, `Allocatable` will be `14.5 CPUs`, `28.5Gi` of memory and +Under this scenario, 'Allocatable' will be 14.5 CPUs, 28.5Gi of memory and `88Gi` of local storage. Scheduler ensures that the total memory `requests` across all pods on this node does -not exceed `28.5Gi` and storage doesn't exceed `88Gi`. 
-Kubelet evicts pods whenever the overall memory usage across pods exceeds `28.5Gi`, -or if overall disk usage exceeds `88Gi` If all processes on the node consume as -much CPU as they can, pods together cannot consume more than `14.5 CPUs`. +not exceed 28.5Gi and storage doesn't exceed 88Gi. +Kubelet evicts pods whenever the overall memory usage across pods exceeds 28.5Gi, +or if overall disk usage exceeds 88Gi. If all processes on the node consume as +much CPU as they can, pods together cannot consume more than 14.5 CPUs. If `kube-reserved` and/or `system-reserved` is not enforced and system daemons exceed their reservation, `kubelet` evicts pods whenever the overall node memory -usage is higher than `31.5Gi` or `storage` is greater than `90Gi` +usage is higher than 31.5Gi or `storage` is greater than 90Gi. diff --git a/content/en/docs/tasks/administer-cluster/safely-drain-node.md b/content/en/docs/tasks/administer-cluster/safely-drain-node.md index 0fc0a97ffc..04c908c592 100644 --- a/content/en/docs/tasks/administer-cluster/safely-drain-node.md +++ b/content/en/docs/tasks/administer-cluster/safely-drain-node.md @@ -29,7 +29,7 @@ This task also assumes that you have met the following prerequisites: ## (Optional) Configure a disruption budget {#configure-poddisruptionbudget} -To endure that your workloads remain available during maintenance, you can +To ensure that your workloads remain available during maintenance, you can configure a [PodDisruptionBudget](/docs/concepts/workloads/pods/disruptions/). If availability is important for any applications that run or could run on the node(s) @@ -109,6 +109,28 @@ Pod can be thought of as a kind of policy-controlled DELETE operation on the Pod itself. To attempt an eviction (more precisely: to attempt to *create* an Eviction), you POST an attempted operation. Here's an example: +{{< tabs name="Eviction_example" >}} +{{% tab name="policy/v1" %}} +{{< note >}} +`policy/v1` Eviction is available in v1.22+. 
Use `policy/v1beta1` with prior releases. +{{< /note >}} + +```json +{ + "apiVersion": "policy/v1", + "kind": "Eviction", + "metadata": { + "name": "quux", + "namespace": "default" + } +} +``` +{{% /tab %}} +{{% tab name="policy/v1beta1" %}} +{{< note >}} +Deprecated in v1.22 in favor of `policy/v1` +{{< /note >}} + ```json { "apiVersion": "policy/v1beta1", @@ -119,6 +141,8 @@ itself. To attempt an eviction (more precisely: to attempt to } } ``` +{{% /tab %}} +{{< /tabs >}} You can attempt an eviction using `curl`: diff --git a/content/en/docs/tasks/administer-cluster/securing-a-cluster.md b/content/en/docs/tasks/administer-cluster/securing-a-cluster.md index 090e292966..ba820cbe0a 100644 --- a/content/en/docs/tasks/administer-cluster/securing-a-cluster.md +++ b/content/en/docs/tasks/administer-cluster/securing-a-cluster.md @@ -26,7 +26,7 @@ and provides recommendations on overall security. ## Controlling access to the Kubernetes API -As Kubernetes is entirely API driven, controlling and limiting who can access the cluster and what actions +As Kubernetes is entirely API-driven, controlling and limiting who can access the cluster and what actions they are allowed to perform is the first line of defense. ### Use Transport Layer Security (TLS) for all API traffic @@ -40,7 +40,7 @@ potentially unsecured traffic. ### API Authentication Choose an authentication mechanism for the API servers to use that matches the common access patterns -when you install a cluster. For instance, small single user clusters may wish to use a simple certificate +when you install a cluster. For instance, small, single-user clusters may wish to use a simple certificate or static Bearer token approach. Larger clusters may wish to integrate an existing OIDC or LDAP server that allow users to be subdivided into groups. 
@@ -54,7 +54,7 @@ Consult the [authentication reference document](/docs/reference/access-authn-aut Once authenticated, every API call is also expected to pass an authorization check. Kubernetes ships an integrated [Role-Based Access Control (RBAC)](/docs/reference/access-authn-authz/rbac/) component that matches an incoming user or group to a set of permissions bundled into roles. These permissions combine verbs (get, create, delete) with -resources (pods, services, nodes) and can be namespace or cluster scoped. A set of out of the box +resources (pods, services, nodes) and can be namespace-scoped or cluster-scoped. A set of out-of-the-box roles are provided that offer reasonable default separation of responsibility depending on what actions a client might want to perform. It is recommended that you use the [Node](/docs/reference/access-authn-authz/node/) and @@ -69,8 +69,8 @@ With authorization, it is important to understand how updates on one object may other places. For instance, a user may not be able to create pods directly, but allowing them to create a deployment, which creates pods on their behalf, will let them create those pods indirectly. Likewise, deleting a node from the API will result in the pods scheduled to that node -being terminated and recreated on other nodes. The out of the box roles represent a balance -between flexibility and the common use cases, but more limited roles should be carefully reviewed +being terminated and recreated on other nodes. The out-of-the-box roles represent a balance +between flexibility and common use cases, but more limited roles should be carefully reviewed to prevent accidental escalation. You can make roles specific to your use case if the out-of-box ones don't meet your needs. Consult the [authorization reference section](/docs/reference/access-authn-authz/authorization/) for more information. 
@@ -104,7 +104,7 @@ reserved resources like memory, or to provide default limits when none are speci ### Controlling what privileges containers run with A pod definition contains a [security context](/docs/tasks/configure-pod-container/security-context/) -that allows it to request access to running as a specific Linux user on a node (like root), +that allows it to request access to run as a specific Linux user on a node (like root), access to run privileged or access the host network, and other controls that would otherwise allow it to run unfettered on a hosting node. [Pod security policies](/docs/concepts/policy/pod-security-policy/) can limit which users or service accounts can provide dangerous security context settings. For example, pod security policies can limit volume mounts, especially `hostPath`, which are aspects of a pod that should be controlled. @@ -155,10 +155,10 @@ within their namespaces. Many of the supported [Kubernetes networking providers] now respect network policy. Quota and limit ranges can also be used to control whether users may request node ports or -load balanced services, which on many clusters can control whether those users applications +load-balanced services, which on many clusters can control whether those users' applications are visible outside of the cluster. -Additional protections may be available that control network rules on a per plugin or per +Additional protections may be available that control network rules on a per-plugin or per- environment basis, such as per-node firewalls, physically separating cluster nodes to prevent cross talk, or advanced networking policy. @@ -169,7 +169,7 @@ By default these APIs are accessible by pods running on an instance and can cont credentials for that node, or provisioning data such as kubelet credentials. These credentials can be used to escalate within the cluster or to other cloud services under the same account. 
-When running Kubernetes on a cloud platform limit permissions given to instance credentials, use +When running Kubernetes on a cloud platform, limit permissions given to instance credentials, use [network policies](/docs/tasks/administer-cluster/declare-network-policy/) to restrict pod access to the metadata API, and avoid using provisioning data to deliver secrets. @@ -177,7 +177,7 @@ to the metadata API, and avoid using provisioning data to deliver secrets. By default, there are no restrictions on which nodes may run a pod. Kubernetes offers a [rich set of policies for controlling placement of pods onto nodes](/docs/concepts/scheduling-eviction/assign-pod-node/) -and the [taint based pod placement and eviction](/docs/concepts/scheduling-eviction/taint-and-toleration/) +and the [taint-based pod placement and eviction](/docs/concepts/scheduling-eviction/taint-and-toleration/) that are available to end users. For many clusters use of these policies to separate workloads can be a convention that authors adopt or enforce via tooling. @@ -223,8 +223,9 @@ do not use. The shorter the lifetime of a secret or credential the harder it is for an attacker to make use of that credential. Set short lifetimes on certificates and automate their rotation. Use an authentication provider that can control how long issued tokens are available and use short -lifetimes where possible. If you use service account tokens in external integrations, plan to -rotate those tokens frequently. For example, once the bootstrap phase is complete, a bootstrap token used for setting up nodes should be revoked or its authorization removed. +lifetimes where possible. If you use service-account tokens in external integrations, plan to +rotate those tokens frequently. For example, once the bootstrap phase is complete, a bootstrap +token used for setting up nodes should be revoked or its authorization removed. 
### Review third party integrations before enabling them @@ -246,7 +247,8 @@ and may grant an attacker significant visibility into the state of your cluster. your backups using a well reviewed backup and encryption solution, and consider using full disk encryption where possible. -Kubernetes supports [encryption at rest](/docs/tasks/administer-cluster/encrypt-data/), a feature introduced in 1.7, and beta since 1.13. This will encrypt `Secret` resources in etcd, preventing +Kubernetes supports [encryption at rest](/docs/tasks/administer-cluster/encrypt-data/), a feature +introduced in 1.7, and beta since 1.13. This will encrypt `Secret` resources in etcd, preventing parties that gain access to your etcd backups from viewing the content of those secrets. While this feature is currently beta, it offers an additional level of defense when backups are not encrypted or an attacker gains read access to etcd. diff --git a/content/en/docs/tasks/administer-cluster/use-cascading-deletion.md b/content/en/docs/tasks/administer-cluster/use-cascading-deletion.md new file mode 100644 index 0000000000..eb72d68de0 --- /dev/null +++ b/content/en/docs/tasks/administer-cluster/use-cascading-deletion.md @@ -0,0 +1,352 @@ +--- +title: Use Cascading Deletion in a Cluster +content_type: task +--- + + + +This page shows you how to specify the type of [cascading deletion](/docs/concepts/workloads/controllers/garbage-collection/#cascading-deletion) +to use in your cluster during {{}}. + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + +You also need to [create a sample Deployment](/docs/tasks/run-application/run-stateless-application-deployment/#creating-and-exploring-an-nginx-deployment) +to experiment with the different types of cascading deletion. You will need to +recreate the Deployment for each type. 
+ +## Check owner references on your pods + +Check that the `ownerReferences` field is present on your pods: + +```shell +kubectl get pods -l app=nginx --output=yaml +``` + +The output has an `ownerReferences` field similar to this: + +``` +apiVersion: v1 + ... + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: nginx-deployment-6b474476c4 + uid: 4fdcd81c-bd5d-41f7-97af-3a3b759af9a7 + ... +``` + +## Use foreground cascading deletion {#use-foreground-cascading-deletion} + +By default, Kubernetes uses [background cascading deletion](/docs/concepts/workloads/controllers/garbage-collection/#background-deletion) +to delete dependents of an object. You can switch to foreground cascading deletion +using either `kubectl` or the Kubernetes API, depending on the Kubernetes +version your cluster runs. {{}} + +{{}} +{{% tab name="Kubernetes 1.20.x and later" %}} +You can delete objects using foreground cascading deletion using `kubectl` or the +Kubernetes API. + +**Using kubectl** + +Run the following command: + + +```shell +kubectl delete deployment nginx-deployment --cascade=foreground +``` + +**Using the Kubernetes API** + +1. Start a local proxy session: + + ```shell + kubectl proxy --port=8080 + ``` + +1. Use `curl` to trigger deletion: + + ```shell + curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/deployments/nginx-deployment \ + -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Foreground"}' \ + -H "Content-Type: application/json" + ``` + + The output contains a `foregroundDeletion` {{}} + like this: + + ``` + "kind": "Deployment", + "apiVersion": "apps/v1", + "metadata": { + "name": "nginx-deployment", + "namespace": "default", + "uid": "d1ce1b02-cae8-4288-8a53-30e84d8fa505", + "resourceVersion": "1363097", + "creationTimestamp": "2021-07-08T20:24:37Z", + "deletionTimestamp": "2021-07-08T20:27:39Z", + "finalizers": [ + "foregroundDeletion" + ] + ... 
+ ``` + +{{% /tab %}} +{{% tab name="Versions prior to Kubernetes 1.20.x" %}} +You can delete objects using foreground cascading deletion by calling the +Kubernetes API. + +For details, read the [documentation for your Kubernetes version](/docs/home/supported-doc-versions/). + +1. Start a local proxy session: + + ```shell + kubectl proxy --port=8080 + ``` + +1. Use `curl` to trigger deletion: + + ```shell + curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/deployments/nginx-deployment \ + -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Foreground"}' \ + -H "Content-Type: application/json" + ``` + + The output contains a `foregroundDeletion` {{}} + like this: + + ``` + "kind": "Deployment", + "apiVersion": "apps/v1", + "metadata": { + "name": "nginx-deployment", + "namespace": "default", + "uid": "d1ce1b02-cae8-4288-8a53-30e84d8fa505", + "resourceVersion": "1363097", + "creationTimestamp": "2021-07-08T20:24:37Z", + "deletionTimestamp": "2021-07-08T20:27:39Z", + "finalizers": [ + "foregroundDeletion" + ] + ... + ``` +{{% /tab %}} +{{}} + +## Use background cascading deletion {#use-background-cascading-deletion} + +1. [Create a sample Deployment](/docs/tasks/run-application/run-stateless-application-deployment/#creating-and-exploring-an-nginx-deployment). +1. Use either `kubectl` or the Kubernetes API to delete the Deployment, + depending on the Kubernetes version your cluster runs. {{}} + +{{}} +{{% tab name="Kubernetes version 1.20.x and later" %}} + +You can delete objects using background cascading deletion using `kubectl` +or the Kubernetes API. + +Kubernetes uses background cascading deletion by default, and does so +even if you run the following commands without the `--cascade` flag or the +`propagationPolicy` argument. + +**Using kubectl** + +Run the following command: + +```shell +kubectl delete deployment nginx-deployment --cascade=background +``` + +**Using the Kubernetes API** + +1. 
Start a local proxy session: + + ```shell + kubectl proxy --port=8080 + ``` + +1. Use `curl` to trigger deletion: + + ```shell + curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/deployments/nginx-deployment \ + -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Background"}' \ + -H "Content-Type: application/json" + ``` + + The output is similar to this: + + ``` + "kind": "Status", + "apiVersion": "v1", + ... + "status": "Success", + "details": { + "name": "nginx-deployment", + "group": "apps", + "kind": "deployments", + "uid": "cc9eefb9-2d49-4445-b1c1-d261c9396456" + } + ``` +{{% /tab %}} +{{% tab name="Versions prior to Kubernetes 1.20.x" %}} +Kubernetes uses background cascading deletion by default, and does so +even if you run the following commands without the `--cascade` flag or the +`propagationPolicy: Background` argument. + +For details, read the [documentation for your Kubernetes version](/docs/home/supported-doc-versions/). + +**Using kubectl** + +Run the following command: + +```shell +kubectl delete deployment nginx-deployment --cascade=true +``` + +**Using the Kubernetes API** + +1. Start a local proxy session: + + ```shell + kubectl proxy --port=8080 + ``` + +1. Use `curl` to trigger deletion: + + ```shell + curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/deployments/nginx-deployment \ + -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Background"}' \ + -H "Content-Type: application/json" + ``` + + The output is similar to this: + + ``` + "kind": "Status", + "apiVersion": "v1", + ... + "status": "Success", + "details": { + "name": "nginx-deployment", + "group": "apps", + "kind": "deployments", + "uid": "cc9eefb9-2d49-4445-b1c1-d261c9396456" + } + ``` +{{% /tab %}} +{{}} + + +## Delete owner objects and orphan dependents {#set-orphan-deletion-policy} + +By default, when you tell Kubernetes to delete an object, the +{{}} also deletes +dependent objects. 
You can make Kubernetes *orphan* these dependents using +`kubectl` or the Kubernetes API, depending on the Kubernetes version your +cluster runs. {{}} + +{{}} +{{% tab name="Kubernetes version 1.20.x and later" %}} + +**Using kubectl** + +Run the following command: + +```shell +kubectl delete deployment nginx-deployment --cascade=orphan +``` + +**Using the Kubernetes API** + +1. Start a local proxy session: + + ```shell + kubectl proxy --port=8080 + ``` + +1. Use `curl` to trigger deletion: + + ```shell + curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/deployments/nginx-deployment \ + -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Orphan"}' \ + -H "Content-Type: application/json" + ``` + + The output contains `orphan` in the `finalizers` field, similar to this: + + ``` + "kind": "Deployment", + "apiVersion": "apps/v1", + "namespace": "default", + "uid": "6f577034-42a0-479d-be21-78018c466f1f", + "creationTimestamp": "2021-07-09T16:46:37Z", + "deletionTimestamp": "2021-07-09T16:47:08Z", + "deletionGracePeriodSeconds": 0, + "finalizers": [ + "orphan" + ], + ... + ``` + +{{% /tab %}} +{{% tab name="Versions prior to Kubernetes 1.20.x" %}} + +For details, read the [documentation for your Kubernetes version](/docs/home/supported-doc-versions/). + +**Using kubectl** + +Run the following command: + +```shell +kubectl delete deployment nginx-deployment --cascade=false +``` + +**Using the Kubernetes API** + +1. Start a local proxy session: + + ```shell + kubectl proxy --port=8080 + ``` + +1. 
Use `curl` to trigger deletion: + + ```shell + curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/deployments/nginx-deployment \ + -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Orphan"}' \ + -H "Content-Type: application/json" + ``` + + The output contains `orphan` in the `finalizers` field, similar to this: + + ``` + "kind": "Deployment", + "apiVersion": "apps/v1", + "namespace": "default", + "uid": "6f577034-42a0-479d-be21-78018c466f1f", + "creationTimestamp": "2021-07-09T16:46:37Z", + "deletionTimestamp": "2021-07-09T16:47:08Z", + "deletionGracePeriodSeconds": 0, + "finalizers": [ + "orphan" + ], + ... + ``` +{{% /tab %}} +{{}} + +You can check that the Pods managed by the Deployment are still running: + +```shell +kubectl get pods -l app=nginx +``` + +## {{% heading "whatsnext" %}} + +* Learn about [owners and dependents](/docs/concepts/overview/working-with-objects/owners-dependents/) in Kubernetes. +* Learn about Kubernetes [finalizers](/docs/concepts/overview/working-with-objects/finalizers/). +* Learn about [garbage collection](/docs/concepts/workloads/controllers/garbage-collection/). \ No newline at end of file diff --git a/content/en/docs/tasks/configmap-secret/_index.md b/content/en/docs/tasks/configmap-secret/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md b/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md index b2aace7057..6fb5cdca3d 100644 --- a/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md +++ b/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md @@ -1,5 +1,5 @@ --- -title: Managing Secret using Configuration File +title: Managing Secrets using Configuration File content_type: task weight: 20 description: Creating Secret objects using resource configuration file. 
@@ -193,6 +193,6 @@ kubectl delete secret mysecret ## {{% heading "whatsnext" %}} - Read more about the [Secret concept](/docs/concepts/configuration/secret/) -- Learn how to [manage Secret with the `kubectl` command](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) -- Learn how to [manage Secret using kustomize](/docs/tasks/configmap-secret/managing-secret-using-kustomize/) +- Learn how to [manage Secrets with the `kubectl` command](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) +- Learn how to [manage Secrets using kustomize](/docs/tasks/configmap-secret/managing-secret-using-kustomize/) diff --git a/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md b/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md index fe63c2434d..dad86e36df 100644 --- a/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md +++ b/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md @@ -67,7 +67,7 @@ single quotes (`'`). For example, if your password is `S!B\*d$zDsb=`, run the following command: ```shell -kubectl create secret generic dev-db-secret \ +kubectl create secret generic db-user-pass \ --from-literal=username=devuser \ --from-literal=password='S!B\*d$zDsb=' ``` diff --git a/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md b/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md index 4e78a4c5f0..db9f5b40f3 100644 --- a/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md +++ b/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md @@ -1,5 +1,5 @@ --- -title: Managing Secret using Kustomize +title: Managing Secrets using Kustomize content_type: task weight: 30 description: Creating Secret objects using kustomization.yaml file. 
@@ -135,6 +135,6 @@ kubectl delete secret db-user-pass-96mffmfh4k ## {{% heading "whatsnext" %}} - Read more about the [Secret concept](/docs/concepts/configuration/secret/) -- Learn how to [manage Secret with the `kubectl` command](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) -- Learn how to [manage Secret using config file](/docs/tasks/configmap-secret/managing-secret-using-config-file/) +- Learn how to [manage Secrets with the `kubectl` command](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) +- Learn how to [manage Secrets using config file](/docs/tasks/configmap-secret/managing-secret-using-config-file/) diff --git a/content/en/docs/tasks/configure-pod-container/_index.md b/content/en/docs/tasks/configure-pod-container/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md index 77a9ac7647..d9ab2056da 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md +++ b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md @@ -145,7 +145,7 @@ Any code greater than or equal to 200 and less than 400 indicates success. Any other code indicates failure. You can see the source code for the server in -[server.go](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/test/images/agnhost/liveness/server.go). +[server.go](https://github.com/kubernetes/kubernetes/blob/master/test/images/agnhost/liveness/server.go). For the first 10 seconds that the container is alive, the `/healthz` handler returns a status of 200. After that, the handler returns a status of 500. @@ -429,7 +429,7 @@ to resolve it. 
### Probe-level `terminationGracePeriodSeconds` -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} +{{< feature-state for_k8s_version="v1.22" state="beta" >}} Prior to release 1.21, the pod-level `terminationGracePeriodSeconds` was used for terminating a container that failed its liveness or startup probe. This @@ -437,11 +437,26 @@ coupling was unintended and may have resulted in failed containers taking an unusually long time to restart when a pod-level `terminationGracePeriodSeconds` was set. -In 1.21, when the feature flag `ProbeTerminationGracePeriod` is enabled, users -can specify a probe-level `terminationGracePeriodSeconds` as part of the probe -specification. When the feature flag is enabled, and both a pod- and -probe-level `terminationGracePeriodSeconds` are set, the kubelet will use the -probe-level value. +In 1.21 and beyond, when the feature gate `ProbeTerminationGracePeriod` is +enabled, users can specify a probe-level `terminationGracePeriodSeconds` as +part of the probe specification. When the feature gate is enabled, and both a +pod- and probe-level `terminationGracePeriodSeconds` are set, the kubelet will +use the probe-level value. + +{{< note >}} +As of Kubernetes 1.22, the `ProbeTerminationGracePeriod` feature gate is only +available on the API Server. The kubelet always honors the probe-level +`terminationGracePeriodSeconds` field if it is present on a Pod. + +If you have existing Pods where the `terminationGracePeriodSeconds` field is set and +you no longer wish to use per-probe termination grace periods, you must delete +those existing Pods. + +When you (or the control plane, or some other component) create replacement +Pods, and the feature gate `ProbeTerminationGracePeriod` is disabled, then the +API server ignores the Pod-level `terminationGracePeriodSeconds` field, even if +a Pod or pod template specifies it. 
+{{< /note >}} For example, diff --git a/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md b/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md index ad99a05c27..ca71e7a721 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md +++ b/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md @@ -83,5 +83,5 @@ kubectl delete secret user pass ## {{% heading "whatsnext" %}} * Learn more about [`projected`](/docs/concepts/storage/volumes/#projected) volumes. -* Read the [all-in-one volume](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/node/all-in-one-volume.md) design document. +* Read the [all-in-one volume](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/all-in-one-volume.md) design document. diff --git a/content/en/docs/tasks/configure-pod-container/configure-runasusername.md b/content/en/docs/tasks/configure-pod-container/configure-runasusername.md index 12c10a9ddf..9ddcac270f 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-runasusername.md +++ b/content/en/docs/tasks/configure-pod-container/configure-runasusername.md @@ -23,7 +23,7 @@ You need to have a Kubernetes cluster and the kubectl command-line tool must be ## Set the Username for a Pod -To specify the username with which to execute the Pod's container processes, include the `securityContext` field ([PodSecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritycontext-v1-core) in the Pod specification, and within it, the `windowsOptions` ([WindowsSecurityContextOptions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#windowssecuritycontextoptions-v1-core) field containing the `runAsUserName` field. 
+To specify the username with which to execute the Pod's container processes, include the `securityContext` field ([PodSecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritycontext-v1-core)) in the Pod specification, and within it, the `windowsOptions` ([WindowsSecurityContextOptions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#windowssecuritycontextoptions-v1-core)) field containing the `runAsUserName` field. The Windows security context options that you specify for a Pod apply to all Containers and init Containers in the Pod. @@ -63,7 +63,7 @@ ContainerUser ## Set the Username for a Container -To specify the username with which to execute a Container's processes, include the `securityContext` field ([SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core)) in the Container manifest, and within it, the `windowsOptions` ([WindowsSecurityContextOptions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#windowssecuritycontextoptions-v1-core) field containing the `runAsUserName` field. +To specify the username with which to execute a Container's processes, include the `securityContext` field ([SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core)) in the Container manifest, and within it, the `windowsOptions` ([WindowsSecurityContextOptions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#windowssecuritycontextoptions-v1-core)) field containing the `runAsUserName` field. The Windows security context options that you specify for a Container apply only to that individual Container, and they override the settings made at the Pod level. 
diff --git a/content/en/docs/tasks/configure-pod-container/configure-service-account.md b/content/en/docs/tasks/configure-pod-container/configure-service-account.md index 505ec7d755..e5a0e26ea0 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/en/docs/tasks/configure-pod-container/configure-service-account.md @@ -349,8 +349,11 @@ JSON Web Key Set (JWKS) at `/openid/v1/jwks`. The OpenID Provider Configuration is sometimes referred to as the _discovery document_. Clusters include a default RBAC ClusterRole called -`system:service-account-issuer-discovery`. No role bindings are provided -by default. Administrators may, for example, choose whether to bind the role to +`system:service-account-issuer-discovery`. A default RBAC ClusterRoleBinding +assigns this role to the `system:serviceaccounts` group, which all service +accounts implicitly belong to. This allows pods running on the cluster to access +the service account discovery document via their mounted service account token. +Administrators may, additionally, choose to bind the role to `system:authenticated` or `system:unauthenticated` depending on their security requirements and which external systems they intend to federate with. @@ -380,5 +383,5 @@ JWKS URI is required to use the `https` scheme. 
See also: - [Cluster Admin Guide to Service Accounts](/docs/reference/access-authn-authz/service-accounts-admin/) -- [Service Account Signing Key Retrieval KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/20190730-oidc-discovery.md) +- [Service Account Signing Key Retrieval KEP](https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/1393-oidc-discovery) - [OIDC Discovery Spec](https://openid.net/specs/openid-connect-discovery-1_0.html) diff --git a/content/en/docs/tasks/configure-pod-container/create-hostprocess-pod.md b/content/en/docs/tasks/configure-pod-container/create-hostprocess-pod.md new file mode 100644 index 0000000000..2ab2bd3661 --- /dev/null +++ b/content/en/docs/tasks/configure-pod-container/create-hostprocess-pod.md @@ -0,0 +1,214 @@ +--- +title: Create a Windows HostProcess Pod +content_type: task +weight: 20 +min-kubernetes-server-version: 1.22 +--- + + + +{{< feature-state for_k8s_version="v1.22" state="alpha" >}} + +Windows HostProcess containers enable you to run containerized +workloads on a Windows host. These containers operate as +normal processes but have access to the host network namespace, +storage, and devices when given the appropriate user privileges. +HostProcess containers can be used to deploy network plugins, +storage configurations, device plugins, kube-proxy, and other +components to Windows nodes without the need for dedicated proxies or +the direct installation of host services. + +Administrative tasks such as installation of security patches, event +log collection, and more can be performed without requiring cluster operators to +log onto each Windows node. HostProcess containers can run as any user that is +available on the host or is in the domain of the host machine, allowing administrators +to restrict resource access through user permissions. 
While neither filesystem nor process +isolation is supported, a new volume is created on the host upon starting the container +to give it a clean and consolidated workspace. HostProcess containers can also be built on +top of existing Windows base images and do not inherit the same +[compatibility requirements](https://docs.microsoft.com/virtualization/windowscontainers/deploy-containers/version-compatibility) +as Windows server containers, meaning that the version of the base images does not need +to match that of the host. HostProcess containers also support +[volume mounts](./create-hostprocess-pod#volume-mounts) within the container volume. + +### When should I use a Windows HostProcess container? + +- When you need to perform tasks which require the networking namespace of the host. +HostProcess containers have access to the host's network interfaces and IP addresses. +- You need access to resources on the host such as the filesystem, event logs, etc. +- Installation of specific device drivers or Windows services. +- Consolidation of administrative tasks and security policies. This reduces the degree of +privileges needed by Windows nodes. + + +## {{% heading "prerequisites" %}} + +{{% version-check %}} + +To enable HostProcess containers while in Alpha you need to pass the following feature gate flag to +**kubelet** and **kube-apiserver**. +See [Feature Gates](/docs/reference/command-line-tools-reference/feature-gates/#overview) +documentation for more details. + +``` +--feature-gates=WindowsHostProcessContainers=true +``` + +You can use the latest version of Containerd (v1.5.4+) with the following settings using the containerd +v2 configuration. Add these annotations to any runtime configurations where you wish to enable the +HostProcess container feature. 
+ + +``` +[plugins] + [plugins."io.containerd.grpc.v1.cri"] + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] + container_annotations = ["microsoft.com/hostprocess-container"] + pod_annotations = ["microsoft.com/hostprocess-container"] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runhcs-wcow-process] + container_annotations = ["microsoft.com/hostprocess-container"] + pod_annotations = ["microsoft.com/hostprocess-container"] +``` + +The current versions of containerd ship with a version of hcsshim that does not have support. +You will need to build a version of hcsshim from the main branch following the +[instructions in hcsshim](https://github.com/Microsoft/hcsshim/#containerd-shim). +Once the containerd shim is built you can replace the file in your containerd installation. +For example if you followed the instructions to +[install containerd](/docs/setup/production-environment/container-runtimes/#containerd) +replace the `containerd-shim-runhcs-v1.exe` installed at `$Env:ProgramFiles\containerd` with the newly built shim. + +## Limitations + +- HostProcess containers require version 1.5.4 or higher of the containerd {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}. +- As of v1.22 HostProcess pods can only contain HostProcess containers. This is a current limitation +of the Windows OS; non-privileged Windows containers cannot share a vNIC with the host IP namespace. +- HostProcess containers run as a process on the host and do not have any degree of +isolation other than resource constraints imposed on the HostProcess user account. Neither +filesystem nor Hyper-V isolation is supported for HostProcess containers. +- Volume mounts are supported and are mounted under the container volume. 
+See [Volume Mounts](#volume-mounts) +- A limited set of host user accounts are available for HostProcess containers by default. +See [Choosing a User Account](#choosing-a-user-account). +- Resource limits (disk, memory, cpu count) are supported in the same fashion as processes +on the host. +- Both Named pipe mounts and Unix domain sockets are **not** currently supported and should instead +be accessed via their path on the host (e.g. \\\\.\\pipe\\\*) + +## HostProcess Pod configuration requirements + +Enabling a Windows HostProcess pod requires setting the right configurations in the pod security +configuration. Of the policies defined in the [Pod Security Standards](/docs/concepts/security/pod-security-standards) +HostProcess pods are disallowed by the baseline and restricted policies. It is therefore recommended +that HostProcess pods run in alignment with the privileged profile. + +When running under the privileged policy, here are +the configurations which need to be set to enable the creation of a HostProcess pod: + + + + + + + + + + + + + + + + + + + + + + + + + +
Privileged policy specification
ControlPolicy
Windows HostProcess +

Windows pods offer the ability to run + HostProcess containers which enables privileged access to the Windows node.

+

Allowed Values

+
    +
  • true
  • +
+
Host Networking +

Will be in host network by default initially. Support + to set network to a different compartment may be desirable in + the future.

+

Allowed Values

+
    +
  • true
  • +
+
runAsUsername +

Specification of which user the HostProcess container should run as is required for the pod spec.

+

Allowed Values

+
    +
  • NT AUTHORITY\SYSTEM
  • +
  • NT AUTHORITY\Local service
  • +
  • NT AUTHORITY\NetworkService
  • +
+
runAsNonRoot +

Because HostProcess containers have privileged access to the host, the runAsNonRoot field cannot be set to true.

+

Allowed Values

+
    +
  • Undefined/Nil
  • +
  • false
  • +
+
+ +### Example Manifest (excerpt) + +```yaml +spec: + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\Local service" + hostNetwork: true + containers: + - name: test + image: image1:latest + command: + - ping + - -t + - 127.0.0.1 + nodeSelector: + "kubernetes.io/os": windows +``` + +## Volume Mounts + +HostProcess containers support the ability to mount volumes within the container volume space. +Applications running inside the container can access volume mounts directly via relative or +absolute paths. An environment variable `$CONTAINER_SANDBOX_MOUNT_POINT` is set upon container +creation and provides the absolute host path to the container volume. Relative paths are based +upon the `Pod.containers.volumeMounts.mountPath` configuration. + +### Example {#volume-mount-example} + +To access service account tokens the following path structures are supported within the container: + +`.\var\run\secrets\kubernetes.io\serviceaccount\` + +`$CONTAINER_SANDBOX_MOUNT_POINT\var\run\secrets\kubernetes.io\serviceaccount\` + +## Choosing a User Account + +HostProcess containers support the ability to run as one of three supported Windows service accounts: + +- **[LocalSystem](https://docs.microsoft.com/en-us/windows/win32/services/localsystem-account)** +- **[LocalService](https://docs.microsoft.com/en-us/windows/win32/services/localservice-account)** +- **[NetworkService](https://docs.microsoft.com/en-us/windows/win32/services/networkservice-account)** + +You should select an appropriate Windows service account for each HostProcess +container, aiming to limit the degree of privileges so as to avoid accidental (or even +malicious) damage to the host. The LocalSystem service account has the highest level +of privilege of the three and should be used only if absolutely necessary. Where possible, +use the LocalService service account as it is the least privileged of the three options. 
diff --git a/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md b/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md new file mode 100644 index 0000000000..c3dcb0e995 --- /dev/null +++ b/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md @@ -0,0 +1,54 @@ +--- +title: Enforce Pod Security Standards by Configuring the Built-in Admission Controller +reviewers: +- tallclair +- liggitt +content_type: task +min-kubernetes-server-version: v1.22 +--- + +As of v1.22, Kubernetes provides a built-in [admission controller](/docs/reference/access-authn-authz/admission-controllers/#podsecurity) +to enforce the [Pod Security Standards](/docs/concepts/security/pod-security-standards). +You can configure this admission controller to set cluster-wide defaults and [exemptions](#exemptions). + +## {{% heading "prerequisites" %}} + +{{% version-check %}} + +- Enable the `PodSecurity` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features). + +## Configure the Admission Controller + +```yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: AdmissionConfiguration +plugins: +- name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1alpha1 + kind: PodSecurityConfiguration + # Defaults applied when a mode label is not set. + # + # Level label values must be one of: + # - "privileged" (default) + # - "baseline" + # - "restricted" + # + # Version label values must be one of: + # - "latest" (default) + # - specific version like "v{{< skew latestVersion >}}" + defaults: + enforce: "privileged" + enforce-version: "latest" + audit: "privileged" + audit-version: "latest" + warn: "privileged" + warn-version: "latest" + exemptions: + # Array of authenticated usernames to exempt. + usernames: [] + # Array of runtime class names to exempt. + runtimeClassNames: [] + # Array of namespaces to exempt. 
+ namespaces: [] +``` \ No newline at end of file diff --git a/content/en/docs/tasks/configure-pod-container/enforce-standards-namespace-labels.md b/content/en/docs/tasks/configure-pod-container/enforce-standards-namespace-labels.md new file mode 100644 index 0000000000..9a4c3a44ed --- /dev/null +++ b/content/en/docs/tasks/configure-pod-container/enforce-standards-namespace-labels.md @@ -0,0 +1,87 @@ +--- +title: Enforce Pod Security Standards with Namespace Labels +reviewers: +- tallclair +- liggitt +content_type: task +min-kubernetes-server-version: v1.22 +--- + +Namespaces can be labeled to enforce the [Pod Security Standards](/docs/concepts/security/pod-security-standards). + +## {{% heading "prerequisites" %}} + +{{% version-check %}} + +- Enable the `PodSecurity` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features). + +## Requiring the `baseline` Pod Security Standard with namespace labels + +This manifest defines a Namespace `my-baseline-namespace` that: + +- _Blocks_ any pods that don't satisfy the `baseline` policy requirements. +- Generates a user-facing warning and adds an audit annotation to any created pod that does not + meet the `restricted` policy requirements. +- Pins the versions of the `baseline` and `restricted` policies to v{{< skew latestVersion >}}. + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: my-baseline-namespace + labels: + pod-security.kubernetes.io/enforce: baseline + pod-security.kubernetes.io/enforce-version: v{{< skew latestVersion >}} + + # We are setting these to our _desired_ `enforce` level. 
+ pod-security.kubernetes.io/audit: restricted + pod-security.kubernetes.io/audit-version: v{{< skew latestVersion >}} + pod-security.kubernetes.io/warn: restricted + pod-security.kubernetes.io/warn-version: v{{< skew latestVersion >}} +``` + +## Add labels to existing namespaces with `kubectl label` + +{{< note >}} +When an `enforce` policy (or version) label is added or changed, the admission plugin will test +each pod in the namespace against the new policy. Violations are returned to the user as warnings. +{{< /note >}} + +It is helpful to apply the `--dry-run` flag when initially evaluating security profile changes for +namespaces. The Pod Security Standard checks will still be run in _dry run_ mode, giving you +information about how the new policy would treat existing pods, without actually updating a policy. + +```shell +kubectl label --dry-run=server --overwrite ns --all \ + pod-security.kubernetes.io/enforce=baseline +``` + +### Applying to all namespaces + +If you're just getting started with the Pod Security Standards, a suitable first step would be to +configure all namespaces with audit annotations for a stricter level such as `baseline`: + +```shell +kubectl label --overwrite ns --all \ + pod-security.kubernetes.io/audit=baseline \ + pod-security.kubernetes.io/warn=baseline +``` + +Note that this is not setting an enforce level, so that namespaces that haven't been explicitly +evaluated can be distinguished. You can list namespaces without an explicitly set enforce level +using this command: + +```shell +kubectl get namespaces --selector='!pod-security.kubernetes.io/enforce' +``` + +### Applying to a single namespace + +You can update a specific namespace as well. This command adds the `enforce=restricted` +policy to `my-existing-namespace`, pinning the restricted policy version to v{{< skew latestVersion >}}. 
+ +```shell +kubectl label --overwrite ns my-existing-namespace \ + pod-security.kubernetes.io/enforce=restricted \ + pod-security.kubernetes.io/enforce-version=v{{< skew latestVersion >}} +``` diff --git a/content/en/docs/tasks/configure-pod-container/migrate-from-psp.md b/content/en/docs/tasks/configure-pod-container/migrate-from-psp.md new file mode 100644 index 0000000000..f0ea2d02df --- /dev/null +++ b/content/en/docs/tasks/configure-pod-container/migrate-from-psp.md @@ -0,0 +1,48 @@ +--- +title: Migrate from PodSecurityPolicy to the Built-In PodSecurity Admission Controller +reviewers: +- tallclair +- liggitt +content_type: task +min-kubernetes-server-version: v1.22 +--- + + + +This page describes the process of migrating from PodSecurityPolicies to the built-in PodSecurity +admission controller. This can be done effectively using a combination of dry-run and `audit` and +`warn` modes, although this becomes harder if mutating PSPs are used. + +## {{% heading "prerequisites" %}} + +{{% version-check %}} + +- Enable the `PodSecurity` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features). + + + +## Steps + +- **Eliminate mutating PodSecurityPolicies, if your cluster has any set up.** + - Clone all mutating PSPs into a non-mutating version. + - Update all ClusterRoles authorizing use of those mutating PSPs to also authorize use of the + non-mutating variant. + - Watch for Pods using the mutating PSPs and work with code owners to migrate to valid, + non-mutating resources. + - Delete mutating PSPs. +- **Select a compatible policy level for each namespace.** Analyze existing resources in the + namespace to drive this decision. + - Review the requirements of the different [Pod Security Standards](/docs/concepts/security/pod-security-standards). + - Evaluate the difference in privileges that would come from disabling the PSP controller. 
+ - In the event that a PodSecurityPolicy falls between two levels, consider: + - Selecting a _less_ permissive PodSecurity level prioritizes security, and may require adjusting + workloads to fit within the stricter policy. + - Selecting a _more_ permissive PodSecurity level prioritizes avoiding disrupting or + changing workloads, but may allow workload authors in the namespace greater permissions + than desired. +- **Apply the selected profiles in `warn` and `audit` mode.** This will give you an idea of how + your Pods will respond to the new policies, without breaking existing workloads. Iterate on your + [Pods' configuration](/docs/concepts/security/pod-security-admission#configuring-pods) until + they are in compliance with the selected profiles. +- Apply the profiles in `enforce` mode. +- Stop including `PodSecurityPolicy` in the `--enable-admission-plugins` flag. \ No newline at end of file diff --git a/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md b/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md index 57c5329b7a..0886871f9c 100644 --- a/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md +++ b/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md @@ -102,7 +102,7 @@ kubectl create secret docker-registry regcred --docker-server=` is your Private Docker Registry FQDN. - Use `https://index.docker.io/v2/` for DockerHub. + Use `https://index.docker.io/v1/` for DockerHub. * `` is your Docker username. * `` is your Docker password. * `` is your Docker email. diff --git a/content/en/docs/tasks/configure-pod-container/security-context.md b/content/en/docs/tasks/configure-pod-container/security-context.md index 50a02b990e..56bcc0f3f9 100644 --- a/content/en/docs/tasks/configure-pod-container/security-context.md +++ b/content/en/docs/tasks/configure-pod-container/security-context.md @@ -24,7 +24,7 @@ a Pod or Container. 
Security context settings include, but are not limited to: * [AppArmor](/docs/tutorials/clusters/apparmor/): Use program profiles to restrict the capabilities of individual programs. -* [Seccomp](https://en.wikipedia.org/wiki/Seccomp): Filter a process's system calls. +* [Seccomp](/docs/tutorials/clusters/seccomp/): Filter a process's system calls. * AllowPrivilegeEscalation: Controls whether a process can gain more privileges than its parent process. This bool directly controls whether the [`no_new_privs`](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt) flag gets set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged OR 2) has `CAP_SYS_ADMIN`. @@ -184,6 +184,25 @@ This field has no effect on ephemeral volume types such as and [`emptydir`](/docs/concepts/storage/volumes/#emptydir). {{< /note >}} +## Delegating volume permission and ownership change to CSI driver + +{{< feature-state for_k8s_version="v1.22" state="alpha" >}} + +If you deploy a [Container Storage Interface (CSI)](https://github.com/container-storage-interface/spec/blob/master/spec.md) +driver which supports the `VOLUME_MOUNT_GROUP` `NodeServiceCapability`, the +process of setting file ownership and permissions based on the +`fsGroup` specified in the `securityContext` will be performed by the CSI driver +instead of Kubernetes, provided that the `DelegateFSGroupToCSIDriver` Kubernetes +feature gate is enabled. In this case, since Kubernetes doesn't perform any +ownership and permission change, `fsGroupChangePolicy` does not take effect, and +as specified by CSI, the driver is expected to mount the volume with the +provided `fsGroup`, resulting in a volume that is readable/writable by the +`fsGroup`. 
+ +Please refer to the [KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/2317-fsgroup-on-mount/README.md) +and the description of the `VolumeCapability.MountVolume.volume_mount_group` +field in the [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume) +for more information. ## Set the security context for a Container diff --git a/content/en/docs/tasks/configure-pod-container/static-pod.md b/content/en/docs/tasks/configure-pod-container/static-pod.md index 9126243462..c889570dc7 100644 --- a/content/en/docs/tasks/configure-pod-container/static-pod.md +++ b/content/en/docs/tasks/configure-pod-container/static-pod.md @@ -22,7 +22,7 @@ The kubelet automatically tries to create a {{< glossary_tooltip text="mirror Po on the Kubernetes API server for each static Pod. This means that the Pods running on a node are visible on the API server, but cannot be controlled from there. -The Pod names will suffixed with the node hostname with a leading hyphen +The Pod names will be suffixed with the node hostname with a leading hyphen. {{< note >}} If you are running clustered Kubernetes and are using static @@ -31,6 +31,13 @@ Pods to run a Pod on every node, you should probably be using a instead. {{< /note >}} +{{< note >}} +The `spec` of a static Pod cannot refer to other API objects +(e.g., {{< glossary_tooltip text="ServiceAccount" term_id="service-account" >}}, +{{< glossary_tooltip text="ConfigMap" term_id="configmap" >}}, +{{< glossary_tooltip text="Secret" term_id="secret" >}}, etc). 
+{{< /note >}} + ## {{% heading "prerequisites" %}} {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} diff --git a/content/en/docs/tasks/debug-application-cluster/_index.md b/content/en/docs/tasks/debug-application-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/debug-application-cluster/audit.md b/content/en/docs/tasks/debug-application-cluster/audit.md index c44caf66b5..6c4b433ca2 100644 --- a/content/en/docs/tasks/debug-application-cluster/audit.md +++ b/content/en/docs/tasks/debug-application-cluster/audit.md @@ -94,7 +94,7 @@ rules: ``` If you're crafting your own audit profile, you can use the audit profile for Google Container-Optimized OS as a starting point. You can check the -[configure-helper.sh](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh) +[configure-helper.sh](https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/gci/configure-helper.sh) script, which generates an audit policy file. You can see most of the audit policy file by looking directly at the script. 
You can also refer to the [`Policy` configuration reference](/docs/reference/config-api/apiserver-audit.v1/#audit-k8s-io-v1-Policy) diff --git a/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md b/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md index 59a83e87c7..6009a76341 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md @@ -73,22 +73,20 @@ For more details, see [Get a Shell to a Running Container]( ## Debugging with an ephemeral debug container {#ephemeral-container} -{{< feature-state state="alpha" for_k8s_version="v1.18" >}} +{{< feature-state state="alpha" for_k8s_version="v1.22" >}} {{< glossary_tooltip text="Ephemeral containers" term_id="ephemeral-container" >}} are useful for interactive troubleshooting when `kubectl exec` is insufficient because a container has crashed or a container image doesn't include debugging utilities, such as with [distroless images]( -https://github.com/GoogleContainerTools/distroless). `kubectl` has an alpha -command that can create ephemeral containers for debugging beginning with version -`v1.18`. +https://github.com/GoogleContainerTools/distroless). ### Example debugging using ephemeral containers {#ephemeral-container-example} {{< note >}} The examples in this section require the `EphemeralContainers` [feature gate]( /docs/reference/command-line-tools-reference/feature-gates/) enabled in your -cluster and `kubectl` version v1.18 or later. +cluster and `kubectl` version v1.22 or later. {{< /note >}} You can use the `kubectl debug` command to add ephemeral containers to a @@ -137,7 +135,8 @@ creates. The `--target` parameter must be supported by the {{< glossary_tooltip text="Container Runtime" term_id="container-runtime" >}}. When not supported, the Ephemeral Container may not be started, or it may be started with an -isolated process namespace. 
+isolated process namespace so that `ps` does not reveal processes in other +containers. {{< /note >}} You can view the state of the newly created ephemeral container using `kubectl describe`: diff --git a/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md b/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md index 543573781b..f1ddd96389 100644 --- a/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md +++ b/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md @@ -41,7 +41,7 @@ the container starts. kubectl apply -f https://k8s.io/examples/debug/termination.yaml - In the YAML file, in the `cmd` and `args` fields, you can see that the + In the YAML file, in the `command` and `args` fields, you can see that the container sleeps for 10 seconds and then writes "Sleep expired" to the `/dev/termination-log` file. After the container writes the "Sleep expired" message, it terminates. diff --git a/content/en/docs/tasks/extend-kubernetes/configure-aggregation-layer.md b/content/en/docs/tasks/extend-kubernetes/configure-aggregation-layer.md index 739a69d45a..3b65d6abc6 100644 --- a/content/en/docs/tasks/extend-kubernetes/configure-aggregation-layer.md +++ b/content/en/docs/tasks/extend-kubernetes/configure-aggregation-layer.md @@ -17,7 +17,7 @@ Configuring the [aggregation layer](/docs/concepts/extend-kubernetes/api-extensi {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} {{< note >}} -There are a few setup requirements for getting the aggregation layer working in your environment to support mutual TLS auth between the proxy and extension apiservers. Kubernetes and the kube-apiserver have multiple CAs, so make sure that the proxy is signed by the aggregation layer CA and not by something else, like the master CA. 
+There are a few setup requirements for getting the aggregation layer working in your environment to support mutual TLS auth between the proxy and extension apiservers. Kubernetes and the kube-apiserver have multiple CAs, so make sure that the proxy is signed by the aggregation layer CA and not by something else, like the Kubernetes general CA. {{< /note >}} {{< caution >}} diff --git a/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md b/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md index 7ad7072fd7..d44e6897b0 100644 --- a/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md +++ b/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md @@ -18,7 +18,7 @@ learn how to run multiple schedulers in Kubernetes with an example. A detailed description of how to implement a scheduler is outside the scope of this document. Please refer to the kube-scheduler implementation in -[pkg/scheduler](https://github.com/kubernetes/kubernetes/tree/{{< param "githubbranch" >}}/pkg/scheduler) +[pkg/scheduler](https://github.com/kubernetes/kubernetes/tree/master/pkg/scheduler) in the Kubernetes source directory for a canonical example. 
## {{% heading "prerequisites" %}} diff --git a/content/en/docs/tasks/extend-kubernetes/custom-resources/_index.md b/content/en/docs/tasks/extend-kubernetes/custom-resources/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md index 1e21306e5f..45f589e9d7 100644 --- a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md +++ b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md @@ -80,7 +80,7 @@ Removing an old version: If this occurs, switch back to using `served:true` on the old version, migrate the remaining clients to the new version and repeat this step. 1. Ensure the [upgrade of existing objects to the new stored version](#upgrade-existing-objects-to-a-new-stored-version) step has been completed. - 1. Verify that the `stored` is set to `true` for the new version in the `spec.versions` list in the CustomResourceDefinition. + 1. Verify that the `storage` is set to `true` for the new version in the `spec.versions` list in the CustomResourceDefinition. 1. Verify that the old version is no longer listed in the CustomResourceDefinition `status.storedVersions`. 1. Remove the old version from the CustomResourceDefinition `spec.versions` list. 1. Drop conversion support for the old version in conversion webhooks. @@ -202,7 +202,7 @@ spec: plural: crontabs # singular name to be used as an alias on the CLI and for display singular: crontab - # kind is normally the CamelCased singular type. Your resource manifests use this. + # kind is normally the PascalCased singular type. Your resource manifests use this. 
kind: CronTab # shortNames allow shorter string to match your resource on the CLI shortNames: diff --git a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md index 3230b7b73a..cd2d0fb103 100644 --- a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md +++ b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md @@ -154,22 +154,26 @@ from the YAML you used to create it: ```yaml apiVersion: v1 -kind: List items: - apiVersion: stable.example.com/v1 kind: CronTab metadata: - creationTimestamp: 2017-05-31T12:56:35Z + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"stable.example.com/v1","kind":"CronTab","metadata":{"annotations":{},"name":"my-new-cron-object","namespace":"default"},"spec":{"cronSpec":"* * * * */5","image":"my-awesome-cron-image"}} + creationTimestamp: "2021-06-20T07:35:27Z" generation: 1 name: my-new-cron-object namespace: default - resourceVersion: "285" - uid: 9423255b-4600-11e7-af6a-28d2447dc82b + resourceVersion: "1326" + uid: 9aab1d66-628e-41bb-a422-57b8b3b1f5a9 spec: cronSpec: '* * * * */5' image: my-awesome-cron-image +kind: List metadata: resourceVersion: "" + selfLink: "" ``` ## Delete a CustomResourceDefinition diff --git a/content/en/docs/tasks/inject-data-application/_index.md b/content/en/docs/tasks/inject-data-application/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/job/indexed-parallel-processing-static.md b/content/en/docs/tasks/job/indexed-parallel-processing-static.md index b5492eed6e..da5e6d4e08 100644 --- a/content/en/docs/tasks/job/indexed-parallel-processing-static.md +++ b/content/en/docs/tasks/job/indexed-parallel-processing-static.md @@ -5,7 +5,7 @@ min-kubernetes-server-version: v1.21 weight: 30 --- -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} +{{< 
feature-state for_k8s_version="v1.22" state="beta" >}} @@ -38,11 +38,6 @@ non-parallel, use of [Job](/docs/concepts/workloads/controllers/job/). {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -To be able to create Indexed Jobs, make sure to enable the `IndexedJob` -[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) -on the [API server](/docs/reference/command-line-tools-reference/kube-apiserver/) -and the [controller manager](/docs/reference/command-line-tools-reference/kube-controller-manager/). - ## Choose an approach diff --git a/content/en/docs/tasks/manage-daemon/_index.md b/content/en/docs/tasks/manage-daemon/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/manage-daemon/update-daemon-set.md b/content/en/docs/tasks/manage-daemon/update-daemon-set.md index 2f3001da0f..a74864fed5 100644 --- a/content/en/docs/tasks/manage-daemon/update-daemon-set.md +++ b/content/en/docs/tasks/manage-daemon/update-daemon-set.md @@ -7,12 +7,11 @@ weight: 10 --- - This page shows how to perform a rolling update on a DaemonSet. ## {{% heading "prerequisites" %}} -* The DaemonSet rolling update feature is only supported in Kubernetes version 1.6 or later. +{{< include "task-tutorial-prereqs.md" >}} @@ -20,22 +19,28 @@ This page shows how to perform a rolling update on a DaemonSet. DaemonSet has two update strategy types: -* OnDelete: With `OnDelete` update strategy, after you update a DaemonSet template, new +* `OnDelete`: With `OnDelete` update strategy, after you update a DaemonSet template, new DaemonSet pods will *only* be created when you manually delete old DaemonSet pods. This is the same behavior of DaemonSet in Kubernetes version 1.5 or before. -* RollingUpdate: This is the default update strategy. +* `RollingUpdate`: This is the default update strategy. 
With `RollingUpdate` update strategy, after you update a DaemonSet template, old DaemonSet pods will be killed, and new DaemonSet pods - will be created automatically, in a controlled fashion. At most one pod of the DaemonSet will be running on each node during the whole update process. + will be created automatically, in a controlled fashion. At most one pod of + the DaemonSet will be running on each node during the whole update process. ## Performing a Rolling Update To enable the rolling update feature of a DaemonSet, you must set its `.spec.updateStrategy.type` to `RollingUpdate`. -You may want to set [`.spec.updateStrategy.rollingUpdate.maxUnavailable`](/docs/concepts/workloads/controllers/deployment/#max-unavailable) (default -to 1) and [`.spec.minReadySeconds`](/docs/concepts/workloads/controllers/deployment/#min-ready-seconds) (default to 0) as well. +You may want to set +[`.spec.updateStrategy.rollingUpdate.maxUnavailable`](/docs/concepts/workloads/controllers/deployment/#max-unavailable) +(default to 1), +[`.spec.minReadySeconds`](/docs/concepts/workloads/controllers/deployment/#min-ready-seconds) +(default to 0) and +[`.spec.maxSurge`](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#max-surge) +(a beta feature and defaults to 25%) as well. ### Creating a DaemonSet with `RollingUpdate` update strategy @@ -143,7 +148,7 @@ causes: The rollout is stuck because new DaemonSet pods can't be scheduled on at least one node. This is possible when the node is -[running out of resources](/docs/tasks/administer-cluster/out-of-resource/). +[running out of resources](/docs/concepts/scheduling-eviction/node-pressure-eviction/). 
When this happens, find the nodes that don't have the DaemonSet pods scheduled on by comparing the output of `kubectl get nodes` and the output of: @@ -184,14 +189,7 @@ Delete DaemonSet from a namespace : kubectl delete ds fluentd-elasticsearch -n kube-system ``` - - - ## {{% heading "whatsnext" %}} - -* See [Task: Performing a rollback on a - DaemonSet](/docs/tasks/manage-daemon/rollback-daemon-set/) -* See [Concepts: Creating a DaemonSet to adopt existing DaemonSet pods](/docs/concepts/workloads/controllers/daemonset/) - - +* See [Performing a rollback on a DaemonSet](/docs/tasks/manage-daemon/rollback-daemon-set/) +* See [Creating a DaemonSet to adopt existing DaemonSet pods](/docs/concepts/workloads/controllers/daemonset/) diff --git a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md index 8d4cd4afe6..dd78446381 100644 --- a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md +++ b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md @@ -113,10 +113,4 @@ spec: - Huge page usage in a namespace is controllable via ResourceQuota similar to other compute resources like `cpu` or `memory` using the `hugepages-` token. -- Support of multiple sizes huge pages is feature gated. It can be - disabled with the `HugePageStorageMediumSize` - [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) - on the {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} and - {{< glossary_tooltip text="kube-apiserver" term_id="kube-apiserver" >}} - (`--feature-gates=HugePageStorageMediumSize=false`). 
diff --git a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md index 27f3762988..9b16c25167 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -180,7 +180,7 @@ spec: containers: - name: app image: my-app - volumeMount: + volumeMounts: - name: config mountPath: /config volumes: @@ -234,7 +234,7 @@ spec: containers: - image: my-app name: app - volumeMount: + volumeMounts: - mountPath: /config name: config volumes: @@ -327,7 +327,7 @@ spec: containers: - name: app image: my-app - volumeMount: + volumeMounts: - name: password mountPath: /secrets volumes: diff --git a/content/en/docs/tasks/network/_index.md b/content/en/docs/tasks/network/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md b/content/en/docs/tasks/network/customize-hosts-file-for-pods.md similarity index 98% rename from content/en/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md rename to content/en/docs/tasks/network/customize-hosts-file-for-pods.md index 8eee03bf9b..e396db5d2d 100644 --- a/content/en/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md +++ b/content/en/docs/tasks/network/customize-hosts-file-for-pods.md @@ -3,7 +3,7 @@ reviewers: - rickypai - thockin title: Adding entries to Pod /etc/hosts with HostAliases -content_type: concept +content_type: task weight: 60 min-kubernetes-server-version: 1.7 --- @@ -16,7 +16,7 @@ Adding entries to a Pod's `/etc/hosts` file provides Pod-level override of hostn Modification not using HostAliases is not suggested because the file is managed by the kubelet and can be overwritten on during Pod creation/restart. 
- + ## Default hosts file content diff --git a/content/en/docs/tasks/network/validate-dual-stack.md b/content/en/docs/tasks/network/validate-dual-stack.md index bc90dea4ea..717bac27ad 100644 --- a/content/en/docs/tasks/network/validate-dual-stack.md +++ b/content/en/docs/tasks/network/validate-dual-stack.md @@ -16,7 +16,7 @@ This document shares how to validate IPv4/IPv6 dual-stack enabled Kubernetes clu * Provider support for dual-stack networking (Cloud provider or otherwise must be able to provide Kubernetes nodes with routable IPv4/IPv6 network interfaces) -* A [network plugin](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) that supports dual-stack (such as Kubenet or Calico) +* A [network plugin](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) that supports dual-stack (such as Calico, Cilium or Kubenet) * [Dual-stack enabled](/docs/concepts/services-networking/dual-stack/) cluster {{< version-check >}} diff --git a/content/en/docs/tasks/run-application/_index.md b/content/en/docs/tasks/run-application/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index 84ae1addd2..6328d458fb 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -189,7 +189,7 @@ by making use of the `autoscaling/v2beta2` API version. 
First, get the YAML of your HorizontalPodAutoscaler in the `autoscaling/v2beta2` form: ```shell -kubectl get hpa.v2beta2.autoscaling -o yaml > /tmp/hpa-v2.yaml +kubectl get hpa php-apache -o yaml > /tmp/hpa-v2.yaml ``` Open the `/tmp/hpa-v2.yaml` file in an editor, and you should see YAML which looks like this: @@ -397,7 +397,9 @@ section to your HorizontalPodAutoscaler manifest to specify that you need one wo external: metric: name: queue_messages_ready - selector: "queue=worker_tasks" + selector: + matchLabels: + queue: "worker_tasks" target: type: AverageValue averageValue: 30 diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md index 5e94027423..27165d0ca7 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md @@ -68,14 +68,7 @@ or the custom metrics API (for all other metrics). The HorizontalPodAutoscaler normally fetches metrics from a series of aggregated APIs (`metrics.k8s.io`, `custom.metrics.k8s.io`, and `external.metrics.k8s.io`). The `metrics.k8s.io` API is usually provided by -metrics-server, which needs to be launched separately. See -[metrics-server](/docs/tasks/debug-application-cluster/resource-metrics-pipeline/#metrics-server) -for instructions. The HorizontalPodAutoscaler can also fetch metrics directly from Heapster. - -{{< note >}} -{{< feature-state state="deprecated" for_k8s_version="v1.11" >}} -Fetching metrics from Heapster is deprecated as of Kubernetes 1.11. -{{< /note >}} +metrics-server, which needs to be launched separately. For more information about resource metrics, see [Metrics Server](/docs/tasks/debug-application-cluster/resource-metrics-pipeline/#metrics-server). See [Support for metrics APIs](#support-for-metrics-apis) for more details. 
@@ -198,14 +191,17 @@ The detailed documentation of `kubectl autoscale` can be found [here](/docs/refe ## Autoscaling during rolling update -Currently in Kubernetes, it is possible to perform a rolling update by using the deployment object, which manages the underlying replica sets for you. -Horizontal Pod Autoscaler only supports the latter approach: the Horizontal Pod Autoscaler is bound to the deployment object, -it sets the size for the deployment object, and the deployment is responsible for setting sizes of underlying replica sets. +Kubernetes lets you perform a rolling update on a Deployment. In that +case, the Deployment manages the underlying ReplicaSets for you. +When you configure autoscaling for a Deployment, you bind a +HorizontalPodAutoscaler to a single Deployment. The HorizontalPodAutoscaler +manages the `replicas` field of the Deployment. The deployment controller is responsible +for setting the `replicas` of the underlying ReplicaSets so that they add up to a suitable +number during the rollout and also afterwards. -Horizontal Pod Autoscaler does not work with rolling update using direct manipulation of replication controllers, -i.e. you cannot bind a Horizontal Pod Autoscaler to a replication controller and do rolling update. -The reason this doesn't work is that when rolling update creates a new replication controller, -the Horizontal Pod Autoscaler will not be bound to the new replication controller. +If you perform a rolling update of a StatefulSet that has an autoscaled number of +replicas, the StatefulSet directly manages its set of Pods (there is no intermediate resource +similar to ReplicaSet). ## Support for cooldown/delay @@ -341,8 +337,6 @@ APIs, cluster administrators must ensure that: * For external metrics, this is the `external.metrics.k8s.io` API. It may be provided by the custom metrics adapters provided above. -* The `--horizontal-pod-autoscaler-use-rest-clients` is `true` or unset. 
Setting this to false switches to Heapster-based autoscaling, which is deprecated. - For more information on these different metrics paths and how they differ please see the relevant design proposals for [the HPA V2](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/autoscaling/hpa-v2.md), [custom.metrics.k8s.io](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/custom-metrics-api.md) diff --git a/content/en/docs/tasks/run-application/run-replicated-stateful-application.md b/content/en/docs/tasks/run-application/run-replicated-stateful-application.md index 22f929c06f..e98830b9e3 100644 --- a/content/en/docs/tasks/run-application/run-replicated-stateful-application.md +++ b/content/en/docs/tasks/run-application/run-replicated-stateful-application.md @@ -379,7 +379,7 @@ This might impact other applications on the Node, so it's best to **only do this in a test cluster**. ```shell -kubectl drain --force --delete-local-data --ignore-daemonsets +kubectl drain --force --delete-emptydir-data --ignore-daemonsets ``` Now you can watch as the Pod reschedules on a different Node: diff --git a/content/en/docs/tasks/service-catalog/_index.md b/content/en/docs/tasks/service-catalog/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/tls/_index.md b/content/en/docs/tasks/tls/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/tls/certificate-rotation.md b/content/en/docs/tasks/tls/certificate-rotation.md index 5dd9b85714..2db0c1255d 100644 --- a/content/en/docs/tasks/tls/certificate-rotation.md +++ b/content/en/docs/tasks/tls/certificate-rotation.md @@ -27,8 +27,8 @@ The kubelet uses certificates for authenticating to the Kubernetes API. By default, these certificates are issued with one year expiration so that they do not need to be renewed too frequently. 
-Kubernetes 1.8 contains [kubelet certificate -rotation](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/), a beta feature +Kubernetes contains [kubelet certificate +rotation](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/), that will automatically generate a new key and request a new certificate from the Kubernetes API as the current certificate approaches expiration. Once the new certificate is available, it will be used for authenticating connections to @@ -62,7 +62,7 @@ criteria, it will be auto approved by the controller manager, then it will have a status of `Approved`. Next, the controller manager will sign a certificate, issued for the duration specified by the `--cluster-signing-duration` parameter, and the signed certificate -will be attached to the certificate signing requests. +will be attached to the certificate signing request. The kubelet will retrieve the signed certificate from the Kubernetes API and write that to disk, in the location specified by `--cert-dir`. Then the kubelet diff --git a/content/en/docs/tasks/tools/_index.md b/content/en/docs/tasks/tools/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tasks/tools/included/install-kubectl-gcloud.md b/content/en/docs/tasks/tools/included/install-kubectl-gcloud.md deleted file mode 100644 index dcf8572618..0000000000 --- a/content/en/docs/tasks/tools/included/install-kubectl-gcloud.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "gcloud kubectl install" -description: "How to install kubectl with gcloud snippet for inclusion in each OS-specific tab." -headless: true ---- - -You can install kubectl as part of the Google Cloud SDK. - -1. Install the [Google Cloud SDK](https://cloud.google.com/sdk/). - -1. Run the `kubectl` installation command: - - ```shell - gcloud components install kubectl - ``` - -1. 
Test to ensure the version you installed is up-to-date: - - ```shell - kubectl version --client - ``` \ No newline at end of file diff --git a/content/en/docs/tasks/tools/included/kubectl-convert-overview.md b/content/en/docs/tasks/tools/included/kubectl-convert-overview.md new file mode 100644 index 0000000000..b1799d52ea --- /dev/null +++ b/content/en/docs/tasks/tools/included/kubectl-convert-overview.md @@ -0,0 +1,11 @@ +--- +title: "kubectl-convert overview" +description: >- + A kubectl plugin that allows you to convert manifests from one version + of a Kubernetes API to a different version. +headless: true +--- + +A plugin for Kubernetes command-line tool `kubectl`, which allows you to convert manifests between different API +versions. This can be particularly helpful to migrate manifests to a non-deprecated api version with newer Kubernetes release. +For more info, visit [migrate to non deprecated apis](/docs/reference/using-api/deprecation-guide/#migrate-to-non-deprecated-apis) \ No newline at end of file diff --git a/content/en/docs/tasks/tools/install-kubectl-linux.md b/content/en/docs/tasks/tools/install-kubectl-linux.md index d64ef99b13..efb203f8b9 100644 --- a/content/en/docs/tasks/tools/install-kubectl-linux.md +++ b/content/en/docs/tasks/tools/install-kubectl-linux.md @@ -22,7 +22,6 @@ The following methods exist for installing kubectl on Linux: - [Install kubectl binary with curl on Linux](#install-kubectl-binary-with-curl-on-linux) - [Install using native package management](#install-using-native-package-management) - [Install using other package management](#install-using-other-package-management) -- [Install on Linux as part of the Google Cloud SDK](#install-on-linux-as-part-of-the-google-cloud-sdk) ### Install kubectl binary with curl on Linux @@ -83,6 +82,7 @@ For example, to download version {{< param "fullversion" >}} on Linux, type: If you do not have root access on the target system, you can still install kubectl to the `~/.local/bin` 
directory: ```bash + chmod +x kubectl mkdir -p ~/.local/bin/kubectl mv ./kubectl ~/.local/bin/kubectl # and then add ~/.local/bin/kubectl to $PATH @@ -168,15 +168,11 @@ kubectl version --client {{< /tabs >}} -### Install on Linux as part of the Google Cloud SDK - -{{< include "included/install-kubectl-gcloud.md" >}} - ## Verify kubectl configuration {{< include "included/verify-kubectl.md" >}} -## Optional kubectl configurations +## Optional kubectl configurations and plugins ### Enable shell autocompletion @@ -189,6 +185,61 @@ Below are the procedures to set up autocompletion for Bash and Zsh. {{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}} {{< /tabs >}} +### Install `kubectl convert` plugin + +{{< include "included/kubectl-convert-overview.md" >}} + +1. Download the latest release with the command: + + ```bash + curl -LO https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert + ``` + +1. Validate the binary (optional) + + Download the kubectl-convert checksum file: + + ```bash + curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" + ``` + + Validate the kubectl-convert binary against the checksum file: + + ```bash + echo "$(}} + Download the same version of the binary and checksum. + {{< /note >}} + +1. Install kubectl-convert + + ```bash + sudo install -o root -g root -m 0755 kubectl-convert /usr/local/bin/kubectl-convert + ``` + +1. Verify plugin is successfully installed + + ```shell + kubectl convert --help + ``` + + If you do not see an error, it means the plugin is successfully installed. 
+ ## {{% heading "whatsnext" %}} {{< include "included/kubectl-whats-next.md" >}} diff --git a/content/en/docs/tasks/tools/install-kubectl-macos.md b/content/en/docs/tasks/tools/install-kubectl-macos.md index d952359407..b46ab03640 100644 --- a/content/en/docs/tasks/tools/install-kubectl-macos.md +++ b/content/en/docs/tasks/tools/install-kubectl-macos.md @@ -22,7 +22,6 @@ The following methods exist for installing kubectl on macOS: - [Install kubectl binary with curl on macOS](#install-kubectl-binary-with-curl-on-macos) - [Install with Homebrew on macOS](#install-with-homebrew-on-macos) - [Install with Macports on macOS](#install-with-macports-on-macos) -- [Install on macOS as part of the Google Cloud SDK](#install-on-macos-as-part-of-the-google-cloud-sdk) ### Install kubectl binary with curl on macOS @@ -31,7 +30,6 @@ The following methods exist for installing kubectl on macOS: {{< tabs name="download_binary_macos" >}} {{< tab name="Intel" codelang="bash" >}} curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl" - chmod +x kubectl {{< /tab >}} {{< tab name="Apple Silicon" codelang="bash" >}} curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl" @@ -104,6 +102,10 @@ The following methods exist for installing kubectl on macOS: sudo chown root: /usr/local/bin/kubectl ``` + {{< note >}} + Make sure `/usr/local/bin` is in your PATH environment variable. + {{< /note >}} + 1. 
Test to ensure the version you installed is up-to-date: ```bash @@ -149,16 +151,11 @@ If you are on macOS and using [Macports](https://macports.org/) package manager, kubectl version --client ``` - -### Install on macOS as part of the Google Cloud SDK - -{{< include "included/install-kubectl-gcloud.md" >}} - ## Verify kubectl configuration {{< include "included/verify-kubectl.md" >}} -## Optional kubectl configurations +## Optional kubectl configurations and plugins ### Enable shell autocompletion @@ -171,6 +168,82 @@ Below are the procedures to set up autocompletion for Bash and Zsh. {{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}} {{< /tabs >}} +### Install `kubectl convert` plugin + +{{< include "included/kubectl-convert-overview.md" >}} + +1. Download the latest release with the command: + + {{< tabs name="download_convert_binary_macos" >}} + {{< tab name="Intel" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl-convert" + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl-convert" + {{< /tab >}} + {{< /tabs >}} + +1. Validate the binary (optional) + + Download the kubectl-convert checksum file: + + {{< tabs name="download_convert_checksum_macos" >}} + {{< tab name="Intel" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl-convert.sha256" + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl-convert.sha256" + {{< /tab >}} + {{< /tabs >}} + + Validate the kubectl-convert binary against the checksum file: + + ```bash + echo "$(}} + Download the same version of the binary and checksum. + {{< /note >}} + +1. 
Make kubectl-convert binary executable + + ```bash + chmod +x ./kubectl-convert + ``` + +1. Move the kubectl-convert binary to a file location on your system `PATH`. + + ```bash + sudo mv ./kubectl-convert /usr/local/bin/kubectl-convert + sudo chown root: /usr/local/bin/kubectl-convert + ``` + + {{< note >}} + Make sure `/usr/local/bin` is in your PATH environment variable. + {{< /note >}} + +1. Verify plugin is successfully installed + + ```shell + kubectl convert --help + ``` + + If you do not see an error, it means the plugin is successfully installed. + ## {{% heading "whatsnext" %}} {{< include "included/kubectl-whats-next.md" >}} diff --git a/content/en/docs/tasks/tools/install-kubectl-windows.md b/content/en/docs/tasks/tools/install-kubectl-windows.md index 11f79b6d94..8059fa7a3a 100644 --- a/content/en/docs/tasks/tools/install-kubectl-windows.md +++ b/content/en/docs/tasks/tools/install-kubectl-windows.md @@ -21,7 +21,6 @@ The following methods exist for installing kubectl on Windows: - [Install kubectl binary with curl on Windows](#install-kubectl-binary-with-curl-on-windows) - [Install on Windows using Chocolatey or Scoop](#install-on-windows-using-chocolatey-or-scoop) -- [Install on Windows as part of the Google Cloud SDK](#install-on-windows-as-part-of-the-google-cloud-sdk) ### Install kubectl binary with curl on Windows @@ -31,7 +30,7 @@ The following methods exist for installing kubectl on Windows: Or if you have `curl` installed, use this command: ```powershell - curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe + curl -LO "https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe" ``` {{< note >}} @@ -43,7 +42,7 @@ The following methods exist for installing kubectl on Windows: Download the kubectl checksum file: ```powershell - curl -LO https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe.sha256 + curl -LO "https://dl.k8s.io/{{< param "fullversion" 
>}}/bin/windows/amd64/kubectl.exe.sha256" ``` Validate the kubectl binary against the checksum file: @@ -127,15 +126,11 @@ If you have installed Docker Desktop before, you may need to place your `PATH` e Edit the config file with a text editor of your choice, such as Notepad. {{< /note >}} -### Install on Windows as part of the Google Cloud SDK - -{{< include "included/install-kubectl-gcloud.md" >}} - ## Verify kubectl configuration {{< include "included/verify-kubectl.md" >}} -## Optional kubectl configurations +## Optional kubectl configurations and plugins ### Enable shell autocompletion @@ -145,6 +140,49 @@ Below are the procedures to set up autocompletion for Zsh, if you are running th {{< include "included/optional-kubectl-configs-zsh.md" >}} +### Install `kubectl convert` plugin + +{{< include "included/kubectl-convert-overview.md" >}} + +1. Download the latest release with the command: + + ```powershell + curl -LO "https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl-convert.exe" + ``` + +1. Validate the binary (optional) + + Download the kubectl-convert checksum file: + + ```powershell + curl -LO "https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubectl-convert.exe.sha256" + ``` + + Validate the kubectl-convert binary against the checksum file: + + - Using Command Prompt to manually compare `CertUtil`'s output to the checksum file downloaded: + + ```cmd + CertUtil -hashfile kubectl-convert.exe SHA256 + type kubectl-convert.exe.sha256 + ``` + + - Using PowerShell to automate the verification using the `-eq` operator to get a `True` or `False` result: + + ```powershell + $($(CertUtil -hashfile .\kubectl-convert.exe SHA256)[1] -replace " ", "") -eq $(type .\kubectl-convert.exe.sha256) + ``` + +1. Add the binary in to your `PATH`. + +1. Verify plugin is successfully installed + + ```shell + kubectl convert --help + ``` + + If you do not see an error, it means the plugin is successfully installed. 
+ ## {{% heading "whatsnext" %}} -{{< include "included/kubectl-whats-next.md" >}} \ No newline at end of file +{{< include "included/kubectl-whats-next.md" >}} diff --git a/content/en/docs/test.md b/content/en/docs/test.md index ae5bb447f1..b9998635e2 100644 --- a/content/en/docs/test.md +++ b/content/en/docs/test.md @@ -287,7 +287,7 @@ tables, use HTML instead. ## Visualizations with Mermaid You can use [Mermaid JS](https://mermaidjs.github.io) visualizations. -The Mermaid JS version is specified in [/layouts/partials/head.html](https://github.com/kubernetes/website/blob/master/layouts/partials/head.html) +The Mermaid JS version is specified in [/layouts/partials/head.html](https://github.com/kubernetes/website/blob/main/layouts/partials/head.html) ``` {{}} diff --git a/content/en/docs/tutorials/_index.md b/content/en/docs/tutorials/_index.md index acd2a4363f..fdc62e11fb 100644 --- a/content/en/docs/tutorials/_index.md +++ b/content/en/docs/tutorials/_index.md @@ -35,7 +35,7 @@ Before walking through each tutorial, you may want to bookmark the * [Exposing an External IP Address to Access an Application in a Cluster](/docs/tutorials/stateless-application/expose-external-ip-address/) -* [Example: Deploying PHP Guestbook application with MongoDB](/docs/tutorials/stateless-application/guestbook/) +* [Example: Deploying PHP Guestbook application with Redis](/docs/tutorials/stateless-application/guestbook/) ## Stateful Applications @@ -51,6 +51,8 @@ Before walking through each tutorial, you may want to bookmark the * [AppArmor](/docs/tutorials/clusters/apparmor/) +* [seccomp](/docs/tutorials/clusters/seccomp/) + ## Services * [Using Source IP](/docs/tutorials/services/source-ip/) diff --git a/content/en/docs/tutorials/clusters/_index.md b/content/en/docs/tutorials/clusters/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tutorials/clusters/apparmor.md b/content/en/docs/tutorials/clusters/apparmor.md index 32f25ba483..8907768089 100644 --- 
a/content/en/docs/tutorials/clusters/apparmor.md +++ b/content/en/docs/tutorials/clusters/apparmor.md @@ -348,6 +348,11 @@ node with the required profile. ### Restricting profiles with the PodSecurityPolicy +{{< note >}} +PodSecurityPolicy is deprecated in Kubernetes v1.21, and will be removed in v1.25. +See [PodSecurityPolicy documentation](/docs/concepts/policy/pod-security-policy/) for more information. +{{< /note >}} + If the PodSecurityPolicy extension is enabled, cluster-wide AppArmor restrictions can be applied. To enable the PodSecurityPolicy, the following flag must be set on the `apiserver`: diff --git a/content/en/docs/tutorials/clusters/seccomp.md b/content/en/docs/tutorials/clusters/seccomp.md index 971618cf55..029ea97a7d 100644 --- a/content/en/docs/tutorials/clusters/seccomp.md +++ b/content/en/docs/tutorials/clusters/seccomp.md @@ -3,9 +3,10 @@ reviewers: - hasheddan - pjbgf - saschagrunert -title: Restrict a Container's Syscalls with Seccomp +title: Restrict a Container's Syscalls with seccomp content_type: tutorial weight: 20 +min-kubernetes-server-version: v1.22 --- @@ -13,7 +14,7 @@ weight: 20 {{< feature-state for_k8s_version="v1.19" state="stable" >}} Seccomp stands for secure computing mode and has been a feature of the Linux -kernel since version 2.6.12. It can be used to sandbox the privileges of a +kernel since version 2.6.12. It can be used to sandbox the privileges of a process, restricting the calls it is able to make from userspace into the kernel. Kubernetes lets you automatically apply seccomp profiles loaded onto a Node to your Pods and containers. @@ -35,16 +36,72 @@ profiles that give only the necessary privileges to your container processes. ## {{% heading "prerequisites" %}} +{{< version-check >}} + In order to complete all steps in this tutorial, you must install [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) and [kubectl](/docs/tasks/tools/). 
This tutorial will show examples -with both alpha (pre-v1.19) and generally available seccomp functionality, so +with both alpha (new in v1.22) and generally available seccomp functionality. You should make sure that your cluster is [configured correctly](https://kind.sigs.k8s.io/docs/user/quick-start/#setting-kubernetes-version) for the version you are using. +{{< note >}} +It is not possible to apply a seccomp profile to a container running with +`privileged: true` set in the container's `securityContext`. Privileged containers always +run as `Unconfined`. +{{< /note >}} + +## Enable the use of `RuntimeDefault` as the default seccomp profile for all workloads + +{{< feature-state state="alpha" for_k8s_version="v1.22" >}} + +`SeccompDefault` is an optional kubelet +[feature gate](/docs/reference/command-line-tools-reference/feature-gates) as +well as a corresponding `--seccomp-default` +[command line flag](/docs/reference/command-line-tools-reference/kubelet). +Both have to be enabled simultaneously to use the feature. + +If enabled, the kubelet will use the `RuntimeDefault` seccomp profile by default, which is +defined by the container runtime, instead of using the `Unconfined` (seccomp disabled) mode. +The default profiles aim to provide a strong set +of security defaults while preserving the functionality of the workload. It is +possible that the default profiles differ between container runtimes and their +release versions, for example when comparing those from CRI-O and containerd. + +Some workloads may require a lower amount of syscall restrictions than others. +This means that they can fail during runtime even with the `RuntimeDefault` +profile. To mitigate such a failure, you can: + +- Run the workload explicitly as `Unconfined`. +- Disable the `SeccompDefault` feature for the nodes. Also make sure that + workloads get scheduled on nodes where the feature is disabled. +- Create a custom seccomp profile for the workload.
+ +If you were introducing this feature into a production-like cluster, the Kubernetes project +recommends that you enable this feature gate on a subset of your nodes and then +test workload execution before rolling the change out cluster-wide. + +More detailed information about a possible upgrade and downgrade strategy can be +found in the [related Kubernetes Enhancement Proposal (KEP)](https://github.com/kubernetes/enhancements/tree/a70cc18/keps/sig-node/2413-seccomp-by-default#upgrade--downgrade-strategy). + +Since the feature is in alpha state, it is disabled by default. To enable it, +pass the flags `--feature-gates=SeccompDefault=true --seccomp-default` to the +`kubelet` CLI or enable it via the [kubelet configuration +file](/docs/tasks/administer-cluster/kubelet-config-file/). To enable the +feature gate in [kind](https://kind.sigs.k8s.io), ensure that `kind` provides +the minimum required Kubernetes version and enables the `SeccompDefault` feature +[in the kind configuration](https://kind.sigs.k8s.io/docs/user/quick-start/#enable-feature-gates-in-your-cluster): + +```yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +featureGates: + SeccompDefault: true +``` + ## Create Seccomp Profiles The contents of these profiles will be explored later on, but for now go ahead @@ -108,7 +165,7 @@ docker exec -it 6a96207fed4b ls /var/lib/kubelet/seccomp/profiles audit.json fine-grained.json violation.json ``` -## Create a Pod with a Seccomp profile for syscall auditing +## Create a Pod with a seccomp profile for syscall auditing To start off, apply the `audit.json` profile, which will log all syscalls of the process, to a new Pod. @@ -208,7 +265,7 @@ kubectl delete pod/audit-pod kubectl delete svc/audit-pod ``` -## Create Pod with Seccomp Profile that Causes Violation +## Create Pod with seccomp Profile that Causes Violation For demonstration, apply a profile to the Pod that does not allow for any syscalls.
@@ -255,7 +312,7 @@ kubectl delete pod/violation-pod kubectl delete svc/violation-pod ``` -## Create Pod with Seccomp Profile that Only Allows Necessary Syscalls +## Create Pod with seccomp Profile that Only Allows Necessary Syscalls If you take a look at the `fine-pod.json`, you will notice some of the syscalls seen in the first example where the profile set `"defaultAction": @@ -339,7 +396,7 @@ kubectl delete pod/fine-pod kubectl delete svc/fine-pod ``` -## Create Pod that uses the Container Runtime Default Seccomp Profile +## Create Pod that uses the Container Runtime Default seccomp Profile Most container runtimes provide a sane set of default syscalls that are allowed or not. The defaults can easily be applied in Kubernetes by using the @@ -364,5 +421,5 @@ The default seccomp profile should provide adequate access for most workloads. Additional resources: -* [A Seccomp Overview](https://lwn.net/Articles/656307/) +* [A seccomp Overview](https://lwn.net/Articles/656307/) * [Seccomp Security Profiles for Docker](https://docs.docker.com/engine/security/seccomp/) diff --git a/content/en/docs/tutorials/configuration/_index.md b/content/en/docs/tutorials/configuration/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tutorials/configuration/configure-java-microservice/_index.md b/content/en/docs/tutorials/configuration/configure-java-microservice/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md b/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md index b29b352aca..ec6edb9cc7 100644 --- a/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md +++ b/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md @@ -55,7 +55,7 @@ Apply the ConfigMap created above, along with a Redis pod manifest: ```shell kubectl apply -f example-redis-config.yaml -kubectl apply -f 
https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/pods/config/redis-pod.yaml ``` Examine the contents of the Redis pod manifest and note the following: @@ -206,7 +206,7 @@ values from associated ConfigMaps. Let's delete and recreate the Pod: ```shell kubectl delete pod redis -kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/pods/config/redis-pod.yaml ``` Now re-check the configuration values one last time: diff --git a/content/en/docs/tutorials/hello-minikube.md b/content/en/docs/tutorials/hello-minikube.md index d8ad753958..3911ff2de6 100644 --- a/content/en/docs/tutorials/hello-minikube.md +++ b/content/en/docs/tutorials/hello-minikube.md @@ -60,11 +60,17 @@ If you installed minikube locally, run `minikube start`. Before you run `minikub 4. Katacoda environment only: Type `30000`, and then click **Display Port**. {{< note >}} -The `dashboard` command enables the dashboard add-on and opens the proxy in the default web browser. You can create Kubernetes resources on the dashboard such as Deployment and Service. +The `dashboard` command enables the dashboard add-on and opens the proxy in the default web browser. +You can create Kubernetes resources on the dashboard such as Deployment and Service. If you are running in an environment as root, see [Open Dashboard with URL](#open-dashboard-with-url). -To stop the proxy, run `Ctrl+C` to exit the process. The dashboard remains running. +By default, the dashboard is only accessible from within the internal Kubernetes virtual network. +The `dashboard` command creates a temporary proxy to make the dashboard accessible from outside the Kubernetes virtual network. 
+ +To stop the proxy, run `Ctrl+C` to exit the process. +After the command exits, the dashboard remains running in Kubernetes cluster. +You can run the `dashboard` command again to create another proxy to access the dashboard. {{< /note >}} ## Open Dashboard with URL @@ -224,7 +230,7 @@ The minikube tool includes a set of built-in {{< glossary_tooltip text="addons" The output is similar to: ``` - metrics-server was successfully enabled + The 'metrics-server' addon is enabled ``` 3. View the Pod and Service you created: diff --git a/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html b/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html index ad7b856223..5301c6b7a1 100644 --- a/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html +++ b/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html @@ -25,7 +25,8 @@ weight: 20
diff --git a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html index ab008c38af..915304f912 100644 --- a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html +++ b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html @@ -37,7 +37,9 @@ weight: 20 diff --git a/content/en/docs/tutorials/kubernetes-basics/explore/explore-interactive.html b/content/en/docs/tutorials/kubernetes-basics/explore/explore-interactive.html index 8c87cfab18..ad79ec5d7f 100644 --- a/content/en/docs/tutorials/kubernetes-basics/explore/explore-interactive.html +++ b/content/en/docs/tutorials/kubernetes-basics/explore/explore-interactive.html @@ -29,7 +29,9 @@ weight: 20 diff --git a/content/en/docs/tutorials/kubernetes-basics/explore/explore-intro.html b/content/en/docs/tutorials/kubernetes-basics/explore/explore-intro.html index 3b5a51dc40..6f8cd442b8 100644 --- a/content/en/docs/tutorials/kubernetes-basics/explore/explore-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/explore/explore-intro.html @@ -74,11 +74,11 @@ weight: 10

Nodes

-

A Pod always runs on a Node. A Node is a worker machine in Kubernetes and may be either a virtual or a physical machine, depending on the cluster. Each Node is managed by the Master. A Node can have multiple pods, and the Kubernetes master automatically handles scheduling the pods across the Nodes in the cluster. The Master's automatic scheduling takes into account the available resources on each Node.

+

A Pod always runs on a Node. A Node is a worker machine in Kubernetes and may be either a virtual or a physical machine, depending on the cluster. Each Node is managed by the control plane. A Node can have multiple pods, and the Kubernetes control plane automatically handles scheduling the pods across the Nodes in the cluster. The control plane's automatic scheduling takes into account the available resources on each Node.

Every Kubernetes Node runs at least:

    -
  • Kubelet, a process responsible for communication between the Kubernetes Master and the Node; it manages the Pods and the containers running on a machine.
  • +
  • Kubelet, a process responsible for communication between the Kubernetes control plane and the Node; it manages the Pods and the containers running on a machine.
  • A container runtime (like Docker) responsible for pulling the container image from a registry, unpacking the container, and running the application.
diff --git a/content/en/docs/tutorials/kubernetes-basics/expose/expose-interactive.html b/content/en/docs/tutorials/kubernetes-basics/expose/expose-interactive.html index e89414b917..2b5d3aa365 100644 --- a/content/en/docs/tutorials/kubernetes-basics/expose/expose-interactive.html +++ b/content/en/docs/tutorials/kubernetes-basics/expose/expose-interactive.html @@ -26,7 +26,9 @@ weight: 20
diff --git a/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html index d7687bc7b1..1996859e2e 100644 --- a/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -37,7 +37,7 @@ weight: 10
  • ClusterIP (default) - Exposes the Service on an internal IP in the cluster. This type makes the Service only reachable from within the cluster.
  • NodePort - Exposes the Service on the same port of each selected Node in the cluster using NAT. Makes a Service accessible from outside the cluster using <NodeIP>:<NodePort>. Superset of ClusterIP.
  • LoadBalancer - Creates an external load balancer in the current cloud (if supported) and assigns a fixed, external IP to the Service. Superset of NodePort.
  • -
  • ExternalName - Maps the Service to the contents of the externalName field (e.g. `foo.bar.example.com`), by returning a CNAME record with its value. No proxying of any kind is set up. This type requires v1.7 or higher of kube-dns, or CoreDNS version 0.0.8 or higher.
  • +
  • ExternalName - Maps the Service to the contents of the externalName field (e.g. foo.bar.example.com), by returning a CNAME record with its value. No proxying of any kind is set up. This type requires v1.7 or higher of kube-dns, or CoreDNS version 0.0.8 or higher.
  • More information about the different types of Services can be found in the Using Source IP tutorial. Also see Connecting Applications with Services.

    Additionally, note that there are some use cases with Services that involve not defining selector in the spec. A Service created without selector will also not create the corresponding Endpoints object. This allows users to manually map a Service to specific endpoints. Another possibility why there may be no selector is you are strictly using type: ExternalName.

    diff --git a/content/en/docs/tutorials/kubernetes-basics/scale/scale-interactive.html b/content/en/docs/tutorials/kubernetes-basics/scale/scale-interactive.html index 77e707c429..3fedf79782 100644 --- a/content/en/docs/tutorials/kubernetes-basics/scale/scale-interactive.html +++ b/content/en/docs/tutorials/kubernetes-basics/scale/scale-interactive.html @@ -26,7 +26,9 @@ weight: 20
    diff --git a/content/en/docs/tutorials/kubernetes-basics/update/update-interactive.html b/content/en/docs/tutorials/kubernetes-basics/update/update-interactive.html index 42663ecdaa..2e70d61d74 100644 --- a/content/en/docs/tutorials/kubernetes-basics/update/update-interactive.html +++ b/content/en/docs/tutorials/kubernetes-basics/update/update-interactive.html @@ -26,7 +26,8 @@ weight: 20 @@ -35,3 +36,7 @@ weight: 20 + + + + diff --git a/content/en/docs/tutorials/services/_index.md b/content/en/docs/tutorials/services/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tutorials/stateful-application/_index.md b/content/en/docs/tutorials/stateful-application/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tutorials/stateful-application/basic-stateful-set.md b/content/en/docs/tutorials/stateful-application/basic-stateful-set.md index a44c4392ca..760d3df013 100644 --- a/content/en/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/en/docs/tutorials/stateful-application/basic-stateful-set.md @@ -26,7 +26,7 @@ following Kubernetes concepts: * [Cluster DNS](/docs/concepts/services-networking/dns-pod-service/) * [Headless Services](/docs/concepts/services-networking/service/#headless-services) * [PersistentVolumes](/docs/concepts/storage/persistent-volumes/) -* [PersistentVolume Provisioning](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/persistent-volume-provisioning/) +* [PersistentVolume Provisioning](https://github.com/kubernetes/examples/tree/master/staging/persistent-volume-provisioning/) * [StatefulSets](/docs/concepts/workloads/controllers/statefulset/) * The [kubectl](/docs/reference/kubectl/kubectl/) command line tool @@ -845,12 +845,12 @@ kubectl get pods -w -l app=nginx ``` Use [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands/#delete) to delete the -StatefulSet. Make sure to supply the `--cascade=false` parameter to the +StatefulSet. 
Make sure to supply the `--cascade=orphan` parameter to the command. This parameter tells Kubernetes to only delete the StatefulSet, and to not delete any of its Pods. ```shell -kubectl delete statefulset web --cascade=false +kubectl delete statefulset web --cascade=orphan ``` ``` statefulset.apps "web" deleted @@ -966,7 +966,7 @@ kubectl get pods -w -l app=nginx ``` In another terminal, delete the StatefulSet again. This time, omit the -`--cascade=false` parameter. +`--cascade=orphan` parameter. ```shell kubectl delete statefulset web diff --git a/content/en/docs/tutorials/stateful-application/cassandra.md b/content/en/docs/tutorials/stateful-application/cassandra.md index 5b453f6594..ffbf65286b 100644 --- a/content/en/docs/tutorials/stateful-application/cassandra.md +++ b/content/en/docs/tutorials/stateful-application/cassandra.md @@ -50,7 +50,7 @@ To complete this tutorial, you should already have a basic familiarity with ### Additional Minikube setup instructions {{< caution >}} -[Minikube](https://minikube.sigs.k8s.io/docs/) defaults to 1024MiB of memory and 1 CPU. +[Minikube](https://minikube.sigs.k8s.io/docs/) defaults to 2048MB of memory and 2 CPU. Running Minikube with the default resource configuration results in insufficient resource errors during this tutorial. To avoid these errors, start Minikube with the following settings: @@ -266,7 +266,7 @@ to also be deleted. Never assume you'll be able to access data if its volume cla The Pods in this tutorial use the [`gcr.io/google-samples/cassandra:v13`](https://github.com/kubernetes/examples/blob/master/cassandra/image/Dockerfile) image from Google's [container registry](https://cloud.google.com/container-registry/docs/). -The Docker image above is based on [debian-base](https://github.com/kubernetes/kubernetes/tree/master/build/debian-base) +The Docker image above is based on [debian-base](https://github.com/kubernetes/release/tree/master/images/build/debian-base) and includes OpenJDK 8. 
This image includes a standard Cassandra installation from the Apache Debian repo. diff --git a/content/en/docs/tutorials/stateful-application/zookeeper.md b/content/en/docs/tutorials/stateful-application/zookeeper.md index 6d517ef229..3ed1cd454b 100644 --- a/content/en/docs/tutorials/stateful-application/zookeeper.md +++ b/content/en/docs/tutorials/stateful-application/zookeeper.md @@ -27,7 +27,7 @@ Kubernetes concepts: - [Cluster DNS](/docs/concepts/services-networking/dns-pod-service/) - [Headless Services](/docs/concepts/services-networking/service/#headless-services) - [PersistentVolumes](/docs/concepts/storage/volumes/) -- [PersistentVolume Provisioning](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/persistent-volume-provisioning/) +- [PersistentVolume Provisioning](https://github.com/kubernetes/examples/tree/master/staging/persistent-volume-provisioning/) - [StatefulSets](/docs/concepts/workloads/controllers/statefulset/) - [PodDisruptionBudgets](/docs/concepts/workloads/pods/disruptions/#pod-disruption-budget) - [PodAntiAffinity](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) @@ -937,7 +937,7 @@ Use [`kubectl drain`](/docs/reference/generated/kubectl/kubectl-commands/#drain) drain the node on which the `zk-0` Pod is scheduled. ```shell -kubectl drain $(kubectl get pod zk-0 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-local-data +kubectl drain $(kubectl get pod zk-0 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-emptydir-data ``` ``` @@ -972,7 +972,7 @@ Keep watching the `StatefulSet`'s Pods in the first terminal and drain the node `zk-1` is scheduled. 
```shell -kubectl drain $(kubectl get pod zk-1 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-local-data "kubernetes-node-ixsl" cordoned +kubectl drain $(kubectl get pod zk-1 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-emptydir-data "kubernetes-node-ixsl" cordoned ``` ``` @@ -1015,7 +1015,7 @@ Continue to watch the Pods of the stateful set, and drain the node on which `zk-2` is scheduled. ```shell -kubectl drain $(kubectl get pod zk-2 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-local-data +kubectl drain $(kubectl get pod zk-2 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-emptydir-data ``` ``` @@ -1101,7 +1101,7 @@ zk-1 1/1 Running 0 13m Attempt to drain the node on which `zk-2` is scheduled. ```shell -kubectl drain $(kubectl get pod zk-2 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-local-data +kubectl drain $(kubectl get pod zk-2 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-emptydir-data ``` The output: diff --git a/content/en/docs/tutorials/stateless-application/_index.md b/content/en/docs/tutorials/stateless-application/_index.md old mode 100755 new mode 100644 diff --git a/content/en/docs/tutorials/stateless-application/guestbook.md b/content/en/docs/tutorials/stateless-application/guestbook.md index d21e60dde8..c31bcbc49e 100644 --- a/content/en/docs/tutorials/stateless-application/guestbook.md +++ b/content/en/docs/tutorials/stateless-application/guestbook.md @@ -1,121 +1,207 @@ --- -title: "Example: Deploying PHP Guestbook application with MongoDB" +title: "Example: Deploying PHP Guestbook application with Redis" reviewers: - ahmetb +- jimangel content_type: tutorial weight: 20 card: name: tutorials weight: 30 - title: "Stateless Example: PHP Guestbook with MongoDB" + title: "Stateless Example: PHP Guestbook with Redis" min-kubernetes-server-version: v1.14 +source: 
https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook --- -This tutorial shows you how to build and deploy a simple _(not production ready)_, multi-tier web application using Kubernetes and [Docker](https://www.docker.com/). This example consists of the following components: +This tutorial shows you how to build and deploy a simple _(not production +ready)_, multi-tier web application using Kubernetes and +[Docker](https://www.docker.com/). This example consists of the following +components: -* A single-instance [MongoDB](https://www.mongodb.com/) to store guestbook entries +* A single-instance [Redis](https://www.redis.io/) to store guestbook entries * Multiple web frontend instances ## {{% heading "objectives" %}} -* Start up a Mongo database. +* Start up a Redis leader. +* Start up two Redis followers. * Start up the guestbook frontend. * Expose and view the Frontend Service. * Clean up. - ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - -## Start up the Mongo Database +## Start up the Redis Database -The guestbook application uses MongoDB to store its data. +The guestbook application uses Redis to store its data. -### Creating the Mongo Deployment +### Creating the Redis Deployment -The manifest file, included below, specifies a Deployment controller that runs a single replica MongoDB Pod. +The manifest file, included below, specifies a Deployment controller that runs a single replica Redis Pod. -{{< codenew file="application/guestbook/mongo-deployment.yaml" >}} +{{< codenew file="application/guestbook/redis-leader-deployment.yaml" >}} 1. Launch a terminal window in the directory you downloaded the manifest files. -1. Apply the MongoDB Deployment from the `mongo-deployment.yaml` file: +1. 
Apply the Redis Deployment from the `redis-leader-deployment.yaml` file: - + - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-deployment.yaml - ``` + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-leader-deployment.yaml + ``` -1. Query the list of Pods to verify that the MongoDB Pod is running: +1. Query the list of Pods to verify that the Redis Pod is running: - ```shell - kubectl get pods - ``` + ```shell + kubectl get pods + ``` - The response should be similar to this: + The response should be similar to this: - ```shell - NAME READY STATUS RESTARTS AGE - mongo-5cfd459dd4-lrcjb 1/1 Running 0 28s - ``` + ``` + NAME READY STATUS RESTARTS AGE + redis-leader-fb76b4755-xjr2n 1/1 Running 0 13s + ``` -1. Run the following command to view the logs from the MongoDB Deployment: +1. Run the following command to view the logs from the Redis leader Pod: - ```shell - kubectl logs -f deployment/mongo - ``` + ```shell + kubectl logs -f deployment/redis-leader + ``` -### Creating the MongoDB Service +### Creating the Redis leader Service -The guestbook application needs to communicate to the MongoDB to write its data. You need to apply a [Service](/docs/concepts/services-networking/service/) to proxy the traffic to the MongoDB Pod. A Service defines a policy to access the Pods. +The guestbook application needs to communicate to the Redis to write its data. +You need to apply a [Service](/docs/concepts/services-networking/service/) to +proxy the traffic to the Redis Pod. A Service defines a policy to access the +Pods. -{{< codenew file="application/guestbook/mongo-service.yaml" >}} +{{< codenew file="application/guestbook/redis-leader-service.yaml" >}} -1. Apply the MongoDB Service from the following `mongo-service.yaml` file: +1. 
Apply the Redis Service from the following `redis-leader-service.yaml` file: - + - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-service.yaml - ``` + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-leader-service.yaml + ``` -1. Query the list of Services to verify that the MongoDB Service is running: +1. Query the list of Services to verify that the Redis Service is running: - ```shell - kubectl get service - ``` + ```shell + kubectl get service + ``` - The response should be similar to this: + The response should be similar to this: - ```shell - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes ClusterIP 10.0.0.1 443/TCP 1m - mongo ClusterIP 10.0.0.151 27017/TCP 8s - ``` + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + kubernetes ClusterIP 10.0.0.1 443/TCP 1m + redis-leader ClusterIP 10.103.78.24 6379/TCP 16s + ``` {{< note >}} -This manifest file creates a Service named `mongo` with a set of labels that match the labels previously defined, so the Service routes network traffic to the MongoDB Pod. +This manifest file creates a Service named `redis-leader` with a set of labels +that match the labels previously defined, so the Service routes network +traffic to the Redis Pod. {{< /note >}} +### Set up Redis followers + +Although the Redis leader is a single Pod, you can make it highly available +and meet traffic demands by adding a few Redis followers, or replicas. + +{{< codenew file="application/guestbook/redis-follower-deployment.yaml" >}} + +1. Apply the Redis Deployment from the following `redis-follower-deployment.yaml` file: + + + + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-follower-deployment.yaml + ``` + +1. 
Verify that the two Redis follower replicas are running by querying the list of Pods: + + ```shell + kubectl get pods + ``` + + The response should be similar to this: + + ``` + NAME READY STATUS RESTARTS AGE + redis-follower-dddfbdcc9-82sfr 1/1 Running 0 37s + redis-follower-dddfbdcc9-qrt5k 1/1 Running 0 38s + redis-leader-fb76b4755-xjr2n 1/1 Running 0 11m + ``` + +### Creating the Redis follower service + +The guestbook application needs to communicate with the Redis followers to +read data. To make the Redis followers discoverable, you must set up another +[Service](/docs/concepts/services-networking/service/). + +{{< codenew file="application/guestbook/redis-follower-service.yaml" >}} + +1. Apply the Redis Service from the following `redis-follower-service.yaml` file: + + + + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-follower-service.yaml + ``` + +1. Query the list of Services to verify that the Redis Service is running: + + ```shell + kubectl get service + ``` + + The response should be similar to this: + + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + kubernetes ClusterIP 10.96.0.1 443/TCP 3d19h + redis-follower ClusterIP 10.110.162.42 6379/TCP 9s + redis-leader ClusterIP 10.103.78.24 6379/TCP 6m10s + ``` + +{{< note >}} +This manifest file creates a Service named `redis-follower` with a set of +labels that match the labels previously defined, so the Service routes network +traffic to the Redis Pod. +{{< /note >}} ## Set up and Expose the Guestbook Frontend -The guestbook application has a web frontend serving the HTTP requests written in PHP. It is configured to connect to the `mongo` Service to store Guestbook entries. +Now that you have the Redis storage of your guestbook up and running, start +the guestbook web servers. Like the Redis followers, the frontend is deployed +using a Kubernetes Deployment. + +The guestbook app uses a PHP frontend. 
It is configured to communicate with +either the Redis follower or leader Services, depending on whether the request +is a read or a write. The frontend exposes a JSON interface, and serves a +jQuery-Ajax-based UX. ### Creating the Guestbook Frontend Deployment @@ -123,190 +209,210 @@ The guestbook application has a web frontend serving the HTTP requests written i 1. Apply the frontend Deployment from the `frontend-deployment.yaml` file: - + - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-deployment.yaml - ``` + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-deployment.yaml + ``` 1. Query the list of Pods to verify that the three frontend replicas are running: - ```shell - kubectl get pods -l app.kubernetes.io/name=guestbook -l app.kubernetes.io/component=frontend - ``` + ```shell + kubectl get pods -l app=guestbook -l tier=frontend + ``` - The response should be similar to this: + The response should be similar to this: - ``` - NAME READY STATUS RESTARTS AGE - frontend-3823415956-dsvc5 1/1 Running 0 54s - frontend-3823415956-k22zn 1/1 Running 0 54s - frontend-3823415956-w9gbt 1/1 Running 0 54s - ``` + ``` + NAME READY STATUS RESTARTS AGE + frontend-85595f5bf9-5tqhb 1/1 Running 0 47s + frontend-85595f5bf9-qbzwm 1/1 Running 0 47s + frontend-85595f5bf9-zchwc 1/1 Running 0 47s + ``` ### Creating the Frontend Service -The `mongo` Services you applied is only accessible within the Kubernetes cluster because the default type for a Service is [ClusterIP](/docs/concepts/services-networking/service/#publishing-services-service-types). `ClusterIP` provides a single IP address for the set of Pods the Service is pointing to. This IP address is accessible only within the cluster. +The `Redis` Services you applied is only accessible within the Kubernetes +cluster because the default type for a Service is +[ClusterIP](/docs/concepts/services-networking/service/#publishing-services-service-types). 
+`ClusterIP` provides a single IP address for the set of Pods the Service is +pointing to. This IP address is accessible only within the cluster. -If you want guests to be able to access your guestbook, you must configure the frontend Service to be externally visible, so a client can request the Service from outside the Kubernetes cluster. However a Kubernetes user you can use `kubectl port-forward` to access the service even though it uses a `ClusterIP`. +If you want guests to be able to access your guestbook, you must configure the +frontend Service to be externally visible, so a client can request the Service +from outside the Kubernetes cluster. However a Kubernetes user you can use +`kubectl port-forward` to access the service even though it uses a +`ClusterIP`. {{< note >}} -Some cloud providers, like Google Compute Engine or Google Kubernetes Engine, support external load balancers. If your cloud provider supports load balancers and you want to use it, uncomment `type: LoadBalancer`. +Some cloud providers, like Google Compute Engine or Google Kubernetes Engine, +support external load balancers. If your cloud provider supports load +balancers and you want to use it, uncomment `type: LoadBalancer`. {{< /note >}} {{< codenew file="application/guestbook/frontend-service.yaml" >}} 1. Apply the frontend Service from the `frontend-service.yaml` file: - + - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-service.yaml - ``` + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-service.yaml + ``` 1. 
Query the list of Services to verify that the frontend Service is running: - ```shell - kubectl get services - ``` + ```shell + kubectl get services + ``` - The response should be similar to this: + The response should be similar to this: - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend ClusterIP 10.0.0.112 80/TCP 6s - kubernetes ClusterIP 10.0.0.1 443/TCP 4m - mongo ClusterIP 10.0.0.151 6379/TCP 2m - ``` + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + frontend ClusterIP 10.97.28.230 80/TCP 19s + kubernetes ClusterIP 10.96.0.1 443/TCP 3d19h + redis-follower ClusterIP 10.110.162.42 6379/TCP 5m48s + redis-leader ClusterIP 10.103.78.24 6379/TCP 11m + ``` ### Viewing the Frontend Service via `kubectl port-forward` 1. Run the following command to forward port `8080` on your local machine to port `80` on the service. - ```shell - kubectl port-forward svc/frontend 8080:80 - ``` + ```shell + kubectl port-forward svc/frontend 8080:80 + ``` - The response should be similar to this: + The response should be similar to this: - ``` - Forwarding from 127.0.0.1:8080 -> 80 - Forwarding from [::1]:8080 -> 80 - ``` + ``` + Forwarding from 127.0.0.1:8080 -> 80 + Forwarding from [::1]:8080 -> 80 + ``` 1. load the page [http://localhost:8080](http://localhost:8080) in your browser to view your guestbook. ### Viewing the Frontend Service via `LoadBalancer` -If you deployed the `frontend-service.yaml` manifest with type: `LoadBalancer` you need to find the IP address to view your Guestbook. +If you deployed the `frontend-service.yaml` manifest with type: `LoadBalancer` +you need to find the IP address to view your Guestbook. 1. Run the following command to get the IP address for the frontend Service. 
- ```shell - kubectl get service frontend - ``` + ```shell + kubectl get service frontend + ``` - The response should be similar to this: + The response should be similar to this: - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend LoadBalancer 10.51.242.136 109.197.92.229 80:32372/TCP 1m - ``` + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + frontend LoadBalancer 10.51.242.136 109.197.92.229 80:32372/TCP 1m + ``` 1. Copy the external IP address, and load the page in your browser to view your guestbook. +{{< note >}} +Try adding some guestbook entries by typing in a message, and clicking Submit. +The message you typed appears in the frontend. This message indicates that +data is successfully added to Redis through the Services you created earlier. +{{< /note >}} + ## Scale the Web Frontend -You can scale up or down as needed because your servers are defined as a Service that uses a Deployment controller. +You can scale up or down as needed because your servers are defined as a +Service that uses a Deployment controller. 1. Run the following command to scale up the number of frontend Pods: - ```shell - kubectl scale deployment frontend --replicas=5 - ``` + ```shell + kubectl scale deployment frontend --replicas=5 + ``` 1. 
Query the list of Pods to verify the number of frontend Pods running: - ```shell - kubectl get pods - ``` + ```shell + kubectl get pods + ``` - The response should look similar to this: + The response should look similar to this: - ``` - NAME READY STATUS RESTARTS AGE - frontend-3823415956-70qj5 1/1 Running 0 5s - frontend-3823415956-dsvc5 1/1 Running 0 54m - frontend-3823415956-k22zn 1/1 Running 0 54m - frontend-3823415956-w9gbt 1/1 Running 0 54m - frontend-3823415956-x2pld 1/1 Running 0 5s - mongo-1068406935-3lswp 1/1 Running 0 56m - ``` + ``` + NAME READY STATUS RESTARTS AGE + frontend-85595f5bf9-5df5m 1/1 Running 0 83s + frontend-85595f5bf9-7zmg5 1/1 Running 0 83s + frontend-85595f5bf9-cpskg 1/1 Running 0 15m + frontend-85595f5bf9-l2l54 1/1 Running 0 14m + frontend-85595f5bf9-l9c8z 1/1 Running 0 14m + redis-follower-dddfbdcc9-82sfr 1/1 Running 0 97m + redis-follower-dddfbdcc9-qrt5k 1/1 Running 0 97m + redis-leader-fb76b4755-xjr2n 1/1 Running 0 108m + ``` 1. Run the following command to scale down the number of frontend Pods: - ```shell - kubectl scale deployment frontend --replicas=2 - ``` + ```shell + kubectl scale deployment frontend --replicas=2 + ``` 1. Query the list of Pods to verify the number of frontend Pods running: - ```shell - kubectl get pods - ``` - - The response should look similar to this: - - ``` - NAME READY STATUS RESTARTS AGE - frontend-3823415956-k22zn 1/1 Running 0 1h - frontend-3823415956-w9gbt 1/1 Running 0 1h - mongo-1068406935-3lswp 1/1 Running 0 1h - ``` + ```shell + kubectl get pods + ``` + The response should look similar to this: + ``` + NAME READY STATUS RESTARTS AGE + frontend-85595f5bf9-cpskg 1/1 Running 0 16m + frontend-85595f5bf9-l9c8z 1/1 Running 0 15m + redis-follower-dddfbdcc9-82sfr 1/1 Running 0 98m + redis-follower-dddfbdcc9-qrt5k 1/1 Running 0 98m + redis-leader-fb76b4755-xjr2n 1/1 Running 0 109m + ``` ## {{% heading "cleanup" %}} -Deleting the Deployments and Services also deletes any running Pods. 
Use labels to delete multiple resources with one command. +Deleting the Deployments and Services also deletes any running Pods. Use +labels to delete multiple resources with one command. 1. Run the following commands to delete all Pods, Deployments, and Services. - ```shell - kubectl delete deployment -l app.kubernetes.io/name=mongo - kubectl delete service -l app.kubernetes.io/name=mongo - kubectl delete deployment -l app.kubernetes.io/name=guestbook - kubectl delete service -l app.kubernetes.io/name=guestbook - ``` + ```shell + kubectl delete deployment -l app=redis + kubectl delete service -l app=redis + kubectl delete deployment frontend + kubectl delete service frontend + ``` - The responses should be: + The response should look similar to this: - ``` - deployment.apps "mongo" deleted - service "mongo" deleted - deployment.apps "frontend" deleted - service "frontend" deleted - ``` + ``` + deployment.apps "redis-follower" deleted + deployment.apps "redis-leader" deleted + deployment.apps "frontend" deleted + service "frontend" deleted + ``` 1. Query the list of Pods to verify that no Pods are running: - ```shell - kubectl get pods - ``` - - The response should be this: - - ``` - No resources found. - ``` + ```shell + kubectl get pods + ``` + The response should look similar to this: + ``` + No resources found in default namespace. + ``` ## {{% heading "whatsnext" %}} diff --git a/content/en/examples/access/endpoints-aggregated.yaml b/content/en/examples/access/endpoints-aggregated.yaml new file mode 100644 index 0000000000..41cd12164a --- /dev/null +++ b/content/en/examples/access/endpoints-aggregated.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + kubernetes.io/description: |- + Add endpoints write permissions to the edit and admin roles. This was + removed by default in 1.22 because of CVE-2021-25740. See + https://issue.k8s.io/103675. 
This can allow writers to direct LoadBalancer + or Ingress implementations to expose backend IPs that would not otherwise + be accessible, and can circumvent network policies or security controls + intended to prevent/isolate access to those backends. + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: custom:aggregate-to-edit:endpoints # you can change this if you wish +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] diff --git a/content/en/examples/application/guestbook/frontend-deployment.yaml b/content/en/examples/application/guestbook/frontend-deployment.yaml index 613c654aa9..f97f20dab6 100644 --- a/content/en/examples/application/guestbook/frontend-deployment.yaml +++ b/content/en/examples/application/guestbook/frontend-deployment.yaml @@ -1,32 +1,29 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook apiVersion: apps/v1 kind: Deployment metadata: name: frontend - labels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend spec: + replicas: 3 selector: matchLabels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend - replicas: 3 + app: guestbook + tier: frontend template: metadata: labels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend + app: guestbook + tier: frontend spec: containers: - - name: guestbook - image: paulczar/gb-frontend:v5 - # image: gcr.io/google-samples/gb-frontend:v4 + - name: php-redis + image: gcr.io/google_samples/gb-frontend:v5 + env: + - name: GET_HOSTS_FROM + value: "dns" resources: requests: cpu: 100m memory: 100Mi - env: - - name: GET_HOSTS_FROM - value: dns ports: - - containerPort: 80 + - containerPort: 80 \ No newline at end of file diff --git a/content/en/examples/application/guestbook/frontend-service.yaml b/content/en/examples/application/guestbook/frontend-service.yaml index 34ad3771d7..410c6bbaf2 100644 --- 
a/content/en/examples/application/guestbook/frontend-service.yaml +++ b/content/en/examples/application/guestbook/frontend-service.yaml @@ -1,16 +1,19 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook apiVersion: v1 kind: Service metadata: name: frontend labels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend + app: guestbook + tier: frontend spec: # if your cluster supports it, uncomment the following to automatically create # an external load-balanced IP for the frontend service. # type: LoadBalancer + #type: LoadBalancer ports: + # the port that this service should serve on - port: 80 selector: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend + app: guestbook + tier: frontend \ No newline at end of file diff --git a/content/en/examples/application/guestbook/redis-follower-deployment.yaml b/content/en/examples/application/guestbook/redis-follower-deployment.yaml new file mode 100644 index 0000000000..c418cf7364 --- /dev/null +++ b/content/en/examples/application/guestbook/redis-follower-deployment.yaml @@ -0,0 +1,30 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-follower + labels: + app: redis + role: follower + tier: backend +spec: + replicas: 2 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + role: follower + tier: backend + spec: + containers: + - name: follower + image: gcr.io/google_samples/gb-redis-follower:v2 + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 \ No newline at end of file diff --git a/content/en/examples/application/guestbook/redis-follower-service.yaml b/content/en/examples/application/guestbook/redis-follower-service.yaml new file mode 100644 index 0000000000..53283d35c4 --- /dev/null +++ b/content/en/examples/application/guestbook/redis-follower-service.yaml @@ -0,0 +1,17 @@ +# 
SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: redis-follower + labels: + app: redis + role: follower + tier: backend +spec: + ports: + # the port that this service should serve on + - port: 6379 + selector: + app: redis + role: follower + tier: backend \ No newline at end of file diff --git a/content/en/examples/application/guestbook/redis-leader-deployment.yaml b/content/en/examples/application/guestbook/redis-leader-deployment.yaml new file mode 100644 index 0000000000..9c7547291c --- /dev/null +++ b/content/en/examples/application/guestbook/redis-leader-deployment.yaml @@ -0,0 +1,30 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-leader + labels: + app: redis + role: leader + tier: backend +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + role: leader + tier: backend + spec: + containers: + - name: leader + image: "docker.io/redis:6.0.5" + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 \ No newline at end of file diff --git a/content/en/examples/application/guestbook/redis-leader-service.yaml b/content/en/examples/application/guestbook/redis-leader-service.yaml new file mode 100644 index 0000000000..e04cc183d0 --- /dev/null +++ b/content/en/examples/application/guestbook/redis-leader-service.yaml @@ -0,0 +1,17 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: redis-leader + labels: + app: redis + role: leader + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis + role: leader + tier: backend \ No newline at end of file diff --git a/content/en/examples/application/guestbook/mongo-deployment.yaml b/content/en/examples/application/mongodb/mongo-deployment.yaml similarity index 
100% rename from content/en/examples/application/guestbook/mongo-deployment.yaml rename to content/en/examples/application/mongodb/mongo-deployment.yaml diff --git a/content/en/examples/application/guestbook/mongo-service.yaml b/content/en/examples/application/mongodb/mongo-service.yaml similarity index 100% rename from content/en/examples/application/guestbook/mongo-service.yaml rename to content/en/examples/application/mongodb/mongo-service.yaml diff --git a/content/en/examples/examples_test.go b/content/en/examples/examples_test.go index 982ddbd693..a31d01b130 100644 --- a/content/en/examples/examples_test.go +++ b/content/en/examples/examples_test.go @@ -149,8 +149,19 @@ func getCodecForObject(obj runtime.Object) (runtime.Codec, error) { func validateObject(obj runtime.Object) (errors field.ErrorList) { podValidationOptions := validation.PodValidationOptions{ - AllowMultipleHugePageResources: true, - AllowDownwardAPIHugePages: true, + AllowDownwardAPIHugePages: true, + AllowInvalidPodDeletionCost: false, + AllowIndivisibleHugePagesValues: true, + AllowWindowsHostProcessField: true, + AllowExpandedDNSConfig: true, + } + + quotaValidationOptions := validation.ResourceQuotaValidationOptions{ + AllowPodAffinityNamespaceSelector: true, + } + + pspValidationOptions := policy_validation.PodSecurityPolicyValidationOptions{ + AllowEphemeralVolumeType: true, } // Enable CustomPodDNS for testing @@ -174,20 +185,23 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { case *api.Namespace: errors = validation.ValidateNamespace(t) case *api.PersistentVolume: - errors = validation.ValidatePersistentVolume(t) + opts := validation.PersistentVolumeSpecValidationOptions{ + AllowReadWriteOncePod: true, + } + errors = validation.ValidatePersistentVolume(t, opts) case *api.PersistentVolumeClaim: if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = validation.ValidatePersistentVolumeClaim(t) + opts := 
validation.PersistentVolumeClaimSpecValidationOptions{ + AllowReadWriteOncePod: true, + } + errors = validation.ValidatePersistentVolumeClaim(t, opts) case *api.Pod: if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - opts := validation.PodValidationOptions{ - AllowMultipleHugePageResources: true, - } - errors = validation.ValidatePodCreate(t, opts) + errors = validation.ValidatePodCreate(t, podValidationOptions) case *api.PodList: for i := range t.Items { errors = append(errors, validateObject(&t.Items[i])...) @@ -210,7 +224,7 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = validation.ValidateResourceQuota(t) + errors = validation.ValidateResourceQuota(t, quotaValidationOptions) case *api.Secret: if t.Namespace == "" { t.Namespace = api.NamespaceDefault @@ -238,7 +252,7 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = apps_validation.ValidateStatefulSet(t) + errors = apps_validation.ValidateStatefulSet(t, podValidationOptions) case *autoscaling.HorizontalPodAutoscaler: if t.Namespace == "" { t.Namespace = api.NamespaceDefault @@ -287,7 +301,7 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { errors = networking_validation.ValidateIngressClass(t) case *policy.PodSecurityPolicy: - errors = policy_validation.ValidatePodSecurityPolicy(t) + errors = policy_validation.ValidatePodSecurityPolicy(t, pspValidationOptions) case *apps.ReplicaSet: if t.Namespace == "" { t.Namespace = api.NamespaceDefault @@ -462,12 +476,12 @@ func TestExampleObjectSchemas(t *testing.T) { "cassandra-statefulset": {&apps.StatefulSet{}, &storage.StorageClass{}}, }, "application/guestbook": { - "frontend-deployment": {&apps.Deployment{}}, - "frontend-service": {&api.Service{}}, - "redis-master-deployment": {&apps.Deployment{}}, - "redis-master-service": {&api.Service{}}, - 
"redis-slave-deployment": {&apps.Deployment{}}, - "redis-slave-service": {&api.Service{}}, + "frontend-deployment": {&apps.Deployment{}}, + "frontend-service": {&api.Service{}}, + "redis-follower-deployment": {&apps.Deployment{}}, + "redis-follower-service": {&api.Service{}}, + "redis-leader-deployment": {&apps.Deployment{}}, + "redis-leader-service": {&api.Service{}}, }, "application/hpa": { "php-apache": {&autoscaling.HorizontalPodAutoscaler{}}, @@ -477,8 +491,10 @@ func TestExampleObjectSchemas(t *testing.T) { "nginx-svc": {&api.Service{}}, }, "application/job": { - "cronjob": {&batch.CronJob{}}, - "job-tmpl": {&batch.Job{}}, + "cronjob": {&batch.CronJob{}}, + "job-tmpl": {&batch.Job{}}, + "indexed-job": {&batch.Job{}}, + "indexed-job-vol": {&batch.Job{}}, }, "application/job/rabbitmq": { "job": {&batch.Job{}}, @@ -557,7 +573,8 @@ func TestExampleObjectSchemas(t *testing.T) { "two-container-pod": {&api.Pod{}}, }, "pods/config": { - "redis-pod": {&api.Pod{}}, + "redis-pod": {&api.Pod{}}, + "example-redis-config": {&api.ConfigMap{}}, }, "pods/inject": { "dapi-envars-container": {&api.Pod{}}, @@ -610,10 +627,11 @@ func TestExampleObjectSchemas(t *testing.T) { "redis": {&api.Pod{}}, }, "policy": { - "baseline-psp": {&policy.PodSecurityPolicy{}}, - "example-psp": {&policy.PodSecurityPolicy{}}, - "privileged-psp": {&policy.PodSecurityPolicy{}}, - "restricted-psp": {&policy.PodSecurityPolicy{}}, + "baseline-psp": {&policy.PodSecurityPolicy{}}, + "example-psp": {&policy.PodSecurityPolicy{}}, + "priority-class-resourcequota": {&api.ResourceQuota{}}, + "privileged-psp": {&policy.PodSecurityPolicy{}}, + "restricted-psp": {&policy.PodSecurityPolicy{}}, "zookeeper-pod-disruption-budget-maxunavailable": {&policy.PodDisruptionBudget{}}, "zookeeper-pod-disruption-budget-minavailable": {&policy.PodDisruptionBudget{}}, }, @@ -645,6 +663,7 @@ func TestExampleObjectSchemas(t *testing.T) { "minimal-ingress": {&networking.Ingress{}}, "name-virtual-host-ingress": 
{&networking.Ingress{}}, "name-virtual-host-ingress-no-third-host": {&networking.Ingress{}}, + "namespaced-params": {&networking.IngressClass{}}, "network-policy-allow-all-egress": {&networking.NetworkPolicy{}}, "network-policy-allow-all-ingress": {&networking.NetworkPolicy{}}, "network-policy-default-deny-egress": {&networking.NetworkPolicy{}}, diff --git a/content/en/examples/policy/baseline-psp.yaml b/content/en/examples/policy/baseline-psp.yaml index 36e440588b..57258bf313 100644 --- a/content/en/examples/policy/baseline-psp.yaml +++ b/content/en/examples/policy/baseline-psp.yaml @@ -6,20 +6,16 @@ metadata: # Optional: Allow the default AppArmor profile, requires setting the default. apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - # Optional: Allow the default seccomp profile, requires setting the default. - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default,unconfined' - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'unconfined' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' spec: privileged: false - # The moby default capability set, defined here: - # https://github.com/moby/moby/blob/0a5cec2833f82a6ad797d70acbf9cbbaf8956017/oci/caps/defaults.go#L6-L19 + # The moby default capability set, minus NET_RAW allowedCapabilities: - 'CHOWN' - 'DAC_OVERRIDE' - 'FSETID' - 'FOWNER' - 'MKNOD' - - 'NET_RAW' - 'SETGID' - 'SETUID' - 'SETFCAP' @@ -36,15 +32,16 @@ spec: - 'projected' - 'secret' - 'downwardAPI' - # Assume that persistentVolumes set up by the cluster admin are safe to use. + # Assume that ephemeral CSI drivers & persistentVolumes set up by the cluster admin are safe to use. + - 'csi' - 'persistentVolumeClaim' + - 'ephemeral' # Allow all other non-hostpath volume types. 
- 'awsElasticBlockStore' - 'azureDisk' - 'azureFile' - 'cephFS' - 'cinder' - - 'csi' - 'fc' - 'flexVolume' - 'flocker' @@ -67,6 +64,9 @@ spec: runAsUser: rule: 'RunAsAny' seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + # The PSP SELinux API cannot express the SELinux Pod Security Standards, + # so if using SELinux, you must choose a more restrictive default. rule: 'RunAsAny' supplementalGroups: rule: 'RunAsAny' diff --git a/content/en/examples/policy/restricted-psp.yaml b/content/en/examples/policy/restricted-psp.yaml index 4db57688b1..0837c5a3ce 100644 --- a/content/en/examples/policy/restricted-psp.yaml +++ b/content/en/examples/policy/restricted-psp.yaml @@ -5,14 +5,11 @@ metadata: annotations: seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' spec: privileged: false # Required to prevent escalations to root. allowPrivilegeEscalation: false - # This is redundant with non-root + disallow privilege escalation, - # but we can provide it for defense in depth. requiredDropCapabilities: - ALL # Allow core volume types. @@ -22,8 +19,10 @@ spec: - 'projected' - 'secret' - 'downwardAPI' - # Assume that persistentVolumes set up by the cluster admin are safe to use. + # Assume that ephemeral CSI drivers & persistentVolumes set up by the cluster admin are safe to use. 
+ - 'csi' - 'persistentVolumeClaim' + - 'ephemeral' hostNetwork: false hostIPC: false hostPID: false diff --git a/content/en/examples/security/podsecurity-baseline.yaml b/content/en/examples/security/podsecurity-baseline.yaml new file mode 100644 index 0000000000..6251af5d2f --- /dev/null +++ b/content/en/examples/security/podsecurity-baseline.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: my-baseline-namespace + labels: + pod-security.kubernetes.io/enforce: baseline + pod-security.kubernetes.io/enforce-version: latest + pod-security.kubernetes.io/warn: baseline + pod-security.kubernetes.io/warn-version: latest \ No newline at end of file diff --git a/content/en/examples/security/podsecurity-privileged.yaml b/content/en/examples/security/podsecurity-privileged.yaml new file mode 100644 index 0000000000..12471cce28 --- /dev/null +++ b/content/en/examples/security/podsecurity-privileged.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: my-privileged-namespace + labels: + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/enforce-version: latest \ No newline at end of file diff --git a/content/en/examples/security/podsecurity-restricted.yaml b/content/en/examples/security/podsecurity-restricted.yaml new file mode 100644 index 0000000000..8b9c30886d --- /dev/null +++ b/content/en/examples/security/podsecurity-restricted.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: my-restricted-namespace + labels: + pod-security.kubernetes.io/enforce: restricted + pod-security.kubernetes.io/enforce-version: latest + pod-security.kubernetes.io/warn: restricted + pod-security.kubernetes.io/warn-version: latest \ No newline at end of file diff --git a/content/en/releases/patch-releases.md b/content/en/releases/patch-releases.md index adc51c5ac2..c7597e332f 100644 --- a/content/en/releases/patch-releases.md +++ b/content/en/releases/patch-releases.md @@ -59,7 +59,7 @@ Towards the end of 
the twelve month, the following will happen: During the two-month maintenance mode period, Release Managers may cut additional maintenance releases to resolve: -- CVEs (under the advisement of the Product Security Committee) +- CVEs (under the advisement of the Security Response Committee) - dependency issues (including base image updates) - critical core component issues @@ -76,25 +76,39 @@ Timelines may vary with the severity of bug fixes, but for easier planning we will target the following monthly release points. Unplanned, critical releases may also occur in between these. -| Monthly Patch Release | Target date | -| --------------------- | ----------- | -| June 2021 | 2021-06-16 | -| July 2021 | 2021-07-14 | -| August 2021 | 2021-08-11 | -| September 2021 | 2021-09-15 | +| Monthly Patch Release | Cherry Pick Deadline | Target date | +| --------------------- | -------------------- | ----------- | +| September 2021 | 2021-09-10 | 2021-09-15 | +| October 2021 | 2021-10-15 | 2021-10-20 | +| November 2021 | 2021-11-12 | 2021-11-17 | +| December 2021 | 2021-12-10 | 2021-12-15 | ## Detailed Release History for Active Branches +### 1.22 + +**1.22** enters maintenance mode on **2022-08-28** + +End of Life for **1.22** is **2022-10-28** + +| PATCH RELEASE | CHERRY PICK DEADLINE | TARGET DATE | NOTE | +|---------------|----------------------|-------------|------| +| 1.22.2 | 2021-09-10 | 2021-09-15 | | +| 1.22.1 | 2021-08-16 | 2021-08-19 | | + ### 1.21 **1.21** enters maintenance mode on **2022-04-28** End of Life for **1.21** is **2022-06-28** -| PATCH RELEASE | CHERRY PICK DEADLINE | TARGET DATE | -| ------------- | -------------------- | ----------- | -| 1.21.2 | 2021-06-12 | 2021-06-16 | -| 1.21.1 | 2021-05-07 | 2021-05-12 | +| PATCH RELEASE | CHERRY PICK DEADLINE | TARGET DATE | NOTE | +| ------------- | -------------------- | ----------- | ---------------------------------------------------------------------- | +| 1.21.5 | 2021-09-10 | 2021-09-15 | | +| 1.21.4 | 
2021-08-07 | 2021-08-11 | | +| 1.21.3 | 2021-07-10 | 2021-07-14 | | +| 1.21.2 | 2021-06-12 | 2021-06-16 | | +| 1.21.1 | 2021-05-07 | 2021-05-12 | [Regression](https://groups.google.com/g/kubernetes-dev/c/KuF8s2zueFs) | ### 1.20 @@ -102,16 +116,19 @@ End of Life for **1.21** is **2022-06-28** End of Life for **1.20** is **2022-02-28** -| PATCH RELEASE | CHERRY PICK DEADLINE | TARGET DATE | -| ------------- | ----------------------------------------------------------------------------------- | ----------- | -| 1.20.8 | 2021-06-12 | 2021-06-16 | -| 1.20.7 | 2021-05-07 | 2021-05-12 | -| 1.20.6 | 2021-04-09 | 2021-04-14 | -| 1.20.5 | 2021-03-12 | 2021-03-17 | -| 1.20.4 | 2021-02-12 | 2021-02-18 | -| 1.20.3 | [Conformance Tests Issue](https://groups.google.com/g/kubernetes-dev/c/oUpY9vWgzJo) | 2021-02-17 | -| 1.20.2 | 2021-01-08 | 2021-01-13 | -| 1.20.1 | [Tagging Issue](https://groups.google.com/g/kubernetes-dev/c/dNH2yknlCBA) | 2020-12-18 | +| PATCH RELEASE | CHERRY PICK DEADLINE | TARGET DATE | NOTE | +| ------------- | -------------------- | ----------- | ----------------------------------------------------------------------------------- | +| 1.20.11 | 2021-09-10 | 2021-09-15 | | +| 1.20.10 | 2021-08-07 | 2021-08-11 | | +| 1.20.9 | 2021-07-10 | 2021-07-14 | | +| 1.20.8 | 2021-06-12 | 2021-06-16 | | +| 1.20.7 | 2021-05-07 | 2021-05-12 | [Regression](https://groups.google.com/g/kubernetes-dev/c/KuF8s2zueFs) | +| 1.20.6 | 2021-04-09 | 2021-04-14 | | +| 1.20.5 | 2021-03-12 | 2021-03-17 | | +| 1.20.4 | 2021-02-12 | 2021-02-18 | | +| 1.20.3 | 2021-02-12 | 2021-02-17 | [Conformance Tests Issue](https://groups.google.com/g/kubernetes-dev/c/oUpY9vWgzJo) | +| 1.20.2 | 2021-01-08 | 2021-01-13 | | +| 1.20.1 | 2020-12-11 | 2020-12-18 | [Tagging Issue](https://groups.google.com/g/kubernetes-dev/c/dNH2yknlCBA) | ### 1.19 @@ -119,46 +136,50 @@ End of Life for **1.20** is **2022-02-28** End of Life for **1.19** is **2021-10-28** -| PATCH RELEASE | CHERRY PICK DEADLINE | TARGET DATE | 
-| ------------- | ------------------------------------------------------------------------- | ----------- | -| 1.19.12 | 2021-06-12 | 2021-06-16 | -| 1.19.11 | 2021-05-07 | 2021-05-12 | -| 1.19.10 | 2021-04-09 | 2021-04-14 | -| 1.19.9 | 2021-03-12 | 2021-03-17 | -| 1.19.8 | 2021-02-12 | 2021-02-17 | -| 1.19.7 | 2021-01-08 | 2021-01-13 | -| 1.19.6 | [Tagging Issue](https://groups.google.com/g/kubernetes-dev/c/dNH2yknlCBA) | 2020-12-18 | -| 1.19.5 | 2020-12-04 | 2020-12-09 | -| 1.19.4 | 2020-11-06 | 2020-11-11 | -| 1.19.3 | 2020-10-09 | 2020-10-14 | -| 1.19.2 | 2020-09-11 | 2020-09-16 | -| 1.19.1 | 2020-09-04 | 2020-09-09 | +| PATCH RELEASE | CHERRY PICK DEADLINE | TARGET DATE | NOTE | +| ------------- | -------------------- | ----------- | ------------------------------------------------------------------------- | +| 1.19.15 | 2021-09-10 | 2021-09-15 | | +| 1.19.14 | 2021-08-07 | 2021-08-11 | | +| 1.19.13 | 2021-07-10 | 2021-07-14 | | +| 1.19.12 | 2021-06-12 | 2021-06-16 | | +| 1.19.11 | 2021-05-07 | 2021-05-12 | [Regression](https://groups.google.com/g/kubernetes-dev/c/KuF8s2zueFs) | +| 1.19.10 | 2021-04-09 | 2021-04-14 | | +| 1.19.9 | 2021-03-12 | 2021-03-17 | | +| 1.19.8 | 2021-02-12 | 2021-02-17 | | +| 1.19.7 | 2021-01-08 | 2021-01-13 | | +| 1.19.6 | 2020-12-11 | 2020-12-18 | [Tagging Issue](https://groups.google.com/g/kubernetes-dev/c/dNH2yknlCBA) | +| 1.19.5 | 2020-12-04 | 2020-12-09 | | +| 1.19.4 | 2020-11-06 | 2020-11-11 | | +| 1.19.3 | 2020-10-09 | 2020-10-14 | | +| 1.19.2 | 2020-09-11 | 2020-09-16 | | +| 1.19.1 | 2020-09-04 | 2020-09-09 | | ## Non-Active Branch History These releases are no longer supported. 
-| Minor Version | Final Patch Release | EOL date | -| ------------- | ------------------- | ---------- | -| 1.18 | 1.18.19 | 2021-05-12 | -| 1.17 | 1.17.17 | 2021-01-13 | -| 1.16 | 1.16.15 | 2020-09-02 | -| 1.15 | 1.15.12 | 2020-05-06 | -| 1.14 | 1.14.10 | 2019-12-11 | -| 1.13 | 1.13.12 | 2019-10-15 | -| 1.12 | 1.12.10 | 2019-07-08 | -| 1.11 | 1.11.10 | 2019-05-01 | -| 1.10 | 1.10.13 | 2019-02-13 | -| 1.9 | 1.9.11 | 2018-09-29 | -| 1.8 | 1.8.15 | 2018-07-12 | -| 1.7 | 1.7.16 | 2018-04-04 | -| 1.6 | 1.6.13 | 2017-11-23 | -| 1.5 | 1.5.8 | 2017-10-01 | -| 1.4 | 1.4.12 | 2017-04-21 | -| 1.3 | 1.3.10 | 2016-11-01 | -| 1.2 | 1.2.7 | 2016-10-23 | +| MINOR VERSION | FINAL PATCH RELEASE | EOL DATE | NOTE | +| ------------- | ------------------- | ---------- | ---------------------------------------------------------------------- | +| 1.18 | 1.18.20 | 2021-06-18 | Created to resolve regression introduced in 1.18.19 | +| 1.18 | 1.18.19 | 2021-05-12 | [Regression](https://groups.google.com/g/kubernetes-dev/c/KuF8s2zueFs) | +| 1.17 | 1.17.17 | 2021-01-13 | | +| 1.16 | 1.16.15 | 2020-09-02 | | +| 1.15 | 1.15.12 | 2020-05-06 | | +| 1.14 | 1.14.10 | 2019-12-11 | | +| 1.13 | 1.13.12 | 2019-10-15 | | +| 1.12 | 1.12.10 | 2019-07-08 | | +| 1.11 | 1.11.10 | 2019-05-01 | | +| 1.10 | 1.10.13 | 2019-02-13 | | +| 1.9 | 1.9.11 | 2018-09-29 | | +| 1.8 | 1.8.15 | 2018-07-12 | | +| 1.7 | 1.7.16 | 2018-04-04 | | +| 1.6 | 1.6.13 | 2017-11-23 | | +| 1.5 | 1.5.8 | 2017-10-01 | | +| 1.4 | 1.4.12 | 2017-04-21 | | +| 1.3 | 1.3.10 | 2016-11-01 | | +| 1.2 | 1.2.7 | 2016-10-23 | | [cherry-picks]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-release/cherry-picks.md -[release-managers]: /release-managers.md -[release process description]: /release.md +[release-managers]: /releases/release-managers +[release process description]: /releases/release [yearly-support]: https://git.k8s.io/enhancements/keps/sig-release/1498-kubernetes-yearly-support-period/README.md diff --git 
a/content/en/releases/release-managers.md b/content/en/releases/release-managers.md index e8b895def6..0b68ca0b87 100644 --- a/content/en/releases/release-managers.md +++ b/content/en/releases/release-managers.md @@ -10,6 +10,7 @@ and building/packaging Kubernetes. The responsibilities of each role are described below. - [Contact](#contact) + - [Security Embargo Policy](#security-embargo-policy) - [Handbooks](#handbooks) - [Release Managers](#release-managers) - [Becoming a Release Manager](#becoming-a-release-manager) @@ -26,7 +27,11 @@ The responsibilities of each role are described below. | --- | --- | --- | --- | --- | | [release-managers@kubernetes.io](mailto:release-managers@kubernetes.io) | [#release-management](https://kubernetes.slack.com/messages/CJH2GBF7Y) (channel) / @release-managers (user group) | Public | Public discussion for Release Managers | All Release Managers (including Associates, Build Admins, and SIG Chairs) | | [release-managers-private@kubernetes.io](mailto:release-managers-private@kubernetes.io) | N/A | Private | Private discussion for privileged Release Managers | Release Managers, SIG Release leadership | -| [security-release-team@kubernetes.io](mailto:security-release-team@kubernetes.io) | [#security-release-team](https://kubernetes.slack.com/archives/G0162T1RYHG) (channel) / @security-rel-team (user group) | Private | Security release coordination with the Product Security Committee | [security-discuss-private@kubernetes.io](mailto:security-discuss-private@kubernetes.io), [release-managers-private@kubernetes.io](mailto:release-managers-private@kubernetes.io) | +| [security-release-team@kubernetes.io](mailto:security-release-team@kubernetes.io) | [#security-release-team](https://kubernetes.slack.com/archives/G0162T1RYHG) (channel) / @security-rel-team (user group) | Private | Security release coordination with the Security Response Committee | [security-discuss-private@kubernetes.io](mailto:security-discuss-private@kubernetes.io), 
[release-managers-private@kubernetes.io](mailto:release-managers-private@kubernetes.io) | + +### Security Embargo Policy + +Some information about releases is subject to embargo and we have defined a policy about how those embargoes are set. Please refer to the [Security Embargo Policy](https://github.com/kubernetes/security/blob/master/private-distributors-list.md#embargo-policy) for more information. ## Handbooks @@ -74,7 +79,7 @@ Release Managers are responsible for: answering questions and suggesting appropriate work for them to do This team at times works in close conjunction with the -[Product Security Committee][psc] and therefore should abide by the guidelines +[Security Response Committee][src] and therefore should abide by the guidelines set forth in the [Security Release Process][security-release-process]. GitHub Access Controls: [@kubernetes/release-managers](https://github.com/orgs/kubernetes/teams/release-managers) @@ -192,6 +197,8 @@ GitHub team: [@kubernetes/sig-release-leads](https://github.com/orgs/kubernetes/ ### Technical Leads +- Adolfo García Veytia ([@puerco](https://github.com/puerco)) +- Carlos Panato ([@cpanato](https://github.com/cpanato)) - Daniel Mangum ([@hasheddan](https://github.com/hasheddan)) - Jeremy Rickard ([@jeremyrickard](https://github.com/jeremyrickard)) @@ -208,6 +215,6 @@ Example: [1.15 Release Team](https://git.k8s.io/sig-release/releases/release-1.1 [handbook-patch-release]: https://git.k8s.io/sig-release/release-engineering/role-handbooks/patch-release-team.md [k-sig-release-releases]: https://git.k8s.io/sig-release/releases [patches]: /patch-releases.md -[psc]: https://git.k8s.io/community/committee-product-security/README.md +[src]: https://git.k8s.io/community/committee-product-security/README.md [release-team]: https://git.k8s.io/sig-release/release-team/README.md [security-release-process]: https://git.k8s.io/security/security-release-process.md diff --git a/content/es/docs/concepts/architecture/_index.md 
b/content/es/docs/concepts/architecture/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/concepts/cluster-administration/_index.md b/content/es/docs/concepts/cluster-administration/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/concepts/configuration/_index.md b/content/es/docs/concepts/configuration/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/concepts/containers/_index.md b/content/es/docs/concepts/containers/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/concepts/overview/_index.md b/content/es/docs/concepts/overview/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/concepts/overview/object-management-kubectl/_index.md b/content/es/docs/concepts/overview/object-management-kubectl/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/concepts/overview/working-with-objects/_index.md b/content/es/docs/concepts/overview/working-with-objects/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/concepts/policy/_index.md b/content/es/docs/concepts/policy/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/concepts/services-networking/_index.md b/content/es/docs/concepts/services-networking/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/concepts/storage/_index.md b/content/es/docs/concepts/storage/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/concepts/storage/volume-snapshot-classes.md b/content/es/docs/concepts/storage/volume-snapshot-classes.md new file mode 100644 index 0000000000..497b256e67 --- /dev/null +++ b/content/es/docs/concepts/storage/volume-snapshot-classes.md @@ -0,0 +1,69 @@ +--- +reviewers: +- edithturn +- raelga +title: Volume Snapshot Classes +content_type: concept +weight: 30 +--- + + + +Este documento describe el concepto de VolumeSnapshotClass en Kubernetes. 
Se sugiere estar familiarizado +con [Volume Snapshots](/docs/concepts/storage/volume-snapshots/) y +[Storage Classes](/docs/concepts/storage/storage-classes). + + + + +## Introducción + +Al igual que StorageClass proporciona a los administradores una forma de describir las “clases” +de almacenamiento que ofrecen al aprovisionar un volumen, VolumeSnapshotClass proporciona una +forma de describir las “clases” de almacenamiento al aprovisionar un Snapshot de volumen. + +## El Recurso VolumeSnapshotClass + +Cada VolumeSnapshotClass contiene los campos `driver`, `deletionPolicy`, y `parameters`, +que se utilizan cuando un VolumeSnapshot que pertenece a la clase, necesita aprovisionarse dinámicamente. + +El nombre de un objeto VolumeSnapshotClass es significativo y es la forma en que los usuarios pueden solicitar una clase en particular. Los administradores establecen el nombre y parámetros de una clase cuando crean por primera vez objetos VolumeSnapshotClass; una vez creados los objetos no pueden ser actualizados. + +```yaml +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: + name: csi-hostpath-snapclass +driver: hostpath.csi.k8s.io +deletionPolicy: Delete +parameters: +``` + +Los administradores pueden especificar un VolumeSnapshotClass predeterminado para VolumeSnapshots que no solicitan ninguna clase en particular. Para definir la clase predeterminada agregue la anotación: `snapshot.storage.kubernetes.io/is-default-class: "true"`. + +```yaml +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: + name: csi-hostpath-snapclass + annotations: + snapshot.storage.kubernetes.io/is-default-class: "true" +driver: hostpath.csi.k8s.io +deletionPolicy: Delete +parameters: +``` + +### Driver + +Las clases de Snapshot de volumen tienen un controlador que determina qué complemento de volumen CSI se utiliza para aprovisionar VolumeSnapshots. Este campo debe especificarse. 
+ +### DeletionPolicy + +Las clases de Snapshot de volumen tienen un deletionPolicy. Permite configurar lo que sucede con un VolumeSnapshotContent cuando se va a eliminar el objeto VolumeSnapshot al que está vinculado. La deletionPolicy de una clase de Snapshot de volumen puede ser `Retain` o `Delete`. Este campo debe ser especificado. + +Si la deletionPolicy es `Delete`, el Snapshot de almacenamiento subyacente se eliminará junto con el objeto VolumeSnapshotContent. Si deletionPolicy es `Retain`, tanto el Snapshot subyacente como VolumeSnapshotContent permanecerán. + +### Parameters + +Las clases de Snapshot de volumen tienen parámetros que describen los Snapshots de volumen que pertenecen a la clase de Snapshot de volumen. Se pueden aceptar diferentes parámetros dependiendo del `driver`. + diff --git a/content/es/docs/concepts/workloads/controllers/_index.md b/content/es/docs/concepts/workloads/controllers/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/concepts/workloads/controllers/deployment.md b/content/es/docs/concepts/workloads/controllers/deployment.md index 17a42882a0..9fc506ae01 100644 --- a/content/es/docs/concepts/workloads/controllers/deployment.md +++ b/content/es/docs/concepts/workloads/controllers/deployment.md @@ -84,16 +84,14 @@ Esto es útil para futuras introspecciones, por ejemplo para comprobar qué coma A continuación, ejecuta el comando `kubectl get deployments`. La salida debe ser parecida a la siguiente: ```shell -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -nginx-deployment 3 0 0 0 1s +NAME READY UP-TO-DATE AVAILABLE AGE +nginx-deployment 3/3 3 3 1s ``` Cuando inspeccionas los Deployments de tu clúster, se muestran los siguientes campos: * `NAME` enumera los nombre de los Deployments del clúster. -* `DESIRED` muestra el número deseado de _réplicas_ de la aplicación, que se define - cuando se crea el Deployment. Esto se conoce como el _estado deseado_. 
-* `CURRENT` muestra cuántas réplicas se están ejecutando actualment. +* `READY` muestra cuántas réplicas de la aplicación están disponibles para sus usuarios. Sigue el patrón número de réplicas `listas/deseadas`. * `UP-TO-DATE` muestra el número de réplicas que se ha actualizado para alcanzar el estado deseado. * `AVAILABLE` muestra cuántas réplicas de la aplicación están disponibles para los usuarios. * `AGE` muestra la cantidad de tiempo que la aplicación lleva ejecutándose. @@ -105,6 +103,20 @@ Nótese cómo los valores de cada campo corresponden a los valores de la especif * El número de réplicas actualizadas es 0 de acuerdo con el campo `.status.updatedReplicas`. * El número de réplicas disponibles es 0 de acuerdo con el campo `.status.availableReplicas`. +Si desea obtener más información del Deployment, utilice el parámetro `-o wide`, ejecutando el comando `kubectl get deployments -o wide`. La salida será parecida a la siguiente: + +```shell +NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR +nginx-deployment 3/3 3 3 10s nginx nginx:1.7.9 app=nginx +``` + +Ejecutando el comando anterior se muestran los siguientes campos adicionales: + +* `CONTAINERS` muestra los nombres de los contenedores declarados en `.spec.template.spec.containers.[name]`. +* `IMAGES` muestra los nombres de las imágenes declaradas en `.spec.template.spec.containers.[image]`. +* `SELECTOR` muestra el Label selector que se declaró en matchLabels o matchExpressions. + + Para ver el estado del Deployment, ejecuta el comando `kubectl rollout status deployment.v1.apps/nginx-deployment`. 
Este comando devuelve el siguiente resultado: ```shell @@ -115,8 +127,8 @@ deployment "nginx-deployment" successfully rolled out Ejecuta de nuevo el comando `kubectl get deployments` unos segundos más tarde: ```shell -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -nginx-deployment 3 3 3 3 18s +NAME READY UP-TO-DATE AVAILABLE AGE +nginx-deployment 3/3 3 3 18s ``` Fíjate que el Deployment ha creado todas las tres réplicas, y que todas las réplicas están actualizadas (contienen @@ -204,8 +216,8 @@ Cuando el despliegue funciona, puede que quieras `obtener` el Deployment: kubectl get deployments ``` ``` -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -nginx-deployment 3 3 3 3 36s +NAME READY UP-TO-DATE AVAILABLE AGE +nginx-deployment 3/3 3 3 36s ``` El número de réplicas actualizadas indica que el Deployment ha actualizado las réplicas según la última configuración. @@ -241,7 +253,7 @@ La próxima vez que quieras actualizar estos Pods, sólo necesitas actualizar la El Deployment permite garantizar que sólo un número determinado de Pods puede eliminarse mientras se están actualizando. Por defecto, garantiza que al menos el 25% menos del número deseado de Pods se está ejecutando (máx. 25% no disponible). -El Deployment tmabién permite garantizar que sólo un número determinado de Pods puede crearse por encima del número deseado de +El Deployment también permite garantizar que sólo un número determinado de Pods puede crearse por encima del número deseado de Pods. Por defecto, garantiza que al menos el 25% más del número deseado de Pods se está ejecutando (máx. 25% de aumento). Por ejemplo, si miras detenidamente el Deployment de arriba, verás que primero creó un Pod, @@ -515,8 +527,8 @@ al retroceder a la revisión 2. 
kubectl get deployment nginx-deployment ``` ``` -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -nginx-deployment 3 3 3 3 30m +NAME READY UP-TO-DATE AVAILABLE AGE +nginx-deployment 3/3 3 3 30m ``` ```shell @@ -601,8 +613,8 @@ Por ejemplo, imagina que estás ejecutando un Deployment con 10 réplicas, donde kubectl get deploy ``` ``` -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -nginx-deployment 10 10 10 10 50s +NAME READY UP-TO-DATE AVAILABLE AGE +nginx-deployment 10/10 10 10 50s ``` Si actualizas a una nueva imagen que no puede descargarse desde el clúster: @@ -641,8 +653,8 @@ réplicas arranquen positivamente. kubectl get deploy ``` ``` -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -nginx-deployment 15 18 7 8 7m +NAME READY UP-TO-DATE AVAILABLE AGE +nginx-deployment 18/15 7 8 7m ``` ```shell @@ -665,8 +677,8 @@ Por ejemplo, con un Deployment que acaba de crearse: kubectl get deploy ``` ``` -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -nginx 3 3 3 3 1m +NAME READY UP-TO-DATE AVAILABLE AGE +nginx-deployment 3/3 3 3 1m ``` ```shell kubectl get rs @@ -1106,5 +1118,3 @@ no generará nuevos despliegues mientras esté pausado. Un Deployment se pausa d [`kubectl rolling update`](/docs/reference/generated/kubectl/kubectl-commands#rolling-update) actualiza los Pods y los ReplicationControllers de forma similar. Pero se recomienda el uso de Deployments porque se declaran del lado del servidor, y proporcionan características adicionales como la posibilidad de retroceder a revisiones anteriores incluso después de haber terminado una actualización continua. 
- - diff --git a/content/es/docs/concepts/workloads/pods/_index.md b/content/es/docs/concepts/workloads/pods/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/getting-started-guides/_index.md b/content/es/docs/getting-started-guides/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/getting-started-guides/fedora/_index.md b/content/es/docs/getting-started-guides/fedora/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/application-architect.md b/content/es/docs/reference/glossary/application-architect.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/application-developer.md b/content/es/docs/reference/glossary/application-developer.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/certificate.md b/content/es/docs/reference/glossary/certificate.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/cluster.md b/content/es/docs/reference/glossary/cluster.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/container-runtime.md b/content/es/docs/reference/glossary/container-runtime.md index 597ceaf25c..fd3328e799 100644 --- a/content/es/docs/reference/glossary/container-runtime.md +++ b/content/es/docs/reference/glossary/container-runtime.md @@ -2,7 +2,7 @@ title: Container Runtime id: container-runtime date: 2019-06-05 -full_link: /es/docs/reference/generated/container-runtime +full_link: /docs/setup/production-environment/container-runtimes short_description: > El _Container Runtime_, entorno de ejecución de un contenedor, es el software responsable de ejecutar contenedores. 
diff --git a/content/es/docs/reference/glossary/controller.md b/content/es/docs/reference/glossary/controller.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/docker.md b/content/es/docs/reference/glossary/docker.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/etcd.md b/content/es/docs/reference/glossary/etcd.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/image.md b/content/es/docs/reference/glossary/image.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/index.md b/content/es/docs/reference/glossary/index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/job.md b/content/es/docs/reference/glossary/job.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/kops.md b/content/es/docs/reference/glossary/kops.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/kube-apiserver.md b/content/es/docs/reference/glossary/kube-apiserver.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/kube-controller-manager.md b/content/es/docs/reference/glossary/kube-controller-manager.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/kube-proxy.md b/content/es/docs/reference/glossary/kube-proxy.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/kube-scheduler.md b/content/es/docs/reference/glossary/kube-scheduler.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/kubeadm.md b/content/es/docs/reference/glossary/kubeadm.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/kubectl.md b/content/es/docs/reference/glossary/kubectl.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/kubelet.md b/content/es/docs/reference/glossary/kubelet.md old mode 100755 new mode 100644 diff --git 
a/content/es/docs/reference/glossary/label.md b/content/es/docs/reference/glossary/label.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/limitrange.md b/content/es/docs/reference/glossary/limitrange.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/minikube.md b/content/es/docs/reference/glossary/minikube.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/namespace.md b/content/es/docs/reference/glossary/namespace.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/node.md b/content/es/docs/reference/glossary/node.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/persistent-volume-claim.md b/content/es/docs/reference/glossary/persistent-volume-claim.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/pod.md b/content/es/docs/reference/glossary/pod.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/replica-set.md b/content/es/docs/reference/glossary/replica-set.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/secret.md b/content/es/docs/reference/glossary/secret.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/selector.md b/content/es/docs/reference/glossary/selector.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/sysctl.md b/content/es/docs/reference/glossary/sysctl.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/glossary/volume.md b/content/es/docs/reference/glossary/volume.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/kubectl/_index.md b/content/es/docs/reference/kubectl/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/reference/setup-tools/kubeadm/_index.md b/content/es/docs/reference/setup-tools/kubeadm/_index.md old mode 100755 new mode 100644 diff --git 
a/content/es/docs/setup/independent/_index.md b/content/es/docs/setup/independent/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/setup/release/_index.md b/content/es/docs/setup/release/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/access-application-cluster/_index.md b/content/es/docs/tasks/access-application-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/access-kubernetes-api/_index.md b/content/es/docs/tasks/access-kubernetes-api/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/access-kubernetes-api/custom-resources/_index.md b/content/es/docs/tasks/access-kubernetes-api/custom-resources/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/administer-cluster/_index.md b/content/es/docs/tasks/administer-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/administer-cluster/kubeadm/_index.md b/content/es/docs/tasks/administer-cluster/kubeadm/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/configure-pod-container/_index.md b/content/es/docs/tasks/configure-pod-container/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/debug-application-cluster/logging-stackdriver.md b/content/es/docs/tasks/debug-application-cluster/logging-stackdriver.md deleted file mode 100644 index 3a247b5e88..0000000000 --- a/content/es/docs/tasks/debug-application-cluster/logging-stackdriver.md +++ /dev/null @@ -1,366 +0,0 @@ ---- -title: Escribiendo Logs con Stackdriver -content_type: concept ---- - - - -Antes de seguir leyendo esta página, deberías familiarizarte con el -[resumen de escritura de logs en Kubernetes](/docs/concepts/cluster-administration/logging). - -{{< note >}} -Por defecto, Stackdriver recolecta toda la salida estándar de tus contenedores, así -como el flujo de la salida de error. 
Para recolectar cualquier log tu aplicación escribe en un archivo (por ejemplo), -ver la [estrategia de sidecar](/docs/concepts/cluster-administration/logging#sidecar-container-with-a-logging-agent) -en el resumen de escritura de logs en Kubernetes. -{{< /note >}} - - - - - - -## Despliegue - -Para ingerir logs, debes desplegar el agente de Stackdriver Logging en cada uno de los nodos de tu clúster. -Dicho agente configura una instancia de `fluentd`, donde la configuración se guarda en un `ConfigMap` -y las instancias se gestionan a través de un `DaemonSet` de Kubernetes. El despliegue actual del -`ConfigMap` y el `DaemonSet` dentro de tu clúster depende de tu configuración individual del clúster. - -### Desplegar en un nuevo clúster - -#### Google Kubernetes Engine - -Stackdriver es la solución por defecto de escritura de logs para aquellos clústeres desplegados en Google Kubernetes Engine. -Stackdriver Logging se despliega por defecto en cada clúster a no ser que se le indique de forma explícita no hacerlo. - -#### Otras plataformas - -Para desplegar Stackdriver Logging en un *nuevo* clúster que estés creando con -`kube-up.sh`, haz lo siguiente: - -1. Configura la variable de entorno `KUBE_LOGGING_DESTINATION` con el valor `gcp`. -1. **Si no estás trabajando en GCE**, incluye `beta.kubernetes.io/fluentd-ds-ready=true` -en la variable `KUBE_NODE_LABELS`. - -Una vez que tu clúster ha arrancado, cada nodo debería ejecutar un agente de Stackdriver Logging. -Los `DaemonSet` y `ConfigMap` se configuran como extras. Si no estás usando `kube-up.sh`, -considera la posibilidad de arrancar un clúster sin una solución pre-determinada de escritura de logs -y entonces desplegar los agentes de Stackdriver Logging una vez el clúster esté ejecutándose. - -{{< warning >}} -El proceso de Stackdriver Logging reporta problemas conocidos en plataformas distintas -a Google Kubernetes Engine. Úsalo bajo tu propio riesgo. -{{< /warning >}} - -### Desplegar a un clúster existente - -1. 
Aplica una etiqueta en cada nodo, si no estaba presente ya. - - El despliegue del agente de Stackdriver Logging utiliza etiquetas de nodo para - determinar en qué nodos debería desplegarse. Estas etiquetas fueron introducidas - para distinguir entre nodos de Kubernetes de la versión 1.6 o superior. - Si el clúster se creó con Stackdriver Logging configurado y el nodo tiene la - versión 1.5.X o inferior, ejecutará fluentd como un pod estático. Puesto que un nodo - no puede tener más de una instancia de fluentd, aplica únicamente las etiquetas - a los nodos que no tienen un pod de fluentd ya desplegado. Puedes confirmar si tu nodo - ha sido etiquetado correctamente ejecutando `kubectl describe` de la siguiente manera: - - ``` - kubectl describe node $NODE_NAME - ``` - - La salida debería ser similar a la siguiente: - - ``` - Name: NODE_NAME - Role: - Labels: beta.kubernetes.io/fluentd-ds-ready=true - ... - ``` - - Asegúrate que la salida contiene la etiqueta `beta.kubernetes.io/fluentd-ds-ready=true`. - Si no está presente, puedes añadirla usando el comando `kubectl label` como se indica: - - ``` - kubectl label node $NODE_NAME beta.kubernetes.io/fluentd-ds-ready=true - ``` - - {{< note >}} - Si un nodo falla y tiene que volver a crearse, deberás volver a definir - la etiqueta al nuevo nodo. Para facilitar esta tarea, puedes utilizar el - parámetro de línea de comandos del Kubelet para aplicar dichas etiquetas - cada vez que se arranque un nodo. - {{< /note >}} - -1. Despliega un `ConfigMap` con la configuración del agente de escritura de logs ejecutando el siguiente comando: - - ``` - kubectl apply -f https://k8s.io/examples/debug/fluentd-gcp-configmap.yaml - ``` - - Este comando crea el `ConfigMap` en el espacio de nombres `default`. Puedes descargar el archivo - manualmente y cambiarlo antes de crear el objeto `ConfigMap`. - -1. 
Despliega el agente `DaemonSet` de escritura de logs ejecutando el siguiente comando: - - ``` - kubectl apply -f https://k8s.io/examples/debug/fluentd-gcp-ds.yaml - ``` - - Puedes descargar y editar este archivo antes de usarlo igualmente. - -## Verificar el despliegue de tu agente de escritura de logs - -Tras el despliegue del `DaemonSet` de StackDriver, puedes comprobar el estado de -cada uno de los despliegues de los agentes ejecutando el siguiente comando: - -```shell -kubectl get ds --all-namespaces -``` - -Si tienes 3 nodos en el clúster, la salida debería ser similar a esta: - -``` -NAMESPACE NAME DESIRED CURRENT READY NODE-SELECTOR AGE -... -default fluentd-gcp-v2.0 3 3 3 beta.kubernetes.io/fluentd-ds-ready=true 5m -... -``` -Para comprender cómo funciona Stackdriver, considera la siguiente especificación -de un generador de logs sintéticos [counter-pod.yaml](/examples/debug/counter-pod.yaml): - -{{< codenew file="debug/counter-pod.yaml" >}} - -Esta especificación de pod tiene un contenedor que ejecuta una secuencia de comandos bash -que escribe el valor de un contador y la fecha y hora cada segundo, de forma indefinida. -Vamos a crear este pod en el espacio de nombres por defecto. - -```shell -kubectl apply -f https://k8s.io/examples/debug/counter-pod.yaml -``` - -Puedes observar el pod corriendo: - -```shell -kubectl get pods -``` -``` -NAME READY STATUS RESTARTS AGE -counter 1/1 Running 0 5m -``` - -Durante un período de tiempo corto puedes observar que el estado del pod es 'Pending', debido a que el kubelet -tiene primero que descargar la imagen del contenedor. Cuando el estado del pod cambia a `Running` -puedes usar el comando `kubectl logs` para ver la salida de este pod contador. - -```shell -kubectl logs counter -``` -``` -0: Mon Jan 1 00:00:00 UTC 2001 -1: Mon Jan 1 00:00:01 UTC 2001 -2: Mon Jan 1 00:00:02 UTC 2001 -... 
-``` - -Como se describe en el resumen de escritura de logs, este comando visualiza las entradas de logs -del archivo de logs del contenedor. Si se termina el contenedor y Kubernetes lo reinicia, -todavía puedes acceder a los logs de la ejecución previa del contenedor. Sin embargo, -si el pod se desaloja del nodo, los archivos de log se pierden. Vamos a demostrar este -comportamiento mediante el borrado del contenedor que ejecuta nuestro contador: - -```shell -kubectl delete pod counter -``` -``` -pod "counter" deleted -``` - -y su posterior re-creación: - -```shell -kubectl create -f https://k8s.io/examples/debug/counter-pod.yaml -``` -``` -pod/counter created -``` - -Tras un tiempo, puedes acceder a los logs del pod contador otra vez: - -```shell -kubectl logs counter -``` -``` -0: Mon Jan 1 00:01:00 UTC 2001 -1: Mon Jan 1 00:01:01 UTC 2001 -2: Mon Jan 1 00:01:02 UTC 2001 -... -``` - -Como era de esperar, únicamente se visualizan las líneas de log recientes. Sin embargo, -para una aplicación real seguramente prefieras acceder a los logs de todos los contenedores, -especialmente cuando te haga falta depurar problemas. Aquí es donde haber habilitado -Stackdriver Logging puede ayudarte. - -## Ver logs - -El agente de Stackdriver Logging asocia metadatos a cada entrada de log, para que puedas usarlos posteriormente -en consultas para seleccionar sólo los mensajes que te interesan: por ejemplo, -los mensajes de un pod en particular. - -Los metadatos más importantes son el tipo de recurso y el nombre del log. -El tipo de recurso de un log de contenedor tiene el valor `container`, que se muestra como -`GKE Containers` en la UI (incluso si el clúster de Kubernetes no está en Google Kubernetes Engine). -El nombre de log es el nombre del contenedor, de forma que si tienes un pod con -dos contenedores, denominados `container_1` y `container_2` en la especificación, sus logs -tendrán los nombres `container_1` y `container_2` respectivamente. 
- -Los componentes del sistema tienen el valor `compute` como tipo de recursos, que se muestra como -`GCE VM Instance` en la UI. Los nombres de log para los componentes del sistema son fijos. -Para un nodo de Google Kubernetes Engine, cada entrada de log de cada componente de sistema tiene uno de los siguientes nombres: - -* docker -* kubelet -* kube-proxy - -Puedes aprender más acerca de cómo visualizar los logs en la [página dedicada a Stackdriver](https://cloud.google.com/logging/docs/view/logs_viewer). - -Uno de los posibles modos de ver los logs es usando el comando de línea de interfaz -[`gcloud logging`](https://cloud.google.com/logging/docs/api/gcloud-logging) -del [SDK de Google Cloud](https://cloud.google.com/sdk/). -Este comando usa la [sintaxis de filtrado](https://cloud.google.com/logging/docs/view/advanced_filters) de StackDriver Logging -para consultar logs específicos. Por ejemplo, puedes ejecutar el siguiente comando: - -```none -gcloud beta logging read 'logName="projects/$YOUR_PROJECT_ID/logs/count"' --format json | jq '.[].textPayload' -``` -``` -... -"2: Mon Jan 1 00:01:02 UTC 2001\n" -"1: Mon Jan 1 00:01:01 UTC 2001\n" -"0: Mon Jan 1 00:01:00 UTC 2001\n" -... -"2: Mon Jan 1 00:00:02 UTC 2001\n" -"1: Mon Jan 1 00:00:01 UTC 2001\n" -"0: Mon Jan 1 00:00:00 UTC 2001\n" -``` - -Como puedes observar, muestra los mensajes del contenedor contador tanto de la -primera como de la segunda ejecución, a pesar de que el kubelet ya había eliminado los logs del primer contenedor. - -### Exportar logs - -Puedes exportar los logs al [Google Cloud Storage](https://cloud.google.com/storage/) -o a [BigQuery](https://cloud.google.com/bigquery/) para llevar a cabo un análisis más profundo. -Stackdriver Logging ofrece el concepto de destinos, donde puedes especificar el destino de -las entradas de logs. Más información disponible en la [página de exportación de logs](https://cloud.google.com/logging/docs/export/configure_export_v2) de StackDriver. 
- -## Configurar los agentes de Stackdriver Logging - -En ocasiones la instalación por defecto de Stackdriver Logging puede que no se ajuste a tus necesidades, por ejemplo: - -* Puede que quieras añadir más recursos porque el rendimiento por defecto no encaja con tus necesidades. -* Puede que quieras añadir un parseo adicional para extraer más metadatos de tus mensajes de log, -como la severidad o referencias al código fuente. -* Puede que quieras enviar los logs no sólo a Stackdriver o sólo enviarlos a Stackdriver parcialmente. - -En cualquiera de estos casos, necesitas poder cambiar los parámetros del `DaemonSet` y el `ConfigMap`. - -### Prerequisitos - -Si estás usando GKE y Stackdriver Logging está habilitado en tu clúster, no puedes -cambiar su configuración, porque ya está gestionada por GKE. -Sin embargo, puedes deshabilitar la integración por defecto y desplegar la tuya propia. - -{{< note >}} -Tendrás que mantener y dar soporte tú mismo a la nueva configuración desplegada: -actualizar la imagen y la configuración, ajustar los recuros y todo eso. -{{< /note >}} - -Para deshabilitar la integración por defecto, usa el siguiente comando: - -``` -gcloud beta container clusters update --logging-service=none CLUSTER -``` - -Puedes encontrar notas acerca de cómo instalar los agentes de Stackdriver Logging - en un clúster ya ejecutándose en la [sección de despliegue](#deploying). - -### Cambiar los parámetros del `DaemonSet` - -Cuando tienes un `DaemonSet` de Stackdriver Logging en tu clúster, puedes simplemente -modificar el campo `template` en su especificación, y el controlador del daemonset actualizará los pods por ti. Por ejemplo, -asumamos que acabas de instalar el Stackdriver Logging como se describe arriba. Ahora quieres cambiar -el límite de memoria que se le asigna a fluentd para poder procesar más logs de forma segura. 
- -Obtén la especificación del `DaemonSet` que corre en tu clúster: - -```shell -kubectl get ds fluentd-gcp-v2.0 --namespace kube-system -o yaml > fluentd-gcp-ds.yaml -``` - -A continuación, edita los requisitos del recurso en el `spec` y actualiza el objeto `DaemonSet` -en el apiserver usando el siguiente comando: - -```shell -kubectl replace -f fluentd-gcp-ds.yaml -``` - -Tras un tiempo, los pods de agente de Stackdriver Logging se reiniciarán con la nueva configuración. - -### Cambiar los parámetros de fluentd - -La configuración de Fluentd se almacena en un objeto `ConfigMap`. Realmente se trata de un conjunto -de archivos de configuración que se combinan conjuntamente. Puedes aprender acerca de -la configuración de fluentd en el [sitio oficial](http://docs.fluentd.org). - -Imagina que quieres añadir una nueva lógica de parseo a la configuración actual, de forma que fluentd pueda entender -el formato de logs por defecto de Python. Un filtro apropiado de fluentd para conseguirlo sería: - -``` - - type parser - format /^(?\w):(?\w):(?.*)/ - reserve_data true - suppress_parse_error_log true - key_name log - -``` - -Ahora tienes que añadirlo a la configuración actual y que los agentes de Stackdriver Logging la usen. -Para ello, obtén la versión actual del `ConfigMap` de Stackdriver Logging de tu clúster -ejecutando el siguiente comando: - -```shell -kubectl get cm fluentd-gcp-config --namespace kube-system -o yaml > fluentd-gcp-configmap.yaml -``` - -Luego, como valor de la clave `containers.input.conf`, inserta un nuevo filtro justo después -de la sección `source`. - -{{< note >}} -El orden es importante. -{{< /note >}} - -Actualizar el `ConfigMap` en el apiserver es más complicado que actualizar el `DaemonSet`. -Es mejor considerar que un `ConfigMap` es inmutable. 
Así, para poder actualizar la configuración, deberías -crear un nuevo `ConfigMap` con otro nombre y cambiar el `DaemonSet` para que apunte al nuevo -siguiendo la [guía de arriba](#changing-daemonset-parameters). - -### Añadir plugins de fluentd - -Fluentd está desarrollado en Ruby y permite extender sus capacidades mediante el uso de -[plugins](http://www.fluentd.org/plugins). Si quieres usar un plugin que no está incluido en -la imagen por defecto del contenedor de Stackdriver Logging, debes construir tu propia imagen. -Imagina que quieres añadir un destino Kafka para aquellos mensajes de un contenedor en particular -para poder procesarlos posteriormente. Puedes reusar los [fuentes de imagen de contenedor](https://git.k8s.io/contrib/fluentd/fluentd-gcp-image) -con algunos pequeños cambios: - -* Cambia el archivo Makefile para que apunte a tu repositorio de contenedores, ej. `PREFIX=gcr.io/`. -* Añade tu dependencia al archivo Gemfile, por ejemplo `gem 'fluent-plugin-kafka'`. - -Luego, ejecuta `make build push` desde ese directorio. Cuando el `DaemonSet` haya tomado los cambios de la nueva imagen, -podrás usar el plugin que has indicado en la configuración de fluentd. 
- - diff --git a/content/es/docs/tasks/federation/_index.md b/content/es/docs/tasks/federation/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/federation/administer-federation/_index.md b/content/es/docs/tasks/federation/administer-federation/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/inject-data-application/_index.md b/content/es/docs/tasks/inject-data-application/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/manage-daemon/_index.md b/content/es/docs/tasks/manage-daemon/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/run-application/_index.md b/content/es/docs/tasks/run-application/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/service-catalog/_index.md b/content/es/docs/tasks/service-catalog/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tasks/tls/_index.md b/content/es/docs/tasks/tls/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tutorials/clusters/_index.md b/content/es/docs/tutorials/clusters/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tutorials/configuration/_index.md b/content/es/docs/tutorials/configuration/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tutorials/kubernetes-basics/_index.md b/content/es/docs/tutorials/kubernetes-basics/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html b/content/es/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html index a91e00f679..6743729d49 100644 --- a/content/es/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html +++ b/content/es/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/es/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html 
b/content/es/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html index 28a2f35a0e..2ec6de59e9 100644 --- a/content/es/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html +++ b/content/es/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/es/docs/tutorials/online-training/_index.md b/content/es/docs/tutorials/online-training/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tutorials/services/_index.md b/content/es/docs/tutorials/services/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tutorials/stateful-application/_index.md b/content/es/docs/tutorials/stateful-application/_index.md old mode 100755 new mode 100644 diff --git a/content/es/docs/tutorials/stateless-application/_index.md b/content/es/docs/tutorials/stateless-application/_index.md old mode 100755 new mode 100644 diff --git a/content/es/examples/controllers/daemonset.yaml b/content/es/examples/controllers/daemonset.yaml index f6c598c9bf..e41e0a6b6f 100644 --- a/content/es/examples/controllers/daemonset.yaml +++ b/content/es/examples/controllers/daemonset.yaml @@ -16,6 +16,7 @@ spec: spec: tolerations: - key: node-role.kubernetes.io/master + operator: Exists effect: NoSchedule containers: - name: fluentd-elasticsearch diff --git a/content/fr/_index.html b/content/fr/_index.html index 53c11593db..836e5b7504 100644 --- a/content/fr/_index.html +++ b/content/fr/_index.html @@ -43,12 +43,12 @@ Kubernetes est une solution open-source qui vous permet de tirer parti de vos in

    - Venez au KubeCon NA Virtuel du 17 au 20 Novembre 2020 + Venez au KubeCon NA Los Angeles, USA du 11 au 15 Octobre 2021



    - Venez au KubeCon EU Virtuel du 4 au 7 Mai 2021 + Venez au KubeCon EU Valence, Espagne du 15 au 20 Mai 2022
    diff --git a/content/fr/docs/concepts/architecture/_index.md b/content/fr/docs/concepts/architecture/_index.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/concepts/cluster-administration/_index.md b/content/fr/docs/concepts/cluster-administration/_index.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/concepts/services-networking/_index.md b/content/fr/docs/concepts/services-networking/_index.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/concepts/workloads/_index.md b/content/fr/docs/concepts/workloads/_index.md index 1d81794f7b..4561edb155 100644 --- a/content/fr/docs/concepts/workloads/_index.md +++ b/content/fr/docs/concepts/workloads/_index.md @@ -1,4 +1,51 @@ --- title: Workloads weight: 50 +description: > + Comprendre les Pods, le plus petit objet déployable sur Kubernetes, et les abstractions de haut niveaux vous permettant de les lancer. +no_list: true --- + + + + +Un workload (charge de travail) est une application fonctionnant sur Kubernetes. Que votre workload soit un composant unique ou un agrégat de composants, sur Kubernetes celui-ci fonctionnera dans une série de pods. Dans Kubernetes, un Pod represente un ensemble de conteneur (containers) en fonctionnement sur votre cluster. + +Les pods Kubernetes ont un cycle de vie définit (defined lifecycle). Par exemple, quand un pod est en fonction sur votre cluster et qu’une panne critique survient sur le noeud (node) où se situe ce pod, tous les pods du noeud seront en échec. Kubernetes traite ce niveau d’échec comme un état final : +Vous devez créer un nouveau Pod pour retrouver l’état initial même si le noeud redevient sain. + +Cependant, pour vous simplifier la vie, vous n’avez pas a gérer chaque Pod directement. Vous pouvez utiliser une ressource workload qui gère votre groupe de pods à votre place. 
Ces ressources configurent des controleurs (controllers) qui s’assurent que le bon nombre et le bon type de pod soit en fonction pour égaler l’état que vous avez spécifié. + +Kubernetes fournit plusieurs ressources workload pré-faites : + +* [`Deployment`](/docs/concepts/workloads/controllers/deployment/) et [`ReplicaSet`](/docs/concepts/workloads/controllers/replicaset/) +(qui remplacent l’ancienne ressource {{< glossary_tooltip text="ReplicationController" term_id="replication-controller" >}})). +Le `Deployment` (déploiement) est une bonne approche pour manager une application stateless sur votre cluster, tous les `Pods` d’un `Deployment` sont interchangeables et peuvent être remplacés si besoin. +* Le [`StatefulSet`](/docs/concepts/workloads/controllers/statefulset/) vous permet de lancer un ou plusieurs Pods en relation qui garde plus ou moins la trace de leurs état. +Par exemple si votre workload enregistre des données de façon persistente, vous pouvez lancer un `StatefulSet` qui fera le lien entre les `Pods` et un volume persistent ([`PersistentVolume`](/docs/concepts/storage/persistent-volumes/)). +Votre code, présent dans les `Pods` du `StatefulSet`, peut répliquer des données dans les autres `Pods` qui sont dans le même `StatefulSet`, +pour améliorer la résilience global. +* Le [`DaemonSet`](/docs/concepts/workloads/controllers/daemonset/) permet de définir les `Pods` qui effectuent des actions sur le noeud local. +Ceux-ci peuvent être fondamental aux opérations de votre cluster, comme un outil d’aide réseau, ou peuvent faire part d’un module complémentaire (add-on). +Pour chaque nouveau noeud ajouté au cluster, le controle plane organise l'ajout d'un `Pod` pour ce `DaemonSet` sur le nouveau noeud. +* Les [`Job`](/docs/concepts/workloads/controllers/job/) et [`CronJob`](/docs/concepts/workloads/controllers/cron-jobs/) sont des taches lancées jusqu’à accomplissement puis s’arrêtent. 
Les `Jobs` réprésentent une tâche ponctuelle, les `CronJob` sont des tâches récurrentes planifiés. + +Dans l’écosystème étendu de Kubernetes, vous pouvez trouver des ressources workload de fournisseurs tiers qui offrent des fonctionnalités supplémentaires. +L’utilisation d’un [`CustomResourceDefinition`](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) permet d’ajouter une ressource workload d’un fournisseur tiers si vous souhaitez rajouter une fonctionnalité ou un comportement spécifique qui ne fait pas partie du noyau de Kubernetes. +Par exemple, si vous voulez lancer un groupe de `Pods` pour votre application mais que vous devez arrêter leurs fonctionnement tant qu’ils ne sont pas tous disponibles, alors vous pouvez implémenter ou installer une extension qui permet cette fonctionnalité. + +## {{% heading "whatsnext" %}} +Vous pouvez continuer la lecture des ressources, vous pouvez aussi apprendre à connaitre les taches qui leurs sont liées : +* Lancer une [application stateless en utilisant un `Deployment`](/docs/tasks/run-application/run-stateless-application-deployment/). +* Lancer une application statefull, soit comme [instance unique](/docs/tasks/run-application/run-single-instance-stateful-application/) + ou alors comme un [ensemble répliqué](/docs/tasks/run-application/run-replicated-stateful-application/). +* Lancer une [tâche automatisée avec un `CronJob`](/docs/tasks/job/automated-tasks-with-cron-jobs/). + +Pour en apprendre plus sur les méchanismes de Kubernetes, de séparation du code et de la configuration, +allez voir [Configuration](/docs/concepts/configuration/). + +Il y a deux concepts supportés qui fournissent un contexte sur le sujet : comment Kubernetes gère les pods pour les applications : +* Le [ramasse-miettes](/docs/concepts/workloads/controllers/garbage-collection/), fait le ménage dans votre cluster après qu’une de _vos ressource_ soit supprimé. 
+* Le [temps de vie d’un controlleur éteint](/docs/concepts/workloads/controllers/ttlafterfinished/) supprime les Jobs une fois qu’un temps définit soit passé après son accomplissement. + +Une fois que votre application est lancée, vous souhaitez peut etre la rendre disponible sur internet comme un [Service](/docs/concepts/services-networking/service/) ou comme une application web uniquement en utilsant un [Ingress](/docs/concepts/services-networking/ingress). diff --git a/content/fr/docs/concepts/workloads/controllers/statefulset.md b/content/fr/docs/concepts/workloads/controllers/statefulset.md index 87286aeaa4..f223a8432f 100644 --- a/content/fr/docs/concepts/workloads/controllers/statefulset.md +++ b/content/fr/docs/concepts/workloads/controllers/statefulset.md @@ -178,7 +178,7 @@ Lorsque le StatefulSet {{< glossary_tooltip term_id="controller" >}} crée un Po il ajoute une étiquette, `statefulset.kubernetes.io/pod-name`, renseignée avec le nom du Pod. Cette étiquette vous permet d'attacher un Service à un Pod spécifique du StatefulSet. -## Garanties de déploiment et de mise à l'échelle +## Garanties de déploiement et de mise à l'échelle * Pour un StatefulSet avec N réplicas, lorsque les Pods sont déployés, ils sont créés de manière séquentielle, dans l'ordre {0..N-1}. * Lorsque les Pods sont supprimés, ils sont terminés dans l'ordre inverse, {N-1..0}. diff --git a/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md b/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md index 9e00fb57b0..cdb91bb27a 100644 --- a/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md +++ b/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md @@ -135,7 +135,6 @@ hack/update-generated-swagger-docs.sh hack/update-swagger-spec.sh hack/update-openapi-spec.sh hack/update-generated-protobuf.sh -hack/update-api-reference-docs.sh ``` Exécutez `git status` pour voir ce qui a été généré. 
@@ -144,8 +143,6 @@ Exécutez `git status` pour voir ce qui a été généré. On branch master ... modified: api/openapi-spec/swagger.json - modified: api/swagger-spec/apps_v1.json - modified: docs/api-reference/apps/v1/definitions.html modified: staging/src/k8s.io/api/apps/v1/generated.proto modified: staging/src/k8s.io/api/apps/v1/types.go modified: staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go diff --git a/content/fr/docs/reference/glossary/annotation.md b/content/fr/docs/reference/glossary/annotation.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/approver.md b/content/fr/docs/reference/glossary/approver.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/certificate.md b/content/fr/docs/reference/glossary/certificate.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/cla.md b/content/fr/docs/reference/glossary/cla.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/cloud-controller-manager.md b/content/fr/docs/reference/glossary/cloud-controller-manager.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/cloud-provider.md b/content/fr/docs/reference/glossary/cloud-provider.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/cluster-architect.md b/content/fr/docs/reference/glossary/cluster-architect.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/cluster-operator.md b/content/fr/docs/reference/glossary/cluster-operator.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/code-contributor.md b/content/fr/docs/reference/glossary/code-contributor.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/configmap.md b/content/fr/docs/reference/glossary/configmap.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/container-env-variables.md 
b/content/fr/docs/reference/glossary/container-env-variables.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/contributor.md b/content/fr/docs/reference/glossary/contributor.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/controller.md b/content/fr/docs/reference/glossary/controller.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/cronjob.md b/content/fr/docs/reference/glossary/cronjob.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/customresourcedefinition.md b/content/fr/docs/reference/glossary/customresourcedefinition.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/daemonset.md b/content/fr/docs/reference/glossary/daemonset.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/deployment.md b/content/fr/docs/reference/glossary/deployment.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/developer.md b/content/fr/docs/reference/glossary/developer.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/downstream.md b/content/fr/docs/reference/glossary/downstream.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/dynamic-volume-provisioning.md b/content/fr/docs/reference/glossary/dynamic-volume-provisioning.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/etcd.md b/content/fr/docs/reference/glossary/etcd.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/helm-chart.md b/content/fr/docs/reference/glossary/helm-chart.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/ingress.md b/content/fr/docs/reference/glossary/ingress.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/init-container.md b/content/fr/docs/reference/glossary/init-container.md old 
mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/istio.md b/content/fr/docs/reference/glossary/istio.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/kube-apiserver.md b/content/fr/docs/reference/glossary/kube-apiserver.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/kube-controller-manager.md b/content/fr/docs/reference/glossary/kube-controller-manager.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/kube-proxy.md b/content/fr/docs/reference/glossary/kube-proxy.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/kube-scheduler.md b/content/fr/docs/reference/glossary/kube-scheduler.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/service.md b/content/fr/docs/reference/glossary/service.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/glossary/statefulset.md b/content/fr/docs/reference/glossary/statefulset.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/kubectl/_index.md b/content/fr/docs/reference/kubectl/_index.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/kubectl/kubectl.md b/content/fr/docs/reference/kubectl/kubectl.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/fr/docs/reference/setup-tools/kubeadm/kubeadm-init.md index edf5c7e4c9..0df7c1a65b 100644 --- a/content/fr/docs/reference/setup-tools/kubeadm/kubeadm-init.md +++ b/content/fr/docs/reference/setup-tools/kubeadm/kubeadm-init.md @@ -131,7 +131,7 @@ Pour de l'information sur comment passer des options aux composants du control p ### Utiliser des images personnalisées {#custom-images} -Par défaut, kubeadm télécharge les images depuis `k8s.gcr.io`, à moins que la version demandée de Kubernetes soit une version Intégration Continue (CI). 
Dans ce cas, `gcr.io/kubernetes-ci-images` est utilisé. +Par défaut, kubeadm télécharge les images depuis `k8s.gcr.io`, à moins que la version demandée de Kubernetes soit une version Intégration Continue (CI). Dans ce cas, `gcr.io/k8s-staging-ci-images` est utilisé. Vous pouvez outrepasser ce comportement en utilisant [kubeadm avec un fichier de configuration](#config-file). Les personnalisations permises sont : diff --git a/content/fr/docs/setup/custom-cloud/kubespray.md b/content/fr/docs/setup/custom-cloud/kubespray.md index 2e10c21f46..cde3cbb3f9 100644 --- a/content/fr/docs/setup/custom-cloud/kubespray.md +++ b/content/fr/docs/setup/custom-cloud/kubespray.md @@ -8,7 +8,7 @@ content_type: concept Cette documentation permet d'installer rapidement un cluster Kubernetes hébergé sur GCE, Azure, Openstack, AWS, vSphere, Oracle Cloud Infrastructure (expérimental) ou sur des serveurs physiques (bare metal) grâce à [Kubespray](https://github.com/kubernetes-incubator/kubespray). -Kubespray se base sur des outils de provisioning, des [paramètres](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ansible.md) et playbooks [Ansible](http://docs.ansible.com/) ainsi que sur des connaissances spécifiques à Kubernetes et l'installation de systèmes d'exploitation afin de fournir: +Kubespray se base sur des outils de provisioning, des [paramètres](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ansible.md) et playbooks [Ansible](https://docs.ansible.com/) ainsi que sur des connaissances spécifiques à Kubernetes et l'installation de systèmes d'exploitation afin de fournir: * Un cluster en haute disponibilité * des composants modulables @@ -49,7 +49,7 @@ Afin de vous aider à préparer votre de votre environnement, Kubespray fournit ### (2/5) Construire un fichier d'inventaire Ansible -Lorsque vos serveurs sont disponibles, créez un fichier d'inventaire Ansible ([inventory](http://docs.ansible.com/ansible/intro_inventory.html)). 
+Lorsque vos serveurs sont disponibles, créez un fichier d'inventaire Ansible ([inventory](https://docs.ansible.com/ansible/latest/network/getting_started/first_inventory.html)). Vous pouvez le créer manuellement ou en utilisant un script d'inventaire dynamique. Pour plus d'informations se référer à [Building your own inventory](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory). ### (3/5) Préparation au déploiement de votre cluster diff --git a/content/fr/docs/setup/pick-right-solution.md b/content/fr/docs/setup/pick-right-solution.md deleted file mode 100644 index a730ce8eb1..0000000000 --- a/content/fr/docs/setup/pick-right-solution.md +++ /dev/null @@ -1,303 +0,0 @@ ---- -reviewers: -- yastij -title: Choisir la bonne solution -description: Panorama de solutions Kubernetes -weight: 10 -content_type: concept ---- - - - -Kubernetes peut fonctionner sur des plateformes variées: sur votre PC portable, sur des VMs d'un fournisseur de cloud, ou un rack -de serveurs bare-metal. L'effort demandé pour configurer un cluster varie de l'éxécution d'une simple commande à la création -de votre propre cluster personnalisé. Utilisez ce guide pour choisir la solution qui correspond le mieux à vos besoins. - -Si vous voulez simplement jeter un coup d'oeil rapide, utilisez alors de préférence les [solutions locales basées sur Docker](#solutions-locales). - -Lorsque vous êtes prêts à augmenter le nombre de machines et souhaitez bénéficier de la haute disponibilité, une -[solution hébergée](#solutions-hebergées) est la plus simple à déployer et à maintenir. - -[Les solutions cloud clés en main](#solutions-clés-en-main) ne demandent que peu de commande pour déployer et couvrent un large panel de - fournisseurs de cloud. [Les solutions clés en main pour cloud privé](#solutions-on-premises-clés-en-main) possèdent la simplicité des solutions cloud clés en main combinées avec la sécurité de votre propre réseau privé. 
- -Si vous avez déjà un moyen de configurer vos resources, utilisez [kubeadm](/fr/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) pour facilement -déployer un cluster grâce à une seule ligne de commande par machine. - -[Les solutions personnalisées](#solutions-personnalisées) varient d'instructions pas à pas, à des conseils relativement généraux pour déployer un - -cluster Kubernetes en partant du début. - - - - - -## Solutions locales - -* [Minikube](/fr/docs/setup/learning-environment/minikube/) est une méthode pour créer un cluster Kubernetes local à noeud unique pour le développement et le test. L'installation est entièrement automatisée et ne nécessite pas de compte de fournisseur de cloud. - -* [Docker Desktop](https://www.docker.com/products/docker-desktop) est une -application facile à installer pour votre environnement Mac ou Windows qui vous permet de -commencer à coder et déployer votre code dans des conteneurs en quelques minutes sur un nœud unique Kubernetes. - -* [Minishift](https://docs.okd.io/latest/minishift/) installe la version communautaire de la plate-forme d'entreprise OpenShift -de Kubernetes pour le développement local et les tests. Il offre une VM tout-en-un (`minishift start`) pour Windows, macOS et Linux, - le `oc cluster up` containerisé (Linux uniquement) et [est livré avec quelques Add Ons faciles à installer](https://github.com/minishift/minishift-addons/tree/master/add-ons). - -* [MicroK8s](https://microk8s.io/) fournit une commande unique d'installation de la dernière version de Kubernetes sur une machine locale -pour le développement et les tests. L'installation est rapide (~30 sec) et supporte de nombreux plugins dont Istio avec une seule commande. - -* [IBM Cloud Private-CE (Community Edition)](https://github.com/IBM/deploy-ibm-cloud-private) peut utiliser VirtualBox sur votre machine -pour déployer Kubernetes sur une ou plusieurs machines virtuelles afin de développer et réaliser des scénarios de test. 
Cette solution -peut créer un cluster multi-nœuds complet. - -* [IBM Cloud Private-CE (Community Edition) sur Linux Containers](https://github.com/HSBawa/icp-ce-on-linux-containers) est un script IaC (Infrastructure as Code) basé sur Terraform/Packer/BASH pour créer un cluster LXD à sept nœuds (1 Boot, 1 Master, 1 Management, 1 Proxy et 3 Workers) sur une machine Linux. - -* [Kubeadm-dind](https://github.com/kubernetes-sigs/kubeadm-dind-cluster) est un cluster Kubernetes multi-nœuds (tandis que minikube est -un nœud unique) qui ne nécessite qu'un docker-engine. Il utilise la technique du docker-in-docker pour déployer le cluster Kubernetes. - -* [Ubuntu sur LXD](/docs/getting-start-guides/ubuntu/local/) supporte un déploiement de 9 instances sur votre machine locale. - -## Solutions hebergées - -* [AppsCode.com](https://appscode.com/products/cloud-deployment/) fournit des clusters Kubernetes managés pour divers clouds publics, dont AWS et Google Cloud Platform. - -* [APPUiO](https://appuio.ch) propose une plate-forme de cloud public OpenShift, supportant n'importe quel workload Kubernetes. De plus, APPUiO propose des Clusters OpenShift privés et managés, fonctionnant sur n'importe quel cloud public ou privé. - -* [Amazon Elastic Container Service for Kubernetes](https://aws.amazon.com/eks/) offre un service managé de Kubernetes. - -* [Azure Kubernetes Service](https://azure.microsoft.com/services/container-service/) offre des clusters Kubernetes managés. - -* [Containership Kubernetes Engine (CKE)](https://containership.io/containership-platform) Approvisionnement et gestion intuitive de clusters - Kubernetes sur GCP, Azure, AWS, Packet, et DigitalOcean. Mises à niveau transparentes, auto-scaling, métriques, création de -workloads, et plus encore. - -* [DigitalOcean Kubernetes](https://www.digitalocean.com/products/kubernetes/) offre un service managé de Kubernetes. 
- -* [Giant Swarm](https://giantswarm.io/product/) offre des clusters Kubernetes managés dans leur propre centre de données, on-premises ou sur des clouds public. - -* [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) offre des clusters Kubernetes managés. - -* [IBM Cloud Kubernetes Service](https://cloud.ibm.com/docs/containers?topic=containers-getting-started) offre des clusters Kubernetes managés avec choix d'isolation, des outils opérationnels, une vision intégrée de la sécurité des images et des conteneurs et une intégration avec Watson, IoT et les données. - -* [Kubermatic](https://www.loodse.com) fournit des clusters Kubernetes managés pour divers clouds publics, y compris AWS et Digital Ocean, ainsi que sur site avec intégration OpenStack. - -* [Kublr](https://kublr.com) offre des clusters Kubernetes sécurisés, évolutifs et hautement fiables sur AWS, Azure, GCP et on-premises, - de qualité professionnelle. Il inclut la sauvegarde et la reprise après sinistre prêtes à l'emploi, la journalisation et la surveillance centralisées multi-clusters, ainsi qu'une fonction d'alerte intégrée. - -* [Madcore.Ai](https://madcore.ai) est un outil CLI orienté développement pour déployer l'infrastructure Kubernetes dans AWS. Les masters, un groupe d'autoscaling pour les workers sur des spot instances, les ingress-ssl-lego, Heapster, et Grafana. - -* [Nutanix Karbon](https://www.nutanix.com/products/karbon/) est une plateforme de gestion et d'exploitation Kubernetes multi-clusters hautement disponibles qui simplifie l'approvisionnement, les opérations et la gestion du cycle de vie de Kubernetes. - -* [OpenShift Dedicated](https://www.openshift.com/dedicated/) offre des clusters Kubernetes gérés et optimisés par OpenShift. - -* [OpenShift Online](https://www.openshift.com/features/) fournit un accès hébergé gratuit aux applications Kubernetes. 
- -* [Oracle Container Engine for Kubernetes](https://docs.us-phoenix-1.oraclecloud.com/Content/ContEng/Concepts/contengoverview.htm) est un service entièrement géré, évolutif et hautement disponible que vous pouvez utiliser pour déployer vos applications conteneurisées dans le cloud. - -* [Platform9](https://platform9.com/products/kubernetes/) offre des Kubernetes gérés on-premises ou sur n'importe quel cloud public, et fournit une surveillance et des alertes de santé 24h/24 et 7j/7. (Kube2go, une plate-forme de service de déploiement de cluster Kubernetes pour le déploiement de l'interface utilisateur Web9, a été intégrée à Platform9 Sandbox.) - -* [Stackpoint.io](https://stackpoint.io) fournit l'automatisation et la gestion de l'infrastructure Kubernetes pour plusieurs clouds publics. - -* [SysEleven MetaKube](https://www.syseleven.io/products-services/managed-kubernetes/) offre un Kubernetes-as-a-Service sur un cloud public OpenStack. Il inclut la gestion du cycle de vie, les tableaux de bord d'administration, la surveillance, la mise à l'échelle automatique et bien plus encore. - -* [VMware Cloud PKS](https://cloud.vmware.com/vmware-cloud-pks) est une offre d'entreprise Kubernetes-as-a-Service faisant partie du catalogue de services Cloud VMware qui fournit des clusters Kubernetes faciles à utiliser, sécurisés par défaut, rentables et basés sur du SaaS. - -## Solutions clés en main - -Ces solutions vous permettent de créer des clusters Kubernetes sur une gamme de fournisseurs de Cloud IaaaS avec seulement -quelques commandes. Ces solutions sont activement développées et bénéficient du soutien actif de la communauté. 
- -* [Agile Stacks](https://www.agilestacks.com/products/kubernetes) -* [Alibaba Cloud](/docs/setup/turnkey/alibaba-cloud/) -* [APPUiO](https://appuio.ch) -* [AWS](/docs/setup/turnkey/aws/) -* [Azure](/docs/setup/turnkey/azure/) -* [CenturyLink Cloud](/docs/setup/turnkey/clc/) -* [Conjure-up Kubernetes with Ubuntu on AWS, Azure, Google Cloud, Oracle Cloud](/docs/getting-started-guides/ubuntu/) -* [Containership](https://containership.io/containership-platform) -* [Docker Enterprise](https://www.docker.com/products/docker-enterprise) -* [Gardener](https://gardener.cloud/) -* [Giant Swarm](https://giantswarm.io) -* [Google Compute Engine (GCE)](/docs/setup/turnkey/gce/) -* [IBM Cloud](https://github.com/patrocinio/kubernetes-softlayer) -* [Kontena Pharos](https://kontena.io/pharos/) -* [Kubermatic](https://cloud.kubermatic.io) -* [Kublr](https://kublr.com/) -* [Madcore.Ai](https://madcore.ai/) -* [Nirmata](https://nirmata.com/) -* [Nutanix Karbon](https://www.nutanix.com/products/karbon/) -* [Oracle Container Engine for K8s](https://docs.us-phoenix-1.oraclecloud.com/Content/ContEng/Concepts/contengprerequisites.htm) -* [Pivotal Container Service](https://pivotal.io/platform/pivotal-container-service) -* [Rancher 2.0](https://rancher.com/docs/rancher/v2.x/en/) -* [Stackpoint.io](/docs/setup/turnkey/stackpoint/) -* [Tectonic by CoreOS](https://coreos.com/tectonic) -* [VMware Cloud PKS](https://cloud.vmware.com/vmware-cloud-pks) - -## Solutions On-Premises clés en main - -Ces solutions vous permettent de créer des clusters Kubernetes sur votre cloud privé sécurisé avec seulement quelques commandes. 
- -* [Agile Stacks](https://www.agilestacks.com/products/kubernetes) -* [APPUiO](https://appuio.ch) -* [Docker Enterprise](https://www.docker.com/products/docker-enterprise) -* [Giant Swarm](https://giantswarm.io) -* [GKE On-Prem | Google Cloud](https://cloud.google.com/gke-on-prem/) -* [IBM Cloud Private](https://www.ibm.com/cloud-computing/products/ibm-cloud-private/) -* [Kontena Pharos](https://kontena.io/pharos/) -* [Kubermatic](https://www.loodse.com) -* [Kublr](https://kublr.com/) -* [Mirantis Cloud Platform](https://www.mirantis.com/software/kubernetes/) -* [Nirmata](https://nirmata.com/) -* [OpenShift Container Platform](https://www.openshift.com/products/container-platform/) (OCP) by [Red Hat](https://www.redhat.com) -* [Pivotal Container Service](https://pivotal.io/platform/pivotal-container-service) -* [Rancher 2.0](https://rancher.com/docs/rancher/v2.x/en/) -* [SUSE CaaS Platform](https://www.suse.com/products/caas-platform) -* [SUSE Cloud Application Platform](https://www.suse.com/products/cloud-application-platform/) - -## Solutions personnalisées - -Kubernetes peut fonctionner sur une large gamme de fournisseurs de Cloud et d'environnements bare-metal, ainsi qu'avec de nombreux -systèmes d'exploitation. - -Si vous pouvez trouver un guide ci-dessous qui correspond à vos besoins, utilisez-le. C'est peut-être un peu dépassé, mais... -ce sera plus facile que de partir de zéro. Si vous voulez repartir de zéro, soit parce que vous avez des exigences particulières, -ou simplement parce que vous voulez comprendre ce qu'il y a à l'interieur de Kubernetes -essayez le guide [Getting Started from Scratch](/docs/setup/release/building-from-source/). - -### Universel - -Si vous avez déjà un moyen de configurer les ressources d'hébergement, utilisez -[kubeadm](/fr/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) pour déployer facilement un cluster -avec une seule commande par machine. 
- -### Cloud - -Ces solutions sont des combinaisons de fournisseurs de cloud computing et de systèmes d'exploitation qui ne sont pas couverts par les solutions ci-dessus. - -* [Cloud Foundry Container Runtime (CFCR)](https://docs-cfcr.cfapps.io/) -* [CoreOS on AWS or GCE](/docs/setup/custom-cloud/coreos/) -* [Gardener](https://gardener.cloud/) -* [Kublr](https://kublr.com/) -* [Kubernetes on Ubuntu](/docs/getting-started-guides/ubuntu/) -* [Kubespray](/docs/setup/custom-cloud/kubespray/) -* [Rancher Kubernetes Engine (RKE)](https://github.com/rancher/rke) - -### VMs On-Premises - -* [Cloud Foundry Container Runtime (CFCR)](https://docs-cfcr.cfapps.io/) -* [CloudStack](/docs/setup/on-premises-vm/cloudstack/) (uses Ansible, CoreOS and flannel) -* [Fedora (Multi Node)](/docs/getting-started-guides/fedora/flannel_multi_node_cluster/) (uses Fedora and flannel) -* [Nutanix AHV](https://www.nutanix.com/products/acropolis/virtualization/) -* [OpenShift Container Platform](https://www.openshift.com/products/container-platform/) (OCP) Kubernetes platform by [Red Hat](https://www.redhat.com) -* [oVirt](/docs/setup/on-premises-vm/ovirt/) -* [Vagrant](/docs/setup/custom-cloud/coreos/) (uses CoreOS and flannel) -* [VMware](/docs/setup/custom-cloud/coreos/) (uses CoreOS and flannel) -* [VMware vSphere](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) -* [VMware vSphere, OpenStack, or Bare Metal](/docs/getting-started-guides/ubuntu/) (uses Juju, Ubuntu and flannel) - -### Bare Metal - -* [CoreOS](/docs/setup/custom-cloud/coreos/) -* [Digital Rebar](/docs/setup/on-premises-metal/krib/) -* [Docker Enterprise](https://www.docker.com/products/docker-enterprise) -* [Fedora (Single Node)](/docs/getting-started-guides/fedora/fedora_manual_config/) -* [Fedora (Multi Node)](/docs/getting-started-guides/fedora/flannel_multi_node_cluster/) -* [Kubernetes on Ubuntu](/docs/getting-started-guides/ubuntu/) -* [OpenShift Container 
Platform](https://www.openshift.com/products/container-platform/) (OCP) Kubernetes platform by [Red Hat](https://www.redhat.com) - -### Integrations - -Ces solutions fournissent une intégration avec des orchestrateurs, des resources managers ou des plateformes tierces. - -* [DCOS](/docs/setup/on-premises-vm/dcos/) - * Community Edition DCOS utilise AWS - * Enterprise Edition DCOS supporte l'hébergement cloud, les VMs on-premises, et le bare-metal - -## Tableau des Solutions - -Ci-dessous vous trouverez un tableau récapitulatif de toutes les solutions listées précédemment. - -| Fournisseur de IaaS | Config. Mgmt. | OS | Réseau | Docs | Niveau de support | -|------------------------------------------------|------------------------------------------------------------------------------|--------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| tous | tous | multi-support | tout les CNI | [docs](/fr/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) | Project ([SIG-cluster-lifecycle](https://git.k8s.io/community/sig-cluster-lifecycle)) | -| Google Kubernetes Engine | | | GCE | [docs](https://cloud.google.com/kubernetes-engine/docs/) | Commercial | -| Docker Enterprise | personnalisé | [multi-support](https://success.docker.com/article/compatibility-matrix) | [multi-support](https://docs.docker.com/ee/ucp/kubernetes/install-cni-plugin/) | [docs](https://docs.docker.com/ee/) | Commercial | -| IBM Cloud Private | Ansible | multi-support | multi-support | 
[docs](https://www.ibm.com/support/knowledgecenter/SSBS6K/product_welcome_cloud_private.html) | [Commercial](https://www.ibm.com/mysupport/s/topic/0TO500000001o0fGAA/ibm-cloud-private?language=en_US&productId=01t50000004X1PWAA0) and [Community](https://www.ibm.com/support/knowledgecenter/SSBS6K_3.1.2/troubleshoot/support_types.html) | -| Red Hat OpenShift | Ansible & CoreOS | RHEL & CoreOS | [multi-support](https://docs.openshift.com/container-platform/3.11/architecture/networking/network_plugins.html) | [docs](https://docs.openshift.com/container-platform/3.11/welcome/index.html) | Commercial | -| Stackpoint.io | | multi-support | multi-support | [docs](https://stackpoint.io/) | Commercial | -| AppsCode.com | Saltstack | Debian | multi-support | [docs](https://appscode.com/products/cloud-deployment/) | Commercial | -| Madcore.Ai | Jenkins DSL | Ubuntu | flannel | [docs](https://madcore.ai) | Community ([@madcore-ai](https://github.com/madcore-ai)) | -| Platform9 | | multi-support | multi-support | [docs](https://platform9.com/managed-kubernetes/) | Commercial | -| Kublr | personnalisé | multi-support | multi-support | [docs](http://docs.kublr.com/) | Commercial | -| Kubermatic | | multi-support | multi-support | [docs](http://docs.kubermatic.io/) | Commercial | -| IBM Cloud Kubernetes Service | | Ubuntu | IBM Cloud Networking + Calico | [docs](https://cloud.ibm.com/docs/containers?topic=containers-getting-started) | Commercial | -| Giant Swarm | | CoreOS | flannel and/or Calico | [docs](https://docs.giantswarm.io/) | Commercial | -| GCE | Saltstack | Debian | GCE | [docs](/docs/setup/turnkey/gce/) | Project | -| Azure Kubernetes Service | | Ubuntu | Azure | [docs](https://docs.microsoft.com/en-us/azure/aks/) | Commercial | -| Azure (IaaS) | | Ubuntu | Azure | [docs](/docs/setup/turnkey/azure/) | [Community (Microsoft)](https://github.com/Azure/acs-engine) | -| Bare-metal | personnalisé | Fedora | _none_ | 
[docs](/docs/getting-started-guides/fedora/fedora_manual_config/) | Project | -| Bare-metal | personnalisé | Fedora | flannel | [docs](/docs/getting-started-guides/fedora/flannel_multi_node_cluster/) | Community ([@aveshagarwal](https://github.com/aveshagarwal)) | -| libvirt | personnalisé | Fedora | flannel | [docs](/docs/getting-started-guides/fedora/flannel_multi_node_cluster/) | Community ([@aveshagarwal](https://github.com/aveshagarwal)) | -| KVM | personnalisé | Fedora | flannel | [docs](/docs/getting-started-guides/fedora/flannel_multi_node_cluster/) | Community ([@aveshagarwal](https://github.com/aveshagarwal)) | -| DCOS | Marathon | CoreOS/Alpine | personnalisé | [docs](/docs/getting-started-guides/dcos/) | Community ([Kubernetes-Mesos Authors](https://github.com/mesosphere/kubernetes-mesos/blob/master/AUTHORS.md)) | -| AWS | CoreOS | CoreOS | flannel | [docs](/docs/setup/turnkey/aws/) | Community | -| GCE | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/coreos/) | Community ([@pires](https://github.com/pires)) | -| Vagrant | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/coreos/) | Community ([@pires](https://github.com/pires), [@AntonioMeireles](https://github.com/AntonioMeireles)) | -| CloudStack | Ansible | CoreOS | flannel | [docs](/docs/getting-started-guides/cloudstack/) | Community ([@sebgoa](https://github.com/sebgoa)) | -| VMware vSphere | tous | multi-support | multi-support | [docs](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) | [Community](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/contactus.html) | -| Bare-metal | personnalisé | CentOS | flannel | [docs](/docs/getting-started-guides/centos/centos_manual_config/) | Community ([@coolsvap](https://github.com/coolsvap)) | -| lxd | Juju | Ubuntu | flannel/canal | [docs](/docs/getting-started-guides/ubuntu/local/) | [Commercial](https://www.ubuntu.com/kubernetes) and 
[Community](https://jujucharms.com/kubernetes) | -| AWS | Juju | Ubuntu | flannel/calico/canal | [docs](/docs/getting-started-guides/ubuntu/) | [Commercial](https://www.ubuntu.com/kubernetes) and [Community](https://jujucharms.com/kubernetes) | -| Azure | Juju | Ubuntu | flannel/calico/canal | [docs](/docs/getting-started-guides/ubuntu/) | [Commercial](https://www.ubuntu.com/kubernetes) and [Community](https://jujucharms.com/kubernetes) | -| GCE | Juju | Ubuntu | flannel/calico/canal | [docs](/docs/getting-started-guides/ubuntu/) | [Commercial](https://www.ubuntu.com/kubernetes) and [Community](https://jujucharms.com/kubernetes) | -| Oracle Cloud | Juju | Ubuntu | flannel/calico/canal | [docs](/docs/getting-started-guides/ubuntu/) | [Commercial](https://www.ubuntu.com/kubernetes) and [Community](https://jujucharms.com/kubernetes) | -| Rackspace | personnalisé | CoreOS | flannel/calico/canal | [docs](https://developer.rackspace.com/docs/rkaas/latest/) | [Commercial](https://www.rackspace.com/managed-kubernetes) | -| VMware vSphere | Juju | Ubuntu | flannel/calico/canal | [docs](/docs/getting-started-guides/ubuntu/) | [Commercial](https://www.ubuntu.com/kubernetes) and [Community](https://jujucharms.com/kubernetes) | -| Bare Metal | Juju | Ubuntu | flannel/calico/canal | [docs](/docs/getting-started-guides/ubuntu/) | [Commercial](https://www.ubuntu.com/kubernetes) and [Community](https://jujucharms.com/kubernetes) | -| AWS | Saltstack | Debian | AWS | [docs](/docs/setup/turnkey/aws/) | Community ([@justinsb](https://github.com/justinsb)) | -| AWS | kops | Debian | AWS | [docs](https://github.com/kubernetes/kops/) | Community ([@justinsb](https://github.com/justinsb)) | -| Bare-metal | personnalisé | Ubuntu | flannel | [docs](/docs/getting-started-guides/ubuntu/) | Community ([@resouer](https://github.com/resouer), [@WIZARD-CXY](https://github.com/WIZARD-CXY)) | -| oVirt | | | | [docs](/docs/setup/on-premises-vm/ovirt/) | Community 
([@simon3z](https://github.com/simon3z)) | -| tous | tous | tous | tous | [docs](/docs/setup/release/building-from-source/) | Community ([@erictune](https://github.com/erictune)) | -| tous | tous | tous | tous | [docs](http://docs.projectcalico.org/v2.2/getting-started/kubernetes/installation/) | Commercial and Community | -| tous | RKE | multi-support | flannel or canal | [docs](https://rancher.com/docs/rancher/v2.x/en/quick-start-guide/) | [Commercial](https://rancher.com/what-is-rancher/overview/) and [Community](https://github.com/rancher/rancher) | -| tous | [Gardener Cluster-Operator](https://kubernetes.io/blog/2018/05/17/gardener/) | multi-support | multi-support | [docs](https://gardener.cloud) | [Project/Community](https://github.com/gardener) and [Commercial]( https://cloudplatform.sap.com/) | -| Alibaba Cloud Container Service For Kubernetes | ROS | CentOS | flannel/Terway | [docs](https://www.aliyun.com/product/containerservice) | Commercial | -| Agile Stacks | Terraform | CoreOS | multi-support | [docs](https://www.agilestacks.com/products/kubernetes) | Commercial | -| IBM Cloud Kubernetes Service | | Ubuntu | calico | [docs](https://cloud.ibm.com/docs/containers?topic=containers-container_index#container_index) | Commercial | -| Digital Rebar | kubeadm | tous | metal | [docs](/docs/setup/on-premises-metal/krib/) | Community ([@digitalrebar](https://github.com/digitalrebar)) | -| VMware Cloud PKS | | Photon OS | Canal | [docs](https://docs.vmware.com/en/VMware-Kubernetes-Engine/index.html) | Commercial | -| Mirantis Cloud Platform | Salt | Ubuntu | multi-support | [docs](https://docs.mirantis.com/mcp/) | Commercial | - -{{< note >}} -Le tableau ci-dessus est ordonné par versions testées et utilisées dans les noeuds, suivis par leur niveau de support. -{{< /note >}} - -### Définition des colonnes - -* **IaaS Provider** est le produit ou l'organisation qui fournit les machines virtuelles ou physiques (nœuds) sur lesquelles Kubernetes fonctionne. 
-* **OS** est le système d'exploitation de base des nœuds. -* **Config. Mgmt.** est le système de gestion de configuration qui permet d'installer et de maintenir Kubernetes sur les - nœuds. -* **Le réseau** est ce qui implémente le [modèle de réseau](/docs/concepts/cluster-administration/networking/). Ceux qui ont le type de réseautage - Aucun_ ne peut pas prendre en charge plus d'un nœud unique, ou peut prendre en charge plusieurs nœuds VM dans un nœud physique unique. -* **Conformité** indique si un cluster créé avec cette configuration a passé la conformité du projet. - pour le support de l'API et des fonctionnalités de base de Kubernetes v1.0.0. -* **Niveaux de soutien** - * **Projet** : Les contributeurs de Kubernetes utilisent régulièrement cette configuration, donc elle fonctionne généralement avec la dernière version. - de Kubernetes. - * **Commercial** : Une offre commerciale avec son propre dispositif d'accompagnement. - * **Communauté** : Soutenu activement par les contributions de la communauté. Peut ne pas fonctionner avec les versions récentes de Kubernetes. - * **Inactif** : Pas de maintenance active. Déconseillé aux nouveaux utilisateurs de Kubernetes et peut être retiré. -* **Note** contient d'autres informations pertinentes, telles que la version de Kubernetes utilisée. 
- - - -[1]: https://gist.github.com/erictune/4cabc010906afbcc5061 - -[2]: https://gist.github.com/derekwaynecarr/505e56036cdf010bf6b6 - -[3]: https://gist.github.com/erictune/2f39b22f72565365e59b - - diff --git a/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index 06c6d2f53c..fdfd36b7eb 100644 --- a/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -225,8 +225,9 @@ Installez les plugins CNI (requis pour la plupart des réseaux de pods) : ```bash CNI_VERSION="v0.8.2" +ARCH="amd64" sudo mkdir -p /opt/cni/bin -curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | sudo tar -C /opt/cni/bin -xz +curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz" | sudo tar -C /opt/cni/bin -xz ``` Définissez le répertoire pour télécharger les fichiers de commande @@ -245,7 +246,8 @@ Installez crictl (requis pour Kubeadm / Kubelet Container Runtime Interface (CRI ```bash CRICTL_VERSION="v1.17.0" -curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | sudo tar -C $DOWNLOAD_DIR -xz +ARCH="amd64" +curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${ARCH}.tar.gz" | sudo tar -C $DOWNLOAD_DIR -xz ``` Installez `kubeadm`,` kubelet`, `kubectl` et ajoutez un service systemd` kubelet`: @@ -254,8 +256,9 @@ RELEASE_VERSION="v0.6.0" ```bash RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" +ARCH="amd64" cd $DOWNLOAD_DIR -sudo curl -L --remote-name-all 
https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} +sudo curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/${ARCH}/{kubeadm,kubelet,kubectl} sudo chmod +x {kubeadm,kubelet,kubectl} curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /etc/systemd/system/kubelet.service diff --git a/content/fr/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md b/content/fr/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md index c8966b56fa..b13b6ff7d9 100644 --- a/content/fr/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md +++ b/content/fr/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md @@ -230,8 +230,8 @@ kubeadm contient tout ce qui est nécessaire pour générer les certificats déc ```sh root@HOST0 $ kubeadm init phase etcd local --config=/tmp/${HOST0}/kubeadmcfg.yaml - root@HOST1 $ kubeadm init phase etcd local --config=/home/ubuntu/kubeadmcfg.yaml - root@HOST2 $ kubeadm init phase etcd local --config=/home/ubuntu/kubeadmcfg.yaml + root@HOST1 $ kubeadm init phase etcd local --config=/tmp/${HOST1}/kubeadmcfg.yaml + root@HOST2 $ kubeadm init phase etcd local --config=/tmp/${HOST2}/kubeadmcfg.yaml ``` 1. 
Facultatif: Vérifiez la santé du cluster diff --git a/content/fr/docs/setup/release/_index.md b/content/fr/docs/setup/release/_index.md old mode 100755 new mode 100644 diff --git a/content/fr/docs/tasks/access-application-cluster/list-all-running-container-images.md b/content/fr/docs/tasks/access-application-cluster/list-all-running-container-images.md index eea6e5cd3d..114bcc784b 100644 --- a/content/fr/docs/tasks/access-application-cluster/list-all-running-container-images.md +++ b/content/fr/docs/tasks/access-application-cluster/list-all-running-container-images.md @@ -19,7 +19,7 @@ Dans cet exercice, vous allez utiliser kubectl pour récupérer tous les pods ex ## Répertorier toutes les images de conteneurs dans tous les namespaces - Récupérez tous les pods dans tous les namespace à l'aide de `kubectl get pods --all-namespaces` -- Formatez la sortie pour inclure uniquement la liste des noms d'image de conteneur à l'aide de `-o jsonpath={..image}`. +- Formatez la sortie pour inclure uniquement la liste des noms d'image de conteneur à l'aide de `-o jsonpath={.items[*].spec.containers[*].image}`. Cela analysera récursivement le champ `image` du json retourné. - Voir la [reference jsonpath](/docs/reference/kubectl/jsonpath/) pour plus d'informations sur l'utilisation de jsonpath. - Formatez la sortie à l'aide des outils standard: `tr`, `sort`, `uniq` @@ -28,7 +28,7 @@ Dans cet exercice, vous allez utiliser kubectl pour récupérer tous les pods ex - Utilisez `uniq` pour agréger le nombre d'images ```shell -kubectl get pods --all-namespaces -o jsonpath="{..image}" |\ +kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" |\ tr -s '[[:space:]]' '\n' |\ sort |\ uniq -c @@ -69,7 +69,7 @@ Pour cibler uniquement les pods correspondant à un label spécifique, utilisez Les éléments suivants correspondent uniquement aux pods avec les labels `app=nginx`. 
```shell -kubectl get pods --all-namespaces -o=jsonpath="{..image}" -l app=nginx +kubectl get pods --all-namespaces -o=jsonpath="{.items[*].spec.containers[*].image}" -l app=nginx ``` ## Filtrage des images de conteneur de liste par namespace de pod @@ -78,7 +78,7 @@ Pour cibler uniquement les pods dans un namespace spécifique, utilisez l'indica Ce qui suit correspond uniquement aux pods du namespace `kube-system`. ```shell -kubectl get pods --namespace kube-system -o jsonpath="{..image}" +kubectl get pods --namespace kube-system -o jsonpath="{.items[*].spec.containers[*].image}" ``` ## Répertorier les images de conteneurs en utilisant un go-template au lieu de jsonpath diff --git a/content/fr/docs/tasks/configure-pod-container/configure-service-account.md b/content/fr/docs/tasks/configure-pod-container/configure-service-account.md index 1147f2234e..4f38f3a397 100644 --- a/content/fr/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/fr/docs/tasks/configure-pod-container/configure-service-account.md @@ -155,8 +155,8 @@ La sortie est comme la suivante : Name: build-robot-secret Namespace: default Labels: -Annotations: kubernetes.io/service-account.name=build-robot - kubernetes.io/service-account.uid=da68f9c6-9d26-11e7-b84e-002dc52800da +Annotations: kubernetes.io/service-account: name=build-robot + kubernetes.io/service-account: uid=da68f9c6-9d26-11e7-b84e-002dc52800da Type: kubernetes.io/service-account-token diff --git a/content/hi/docs/_index.md b/content/hi/docs/_index.md new file mode 100644 index 0000000000..2244c2f26b --- /dev/null +++ b/content/hi/docs/_index.md @@ -0,0 +1,6 @@ +--- +linktitle: कुबेरनेट्स प्रलेखन +title: प्रलेखन +sitemap: + priority: 1.0 +--- diff --git a/content/id/community/static/cncf-code-of-conduct.md b/content/id/community/static/cncf-code-of-conduct.md index 7ee127a6b3..9ec35edc3d 100644 --- a/content/id/community/static/cncf-code-of-conduct.md +++ b/content/id/community/static/cncf-code-of-conduct.md @@ 
-24,7 +24,7 @@ Contoh perilaku kasar, melecehkan, atau tidak dapat diterima di Kubernetes dapat Kode Etik ini diadaptasi dari Covenant Contributor , versi 1.2.0, tersedia di - + ### Pedoman Perilaku Acara CNCF diff --git a/content/id/docs/concepts/_index.md b/content/id/docs/concepts/_index.md index 33f4ada445..623b3fac3a 100644 --- a/content/id/docs/concepts/_index.md +++ b/content/id/docs/concepts/_index.md @@ -61,7 +61,7 @@ Kontroler merupakan objek mendasar dengan fungsi tambahan, contoh dari kontroler * [Deployment](/id/docs/concepts/workloads/controllers/deployment/) * [StatefulSet](/id/docs/concepts/workloads/controllers/statefulset/) * [DaemonSet](/id/docs/concepts/workloads/controllers/daemonset/) -* [Job](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/) +* [Job](/id/docs/concepts/workloads/controllers/job/) ## *Control Plane* Kubernetes diff --git a/content/id/docs/concepts/cluster-administration/addons.md b/content/id/docs/concepts/cluster-administration/addons.md index ca50347492..e8a52a4910 100644 --- a/content/id/docs/concepts/cluster-administration/addons.md +++ b/content/id/docs/concepts/cluster-administration/addons.md @@ -27,7 +27,7 @@ Laman ini akan menjabarkan beberapa *add-ons* yang tersedia serta tautan instruk * [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) memungkinkan Kubernetes agar dapat terkoneksi dengan beragam *plugin* CNI, seperti Calico, Canal, Flannel, Romana, atau Weave dengan mulus. * [Contiv](http://contiv.github.io) menyediakan jaringan yang dapat dikonfigurasi (*native* L3 menggunakan BGP, *overlay* menggunakan vxlan, klasik L2, dan Cisco-SDN/ACI) untuk berbagai penggunaan serta *policy framework* yang kaya dan beragam. Proyek Contiv merupakan proyek [open source](http://github.com/contiv). Laman [instalasi](http://github.com/contiv/install) ini akan menjabarkan cara instalasi, baik untuk klaster dengan kubeadm maupun non-kubeadm. 
* [Contrail](http://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/), yang berbasis dari [Tungsten Fabric](https://tungsten.io), merupakan sebuah proyek *open source* yang menyediakan virtualisasi jaringan *multi-cloud* serta platform manajemen *policy*. Contrail dan Tungsten Fabric terintegrasi dengan sistem orkestrasi lainnya seperti Kubernetes, OpenShift, OpenStack dan Mesos, serta menyediakan mode isolasi untuk mesin virtual (VM), kontainer/pod dan *bare metal*. -* [Flannel](https://github.com/coreos/flannel/blob/master/Documentation/kubernetes.md) merupakan penyedia jaringan *overlay* yang dapat digunakan pada Kubernetes. +* [Flannel](https://github.com/flannel-io/flannel#deploying-flannel-manually) merupakan penyedia jaringan *overlay* yang dapat digunakan pada Kubernetes. * [Knitter](https://github.com/ZTE/Knitter/) merupakan solusi jaringan yang mendukung multipel jaringan pada Kubernetes. * [Multus](https://github.com/Intel-Corp/multus-cni) merupakan sebuah multi *plugin* agar Kubernetes mendukung multipel jaringan secara bersamaan sehingga dapat menggunakan semua *plugin* CNI (contoh: Calico, Cilium, Contiv, Flannel), ditambah pula dengan SRIOV, DPDK, OVS-DPDK dan VPP pada *workload* Kubernetes. * [NSX-T](https://docs.vmware.com/en/VMware-NSX-T/2.0/nsxt_20_ncp_kubernetes.pdf) Container Plug-in (NCP) menyediakan integrasi antara VMware NSX-T dan orkestrator kontainer seperti Kubernetes, termasuk juga integrasi antara NSX-T dan platform CaaS/PaaS berbasis kontainer seperti *Pivotal Container Service* (PKS) dan OpenShift. 
diff --git a/content/id/docs/concepts/cluster-administration/flow-control.md b/content/id/docs/concepts/cluster-administration/flow-control.md index b8d8f9acf7..4f6036c0ca 100644 --- a/content/id/docs/concepts/cluster-administration/flow-control.md +++ b/content/id/docs/concepts/cluster-administration/flow-control.md @@ -368,7 +368,7 @@ beban kerja yang berperilaku buruk yang dapat membahayakan kesehatan dari sistem Untuk latar belakang informasi mengenai detail desain dari prioritas dan kesetaraan API, silahkan lihat -[proposal pembaharuan](https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190228-priority-and-fairness.md). +[proposal pembaharuan](https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/1040-priority-and-fairness). Kamu juga dapat membuat saran dan permintaan akan fitur melalui [SIG API Machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery). diff --git a/content/id/docs/concepts/configuration/overview.md b/content/id/docs/concepts/configuration/overview.md index 67fb2061fe..51fb10f5ef 100644 --- a/content/id/docs/concepts/configuration/overview.md +++ b/content/id/docs/concepts/configuration/overview.md @@ -34,7 +34,7 @@ Dokumentasi ini terbuka. Jika Anda menemukan sesuatu yang tidak ada dalam daftar - Jangan gunakan Pods naked (artinya, Pods tidak terikat dengan a [ReplicaSet](/id/docs/concepts/workloads/controllers/replicaset/) a [Deployment](/id/docs/concepts/workloads/controllers/deployment/)) jika kamu bisa menghindarinya. Pod naked tidak akan dijadwal ulang jika terjadi kegagalan pada node. 
- Deployment, yang keduanya menciptakan ReplicaSet untuk memastikan bahwa jumlah Pod yang diinginkan selalu tersedia, dan menentukan strategi untuk mengganti Pods (seperti [RollingUpdate](/id/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment)), hampir selalu lebih disukai daripada membuat Pods secara langsung, kecuali untuk beberapa yang eksplisit [`restartPolicy: Never`](/id/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) banyak skenario . A [Job](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/) mungkin juga sesuai. + Deployment, yang keduanya menciptakan ReplicaSet untuk memastikan bahwa jumlah Pod yang diinginkan selalu tersedia, dan menentukan strategi untuk mengganti Pods (seperti [RollingUpdate](/id/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment)), hampir selalu lebih disukai daripada membuat Pods secara langsung, kecuali untuk beberapa yang eksplisit [`restartPolicy: Never`](/id/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) banyak skenario . A [Job](/id/docs/concepts/workloads/controllers/job/) mungkin juga sesuai. 
## Services diff --git a/content/id/docs/concepts/extend-kubernetes/operator.md b/content/id/docs/concepts/extend-kubernetes/operator.md index 315ae35e3d..b9a8bf5a06 100644 --- a/content/id/docs/concepts/extend-kubernetes/operator.md +++ b/content/id/docs/concepts/extend-kubernetes/operator.md @@ -132,7 +132,7 @@ menggunakan bahasa / _runtime_ yang dapat bertindak sebagai * Menggunakan perangkat yang ada untuk menulis Operator kamu sendiri, misalnya: * menggunakan [KUDO](https://kudo.dev/) (Kubernetes Universal Declarative Operator) * menggunakan [kubebuilder](https://book.kubebuilder.io/) - * menggunakan [Metacontroller](https://metacontroller.app/) bersama dengan + * menggunakan [Metacontroller](https://metacontroller.github.io/metacontroller/intro.html) bersama dengan `WebHooks` yang kamu implementasikan sendiri * menggunakan the [Operator _Framework_](https://github.com/operator-framework/getting-started) * [Terbitkan](https://operatorhub.io/) Operator kamu agar dapat digunakan oleh diff --git a/content/id/docs/concepts/policy/_index.md b/content/id/docs/concepts/policy/_index.md old mode 100755 new mode 100644 diff --git a/content/id/docs/concepts/storage/_index.md b/content/id/docs/concepts/storage/_index.md old mode 100755 new mode 100644 diff --git a/content/id/docs/concepts/storage/storage-classes.md b/content/id/docs/concepts/storage/storage-classes.md index c5fc71a8de..083620d937 100644 --- a/content/id/docs/concepts/storage/storage-classes.md +++ b/content/id/docs/concepts/storage/storage-classes.md @@ -595,11 +595,11 @@ metadata: provisioner: kubernetes.io/azure-disk parameters: storageaccounttype: Standard_LRS - kind: Shared + kind: managed ``` * `storageaccounttype`: Akun penyimpanan Azure yang ada pada tingkatan Sku. Nilai _default_-nya adalah kosong. -* `kind`: Nilai yang mungkin adalah `shared` (default), `dedicated`, dan `managed`. +* `kind`: Nilai yang mungkin adalah `shared`, `dedicated`, dan `managed` (default). 
Ketika `kind` yang digunakan adalah `shared`, semua disk yang tidak di-_manage_ akan dibuat pada beberapa akun penyimpanan yang ada pada grup sumber daya yang sama dengan klaster. Ketika `kind` yang digunakan adalah `dedicated`, sebuah akun penyimpanan diff --git a/content/id/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/content/id/docs/concepts/workloads/controllers/job.md similarity index 99% rename from content/id/docs/concepts/workloads/controllers/jobs-run-to-completion.md rename to content/id/docs/concepts/workloads/controllers/job.md index 5f4720646b..4a7cce3f2a 100644 --- a/content/id/docs/concepts/workloads/controllers/jobs-run-to-completion.md +++ b/content/id/docs/concepts/workloads/controllers/job.md @@ -1,5 +1,5 @@ --- -title: Job - Dijalankan Hingga Selesai +title: Jobs content_type: concept feature: title: Eksekusi batch diff --git a/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md index 97aa5a47f3..0f462008ee 100644 --- a/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -10,7 +10,7 @@ weight: 65 Pengendali TTL menyediakan mekanisme TTL yang membatasi umur dari suatu objek sumber daya yang telah selesai digunakan. Pengendali TTL untuk saat ini hanya menangani -[Jobs](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/), +{{< glossary_tooltip text="Jobs" term_id="job" >}}, dan nantinya bisa saja digunakan untuk sumber daya lain yang telah selesai digunakan misalnya saja Pod atau sumber daya khusus (_custom resource_) lainnya. @@ -32,7 +32,7 @@ Pengendali TTL untuk saat ini hanya mendukung Job. 
Sebuah operator klaster dapat menggunakan fitur ini untuk membersihkan Job yang telah dieksekusi (baik `Complete` atau `Failed`) secara otomatis dengan menentukan _field_ `.spec.ttlSecondsAfterFinished` pada Job, seperti yang tertera di -[contoh](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically). +[contoh](/id/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically). Pengendali TTL akan berasumsi bahwa sebuah sumber daya dapat dihapus apabila TTL dari sumber daya tersebut telah habis. Proses dihapusnya sumber daya ini dilakukan secara berantai, dimana sumber daya lain yang diff --git a/content/id/docs/contribute/localization_id.md b/content/id/docs/contribute/localization_id.md index 5a9c491297..beffa5be6c 100644 --- a/content/id/docs/contribute/localization_id.md +++ b/content/id/docs/contribute/localization_id.md @@ -107,7 +107,7 @@ dapat menemukan kata-kata tersebut dalam bahasa Indonesia. ### Panduan untuk kata-kata API Objek Kubernetes Gunakan gaya "CamelCase" untuk menulis objek API Kubernetes, lihat daftar -lengkapnya [di sini](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/). +lengkapnya [di sini](/docs/reference/kubernetes-api/). Sebagai contoh: * *Benar*: PersistentVolume. *Salah*: volume persisten, `PersistentVolume`, @@ -130,7 +130,7 @@ ditulis dalam huruf kapital pada halaman asli bahasa Inggris. ### Panduan untuk "Feature Gate" Kubernetes -Istilah [_functional gate_](https://kubernetes.io/ko/docs/reference/command-line-tools-reference/feature-gates/) +Istilah [_feature gate_](/docs/reference/command-line-tools-reference/feature-gates/) Kubernetes tidak perlu diterjemahkan ke dalam bahasa Indonesia dan tetap dipertahankan dalam bentuk aslinya. 
@@ -175,4 +175,4 @@ scale | | skala | | process | kata kerja | memproses | https://kbbi.web.id/proses | replica | kata benda | replika | https://kbbi.web.id/replika | flag | | tanda, parameter, argumen | | -event | | _event_ | | \ No newline at end of file +event | | _event_ | | diff --git a/content/id/docs/reference/glossary/controller.md b/content/id/docs/reference/glossary/controller.md old mode 100755 new mode 100644 diff --git a/content/id/docs/reference/glossary/managed-service.md b/content/id/docs/reference/glossary/managed-service.md old mode 100755 new mode 100644 diff --git a/content/id/docs/reference/glossary/name.md b/content/id/docs/reference/glossary/name.md old mode 100755 new mode 100644 diff --git a/content/id/docs/reference/glossary/service-broker.md b/content/id/docs/reference/glossary/service-broker.md old mode 100755 new mode 100644 diff --git a/content/id/docs/reference/glossary/service-catalog.md b/content/id/docs/reference/glossary/service-catalog.md old mode 100755 new mode 100644 diff --git a/content/id/docs/reference/glossary/service.md b/content/id/docs/reference/glossary/service.md old mode 100755 new mode 100644 diff --git a/content/id/docs/reference/glossary/uid.md b/content/id/docs/reference/glossary/uid.md old mode 100755 new mode 100644 diff --git a/content/id/docs/setup/best-practices/_index.md b/content/id/docs/setup/best-practices/_index.md old mode 100755 new mode 100644 diff --git a/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index fa0b0ce7f0..7e4a77f3de 100644 --- a/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -225,16 +225,18 @@ Menginstal _plugin_ CNI (dibutuhkan untuk kebanyakan jaringan Pod): ```bash CNI_VERSION="v0.8.2" +ARCH="amd64" mkdir -p /opt/cni/bin -curl -L 
"https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz +curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz ``` Menginstal crictl (dibutuhkan untuk kubeadm / Kubelet Container Runtime Interface (CRI)) ```bash CRICTL_VERSION="v1.17.0" +ARCH="amd64" mkdir -p /opt/bin -curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz +curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${ARCH}.tar.gz" | sudo tar -C $DOWNLOAD_DIR -xz ``` Menginstal `kubeadm`, `kubelet`, `kubectl` dan menambahkan _systemd service_ `kubelet`: @@ -243,8 +245,9 @@ Menginstal `kubeadm`, `kubelet`, `kubectl` dan menambahkan _systemd service_ `ku RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" mkdir -p /opt/bin +ARCH="amd64" cd /opt/bin -curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} +curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/${ARCH}/{kubeadm,kubelet,kubectl} chmod +x {kubeadm,kubelet,kubectl} RELEASE_VERSION="v0.2.7" diff --git a/content/id/docs/tasks/access-application-cluster/_index.md b/content/id/docs/tasks/access-application-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/id/docs/tasks/access-application-cluster/list-all-running-container-images.md b/content/id/docs/tasks/access-application-cluster/list-all-running-container-images.md index f2140e5276..86a6b267e2 100644 --- a/content/id/docs/tasks/access-application-cluster/list-all-running-container-images.md +++ 
b/content/id/docs/tasks/access-application-cluster/list-all-running-container-images.md @@ -28,7 +28,7 @@ Container untuk masing-masing Pod. - Silakan ambil semua Pod dalam Namespace dengan menggunakan perintah `kubectl get pods --all-namespaces` - Silakan format keluarannya agar hanya menyertakan daftar nama _image_ dari Container - dengan menggunakan perintah `-o jsonpath={..image}`. Perintah ini akan mem-_parsing field_ + dengan menggunakan perintah `-o jsonpath={.items[*].spec.containers[*].image}`. Perintah ini akan mem-_parsing field_ `image` dari keluaran json yang dihasilkan. - Silakan lihat [referensi jsonpath](/docs/user-guide/jsonpath/) untuk informasi lebih lanjut tentang cara menggunakan `jsonpath`. @@ -38,7 +38,7 @@ Container untuk masing-masing Pod. - Gunakan `uniq` untuk mengumpulkan jumlah _image_ ```sh -kubectl get pods --all-namespaces -o jsonpath="{..image}" |\ +kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" |\ tr -s '[[:space:]]' '\n' |\ sort |\ uniq -c @@ -86,7 +86,7 @@ Untuk menargetkan hanya Pod yang cocok dengan label tertentu saja, gunakan tanda dibawah ini akan menghasilkan Pod dengan label yang cocok dengan `app=nginx`. ```sh -kubectl get pods --all-namespaces -o=jsonpath="{..image}" -l app=nginx +kubectl get pods --all-namespaces -o=jsonpath="{.items[*].spec.containers[*].image}" -l app=nginx ``` ## Membuat daftar _image_ Container yang difilter berdasarkan Namespace Pod @@ -95,7 +95,7 @@ Untuk hanya menargetkan Pod pada Namespace tertentu, gunakankan tanda Namespace. dibawah ini hanya menyaring Pod pada Namespace `kube-system`. 
```sh -kubectl get pods --namespace kube-system -o jsonpath="{..image}" +kubectl get pods --namespace kube-system -o jsonpath="{.items[*].spec.containers[*].image}" ``` ## Membuat daftar _image_ Container dengan menggunakan go-template sebagai alternatif dari jsonpath diff --git a/content/id/docs/tasks/administer-cluster/_index.md b/content/id/docs/tasks/administer-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/id/docs/tasks/configure-pod-container/_index.md b/content/id/docs/tasks/configure-pod-container/_index.md old mode 100755 new mode 100644 diff --git a/content/id/docs/tasks/configure-pod-container/configure-service-account.md b/content/id/docs/tasks/configure-pod-container/configure-service-account.md index 4a4d5999db..e53812d65a 100644 --- a/content/id/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/id/docs/tasks/configure-pod-container/configure-service-account.md @@ -151,8 +151,8 @@ Keluarannya akan serupa dengan: Name: build-robot-secret Namespace: default Labels: -Annotations: kubernetes.io/service-account.name=build-robot - kubernetes.io/service-account.uid=da68f9c6-9d26-11e7-b84e-002dc52800da +Annotations: kubernetes.io/service-account.name: build-robot + kubernetes.io/service-account.uid: da68f9c6-9d26-11e7-b84e-002dc52800da Type: kubernetes.io/service-account-token diff --git a/content/id/docs/tasks/debug-application-cluster/_index.md b/content/id/docs/tasks/debug-application-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/id/docs/tasks/inject-data-application/_index.md b/content/id/docs/tasks/inject-data-application/_index.md old mode 100755 new mode 100644 diff --git a/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md index c2c4b9399f..5a850cb739 100644 --- a/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md +++ b/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -146,7 +146,7 
@@ Semua modifikasi pada sebuah CronJob, terutama `.spec`, akan diterapkan pada pro `.spec.schedule` adalah _field_ yang wajib diisi dari sebuah `.spec` Dibutuhkan sebuah format string [Cron](https://en.wikipedia.org/wiki/Cron), misalnya `0 * * * *` atau `@hourly`, sebagai jadwal Job untuk dibuat dan dieksekusi. -Format ini juga mencakup nilai langkah `Vixie cron`. Seperti penjelasan di [FreeBSD manual](https://www.freebsd.org/cgi/man.cgi?crontab%285%29): +Format ini juga mencakup nilai langkah "Vixie cron". Seperti penjelasan di [FreeBSD manual](https://www.freebsd.org/cgi/man.cgi?crontab%285%29): > Nilai langkah dapat digunakan bersama dengan rentang. Sebuah rentang diikuti dengan > `/` menentukan lompatan angka melalui rentang. @@ -162,8 +162,8 @@ Sebuah tanda tanya (`?`) dalam penjadwalan memiliki makna yang sama dengan tanda ### Templat Job `.spec.JobTemplate` adalah templat untuk sebuah Job, dan itu wajib. -Templat Job memiliki skema yang sama dengan [Job](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/), kecuali jika bersarang dan tidak memiliki sebuah `apiVersion` atau `kind`. -Untuk informasi lebih lanjut tentang menulis sebuah Job `.spec` lihat [Menulis spesifikasi Job](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/#writing-a-job-spec). +Templat Job memiliki skema yang sama dengan [Job](/id/docs/concepts/workloads/controllers/job/), kecuali jika bersarang dan tidak memiliki sebuah `apiVersion` atau `kind`. +Untuk informasi lebih lanjut tentang menulis sebuah Job `.spec` lihat [Menulis spesifikasi Job](/id/docs/concepts/workloads/controllers/job/#writing-a-job-spec). 
### _Starting Deadline_ diff --git a/content/id/docs/tasks/tls/_index.md b/content/id/docs/tasks/tls/_index.md old mode 100755 new mode 100644 diff --git a/content/id/docs/tasks/tools/_index.md b/content/id/docs/tasks/tools/_index.md old mode 100755 new mode 100644 diff --git a/content/id/docs/tutorials/stateful-application/_index.md b/content/id/docs/tutorials/stateful-application/_index.md old mode 100755 new mode 100644 diff --git a/content/id/docs/tutorials/stateful-application/basic-stateful-set.md b/content/id/docs/tutorials/stateful-application/basic-stateful-set.md index 35c27666b4..0e15eceddb 100644 --- a/content/id/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/id/docs/tutorials/stateful-application/basic-stateful-set.md @@ -845,12 +845,12 @@ kubectl get pods -w -l app=nginx ``` Gunakan perintah [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands/#delete) -untuk menghapus StatefulSet. Pastikan kamu menambahkan parameter `--cascade=false` ke +untuk menghapus StatefulSet. Pastikan kamu menambahkan parameter `--cascade=orphan` ke perintah tersebut. Parameter ini memberitahukan Kubernetes untuk hanya menghapus StatefulSet dan agar tidak menghapus Pod yang ada padanya. ```shell -kubectl delete statefulset web --cascade=false +kubectl delete statefulset web --cascade=orphan ``` ``` statefulset.apps "web" deleted @@ -965,7 +965,7 @@ kubectl get pods -w -l app=nginx ``` Pada terminal yang lain, hapus StatefulSet lagi. Kali ini, hilangkan parameter -`--cascade=false`. +`--cascade=orphan`. 
```shell kubectl delete statefulset web diff --git a/content/id/examples/controllers/daemonset.yaml b/content/id/examples/controllers/daemonset.yaml index 1bfa082833..375391826d 100644 --- a/content/id/examples/controllers/daemonset.yaml +++ b/content/id/examples/controllers/daemonset.yaml @@ -16,6 +16,7 @@ spec: spec: tolerations: - key: node-role.kubernetes.io/master + operator: Exists effect: NoSchedule containers: - name: fluentd-elasticsearch diff --git a/content/it/docs/concepts/architecture/_index.md b/content/it/docs/concepts/architecture/_index.md old mode 100755 new mode 100644 diff --git a/content/it/docs/concepts/architecture/nodes.md b/content/it/docs/concepts/architecture/nodes.md index 0494050420..c3c58be9d4 100644 --- a/content/it/docs/concepts/architecture/nodes.md +++ b/content/it/docs/concepts/architecture/nodes.md @@ -156,8 +156,9 @@ Condizione Notata quando un nodo diventa irraggiungibile (ad esempio, il control ricevere heartbeat per qualche motivo, ad es. a causa del fatto che il nodo si trova in basso), e poi in seguito sfratto tutti i pod dal nodo (usando una terminazione elegante) se il nodo continua essere irraggiungibile. (I timeout predefiniti sono 40 secondi per iniziare la segnalazione -ConditionUnknown e 5m dopo di ciò per iniziare a sfrattare i pod.) Il controller del nodo -controlla lo stato di ogni nodo ogni `--node-monitor-period` secondi. +ConditionUnknown e 5m dopo di ciò per iniziare a sfrattare i pod.) + +Il controller del nodo controlla lo stato di ogni nodo ogni `--node-monitor-period` secondi. Nelle versioni di Kubernetes precedenti alla 1.13, NodeStatus è l'heartbeat di nodo. A partire da Kubernetes 1.13, la funzionalità di lease del nodo viene introdotta come un @@ -191,8 +192,9 @@ lo stesso tempo. 
Se la frazione di nodi malsani è almeno se il cluster è piccolo (cioè ha meno o uguale a `--large-cluster-size-threshold` nodes - default 50) quindi gli sfratti sono fermato, altrimenti il ​​tasso di sfratto è ridotto a -`--secondary-node-eviction-rate` (default 0.01) al secondo. La ragione per cui -le politiche sono implementate per zona di disponibilità è perché una zona di disponibilità +`--secondary-node-eviction-rate` (default 0.01) al secondo. + +La ragione per cui le politiche sono implementate per zona di disponibilità è perché una zona di disponibilità potrebbe divenire partizionato dal master mentre gli altri rimangono connessi. Se il tuo cluster non si estende su più zone di disponibilità del provider cloud, quindi c'è solo una zona di disponibilità (l'intero cluster). diff --git a/content/it/docs/concepts/cluster-administration/_index.md b/content/it/docs/concepts/cluster-administration/_index.md old mode 100755 new mode 100644 diff --git a/content/it/docs/concepts/cluster-administration/addons.md b/content/it/docs/concepts/cluster-administration/addons.md index 3a91ff7b93..8bd3c154af 100644 --- a/content/it/docs/concepts/cluster-administration/addons.md +++ b/content/it/docs/concepts/cluster-administration/addons.md @@ -26,7 +26,7 @@ I componenti aggiuntivi in ogni sezione sono ordinati alfabeticamente - l'ordine * [Cilium](https://github.com/cilium/cilium) è un plug-in di criteri di rete e di rete L3 in grado di applicare in modo trasparente le politiche HTTP / API / L7. Sono supportate entrambe le modalità di routing e overlay / incapsulamento. * [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) consente a Kubernetes di connettersi senza problemi a una scelta di plugin CNI, come Calico, Canal, Flannel, Romana o Weave. * [Contiv](http://contiv.github.io) offre networking configurabile (L3 nativo con BGP, overlay con vxlan, L2 classico e Cisco-SDN / ACI) per vari casi d'uso e un ricco framework di policy. 
Il progetto Contiv è completamente [open source](http://github.com/contiv). Il [programma di installazione](http://github.com/contiv/install) fornisce sia opzioni di installazione basate su kubeadm che non su Kubeadm. -* [Flanella](https://github.com/coreos/flannel/blob/master/Documentation/kubernetes.md) è un provider di reti sovrapposte che può essere utilizzato con Kubernetes. +* [Flannel](https://github.com/flannel-io/flannel#deploying-flannel-manually) è un provider di reti sovrapposte che può essere utilizzato con Kubernetes. * [Knitter](https://github.com/ZTE/Knitter/) è una soluzione di rete che supporta più reti in Kubernetes. * [Multus](https://github.com/Intel-Corp/multus-cni) è un multi-plugin per il supporto di più reti in Kubernetes per supportare tutti i plugin CNI (es. Calico, Cilium, Contiv, Flannel), oltre a SRIOV, DPDK, OVS-DPDK e carichi di lavoro basati su VPP in Kubernetes. * [NSX-T](https://docs.vmware.com/en/VMware-NSX-T/2.0/nsxt_20_ncp_kubernetes.pdf) Container Plug-in (NCP) fornisce l'integrazione tra VMware NSX-T e orchestratori di contenitori come Kubernetes, oltre all'integrazione tra NSX-T e piattaforme CaaS / PaaS basate su container come Pivotal Container Service (PKS) e OpenShift. 
diff --git a/content/it/docs/concepts/containers/_index.md b/content/it/docs/concepts/containers/_index.md old mode 100755 new mode 100644 diff --git a/content/it/docs/concepts/overview/_index.md b/content/it/docs/concepts/overview/_index.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/cloud-controller-manager.md b/content/it/docs/reference/glossary/cloud-controller-manager.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/cluster.md b/content/it/docs/reference/glossary/cluster.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/container-runtime.md b/content/it/docs/reference/glossary/container-runtime.md index 640b3eaa17..4b6b5d1e53 100644 --- a/content/it/docs/reference/glossary/container-runtime.md +++ b/content/it/docs/reference/glossary/container-runtime.md @@ -2,7 +2,7 @@ title: Container Runtime id: container-runtime date: 2019-06-05 -full_link: /docs/reference/generated/container-runtime +full_link: /docs/setup/production-environment/container-runtimes short_description: > Il container runtime è il software che è responsabile per l'esecuzione dei container. 
diff --git a/content/it/docs/reference/glossary/container.md b/content/it/docs/reference/glossary/container.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/controller.md b/content/it/docs/reference/glossary/controller.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/daemonset.md b/content/it/docs/reference/glossary/daemonset.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/deployment.md b/content/it/docs/reference/glossary/deployment.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/docker.md b/content/it/docs/reference/glossary/docker.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/etcd.md b/content/it/docs/reference/glossary/etcd.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/job.md b/content/it/docs/reference/glossary/job.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/kube-apiserver.md b/content/it/docs/reference/glossary/kube-apiserver.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/kube-controller-manager.md b/content/it/docs/reference/glossary/kube-controller-manager.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/kube-proxy.md b/content/it/docs/reference/glossary/kube-proxy.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/kube-scheduler.md b/content/it/docs/reference/glossary/kube-scheduler.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/kubeadm.md b/content/it/docs/reference/glossary/kubeadm.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/kubelet.md b/content/it/docs/reference/glossary/kubelet.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/label.md b/content/it/docs/reference/glossary/label.md old mode 100755 new 
mode 100644 diff --git a/content/it/docs/reference/glossary/node.md b/content/it/docs/reference/glossary/node.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/pod.md b/content/it/docs/reference/glossary/pod.md old mode 100755 new mode 100644 diff --git a/content/it/docs/reference/glossary/statefulset.md b/content/it/docs/reference/glossary/statefulset.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/concepts/cluster-administration/_index.md b/content/ja/docs/concepts/cluster-administration/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/concepts/cluster-administration/addons.md b/content/ja/docs/concepts/cluster-administration/addons.md index b50beb85f5..c07cfce07c 100644 --- a/content/ja/docs/concepts/cluster-administration/addons.md +++ b/content/ja/docs/concepts/cluster-administration/addons.md @@ -23,7 +23,7 @@ content_type: concept * [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie)は、KubernetesをCalico、Canal、Flannel、Romana、Weaveなど選択したCNIプラグインをシームレスに接続できるようにするプラグインです。 * [Contiv](https://contiv.github.io)は、さまざまなユースケースと豊富なポリシーフレームワーク向けに設定可能なネットワーク(BGPを使用したネイティブのL3、vxlanを使用したオーバーレイ、古典的なL2、Cisco-SDN/ACI)を提供します。Contivプロジェクトは完全に[オープンソース](https://github.com/contiv)です。[インストーラ](https://github.com/contiv/install)はkubeadmとkubeadm以外の両方をベースとしたインストールオプションがあります。 * [Contrail](https://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/)は、[Tungsten Fabric](https://tungsten.io)をベースにしている、オープンソースでマルチクラウドに対応したネットワーク仮想化およびポリシー管理プラットフォームです。ContrailおよびTungsten Fabricは、Kubernetes、OpenShift、OpenStack、Mesosなどのオーケストレーションシステムと統合されており、仮想マシン、コンテナ/Pod、ベアメタルのワークロードに隔離モードを提供します。 -* [Flannel](https://github.com/coreos/flannel/blob/master/Documentation/kubernetes.md)は、Kubernetesで使用できるオーバーレイネットワークプロバイダーです。 +* [Flannel](https://github.com/flannel-io/flannel#deploying-flannel-manually)は、Kubernetesで使用できるオーバーレイネットワークプロバイダーです。 * [Knitter](https://github.com/ZTE/Knitter/)は、1つのKubernetes 
Podで複数のネットワークインターフェイスをサポートするためのプラグインです。 * [Multus](https://github.com/Intel-Corp/multus-cni)は、すべてのCNIプラグイン(たとえば、Calico、Cilium、Contiv、Flannel)に加えて、SRIOV、DPDK、OVS-DPDK、VPPをベースとするKubernetes上のワークロードをサポートする、複数のネットワークサポートのためのマルチプラグインです。 * [OVN-Kubernetes](https://github.com/ovn-org/ovn-kubernetes/)は、Open vSwitch(OVS)プロジェクトから生まれた仮想ネットワーク実装である[OVN(Open Virtual Network)](https://github.com/ovn-org/ovn/)をベースとする、Kubernetesのためのネットワークプロバイダです。OVN-Kubernetesは、OVSベースのロードバランサーおよびネットワークポリシーの実装を含む、Kubernetes向けのオーバーレイベースのネットワーク実装を提供します。 diff --git a/content/ja/docs/concepts/configuration/_index.md b/content/ja/docs/concepts/configuration/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/concepts/configuration/secret.md b/content/ja/docs/concepts/configuration/secret.md index f7586e5205..f2a71e1860 100644 --- a/content/ja/docs/concepts/configuration/secret.md +++ b/content/ja/docs/concepts/configuration/secret.md @@ -194,7 +194,7 @@ Basic認証Secret型は、ユーザーの便宜のためにのみ提供されて ### SSH authentication secrets -組み込みのタイプ`kubernetes.io/ssh-auth`は、SSH認証で使用されるデータを保存するために提供されています。このSecret型を使用する場合、使用するSSH認証として`data`(または`stringData`)フィールドに`ssh-privatekey`キーと値のペアを指定する必要があります。 +組み込みのタイプ`kubernetes.io/ssh-auth`は、SSH認証で使用されるデータを保存するために提供されています。このSecret型を使用する場合、使用するSSH認証として`data`(または`stringData`)フィールドに`ssh-privatekey`キーと値のペアを指定する必要があります。 次のYAMLはSSH authentication Secretの設定例です: @@ -284,7 +284,7 @@ Bootstrap type Secretには、`data`で指定された次のキーがありま - `usage-bootstrap-`:Bootstrap tokenの追加の使用法を示すブールフラグ。 - `auth-extra-groups`:`system:bootstrappers`グループに加えて認証されるグループ名のコンマ区切りのリスト。 -上記のYAMLは、値がすべてbase64でエンコードされた文字列であるため、混乱しているように見える場合があります。実際、次のYAMLを使用して同一のSecretを作成できます。 +上記のYAMLは、値がすべてbase64でエンコードされた文字列であるため、分かりづらく見えるかもしれません。実際には、次のYAMLを使用して同一のSecretを作成できます。 ```yaml apiVersion: v1 diff --git a/content/ja/docs/concepts/containers/_index.md b/content/ja/docs/concepts/containers/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/concepts/extend-kubernetes/operator.md 
b/content/ja/docs/concepts/extend-kubernetes/operator.md index c3857598f2..116ebbb434 100644 --- a/content/ja/docs/concepts/extend-kubernetes/operator.md +++ b/content/ja/docs/concepts/extend-kubernetes/operator.md @@ -87,10 +87,12 @@ kubectl edit SampleDB/example-database # 手動でいくつかの設定を変更 * [Custom Resources](/ja/docs/concepts/extend-kubernetes/api-extension/custom-resources/)をより深く学びます * ユースケースに合わせた、既製のオペレーターを[OperatorHub.io](https://operatorhub.io/)から見つけます * 自前のオペレーターを書くために既存のツールを使います、例: + * [Charmed Operator Framework](https://juju.is/) * [KUDO](https://kudo.dev/)(Kubernetes Universal Declarative Operator)を使います * [kubebuilder](https://book.kubebuilder.io/)を使います - * [Metacontroller](https://metacontroller.app/)を自分で実装したWebHooksと一緒に使います + * [Metacontroller](https://metacontroller.github.io/metacontroller/intro.html)を自分で実装したWebHooksと一緒に使います * [Operator Framework](https://operatorframework.io)を使います + * [shell-operator](https://github.com/flant/shell-operator) * 自前のオペレーターを他のユーザーのために[公開](https://operatorhub.io/)します * オペレーターパターンを紹介している[CoreOSオリジナル記事](https://coreos.com/blog/introducing-operators.html)を読みます * Google Cloudが出したオペレーター作成のベストプラクティス[記事](https://cloud.google.com/blog/products/containers-kubernetes/best-practices-for-building-kubernetes-operators-and-stateful-apps)を読みます diff --git a/content/ja/docs/concepts/overview/_index.md b/content/ja/docs/concepts/overview/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/concepts/overview/working-with-objects/_index.md b/content/ja/docs/concepts/overview/working-with-objects/_index.md old mode 100755 new mode 100644 index d4a9f2e6b6..10da27655c --- a/content/ja/docs/concepts/overview/working-with-objects/_index.md +++ b/content/ja/docs/concepts/overview/working-with-objects/_index.md @@ -1,5 +1,8 @@ --- -title: "Kubernetesのオブジェクトについて" +title: "Kubernetesオブジェクトを利用する" weight: 40 +description: > + Kubernetesオブジェクトは、Kubernetes上で永続的なエンティティです。Kubernetesはこれらのエンティティを使い、クラスターの状態を表現します。 + 
Kubernetesオブジェクトモデルと、これらのオブジェクトの利用方法について学びます。 --- diff --git a/content/ja/docs/concepts/overview/working-with-objects/object-management.md b/content/ja/docs/concepts/overview/working-with-objects/object-management.md index 49092c6dea..591a978360 100644 --- a/content/ja/docs/concepts/overview/working-with-objects/object-management.md +++ b/content/ja/docs/concepts/overview/working-with-objects/object-management.md @@ -120,7 +120,7 @@ kubectl replace -f nginx.yaml ## 宣言型オブジェクト設定 宣言型オブジェクト設定を利用する場合、ユーザーはローカルに置かれている設定ファイルを操作します。 -しかし、ユーザーは操作内容をファイルに記載しません。作成、更新、そして削除といった操作はオブジェクトごとに`kubectl`が検出します。 +しかし、ユーザーはファイルに対する操作内容を指定しません。作成、更新、そして削除といった操作はオブジェクトごとに`kubectl`が検出します。 この仕組みが、異なるオブジェクトごとに異なる操作をディレクトリに対して行うことを可能にしています。 {{< note >}} diff --git a/content/ja/docs/concepts/policy/_index.md b/content/ja/docs/concepts/policy/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/concepts/services-networking/_index.md b/content/ja/docs/concepts/services-networking/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/concepts/services-networking/dual-stack.md b/content/ja/docs/concepts/services-networking/dual-stack.md index 3c320e446a..3bad029501 100644 --- a/content/ja/docs/concepts/services-networking/dual-stack.md +++ b/content/ja/docs/concepts/services-networking/dual-stack.md @@ -40,7 +40,7 @@ IPv4/IPv6デュアルスタックを有効にするには、クラスターの * kube-apiserver: * `--feature-gates="IPv6DualStack=true"` - * `--service-cluster-ip-range=, + * `--service-cluster-ip-range=,` * kube-controller-manager: * `--feature-gates="IPv6DualStack=true"` * `--cluster-cidr=,` diff --git a/content/ja/docs/concepts/services-networking/network-policies.md b/content/ja/docs/concepts/services-networking/network-policies.md index b11bcead2b..441063aeae 100644 --- a/content/ja/docs/concepts/services-networking/network-policies.md +++ b/content/ja/docs/concepts/services-networking/network-policies.md @@ -207,7 +207,7 @@ SCTPプロトコルのネットワークポリシーをサポートする{{< glo 
Kubernetes1.20現在、ネットワークポリシーAPIに以下の機能は存在しません。 しかし、オペレーティングシステムのコンポーネント(SELinux、OpenVSwitch、IPTablesなど)、レイヤ7の技術(Ingressコントローラー、サービスメッシュ実装)、もしくはアドミッションコントローラーを使用して回避策を実装できる場合があります。 -Kubernetesのネットワークセキュリティを初めて使用する場合は、ネットワークポリシーAPIを使用して以下ののユーザーストーリーを(まだ)実装できないことに注意してください。これらのユーザーストーリーの一部(全てではありません)は、ネットワークポリシーAPIの将来のリリースで活発に議論されています。 +Kubernetesのネットワークセキュリティを初めて使用する場合は、ネットワークポリシーAPIを使用して以下のユーザーストーリーを(まだ)実装できないことに注意してください。これらのユーザーストーリーの一部(全てではありません)は、ネットワークポリシーAPIの将来のリリースで活発に議論されています。 - クラスター内トラフィックを強制的に共通ゲートウェイを通過させる(これは、サービスメッシュもしくは他のプロキシで提供するのが最適な場合があります)。 - TLS関連のもの(これにはサービスメッシュまたはIngressコントローラを使用します)。 diff --git a/content/ja/docs/concepts/storage/_index.md b/content/ja/docs/concepts/storage/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/concepts/storage/persistent-volumes.md b/content/ja/docs/concepts/storage/persistent-volumes.md index 1940b72cab..b22ed7d8eb 100644 --- a/content/ja/docs/concepts/storage/persistent-volumes.md +++ b/content/ja/docs/concepts/storage/persistent-volumes.md @@ -431,7 +431,7 @@ PVはクラスを持つことができます。これは`storageClassName`属性 ### マウントオプション -Kubernets管理者は永続ボリュームがNodeにマウントされるときの追加マウントオプションを指定できます。 +Kubernetes管理者は永続ボリュームがNodeにマウントされるときの追加マウントオプションを指定できます。 {{< note >}} すべての永続ボリュームタイプがすべてのマウントオプションをサポートするわけではありません。 diff --git a/content/ja/docs/concepts/workloads/pods/_index.md b/content/ja/docs/concepts/workloads/pods/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/concepts/workloads/pods/pod-overview.md b/content/ja/docs/concepts/workloads/pods/pod-overview.md index 4d286fdbcf..6e053f9ab1 100644 --- a/content/ja/docs/concepts/workloads/pods/pod-overview.md +++ b/content/ja/docs/concepts/workloads/pods/pod-overview.md @@ -61,7 +61,7 @@ Podは、Podによって構成されたコンテナ群のために2種類の共 ## Podを利用する -ユーザーはまれに、Kubenetes内で独立したPodを直接作成する場合があります(シングルトンPodなど)。 +ユーザーはまれに、Kubernetes内で独立したPodを直接作成する場合があります(シングルトンPodなど)。 これはPodが比較的、一時的な使い捨てエンティティとしてデザインされているためです。Podが作成された時(ユーザーによって直接的、またはコントローラーによって間接的に作成された場合)、ユーザーのクラスター内の単一の{{< 
glossary_tooltip term_id="node" >}}上で稼働するようにスケジューリングされます。そのPodはプロセスが停止されたり、Podオブジェクトが削除されたり、Podがリソースの欠如のために*追い出され* たり、ノードが故障するまでノード上に残り続けます。 {{< note >}} diff --git a/content/ja/docs/contribute/review/for-approvers.md b/content/ja/docs/contribute/review/for-approvers.md new file mode 100644 index 0000000000..3a96595ea8 --- /dev/null +++ b/content/ja/docs/contribute/review/for-approvers.md @@ -0,0 +1,195 @@ +--- +title: approverとreviewer向けのレビュー +linktitle: approverとreviewer向け +slug: for-approvers +content_type: concept +weight: 20 +--- + + + +SIG Docsの[Reviewer(レビュアー)](/docs/contribute/participate/#reviewers)と[Approver(承認者)](/docs/contribute/participate/#approvers)は、変更をレビューする時にいくつか追加の作業を行います。 + +毎週、docsのメンバーの特定のapproverのボランティアは、pull requestのトリアージとレビューを担当します。この担当者は、その週の「PR Wrangler(PRの世話人)」と呼ばれます。詳しい情報は、[PR Wrangler scheduler](https://github.com/kubernetes/website/wiki/PR-Wranglers)を参照してください。PR Wranglerになるには、週次のSIG Docsミーティングに参加し、ボランティアをします。もしその週にスケジュールされていなくても、活発なレビューが行われていないpull request(PR)をレビューすることは問題ありません。 + +このローテーションに加えて、変更されたファイルのオーナーに基づいて、botがPRにreviewerとapproverを割り当てます。 + + + +## PRをレビューする + +Kubernetesのドキュメントは[Kubernetesコードレビュープロセス](https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md#the-code-review-process)に従います。 + +[pull requestのレビュー](/ja/docs/contribute/review/reviewing-prs/)に書かれているすべてのことが適用されますが、ReviewerとApproverはそれに加えて次のことも行います。 + +- 必要に応じて、`/assign`Prowコマンドを使用して、特定のreviewerにPRを割り当てる。これは、コードのコントリビューターからの技術的なレビューが必要な場合には特に重要です。 + + {{< note >}} + 技術的なレビューを行える人物を知るには、Markdownファイル上部にあるfront-matterの`reviewers`フィールドを確認してください。 + {{< /note >}} + +- PRが[コンテンツ](/ja/docs/contribute/style/content-guide/)および[スタイル](/docs/contribute/style/style-guide/)のガイドに従っていることを確認してください。ガイドに従っていない場合は、ガイドの関連する部分にリンクを作者に示してください。 +- PRの作者に変更を提案できるときは、GitHubの**Request Changes**(変更をリクエスト)オプションを利用してください。 +- 提案したことが反映されたら、`/approve`や`/lgtm`コマンドを使用して、GitHubのレビューステータスを変更してください。 + +## 他の作者のPRにコミットを追加する + 
+PRにコメントを残すのは助けになりますが、まれに他の作者のPRに代わりにコミットを追加する必要がある場合があります。 + +あなたが明示的に作者から頼まれたり、長い間放置されたPRを蘇らせるような場合でない限り、他の作者のPRを「乗っ取る」ようなことはしないでください。短期的に見ればそのほうが短時間で終わるかもしれませんが、そのようなことをするとその人が貢献するチャンスを奪ってしまうことになります。 + +あなたが取る方法は、編集する必要のあるファイルがすでにPRのスコープに入っているか、あるいはPRがまだ触れていないファイルであるかによって変わります。 + +以下のいずれかが当てはまる場合、他の作者のPRにあなたがコミットを追加することはできません。 + +- PRの作者が自分のブランチを直接[https://github.com/kubernetes/website/](https://github.com/kubernetes/website/)リポジトリにpushした場合。この場合、pushアクセス権限を持つreviewerしか他のユーザーのPRにコミットを追加することはできません。 + + {{< note >}} + 次回PRを作成するとき、自分のブランチを自分のforkに対してpushするように作者に促してください。 + {{< /note >}} + +- PRの作者が明示的にapproverからの編集を禁止している場合。 + +## レビュー向けのProwコマンド + +[Prow](https://github.com/kubernetes/test-infra/blob/master/prow/README.md)は、pull request(PR)に対してジョブを実行するKubernetesベースのCI/CDシステムです。Prowは、Kubernetes organization全体でchatbotスタイルのコマンドを利用してGitHub actionsを扱えるようにします。たとえば、[ラベルの追加と削除](#adding-and-removing-issue-labels)、issueのclose、approverの割り当てなどが行なえます。Prowコマンドは、GitHubのコメントに`/`という形式で入力します。 + +reviewerとapproverが最もよく使うprowコマンドには、以下のようなものがあります。 + +{{< table caption="Prow commands for reviewing" >}} +Prowコマンド | Roleの制限 | 説明 +:------------|:------------------|:----------- +`/lgtm` | 誰でも。ただし、オートメーションがトリガされるのはReviewerまたはApproverが使用したときのみ。 | PRのレビューが完了し、変更に納得したことを知らせる。 +`/approve` | Approver | PRをマージすることを承認する。 +`/assign` | ReviewerまたはApprover | PRのレビューまたは承認するひとを割り当てる。 +`/close` | ReviewerまたはApprover | issueまたはPRをcloseする。 +`/hold` | 誰でも | `do-not-merge/hold`ラベルを追加して、自動的にマージできないPRであることを示す。 +`/hold cancel` | 誰でも | `do-not-merge/hold`ラベルを削除する。 +{{< /table >}} + +PRで利用できるすべてのコマンド一覧を確認するには、[Prowコマンドリファレンス](https://prow.k8s.io/command-help)を参照してください。 + +## issueのトリアージとカテゴリー分類 + +一般に、SIG Docsは[Kubernetes issue triage](https://github.com/kubernetes/community/blob/master/contributors/guide/issue-triage.md)のプロセスに従い、同じラベルを使用しています。 + +このGitHub 
issueの[フィルター](https://github.com/kubernetes/website/issues?q=is%3Aissue+is%3Aopen+-label%3Apriority%2Fbacklog+-label%3Apriority%2Fimportant-longterm+-label%3Apriority%2Fimportant-soon+-label%3Atriage%2Fneeds-information+-label%3Atriage%2Fsupport+sort%3Acreated-asc)は、トリアージが必要な可能性があるissueを表示します。 + +### issueをトリアージする + +1. issueを検証する + - issueがドキュメントのウェブサイトに関係するものであることを確かめる。質問に答えたりリソースの場所を報告者に教えることですぐに閉じられるissueもあります。詳しくは、[サポートリクエストまたはコードのバグレポート](#support-requests-or-code-bug-reports)のセクションを読んでください。 + - issueにメリットがあるかどうか評価する。 + - issueに行動を取るのに十分な詳細情報がない場合や、テンプレートが十分埋められていない場合は、`triage/needs-information`ラベルを追加する。 + - `lifecycle/stale`と`triage/needs-information`の両方のラベルがあるときは、issueをcloseする。 + +2. 優先度(priority)ラベルを追加する([issueトリアージガイドライン](https://github.com/kubernetes/community/blob/master/contributors/guide/issue-triage.md#define-priority)は、priorityラベルについて詳しく定義しています。) + + {{< table caption="issueのラベル" >}} + ラベル | 説明 + :------------|:------------------ + `priority/critical-urgent` | 今すぐに作業する。 + `priority/important-soon` | 3ヶ月以内に取り組む。 + `priority/important-longterm` | 6ヶ月以内に取り組む。 + `priority/backlog` | 無期限に延期可能。リソースに余裕がある時に取り組む。 + `priority/awaiting-more-evidence` | よいissueの可能性があるissueを見失わないようにするためのプレースホルダー。 + `help`または`good first issue` | KubernetesまたはSIG Docsでほとんど経験がない人に適したissue。より詳しい情報は、[Help WantedとGood First Issueラベル](https://github.com/kubernetes/community/blob/master/contributors/guide/help-wanted.md)を読んでください。 + {{< /table >}} + + あなたの裁量で、issueのオーナーシップを取り、issueに対するPRを提出してください(簡単なissueや、自分がすでに行った作業に関連するissueである場合は特に)。 + +issueのトリアージについて質問があるときは、Slackの`#sig-docs`か[kubernetes-sig-docs mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)で質問してください。 + +## issueラベルの追加と削除 {#adding-and-removing-issue-labels} + +ラベルを追加するには、以下のいずれかの形式でコメントします。 + +- `/`(たとえば、`/good-first-issue`) +- `/ `(たとえば、`/triage needs-information`や`/language ja`) + +ラベルを削除するには、以下のいずれかの形式でコメントします。 + +- `/remove-`(たとえば、`/remove-help`) +- `/remove- `(たとえば、`/remove-triage 
needs-information`) + +いずれの場合でも、ラベルは既存のものでなければなりません。存在しないラベルを追加しようとした場合、コマンドは無視されます。 + +すべてのラベル一覧は、[websiteリポジトリーのラベルセクション](https://github.com/kubernetes/website/labels)で確認できます。SIG Docsですべてのラベルが使われているわけではありません。 + +### issueのライフサイクルに関するラベル + +issueは一般にopen後に短期間でcloseされます。しかし、issueがopenされた後にアクティブでなくなったり、issueが90日以上openのままである場合もあります。 + +{{< table caption="issueのライフサイクルに関するラベル" >}} +ラベル | 説明 +:------------|:------------------ +`lifecycle/stale` | 90日間活動がない場合、issueは自動的にstaleとラベル付けされます。`/remove-lifecycle stale`コマンドを使って手動でlifecycleをリバートしない限り、issueは自動的にcloseされます。 +`lifecycle/frozen` | このラベルが付けられたissueは、90日間活動がなくてもstaleになりません。`priority/important-longterm`ラベルを付けたissueなど、90日以上openにしておく必要があるissueには、このラベルを手動で追加します。 +{{< /table >}} + +## 特別な種類のissueに対処する + +SIG Docsでは、対処方法をドキュメントに書いても良いくらい頻繁に、以下のような種類のissueに出会います。 + +### 重複したissue + +1つの問題に対して1つ以上のissueがopenしている場合、1つのissueに統合します。あなたはどちらのissueをopenにしておくか(あるいは新しいissueを作成するか)を決断して、すべての関連する情報を移動し、関連するすべてのissueにリンクしなければなりません。最後に、同じ問題について書かれたすべての他のissueに`triage/duplicate`ラベルを付けて、それらをcloseします。作業対象のissueを1つだけにすることで、混乱を減らし、同じ問題に対して作業が重複することを避けられます。 + +### リンク切れに関するissue + +リンク切れのissueがAPIまたは`kubectl`のドキュメントにあるものは、問題が完全に理解されるまでは`/priority critical-urgent`を割り当ててください。その他のすべてのリンク切れに関するissueには、手動で修正が必要であるため、`/priority important-longterm`を付けます。 + +### Blogに関するissue + +[Kubernetes Blog](https://kubernetes.io/blog/)のエントリーは時間が経つと情報が古くなるものだと考えています。そのため、ブログのエントリーは1年以内のものだけをメンテナンスします。1年以上前のブログエントリーに関するissueは修正せずにcloseします。 + +### サポートリクエストまたはコードのバグレポート {#support-requests-or-code-bug-reports} + +一部のドキュメントのissueは、実際には元になっているコードの問題や、何か(たとえば、チュートリアル)がうまく動かないときにサポートをリクエストするものです。ドキュメントに関係のない問題は、`kind/support`ラベルを付け、サポートチャンネル(SlackやStack Overflowなど)へ報告者を導くコメントをして、もし関連があれば機能のバグに対するissueを報告するリポジトリ(`kubernetes/kubernetes`は始めるのに最適な場所です)を教えて、closeします。 + +サポートリクエストに対する返答の例を示します。(リクエストを行う際は英語で行うことが想定されるため、英文とその日本語訳を記載しています) + +```none +This issue sounds more like a request for support and less +like an issue specifically for docs. 
I encourage you to bring +your question to the `#kubernetes-users` channel in +[Kubernetes slack](https://slack.k8s.io/). You can also search +resources like +[Stack Overflow](https://stackoverflow.com/questions/tagged/kubernetes) +for answers to similar questions. + +You can also open issues for Kubernetes functionality in +https://github.com/kubernetes/kubernetes. + +If this is a documentation issue, please re-open this issue. +``` + +```none +このissueは特定のドキュメントに関するissueではなく、サポートリクエストのようです。 +Kubernetesに関する質問については、[Kubernetes slack](https://slack.k8s.io/)の +`#kubernetes-users`チャンネルに投稿することをおすすめします。同様の質問に対する回答を +[Stack Overflow](https://stackoverflow.com/questions/tagged/kubernetes)などの +リソースで検索することもできます。 + +Kubernetesの機能に関するissueについては、https://github.com/kubernetes/kubernetes +でissueを作成できます。 + +もしこれがドキュメントに関するissueの場合、このissueを再びopenしてください。 +``` + +コードのバグに対する返答の例を示します。 + +```none +This sounds more like an issue with the code than an issue with +the documentation. Please open an issue at +https://github.com/kubernetes/kubernetes/issues. + +If this is a documentation issue, please re-open this issue. 
+``` + +```none +こちらのissueは、ドキュメントではなくコードに関係するissueのようです。 +https://github.com/kubernetes/kubernetes/issues でissueを作成してください。 + +もしこれがドキュメントに関するissueの場合、このissueを再びopenしてください。 +``` diff --git a/content/ja/docs/reference/command-line-tools-reference/feature-gates.md b/content/ja/docs/reference/command-line-tools-reference/feature-gates.md index 01a2c289e9..caab3b8c11 100644 --- a/content/ja/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/ja/docs/reference/command-line-tools-reference/feature-gates.md @@ -136,7 +136,8 @@ content_type: concept | `TokenRequest` | `true` | Beta | 1.12 | | | `TokenRequestProjection` | `false` | Alpha | 1.11 | 1.11 | | `TokenRequestProjection` | `true` | Beta | 1.12 | | -| `TTLAfterFinished` | `false` | Alpha | 1.12 | | +| `TTLAfterFinished` | `false` | Alpha | 1.12 | 1.20 | +| `TTLAfterFinished` | `true` | Beta | 1.21 | | | `TopologyManager` | `false` | Alpha | 1.16 | 1.17 | | `TopologyManager` | `true` | Beta | 1.18 | | | `ValidateProxyRedirects` | `false` | Alpha | 1.12 | 1.13 | diff --git a/content/ja/docs/reference/glossary/cloud-controller-manager.md b/content/ja/docs/reference/glossary/cloud-controller-manager.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/cluster-operator.md b/content/ja/docs/reference/glossary/cluster-operator.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/cncf.md b/content/ja/docs/reference/glossary/cncf.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/configmap.md b/content/ja/docs/reference/glossary/configmap.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/contributor.md b/content/ja/docs/reference/glossary/contributor.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/controller.md b/content/ja/docs/reference/glossary/controller.md old mode 100755 new mode 100644 diff --git 
a/content/ja/docs/reference/glossary/daemonset.md b/content/ja/docs/reference/glossary/daemonset.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/deployment.md b/content/ja/docs/reference/glossary/deployment.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/docker.md b/content/ja/docs/reference/glossary/docker.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/etcd.md b/content/ja/docs/reference/glossary/etcd.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/image.md b/content/ja/docs/reference/glossary/image.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/index.md b/content/ja/docs/reference/glossary/index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/ingress.md b/content/ja/docs/reference/glossary/ingress.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/kube-apiserver.md b/content/ja/docs/reference/glossary/kube-apiserver.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/kube-controller-manager.md b/content/ja/docs/reference/glossary/kube-controller-manager.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/kube-proxy.md b/content/ja/docs/reference/glossary/kube-proxy.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/kube-scheduler.md b/content/ja/docs/reference/glossary/kube-scheduler.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/kubelet.md b/content/ja/docs/reference/glossary/kubelet.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/label.md b/content/ja/docs/reference/glossary/label.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/member.md b/content/ja/docs/reference/glossary/member.md old mode 100755 new mode 100644 diff --git 
a/content/ja/docs/reference/glossary/mirror-pod.md b/content/ja/docs/reference/glossary/mirror-pod.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/name.md b/content/ja/docs/reference/glossary/name.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/namespace.md b/content/ja/docs/reference/glossary/namespace.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/node.md b/content/ja/docs/reference/glossary/node.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/platform-developer.md b/content/ja/docs/reference/glossary/platform-developer.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/pod.md b/content/ja/docs/reference/glossary/pod.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/secret.md b/content/ja/docs/reference/glossary/secret.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/selector.md b/content/ja/docs/reference/glossary/selector.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/service-catalog.md b/content/ja/docs/reference/glossary/service-catalog.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/service.md b/content/ja/docs/reference/glossary/service.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/sig.md b/content/ja/docs/reference/glossary/sig.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/statefulset.md b/content/ja/docs/reference/glossary/statefulset.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/glossary/uid.md b/content/ja/docs/reference/glossary/uid.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/kubectl/_index.md b/content/ja/docs/reference/kubectl/_index.md old mode 100755 new mode 100644 diff --git 
a/content/ja/docs/reference/kubectl/overview.md b/content/ja/docs/reference/kubectl/overview.md index 2ddd60b62e..c5855ce4af 100644 --- a/content/ja/docs/reference/kubectl/overview.md +++ b/content/ja/docs/reference/kubectl/overview.md @@ -457,8 +457,6 @@ error: one plugin warning was found cat ./kubectl-whoami ``` 次の例では、下記の内容を含んだ`kubectl-whoami`が既に作成済であることを前提としています。 -The next few examples assume that you already made `kubectl-whoami` have -the following contents: ```shell #!/bin/bash diff --git a/content/ja/docs/reference/setup-tools/kubeadm/_index.md b/content/ja/docs/reference/setup-tools/kubeadm/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/reference/tools.md b/content/ja/docs/reference/tools.md index 0fedb1cf9d..c64b1e74f7 100644 --- a/content/ja/docs/reference/tools.md +++ b/content/ja/docs/reference/tools.md @@ -11,7 +11,7 @@ Kubernetesには、Kubernetesシステムの操作に役立ついくつかの組 [`kubectl`](/docs/tasks/tools/install-kubectl/)は、Kubernetesのためのコマンドラインツールです。このコマンドはKubernetes cluster managerを操作します。 ## Kubeadm -[`kubeadm`](docs/setup/production-environment/tools/kubeadm/install-kubeadm/)は、物理サーバやクラウドサーバ、仮想マシン上にKubenetesクラスタを容易にプロビジョニングするためのコマンドラインツールです(現在はアルファ版です)。 +[`kubeadm`](docs/setup/production-environment/tools/kubeadm/install-kubeadm/)は、物理サーバやクラウドサーバ、仮想マシン上にKubernetesクラスタを容易にプロビジョニングするためのコマンドラインツールです(現在はアルファ版です)。 ## Minikube [`minikube`](https://minikube.sigs.k8s.io/docs/)は、開発やテストのためにワークステーション上でシングルノードのKubernetesクラスタをローカルで実行するツールです。 diff --git a/content/ja/docs/setup/best-practices/cluster-large.md b/content/ja/docs/setup/best-practices/cluster-large.md index bc59b1b8ee..5a1f45b662 100644 --- a/content/ja/docs/setup/best-practices/cluster-large.md +++ b/content/ja/docs/setup/best-practices/cluster-large.md @@ -7,10 +7,10 @@ weight: 20 At {{< param "version" >}}, Kubernetes supports clusters with up to 5000 nodes. 
More specifically, we support configurations that meet *all* of the following criteria: +* No more than 110 pods per node * No more than 5000 nodes * No more than 150000 total pods * No more than 300000 total containers -* No more than 100 pods per node ## 構築 diff --git a/content/ja/docs/setup/learning-environment/minikube.md b/content/ja/docs/setup/learning-environment/minikube.md index c197a03081..171d2b1b1e 100644 --- a/content/ja/docs/setup/learning-environment/minikube.md +++ b/content/ja/docs/setup/learning-environment/minikube.md @@ -342,7 +342,7 @@ Could not read CA certificate "/etc/docker/ca.pem": open /etc/docker/ca.pem: no ### Kubernetesの設定 -Minikubeにはユーザーが任意の値でKubenetesコンポーネントを設定することを可能にする "configurator" 機能があります。 +Minikubeにはユーザーが任意の値でKubernetesコンポーネントを設定することを可能にする "configurator" 機能があります。 この機能を使うには、`minikube start` コマンドに `--extra-config` フラグを使うことができます。 このフラグは繰り返されるので、複数のオプションを設定するためにいくつかの異なる値を使って何度も渡すことができます。 diff --git a/content/ja/docs/setup/production-environment/tools/kops.md b/content/ja/docs/setup/production-environment/tools/kops.md index dfd2eec406..0b40a6540b 100644 --- a/content/ja/docs/setup/production-environment/tools/kops.md +++ b/content/ja/docs/setup/production-environment/tools/kops.md @@ -56,10 +56,10 @@ To download a specific version, replace the following portion of the command wit $(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) ``` -For example, to download kops version v1.15.0 type: +For example, to download kops version v1.20.0 type: ```shell -curl -LO https://github.com/kubernetes/kops/releases/download/1.15.0/kops-darwin-amd64 +curl -LO https://github.com/kubernetes/kops/releases/download/v1.20.0/kops-darwin-amd64 ``` Make the kops binary executable. 
@@ -94,10 +94,10 @@ To download a specific version of kops, replace the following portion of the com $(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) ``` -For example, to download kops version v1.15.0 type: +For example, to download kops version v1.20.0 type: ```shell -curl -LO https://github.com/kubernetes/kops/releases/download/1.15.0/kops-linux-amd64 +curl -LO https://github.com/kubernetes/kops/releases/download/v1.20.0/kops-linux-amd64 ``` Make the kops binary executable diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md b/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md index 356101574d..319be36bbd 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md @@ -242,8 +242,8 @@ this example. ```sh root@HOST0 $ kubeadm init phase etcd local --config=/tmp/${HOST0}/kubeadmcfg.yaml - root@HOST1 $ kubeadm init phase etcd local --config=/home/ubuntu/kubeadmcfg.yaml - root@HOST2 $ kubeadm init phase etcd local --config=/home/ubuntu/kubeadmcfg.yaml + root@HOST1 $ kubeadm init phase etcd local --config=/tmp/${HOST1}/kubeadmcfg.yaml + root@HOST2 $ kubeadm init phase etcd local --config=/tmp/${HOST2}/kubeadmcfg.yaml ``` 1. 
Optional: Check the cluster health diff --git a/content/ja/docs/setup/production-environment/tools/kubespray.md b/content/ja/docs/setup/production-environment/tools/kubespray.md index e8c49078fd..b254e63c60 100644 --- a/content/ja/docs/setup/production-environment/tools/kubespray.md +++ b/content/ja/docs/setup/production-environment/tools/kubespray.md @@ -51,7 +51,7 @@ Kubespray provides the following utilities to help provision your environment: ### (2/5) インベントリファイルの用意 -After you provision your servers, create an [inventory file for Ansible](https://docs.ansible.com/ansible/intro_inventory.html). You can do this manually or via a dynamic inventory script. For more information, see "[Building your own inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)". +After you provision your servers, create an [inventory file for Ansible](https://docs.ansible.com/ansible/latest/network/getting_started/first_inventory.html). You can do this manually or via a dynamic inventory script. For more information, see "[Building your own inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)". 
### (3/5) クラスタ作成の計画 diff --git a/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md b/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md index 6f1ed4558e..2868afb182 100644 --- a/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md +++ b/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md @@ -134,7 +134,7 @@ Kubernetes v1.14以降、Windowsコンテナワークロードは、Group Manage Podの仕様で`"kubernetes.io/os": windows`のようなnodeSelectorが指定されていない場合、PodをWindowsまたはLinuxの任意のホストでスケジュールすることができます。WindowsコンテナはWindowsでのみ実行でき、LinuxコンテナはLinuxでのみ実行できるため、これは問題になる可能性があります。ベストプラクティスは、nodeSelectorを使用することです。 -ただし、多くの場合、ユーザーには既存の多数のLinuxコンテナのdepolyment、およびコミュニティHelmチャートのような既成構成のエコシステムやOperatorのようなプログラム的にPodを生成するケースがあることを理解しています。このような状況では、nodeSelectorsを追加するための構成変更をためらう可能性があります。代替策は、Taintsを使用することです。kubeletは登録中にTaintsを設定できるため、Windowsだけで実行する時に自動的にTaintを追加するように簡単に変更できます。 +ただし、多くの場合、ユーザーには既存の多数のLinuxコンテナのdeployment、およびコミュニティHelmチャートのような既成構成のエコシステムやOperatorのようなプログラム的にPodを生成するケースがあることを理解しています。このような状況では、nodeSelectorsを追加するための構成変更をためらう可能性があります。代替策は、Taintsを使用することです。kubeletは登録中にTaintsを設定できるため、Windowsだけで実行する時に自動的にTaintを追加するように簡単に変更できます。 例:`--register-with-taints='os=windows:NoSchedule'` diff --git a/content/ja/docs/setup/release/_index.md b/content/ja/docs/setup/release/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tasks/access-application-cluster/_index.md b/content/ja/docs/tasks/access-application-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tasks/administer-cluster/_index.md b/content/ja/docs/tasks/administer-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tasks/administer-cluster/topology-manager.md b/content/ja/docs/tasks/administer-cluster/topology-manager.md new file mode 100644 index 0000000000..6478323e1a --- /dev/null +++ b/content/ja/docs/tasks/administer-cluster/topology-manager.md @@ 
-0,0 +1,248 @@ +--- +title: ノードのトポロジー管理ポリシーを制御する +content_type: task +min-kubernetes-server-version: v1.18 +--- + + + +{{< feature-state state="beta" for_k8s_version="v1.18" >}} + +近年、CPUやハードウェア・アクセラレーターの組み合わせによって、レイテンシーが致命的となる実行や高いスループットを求められる並列計算をサポートするシステムが増えています。このようなシステムには、通信、科学技術計算、機械学習、金融サービス、データ分析などの分野のワークロードが含まれます。このようなハイブリッドシステムは、高い性能の環境で構成されます。 + +最高のパフォーマンスを引き出すために、CPUの分離やメモリーおよびデバイスの位置に関する最適化が求められます。しかしながら、Kubernetesでは、これらの最適化は分断されたコンポーネントによって処理されます。 + +_トポロジーマネージャー_ はKubeletコンポーネントの1つで最適化の役割を担い、コンポーネント群を調和して機能させます。 + + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + +## トポロジーマネージャーはどのように機能するか + +トポロジーマネージャー導入前は、KubernetesにおいてCPUマネージャーやデバイスマネージャーはそれぞれ独立してリソースの割り当てを決定します。 +これは、マルチソケットのシステムでは望ましくない割り当てとなり、パフォーマンスやレイテンシーが求められるアプリケーションは、この望ましくない割り当てに悩まされます。 +この場合の望ましくない例として、CPUやデバイスが異なるNUMAノードに割り当てられ、それによりレイテンシー悪化を招くことが挙げられます。 + +トポロジーマネージャーはKubeletコンポーネントであり、信頼できる情報源として振舞います。それによって、他のKubeletコンポーネントはトポロジーに沿ったリソース割り当ての選択を行うことができます。 + +トポロジーマネージャーは *Hint Providers* と呼ばれるコンポーネントのインターフェースを提供し、トポロジー情報を送受信します。トポロジーマネージャーは、ノード単位のポリシー群を保持します。ポリシーについて以下で説明します。 + +トポロジーマネージャーは *Hint Providers* からトポロジー情報を受け取ります。トポロジー情報は、利用可能なNUMAノードと優先割り当て表示を示すビットマスクです。トポロジーマネージャーのポリシーは、提供されたヒントに対して一連の操作を行い、ポリシーに沿ってヒントをまとめて最適な結果を得ます。もし、望ましくないヒントが保存された場合、ヒントの優先フィールドがfalseに設定されます。現在のポリシーでは、最も狭い優先マスクが優先されます。 + +選択されたヒントはトポロジーマネージャーの一部として保存されます。設定されたポリシーにしたがい、選択されたヒントに基づいてノードがPodを許可したり、拒否することができます。 +トポロジーマネージャーに保存されたヒントは、*Hint Providers* が使用しリソース割り当てを決定します。 + +### トポロジーマネージャーの機能を有効にする + +トポロジーマネージャーをサポートするには、`TopologyManager` [フィーチャーゲート](/ja/docs/reference/command-line-tools-reference/feature-gates/)を有効にする必要があります。Kubernetes 1.18ではデフォルトで有効です。 + +## トポロジーマネージャーのスコープとポリシー + +トポロジーマネージャーは現在: + + - 全てのQoSクラスのPodを調整する + - Hint Providerによって提供されたトポロジーヒントから、要求されたリソースを調整する + +これらの条件が合致した場合、トポロジーマネージャーは要求されたリソースを調整します。 + +この調整をどのように実行するかカスタマイズするために、トポロジーマネージャーは2つのノブを提供します: `スコープ` と`ポリシー`です。 + 
+`スコープ`はリソースの配置を行う粒度を定義します(例:`pod`や`container`)。そして、`ポリシー`は調整を実行するための実戦略を定義します(`best-effort`, `restricted`, `single-numa-node`等)。 + +現在利用可能な`スコープ`と`ポリシー`の値について詳細は以下の通りです。 + +{{< note >}} +PodのSpecにある他の要求リソースとCPUリソースを調整するために、CPUマネージャーを有効にし、適切なCPUマネージャーのポリシーがノードに設定されるべきです。[CPU管理ポリシー](/docs/tasks/administer-cluster/cpu-management-policies/)を参照してください。 +{{< /note >}} + +{{< note >}} +PodのSpecにある他の要求リソースとメモリー(およびhugepage)リソースを調整するために、メモリーマネージャーを有効にし、適切なメモリーマネージャーポリシーがノードに設定されるべきです。[メモリーマネージャー](/docs/tasks/administer-cluster/memory-manager/) のドキュメントを確認してください。 +{{< /note >}} + +### トポロジーマネージャーのスコープ + +トポロジーマネージャーは、以下の複数の異なるスコープでリソースの調整を行う事が可能です: + +* `container` (デフォルト) +* `pod` + +いずれのオプションも、`--topology-manager-scope`フラグによって、kubelet起動時に選択できます。 + +### containerスコープ + +`container`スコープはデフォルトで使用されます。 + +このスコープでは、トポロジーマネージャーは連続した複数のリソース調整を実行します。つまり、Pod内の各コンテナは、分離された配置計算がされます。言い換えると、このスコープでは、コンテナを特定のNUMAノードのセットにグループ化するという概念はありません。実際には、トポロジーマネージャーは各コンテナのNUMAノードへの配置を任意に実行します。 + +コンテナをグループ化するという概念は、以下のスコープで設定・実行されます。例えば、`pod`スコープが挙げられます。 + +### podスコープ + +`pod`スコープを選択するには、コマンドラインで`--topology-manager-scope=pod`オプションを指定してkubeletを起動します。 + +このスコープでは、Pod内全てのコンテナを共通のNUMAノードのセットにグループ化することができます。トポロジーマネージャーはPodをまとめて1つとして扱い、ポッド全体(全てのコンテナ)を単一のNUMAノードまたはNUMAノードの共通セットのいずれかに割り当てようとします。以下の例は、さまざまな場面でトポロジーマネージャーが実行する調整を示します: + +* 全てのコンテナは、単一のNUMAノードに割り当てられます。 +* 全てのコンテナは、共有されたNUMAノードのセットに割り当てられます。 + +Pod全体に要求される特定のリソースの総量は[有効なリクエスト/リミット](/ja/docs/concepts/workloads/pods/init-containers/#resources)の式に従って計算されるため、この総量の値は以下の最大値となります。 +* 全てのアプリケーションコンテナのリクエストの合計。 +* リソースに対するinitコンテナのリクエストの最大値。 + +`pod`スコープと`single-numa-node`トポロジーマネージャーポリシーを併用することは、レイテンシーが重要なワークロードやIPCを行う高スループットのアプリケーションに対して特に有効です。両方のオプションを組み合わせることで、Pod内の全てのコンテナを単一のNUMAノードに配置できます。そのため、PodのNUMA間通信によるオーバーヘッドを排除することができます。 + +`single-numa-node`ポリシーの場合、可能な割り当ての中に適切なNUMAノードのセットが存在する場合にのみ、Podが許可されます。上の例をもう一度考えてみましょう: + +* 1つのNUMAノードのみを含むセット - Podが許可されます。 +* 2つ以上のNUMAノードを含むセット - Podが拒否されます(1つのNUMAノードの代わりに、割り当てを満たすために2つ以上のNUMAノードが必要となるため)。 + + 
+要約すると、トポロジーマネージャーはまずNUMAノードのセットを計算し、それをトポロジーマネージャーのポリシーと照合し、Podの拒否または許可を検証します。 + +### トポロジーマネージャーのポリシー + +トポロジーマネージャーは4つの調整ポリシーをサポートします。`--topology-manager-policy`というKubeletフラグを通してポリシーを設定できます。 +4つのサポートされるポリシーがあります: + +* `none` (デフォルト) +* `best-effort` +* `restricted` +* `single-numa-node` + +{{< note >}} +トポロジーマネージャーが **pod** スコープで設定された場合、コンテナはポリシーによって、Pod全体の要求として反映します。 +したがって、Podの各コンテナは **同じ** トポロジー調整と同じ結果となります。 +{{< /note >}} + +### none ポリシー {#policy-none} + +これはデフォルトのポリシーで、トポロジーの調整を実行しません。 + +### best-effort ポリシー {#policy-best-effort} + +Pod内の各コンテナに対して、`best-effort` トポロジー管理ポリシーが設定されたkubeletは、各Hint Providerを呼び出してそれらのリソースの可用性を検出します。 +トポロジーマネージャーはこの情報を使用し、そのコンテナの推奨されるNUMAノードのアフィニティーを保存します。アフィニティーが優先されない場合、トポロジーマネージャーはこれを保存し、Podをノードに許可します。 + +*Hint Providers* はこの情報を使ってリソースの割り当てを決定します。 + +### restricted ポリシー {#policy-restricted} + +Pod内の各コンテナに対して、`restricted` トポロジー管理ポリシーが設定されたkubeletは各Hint Providerを呼び出してそれらのリソースの可用性を検出します。 +トポロジーマネージャーはこの情報を使用し、そのコンテナの推奨されるNUMAノードのアフィニティーを保存します。アフィニティーが優先されない場合、トポロジーマネージャーはPodをそのノードに割り当てることを拒否します。この結果、PodはPodの受付失敗となり`Terminated` 状態になります。 + +Podが一度`Terminated`状態になると、KubernetesスケジューラーはPodの再スケジューリングを試み **ません** 。Podの再デプロイをするためには、ReplicasetかDeploymentを使用してください。`Topology Affinity`エラーとなったpodを再デプロイするために、外部のコントロールループを実行することも可能です。 + +Podが許可されれば、 *Hint Providers* はこの情報を使ってリソースの割り当てを決定します。 + +### single-numa-node ポリシー {#policy-single-numa-node} + +Pod内の各コンテナに対して、`single-numa-node`トポロジー管理ポリシーが設定されたkubeletは各Hint Providerを呼び出してそれらのリソースの可用性を検出します。 +トポロジーマネージャーはこの情報を使用し、単一のNUMAノードアフィニティが可能かどうか決定します。 +可能な場合、トポロジーマネージャーは、この情報を保存し、*Hint Providers* はこの情報を使ってリソースの割り当てを決定します。 +不可能な場合、トポロジーマネージャーは、Podをそのノードに割り当てることを拒否します。この結果、Pod は Pod の受付失敗となり`Terminated`状態になります。 + +Podが一度`Terminated`状態になると、KubernetesスケジューラーはPodの再スケジューリングを試み**ません**。Podの再デプロイをするためには、ReplicasetかDeploymentを使用してください。`Topology Affinity`エラーとなったpodを再デプロイするために、外部のコントロールループを実行することも可能です。 + +### Podとトポロジー管理ポリシーの関係 + +以下のようなpodのSpecで定義されるコンテナを考えます: + +```yaml +spec: + containers: + - name: nginx + 
image: nginx +``` + +`requests`も`limits`も定義されていないため、このPodは`BestEffort`QoSクラスで実行します。 + +```yaml +spec: + containers: + - name: nginx + image: nginx + resources: + limits: + memory: "200Mi" + requests: + memory: "100Mi" +``` + +requestsがlimitsより小さい値のため、このPodは`Burstable`QoSクラスで実行します。 + +選択されたポリシーが`none`以外の場合、トポロジーマネージャーは、これらのPodのSpecを考慮します。トポロジーマネージャーは、Hint Providersからトポロジーヒントを取得します。CPUマネージャーポリシーが`static`の場合、デフォルトのトポロジーヒントを返却します。これらのPodは明示的にCPUリソースを要求していないからです。 + + +```yaml +spec: + containers: + - name: nginx + image: nginx + resources: + limits: + memory: "200Mi" + cpu: "2" + example.com/device: "1" + requests: + memory: "200Mi" + cpu: "2" + example.com/device: "1" +``` + +整数値でCPUリクエストを指定されたこのPodは、`requests`と`limits`が同じ値のため、`Guaranteed`QoSクラスで実行します。 + + +```yaml +spec: + containers: + - name: nginx + image: nginx + resources: + limits: + memory: "200Mi" + cpu: "300m" + example.com/device: "1" + requests: + memory: "200Mi" + cpu: "300m" + example.com/device: "1" +``` + +CPUの一部をリクエストで指定されたこのPodは、`requests`と`limits`が同じ値のため、`Guaranteed`QoSクラスで実行します。 + + +```yaml +spec: + containers: + - name: nginx + image: nginx + resources: + limits: + example.com/deviceA: "1" + example.com/deviceB: "1" + requests: + example.com/deviceA: "1" + example.com/deviceB: "1" +``` +CPUもメモリもリクエスト値がないため、このPodは `BestEffort` QoSクラスで実行します。 + +トポロジーマネージャーは、上記Podを考慮します。トポロジーマネージャーは、Hint ProvidersとなるCPUマネージャーとデバイスマネージャーに問い合わせ、トポロジーヒントを取得します。 + +整数値でCPU要求を指定された`Guaranteed`QoSクラスのPodの場合、`static`が設定されたCPUマネージャーポリシーは、排他的なCPUに関するトポロジーヒントを返却し、デバイスマネージャーは要求されたデバイスのヒントを返します。 + +CPUの一部を要求を指定された`Guaranteed`QoSクラスのPodの場合、排他的ではないCPU要求のため`static`が設定されたCPUマネージャーポリシーはデフォルトのトポロジーヒントを返却します。デバイスマネージャーは要求されたデバイスのヒントを返します。 + +上記の`Guaranteed`QoSクラスのPodに関する2ケースでは、`none`で設定されたCPUマネージャーポリシーは、デフォルトのトポロジーヒントを返却します。 + +`BestEffort`QoSクラスのPodの場合、`static`が設定されたCPUマネージャーポリシーは、CPUの要求がないためデフォルトのトポロジーヒントを返却します。デバイスマネージャーは要求されたデバイスごとのヒントを返します。 + +トポロジーマネージャーはこの情報を使用してPodに最適なヒントを計算し保存します。保存されたヒントは Hint Providersが使用しリソースを割り当てます。 + +### 
既知の制限 +1. トポロジーマネージャーが許容するNUMAノードの最大値は8です。8より多いNUMAノードでは、可能なNUMAアフィニティを列挙しヒントを生成する際に、生成する状態数が爆発的に増加します。 + +2. スケジューラーはトポロジーを意識しません。そのため、ノードにスケジュールされた後に実行に失敗する可能性があります。 diff --git a/content/ja/docs/tasks/configmap-secret/_index.md b/content/ja/docs/tasks/configmap-secret/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tasks/configmap-secret/managing-secret-using-config-file.md b/content/ja/docs/tasks/configmap-secret/managing-secret-using-config-file.md index f9572ca1f4..e4c19098fe 100644 --- a/content/ja/docs/tasks/configmap-secret/managing-secret-using-config-file.md +++ b/content/ja/docs/tasks/configmap-secret/managing-secret-using-config-file.md @@ -119,6 +119,8 @@ kubectl get secret mysecret -o yaml ```yaml apiVersion: v1 +data: + config.yaml: YXBpVXJsOiAiaHR0cHM6Ly9teS5hcGkuY29tL2FwaS92MSIKdXNlcm5hbWU6IHt7dXNlcm5hbWV9fQpwYXNzd29yZDoge3twYXNzd29yZH19 kind: Secret metadata: creationTimestamp: 2018-11-15T20:40:59Z @@ -127,8 +129,6 @@ metadata: resourceVersion: "7225" uid: c280ad2e-e916-11e8-98f2-025000000001 type: Opaque -data: - config.yaml: YXBpVXJsOiAiaHR0cHM6Ly9teS5hcGkuY29tL2FwaS92MSIKdXNlcm5hbWU6IHt7dXNlcm5hbWV9fQpwYXNzd29yZDoge3twYXNzd29yZH19 ``` `kubectl get`と`kubectl describe`コマンドはデフォルトではSecretの内容を表示しません。 @@ -154,6 +154,8 @@ stringData: ```yaml apiVersion: v1 +data: + username: YWRtaW5pc3RyYXRvcg== kind: Secret metadata: creationTimestamp: 2018-11-15T20:46:46Z @@ -162,8 +164,6 @@ metadata: resourceVersion: "7579" uid: 91460ecb-e917-11e8-98f2-025000000001 type: Opaque -data: - username: YWRtaW5pc3RyYXRvcg== ``` `YWRtaW5pc3RyYXRvcg==`をデコードすると`administrator`となります。 diff --git a/content/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl.md b/content/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl.md index fbc427469e..7be8c0b890 100644 --- a/content/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl.md +++ b/content/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl.md @@ -58,7 +58,7 @@ kubectl 
create secret generic db-user-pass \ たとえば、実際のパスワードが`S!B\*d$zDsb=`の場合、次のようにコマンドを実行します: ```shell -kubectl create secret generic dev-db-secret \ +kubectl create secret generic db-user-pass \ --from-literal=username=devuser \ --from-literal=password='S!B\*d$zDsb=' ``` diff --git a/content/ja/docs/tasks/configmap-secret/managing-secret-using-kustomize.md b/content/ja/docs/tasks/configmap-secret/managing-secret-using-kustomize.md new file mode 100644 index 0000000000..9d4df475f3 --- /dev/null +++ b/content/ja/docs/tasks/configmap-secret/managing-secret-using-kustomize.md @@ -0,0 +1,129 @@ +--- +title: Kustomizeを使用してSecretを管理する +content_type: task +weight: 30 +description: kustomization.yamlを使用してSecretを作成する +--- + + + +Kubernetes v1.14以降、`kubectl`は[Kustomizeを使ったオブジェクト管理](/docs/tasks/manage-kubernetes-objects/kustomization/)をサポートしています。 +KustomizeはSecretやConfigMapを作成するためのリソースジェネレーターを提供します。 +Kustomizeジェネレーターは、ディレクトリ内の`kustomization.yaml`ファイルで指定します。 +Secretを生成したら、`kubectl apply`でAPIサーバー上にSecretを作成します。 + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + +## Kustomizationファイルを作成する + +`kustomization.yaml`ファイルの中で`secretGenerator`を定義し、他の既存のファイルを参照することで、Secretを生成することができます。 +たとえば、以下のkustomizationファイルは`./username.txt`と`./password.txt`を参照しています。 + +```yaml +secretGenerator: +- name: db-user-pass + files: + - username.txt + - password.txt +``` + +また、`kustomization.yaml`ファイルの中でリテラルを指定して`secretGenerator`を定義することもできます。 +たとえば、以下の`kustomization.yaml`ファイルには`username`と`password`の2つのリテラルが含まれています。 + +```yaml +secretGenerator: +- name: db-user-pass + literals: + - username=admin + - password=1f2d1e2e67df +``` + +また、`kustomization.yaml`ファイルに`.env`ファイルを用意して`secretGenerator`を定義することもできます。 +たとえば、以下の`kustomization.yaml`ファイルは、`.env.secret`ファイルからデータを取り込みます。 + +```yaml +secretGenerator: +- name: db-user-pass + envs: + - .env.secret +``` + +なお、いずれの場合も、値をbase64エンコードする必要はありません。 + +## Secretを作成する + +`kustomization.yaml`を含むディレクトリを適用して、Secretを作成します。 + +```shell +kubectl 
apply -k . +``` + +出力は次のようになります: + +``` +secret/db-user-pass-96mffmfh4k created +``` + +なお、Secretを生成する際には、データをハッシュ化し、そのハッシュ値を付加することでSecret名を生成します。 +これにより、データが変更されるたびに、新しいSecretが生成されます。 + +## 作成したSecretを確認する + +Secretが作成されたことを確認できます: + +```shell +kubectl get secrets +``` + +出力は次のようになります: + +``` +NAME TYPE DATA AGE +db-user-pass-96mffmfh4k Opaque 2 51s +``` + +Secretの説明を参照できます: + +```shell +kubectl describe secrets/db-user-pass-96mffmfh4k +``` + +出力は次のようになります: + +``` +Name: db-user-pass-96mffmfh4k +Namespace: default +Labels: +Annotations: + +Type: Opaque + +Data +==== +password.txt: 12 bytes +username.txt: 5 bytes +``` + +`kubectl get`と`kubectl describe`コマンドはデフォルトではSecretの内容を表示しません。 +これは、Secretが不用意に他人にさらされたり、ターミナルログに保存されたりしないようにするためです。 +エンコードされたデータの実際の内容を確認するには、[Secretのデコード](/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl/#decoding-secret)を参照してください。 + +## クリーンアップ + +作成したSecretを削除するには次のコマンドを実行します: + +```shell +kubectl delete secret db-user-pass-96mffmfh4k +``` + + +## {{% heading "whatsnext" %}} + +- [Secretのコンセプト](/ja/docs/concepts/configuration/secret/)を読む +- [kubectlを使用してSecretを管理する](/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl/)方法を知る +- [設定ファイルを使用してSecretを管理する](/ja/docs/tasks/configmap-secret/managing-secret-using-config-file/)方法を知る + diff --git a/content/ja/docs/tasks/configure-pod-container/_index.md b/content/ja/docs/tasks/configure-pod-container/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tasks/debug-application-cluster/_index.md b/content/ja/docs/tasks/debug-application-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md b/content/ja/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md index 570134b84d..89e927aff9 100644 --- a/content/ja/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md +++ 
b/content/ja/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md @@ -47,7 +47,7 @@ Podをスケジュールできない理由に関するスケジューラーか クラスター内のCPUまたはメモリーの供給を使い果たした可能性があります。 この場合、いくつかのことを試すことができます。 -* クラスターに[ノードを追加します](/docs/tasks/administer-cluster/cluster-management/#resizing-a-cluster)。 +* クラスターにノードを追加します。 * [不要なPodを終了](/docs/concepts/workloads/pods/#pod-termination)して、 `Pending`状態のPodのための空きリソースを作ります。 diff --git a/content/ja/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md b/content/ja/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md index fdddb859ce..1fbdb763aa 100644 --- a/content/ja/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md +++ b/content/ja/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md @@ -33,7 +33,7 @@ content_type: task kubectl apply -f https://k8s.io/examples/debug/termination.yaml - YAMLファイルの`cmd`フィールドと`args`フィールドで、コンテナが10秒間スリープしてから`/dev/termination-log`ファイルに「Sleep expired」と書いているのがわかります。コンテナが「Sleep expired」メッセージを書き込んだ後、コンテナは終了します。 + YAMLファイルの`command`フィールドと`args`フィールドで、コンテナが10秒間スリープしてから`/dev/termination-log`ファイルに「Sleep expired」と書いているのがわかります。コンテナが「Sleep expired」メッセージを書き込んだ後、コンテナは終了します。 1. 
Podに関する情報を表示します: diff --git a/content/ja/docs/tasks/network/_index.md b/content/ja/docs/tasks/network/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md b/content/ja/docs/tasks/network/customize-hosts-file-for-pods.md similarity index 99% rename from content/ja/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md rename to content/ja/docs/tasks/network/customize-hosts-file-for-pods.md index 5c89abf5e7..f644fc575a 100644 --- a/content/ja/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md +++ b/content/ja/docs/tasks/network/customize-hosts-file-for-pods.md @@ -1,6 +1,6 @@ --- title: HostAliasesを使用してPodの/etc/hostsにエントリーを追加する -content_type: concept +content_type: task weight: 60 min-kubernetes-server-version: 1.7 --- @@ -13,7 +13,7 @@ Podの`/etc/hosts`ファイルにエントリーを追加すると、DNSやそ HostAliasesを使用せずにファイルを修正することはおすすめできません。このファイルはkubeletが管理しており、Podの作成や再起動時に上書きされる可能性があるためです。 - + ## デフォルトのhostsファイルの内容 diff --git a/content/ja/docs/tasks/run-application/_index.md b/content/ja/docs/tasks/run-application/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tasks/run-application/delete-stateful-set.md b/content/ja/docs/tasks/run-application/delete-stateful-set.md index 659dabec17..81142068ed 100644 --- a/content/ja/docs/tasks/run-application/delete-stateful-set.md +++ b/content/ja/docs/tasks/run-application/delete-stateful-set.md @@ -37,13 +37,13 @@ StatefulSet自体が削除された後で、関連するヘッドレスサービ kubectl delete service ``` -kubectlを使ってStatefulSetを削除すると0にスケールダウンされ、すべてのPodが削除されます。PodではなくStatefulSetだけを削除したい場合は、`--cascade=false`を使用してください。 +kubectlを使ってStatefulSetを削除すると0にスケールダウンされ、すべてのPodが削除されます。PodではなくStatefulSetだけを削除したい場合は、`--cascade=orphan`を使用してください。 ```shell -kubectl delete -f --cascade=false +kubectl delete -f --cascade=orphan ``` -`--cascade=false`を`kubectl 
delete`に渡すことで、StatefulSetオブジェクト自身が削除された後でも、StatefulSetによって管理されていたPodは残ります。Podに`app=myapp`というラベルが付いている場合は、次のようにして削除できます: +`--cascade=orphan`を`kubectl delete`に渡すことで、StatefulSetオブジェクト自身が削除された後でも、StatefulSetによって管理されていたPodは残ります。Podに`app=myapp`というラベルが付いている場合は、次のようにして削除できます: ```shell kubectl delete pods -l app=myapp diff --git a/content/ja/docs/tasks/service-catalog/_index.md b/content/ja/docs/tasks/service-catalog/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tasks/tls/_index.md b/content/ja/docs/tasks/tls/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tasks/tools/_index.md b/content/ja/docs/tasks/tools/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tutorials/clusters/_index.md b/content/ja/docs/tutorials/clusters/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tutorials/configuration/_index.md b/content/ja/docs/tutorials/configuration/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tutorials/services/_index.md b/content/ja/docs/tutorials/services/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tutorials/stateful-application/_index.md b/content/ja/docs/tutorials/stateful-application/_index.md old mode 100755 new mode 100644 diff --git a/content/ja/docs/tutorials/stateful-application/basic-stateful-set.md b/content/ja/docs/tutorials/stateful-application/basic-stateful-set.md index e14b1e5925..6b9d8da5a8 100644 --- a/content/ja/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/ja/docs/tutorials/stateful-application/basic-stateful-set.md @@ -711,10 +711,10 @@ StatefulSetは、非カスケードな削除とカスケードな削除の両方 kubectl get pods -w -l app=nginx ``` -[`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands/#delete)を使用して、StatefulSetを削除します。このとき、`--cascade=false`パラメーターをコマンドに与えてください。このパラメーターは、Kubernetesに対して、StatefulSetだけを削除して配下のPodは削除しないように指示します。 +[`kubectl 
delete`](/docs/reference/generated/kubectl/kubectl-commands/#delete)を使用して、StatefulSetを削除します。このとき、`--cascade=orphan`パラメーターをコマンドに与えてください。このパラメーターは、Kubernetesに対して、StatefulSetだけを削除して配下のPodは削除しないように指示します。 ```shell -kubectl delete statefulset web --cascade=false +kubectl delete statefulset web --cascade=orphan ``` ``` statefulset.apps "web" deleted @@ -814,7 +814,7 @@ web-1 kubectl get pods -w -l app=nginx ``` -2つ目のターミナルで、StatefulSetをもう一度削除します。今回は、`--cascade=false`パラメーターを省略します。 +2つ目のターミナルで、StatefulSetをもう一度削除します。今回は、`--cascade=orphan`パラメーターを省略します。 ```shell kubectl delete statefulset web diff --git a/content/ja/docs/tutorials/stateful-application/cassandra.md b/content/ja/docs/tutorials/stateful-application/cassandra.md index c5f8ed3986..4283a378b5 100644 --- a/content/ja/docs/tutorials/stateful-application/cassandra.md +++ b/content/ja/docs/tutorials/stateful-application/cassandra.md @@ -246,7 +246,7 @@ StatefulSetに関連するすべてのリソースを自動的に破棄するよ ## Cassandraコンテナの環境変数 -このチュートリアルのPodでは、Googleの[コンテナレジストリ](https://cloud.google.com/container-registry/docs/)の[`gcr.io/google-samples/cassandra:v13`](https://github.com/kubernetes/examples/blob/master/cassandra/image/Dockerfile)イメージを使用しました。このDockerイメージは[debian-base](https://github.com/kubernetes/kubernetes/tree/master/build/debian-base)をベースにしており、OpenJDK 8が含まれています。 +このチュートリアルのPodでは、Googleの[コンテナレジストリ](https://cloud.google.com/container-registry/docs/)の[`gcr.io/google-samples/cassandra:v13`](https://github.com/kubernetes/examples/blob/master/cassandra/image/Dockerfile)イメージを使用しました。このDockerイメージは[debian-base](https://github.com/kubernetes/release/tree/master/images/build/debian-base)をベースにしており、OpenJDK 8が含まれています。 このイメージには、Apache Debianリポジトリの標準のCassandraインストールが含まれます。 環境変数を利用すると、`cassandra.yaml`に挿入された値を変更できます。 diff --git a/content/ja/examples/application/job/redis/worker.py b/content/ja/examples/application/job/redis/worker.py index 49e5dae798..87d90bde18 100644 --- a/content/ja/examples/application/job/redis/worker.py +++ 
b/content/ja/examples/application/job/redis/worker.py @@ -8,11 +8,11 @@ host="redis" # import os # host = os.getenv("REDIS_SERVICE_HOST") -q = rediswq.RedisWQ(name="job2", host="redis") +q = rediswq.RedisWQ(name="job2", host=host) print("Worker with sessionID: " + q.sessionID()) print("Initial queue state: empty=" + str(q.empty())) while not q.empty(): - item = q.lease(lease_secs=10, block=True, timeout=2) + item = q.lease(lease_secs=10, block=True, timeout=2) if item is not None: itemstr = item.decode("utf=8") print("Working on " + itemstr) diff --git a/content/ja/examples/controllers/daemonset.yaml b/content/ja/examples/controllers/daemonset.yaml index 1bfa082833..375391826d 100644 --- a/content/ja/examples/controllers/daemonset.yaml +++ b/content/ja/examples/controllers/daemonset.yaml @@ -16,6 +16,7 @@ spec: spec: tolerations: - key: node-role.kubernetes.io/master + operator: Exists effect: NoSchedule containers: - name: fluentd-elasticsearch diff --git a/content/ja/includes/task-tutorial-prereqs.md b/content/ja/includes/task-tutorial-prereqs.md index e8ba679a99..09e7dda1b2 100644 --- a/content/ja/includes/task-tutorial-prereqs.md +++ b/content/ja/includes/task-tutorial-prereqs.md @@ -1,5 +1,6 @@ Kubernetesクラスターが必要、かつそのクラスターと通信するためにkubectlコマンドラインツールが設定されている必要があります。 -まだクラスターがない場合、[Minikube](/ja/docs/setup/learning-environment/minikube/)を使って作成するか、 +このチュートリアルは、コントロールプレーンのホストとして動作していない少なくとも2つのノードを持つクラスターで実行することをおすすめします。 +まだクラスターがない場合、[minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/)を使って作成するか、 以下のいずれかのKubernetesプレイグラウンドも使用できます: * [Katacoda](https://www.katacoda.com/courses/kubernetes/playground) diff --git a/content/ko/_index.html b/content/ko/_index.html index 9aa0ab3cd3..c6350f1559 100644 --- a/content/ko/_index.html +++ b/content/ko/_index.html @@ -10,7 +10,7 @@ sitemap: {{% blocks/feature image="flower" %}} K8s라고도 알려진 [쿠버네티스]({{< relref "/docs/concepts/overview/what-is-kubernetes" >}})는 컨테이너화된 애플리케이션을 자동으로 배포, 스케일링 및 관리해주는 오픈소스 시스템입니다. 
-애플리케이션을 구성하는 컨테이너들의 쉬운 관리 및 발견을 위해서 컨테이너들을 논리적인 단위로 그룹화합니다. 쿠버네티스는 [Google에서 15년간 프로덕션 워크로드 운영한 경험](http://queue.acm.org/detail.cfm?id=2898444)을 토대로 구축되었으며, 커뮤니티에서 제공한 최상의 아이디어와 방법들이 결합되어 있습니다. +애플리케이션을 구성하는 컨테이너들의 쉬운 관리 및 발견을 위해서 컨테이너들을 논리적인 단위로 그룹화합니다. 쿠버네티스는 [Google에서 15년간 프로덕션 워크로드 운영한 경험](https://queue.acm.org/detail.cfm?id=2898444)을 토대로 구축되었으며, 커뮤니티에서 제공한 최상의 아이디어와 방법들이 결합되어 있습니다. {{% /blocks/feature %}} {{% blocks/feature image="scalable" %}} @@ -43,12 +43,12 @@ Google이 일주일에 수십억 개의 컨테이너들을 운영하게 해준

    - Attend KubeCon NA virtually on November 17-20, 2020 + Attend KubeCon North America on October 11-15, 2021



    - Attend KubeCon EU virtually on May 4 – 7, 2021 + Attend KubeCon Europe on May 17-20, 2022
    diff --git a/content/ko/blog/_posts/2021-08-04-kubernetes-release-1.22.md b/content/ko/blog/_posts/2021-08-04-kubernetes-release-1.22.md new file mode 100644 index 0000000000..d936d7c767 --- /dev/null +++ b/content/ko/blog/_posts/2021-08-04-kubernetes-release-1.22.md @@ -0,0 +1,157 @@ +--- +layout: blog +title: '쿠버네티스 1.22: 새로운 정점에 도달(Reaching New Peaks)' +date: 2021-08-04 +slug: kubernetes-1-22-release-announcement +--- + +**저자:** [쿠버네티스 1.22 릴리스 팀](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.22/release-team.md) + +**번역:** [손석호(ETRI)](https://github.com/seokho-son), [서지훈(ETRI)](https://github.com/jihoon-seo), [쿠버네티스 문서 한글화 팀](https://kubernetes.slack.com/archives/CA1MMR86S) + +2021년의 두 번째 릴리스인 쿠버네티스 1.22 릴리스를 발표하게 되어 기쁘게 생각합니다! + +이번 릴리스는 53개의 개선 사항(enhancement)으로 구성되어 있습니다. 13개의 개선 사항은 스테이블(stable)로 졸업하였으며(graduated), 24개의 개선 사항은 베타(beta)로 이동하였고, 16개는 알파(alpha)에 진입하였습니다. 또한, 3개의 기능(feature)을 더 이상 사용하지 않게 되었습니다(deprecated). + +이번 해 4월에는 쿠버네티스 릴리스 케이던스(cadence)가 1년에 4회에서 3회로 공식적으로 변경되었습니다. 이번 릴리스가 해당 방식에 따라 긴 주기를 가진 첫 번째 릴리스입니다. 쿠버네티스 프로젝트가 성숙해짐에 따라, 사이클(cycle) 당 개선 사항도 늘어나고 있습니다. 이것은 기여자 커뮤니티 및 릴리스 엔지니어링 팀에게, 버전과 버전 사이에 더 많은 작업이 필요하다는 것을 의미합니다. 또한 점점 더 많은 기능을 포함하는 릴리스로 최신 상태를 유지하려는 최종-사용자 커뮤니티에도 부담을 줄 수 있습니다. + +연간 4회에서 3회로의 릴리스 케이던스 변경을 통해 프로젝트의 다양한 측면(기여와 릴리스가 관리되는 방법, 업그레이드 및 최신 릴리스 유지에 대한 커뮤니티의 역량 등)에 대한 균형을 이루고자 하였습니다. + +더 자세한 사항은 공식 블로그 포스트 [쿠버네티스 릴리스 케이던스 변경: 알아두어야 할 사항](https://kubernetes.io/blog/2021/07/20/new-kubernetes-release-cadence/)에서 확인할 수 있습니다. + + +## 주요 주제 + +### 서버-사이드 어플라이(Server-side Apply)가 GA로 졸업 + +[서버-사이드 어플라이](https://kubernetes.io/docs/reference/using-api/server-side-apply/)는 쿠버네티스 API 서버에서 동작하는 신규 필드 오너십이며 오브젝트 병합 알고리즘입니다. 서버-사이드 어플라이는 사용자와 컨트롤러가 선언적인 구성을 통해서 자신의 리소스를 관리할 수 있도록 돕습니다. 이 기능은 단순히 fully specified intent를 전송하는 것만으로 자신의 오브젝트를 선언적으로 생성 또는 수정할 수 있도록 허용합니다. 몇 릴리스에 걸친 베타 과정 이후, 서버-사이드 어플라이는 이제 GA(generally available)가 되었습니다. 
+ +### 외부 크리덴셜 제공자가 이제 스테이블이 됨 + +쿠버네티스 클라이언트 [크리덴셜 플러그인](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins)에 대한 지원은 1.11부터 베타였으나, 쿠버네티스 1.22 릴리스에서 스테이블로 졸업하였습니다. 해당 GA 기능 집합은 인터랙티브 로그인 플로우(interactive login flow)를 제공하는 플러그인에 대한 향상된 지원을 포함합니다. 또한, 많은 버그가 수정되었습니다. 플러그인 개발은 [sample-exec-plugin](https://github.com/ankeesler/sample-exec-plugin)을 통해 시작할 수 있습니다. + +### etcd 3.5.0으로 변경 + +쿠버네티스의 기본 백엔드 저장소인 etcd 3.5.0이 신규로 릴리스되었습니다. 신규 릴리스에는 보안, 성능, 모니터링, 개발자 경험 측면의 개선 사항이 포함되어 있습니다. 많은 버그가 수정되었으며 구조화된 로깅으로 마이그레이션(migration to structured logging) 및 빌트-인 로그 순환(built-in log rotation)과 같은 신규 중요 기능들도 일부 포함되었습니다. 해당 릴리스는 트래픽 부하에 대한 솔루션 구현을 위한 자세한 차기 로드맵도 제시하고 있습니다. [3.5.0 릴리스 발표](https://etcd.io/blog/2021/announcing-etcd-3.5/)에서 변경에 대한 자세한 항목을 확인할 수 있습니다. + +### 메모리 리소스에 대한 서비스 품질(Quality of Service) + +쿠버네티스는 원래 v1 cgroups API를 사용했습니다. 해당 디자인에 의해서, `Pod`에 대한 QoS 클래스는 CPU 리소스(예를 들면, `cpu_shares`)에만 적용되었습니다. 알파 기능으로, 쿠버네티스 v1.22에서는 메모리 할당(allocation)과 격리(isolation)를 제어하기 위한 cgroups v2 API를 사용할 수 있습니다. 이 기능은 메모리 리소스에 대한 컨텐션(contention)이 있을 때 워크로드와 노드의 가용성을 향상시키고, 컨테이너 라이프사이클에 대한 예측 가능성을 향상시킬 수 있도록 디자인되었습니다. + +### 노드 시스템 스왑(swap) 지원 + +모든 시스템 관리자나 쿠버네티스 사용자는 쿠버네티스를 설정하거나 사용할 때 스왑 공간(space)을 비활성화해야 한다는 동일한 상황에 놓여 있었습니다. 쿠버네티스 1.22 릴리스에서는 노드의 스왑 메모리를 지원합니다(알파). 이 변경은 블록 스토리지의 일부를 추가적인 가상 메모리로 취급하도록, 관리자의 옵트인(opt in)을 받아서 리눅스 노드에 스왑을 구성합니다. + +### 윈도우(Windows) 개선 사항 및 기능 + +SIG Windows는 계속해서 성장하는 개발자 커뮤니티를 지원하기 위해서 [개발 환경](https://github.com/kubernetes-sigs/sig-windows-dev-tools/)을 릴리스하였습니다. 이 새로운 도구는 여러 CNI 제공자를 지원하며, 여러 플랫폼에서 구동할 수 있습니다. 윈도우 kubelet과 kube-proxy를 컴파일하고, 다른 쿠버네티스 컴포넌트와 함께 빌드될 수 있도록 하는 새로운 방법을 제공하여, 최신(bleeding-edge) 윈도우 기능을 스크래치(scratch)부터 실행할 수 있도록 지원합니다. + +1.22 릴리스에서 윈도우 노드의 CSI 지원이 GA 상태가 되었습니다. 쿠버네티스 v1.22에서는 특권을 가진(privileged) 윈도우 컨테이너가 알파가 되었습니다. 
윈도우 노드에서 CSI 스토리지를 사용하도록, 노드에서의 스토리지 작업에 대한 특권을 가진(privileged) [CSIProxy](https://github.com/kubernetes-csi/csi-proxy)가 CSI 노드 플러그인을 특권을 가지지 않은(unprivileged) 파드로 배치되도록 합니다. + +### 기본(default) seccomp 프로파일 + +알파 기능인 기본 seccomp 프로파일이 신규 커맨드라인 플래그 및 설정과 함께 kubelet에 추가되었습니다. 이 신규 기능을 사용하면, `Unconfined`대신 `RuntimeDefault` seccomp 프로파일을 기본으로 사용하는 seccomp이 클러스터 전반에서 기본이 됩니다. 이는 쿠버네티스 디플로이먼트(Deployment)의 기본 보안을 강화합니다. 워크로드에 대한 보안이 기본으로 더 강화되었으므로, 이제 보안 관리자도 조금 더 안심하고 쉴 수 있습니다. 이 기능에 대한 자세한 사항은 공식적인 [seccomp 튜토리얼](https://kubernetes.io/docs/tutorials/clusters/seccomp/#enable-the-use-of-runtimedefault-as-the-default-seccomp-profile-for-all-workloads)을 참고하시기 바랍니다. + +### kubeadm을 통한 보안성이 더 높은 컨트롤 플레인 + +이 신규 알파 기능을 사용하면 `kubeadm` 컨트롤 플레인 컴포넌트들을 루트가 아닌(non-root) 사용자로 동작시킬 수 있습니다. 이것은 `kubeadm`에 오랫동안 요청되어 온 보안 조치 사항입니다. 이 기능을 사용하려면 `kubeadm`에 한정된 RootlessControlPlane 기능 게이트를 활성화해야 합니다. 이 알파 기능을 사용하여 클러스터를 배치하는 경우, 사용자의 컨트롤 플레인은 더 낮은 특권(privileges)을 가지고 동작하게 됩니다. + +또한 쿠버네티스 1.22는 `kubeadm`의 신규 [v1beta3 구성 API](/docs/reference/config-api/kubeadm-config.v1beta3/)를 제공합니다. 이 버전에는 오랫동안 요청되어 온 몇 가지 기능들이 추가되었고, 기존의 일부 기능들은 사용 중단(deprecated)되었습니다. 이제 v1beta3 버전이 선호되는(preferred) API 버전입니다. 그러나, v1beta2 API도 여전히 사용 가능하며 아직 사용 중단(deprecated)되지 않았습니다. + +## 주요 변경 사항 + +### 사용 중단된(deprecated) 일부 베타 APIs의 제거 + +GA 버전과 중복된 사용 중단(deprecated)된 여러 베타 API가 1.22에서 제거되었습니다. 기존의 모든 오브젝트는 스테이블 APIs를 통해 상호 작용할 수 있습니다. 이 제거에는 `Ingress`, `IngressClass`, `Lease`, `APIService`, `ValidatingWebhookConfiguration`, `MutatingWebhookConfiguration`, `CustomResourceDefinition`, `TokenReview`, `SubjectAccessReview`, `CertificateSigningRequest` API의 베타 버전이 포함되었습니다. + +전체 항목은 [사용 중단된 API에 대한 마이그레이션 지침](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-22)과 블로그 포스트 [1.22에서 쿠버네티스 API와 제거된 기능: 알아두어야 할 사항](https://blog.k8s.io/2021/07/14/upcoming-changes-in-kubernetes-1-22/)에서 확인 가능합니다. 
+ +### 임시(ephemeral) 컨테이너에 대한 API 변경 및 개선 + +1.22에서 [임시 컨테이너](https://kubernetes.io/ko/docs/concepts/workloads/pods/ephemeral-containers/)를 생성하기 위한 API가 변경되었습니다. 임시 컨테이너 기능은 알파이며 기본적으로 비활성화되었습니다. 신규 API는 예전 API를 사용하려는 클라이언트에 대해 동작하지 않습니다. + +스테이블 기능에 대해서, kubectl 도구는 쿠버네티스의 [버전 차이(skew) 정책](https://kubernetes.io/ko/releases/version-skew-policy/)을 따릅니다. 그러나, kubectl v1.21 이하의 버전은 임시 컨테이너에 대한 신규 API를 지원하지 않습니다. 만약 `kubectl debug`를 사용하여 임시 컨테이너를 생성할 계획이 있고 클러스터에서 쿠버네티스 v1.22로 구동하고 있는 경우, kubectl v1.21 이하의 버전에서는 그렇게 할 수 없다는 것을 알아두어야 합니다. 따라서 만약 클러스터 버전을 혼합하여 `kubectl debug`를 사용하려면 kubectl를 1.22로 업데이트하길 바랍니다. + +## 기타 업데이트 + +### 스테이블로 졸업 + +* [바운드 서비스 어카운트 토큰 볼륨(Bound Service Account Token Volumes)](https://github.com/kubernetes/enhancements/issues/542) +* [CSI 서비스 어카운트 토큰(CSI Service Account Token)](https://github.com/kubernetes/enhancements/issues/2047) +* [윈도우의 CSI 플러그인 지원](https://github.com/kubernetes/enhancements/issues/1122) +* [사용 중단된 API 사용에 대한 경고(warning) 메커니즘](https://github.com/kubernetes/enhancements/issues/1693) +* [PodDisruptionBudget 축출(eviction)](https://github.com/kubernetes/enhancements/issues/85) + +### 주목할만한 기능 업데이트 + +* 파드시큐리티폴리시(PodSecurityPolicy)를 대체하기 위한 새로운 [파드시큐리티(PodSecurity) 어드미션(admission)](https://github.com/kubernetes/enhancements/issues/2579) 알파 기능이 소개됨. +* [메모리 관리자(manager)](https://github.com/kubernetes/enhancements/issues/1769)가 베타가 됨. +* [API 서버 트레이싱(tracing)](https://github.com/kubernetes/enhancements/issues/647)을 활성화하는 새로운 알파 기능. +* [kubeadm 설정(configuration)](https://github.com/kubernetes/enhancements/issues/970) 포맷의 신규 v1beta3 버전. +* 퍼시스턴트볼륨(PersistentVolume)을 위한 [Generic data populators](https://github.com/kubernetes/enhancements/issues/1495)를 알파로 활용 가능. +* 쿠버네티스 컨트롤 플레인이 이제 [크론잡 v2 컨트롤러(CronJobs v2 controller)](https://github.com/kubernetes/enhancements/issues/19)를 사용하게 됨. +* 알파 기능으로, 모든 쿠버네티스 노드 컴포넌트(kubelet, kube-proxy, 컨테이너 런타임을 포함)는 [루트가 아닌 사용자로](https://github.com/kubernetes/enhancements/issues/2033) 동작시킬 수 있음. 
+ +# 릴리스 노트 + +1.22 릴리스의 자세한 전체 사항은 [릴리스 노트](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.22.md)에서 확인할 수 있습니다. + +# 릴리스 위치 + +쿠버네티스 1.22는 [여기](https://kubernetes.io/releases/download/)에서 다운로드할 수 있고, [GitHub 프로젝트](https://github.com/kubernetes/kubernetes/releases/tag/v1.22.0)에서도 찾을 수 있습니다. + +쿠버네티스를 시작하는 데 도움이 되는 좋은 자료가 많이 있습니다. 쿠버네티스 사이트에서 [상호 작용형 튜토리얼](https://kubernetes.io/ko/docs/tutorials/)을 수행할 수도 있고, [kind](https://kind.sigs.k8s.io)와 도커 컨테이너를 사용하여 로컬 클러스터를 사용자의 머신에서 구동해볼 수도 있습니다. 클러스터를 스크래치(scratch)부터 구축해보고 싶다면, Kelsey Hightower의 [쿠버네티스 어렵게 익히기(the Hard Way)](https://github.com/kelseyhightower/kubernetes-the-hard-way) 튜토리얼을 확인해보시기 바랍니다. + +# 릴리스 팀 + +이 릴리스는 쿠버네티스 릴리스에 포함되는 모든 기술 콘텐츠, 문서, 코드, 기타 구성 요소 등을 제공하기 위해 팀들로 모인 매우 헌신적인 개인 그룹에 의해 가능했습니다. + +팀을 성공적인 릴리스로 이끈 릴리스 리드 Savitha Raghunathan에게 감사드리며, 릴리스 팀 이외에도 커뮤니티에 1.22 릴리스를 제공하기 위해 열심히 작업하고 지원한 모든 사람들에게 감사드립니다. + +우리는 또한 이 자리를 빌려 올해 초에 생을 마감한 팀 멤버 Peeyush Gupta를 추모하고 싶습니다. Peeyush Gupta는 SIG ContribEx 및 쿠버네티스 릴리스 팀에 활발히 참여했으며, 최근에는 1.22 커뮤니케이션 리드를 역임하였습니다. 그의 기여와 노력은 앞으로도 커뮤니티에 지속적으로 영향을 줄 것입니다. 그에 대한 추억과 추모를 공유하기 위한 [CNCF 추모](https://github.com/cncf/memorials/blob/main/peeyush-gupta.md) 페이지가 생성되어 있습니다. + +# 릴리스 로고 + +![쿠버네티스 1.22 릴리스 로고](/images/blog/2021-08-04-kubernetes-release-1.22/kubernetes-1.22.png) + +진행 중인 팬데믹, 자연재해 및 항상 존재하는 번아웃의 그림자 속에서도, 쿠버네티스 1.22 릴리스는 53개의 개선 사항을 제공하였습니다. 이것은 현재까지 가장 큰 릴리스입니다. 이 성과는 열심히 일하고 열정적인 릴리스 팀 구성원과 쿠버네티스 생태계의 대단한 기여자들 덕분에 달성할 수 있었습니다. 이 릴리스 로고는 새로운 마일스톤과 새로운 기록을 세우기 위한 리마인더입니다. 이 로고를 모든 릴리스 팀 구성원, 등산객, 별을 보는 사람들에게 바칩니다! + +이 로고는 [Boris Zotkin](https://www.instagram.com/boris.z.man/)가 디자인하였습니다. Boris는 MathWorks에서 Mac/Linux 관리자 역할을 맡고 있습니다. 그는 인생에서의 소소한 재미를 즐기고 가족과 함께 시간을 보내는 것을 사랑합니다. 이 기술에 정통(tech-savvy)한 개인은 항상 도전을 준비하며 친구를 돕는 것에 행복을 느낍니다! + +# 사용자 하이라이트 + +- 5월에 CNCF가 전 세계에 걸친 27 기관을 다양한 클라우드 네이티브 생태계의 신규 멤버로 받았습니다. 
이 신규 [멤버](https://www.cncf.io/announcements/2021/05/05/27-new-members-join-the-cloud-native-computing-foundation/)는 다가오는 [KubeCon + CloudNativeCon NA in Los Angeles (October 12 – 15, 2021)](https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/)를 포함한 CNCF 이벤트들에 참여할 것입니다. +- CNCF는 [KubeCon + CloudNativeCon EU – Virtual 2021](https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/)에서 Spotify에 [최고 엔드 유저 상(Top End User Award)](https://www.cncf.io/announcements/2021/05/05/cloud-native-computing-foundation-grants-spotify-the-top-end-user-award/)을 수여했습니다. + +# 프로젝트 속도(Velocity) + +[CNCF K8s DevStats 프로젝트](https://k8s.devstats.cncf.io/)는 쿠버네티스와 다양한 서브-프로젝트에 대한 흥미로운 데이터를 수집하고 있습니다. 여기에는 개인 기여부터 기여하는 회사 수에 이르기까지 모든 것이 포함되며, 이 생태계를 발전시키는 데 필요한 노력의 깊이와 넓이를 보여줍니다. + +우리는 15주(4월 26일에서 8월 4일) 간 진행된 v1.22 릴리스 주기에서, [1063개의 기업](https://k8s.devstats.cncf.io/d/9/companies-table?orgId=1&var-period_name=v1.21.0%20-%20now&var-metric=contributions)과 [2054명의 개인](https://k8s.devstats.cncf.io/d/66/developer-activity-counts-by-companies?orgId=1&var-period_name=v1.21.0%20-%20now&var-metric=contributions&var-repogroup_name=Kubernetes&var-country_name=All&var-companies=All)의 기여를 보았습니다. + +# 생태계 업데이트 + +- 세 번째 가상 이벤트인 [KubeCon + CloudNativeCon Europe 2021](https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/)이 5월에 열렸습니다. 모든 발표가 [온디맨드로 확인 가능](https://www.youtube.com/playlist?list=PLj6h78yzYM2MqBm19mRz9SYLsw4kfQBrC)합니다. +- [Spring Term LFX 프로그램](https://www.cncf.io/blog/2021/07/13/spring-term-lfx-program-largest-graduating-class-with-28-successful-cncf-interns)이 28명의 성공적인 인턴을 배출한 최대 규모의 졸업반을 가졌습니다! +- CNCF가 연초에 클라우드 네이티브 커뮤니티와 함께 배우고, 성장하고, 협업하기를 원하는 전 세계 누구에게나 상호 작용형 미디어 경험을 제공하고자, [Twitch에서 라이브스트리밍](https://www.cncf.io/blog/2021/06/03/cloud-native-community-goes-live-with-10-shows-on-twitch/)을 시작하였습니다. 
+ +# 이벤트 업데이트 + +- [KubeCon + CloudNativeCon North America 2021](https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/)가 October 12 – 15, 2021에 Los Angeles에서 열립니다! 컨퍼런스와 등록에 대한 더 자세한 정보는 이벤트 사이트에서 찾을 수 있습니다. +- [쿠버네티스 커뮤니티 Days](https://community.cncf.io/kubernetes-community-days/about-kcd/)가 Italy, UK, Washington DC에서 이벤트를 앞두고 있습니다. + +# 다가오는 릴리스 웨비나 + +이번 릴리스에 대한 중요 기능뿐만 아니라 업그레이드 계획을 위해 필요한 사용 중지된 사항이나 제거에 대한 사항을 학습하고 싶다면, 2021년 9월 7일에 쿠버네티스 1.22 릴리스 팀 웨비나에 참여하세요. 더 자세한 정보와 등록에 대해서는 CNCF 온라인 프로그램 사이트의 [이벤트 페이지](https://community.cncf.io/events/details/cncf-cncf-online-programs-presents-cncf-live-webinar-kubernetes-122-release/)를 확인하세요. + +# 참여하기 + +만약 쿠버네티스 커뮤니티 기여에 관심이 있다면, 특별 관심 그룹(Special Interest Groups, SIGs)이 좋은 시작 지점이 될 수 있습니다. 그중 많은 SIG가 당신의 관심사와 일치될 수 있습니다! 만약 커뮤니티와 공유하고 싶은 것이 있다면, 주간 커뮤니티 미팅에 참석할 수 있습니다. 또한 다음 중 어떠한 채널이라도 활용할 수 있습니다. + +* [쿠버네티스 기여자](https://www.kubernetes.dev/) 웹사이트에서 기여에 대한 더 자세한 사항을 확인 +* 최신 정보 업데이트를 위해 [@Kubernetesio](https://twitter.com/kubernetesio) 트위터 팔로우 +* [논의(discuss)](https://discuss.kubernetes.io/)에서 커뮤니티 논의에 참여 +* [슬랙](http://slack.k8s.io/)에서 커뮤니티에 참여 +* 쿠버네티스 [사용기](https://docs.google.com/a/linuxfoundation.org/forms/d/e/1FAIpQLScuI7Ye3VQHQTwBASrgkjQDSS5TP0g3AXfFhwSM9YpHgxRKFA/viewform) 공유 +* 쿠버네티스에서 일어나는 일에 대한 자세한 사항을 [블로그](https://kubernetes.io/blog/)를 통해 읽기 +* [쿠버네티스 릴리스 팀](https://github.com/kubernetes/sig-release/tree/master/release-team)에 대해 더 알아보기 diff --git a/content/ko/case-studies/box/index.html b/content/ko/case-studies/box/index.html index 058ff7f9a2..392e3d66bb 100644 --- a/content/ko/case-studies/box/index.html +++ b/content/ko/case-studies/box/index.html @@ -23,7 +23,7 @@ case_study_details:

    Solution

    -

    Over the past couple of years, Box has been decomposing its infrastructure into microservices, and became an early adopter of, as well as contributor to, Kubernetes container orchestration. Kubernetes, Ghods says, has allowed Box's developers to "target a universal set of concepts that are portable across all clouds."

    +

    Over the past couple of years, Box has been decomposing its infrastructure into microservices, and became an early adopter of, as well as contributor to, Kubernetes container orchestration. Kubernetes, Ghods says, has allowed Box's developers to "target a universal set of concepts that are portable across all clouds."

    Impact

    @@ -37,7 +37,7 @@ case_study_details: In the summer of 2014, Box was feeling the pain of a decade's worth of hardware and software infrastructure that wasn't keeping up with the company's needs. {{< /case-studies/lead >}} -

    A platform that allows its more than 50 million users (including governments and big businesses like General Electric) to manage and share content in the cloud, Box was originally a PHP monolith of millions of lines of code built exclusively with bare metal inside of its own data centers. It had already begun to slowly chip away at the monolith, decomposing it into microservices. And "as we've been expanding into regions around the globe, and as the public cloud wars have been heating up, we've been focusing a lot more on figuring out how we run our workload across many different environments and many different cloud infrastructure providers," says Box Cofounder and Services Architect Sam Ghods. "It's been a huge challenge thus far because of all these different providers, especially bare metal, have very different interfaces and ways in which you work with them."

    +

    A platform that allows its more than 50 million users (including governments and big businesses like General Electric) to manage and share content in the cloud, Box was originally a PHP monolith of millions of lines of code built exclusively with bare metal inside of its own data centers. It had already begun to slowly chip away at the monolith, decomposing it into microservices. And "as we've been expanding into regions around the globe, and as the public cloud wars have been heating up, we've been focusing a lot more on figuring out how we run our workload across many different environments and many different cloud infrastructure providers," says Box Cofounder and Services Architect Sam Ghods. "It's been a huge challenge thus far because all of these different providers, especially bare metal, have very different interfaces and ways in which you work with them."

    Box's cloud native journey accelerated that June, when Ghods attended DockerCon. The company had come to the realization that it could no longer run its applications only off bare metal, and was researching containerizing with Docker, virtualizing with OpenStack, and supporting public cloud.

    diff --git a/content/ko/docs/concepts/architecture/cloud-controller.md b/content/ko/docs/concepts/architecture/cloud-controller.md index fe7fda364a..e5e7d315c5 100644 --- a/content/ko/docs/concepts/architecture/cloud-controller.md +++ b/content/ko/docs/concepts/architecture/cloud-controller.md @@ -210,7 +210,7 @@ rules: 자체 클라우드 컨트롤러 매니저를 구현하거나 기존 프로젝트를 확장하는 방법을 알고 싶은가? -클라우드 컨트롤러 매니저는 Go 인터페이스를 사용해서 모든 클라우드 플러그인을 구현할 수 있다. 구체적으로, [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider)의 [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.17/cloud.go#L42-L62)에 정의된 `CloudProvider` 인터페이스를 사용한다. +클라우드 컨트롤러 매니저는 Go 인터페이스를 사용함으로써, 어떠한 클라우드에 대한 구현체(implementation)라도 플러그인 될 수 있도록 한다. 구체적으로는, [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider)의 [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.21/cloud.go#L42-L69)에 정의된 `CloudProvider` 인터페이스를 사용한다. 이 문서(노드, 라우트와 서비스)에서 강조된 공유 컨트롤러의 구현과 공유 cloudprovider 인터페이스와 함께 일부 스캐폴딩(scaffolding)은 쿠버네티스 핵심의 일부이다. 클라우드 공급자 전용 구현은 쿠버네티스의 핵심 바깥에 있으며 `CloudProvider` 인터페이스를 구현한다. diff --git a/content/ko/docs/concepts/architecture/controller.md b/content/ko/docs/concepts/architecture/controller.md index e516dd9cc5..92afd615b6 100644 --- a/content/ko/docs/concepts/architecture/controller.md +++ b/content/ko/docs/concepts/architecture/controller.md @@ -159,11 +159,11 @@ IP 주소 관리 도구, 스토리지 서비스, 클라우드 제공자의 API 또는 쿠버네티스 외부에서 실행할 수 있다. 가장 적합한 것은 특정 컨트롤러의 기능에 따라 달라진다. - - ## {{% heading "whatsnext" %}} * [쿠버네티스 컨트롤 플레인](/ko/docs/concepts/overview/components/#컨트롤-플레인-컴포넌트)에 대해 읽기 * [쿠버네티스 오브젝트](/ko/docs/concepts/overview/working-with-objects/kubernetes-objects/)의 몇 가지 기본 사항을 알아보자. * [쿠버네티스 API](/ko/docs/concepts/overview/kubernetes-api/)에 대해 더 배워 보자. -* 만약 자신만의 컨트롤러를 작성하기 원한다면, 쿠버네티스 확장하기의 [확장 패턴](/ko/docs/concepts/extend-kubernetes/extend-cluster/#익스텐션-패턴)을 본다. +* 만약 자신만의 컨트롤러를 작성하기 원한다면, + 쿠버네티스 확장하기의 [확장 패턴](/ko/docs/concepts/extend-kubernetes/#익스텐션-패턴)을 + 본다. 
diff --git a/content/ko/docs/concepts/architecture/nodes.md b/content/ko/docs/concepts/architecture/nodes.md index 5bba08100e..3ab89472d8 100644 --- a/content/ko/docs/concepts/architecture/nodes.md +++ b/content/ko/docs/concepts/architecture/nodes.md @@ -1,4 +1,7 @@ --- + + + title: 노드 content_type: concept weight: 10 @@ -8,7 +11,8 @@ weight: 10 쿠버네티스는 컨테이너를 파드내에 배치하고 _노드_ 에서 실행함으로 워크로드를 구동한다. 노드는 클러스터에 따라 가상 또는 물리적 머신일 수 있다. 각 노드는 -{{< glossary_tooltip text="컨트롤 플레인" term_id="control-plane" >}}에 의해 관리되며 +{{< glossary_tooltip text="컨트롤 플레인" term_id="control-plane" >}}에 +의해 관리되며 {{< glossary_tooltip text="파드" term_id="pod" >}}를 실행하는 데 필요한 서비스를 포함한다. @@ -272,17 +276,18 @@ kubelet은 `NodeStatus` 와 리스 오브젝트를 생성하고 업데이트 할 #### 안정성 대부분의 경우, 노드 컨트롤러는 초당 `--node-eviction-rate`(기본값 0.1)로 -축출 비율을 제한한다. 이 말은 10초당 1개의 노드를 초과하여 +축출 속도를 제한한다. 이 말은 10초당 1개의 노드를 초과하여 파드 축출을 하지 않는다는 의미가 된다. 노드 축출 행위는 주어진 가용성 영역 내 하나의 노드가 상태가 불량할 경우 변화한다. 노드 컨트롤러는 영역 내 동시에 상태가 불량한 노드의 퍼센티지가 얼마나 되는지 체크한다(NodeReady 컨디션은 ConditionUnknown 또는 -ConditionFalse 다.). -- 상태가 불량한 노드의 일부가 최소 `--unhealthy-zone-threshold` - (기본값 0.55)가 되면 축출 비율은 감소한다. +ConditionFalse 다). +- 상태가 불량한 노드의 비율이 최소 `--unhealthy-zone-threshold` + (기본값 0.55)가 되면 축출 속도가 감소한다. - 클러스터가 작으면 (즉 `--large-cluster-size-threshold` - 노드 이하면 - 기본값 50) 축출은 중지되고, 그렇지 않으면 축출 비율은 초당 + 노드 이하면 - 기본값 50) 축출이 중지된다. +- 이외의 경우, 축출 속도는 초당 `--secondary-node-eviction-rate`(기본값 0.01)로 감소된다. 이 정책들이 가용성 영역 단위로 실행되어지는 이유는 나머지가 연결되어 있는 동안 @@ -293,7 +298,7 @@ ConditionFalse 다.). 노드가 가용성 영역들에 걸쳐 퍼져 있는 주된 이유는 하나의 전체 영역이 장애가 발생할 경우 워크로드가 상태 양호한 영역으로 이전되어질 수 있도록 하기 위해서이다. 그러므로, 하나의 영역 내 모든 노드들이 상태가 불량하면 노드 컨트롤러는 -`--node-eviction-rate` 의 정상 비율로 축출한다. 코너 케이스란 모든 영역이 +`--node-eviction-rate` 의 정상 속도로 축출한다. 코너 케이스란 모든 영역이 완전히 상태불량 (즉 클러스터 내 양호한 노드가 없는 경우) 한 경우이다. 이러한 경우, 노드 컨트롤러는 마스터 연결에 문제가 있어 일부 연결이 복원될 때까지 모든 축출을 중지하는 것으로 여긴다. @@ -347,7 +352,8 @@ Kubelet은 노드가 종료되는 동안 파드가 일반 [파드 종료 프로 사용하여 주어진 기간 동안 노드 종료를 지연시키므로 systemd에 의존한다. 
그레이스풀 노드 셧다운은 1.21에서 기본적으로 활성화된 `GracefulNodeShutdown` -[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)로 제어된다. +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)로 +제어된다. 기본적으로, 아래 설명된 두 구성 옵션, `ShutdownGracePeriod` 및 `ShutdownGracePeriodCriticalPods` 는 모두 0으로 설정되어 있으므로, @@ -371,6 +377,20 @@ Kubelet은 노드가 종료되는 동안 파드가 일반 [파드 종료 프로 유예 종료에 할당되고, 마지막 10초는 [중요 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)의 종료에 할당된다. +{{< note >}} +그레이스풀 노드 셧다운 과정에서 축출된 파드는 `Failed` 라고 표시된다. +`kubectl get pods` 명령을 실행하면 축출된 파드의 상태가 `Shutdown`으로 표시된다. +그리고 `kubectl describe pod` 명령을 실행하면 노드 셧다운으로 인해 파드가 축출되었음을 알 수 있다. + +``` +Status: Failed +Reason: Shutdown +Message: Node is shutting, evicting pods +``` + +실패한 파드 오브젝트는 명시적으로 삭제하거나 [가비지 콜렉션에 의해 정리](/ko/docs/concepts/workloads/pods/pod-lifecycle/#pod-garbage-collection)되기 전까지는 보존된다. +이는 갑작스러운 노드 종료의 경우와 비교했을 때 동작에 차이가 있다. +{{< /note >}} ## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/concepts/cluster-administration/_index.md b/content/ko/docs/concepts/cluster-administration/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/concepts/cluster-administration/kubelet-garbage-collection.md b/content/ko/docs/concepts/cluster-administration/kubelet-garbage-collection.md index 95ea899cbb..c64dd127b3 100644 --- a/content/ko/docs/concepts/cluster-administration/kubelet-garbage-collection.md +++ b/content/ko/docs/concepts/cluster-administration/kubelet-garbage-collection.md @@ -1,5 +1,4 @@ --- - title: kubelet 가비지(Garbage) 수집 설정하기 content_type: concept weight: 70 @@ -7,12 +6,13 @@ weight: 70 -가비지 수집은 사용되지 않는 [이미지](/ko/docs/concepts/containers/#컨테이너-이미지)들과 [컨테이너](/ko/docs/concepts/containers/)들을 정리하는 kubelet의 유용한 기능이다. Kubelet은 1분마다 컨테이너들에 대하여 가비지 수집을 수행하며, 5분마다 이미지들에 대하여 가비지 수집을 수행한다. - -별도의 가비지 수집 도구들을 사용하는 것은, 이러한 도구들이 존재할 수도 있는 컨테이너들을 제거함으로써 kubelet 을 중단시킬 수도 있으므로 권장하지 않는다. 
- - +가비지 수집은 사용되지 않는 +[이미지](/ko/docs/concepts/containers/#컨테이너-이미지)들과 +[컨테이너](/ko/docs/concepts/containers/)들을 정리하는 kubelet의 유용한 기능이다. Kubelet은 +1분마다 컨테이너들에 대하여 가비지 수집을 수행하며, 5분마다 이미지들에 대하여 가비지 수집을 수행한다. +별도의 가비지 수집 도구들을 사용하는 것은, 이러한 도구들이 존재할 수도 있는 컨테이너들을 제거함으로써 +kubelet을 중단시킬 수도 있으므로 권장하지 않는다. @@ -28,10 +28,24 @@ weight: 70 ## 컨테이너 수집 -컨테이너에 대한 가비지 수집 정책은 세 가지 사용자 정의 변수들을 고려한다: `MinAge` 는 컨테이너를 가비지 수집 할 수 있는 최소 연령이다. `MaxPerPodContainer` 는 모든 단일 파드 (UID, 컨테이너 이름) 쌍이 가질 수 있는 -최대 비활성 컨테이너의 수량이다. `MaxContainers` 죽은 컨테이너의 최대 수량이다. 이러한 변수는 `MinAge` 를 0으로 설정하고, `MaxPerPodContainer` 와 `MaxContainers` 를 각각 0 보다 작게 설정해서 비활성화 할 수 있다. +컨테이너에 대한 가비지 수집 정책은 세 가지 사용자 정의 변수들을 고려한다. +`MinAge` 는 컨테이너를 가비지 수집할 수 있는 최소 연령이다. +`MaxPerPodContainer` 는 모든 단일 파드(UID, 컨테이너 이름) +쌍이 가질 수 있는 최대 비활성 컨테이너의 수량이다. +`MaxContainers` 는 죽은 컨테이너의 최대 수량이다. +이러한 변수는 `MinAge` 를 0으로 설정하고, +`MaxPerPodContainer` 와 `MaxContainers` 를 각각 0 보다 작게 설정해서 비활성화할 수 있다. -Kubelet은 미확인, 삭제 또는 앞에서 언급 한 플래그가 설정 한 경계를 벗어나거나, 확인되지 않은 컨테이너에 대해 조치를 취한다. 일반적으로 가장 오래된 컨테이너가 먼저 제거된다. `MaxPerPodContainer` 와 `MaxContainer` 는 파드 당 최대 컨테이너 수 (`MaxPerPodContainer`)가 허용 가능한 범위의 전체 죽은 컨테이너의 수(`MaxContainers`)를 벗어나는 상황에서 잠재적으로 서로 충돌할 수 있습니다. 이러한 상황에서 `MaxPerPodContainer` 가 조정된다: 최악의 시나리오는 `MaxPerPodContainer` 를 1로 다운그레이드하고 가장 오래된 컨테이너를 제거하는 것이다. 추가로, 삭제된 파드가 소유 한 컨테이너는 `MinAge` 보다 오래된 컨테이너가 제거된다. +Kubelet은 미확인, 삭제 또는 앞에서 언급한 +플래그가 설정한 경계를 벗어나거나, 확인되지 않은 컨테이너에 대해 조치를 취한다. +일반적으로 가장 오래된 컨테이너가 먼저 제거된다. `MaxPerPodContainer` 와 `MaxContainer` 는 +파드 당 최대 +컨테이너 수(`MaxPerPodContainer`)가 허용 가능한 범위의 +전체 죽은 컨테이너의 수(`MaxContainers`)를 벗어나는 상황에서 잠재적으로 서로 충돌할 수 있다. +다음의 상황에서 `MaxPerPodContainer` 가 조정된다. +최악의 시나리오는 `MaxPerPodContainer` 를 1로 다운그레이드하고 +가장 오래된 컨테이너를 제거하는 것이다. 추가로, 삭제된 파드가 소유한 컨테이너는 +`MinAge` 보다 오래되면 제거된다. kubelet이 관리하지 않는 컨테이너는 컨테이너 가비지 수집 대상이 아니다. @@ -40,9 +54,9 @@ kubelet이 관리하지 않는 컨테이너는 컨테이너 가비지 수집 대 여러분은 후술될 kubelet 플래그들을 통하여 이미지 가비지 수집을 조정하기 위하여 다음의 임계값을 조정할 수 있다. 1. 
`image-gc-high-threshold`, 이미지 가비지 수집을 발생시키는 디스크 사용량의 비율로 -기본값은 85% 이다. + 기본값은 85% 이다. 2. `image-gc-low-threshold`, 이미지 가비지 수집을 더 이상 시도하지 않는 디스크 사용량의 비율로 -기본값은 80% 이다. + 기본값은 80% 이다. 다음의 kubelet 플래그를 통해 가비지 수집 정책을 사용자 정의할 수 있다. @@ -77,9 +91,7 @@ kubelet이 관리하지 않는 컨테이너는 컨테이너 가비지 수집 대 | `--low-diskspace-threshold-mb` | `--eviction-hard` or `eviction-soft` | 축출이 다른 리소스에 대한 디스크 임계값을 일반화 함 | | `--outofdisk-transition-frequency` | `--eviction-pressure-transition-period` | 축출이 다른 리소스로의 디스크 압력전환을 일반화 함 | - - ## {{% heading "whatsnext" %}} - -자세한 내용은 [리소스 부족 처리 구성](/docs/tasks/administer-cluster/out-of-resource/)를 본다. +자세한 내용은 [리소스 부족 처리 구성](/docs/concepts/scheduling-eviction/node-pressure-eviction/)를 +본다. diff --git a/content/ko/docs/concepts/cluster-administration/logging.md b/content/ko/docs/concepts/cluster-administration/logging.md index 85f3e4efde..d4e0119c41 100644 --- a/content/ko/docs/concepts/cluster-administration/logging.md +++ b/content/ko/docs/concepts/cluster-administration/logging.md @@ -83,8 +83,11 @@ kubectl logs counter [`configure-helper` 스크립트](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh)를 통해 자세히 알 수 있다. -**CRI 컨테이너 런타임** 을 사용할 때, kubelet은 로그를 로테이션하고 로깅 디렉터리 구조를 관리한다. kubelet은 -이 정보를 CRI 컨테이너 런타임에 전송하고 런타임은 컨테이너 로그를 지정된 위치에 기록한다. 두 개의 kubelet 플래그 `container-log-max-size` 및 `container-log-max-files` 를 사용하여 각 로그 파일의 최대 크기와 각 컨테이너에 허용되는 최대 파일 수를 각각 구성할 수 있다. +**CRI 컨테이너 런타임** 을 사용할 때, kubelet은 로그를 로테이션하고 로깅 디렉터리 구조를 관리한다. +kubelet은 이 정보를 CRI 컨테이너 런타임에 전송하고 런타임은 컨테이너 로그를 지정된 위치에 기록한다. +[kubelet config file](/docs/tasks/administer-cluster/kubelet-config-file/)에 있는 +두 개의 kubelet 파라미터 [`containerLogMaxSize` 및 `containerLogMaxFiles`](/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration)를 +사용하여 각 로그 파일의 최대 크기와 각 컨테이너에 허용되는 최대 파일 수를 각각 구성할 수 있다. 
기본 로깅 예제에서와 같이 [`kubectl logs`](/docs/reference/generated/kubectl/kubectl-commands#logs)를 실행하면, 노드의 kubelet이 요청을 처리하고 diff --git a/content/ko/docs/concepts/cluster-administration/manage-deployment.md b/content/ko/docs/concepts/cluster-administration/manage-deployment.md index abcc4c2cd5..7e3093d51e 100644 --- a/content/ko/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/ko/docs/concepts/cluster-administration/manage-deployment.md @@ -50,7 +50,7 @@ kubectl apply -f https://k8s.io/examples/application/nginx/ URL을 구성 소스로 지정할 수도 있다. 이는 GitHub에 체크인된 구성 파일에서 직접 배포하는 데 편리하다. ```shell -kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/application/nginx/nginx-deployment.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/application/nginx/nginx-deployment.yaml ``` ```shell diff --git a/content/ko/docs/concepts/cluster-administration/system-logs.md b/content/ko/docs/concepts/cluster-administration/system-logs.md index 13008ebbd8..eff3c05a65 100644 --- a/content/ko/docs/concepts/cluster-administration/system-logs.md +++ b/content/ko/docs/concepts/cluster-administration/system-logs.md @@ -20,7 +20,7 @@ weight: 60 klog는 쿠버네티스의 로깅 라이브러리다. [klog](https://github.com/kubernetes/klog) 는 쿠버네티스 시스템 컴포넌트의 로그 메시지를 생성한다. -klog 설정에 대한 더 많은 정보는, [커맨드라인 툴](/docs/reference/command-line-tools-reference/)을 참고한다. +klog 설정에 대한 더 많은 정보는, [커맨드라인 툴](/ko/docs/reference/command-line-tools-reference/)을 참고한다. klog 네이티브 형식 예 : ``` @@ -61,7 +61,7 @@ I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod= {{}} -JSON 출력은 많은 표준 klog 플래그를 지원하지 않는다. 지원하지 않는 klog 플래그 목록은, [커맨드라인 툴](/docs/reference/command-line-tools-reference/)을 참고한다. +JSON 출력은 많은 표준 klog 플래그를 지원하지 않는다. 지원하지 않는 klog 플래그 목록은, [커맨드라인 툴](/ko/docs/reference/command-line-tools-reference/)을 참고한다. 모든 로그가 JSON 형식으로 작성되는 것은 아니다(예: 프로세스 시작 중). 로그를 파싱하려는 경우 JSON 형식이 아닌 로그 행을 처리할 수 있는지 확인해야 한다. 
@@ -143,6 +143,6 @@ systemd를 사용하는 시스템에서는, kubelet과 컨테이너 런타임은 ## {{% heading "whatsnext" %}} -* [쿠버네티스 로깅 아키텍처](/docs/concepts/cluster-administration/logging/) 알아보기 +* [쿠버네티스 로깅 아키텍처](/ko/docs/concepts/cluster-administration/logging/) 알아보기 * [구조화된 로깅](https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1602-structured-logging) 알아보기 * [로깅 심각도(serverity) 규칙](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md) 알아보기 diff --git a/content/ko/docs/concepts/configuration/_index.md b/content/ko/docs/concepts/configuration/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/concepts/configuration/manage-resources-containers.md b/content/ko/docs/concepts/configuration/manage-resources-containers.md index ccd3ee9290..3084651390 100644 --- a/content/ko/docs/concepts/configuration/manage-resources-containers.md +++ b/content/ko/docs/concepts/configuration/manage-resources-containers.md @@ -105,9 +105,9 @@ CPU 리소스에 대한 제한 및 요청은 *cpu* 단위로 측정된다. 컨테이너는 CPU 1개를 요구하는 컨테이너의 절반만큼 CPU를 보장한다. `0.1` 이라는 표현은 "백 밀리cpu"로 읽을 수 있는 `100m` 표현과 동일하다. 어떤 사람들은 "백 밀리코어"라고 말하는데, 같은 것을 의미하는 것으로 이해된다. -`0.1` 과 같이 소수점이 있는 요청은 API에 의해 `100m` 로 변환되며, -`1m` 도 허용되지 않게 정밀하다. 이러한 이유로, `100m` 형식이 -선호될 수 있다. +`0.1` 과 같이 소수점이 있는 요청은 API에 의해 `100m` 으로 변환되며, +`1m` 보다 더 정밀한 단위는 허용되지 않는다. 이러한 이유로, +`100m` 과 같은 형식이 선호될 수 있다. CPU는 항상 절대 수량으로 요청되며, 상대적 수량은 아니다. 0.1은 단일 코어, 이중 코어 또는 48코어 시스템에서 동일한 양의 CPU이다. diff --git a/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md index a002414b67..aa739b381c 100644 --- a/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md +++ b/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md @@ -17,6 +17,11 @@ kubeconfig 파일들을 사용하여 클러스터, 사용자, 네임스페이스 `kubeconfig`라는 이름의 파일이 있다는 의미는 아니다. 
{{< /note >}} +{{< warning >}} +신뢰할 수 있는 소스의 kubeconfig 파일만 사용한다. 특수 제작된 kubeconfig 파일을 사용하면 악성 코드가 실행되거나 파일이 노출될 수 있다. +신뢰할 수 없는 kubeconfig 파일을 사용해야 하는 경우 셸 스크립트를 사용하는 경우처럼 먼저 신중하게 검사한다. +{{< /warning>}} + 기본적으로 `kubectl`은 `$HOME/.kube` 디렉터리에서 `config`라는 이름의 파일을 찾는다. `KUBECONFIG` 환경 변수를 설정하거나 [`--kubeconfig`](/docs/reference/generated/kubectl/kubectl/) 플래그를 지정해서 @@ -154,4 +159,3 @@ kubeconfig 파일에서 파일과 경로 참조는 kubeconfig 파일의 위치 - diff --git a/content/ko/docs/concepts/configuration/secret.md b/content/ko/docs/concepts/configuration/secret.md index a4544397d7..06be7d5a54 100644 --- a/content/ko/docs/concepts/configuration/secret.md +++ b/content/ko/docs/concepts/configuration/secret.md @@ -31,7 +31,7 @@ weight: 30 시크릿을 안전하게 사용하려면 (최소한) 다음과 같이 하는 것이 좋다. 1. 시크릿에 대한 [암호화 활성화](/docs/tasks/administer-cluster/encrypt-data/). -2. 시크릿 읽기 및 쓰기를 제한하는 [RBAC 규칙 활성화 또는 구성](/docs/reference/access-authn-authz/authorization/). 파드를 만들 권한이 있는 모든 사용자는 시크릿을 암묵적으로 얻을 수 있다. +2. 시크릿 읽기 및 쓰기를 제한하는 [RBAC 규칙 활성화 또는 구성](/ko/docs/reference/access-authn-authz/authorization/). 파드를 만들 권한이 있는 모든 사용자는 시크릿을 암묵적으로 얻을 수 있다. {{< /caution >}} @@ -48,7 +48,7 @@ weight: 30 - 파드의 [이미지를 가져올 때 kubelet](#imagepullsecrets-사용하기)에 의해 사용. 시크릿 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)이어야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. 사용자는 시크릿을 위한 파일을 구성할 때 `data` 및 (또는) `stringData` 필드를 명시할 수 있다. 해당 `data` 와 `stringData` 필드는 선택적으로 명시할 수 있다. `data` 필드의 모든 키(key)에 해당하는 값(value)은 base64로 인코딩된 문자열이어야 한다. @@ -1156,10 +1156,10 @@ HTTP 요청을 처리하고, 복잡한 비즈니스 로직을 수행한 다음, ### 시크릿 API를 사용하는 클라이언트 -시크릿 API와 상호 작용하는 애플리케이션을 배포할 때, [RBAC]( -/docs/reference/access-authn-authz/rbac/)과 같은 [인가 정책]( -/docs/reference/access-authn-authz/authorization/)을 -사용하여 접근를 제한해야 한다. 
+시크릿 API와 상호 작용하는 애플리케이션을 배포할 때, +[RBAC](/docs/reference/access-authn-authz/rbac/)과 같은 +[인가 정책](/ko/docs/reference/access-authn-authz/authorization/)을 +사용하여 접근을 제한해야 한다. 시크릿은 종종 다양한 중요도에 걸친 값을 보유하며, 이 중 많은 부분이 쿠버네티스(예: 서비스 어카운트 토큰)와 외부 시스템으로 단계적으로 @@ -1235,10 +1235,6 @@ API 서버에서 kubelet으로의 통신은 SSL/TLS로 보호된다. - 시크릿을 사용하는 파드를 생성할 수 있는 사용자는 해당 시크릿의 값도 볼 수 있다. API 서버 정책이 해당 사용자가 시크릿을 읽을 수 있도록 허용하지 않더라도, 사용자는 시크릿을 노출하는 파드를 실행할 수 있다. - - 현재, 모든 노드에 대한 루트 권한이 있는 모든 사용자는 kubelet을 가장하여 - API 서버에서 _모든_ 시크릿을 읽을 수 있다. 단일 노드에 대한 루트 취약점 공격의 - 영향을 제한하기 위해, 실제로 필요한 노드에만 시크릿을 보내는 것이 앞으로 계획된 - 기능이다. ## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/concepts/containers/_index.md b/content/ko/docs/concepts/containers/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/concepts/containers/images.md b/content/ko/docs/concepts/containers/images.md index fe7aca59aa..886f8247a3 100644 --- a/content/ko/docs/concepts/containers/images.md +++ b/content/ko/docs/concepts/containers/images.md @@ -77,6 +77,20 @@ weight: 10 `imagePullPolicy` 가 특정값 없이 정의되면, `Always` 로 설정된다. +### 이미지풀백오프(ImagePullBackOff) + +kubelet이 컨테이너 런타임을 사용하여 파드의 컨테이너 생성을 시작할 때, +`ImagePullBackOff`로 인해 컨테이너가 +[Waiting](/ko/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) 상태에 있을 수 있다. + +`ImagePullBackOff`라는 상태는 (이미지 이름이 잘못됨, 또는 `imagePullSecret` 없이 +비공개 레지스트리에서 풀링 시도 등의 이유로) 쿠버네티스가 컨테이너 이미지를 +가져올 수 없기 때문에 컨테이너를 실행할 수 없음을 의미한다. `BackOff`라는 단어는 +쿠버네티스가 백오프 딜레이를 증가시키면서 이미지 풀링을 계속 시도할 것임을 나타낸다. + +쿠버네티스는 시간 간격을 늘려가면서 시도를 계속하며, 시간 간격의 상한은 쿠버네티스 코드에 +300초(5분)로 정해져 있다. + ## 이미지 인덱스가 있는 다중 아키텍처 이미지 바이너리 이미지를 제공할 뿐만 아니라, 컨테이너 레지스트리는 [컨테이너 이미지 인덱스](https://github.com/opencontainers/image-spec/blob/master/image-index.md)를 제공할 수도 있다. 이미지 인덱스는 컨테이너의 아키텍처별 버전에 대한 여러 [이미지 매니페스트](https://github.com/opencontainers/image-spec/blob/master/manifest.md)를 가리킬 수 있다. 아이디어는 이미지의 이름(예를 들어, `pause`, `example/mycontainer`, `kube-apiserver`)을 가질 수 있다는 것이다. 
그래서 다른 시스템들이 사용하고 있는 컴퓨터 아키텍처에 적합한 바이너리 이미지를 가져올 수 있다. @@ -116,7 +130,7 @@ weight: 10 도커는 프라이빗 레지스트리를 위한 키를 `$HOME/.dockercfg` 또는 `$HOME/.docker/config.json` 파일에 저장한다. 만약 동일한 파일을 -아래의 검색 경로 리스트에 넣으면, kubelete은 이미지를 풀 할 때 해당 파일을 자격 증명 공급자로 사용한다. +아래의 검색 경로 리스트에 넣으면, kubelet은 이미지를 풀 할 때 해당 파일을 자격 증명 공급자로 사용한다. * `{--root-dir:-/var/lib/kubelet}/config.json` * `{cwd of kubelet}/config.json` diff --git a/content/ko/docs/concepts/containers/runtime-class.md b/content/ko/docs/concepts/containers/runtime-class.md index 2770b1e4b2..953571ec62 100644 --- a/content/ko/docs/concepts/containers/runtime-class.md +++ b/content/ko/docs/concepts/containers/runtime-class.md @@ -68,7 +68,7 @@ handler: myconfiguration # 상응하는 CRI 설정의 이름임 ``` 런타임클래스 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)어이야 한다. +[DNS 레이블 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-레이블-이름)어이야 한다. {{< note >}} 런타임클래스 쓰기 작업(create/update/patch/delete)은 @@ -132,7 +132,7 @@ https://github.com/containerd/cri/blob/master/docs/config.md runtime_path = "${PATH_TO_BINARY}" ``` -더 자세한 것은 CRI-O의 [설정 문서](https://raw.githubusercontent.com/cri-o/cri-o/9f11d1d/docs/crio.conf.5.md)를 본다. +더 자세한 것은 CRI-O의 [설정 문서](https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md)를 본다. 
## 스케줄 @@ -175,5 +175,5 @@ PodOverhead를 사용하려면, PodOverhead [기능 게이트](/ko/docs/referenc - [런타임클래스 설계](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md) - [런타임클래스 스케줄링 설계](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md#runtimeclass-scheduling) -- [파드 오버헤드](/ko/docs/concepts/configuration/pod-overhead/) 개념에 대해 읽기 -- [파드 오버헤드 기능 설계](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md) +- [파드 오버헤드](/ko/docs/concepts/scheduling-eviction/pod-overhead/) 개념에 대해 읽기 +- [파드 오버헤드 기능 설계](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/688-pod-overhead) diff --git a/content/ko/docs/concepts/extend-kubernetes/_index.md b/content/ko/docs/concepts/extend-kubernetes/_index.md index f93537bf62..95c61079dd 100644 --- a/content/ko/docs/concepts/extend-kubernetes/_index.md +++ b/content/ko/docs/concepts/extend-kubernetes/_index.md @@ -21,7 +21,7 @@ no_list: true 조정하는 방법을 이해하려는 {{< glossary_tooltip text="클러스터 운영자" term_id="cluster-operator" >}}를 대상으로 한다. 잠재적인 {{< glossary_tooltip text="플랫폼 개발자" term_id="platform-developer" >}} 또는 쿠버네티스 프로젝트 {{< glossary_tooltip text="컨트리뷰터" term_id="contributor" >}}인 개발자에게도 어떤 익스텐션(extension) 포인트와 패턴이 있는지, -그리고 그것들의 트레이드오프와 제약에 대한 소개 자료로 유용할 것이다. +그리고 그것의 트레이드오프와 제약을 이해하는 데 도움이 될 것이다. @@ -143,7 +143,7 @@ API를 추가해도 기존 API(예: 파드)의 동작에 직접 영향을 미치 ### 인가 -[인가](/docs/reference/access-authn-authz/webhook/)은 특정 사용자가 API 리소스에서 읽고, 쓰고, 다른 작업을 수행할 수 있는지를 결정한다. 전체 리소스 레벨에서 작동하며 임의의 오브젝트 필드를 기준으로 구별하지 않는다. 빌트인 인증 옵션이 사용자의 요구를 충족시키지 못하면 [인가 웹훅](/docs/reference/access-authn-authz/webhook/)을 통해 사용자가 제공한 코드를 호출하여 인증 결정을 내릴 수 있다. +[인가](/docs/reference/access-authn-authz/webhook/)는 특정 사용자가 API 리소스에서 읽고, 쓰고, 다른 작업을 수행할 수 있는지를 결정한다. 전체 리소스 레벨에서 작동하며 임의의 오브젝트 필드를 기준으로 구별하지 않는다. 빌트인 인증 옵션이 사용자의 요구를 충족시키지 못하면 [인가 웹훅](/docs/reference/access-authn-authz/webhook/)을 통해 사용자가 제공한 코드를 호출하여 인증 결정을 내릴 수 있다. 
### 동적 어드미션 컨트롤 diff --git a/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index b543addee6..0357ac7619 100644 --- a/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -128,7 +128,7 @@ CRD를 사용하면 다른 API 서버를 추가하지 않고도 새로운 타입 ## 커스텀리소스데피니션 -[커스텀리소스데피니션](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/) +[커스텀리소스데피니션](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) API 리소스를 사용하면 커스텀 리소스를 정의할 수 있다. CRD 오브젝트를 정의하면 지정한 이름과 스키마를 사용하여 새 커스텀 리소스가 만들어진다. 쿠버네티스 API는 커스텀 리소스의 스토리지를 제공하고 처리한다. diff --git a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index 3596c9f72e..13313adf58 100644 --- a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -192,6 +192,7 @@ kubelet은 gRPC 서비스를 제공하여 사용 중인 장치를 검색하고, // PodResourcesLister는 kubelet에서 제공하는 서비스로, 노드의 포드 및 컨테이너가 // 사용한 노드 리소스에 대한 정보를 제공한다. service PodResourcesLister { + rpc List(ListPodResourcesRequest) returns (ListPodResourcesResponse) {} rpc GetAllocatableResources(AllocatableResourcesRequest) returns (AllocatableResourcesResponse) {} } ``` @@ -252,7 +253,7 @@ message AllocatableResourcesResponse { `ContainerDevices` 는 장치가 어떤 NUMA 셀과 연관되는지를 선언하는 토폴로지 정보를 노출한다. NUMA 셀은 불분명한(opaque) 정수 ID를 사용하여 식별되며, 이 값은 -[kubelet에 등록할 때](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#device-plugin-integration-with-the-topology-manager) 장치 플러그인이 보고하는 것과 일치한다. +[kubelet에 등록할 때](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#토폴로지-관리자로-장치-플러그인-통합) 장치 플러그인이 보고하는 것과 일치한다. 
gRPC 서비스는 `/var/lib/kubelet/pod-resources/kubelet.sock` 의 유닉스 소켓을 통해 제공된다. @@ -264,8 +265,9 @@ gRPC 서비스는 `/var/lib/kubelet/pod-resources/kubelet.sock` 의 유닉스 {{< glossary_tooltip text="볼륨" term_id="volume" >}}으로 마운트해야 한다. `PodResourcesLister service` 를 지원하려면 `KubeletPodResources` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화해야 한다. +이것은 쿠버네티스 1.15부터 기본으로 활성화되어 있으며, 쿠버네티스 1.20부터는 v1 상태이다. -## 토폴로지 관리자와 장치 플러그인 통합 +## 토폴로지 관리자로 장치 플러그인 통합 {{< feature-state for_k8s_version="v1.18" state="beta" >}} diff --git a/content/ko/docs/concepts/extend-kubernetes/operator.md b/content/ko/docs/concepts/extend-kubernetes/operator.md index a0959f83dc..80ed86c2ec 100644 --- a/content/ko/docs/concepts/extend-kubernetes/operator.md +++ b/content/ko/docs/concepts/extend-kubernetes/operator.md @@ -51,8 +51,7 @@ weight: 30 * 내부 멤버 선출 절차없이 분산 애플리케이션의 리더를 선택 -오퍼레이터의 모습을 더 자세하게 볼 수 있는 방법은 무엇인가? 자세한 예는 -다음과 같다. +오퍼레이터의 모습을 더 자세하게 볼 수 있는 방법은 무엇인가? 예시는 다음과 같다. 1. 클러스터에 구성할 수 있는 SampleDB라는 사용자 정의 리소스. 2. 
오퍼레이터의 컨트롤러 부분이 포함된 파드의 실행을 @@ -116,7 +115,7 @@ kubectl edit SampleDB/example-database # 일부 설정을 수동으로 변경하 * [Charmed Operator Framework](https://juju.is/) * [kubebuilder](https://book.kubebuilder.io/) 사용하기 * [KUDO](https://kudo.dev/) (Kubernetes Universal Declarative Operator) -* 웹훅(WebHook)과 함께 [Metacontroller](https://metacontroller.app/)를 +* 웹훅(WebHook)과 함께 [Metacontroller](https://metacontroller.github.io/metacontroller/intro.html)를 사용하여 직접 구현하기 * [오퍼레이터 프레임워크](https://operatorframework.io) * [shell-operator](https://github.com/flant/shell-operator) @@ -124,6 +123,7 @@ kubectl edit SampleDB/example-database # 일부 설정을 수동으로 변경하 ## {{% heading "whatsnext" %}} +* {{< glossary_tooltip text="CNCF" term_id="cncf" >}} [오퍼레이터 백서](https://github.com/cncf/tag-app-delivery/blob/eece8f7307f2970f46f100f51932db106db46968/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md) 읽어보기 * [사용자 정의 리소스](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)에 대해 더 알아보기 * [OperatorHub.io](https://operatorhub.io/)에서 유스케이스에 맞는 이미 만들어진 오퍼레이터 찾기 * 다른 사람들이 사용할 수 있도록 자신의 오퍼레이터를 [게시](https://operatorhub.io/)하기 diff --git a/content/ko/docs/concepts/overview/_index.md b/content/ko/docs/concepts/overview/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/concepts/overview/kubernetes-api.md b/content/ko/docs/concepts/overview/kubernetes-api.md index 026e0e007c..919d59b459 100644 --- a/content/ko/docs/concepts/overview/kubernetes-api.md +++ b/content/ko/docs/concepts/overview/kubernetes-api.md @@ -20,14 +20,14 @@ card: 쿠버네티스 API를 사용하면 쿠버네티스의 API 오브젝트(예: 파드(Pod), 네임스페이스(Namespace), 컨피그맵(ConfigMap) 그리고 이벤트(Event))를 질의(query)하고 조작할 수 있다. -대부분의 작업은 [kubectl](/docs/reference/kubectl/overview/) +대부분의 작업은 [kubectl](/ko/docs/reference/kubectl/overview/) 커맨드 라인 인터페이스 또는 API를 사용하는 [kubeadm](/ko/docs/reference/setup-tools/kubeadm/)과 같은 다른 커맨드 라인 도구를 통해 수행할 수 있다. 그러나, REST 호출을 사용하여 API에 직접 접근할 수도 있다. 
쿠버네티스 API를 사용하여 애플리케이션을 작성하는 경우 -[클라이언트 라이브러리](/docs/reference/using-api/client-libraries/) 중 하나를 사용하는 것이 좋다. +[클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/) 중 하나를 사용하는 것이 좋다. @@ -130,7 +130,7 @@ API 리소스는 API 그룹, 리소스 유형, 네임스페이스 {{< /note >}} API 버전 수준 정의에 대한 자세한 내용은 -[API 버전 레퍼런스](/ko/docs/reference/using-api/api-overview/#api-버전-규칙)를 참조한다. +[API 버전 레퍼런스](/ko/docs/reference/using-api/#api-버전-규칙)를 참조한다. diff --git a/content/ko/docs/concepts/policy/node-resource-managers.md b/content/ko/docs/concepts/policy/node-resource-managers.md new file mode 100644 index 0000000000..fb3e5f46f5 --- /dev/null +++ b/content/ko/docs/concepts/policy/node-resource-managers.md @@ -0,0 +1,22 @@ +--- + + + +title: 노드 리소스 매니저 +content_type: 개념 +weight: 50 +--- + + + +쿠버네티스는 지연 시간에 민감하고 처리량이 많은 워크로드를 지원하기 위해 리소스 매니저 세트를 제공한다. 매니저는 CPU, 장치 및 메모리 (hugepages) 리소스와 같은 특정한 요구 사항으로 구성된 파드를 위해 노드의 리소스 할당을 조정하고 최적화하는 것을 목표로 한다. + + + +주 매니저인 토폴로지 매니저는 [정책](/docs/tasks/administer-cluster/topology-manager/)을 통해 전체 리소스 관리 프로세스를 조정하는 Kubelet 컴포넌트이다. + +개별 매니저의 구성은 다음의 문서에 자세히 기술되어 있다. + +- [CPU 관리 정책](/docs/tasks/administer-cluster/cpu-management-policies/) +- [장치 매니저](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#토폴로지-관리자와-장치-플러그인-통합) +- [메모리 관리 정책](/docs/tasks/administer-cluster/memory-manager/) diff --git a/content/ko/docs/concepts/policy/pod-security-policy.md b/content/ko/docs/concepts/policy/pod-security-policy.md index 8afee5760b..ff98e134eb 100644 --- a/content/ko/docs/concepts/policy/pod-security-policy.md +++ b/content/ko/docs/concepts/policy/pod-security-policy.md @@ -11,7 +11,8 @@ weight: 30 {{< feature-state for_k8s_version="v1.21" state="deprecated" >}} -파드시큐리티폴리시(PodSecurityPolicy)는 쿠버네티스 v1.21부터 더이상 사용되지 않으며, v1.25에서 제거된다. +파드시큐리티폴리시(PodSecurityPolicy)는 쿠버네티스 v1.21부터 더이상 사용되지 않으며, v1.25에서 제거된다. 사용 중단에 대한 상세 사항은 +[파드시큐리티폴리시 사용 중단: 과거, 현재, 그리고 미래](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/)를 참조한다. 
파드 시큐리티 폴리시를 사용하면 파드 생성 및 업데이트에 대한 세분화된 권한을 부여할 수 있다. @@ -48,10 +49,9 @@ _Pod Security Policy_ 는 파드 명세의 보안 관련 측면을 제어하는 ## 파드 시큐리티 폴리시 활성화 -파드 시큐리티 폴리시 제어는 선택 사항(하지만 권장함)인 -[어드미션 -컨트롤러](/docs/reference/access-authn-authz/admission-controllers/#podsecuritypolicy)로 -구현된다. [어드미션 컨트롤러 활성화](/docs/reference/access-authn-authz/admission-controllers/#how-do-i-turn-on-an-admission-control-plug-in)하면 +파드 시큐리티 폴리시 제어는 선택 사항인 [어드미션 +컨트롤러](/docs/reference/access-authn-authz/admission-controllers/#podsecuritypolicy)로 구현된다. +[어드미션 컨트롤러를 활성화](/docs/reference/access-authn-authz/admission-controllers/#how-do-i-turn-on-an-admission-control-plug-in)하면 파드시큐리티폴리시가 적용되지만, 정책을 승인하지 않고 활성화하면 클러스터에 **파드가 생성되지 않는다.** @@ -110,11 +110,15 @@ roleRef: name: apiGroup: rbac.authorization.k8s.io subjects: -# Authorize specific service accounts: +# 네임스페이스의 모든 서비스 어카운트 승인(권장): +- kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:serviceaccounts: +# 특정 서비스 어카운트 승인(권장하지 않음): - kind: ServiceAccount name: namespace: -# Authorize specific users (not recommended): +# 특정 사용자 승인(권장하지 않음): - kind: User apiGroup: rbac.authorization.k8s.io name: @@ -124,21 +128,55 @@ subjects: 실행되는 파드에 대해서만 사용 권한을 부여한다. 네임스페이스에서 실행되는 모든 파드에 접근 권한을 부여하기 위해 시스템 그룹과 쌍을 이룰 수 있다. ```yaml -# Authorize all service accounts in a namespace: +# 네임스페이스의 모든 서비스 어카운트 승인: - kind: Group apiGroup: rbac.authorization.k8s.io name: system:serviceaccounts -# Or equivalently, all authenticated users in a namespace: +# 또는 동일하게, 네임스페이스의 모든 승인된 사용자에게 사용 권한 부여 - kind: Group apiGroup: rbac.authorization.k8s.io name: system:authenticated ``` RBAC 바인딩에 대한 자세한 예는, -[역할 바인딩 예제](/docs/reference/access-authn-authz/rbac#role-binding-examples)를 참고하길 바란다. +[역할 바인딩 예제](/docs/reference/access-authn-authz/rbac#role-binding-examples)를 참고한다. 파드시큐리티폴리시 인증에 대한 전체 예제는 -[아래](#예제)를 참고하길 바란다. +[아래](#예제)를 참고한다. +### 추천 예제 + +파드시큐리티폴리시는 새롭고 간결해진 `PodSecurity` {{< glossary_tooltip +text="어드미션 컨트롤러" term_id="admission-controller" >}}로 대체되고 있다. 
+이 변경에 대한 상세사항은 +[파드시큐리티폴리시 사용 중단: 과거, 현재, 그리고 미래](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/)를 참조한다. +다음 가이드라인을 참조하여 파드시큐리티폴리시를 새로운 어드미션 컨트롤러로 쉽게 전환할 수 있다. + +1. 파드시큐리티폴리시를 [파드 보안 표준](/docs/concepts/security/pod-security-standards/)에 의해 정의된 폴리시로 한정한다. + - {{< example file="policy/privileged-psp.yaml" >}}Privileged{{< /example >}} + - {{< example file="policy/baseline-psp.yaml" >}}Baseline{{< /example >}} + - {{< example file="policy/restricted-psp.yaml" >}}Restricted{{< /example >}} + +2. `system:serviceaccounts:` (여기서 ``는 타겟 네임스페이스) 그룹을 사용하여 + 파드시큐리티폴리시를 전체 네임스페이스에만 바인드한다. 예시는 다음과 같다. + + ```yaml + apiVersion: rbac.authorization.k8s.io/v1 + # 이 클러스터롤바인딩(ClusterRoleBinding)을 통해 "development" 네임스페이스의 모든 파드가 기준 파드시큐리티폴리시(PSP)를 사용할 수 있다. + kind: ClusterRoleBinding + metadata: + name: psp-baseline-namespaces + roleRef: + kind: ClusterRole + name: psp-baseline + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: Group + name: system:serviceaccounts:development + apiGroup: rbac.authorization.k8s.io + - kind: Group + name: system:serviceaccounts:canary + apiGroup: rbac.authorization.k8s.io + ``` ### 문제 해결 @@ -464,12 +502,12 @@ podsecuritypolicy "example" deleted 예를 들면 다음과 같습니다. ```yaml -allowedHostPaths: - # 이 정책은 "/foo", "/foo/", "/foo/bar" 등을 허용하지만, - # "/fool", "/etc/foo" 등은 허용하지 않는다. - # "/foo/../" 는 절대 유효하지 않다. - - pathPrefix: "/foo" - readOnly: true # 읽기 전용 마운트만 허용 + allowedHostPaths: + # 이 정책은 "/foo", "/foo/", "/foo/bar" 등을 허용하지만, + # "/fool", "/etc/foo" 등은 허용하지 않는다. + # "/foo/../" 는 절대 유효하지 않다. + - pathPrefix: "/foo" + readOnly: true # 읽기 전용 마운트만 허용 ``` {{< warning >}}호스트 파일시스템에 제한없는 접근을 부여하며, 컨테이너가 특권을 에스컬레이션 @@ -567,7 +605,7 @@ spec: 리눅스 기능은 전통적으로 슈퍼유저와 관련된 권한을 보다 세밀하게 분류한다. 이러한 기능 중 일부는 권한 에스컬레이션 또는 컨테이너 분류에 사용될 수 있으며 파드시큐리티폴리시에 의해 제한될 수 있다. 리눅스 기능에 대한 자세한 내용은 -[기능(7)](http://man7.org/linux/man-pages/man7/capabilities.7.html)을 +[기능(7)](https://man7.org/linux/man-pages/man7/capabilities.7.html)을 참고하길 바란다. 
다음 필드는 대문자로 표기된 기능 이름 목록을 @@ -661,5 +699,10 @@ spec: ## {{% heading "whatsnext" %}} +- [파드시큐리티폴리시 사용 중단: 과거, 현재, 그리고 +미래](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/)에서 +파드시큐리티폴리시의 미래에 대해 알아본다. + - 폴리시 권장 사항에 대해서는 [파드 보안 표준](/docs/concepts/security/pod-security-standards/)을 참조한다. + - API 세부 정보는 [파드 시큐리티 폴리시 레퍼런스](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritypolicy-v1beta1-policy) 참조한다. diff --git a/content/ko/docs/concepts/policy/resource-quotas.md b/content/ko/docs/concepts/policy/resource-quotas.md index 8e1d918ef4..b5254e4300 100644 --- a/content/ko/docs/concepts/policy/resource-quotas.md +++ b/content/ko/docs/concepts/policy/resource-quotas.md @@ -58,7 +58,8 @@ weight: 20 ## 리소스 쿼터 활성화 많은 쿠버네티스 배포판에 기본적으로 리소스 쿼터 지원이 활성화되어 있다. -{{< glossary_tooltip text="API 서버" term_id="kube-apiserver" >}} `--enable-admission-plugins=` 플래그의 인수 중 하나로 +{{< glossary_tooltip text="API 서버" term_id="kube-apiserver" >}} +`--enable-admission-plugins=` 플래그의 인수 중 하나로 `ResourceQuota`가 있는 경우 활성화된다. 해당 네임스페이스에 리소스쿼터가 있는 경우 특정 네임스페이스에 @@ -66,7 +67,9 @@ weight: 20 ## 컴퓨트 리소스 쿼터 -지정된 네임스페이스에서 요청할 수 있는 총 [컴퓨트 리소스](/ko/docs/concepts/configuration/manage-resources-containers/) 합을 제한할 수 있다. +지정된 네임스페이스에서 요청할 수 있는 총 +[컴퓨트 리소스](/ko/docs/concepts/configuration/manage-resources-containers/) +합을 제한할 수 있다. 다음과 같은 리소스 유형이 지원된다. @@ -125,7 +128,9 @@ GPU 리소스를 다음과 같이 쿼터를 정의할 수 있다. | `ephemeral-storage` | `requests.ephemeral-storage` 와 같음. | {{< note >}} -CRI 컨테이너 런타임을 사용할 때, 컨테이너 로그는 임시 스토리지 쿼터에 포함된다. 이로 인해 스토리지 쿼터를 소진한 파드가 예기치 않게 축출될 수 있다. 자세한 내용은 [로깅 아키텍처](/ko/docs/concepts/cluster-administration/logging/)를 참조한다. +CRI 컨테이너 런타임을 사용할 때, 컨테이너 로그는 임시 스토리지 쿼터에 포함된다. +이로 인해 스토리지 쿼터를 소진한 파드가 예기치 않게 축출될 수 있다. +자세한 내용은 [로깅 아키텍처](/ko/docs/concepts/cluster-administration/logging/)를 참조한다. 
{{< /note >}} ## 오브젝트 수 쿼터 @@ -192,7 +197,7 @@ CRI 컨테이너 런타임을 사용할 때, 컨테이너 로그는 임시 스 | `NotTerminating` | `.spec.activeDeadlineSeconds is nil`에 일치하는 파드 | | `BestEffort` | 최상의 서비스 품질을 제공하는 파드 | | `NotBestEffort` | 서비스 품질이 나쁜 파드 | -| `PriorityClass` | 지정된 [프라이어리티 클래스](/ko/docs/concepts/configuration/pod-priority-preemption)를 참조하여 일치하는 파드. | +| `PriorityClass` | 지정된 [프라이어리티클래스](/ko/docs/concepts/scheduling-eviction/pod-priority-preemption/)를 참조하여 일치하는 파드. | | `CrossNamespacePodAffinity` | 크로스-네임스페이스 파드 [(안티)어피니티 용어]가 있는 파드 | `BestEffort` 범위는 다음의 리소스를 추적하도록 쿼터를 제한한다. @@ -248,13 +253,14 @@ CRI 컨테이너 런타임을 사용할 때, 컨테이너 로그는 임시 스 {{< feature-state for_k8s_version="v1.17" state="stable" >}} -특정 [우선 순위](/ko/docs/concepts/configuration/pod-priority-preemption/#파드-우선순위)로 파드를 생성할 수 있다. +특정 [우선 순위](/ko/docs/concepts/scheduling-eviction/pod-priority-preemption/#파드-우선순위)로 파드를 생성할 수 있다. 쿼터 스펙의 `scopeSelector` 필드를 사용하여 파드의 우선 순위에 따라 파드의 시스템 리소스 사용을 제어할 수 있다. 쿼터 스펙의 `scopeSelector`가 파드를 선택한 경우에만 쿼터가 일치하고 사용된다. -`scopeSelector` 필드를 사용하여 우선 순위 클래스의 쿼터 범위를 지정하면, 쿼터 오브젝트는 다음의 리소스만 추적하도록 제한된다. +`scopeSelector` 필드를 사용하여 우선 순위 클래스의 쿼터 범위를 지정하면, +쿼터 오브젝트는 다음의 리소스만 추적하도록 제한된다. 
* `pods` * `cpu` @@ -554,7 +560,7 @@ kubectl create -f ./object-counts.yaml --namespace=myspace kubectl get quota --namespace=myspace ``` -``` +```none NAME AGE compute-resources 30s object-counts 32s @@ -564,7 +570,7 @@ object-counts 32s kubectl describe quota compute-resources --namespace=myspace ``` -``` +```none Name: compute-resources Namespace: myspace Resource Used Hard @@ -580,7 +586,7 @@ requests.nvidia.com/gpu 0 4 kubectl describe quota object-counts --namespace=myspace ``` -``` +```none Name: object-counts Namespace: myspace Resource Used Hard @@ -677,10 +683,10 @@ plugins: {{< codenew file="policy/priority-class-resourcequota.yaml" >}} ```shell -$ kubectl apply -f https://k8s.io/examples/policy/priority-class-resourcequota.yaml -n kube-system +kubectl apply -f https://k8s.io/examples/policy/priority-class-resourcequota.yaml -n kube-system ``` -``` +```none resourcequota/pods-cluster-services created ``` diff --git a/content/ko/docs/concepts/scheduling-eviction/_index.md b/content/ko/docs/concepts/scheduling-eviction/_index.md index 5ae3f5822e..7128dbe99f 100644 --- a/content/ko/docs/concepts/scheduling-eviction/_index.md +++ b/content/ko/docs/concepts/scheduling-eviction/_index.md @@ -32,6 +32,6 @@ no_list: true {{}} -* [파드 우선순위와 선점](/docs/concepts/scheduling-eviction/pod-priority-preemption/) +* [파드 우선순위와 선점](/ko/docs/concepts/scheduling-eviction/pod-priority-preemption/) * [노드-압박 축출](/docs/concepts/scheduling-eviction/node-pressure-eviction/) -* [API를 이용한 축출](/docs/concepts/scheduling-eviction/api-eviction/) +* [API를 이용한 축출](/ko/docs/concepts/scheduling-eviction/api-eviction/) diff --git a/content/ko/docs/concepts/scheduling-eviction/api-eviction.md b/content/ko/docs/concepts/scheduling-eviction/api-eviction.md new file mode 100644 index 0000000000..53724320b0 --- /dev/null +++ b/content/ko/docs/concepts/scheduling-eviction/api-eviction.md @@ -0,0 +1,18 @@ +--- +title: API를 이용한 축출(Eviction) +content_type: concept +weight: 70 +--- + +{{< 
glossary_definition term_id="api-eviction" length="short" >}}
    + +`kubectl drain` 명령과 같은 kube-apiserver의 클라이언트를 사용하여, +축출 API를 직접 호출해 축출 요청을 할 수 있다. +그러면 API 서버가 파드를 종료하는 `Eviction` 오브젝트가 생성된다. + +API를 이용한 축출은 구성된 [`PodDisruptionBudgets`](/docs/tasks/run-application/configure-pdb/) 및 [`terminationGracePeriodSeconds`](/ko/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)를 준수한다. + +## {{% heading "whatsnext" %}} + +- [노드-압박 축출](/docs/concepts/scheduling-eviction/node-pressure-eviction/)에 대해 더 배우기 +- [파드 우선순위와 선점](/ko/docs/concepts/scheduling-eviction/pod-priority-preemption/)에 대해 더 배우기 diff --git a/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md index da30c01ab2..f46e075b57 100644 --- a/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -72,7 +72,7 @@ spec: ## 넘어가기 전에: 내장 노드 레이블들 {#built-in-node-labels} [붙인](#1-단계-노드에-레이블-붙이기) 레이블뿐만 아니라, 노드에는 -표준 레이블 셋이 미리 채워져 있다. 이들 목록은 [잘 알려진 레이블, 어노테이션 및 테인트](/docs/reference/labels-annotations-taints/)를 참고한다. +표준 레이블 셋이 미리 채워져 있다. 이들 목록은 [잘 알려진 레이블, 어노테이션 및 테인트](/ko/docs/reference/labels-annotations-taints/)를 참고한다. {{< note >}} 이 레이블들의 값은 클라우드 공급자에 따라 다르고 신뢰성이 보장되지 않는다. @@ -402,7 +402,7 @@ web-server-1287567482-s330j 1/1 Running 0 7m 10.192.3 `nodeName` 은 PodSpec의 필드이다. 만약 비어있지 않으면, 스케줄러는 파드를 무시하고 명명된 노드에서 실행 중인 kubelet이 파드를 실행하려고 한다. 따라서 만약 PodSpec에 `nodeName` 가 -제공된 경우, 노드 선텍을 위해 위의 방법보다 우선한다. +제공된 경우, 노드 선택을 위해 위의 방법보다 우선한다. `nodeName` 을 사용해서 노드를 선택할 때의 몇 가지 제한은 다음과 같다. diff --git a/content/ko/docs/concepts/scheduling-eviction/node-pressure-eviction.md b/content/ko/docs/concepts/scheduling-eviction/node-pressure-eviction.md new file mode 100644 index 0000000000..a3330a1b0d --- /dev/null +++ b/content/ko/docs/concepts/scheduling-eviction/node-pressure-eviction.md @@ -0,0 +1,411 @@ +--- +title: 노드-압박 축출 +content_type: concept +weight: 60 +--- + +{{}}
    + +{{}}은 +클러스터 노드의 CPU, 메모리, 디스크 공간, 파일시스템 inode와 같은 자원을 모니터링한다. +이러한 자원 중 하나 이상이 특정 소모 수준에 도달하면, +kubelet은 하나 이상의 파드를 능동적으로 중단시켜 +자원을 회수하고 고갈 상황을 방지할 수 있다. + +노드-압박 축출 과정에서, kubelet은 축출할 파드의 `PodPhase`를 +`Failed`로 설정한다. 이로써 파드가 종료된다. + +노드-압박 축출은 +[API를 이용한 축출](/ko/docs/concepts/scheduling-eviction/api-eviction/)과는 차이가 있다. + +kubelet은 이전에 설정된 `PodDisruptionBudget` 값이나 파드의 `terminationGracePeriodSeconds` 값을 따르지 않는다. +[소프트 축출 임계값](#soft-eviction-thresholds)을 사용하는 경우, +kubelet은 이전에 설정된 `eviction-max-pod-grace-period` 값을 따른다. +[하드 축출 임계값](#hard-eviction-thresholds)을 사용하는 경우, 파드 종료 시 `0s` 만큼 기다린 후 종료한다(즉, 기다리지 않고 바로 종료한다). + +실패한 파드를 새로운 파드로 교체하는 +{{< glossary_tooltip text="워크로드" term_id="workload" >}} 리소스(예: +{{< glossary_tooltip text="스테이트풀셋(StatefulSet)" term_id="statefulset" >}} 또는 +{{< glossary_tooltip text="디플로이먼트(Deployment)" term_id="deployment" >}})가 파드를 관리하는 경우, +컨트롤 플레인이나 `kube-controller-manager`가 축출된 파드를 대신할 새 파드를 생성한다. + +{{}} +kubelet은 최종 사용자 파드를 종료하기 전에 +먼저 [노드 수준 자원을 회수](#reclaim-node-resources)하려고 시도한다. +예를 들어, 디스크 자원이 부족하면 먼저 사용하지 않는 컨테이너 이미지를 제거한다. +{{}} + +kubelet은 축출 결정을 내리기 위해 다음과 같은 다양한 파라미터를 사용한다. + + * 축출 신호 + * 축출 임계값 + * 모니터링 간격 + +### 축출 신호 {#eviction-signals} + +축출 신호는 특정 시점에서 특정 자원의 현재 상태이다. +Kubelet은 노드에서 사용할 수 있는 리소스의 최소량인 +축출 임계값과 축출 신호를 비교하여 +축출 결정을 내린다. + +Kubelet은 다음과 같은 축출 신호를 사용한다. 
+ +| 축출 신호 | 설명 | +|----------------------|---------------------------------------------------------------------------------------| +| `memory.available` | `memory.available` := `node.status.capacity[memory]` - `node.stats.memory.workingSet` | +| `nodefs.available` | `nodefs.available` := `node.stats.fs.available` | +| `nodefs.inodesFree` | `nodefs.inodesFree` := `node.stats.fs.inodesFree` | +| `imagefs.available` | `imagefs.available` := `node.stats.runtime.imagefs.available` | +| `imagefs.inodesFree` | `imagefs.inodesFree` := `node.stats.runtime.imagefs.inodesFree` | +| `pid.available` | `pid.available` := `node.stats.rlimit.maxpid` - `node.stats.rlimit.curproc` | + +이 표에서, `설명` 열은 kubelet이 축출 신호 값을 계산하는 방법을 나타낸다. +각 축출 신호는 백분율 또는 숫자값을 지원한다. +Kubelet은 총 용량 대비 축출 신호의 백분율 값을 +계산한다. + +`memory.available` 값은 `free -m`과 같은 도구가 아니라 cgroupfs로부터 도출된다. +이는 `free -m`이 컨테이너 안에서는 동작하지 않고, 또한 사용자가 +[node allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) +기능을 사용하는 경우 자원 부족에 대한 결정은 루트 노드뿐만 아니라 +cgroup 계층 구조의 최종 사용자 파드 부분에서도 지역적으로 이루어지기 때문에 중요하다. +이 [스크립트](/examples/admin/resource/memory-available.sh)는 +kubelet이 `memory.available`을 계산하기 위해 수행하는 동일한 단계들을 재현한다. +kubelet은 메모리 압박 상황에서 메모리가 회수 가능하다고 가정하므로, +inactive_file(즉, 비활성 LRU 목록의 파일 기반 메모리 바이트 수)을 +계산에서 제외한다. + +kubelet은 다음과 같은 파일시스템 파티션을 지원한다. + +1. `nodefs`: 노드의 메인 파일시스템이며, 로컬 디스크 볼륨, emptyDir, + 로그 스토리지 등에 사용된다. 예를 들어 `nodefs`는 `/var/lib/kubelet/`을 포함한다. +1. `imagefs`: 컨테이너 런타임이 컨테이너 이미지 및 + 컨테이너 쓰기 가능 레이어를 저장하는 데 사용하는 선택적 파일시스템이다. + +Kubelet은 이러한 파일시스템을 자동으로 검색하고 다른 파일시스템은 무시한다. +Kubelet은 다른 구성은 지원하지 않는다. + +{{}} +일부 kubelet 가비지 수집 기능은 더 이상 사용되지 않으며 축출로 대체되었다. +사용 중지된 기능의 목록은 [kubelet 가비지 수집 사용 중단](/ko/docs/concepts/cluster-administration/kubelet-garbage-collection/#사용-중단-deprecation)을 참조한다. +{{}} + +### 축출 임계값 + +kubelet이 축출 결정을 내릴 때 사용하는 축출 임계값을 +사용자가 임의로 설정할 수 있다. + +축출 임계값은 `[eviction-signal][operator][quantity]` 형태를 갖는다. 
+ +* `eviction-signal`에는 사용할 [축출 신호](#eviction-signals)를 적는다. +* `operator`에는 [관계연산자](https://ko.wikipedia.org/wiki/관계연산자#표준_관계연산자)를 + 적는다(예: `<` - 미만) +* `quantity`에는 `1Gi`와 같이 축출 임계값 수치를 적는다. + `quantity`에 들어가는 값은 쿠버네티스가 사용하는 수치 표현 방식과 맞아야 한다. + 숫자값 또는 백분율(`%`)을 사용할 수 있다. + +예를 들어, 노드에 총 `10Gi`의 메모리가 있고 +`1Gi` 아래로 내려갔을 때 축출이 시작되도록 만들고 싶으면, 축출 임계값을 +`memory.available<10%` 또는 `memory.available<1Gi` 형태로 정할 수 있다. 둘을 동시에 사용할 수는 없다. + +소프트 축출 임계값과 하드 축출 임계값을 설정할 수 있다. + +#### 소프트 축출 임계값 {#soft-eviction-thresholds} + +소프트 축출 임계값은 관리자가 설정하는 유예 시간(필수)과 함께 정의된다. +kubelet은 유예 시간이 초과될 때까지 파드를 제거하지 않는다. +유예 시간이 지정되지 않으면 kubelet 시작 시 +오류가 반환된다. + +kubelet이 축출 과정에서 사용할 수 있도록, +'소프트 축출 임계값'과 '최대 허용 파드 종료 유예 시간' 둘 다를 설정할 수 있다. +'최대 허용 파드 종료 유예 시간'이 설정되어 있는 상태에서 '소프트 축출 임계값'에 도달하면, +kubelet은 두 유예 시간 중 작은 쪽을 적용한다. +'최대 허용 파드 종료 유예 시간'을 설정하지 않으면, +kubelet은 축출된 파드를 유예 시간 없이 즉시 종료한다. + +소프트 축출 임계값을 설정할 때 다음과 같은 플래그를 사용할 수 있다. + +* `eviction-soft`: 축출 임계값(예: `memory.available<1.5Gi`)의 집합이며, + 지정된 유예 시간동안 이 축출 임계값 조건이 충족되면 파드 축출이 트리거된다. +* `eviction-soft-grace-period`: 축출 유예 시간의 집합이며, + 소프트 축출 임계값 조건이 이 유예 시간동안 충족되면 파드 축출이 트리거된다. +* `eviction-max-pod-grace-period`: '최대 허용 파드 종료 유예 시간(단위: 초)'이며, + 소프트 축출 임계값 조건이 충족되어 파드를 종료할 때 사용한다. + +#### 하드 축출 임계값 {#hard-eviction-thresholds} + +하드 축출 임계값에는 유예 시간이 없다. 하드 축출 임계값 조건이 충족되면, +kubelet은 고갈된 자원을 회수하기 위해 파드를 유예 시간 없이 +즉시 종료한다. + +`eviction-hard` 플래그를 사용하여 하드 축출 +임계값(예: `memory.available<1Gi`)을 설정할 수 있다. + +kubelet은 다음과 같은 하드 축출 임계값을 기본적으로 설정하고 있다. + +* `memory.available<100Mi` +* `nodefs.available<10%` +* `imagefs.available<15%` +* `nodefs.inodesFree<5%` (리눅스 노드) + +### 축출 모니터링 시간 간격 + +kubelet은 `housekeeping-interval`에 설정된 시간 간격(기본값: `10s`)마다 +축출 임계값을 확인한다. + +### 노드 컨디션 {#node-conditions} + +kubelet은 하드/소프트 축출 임계값 조건이 충족되어 +노드 압박이 발생했다는 것을 알리기 위해, +설정된 유예 시간과는 관계없이 노드 컨디션을 보고한다. + +kubelet은 다음과 같이 노드 컨디션과 축출 신호를 매핑한다. 
+ +| 노드 컨디션 | 축출 신호 | 설명 | +|-------------------|---------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------| +| `MemoryPressure` | `memory.available` | 노드의 가용 메모리 양이 축출 임계값에 도달함 | +| `DiskPressure` | `nodefs.available`, `nodefs.inodesFree`, `imagefs.available`, 또는 `imagefs.inodesFree` | 노드의 루트 파일시스템 또는 이미지 파일시스템의 가용 디스크 공간 또는 inode의 수가 축출 임계값에 도달함 | +| `PIDPressure` | `pid.available` | (리눅스) 노드의 가용 프로세스 ID(PID)가 축출 임계값 이하로 내려옴 | + +kubelet은 `--node-status-update-frequency`에 설정된 +시간 간격(기본값: `10s`)마다 노드 컨디션을 업데이트한다. + +#### 노드 컨디션 진동(oscillation) + +경우에 따라, 노드의 축출 신호값이 사전에 설정된 유예 시간 동안 유지되지 않고 +소프트 축출 임계값을 중심으로 진동할 수 있다. 이로 인해 노드 컨디션이 계속 +`true`와 `false`로 바뀌며, 잘못된 축출 결정을 야기할 수 있다. + +이러한 진동을 방지하기 위해, `eviction-pressure-transition-period` 플래그를 +사용하여 kubelet이 노드 컨디션을 다른 상태로 바꾸기 위해 기다려야 하는 시간을 +설정할 수 있다. 기본값은 `5m`이다. + +### 노드-수준 자원 회수하기 {#reclaim-node-resources} + +kubelet은 최종 사용자 파드를 축출하기 전에 노드-수준 자원 회수를 시도한다. + +`DiskPressure` 노드 컨디션이 보고되면, +kubelet은 노드의 파일시스템을 기반으로 노드-수준 자원을 회수한다. + +#### `imagefs`가 있는 경우 + +컨테이너 런타임이 사용할 전용 `imagefs` 파일시스템이 노드에 있으면, +kubelet은 다음 작업을 수행한다. + + * `nodefs` 파일시스템이 축출 임계값 조건을 충족하면, + kubelet은 종료된 파드와 컨테이너에 대해 가비지 수집을 수행한다. + * `imagefs` 파일시스템이 축출 임계값 조건을 충족하면, + kubelet은 모든 사용중이지 않은 이미지를 삭제한다. + +#### `imagefs`가 없는 경우 + +노드에 `nodefs` 파일시스템만 있고 이것이 축출 임계값 조건을 충족한 경우, +kubelet은 다음 순서로 디스크 공간을 확보한다. + +1. 종료된 파드와 컨테이너에 대해 가비지 수집을 수행한다. +1. 사용중이지 않은 이미지를 삭제한다. + +### kubelet 축출을 위한 파드 선택 + +kubelet이 노드-수준 자원을 회수했음에도 축출 신호가 임계값 아래로 내려가지 않으면, +kubelet은 최종 사용자 파드 축출을 시작한다. + +kubelet은 파드 축출 순서를 결정하기 위해 다음의 파라미터를 활용한다. + +1. 파드의 자원 사용량이 요청량을 초과했는지 여부 +1. [파드 우선순위](/ko/docs/concepts/scheduling-eviction/pod-priority-preemption/) +1. 파드의 자원 요청량 대비 자원 사용량 + +결과적으로, kubelet은 다음과 같은 순서로 파드의 축출 순서를 정하고 축출을 수행한다. + +1. `BestEffort` 또는 `Burstable` 파드 중 자원 사용량이 요청량을 초과한 파드. 
+ 이 파드들은 파드들의 우선순위, 그리고 자원 사용량이 요청량을
+ 얼마나 초과했는지에 따라 축출된다.
+1. `Guaranteed`, `Burstable` 파드 중 자원 사용량이 요청량보다 낮은 파드는
+ 우선순위에 따라 후순위로 축출된다.
+
+{{< note >}}
+kubelet이 파드 축출 순서를 결정할 때 파드의 QoS 클래스는 이용하지 않는다.
+메모리 등의 자원을 회수할 때, QoS 클래스를 이용하여 가장 가능성이 높은 파드 축출 순서를 예측할 수는 있다.
+QoS는 EphemeralStorage 요청에 적용되지 않으므로,
+노드가 예를 들어 `DiskPressure` 아래에 있는 경우 위의 시나리오가 적용되지 않는다.
+{{< /note >}}
+
+`Guaranteed` 파드는 모든 컨테이너에 대해 자원 요청량과 제한이 명시되고
+그 둘이 동일할 때에만 보장(guaranteed)된다. 다른 파드의 자원 사용으로 인해
+`Guaranteed` 파드가 축출되는 일은 발생하지 않는다. 만약 시스템 데몬(예:
+`kubelet`, `docker`, `journald`)이 `system-reserved` 또는 `kube-reserved`
+할당을 통해 예약된 것보다 더 많은 자원을 소비하고, 노드에는 요청량보다 적은 양의
+자원을 사용하고 있는 `Guaranteed` / `Burstable` 파드만 존재한다면,
+kubelet은 노드 안정성을 유지하고 자원 고갈이 다른 파드에 미칠 영향을 통제하기 위해
+이러한 파드 중 하나를 골라 축출해야 한다.
+이 경우, 가장 낮은 `Priority`를 갖는 파드가 선택된다.
+
+`inodes`와 `PIDs`에 대한 요청량은 정의하고 있지 않기 때문에, kubelet이 `inode`
+또는 `PID` 고갈 때문에 파드를 축출할 때에는 파드의 `Priority`를 이용하여 축출
+순위를 정한다.
+
+노드에 전용 `imagefs` 파일시스템이 있는지 여부에 따라 kubelet이 파드 축출 순서를
+정하는 방식에 차이가 있다.
+
+#### `imagefs`가 있는 경우
+
+`nodefs`로 인한 축출의 경우, kubelet은 `nodefs`
+사용량(`모든 컨테이너의 로컬 볼륨 + 로그`)을 기준으로 축출 순서를 정한다.
+
+`imagefs`로 인한 축출의 경우, kubelet은 모든 컨테이너의
+쓰기 가능한 레이어(writable layer) 사용량을 기준으로 축출 순서를 정한다.
+
+#### `imagefs`가 없는 경우
+
+`nodefs`로 인한 축출의 경우, kubelet은 각 파드의 총
+디스크 사용량(`모든 컨테이너의 로컬 볼륨 + 로그 + 쓰기 가능한 레이어`)을 기준으로 축출 순서를 정한다.
+
+### 최소 축출 회수량
+
+경우에 따라, 파드를 축출했음에도 적은 양의 자원만이 회수될 수 있다.
+이로 인해 kubelet이 반복적으로 축출 임계값 도달을 감지하고
+여러 번의 축출을 수행할 수 있다.
+
+`--eviction-minimum-reclaim` 플래그 또는
+[kubelet 설정 파일](/docs/tasks/administer-cluster/kubelet-config-file/)을 이용하여
+각 자원에 대한 최소 회수량을 설정할 수 있다. kubelet이 자원 부족 상황을 감지하면,
+앞서 설정한 최소 회수량에 도달할때까지 회수를 계속 진행한다.
+
+예를 들어, 다음 YAML은 최소 회수량을 정의하고 있다.
+
+```yaml
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+evictionHard:
+ memory.available: "500Mi"
+ nodefs.available: "1Gi"
+ imagefs.available: "100Gi"
+evictionMinimumReclaim:
+ memory.available: "0Mi"
+ nodefs.available: "500Mi"
+ imagefs.available: "2Gi"
+```
+
+이 예제에서, 만약 `nodefs.available` 축출 신호가 축출 임계값 조건에 도달하면,
+kubelet은 축출 신호가 임계값인 `1Gi`에 도달할 때까지 자원을 회수하며,
+이어서 축출 신호가 `1.5Gi`에 도달할 때까지 최소 `500Mi` 이상의 자원을
+회수한다.
+
+유사한 방식으로, kubelet은 `imagefs.available` 축출 신호가
+`102Gi`에 도달할 때까지 `imagefs` 자원을 회수한다.
+
+모든 자원에 대해 `eviction-minimum-reclaim`의 기본값은 `0`이다.
+
+### 노드 메모리 부족 시의 동작
+
+kubelet의 메모리 회수가 가능하기 이전에
+노드에 메모리 부족(out of memory, 이하 OOM) 이벤트가 발생하면,
+노드는 [oom_killer](https://lwn.net/Articles/391222/)에 의존한다.
+
+kubelet은 각 파드에 설정된 QoS를 기반으로 각 컨테이너에 `oom_score_adj` 값을 설정한다.
+
+| 서비스 품질(Quality of Service) | oom_score_adj |
+|--------------------|-----------------------------------------------------------------------------------|
+| `Guaranteed` | -997 |
+| `BestEffort` | 1000 |
+| `Burstable` | min(max(2, 1000 - (1000 * memoryRequestBytes) / machineMemoryCapacityBytes), 999) |
+
+{{< note >}}
+또한, kubelet은 `system-node-critical` {{< glossary_tooltip text="우선순위(priority)" term_id="pod-priority" >}}를 갖는 파드의 컨테이너에
+`oom_score_adj` 값을 `-997`로 설정한다.
+{{< /note >}}
+
+노드가 OOM을 겪기 전에 kubelet이 메모리를 회수하지 못하면, `oom_killer`가 노드의
+메모리 사용률 백분율을 이용하여 `oom_score`를 계산하고, 각 컨테이너의 실질
+`oom_score`를 구하기 위해 `oom_score_adj`를 더한다. 그 뒤 `oom_score`가 가장 높은
+컨테이너부터 종료시킨다.
+
+이는 곧, 스케줄링 요청에 비해 많은 양의 메모리를 사용하면서
+QoS가 낮은 파드에 속한 컨테이너가 먼저 종료됨을 의미한다.
+
+파드 축출과 달리, 컨테이너가 OOM으로 인해 종료되면,
+`kubelet`이 컨테이너의 `RestartPolicy`를 기반으로 컨테이너를 다시 실행할 수 있다.
+
+### 추천 예시 {#node-pressure-eviction-good-practices}
+
+아래 섹션에서 축출 설정에 대한 추천 예시를 소개한다.
+
+#### 스케줄 가능한 자원과 축출 정책
+
+kubelet에 축출 정책을 설정할 때, 만약 어떤 파드 배치가 즉시 메모리 압박을
+야기하기 때문에 축출을 유발한다면 스케줄러가 그 파드 배치를 수행하지 않도록
+설정해야 한다.
+
+다음 시나리오를 가정한다.
+
+* 노드 메모리 용량: `10Gi`
+* 운영자는 시스템 데몬(커널, `kubelet` 등)을 위해 메모리 용량의 10%를 확보해 놓고 싶어 한다.
+* 운영자는 시스템 OOM 발생을 줄이기 위해 메모리 사용률이 95%인 상황에서 파드를 축출하고 싶어한다.
+ +이것이 실현되도록, kubelet이 다음과 같이 실행된다. + +``` +--eviction-hard=memory.available<500Mi +--system-reserved=memory=1.5Gi +``` + +이 환경 설정에서, `--system-reserved` 플래그는 시스템 용으로 `1.5Gi` 메모리를 +확보하는데, 이는 `총 메모리의 10% + 축출 임계값`에 해당된다. + +파드가 요청량보다 많은 메모리를 사용하거나 시스템이 `1Gi` 이상의 메모리를 +사용하여, `memory.available` 축출 신호가 `500Mi` 아래로 내려가면 노드가 축출 +임계값에 도달할 수 있다. + +#### 데몬셋(DaemonSet) + +파드 우선 순위(Priority)는 파드 축출 결정을 내릴 때의 주요 요소이다. +kubelet이 `DaemonSet`에 속하는 파드를 축출하지 않도록 하려면 +해당 파드의 파드 스펙에 충분히 높은 `priorityClass`를 지정한다. +또는 낮은 `priorityClass`나 기본값을 사용하여 +리소스가 충분할 때만 `DaemonSet` 파드가 실행되도록 허용할 수도 있다. + +### 알려진 이슈 + +다음 섹션에서는 리소스 부족 처리와 관련된 알려진 이슈에 대해 다룬다. + +#### kubelet이 메모리 압박을 즉시 감지하지 못할 수 있음 + +기본적으로 kubelet은 `cAdvisor`를 폴링하여 +일정한 간격으로 메모리 사용량 통계를 수집한다. +해당 타임 윈도우 내에서 메모리 사용량이 빠르게 증가하면 kubelet이 +`MemoryPressure`를 충분히 빠르게 감지하지 못해 `OOMKiller`가 계속 호출될 수 있다. + +`--kernel-memcg-notification` 플래그를 사용하여 +kubelet의 `memcg` 알림 API가 임계값을 초과할 때 즉시 알림을 받도록 +할 수 있다. + +사용률(utilization)을 극단적으로 높이려는 것이 아니라 오버커밋(overcommit)에 대한 합리적인 조치만 원하는 경우, +이 문제에 대한 현실적인 해결 방법은 `--kube-reserved` 및 +`--system-reserved` 플래그를 사용하여 시스템에 메모리를 할당하는 것이다. + +#### `active_file` 메모리가 사용 가능한 메모리로 간주되지 않음 + +리눅스에서, 커널은 활성 LRU 목록의 파일 지원 메모리 바이트 수를 `active_file` +통계로 추적한다. kubelet은 `active_file` 메모리 영역을 회수할 수 없는 것으로 +취급한다. 임시 로컬 스토리지를 포함하여 블록 지원 로컬 스토리지를 집중적으로 +사용하는 워크로드의 경우 파일 및 블록 데이터의 커널 수준 캐시는 최근에 액세스한 +많은 캐시 페이지가 `active_file`로 계산될 가능성이 있음을 의미한다. 활성 LRU +목록에 이러한 커널 블록 버퍼가 충분히 많으면, kubelet은 이를 높은 자원 사용 +상태로 간주하고 노드가 메모리 압박을 겪고 있다고 테인트를 표시할 수 있으며, 이는 +파드 축출을 유발한다. + +더 자세한 사항은 [https://github.com/kubernetes/kubernetes/issues/43916](https://github.com/kubernetes/kubernetes/issues/43916)를 참고한다. + +집중적인 I/O 작업을 수행할 가능성이 있는 컨테이너에 대해 메모리 제한량 및 메모리 +요청량을 동일하게 설정하여 이 문제를 해결할 수 있다. 해당 컨테이너에 대한 최적의 +메모리 제한량을 추정하거나 측정해야 한다. + +## {{% heading "whatsnext" %}} + +* [API를 이용한 축출](/ko/docs/concepts/scheduling-eviction/api-eviction/)에 대해 알아본다. +* [파드 우선순위와 선점](/ko/docs/concepts/scheduling-eviction/pod-priority-preemption/)에 대해 알아본다. 
+* [PodDisruptionBudgets](/docs/tasks/run-application/configure-pdb/)에 대해 알아본다. +* [서비스 품질](/ko/docs/tasks/configure-pod-container/quality-service-pod/)(QoS)에 대해 알아본다. +* [축출 API](/docs/reference/generated/kubernetes-api/{{}}/#create-eviction-pod-v1-core)를 확인한다. diff --git a/content/ko/docs/concepts/scheduling-eviction/pod-priority-preemption.md b/content/ko/docs/concepts/scheduling-eviction/pod-priority-preemption.md index 581525d833..f149290882 100644 --- a/content/ko/docs/concepts/scheduling-eviction/pod-priority-preemption.md +++ b/content/ko/docs/concepts/scheduling-eviction/pod-priority-preemption.md @@ -25,7 +25,7 @@ weight: 70 관리자는 리소스쿼터를 사용하여 사용자가 우선순위가 높은 파드를 생성하지 못하게 할 수 있다. -자세한 내용은 [기본적으로 프라이어리티 클래스(Priority Class) 소비 제한](/ko/docs/concepts/policy/resource-quotas/#기본적으로-우선-순위-클래스-소비-제한)을 +자세한 내용은 [기본적으로 프라이어리티클래스(Priority Class) 소비 제한](/ko/docs/concepts/policy/resource-quotas/#기본적으로-우선-순위-클래스-소비-제한)을 참고한다. {{< /warning >}} @@ -50,7 +50,7 @@ weight: 70 ## 프라이어리티클래스 -프라이어리티클래스는 프라이어리티 클래스 이름에서 우선순위의 정수 값으로의 매핑을 +프라이어리티클래스는 프라이어리티클래스 이름에서 우선순위의 정수 값으로의 매핑을 정의하는 네임스페이스가 아닌(non-namespaced) 오브젝트이다. 이름은 프라이어리티클래스 오브젝트의 메타데이터의 `name` 필드에 지정된다. 값은 필수 `value` 필드에 지정되어 있다. 값이 클수록, 우선순위가 @@ -96,7 +96,7 @@ metadata: name: high-priority value: 1000000 globalDefault: false -description: "이 프라이어리티 클래스는 XYZ 서비스 파드에만 사용해야 한다." +description: "이 프라이어리티클래스는 XYZ 서비스 파드에만 사용해야 한다." ``` ## 비-선점 프라이어리티클래스 {#non-preempting-priority-class} @@ -142,7 +142,7 @@ metadata: value: 1000000 preemptionPolicy: Never globalDefault: false -description: "이 프라이어리티 클래스는 다른 파드를 축출하지 않는다." +description: "이 프라이어리티클래스는 다른 파드를 축출하지 않는다." ``` ## 파드 우선순위 @@ -150,7 +150,7 @@ description: "이 프라이어리티 클래스는 다른 파드를 축출하지 프라이어리티클래스가 하나 이상 있으면, 그것의 명세에서 이들 프라이어리티클래스 이름 중 하나를 지정하는 파드를 생성할 수 있다. 우선순위 어드미션 컨트롤러는 `priorityClassName` 필드를 사용하고 우선순위의 정수 값을 -채운다. 프라이어리티 클래스를 찾을 수 없으면, 파드가 거부된다. +채운다. 프라이어리티클래스를 찾을 수 없으면, 파드가 거부된다. 다음의 YAML은 이전 예제에서 생성된 프라이어리티클래스를 사용하는 파드 구성의 예이다. 
우선순위 어드미션 컨트롤러는 @@ -351,12 +351,12 @@ spec: 축출 대상으로 고려한다. QoS와 파드 우선순위를 모두 고려하는 유일한 컴포넌트는 -[kubelet 리소스 부족 축출](/docs/tasks/administer-cluster/out-of-resource/)이다. +[kubelet 리소스 부족 축출](/docs/concepts/scheduling-eviction/node-pressure-eviction/)이다. kubelet은 부족한 리소스의 사용이 요청을 초과하는지 여부에 따라, 그런 다음 우선순위에 따라, 파드의 스케줄링 요청에 대한 부족한 컴퓨팅 리소스의 소비에 의해 먼저 축출 대상 파드의 순위를 매긴다. 더 자세한 내용은 -[엔드유저 파드 축출](/docs/tasks/administer-cluster/out-of-resource/#evicting-end-user-pods)을 +[엔드유저 파드 축출](/docs/concepts/scheduling-eviction/node-pressure-eviction/#evicting-end-user-pods)을 참조한다. kubelet 리소스 부족 축출은 사용량이 요청을 초과하지 않는 경우 @@ -367,4 +367,4 @@ kubelet 리소스 부족 축출은 사용량이 요청을 초과하지 않는 ## {{% heading "whatsnext" %}} -* 프라이어리티클래스와 관련하여 리소스쿼터 사용에 대해 [기본적으로 프라이어리티 클래스 소비 제한](/ko/docs/concepts/policy/resource-quotas/#기본적으로-우선-순위-클래스-소비-제한)을 읽어보자. +* 프라이어리티클래스와 관련하여 리소스쿼터 사용에 대해 [기본적으로 프라이어리티클래스 소비 제한](/ko/docs/concepts/policy/resource-quotas/#기본적으로-우선-순위-클래스-소비-제한)을 읽어보자. diff --git a/content/ko/docs/concepts/scheduling-eviction/resource-bin-packing.md b/content/ko/docs/concepts/scheduling-eviction/resource-bin-packing.md index 34ff6f3108..1ac3b81262 100644 --- a/content/ko/docs/concepts/scheduling-eviction/resource-bin-packing.md +++ b/content/ko/docs/concepts/scheduling-eviction/resource-bin-packing.md @@ -26,7 +26,7 @@ kube-scheduler를 미세 조정할 수 있다. 통해 사용자는 적절한 파라미터를 사용해서 확장된 리소스를 빈 팩으로 만들 수 있어 대규모의 클러스터에서 부족한 리소스의 활용도가 향상된다. `RequestedToCapacityRatioResourceAllocation` 우선 순위 기능의 -동작은 `requestedToCapacityRatioArguments`라는 +동작은 `RequestedToCapacityRatioArgs`라는 구성 옵션으로 제어할 수 있다. 이 인수는 `shape`와 `resources` 두 개의 파라미터로 구성된다. `shape` 파라미터는 사용자가 `utilization`과 `score` 값을 기반으로 최소 요청 또는 최대 요청된 대로 기능을 @@ -39,27 +39,29 @@ kube-scheduler를 미세 조정할 수 있다. 설정하는 구성의 예시이다. ```yaml -apiVersion: v1 -kind: Policy +apiVersion: kubescheduler.config.k8s.io/v1beta1 +kind: KubeSchedulerConfiguration +profiles: # ... -priorities: - # ... 
- - name: RequestedToCapacityRatioPriority - weight: 2 - argument: - requestedToCapacityRatioArguments: - shape: - - utilization: 0 - score: 0 - - utilization: 100 - score: 10 - resources: - - name: intel.com/foo - weight: 3 - - name: intel.com/bar - weight: 5 + pluginConfig: + - name: RequestedToCapacityRatio + args: + shape: + - utilization: 0 + score: 10 + - utilization: 100 + score: 0 + resources: + - name: intel.com/foo + weight: 3 + - name: intel.com/bar + weight: 5 ``` +kube-scheduler 플래그 `--config=/path/to/config/file` 을 사용하여 +`KubeSchedulerConfiguration` 파일을 참조하면 구성이 스케줄러에 +전달된다. + **이 기능은 기본적으로 비활성화되어 있다.** ### 우선 순위 기능 튜닝하기 diff --git a/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md b/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md index 588adee0f7..c47f5f995b 100644 --- a/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md +++ b/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md @@ -1,4 +1,8 @@ --- + + + + title: 테인트(Taints)와 톨러레이션(Tolerations) content_type: concept weight: 40 @@ -260,13 +264,27 @@ tolerations: 이렇게 하면 이러한 문제로 인해 데몬셋 파드가 축출되지 않는다. -## 컨디션별 노드 테인트하기 +## 컨디션을 기준으로 노드 테인트하기 -노드 라이프사이클 컨트롤러는 `NoSchedule` 이펙트가 있는 노드 컨디션에 해당하는 -테인트를 자동으로 생성한다. -마찬가지로 스케줄러는 노드 컨디션을 확인하지 않는다. 대신 스케줄러는 테인트를 확인한다. 이렇게 하면 노드 컨디션이 노드에 스케줄된 내용에 영향을 미치지 않는다. 사용자는 적절한 파드 톨러레이션을 추가하여 노드의 일부 문제(노드 컨디션으로 표시)를 무시하도록 선택할 수 있다. +컨트롤 플레인은 노드 {{}}를 이용하여 +[노드 조건](/docs/concepts/scheduling-eviction/node-pressure-eviction/)에 대한 `NoSchedule` 효과를 사용하여 자동으로 테인트를 생성한다. -쿠버네티스 1.8 버전부터 데몬셋 컨트롤러는 다음의 `NoSchedule` 톨러레이션을 +스케줄러는 스케줄링 결정을 내릴 때 노드 조건을 확인하는 것이 아니라 테인트를 확인한다. +이렇게 하면 노드 조건이 스케줄링에 직접적인 영향을 주지 않는다. +예를 들어 `DiskPressure` 노드 조건이 활성화된 경우 +컨트롤 플레인은 `node.kubernetes.io/disk-pressure` 테인트를 추가하고 영향을 받는 노드에 새 파드를 할당하지 않는다. +`MemoryPressure` 노드 조건이 활성화되면 +컨트롤 플레인이 `node.kubernetes.io/memory-pressure` 테인트를 추가한다. + +새로 생성된 파드에 파드 톨러레이션을 추가하여 노드 조건을 무시하도록 할 수 있다. 
+또한 컨트롤 플레인은 `BestEffort` 이외의 +{{< glossary_tooltip text="QoS 클래스" term_id="qos-class" >}}를 가지는 파드에 +`node.kubernetes.io/memory-pressure` 톨러레이션을 추가한다. +이는 쿠버네티스가 `Guaranteed` 또는 `Burstable` QoS 클래스를 갖는 파드(메모리 요청이 설정되지 않은 파드 포함)를 +마치 그 파드들이 메모리 압박에 대처 가능한 것처럼 다루는 반면, +새로운 `BestEffort` 파드는 영향을 받는 노드에 할당하지 않기 때문이다. + +데몬셋 컨트롤러는 다음의 `NoSchedule` 톨러레이션을 모든 데몬에 자동으로 추가하여, 데몬셋이 중단되는 것을 방지한다. * `node.kubernetes.io/memory-pressure` @@ -278,8 +296,7 @@ tolerations: 이러한 톨러레이션을 추가하면 이전 버전과의 호환성이 보장된다. 데몬셋에 임의의 톨러레이션을 추가할 수도 있다. - ## {{% heading "whatsnext" %}} -* [리소스 부족 다루기](/docs/tasks/administer-cluster/out-of-resource/)와 어떻게 구성하는지에 대해 알아보기 -* [파드 우선순위](/ko/docs/concepts/configuration/pod-priority-preemption/)에 대해 알아보기 +* [리소스 부족 다루기](/docs/concepts/scheduling-eviction/node-pressure-eviction/)와 어떻게 구성하는지에 대해 알아보기 +* [파드 우선순위](/ko/docs/concepts/scheduling-eviction/pod-priority-preemption/)에 대해 알아보기 diff --git a/content/ko/docs/concepts/security/overview.md b/content/ko/docs/concepts/security/overview.md index 9cd48a172c..64ed2675b2 100644 --- a/content/ko/docs/concepts/security/overview.md +++ b/content/ko/docs/concepts/security/overview.md @@ -149,7 +149,7 @@ TLS를 통한 접근 | 코드가 TCP를 통해 통신해야 한다면, 미리 * [파드에 대한 네트워크 정책](/ko/docs/concepts/services-networking/network-policies/) * [쿠버네티스 API 접근 제어하기](/ko/docs/concepts/security/controlling-access) * [클러스터 보안](/docs/tasks/administer-cluster/securing-a-cluster/) -* 컨트롤 플레인을 위한 [전송 데이터 암호화](/docs/tasks/tls/managing-tls-in-a-cluster/) +* 컨트롤 플레인을 위한 [전송 데이터 암호화](/ko/docs/tasks/tls/managing-tls-in-a-cluster/) * [Rest에서 데이터 암호화](/docs/tasks/administer-cluster/encrypt-data/) * [쿠버네티스 시크릿](/ko/docs/concepts/configuration/secret/) * [런타임 클래스](/ko/docs/concepts/containers/runtime-class) diff --git a/content/ko/docs/concepts/services-networking/dns-pod-service.md b/content/ko/docs/concepts/services-networking/dns-pod-service.md index e3254d3ba8..b405617118 100644 --- a/content/ko/docs/concepts/services-networking/dns-pod-service.md +++ 
b/content/ko/docs/concepts/services-networking/dns-pod-service.md @@ -7,6 +7,7 @@ content_type: concept weight: 20 --- + 쿠버네티스는 파드와 서비스를 위한 DNS 레코드를 생성한다. 사용자는 IP 주소 대신에 일관된 DNS 네임을 통해서 서비스에 접속할 수 있다. @@ -49,7 +50,7 @@ options ndots:5 ``` 요약하면, _test_ 네임스페이스에 있는 파드는 `data.prod` 또는 -`data.prod.cluster.local` 중 하나를 통해 성공적으로 해석될 수 있다. +`data.prod.svc.cluster.local` 중 하나를 통해 성공적으로 해석될 수 있다. ### DNS 레코드 @@ -261,6 +262,8 @@ spec: ### 파드의 DNS 설정 {#pod-dns-config} +{{< feature-state for_k8s_version="v1.14" state="stable" >}} + 사용자들은 파드의 DNS 설정을 통해서 직접 파드의 DNS를 세팅할 수 있다. `dnsConfig` 필드는 선택적이고, `dnsPolicy` 세팅과 함께 동작한다. @@ -310,18 +313,6 @@ search default.svc.cluster-domain.example svc.cluster-domain.example cluster-dom options ndots:5 ``` -### 기능 지원 여부 - -파드 DNS 구성 및 DNS 정책 "`None`"에 대한 지원 정보는 아래에서 확인 할 수 있다. - -| k8s 버전 | 기능 지원 | -| :---------: |:-----------:| -| 1.14 | 안정 | -| 1.10 | 베타 (기본)| -| 1.9 | 알파 | - - - ## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/concepts/services-networking/endpoint-slices.md b/content/ko/docs/concepts/services-networking/endpoint-slices.md index 4e12cf9ff2..4ea1281faa 100644 --- a/content/ko/docs/concepts/services-networking/endpoint-slices.md +++ b/content/ko/docs/concepts/services-networking/endpoint-slices.md @@ -154,7 +154,7 @@ v1beta1 API의 `topology` 필드에 있는 `"topology.kubernetes.io/zone"` ### 관리 -대부분의 경우, 컨트롤 플레인(특히, 엔드포인트 슬라이스 +대부분의 경우, 컨트롤 플레인(특히, 엔드포인트슬라이스 {{< glossary_tooltip text="컨트롤러" term_id="controller" >}})는 엔드포인트슬라이스 오브젝트를 생성하고 관리한다. 다른 엔티티나 컨트롤러가 추가 엔드포인트슬라이스 집합을 관리하게 할 수 있는 서비스 메시 구현과 같이 @@ -165,13 +165,13 @@ v1beta1 API의 `topology` 필드에 있는 `"topology.kubernetes.io/zone"` 엔티티를 나타내는 `endpointslice.kubernetes.io/managed-by` {{< glossary_tooltip term_id="label" text="레이블" >}}을 정의한다. -엔드포인트 슬라이스 컨트롤러는 관리하는 모든 엔드포인트슬라이스에 레이블의 값으로 +엔드포인트슬라이스 컨트롤러는 관리하는 모든 엔드포인트슬라이스에 레이블의 값으로 `endpointslice-controller.k8s.io` 를 설정한다. 엔드포인트슬라이스를 관리하는 다른 엔티티도 이 레이블에 고유한 값을 설정해야 한다. 
### 소유권 -대부분의 유스케이스에서, 엔드포인트 슬라이스 오브젝트가 엔드포인트를 +대부분의 유스케이스에서, 엔드포인트슬라이스 오브젝트가 엔드포인트를 추적하는 서비스가 엔드포인트슬라이스를 소유한다. 이 소유권은 각 엔드포인트슬라이스의 소유자 참조와 서비스에 속한 모든 엔드포인트슬라이스의 간단한 조회를 가능하게 하는 `kubernetes.io/service-name` 레이블로 표시된다. @@ -247,5 +247,4 @@ v1beta1 API의 `topology` 필드에 있는 `"topology.kubernetes.io/zone"` ## {{% heading "whatsnext" %}} -* [엔드포인트슬라이스 활성화하기](/docs/tasks/administer-cluster/enabling-endpointslices)에 대해 배우기 -* [애플리케이션을 서비스와 함께 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/)를 읽어보기 +* [서비스와 애플리케이션 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/)를 읽어보기 diff --git a/content/ko/docs/concepts/services-networking/network-policies.md b/content/ko/docs/concepts/services-networking/network-policies.md index c68d6f2862..5a9b16a309 100644 --- a/content/ko/docs/concepts/services-networking/network-policies.md +++ b/content/ko/docs/concepts/services-networking/network-policies.md @@ -212,9 +212,10 @@ __ipBlock__: 인그레스 소스 또는 이그레스 대상으로 허용할 IP C ## SCTP 지원 -{{< feature-state for_k8s_version="v1.19" state="beta" >}} +{{< feature-state for_k8s_version="v1.20" state="stable" >}} -베타 기능으로, 기본 활성화되어 있다. 클러스터 수준에서 SCTP를 비활성화하려면, 사용자(또는 클러스터 관리자)가 API 서버에 `--feature-gates=SCTPSupport=false,…` 를 사용해서 `SCTPSupport` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 비활성화해야 한다. +안정된 기능으로, 기본 활성화되어 있다. 클러스터 수준에서 SCTP를 비활성화하려면, 사용자(또는 클러스터 관리자)가 API 서버에 `--feature-gates=SCTPSupport=false,…` 를 사용해서 `SCTPSupport` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 비활성화해야 한다. +해당 기능 게이트가 활성화되어 있는 경우, 네트워크폴리시의 `protocol` 필드를 `SCTP`로 지정할 수 있다. {{< note >}} SCTP 프로토콜 네트워크폴리시를 지원하는 {{< glossary_tooltip text="CNI" term_id="cni" >}} 플러그인을 사용하고 있어야 한다. 
diff --git a/content/ko/docs/concepts/services-networking/service-traffic-policy.md b/content/ko/docs/concepts/services-networking/service-traffic-policy.md new file mode 100644 index 0000000000..c4f87e2b3e --- /dev/null +++ b/content/ko/docs/concepts/services-networking/service-traffic-policy.md @@ -0,0 +1,73 @@ +--- + + +title: 서비스 내부 트래픽 정책 +content_type: concept +weight: 45 +--- + + + + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +_서비스 내부 트래픽 정책_ 을 사용하면 내부 트래픽 제한이 트래픽이 시작된 +노드 내의 엔드포인트로만 내부 트래픽을 라우팅하도록 한다. +여기서 "내부" 트래픽은 현재 클러스터의 파드로부터 시작된 트래픽을 지칭한다. +이를 통해 비용을 절감하고 성능을 개선할 수 있다. + + + +## 서비스 내부 트래픽 정책 사용 + + +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)에서 +`ServiceInternalTrafficPolicy`를 활성화한 후에 +{{< glossary_tooltip text="서비스" term_id="service" >}}의 +`.spec.internalTrafficPolicy`를 `Local`로 설정하여 내부 전용 트래픽 정책을 활성화 할 수 있다. +이것은 kube-proxy가 클러스터 내부 트래픽을 위해 노드 내부 엔드포인트로만 사용하도록 한다. + +{{< note >}} +지정된 서비스에 대한 엔드포인트가 없는 노드의 파드인 경우에 +서비스는 다른 노드에 엔드포인트가 있더라도 엔드포인트가 없는 것처럼 작동한다. +(이 노드의 파드에 대해서) +{{< /note >}} + +다음 예제는 서비스의 `.spec.internalTrafficPolicy`를 `Local`로 +설정하는 것을 보여 준다: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + internalTrafficPolicy: Local +``` + +## 작동 방식 + +kube-proxy는 `spec.internalTrafficPolicy` 의 설정에 따라서 라우팅되는 +엔드포인트를 필터링한다. +이것을 `Local`로 설정하면, 노드 내부 엔드포인트만 고려한다. +이 설정이 `Cluster`이거나 누락되었다면 모든 엔드포인트를 고려한다. +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)의 +`ServiceInternalTrafficPolicy`를 활성화한다면, `spec.internalTrafficPolicy`는 기본값 "Cluster"로 설정된다. + +## 제약조건 + +* 같은 서비스에서 `externalTrafficPolicy` 가 `Local`로 설정된 경우 +서비스 내부 트래픽 정책이 사용되지 않는다. +클러스터에서 동일하지 않은 다른 서비스에서 이 두 가지 기능은 동시에 사용할 수 있다. 
+ +## {{% heading "whatsnext" %}} + +* [토폴로지 인식 힌트 활성화](/docs/tasks/administer-cluster/enabling-topology-aware-hints)에 대해서 읽기 +* [서비스 외부 트래픽 정책](/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip)에 대해서 읽기 +* [서비스와 애플리케이션 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/) 읽기 diff --git a/content/ko/docs/concepts/services-networking/service.md b/content/ko/docs/concepts/services-networking/service.md index 7bbb4f6f63..5c4b9edeee 100644 --- a/content/ko/docs/concepts/services-networking/service.md +++ b/content/ko/docs/concepts/services-networking/service.md @@ -215,7 +215,7 @@ API 리소스이다. 개념적으로 엔드포인트와 매우 유사하지만, 오브젝트에 의해 미러링된다. 이 필드는 표준 쿠버네티스 레이블 구문을 따른다. 값은 -[IANA 표준 서비스 이름](http://www.iana.org/assignments/service-names) 또는 +[IANA 표준 서비스 이름](https://www.iana.org/assignments/service-names) 또는 `mycompany.com/my-custom-protocol`과 같은 도메인 접두사 이름 중 하나여야 한다. ## 가상 IP와 서비스 프록시 diff --git a/content/ko/docs/concepts/storage/storage-classes.md b/content/ko/docs/concepts/storage/storage-classes.md index 0bec67ef8a..f4385419f1 100644 --- a/content/ko/docs/concepts/storage/storage-classes.md +++ b/content/ko/docs/concepts/storage/storage-classes.md @@ -1,4 +1,9 @@ --- + + + + + title: 스토리지 클래스 content_type: concept weight: 30 @@ -184,7 +189,7 @@ CSI | 1.14 (alpha), 1.16 (beta) CSI 드라이버에 대한 문서를 본다. {{< note >}} - `waitForFirstConsumer`를 사용한다면, 노드 어피니티를 지정하기 위해서 파드 스펙에 `nodeName`을 사용하지는 않아야 한다. + `WaitForFirstConsumer`를 사용한다면, 노드 어피니티를 지정하기 위해서 파드 스펙에 `nodeName`을 사용하지는 않아야 한다. 만약 `nodeName`을 사용한다면, 스케줄러가 바이패스되고 PVC가 `pending` 상태로 있을 것이다. 대신, 아래와 같이 호스트네임을 이용하는 노드셀렉터를 사용할 수 있다. @@ -653,11 +658,11 @@ metadata: provisioner: kubernetes.io/azure-disk parameters: storageaccounttype: Standard_LRS - kind: Shared + kind: managed ``` * `storageaccounttype`: Azure 스토리지 계정 Sku 계층. 기본값은 없음. -* `kind`: 가능한 값은 `shared` (기본값), `dedicated`, 그리고 `managed` 이다. +* `kind`: 가능한 값은 `shared`, `dedicated`, 그리고 `managed` (기본값) 이다. 
`kind` 가 `shared` 인 경우, 모든 비관리 디스크는 클러스터와 동일한 리소스 그룹에 있는 몇 개의 공유 스토리지 계정에 생성된다. `kind` 가 `dedicated` 인 경우, 클러스터와 동일한 리소스 그룹에서 새로운 diff --git a/content/ko/docs/concepts/storage/volumes.md b/content/ko/docs/concepts/storage/volumes.md index 6156a4704b..29f4755172 100644 --- a/content/ko/docs/concepts/storage/volumes.md +++ b/content/ko/docs/concepts/storage/volumes.md @@ -1,4 +1,9 @@ --- + + + + + title: 볼륨 content_type: concept weight: 10 @@ -13,7 +18,6 @@ weight: 10 파일을 공유할 때 발생한다. 쿠버네티스 {{< glossary_tooltip text="볼륨" term_id="volume" >}} 추상화는 이러한 문제를 모두 해결한다. - [파드](/ko/docs/concepts/workloads/pods/)에 대해 익숙해지는 것을 추천한다. @@ -40,7 +44,6 @@ weight: 10 볼륨을 사용하려면, `.spec.volumes` 에서 파드에 제공할 볼륨을 지정하고 `.spec.containers[*].volumeMounts` 의 컨테이너에 해당 볼륨을 마운트할 위치를 선언한다. - 컨테이너의 프로세스는 도커 이미지와 볼륨으로 구성된 파일시스템 뷰를 본다. [도커 이미지](https://docs.docker.com/userguide/dockerimages/)는 파일시스템 계층의 루트에 있다. 볼륨은 이미지 내에 지정된 경로에 @@ -117,6 +120,7 @@ EBS 볼륨이 파티션된 경우, 선택적 필드인 `partition: "}} 컨트롤러 관리자와 kubelet에 의해 로드되지 않도록 `awsElasticBlockStore` 스토리지 @@ -257,6 +261,9 @@ spec: `path` 에서 파생된다. {{< note >}} +* [컨피그맵](/docs/tasks/configure-pod-container/configure-pod-configmap/)을 사용하기 위해서는 + 먼저 컨피그맵을 생성해야 한다. + * 컨피그맵을 [`subPath`](#subpath-사용하기) 볼륨 마운트로 사용하는 컨테이너는 컨피그맵 업데이트를 수신하지 않는다. @@ -522,6 +529,15 @@ glusterfs 볼륨에 데이터를 미리 채울 수 있으며, 파드 간에 데 ### hostPath {#hostpath} +{{< warning >}} +HostPath 볼륨에는 많은 보안 위험이 있으며, 가능하면 HostPath를 사용하지 않는 +것이 좋다. HostPath 볼륨을 사용해야 하는 경우, 필요한 파일 또는 디렉터리로만 +범위를 지정하고 ReadOnly로 마운트해야 한다. + +AdmissionPolicy를 사용하여 특정 디렉터리로의 HostPath 액세스를 제한하는 경우, +`readOnly` 마운트를 사용하는 정책이 유효하려면 `volumeMounts` 가 반드시 지정되어야 한다. +{{< /warning >}} + `hostPath` 볼륨은 호스트 노드의 파일시스템에 있는 파일이나 디렉터리를 파드에 마운트 한다. 이것은 대부분의 파드들이 필요한 것은 아니지만, 일부 애플리케이션에 강력한 탈출구를 제공한다. @@ -538,13 +554,12 @@ glusterfs 볼륨에 데이터를 미리 채울 수 있으며, 파드 간에 데 필드가 `type` 에 지원되는 값은 다음과 같다. - | 값 | 행동 | |:------|:---------| | | 빈 문자열 (기본값)은 이전 버전과의 호환성을 위한 것으로, hostPath 볼륨은 마운트 하기 전에 아무런 검사도 수행되지 않는다. 
| | `DirectoryOrCreate` | 만약 주어진 경로에 아무것도 없다면, 필요에 따라 Kubelet이 가지고 있는 동일한 그룹과 소유권, 권한을 0755로 설정한 빈 디렉터리를 생성한다. | | `Directory` | 주어진 경로에 디렉터리가 있어야 함 | -| `FileOrCreate` | 만약 주어진 경로에 아무것도 없다면, 필요에 따라 Kubelet이 가지고 있는 동일한 그룹과 소유권, 권한을 0644로 설정한 빈 디렉터리를 생성한다. | +| `FileOrCreate` | 만약 주어진 경로에 아무것도 없다면, 필요에 따라 Kubelet이 가지고 있는 동일한 그룹과 소유권, 권한을 0644로 설정한 빈 파일을 생성한다. | | `File` | 주어진 경로에 파일이 있어야 함 | | `Socket` | 주어진 경로에 UNIX 소캣이 있어야 함 | | `CharDevice` | 주어진 경로에 문자 디바이스가 있어야 함 | @@ -552,6 +567,9 @@ glusterfs 볼륨에 데이터를 미리 채울 수 있으며, 파드 간에 데 다음과 같은 이유로 이 유형의 볼륨 사용시 주의해야 한다. +* HostPath는 권한있는 시스템 자격 증명 (예 : Kubelet 용) 또는 권한있는 API + (예 : 컨테이너 런타임 소켓)를 노출 할 수 있으며, 이는 컨테이너 이스케이프 또는 + 클러스터의 다른 부분을 공격하는 데 사용될 수 있다. * 동일한 구성(파드템플릿으로 생성한 것과 같은)을 가진 파드는 노드에 있는 파일이 다르기 때문에 노드마다 다르게 동작할 수 있다. * 기본 호스트에 생성된 파일 또는 디렉터리는 root만 쓸 수 있다. @@ -909,12 +927,13 @@ API 서버에 대해 `--service-account-max-token-expiration` 옵션을 지정 상대 경로를 지정한다. {{< note >}} -projected 볼륨 소스를 [`subPath`](#subpath-사용하기) 볼륨으로 마운트해서 사용하는 컨테이너는 해당 볼륨 소스의 업데이트를 수신하지 않는다. +projected 볼륨 소스를 [`subPath`](#subpath-사용하기) 볼륨으로 마운트해서 사용하는 컨테이너는 +해당 볼륨 소스의 업데이트를 수신하지 않는다. {{< /note >}} ### quobyte -`quobyte` 볼륨을 사용하면 기존 [Quobyte](http://www.quobyte.com) 볼륨을 +`quobyte` 볼륨을 사용하면 기존 [Quobyte](https://www.quobyte.com) 볼륨을 파드에 마운트할 수 있다. {{< note >}} @@ -1103,7 +1122,6 @@ vmware-vdiskmanager -c -t 0 -s 40GB -a lsilogic myDisk.vmdk {{< /tabs >}} - #### vSphere VMDK 구성 예시 {#vsphere-vmdk-configuration} ```yaml @@ -1133,8 +1151,7 @@ spec: {{< feature-state for_k8s_version="v1.19" state="beta" >}} `vsphereVolume` 용 `CSIMigration` 기능이 활성화되면, 기존 인-트리 플러그인에서 -`csi.vsphere.vmware.com` {{< glossary_tooltip text="CSI" term_id="csi" >}} 드라이버로 모든 플러그인 작업을 리디렉션한다. -이 기능을 사용하려면, +`csi.vsphere.vmware.com` {{< glossary_tooltip text="CSI" term_id="csi" >}} 드라이버로 모든 플러그인 작업을 리디렉션한다. 
이 기능을 사용하려면, [vSphere CSI 드라이버](https://github.com/kubernetes-sigs/vsphere-csi-driver)가 클러스터에 설치되어야 하며 `CSIMigration` 및 `CSIMigrationvSphere` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 활성화되어 있어야 한다. diff --git a/content/ko/docs/concepts/workloads/controllers/daemonset.md b/content/ko/docs/concepts/workloads/controllers/daemonset.md index d7d583d142..1496b25ec3 100644 --- a/content/ko/docs/concepts/workloads/controllers/daemonset.md +++ b/content/ko/docs/concepts/workloads/controllers/daemonset.md @@ -1,4 +1,10 @@ --- + + + + + + title: 데몬셋 content_type: concept weight: 40 @@ -26,7 +32,8 @@ _데몬셋_ 은 모든(또는 일부) 노드가 파드의 사본을 실행하도 ### 데몬셋 생성 -YAML 파일로 데몬셋을 설명 할 수 있다. 예를 들어 아래 `daemonset.yaml` 파일은 fluentd-elasticsearch 도커 이미지를 실행하는 데몬셋을 설명한다. +YAML 파일에 데몬셋 명세를 작성할 수 있다. 예를 들어 아래 `daemonset.yaml` 파일은 +fluentd-elasticsearch 도커 이미지를 실행하는 데몬셋을 설명한다. {{< codenew file="controllers/daemonset.yaml" >}} @@ -40,19 +47,23 @@ kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml 다른 모든 쿠버네티스 설정과 마찬가지로 데몬셋에는 `apiVersion`, `kind` 그리고 `metadata` 필드가 필요하다. 일반적인 설정파일 작업에 대한 정보는 -[스테이트리스 애플리케이션 실행하기](/docs/tasks/run-application/run-stateless-application-deployment/), -[컨테이너 구성하기](/ko/docs/tasks/) 그리고 [kubectl을 사용한 오브젝트 관리](/ko/docs/concepts/overview/working-with-objects/object-management/) 문서를 참고한다. +[스테이트리스 애플리케이션 실행하기](/docs/tasks/run-application/run-stateless-application-deployment/)와 + [kubectl을 사용한 오브젝트 관리](/ko/docs/concepts/overview/working-with-objects/object-management/)를 참고한다. 데몬셋 오브젝트의 이름은 유효한 [DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. -데몬셋에는 [`.spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) 섹션도 필요하다. +데몬셋에는 +[`.spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) +섹션도 필요하다. ### 파드 템플릿 `.spec.template` 는 `.spec` 의 필수 필드 중 하나이다. 
-`.spec.template` 는 [파드 템플릿](/ko/docs/concepts/workloads/pods/#파드-템플릿)이다. 이것은 중첩되어 있다는 점과 `apiVersion` 또는 `kind` 를 가지지 않는 것을 제외하면 {{< glossary_tooltip text="파드" term_id="pod" >}}와 정확히 같은 스키마를 가진다. +`.spec.template` 는 [파드 템플릿](/ko/docs/concepts/workloads/pods/#파드-템플릿)이다. +이것은 중첩되어 있다는 점과 `apiVersion` 또는 `kind` 를 가지지 않는 것을 제외하면 +{{< glossary_tooltip text="파드" term_id="pod" >}}와 정확히 같은 스키마를 가진다. 데몬셋의 파드 템플릿에는 파드의 필수 필드 외에도 적절한 레이블이 명시되어야 한다([파드 셀렉터](#파드-셀렉터)를 본다). @@ -73,19 +84,22 @@ kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml `.spec.selector` 는 다음 2개의 필드로 구성된 오브젝트이다. -* `matchLabels` - [레플리케이션 컨트롤러](/ko/docs/concepts/workloads/controllers/replicationcontroller/)의 `.spec.selector` 와 동일하게 작동한다. +* `matchLabels` - [레플리케이션 컨트롤러](/ko/docs/concepts/workloads/controllers/replicationcontroller/)의 +`.spec.selector` 와 동일하게 작동한다. * `matchExpressions` - 키, 값 목록 그리고 키 및 값에 관련된 연산자를 명시해서 보다 정교한 셀렉터를 만들 수 있다. 2개의 필드가 명시되면 두 필드를 모두 만족하는 것(ANDed)이 결과가 된다. -만약 `.spec.selector` 를 명시하면, 이것은 `.spec.template.metadata.labels` 와 일치해야 한다. 일치하지 않는 구성은 API에 의해 거부된다. +만약 `.spec.selector` 를 명시하면, 이것은 `.spec.template.metadata.labels` 와 일치해야 한다. +일치하지 않는 구성은 API에 의해 거부된다. ### 오직 일부 노드에서만 파드 실행 만약 `.spec.template.spec.nodeSelector` 를 명시하면 데몬셋 컨트롤러는 [노드 셀렉터](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#노드-셀렉터-nodeselector)와 -일치하는 노드에 파드를 생성한다. 마찬가지로 `.spec.template.spec.affinity` 를 명시하면 +일치하는 노드에 파드를 생성한다. +마찬가지로 `.spec.template.spec.affinity` 를 명시하면 데몬셋 컨트롤러는 [노드 어피니티](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#노드-어피니티)와 일치하는 노드에 파드를 생성한다. 만약 둘 중 하나를 명시하지 않으면 데몬셋 컨트롤러는 모든 노드에서 파드를 생성한다. @@ -100,18 +114,19 @@ kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml 데몬셋 파드는 데몬셋 컨트롤러에 의해 생성되고 스케줄된다. 이에 대한 이슈를 소개한다. - * 파드 동작의 불일치: 스케줄 되기 위해서 대기 중인 일반 파드는 `Pending` 상태로 생성된다. - 그러나 데몬셋 파드는 `Pending` 상태로 생성되지 않는다. - 이것은 사용자에게 혼란을 준다. - * [파드 선점](/ko/docs/concepts/configuration/pod-priority-preemption/)은 - 기본 스케줄러에서 처리한다. 
선점이 활성화되면 데몬셋 컨트롤러는 - 파드 우선순위와 선점을 고려하지 않고 스케줄 한다. +* 파드 동작의 불일치: 스케줄 되기 위해서 대기 중인 일반 파드는 `Pending` 상태로 생성된다. + 그러나 데몬셋 파드는 `Pending` 상태로 생성되지 않는다. + 이것은 사용자에게 혼란을 준다. +* [파드 선점](/ko/docs/concepts/scheduling-eviction/pod-priority-preemption/)은 + 기본 스케줄러에서 처리한다. 선점이 활성화되면 데몬셋 컨트롤러는 + 파드 우선순위와 선점을 고려하지 않고 스케줄 한다. `ScheduleDaemonSetPods` 로 데몬셋 파드에 `.spec.nodeName` 용어 대신 `NodeAffinity` 용어를 추가해서 데몬셋 컨트롤러 대신 기본 스케줄러를 사용해서 데몬셋을 스케줄할 수 있다. 이후에 기본 스케줄러를 사용해서 대상 호스트에 파드를 바인딩한다. 만약 데몬셋 파드에 -이미 노드 선호도가 존재한다면 교체한다(대상 호스트를 선택하기 전에 원래 노드의 어피니티가 고려된다). 데몬셋 컨트롤러는 +이미 노드 선호도가 존재한다면 교체한다(대상 호스트를 선택하기 전에 +원래 노드의 어피니티가 고려된다). 데몬셋 컨트롤러는 데몬셋 파드를 만들거나 수정할 때만 이런 작업을 수행하며, 데몬셋의 `spec.template` 은 변경되지 않는다. @@ -152,10 +167,12 @@ nodeAffinity: - **푸시(Push)**: 데몬셋의 파드는 통계 데이터베이스와 같은 다른 서비스로 업데이트를 보내도록 구성되어있다. 그들은 클라이언트들을 가지지 않는다. -- **노드IP와 알려진 포트**: 데몬셋의 파드는 `호스트 포트`를 사용할 수 있으며, 노드IP를 통해 파드에 접근할 수 있다. 클라이언트는 노드IP를 어떻게든지 알고 있으며, 관례에 따라 포트를 알고 있다. +- **노드IP와 알려진 포트**: 데몬셋의 파드는 `호스트 포트`를 사용할 수 있으며, + 노드IP를 통해 파드에 접근할 수 있다. + 클라이언트는 노드IP를 어떻게든지 알고 있으며, 관례에 따라 포트를 알고 있다. - **DNS**: 동일한 파드 셀렉터로 [헤드리스 서비스](/ko/docs/concepts/services-networking/service/#헤드리스-headless-서비스)를 만들고, - 그 다음에 `엔드포인트` 리소스를 사용해서 데몬셋을 찾거나 DNS에서 여러 A레코드를 - 검색한다. + 그 다음에 `엔드포인트` 리소스를 사용해서 데몬셋을 찾거나 + DNS에서 여러 A레코드를 검색한다. - **서비스**: 동일한 파드 셀렉터로 서비스를 생성하고, 서비스를 사용해서 임의의 노드의 데몬에 도달한다(특정 노드에 도달할 방법이 없다). diff --git a/content/ko/docs/concepts/workloads/controllers/job.md b/content/ko/docs/concepts/workloads/controllers/job.md index 6cdab2d6c5..c24beb0fca 100644 --- a/content/ko/docs/concepts/workloads/controllers/job.md +++ b/content/ko/docs/concepts/workloads/controllers/job.md @@ -304,7 +304,7 @@ spec: ### 완료된 잡을 위한 TTL 메커니즘 -{{< feature-state for_k8s_version="v1.12" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} 완료된 잡 (`Complete` 또는 `Failed`)을 자동으로 정리하는 또 다른 방법은 잡의 `.spec.ttlSecondsAfterFinished` 필드를 지정해서 완료된 리소스에 대해 @@ -342,11 +342,6 @@ spec: 삭제되도록 할 수 있다. 
만약 필드를 설정하지 않으면, 이 잡이 완료된 후에 TTL 컨트롤러에 의해 정리되지 않는다. -이 TTL 메커니즘은 기능 게이트 `TTLAfterFinished`와 함께 알파 단계이다. 더 -자세한 정보는 완료된 리소스를 위한 -[TTL 컨트롤러](/ko/docs/concepts/workloads/controllers/ttlafterfinished/) -문서를 본다. - ## 잡 패턴 잡 오브젝트를 사용해서 신뢰할 수 있는 파드의 병렬 실행을 지원할 수 있다. 잡 오브젝트는 과학 diff --git a/content/ko/docs/concepts/workloads/controllers/statefulset.md b/content/ko/docs/concepts/workloads/controllers/statefulset.md index 3a1f784259..e231770cc5 100644 --- a/content/ko/docs/concepts/workloads/controllers/statefulset.md +++ b/content/ko/docs/concepts/workloads/controllers/statefulset.md @@ -213,9 +213,9 @@ web-0이 실패할 경우 web-1은 web-0이 Running 및 Ready 상태가 `OrderedReady` 파드 관리는 스테이트풀셋의 기본이다. 이것은 [위에서](#디플로이먼트와-스케일-보증) 설명한 행위를 구현한다. -#### 병렬 파드 관리 +#### Parallel 파드 관리 -`병렬` 파드 관리는 스테이트풀셋 컨트롤러에게 모든 파드를 +`Parallel` 파드 관리는 스테이트풀셋 컨트롤러에게 모든 파드를 병렬로 실행 또는 종료하게 한다. 그리고 다른 파드의 실행이나 종료에 앞서 파드가 Running 및 Ready 상태가 되거나 완전히 종료되기를 기다리지 않는다. 이 옵션은 오직 스케일링 작업에 대한 동작에만 영향을 미친다. 업데이트는 영향을 diff --git a/content/ko/docs/concepts/workloads/pods/_index.md b/content/ko/docs/concepts/workloads/pods/_index.md index 8645053bef..22b98b705c 100644 --- a/content/ko/docs/concepts/workloads/pods/_index.md +++ b/content/ko/docs/concepts/workloads/pods/_index.md @@ -32,10 +32,10 @@ _파드_ (고래 떼(pod of whales)나 콩꼬투리(pea pod)와 마찬가지로) ## 파드란 무엇인가? {{< note >}} -[도커](https://www.docker.com/)가 가장 일반적으로 -잘 알려진 런타임이지만, 쿠버네티스는 도커보다 -{{< glossary_tooltip text="컨테이너 런타임" term_id="container-runtime" >}}을 -더 많이 지원하며, 도커의 일부 용어를 사용하면 파드를 설명하는 데 도움이 된다. +[도커](https://www.docker.com/)가 가장 일반적으로 잘 알려진 +{{< glossary_tooltip text="컨테이너 런타임" term_id="container-runtime" >}}이지만, +쿠버네티스는 도커 외에도 다양한 컨테이너 런타임을 지원하며, +파드를 설명할 때 도커 관련 용어를 사용하면 더 쉽게 설명할 수 있다. 
{{< /note >}} 파드의 공유 콘텍스트는 리눅스 네임스페이스, 컨트롤 그룹(cgroup) 및 diff --git a/content/ko/docs/concepts/workloads/pods/disruptions.md b/content/ko/docs/concepts/workloads/pods/disruptions.md index 02730d4306..497d857d11 100644 --- a/content/ko/docs/concepts/workloads/pods/disruptions.md +++ b/content/ko/docs/concepts/workloads/pods/disruptions.md @@ -31,7 +31,7 @@ weight: 60 - 클라우드 공급자 또는 하이퍼바이저의 오류로 인한 VM 장애 - 커널 패닉 - 클러스터 네트워크 파티션의 발생으로 클러스터에서 노드가 사라짐 -- 노드의 [리소스 부족](/docs/tasks/administer-cluster/out-of-resource/)으로 파드가 축출됨 +- 노드의 [리소스 부족](/docs/concepts/scheduling-eviction/node-pressure-eviction/)으로 파드가 축출됨 리소스 부족을 제외한 나머지 조건은 대부분의 사용자가 익숙할 것이다. 왜냐하면 @@ -76,7 +76,7 @@ weight: 60 - 복제된 애플리케이션의 구동 시 훨씬 더 높은 가용성을 위해 랙 전체 ([안티-어피니티](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#파드간-어피니티와-안티-어피니티) 이용) 또는 영역 간 - ([다중 영역 클러스터](/docs/setup/multiple-zones)를 이용한다면)에 + ([다중 영역 클러스터](/ko/docs/setup/best-practices/multiple-zones/)를 이용한다면)에 애플리케이션을 분산해야 한다. 자발적 중단의 빈도는 다양하다. 기본적인 쿠버네티스 클러스터에서는 자동화된 자발적 중단은 발생하지 않는다(사용자가 지시한 자발적 중단만 발생한다). @@ -86,9 +86,10 @@ weight: 60 단편화를 제거하고 노드의 효율을 높이는 과정에서 자발적 중단을 야기할 수 있다. 클러스터 관리자 또는 호스팅 공급자는 예측 가능한 자발적 중단 수준에 대해 문서화해야 한다. -파드 스펙 안에 [프라이어리티클래스 사용하기](/ko/docs/concepts/configuration/pod-priority-preemption/)와 같은 특정 환경설정 옵션 +파드 스펙 안에 [프라이어리티클래스 사용하기](/ko/docs/concepts/scheduling-eviction/pod-priority-preemption/)와 같은 특정 환경설정 옵션 또한 자발적(+ 비자발적) 중단을 유발할 수 있다. + ## 파드 disruption budgets {{< feature-state for_k8s_version="v1.21" state="stable" >}} diff --git a/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md b/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md index 71523e183a..010694409e 100644 --- a/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md @@ -39,7 +39,7 @@ ID([UID](/ko/docs/concepts/overview/working-with-objects/names/#uids))가 파드는 자체적으로 자가 치유되지 않는다. 파드가 {{< glossary_tooltip text="노드" term_id="node" >}}에 스케줄된 후에 해당 노드가 실패하면, 파드는 삭제된다. 
마찬가지로, 파드는 -리소스 부족 또는 노드 유지 관리 작업으로 인해 축출되지 않는다. 쿠버네티스는 +리소스 부족 또는 노드 유지 관리 작업으로 인한 축출에서 살아남지 못한다. 쿠버네티스는 {{< glossary_tooltip term_id="controller" text="컨트롤러" >}}라 부르는 하이-레벨 추상화를 사용하여 상대적으로 일회용인 파드 인스턴스를 관리하는 작업을 처리한다. @@ -304,13 +304,23 @@ kubelet은 실행 중인 컨테이너들에 대해서 선택적으로 세 가지 보일 수도 있지만, 스팩에 준비성 프로브가 존재한다는 것은 파드가 트래픽을 받지 않는 상태에서 시작되고 프로브가 성공하기 시작한 이후에만 트래픽을 받는다는 뜻이다. -만약 컨테이너가 대량의 데이터, 설정 파일들, -또는 시동 중 마그레이션을 처리해야 한다면, 준비성 프로브를 지정하길 바란다. -만약 당신의 컨테이너가 유지 관리를 위해서 자체 중단되게 하려면, +만약 컨테이너가 유지 관리를 위해서 자체 중단되게 하려면, 준비성 프로브를 지정하길 바란다. 준비성 프로브는 활성 프로브와는 다르게 준비성에 특정된 엔드포인트를 확인한다. +만약 애플리케이션이 백엔드 서비스에 엄격한 의존성이 있다면, +활성 프로브와 준비성 프로브 모두 활용할 수도 있다. 활성 프로브는 애플리케이션 스스로가 건강한 상태면 +통과하지만, 준비성 프로브는 추가적으로 요구되는 각 백-엔드 서비스가 가용한지 확인한다. 이를 이용하여, +오류 메시지만 응답하는 파드로 +트래픽이 가는 것을 막을 수 있다. + +만약 컨테이너가 시동 시 대량 데이터의 로딩, 구성 파일, 또는 +마이그레이션에 대한 작업을 +수행해야 한다면, [스타트업 프로브](#언제-스타트업-프로브를-사용해야-하는가)를 사용하면 된다. 그러나, 만약 +failed 애플리케이션과 시동 중에 아직 데이터를 처리하고 있는 애플리케이션을 구분하여 탐지하고 +싶다면, 준비성 프로브를 사용하는 것이 더 적합할 것이다. + {{< note >}} 파드가 삭제될 때 요청들을 흘려 보내기(drain) 위해 준비성 프로브가 꼭 필요한 것은 아니다. 삭제 시에, 파드는 diff --git a/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md b/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md index 2601f5c871..304471bb28 100644 --- a/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md +++ b/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md @@ -15,7 +15,7 @@ obsolete --> {{< note >}} v1.18 이전 버전의 쿠버네티스에서는 파드 토폴로지 분배 제약조건을 사용하려면 [API 서버](/ko/docs/concepts/overview/components/#kube-apiserver)와 -[스케줄러](/docs/reference/generated/kube-scheduler/)에서 +[스케줄러](/docs/reference/command-line-tools-reference/kube-scheduler/)에서 `EvenPodsSpread`[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화해야 한다 {{< /note >}} @@ -82,12 +82,11 @@ spec: 사용자는 하나 또는 다중 `topologySpreadConstraint` 를 정의해서 kube-scheduler 에게 클러스터에 걸쳐 있는 기존 파드와 시작하는 각각의 파드와 연관하여 배치하는 방법을 명령할 수 있다. 필드는 다음과 같다. 
- **maxSkew** 는 파드가 균등하지 않게 분산될 수 있는 정도를 나타낸다. - 이것은 주어진 토폴로지 유형의 임의의 두 토폴로지 도메인에 일치하는 - 파드의 수 사이에서 허용되는 차이의 최댓값이다. 이것은 0보다는 커야 - 한다. 그 의미는 `whenUnsatisfiable` 의 값에 따라 다르다. + 이것은 0보다는 커야 한다. 그 의미는 `whenUnsatisfiable` 의 값에 따라 다르다. - `whenUnsatisfiable` 이 "DoNotSchedule"과 같을 때, `maxSkew` 는 - 대상 토폴로지에서 일치하는 파드 수와 전역 최솟값 사이에 - 허용되는 최대 차이이다. + 대상 토폴로지에서 일치하는 파드 수와 전역 최솟값 + (토폴로지 도메인에서 레이블 셀렉터와 일치하는 최소 파드 수. 예를 들어 3개의 영역에 각각 0, 2, 3개의 일치하는 파드가 있으면, 전역 최솟값은 0) + 사이에 허용되는 최대 차이이다. - `whenUnsatisfiable` 이 "ScheduleAnyway"와 같으면, 스케줄러는 왜곡을 줄이는데 도움이 되는 토폴로지에 더 높은 우선 순위를 부여한다. - **topologyKey** 는 노드 레이블의 키다. 만약 두 노드가 이 키로 레이블이 지정되고, 레이블이 동일한 값을 가진다면 스케줄러는 두 노드를 같은 토폴로지에 있는것으로 여기게 된다. 스케줄러는 각 토폴로지 도메인에 균형잡힌 수의 파드를 배치하려고 시도한다. @@ -96,6 +95,8 @@ spec: - `ScheduleAnyway` 는 스케줄러에게 차이(skew)를 최소화하는 노드에 높은 우선 순위를 부여하면서, 스케줄링을 계속하도록 지시한다. - **labelSelector** 는 일치하는 파드를 찾는데 사용된다. 이 레이블 셀렉터와 일치하는 파드의 수를 계산하여 해당 토폴로지 도메인에 속할 파드의 수를 결정한다. 자세한 내용은 [레이블 셀렉터](/ko/docs/concepts/overview/working-with-objects/labels/#레이블-셀렉터)를 참조한다. +파드에 2개 이상의 `topologySpreadConstraint`가 정의되어 있으면, 각 제약 조건은 AND로 연결된다 - kube-scheduler는 새로운 파드의 모든 제약 조건을 만족하는 노드를 찾는다. + 사용자는 `kubectl explain Pod.spec.topologySpreadConstraints` 를 실행해서 이 필드에 대한 자세한 내용을 알 수 있다. ### 예시: 단수 토폴로지 분배 제약 조건 @@ -387,7 +388,8 @@ profiles: ## 알려진 제한사항 -- 디플로이먼트를 스케일링 다운하면 그 결과로 파드의 분포가 불균형이 될 수 있다. +- 파드가 제거된 이후에도 제약 조건이 계속 충족된다는 보장은 없다. 예를 들어 디플로이먼트를 스케일링 다운하면 그 결과로 파드의 분포가 불균형해질 수 있다. +[Descheduler](https://github.com/kubernetes-sigs/descheduler)를 사용하여 파드 분포를 다시 균형있게 만들 수 있다. - 파드와 일치하는 테인트(taint)가 된 노드가 존중된다. [이슈 80921](https://github.com/kubernetes/kubernetes/issues/80921)을 본다. ## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/contribute/_index.md b/content/ko/docs/contribute/_index.md index 0582739545..dcb7a68f49 100644 --- a/content/ko/docs/contribute/_index.md +++ b/content/ko/docs/contribute/_index.md @@ -46,7 +46,7 @@ card: 1. 
CNCF [Contributor License Agreement](https://github.com/kubernetes/community/blob/master/CLA.md)에 서명합니다. 1. [문서 리포지터리](https://github.com/kubernetes/website)와 웹사이트의 [정적 사이트 생성기](https://gohugo.io)를 숙지합니다. -1. [풀 리퀘스트 열기](/ko/docs/contribute/new-content/new-content/)와 +1. [풀 리퀘스트 열기](/ko/docs/contribute/new-content/open-a-pr/)와 [변경 검토](/ko/docs/contribute/review/reviewing-prs/)의 기본 프로세스를 이해하도록 합니다. @@ -60,7 +60,7 @@ card: 기여할 수 있는 다양한 방법에 대해 알아봅니다. - [`kubernetes/website` 이슈 목록](https://github.com/kubernetes/website/issues/)을 확인하여 좋은 진입점이 되는 이슈를 찾을 수 있습니다. -- 기존 문서에 대해 [GitHub을 사용해서 풀 리퀘스트 열거나](/ko/docs/contribute/new-content/new-content/#github을-사용하여-변경하기) +- 기존 문서에 대해 [GitHub을 사용해서 풀 리퀘스트 열거나](/ko/docs/contribute/new-content/open-a-pr/#github을-사용하여-변경하기) GitHub에서의 이슈 제기에 대해 자세히 알아봅니다. - 정확성과 언어에 대해 다른 쿠버네티스 커뮤니티 맴버의 [풀 리퀘스트 검토](/ko/docs/contribute/review/reviewing-prs/)를 합니다. @@ -71,7 +71,7 @@ card: ## 다음 단계 -- 리포지터리의 [로컬 복제본에서 작업](/ko/docs/contribute/new-content/new-content/#fork-the-repo)하는 +- 리포지터리의 [로컬 복제본에서 작업](/ko/docs/contribute/new-content/open-a-pr/#fork-the-repo)하는 방법을 배워봅니다. - [릴리스된 기능](/docs/contribute/new-content/new-features/)을 문서화 합니다. - [SIG Docs](/ko/docs/contribute/participate/)에 참여하고, @@ -96,6 +96,6 @@ SIG Docs는 여러가지 방법으로 의견을 나누고 있습니다. ## 다른 기여 방법들 -- [쿠버네티스 커뮤니티 사이트](/community/)를 방문하십시오. 트위터 또는 스택 오버플로우에 참여하고, 현지 쿠버네티스 모임과 이벤트 등에 대해 알아봅니다. +- [쿠버네티스 커뮤니티 사이트](/ko/community/)를 방문하십시오. 트위터 또는 스택 오버플로우에 참여하고, 현지 쿠버네티스 모임과 이벤트 등에 대해 알아봅니다. - [기여자 치트시트](https://github.com/kubernetes/community/tree/master/contributors/guide/contributor-cheatsheet)를 읽고 쿠버네티스 기능 개발에 참여합니다. - [블로그 게시물 또는 사례 연구](/docs/contribute/new-content/blogs-case-studies/)를 제출합니다. 
diff --git a/content/ko/docs/contribute/analytics.md b/content/ko/docs/contribute/analytics.md new file mode 100644 index 0000000000..d96d1ed576 --- /dev/null +++ b/content/ko/docs/contribute/analytics.md @@ -0,0 +1,25 @@ +--- +title: 사이트 분석 보기 +content_type: concept +weight: 100 +card: + name: contribute + weight: 100 +--- + + + +이 페이지는 kubernetes.io 사이트 분석을 제공하는 대시보드에 대한 정보를 담고 있다. + + + + +[대시보드 보기](https://datastudio.google.com/reporting/fede2672-b2fd-402a-91d2-7473bdb10f04). + +이 대시보드는 Google Data Studio를 사용하여 구축되었으며 kubernetes.io에서 Google Analytics를 사용하여 수집한 정보를 보여준다. + +### 대시보드 사용 + +기본적으로 대시보드는 지난 30일 동안 수집된 모든 데이터의 분석을 제공한다. 날짜 선택을 통해 특정 날짜 범위의 데이터를 볼 수 있다. 그 외 필터링 옵션을 사용하면, 사용자의 위치, 사이트에 접속하는데 사용된 장치, 번역된 문서 언어 등을 기준으로 데이터를 확인할 수 있다. + + 이 대시보드에 문제가 있거나 개선을 요청하려면, [이슈를 오픈](https://github.com/kubernetes/website/issues/new/choose) 한다. diff --git a/content/ko/docs/contribute/generate-ref-docs/quickstart.md b/content/ko/docs/contribute/generate-ref-docs/quickstart.md index 6e3fbb5263..6855696b9d 100644 --- a/content/ko/docs/contribute/generate-ref-docs/quickstart.md +++ b/content/ko/docs/contribute/generate-ref-docs/quickstart.md @@ -18,7 +18,7 @@ weight: 40 ## `website` 저장소 클론하기 {#Getting-the-docs-repository} -개인 계정에 있는 포크 버전의 `website` 저장소가 `kubernetes/website` 저장소의 master 브랜치만큼 최신인지 확인한 뒤, +개인 계정에 있는 포크 버전의 `website` 저장소가 GitHub에 있는 `kubernetes/website` 저장소(`main` 브랜치)의 최신 상태와 일치하는지 확인한 뒤, 개인 계정에 있는 포크 버전의 `website` 저장소를 로컬 개발 환경으로 클론한다. ```shell @@ -171,7 +171,7 @@ cd /update-imported-docs `release.yml` 환경설정 파일은 상대경로 링크를 수정하는 방법을 포함하고 있다. 임포트하는 파일 안에 있는 상대경로 링크를 수정하려면, `gen-absolute-links` 필드를 `true` 로 명시한다. 이에 대한 예시는 -[`release.yml`](https://github.com/kubernetes/website/blob/master/update-imported-docs/release.yml) 에서 볼 수 있다. +[`release.yml`](https://github.com/kubernetes/website/blob/main/update-imported-docs/release.yml) 에서 볼 수 있다. 
## `kubernetes/website` 의 변경사항을 커밋하기 {#Adding-and-committing-changes-in-kubernetes-website} diff --git a/content/ko/docs/contribute/localization_ko.md b/content/ko/docs/contribute/localization_ko.md index fe506a7bb1..e2e10f01cc 100644 --- a/content/ko/docs/contribute/localization_ko.md +++ b/content/ko/docs/contribute/localization_ko.md @@ -133,7 +133,7 @@ weight: 10 ### API 오브젝트 용어 한글화 방침 일반적으로 `kubectl api-resources` 결과의 `kind` 에 해당하는 API 오브젝트는 -[국립국어원 외래어 표기법](http://kornorms.korean.go.kr/regltn/regltnView.do?regltn_code=0003#a)에 +[국립국어원 외래어 표기법](https://kornorms.korean.go.kr/regltn/regltnView.do?regltn_code=0003#a)에 따라 한글로 표기하고 영문을 병기한다. 예를 들면 다음과 같다. API 오브젝트(kind) | 한글화(외래어 표기 및 영문 병기) diff --git a/content/ko/docs/contribute/new-content/open-a-pr.md b/content/ko/docs/contribute/new-content/open-a-pr.md index 552a6e1a0c..a1f0178b3c 100644 --- a/content/ko/docs/contribute/new-content/open-a-pr.md +++ b/content/ko/docs/contribute/new-content/open-a-pr.md @@ -127,7 +127,7 @@ git에 익숙하거나, 변경 사항이 몇 줄보다 클 경우, upstream https://github.com/kubernetes/website.git (push) ``` -6. 포크의 `origin/master` 와 `kubernetes/website` 의 `upstream/master` 에서 커밋을 가져온다. +6. 포크의 `origin/main` 와 `kubernetes/website` 의 `upstream/main` 에서 커밋을 가져온다. ```bash git fetch origin @@ -137,15 +137,15 @@ git에 익숙하거나, 변경 사항이 몇 줄보다 클 경우, 이를 통해 변경을 시작하기 전에 로컬 리포지터리가 최신 상태인지 확인한다. {{< note >}} - 이 워크플로는 [쿠버네티스 커뮤니티 GitHub 워크플로](https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md)와 다르다. 포크에 업데이트를 푸시하기 전에 로컬의 `master` 복사본을 `upstream/master` 와 병합할 필요가 없다. + 이 워크플로는 [쿠버네티스 커뮤니티 GitHub 워크플로](https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md)와 다르다. 포크에 업데이트를 푸시하기 전에 로컬의 `main` 복사본을 `upstream/main` 와 병합할 필요가 없다. {{< /note >}} ### 브랜치 만들기 1. 작업할 브랜치 기반을 결정한다. - - 기존 콘텐츠를 개선하려면, `upstream/master` 를 사용한다. - - 기존 기능에 대한 새로운 콘텐츠를 작성하려면, `upstream/master` 를 사용한다. + - 기존 콘텐츠를 개선하려면, `upstream/main` 를 사용한다. 
+ - 기존 기능에 대한 새로운 콘텐츠를 작성하려면, `upstream/main` 를 사용한다. - 현지화된 콘텐츠의 경우, 현지화 규칙을 사용한다. 자세한 내용은 [쿠버네티스 문서 현지화](/ko/docs/contribute/localization_ko/)를 참고한다. - 다가오는 쿠버네티스 릴리스의 새로운 기능에 대해서는 기능 브랜치(feature branch)를 사용한다. 자세한 정보는 [릴리스 문서화](/docs/contribute/new-content/new-features/)를 참고한다. - 콘텐츠 재구성과 같이 여러 SIG Docs 기여자들이 협업하는 장기적인 작업에는, @@ -154,10 +154,10 @@ git에 익숙하거나, 변경 사항이 몇 줄보다 클 경우, 브랜치 선택에 도움이 필요하면, 슬랙 채널 `#sig-docs` 에 문의한다. -2. 1단계에서 식별된 브랜치를 기반으로 새 브랜치를 작성한다. 이 예에서는 기본 브랜치가 `upstream/master` 라고 가정한다. +2. 1단계에서 식별된 브랜치를 기반으로 새 브랜치를 작성한다. 이 예에서는 기본 브랜치가 `upstream/main` 라고 가정한다. ```bash - git checkout -b upstream/master + git checkout -b upstream/main ``` 3. 텍스트 편집기를 사용하여 변경한다. @@ -264,7 +264,7 @@ website의 컨테이너 이미지를 만들거나 Hugo를 로컬에서 실행할 또는, 컴퓨터에 `hugo` 명령을 설치하여 사용한다. -1. [`website/netlify.toml`](https://raw.githubusercontent.com/kubernetes/website/master/netlify.toml)에 지정된 [Hugo](https://gohugo.io/getting-started/installing/) 버전을 설치한다. +1. [`website/netlify.toml`](https://raw.githubusercontent.com/kubernetes/website/main/netlify.toml)에 지정된 [Hugo](https://gohugo.io/getting-started/installing/) 버전을 설치한다. 2. website 리포지터리를 업데이트하지 않았다면, `website/themes/docsy` 디렉터리가 비어 있다. 테마의 로컬 복제본이 없으면 사이트를 빌드할 수 없다. website 테마를 업데이트하려면, 다음을 실행한다. @@ -372,11 +372,11 @@ PR을 연 후, GitHub는 자동 테스트를 실행하고 [Netlify](https://www. git push --force-with-lease origin ``` -2. `kubernetes/website` 의 `upstream/master` 에 대한 변경 사항을 가져오고 브랜치를 리베이스한다. +2. `kubernetes/website` 의 `upstream/main` 에 대한 변경 사항을 가져오고 브랜치를 리베이스한다. ```bash git fetch upstream - git rebase upstream/master + git rebase upstream/main ``` 3. 리베이스의 결과를 검사한다. 
diff --git a/content/ko/docs/contribute/new-content/overview.md b/content/ko/docs/contribute/new-content/overview.md index 5c8f6569da..540f1e6c12 100644 --- a/content/ko/docs/contribute/new-content/overview.md +++ b/content/ko/docs/contribute/new-content/overview.md @@ -42,7 +42,7 @@ CLA에 서명하지 않은 기여자의 풀 리퀘스트(pull request)는 자동 시나리오 | 브랜치 :---------|:------------ -현재 릴리스의 기존 또는 새로운 영어 콘텐츠 | `master` +현재 릴리스의 기존 또는 새로운 영어 콘텐츠 | `main` 기능 변경 릴리스의 콘텐츠 | `dev-` 패턴을 사용하여 기능 변경이 있는 주 버전과 부 버전에 해당하는 브랜치. 예를 들어, `v{{< skew nextMinorVersion >}}` 에서 기능이 변경된 경우, ``dev-{{< skew nextMinorVersion >}}`` 에 문서 변경을 추가한다. 다른 언어로된 콘텐츠(현지화) | 현지화 규칙을 사용. 자세한 내용은 [현지화 브랜치 전략](/docs/contribute/localization/#branching-strategy)을 참고한다. @@ -60,6 +60,6 @@ PR 당 하나의 언어로 풀 리퀘스트를 제한한다. 여러 언어로 ## 기여자를 위한 도구들 -`kubernetes/website` 리포지터리의 [문서 기여자를 위한 도구](https://github.com/kubernetes/website/tree/master/content/en/docs/doc-contributor-tools) 디렉터리에는 기여 여정이 좀 더 순조롭게 진행되도록 도와주는 도구들이 포함되어 있다. +`kubernetes/website` 리포지터리의 [문서 기여자를 위한 도구](https://github.com/kubernetes/website/tree/main/content/en/docs/doc-contributor-tools) 디렉터리에는 기여 여정이 좀 더 순조롭게 진행되도록 도와주는 도구들이 포함되어 있다. diff --git a/content/ko/docs/contribute/participate/_index.md b/content/ko/docs/contribute/participate/_index.md index ef271ca31c..c2d9aed771 100644 --- a/content/ko/docs/contribute/participate/_index.md +++ b/content/ko/docs/contribute/participate/_index.md @@ -73,8 +73,8 @@ GitHub의 SIG Docs [팀]에는 두 분류가 있다. - approve 이 두 플러그인은 `kubernetes/website` GitHub 리포지터리 최상위 수준에 있는 -[OWNERS](https://github.com/kubernetes/website/blob/master/OWNERS)와 -[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES) +[OWNERS](https://github.com/kubernetes/website/blob/main/OWNERS)와 +[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/main/OWNERS_ALIASES) 파일을 사용해서 해당 리포지터리에 대해 prow가 작동하는 방식을 제어한다. @@ -94,7 +94,7 @@ PR 소유자에게 조언하는데 활용된다. ## 병합 작업 방식 풀 리퀘스트 요청이 콘텐츠를 발행하는데 사용하는 -브랜치에 병합되면, 해당 콘텐츠는 http://kubernetes.io 에 공개된다. 
게시된 콘텐츠의 +브랜치에 병합되면, 해당 콘텐츠는 https://kubernetes.io 에 공개된다. 게시된 콘텐츠의 품질을 높히기 위해 SIG Docs 승인자가 풀 리퀘스트를 병합하는 것을 제한한다. 작동 방식은 다음과 같다. diff --git a/content/ko/docs/contribute/participate/pr-wranglers.md b/content/ko/docs/contribute/participate/pr-wranglers.md index 30c0979969..674696ee90 100644 --- a/content/ko/docs/contribute/participate/pr-wranglers.md +++ b/content/ko/docs/contribute/participate/pr-wranglers.md @@ -19,7 +19,7 @@ PR 랭글러는 일주일 간 매일 다음의 일을 해야 한다. - 매일 새로 올라오는 이슈를 심사하고 태그를 지정한다. SIG Docs가 메타데이터를 사용하는 방법에 대한 지침은 [이슈 심사 및 분류](/ko/docs/contribute/review/for-approvers/#이슈-심사와-분류)를 참고한다. - [스타일](/docs/contribute/style/style-guide/)과 [콘텐츠](/docs/contribute/style/content-guide/) 가이드를 준수하는지에 대해 [열린(open) 풀 리퀘스트](https://github.com/kubernetes/website/pulls)를 매일 리뷰한다. - 가장 작은 PR(`size/XS`)부터 시작하고, 가장 큰(`size/XXL`) PR까지 리뷰한다. 가능한 한 많은 PR을 리뷰한다. -- PR 기여자들이 [CLA]()에 서명했는지 확인한다. +- PR 기여자들이 [CLA](https://github.com/kubernetes/community/blob/master/CLA.md)에 서명했는지 확인한다. - CLA에 서명하지 않은 기여자에게 CLA에 서명하도록 알리려면 [이](https://github.com/zparnold/k8s-docs-pr-botherer) 스크립트를 사용한다. - 제안된 변경 사항에 대한 피드백을 제공하고 다른 SIG의 멤버에게 기술 리뷰를 요청한다. - 제안된 콘텐츠 변경에 대해 PR에 인라인 제안(inline suggestion)을 제공한다. @@ -45,8 +45,8 @@ PR 랭글러는 일주일 간 매일 다음의 일을 해야 한다. 지정한다. 콘텐츠에 대한 작업이 필요하다면, 제안하거나 인라인 피드백을 추가한다. - [LGTM 보유, 문서 승인 필요](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+-label%3Ado-not-merge%2Fwork-in-progress+-label%3Ado-not-merge%2Fhold+label%3Alanguage%2Fen+label%3Algtm+): 병합을 위해 `/approve` 코멘트가 필요한 PR을 나열한다. -- [퀵윈(Quick Wins)](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+base%3Amaster+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3A%22cncf-cla%3A+yes%22+label%3A%22size%2FXS%22+label%3A%22language%2Fen%22): 명확한 결격 사유가 없는 메인 브랜치에 대한 PR을 나열한다. 
([XS, S, M, L, XL, XXL] 크기의 PR을 작업할 때 크기 레이블에서 "XS"를 변경한다) -- [메인 브랜치이외의 브랜치에 대한 PR](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3Alanguage%2Fen+-base%3Amaster): `dev-` 브랜치에 대한 것일 경우, 곧 출시될 예정인 릴리스이다. `/assign @` 을 사용하여 [문서 릴리스 관리자](https://github.com/kubernetes/sig-release/tree/master/release-team#kubernetes-release-team-roles)를 할당한다. 오래된 브랜치에 대한 PR인 경우, PR 작성자가 가장 적합한 브랜치를 대상으로 하고 있는지 여부를 파악할 수 있도록 도와준다. +- [퀵윈(Quick Wins)](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+base%3Amain+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3A%22cncf-cla%3A+yes%22+label%3A%22size%2FXS%22+label%3A%22language%2Fen%22): 명확한 결격 사유가 없는 메인 브랜치에 대한 PR을 나열한다. ([XS, S, M, L, XL, XXL] 크기의 PR을 작업할 때 크기 레이블에서 "XS"를 변경한다) +- [메인 브랜치이외의 브랜치에 대한 PR](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3Alanguage%2Fen+-base%3Amain): `dev-` 브랜치에 대한 것일 경우, 곧 출시될 예정인 릴리스이다. `/assign @` 을 사용하여 [문서 릴리스 관리자](https://github.com/kubernetes/sig-release/tree/master/release-team#kubernetes-release-team-roles)를 할당한다. 오래된 브랜치에 대한 PR인 경우, PR 작성자가 가장 적합한 브랜치를 대상으로 하고 있는지 여부를 파악할 수 있도록 도와준다. ### 랭글러를 위한 유용한 Prow 명령어 diff --git a/content/ko/docs/contribute/participate/roles-and-responsibilities.md b/content/ko/docs/contribute/participate/roles-and-responsibilities.md index 448502c0c3..ad27eea6ff 100644 --- a/content/ko/docs/contribute/participate/roles-and-responsibilities.md +++ b/content/ko/docs/contribute/participate/roles-and-responsibilities.md @@ -29,7 +29,7 @@ GitHub 계정을 가진 누구나 쿠버네티스에 기여할 수 있다. SIG D 이슈를 올린다. - 풀 리퀘스트에 대해 구속력 없는 피드백을 제공한다. - 현지화에 기여한다. -- [슬랙](http://slack.k8s.io/) 또는 +- [슬랙](https://slack.k8s.io/) 또는 [SIG docs 메일링 리스트](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)에 개선을 제안한다. [CLA에 서명](/ko/docs/contribute/new-content/overview/#sign-the-cla) 후에 누구나 다음을 할 수 있다. @@ -144,14 +144,14 @@ LGTM은 "Looks good to me"의 약자이며 풀 리퀘스트가 기술적으로 지원하려면, 다음을 수행한다. 1. 
`kubernetes/website` 리포지터리 내 - [OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS) 파일의 섹션에 + [OWNERS_ALIASES](https://github.com/kubernetes/website/blob/main/OWNERS) 파일의 섹션에 여러분의 GitHub 사용자 이름을 추가하는 풀 리퀘스트를 연다. - - {{< note >}} - 자신을 추가할 위치가 확실하지 않으면, `sig-docs-ko-reviews` 에 추가한다. - {{< /note >}} - -1. PR을 하나 이상의 SIG-Docs 승인자(`sig-docs-{language}-owners` 에 + + {{< note >}} + 자신을 추가할 위치가 확실하지 않으면, `sig-docs-ko-reviews` 에 추가한다. + {{< /note >}} + +2. PR을 하나 이상의 SIG-Docs 승인자(`sig-docs-{language}-owners` 에 나열된 사용자 이름)에게 지정한다. 승인되면, SIG Docs 리더가 적당한 GitHub 팀에 여러분을 추가한다. 일단 추가되면, @@ -203,7 +203,7 @@ PR은 자동으로 병합된다. SIG Docs 승인자는 추가적인 기술 리 - 주간 로테이션을 위해 [PR Wrangler 로테이션 스케줄](https://github.com/kubernetes/website/wiki/PR-Wranglers)에 참여한다. SIG Docs는 모든 승인자들이 이 로테이션에 참여할 것으로 기대한다. 자세한 내용은 - [PR 랭글러(PR wrangler)](/ko/docs/contribute/participating/pr-wranglers/)를 + [PR 랭글러(PR wrangler)](/ko/docs/contribute/participate/pr-wranglers/)를 참고한다. ## 승인자 되기 @@ -216,7 +216,7 @@ PR은 자동으로 병합된다. SIG Docs 승인자는 추가적인 기술 리 지원하려면 다음을 수행한다. 1. `kubernetes/website` 리포지터리 내 - [OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS) + [OWNERS_ALIASES](https://github.com/kubernetes/website/blob/main/OWNERS) 파일의 섹션에 자신을 추가하는 풀 리퀘스트를 연다. {{< note >}} @@ -231,4 +231,4 @@ PR은 자동으로 병합된다. SIG Docs 승인자는 추가적인 기술 리 ## {{% heading "whatsnext" %}} -- 모든 승인자가 교대로 수행하는 역할인 [PR 랭글러](/ko/docs/contribute/participating/pr-wranglers)에 대해 읽어보기 +- 모든 승인자가 교대로 수행하는 역할인 [PR 랭글러](/ko/docs/contribute/participate/pr-wranglers)에 대해 읽어보기 diff --git a/content/ko/docs/contribute/review/reviewing-prs.md b/content/ko/docs/contribute/review/reviewing-prs.md index f0a164de00..e0b07a79a9 100644 --- a/content/ko/docs/contribute/review/reviewing-prs.md +++ b/content/ko/docs/contribute/review/reviewing-prs.md @@ -18,7 +18,7 @@ weight: 10 - 적합한 코멘트를 남길 수 있도록 [콘텐츠 가이드](/docs/contribute/style/content-guide/)와 [스타일 가이드](/docs/contribute/style/style-guide/)를 읽는다. 
- 쿠버네티스 문서화 커뮤니티의 다양한 - [역할과 책임](/ko/docs/contribute/participating/#역할과-책임)을 이해한다. + [역할과 책임](/ko/docs/contribute/participate/#역할과-책임)을 이해한다. @@ -87,7 +87,7 @@ weight: 10 - PR이 새로운 페이지를 소개하는가? 그렇다면, - 페이지가 올바른 [페이지 콘텐츠 타입](/docs/contribute/style/page-content-types/)과 연관된 Hugo 단축 코드를 사용하는가? - 섹션의 측면 탐색에 페이지가 올바르게 나타나는가? - - 페이지가 [문서 홈](/ko/docs/home/) 목록에 나타나야 하는가? + - 페이지가 [문서 홈](/docs/home/) 목록에 나타나야 하는가? - 변경 사항이 Netlify 미리보기에 표시되는가? 목록, 코드 블록, 표, 메모 및 이미지에 특히 주의한다. ### 기타 diff --git a/content/ko/docs/contribute/style/write-new-topic.md b/content/ko/docs/contribute/style/write-new-topic.md index 7441882615..9bb3376933 100644 --- a/content/ko/docs/contribute/style/write-new-topic.md +++ b/content/ko/docs/contribute/style/write-new-topic.md @@ -172,4 +172,4 @@ kubectl create -f https://k8s.io/examples/pods/storage/gce-volume.yaml ## {{% heading "whatsnext" %}} * [페이지 콘텐츠 타입 사용](/docs/contribute/style/page-content-types/)에 대해 알아보기. -* [풀 리퀘스트 작성](/ko/docs/contribute/new-content/new-content/)에 대해 알아보기. +* [풀 리퀘스트 작성](/ko/docs/contribute/new-content/open-a-pr/)에 대해 알아보기. diff --git a/content/ko/docs/contribute/suggesting-improvements.md b/content/ko/docs/contribute/suggesting-improvements.md index 7dd9f80a71..e10faf6ea8 100644 --- a/content/ko/docs/contribute/suggesting-improvements.md +++ b/content/ko/docs/contribute/suggesting-improvements.md @@ -10,7 +10,7 @@ card: -쿠버네티스 문서에 문제가 있거나, 새로운 내용에 대한 아이디어가 있으면, 이슈를 연다. [GitHub 계정](https://github.com/join)과 웹 브라우저만 있으면 된다. +쿠버네티스 문서의 문제를 발견하거나 새로운 내용에 대한 아이디어가 있으면, 이슈를 연다. [GitHub 계정](https://github.com/join)과 웹 브라우저만 있으면 된다. 대부분의 경우, 쿠버네티스 문서에 대한 새로운 작업은 GitHub의 이슈로 시작된다. 그런 다음 쿠버네티스 기여자는 필요에 따라 이슈를 리뷰, 분류하고 태그를 지정한다. 다음으로, 여러분이나 @@ -22,7 +22,7 @@ card: ## 이슈 열기 -기존 콘텐츠에 대한 개선을 제안하거나, 오류를 발견하면, 이슈를 연다. +기존 콘텐츠에 대한 개선을 제안하고 싶거나 오류를 발견하면, 이슈를 연다. 1. 오른쪽 사이드바에서 **문서에 이슈 생성** 링크를 클릭한다. 그러면 헤더가 미리 채워진 GitHub 이슈 페이지로 리디렉션된다. 
diff --git a/content/ko/docs/home/supported-doc-versions.md b/content/ko/docs/home/supported-doc-versions.md index 07d33e49b9..35f6f9a1b0 100644 --- a/content/ko/docs/home/supported-doc-versions.md +++ b/content/ko/docs/home/supported-doc-versions.md @@ -7,3 +7,6 @@ card: weight: 10 title: 사용 가능한 문서 버전 --- + +이 웹사이트에서는 쿠버네티스 최신 버전 및 +이전 4개 버전에 대한 문서를 제공하고 있다. diff --git a/content/ko/docs/reference/_index.md b/content/ko/docs/reference/_index.md index a441e80783..55401988b4 100644 --- a/content/ko/docs/reference/_index.md +++ b/content/ko/docs/reference/_index.md @@ -9,6 +9,7 @@ content_type: concept no_list: true --- + 쿠버네티스 문서의 본 섹션에서는 레퍼런스를 다룬다. @@ -37,7 +38,7 @@ no_list: true - [쿠버네티스 Python 클라이언트 라이브러리](https://github.com/kubernetes-client/python) - [쿠버네티스 Java 클라이언트 라이브러리](https://github.com/kubernetes-client/java) - [쿠버네티스 JavaScript 클라이언트 라이브러리](https://github.com/kubernetes-client/javascript) -- [쿠버네티스 Dotnet 클라이언트 라이브러리](https://github.com/kubernetes-client/csharp) +- [쿠버네티스 C# 클라이언트 라이브러리](https://github.com/kubernetes-client/csharp) - [쿠버네티스 Haskell 클라이언트 라이브러리](https://github.com/kubernetes-client/haskell) ## CLI @@ -48,26 +49,26 @@ no_list: true ## 컴포넌트 -* [kubelet](/docs/reference/command-line-tools-reference/kubelet/) - 각 -노드에서 구동되는 주요한 에이전트. kubelet은 PodSpecs 집합을 가지며 +* [kubelet](/docs/reference/command-line-tools-reference/kubelet/) - 각 +노드에서 구동되는 주요한 에이전트. kubelet은 PodSpecs 집합을 가지며 기술된 컨테이너가 구동되고 있는지, 정상 작동하는지를 보장한다. -* [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) - -파드, 서비스, 레플리케이션 컨트롤러와 같은 API 오브젝트에 대한 검증과 구성을 +* [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) - +파드, 서비스, 레플리케이션 컨트롤러와 같은 API 오브젝트에 대한 검증과 구성을 수행하는 REST API. * [kube-controller-manager](/docs/reference/command-line-tools-reference/kube-controller-manager/) - 쿠버네티스에 탑재된 핵심 제어 루프를 포함하는 데몬. 
-* [kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) - 간단한 -TCP/UDP 스트림 포워딩이나 백-엔드 집합에 걸쳐서 라운드-로빈 TCP/UDP 포워딩을 +* [kube-proxy](/ko/docs/reference/command-line-tools-reference/kube-proxy/) - 간단한 +TCP/UDP 스트림 포워딩이나 백-엔드 집합에 걸쳐서 라운드-로빈 TCP/UDP 포워딩을 할 수 있다. * [kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/) - 가용성, 성능 및 용량을 관리하는 스케줄러. * [kube-scheduler 정책](/ko/docs/reference/scheduling/policies) * [kube-scheduler 프로파일](/ko/docs/reference/scheduling/config/#여러-프로파일) -## 환경설정 API +## API 설정 -이 섹션은 쿠버네티스 구성요소 또는 도구를 환경설정하는 데에 사용되는 -"미발표된" API를 다룬다. 이 API들은 사용자나 관리자가 클러스터를 -사용/관리하는 데에 중요하지만, 이들 API의 대부분은 아직 API 서버가 +이 섹션은 쿠버네티스 구성요소 또는 도구를 환경설정하는 데에 사용되는 +"미발표된" API를 다룬다. 이 API들은 사용자나 관리자가 클러스터를 +사용/관리하는 데에 중요하지만, 이들 API의 대부분은 아직 API 서버가 제공하지 않는다. * [kubelet 환경설정 (v1beta1)](/docs/reference/config-api/kubelet-config.v1beta1/) @@ -78,6 +79,10 @@ TCP/UDP 스트림 포워딩이나 백-엔드 집합에 걸쳐서 라운드-로 * [클라이언트 인증 API (v1beta1)](/docs/reference/config-api/client-authentication.v1beta1/) * [WebhookAdmission 환경설정 (v1)](/docs/reference/config-api/apiserver-webhookadmission.v1/) +## kubeadm을 위한 API 설정 + +* [v1beta2](/docs/reference/config-api/kubeadm-config.v1beta2/) + ## 설계 문서 쿠버네티스 기능에 대한 설계 문서의 아카이브. diff --git a/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md b/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md index c5a13a5608..ca06783465 100644 --- a/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md +++ b/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md @@ -53,7 +53,7 @@ weight: 50 1. 이전 단계는 파드에 참조되는 `ServiceAccount` 가 있도록 하고, 그렇지 않으면 이를 거부한다. 1. 서비스어카운트 `automountServiceAccountToken` 와 파드의 `automountServiceAccountToken` 중 어느 것도 `false` 로 설정되어 있지 않다면, API 접근을 위한 토큰이 포함된 `volume` 을 파드에 추가한다. 1. 이전 단계에서 서비스어카운트 토큰을 위한 볼륨이 만들어졌다면, `/var/run/secrets/kubernetes.io/serviceaccount` 에 마운트된 파드의 각 컨테이너에 `volumeSource` 를 추가한다. -1. 
파드에 `ImagePullSecrets` 이 없는 경우, `ServiceAccount` 의 `ImagePullSecrets` 이 파드에 추가된다. +1. 파드에 `imagePullSecrets` 이 없는 경우, `ServiceAccount` 의 `imagePullSecrets` 이 파드에 추가된다. #### 바인딩된 서비스 어카운트 토큰 볼륨 @@ -86,14 +86,14 @@ weight: 50 프로젝티드 볼륨은 세 가지로 구성된다. 1. kube-apiserver로부터 TokenRequest API를 통해 얻은 서비스어카운트토큰(ServiceAccountToken). 서비스어카운트토큰은 기본적으로 1시간 뒤에, 또는 파드가 삭제될 때 만료된다. 서비스어카운트토큰은 파드에 연결되며 kube-apiserver를 위해 존재한다. -1. kube-apiserver에 대한 연결을 확인하는 데 사용되는 CA 번들을 포함하는 컨피그맵(ConfigMap). 이 기능은 모든 네임스페이스에 "kube-root-ca.crt" 컨피그맵을 게시하는 기능 게이트인 `RootCAConfigMap`이 활성화되어 있어야 동작한다. `RootCAConfigMap`은 1.20에서 기본적으로 활성화되어 있으며, 1.21 이상에서는 항상 활성화된 상태이다. +1. kube-apiserver에 대한 연결을 확인하는 데 사용되는 CA 번들을 포함하는 컨피그맵(ConfigMap). 이 기능은 모든 네임스페이스에 "kube-root-ca.crt" 컨피그맵을 게시하는 기능 게이트인 `RootCAConfigMap`에 의해 동작한다. `RootCAConfigMap` 기능 게이트는 1.21에서 GA로 전환되었으며 기본적으로 활성화되어 있다. (이 플래그는 1.22에서 `--feature-gate` 인자에서 제외될 예정이다.) 1. 파드의 네임스페이스를 참조하는 DownwardAPI. 상세 사항은 [프로젝티드 볼륨](/docs/tasks/configure-pod-container/configure-projected-volume-storage/)을 참고한다. `BoundServiceAccountTokenVolume` 기능 게이트가 활성화되어 있지 않은 경우, -위의 프로젝티드 볼륨을 파드 스펙에 추가하여 시크릿 기반 서비스 어카운트 볼륨을 프로젝티드 볼륨으로 수동으로 옮길 수 있다. -그러나, `RootCAConfigMap`은 활성화되어 있어야 한다. +위의 프로젝티드 볼륨을 파드 스펙에 추가하여 +시크릿 기반 서비스 어카운트 볼륨을 프로젝티드 볼륨으로 수동으로 옮길 수 있다. 
### 토큰 컨트롤러 diff --git a/content/ko/docs/reference/command-line-tools-reference/feature-gates.md b/content/ko/docs/reference/command-line-tools-reference/feature-gates.md index a658d58497..97262073a1 100644 --- a/content/ko/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/ko/docs/reference/command-line-tools-reference/feature-gates.md @@ -61,6 +61,7 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `BalanceAttachedNodeVolumes` | `false` | 알파 | 1.11 | | | `BoundServiceAccountTokenVolume` | `false` | 알파 | 1.13 | 1.20 | | `BoundServiceAccountTokenVolume` | `true` | 베타 | 1.21 | | +| `ControllerManagerLeaderMigration` | `false` | 알파 | 1.21 | | | `CPUManager` | `false` | 알파 | 1.8 | 1.9 | | `CPUManager` | `true` | 베타 | 1.10 | | | `CSIInlineVolume` | `false` | 알파 | 1.15 | 1.15 | @@ -152,7 +153,8 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `ProbeTerminationGracePeriod` | `false` | 알파 | 1.21 | | | `ProcMountType` | `false` | 알파 | 1.12 | | | `QOSReserved` | `false` | 알파 | 1.11 | | -| `RemainingItemCount` | `false` | 알파 | 1.15 | | +| `RemainingItemCount` | `false` | 알파 | 1.15 | 1.15 | +| `RemainingItemCount` | `true` | 베타 | 1.16 | | | `RemoveSelfLink` | `false` | 알파 | 1.16 | 1.19 | | `RemoveSelfLink` | `true` | 베타 | 1.20 | | | `RotateKubeletServerCertificate` | `false` | 알파 | 1.7 | 1.11 | @@ -378,7 +380,6 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `TokenRequestProjection` | `false` | 알파 | 1.11 | 1.11 | | `TokenRequestProjection` | `true` | 베타 | 1.12 | 1.19 | | `TokenRequestProjection` | `true` | GA | 1.20 | - | -| `VolumeCapacityPriority` | `false` | 알파 | 1.21 | - | | `VolumePVCDataSource` | `false` | 알파 | 1.15 | 1.15 | | `VolumePVCDataSource` | `true` | 베타 | 1.16 | 1.17 | | `VolumePVCDataSource` | `true` | GA | 1.18 | - | @@ -478,6 +479,11 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 `kube-apiserver`를 시작하여 확장 토큰 기능을 끈다. 자세한 내용은 [바운드 서비스 계정 토큰](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md)을 확인한다. 
+- `ControllerManagerLeaderMigration`: HA 클러스터에서 클러스터 오퍼레이터가 + kube-controller-manager의 컨트롤러들을 외부 controller-manager(예를 들면, + cloud-controller-manager)로 다운타임 없이 라이브 마이그레이션할 수 있도록 허용하도록 + [kube-controller-manager](/docs/tasks/administer-cluster/controller-manager-leader-migration/#initial-leader-migration-configuration)와 [cloud-controller-manager](/docs/tasks/administer-cluster/controller-manager-leader-migration/#deploy-cloud-controller-manager)의 + 리더 마이그레이션(Leader Migration)을 활성화한다. - `CPUManager`: 컨테이너 수준의 CPU 어피니티 지원을 활성화한다. [CPU 관리 정책](/docs/tasks/administer-cluster/cpu-management-policies/)을 참고한다. - `CRIContainerLogRotation`: cri 컨테이너 런타임에 컨테이너 로그 로테이션을 활성화한다. 로그 파일 사이즈 기본값은 10MB이며, @@ -611,12 +617,12 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 - `EnableEquivalenceClassCache`: 스케줄러가 파드를 스케줄링할 때 노드의 동등성을 캐시할 수 있게 한다. - `EndpointSlice`: 보다 스케일링 가능하고 확장 가능한 네트워크 엔드포인트에 대한 - 엔드포인트슬라이스(EndpointSlices)를 활성화한다. [엔드포인트슬라이스 활성화](/docs/tasks/administer-cluster/enabling-endpointslices/)를 참고한다. + 엔드포인트슬라이스(EndpointSlices)를 활성화한다. [엔드포인트슬라이스 활성화](/ko/docs/concepts/services-networking/endpoint-slices/)를 참고한다. - `EndpointSliceNodeName` : 엔드포인트슬라이스 `nodeName` 필드를 활성화한다. - `EndpointSliceProxying`: 활성화되면, 리눅스에서 실행되는 kube-proxy는 엔드포인트 대신 엔드포인트슬라이스를 기본 데이터 소스로 사용하여 확장성과 성능을 향상시킨다. - [엔드포인트 슬라이스 활성화](/docs/tasks/administer-cluster/enabling-endpointslices/)를 참고한다. + [엔드포인트슬라이스 활성화](/ko/docs/concepts/services-networking/endpoint-slices/)를 참고한다. - `EndpointSliceTerminatingCondition`: 엔드포인트슬라이스 `terminating` 및 `serving` 조건 필드를 활성화한다. - `EphemeralContainers`: 파드를 실행하기 위한 @@ -636,7 +642,7 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 - `ExperimentalCriticalPodAnnotation`: 특정 파드에 *critical* 로 어노테이션을 달아서 [스케줄링이 보장되도록](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) 한다. 이 기능은 v1.13부터 파드 우선 순위 및 선점으로 인해 사용 중단되었다. -- `ExperimentalHostUserNamespaceDefaultingGate`: 사용자 네임스페이스를 호스트로 +- `ExperimentalHostUserNamespaceDefaulting`: 사용자 네임스페이스를 호스트로 기본 활성화한다. 
이것은 다른 호스트 네임스페이스, 호스트 마운트, 권한이 있는 컨테이너 또는 특정 비-네임스페이스(non-namespaced) 기능(예: `MKNODE`, `SYS_MODULE` 등)을 사용하는 컨테이너를 위한 것이다. 도커 데몬에서 사용자 네임스페이스 @@ -726,7 +732,7 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 [CrossNamespacePodAffinity](/ko/docs/concepts/policy/resource-quotas/#네임스페이스-간-파드-어피니티-쿼터) 쿼터 범위 기능을 활성화한다. - `PodOverhead`: 파드 오버헤드를 판단하기 위해 [파드오버헤드(PodOverhead)](/ko/docs/concepts/scheduling-eviction/pod-overhead/) 기능을 활성화한다. -- `PodPriority`: [우선 순위](/ko/docs/concepts/configuration/pod-priority-preemption/)를 +- `PodPriority`: [우선 순위](/ko/docs/concepts/scheduling-eviction/pod-priority-preemption/)를 기반으로 파드의 스케줄링 취소와 선점을 활성화한다. - `PodReadinessGates`: 파드 준비성 평가를 확장하기 위해 `PodReadinessGate` 필드 설정을 활성화한다. 자세한 내용은 [파드의 준비성 게이트](/ko/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate)를 @@ -763,6 +769,8 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 - `RotateKubeletClientCertificate`: kubelet에서 클라이언트 TLS 인증서의 로테이션을 활성화한다. 자세한 내용은 [kubelet 구성](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration)을 참고한다. - `RotateKubeletServerCertificate`: kubelet에서 서버 TLS 인증서의 로테이션을 활성화한다. + 자세한 사항은 + [kubelet 구성](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration)을 확인한다. - `RunAsGroup`: 컨테이너의 init 프로세스에 설정된 기본 그룹 ID 제어를 활성화한다. - `RuntimeClass`: 컨테이너 런타임 구성을 선택하기 위해 [런타임클래스(RuntimeClass)](/ko/docs/concepts/containers/runtime-class/) @@ -793,6 +801,8 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 - `SetHostnameAsFQDN`: 전체 주소 도메인 이름(FQDN)을 파드의 호스트 이름으로 설정하는 기능을 활성화한다. [파드의 `setHostnameAsFQDN` 필드](/ko/docs/concepts/services-networking/dns-pod-service/#pod-sethostnameasfqdn-field)를 참고한다. +- `SizeMemoryBackedVolumes`: memory-backed 볼륨(보통 `emptyDir` 볼륨)의 크기 상한을 + 지정할 수 있도록 kubelets를 활성화한다. - `StartupProbe`: kubelet에서 [스타트업](/ko/docs/concepts/workloads/pods/pod-lifecycle/#언제-스타트업-프로브를-사용해야-하는가) 프로브를 활성화한다. @@ -859,12 +869,12 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 - `WindowsGMSA`: 파드에서 컨테이너 런타임으로 GMSA 자격 증명 스펙을 전달할 수 있다. 
- `WindowsRunAsUserName` : 기본 사용자가 아닌(non-default) 사용자로 윈도우 컨테이너에서 애플리케이션을 실행할 수 있도록 지원한다. 자세한 내용은 - [RunAsUserName 구성](/docs/tasks/configure-pod-container/configure-runasusername)을 + [RunAsUserName 구성](/ko/docs/tasks/configure-pod-container/configure-runasusername/)을 참고한다. - `WindowsEndpointSliceProxying`: 활성화되면, 윈도우에서 실행되는 kube-proxy는 엔드포인트 대신 엔드포인트슬라이스를 기본 데이터 소스로 사용하여 확장성과 성능을 향상시킨다. - [엔드포인트 슬라이스 활성화하기](/docs/tasks/administer-cluster/enabling-endpointslices/)를 참고한다. + [엔드포인트슬라이스 활성화하기](/ko/docs/concepts/services-networking/endpoint-slices/)를 참고한다. ## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/reference/command-line-tools-reference/kube-proxy.md b/content/ko/docs/reference/command-line-tools-reference/kube-proxy.md index eab89638db..29e9deee83 100644 --- a/content/ko/docs/reference/command-line-tools-reference/kube-proxy.md +++ b/content/ko/docs/reference/command-line-tools-reference/kube-proxy.md @@ -424,7 +424,7 @@ kube-proxy [flags] --show-hidden-metrics-for-version string -

    숨겨진 메트릭을 표시할 이전 버전. 이전 마이너 버전만 인식하며, 다른 값은 허용하지 않는다. '1.16' 형태로 사용한다. 이 옵션의 존재 목적은, 다음 릴리스에서 추가적인 메트릭을 숨기는지에 대한 여부를 사용자가 알게 하여, 그 이후 릴리스에서 메트릭이 영구적으로 삭제됐을 때 사용자가 놀라지 않도록 하기 위함이다.

    +

    숨겨진 메트릭을 표시하려는 이전 버전. 이전 마이너 버전만 인식하며, 다른 값은 허용하지 않는다. 포맷은 <메이저>.<마이너> 와 같으며, 예를 들면 '1.16' 과 같다. 이 포맷의 목적은, 다음 릴리스가 숨길 추가적인 메트릭을 사용자에게 공지하여, 그 이후 릴리스에서 메트릭이 영구적으로 삭제됐을 때 사용자가 놀라지 않도록 하기 위함이다.

    diff --git a/content/ko/docs/reference/glossary/annotation.md b/content/ko/docs/reference/glossary/annotation.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/api-eviction.md b/content/ko/docs/reference/glossary/api-eviction.md new file mode 100644 index 0000000000..f8a65c606e --- /dev/null +++ b/content/ko/docs/reference/glossary/api-eviction.md @@ -0,0 +1,23 @@ +--- +title: API를 이용한 축출(Eviction) +id: api-eviction +date: 2021-04-27 +full_link: /docs/concepts/scheduling-eviction/pod-eviction/#api-eviction +short_description: > + API를 이용한 축출은 축출 API를 사용하여 파드의 정상 종료를 트리거하는 + 축출 오브젝트를 만드는 프로세스이다 +aka: +tags: + - operation +--- + +API를 이용한 축출은 [축출 API](/docs/reference/generated/kubernetes-api/{{}}/#create-eviction-pod-v1-core)를 사용하여 +생성된 `Eviction` 오브젝트로 파드를 정상 종료한다. + + + +`kubectl drain` 명령과 같은 kube-apiserver의 클라이언트를 사용하여 +축출 API를 직접 호출해 축출 요청을 할 수 있다. +`Eviction` 오브젝트가 생성되면, API 서버가 파드를 종료한다. + +API를 이용한 축출은 [노드-압박 축출](/docs/concepts/scheduling-eviction/eviction/#kubelet-eviction)과 동일하지 않다. diff --git a/content/ko/docs/reference/glossary/certificate.md b/content/ko/docs/reference/glossary/certificate.md index b5bc067015..7c40e48795 100644 --- a/content/ko/docs/reference/glossary/certificate.md +++ b/content/ko/docs/reference/glossary/certificate.md @@ -2,7 +2,7 @@ title: 인증서(Certificate) id: certificate date: 2018-04-12 -full_link: /docs/tasks/tls/managing-tls-in-a-cluster/ +full_link: /ko/docs/tasks/tls/managing-tls-in-a-cluster/ short_description: > 암호화된 안전한 파일로 쿠버네티스 클러스터 접근 검증에 사용한다. 
diff --git a/content/ko/docs/reference/glossary/cluster.md b/content/ko/docs/reference/glossary/cluster.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/configmap.md b/content/ko/docs/reference/glossary/configmap.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/container-env-variables.md b/content/ko/docs/reference/glossary/container-env-variables.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/container.md b/content/ko/docs/reference/glossary/container.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/cronjob.md b/content/ko/docs/reference/glossary/cronjob.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/customresourcedefinition.md b/content/ko/docs/reference/glossary/customresourcedefinition.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/daemonset.md b/content/ko/docs/reference/glossary/daemonset.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/deployment.md b/content/ko/docs/reference/glossary/deployment.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/docker.md b/content/ko/docs/reference/glossary/docker.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/extensions.md b/content/ko/docs/reference/glossary/extensions.md index caf7bfa226..547cd934bc 100644 --- a/content/ko/docs/reference/glossary/extensions.md +++ b/content/ko/docs/reference/glossary/extensions.md @@ -2,7 +2,7 @@ title: 익스텐션(Extensions) id: Extensions date: 2019-02-01 -full_link: /ko/docs/concepts/extend-kubernetes/extend-cluster/#익스텐션 +full_link: /ko/docs/concepts/extend-kubernetes/#익스텐션 short_description: > 익스텐션은 새로운 타입의 하드웨어를 지원하기 위해 쿠버네티스를 확장하고 깊게 통합시키는 소프트웨어 컴포넌트이다. @@ -15,4 +15,4 @@ tags: -대부분의 클러스터 관리자는 호스트된 쿠버네티스 또는 쿠버네티스의 배포 인스턴스를 사용할 것이다. 
그 결과, 대부분의 쿠버네티스 사용자는 [익스텐션](/ko/docs/concepts/extend-kubernetes/extend-cluster/#익스텐션)의 설치가 필요할 것이며, 일부 사용자만 직접 새로운 것을 만들 것이다. +대부분의 클러스터 관리자는 호스트된 쿠버네티스 또는 쿠버네티스의 배포 인스턴스를 사용할 것이다. 그 결과, 대부분의 쿠버네티스 사용자는 [익스텐션](/ko/docs/concepts/extend-kubernetes/#익스텐션)의 설치가 필요할 것이며, 일부 사용자만 직접 새로운 것을 만들 것이다. diff --git a/content/ko/docs/reference/glossary/image.md b/content/ko/docs/reference/glossary/image.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/index.md b/content/ko/docs/reference/glossary/index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/ingress.md b/content/ko/docs/reference/glossary/ingress.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/init-container.md b/content/ko/docs/reference/glossary/init-container.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/istio.md b/content/ko/docs/reference/glossary/istio.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/job.md b/content/ko/docs/reference/glossary/job.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/kube-controller-manager.md b/content/ko/docs/reference/glossary/kube-controller-manager.md index e327a6c285..f4cf8f1bd2 100644 --- a/content/ko/docs/reference/glossary/kube-controller-manager.md +++ b/content/ko/docs/reference/glossary/kube-controller-manager.md @@ -4,15 +4,15 @@ id: kube-controller-manager date: 2018-04-12 full_link: /docs/reference/command-line-tools-reference/kube-controller-manager/ short_description: > - {{< glossary_tooltip text="컨트롤러" term_id="controller" >}} 프로세스를 실행하는 컨트롤 플레인 컴포넌트. + 컨트롤러 프로세스를 실행하는 컨트롤 플레인 컴포넌트. -aka: +aka: tags: - architecture - fundamental --- - {{< glossary_tooltip text="컨트롤러" term_id="controller" >}}를 구동하는 마스터 상의 컴포넌트. + {{< glossary_tooltip text="컨트롤러" term_id="controller" >}} 프로세스를 실행하는 컨트롤 플레인 컴포넌트. 
- + -논리적으로, 각 {{< glossary_tooltip text="컨트롤러" term_id="controller" >}}는 개별 프로세스이지만, 복잡성을 낮추기 위해 모두 단일 바이너리로 컴파일되고 단일 프로세스 내에서 실행된다. +논리적으로, 각 {{< glossary_tooltip text="컨트롤러" term_id="controller" >}}는 분리된 프로세스이지만, 복잡성을 낮추기 위해 모두 단일 바이너리로 컴파일되고 단일 프로세스 내에서 실행된다. diff --git a/content/ko/docs/reference/glossary/kube-proxy.md b/content/ko/docs/reference/glossary/kube-proxy.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/kube-scheduler.md b/content/ko/docs/reference/glossary/kube-scheduler.md index 38562f6087..33f79adf67 100644 --- a/content/ko/docs/reference/glossary/kube-scheduler.md +++ b/content/ko/docs/reference/glossary/kube-scheduler.md @@ -2,7 +2,7 @@ title: kube-scheduler id: kube-scheduler date: 2018-04-12 -full_link: /docs/reference/generated/kube-scheduler/ +full_link: /docs/reference/command-line-tools-reference/kube-scheduler/ short_description: > 노드가 배정되지 않은 새로 생성된 파드를 감지하고, 실행할 노드를 선택하는 컨트롤 플레인 컴포넌트. diff --git a/content/ko/docs/reference/glossary/kubectl.md b/content/ko/docs/reference/glossary/kubectl.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/kubernetes-api.md b/content/ko/docs/reference/glossary/kubernetes-api.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/label.md b/content/ko/docs/reference/glossary/label.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/limitrange.md b/content/ko/docs/reference/glossary/limitrange.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/minikube.md b/content/ko/docs/reference/glossary/minikube.md old mode 100755 new mode 100644 index f43966260e..8efe83c0cd --- a/content/ko/docs/reference/glossary/minikube.md +++ b/content/ko/docs/reference/glossary/minikube.md @@ -2,7 +2,7 @@ title: Minikube id: minikube date: 2018-04-12 -full_link: /ko/docs/setup/learning-environment/minikube/ +full_link: /ko/docs/tasks/tools/#minikube short_description: > 로컬에서 
쿠버네티스를 실행하기 위한 도구. diff --git a/content/ko/docs/reference/glossary/mirror-pod.md b/content/ko/docs/reference/glossary/mirror-pod.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/name.md b/content/ko/docs/reference/glossary/name.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/namespace.md b/content/ko/docs/reference/glossary/namespace.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/network-policy.md b/content/ko/docs/reference/glossary/network-policy.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/node-pressure-eviction.md b/content/ko/docs/reference/glossary/node-pressure-eviction.md new file mode 100644 index 0000000000..b0984ab807 --- /dev/null +++ b/content/ko/docs/reference/glossary/node-pressure-eviction.md @@ -0,0 +1,24 @@ +--- +title: 노드-압박 축출 +id: node-pressure-eviction +date: 2021-05-13 +full_link: /ko/docs/concepts/scheduling-eviction/node-pressure-eviction/ +short_description: > + 노드-압박 축출은 kubelet이 노드의 자원을 회수하기 위해 + 파드를 능동적으로 중단시키는 절차이다. +aka: +- kubelet eviction +tags: +- operation +--- +노드-압박 축출은 {{}}이 노드의 자원을 회수하기 위해 +파드를 능동적으로 중단시키는 절차이다. + + + +kubelet은 클러스터 노드의 CPU, 메모리, 디스크 공간, 파일시스템 +inode와 같은 자원을 모니터링한다. 이러한 자원 중 하나 이상이 +특정 소모 수준에 도달하면, kubelet은 하나 이상의 파드를 능동적으로 중단시켜 +자원을 회수하고 고갈 상황을 방지할 수 있다. + +노드-압박 축출은 [API를 이용한 축출](/ko/docs/concepts/scheduling-eviction/api-eviction/)과는 차이가 있다. 
diff --git a/content/ko/docs/reference/glossary/node.md b/content/ko/docs/reference/glossary/node.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/pod-security-policy.md b/content/ko/docs/reference/glossary/pod-security-policy.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/pod.md b/content/ko/docs/reference/glossary/pod.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/qos-class.md b/content/ko/docs/reference/glossary/qos-class.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/rbac.md b/content/ko/docs/reference/glossary/rbac.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/replica-set.md b/content/ko/docs/reference/glossary/replica-set.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/replication-controller.md b/content/ko/docs/reference/glossary/replication-controller.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/resource-quota.md b/content/ko/docs/reference/glossary/resource-quota.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/selector.md b/content/ko/docs/reference/glossary/selector.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/service-account.md b/content/ko/docs/reference/glossary/service-account.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/service.md b/content/ko/docs/reference/glossary/service.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/statefulset.md b/content/ko/docs/reference/glossary/statefulset.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/static-pod.md b/content/ko/docs/reference/glossary/static-pod.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/uid.md b/content/ko/docs/reference/glossary/uid.md 
old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/glossary/volume.md b/content/ko/docs/reference/glossary/volume.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/kubectl/_index.md b/content/ko/docs/reference/kubectl/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/reference/kubectl/kubectl.md b/content/ko/docs/reference/kubectl/kubectl.md index ede8c85457..81e4d3fa74 100644 --- a/content/ko/docs/reference/kubectl/kubectl.md +++ b/content/ko/docs/reference/kubectl/kubectl.md @@ -9,7 +9,7 @@ weight: 30 kubectl은 쿠버네티스 클러스터 관리자를 제어한다. - 자세한 정보는 https://kubernetes.io/docs/reference/kubectl/overview/ 에서 확인한다. + 자세한 정보는 [kubectl 개요](/ko/docs/reference/kubectl/overview/)를 확인한다. ``` kubectl [flags] diff --git a/content/ko/docs/reference/labels-annotations-taints.md b/content/ko/docs/reference/labels-annotations-taints.md new file mode 100644 index 0000000000..0854c1b5cf --- /dev/null +++ b/content/ko/docs/reference/labels-annotations-taints.md @@ -0,0 +1,325 @@ +--- +title: 잘 알려진 레이블, 어노테이션, 테인트(Taint) +content_type: concept +weight: 20 +--- + + + +쿠버네티스는 모든 레이블과 어노테이션을 `kubernetes.io` 네임스페이스 아래에 정의해 놓았다. + +이 문서는 각 값에 대한 레퍼런스를 제공하며, 값을 할당하기 위한 협력 포인트도 제공한다. + + + + + +## kubernetes.io/arch + +예시: `kubernetes.io/arch=amd64` + +적용 대상: 노드 + +Go에 의해 정의된 `runtime.GOARCH` 값을 kubelet이 읽어서 이 레이블의 값으로 채운다. arm 노드와 x86 노드를 혼합하여 사용하는 경우 유용할 수 있다. + +## kubernetes.io/os + +예시: `kubernetes.io/os=linux` + +적용 대상: 노드 + +Go에 의해 정의된 `runtime.GOOS` 값을 kubelet이 읽어서 이 레이블의 값으로 채운다. 클러스터에서 여러 운영체제를 혼합하여 사용(예: 리눅스 및 윈도우 노드)하는 경우 유용할 수 있다. + +## kubernetes.io/metadata.name + +예시: `kubernetes.io/metadata.name=mynamespace` + +적용 대상: 네임스페이스 + +`NamespaceDefaultLabelName` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 +활성화되어 있으면, +쿠버네티스 API 서버가 모든 네임스페이스에 이 레이블을 적용한다. +레이블의 값은 네임스페이스의 이름으로 적용된다. + +레이블 {{< glossary_tooltip text="셀렉터" term_id="selector" >}}를 이용하여 특정 네임스페이스를 지정하고 싶다면 +이 레이블이 유용할 수 있다. 
+ +## beta.kubernetes.io/arch (사용 중단됨) + +이 레이블은 사용 중단되었다. 대신 `kubernetes.io/arch` 을 사용한다. + +## beta.kubernetes.io/os (사용 중단됨) + +이 레이블은 사용 중단되었다. 대신 `kubernetes.io/os` 을 사용한다. + +## kubernetes.io/hostname {#kubernetesiohostname} + +예시: `kubernetes.io/hostname=ip-172-20-114-199.ec2.internal` + +적용 대상: 노드 + +kubelet이 호스트네임을 읽어서 이 레이블의 값으로 채운다. `kubelet` 에 `--hostname-override` 플래그를 전달하여 실제 호스트네임과 다른 값으로 설정할 수도 있다. + +이 레이블은 토폴로지 계층의 일부로도 사용된다. [`topology.kubernetes.io/zone`](#topologykubernetesiozone)에서 세부 사항을 확인한다. + + +## controller.kubernetes.io/pod-deletion-cost {#pod-deletion-cost} + +예시: `controller.kubernetes.io/pod-deletion-cost=10` + +적용 대상: Pod + +이 어노테이션은 레플리카셋(ReplicaSet) 다운스케일 순서를 조정할 수 있는 요소인 [파드 삭제 비용](/ko/docs/concepts/workloads/controllers/replicaset/#파드-삭제-비용)을 +설정하기 위해 사용한다. 명시된 값은 `int32` 타입으로 파싱된다. + +## beta.kubernetes.io/instance-type (사용 중단됨) + +{{< note >}} v1.17부터, [`node.kubernetes.io/instance-type`](#nodekubernetesioinstance-type)으로 대체되었다. {{< /note >}} + +## node.kubernetes.io/instance-type {#nodekubernetesioinstance-type} + +예시: `node.kubernetes.io/instance-type=m3.medium` + +적용 대상: 노드 + +`클라우드 제공자`에 의해 정의된 인스턴스 타입의 값을 kubelet이 읽어서 이 레이블의 값으로 채운다. +`클라우드 제공자`를 사용하는 경우에만 이 레이블이 설정된다. +특정 워크로드를 특정 인스턴스 타입에 할당하고 싶다면 이 레이블이 유용할 수 있다. +하지만 일반적으로는 자원 기반 스케줄링을 수행하는 쿠버네티스 스케줄러를 이용하게 된다. 인스턴스 타입 보다는 특성을 기준으로 스케줄링을 고려해야 한다(예: `g2.2xlarge` 를 요구하기보다는, GPU가 필요하다고 요구한다). + +## failure-domain.beta.kubernetes.io/region (사용 중단됨) {#failure-domainbetakubernetesioregion} + +[`topology.kubernetes.io/region`](#topologykubernetesioregion)을 확인한다. + +{{< note >}} v1.17부터, [`topology.kubernetes.io/region`](#topologykubernetesioregion)으로 대체되었다. {{< /note >}} + +## failure-domain.beta.kubernetes.io/zone (사용 중단됨) {#failure-domainbetakubernetesiozone} + +[`topology.kubernetes.io/zone`](#topologykubernetesiozone)을 확인한다. + +{{< note >}} v1.17부터, [`topology.kubernetes.io/zone`](#topologykubernetesiozone)으로 대체되었다. 
{{< /note >}} + +## statefulset.kubernetes.io/pod-name {#statefulsetkubernetesiopod-name} + +예시: + +`statefulset.kubernetes.io/pod-name=mystatefulset-7` + +스테이트풀셋(StatefulSet) 컨트롤러가 파드를 위한 스테이트풀셋을 생성하면, 컨트롤 플레인이 파드에 이 레이블을 설정한다. +생성되는 파드의 이름을 이 레이블의 값으로 설정한다. + +스테이트풀셋 문서의 [파드 이름 레이블](/ko/docs/concepts/workloads/controllers/statefulset/#파드-이름-레이블)에서 +상세 사항을 확인한다. + +## topology.kubernetes.io/region {#topologykubernetesioregion} + +예시: + +`topology.kubernetes.io/region=us-east-1` + +[`topology.kubernetes.io/zone`](#topologykubernetesiozone)을 확인한다. + +## topology.kubernetes.io/zone {#topologykubernetesiozone} + +예시: + +`topology.kubernetes.io/zone=us-east-1c` + +적용 대상: 노드, 퍼시스턴트볼륨(PersistentVolume) + +노드의 경우: `클라우드 제공자`가 제공하는 값을 이용하여 `kubelet` 또는 외부 `cloud-controller-manager`가 이 어노테이션의 값을 설정한다. `클라우드 제공자`를 사용하는 경우에만 이 레이블이 설정된다. 하지만, 토폴로지 내에서 의미가 있는 경우에만 이 레이블을 노드에 설정해야 한다. + +퍼시스턴트볼륨의 경우: 토폴로지 어웨어 볼륨 프로비저너가 자동으로 퍼시스턴트볼륨에 노드 어피니티 제약을 설정한다. + +영역(zone)은 논리적 고장 도메인을 나타낸다. 가용성 향상을 위해 일반적으로 쿠버네티스 클러스터는 여러 영역에 걸쳐 구성된다. 영역에 대한 정확한 정의는 사업자 별 인프라 구현에 따라 다르지만, 일반적으로 영역은 '영역 내 매우 낮은 네트워크 지연시간, 영역 내 네트워크 트래픽 비용 없음, 다른 영역의 고장에 독립적임' 등의 공통적인 특성을 갖는다. 예를 들어, 같은 영역 내의 노드는 하나의 네트워크 스위치를 공유하여 활용할 수 있으며, 반대로 다른 영역에 있는 노드는 하나의 네트워크 스위치를 공유해서는 안 된다. + +지역(region)은 하나 이상의 영역으로 구성된 더 큰 도메인을 나타낸다. 쿠버네티스 클러스터가 여러 지역에 걸쳐 있는 경우는 드물다. 영역이나 지역에 대한 정확한 정의는 사업자 별 인프라 구현에 따라 다르지만, 일반적으로 지역은 '지역 내 네트워크 지연시간보다 지역 간 네트워크 지연시간이 큼, 지역 간 네트워크 트래픽은 비용이 발생함, 다른 영역/지역의 고장에 독립적임' 등의 공통적인 특성을 갖는다. 예를 들어, 같은 지역 내의 노드는 전력 인프라(예: UPS 또는 발전기)를 공유하여 활용할 수 있으며, 반대로 다른 지역에 있는 노드는 일반적으로 전력 인프라를 공유하지 않는다. + +쿠버네티스는 영역과 지역의 구조에 대해 다음과 같이 가정한다. +1) 지역과 영역은 계층적이다. 영역은 지역의 엄격한 부분집합(strict subset)이며, 하나의 영역이 두 개의 지역에 속할 수는 없다. +2) 영역 이름은 모든 지역에 걸쳐서 유일하다. 예를 들어, "africa-east-1" 라는 지역은 "africa-east-1a" 와 "africa-east-1b" 라는 영역으로 구성될 수 있다. + +토폴로지 레이블이 변경되는 일은 없다고 가정할 수 있다. 일반적으로 레이블의 값은 변경될 수 있지만, 특정 노드가 삭제 후 재생성되지 않고서는 다른 영역으로 이동할 수 없기 때문이다. + +쿠버네티스는 이 정보를 다양한 방식으로 활용할 수 있다. 
예를 들어, 단일 영역 클러스터에서는 스케줄러가 자동으로 레플리카셋의 파드를 여러 노드에 퍼뜨린다(노드 고장의 영향을 줄이기 위해 - [`kubernetes.io/hostname`](#kubernetesiohostname) 참고). 복수 영역 클러스터에서는, 여러 영역에 퍼뜨린다(영역 고장의 영향을 줄이기 위해). 이는 _SelectorSpreadPriority_ 를 통해 실현된다. + +_SelectorSpreadPriority_ 는 최선 노력(best effort) 배치 방법이다. 클러스터가 위치한 영역들의 특성이 서로 다르다면(예: 노드 숫자가 다름, 노드 타입이 다름, 파드 자원 요구사항이 다름), 파드 숫자를 영역별로 다르게 하여 배치할 수 있다. 필요하다면, 영역들의 특성(노드 숫자/타입)을 일치시켜 불균형 배치의 가능성을 줄일 수 있다. + +스케줄러도 (_VolumeZonePredicate_ 표시자를 이용하여) '파드가 요청하는 볼륨'이 위치하는 영역과 같은 영역에 파드를 배치한다. 여러 영역에서 볼륨에 접근할 수는 없다. + +`PersistentVolumeLabel`이 퍼시스턴트볼륨의 자동 레이블링을 지원하지 않는다면, 레이블을 수동으로 추가하거나 `PersistentVolumeLabel`이 동작하도록 변경할 수 있다. +`PersistentVolumeLabel`이 설정되어 있으면, 스케줄러는 파드가 다른 영역에 있는 볼륨에 마운트하는 것을 막는다. 만약 사용 중인 인프라에 이러한 제약이 없다면, 볼륨에 영역 레이블을 추가할 필요가 전혀 없다. + +## node.kubernetes.io/windows-build {#nodekubernetesiowindows-build} + +예시: `node.kubernetes.io/windows-build=10.0.17763` + +적용 대상: 노드 + +kubelet이 Microsoft 윈도우에서 실행되고 있다면, 사용 중인 Windows Server 버전을 기록하기 위해 kubelet이 노드에 이 레이블을 추가한다. + +이 레이블의 값은 "MajorVersion.MinorVersion.BuildNumber"의 형태를 갖는다. + +## service.kubernetes.io/headless {#servicekubernetesioheadless} + +예시: `service.kubernetes.io/headless=""` + +적용 대상: 서비스 + +서비스가 헤드리스(headless)이면, 컨트롤 플레인이 엔드포인트(Endpoints) 오브젝트에 이 레이블을 추가한다. + +## kubernetes.io/service-name {#kubernetesioservice-name} + +예시: `kubernetes.io/service-name="nginx"` + +적용 대상: 서비스 + +쿠버네티스가 여러 서비스를 구분하기 위해 이 레이블을 사용한다. 현재는 `ELB`(Elastic Load Balancer) 를 위해서만 사용되고 있다. + +## endpointslice.kubernetes.io/managed-by {#endpointslicekubernetesiomanaged-by} + +예시: `endpointslice.kubernetes.io/managed-by="controller"` + +적용 대상: 엔드포인트슬라이스(EndpointSlices) + +이 레이블은 엔드포인트슬라이스(EndpointSlice)를 어떤 컨트롤러나 엔티티가 관리하는지를 나타내기 위해 사용된다. 이 레이블을 사용함으로써 한 클러스터 내에서 여러 엔드포인트슬라이스 오브젝트가 각각 다른 컨트롤러나 엔티티에 의해 관리될 수 있다. 
+ +## endpointslice.kubernetes.io/skip-mirror {#endpointslicekubernetesioskip-mirror} + +예시: `endpointslice.kubernetes.io/skip-mirror="true"` + +적용 대상: 엔드포인트(Endpoints) + +특정 자원에 이 레이블을 `"true"` 로 설정하여, EndpointSliceMirroring 컨트롤러가 엔드포인트슬라이스를 이용하여 해당 자원을 미러링하지 않도록 지시할 수 있다. + +## service.kubernetes.io/service-proxy-name {#servicekubernetesioservice-proxy-name} + +예시: `service.kubernetes.io/service-proxy-name="foo-bar"` + +적용 대상: 서비스 + +kube-proxy 에는 커스텀 프록시를 위한 이와 같은 레이블이 있으며, 이 레이블은 서비스 컨트롤을 커스텀 프록시에 위임한다. + +## experimental.windows.kubernetes.io/isolation-type + +예시: `experimental.windows.kubernetes.io/isolation-type: "hyperv"` + +적용 대상: 파드 + +Hyper-V 격리(isolation)를 사용하여 윈도우 컨테이너를 실행하려면 이 어노테이션을 사용한다. Hyper-V 격리 기능을 활성화하고 Hyper-V 격리가 적용된 컨테이너를 생성하기 위해, kubelet은 기능 게이트 `HyperVContainer=true` 로 설정하여 실행되어야 하며, 파드에는 `experimental.windows.kubernetes.io/isolation-type=hyperv` 어노테이션이 설정되어 있어야 한다. + +{{< note >}} +이 어노테이션은 하나의 컨테이너로 구성된 파드에만 설정할 수 있다. +{{< /note >}} + +## ingressclass.kubernetes.io/is-default-class + +예시: `ingressclass.kubernetes.io/is-default-class: "true"` + +적용 대상: 인그레스클래스(IngressClass) + +하나의 인그레스클래스 리소스에 이 어노테이션이 `"true"`로 설정된 경우, 클래스가 명시되지 않은 새로운 인그레스(Ingress) 리소스는 해당 기본 클래스로 할당될 것이다. + +## kubernetes.io/ingress.class (사용 중단됨) + +{{< note >}} +v1.18부터, `spec.ingressClassName`으로 대체되었다. +{{< /note >}} + +## storageclass.kubernetes.io/is-default-class + +예시: `storageclass.kubernetes.io/is-default-class=true` + +적용 대상: 스토리지클래스(StorageClass) + +하나의 스토리지클래스(StorageClass) 리소스에 이 어노테이션이 `"true"`로 설정된 경우, +클래스가 명시되지 않은 새로운 퍼시스턴트볼륨클레임(PersistentVolumeClaim) 리소스는 해당 기본 클래스로 할당될 것이다. + +## alpha.kubernetes.io/provided-node-ip + +예시: `alpha.kubernetes.io/provided-node-ip: "10.0.0.1"` + +적용 대상: 노드 + +kubelet이 노드에 할당된 IPv4 주소를 명시하기 위해 이 어노테이션을 사용할 수 있다. + +kubelet이 "외부" 클라우드 제공자에 의해 실행되었다면, 명령줄 플래그(`--node-ip`)를 통해 설정된 IP 주소를 명시하기 위해 kubelet이 이 어노테이션을 노드에 설정한다. cloud-controller-manager는 클라우드 제공자에게 이 IP 주소가 유효한지를 검증한다. 
+ +## batch.kubernetes.io/job-completion-index + +예시: `batch.kubernetes.io/job-completion-index: "3"` + +적용 대상: 파드 + +kube-controller-manager의 잡(Job) 컨트롤러는 +`Indexed` [완료 모드](/ko/docs/concepts/workloads/controllers/job/#완료-모드)로 생성된 파드에 이 어노테이션을 추가한다. + +## kubectl.kubernetes.io/default-container + +예시: `kubectl.kubernetes.io/default-container: "front-end-app"` + +파드의 기본 컨테이너로 사용할 컨테이너 이름을 지정하는 어노테이션이다. 예를 들어, `kubectl logs` 또는 `kubectl exec` 명령을 사용할 때 `-c` 또는 `--container` 플래그를 지정하지 않으면, 이 어노테이션으로 명시된 기본 컨테이너를 대상으로 실행될 것이다. + +## endpoints.kubernetes.io/over-capacity + +예시: `endpoints.kubernetes.io/over-capacity:warning` + +적용 대상: 엔드포인트(Endpoints) + +v1.21 이상의 쿠버네티스 클러스터에서, 엔드포인트(Endpoints) 컨트롤러가 1000개 이상의 엔드포인트를 관리하고 있다면 각 엔드포인트 리소스에 이 어노테이션을 추가한다. 이 어노테이션은 엔드포인트 리소스가 용량 초과 되었음을 나타낸다. + +**이 이후로 나오는 테인트는 모두 '적용 대상: 노드' 이다.** + +## node.kubernetes.io/not-ready + +예시: `node.kubernetes.io/not-ready:NoExecute` + +노드 컨트롤러는 노드의 헬스를 모니터링하여 노드가 사용 가능한 상태인지를 감지하고 그에 따라 이 테인트를 추가하거나 제거한다. + +## node.kubernetes.io/unreachable + +예시: `node.kubernetes.io/unreachable:NoExecute` + +노드 컨트롤러는 [노드 컨디션](/ko/docs/concepts/architecture/nodes/#condition)이 `Ready`에서 `Unknown`으로 변경된 노드에 이 테인트를 추가한다. + +## node.kubernetes.io/unschedulable + +예시: `node.kubernetes.io/unschedulable:NoSchedule` + +경쟁 상태(race condition) 발생을 막기 위해, 생성 중인 노드에 이 테인트가 추가된다. + +## node.kubernetes.io/memory-pressure + +예시: `node.kubernetes.io/memory-pressure:NoSchedule` + +kubelet은 노드의 `memory.available`와 `allocatableMemory.available`을 관측하여 메모리 압박을 감지한다. 그 뒤, 관측한 값을 kubelet에 설정된 문턱값(threshold)과 비교하여 노드 컨디션과 테인트의 추가/삭제 여부를 결정한다. + +## node.kubernetes.io/disk-pressure + +예시: `node.kubernetes.io/disk-pressure:NoSchedule` + +kubelet은 노드의 `imagefs.available`, `imagefs.inodesFree`, `nodefs.available`, `nodefs.inodesFree`(리눅스에 대해서만)를 관측하여 디스크 압박을 감지한다. 그 뒤, 관측한 값을 kubelet에 설정된 문턱값(threshold)과 비교하여 노드 컨디션과 테인트의 추가/삭제 여부를 결정한다. 
+ +## node.kubernetes.io/network-unavailable + +예시: `node.kubernetes.io/network-unavailable:NoSchedule` + +사용 중인 클라우드 공급자가 추가 네트워크 환경설정을 필요로 한다고 명시하면, kubelet이 이 테인트를 설정한다. 클라우드 상의 네트워크 경로가 올바르게 구성되어야, 클라우드 공급자가 이 테인트를 제거할 것이다. + +## node.kubernetes.io/pid-pressure + +예시: `node.kubernetes.io/pid-pressure:NoSchedule` + +kubelet은 '`/proc/sys/kernel/pid_max`의 크기의 D-값'과 노드에서 쿠버네티스가 사용 중인 PID를 확인하여, `pid.available` 지표라고 불리는 '사용 가능한 PID 수'를 가져온다. 그 뒤, 관측한 지표를 kubelet에 설정된 문턱값(threshold)과 비교하여 노드 컨디션과 테인트의 추가/삭제 여부를 결정한다. + +## node.cloudprovider.kubernetes.io/uninitialized + +예시: `node.cloudprovider.kubernetes.io/uninitialized:NoSchedule` + +kubelet이 "외부" 클라우드 공급자에 의해 실행되었다면 노드가 '사용 불가능'한 상태라고 표시하기 위해 이 테인트가 추가되며, 추후 cloud-controller-manager가 이 노드를 초기화하고 이 테인트를 제거한다. + +## node.cloudprovider.kubernetes.io/shutdown + +예시: `node.cloudprovider.kubernetes.io/shutdown:NoSchedule` + +노드의 상태가 클라우드 공급자가 정의한 'shutdown' 상태이면, 이에 따라 노드에 `node.cloudprovider.kubernetes.io/shutdown` 테인트가 `NoSchedule` 값으로 설정된다. diff --git a/content/ko/docs/reference/scheduling/config.md b/content/ko/docs/reference/scheduling/config.md index 5da54ed813..2f46c78d8b 100644 --- a/content/ko/docs/reference/scheduling/config.md +++ b/content/ko/docs/reference/scheduling/config.md @@ -18,9 +18,9 @@ weight: 20 각 단계는 익스텐션 포인트(extension point)를 통해 노출된다. 플러그인은 이러한 익스텐션 포인트 중 하나 이상을 구현하여 스케줄링 동작을 제공한다. -[KubeSchedulerConfiguration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) -구조에 맞게 파일을 작성하고, -`kube-scheduler --config `을 실행하여 +[KubeSchedulerConfiguration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +구조에 맞게 파일을 작성하고, +`kube-scheduler --config `을 실행하여 스케줄링 프로파일을 지정할 수 있다. 최소 구성은 다음과 같다. @@ -149,8 +149,8 @@ profiles: 또는 바인딩할 수 있는지 확인한다. 익스텐션 포인트: `PreFilter`, `Filter`, `Reserve`, `PreBind`, `Score`. 
{{< note >}} - `Score` 익스텐션 포인트는 `VolumeCapacityPriority` 기능이 - 활성화되어 있어야 활성화되며, + `Score` 익스텐션 포인트는 `VolumeCapacityPriority` 기능이 + 활성화되어 있어야 활성화되며, 요청된 볼륨 사이즈를 만족하는 가장 작은 PV들을 우선순위 매긴다. {{< /note >}} - `VolumeRestrictions`: 노드에 마운트된 볼륨이 볼륨 제공자에 특정한 @@ -250,6 +250,6 @@ profiles: ## {{% heading "whatsnext" %}} -* [kube-scheduler 레퍼런스](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) 읽어보기 +* [kube-scheduler 레퍼런스](/docs/reference/command-line-tools-reference/kube-scheduler/) 읽어보기 * [스케줄링](/ko/docs/concepts/scheduling-eviction/kube-scheduler/)에 대해 알아보기 * [kube-scheduler configuration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) 레퍼런스 읽어보기 diff --git a/content/ko/docs/reference/tools/_index.md b/content/ko/docs/reference/tools/_index.md index a38158bf14..6ac3b1dc82 100644 --- a/content/ko/docs/reference/tools/_index.md +++ b/content/ko/docs/reference/tools/_index.md @@ -1,8 +1,10 @@ --- - - title: 도구 + + content_type: concept +weight: 80 +no_list: true --- @@ -10,13 +12,6 @@ content_type: concept -## Kubectl - -[`kubectl`](/ko/docs/tasks/tools/install-kubectl/)은 쿠버네티스를 위한 커맨드라인 툴이며, 쿠버네티스 클러스터 매니저을 제어한다. - -## Kubeadm - -[`kubeadm`](/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)은 물리적 환경, 클라우드 서버, 또는 가상머신 상에서 안전한 쿠버네티스를 쉽게 프로비저닝하기 위한 커맨드라인 툴이다(현재는 알파 상태). ## Minikube @@ -31,8 +26,8 @@ content_type: concept ## Helm -[`쿠버네티스 Helm`](https://github.com/kubernetes/helm)은 사전 구성된 쿠버네티스 리소스를 관리하기위한 도구이며 -또한 Helm의 쿠버네티스 차트라고도 한다. +[Helm](https://helm.sh/)은 사전 구성된 쿠버네티스 리소스 패키지를 관리하기 위한 도구이다. +이 패키지는 _Helm charts_ 라고 알려져 있다. Helm의 용도 diff --git a/content/ko/docs/reference/using-api/client-libraries.md b/content/ko/docs/reference/using-api/client-libraries.md index 639c10ac34..11d2e793bc 100644 --- a/content/ko/docs/reference/using-api/client-libraries.md +++ b/content/ko/docs/reference/using-api/client-libraries.md @@ -65,7 +65,6 @@ API 호출 또는 요청/응답 타입을 직접 구현할 필요는 없다. 
| PHP | [github.com/maclof/kubernetes-client](https://github.com/maclof/kubernetes-client) | | PHP | [github.com/travisghansen/kubernetes-client-php](https://github.com/travisghansen/kubernetes-client-php) | | PHP | [github.com/renoki-co/php-k8s](https://github.com/renoki-co/php-k8s) | -| Python | [github.com/eldarion-gondor/pykube](https://github.com/eldarion-gondor/pykube) | | Python | [github.com/fiaas/k8s](https://github.com/fiaas/k8s) | | Python | [github.com/mnubo/kubernetes-py](https://github.com/mnubo/kubernetes-py) | | Python | [github.com/tomplus/kubernetes_asyncio](https://github.com/tomplus/kubernetes_asyncio) | diff --git a/content/ko/docs/setup/_index.md b/content/ko/docs/setup/_index.md index b09963d0e2..3c6013c590 100644 --- a/content/ko/docs/setup/_index.md +++ b/content/ko/docs/setup/_index.md @@ -1,17 +1,21 @@ --- -no_issue: true + + + + title: 시작하기 main_menu: true weight: 20 content_type: concept +no_list: true card: name: setup weight: 20 anchors: - anchor: "#학습-환경" title: 학습 환경 - - anchor: "#운영-환경" - title: 운영 환경 + - anchor: "#프로덕션-환경" + title: 프로덕션 환경 --- @@ -20,16 +24,40 @@ card: 쿠버네티스를 설치할 때는 유지보수의 용이성, 보안, 제어, 사용 가능한 리소스, 그리고 클러스터를 운영하고 관리하기 위해 필요한 전문성을 기반으로 설치 유형을 선택한다. -쿠버네티스 클러스터를 로컬 머신에, 클라우드에, 온-프레미스 데이터센터에 배포할 수 있고, 아니면 매니지드 쿠버네티스 클러스터를 선택할 수도 있다. 광범위한 클라우드 제공 업체 또는 베어 메탈 환경에 걸쳐 사용할 수 있는 맞춤형 솔루션도 있다. +[쿠버네티스를 다운로드](/releases/download/)하여 +로컬 머신에, 클라우드에, 데이터센터에 쿠버네티스 클러스터를 구축할 수 있다. + +쿠버네티스 클러스터를 직접 관리하고 싶지 않다면, [인증된 플랫폼](/ko/docs/setup/production-environment/turnkey-solutions/)과 +같은 매니지드 서비스를 선택할 수도 있다. +광범위한 클라우드 또는 베어 메탈 환경에 걸쳐 사용할 수 있는 +표준화된/맞춤형 솔루션도 있다. ## 학습 환경 -쿠버네티스를 배우고 있다면, 쿠버네티스 커뮤니티에서 지원하는 도구나, 로컬 머신에서 쿠버네티스를 설치하기 위한 생태계 내의 도구를 사용하자. +쿠버네티스를 배우고 있다면, 쿠버네티스 커뮤니티에서 지원하는 도구나, +로컬 머신에서 쿠버네티스를 설치하기 위한 생태계 내의 도구를 사용한다. +[도구 설치](/ko/docs/tasks/tools/)를 살펴본다. -## 운영 환경 +## 프로덕션 환경 -운영 환경을 위한 솔루션을 평가할 때에는, 쿠버네티스 클러스터 운영에 대한 어떤 측면(또는 _추상적인 개념_)을 스스로 관리하기를 원하는지, 제공자에게 넘기기를 원하는지 고려하자. 
+[프로덕션 환경](/ko/docs/setup/production-environment/)을 위한 +솔루션을 평가할 때에는, 쿠버네티스 클러스터(또는 _추상화된 객체_) +운영에 대한 어떤 측면을 스스로 관리하기를 원하는지, +또는 제공자에게 넘기기를 원하는지 고려한다. -[쿠버네티스 파트너](https://kubernetes.io/partners/#conformance)에는 [공인 쿠버네티스](https://github.com/cncf/k8s-conformance/#certified-kubernetes) 공급자 목록이 포함되어 있다. +클러스터를 직접 관리하는 경우, 공식적으로 지원되는 쿠버네티스 구축 도구는 +[kubeadm](/ko/docs/setup/production-environment/tools/kubeadm/)이다. + +## {{% heading "whatsnext" %}} + +- [쿠버네티스를 다운로드](/releases/download/)한다. +- `kubectl`을 포함한 [도구를 설치](/ko/docs/tasks/tools/)한다. +- 새로운 클러스터에 사용할 [컨테이너 런타임](/ko/docs/setup/production-environment/container-runtimes/)을 선택한다. +- 클러스터 구성의 [모범 사례](/ko/docs/setup/best-practices/)를 확인한다. + +쿠버네티스의 {{< glossary_tooltip term_id="control-plane" text="컨트롤 플레인" >}}은 +리눅스에서 실행되도록 설계되었다. 클러스터 내에서는 리눅스 또는 +다른 운영 체제(예: 윈도우)에서 애플리케이션을 실행할 수 있다. +- [윈도우 노드를 포함하는 클러스터 구성하기](/ko/docs/setup/production-environment/windows/)를 살펴본다. diff --git a/content/ko/docs/setup/best-practices/certificates.md b/content/ko/docs/setup/best-practices/certificates.md index 77665edbe2..e6640be52d 100644 --- a/content/ko/docs/setup/best-practices/certificates.md +++ b/content/ko/docs/setup/best-practices/certificates.md @@ -1,23 +1,23 @@ --- -title: PKI 인증서 및 요구 조건 +title: PKI 인증서 및 요구 사항 content_type: concept weight: 40 --- -쿠버네티스는 TLS 위에 인증을 위해 PKI 인증서가 필요하다. -만약 [kubeadm](/ko/docs/reference/setup-tools/kubeadm/)으로 쿠버네티스를 설치했다면, 클러스터에 필요한 인증서는 자동으로 생성된다. +쿠버네티스는 TLS를 통한 인증을 위해서 PKI 인증서가 필요하다. +만약 [kubeadm](/ko/docs/reference/setup-tools/kubeadm/)으로 쿠버네티스를 설치한다면, 클러스터에 필요한 인증서는 자동으로 생성된다. 또한 더 안전하게 자신이 소유한 인증서를 생성할 수 있다. 이를 테면, 개인키를 API 서버에 저장하지 않으므로 더 안전하게 보관할 수 있다. -이 페이지는 클러스터에 필요한 인증서를 설명한다. +이 페이지는 클러스터가 필요로 하는 인증서에 대해서 설명한다. -## 클러스터에서 인증서는 어떻게 이용되나? +## 클러스터에서 인증서가 이용되는 방식 -쿠버네티스는 다음 작업에서 PKI가 필요하다. +쿠버네티스는 다음 작업에서 PKI를 필요로 한다. * kubelet에서 API 서버 인증서를 인증시 사용하는 클라이언트 인증서 * API 서버 엔드포인트를 위한 서버 인증서 @@ -36,7 +36,7 @@ etcd 역시 클라이언트와 피어 간에 상호 TLS 인증을 구현한다. 
## 인증서를 저장하는 위치 -만약 쿠버네티스를 kubeadm으로 설치했다면 인증서는 `/etc/kubernets/pki`에 저장된다. 이 문서에 언급된 모든 파일 경로는 그 디렉터리에 상대적이다. +만약 쿠버네티스를 kubeadm으로 설치했다면 인증서는 `/etc/kubernetes/pki`에 저장된다. 이 문서에 언급된 모든 파일 경로는 그 디렉터리에 상대적이다. ## 인증서 수동 설정 diff --git a/content/ko/docs/setup/best-practices/cluster-large.md b/content/ko/docs/setup/best-practices/cluster-large.md index d0293e72f6..899c63f6b7 100644 --- a/content/ko/docs/setup/best-practices/cluster-large.md +++ b/content/ko/docs/setup/best-practices/cluster-large.md @@ -6,13 +6,13 @@ weight: 20 클러스터는 {{< glossary_tooltip text="컨트롤 플레인" term_id="control-plane" >}}에서 관리하는 쿠버네티스 에이전트를 실행하는 {{< glossary_tooltip text="노드" term_id="node" >}}(물리 또는 가상 머신)의 집합이다. -쿠버네티스 {{< param "version" >}}는 노드 5000개까지의 클러스터를 지원한다. 보다 정확하게는, +쿠버네티스 {{< param "version" >}}는 노드 5,000개까지의 클러스터를 지원한다. 보다 정확하게는, 쿠버네티스는 다음 기준을 *모두* 만족하는 설정을 수용하도록 설계되었다. -* 노드 당 파드 100 개 이하 -* 노드 5000개 이하 -* 전체 파드 150000개 이하 -* 전체 컨테이너 300000개 이하 +* 노드 당 파드 110 개 이하 +* 노드 5,000개 이하 +* 전체 파드 150,000개 이하 +* 전체 컨테이너 300,000개 이하 노드를 추가하거나 제거하여 클러스터를 확장할 수 있다. 이를 수행하는 방법은 클러스터 배포 방법에 따라 다르다. diff --git a/content/ko/docs/setup/best-practices/multiple-zones.md b/content/ko/docs/setup/best-practices/multiple-zones.md index 3d825ebd08..93ab353d37 100644 --- a/content/ko/docs/setup/best-practices/multiple-zones.md +++ b/content/ko/docs/setup/best-practices/multiple-zones.md @@ -55,7 +55,7 @@ content_type: concept 특정 kubelet을 나타내는 노드 오브젝트에 {{< glossary_tooltip text="레이블" term_id="label" >}}을 자동으로 추가한다. 이러한 레이블에는 -[영역 정보](/docs/reference/labels-annotations-taints/#topologykubernetesiozone)가 포함될 수 있다. +[영역 정보](/ko/docs/reference/labels-annotations-taints/#topologykubernetesiozone)가 포함될 수 있다.
클러스터가 여러 영역 또는 지역에 걸쳐있는 경우, [파드 토폴로지 분배 제약 조건](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/)과 diff --git a/content/ko/docs/setup/production-environment/_index.md b/content/ko/docs/setup/production-environment/_index.md index 3471214564..1394c2f325 100644 --- a/content/ko/docs/setup/production-environment/_index.md +++ b/content/ko/docs/setup/production-environment/_index.md @@ -53,7 +53,7 @@ no_list: true 관리하여, 사용자 및 워크로드가 접근할 수 있는 자원에 대한 제한을 설정할 수 있다. 쿠버네티스 프로덕션 환경을 직접 구축하기 전에, 이 작업의 일부 또는 전체를 -[턴키 클라우드 솔루션](/docs/setup/production-environment/turnkey-solutions/) +[턴키 클라우드 솔루션](/ko/docs/setup/production-environment/turnkey-solutions/) 제공 업체 또는 기타 [쿠버네티스 파트너](/ko/partners/)에게 넘기는 것을 고려할 수 있다. 다음과 같은 옵션이 있다. @@ -151,7 +151,7 @@ etcd는 클러스터 구성 데이터를 저장하므로 [kube-controller-manager](/docs/reference/command-line-tools-reference/kube-controller-manager/), [kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/)를 참조한다. 고가용성 컨트롤 플레인 예제는 -[고가용성 토폴로지를 위한 옵션](/docs/setup/production-environment/tools/kubeadm/ha-topology/), +[고가용성 토폴로지를 위한 옵션](/ko/docs/setup/production-environment/tools/kubeadm/ha-topology/), [kubeadm을 이용하여 고가용성 클러스터 생성하기](/docs/setup/production-environment/tools/kubeadm/high-availability/), [쿠버네티스를 위한 etcd 클러스터 운영하기](/docs/tasks/administer-cluster/configure-upgrade-etcd/)를 참조한다. etcd 백업 계획을 세우려면 @@ -274,8 +274,8 @@ DNS 서비스도 확장할 준비가 되어 있어야 한다. ## {{% heading "whatsnext" %}} - 프로덕션 쿠버네티스를 직접 구축할지, -아니면 [턴키 클라우드 솔루션](/docs/setup/production-environment/turnkey-solutions/) 또는 -[쿠버네티스 파트너](/partners/)가 제공하는 서비스를 이용할지 결정한다. +아니면 [턴키 클라우드 솔루션](/ko/docs/setup/production-environment/turnkey-solutions/) 또는 +[쿠버네티스 파트너](/ko/partners/)가 제공하는 서비스를 이용할지 결정한다. 
- 클러스터를 직접 구축한다면, [인증서](/ko/docs/setup/best-practices/certificates/)를 어떻게 관리할지, [etcd](/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/)와 diff --git a/content/ko/docs/setup/production-environment/container-runtimes.md b/content/ko/docs/setup/production-environment/container-runtimes.md index d2638f4433..fb749a2346 100644 --- a/content/ko/docs/setup/production-environment/container-runtimes.md +++ b/content/ko/docs/setup/production-environment/container-runtimes.md @@ -97,7 +97,10 @@ containerd를 설치한다. {{< tabs name="tab-cri-containerd-installation" >}} {{% tab name="Linux" %}} -1. 공식 도커 리포지터리에서 `containerd.io` 패키지를 설치한다. 각 리눅스 배포한에 대한 도커 리포지터리를 설정하고 `containerd.io` 패키지를 설치하는 방법은 [도커 엔진 설치](https://docs.docker.com/engine/install/#server)에서 찾을 수 있다. +1. 공식 도커 리포지터리에서 `containerd.io` 패키지를 설치한다. +각 리눅스 배포판에 대한 도커 리포지터리를 설정하고 +`containerd.io` 패키지를 설치하는 방법은 +[도커 엔진 설치](https://docs.docker.com/engine/install/#server)에서 찾을 수 있다. 2. containerd 설정 @@ -115,7 +118,8 @@ containerd를 설치한다. {{% /tab %}} {{% tab name="Windows (PowerShell)" %}} -PowerShell 세션을 시작하고 `$Version`을 원하는 버전(예: `$Version:1.4.3`)으로 설정한 후 다음 명령을 실행한다. +PowerShell 세션을 시작하고 `$Version`을 원하는 버전으로 +설정(예: `$Version:1.4.3`)한 후 다음 명령을 실행한다. 1. containerd 다운로드 @@ -242,7 +246,8 @@ sudo apt-get install cri-o cri-o-runc {{% tab name="Ubuntu" %}} -다음의 운영 체제에서 CRI-O를 설치하려면, 환경 변수 `OS` 를 아래의 표에서 적절한 필드로 설정한다. +다음의 운영 체제에서 CRI-O를 설치하려면, 환경 변수 `OS` 를 +아래의 표에서 적절한 필드로 설정한다. | 운영 체제 | `$OS` | | ---------------- | ----------------- | @@ -277,7 +282,8 @@ apt-get install cri-o cri-o-runc {{% tab name="CentOS" %}} -다음의 운영 체제에서 CRI-O를 설치하려면, 환경 변수 `OS` 를 아래의 표에서 적절한 필드로 설정한다. +다음의 운영 체제에서 CRI-O를 설치하려면, 환경 변수 `OS` 를 +아래의 표에서 적절한 필드로 설정한다. | 운영 체제 | `$OS` | | ---------------- | ----------------- | @@ -357,7 +363,10 @@ CRI-O의 cgroup 드라이버 구성을 동기화 상태로 ### 도커 -1. 각 노드에서 [도커 엔진 설치](https://docs.docker.com/engine/install/#server)에 따라 리눅스 배포판용 도커를 설치한다. 
이 [의존성 파일](https://git.k8s.io/kubernetes/build/dependencies.yaml)에서 검증된 최신 버전의 도커를 찾을 수 있다. +1. 각 노드에서 [도커 엔진 설치](https://docs.docker.com/engine/install/#server)에 따라 +리눅스 배포판용 도커를 설치한다. +이 [의존성 파일](https://git.k8s.io/kubernetes/build/dependencies.yaml)에서 +검증된 최신 버전의 도커를 찾을 수 있다. 2. 특히 컨테이너의 cgroup 관리에 systemd를 사용하도록 도커 데몬을 구성한다. @@ -376,7 +385,8 @@ CRI-O의 cgroup 드라이버 구성을 동기화 상태로 ``` {{< note >}} - `overlay2`는 리눅스 커널 4.0 이상 또는 3.10.0-514 버전 이상을 사용하는 RHEL 또는 CentOS를 구동하는 시스템에서 선호하는 스토리지 드라이버이다. + `overlay2`는 리눅스 커널 4.0 이상 또는 3.10.0-514 버전 이상을 사용하는 RHEL + 또는 CentOS를 구동하는 시스템에서 선호하는 스토리지 드라이버이다. {{< /note >}} 3. 도커 재시작과 부팅시 실행되게 설정 diff --git a/content/ko/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md b/content/ko/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md index d978e7d59f..cae9a85b0a 100644 --- a/content/ko/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md +++ b/content/ko/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md @@ -9,7 +9,8 @@ weight: 40 {{< feature-state for_k8s_version="v1.12" state="stable" >}} -kubeadm의 `ClusterConfiguration` 오브젝트는 API 서버, 컨트롤러매니저, 스케줄러와 같은 컨트롤 플레인 구성요소에 전달되는 기본 플래그 `extraArgs` 필드를 노출한다. 이 구성요소는 다음 필드를 사용하도록 정의되어 있다. +kubeadm의 `ClusterConfiguration` 오브젝트는 API 서버, 컨트롤러매니저, 스케줄러와 같은 컨트롤 플레인 구성요소에 전달되는 +기본 플래그 `extraArgs` 필드를 노출한다. 이 구성요소는 다음 필드를 사용하도록 정의되어 있다. - `apiServer` - `controllerManager` @@ -19,10 +20,10 @@ kubeadm의 `ClusterConfiguration` 오브젝트는 API 서버, 컨트롤러매니 1. 사용자 구성에서 적절한 필드를 추가한다. 2. 필드에 대체할 플래그를 추가한다. -3. `kubeadm init`에 `--config ` 파라미터를 추가해서 실행한다. +3. `kubeadm init`에 `--config ` 파라미터를 추가해서 실행한다. 각 필드의 구성에서 자세한 정보를 보려면, -[API 참고 문서](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#ClusterConfiguration)에서 확인해 볼 수 있다. +[API 참고 문서](/docs/reference/config-api/kubeadm-config.v1beta2/)에서 확인해 볼 수 있다. {{< note >}} `kubeadm config print init-defaults`를 실행하고 원하는 파일에 출력을 저장하여 기본값인 `ClusterConfiguration` 오브젝트를 생성할 수 있다. 
@@ -34,9 +35,9 @@ kubeadm의 `ClusterConfiguration` 오브젝트는 API 서버, 컨트롤러매니 ## APIServer 플래그 -자세한 내용은 [kube-apiserver에 대한 참고 문서](/docs/reference/command-line-tools-reference/kube-apiserver/)를 확인한다. +자세한 내용은 [kube-apiserver 레퍼런스 문서](/docs/reference/command-line-tools-reference/kube-apiserver/)를 확인한다. -사용 예: +예시: ```yaml apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration @@ -51,9 +52,9 @@ apiServer: ## 컨트롤러매니저 플래그 -자세한 내용은 [kube-controller-manager에 대한 참고 문서](/docs/reference/command-line-tools-reference/kube-controller-manager/)를 확인한다. +자세한 내용은 [kube-controller-manager 레퍼런스 문서](/docs/reference/command-line-tools-reference/kube-controller-manager/)를 확인한다. -사용 예: +예시: ```yaml apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration @@ -67,9 +68,9 @@ controllerManager: ## 스케줄러 플래그 -자세한 내용은 [kube-scheduler에 대한 참고 문서](/docs/reference/command-line-tools-reference/kube-scheduler/)를 확인한다. +자세한 내용은 [kube-scheduler 레퍼런스 문서](/docs/reference/command-line-tools-reference/kube-scheduler/)를 확인한다. -사용 예: +예시: ```yaml apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration diff --git a/content/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index a7ce213fda..6f50124f8d 100644 --- a/content/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -169,7 +169,7 @@ kubeadm은 `kubelet` 또는 `kubectl` 을 설치하거나 관리하지 **않으 버전 차이에 대한 자세한 내용은 다음을 참고한다. 
-* 쿠버네티스 [버전 및 버전-차이 정책](/docs/setup/release/version-skew-policy/) +* 쿠버네티스 [버전 및 버전-차이 정책](/ko/releases/version-skew-policy/) * Kubeadm 관련 [버전 차이 정책](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#version-skew-policy) {{< tabs name="k8s_install" >}} diff --git a/content/ko/docs/setup/production-environment/turnkey-solutions.md b/content/ko/docs/setup/production-environment/turnkey-solutions.md new file mode 100644 index 0000000000..2feb5de30a --- /dev/null +++ b/content/ko/docs/setup/production-environment/turnkey-solutions.md @@ -0,0 +1,14 @@ +--- +title: 턴키 클라우드 솔루션 +content_type: concept +weight: 30 +--- + + +이 페이지는 인증된 쿠버네티스 솔루션 제공자 목록을 제공한다. 각 제공자 +페이지를 통해서, 프로덕션에 준비된 클러스터를 설치 및 설정하는 방법을 +학습할 수 있다. + + + +{{< cncf-landscape helpers=true category="certified-kubernetes-hosted" >}} diff --git a/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index eb48f3f65d..67db901778 100644 --- a/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -84,7 +84,7 @@ weight: 65 단계별 지침을 제공한다. 이 가이드에는 클러스터 노드와 함께 사용자 애플리케이션을 업그레이드하기 위한 권장 업그레이드 절차가 포함된다. 윈도우 노드는 현재 리눅스 노드와 동일한 방식으로 쿠버네티스 -[버전-스큐(skew) 정책](/ko/docs/setup/release/version-skew-policy/)(노드 대 컨트롤 플레인 +[버전-차이(skew) 정책](/ko/releases/version-skew-policy/)(노드 대 컨트롤 플레인 버전 관리)을 준수한다. @@ -102,6 +102,8 @@ weight: 65 Microsoft는 `mcr.microsoft.com/oss/kubernetes/pause:3.4.1`에서 윈도우 퍼즈 인프라 컨테이너를 유지한다. +이외에도 `k8s.gcr.io/pause:3.5`를 통해 쿠버네티스에서 관리하는 다중 아키텍처 이미지를 +사용할 수도 있는데, 이 이미지는 리눅스와 윈도우를 모두 지원한다. #### 컴퓨트 @@ -809,7 +811,7 @@ DNS, 라우트, 메트릭과 같은 많은 구성은 리눅스에서와 같이 / 1. [BitLocker](https://docs.microsoft.com/ko-kr/windows/security/information-protection/bitlocker/bitlocker-how-to-deploy-on-windows-server)를 사용한 볼륨-레벨 암호화를 사용한다. 
-[RunAsUsername](/ko/docs/tasks/configure-pod-container/configure-runasusername)은 +[RunAsUsername](/ko/docs/tasks/configure-pod-container/configure-runasusername/)은 컨테이너 프로세스를 노드 기본 사용자로 실행하기 위해 윈도우 파드 또는 컨테이너에 지정할 수 있다. 이것은 [RunAsUser](/ko/docs/concepts/policy/pod-security-policy/#사용자-및-그룹)와 거의 동일하다. diff --git a/content/ko/docs/setup/production-environment/windows/user-guide-windows-containers.md b/content/ko/docs/setup/production-environment/windows/user-guide-windows-containers.md index aabc838ea5..5c3d52e475 100644 --- a/content/ko/docs/setup/production-environment/windows/user-guide-windows-containers.md +++ b/content/ko/docs/setup/production-environment/windows/user-guide-windows-containers.md @@ -6,7 +6,8 @@ weight: 75 -많은 조직에서 실행하는 서비스와 애플리케이션의 상당 부분이 윈도우 애플리케이션으로 구성된다. 이 가이드는 쿠버네티스에서 윈도우 컨테이너를 구성하고 배포하는 단계를 안내한다. +많은 조직에서 실행하는 서비스와 애플리케이션의 상당 부분이 윈도우 애플리케이션으로 구성된다. +이 가이드는 쿠버네티스에서 윈도우 컨테이너를 구성하고 배포하는 단계를 안내한다. @@ -19,12 +20,18 @@ weight: 75 ## 시작하기 전에 -* [윈도우 서버에서 운영하는 마스터와 워커 노드](/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes)를 포함한 쿠버네티스 클러스터를 생성한다. -* 쿠버네티스에서 서비스와 워크로드를 생성하고 배포하는 것은 리눅스나 윈도우 컨테이너 모두 비슷한 방식이라는 것이 중요하다. [Kubectl 커맨드](/ko/docs/reference/kubectl/overview/)로 클러스터에 접속하는 것은 동일하다. 아래 단원의 예시는 윈도우 컨테이너를 경험하기 위해 제공한다. +* [윈도우 서버에서 운영하는 마스터와 워커 노드](/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes)를 +포함한 쿠버네티스 클러스터를 생성한다. +* 쿠버네티스에서 서비스와 워크로드를 생성하고 배포하는 것은 리눅스나 윈도우 컨테이너 +모두 비슷한 방식이라는 것이 중요하다. +[Kubectl 커맨드](/ko/docs/reference/kubectl/overview/)로 클러스터에 접속하는 것은 동일하다. +아래 단원의 예시를 통해 윈도우 컨테이너와 좀 더 빨리 친숙해질 수 있다. ## 시작하기: 윈도우 컨테이너 배포하기 -쿠버네티스에서 윈도우 컨테이너를 배포하려면, 먼저 예시 애플리케이션을 생성해야 한다. 아래 예시 YAML 파일은 간단한 웹서버 애플리케이션을 생성한다. 아래 내용으로 채운 서비스 스펙을 `win-webserver.yaml`로 생성하자. +쿠버네티스에서 윈도우 컨테이너를 배포하려면, 먼저 예시 애플리케이션을 생성해야 한다. +아래 예시 YAML 파일은 간단한 웹서버 애플리케이션을 생성한다. +아래 내용으로 채운 서비스 스펙을 `win-webserver.yaml`로 생성하자. ```yaml apiVersion: v1 @@ -71,7 +78,8 @@ spec: ``` {{< note >}} -포트 매핑도 지원하지만, 간략한 예시를 위해 컨테이너 포트 80을 직접 서비스로 노출한다. 
+포트 매핑도 지원하지만, 간략한 예시를 위해 +컨테이너 포트 80을 직접 서비스로 노출한다. {{< /note >}} 1. 모든 노드가 건강한지 확인한다. @@ -91,53 +99,87 @@ spec: 1. 이 디플로이먼트가 성공적인지 확인한다. 다음을 검토하자. - * 윈도우 노드에 파드당 두 컨테이너, `docker ps`를 사용한다. - * 리눅스 마스터에서 나열된 두 파드, `kubectl get pods`를 사용한다. - * 네트워크를 통한 노드에서 파드 간에 통신, 리눅스 마스터에서 `curl`을 파드 IP 주소의 80 포트로 실행하여 웹 서버 응답을 확인한다. - * 파드와 파드 간에 통신, `docker exec` 나 `kubectl exec`를 이용해 파드 간에 핑(ping)한다(윈도우 노드를 여럿가지고 있다면 호스트를 달리하며). - * 서비스와 파드 간에 통신, 리눅스 마스터와 독립 파드에서 `curl`을 가상 서비스 IP 주소(`kubectl get services`로 보여지는)로 실행한다. - * 서비스 검색(discovery), 쿠버네티스 [기본 DNS 접미사](/ko/docs/concepts/services-networking/dns-pod-service/#서비스)와 서비스 이름으로 `curl`을 실행한다. - * 인바운드 연결, 클러스터 외부 장비나 리눅스 마스터에서 NodePort로 `curl`을 실행한다. - * 아웃바운드 연결, `kubectl exec`를 이용해서 파드에서 외부 IP 주소로 `curl`을 실행한다. + * 윈도우 노드에 파드당 두 컨테이너가 존재하는지 확인하려면, `docker ps`를 사용한다. + * 리눅스 마스터에서 나열된 두 파드가 존재하는지 확인하려면, `kubectl get pods`를 사용한다. + * 네트워크를 통한 노드에서 파드로의 통신이 되는지 확인하려면, 리눅스 마스터에서 `curl`을 + 파드 IP 주소의 80 포트로 실행하여 웹 서버 응답을 확인한다. + * 파드 간 통신이 되는지 확인하려면, `docker exec` 나 `kubectl exec`를 이용해 파드 간에 + 핑(ping)한다(윈도우 노드가 2대 이상이라면, 서로 다른 노드에 있는 파드 간 통신도 확인할 수 있다). + * 서비스에서 파드로의 통신이 되는지 확인하려면, 리눅스 마스터와 독립 파드에서 `curl`을 가상 서비스 + IP 주소(`kubectl get services`로 볼 수 있는)로 실행한다. + * 서비스 검색(discovery)이 되는지 확인하려면, 쿠버네티스 [기본 DNS 접미사](/ko/docs/concepts/services-networking/dns-pod-service/#서비스)와 서비스 이름으로 `curl`을 실행한다. + * 인바운드 연결이 되는지 확인하려면, 클러스터 외부 장비나 리눅스 마스터에서 NodePort로 `curl`을 실행한다. + * 아웃바운드 연결이 되는지 확인하려면, `kubectl exec`를 이용해서 파드에서 외부 IP 주소로 `curl`을 실행한다. {{< note >}} -윈도우 컨테이너 호스트는 현재 윈도우 네트워킹 스택의 플랫폼 제한으로 인해, 그 안에서 스케줄링하는 서비스의 IP 주소로 접근할 수 없다. 윈도우 파드만 서비스 IP 주소로 접근할 수 있다. +윈도우 컨테이너 호스트는 현재 윈도우 네트워킹 스택의 플랫폼 제한으로 인해, 그 안에서 스케줄링하는 서비스의 IP 주소로 접근할 수 없다. +윈도우 파드만 서비스 IP 주소로 접근할 수 있다. {{< /note >}} ## 가시성 ### 워크로드에서 로그 캡쳐하기 -로그는 가시성의 중요한 요소이다. 로그는 사용자가 워크로드의 운영측면을 파악할 수 있도록 하며 문제 해결의 핵심 요소이다. 윈도우 컨테이너와 워크로드 내의 윈도우 컨테이너가 리눅스 컨테이너와는 다르게 동작하기 때문에, 사용자가 로그를 수집하는 데 어려움을 겪었기에 운영 가시성이 제한되었다. 
예를 들어 윈도우 워크로드는 일반적으로 ETW(Event Tracing for Windows)에 로그인하거나 애플리케이션 이벤트 로그에 항목을 푸시하도록 구성한다. Microsoft의 오픈 소스 도구인 [LogMonitor](https://github.com/microsoft/windows-container-tools/tree/master/LogMonitor)는 윈도우 컨테이너 안에 구성된 로그 소스를 모니터링하는 권장하는 방법이다. LogMonitor는 이벤트 로그, ETW 공급자 그리고 사용자 정의 애플리케이션 로그 모니터링을 지원하고 `kubectl logs ` 에 의한 사용을 위해 STDOUT으로 파이프한다. +로그는 가시성의 중요한 요소이다. 로그는 사용자가 워크로드의 운영측면을 +파악할 수 있도록 하며 문제 해결의 핵심 요소이다. +윈도우 컨테이너, 그리고 윈도우 컨테이너 내의 워크로드는 리눅스 컨테이너와는 다르게 동작하기 때문에, +사용자가 로그를 수집하기 어려웠고 이로 인해 운영 가시성이 제한되어 왔다. +예를 들어 윈도우 워크로드는 일반적으로 ETW(Event Tracing for Windows)에 로그인하거나 +애플리케이션 이벤트 로그에 항목을 푸시하도록 구성한다. +Microsoft의 오픈 소스 도구인 [LogMonitor](https://github.com/microsoft/windows-container-tools/tree/master/LogMonitor)는 +윈도우 컨테이너 안에 구성된 로그 소스를 모니터링하는 권장하는 방법이다. +LogMonitor는 이벤트 로그, ETW 공급자 그리고 사용자 정의 애플리케이션 로그 모니터링을 지원하고 +`kubectl logs ` 에 의한 사용을 위해 STDOUT으로 파이프한다. -LogMonitor Github 페이지의 지침에 따라 모든 컨테이너 바이너리와 설정 파일을 복사하고, LogMonitor에 필요한 입력 지점을 추가해서 로그를 STDOUT으로 푸시한다. +LogMonitor GitHub 페이지의 지침에 따라 모든 컨테이너 바이너리와 설정 파일을 복사하고, +LogMonitor가 로그를 STDOUT으로 푸시할 수 있도록 필요한 엔트리포인트를 추가한다. ## 설정 가능한 컨테이너 username 사용하기 -쿠버네티스 v1.16 부터, 윈도우 컨테이너는 이미지 기본 값과는 다른 username으로 엔트리포인트와 프로세스를 실행하도록 설정할 수 있다. 이 방식은 리눅스 컨테이너에서 지원되는 방식과는 조금 차이가 있다. [여기](/docs/tasks/configure-pod-container/configure-runasusername/)에서 이에 대해 추가적으로 배울 수 있다. +쿠버네티스 v1.16 부터, 윈도우 컨테이너는 이미지 기본 값과는 다른 username으로 엔트리포인트와 프로세스를 +실행하도록 설정할 수 있다. +이 방식은 리눅스 컨테이너에서 지원되는 방식과는 조금 차이가 있다. +[여기](/ko/docs/tasks/configure-pod-container/configure-runasusername/)에서 이에 대해 추가적으로 배울 수 있다. ## 그룹 매니지드 서비스 어카운트를 이용하여 워크로드 신원 관리하기 -쿠버네티스 v1.14부터 윈도우 컨테이너 워크로드는 그룹 매니지드 서비스 어카운트(GMSA, Group Managed Service Account)를 이용하여 구성할 수 있다. 그룹 매니지드 서비스 어카운트는 액티브 디렉터리 어카운트의 특정한 종류로 자동 암호 관리 기능, 단순화된 서비스 주체 이름(SPN, simplified service principal name), 여러 서버의 다른 관리자에게 관리를 위임하는 기능을 제공한다. GMSA로 구성한 컨테이너는 GMSA로 구성된 신원을 들고 있는 동안 외부 액티브 디렉터리 도메인 리소스를 접근할 수 있다. 
윈도우 컨테이너를 위한 GMSA를 이용하고 구성하는 방법은 [여기](/docs/tasks/configure-pod-container/configure-gmsa/)에서 알아보자. +쿠버네티스 v1.14부터 윈도우 컨테이너 워크로드는 그룹 매니지드 서비스 어카운트(GMSA, Group Managed Service Account)를 이용하여 구성할 수 있다. +그룹 매니지드 서비스 어카운트는 액티브 디렉터리 어카운트의 특정한 종류로 자동 암호 관리 기능, +단순화된 서비스 주체 이름(SPN, simplified service principal name), 여러 서버의 다른 관리자에게 관리를 위임하는 기능을 제공한다. +GMSA로 구성한 컨테이너는 GMSA로 구성된 신원을 들고 있는 동안 외부 액티브 디렉터리 도메인 리소스를 접근할 수 있다. +윈도우 컨테이너를 위한 GMSA를 이용하고 구성하는 방법은 [여기](/docs/tasks/configure-pod-container/configure-gmsa/)에서 알아보자. ## 테인트(Taint)와 톨러레이션(Toleration) -오늘날 사용자는 리눅스와 윈도우 워크로드를 특정 OS 노드별로 보존하기 위해 테인트와 노드 셀렉터(nodeSelector)의 조합을 이용해야 한다. 이것은 윈도우 사용자에게만 부담을 줄 것으로 보인다. 아래는 권장되는 방식의 개요인데, 이것의 주요 목표 중에 하나는 이 방식이 기존 리눅스 워크로드와 호환되어야 한다는 것이다. +오늘날 사용자는 리눅스와 윈도우 워크로드를 (동일한 OS를 실행하는) 적절한 노드에 할당되도록 하기 위해 테인트와 +노드셀렉터(nodeSelector)의 조합을 이용해야 한다. +이것은 윈도우 사용자에게만 부담을 줄 것으로 보인다. 아래는 권장되는 방식의 개요인데, +이것의 주요 목표 중에 하나는 이 방식이 기존 리눅스 워크로드와 호환되어야 한다는 것이다. ### 특정 OS 워크로드를 적절한 컨테이너 호스트에서 처리하도록 보장하기 -사용자는 윈도우 컨테이너가 테인트와 톨러레이션을 이용해서 적절한 호스트에서 스케줄링되기를 보장할 수 있다. 오늘날 모든 쿠버네티스 노드는 다음 기본 레이블을 가지고 있다. +사용자는 테인트와 톨러레이션을 이용하여 윈도우 컨테이너가 적절한 호스트에서 스케줄링되기를 보장할 수 있다. +오늘날 모든 쿠버네티스 노드는 다음 기본 레이블을 가지고 있다. * kubernetes.io/os = [windows|linux] * kubernetes.io/arch = [amd64|arm64|...] -파드 사양에 노드 셀렉터를 `"kubernetes.io/os": windows`와 같이 지정하지 않았다면, 그 파드는 리눅스나 윈도우, 아무 호스트에나 스케줄링될 수 있다. 윈도우 컨테이너는 윈도우에서만 운영될 수 있고 리눅스 컨테이너는 리눅스에서만 운영될 수 있기 때문에 이는 문제를 일으킬 수 있다. 가장 좋은 방법은 노드 셀렉터를 사용하는 것이다. +파드 사양에 노드 셀렉터를 `"kubernetes.io/os": windows`와 같이 지정하지 않았다면, +그 파드는 리눅스나 윈도우, 아무 호스트에나 스케줄링될 수 있다. +윈도우 컨테이너는 윈도우에서만 운영될 수 있고 리눅스 컨테이너는 리눅스에서만 운영될 수 있기 때문에 이는 문제를 일으킬 수 있다. +가장 좋은 방법은 노드 셀렉터를 사용하는 것이다. -그러나 많은 경우 사용자는 이미 존재하는 대량의 리눅스 컨테이너용 디플로이먼트를 가지고 있을 뿐만 아니라, 헬름(Helm) 차트 커뮤니티 같은 상용 구성의 에코시스템이나, 오퍼레이터(Operator) 같은 프로그래밍 방식의 파드 생성 사례가 있음을 알고 있다. 이런 상황에서는 노드 셀렉터를 추가하는 구성 변경을 망설일 수 있다. 이에 대한 대안은 테인트를 사용하는 것이다. Kubelet은 등록하는 동안 테인트를 설정할 수 있기 때문에, 윈도우에서만 운영할 때에 자동으로 테인트를 추가하기 쉽다. 
+그러나 많은 경우 사용자는 이미 존재하는 대량의 리눅스 컨테이너용 디플로이먼트를 가지고 있을 뿐만 아니라, +헬름(Helm) 차트 커뮤니티 같은 상용 구성의 에코시스템이나, 오퍼레이터(Operator) 같은 프로그래밍 방식의 파드 생성 사례가 있음을 알고 있다. +이런 상황에서는 노드 셀렉터를 추가하는 구성 변경을 망설일 수 있다. +이에 대한 대안은 테인트를 사용하는 것이다. Kubelet은 등록하는 동안 테인트를 설정할 수 있기 때문에, +윈도우에서만 운영할 때에 자동으로 테인트를 추가하기 쉽다. 예를 들면, `--register-with-taints='os=windows:NoSchedule'` -모든 윈도우 노드에 테인트를 추가하여 아무 것도 거기에 스케줄링하지 않게 될 것이다(존재하는 리눅스 파드를 포함하여). 윈도우 파드가 윈도우 노드에 스케줄링되려면, 윈도우를 선택하기 위한 노드 셀렉터 및 적합하게 일치하는 톨러레이션이 모두 필요하다. +모든 윈도우 노드에 테인트를 추가하여 아무 것도 거기에 스케줄링하지 않게 될 것이다(존재하는 리눅스 파드를 포함하여). +윈도우 파드가 윈도우 노드에 스케줄링되려면, +윈도우를 선택하기 위한 노드 셀렉터 및 적합하게 일치하는 톨러레이션이 모두 필요하다. ```yaml nodeSelector: @@ -152,14 +194,14 @@ tolerations: ### 동일 클러스터에서 여러 윈도우 버전을 조작하는 방법 -파드에서 사용하는 윈도우 서버 버전은 노드 버전과 일치해야 한다. 만약 동일한 클러스터에서 여러 윈도우 -서버 버전을 사용하려면, 추가로 노드 레이블과 nodeSelectors를 설정해야만 한다. +파드에서 사용하는 윈도우 서버 버전은 노드의 윈도우 서버 버전과 일치해야 한다. 만약 동일한 클러스터에서 여러 윈도우 +서버 버전을 사용하려면, 추가로 노드 레이블과 nodeSelectors를 설정해야 한다. -쿠버네티스 1.17은 이것을 단순화하기 위해 새로운 레이블인 `node.kubernetes.io/windows-build` 를 자동으로 추가 한다. 만약 이전 버전을 -실행 중인 경우 이 레이블을 윈도우 노드에 수동으로 추가하는 것을 권장한다. +쿠버네티스 1.17은 이것을 단순화하기 위해 새로운 레이블인 `node.kubernetes.io/windows-build` 를 자동으로 추가한다. +만약 이전 버전을 실행 중인 경우, 이 레이블을 윈도우 노드에 수동으로 추가하는 것을 권장한다. -이 레이블은 호환성을 일치해야 하는 윈도우 메이저, 마이너 및 빌드 번호를 나타낸다. 여기에 현재 -사용하는 각 윈도우 서버 버전이 있다. +이 레이블은 호환성을 위해 일치시켜야 하는 윈도우 메이저, 마이너 및 빌드 번호를 나타낸다. +각 윈도우 서버 버전에 대해 현재 사용하고 있는 빌드 번호는 다음과 같다. | 제품 이름 | 빌드 번호 | |--------------------------------------|------------------------| @@ -170,11 +212,12 @@ tolerations: ### RuntimeClass로 단순화 -[RuntimeClass] 를 사용해서 테인트(taint)와 톨러레이션(toleration)을 사용하는 프로세스를 간소화 할 수 있다. 클러스터 관리자는 -이 테인트와 톨러레이션을 캡슐화하는데 사용되는 `RuntimeClass` 오브젝트를 생성할 수 있다. +[런타임클래스(RuntimeClass)](/ko/docs/concepts/containers/runtime-class/)를 사용해서 테인트(taint)와 톨러레이션(toleration)을 사용하는 프로세스를 간소화 할 수 있다. +클러스터 관리자는 이 테인트와 톨러레이션을 캡슐화하는 데 사용되는 `RuntimeClass` 오브젝트를 생성할 수 있다. -1. 이 파일을 `runtimeClasses.yml` 로 저장한다. 여기에는 윈도우 OS, 아키텍처 및 버전에 적합한 `nodeSelector` 가 포함되었다. +1. 
이 파일을 `runtimeClasses.yml` 로 저장한다. 여기에는 윈도우 OS, +아키텍처 및 버전에 적합한 `nodeSelector` 가 포함되어 있다. ```yaml apiVersion: node.k8s.io/v1 diff --git a/content/ko/docs/tasks/access-application-cluster/_index.md b/content/ko/docs/tasks/access-application-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md b/content/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md index 477e310943..b3997580f2 100644 --- a/content/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md +++ b/content/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md @@ -7,7 +7,6 @@ card: weight: 40 --- - 이 페이지에서는 구성 파일을 사용하여 다수의 클러스터에 접근할 수 있도록 @@ -22,19 +21,21 @@ card: {{< /note >}} +{{< warning >}} +신뢰할 수 있는 소스의 kubeconfig 파일만 사용해야 한다. 특수 제작된 kubeconfig 파일은 악성코드를 실행하거나 파일을 노출시킬 수 있다. +신뢰할 수 없는 kubeconfig 파일을 꼭 사용해야 한다면, 셸 스크립트를 사용하는 경우처럼 신중한 검사가 선행되어야 한다. +{{< /warning>}} + ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< glossary_tooltip text="kubectl" term_id="kubectl" >}}이 설치되었는지 확인하려면, `kubectl version --client`을 실행한다. kubectl 버전은 클러스터의 API 서버 버전과 -[마이너 버전 하나 차이 이내](/ko/docs/setup/release/version-skew-policy/#kubectl)여야 +[마이너 버전 하나 차이 이내](/ko/releases/version-skew-policy/#kubectl)여야 한다. - - ## 클러스터, 사용자, 컨텍스트 정의 @@ -49,7 +50,7 @@ scratch 클러스터에 접근하려면 사용자네임과 패스워드로 인 `config-exercise`라는 디렉터리를 생성한다. `config-exercise` 디렉터리에 다음 내용을 가진 `config-demo`라는 파일을 생성한다. -```shell +```yaml apiVersion: v1 kind: Config preferences: {} @@ -114,7 +115,7 @@ kubectl config --kubeconfig=config-demo view 두 클러스터, 두 사용자, 세 컨텍스트들이 출력 결과로 나온다. -```shell +```yaml apiVersion: v1 clusters: - cluster: @@ -186,7 +187,7 @@ kubectl config --kubeconfig=config-demo view --minify `dev-frontend` 컨텍스트에 관련된 구성 정보가 출력 결과로 표시될 것이다. 
-```shell +```yaml apiVersion: v1 clusters: - cluster: @@ -238,7 +239,6 @@ kubectl config --kubeconfig=config-demo use-context dev-storage 현재 컨텍스트인 `dev-storage`에 관련된 설정을 보자. - ```shell kubectl config --kubeconfig=config-demo view --minify ``` @@ -247,7 +247,7 @@ kubectl config --kubeconfig=config-demo view --minify `config-exercise` 디렉터리에서 다음 내용으로 `config-demo-2`라는 파일을 생성한다. -```shell +```yaml apiVersion: v1 kind: Config preferences: {} @@ -269,13 +269,17 @@ contexts: 예: ### 리눅스 + ```shell -export KUBECONFIG_SAVED=$KUBECONFIG +export KUBECONFIG_SAVED=$KUBECONFIG ``` + ### 윈도우 PowerShell -```shell + +```powershell $Env:KUBECONFIG_SAVED=$ENV:KUBECONFIG ``` + `KUBECONFIG` 환경 변수는 구성 파일들의 경로의 리스트이다. 이 리스트는 리눅스와 Mac에서는 콜론으로 구분되며 윈도우에서는 세미콜론으로 구분된다. `KUBECONFIG` 환경 변수를 가지고 있다면, 리스트에 포함된 구성 파일들에 @@ -284,11 +288,14 @@ $Env:KUBECONFIG_SAVED=$ENV:KUBECONFIG 다음 예와 같이 임시로 `KUBECONFIG` 환경 변수에 두 개의 경로들을 덧붙여보자. ### 리눅스 + ```shell -export KUBECONFIG=$KUBECONFIG:config-demo:config-demo-2 +export KUBECONFIG=$KUBECONFIG:config-demo:config-demo-2 ``` + ### 윈도우 PowerShell -```shell + +```powershell $Env:KUBECONFIG=("config-demo;config-demo-2") ``` @@ -303,7 +310,7 @@ kubectl config view 컨텍스트와 `config-demo` 파일의 세 개의 컨텍스트들을 가지고 있다는 것에 주목하길 바란다. -```shell +```yaml contexts: - context: cluster: development @@ -347,12 +354,15 @@ kubeconfig 파일들을 어떻게 병합하는지에 대한 상세정보는 예: ### 리눅스 + ```shell export KUBECONFIG=$KUBECONFIG:$HOME/.kube/config ``` + ### 윈도우 Powershell -```shell - $Env:KUBECONFIG="$Env:KUBECONFIG;$HOME\.kube\config" + +```powershell +$Env:KUBECONFIG="$Env:KUBECONFIG;$HOME\.kube\config" ``` 이제 `KUBECONFIG` 환경 변수에 리스트에 포함된 모든 파일들이 합쳐진 구성 정보를 보자. @@ -367,19 +377,18 @@ kubectl config view `KUBECONFIG` 환경 변수를 원래 값으로 되돌려 놓자. 예를 들면:
    ### 리눅스 + ```shell export KUBECONFIG=$KUBECONFIG_SAVED ``` ### 윈도우 PowerShell -```shell - $Env:KUBECONFIG=$ENV:KUBECONFIG_SAVED + +```powershell +$Env:KUBECONFIG=$ENV:KUBECONFIG_SAVED ``` - - ## {{% heading "whatsnext" %}} - * [kubeconfig 파일을 사용하여 클러스터 접근 구성하기](/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/) * [kubectl config](/docs/reference/generated/kubectl/kubectl-commands#config) diff --git a/content/ko/docs/tasks/access-application-cluster/connecting-frontend-backend.md b/content/ko/docs/tasks/access-application-cluster/connecting-frontend-backend.md index 488ea59ff6..11afa655e8 100644 --- a/content/ko/docs/tasks/access-application-cluster/connecting-frontend-backend.md +++ b/content/ko/docs/tasks/access-application-cluster/connecting-frontend-backend.md @@ -220,4 +220,4 @@ kubectl delete deployment frontend backend * [서비스](/ko/docs/concepts/services-networking/service/)에 대해 더 알아본다. * [컨피그맵](/docs/tasks/configure-pod-container/configure-pod-configmap/)에 대해 더 알아본다. -* [서비스와 파드용 DNS](/docs/concepts/services-networking/dns-pod-service/)에 대해 더 알아본다. +* [서비스와 파드용 DNS](/ko/docs/concepts/services-networking/dns-pod-service/)에 대해 더 알아본다. diff --git a/content/ko/docs/tasks/access-application-cluster/list-all-running-container-images.md b/content/ko/docs/tasks/access-application-cluster/list-all-running-container-images.md index 77f5f5d635..f777d192cd 100644 --- a/content/ko/docs/tasks/access-application-cluster/list-all-running-container-images.md +++ b/content/ko/docs/tasks/access-application-cluster/list-all-running-container-images.md @@ -22,7 +22,7 @@ weight: 100 ## 모든 네임스페이스의 모든 컨테이너 이미지 가져오기 - `kubectl get pods --all-namespaces` 를 사용하여 모든 네임스페이스의 모든 파드 정보를 가져온다. -- 컨테이너 이미지 이름만 출력하기 위해 `-o jsonpath={..image}` 를 사용한다. +- 컨테이너 이미지 이름만 출력하기 위해 `-o jsonpath={.items[*].spec.containers[*].image}` 를 사용한다. 이 명령어는 결과값으로 받은 json을 반복적으로 파싱하여, `image` 필드만을 출력한다. 
- jsonpath를 사용하는 방법에 대해 더 많은 정보를 얻고 싶다면 @@ -33,7 +33,7 @@ weight: 100 - `uniq` 를 사용하여 이미지 개수를 합산한다. ```shell -kubectl get pods --all-namespaces -o jsonpath="{..image}" |\ +kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" |\ tr -s '[[:space:]]' '\n' |\ sort |\ uniq -c @@ -80,7 +80,7 @@ sort 명령어 결과값은 `app=nginx` 레이블에 일치하는 파드만 출력한다. ```shell -kubectl get pods --all-namespaces -o=jsonpath="{..image}" -l app=nginx +kubectl get pods --all-namespaces -o=jsonpath="{.items[*].spec.containers[*].image}" -l app=nginx ``` ## 파드 네임스페이스로 필터링된 컨테이너 이미지 목록 보기 @@ -89,7 +89,7 @@ kubectl get pods --all-namespaces -o=jsonpath="{..image}" -l app=nginx 아래의 명령어 결과값은 `kube-system` 네임스페이스에 있는 파드만 출력한다. ```shell -kubectl get pods --namespace kube-system -o jsonpath="{..image}" +kubectl get pods --namespace kube-system -o jsonpath="{.items[*].spec.containers[*].image}" ``` ## jsonpath 대신 Go 템플릿을 사용하여 컨테이너 이미지 목록 보기 diff --git a/content/ko/docs/tasks/administer-cluster/_index.md b/content/ko/docs/tasks/administer-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/tasks/administer-cluster/certificates.md b/content/ko/docs/tasks/administer-cluster/certificates.md index 8c8f6a148b..076f09faf4 100644 --- a/content/ko/docs/tasks/administer-cluster/certificates.md +++ b/content/ko/docs/tasks/administer-cluster/certificates.md @@ -116,7 +116,10 @@ weight: 20 openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key \ -CAcreateserial -out server.crt -days 10000 \ -extensions v3_ext -extfile csr.conf -1. 인증서를 본다. +1. 인증서 서명 요청을 확인한다. + + openssl req -noout -text -in ./server.csr +1. 인증서를 확인한다. openssl x509 -noout -text -in ./server.crt @@ -246,5 +249,5 @@ done. ## 인증서 API `certificates.k8s.io` API를 사용해서 -[여기](/docs/tasks/tls/managing-tls-in-a-cluster)에 +[여기](/ko/docs/tasks/tls/managing-tls-in-a-cluster/)에 설명된 대로 인증에 사용할 x509 인증서를 프로비전 할 수 있다. 
diff --git a/content/ko/docs/tasks/administer-cluster/declare-network-policy.md b/content/ko/docs/tasks/administer-cluster/declare-network-policy.md index 2a476d520d..6d4193e63c 100644 --- a/content/ko/docs/tasks/administer-cluster/declare-network-policy.md +++ b/content/ko/docs/tasks/administer-cluster/declare-network-policy.md @@ -89,7 +89,7 @@ remote file exists {{< codenew file="service/networking/nginx-policy.yaml" >}} 네트워크폴리시 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)이어야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. {{< note >}} 네트워크폴리시는 정책이 적용되는 파드의 그룹을 선택하는 `podSelector` 를 포함한다. 사용자는 이 정책이 `app=nginx` 레이블을 갖는 파드를 선택하는 것을 볼 수 있다. 레이블은 `nginx` 디플로이먼트에 있는 파드에 자동으로 추가된다. 빈 `podSelector` 는 네임스페이스의 모든 파드를 선택한다. diff --git a/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md b/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md index 9521bb1ec6..f681bc8778 100644 --- a/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md +++ b/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md @@ -23,7 +23,7 @@ DNS 변환(DNS resolution) 절차를 사용자 정의하는 방법을 설명한 ## 소개 -DNS는 _애드온 관리자_ 인 [클러스터 애드온](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/README.md)을 +DNS는 _애드온 관리자_ 인 [클러스터 애드온](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/README.md)을 사용하여 자동으로 시작되는 쿠버네티스 내장 서비스이다. diff --git a/content/ko/docs/tasks/administer-cluster/enable-disable-api.md b/content/ko/docs/tasks/administer-cluster/enable-disable-api.md new file mode 100644 index 0000000000..202035c291 --- /dev/null +++ b/content/ko/docs/tasks/administer-cluster/enable-disable-api.md @@ -0,0 +1,29 @@ +--- +title: 쿠버네티스 API 활성화 혹은 비활성화하기 +content_type: task +--- + + +이 페이지는 클러스터 {{< glossary_tooltip text="컨트롤 플레인" term_id="control-plane" >}}의 +특정한 API 버전을 활성화하거나 비활성화하는 방법에 대해 설명한다. 
+ + + + +API 서버에 `--runtime-config=api/` 커맨드 라인 인자를 사용함으로서 특정한 API 버전을 +활성화하거나 비활성화할 수 있다. 이 인자에 대한 값으로는 콤마로 구분된 API 버전의 목록을 사용한다. +뒤쪽에 위치한 값은 앞쪽의 값보다 우선적으로 사용된다. + +이 `runtime-config` 커맨드 라인 인자에는 다음의 두 개의 특수 키를 사용할 수도 있다. + +- `api/all`: 사용할 수 있는 모든 API를 선택한다. +- `api/legacy`: 레거시 API만을 선택한다. 여기서 레거시 API란 명시적으로 + [사용이 중단된](/docs/reference/using-api/deprecation-policy/) 모든 API를 가리킨다. + +예를 들어서, v1을 제외한 모든 API 버전을 비활성화하기 위해서는 `kube-apiserver`에 +`--runtime-config=api/all=false,api/v1=true` 인자를 사용한다. + +## {{% heading "whatsnext" %}} + +`kube-apiserver` 컴포넌트에 대한 더 자세한 내용은 다음의 [문서](/docs/reference/command-line-tools-reference/kube-apiserver/) +를 참고한다. diff --git a/content/ko/docs/tasks/administer-cluster/enabling-topology-aware-hints.md b/content/ko/docs/tasks/administer-cluster/enabling-topology-aware-hints.md new file mode 100644 index 0000000000..c0342fb377 --- /dev/null +++ b/content/ko/docs/tasks/administer-cluster/enabling-topology-aware-hints.md @@ -0,0 +1,38 @@ +--- +title: 토폴로지 인지 힌트 활성화하기 +content_type: task +min-kubernetes-server-version: 1.21 +--- + + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +_토폴로지 인지 힌트_ 는 {{< glossary_tooltip text="엔드포인트슬라이스(EndpointSlices)" term_id="endpoint-slice" >}}에 포함되어 있는 +토폴로지 정보를 이용해 토폴로지 인지 라우팅을 가능하게 한다. +이 방법은 트래픽을 해당 트래픽이 시작된 곳과 최대한 근접하도록 라우팅하는데, +이를 통해 비용을 줄이거나 네트워크 성능을 향상시킬 수 있다. + +## {{% heading "prerequisites" %}} + + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + +토폴로지 인지 힌트를 활성화하기 위해서는 다음의 필수 구성 요소가 필요하다. + +* {{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}}가 + iptables 모드 혹은 IPVS 모드로 동작하도록 설정 +* 엔드포인트슬라이스가 비활성화되지 않았는지 확인 + +## 토폴로지 인지 힌트 활성화하기 + +서비스 토폴로지 힌트를 활성화하기 위해서는 kube-apiserver, kube-controller-manager, kube-proxy에 대해 +`TopologyAwareHints` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 +활성화한다. 
+ +``` +--feature-gates="TopologyAwareHints=true" +``` + +## {{% heading "whatsnext" %}} + +* 서비스 항목 아래의 [토폴로지 인지 힌트](/docs/concepts/services-networking/topology-aware-hints)를 참고 +* [서비스와 애플리케이션 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/)를 참고 diff --git a/content/ko/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md b/content/ko/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md new file mode 100644 index 0000000000..bbc44c94a1 --- /dev/null +++ b/content/ko/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md @@ -0,0 +1,25 @@ +--- + + + + +title: 중요한 애드온 파드 스케줄링 보장하기 +content_type: concept +--- + + + +API 서버, 스케줄러 및 컨트롤러 매니저와 같은 쿠버네티스 주요 컴포넌트들은 컨트롤 플레인 노드에서 동작한다. 반면, 애드온들은 일반 클러스터 노드에서 동작한다. +이러한 애드온들 중 일부(예: 메트릭 서버, DNS, UI)는 클러스터 전부가 정상적으로 동작하는 데 필수적일 수 있다. +만약, 필수 애드온이 축출되고(수동 축출, 혹은 업그레이드와 같은 동작으로 인한 의도하지 않은 축출) +pending 상태가 된다면, 클러스터가 더 이상 제대로 동작하지 않을 수 있다. (사용률이 매우 높은 클러스터에서 해당 애드온이 +축출되자마자 다른 대기중인 파드가 스케줄링되거나 다른 이유로 노드에서 사용할 수 있는 자원량이 줄어들어 pending 상태가 발생할 수 있다) + +유의할 점은, 파드를 중요(critical)로 표시하는 것은 축출을 완전히 방지하기 위함이 아니다. 이것은 단지 파드가 영구적으로 사용할 수 없게 되는 것만을 방지하기 위함이다. +중요로 표시한 스태틱(static) 파드는 축출될 수 없다. 반면, 중요로 표시한 일반적인(non-static) 파드의 경우 항상 다시 스케줄링된다. + + + +### 파드를 중요(critical)로 표시하기 + +파드를 중요로 표시하기 위해서는, 해당 파드에 대해 priorityClassName을 `system-cluster-critical`이나 `system-node-critical`로 설정한다. `system-node-critical`은 가장 높은 우선 순위를 가지며, 심지어 `system-cluster-critical`보다도 우선 순위가 높다. 
diff --git a/content/ko/docs/tasks/administer-cluster/highly-available-control-plane.md b/content/ko/docs/tasks/administer-cluster/highly-available-control-plane.md new file mode 100644 index 0000000000..2ee11427f3 --- /dev/null +++ b/content/ko/docs/tasks/administer-cluster/highly-available-control-plane.md @@ -0,0 +1,216 @@ +--- + + +title: 고가용성 쿠버네티스 클러스터 컨트롤 플레인 설정하기 +content_type: task + +--- + + + +{{< feature-state for_k8s_version="v1.5" state="alpha" >}} + +구글 컴퓨트 엔진(Google Compute Engine, 이하 GCE)의 `kube-up`이나 `kube-down` 스크립트에 쿠버네티스 컨트롤 플레인 노드를 복제할 수 있다. 하지만 이러한 스크립트들은 프로덕션 용도로 사용하기에 적합하지 않으며, 프로젝트의 CI에서만 주로 사용된다. +이 문서는 kube-up/down 스크립트를 사용하여 고가용(HA) 컨트롤 플레인을 관리하는 방법과 GCE와 함께 사용하기 위해 HA 컨트롤 플레인을 구현하는 방법에 관해 설명한다. + + + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + +## HA 호환 클러스터 시작 + +새 HA 호환 클러스터를 생성하려면, `kube-up` 스크립트에 다음 플래그를 설정해야 한다. + +* `MULTIZONE=true` - 서버의 기본 영역(zone)과 다른 영역에서 컨트롤 플레인 kubelet이 제거되지 않도록 한다. +여러 영역에서 컨트롤 플레인 노드를 실행(권장됨)하려는 경우에 필요하다. + +* `ENABLE_ETCD_QUORUM_READ=true` - 모든 API 서버에서 읽은 내용이 최신 데이터를 반환하도록 하기 위한 것이다. +true인 경우, Etcd의 리더 복제본에서 읽는다. +이 값을 true로 설정하는 것은 선택 사항이다. 읽기는 더 안정적이지만 느리게 된다. + +선택적으로, 첫 번째 컨트롤 플레인 노드가 생성될 GCE 영역을 지정할 수 있다. +다음 플래그를 설정한다. + +* `KUBE_GCE_ZONE=zone` - 첫 번째 컨트롤 플레인 노드가 실행될 영역. + +다음 샘플 커맨드는 europe-west1-b GCE 영역에 HA 호환 클러스터를 구성한다. + +```shell +MULTIZONE=true KUBE_GCE_ZONE=europe-west1-b ENABLE_ETCD_QUORUM_READS=true ./cluster/kube-up.sh +``` + +위의 커맨드는 하나의 컨트롤 플레인 노드를 포함하는 클러스터를 생성한다. +그러나 후속 커맨드로 새 컨트롤 플레인 노드를 추가할 수 있다. + +## 새 컨트롤 플레인 노드 추가 + +HA 호환 클러스터를 생성했다면, 여기에 컨트롤 플레인 노드를 추가할 수 있다. +`kube-up` 스크립트에 다음 플래그를 사용하여 컨트롤 플레인 노드를 추가한다. + +* `KUBE_REPLICATE_EXISTING_MASTER=true` - 기존 컨트롤 플레인 노드의 복제본을 +만든다. + +* `KUBE_GCE_ZONE=zone` - 컨트롤 플레인 노드가 실행될 영역. +반드시 다른 컨트롤 플레인 노드가 존재하는 영역과 동일한 지역(region)에 있어야 한다. + +HA 호환 클러스터를 시작할 때, 상속되는 `MULTIZONE`이나 `ENABLE_ETCD_QUORUM_READS` 플래그를 따로 +설정할 필요는 없다. 
+ +다음 샘플 커맨드는 기존 HA 호환 클러스터에서 +컨트롤 플레인 노드를 복제한다. + +```shell +KUBE_GCE_ZONE=europe-west1-c KUBE_REPLICATE_EXISTING_MASTER=true ./cluster/kube-up.sh +``` + +## 컨트롤 플레인 노드 제거 + +다음 플래그가 있는 `kube-down` 스크립트를 사용하여 HA 클러스터에서 컨트롤 플레인 노드를 제거할 수 있다. + +* `KUBE_DELETE_NODES=false` - kubelet을 삭제하지 않기 위한 것이다. + +* `KUBE_GCE_ZONE=zone` - 컨트롤 플레인 노드가 제거될 영역. + +* `KUBE_REPLICA_NAME=replica_name` - (선택) 제거할 컨트롤 플레인 노드의 이름. +명시하지 않으면, 해당 영역의 모든 복제본이 제거된다. + +다음 샘플 커맨드는 기존 HA 클러스터에서 컨트롤 플레인 노드를 제거한다. + +```shell +KUBE_DELETE_NODES=false KUBE_GCE_ZONE=europe-west1-c ./cluster/kube-down.sh +``` + +## 동작에 실패한 컨트롤 플레인 노드 처리 + +HA 클러스터의 컨트롤 플레인 노드 중 하나가 동작에 실패하면, +클러스터에서 해당 노드를 제거하고 동일한 영역에 새 컨트롤 플레인 +노드를 추가하는 것이 가장 좋다. +다음 샘플 커맨드로 이 과정을 시연한다. + +1. 손상된 복제본을 제거한다. + +```shell +KUBE_DELETE_NODES=false KUBE_GCE_ZONE=replica_zone KUBE_REPLICA_NAME=replica_name ./cluster/kube-down.sh +``` + +
    1. 기존 복제본을 대신할 새 노드를 추가한다.
    + +```shell +KUBE_GCE_ZONE=replica-zone KUBE_REPLICATE_EXISTING_MASTER=true ./cluster/kube-up.sh +``` + +## HA 클러스터에서 컨트롤 플레인 노드 복제에 관한 모범 사례 + +* 다른 영역에 컨트롤 플레인 노드를 배치하도록 한다. 한 영역이 동작에 실패하는 동안, +해당 영역에 있는 컨트롤 플레인 노드도 모두 동작에 실패할 것이다. +영역 장애를 극복하기 위해 노드를 여러 영역에 배치한다 +(더 자세한 내용은 [멀티 영역](/ko/docs/setup/best-practices/multiple-zones/)를 참조한다). + +* 두 개의 노드로 구성된 컨트롤 플레인은 사용하지 않는다. 두 개의 노드로 구성된 +컨트롤 플레인에서의 합의를 위해서는 지속적 상태(persistent state) 변경 시 두 컨트롤 플레인 노드가 모두 정상적으로 동작 중이어야 한다. +결과적으로 두 컨트롤 플레인 노드 모두 필요하고, 둘 중 한 컨트롤 플레인 노드에만 장애가 발생해도 +클러스터의 심각한 장애 상태를 초래한다. +따라서 HA 관점에서는 두 개의 노드로 구성된 컨트롤 플레인은 +단일 노드로 구성된 컨트롤 플레인보다도 못하다. + +* 컨트롤 플레인 노드를 추가하면, 클러스터의 상태(Etcd)도 새 인스턴스로 복사된다. +클러스터가 크면, 이 상태를 복제하는 시간이 오래 걸릴 수 있다. +이 작업은 [etcd 관리 가이드](https://etcd.io/docs/v2.3/admin_guide/#member-migration)에 기술한 대로 +Etcd 데이터 디렉터리를 마이그레이션하여 속도를 높일 수 있다. +(향후에 Etcd 데이터 디렉터리 마이그레이션 지원 추가를 고려 중이다) + + + + + +## 구현 지침 + +![ha-master-gce](/images/docs/ha-master-gce.png) + +### 개요 + +각 컨트롤 플레인 노드는 다음 모드에서 다음 구성 요소를 실행한다. + +* Etcd 인스턴스: 모든 인스턴스는 합의를 사용하여 함께 클러스터화 한다. + +* API 서버: 각 서버는 내부 Etcd와 통신한다. 클러스터의 모든 API 서버가 가용하게 된다. + +* 컨트롤러, 스케줄러, 클러스터 오토스케일러: 임대 방식을 이용한다. 각 인스턴스 중 하나만이 클러스터에서 활성화된다. + +* 애드온 매니저: 각 매니저는 애드온의 동기화를 유지하려고 독립적으로 작업한다. + +또한 API 서버 앞단에 외부/내부 트래픽을 라우팅하는 로드 밸런서가 있을 것이다. + +### 로드 밸런싱 + +두 번째 컨트롤 플레인 노드를 배치할 때, 두 개의 복제본에 대한 로드 밸런서가 생성될 것이고, 첫 번째 복제본의 IP 주소가 로드 밸런서의 IP 주소로 승격된다. +비슷하게 끝에서 두 번째의 컨트롤 플레인 노드를 제거한 후에는 로드 밸런서가 제거되고 +해당 IP 주소는 마지막으로 남은 복제본에 할당된다. +로드 밸런서 생성 및 제거는 복잡한 작업이며, 이를 전파하는 데 시간(~20분)이 걸릴 수 있다. + +### 컨트롤 플레인 서비스와 Kubelet + +쿠버네티스 서비스에서 최신의 쿠버네티스 API 서버 목록을 유지하는 대신, +시스템은 모든 트래픽을 외부 IP 주소로 보낸다. + +* 단일 노드 컨트롤 플레인의 경우, IP 주소는 단일 컨트롤 플레인 노드를 가리킨다. + +* 고가용성 컨트롤 플레인의 경우, IP 주소는 컨트롤 플레인 노드 앞의 로드밸런서를 가리킨다. + +마찬가지로 Kubelet은 외부 IP 주소를 사용하여 컨트롤 플레인과 통신한다. + +### 컨트롤 플레인 노드 인증서 + +쿠버네티스는 각 컨트롤 플레인 노드의 외부 퍼블릭 IP 주소와 내부 IP 주소를 대상으로 TLS 인증서를 발급한다. +컨트롤 플레인 노드의 임시 퍼블릭 IP 주소에 대한 인증서는 없다. +임시 퍼블릭 IP 주소를 통해 컨트롤 플레인 노드에 접근하려면, TLS 검증을 건너뛰어야 한다. 
+ +### etcd 클러스터화 + +etcd를 클러스터로 구축하려면, etcd 인스턴스간 통신에 필요한 포트를 열어야 한다(클러스터 내부 통신용). +이러한 배포를 안전하게 하기 위해, etcd 인스턴스간의 통신은 SSL을 이용하여 승인한다. + +### API 서버 신원 + +{{< feature-state state="alpha" for_k8s_version="v1.20" >}} + +API 서버 식별 기능은 +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)에 +의해 제어되며 기본적으로 활성화되지 않는다. +{{< glossary_tooltip text="API 서버" term_id="kube-apiserver" >}} +시작 시 `APIServerIdentity` 라는 기능 게이트를 활성화하여 API 서버 신원을 활성화할 수 있다. + +```shell +kube-apiserver \ +--feature-gates=APIServerIdentity=true \ + # …다른 플래그는 평소와 같다. +``` + +부트스트랩 중에 각 kube-apiserver는 고유한 ID를 자신에게 할당한다. ID는 +`kube-apiserver-{UUID}` 형식이다. 각 kube-apiserver는 +_kube-system_ {{< glossary_tooltip text="네임스페이스" term_id="namespace">}}에 +[임대](/docs/reference/generated/kubernetes-api/{{< param "version" >}}//#lease-v1-coordination-k8s-io)를 생성한다. +임대 이름은 kube-apiserver의 고유 ID이다. 임대에는 +`k8s.io/component=kube-apiserver` 라는 레이블이 있다. 각 kube-apiserver는 +`IdentityLeaseRenewIntervalSeconds` (기본값은 10초)마다 임대를 새로 갱신한다. 각 +kube-apiserver는 `IdentityLeaseDurationSeconds` (기본값은 3600초)마다 +모든 kube-apiserver 식별 ID 임대를 확인하고, +`IdentityLeaseDurationSeconds` 이상 갱신되지 않은 임대를 삭제한다. +`IdentityLeaseRenewIntervalSeconds` 및 `IdentityLeaseDurationSeconds`는 +kube-apiserver 플래그 `identity-lease-renew-interval-seconds` +및 `identity-lease-duration-seconds`로 구성된다. + +이 기능을 활성화하는 것은 HA API 서버 조정과 관련된 기능을 +사용하기 위한 전제조건이다(예: `StorageVersionAPI` 기능 게이트). 
+ +## 추가 자료 + +[자동화된 HA 마스터 배포 - 제안 문서](https://git.k8s.io/community/contributors/design-proposals/cluster-lifecycle/ha_master.md) diff --git a/content/ko/docs/tasks/administer-cluster/highly-available-master.md b/content/ko/docs/tasks/administer-cluster/highly-available-master.md deleted file mode 100644 index 76a734bd65..0000000000 --- a/content/ko/docs/tasks/administer-cluster/highly-available-master.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -reviewers: -title: 고가용성 쿠버네티스 클러스터 마스터 설정하기 -content_type: task ---- - - - -{{< feature-state for_k8s_version="v1.5" state="alpha" >}} - -구글 컴퓨트 엔진(Google Compute Engine, 이하 GCE)의 `kube-up`이나 `kube-down` 스크립트에 쿠버네티스 마스터를 복제할 수 있다. -이 문서는 kube-up/down 스크립트를 사용하여 고가용(HA) 마스터를 관리하는 방법과 GCE와 함께 사용하기 위해 HA 마스터를 구현하는 방법에 관해 설명한다. - - - - -## {{% heading "prerequisites" %}} - - -{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - - - - -## HA 호환 클러스터 시작 - -새 HA 호환 클러스터를 생성하려면, `kube-up` 스크립트에 다음 플래그를 설정해야 한다. - -* `MULTIZONE=true` - 서버의 기본 존(zone)과 다른 존에서 마스터 복제본의 kubelet이 제거되지 않도록 한다. -다른 존에서 마스터 복제본을 실행하려는 경우에 권장하고 필요하다. - -* `ENABLE_ETCD_QUORUM_READ=true` - 모든 API 서버에서 읽은 내용이 최신 데이터를 반환하도록 하기 위한 것이다. -true인 경우, Etcd의 리더 복제본에서 읽는다. -이 값을 true로 설정하는 것은 선택 사항이다. 읽기는 더 안정적이지만 느리게 된다. - -선택적으로 첫 번째 마스터 복제본이 생성될 GCE 존을 지정할 수 있다. -다음 플래그를 설정한다. - -* `KUBE_GCE_ZONE=zone` - 첫 마스터 복제본이 실행될 존. - -다음 샘플 커맨드는 europe-west1-b GCE 존에 HA 호환 클러스터를 구성한다. - -```shell -MULTIZONE=true KUBE_GCE_ZONE=europe-west1-b ENABLE_ETCD_QUORUM_READS=true ./cluster/kube-up.sh -``` - -위에 커맨드는 하나의 마스터로 클러스터를 생성한다. -그러나 후속 커맨드로 새 마스터 복제본을 추가할 수 있다. - -## 새 마스터 복제본 추가 - -HA 호환 클러스터를 생성한 다음 그것의 마스터 복제본을 추가할 수 있다. -`kube-up` 스크립트에 다음 플래그를 사용하여 마스터 복제본을 추가한다. - -* `KUBE_REPLICATE_EXISTING_MASTER=true` - 기존 마스터의 복제본을 -만든다. - -* `KUBE_GCE_ZONE=zone` - 마스터 복제본이 실행될 존. -반드시 다른 복제본 존과 동일한 존에 있어야 한다. - -HA 호환 클러스터를 시작할 때, 상속되는 `MULTIZONE`이나 `ENABLE_ETCD_QUORUM_READS` 플래그를 따로 -설정할 필요는 없다. - -다음 샘플 커맨드는 기존 HA 호환 클러스터에서 마스터를 복제한다. 
- -```shell -KUBE_GCE_ZONE=europe-west1-c KUBE_REPLICATE_EXISTING_MASTER=true ./cluster/kube-up.sh -``` - -## 마스터 복제본 제거 - -다음 플래그가 있는 `kube-down` 스크립트를 사용하여 HA 클러스터에서 마스터 복제본을 제거할 수 있다. - -* `KUBE_DELETE_NODES=false` - kubelet을 삭제하지 않기 위한 것이다. - -* `KUBE_GCE_ZONE=zone` - 마스터 복제본이 제거될 존. - -* `KUBE_REPLICA_NAME=replica_name` - (선택) 제거할 마스터 복제본의 이름. -비어있는 경우, 해당 존의 모든 복제본이 제거된다. - -다음 샘플 커맨드는 기존 HA 클러스터에서 마스터 복제본을 제거한다. - -```shell -KUBE_DELETE_NODES=false KUBE_GCE_ZONE=europe-west1-c ./cluster/kube-down.sh -``` - -## 마스터 복제 실패 처리 - -HA 클러스터의 마스터 복제본 중 하나가 실패하면, -클러스터에서 복제본을 제거하고 동일한 존에서 새 복제본을 추가하는 것이 가장 좋다. -다음 샘플 커맨드로 이 과정을 시연한다. - -1. 손상된 복제본을 제거한다. - - ```shell - KUBE_DELETE_NODES=false KUBE_GCE_ZONE=replica_zone KUBE_REPLICA_NAME=replica_name ./cluster/kube-down.sh - ``` - -1. 기존 복제본 대신 새 복제본을 추가한다. - - ```shell - KUBE_GCE_ZONE=replica-zone KUBE_REPLICATE_EXISTING_MASTER=true ./cluster/kube-up.sh - ``` - -## HA 클러스터에서 마스터 복제에 관한 모범 사례 - -* 다른 존에 마스터 복제본을 배치하도록 한다. 한 존이 실패하는 동안, 해당 존에 있는 마스터도 모두 실패할 것이다. -존 장애를 극복하기 위해 노드를 여러 존에 배치한다 -(더 자세한 내용은 [멀티 존](/ko/docs/setup/best-practices/multiple-zones/)를 참조한다). - -* 두 개의 마스터 복제본은 사용하지 않는다. 두 개의 복제 클러스터에 대한 합의는 지속적 상태를 변경해야 할 때 두 복제본 모두 실행해야 한다. -결과적으로 두 복제본 모두 필요하고, 어떤 복제본의 장애에도 클러스터가 대부분 장애 상태로 변한다. -따라서 두 개의 복제본 클러스터는 HA 관점에서 단일 복제 클러스터보다 열등하다. - -* 마스터 복제본을 추가하면, 클러스터의 상태(Etcd)도 새 인스턴스로 복사된다. -클러스터가 크면, 이 상태를 복제하는 시간이 오래 걸릴 수 있다. -이 작업은 [여기](https://coreos.com/etcd/docs/latest/admin_guide.html#member-migration) 기술한 대로 -Etcd 데이터 디렉터리를 마이그레이션하여 속도를 높일 수 있다(향후에 Etcd 데이터 디렉터리 마이그레이션 지원 추가를 고려 중이다). - - - - - -## 구현 지침 - -![ha-master-gce](/images/docs/ha-master-gce.png) - -### 개요 - -각 마스터 복제본은 다음 모드에서 다음 구성 요소를 실행한다. - -* Etcd 인스턴스: 모든 인스턴스는 합의를 사용하여 함께 클러스터화 한다. - -* API 서버: 각 서버는 내부 Etcd와 통신한다. 클러스터의 모든 API 서버가 가용하게 된다. - -* 컨트롤러, 스케줄러, 클러스터 오토스케일러: 임대 방식을 이용한다. 각 인스턴스 중 하나만이 클러스터에서 활성화된다. - -* 애드온 매니저: 각 매니저는 애드온의 동기화를 유지하려고 독립적으로 작업한다. - -또한 API 서버 앞단에 외부/내부 트래픽을 라우팅하는 로드 밸런서가 있을 것이다. 
- -### 로드 밸런싱 - -두 번째 마스터 복제본을 시작할 때, 두 개의 복제본을 포함된 로드 밸런서가 생성될 것이고, 첫 번째 복제본의 IP 주소가 로드 밸런서의 IP 주소로 승격된다. -비슷하게 끝에서 두 번째의 마스터 복제본을 제거한 후에는 로드 밸런서가 제거되고 -해당 IP 주소는 마지막으로 남은 복제본에 할당된다. -로드 밸런서 생성 및 제거는 복잡한 작업이며, 이를 전파하는 데 시간(~20분)이 걸릴 수 있다. - -### 마스터 서비스와 Kubelet - -쿠버네티스 서비스에서 최신의 쿠버네티스 API 서버 목록을 유지하는 대신, -시스템은 모든 트래픽을 외부 IP 주소로 보낸다. - -* 단일 마스터 클러스터에서 IP 주소는 단일 마스터를 가리킨다. - -* 다중 마스터 클러스터에서 IP 주소는 마스터 앞에 로드밸런서를 가리킨다. - -마찬가지로 Kubelet은 외부 IP 주소를 사용하여 마스터와 통신한다. - -### 마스터 인증서 - -쿠버네티스는 각 복제본의 외부 퍼블릭 IP 주소와 내부 IP 주소를 대상으로 마스터 TLS 인증서를 발급한다. -복제본의 임시 공개 IP 주소에 대한 인증서는 없다. -임시 퍼블릭 IP 주소를 통해 복제본에 접근하려면, TLS 검증을 건너뛰어야 한다. - -### etcd 클러스터화 - -etcd를 클러스터로 구축하려면, etcd 인스턴스간 통신에 필요한 포트를 열어야 한다(클러스터 내부 통신용). -이러한 배포를 안전하게 하기 위해, etcd 인스턴스간의 통신은 SSL을 이용하여 승인한다. - -### API 서버 신원 - -{{< feature-state state="alpha" for_k8s_version="v1.20" >}} - -API 서버 식별 기능은 -[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)에 -의해 제어되며 기본적으로 활성화되지 않는다. -{{< glossary_tooltip text="API 서버" term_id="kube-apiserver" >}} -시작 시 `APIServerIdentity` 라는 기능 게이트를 활성화하여 API 서버 신원을 활성화할 수 있다. - -```shell -kube-apiserver \ ---feature-gates=APIServerIdentity=true \ - # …다른 플래그는 평소와 같다. -``` - -부트스트랩 중에 각 kube-apiserver는 고유한 ID를 자신에게 할당한다. ID는 -`kube-apiserver-{UUID}` 형식이다. 각 kube-apiserver는 -_kube-system_ {{< glossary_tooltip text="네임스페이스" term_id="namespace">}}에 -[임대](/docs/reference/generated/kubernetes-api/{{< param "version" >}}//#lease-v1-coordination-k8s-io)를 생성한다. -임대 이름은 kube-apiserver의 고유 ID이다. 임대에는 -`k8s.io/component=kube-apiserver` 라는 레이블이 있다. 각 kube-apiserver는 -`IdentityLeaseRenewIntervalSeconds` (기본값은 10초)마다 임대를 새로 갱신한다. 각 -kube-apiserver는 `IdentityLeaseDurationSeconds` (기본값은 3600초)마다 -모든 kube-apiserver 식별 ID 임대를 확인하고, -`IdentityLeaseDurationSeconds` 이상 갱신되지 않은 임대를 삭제한다. -`IdentityLeaseRenewIntervalSeconds` 및 `IdentityLeaseDurationSeconds`는 -kube-apiserver 플래그 `identity-lease-renew-interval-seconds` -및 `identity-lease-duration-seconds`로 구성된다. 
- -이 기능을 활성화하는 것은 HA API 서버 조정과 관련된 기능을 -사용하기 위한 전제조건이다(예: `StorageVersionAPI` 기능 게이트). - -## 추가 자료 - -[자동화된 HA 마스터 배포 - 제안 문서](https://git.k8s.io/community/contributors/design-proposals/cluster-lifecycle/ha_master.md) diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/_index.md b/content/ko/docs/tasks/administer-cluster/kubeadm/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md b/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md index 16c84d451c..e67a08a74e 100644 --- a/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md +++ b/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md @@ -183,7 +183,7 @@ curl.exe -LO https://github.com/kubernetes-sigs/sig-windows-tools/releases/lates ```powershell # 예 -.\Install-Containerd.ps1 -ContainerDVersion v1.4.1 +.\Install-Containerd.ps1 -ContainerDVersion 1.4.1 ``` {{< /note >}} diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md index 02b2b1330f..de6feb480d 100644 --- a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md +++ b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md @@ -85,7 +85,11 @@ front-proxy-ca Dec 28, 2029 23:36 UTC 9y no {{< /warning >}} {{< note >}} -kubeadm은 자동 인증서 갱신을 위해 kubelet을 구성하기 때문에 `kubelet.conf` 는 위 목록에 포함되어 있지 않다. +`kubelet.conf` 는 위 목록에 포함되어 있지 않은데, 이는 +kubeadm이 [자동 인증서 갱신](/ko/docs/tasks/tls/certificate-rotation/)을 위해 +`/var/lib/kubelet/pki`에 있는 갱신 가능한 인증서를 이용하여 kubelet을 구성하기 때문이다. +만료된 kubelet 클라이언트 인증서를 갱신하려면 +[kubelet 클라이언트 갱신 실패](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/#kubelet-client-cert) 섹션을 확인한다. {{< /note >}} {{< warning >}} @@ -157,7 +161,7 @@ HA 클러스터를 실행 중인 경우, 모든 컨트롤 플레인 노드에서 빌트인 서명자를 활성화하려면, `--cluster-signing-cert-file` 와 `--cluster-signing-key-file` 플래그를 전달해야 한다. 
-새 클러스터를 생성하는 경우, kubeadm [구성 파일](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2)을 사용할 수 있다. +새 클러스터를 생성하는 경우, kubeadm [구성 파일](/docs/reference/config-api/kubeadm-config.v1beta2/)을 사용할 수 있다. ```yaml apiVersion: kubeadm.k8s.io/v1beta2 @@ -238,7 +242,7 @@ serverTLSBootstrap: true `serverTLSBootstrap: true` 필드는 kubelet 인증서를 이용한 부트스트랩을 `certificates.k8s.io` API에 요청함으로써 활성화할 것이다. 한 가지 알려진 제약은 이 인증서들에 대한 CSR(인증서 서명 요청)들이 kube-controller-manager - -[`kubernetes.io/kubelet-serving`](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers)의 +[`kubernetes.io/kubelet-serving`](/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers)의 기본 서명자(default signer)에 의해서 자동으로 승인될 수 없다는 점이다. 이것은 사용자나 제 3의 컨트롤러의 액션을 필요로 할 것이다. diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md index 2227c49c9e..c009339acc 100644 --- a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md +++ b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md @@ -38,7 +38,7 @@ weight: 20 ### 추가 정보 - kubelet 마이너 버전을 업그레이드하기 전에 [노드 드레이닝(draining)](/docs/tasks/administer-cluster/safely-drain-node/)이 - 필요하다. 컨트롤 플레인 노드의 경우 CoreNDS 파드 또는 기타 중요한 워크로드를 실행할 수 있다. + 필요하다. 컨트롤 플레인 노드의 경우 CoreDNS 파드 또는 기타 중요한 워크로드를 실행할 수 있다. - 컨테이너 사양 해시 값이 변경되므로, 업그레이드 후 모든 컨테이너가 다시 시작된다. diff --git a/content/ko/docs/tasks/configmap-secret/_index.md b/content/ko/docs/tasks/configmap-secret/_index.md new file mode 100644 index 0000000000..e63c605924 --- /dev/null +++ b/content/ko/docs/tasks/configmap-secret/_index.md @@ -0,0 +1,6 @@ +--- +title: "시크릿(Secret) 관리" +weight: 28 +description: 시크릿을 사용하여 기밀 설정 데이터 관리. 
+--- + diff --git a/content/ko/docs/tasks/configmap-secret/managing-secret-using-config-file.md b/content/ko/docs/tasks/configmap-secret/managing-secret-using-config-file.md new file mode 100644 index 0000000000..3248328907 --- /dev/null +++ b/content/ko/docs/tasks/configmap-secret/managing-secret-using-config-file.md @@ -0,0 +1,198 @@ +--- +title: 환경 설정 파일을 사용하여 시크릿을 관리 +content_type: task +weight: 20 +description: 환경 설정 파일을 사용하여 시크릿 오브젝트를 생성. +--- + + + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + +## 환경 설정 파일 생성 + +먼저 새 파일에 JSON 이나 YAML 형식으로 시크릿(Secret)에 대한 상세 사항을 기록하고, +이 파일을 이용하여 해당 시크릿 오브젝트를 생성할 수 있다. 이 +[시크릿](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core) +리소스에는 `data` 와 `stringData` 의 두 가지 맵이 포함되어 있다. +`data` 필드는 base64로 인코딩된 임의의 데이터를 기입하는 데 사용된다. +`stringData` 필드는 편의를 위해 제공되며, 이를 사용해 시크릿 데이터를 인코딩되지 않은 문자열로 +기입할 수 있다. +`data` 및 `stringData`은 영숫자, +`-`, `_` 그리고 `.`로 구성되어야 한다. + +예를 들어 시크릿에 `data` 필드를 사용하여 두 개의 문자열을 저장하려면 다음과 같이 +문자열을 base64로 변환한다. + +```shell +echo -n 'admin' | base64 +``` + +출력은 다음과 유사하다. + +``` +YWRtaW4= +``` + +```shell +echo -n '1f2d1e2e67df' | base64 +``` + +출력은 다음과 유사하다. + +``` +MWYyZDFlMmU2N2Rm +``` + +다음과 같이 시크릿 구성 파일을 작성한다. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mysecret +type: Opaque +data: + username: YWRtaW4= + password: MWYyZDFlMmU2N2Rm +``` + +시크릿 오브젝트의 이름은 유효한 +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. + +{{< note >}} +시크릿 데이터의 직렬화된(serialized) JSON 및 YAML 값은 base64 문자열로 인코딩된다. +이러한 문자열에는 개행(newline)을 사용할 수 없으므로 생략해야 한다. +Darwin/macOS에서 `base64` 도구를 사용할 경우, 사용자는 긴 줄을 분할하는 `-b` 옵션을 사용해서는 안 된다. +반대로, 리눅스 사용자는 `-w` 옵션을 사용할 수 없는 경우 +`base64` 명령어 또는 `base64 | tr -d '\n'` 파이프라인에 +`-w 0` 옵션을 *추가해야 한다*. +{{< /note >}} + +특정 시나리오의 경우 `stringData` 필드를 대신 사용할 수 있다. 이 +필드를 사용하면 base64로 인코딩되지 않은 문자열을 시크릿에 직접 넣을 수 있으며, +시크릿이 생성되거나 업데이트될 때 문자열이 인코딩된다. 
+ +이에 대한 실제적인 예로, +시크릿을 사용하여 구성 파일을 저장하는 애플리케이션을 배포하면서, +배포 프로세스 중에 해당 구성 파일의 일부를 채우려는 경우를 들 수 있다. + +예를 들어 애플리케이션에서 다음 구성 파일을 사용하는 경우: + +```yaml +apiUrl: "https://my.api.com/api/v1" +username: "" +password: "" +``` + +다음 정의를 사용하여 이를 시크릿에 저장할 수 있다. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mysecret +type: Opaque +stringData: + config.yaml: | + apiUrl: "https://my.api.com/api/v1" + username: + password: +``` + +## 시크릿 오브젝트 생성 + +[`kubectl apply`](/docs/reference/generated/kubectl/kubectl-commands#apply)를 이용하여 시크릿 오브젝트를 생성한다. + +```shell +kubectl apply -f ./secret.yaml +``` + +출력은 다음과 유사하다. + +``` +secret/mysecret created +``` + +## 시크릿 확인 + +`stringData` 필드는 쓰기 전용 편의 필드이다. 시크릿을 조회할 때 절대 출력되지 않는다. +예를 들어 다음 명령을 실행하는 경우: + +```shell +kubectl get secret mysecret -o yaml +``` + +출력은 다음과 유사하다. + +```yaml +apiVersion: v1 +data: + config.yaml: YXBpVXJsOiAiaHR0cHM6Ly9teS5hcGkuY29tL2FwaS92MSIKdXNlcm5hbWU6IHt7dXNlcm5hbWV9fQpwYXNzd29yZDoge3twYXNzd29yZH19 +kind: Secret +metadata: + creationTimestamp: 2018-11-15T20:40:59Z + name: mysecret + namespace: default + resourceVersion: "7225" + uid: c280ad2e-e916-11e8-98f2-025000000001 +type: Opaque +``` + +`kubectl get` 및 `kubectl describe` 명령은 기본적으로 `시크릿`의 내용을 표시하지 않는다. +이는 `시크릿`이 실수로 구경꾼에게 노출되거나 +터미널 로그에 저장되는 것을 방지하기 위한 것이다. +인코딩된 데이터의 실제 내용을 확인하려면 다음을 참조한다. +[시크릿 디코딩](/ko/docs/tasks/configmap-secret/managing-secret-using-kubectl/#decoding-secret). + +하나의 필드(예: `username`)가 `data`와 `stringData`에 모두 명시되면, `stringData`에 명시된 값이 사용된다. +예를 들어 다음과 같은 시크릿인 경우: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mysecret +type: Opaque +data: + username: YWRtaW4= +stringData: + username: administrator +``` + +결과는 다음과 같은 시크릿이다. 
+ +```yaml +apiVersion: v1 +data: + username: YWRtaW5pc3RyYXRvcg== +kind: Secret +metadata: + creationTimestamp: 2018-11-15T20:46:46Z + name: mysecret + namespace: default + resourceVersion: "7579" + uid: 91460ecb-e917-11e8-98f2-025000000001 +type: Opaque +``` + +여기서 `YWRtaW5pc3RyYXRvcg==`는 `administrator`으로 디코딩된다. + +## 삭제 + +생성한 시크릿을 삭제하려면 다음 명령을 실행한다. + +```shell +kubectl delete secret mysecret +``` + +## {{% heading "whatsnext" %}} + +- [시크릿 개념](/ko/docs/concepts/configuration/secret/)에 대해 자세히 알아보기 +- [`kubectl` 커맨드를 사용하여 시크릿을 관리](/ko/docs/tasks/configmap-secret/managing-secret-using-kubectl/)하는 방법 알아보기 +- [kustomize를 사용하여 시크릿을 관리](/ko/docs/tasks/configmap-secret/managing-secret-using-kustomize/)하는 방법 알아보기 + diff --git a/content/ko/docs/tasks/configmap-secret/managing-secret-using-kubectl.md b/content/ko/docs/tasks/configmap-secret/managing-secret-using-kubectl.md new file mode 100644 index 0000000000..8b3f62217e --- /dev/null +++ b/content/ko/docs/tasks/configmap-secret/managing-secret-using-kubectl.md @@ -0,0 +1,156 @@ +--- +title: kubectl을 사용한 시크릿 관리 +content_type: task +weight: 10 +description: kubectl 커맨드를 사용하여 시크릿 오브젝트를 생성. +--- + + + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + +## 시크릿 생성 + +`시크릿`에는 파드가 데이터베이스에 접근하는 데 필요한 사용자 자격 증명이 포함될 수 있다. +예를 들어 데이터베이스 연결 문자열은 사용자 이름과 암호로 구성된다. +사용자 이름은 로컬 컴퓨터의 `./username.txt` 파일에, 비밀번호는 +`./password.txt` 파일에 저장할 수 있다. + +```shell +echo -n 'admin' > ./username.txt +echo -n '1f2d1e2e67df' > ./password.txt +``` +이 명령에서 `-n` 플래그는 생성된 파일의 +텍스트 끝에 추가 개행 문자가 포함되지 않도록 해 준다. 이는 `kubectl`이 파일을 읽고 +내용을 base64 문자열로 인코딩할 때 개행 문자도 함께 인코딩될 수 있기 때문에 +중요하다. + +`kubectl create secret` 명령은 이러한 파일들을 시크릿으로 패키징하고 +API 서버에 오브젝트를 생성한다. + +```shell +kubectl create secret generic db-user-pass \ + --from-file=./username.txt \ + --from-file=./password.txt +``` + +출력은 다음과 유사하다. + +``` +secret/db-user-pass created +``` + +기본 키 이름은 파일 이름이다. 선택적으로 `--from-file=[key=]source`를 사용하여 키 이름을 설정할 수 있다. 
+예제: + +```shell +kubectl create secret generic db-user-pass \ + --from-file=username=./username.txt \ + --from-file=password=./password.txt +``` + +파일에 포함하는 암호 문자열에서 +특수 문자를 이스케이프하지 않아도 된다. + +`--from-literal==` 태그를 사용하여 시크릿 데이터를 제공할 수도 있다. +이 태그는 여러 키-값 쌍을 제공하기 위해 두 번 이상 지정할 수 있다. +`$`, `\`, `*`, `=` 및 `!`와 같은 특수 문자는 +[shell](https://en.wikipedia.org/wiki/Shell_(computing))에 해석하고 처리하기 때문에 +이스케이프할 필요가 있다. + +대부분의 셸에서 암호를 이스케이프하는 가장 쉬운 방법은 암호를 작은따옴표(`'`)로 둘러싸는 것이다. +예를 들어, 비밀번호가 `S!B\*d$zDsb=`인 경우, +다음 커맨드를 실행한다. + +```shell +kubectl create secret generic dev-db-secret \ + --from-literal=username=devuser \ + --from-literal=password='S!B\*d$zDsb=' +``` + +## 시크릿 확인 + +시크릿이 생성되었는지 확인한다. + +```shell +kubectl get secrets +``` + +출력은 다음과 유사하다. + +``` +NAME TYPE DATA AGE +db-user-pass Opaque 2 51s +``` + +다음 명령을 실행하여 `시크릿`에 대한 상세 사항을 볼 수 있다. + +```shell +kubectl describe secrets/db-user-pass +``` + +출력은 다음과 유사하다. + +``` +Name: db-user-pass +Namespace: default +Labels: +Annotations: + +Type: Opaque + +Data +==== +password: 12 bytes +username: 5 bytes +``` + +`kubectl get` 및 `kubectl describe` 명령은 +기본적으로 `시크릿`의 내용을 표시하지 않는다. 이는 `시크릿`이 실수로 노출되거나 +터미널 로그에 저장되는 것을 방지하기 위한 것이다. + +## 시크릿 디코딩 {#decoding-secret} + +생성한 시크릿을 보려면 다음 명령을 실행한다. + +```shell +kubectl get secret db-user-pass -o jsonpath='{.data}' +``` + +출력은 다음과 유사하다. + +```json +{"password":"MWYyZDFlMmU2N2Rm","username":"YWRtaW4="} +``` + +이제 `password` 데이터를 디코딩할 수 있다. + +```shell +echo 'MWYyZDFlMmU2N2Rm' | base64 --decode +``` + +출력은 다음과 유사하다. + +``` +1f2d1e2e67df +``` + +## 삭제 + +생성한 시크릿을 삭제하려면 다음 명령을 실행한다. 
+ +```shell +kubectl delete secret db-user-pass +``` + + + +## {{% heading "whatsnext" %}} + +- [시크릿 개념](/ko/docs/concepts/configuration/secret/)에 대해 자세히 알아보기 +- [환경 설정 파일을 사용하여 시크릿을 관리](/ko/docs/tasks/configmap-secret/managing-secret-using-config-file/)하는 방법 알아보기 +- [kustomize를 사용하여 시크릿을 관리](/ko/docs/tasks/configmap-secret/managing-secret-using-kustomize/)하는 방법 알아보기 diff --git a/content/ko/docs/tasks/configmap-secret/managing-secret-using-kustomize.md b/content/ko/docs/tasks/configmap-secret/managing-secret-using-kustomize.md new file mode 100644 index 0000000000..2198903885 --- /dev/null +++ b/content/ko/docs/tasks/configmap-secret/managing-secret-using-kustomize.md @@ -0,0 +1,139 @@ +--- +title: kustomize를 사용하여 시크릿 관리 +content_type: task +weight: 30 +description: kustomization.yaml 파일을 사용하여 시크릿 오브젝트 생성. +--- + + + +쿠버네티스 v1.14부터 `kubectl`은 +[Kustomize를 이용한 쿠버네티스 오브젝트의 선언형 관리](/ko/docs/tasks/manage-kubernetes-objects/kustomization/)를 지원한다. +Kustomize는 시크릿 및 컨피그맵을 생성하기 위한 리소스 생성기를 제공한다. +Kustomize 생성기는 디렉토리 내의 `kustomization.yaml` 파일에 지정되어야 한다. +시크릿 생성 후 `kubectl apply`를 통해 API +서버에 시크릿을 생성할 수 있다. + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + +## Kustomization 파일 생성 + +`kustomization.yaml` 파일에 다른 기존 파일을 참조하는 +`secretGenerator`를 정의하여 시크릿을 생성할 수 있다. +예를 들어 다음 kustomization 파일은 +`./username.txt` 및 `./password.txt` 파일을 참조한다. + +```yaml +secretGenerator: +- name: db-user-pass + files: + - username.txt + - password.txt +``` + +`kustomization.yaml` 파일에 리터럴을 명시하여 `secretGenerator`를 +정의할 수도 있다. +예를 들어 다음 `kustomization.yaml` 파일에는 +각각 `username`과 `password`에 대한 두 개의 리터럴이 포함되어 있다. + +```yaml +secretGenerator: +- name: db-user-pass + literals: + - username=admin + - password=1f2d1e2e67df +``` + +`kustomization.yaml` 파일에 `.env` 파일을 명시하여 +`secretGenerator`를 정의할 수도 있다. +예를 들어 다음 `kustomization.yaml` 파일은 +`.env.secret` 파일에서 데이터를 가져온다. 
+ +```yaml +secretGenerator: +- name: db-user-pass + envs: + - .env.secret +``` + +모든 경우에 대해, 값을 base64로 인코딩하지 않아도 된다. + +## 시크릿 생성 + +다음 명령을 실행하여 시크릿을 생성한다. + +```shell +kubectl apply -k . +``` + +출력은 다음과 유사하다. + +``` +secret/db-user-pass-96mffmfh4k created +``` + +시크릿이 생성되면 시크릿 데이터를 해싱하고 +이름에 해시 값을 추가하여 시크릿 이름이 생성된다. 이렇게 함으로써 +데이터가 수정될 때마다 시크릿이 새롭게 생성된다. + +## 생성된 시크릿 확인 + +시크릿이 생성된 것을 확인할 수 있다. + +```shell +kubectl get secrets +``` + +출력은 다음과 유사하다. + +``` +NAME TYPE DATA AGE +db-user-pass-96mffmfh4k Opaque 2 51s +``` + +다음 명령을 실행하여 시크릿에 대한 상세 사항을 볼 수 있다. + +```shell +kubectl describe secrets/db-user-pass-96mffmfh4k +``` + +출력은 다음과 유사하다. + +``` +Name: db-user-pass-96mffmfh4k +Namespace: default +Labels: +Annotations: + +Type: Opaque + +Data +==== +password.txt: 12 bytes +username.txt: 5 bytes +``` + +`kubectl get` 및 `kubectl describe` 명령은 기본적으로 `시크릿`의 내용을 표시하지 않는다. +이는 `시크릿`이 실수로 구경꾼에게 노출되는 것을 방지하기 위한 것으로, +또는 터미널 로그에 저장되지 않는다. +인코딩된 데이터의 실제 내용을 확인하려면 다음을 참조한다. +[시크릿 디코딩](/ko/docs/tasks/configmap-secret/managing-secret-using-kubectl/#decoding-secret). + +## 삭제 + +생성한 시크릿을 삭제하려면 다음 명령을 실행한다. 
+ +```shell +kubectl delete secret db-user-pass-96mffmfh4k +``` + + +## {{% heading "whatsnext" %}} + +- [시크릿 개념](/ko/docs/concepts/configuration/secret/)에 대해 자세히 알아보기 +- [`kubectl` 커맨드을 사용하여 시크릿 관리](/ko/docs/tasks/configmap-secret/managing-secret-using-kubectl/) 방법 알아보기 +- [환경 설정 파일을 사용하여 시크릿을 관리](/ko/docs/tasks/configmap-secret/managing-secret-using-config-file/)하는 방법 알아보기 diff --git a/content/ko/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md b/content/ko/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md index dc4acf8411..3461c57061 100644 --- a/content/ko/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md +++ b/content/ko/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md @@ -28,7 +28,7 @@ weight: 60 * 사용자는 노드가 단 하나만 있는 쿠버네티스 클러스터가 필요하고, {{< glossary_tooltip text="kubectl" term_id="kubectl" >}} 커맨드라인 툴이 사용자의 클러스터와 통신할 수 있도록 설정되어 있어야 한다. 만약 사용자가 -아직 단일 노드 클러스터를 가지고 있지 않다면, [Minikube](/ko/docs/setup/learning-environment/minikube/)를 +아직 단일 노드 클러스터를 가지고 있지 않다면, [Minikube](/ko/docs/tasks/tools/#minikube)를 사용하여 클러스터 하나를 생성할 수 있다. * [퍼시스턴트 볼륨](https://minikube.sigs.k8s.io/docs/)의 diff --git a/content/ko/docs/tasks/configure-pod-container/pull-image-private-registry.md b/content/ko/docs/tasks/configure-pod-container/pull-image-private-registry.md index 5a8295aff2..2188ced539 100644 --- a/content/ko/docs/tasks/configure-pod-container/pull-image-private-registry.md +++ b/content/ko/docs/tasks/configure-pod-container/pull-image-private-registry.md @@ -55,7 +55,7 @@ cat ~/.docker/config.json ## 기존의 도커 자격 증명을 기반으로 시크릿 생성하기 {#registry-secret-existing-credentials} 쿠버네티스 클러스터는 프라이빗 이미지를 받아올 때, 컨테이너 레지스트리에 인증하기 위하여 -`docker-registry` 타입의 시크릿을 사용한다. +`kubernetes.io/dockerconfigjson` 타입의 시크릿을 사용한다. 만약 이미 `docker login` 을 수행하였다면, 이 때 생성된 자격 증명을 쿠버네티스 클러스터로 복사할 수 있다. 
diff --git a/content/ko/docs/tasks/configure-pod-container/quality-service-pod.md b/content/ko/docs/tasks/configure-pod-container/quality-service-pod.md index 57e0a94b40..c644a8aff1 100644 --- a/content/ko/docs/tasks/configure-pod-container/quality-service-pod.md +++ b/content/ko/docs/tasks/configure-pod-container/quality-service-pod.md @@ -45,10 +45,14 @@ kubectl create namespace qos-example 파드에 Guaranteed QoS 클래스 할당을 위한 전제 조건은 다음과 같다. -* 파드의 초기화 컨테이너를 포함한 모든 컨테이너는 메모리 상한과 메모리 요청량을 가지고 있어야 하며, 이는 동일해야 한다. -* 파드의 초기화 컨테이너를 포함한 모든 컨테이너는 CPU 상한과 CPU 요청량을 가지고 있어야 하며, 이는 동일해야 한다. +* 파드 내 모든 컨테이너는 메모리 상한과 메모리 요청량을 가지고 있어야 한다. +* 파드 내 모든 컨테이너의 메모리 상한이 메모리 요청량과 일치해야 한다. +* 파드 내 모든 컨테이너는 CPU 상한과 CPU 요청량을 가지고 있어야 한다. +* 파드 내 모든 컨테이너의 CPU 상한이 CPU 요청량과 일치해야 한다. -이것은 하나의 컨테이너를 갖는 파드의 구성 파일이다. 해당 컨테이너는 메모리 상한과 +이러한 제약은 초기화 컨테이너와 앱 컨테이너 모두에 동일하게 적용된다. + +다음은 하나의 컨테이너를 갖는 파드의 구성 파일이다. 해당 컨테이너는 메모리 상한과 메모리 요청량을 갖고 있고, 200MiB로 동일하다. 해당 컨테이너는 CPU 상한과 CPU 요청량을 가지며, 700 milliCPU로 동일하다. {{< codenew file="pods/qos/qos-pod.yaml" >}} diff --git a/content/ko/docs/tasks/debug-application-cluster/_index.md b/content/ko/docs/tasks/debug-application-cluster/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md b/content/ko/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md index 3c8df08ede..4dde485c13 100644 --- a/content/ko/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md +++ b/content/ko/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md @@ -41,7 +41,7 @@ content_type: task kubectl apply -f https://k8s.io/examples/debug/termination.yaml - YAML 파일에 있는 `cmd` 와 `args` 필드에서 컨테이너가 10초 간 잠든 뒤에 + YAML 파일에 있는 `command` 와 `args` 필드에서 컨테이너가 10초 간 잠든 뒤에 "Sleep expired" 문자열을 `/dev/termination-log` 파일에 기록하는 것을 확인할 수 있다. 컨테이너는 "Sleep expired" 메시지를 기록한 후에 종료된다. 
diff --git a/content/ko/docs/tasks/manage-daemon/update-daemon-set.md b/content/ko/docs/tasks/manage-daemon/update-daemon-set.md index ec29259de7..50a3a6ad2b 100644 --- a/content/ko/docs/tasks/manage-daemon/update-daemon-set.md +++ b/content/ko/docs/tasks/manage-daemon/update-daemon-set.md @@ -1,41 +1,42 @@ --- + + title: 데몬셋(DaemonSet)에서 롤링 업데이트 수행 content_type: task weight: 10 --- - - - 이 페이지는 데몬셋에서 롤링 업데이트를 수행하는 방법을 보여준다. ## {{% heading "prerequisites" %}} -* 데몬셋 롤링 업데이트 기능은 쿠버네티스 버전 1.6 이상에서만 지원된다. - ## 데몬셋 업데이트 전략 데몬셋에는 두 가지 업데이트 전략 유형이 있다. -* OnDelete: `OnDelete` 업데이트 전략을 사용하여, 데몬셋 템플릿을 업데이트한 후, +* `OnDelete`: `OnDelete` 업데이트 전략을 사용하여, 데몬셋 템플릿을 업데이트한 후, 이전 데몬셋 파드를 수동으로 삭제할 때 *만* 새 데몬셋 파드가 생성된다. 이것은 쿠버네티스 버전 1.5 이하에서의 데몬셋의 동작과 동일하다. -* RollingUpdate: 기본 업데이트 전략이다. +* `RollingUpdate`: 기본 업데이트 전략이다. `RollingUpdate` 업데이트 전략을 사용하여, 데몬셋 템플릿을 업데이트한 후, 오래된 데몬셋 파드가 종료되고, 새로운 데몬셋 파드는 - 제어 방식으로 자동 생성된다. 전체 업데이트 프로세스 동안 데몬셋의 최대 하나의 파드가 각 노드에서 실행된다. + 제어 방식으로 자동 생성된다. 전체 업데이트 프로세스 동안 + 데몬셋의 최대 하나의 파드가 각 노드에서 실행된다. ## 롤링 업데이트 수행 데몬셋의 롤링 업데이트 기능을 사용하려면, `.spec.updateStrategy.type` 에 `RollingUpdate` 를 설정해야 한다. -[`.spec.updateStrategy.rollingUpdate.maxUnavailable`](/ko/docs/concepts/workloads/controllers/deployment/#최대-불가max-unavailable)(기본값은 1)과 -[`.spec.minReadySeconds`](/ko/docs/concepts/workloads/controllers/deployment/#최소-대기-시간초)(기본값은 0)으로 설정할 수도 있다. +[`.spec.updateStrategy.rollingUpdate.maxUnavailable`](/ko/docs/concepts/workloads/controllers/deployment/#최대-불가max-unavailable) +(기본값은 1)과 +[`.spec.minReadySeconds`](/ko/docs/concepts/workloads/controllers/deployment/#최소-대기-시간초) +(기본값은 0)으로 +설정할 수도 있다. ### `RollingUpdate` 업데이트 전략으로 데몬셋 생성 @@ -142,7 +143,7 @@ daemonset "fluentd-elasticsearch" successfully rolled out #### 일부 노드에 리소스가 부족하다 적어도 하나의 노드에서 새 데몬셋 파드를 스케줄링할 수 없어서 롤아웃이 -중단되었다. 노드에 [리소스가 부족](/docs/tasks/administer-cluster/out-of-resource/)할 때 +중단되었다. 노드에 [리소스가 부족](/docs/concepts/scheduling-eviction/node-pressure-eviction/)할 때 발생할 수 있다. 
이 경우, `kubectl get nodes` 의 출력 결과와 다음의 출력 결과를 비교하여 @@ -184,12 +185,7 @@ kubectl get pods -l name=fluentd-elasticsearch -o wide -n kube-system kubectl delete ds fluentd-elasticsearch -n kube-system ``` - - - ## {{% heading "whatsnext" %}} - -* [태스크: 데몬셋에서 롤백 - 수행](/ko/docs/tasks/manage-daemon/rollback-daemon-set/)을 참고한다. -* [개념: 기존 데몬셋 파드를 채택하기 위한 데몬셋 생성](/ko/docs/concepts/workloads/controllers/daemonset/)을 참고한다. +* [데몬셋에서 롤백 수행](/ko/docs/tasks/manage-daemon/rollback-daemon-set/)을 참고한다. +* [기존 데몬셋 파드를 채택하기 위한 데몬셋 생성](/ko/docs/concepts/workloads/controllers/daemonset/)을 참고한다. diff --git a/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md index ab442ebafd..9484882f30 100644 --- a/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md +++ b/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -180,7 +180,7 @@ spec: containers: - name: app image: my-app - volumeMount: + volumeMounts: - name: config mountPath: /config volumes: @@ -234,7 +234,7 @@ spec: containers: - image: my-app name: app - volumeMount: + volumeMounts: - mountPath: /config name: config volumes: @@ -327,7 +327,7 @@ spec: containers: - name: app image: my-app - volumeMount: + volumeMounts: - name: password mountPath: /secrets volumes: diff --git a/content/ko/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md b/content/ko/docs/tasks/network/customize-hosts-file-for-pods.md similarity index 98% rename from content/ko/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md rename to content/ko/docs/tasks/network/customize-hosts-file-for-pods.md index be39f13f21..1901f16726 100644 --- a/content/ko/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md +++ b/content/ko/docs/tasks/network/customize-hosts-file-for-pods.md @@ -1,6 +1,6 @@ --- title: HostAliases로 파드의 /etc/hosts 항목 추가하기 -content_type: concept 
+content_type: task weight: 60 min-kubernetes-server-version: 1.7 --- @@ -13,7 +13,7 @@ min-kubernetes-server-version: 1.7 HostAliases를 사용하지 않은 수정은 권장하지 않는데, 이는 호스트 파일이 kubelet에 의해 관리되고, 파드 생성/재시작 중에 덮어쓰여질 수 있기 때문이다. - + ## 기본 호스트 파일 내용 diff --git a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md index b4cc1b3be5..90d151e768 100644 --- a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md +++ b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md @@ -77,7 +77,7 @@ HorizontalPodAutoscaler는 보통 일련의 API 집합(`metrics.k8s.io`, 힙스터에서 메트릭 가져오기는 Kubernetes 1.11에서 사용 중단(deprecated)됨. {{< /note >}} -자세한 사항은 [메트릭 API를 위한 지원](#메트릭-API를-위한-지원)을 참조한다. +자세한 사항은 [메트릭 API를 위한 지원](#메트릭-api를-위한-지원)을 참조한다. 오토스케일러는 스케일 하위 리소스를 사용하여 상응하는 확장 가능 컨트롤러(예: 레플리케이션 컨트롤러, 디플로이먼트, 레플리케이션 셋)에 접근한다. 스케일은 레플리카의 개수를 동적으로 설정하고 각 현재 상태를 검사 할 수 있게 해주는 인터페이스이다. diff --git a/content/ko/docs/tasks/tls/certificate-rotation.md b/content/ko/docs/tasks/tls/certificate-rotation.md index 037f99d87a..eadec87b4f 100644 --- a/content/ko/docs/tasks/tls/certificate-rotation.md +++ b/content/ko/docs/tasks/tls/certificate-rotation.md @@ -27,10 +27,10 @@ kubelet은 쿠버네티스 API 인증을 위해 인증서를 사용한다. 기본적으로 이러한 인증서는 1년 만기로 발급되므로 너무 자주 갱신할 필요는 없다. -쿠버네티스 1.8은 [kubelet 인증서 +쿠버네티스는 [kubelet 인증서 갱신](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/)을 포함하며, 이 기능은 현재 인증서의 만료 시한이 임박한 경우, -새로운 키를 자동으로 생성하고 쿠버네티스 API에서 새로운 인증서를 요청하는 베타 기능이다. +새로운 키를 자동으로 생성하고 쿠버네티스 API에서 새로운 인증서를 요청하는 기능이다. 새로운 인증서를 사용할 수 있게 되면 쿠버네티스 API에 대한 연결을 인증하는데 사용된다. 
diff --git a/content/ko/docs/tasks/tools/_index.md b/content/ko/docs/tasks/tools/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/tasks/tools/included/install-kubectl-gcloud.md b/content/ko/docs/tasks/tools/included/install-kubectl-gcloud.md deleted file mode 100644 index f3deae981c..0000000000 --- a/content/ko/docs/tasks/tools/included/install-kubectl-gcloud.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "gcloud kubectl install" -description: "gcloud를 이용하여 kubectl을 설치하는 방법을 각 OS별 탭에 포함하기 위한 스니펫." -headless: true ---- - -Google Cloud SDK를 사용하여 kubectl을 설치할 수 있다. - -1. [Google Cloud SDK](https://cloud.google.com/sdk/)를 설치한다. - -1. `kubectl` 설치 명령을 실행한다. - - ```shell - gcloud components install kubectl - ``` - -1. 설치한 버전이 최신 버전인지 확인한다. - - ```shell - kubectl version --client - ``` \ No newline at end of file diff --git a/content/ko/docs/tasks/tools/included/kubectl-convert-overview.md b/content/ko/docs/tasks/tools/included/kubectl-convert-overview.md new file mode 100644 index 0000000000..cec8b3f55b --- /dev/null +++ b/content/ko/docs/tasks/tools/included/kubectl-convert-overview.md @@ -0,0 +1,11 @@ +--- +title: "kubectl-convert 개요" +description: >- + 특정 버전의 쿠버네티스 API로 작성된 매니페스트를 다른 버전으로 변환하는 + kubectl 플러그인. +headless: true +--- + +이것은 쿠버네티스 커맨드 라인 도구인 `kubectl`의 플러그인으로서, 특정 버전의 쿠버네티스 API로 작성된 매니페스트를 다른 버전으로 +변환할 수 있도록 한다. 이것은 매니페스트를 최신 쿠버네티스 릴리스의 사용 중단되지 않은 API로 마이그레이션하는 데 특히 유용하다. +더 많은 정보는 다음의 [사용 중단되지 않은 API로 마이그레이션](/docs/reference/using-api/deprecation-guide/#migrate-to-non-deprecated-apis)을 참고한다. 
diff --git a/content/ko/docs/tasks/tools/install-kubectl-linux.md b/content/ko/docs/tasks/tools/install-kubectl-linux.md index 39c442c939..77717372d1 100644 --- a/content/ko/docs/tasks/tools/install-kubectl-linux.md +++ b/content/ko/docs/tasks/tools/install-kubectl-linux.md @@ -22,7 +22,6 @@ card: - [리눅스에 curl을 사용하여 kubectl 바이너리 설치](#install-kubectl-binary-with-curl-on-linux) - [기본 패키지 관리 도구를 사용하여 설치](#install-using-native-package-management) - [다른 패키지 관리 도구를 사용하여 설치](#install-using-other-package-management) -- [리눅스에 Google Cloud SDK를 사용하여 설치](#install-on-linux-as-part-of-the-google-cloud-sdk) ### 리눅스에서 curl을 사용하여 kubectl 바이너리 설치 {#install-kubectl-binary-with-curl-on-linux} @@ -83,6 +82,7 @@ card: 대상 시스템에 root 접근 권한을 가지고 있지 않더라도, `~/.local/bin` 디렉터리에 kubectl을 설치할 수 있다. ```bash + chmod +x kubectl mkdir -p ~/.local/bin/kubectl mv ./kubectl ~/.local/bin/kubectl # 그리고 ~/.local/bin/kubectl을 $PATH에 추가 @@ -168,15 +168,11 @@ kubectl version --client {{< /tabs >}} -### 리눅스에 Google Cloud SDK를 사용하여 설치 {#install-on-linux-as-part-of-the-google-cloud-sdk} - -{{< include "included/install-kubectl-gcloud.md" >}} - ## kubectl 구성 확인 {{< include "included/verify-kubectl.md" >}} -## 선택적 kubectl 구성 +## 선택적 kubectl 구성 및 플러그인 ### 셸 자동 완성 활성화 @@ -189,6 +185,61 @@ kubectl은 Bash 및 Zsh에 대한 자동 완성 지원을 제공하므로 입력 {{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}} {{< /tabs >}} +### `kubectl convert` 플러그인 설치 + +{{< include "included/kubectl-convert-overview.md" >}} + +1. 다음 명령으로 최신 릴리스를 다운로드한다. + + ```bash + curl -LO https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert + ``` + +1. 바이너리를 검증한다. (선택 사항) + + kubectl-convert 체크섬(checksum) 파일을 다운로드한다. + + ```bash + curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" + ``` + + kubectl-convert 바이너리를 체크섬 파일을 통해 검증한다. + + ```bash + echo "$(}} + 동일한 버전의 바이너리와 체크섬을 다운로드한다. + {{< /note >}} + +1. 
kubectl-convert 설치 + + ```bash + sudo install -o root -g root -m 0755 kubectl-convert /usr/local/bin/kubectl-convert + ``` + +1. 플러그인이 정상적으로 설치되었는지 확인한다. + + ```shell + kubectl convert --help + ``` + + 에러가 출력되지 않는다면, 플러그인이 정상적으로 설치된 것이다. + ## {{% heading "whatsnext" %}} {{< include "included/kubectl-whats-next.md" >}} diff --git a/content/ko/docs/tasks/tools/install-kubectl-macos.md b/content/ko/docs/tasks/tools/install-kubectl-macos.md index 614134da8a..90fefb0c3a 100644 --- a/content/ko/docs/tasks/tools/install-kubectl-macos.md +++ b/content/ko/docs/tasks/tools/install-kubectl-macos.md @@ -22,7 +22,6 @@ card: - [macOS에서 curl을 사용하여 kubectl 바이너리 설치](#install-kubectl-binary-with-curl-on-macos) - [macOS에서 Homebrew를 사용하여 설치](#install-with-homebrew-on-macos) - [macOS에서 Macports를 사용하여 설치](#install-with-macports-on-macos) -- [macOS에서 Google Cloud SDK를 사용하여 설치](#install-on-macos-as-part-of-the-google-cloud-sdk) ### macOS에서 curl을 사용하여 kubectl 바이너리 설치 {#install-kubectl-binary-with-curl-on-macos} @@ -99,10 +98,14 @@ card: 1. kubectl 바이너리를 시스템 `PATH` 의 파일 위치로 옮긴다. ```bash - sudo mv ./kubectl /usr/local/bin/kubectl && \ + sudo mv ./kubectl /usr/local/bin/kubectl sudo chown root: /usr/local/bin/kubectl ``` + {{< note >}} + `PATH` 환경 변수 안에 `/usr/local/bin` 이 있는지 확인한다. + {{< /note >}} + 1. 설치한 버전이 최신 버전인지 확인한다. ```bash @@ -148,16 +151,11 @@ macOS에서 [Macports](https://macports.org/) 패키지 관리자를 사용하 kubectl version --client ``` - -### Google Cloud SDK를 사용하여 설치 {#install-on-macos-as-part-of-the-google-cloud-sdk} - -{{< include "included/install-kubectl-gcloud.md" >}} - ## kubectl 구성 확인 {{< include "included/verify-kubectl.md" >}} -## 선택적 kubectl 구성 +## 선택적 kubectl 구성 및 플러그인 ### 셸 자동 완성 활성화 @@ -170,6 +168,82 @@ kubectl은 Bash 및 Zsh에 대한 자동 완성 지원을 제공하므로 입력 {{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}} {{< /tabs >}} +### `kubectl convert` 플러그인 설치 + +{{< include "included/kubectl-convert-overview.md" >}} + +1. 다음 명령으로 최신 릴리스를 다운로드한다. 
+ + {{< tabs name="download_convert_binary_macos" >}} + {{< tab name="Intel" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl-convert" + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl-convert" + {{< /tab >}} + {{< /tabs >}} + +1. 바이너리를 검증한다. (선택 사항) + + kubectl-convert 체크섬(checksum) 파일을 다운로드한다. + + {{< tabs name="download_convert_checksum_macos" >}} + {{< tab name="Intel" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl-convert.sha256" + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl-convert.sha256" + {{< /tab >}} + {{< /tabs >}} + + kubectl-convert 바이너리를 체크섬 파일을 통해 검증한다. + + ```bash + echo "$(}} + 동일한 버전의 바이너리와 체크섬을 다운로드한다. + {{< /note >}} + +1. kubectl-convert 바이너리를 실행 가능하게 한다. + + ```bash + chmod +x ./kubectl-convert + ``` + +1. kubectl-convert 바이너리를 시스템 `PATH` 의 파일 위치로 옮긴다. + + ```bash + sudo mv ./kubectl /usr/local/bin/kubectl-convert + sudo chown root: /usr/local/bin/kubectl-convert + ``` + + {{< note >}} + `PATH` 환경 변수 안에 `/usr/local/bin` 이 있는지 확인한다. + {{< /note >}} + +1. 플러그인이 정상적으로 설치되었는지 확인한다. + + ```shell + kubectl convert --help + ``` + + 에러가 출력되지 않는다면, 플러그인이 정상적으로 설치된 것이다. 
+ ## {{% heading "whatsnext" %}} {{< include "included/kubectl-whats-next.md" >}} diff --git a/content/ko/docs/tasks/tools/install-kubectl-windows.md b/content/ko/docs/tasks/tools/install-kubectl-windows.md index 23b16e3da6..ab5e7ca05d 100644 --- a/content/ko/docs/tasks/tools/install-kubectl-windows.md +++ b/content/ko/docs/tasks/tools/install-kubectl-windows.md @@ -21,7 +21,6 @@ card: - [윈도우에서 curl을 사용하여 kubectl 바이너리 설치](#install-kubectl-binary-with-curl-on-windows) - [Chocolatey 또는 Scoop을 사용하여 윈도우에 설치](#install-on-windows-using-chocolatey-or-scoop) -- [윈도우에서 Google Cloud SDK를 사용하여 설치](#install-on-windows-as-part-of-the-google-cloud-sdk) ### 윈도우에서 curl을 사용하여 kubectl 바이너리 설치 {#install-kubectl-binary-with-curl-on-windows} @@ -31,7 +30,7 @@ card: 또는 `curl` 을 설치한 경우, 다음 명령을 사용한다. ```powershell - curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe + curl -LO "https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe" ``` {{< note >}} @@ -43,7 +42,7 @@ card: kubectl 체크섬 파일을 다운로드한다. ```powershell - curl -LO https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe.sha256 + curl -LO "https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe.sha256" ``` kubectl 바이너리를 체크섬 파일을 통해 검증한다. @@ -127,15 +126,11 @@ card: 메모장과 같은 텍스트 편집기를 선택하여 구성 파일을 편집한다. {{< /note >}} -### 윈도우에서 Google Cloud SDK를 사용하여 설치 {#install-on-windows-as-part-of-the-google-cloud-sdk} - -{{< include "included/install-kubectl-gcloud.md" >}} - ## kubectl 구성 확인 {{< include "included/verify-kubectl.md" >}} -## 선택적 kubectl 구성 +## 선택적 kubectl 구성 및 플러그인 ### 셸 자동 완성 활성화 @@ -145,6 +140,49 @@ kubectl은 Bash 및 Zsh에 대한 자동 완성 지원을 제공하므로 입력 {{< include "included/optional-kubectl-configs-zsh.md" >}} +### `kubectl convert` 플러그인 설치 + +{{< include "included/kubectl-convert-overview.md" >}} + +1. 다음 명령으로 최신 릴리스를 다운로드한다. 
+ + ```powershell + curl -LO "https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl-convert.exe" + ``` + +1. 바이너리를 검증한다. (선택 사항) + + kubectl-convert 체크섬(checksum) 파일을 다운로드한다. + + ```powershell + curl -LO "https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubectl-convert.exe.sha256" + ``` + + kubectl-convert 바이너리를 체크섬 파일을 통해 검증한다. + + - 수동으로 `CertUtil` 의 출력과 다운로드한 체크섬 파일을 비교하기 위해서 커맨드 프롬프트를 사용한다. + + ```cmd + CertUtil -hashfile kubectl-convert.exe SHA256 + type kubectl-convert.exe.sha256 + ``` + + - `-eq` 연산자를 통해 `True` 또는 `False` 결과를 얻는 자동 검증을 위해서 PowerShell을 사용한다. + + ```powershell + $($(CertUtil -hashfile .\kubectl-convert.exe SHA256)[1] -replace " ", "") -eq $(type .\kubectl-convert.exe.sha256) + ``` + +1. 바이너리를 `PATH` 가 설정된 디렉터리에 추가한다. + +1. 플러그인이 정상적으로 설치되었는지 확인한다. + + ```shell + kubectl convert --help + ``` + + 에러가 출력되지 않는다면, 플러그인이 정상적으로 설치된 것이다. + ## {{% heading "whatsnext" %}} {{< include "included/kubectl-whats-next.md" >}} diff --git a/content/ko/docs/tutorials/_index.md b/content/ko/docs/tutorials/_index.md index 8d3fd54010..093208f22d 100644 --- a/content/ko/docs/tutorials/_index.md +++ b/content/ko/docs/tutorials/_index.md @@ -35,7 +35,7 @@ content_type: concept * [외부 IP 주소를 노출하여 클러스터의 애플리케이션에 접속하기](/ko/docs/tutorials/stateless-application/expose-external-ip-address/) -* [예시: MongoDB를 사용한 PHP 방명록 애플리케이션 배포하기](/ko/docs/tutorials/stateless-application/guestbook/) +* [예시: Redis를 사용한 PHP 방명록 애플리케이션 배포하기](/ko/docs/tutorials/stateless-application/guestbook/) ## 상태 유지가 필요한(stateful) 애플리케이션 diff --git a/content/ko/docs/tutorials/configuration/_index.md b/content/ko/docs/tutorials/configuration/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md b/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md index c1b21d1404..fb1ac922fc 100644 --- a/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md +++ 
b/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md @@ -55,7 +55,7 @@ EOF ```shell kubectl apply -f example-redis-config.yaml -kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/pods/config/redis-pod.yaml ``` Redis 파드 매니페스트의 내용을 검토하고 다음의 사항을 염두에 둔다. @@ -206,7 +206,7 @@ kubectl exec -it redis -- redis-cli ```shell kubectl delete pod redis -kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/pods/config/redis-pod.yaml ``` 이제 마지막으로 설정값을 다시 확인해 본다. diff --git a/content/ko/docs/tutorials/hello-minikube.md b/content/ko/docs/tutorials/hello-minikube.md index eaa81c3809..091a1c684f 100644 --- a/content/ko/docs/tutorials/hello-minikube.md +++ b/content/ko/docs/tutorials/hello-minikube.md @@ -217,7 +217,7 @@ minikube 툴은 활성화하거나 비활성화할 수 있고 로컬 쿠버네 storage-provisioner-gluster: disabled ``` -2. 한 애드온을 활성화 한다. 예를 들어 `metrics-server` +2. 애드온을 활성화 한다. 여기서는 `metrics-server`를 예시로 사용한다. ```shell minikube addons enable metrics-server @@ -226,7 +226,7 @@ minikube 툴은 활성화하거나 비활성화할 수 있고 로컬 쿠버네 다음과 유사하게 출력된다. ``` - metrics-server was successfully enabled + The 'metrics-server' addon is enabled ``` 3. 생성한 파드와 서비스를 확인한다. diff --git a/content/ko/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html b/content/ko/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html index d9d621d867..fcad9b42b3 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html +++ b/content/ko/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html @@ -25,8 +25,8 @@ weight: 20
    diff --git a/content/ko/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html b/content/ko/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html index 2cf9daa6e1..ce5be2cfc0 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html +++ b/content/ko/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html @@ -37,8 +37,9 @@ weight: 20 diff --git a/content/ko/docs/tutorials/kubernetes-basics/explore/explore-interactive.html b/content/ko/docs/tutorials/kubernetes-basics/explore/explore-interactive.html index f82846a390..e3b67a1dae 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/explore/explore-interactive.html +++ b/content/ko/docs/tutorials/kubernetes-basics/explore/explore-interactive.html @@ -29,8 +29,9 @@ weight: 20 diff --git a/content/ko/docs/tutorials/kubernetes-basics/explore/explore-intro.html b/content/ko/docs/tutorials/kubernetes-basics/explore/explore-intro.html index 2e34002571..e218222010 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/explore/explore-intro.html +++ b/content/ko/docs/tutorials/kubernetes-basics/explore/explore-intro.html @@ -74,11 +74,11 @@ weight: 10

    노드

    -

    파드는 언제나 노드 상에서 동작한다. 노드는 쿠버네티스에서 워커 머신을 말하며 클러스터에 따라 가상 또는 물리 머신일 수 있다. 각 노드는 마스터에 의해 관리된다. 하나의 노드는 여러 개의 파드를 가질 수 있고, 쿠버네티스 마스터는 클러스터 내 노드를 통해서 파드에 대한 스케쥴링을 자동으로 처리한다.

    +

    파드는 언제나 노드 상에서 동작한다. 노드는 쿠버네티스에서 워커 머신을 말하며 클러스터에 따라 가상 또는 물리 머신일 수 있다. 각 노드는 컨트롤 플레인에 의해 관리된다. 하나의 노드는 여러 개의 파드를 가질 수 있고, 쿠버네티스 컨트롤 플레인은 클러스터 내 노드를 통해서 파드에 대한 스케쥴링을 자동으로 처리한다. 컨트롤 플레인의 자동 스케줄링은 각 노드의 사용 가능한 리소스를 모두 고려한다.

    모든 쿠버네티스 노드는 최소한 다음과 같이 동작한다.

      -
    • Kubelet은, 쿠버네티스 마스터와 노드 간 통신을 책임지는 프로세스이며, 하나의 머신 상에서 동작하는 파드와 컨테이너를 관리한다.
    • +
    • Kubelet은, 쿠버네티스 컨트롤 플레인과 노드 간 통신을 책임지는 프로세스이며, 하나의 머신 상에서 동작하는 파드와 컨테이너를 관리한다.
    • 컨테이너 런타임(도커와 같은)은 레지스트리에서 컨테이너 이미지를 가져와 묶여 있는 것을 풀고 애플리케이션을 동작시키는 책임을 맡는다.
    diff --git a/content/ko/docs/tutorials/kubernetes-basics/expose/expose-interactive.html b/content/ko/docs/tutorials/kubernetes-basics/expose/expose-interactive.html index bfbb0eb1c8..09dde78cb8 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/expose/expose-interactive.html +++ b/content/ko/docs/tutorials/kubernetes-basics/expose/expose-interactive.html @@ -26,7 +26,9 @@ weight: 20
    diff --git a/content/ko/docs/tutorials/kubernetes-basics/scale/scale-interactive.html b/content/ko/docs/tutorials/kubernetes-basics/scale/scale-interactive.html index 31c1d859a2..22b5d41342 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/scale/scale-interactive.html +++ b/content/ko/docs/tutorials/kubernetes-basics/scale/scale-interactive.html @@ -26,8 +26,9 @@ weight: 20
    diff --git a/content/ko/docs/tutorials/kubernetes-basics/update/update-interactive.html b/content/ko/docs/tutorials/kubernetes-basics/update/update-interactive.html index 24da082b89..4038e3b358 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/update/update-interactive.html +++ b/content/ko/docs/tutorials/kubernetes-basics/update/update-interactive.html @@ -26,7 +26,8 @@ weight: 20 diff --git a/content/ko/docs/tutorials/services/source-ip.md b/content/ko/docs/tutorials/services/source-ip.md index 6874d6669b..7300b06615 100644 --- a/content/ko/docs/tutorials/services/source-ip.md +++ b/content/ko/docs/tutorials/services/source-ip.md @@ -6,10 +6,10 @@ min-kubernetes-server-version: v1.5 -쿠버네티스 클러스터에서 실행 중인 애플리케이션은 서로 간에 외부 세계와 -서비스 추상화를 통해 찾고 통신한다. 이 문서는 -다른 종류의 서비스로 보내진 패킷의 소스 IP 주소에 어떤 일이 벌어지는지와 -이 동작을 요구에 따라 토글할 수 있는지 설명한다. +쿠버네티스 클러스터에서 실행 중인 애플리케이션은 서비스 추상화를 통해서 +서로를, 그리고 외부 세계를 찾고 통신한다. 이 문서는 +다른 종류의 서비스로 전송된 패킷의 소스 IP에 어떤 일이 벌어지는지와 +이 동작을 필요에 따라 어떻게 전환할 수 있는지 설명한다. @@ -29,16 +29,16 @@ min-kubernetes-server-version: v1.5 : 네트워크 주소 변환 [소스 NAT](https://en.wikipedia.org/wiki/Network_address_translation#SNAT) -: 패킷 상의 소스 IP 주소를 변경함, 보통 노드의 IP 주소 +: 패킷 상의 소스 IP 주소를 변경하는 것. 이 페이지에서는 일반적으로 노드 IP 주소로의 변경을 의미함. [대상 NAT](https://en.wikipedia.org/wiki/Network_address_translation#DNAT) -: 패킷 상의 대상 IP 주소를 변경함, 보통 파드의 IP 주소 +: 패킷 상의 대상 IP 주소를 변경하는 것. 이 페이지에서는 일반적으로 {{< glossary_tooltip term_id="pod" text="파드" >}} IP 주소로의 변경을 의미함. [VIP](/ko/docs/concepts/services-networking/service/#가상-ip와-서비스-프록시) -: 가상 IP 주소, 모든 쿠버네티스 서비스에 할당된 것 같은 +: 쿠버네티스의 모든 {{< glossary_tooltip text="서비스" term_id="service" >}}에 할당되어 있는 것과 같은, 가상 IP 주소. [Kube-proxy](/ko/docs/concepts/services-networking/service/#가상-ip와-서비스-프록시) -: 네트워크 데몬으로 모든 노드에서 서비스 VIP 관리를 관리한다. +: 모든 노드에서 서비스 VIP 관리를 조율하는 네트워크 데몬. ### 전제 조건 @@ -80,7 +80,7 @@ deployment.apps/source-ip-app created ```console kubectl get nodes ``` -출력은 다음과 유사하다 +출력은 다음과 유사하다. 
``` NAME STATUS ROLES AGE VERSION kubernetes-node-6jst Ready 2h v1.13.0 @@ -220,7 +220,6 @@ graph LR; class client plain; {{}} - 이를 피하기 위해 쿠버네티스는 [클라이언트 소스 IP 주소를 보존](/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip)하는 기능이 있다. `service.spec.externalTrafficPolicy` 의 값을 `Local` 로 하면 @@ -439,5 +438,5 @@ kubectl delete deployment source-ip-app ## {{% heading "whatsnext" %}} -* [서비스를 통한 애플리케이션 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/)에 더 자세히 본다. -* 어떻게 [외부 로드밸런서 생성](/docs/tasks/access-application-cluster/create-external-load-balancer/)하는지 본다. +* [서비스를 통한 애플리케이션 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/)를 더 자세히 본다. +* [외부 로드밸런서 생성](/docs/tasks/access-application-cluster/create-external-load-balancer/) 방법을 본다. diff --git a/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md b/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md index 8b0a258ae6..ee7cccb70d 100644 --- a/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md @@ -16,7 +16,7 @@ weight: 10 튜토리얼을 시작하기 전에 다음의 쿠버네티스 컨셉에 대해 익숙해야 한다. -* [파드](/docs/user-guide/pods/single-container/) +* [파드](/ko/docs/concepts/workloads/pods/) * [클러스터 DNS(Cluster DNS)](/ko/docs/concepts/services-networking/dns-pod-service/) * [헤드리스 서비스(Headless Services)](/ko/docs/concepts/services-networking/service/#헤드리스-headless-서비스) * [퍼시스턴트볼륨(PersistentVolumes)](/ko/docs/concepts/storage/persistent-volumes/) @@ -833,11 +833,11 @@ kubectl get pods -w -l app=nginx 다른 터미널에서는 스테이트풀셋을 지우기 위해 [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands/#delete) 명령어를 이용하자. -이 명령어에 `--cascade=false` 파라미터가 추가되었다. +이 명령어에 `--cascade=orphan` 파라미터가 추가되었다. 이 파라미터는 쿠버네티스에 스테이트풀셋만 삭제하고 그에 속한 파드는 지우지 않도록 요청한다. 
```shell -kubectl delete statefulset web --cascade=false +kubectl delete statefulset web --cascade=orphan ``` ``` statefulset.apps "web" deleted @@ -953,7 +953,7 @@ kubectl get pods -w -l app=nginx ``` 다른 터미널창에서 스테이트풀셋을 다시 지우자. 이번에는 -`--cascade=false` 파라미터를 생략하자. +`--cascade=orphan` 파라미터를 생략하자. ```shell kubectl delete statefulset web diff --git a/content/ko/docs/tutorials/stateful-application/cassandra.md b/content/ko/docs/tutorials/stateful-application/cassandra.md index 7b1888a15e..3ebb7ec387 100644 --- a/content/ko/docs/tutorials/stateful-application/cassandra.md +++ b/content/ko/docs/tutorials/stateful-application/cassandra.md @@ -7,7 +7,7 @@ weight: 30 -이 튜토리얼은 쿠버네티스에서 [아파치 카산드라](http://cassandra.apache.org/)를 실행하는 방법을 소개한다. +이 튜토리얼은 쿠버네티스에서 [아파치 카산드라](https://cassandra.apache.org/)를 실행하는 방법을 소개한다. 데이터베이스인 카산드라는 데이터 내구성을 제공하기 위해 퍼시스턴트 스토리지가 필요하다(애플리케이션 _상태_). 이 예제에서 사용자 지정 카산드라 시드 공급자는 카산드라가 클러스터에 가입할 때 카산드라가 인스턴스를 검색할 수 있도록 한다. @@ -266,7 +266,7 @@ kubectl apply -f cassandra-statefulset.yaml 이 튜토리얼의 *파드* 는 구글의 [컨테이너 레지스트리](https://cloud.google.com/container-registry/docs/)에 [`gcr.io/google-samples/cassandra:v13`](https://github.com/kubernetes/examples/blob/master/cassandra/image/Dockerfile) 이미지를 이용한다. -이 도커 이미지는 [debian-base](https://github.com/kubernetes/kubernetes/tree/master/build/debian-base)에 +이 도커 이미지는 [debian-base](https://github.com/kubernetes/release/tree/master/images/build/debian-base)에 기반하였고 OpenJDK 8을 포함한다. 이 이미지는 아파치 데비안 리포의 표준 카산드라 설치본을 포함한다. 
diff --git a/content/ko/docs/tutorials/stateless-application/_index.md b/content/ko/docs/tutorials/stateless-application/_index.md old mode 100755 new mode 100644 diff --git a/content/ko/docs/tutorials/stateless-application/guestbook.md b/content/ko/docs/tutorials/stateless-application/guestbook.md index 1a984319d8..1a5e4a6079 100644 --- a/content/ko/docs/tutorials/stateless-application/guestbook.md +++ b/content/ko/docs/tutorials/stateless-application/guestbook.md @@ -1,5 +1,6 @@ --- -title: "예시: MongoDB를 사용한 PHP 방명록 애플리케이션 배포하기" +title: "예시: Redis를 사용한 PHP 방명록 애플리케이션 배포하기" + content_type: tutorial @@ -7,310 +8,411 @@ weight: 20 card: name: tutorials weight: 30 - title: "상태를 유지하지 않는 예제: MongoDB를 사용한 PHP 방명록" + title: "상태를 유지하지 않는 예제: Redis를 사용한 PHP 방명록" min-kubernetes-server-version: v1.14 +source: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook --- -이 튜토리얼에서는 쿠버네티스와 [Docker](https://www.docker.com/)를 사용하여 간단한 _(운영 준비가 아닌)_ 멀티 티어 웹 애플리케이션을 빌드하고 배포하는 방법을 보여준다. 이 예제는 다음과 같은 구성으로 이루어져 있다. +이 튜토리얼에서는 쿠버네티스와 [Docker](https://www.docker.com/)를 사용하여 간단한 +_(운영 수준이 아닌)_ 멀티 티어 웹 애플리케이션을 빌드하고 배포하는 방법을 보여준다. +이 예제는 다음과 같은 구성으로 +이루어져 있다. -* 방명록을 저장하는 단일 인스턴스 [MongoDB](https://www.mongodb.com/) +* 방명록 항목을 저장하기 위한 단일 인스턴스 [Redis](https://www.redis.com/) * 여러 개의 웹 프론트엔드 인스턴스 ## {{% heading "objectives" %}} -* Mongo 데이터베이스를 시작 -* 방명록 프론트엔드를 시작 +* Redis 리더를 실행 +* 2개의 Redis 팔로워를 실행 +* 방명록 프론트엔드를 실행 * 프론트엔드 서비스를 노출하고 확인 -* 정리 하기 - +* 정리하기 ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - -## Mongo 데이터베이스를 실행 +## Redis 데이터베이스를 실행 -방명록 애플리케이션은 MongoDB를 사용해서 데이터를 저장한다. +방명록 애플리케이션은 Redis를 사용하여 데이터를 저장한다. -### Mongo 디플로이먼트를 생성하기 +### Redis 디플로이먼트를 생성하기 -아래의 매니페스트 파일은 단일 복제본 Mongo 파드를 실행하는 디플로이먼트 컨트롤러를 지정한다. +아래의 매니페스트 파일은 단일 복제본 Redis 파드를 실행하는 디플로이먼트 컨트롤러에 대한 명세를 담고 있다. -{{< codenew file="application/guestbook/mongo-deployment.yaml" >}} +{{< codenew file="application/guestbook/redis-leader-deployment.yaml" >}} 1. 
매니페스트 파일을 다운로드한 디렉터리에서 터미널 창을 시작한다. -1. `mongo-deployment.yaml` 파일을 통해 MongoDB 디플로이먼트에 적용한다. +1. `redis-leader-deployment.yaml` 파일을 이용하여 Redis 디플로이먼트를 생성한다. - + - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-deployment.yaml - ``` + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-leader-deployment.yaml + ``` +1. 파드의 목록을 질의하여 Redis 파드가 실행 중인지 확인한다. -1. 파드의 목록을 질의하여 MongoDB 파드가 실행 중인지 확인한다. + ```shell + kubectl get pods + ``` - ```shell - kubectl get pods - ``` + 결과는 아래와 같은 형태로 나타난다. - 결과는 아래와 같은 형태로 나타난다. + ``` + NAME READY STATUS RESTARTS AGE + redis-leader-fb76b4755-xjr2n 1/1 Running 0 13s + ``` - ```shell - NAME READY STATUS RESTARTS AGE - mongo-5cfd459dd4-lrcjb 1/1 Running 0 28s - ``` +2. Redis 리더 파드의 로그를 보려면 다음 명령어를 실행한다. -2. MongoDB 파드에서 로그를 보려면 다음 명령어를 실행한다. + ```shell + kubectl logs -f deployment/redis-leader + ``` - ```shell - kubectl logs -f deployment/mongo - ``` +### Redis 리더 서비스 생성하기 -### MongoDB 서비스 생성하기 +방명록 애플리케이션에서 데이터를 쓰려면 Redis와 통신해야 한다. +Redis 파드로 트래픽을 프록시하려면 [서비스](/ko/docs/concepts/services-networking/service/)를 생성해야 한다. +서비스는 파드에 접근하기 위한 정책을 +정의한다. -방명록 애플리케이션에서 데이터를 쓰려면 MongoDB와 통신해야 한다. MongoDB 파드로 트래픽을 프록시하려면 [서비스](/ko/docs/concepts/services-networking/service/)를 적용해야 한다. 서비스는 파드에 접근하기 위한 정책을 정의한다. +{{< codenew file="application/guestbook/redis-leader-service.yaml" >}} -{{< codenew file="application/guestbook/mongo-service.yaml" >}} +1. `redis-leader-service.yaml` 파일을 이용하여 Redis 서비스를 실행한다. -1. `mongo-service.yaml` 파일을 통해 MongoDB 서비스에 적용한다. + - + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-leader-service.yaml + ``` - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-service.yaml - ``` +1. 서비스의 목록을 질의하여 Redis 서비스가 실행 중인지 확인한다. + ```shell + kubectl get service + ``` -1. 서비스의 목록을 질의하여 MongoDB 서비스가 실행 중인지 확인한다. + 결과는 아래와 같은 형태로 나타난다. - ```shell - kubectl get service - ``` - - 결과는 아래와 같은 형태로 나타난다. 
- - ```shell - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes ClusterIP 10.0.0.1 443/TCP 1m - mongo ClusterIP 10.0.0.151 27017/TCP 8s - ``` + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + kubernetes ClusterIP 10.0.0.1 443/TCP 1m + redis-leader ClusterIP 10.103.78.24 6379/TCP 16s + ``` {{< note >}} -이 매니페스트 파일은 이전에 정의된 레이블과 일치하는 레이블 집합을 가진 `mongo`라는 서비스를 생성하므로, 서비스는 네트워크 트래픽을 MongoDB 파드로 라우팅한다. +이 매니페스트 파일은 이전에 정의된 레이블과 일치하는 레이블 집합을 가진 +`redis-leader`라는 서비스를 생성하므로, 서비스는 네트워크 트래픽을 +Redis 파드로 라우팅한다. {{< /note >}} +### Redis 팔로워 구성하기 + +Redis 리더는 단일 파드이지만, 몇 개의 Redis 팔로워 또는 복제본을 추가하여 +가용성을 높이고 트래픽 요구를 충족할 수 있다. + +{{< codenew file="application/guestbook/redis-follower-deployment.yaml" >}} + +1. `redis-follower-deployment.yaml` 파일을 이용하여 Redis 서비스를 실행한다. + + + + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-follower-deployment.yaml + ``` + +1. 파드의 목록을 질의하여 2개의 Redis 팔로워 레플리카가 실행 중인지 확인한다. + + ```shell + kubectl get pods + ``` + + 결과는 아래와 같은 형태로 나타난다. + + ``` + NAME READY STATUS RESTARTS AGE + redis-follower-dddfbdcc9-82sfr 1/1 Running 0 37s + redis-follower-dddfbdcc9-qrt5k 1/1 Running 0 38s + redis-leader-fb76b4755-xjr2n 1/1 Running 0 11m + ``` + +### Redis 팔로워 서비스 생성하기 + +방명록 애플리케이션이 데이터를 읽으려면 Redis 팔로워와 통신해야 한다. +Redis 팔로워를 발견 가능(discoverable)하게 만드려면, 새로운 +[서비스](/ko/docs/concepts/services-networking/service/)를 구성해야 한다. + +{{< codenew file="application/guestbook/redis-follower-service.yaml" >}} + +1. `redis-follower-service.yaml` 파일을 이용하여 Redis 서비스를 실행한다. + + + + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-follower-service.yaml + ``` + +1. 서비스의 목록을 질의하여 Redis 서비스가 실행 중인지 확인한다. + + ```shell + kubectl get service + ``` + + 결과는 아래와 같은 형태로 나타난다. 
+ + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + kubernetes ClusterIP 10.96.0.1 443/TCP 3d19h + redis-follower ClusterIP 10.110.162.42 6379/TCP 9s + redis-leader ClusterIP 10.103.78.24 6379/TCP 6m10s + ``` + +{{< note >}} +이 매니페스트 파일은 이전에 정의된 레이블과 일치하는 레이블 집합을 가진 +`redis-follower`라는 서비스를 생성하므로, 서비스는 네트워크 트래픽을 +Redis 파드로 라우팅한다. +{{< /note >}} ## 방명록 프론트엔드를 설정하고 노출하기 -방명록 애플리케이션에는 PHP로 작성된 HTTP 요청을 처리하는 웹 프론트엔드가 있다. 방명록 항목들을 저장하기 위해 `mongo` 서비스에 연결하도록 구성 한다. +방명록을 위한 Redis 저장소를 구성하고 실행했으므로, 이제 방명록 웹 서버를 실행한다. +Redis 팔로워와 마찬가지로, 프론트엔드는 쿠버네티스 디플로이먼트(Deployment)를 +사용하여 배포된다. + +방명록 앱은 PHP 프론트엔드를 사용한다. DB에 대한 요청이 읽기인지 쓰기인지에 따라, +Redis 팔로워 또는 리더 서비스와 통신하도록 구성된다. 프론트엔드는 JSON 인터페이스를 +노출하고, +jQuery-Ajax 기반 UX를 제공한다. ### 방명록 프론트엔드의 디플로이먼트 생성하기 {{< codenew file="application/guestbook/frontend-deployment.yaml" >}} -1. `frontend-deployment.yaml` 파일을 통해 프론트엔드의 디플로이먼트에 적용한다. +1. `frontend-deployment.yaml` 파일을 이용하여 프론트엔드 디플로이먼트를 생성한다. - - - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-deployment.yaml - ``` + + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-deployment.yaml + ``` 1. 파드의 목록을 질의하여 세 개의 프론트엔드 복제본이 실행되고 있는지 확인한다. - ```shell - kubectl get pods -l app.kubernetes.io/name=guestbook -l app.kubernetes.io/component=frontend - ``` + ```shell + kubectl get pods -l app=guestbook -l tier=frontend + ``` - 결과는 아래와 같은 형태로 나타난다. + 결과는 아래와 같은 형태로 나타난다. - ``` - NAME READY STATUS RESTARTS AGE - frontend-3823415956-dsvc5 1/1 Running 0 54s - frontend-3823415956-k22zn 1/1 Running 0 54s - frontend-3823415956-w9gbt 1/1 Running 0 54s - ``` + ``` + NAME READY STATUS RESTARTS AGE + frontend-85595f5bf9-5tqhb 1/1 Running 0 47s + frontend-85595f5bf9-qbzwm 1/1 Running 0 47s + frontend-85595f5bf9-zchwc 1/1 Running 0 47s + ``` ### 프론트엔드 서비스 생성하기 -서비스의 기본 유형은 [ClusterIP](/ko/docs/concepts/services-networking/service/#publishing-services-service-types)이기 때문에 적용한 `mongo` 서비스는 컨테이너 클러스터 내에서만 접근할 수 있다. 
`ClusterIP`는 서비스가 가리키는 파드 집합에 대한 단일 IP 주소를 제공한다. 이 IP 주소는 클러스터 내에서만 접근할 수 있다. +서비스의 기본 유형은 +[ClusterIP](/ko/docs/concepts/services-networking/service/#publishing-services-service-types) +이기 때문에 생성한 `Redis` 서비스는 컨테이너 클러스터 내에서만 접근할 수 있다. +`ClusterIP`는 서비스가 가리키는 파드 집합에 대한 +단일 IP 주소를 제공한다. 이 IP 주소는 클러스터 내에서만 접근할 수 있다. -게스트가 방명록에 접근할 수 있도록 하려면, 외부에서 볼 수 있도록 프론트엔드 서비스를 구성해야 한다. 그렇게 하면 클라이언트가 쿠버네티스 클러스터 외부에서 서비스를 요청할 수 있다. 그러나 쿠버네티스 사용자는 `ClusterIP`를 사용하더라도 `kubectl port-forward`를 사용해서 서비스에 접근할 수 있다. +게스트가 방명록에 접근할 수 있도록 하려면, 외부에서 볼 수 있도록 프론트엔드 +서비스를 구성해야 한다. 그렇게 하면 클라이언트가 쿠버네티스 클러스터 외부에서 +서비스를 요청할 수 있다. 그러나 쿠버네티스 사용자는 `ClusterIP`를 +사용하더라도 `kubectl port-forward`를 사용해서 서비스에 +접근할 수 있다. {{< note >}} -Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우드 공급자는 외부 로드 밸런서를 지원한다. 클라우드 공급자가 로드 밸런서를 지원하고 이를 사용하려면 `type : LoadBalancer`의 주석을 제거해야 한다. +Google Compute Engine 또는 Google Kubernetes Engine +과 같은 일부 클라우드 공급자는 외부 로드 밸런서를 지원한다. 클라우드 공급자가 로드 +밸런서를 지원하고 이를 사용하려면 `type : LoadBalancer`의 주석을 제거해야 한다. {{< /note >}} {{< codenew file="application/guestbook/frontend-service.yaml" >}} -1. `frontend-service.yaml` 파일을 통해 프론트엔드 서비스에 적용시킨다. +1. `frontend-service.yaml` 파일을 이용하여 프론트엔드 서비스를 실행한다. - - - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-service.yaml - ``` + + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-service.yaml + ``` 1. 서비스의 목록을 질의하여 프론트엔드 서비스가 실행 중인지 확인한다. - ```shell - kubectl get services - ``` + ```shell + kubectl get services + ``` - 결과는 아래와 같은 형태로 나타난다. + 결과는 아래와 같은 형태로 나타난다. 
- ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend ClusterIP 10.0.0.112 80/TCP 6s - kubernetes ClusterIP 10.0.0.1 443/TCP 4m - mongo ClusterIP 10.0.0.151 6379/TCP 2m - ``` + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + frontend ClusterIP 10.97.28.230 80/TCP 19s + kubernetes ClusterIP 10.96.0.1 443/TCP 3d19h + redis-follower ClusterIP 10.110.162.42 6379/TCP 5m48s + redis-leader ClusterIP 10.103.78.24 6379/TCP 11m + ``` ### `kubectl port-forward`를 통해 프론트엔드 서비스 확인하기 1. 다음 명령어를 실행해서 로컬 머신의 `8080` 포트를 서비스의 `80` 포트로 전달한다. - ```shell - kubectl port-forward svc/frontend 8080:80 - ``` + ```shell + kubectl port-forward svc/frontend 8080:80 + ``` - 결과는 아래와 같은 형태로 나타난다. + 결과는 아래와 같은 형태로 나타난다. - ``` - Forwarding from 127.0.0.1:8080 -> 80 - Forwarding from [::1]:8080 -> 80 - ``` + ``` + Forwarding from 127.0.0.1:8080 -> 80 + Forwarding from [::1]:8080 -> 80 + ``` -1. 방명록을 보기위해 브라우저에서 [http://localhost:8080](http://localhost:8080) 페이지를 로드한다. +1. 방명록을 보기 위해 브라우저에서 [http://localhost:8080](http://localhost:8080) 페이지를 로드한다. ### `LoadBalancer`를 통해 프론트엔드 서비스 확인하기 -`frontend-service.yaml` 매니페스트를 `LoadBalancer`와 함께 배포한 경우, 방명록을 보기 위해 IP 주소를 찾아야 한다. +`frontend-service.yaml` 매니페스트를 `LoadBalancer`와 함께 배포한 경우, +방명록을 보기 위해 IP 주소를 찾아야 한다. 1. 프론트엔드 서비스의 IP 주소를 얻기 위해 아래 명령어를 실행한다. - ```shell - kubectl get service frontend - ``` + ```shell + kubectl get service frontend + ``` - 결과는 아래와 같은 형태로 나타난다. + 결과는 아래와 같은 형태로 나타난다. - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend LoadBalancer 10.51.242.136 109.197.92.229 80:32372/TCP 1m - ``` + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + frontend LoadBalancer 10.51.242.136 109.197.92.229 80:32372/TCP 1m + ``` 1. IP 주소를 복사하고, 방명록을 보기 위해 브라우저에서 페이지를 로드한다. +{{< note >}} +메시지를 입력하고 'Submit'을 클릭하여 방명록에 글을 작성해 본다. +입력한 메시지가 프론트엔드에 나타난다. 이 메시지는 앞서 생성한 서비스를 +통해 데이터가 Redis에 성공적으로 입력되었음을 나타낸다. +{{< /note >}} + ## 웹 프론트엔드 확장하기 -서버가 디플로이먼트 컨르롤러를 사용하는 서비스로 정의되어 있기에 필요에 따라 확장 또는 축소할 수 있다. 
+서버가 디플로이먼트 컨트롤러를 사용하는 서비스로 정의되어 있으므로 +필요에 따라 확장 또는 축소할 수 있다. 1. 프론트엔드 파드의 수를 확장하기 위해 아래 명령어를 실행한다. - ```shell - kubectl scale deployment frontend --replicas=5 - ``` + ```shell + kubectl scale deployment frontend --replicas=5 + ``` 1. 파드의 목록을 질의하여 실행 중인 프론트엔드 파드의 수를 확인한다. - ```shell - kubectl get pods - ``` + ```shell + kubectl get pods + ``` - 결과는 아래와 같은 형태로 나타난다. + 결과는 아래와 같은 형태로 나타난다. - ``` - NAME READY STATUS RESTARTS AGE - frontend-3823415956-70qj5 1/1 Running 0 5s - frontend-3823415956-dsvc5 1/1 Running 0 54m - frontend-3823415956-k22zn 1/1 Running 0 54m - frontend-3823415956-w9gbt 1/1 Running 0 54m - frontend-3823415956-x2pld 1/1 Running 0 5s - mongo-1068406935-3lswp 1/1 Running 0 56m - ``` + ``` + NAME READY STATUS RESTARTS AGE + frontend-85595f5bf9-5df5m 1/1 Running 0 83s + frontend-85595f5bf9-7zmg5 1/1 Running 0 83s + frontend-85595f5bf9-cpskg 1/1 Running 0 15m + frontend-85595f5bf9-l2l54 1/1 Running 0 14m + frontend-85595f5bf9-l9c8z 1/1 Running 0 14m + redis-follower-dddfbdcc9-82sfr 1/1 Running 0 97m + redis-follower-dddfbdcc9-qrt5k 1/1 Running 0 97m + redis-leader-fb76b4755-xjr2n 1/1 Running 0 108m + ``` 1. 프론트엔드 파드의 수를 축소하기 위해 아래 명령어를 실행한다. - ```shell - kubectl scale deployment frontend --replicas=2 - ``` + ```shell + kubectl scale deployment frontend --replicas=2 + ``` 1. 파드의 목록을 질의하여 실행 중인 프론트엔드 파드의 수를 확인한다. - ```shell - kubectl get pods - ``` - - 결과는 아래와 같은 형태로 나타난다. - - ``` - NAME READY STATUS RESTARTS AGE - frontend-3823415956-k22zn 1/1 Running 0 1h - frontend-3823415956-w9gbt 1/1 Running 0 1h - mongo-1068406935-3lswp 1/1 Running 0 1h - ``` + ```shell + kubectl get pods + ``` + 결과는 아래와 같은 형태로 나타난다. 
+ ``` + NAME READY STATUS RESTARTS AGE + frontend-85595f5bf9-cpskg 1/1 Running 0 16m + frontend-85595f5bf9-l9c8z 1/1 Running 0 15m + redis-follower-dddfbdcc9-82sfr 1/1 Running 0 98m + redis-follower-dddfbdcc9-qrt5k 1/1 Running 0 98m + redis-leader-fb76b4755-xjr2n 1/1 Running 0 109m + ``` ## {{% heading "cleanup" %}} -디플로이먼트 및 서비스를 삭제하면 실행 중인 모든 파드도 삭제된다. 레이블을 사용하여 하나의 명령어로 여러 자원을 삭제해보자. +디플로이먼트 및 서비스를 삭제하면 실행 중인 모든 파드도 삭제된다. +레이블을 사용하여 하나의 명령어로 여러 자원을 삭제해보자. 1. 모든 파드, 디플로이먼트, 서비스를 삭제하기 위해 아래 명령어를 실행한다. - ```shell - kubectl delete deployment -l app.kubernetes.io/name=mongo - kubectl delete service -l app.kubernetes.io/name=mongo - kubectl delete deployment -l app.kubernetes.io/name=guestbook - kubectl delete service -l app.kubernetes.io/name=guestbook - ``` + ```shell + kubectl delete deployment -l app=redis + kubectl delete service -l app=redis + kubectl delete deployment frontend + kubectl delete service frontend + ``` - 결과는 아래와 같은 형태로 나타난다. + 결과는 아래와 같은 형태로 나타난다. - ``` - deployment.apps "mongo" deleted - service "mongo" deleted - deployment.apps "frontend" deleted - service "frontend" deleted - ``` + ``` + deployment.apps "redis-follower" deleted + deployment.apps "redis-leader" deleted + deployment.apps "frontend" deleted + service "frontend" deleted + ``` 1. 파드의 목록을 질의하여 실행 중인 파드가 없는지 확인한다. - ```shell - kubectl get pods - ``` - - 결과는 아래와 같은 형태로 나타난다. - - ``` - No resources found. - ``` + ```shell + kubectl get pods + ``` + 결과는 아래와 같은 형태로 나타난다. + ``` + No resources found in default namespace. 
+ ``` ## {{% heading "whatsnext" %}} diff --git a/content/ko/examples/application/guestbook/frontend-deployment.yaml b/content/ko/examples/application/guestbook/frontend-deployment.yaml index 613c654aa9..f97f20dab6 100644 --- a/content/ko/examples/application/guestbook/frontend-deployment.yaml +++ b/content/ko/examples/application/guestbook/frontend-deployment.yaml @@ -1,32 +1,29 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook apiVersion: apps/v1 kind: Deployment metadata: name: frontend - labels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend spec: + replicas: 3 selector: matchLabels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend - replicas: 3 + app: guestbook + tier: frontend template: metadata: labels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend + app: guestbook + tier: frontend spec: containers: - - name: guestbook - image: paulczar/gb-frontend:v5 - # image: gcr.io/google-samples/gb-frontend:v4 + - name: php-redis + image: gcr.io/google_samples/gb-frontend:v5 + env: + - name: GET_HOSTS_FROM + value: "dns" resources: requests: cpu: 100m memory: 100Mi - env: - - name: GET_HOSTS_FROM - value: dns ports: - - containerPort: 80 + - containerPort: 80 \ No newline at end of file diff --git a/content/ko/examples/application/guestbook/frontend-service.yaml b/content/ko/examples/application/guestbook/frontend-service.yaml index 34ad3771d7..410c6bbaf2 100644 --- a/content/ko/examples/application/guestbook/frontend-service.yaml +++ b/content/ko/examples/application/guestbook/frontend-service.yaml @@ -1,16 +1,19 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook apiVersion: v1 kind: Service metadata: name: frontend labels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend + app: guestbook + tier: frontend spec: # if your cluster supports it, uncomment the following to automatically create # an 
external load-balanced IP for the frontend service. # type: LoadBalancer + #type: LoadBalancer ports: + # the port that this service should serve on - port: 80 selector: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend + app: guestbook + tier: frontend \ No newline at end of file diff --git a/content/ko/examples/application/guestbook/mongo-deployment.yaml b/content/ko/examples/application/guestbook/mongo-deployment.yaml deleted file mode 100644 index 04908ce25b..0000000000 --- a/content/ko/examples/application/guestbook/mongo-deployment.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: mongo - labels: - app.kubernetes.io/name: mongo - app.kubernetes.io/component: backend -spec: - selector: - matchLabels: - app.kubernetes.io/name: mongo - app.kubernetes.io/component: backend - replicas: 1 - template: - metadata: - labels: - app.kubernetes.io/name: mongo - app.kubernetes.io/component: backend - spec: - containers: - - name: mongo - image: mongo:4.2 - args: - - --bind_ip - - 0.0.0.0 - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 27017 diff --git a/content/ko/examples/application/guestbook/mongo-service.yaml b/content/ko/examples/application/guestbook/mongo-service.yaml deleted file mode 100644 index b9cef607bc..0000000000 --- a/content/ko/examples/application/guestbook/mongo-service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: mongo - labels: - app.kubernetes.io/name: mongo - app.kubernetes.io/component: backend -spec: - ports: - - port: 27017 - targetPort: 27017 - selector: - app.kubernetes.io/name: mongo - app.kubernetes.io/component: backend diff --git a/content/ko/examples/application/guestbook/redis-follower-deployment.yaml b/content/ko/examples/application/guestbook/redis-follower-deployment.yaml new file mode 100644 index 0000000000..c418cf7364 --- /dev/null +++ 
b/content/ko/examples/application/guestbook/redis-follower-deployment.yaml @@ -0,0 +1,30 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-follower + labels: + app: redis + role: follower + tier: backend +spec: + replicas: 2 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + role: follower + tier: backend + spec: + containers: + - name: follower + image: gcr.io/google_samples/gb-redis-follower:v2 + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 \ No newline at end of file diff --git a/content/ko/examples/application/guestbook/redis-follower-service.yaml b/content/ko/examples/application/guestbook/redis-follower-service.yaml new file mode 100644 index 0000000000..53283d35c4 --- /dev/null +++ b/content/ko/examples/application/guestbook/redis-follower-service.yaml @@ -0,0 +1,17 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: redis-follower + labels: + app: redis + role: follower + tier: backend +spec: + ports: + # the port that this service should serve on + - port: 6379 + selector: + app: redis + role: follower + tier: backend \ No newline at end of file diff --git a/content/ko/examples/application/guestbook/redis-leader-deployment.yaml b/content/ko/examples/application/guestbook/redis-leader-deployment.yaml new file mode 100644 index 0000000000..9c7547291c --- /dev/null +++ b/content/ko/examples/application/guestbook/redis-leader-deployment.yaml @@ -0,0 +1,30 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-leader + labels: + app: redis + role: leader + tier: backend +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + role: leader + tier: backend + spec: + 
containers: + - name: leader + image: "docker.io/redis:6.0.5" + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 \ No newline at end of file diff --git a/content/ko/examples/application/guestbook/redis-leader-service.yaml b/content/ko/examples/application/guestbook/redis-leader-service.yaml new file mode 100644 index 0000000000..e04cc183d0 --- /dev/null +++ b/content/ko/examples/application/guestbook/redis-leader-service.yaml @@ -0,0 +1,17 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: redis-leader + labels: + app: redis + role: leader + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis + role: leader + tier: backend \ No newline at end of file diff --git a/content/ko/examples/policy/baseline-psp.yaml b/content/ko/examples/policy/baseline-psp.yaml new file mode 100644 index 0000000000..679b780096 --- /dev/null +++ b/content/ko/examples/policy/baseline-psp.yaml @@ -0,0 +1,74 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: baseline + annotations: + # 선택 사항: 기본 AppArmor 프로파일을 활성화한다. 이 경우 기본값을 설정해야 한다. + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' +spec: + privileged: false + # Moby의 기본 캐퍼빌리티 집합(NET_RAW는 제외되었음) + allowedCapabilities: + - 'CHOWN' + - 'DAC_OVERRIDE' + - 'FSETID' + - 'FOWNER' + - 'MKNOD' + - 'SETGID' + - 'SETUID' + - 'SETFCAP' + - 'SETPCAP' + - 'NET_BIND_SERVICE' + - 'SYS_CHROOT' + - 'KILL' + - 'AUDIT_WRITE' + # hostpath를 제외한 모든 볼륨 타입을 허용 + volumes: + # '코어' 볼륨 타입 + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # 클러스터 관리자에 의해 구성된 휘발성 CSI 드라이버와 퍼시스턴트볼륨(PersistentVolume)은 사용하기에 안전하다고 가정한다. 
+ - 'csi' + - 'persistentVolumeClaim' + - 'ephemeral' + # hostpath 타입이 아닌 다른 모든 볼륨 타입을 허용 + - 'awsElasticBlockStore' + - 'azureDisk' + - 'azureFile' + - 'cephFS' + - 'cinder' + - 'fc' + - 'flexVolume' + - 'flocker' + - 'gcePersistentDisk' + - 'gitRepo' + - 'glusterfs' + - 'iscsi' + - 'nfs' + - 'photonPersistentDisk' + - 'portworxVolume' + - 'quobyte' + - 'rbd' + - 'scaleIO' + - 'storageos' + - 'vsphereVolume' + hostNetwork: false + hostIPC: false + hostPID: false + readOnlyRootFilesystem: false + runAsUser: + rule: 'RunAsAny' + seLinux: + # 이 파드시큐리티폴리시는 노드가 SELinux가 아닌 AppArmor를 사용하고 있다고 가정한다. + # 파드시큐리티폴리시 SELinux API는 SELinux 파드 보안 표준을 표현할 수 없으므로, + # SELinux를 사용하는 경우 더 제한적인 기본값을 선택해야 한다. + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' diff --git a/content/ko/examples/policy/restricted-psp.yaml b/content/ko/examples/policy/restricted-psp.yaml index cbaf2758c0..4cdc12639a 100644 --- a/content/ko/examples/policy/restricted-psp.yaml +++ b/content/ko/examples/policy/restricted-psp.yaml @@ -5,14 +5,11 @@ metadata: annotations: seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' spec: privileged: false - # 루트로의 에스컬레이션을 방지하는데 필요하다. + # 루트로의 에스컬레이션을 방지하는 데 필요하다. allowPrivilegeEscalation: false - # 이것은 루트가 아닌 사용자 + 권한 에스컬레이션을 허용하지 않는 것으로 중복이지만, - # 심층 방어를 위해 이를 제공한다. requiredDropCapabilities: - ALL # 기본 볼륨 유형을 허용한다. @@ -22,8 +19,10 @@ spec: - 'projected' - 'secret' - 'downwardAPI' - # 클러스터 관리자가 설정한 퍼시스턴트볼륨을 사용하는 것이 안전하다고 가정한다. + # 클러스터 관리자에 의해 구성된 휘발성 CSI 드라이버와 퍼시스턴트볼륨(PersistentVolume)의 사용은 안전하다고 가정한다. 
+ - 'csi' - 'persistentVolumeClaim' + - 'ephemeral' hostNetwork: false hostIPC: false hostPID: false diff --git a/content/ko/includes/task-tutorial-prereqs.md b/content/ko/includes/task-tutorial-prereqs.md index 65651286bd..e27f4b99e4 100644 --- a/content/ko/includes/task-tutorial-prereqs.md +++ b/content/ko/includes/task-tutorial-prereqs.md @@ -5,4 +5,4 @@ 다음의 쿠버네티스 플레이그라운드 중 하나를 사용할 수 있다. * [Katacoda](https://www.katacoda.com/courses/kubernetes/playground) -* [Play with Kubernetes](http://labs.play-with-k8s.com/) +* [Play with Kubernetes](https://labs.play-with-k8s.com/) diff --git a/content/ko/releases/version-skew-policy.md b/content/ko/releases/version-skew-policy.md index 38052aa18d..dba98dcdf8 100644 --- a/content/ko/releases/version-skew-policy.md +++ b/content/ko/releases/version-skew-policy.md @@ -6,22 +6,21 @@ -title: 쿠버네티스 버전 및 버전 차이(skew) 지원 정책 -content_type: concept -weight: 30 +title: 버전 차이(skew) 정책 +type: docs +description: > + 다양한 쿠버네티스 구성 요소 간에 지원되는 최대 버전 차이 --- 이 문서는 다양한 쿠버네티스 구성 요소 간에 지원되는 최대 버전 차이를 설명한다. 특정 클러스터 배포 도구는 버전 차이에 대한 추가적인 제한을 설정할 수 있다. - ## 지원되는 버전 -쿠버네티스 버전은 **x.y.z** 로 표현되는데, -여기서 **x** 는 메이저 버전, **y** 는 마이너 버전, **z** 는 패치 버전을 의미하며, 이는 [시맨틱 버전](https://semver.org/) 용어에 따른 것이다. +쿠버네티스 버전은 **x.y.z** 로 표현되는데, 여기서 **x** 는 메이저 버전, **y** 는 마이너 버전, **z** 는 패치 버전을 의미하며, 이는 [시맨틱 버전](https://semver.org/) 용어에 따른 것이다. 자세한 내용은 [쿠버네티스 릴리스 버전](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md#kubernetes-release-versioning)을 참조한다. 쿠버네티스 프로젝트는 최근 세 개의 마이너 릴리스 ({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}}) 에 대한 릴리스 분기를 유지한다. 쿠버네티스 1.19 이상은 약 1년간의 패치 지원을 받는다. 쿠버네티스 1.18 이상은 약 9개월의 패치 지원을 받는다. diff --git a/content/pl/_index.html b/content/pl/_index.html index 1096114700..03ec4a4c44 100644 --- a/content/pl/_index.html +++ b/content/pl/_index.html @@ -44,12 +44,12 @@ Kubernetes jako projekt open-source daje Ci wolność wyboru ⏤ skorzystaj z pr

    - Weź udział w wirtualnym KubeCon NA, 17-20.11.2020 + Weź udział w KubeCon North America 11-15.10.2021



    - Weź udział w wirtualnym KubeCon EU 4–7.05.2021 + Weź udział w wirtualnym KubeCon Europe 17-20.5.2022
    diff --git a/content/pl/docs/concepts/overview/_index.md b/content/pl/docs/concepts/overview/_index.md old mode 100755 new mode 100644 diff --git a/content/pl/docs/concepts/overview/components.md b/content/pl/docs/concepts/overview/components.md index dba2d1e782..b6843e04db 100644 --- a/content/pl/docs/concepts/overview/components.md +++ b/content/pl/docs/concepts/overview/components.md @@ -27,7 +27,7 @@ Poniższy rysunek przedstawia klaster Kubernetes i powiązania pomiędzy jego r Komponenty warstwy sterowania podejmują ogólne decyzje dotyczące klastra (np. zlecanie zadań), a także wykrywają i reagują na zdarzenia w klastrze (przykładowo, start nowego {{< glossary_tooltip text="poda" term_id="pod">}}, kiedy wartość `replicas` dla deploymentu nie zgadza się z faktyczną liczbą replik). -Komponenty warstwy sterowania mogą być uruchomione na dowolnej maszynie w klastrze. Dla uproszczenia jednak skrypty instalacyjne zazwyczaj startują wszystkie składniki na tej samej maszynie i jednocześnie nie pozwalają na uruchamianie na niej kontenerów użytkowników. Na stronie [Tworzenie Wysoko Dostępnych Klastrów](/docs/admin/high-availability/) jest więcej informacji o konfiguracji typu *multi-master-VM*. +Komponenty warstwy sterowania mogą być uruchomione na dowolnej maszynie w klastrze. Dla uproszczenia jednak skrypty instalacyjne zazwyczaj startują wszystkie składniki na tej samej maszynie i jednocześnie nie pozwalają na uruchamianie na niej kontenerów użytkowników. Na stronie [Creating Highly Available clusters with kubeadm](/docs/setup/production-environment/tools/kubeadm/high-availability/) znajdziesz opis konfiguracji warstwy sterowania działającej na wielu maszynach wirtualnych. 
### kube-apiserver @@ -45,10 +45,11 @@ Komponenty warstwy sterowania mogą być uruchomione na dowolnej maszynie w klas {{< glossary_definition term_id="kube-controller-manager" length="all" >}} -Kontrolerami są: +Przykładowe kontrolery: * Node controller: Odpowiada za rozpoznawanie i reagowanie na sytuacje, kiedy węzeł staje się z jakiegoś powodu niedostępny. -* Replication controller: Odpowiada za utrzymanie prawidłowej liczby podów dla każdego obiektu typu *ReplicationController* w systemie. +* Job controller: Czeka na obiekty typu *Job*, które definiują zadania uruchamiane jednorazowo + i startuje Pody, odpowiadające za wykonanie tych zadań. * Endpoints controller: Dostarcza informacji do obiektów typu *Endpoints* (tzn. łączy ze sobą Serwisy i Pody). * Service Account & Token controllers: Tworzy domyślne konta i tokeny dostępu API dla nowych przestrzeni nazw (*namespaces*). diff --git a/content/pl/docs/concepts/overview/what-is-kubernetes.md b/content/pl/docs/concepts/overview/what-is-kubernetes.md index d28c841553..7391ed6602 100644 --- a/content/pl/docs/concepts/overview/what-is-kubernetes.md +++ b/content/pl/docs/concepts/overview/what-is-kubernetes.md @@ -14,11 +14,10 @@ sitemap: Na tej stronie znajdziesz ogólne informacje o Kubernetesie. - Kubernetes to przenośna, rozszerzalna platforma oprogramowania *open-source* służąca do zarządzania zadaniami i serwisami uruchamianymi w kontenerach, która umożliwia deklaratywną konfigurację i automatyzację. Ekosystem Kubernetesa jest duży i dynamicznie się rozwija. Serwisy Kubernetesa, wsparcie i narzędzia są szeroko dostępne. -Nazwa Kubernetes pochodzi z greki i oznacza sternika albo pilota. Google otworzyło projekt Kubernetes publicznie w 2014. Kubernetes korzysta z [piętnastoletniego doświadczenia Google w uruchamianiu wielkoskalowych serwisów](/blog/2015/04/borg-predecessor-to-kubernetes/) i łączy je z najlepszymi pomysłami i praktykami wypracowanymi przez społeczność. 
+Nazwa Kubernetes pochodzi z greki i oznacza sternika albo pilota. Skrót K8s powstał poprzez zastąpienie ośmiu liter pomiędzy "K" i "s". Google otworzyło projekt Kubernetes publicznie w 2014. Kubernetes korzysta z [piętnastoletniego doświadczenia Google w uruchamianiu wielkoskalowych serwisów](/blog/2015/04/borg-predecessor-to-kubernetes/) i łączy je z najlepszymi pomysłami i praktykami wypracowanymi przez społeczność. ## Trochę historii diff --git a/content/pl/docs/reference/glossary/cloud-controller-manager.md b/content/pl/docs/reference/glossary/cloud-controller-manager.md old mode 100755 new mode 100644 diff --git a/content/pl/docs/reference/glossary/cluster.md b/content/pl/docs/reference/glossary/cluster.md old mode 100755 new mode 100644 diff --git a/content/pl/docs/reference/glossary/etcd.md b/content/pl/docs/reference/glossary/etcd.md old mode 100755 new mode 100644 diff --git a/content/pl/docs/reference/glossary/index.md b/content/pl/docs/reference/glossary/index.md old mode 100755 new mode 100644 diff --git a/content/pl/docs/reference/glossary/kube-apiserver.md b/content/pl/docs/reference/glossary/kube-apiserver.md old mode 100755 new mode 100644 diff --git a/content/pl/docs/reference/glossary/kube-controller-manager.md b/content/pl/docs/reference/glossary/kube-controller-manager.md old mode 100755 new mode 100644 diff --git a/content/pl/docs/reference/glossary/kube-proxy.md b/content/pl/docs/reference/glossary/kube-proxy.md old mode 100755 new mode 100644 diff --git a/content/pl/docs/reference/glossary/kube-scheduler.md b/content/pl/docs/reference/glossary/kube-scheduler.md old mode 100755 new mode 100644 diff --git a/content/pl/docs/reference/glossary/kubelet.md b/content/pl/docs/reference/glossary/kubelet.md old mode 100755 new mode 100644 diff --git a/content/pl/docs/reference/tools.md b/content/pl/docs/reference/tools.md deleted file mode 100644 index 2ec66964ed..0000000000 --- a/content/pl/docs/reference/tools.md +++ /dev/null @@ -1,48 +0,0 @@ ---
-title: Narzędzia -content_type: concept ---- - - -Kubernetes zawiera różne wbudowane narzędzia służące do pracy z systemem: - - - -## Kubectl - -[`kubectl`](/docs/tasks/tools/install-kubectl/) to narzędzie tekstowe (linii poleceń) do Kubernetes. Służy do zarządzania klastrem Kubernetes. - -## Kubeadm - -[`kubeadm`](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) to narzędzie tekstowe do łatwej instalacji klastra Kubernetes w bezpiecznej konfiguracji, uruchamianego na infrastrukturze serwerów fizycznych, serwerów w chmurze bądź na maszynach wirtualnych (aktualnie w fazie rozwojowej alfa). - -## Minikube - -[`minikube`](https://minikube.sigs.k8s.io/docs/) to narzędzie do uruchamiania jednowęzłowego klastra Kubernetes na twojej stacji roboczej na potrzeby rozwoju oprogramowania lub prowadzenia testów. - -## Pulpit *(Dashboard)* - -[`Dashboard`](/docs/tasks/access-application-cluster/web-ui-dashboard/) - graficzny interfejs użytkownika w przeglądarce web, który umożliwia instalację aplikacji w kontenerach na klastrze Kubernetes, rozwiązywanie problemów z nimi związanych oraz zarządzanie samym klastrem i jego zasobami. - -## Helm - -[`Kubernetes Helm`](https://github.com/kubernetes/helm) — narzędzie do zarządzania pakietami wstępnie skonfigurowanych zasobów Kubernetes (nazywanych *Kubernetes charts*). - -Helm-a można używać do: - -* Wyszukiwania i instalowania popularnego oprogramowania dystrybuowanego jako Kubernetes *charts* -* Udostępniania własnych aplikacji w postaci pakietów Kubernetes *charts* -* Definiowania powtarzalnych instalacji aplikacji na Kubernetes -* Inteligentnego zarządzania plikami list (*manifests*) Kubernetes -* Zarządzaniem kolejnymi wydaniami pakietów Helm - -## Kompose - -[`Kompose`](https://github.com/kubernetes/kompose) to narzędzie, które ma pomóc użytkownikom Docker Compose przenieść się na Kubernetes. 
- -Kompose można używać do: - -* Tłumaczenia plików Docker Compose na obiekty Kubernetes -* Zmiany sposóbu zarządzania twoimi aplikacjami z lokalnego środowiska Docker na system Kubernetes -* Zamiany plików `yaml` Docker Compose v1 lub v2 oraz [Distributed Application Bundles](https://docs.docker.com/compose/bundles/) - diff --git a/content/pl/docs/setup/_index.md b/content/pl/docs/setup/_index.md index 6107fe0f03..a2908369f7 100644 --- a/content/pl/docs/setup/_index.md +++ b/content/pl/docs/setup/_index.md @@ -1,9 +1,9 @@ --- -no_issue: true title: Od czego zacząć main_menu: true weight: 20 content_type: concept +no_list: true card: name: setup weight: 20 @@ -19,16 +19,44 @@ card: Ten rozdział poświęcony jest różnym metodom konfiguracji i uruchomienia Kubernetesa. Instalując Kubernetesa, przy wyborze platformy kieruj się: łatwością w utrzymaniu, spełnianymi wymogami bezpieczeństwa, poziomem sterowania, dostępnością zasobów oraz doświadczeniem wymaganym do zarządzania klastrem. -Klaster Kubernetes możesz zainstalować na lokalnym komputerze, w chmurze czy w prywatnym centrum obliczeniowym albo skorzystać z klastra Kubernetes udostępnianego jako usługa. Inną możliwością jest budowa własnego systemu opartego o różnych dostawców usług chmurowych, bądź bazującego bezpośrednio na sprzęcie fizycznym. +Możesz [pobrać Kubernetesa](/releases/download/), aby zainstalować klaster +na lokalnym komputerze, w chmurze czy w prywatnym centrum obliczeniowym. + +Jeśli nie chcesz zarządzać klastrem Kubernetesa samodzielnie, możesz wybrać serwis zarządzany przez zewnętrznego dostawcę, +wybierając na przykład spośród [certyfikowanych platform](/docs/setup/production-environment/turnkey-solutions/). +Dostępne są także inne standardowe i specjalizowane rozwiązania dla różnych środowisk chmurowych +bądź bazujące bezpośrednio na sprzęcie fizycznym. 
## Środowisko do nauki {#srodowisko-do-nauki} -Do nauki Kubernetesa wykorzystaj narzędzia wspierane przez społeczność Kubernetesa lub inne narzędzia dostępne w ekosystemie, aby uruchomić klaster Kubernetesa na swoim komputerze lokalnym. +Do nauki Kubernetesa wykorzystaj narzędzia wspierane przez społeczność Kubernetesa +lub inne narzędzia dostępne w ekosystemie, aby uruchomić klaster Kubernetesa na swoim komputerze lokalnym. +Zapoznaj się z [narzędziami instalacyjnymi](/docs/tasks/tools/). ## Środowisko produkcyjne {#srodowisko-produkcyjne} -Wybierając rozwiązanie dla środowiska produkcyjnego musisz zdecydować, którymi poziomami zarządzania klastrem (_abstrakcjami_) chcesz zajmować się sam, a które będą realizowane po stronie zewnętrznego operatora. +Wybierając rozwiązanie dla +[środowiska produkcyjnego](/docs/setup/production-environment/) musisz zdecydować, +którymi poziomami zarządzania klastrem (_abstrakcjami_) chcesz zajmować się sam, +a które będą realizowane po stronie zewnętrznego operatora. -Na stronie [Partnerzy Kubernetes](https://kubernetes.io/partners/#conformance) znajdziesz listę dostawców posiadających [certyfikację Kubernetes](https://github.com/cncf/k8s-conformance/#certified-kubernetes). +Do instalacji klastra Kubernetesa zarządzanego samodzielnie oficjalnym narzędziem +jest [kubeadm](/docs/setup/production-environment/tools/kubeadm/). + +## {{% heading "whatsnext" %}} + +- [Pobierz Kubernetesa](/releases/download/) +- Pobierz i [zainstaluj narzędzia](/docs/tasks/tools/), w tym `kubectl` +- Wybierz [środowisko uruchomieniowe dla kontenerów](/docs/setup/production-environment/container-runtimes/) w nowym klastrze +- Naucz się [najlepszych praktyk](/docs/setup/best-practices/) przy konfigurowaniu klastra + +Na stronie [Partnerów Kubernetesa](https://kubernetes.io/partners/#conformance) znajdziesz listę dostawców posiadających +[certyfikację Kubernetes](https://github.com/cncf/k8s-conformance/#certified-kubernetes). 
+ +Kubernetes zaprojektowano w ten sposób, że {{< glossary_tooltip term_id="control-plane" text="warstwa sterowania" >}} +wymaga do działania systemu Linux. W ramach klastra aplikacje mogą być uruchamiane na systemie Linux i innych, +w tym Windows. + +- Naucz się, [jak zbudować klaster z węzłami Windows](/docs/setup/production-environment/windows/) diff --git a/content/pl/docs/setup/release/_index.md b/content/pl/docs/setup/release/_index.md deleted file mode 100755 index 2783105198..0000000000 --- a/content/pl/docs/setup/release/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Informacje o wydaniach i dozwolonych różnicach wersji" -weight: 10 ---- diff --git a/content/pl/docs/tutorials/kubernetes-basics/_index.html b/content/pl/docs/tutorials/kubernetes-basics/_index.html index e27a3ad6bf..0996edc64b 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/_index.html +++ b/content/pl/docs/tutorials/kubernetes-basics/_index.html @@ -11,7 +11,7 @@ card: - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html b/content/pl/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html index 12211e42b6..72409e9238 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html +++ b/content/pl/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/pl/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html index 3955e557c4..c5eddaf5f9 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html +++ b/content/pl/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html @@ -5,7 +5,7 @@ weight: 10 - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html b/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html index 
64c1a0a9a1..954bad22b3 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html +++ b/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html index c879aa82b9..f4b893d60b 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html +++ b/content/pl/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -5,7 +5,7 @@ weight: 10 - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/explore/explore-interactive.html b/content/pl/docs/tutorials/kubernetes-basics/explore/explore-interactive.html index 1ae1e88382..14afae5a3d 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/explore/explore-interactive.html +++ b/content/pl/docs/tutorials/kubernetes-basics/explore/explore-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/explore/explore-intro.html b/content/pl/docs/tutorials/kubernetes-basics/explore/explore-intro.html index edfff527e6..f62563e1cd 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/explore/explore-intro.html +++ b/content/pl/docs/tutorials/kubernetes-basics/explore/explore-intro.html @@ -5,7 +5,7 @@ weight: 10 - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/expose/expose-interactive.html b/content/pl/docs/tutorials/kubernetes-basics/expose/expose-interactive.html index a1aca99ce3..1aefa3e793 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/expose/expose-interactive.html +++ b/content/pl/docs/tutorials/kubernetes-basics/expose/expose-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html index f9f9134e4a..199ab9dfe7 100644 --- 
a/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html +++ b/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -5,7 +5,7 @@ weight: 10 - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/scale/scale-interactive.html b/content/pl/docs/tutorials/kubernetes-basics/scale/scale-interactive.html index e8017f5ad6..7990fbd1a6 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/scale/scale-interactive.html +++ b/content/pl/docs/tutorials/kubernetes-basics/scale/scale-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/scale/scale-intro.html b/content/pl/docs/tutorials/kubernetes-basics/scale/scale-intro.html index 91eb10eb6d..bb2c40ffee 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/scale/scale-intro.html +++ b/content/pl/docs/tutorials/kubernetes-basics/scale/scale-intro.html @@ -5,7 +5,7 @@ weight: 10 - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/update/update-interactive.html b/content/pl/docs/tutorials/kubernetes-basics/update/update-interactive.html index 07731b5849..5664abc0e6 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/update/update-interactive.html +++ b/content/pl/docs/tutorials/kubernetes-basics/update/update-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/pl/docs/tutorials/kubernetes-basics/update/update-intro.html b/content/pl/docs/tutorials/kubernetes-basics/update/update-intro.html index b51779237d..2c42eee6d6 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/update/update-intro.html +++ b/content/pl/docs/tutorials/kubernetes-basics/update/update-intro.html @@ -5,7 +5,7 @@ weight: 10 - + diff --git a/content/pl/releases/_index.md b/content/pl/releases/_index.md new file mode 100644 index 0000000000..46c2a7659f --- /dev/null +++ b/content/pl/releases/_index.md @@ -0,0 +1,27 @@ +--- +linktitle: Historia wydań +title: Wydania +type: docs +--- + + + + +Projekt Kubernetes zapewnia wsparcie dla 
trzech ostatnich wydań _minor_ ({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}}). Poprawki do wydania 1.19 i nowszych będą publikowane przez około rok. Kubernetes w wersji 1.18 i wcześniejszych będzie otrzymywał poprawki przez 9 miesięcy. + +Wersje Kubernetesa oznaczane są jako **x.y.z**, +gdzie **x** jest oznaczeniem wersji głównej (_major_), **y** — podwersji (_minor_), a **z** — numer poprawki (_patch_), zgodnie z terminologią [Semantic Versioning](https://semver.org/). + +Więcej informacji można znaleźć w dokumencie [version skew policy](/releases/version-skew-policy/). + + + +## Historia wydań + +{{< release-data >}} + +## Nadchodzące wydania + +Zajrzyj na [harmonogram](https://github.com/kubernetes/sig-release/tree/master/releases/release-{{< skew nextMinorVersion >}}) nadchodzącego wydania Kubernetesa numer **{{< skew nextMinorVersion >}}**! + +## Przydatne zasoby diff --git a/content/pl/training/_index.html b/content/pl/training/_index.html index 2dd7ed433e..0fd093af3d 100644 --- a/content/pl/training/_index.html +++ b/content/pl/training/_index.html @@ -70,7 +70,7 @@ class: training

    Nauka z Linux Foundation

    -

    Linux Foundation oferuje szkolenia prowadzone przez instruktora oraz szkolenia samodzielne obejmujące wszystkie aspekty rozwijania i zarządzania aplikacjami na Kubrnetesie.

    +

    Linux Foundation oferuje szkolenia prowadzone przez instruktora oraz szkolenia samodzielne obejmujące wszystkie aspekty rozwijania i zarządzania aplikacjami na Kubernetesie.



    Sprawdź ofertę szkoleń
    diff --git a/content/pt-br/docs/_index.md b/content/pt-br/docs/_index.md index 1d1529975c..6e34880dd2 100644 --- a/content/pt-br/docs/_index.md +++ b/content/pt-br/docs/_index.md @@ -16,7 +16,7 @@ Como você pode ver, a maior parte da documentação ainda está disponível ape -Se você quiser participar, você pode entrar no canal Slack [#kubernets-docs-pt](http://slack.kubernetes.io/) e fazer parte da equipe por trás da tradução. +Se você quiser participar, você pode entrar no canal Slack [#kubernetes-docs-pt](http://slack.kubernetes.io/) e fazer parte da equipe por trás da tradução. Você também pode acessar o canal para solicitar a tradução de uma página específica ou relatar qualquer erro que possa ter sido encontrado. Qualquer contribuição será bem recebida! diff --git a/content/pt-br/docs/concepts/architecture/_index.md b/content/pt-br/docs/concepts/architecture/_index.md old mode 100755 new mode 100644 diff --git a/content/pt-br/docs/concepts/cluster-administration/_index.md b/content/pt-br/docs/concepts/cluster-administration/_index.md old mode 100755 new mode 100644 diff --git a/content/pt-br/docs/concepts/cluster-administration/addons.md b/content/pt-br/docs/concepts/cluster-administration/addons.md index 0a50c96190..79b62bf832 100644 --- a/content/pt-br/docs/concepts/cluster-administration/addons.md +++ b/content/pt-br/docs/concepts/cluster-administration/addons.md @@ -1,58 +1,54 @@ --- -title: Instalando Addons +title: Instalando Complementos content_type: concept --- +{{% thirdparty-content %}} -Addons estendem a funcionalidade do Kubernetes. - -Esta página lista alguns dos add-ons e links com suas respectivas instruções de instalação. - -Os Add-ons de cada sessão são classificados em ordem alfabética - a ordem não implica qualquer status preferencial. - - +Complementos estendem as funcionalidades do Kubernetes. +Esta página lista alguns dos complementos disponíveis e links com suas respectivas instruções de instalação. 
## Rede e Política de Rede - * [ACI](https://www.github.com/noironetworks/aci-containers) fornece rede integrada de contêineres e segurança de rede com a Cisco ACI. -* [Calico](https://docs.projectcalico.org/latest/getting-started/kubernetes/) é um provedor de políticas de rede e rede L3 seguro. +* [Antrea](https://antrea.io/) opera nas camadas 3 e 4 do modelo de rede OSI para fornecer serviços de rede e de segurança para o Kubernetes, aproveitando o Open vSwitch como camada de dados de rede. +* [Calico](https://docs.projectcalico.org/latest/introduction/) é um provedor de serviços de rede e de políticas de rede. Este complemento suporta um conjunto flexível de opções de rede, de modo a permitir a escolha da opção mais eficiente para um dado caso de uso, incluindo redes _overlay_ (sobrepostas) e não-_overlay_, com ou sem o uso do protocolo BGP. Calico usa o mesmo mecanismo para aplicar políticas de rede a hosts, pods, e aplicações na camada de _service mesh_ (quando Istio e Envoy estão instalados). * [Canal](https://github.com/tigera/canal/tree/master/k8s-install) une Flannel e Calico, fornecendo rede e política de rede. -* [Cilium](https://github.com/cilium/cilium) é um plug-in de políticas de rede e rede L3 que pode impor políticas de HTTP / API / L7 de forma transparente. Tanto o modo de roteamento quanto o de sobreposição / encapsulamento são suportados. -* [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) permite que o Kubernetes se conecte facilmente a uma variedade de plugins CNI, como Calico, Canal, Flannel, Romana ou Weave. -* [Contiv](http://contiv.github.io) fornece um rede configurável (L3 nativa usando BGP, sobreposição usando vxlan, L2 clássico e Cisco-SDN / ACI) para vários casos de uso e uma estrutura rica de políticas de rede. O projeto Contiv é totalmente [open source](http://github.com/contiv). O script de [instalação](http://github.com/contiv/install) fornece opções de instalação com ou sem kubeadm. 
-* [Contrail](http://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/), baseado no [Tungsten Fabric](https://tungsten.io), é um projeto open source, multi-cloud com uma rede virtualizada e com uma plataforma de gerenciamento de políticas de rede. O Contrail e o Tungsten Fabric estão integrados a sistemas de orquestração, como Kubernetes, OpenShift, OpenStack e Mesos, e fornecem modos de isolamento para máquinas virtuais, containers / pods e cargas em servidores físicos. -* [Flannel](https://github.com/coreos/flannel/blob/master/Documentation/kubernetes.md) é um provedor de rede de sobreposição que pode ser usado com o Kubernetes. -* [Knitter](https://github.com/ZTE/Knitter/) é uma solução de rede que suporta múltiplas redes no Kubernetes. -* [Multus](https://github.com/Intel-Corp/multus-cni) é um plugin Multi para suporte a várias redes no Kubernetes para suportar todos os plugins CNI (por exemplo, Calico, Cilium, Contiv, Flannel), além das cargas de trabalho baseadas em SRIOV, DPDK, OVS-DPDK e VPP no Kubernetes. -* [NSX-T](https://docs.vmware.com/en/VMware-NSX-T/2.0/nsxt_20_ncp_kubernetes.pdf) O Plugin de contêiner (NCP) fornece integração entre o VMware NSX-T e orquestradores de contêineres como o Kubernetes, além da integração entre o NSX-T e as plataformas CaaS / PaaS baseadas em contêiner, como Pivotal Container Service (PKS) e OpenShift. -* [Nuage](https://github.com/nuagenetworks/nuage-kubernetes/blob/v5.1.1-1/docs/kubernetes-1-installation.rst) é uma plataforma SDN que fornece uma rede baseada em políticas entre os Pods Kubernetes e os ambientes não-Kubernetes, com visibilidade e monitoramento de segurança. -* [Romana](http://romana.io) é uma solução de rede Camada 3 para redes de pods que também suporta [NetworkPolicy API](/docs/concepts/services-networking/network-policies/). Detalhes da instalação do add-on Kubeadm disponíveis [aqui](https://github.com/romana/romana/tree/master/containerize). 
-* [Weave Net](https://www.weave.works/docs/net/latest/kube-addon/) fornece rede e política de rede, continuará trabalhando em ambos os lados de uma partição de rede e não requer um banco de dados externo. +* [Cilium](https://github.com/cilium/cilium) é um plug-in de rede de camada 3 e de políticas de rede que pode aplicar políticas HTTP/API/camada 7 de forma transparente. Tanto o modo de roteamento quanto o de sobreposição/encapsulamento são suportados. Este plug-in também consegue operar no topo de outros plug-ins CNI. +* [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) permite que o Kubernetes se conecte facilmente a uma variedade de plug-ins CNI, como Calico, Canal, Flannel, Romana ou Weave. +* [Contiv](http://contiv.github.io) oferece serviços de rede configuráveis para diferentes casos de uso (camada 3 nativa usando BGP, _overlay_ (sobreposição) usando vxlan, camada 2 clássica e Cisco-SDN/ACI) e também um _framework_ rico de políticas de rede. O projeto Contiv é totalmente [open source](http://github.com/contiv). O [instalador](http://github.com/contiv/install) fornece opções de instalação com ou sem kubeadm. +* [Contrail](http://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/) é uma plataforma open source baseada no [Tungsten Fabric](https://tungsten.io) que oferece virtualização de rede multi-nuvem e gerenciamento de políticas de rede. O Contrail e o Tungsten Fabric são integrados a sistemas de orquestração de contêineres, como Kubernetes, OpenShift, OpenStack e Mesos, e fornecem modos de isolamento para cargas de trabalho executando em máquinas virtuais, contêineres/pods e servidores físicos. +* [Flannel](https://github.com/flannel-io/flannel#deploying-flannel-manually) é um provedor de redes _overlay_ (sobrepostas) que pode ser usado com o Kubernetes. +* [Knitter](https://github.com/ZTE/Knitter/) é um plug-in para suporte de múltiplas interfaces de rede em Pods do Kubernetes. 
+* [Multus](https://github.com/Intel-Corp/multus-cni) é um plugin para suporte a várias interfaces de rede em Pods no Kubernetes. Este plug-in pode agir como um "meta-plug-in", ou um plug-in CNI que se comunica com múltiplos outros plug-ins CNI (por exemplo, Calico, Cilium, Contiv, Flannel), além das cargas de trabalho baseadas em SRIOV, DPDK, OVS-DPDK e VPP no Kubernetes. +* [NSX-T](https://docs.vmware.com/en/VMware-NSX-T/2.0/nsxt_20_ncp_kubernetes.pdf) Container Plug-in (NCP) fornece integração entre o VMware NSX-T e sistemas de orquestração de contêineres como o Kubernetes. Além disso, oferece também integração entre o NSX-T e as plataformas CaaS/PaaS baseadas em contêiner, como o Pivotal Container Service (PKS) e o OpenShift. +* [Nuage](https://github.com/nuagenetworks/nuage-kubernetes/blob/v5.1.1-1/docs/kubernetes-1-installation.rst) é uma plataforma de rede definida por software que fornece serviços de rede baseados em políticas entre os Pods do Kubernetes e os ambientes não-Kubernetes, com visibilidade e monitoramento de segurança. +* [OVN-Kubernetes](https://github.com/ovn-org/ovn-kubernetes/) é um provedor de rede para o Kubernetes baseado no [OVN (Open Virtual Network)](https://github.com/ovn-org/ovn/), uma implementação de redes virtuais que surgiu através do projeto Open vSwitch (OVS). O OVN-Kubernetes fornece uma implementação de rede baseada em _overlay_ (sobreposição) para o Kubernetes, incluindo uma implementação baseada em OVS para serviços de balanceamento de carga e políticas de rede. +* [OVN4NFV-K8S-Plugin](https://github.com/opnfv/ovn4nfv-k8s-plugin) é um plug-in controlador CNI baseado no OVN (Open Virtual Network) que fornece serviços de rede _cloud native_, como _Service Function Chaining_ (SFC), redes _overlay_ (sobrepostas) OVN múltiplas, criação dinâmica de subredes, criação dinâmica de redes virtuais, provedor de rede VLAN e provedor de rede direto, e é plugável a outros plug-ins multi-rede. 
Ideal para cargas de trabalho que utilizam computação de borda _cloud native_ em redes multi-cluster. +* [Romana](http://romana.io) é uma solução de rede de camada 3 para redes de pods que também suporta a [API NetworkPolicy](/docs/concepts/services-networking/network-policies/). Detalhes da instalação do complemento Kubeadm disponíveis [aqui](https://github.com/romana/romana/tree/master/containerize). +* [Weave Net](https://www.weave.works/docs/net/latest/kube-addon/) fornece rede e política de rede, funciona em ambos os lados de uma partição de rede e não requer um banco de dados externo. ## Descoberta de Serviço -* [CoreDNS](https://coredns.io) é um servidor DNS flexível e extensível que pode ser [instalado](https://github.com/coredns/deployment/tree/master/kubernetes) como DNS dentro do cluster para ser utilizado por pods. +* [CoreDNS](https://coredns.io) é um servidor DNS flexível e extensível que pode ser [instalado](https://github.com/coredns/deployment/tree/master/kubernetes) como o serviço de DNS dentro do cluster para ser utilizado por pods. ## Visualização & Controle * [Dashboard](https://github.com/kubernetes/dashboard#kubernetes-dashboard) é uma interface web para gestão do Kubernetes. -* [Weave Scope](https://www.weave.works/documentation/scope-latest-installing/#k8s) é uma ferramenta gráfica para visualizar contêineres, pods, serviços etc. Use-o em conjunto com o [Weave Cloud account](https://cloud.weave.works/) ou hospede você mesmo a interface do usuário. +* [Weave Scope](https://www.weave.works/documentation/scope-latest-installing/#k8s) é uma ferramenta gráfica para visualizar contêineres, pods, serviços, entre outros objetos do cluster. Pode ser utilizado com uma [conta Weave Cloud](https://cloud.weave.works/). Como alternativa, é possível hospedar a interface do usuário por conta própria. 
-## A infraestrutura +## Infraestrutura -* [KubeVirt](https://kubevirt.io/user-guide/docs/latest/administration/intro.html#cluster-side-add-on-deployment) é um add-on para executar máquinas virtuais no Kubernetes. É geralmente executado em clusters em maquina fisica. +* [KubeVirt](https://kubevirt.io/user-guide/#/installation/installation) é um complemento para executar máquinas virtuais no Kubernetes. É geralmente executado em clusters em máquina física. -## Add-ons Legado - -Existem vários outros complementos documentados no diretório não mais ultilizados [cluster/addons](https://git.k8s.io/kubernetes/cluster/addons). - -Projetos bem mantidos deveriam ser linkados aqui. PRs são bem vindas! +## Complementos Legados +Existem vários outros complementos documentados no diretório [cluster/addons](https://git.k8s.io/kubernetes/cluster/addons) que não são mais utilizados. +Projetos bem mantidos devem ser listados aqui. PRs são bem-vindos! diff --git a/content/pt-br/docs/concepts/cluster-administration/system-logs.md b/content/pt-br/docs/concepts/cluster-administration/system-logs.md new file mode 100644 index 0000000000..fb79360821 --- /dev/null +++ b/content/pt-br/docs/concepts/cluster-administration/system-logs.md @@ -0,0 +1,132 @@ +--- +title: Logs de Sistema +content_type: concept +weight: 60 +--- + + + +Logs de componentes do sistema armazenam eventos que acontecem no cluster, sendo muito úteis para depuração. Seus níveis de detalhe podem ser ajustados para mais ou para menos. Podendo se ater, por exemplo, a mostrar apenas os erros que ocorrem no componente, ou chegando a mostrar cada passo de um evento. (Como acessos HTTP, mudanças no estado dos pods, ações dos controllers, ou decisões do scheduler). + + + +## Klog + +[Klog](https://github.com/kubernetes/klog) é a biblioteca de logs do Kubernetes. Responsável por gerar as mensagens de log para os componentes do sistema. 
+ +Para mais informações acerca da sua configuração, veja a documentação da [ferramenta de linha de comando](https://kubernetes.io/docs/reference/command-line-tools-reference/). + +Um exemplo do formato padrão dos logs da biblioteca: +``` +I1025 00:15:15.525108 1 httplog.go:79] GET /api/v1/namespaces/kube-system/pods/metrics-server-v0.3.1-57c75779f-9p8wg: (1.512ms) 200 [pod_nanny/v0.0.0 (linux/amd64) kubernetes/$Format 10.56.1.19:51756] +``` + +### Logs Estruturados + +{{< feature-state for_k8s_version="v1.19" state="alpha" >}} + +{{< warning >}} +A migração pro formato de logs estruturados é um processo em andamento. Nem todos os logs estão dessa forma na versão atual. Sendo assim, para realizar o processamento de arquivos de log, você também precisa lidar com logs não estruturados. + +A formatação e serialização dos logs ainda estão sujeitas a alterações. +{{< /warning>}} + +A estruturação dos logs traz uma estrutura uniforme para as mensagens de log, permitindo a extração programática de informações. Logs estruturados podem ser armazenados e processados com menos esforço e custo. Esse formato é totalmente retrocompatível e é habilitado por padrão. + +Formato dos logs estruturados: + +```ini +<klog header> "<mensagem>" <chave1>="<valor1>" <chave2>="<valor2>" ... +``` + +Exemplo: + +```ini +I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kube-system/kubedns" status="ready" +``` + + +### Logs em formato JSON + +{{< feature-state for_k8s_version="v1.19" state="alpha" >}} + +{{< warning >}} +Algumas opções da biblioteca klog ainda não funcionam com os logs em formato JSON. Para ver uma lista completa de quais são estas, veja a documentação da [ferramenta de linha de comando](/docs/reference/command-line-tools-reference/). + +Nem todos os logs estarão garantidamente em formato JSON (como por exemplo durante o início de processos). Sendo assim, se você pretende realizar o processamento dos logs, seu código deverá saber tratar também linhas que não são JSON.
+ +O nome dos campos e a serialização JSON ainda estão sujeitos a mudanças. +{{< /warning >}} + +A opção `--logging-format=json` muda o formato dos logs, do formato padrão da klog para JSON. Abaixo segue um exemplo de um log em formato JSON (indentado): +```json +{ + "ts": 1580306777.04728, + "v": 4, + "msg": "Pod status updated", + "pod":{ + "name": "nginx-1", + "namespace": "default" + }, + "status": "ready" +} +``` + +Chaves com significados especiais: +* `ts` - Data e hora no formato Unix (obrigatório, float) +* `v` - Nível de detalhe (obrigatório, int, padrão 0) +* `err` - Mensagem de erro (opcional, string) +* `msg` - Mensagem (obrigatório, string) + +Lista dos componentes que suportam o formato JSON atualmente: +* {{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}} +* {{< glossary_tooltip term_id="kube-apiserver" text="kube-apiserver" >}} +* {{< glossary_tooltip term_id="kube-scheduler" text="kube-scheduler" >}} +* {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} + +### Limpeza dos Logs + +{{< feature-state for_k8s_version="v1.20" state="alpha" >}} + +{{< warning >}} +A funcionalidade de limpeza dos logs pode causar impactos significativos na performance, sendo portanto contraindicada em produção. +{{< /warning >}} + +A opção `--experimental-logging-sanitization` habilita o filtro de limpeza dos logs. +Quando habilitado, esse filtro inspeciona todos os argumentos dos logs, procurando por campos contendo dados sensíveis (como senhas, chaves e tokens). Tais campos não serão expostos nas mensagens de log. 
+ +Lista dos componentes que suportam a limpeza de logs atualmente: +* {{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}} +* {{< glossary_tooltip term_id="kube-apiserver" text="kube-apiserver" >}} +* {{< glossary_tooltip term_id="kube-scheduler" text="kube-scheduler" >}} +* {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} + +{{< note >}} +O filtro de limpeza dos logs não impede a exposição de dados sensíveis nos logs das aplicações em execução. +{{< /note >}} + +### Nível de detalhe dos logs + +A opção `-v` controla o nível de detalhe dos logs. Um valor maior aumenta o número de eventos registrados, começando a registrar também os eventos menos importantes. Similarmente, um valor menor restringe os logs apenas aos eventos mais importantes. O valor padrão 0 registra apenas eventos críticos. + +### Localização dos Logs + +Existem dois tipos de componentes do sistema: aqueles que são executados em um contêiner e aqueles que não são. Por exemplo: + +* O [Kubernetes scheduler](https://kubernetes.io/pt-br/docs/concepts/overview/components/#kube-scheduler) e o [kube-proxy](https://kubernetes.io/pt-br/docs/concepts/overview/components/#kube-proxy) são executados em um contêiner. +* O [kubelet](https://kubernetes.io/pt-br/docs/concepts/overview/components/#kubelet) e os [agentes de execução](https://kubernetes.io/pt-br/docs/concepts/overview/components/#container-runtime), como o Docker por exemplo, não são executados em contêineres. + +Em máquinas com systemd, o kubelet e os agentes de execução gravam os logs no journald. +Em outros casos, eles escrevem os logs em arquivos `.log` no diretório `/var/log`. +Já os componentes executados dentro de contêineres, sempre irão escrever os logs em arquivos `.log` +no diretório `/var/log`, ignorando o mecanismo padrão de log. + +De forma similar aos logs de contêiner, os logs de componentes do sistema no diretório `/var/log` devem ser rotacionados. 
+Nos clusters Kubernetes criados com o script `kube-up.sh`, a rotação dos logs é configurada pela ferramenta `logrotate`. Essa ferramenta rotaciona os logs diariamente +ou quando o tamanho do arquivo excede 100MB. + +## {{% heading "whatsnext" %}} + +* Leia sobre [Arquitetura de Logs do Kubernetes](/pt-br/docs/concepts/cluster-administration/logging/) +* Leia sobre [Logs Estruturados](https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1602-structured-logging) +* Leia sobre [Convenções sobre os níveis de logs](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md) diff --git a/content/pt-br/docs/concepts/configuration/overview.md b/content/pt-br/docs/concepts/configuration/overview.md new file mode 100644 index 0000000000..66f369c03b --- /dev/null +++ b/content/pt-br/docs/concepts/configuration/overview.md @@ -0,0 +1,126 @@ +--- +title: Melhores Práticas de Configuração +content_type: concept +weight: 10 +--- + + +Esse documento destaca e consolida as melhores práticas de configuração apresentadas em todo o guia de usuário, +na documentação de introdução e nos exemplos. + +Este é um documento vivo. Se você pensar em algo que não está nesta lista, mas pode ser útil para outras pessoas, +não hesite em criar uma *issue* ou submeter um PR. + + + +## Dicas Gerais de Configuração + +- Ao definir configurações, especifique a versão mais recente estável da API. + +- Os arquivos de configuração devem ser armazenados em um sistema de controle antes de serem enviados ao cluster. +Isso permite que você reverta rapidamente uma alteração de configuração, caso necessário. Isso também auxilia na recriação e restauração do cluster. + +- Escreva seus arquivos de configuração usando YAML ao invés de JSON. Embora esses formatos possam ser usados alternadamente em quase todos os cenários, YAML tende a ser mais amigável. + +- Agrupe objetos relacionados em um único arquivo sempre que fizer sentido. 
Geralmente, um arquivo é mais fácil de +gerenciar do que vários. Veja o [guestbook-all-in-one.yaml](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/all-in-one/guestbook-all-in-one.yaml) como exemplo dessa sintaxe. + +- Observe também que vários comandos `kubectl` podem ser chamados em um diretório. Por exemplo, você pode chamar +`kubectl apply` em um diretório de arquivos de configuração. + +- Não especifique valores padrões desnecessariamente: configurações simples e mínimas diminuem a possibilidade de erros. + +- Coloque descrições de objetos nas anotações para permitir uma melhor análise. + + +## "Naked" Pods comparados a ReplicaSets, Deployments, e Jobs {#naked-pods-vs-replicasets-deployments-and-jobs} + +- Se você puder evitar, não use "naked" Pods (ou seja, se você puder evitar, pods não vinculados a um [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/) ou [Deployment](/docs/concepts/workloads/controllers/deployment/)). +Os "naked" pods não serão reconfigurados em caso de falha de um nó. + + Criar um Deployment, que cria um ReplicaSet para garantir que o número desejado de Pods esteja disponível e especifica uma estratégia para substituir os Pods (como [RollingUpdate](/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment)), é quase sempre preferível do que criar Pods diretamente, exceto para alguns cenários explícitos de restartPolicy:Never. Um Job também pode ser apropriado. + + +## Services + +- Crie o [Service](/docs/concepts/services-networking/service/) antes de suas cargas de trabalho de backend correspondentes (Deployments ou ReplicaSets) e antes de quaisquer cargas de trabalho que precisem acessá-lo. Quando o +Kubernetes inicia um contêiner, ele fornece variáveis de ambiente apontando para todos os Services que estavam em execução quando o contêiner foi iniciado. 
Por exemplo, se um Service chamado `foo` existe, todos os contêineres vão +receber as seguintes variáveis em seu ambiente inicial: + + ```shell + FOO_SERVICE_HOST= + FOO_SERVICE_PORT= + ``` + +*Isso implica em um requisito de pedido* - qualquer `Service` que um `Pod` quer acessar precisa ser criado antes do `Pod` em si, ou então as variáveis de ambiente não serão populadas. O DNS não possui essa restrição. + +- Um [cluster add-on](/docs/concepts/cluster-administration/addons/) opcional (embora fortemente recomendado) é um servidor DNS. O +servidor DNS monitora a API do Kubernetes buscando novos `Services` e cria um conjunto de DNS para cada um. Se o DNS foi habilitado em todo o cluster, então todos os `Pods` devem ser capazes de fazer a resolução de `Services` automaticamente. + +- Não especifique um `hostPort` para um Pod a menos que isso seja absolutamente necessário. Quando você vincula um Pod a um `hostPort`, isso limita o número de lugares em que o Pod pode ser agendado, porque cada +combinação de <`hostIP`, `hostPort`, `protocol`> deve ser única. Se você não especificar o `hostIP` e `protocol` explicitamente, o Kubernetes vai usar `0.0.0.0` como o `hostIP` padrão e `TCP` como `protocol` padrão. + + Se você precisa de acesso a porta apenas para fins de depuração, pode usar o [apiserver proxy](/docs/tasks/access-application-cluster/access-cluster/#manually-constructing-apiserver-proxy-urls) ou o [`kubectl port-forward`](/docs/tasks/access-application-cluster/port-forward-access-application-cluster/). + + Se você precisa expor explicitamente a porta de um Pod no nó, considere usar um Service do tipo [NodePort](/docs/concepts/services-networking/service/#nodeport) antes de recorrer a `hostPort`. + +- Evite usar `hostNetwork` pelos mesmos motivos do `hostPort`. 
+ +- Use [headless Services](/docs/concepts/services-networking/service/#headless-services) (que tem um `ClusterIP` ou `None`) para descoberta de serviço quando você não precisar de um balanceador de carga `kube-proxy`. +## Usando Labels + +- Defina e use [labels](/docs/concepts/overview/working-with-objects/labels/) que identifiquem _atributos semânticos_ da sua aplicação ou Deployment, como `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`. Você pode usar essas labels para selecionar os Pods apropriados para outros recursos; por exemplo, um Service que seleciona todos os Pods `tier: frontend`, ou todos +os componentes de `app: myapp`. Veja o app [guestbook](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) para exemplos dessa abordagem. + +Um Service pode ser feito para abranger vários Deployments, omitindo labels específicas de lançamento de seu seletor. Quando você +precisar atualizar um serviço em execução sem _downtime_, use um [Deployment](/docs/concepts/workloads/controllers/deployment/). + +Um estado desejado de um objeto é descrito por um Deployment, e se as alterações nesse _spec_ forem _aplicadas_ o controlador +do Deployment altera o estado real para o estado desejado em uma taxa controlada. + +- Use as [labels comuns do Kubernetes](/docs/concepts/overview/working-with-objects/common-labels/) para casos de uso comuns. +Essas labels padronizadas enriquecem os metadados de uma forma que permite que ferramentas, incluindo `kubectl` e a [dashboard](/docs/tasks/access-application-cluster/web-ui-dashboard), funcionem de uma forma interoperável. + +- Você pode manipular labels para depuração. Como os controladores do Kubernetes (como ReplicaSet) e Services se relacionam com os Pods usando seletor de labels, remover as labels relevantes de um Pod impedirá que ele seja considerado por um controlador ou que +seja atendido pelo tráfego de um Service. 
Se você remover as labels de um Pod existente, seu controlador criará um novo Pod para +substituí-lo. Essa é uma maneira útil de depurar um Pod anteriormente "ativo" em um ambiente de "quarentena". Para remover ou +alterar labels interativamente, use [`kubectl label`](/docs/reference/generated/kubectl/kubectl-commands#label). + + +## Imagens de Contêiner + +A [imagePullPolicy](/docs/concepts/containers/images/#updating-images) e tag da imagem afetam quando o [kubelet](/docs/reference/command-line-tools-reference/kubelet/) tenta puxar a imagem especificada. + +- `imagePullPolicy: IfNotPresent`: a imagem é puxada apenas se ainda não estiver presente localmente. + +- `imagePullPolicy: Always`: sempre que o kubelet inicia um contêiner, ele consulta o *registry* da imagem do contêiner para verificar o resumo de assinatura da imagem. Se o kubelet tiver uma imagem do contêiner com o mesmo resumo de assinatura +armazenado em cache localmente, o kubelet usará a imagem em cache, caso contrário, o kubelet baixa(*pulls*) a imagem com o resumo de assinatura resolvido, e usa essa imagem para iniciar o contêiner. + +- `imagePullPolicy` é omitido se a tag da imagem é `:latest` ou se `imagePullPolicy` é omitido é automaticamente definido como `Always`. Observe que _não_ será utilizado para `ifNotPresent`se o valor da tag mudar. + +- `imagePullPolicy` é omitido se uma tag da imagem existe mas não `:latest`: `imagePullPolicy` é automaticamente definido como `ifNotPresent`. Observe que isto _não_ será atualizado para `Always` se a tag for removida ou alterada para `:latest`. + +- `imagePullPolicy: Never`: presume-se que a imagem exista localmente. Não é feita nenhuma tentativa de puxar a imagem. 
+ +{{< note >}} +Para garantir que seu contêiner sempre use a mesma versão de uma imagem, você pode especificar seu [resumo de assinatura](https://docs.docker.com/engine/reference/commandline/pull/#pull-an-image-by-digest-immutable-identifier); +substitua `<image-name>:<tag>` por `<image-name>@<digest>` (por exemplo, `image@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2`). Esse resumo de assinatura identifica exclusivamente uma versão +específica de uma imagem, então isso nunca vai ser atualizado pelo Kubernetes a menos que você mude o valor do resumo de assinatura da imagem. +{{< /note >}} + +{{< note >}} +Você deve evitar o uso da tag `:latest` em produção, pois é mais difícil rastrear qual versão da imagem está sendo executada e mais difícil reverter adequadamente. +{{< /note >}} + +{{< note >}} +A semântica de cache do provedor de imagem subjacente torna até mesmo `imagePullPolicy: Always` eficiente, contanto que o registro esteja acessível de forma confiável. Com o Docker, por exemplo, se a imagem já existe, a tentativa de baixar (pull) é rápida porque todas as camadas da imagem são armazenadas em cache e nenhum download de imagem é necessário. +{{< /note >}} + +## Usando kubectl + +- Use `kubectl apply -f <diretório>`. Isso procura por configurações do Kubernetes em todos os arquivos `.yaml`, `.yml` em `<diretório>` e passa isso para `apply`. + +- Use *label selectors* para operações `get` e `delete` em vez de nomes de objetos específicos. Consulte as seções sobre [label selectors](/docs/concepts/overview/working-with-objects/labels/#label-selectors) +e [usando Labels efetivamente](/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively). + +- Use `kubectl create deployment` e `kubectl expose` para criar rapidamente Deployments e Services de um único contêiner. Consulte [Use um Service para acessar uma aplicação em um cluster](/docs/tasks/access-application-cluster/service-access-application-cluster/) para obter um exemplo. 
diff --git a/content/pt-br/docs/concepts/extend-kubernetes/operator.md b/content/pt-br/docs/concepts/extend-kubernetes/operator.md index ba20161490..627bcbfbb8 100644 --- a/content/pt-br/docs/concepts/extend-kubernetes/operator.md +++ b/content/pt-br/docs/concepts/extend-kubernetes/operator.md @@ -52,7 +52,7 @@ Algumas das coisas que um operador pode ser usado para automatizar incluem: como esquemas de base de dados ou definições de configuração extra * publicar um *Service* para aplicações que não suportam a APIs do Kubernetes para as descobrir -* simular una falha em todo ou parte do cluster de forma a testar a resiliência +* simular uma falha em todo ou parte do cluster de forma a testar a resiliência * escolher um lider para uma aplicação distribuída sem um processo de eleição de membro interno @@ -128,7 +128,7 @@ que pode atuar como um [cliente da API do Kubernetes](/docs/reference/using-api/ * Use ferramentes existentes para escrever os seus Operadores: * usando [KUDO](https://kudo.dev/) (Kubernetes Universal Declarative Operator) * usando [kubebuilder](https://book.kubebuilder.io/) - * usando [Metacontroller](https://metacontroller.app/) juntamente com WebHooks que + * usando [Metacontroller](https://metacontroller.github.io/metacontroller/intro.html) juntamente com WebHooks que implementa você mesmo * usando o [Operator Framework](https://github.com/operator-framework/getting-started) * [Publique](https://operatorhub.io/) o seu operador para que outras pessoas o possam usar diff --git a/content/pt-br/docs/concepts/overview/what-is-kubernetes.md b/content/pt-br/docs/concepts/overview/what-is-kubernetes.md index 29473a7f75..4a072c01d2 100644 --- a/content/pt-br/docs/concepts/overview/what-is-kubernetes.md +++ b/content/pt-br/docs/concepts/overview/what-is-kubernetes.md @@ -90,5 +90,5 @@ Kubernetes: ## {{% heading "whatsnext" %}} -* Dê uma olhada em [Componentes do Kubernetes](/docs/concepts/overview/components/). 
+* Dê uma olhada em [Componentes do Kubernetes](/pt-br/docs/concepts/overview/components/). * Pronto para [Iniciar](/docs/setup/)? diff --git a/content/pt-br/docs/concepts/overview/working-with-objects/_index.md b/content/pt-br/docs/concepts/overview/working-with-objects/_index.md old mode 100755 new mode 100644 diff --git a/content/pt-br/docs/concepts/services-networking/_index.md b/content/pt-br/docs/concepts/services-networking/_index.md new file mode 100755 index 0000000000..cbe38ae33a --- /dev/null +++ b/content/pt-br/docs/concepts/services-networking/_index.md @@ -0,0 +1,14 @@ +--- +title: "Serviços, balanceamento de carga e conectividade" +weight: 60 +description: > + Conceitos e recursos por trás da conectividade no Kubernetes. +--- + +A conectividade do Kubernetes trata quatro preocupações: +- Contêineres em um Pod se comunicam via interface _loopback_. +- A conectividade do cluster provê a comunicação entre diferentes Pods. +- O recurso de _Service_ permite a você expor uma aplicação executando em um Pod, +de forma a ser alcançável de fora de seu cluster. +- Você também pode usar os _Services_ para publicar serviços de consumo interno do +seu cluster. diff --git a/content/pt-br/docs/concepts/services-networking/network-policies.md b/content/pt-br/docs/concepts/services-networking/network-policies.md new file mode 100644 index 0000000000..2c45902e40 --- /dev/null +++ b/content/pt-br/docs/concepts/services-networking/network-policies.md @@ -0,0 +1,345 @@ +--- +title: Políticas de rede +content_type: concept +weight: 50 +--- + + + +Se você deseja controlar o fluxo do tráfego de rede no nível do endereço IP ou de portas TCP e UDP +(camadas OSI 3 e 4) então você deve considerar usar Políticas de rede (`NetworkPolicies`) do Kubernetes para aplicações +no seu cluster. 
`NetworkPolicy` é um objeto focado em aplicações/experiência do desenvolvedor +que permite especificar como é permitido a um {{< glossary_tooltip text="pod" term_id="pod">}} +comunicar-se com várias "entidades" de rede. + +As entidades que um Pod pode se comunicar são identificadas através de uma combinação dos 3 +identificadores à seguir: + +1. Outros pods que são permitidos (exceção: um pod não pode bloquear a si próprio) +2. Namespaces que são permitidos +3. Blocos de IP (exceção: o tráfego de e para o nó que um Pod está executando sempre é permitido, +independentemente do endereço IP do Pod ou do Nó) + +Quando definimos uma política de rede baseada em pod ou namespace, utiliza-se um {{< glossary_tooltip text="selector" term_id="selector">}} +para especificar qual tráfego é permitido de e para o(s) Pod(s) que correspondem ao seletor. + +Quando uma política de redes baseada em IP é criada, nós definimos a política baseada em blocos de IP (faixas CIDR). + + +## Pré requisitos + +As políticas de rede são implementadas pelo [plugin de redes](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/). Para usar +uma política de redes, você deve usar uma solução de redes que suporte o objeto `NetworkPolicy`. +A criação de um objeto `NetworkPolicy` sem um controlador que implemente essas regras não tem efeito. + +## Pods isolados e não isolados + +Por padrão, pods não são isolados; eles aceitam tráfego de qualquer origem. + +Os pods tornam-se isolados ao existir uma `NetworkPolicy` que selecione eles. Uma vez que +exista qualquer `NetworkPolicy` no namespace selecionando um pod em específico, aquele pod +irá rejeitar qualquer conexão não permitida por qualquer `NetworkPolicy`. (Outros pod no mesmo +namespace que não são selecionados por nenhuma outra `NetworkPolicy` irão continuar aceitando +todo tráfego de rede.) + +As políticas de rede não conflitam; elas são aditivas. 
Se qualquer política selecionar um pod, +o pod torna-se restrito ao que é permitido pela união das regras de entrada/saída de tráfego definidas +nas políticas. Assim, a ordem de avaliação não afeta o resultado da política. + +Para o fluxo de rede entre dois pods ser permitido, tanto a política de saída no pod de origem +e a política de entrada no pod de destino devem permitir o tráfego. Se a política de saída na +origem, ou a política de entrada no destino negar o tráfego, o tráfego será bloqueado. + +## O recurso NetworkPolicy {#networkpolicy-resource} + +Veja a referência [NetworkPolicy](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#networkpolicy-v1-networking-k8s-io) para uma definição completa do recurso. + +Uma `NetworkPolicy` de exemplo é similar ao abaixo: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: test-network-policy + namespace: default +spec: + podSelector: + matchLabels: + role: db + policyTypes: + - Ingress + - Egress + ingress: + - from: + - ipBlock: + cidr: 172.17.0.0/16 + except: + - 172.17.1.0/24 + - namespaceSelector: + matchLabels: + project: myproject + - podSelector: + matchLabels: + role: frontend + ports: + - protocol: TCP + port: 6379 + egress: + - to: + - ipBlock: + cidr: 10.0.0.0/24 + ports: + - protocol: TCP + port: 5978 +``` + +{{< note >}} +Criar esse objeto no seu cluster não terá efeito a não ser que você escolha uma +solução de redes que suporte políticas de rede. +{{< /note >}} + +__Campos obrigatórios__: Assim como todas as outras configurações do Kubernetes, uma `NetworkPolicy` +necessita dos campos `apiVersion`, `kind` e `metadata`. Para maiores informações sobre +trabalhar com arquivos de configuração, veja +[Configurando containeres usando ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/), +e [Gerenciamento de objetos](/docs/concepts/overview/working-with-objects/object-management). 
+ +__spec__: A [spec](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) contém todas as informações necessárias +para definir uma política de redes em um namespace. + +__podSelector__: Cada `NetworkPolicy` inclui um `podSelector` que seleciona o grupo de pods +que a política se aplica. A política acima seleciona os pods com a _label_ "role=db". Um `podSelector` +vazio seleciona todos os pods no namespace. + +__policyTypes__: Cada `NetworkPolicy` inclui uma lista de `policyTypes` que pode incluir `Ingress`, +`Egress` ou ambos. O campo `policyTypes` indica se a política se aplica ao tráfego de entrada +com destino aos pods selecionados, o tráfego de saída com origem dos pods selecionados ou ambos. +Se nenhum `policyType` for definido então por padrão o tipo `Ingress` será sempre utilizado, e o +tipo `Egress` será configurado apenas se o objeto contiver alguma regra de saída. (campo `egress` a seguir). + +__ingress__: Cada `NetworkPolicy` pode incluir uma lista de regras de entrada permitidas através do campo `ingress`. +Cada regra permite o tráfego que corresponde simultaneamente às sessões `from` (de) e `ports` (portas). +A política de exemplo acima contém uma regra simples, que corresponde ao tráfego em uma única porta, +de uma das três origens definidas, sendo a primeira definida via `ipBlock`, a segunda via `namespaceSelector` e +a terceira via `podSelector`. + +__egress__: Cada política pode incluir uma lista de regras de regras de saída permitidas através do campo `egress`. +Cada regra permite o tráfego que corresponde simultaneamente às sessões `to` (para) e `ports` (portas). +A política de exemplo acima contém uma regra simples, que corresponde ao tráfego destinado a uma +porta em qualquer destino pertencente à faixa de IPs em `10.0.0.0/24`. + +Então a `NetworkPolicy` acima: + +1. 
Isola os pods no namespace "default" com a _label_ "role=db" para ambos os tráfegos de entrada +e saída (se eles ainda não estavam isolados) +2. (Regras de entrada/ingress) permite conexões para todos os pods no namespace "default" com a _label_ "role=db" na porta TCP 6379 de: + + * qualquer pod no namespace "default" com a _label_ "role=frontend" + * qualquer pod em um namespace que tenha a _label_ "project=myproject" (aqui cabe ressaltar que o namespace que deve ter a _label_ e não os pods dentro desse namespace) + * IPs dentro das faixas 172.17.0.0–172.17.0.255 e 172.17.2.0–172.17.255.255 (ex.:, toda 172.17.0.0/16 exceto 172.17.1.0/24) + +3. (Regras de saída/egress) permite conexões de qualquer pod no namespace "default" com a _label_ +"role=db" para a faixa de destino 10.0.0.0/24 na porta TCP 5978. + +Veja o tutorial [Declarando uma política de redes](/docs/tasks/administer-cluster/declare-network-policy/) para mais exemplos. + +## Comportamento dos seletores `to` e `from` + +Existem quatro tipos de seletores que podem ser especificados nas sessões `ingress.from` ou +`egress.to`: + +__podSelector__: Seleciona Pods no mesmo namespace que a política de rede foi criada, e que deve +ser permitido origens no tráfego de entrada ou destinos no tráfego de saída. + +__namespaceSelector__: Seleciona namespaces para o qual todos os Pods devem ser permitidos como +origens no caso de tráfego de entrada ou destino no tráfego de saída. + +__namespaceSelector__ *e* __podSelector__: Uma entrada `to`/`from` única que permite especificar +ambos `namespaceSelector` e `podSelector` e seleciona um conjunto de Pods dentro de um namespace. +Seja cuidadoso em utilizar a sintaxe YAML correta; essa política: + +```yaml + ... + ingress: + - from: + - namespaceSelector: + matchLabels: + user: alice + podSelector: + matchLabels: + role: client + ... +``` +contém um único elemento `from` permitindo conexões de Pods com a label `role=client` em +namespaces com a _label_ `user=alice`. 
Mas *essa* política: + +```yaml + ... + ingress: + - from: + - namespaceSelector: + matchLabels: + user: alice + - podSelector: + matchLabels: + role: client + ... +``` + +contém dois elementos no conjunto `from` e permite conexões de Pods no namespace local com +a _label_ `role=client`, *OU* de qualquer outro Pod em qualquer outro namespace que tenha +a label `user=alice`. + +Quando estiver em dúvida, utilize o comando `kubectl describe` para verificar como o +Kubernetes interpretou a política. + +__ipBlock__: Isso seleciona um conjunto particular de faixas de IP a serem permitidos como +origens no caso de entrada ou destinos no caso de saída. Devem ser considerados IPs externos +ao cluster, uma vez que os IPs dos Pods são efêmeros e imprevisíveis. + +Os mecanismos de entrada e saída do cluster geralmente requerem que os IPs de origem ou destino +sejam reescritos. Em casos em que isso aconteça, não é definido se deve acontecer antes ou +depois do processamento da `NetworkPolicy` que corresponde a esse tráfego, e o comportamento +pode ser diferente para cada plugin de rede, provedor de nuvem, implementação de `Service`, etc. + +No caso de tráfego de entrada, isso significa que em alguns casos você pode filtrar os pacotes +de entrada baseado no IP de origem atual, enquanto que em outros casos o IP de origem que +a `NetworkPolicy` atua pode ser o IP de um `LoadBalancer` ou do Nó em que o Pod está executando. + +No caso de tráfego de saída, isso significa que conexões de Pods para `Services` que são reescritos +para IPs externos ao cluster podem ou não estar sujeitos a políticas baseadas no campo `ipBlock`. + +## Políticas padrão + +Por padrão, se nenhuma política existir no namespace, então todo o tráfego de entrada e saída é +permitido de e para os pods nesse namespace. Os exemplos a seguir permitem a você mudar o +comportamento padrão nesse namespace. 
+ +### Bloqueio padrão de todo tráfego de entrada + +Você pode criar uma política padrão de isolamento para um namespace criando um objeto `NetworkPolicy` +que seleciona todos os pods mas não permite o tráfego de entrada para esses pods. + +{{< codenew file="service/networking/network-policy-default-deny-ingress.yaml" >}} + +Isso garante que mesmo pods que não são selecionados por nenhuma outra política de rede ainda +serão isolados. Essa política não muda o comportamento padrão de isolamento de tráfego de saída +nesse namespace. + +### Permitir por padrão todo tráfego de entrada + +Se você deseja permitir todo o tráfego de todos os pods em um namespace (mesmo que políticas que +sejam adicionadas faça com que alguns pods sejam tratados como "isolados"), você pode criar +uma política que permite explicitamente todo o tráfego naquele namespace. + +{{< codenew file="service/networking/network-policy-allow-all-ingress.yaml" >}} + +### Bloqueio padrão de todo tráfego de saída + +Você pode criar uma política de isolamento de saída padrão para um namespace criando uma +política de redes que selecione todos os pods, mas não permita o tráfego de saída a partir +de nenhum desses pods. + +{{< codenew file="service/networking/network-policy-default-deny-egress.yaml" >}} + +Isso garante que mesmo pods que não são selecionados por outra política de rede não seja permitido +tráfego de saída. Essa política não muda o comportamento padrão de tráfego de entrada. + +### Permitir por padrão todo tráfego de saída + +Caso você queira permitir todo o tráfego de todos os pods em um namespace (mesmo que políticas sejam +adicionadas e cause com que alguns pods sejam tratados como "isolados"), você pode criar uma +política explicita que permite todo o tráfego de saída no namespace. 
+ +{{< codenew file="service/networking/network-policy-allow-all-egress.yaml" >}} + +### Bloqueio padrão de todo tráfego de entrada e saída + +Você pode criar uma política padrão em um namespace que previne todo o tráfego de entrada +E saída criando a política a seguir no namespace. + +{{< codenew file="service/networking/network-policy-default-deny-all.yaml" >}} + +Isso garante que mesmo pods que não são selecionados por nenhuma outra política de redes não +possuam permissão de tráfego de entrada ou saída. + +## Selecionando uma faixa de portas + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +Ao escrever uma política de redes, você pode selecionar uma faixa de portas ao invés de uma +porta única, utilizando-se do campo `endPort` conforme a seguir: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: multi-port-egress + namespace: default +spec: + podSelector: + matchLabels: + role: db + policyTypes: + - Egress + egress: + - to: + - ipBlock: + cidr: 10.0.0.0/24 + ports: + - protocol: TCP + port: 32000 + endPort: 32768 +``` + +A regra acima permite a qualquer Pod com a _label_ "role=db" no namespace `default` de se comunicar +com qualquer IP na faixa `10.0.0.0/24` através de protocolo TCP, desde que a porta de destino +esteja na faixa entre 32000 e 32768. + +As seguintes restrições aplicam-se ao se utilizar esse campo: + +* Por ser uma funcionalidade "alpha", ela é desativada por padrão. Para habilitar o campo `endPort` +no cluster, você (ou o seu administrador do cluster) deve habilitar o [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) `NetworkPolicyEndPort` no `kube-apiserver` com a flag `--feature-gates=NetworkPolicyEndPort=true,...`. +* O valor de `endPort` deve ser igual ou maior ao valor do campo `port`. +* O campo `endPort` só pode ser definido se o campo `port` também for definido. +* Ambos os campos `port` e `endPort` devem ser números. 
+ +{{< note >}} +Seu cluster deve utilizar um plugin {{< glossary_tooltip text="CNI" term_id="cni" >}} +que suporte o campo `endPort` na especificação da política de redes. +{{< /note >}} + +## Selecionando um Namespace pelo seu nome + +{{< feature-state state="beta" for_k8s_version="1.21" >}} + +A camada de gerenciamento do Kubernetes configura uma _label_ imutável `kubernetes.io/metadata.name` em +todos os namespaces, uma vez que o [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) esteja habilitado por padrão. +O valor dessa _label_ é o nome do namespace. + +Enquanto que um objeto `NetworkPolicy` não pode selecionar um namespace pelo seu nome através de +um campo específico, você pode utilizar essa _label_ padrão para selecionar um namespace pelo seu nome. + +## O que você não pode fazer com `NetworkPolicies` (ao menos por enquanto!) +Por enquanto no Kubernetes {{< skew latestVersion >}} as funcionalidades a seguir não existem +mas você pode conseguir implementar de forma alternativa utilizando componentes do Sistema Operacional +(como SELinux, OpenVSwitch, IPtables, etc) ou tecnologias da camada 7 OSI (Ingress controllers, implementações de service mesh) ou ainda _admission controllers_. +No caso do assunto "segurança de redes no Kubernetes" ser novo para você, vale notar que as +histórias de usuário a seguir ainda não podem ser implementadas: + +- Forçar o tráfego interno do cluster passar por um gateway comum (pode ser implementado via service mesh ou outros proxies) +- Qualquer coisa relacionada a TLS/mTLS (use um service mesh ou ingress controller para isso) +- Políticas específicas a nível do nó kubernetes (você pode utilizar as notações de IP CIDR para isso, mas não pode selecionar nós Kubernetes por suas identidades) +- Selecionar `Services` pelo seu nome (você pode, contudo, selecionar pods e namespaces por seus {{< glossary_tooltip text="labels" term_id="label" >}} o que torna-se uma solução de contorno viável). 
+- Criação ou gerenciamento +- Políticas padrão que são aplicadas a todos os namespaces e pods (existem alguns plugins externos do Kubernetes e projetos que podem fazer isso, e a comunidade está trabalhando nessa especificação). +- Ferramental de testes para validação de políticas de redes. +- Possibilidade de logar eventos de segurança de redes (conexões bloqueadas, aceitas). Existem plugins CNI que conseguem fazer isso à parte. +- Possibilidade de explicitamente negar políticas de rede (o modelo das `NetworkPolicies` são "negar por padrão e conforme a necessidade, deve-se adicionar regras que permitam o tráfego). +- Bloquear o tráfego que venha da interface de loopback/localhost ou que venham do nó em que o Pod se encontre. + +## {{% heading "whatsnext" %}} + + +- Veja o tutorial [Declarando políticas de redes](/docs/tasks/administer-cluster/declare-network-policy/) para mais exemplos. +- Veja mais [cenários comuns e exemplos](https://github.com/ahmetb/kubernetes-network-policy-recipes) de políticas de redes. diff --git a/content/pt-br/docs/concepts/storage/persistent-volumes.md b/content/pt-br/docs/concepts/storage/persistent-volumes.md new file mode 100644 index 0000000000..65396a3c37 --- /dev/null +++ b/content/pt-br/docs/concepts/storage/persistent-volumes.md @@ -0,0 +1,738 @@ +--- +reviewers: +- jsafrane +- saad-ali +- thockin +- msau42 +- xing-yang +title: Volumes Persistentes +feature: + title: Orquestração de Armazenamento + description: > + Montar automaticamente o armazenamento de sua escolha, seja de um armazenamento local, de um provedor de cloud pública, como GCP ou AWS, ou um armazenameto de rede, como NFS, iSCSI, Gluster, Ceph, Cinder ou Flocker. + +content_type: conceito +weight: 20 +--- + + + +Esse documento descreve o estado atual dos _volumes persistentes_ no Kubernetes. Sugerimos que esteja familiarizado com [volumes](/docs/concepts/storage/volumes/). 
+ + + +## Introdução + + +O gerenciamento de armazenamento é uma questão bem diferente do gerenciamento de instâncias computacionais. O subsistema PersistentVolume provê uma API para usuários e administradores que mostra de forma detalhada de como o armazenamento é provido e como ele é consumido. Para isso, nós introduzimos duas novas APIs: PersistentVolume e PersistentVolumeClaim. + +Um _PersistentVolume_ (PV) é uma parte do armazenamento dentro do cluster que tenha sido provisionada por um administrador, ou dinamicamente utilizando [Classes de Armazenamento](/docs/concepts/storage/storage-classes/). Isso é um recurso dentro do cluster da mesma forma que um nó também é. PVs são plugins de volume da mesma forma que Volumes, porém eles têm um ciclo de vida independente de qualquer Pod que utilize um PV. Essa API tem por objetivo mostrar os detalhes da implementação do armazenamento, seja ele NFS, iSCSI, ou um armazenamento específico de um provedor de cloud pública. + +Uma_PersistentVolumeClaim_ (PVC) é uma requisição para armazenamento por um usuário. É similar a um Pod. Pods utilizam recursos do nó e PVCs utilizam recursos do PV. Pods podem solicitar níveis específicos de recursos (CPU e Memória). Claims podem solicitar tamanho e modos de acesso específicos (exemplo: montagem como ReadWriteOnce, ReadOnlyMany ou ReadWriteMany, veja [Modos de Acesso](#modos-de-acesso)). + +Enquanto as PersistentVolumeClaims permitem que um usuário utilize recursos de armazenamento de forma limitada, é comum que usuários precisem de PersistentVolumes com diversas propriedades, como desempenho, para problemas diversos. Os administradores de cluster precisam estar aptos a oferecer uma variedade de PersistentVolumes que difiram em tamanho e modo de acesso, sem expor os usuários a detalhes de como esses volumes são implementados. Para necessidades como essas, temos o recurso de _StorageClass_. 
+ +Veja os [exemplos de passo a passo de forma detalhada](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/). + +## Requisição e ciclo de vida de um volume + +PVs são recursos dentro um cluster. PVCs são requisições para esses recursos e também atuam como uma validação da solicitação desses recursos. O ciclo de vida da interação entre PVs e PVCs funcionam da seguinte forma: + +### Provisionamento + +Existem duas formas de provisionar um PV: estaticamente ou dinamicamente. + +#### Estático + +O administrador do cluster cria uma determinada quantidade de PVs. Eles possuem todos os detalhes do armazenamento os quais estão atrelados, que neste caso fica disponível para utilização por um usuário dentro do cluster. Eles estão presentes na API do Kubernetes e disponíveis para utilização. + +#### Dinâmico + +Quando nenhum dos PVs estáticos, que foram criados anteriormente pelo administrador, satisfazem os critérios de uma PersistentVolumeClaim enviado por um usuário, o cluster pode tentar realizar um provisionamento dinâmico para atender a essa PVC. Esse provisionamento é baseado em StorageClasses: a PVC deve solicitar uma [classe de armazenamento](/docs/concepts/storage/storage-classes/) e o administrador deve ter previamente criado e configurado essa classe para que o provisionamento dinâmico possa ocorrer. Requisições que solicitam a classe `""` efetivamente desabilitam o provisionamento dinâmico para elas mesmas. + +Para habilitar o provisionamento de armazenamento dinâmico baseado em classe de armazenamento, o administrador do cluster precisa habilitar o [controle de admissão](/docs/reference/access-authn-authz/admission-controllers/#defaultstorageclass) `DefaultStorageClass` no servidor da API. Isso pode ser feito, por exemplo, garantindo que `DefaultStorageClass` esteja entre aspas simples, ordenado por uma lista de valores para a flag `--enable-admission-plugins`, componente do servidor da API. 
Para mais informações sobre os comandos das flags do servidor da API, consulte a documentação [kube-apiserver](/docs/admin/kube-apiserver/). + +### Binding + +Um usuário cria, ou em caso de um provisionamento dinâmico já ter criado, uma PersistentVolumeClaim solicitando uma quantidade específica de armazenamento e um determinado modo de acesso. Um controle de loop no master monitora por novas PVCs, encontra um PV (se possível) que satisfaça os requisitos e realiza o bind. Se o PV foi provisionado dinamicamente por uma PVC, o loop sempre vai fazer o bind desse PV com essa PVC em específico. Caso contrário, o usuário vai receber no mínimo o que ele havia solicitado, porém, o volume possa exceder em relação à solicitação inicial. Uma vez realizado esse processo, PersistentVolumeClaim sempre vai ter um bind exclusivo, sem levar em conta como o isso aconteceu. Um bind entre uma PVC e um PV é um mapeamento de um para um, utilizando o ClaimRef que é um bind bidirecional entre o PersistentVolume e o PersistentVolumeClaim. + +As requisições permanecerão sem bind se o volume solicitado não existir. O bind ocorrerá somente se os requisitos forem atendidos exatamente da mesma forma como solicitado. Por exemplo, um bind de uma PVC de 100 GB não ocorrerá num cluster que foi provisionado com vários PVs de 50 GB. O bind ocorrerá somente no momento em que um PV de 100 GB for adicionado. + +### Utilização + +Pods utilizam requisições como volumes. O cluster inspeciona a requisição para encontrar o volume atrelado a ela e monta esse volume para um Pod. Para volumes que suportam múltiplos modos de acesso, o usuário especifica qual o modo desejado quando utiliza essas requisições. + +Uma vez que o usuário tem a requisição atrelada a um PV, ele pertence ao usuário pelo tempo que ele precisar. Usuários agendam Pods e acessam seus PVs requisitados através da seção `persistentVolumeClaim` no bloco `volumes` do Pod. 
Para mais detalhes sobre isso, veja [Requisições como Volumes](#requisições-como-volumes). + +### Proteção de Uso de um Objeto de Armazenamento + +O propósito da funcionalidade do Objeto de Armazenamento em Proteção de Uso é garantir que as PersistentVolumeClaims (PVCs) que estejam sendo utilizadas por um Pod e PersistentVolume (PVs) que pertençam aos PVCs não sejam removidos do sistema, pois isso pode resultar numa perda de dados. + +{{< note >}} +Uma PVC está sendo utilizada por um Pod quando existe um Pod que está usando essa PVC. +{{< /note >}} + +Se um usuário deleta uma PVC que está sendo utilizada por um Pod, esta PVC não é removida imediatamente. A remoção da PVC é adiada até que a PVC não esteja mais sendo utilizado por nenhum Pod. Se um administrador deleta um PV que está atrelado a uma PVC, o PV não é removido imediatamente também. A remoção do PV é adiada até que o PV não esteja mais atrelado à PVC. + +Note que uma PVC é protegida quando o status da PVC é `Terminating` e a lista `Finalizers` contém `kubernetes.io/pvc-protection`: + +```shell +kubectl describe pvc hostpath +Name: hostpath +Namespace: default +StorageClass: example-hostpath +Status: Terminating +Volume: +Labels: +Annotations: volume.beta.kubernetes.io/storage-class=example-hostpath + volume.beta.kubernetes.io/storage-provisioner=example.com/hostpath +Finalizers: [kubernetes.io/pvc-protection] +... 
+``` + +Note que um PV é protegido quando o status da PVC é `Terminating` e a lista `Finalizers` contém `kubernetes.io/pv-protection` também: + +```shell +kubectl describe pv task-pv-volume +Name: task-pv-volume +Labels: type=local +Annotations: +Finalizers: [kubernetes.io/pv-protection] +StorageClass: standard +Status: Terminating +Claim: +Reclaim Policy: Delete +Access Modes: RWO +Capacity: 1Gi +Message: +Source: + Type: HostPath (bare host directory volume) + Path: /tmp/data + HostPathType: +Events: +``` + +### Recuperação + +Quando um usuário não precisar mais utilizar um volume, ele pode deletar a PVC pela API, que, permite a recuperação do recurso. A política de recuperação para um PersistentVolume diz ao cluster o que fazer com o volume após ele ter sido liberado da sua requisição. Atualmente, volumes podem ser Retidos, Reciclados ou Deletados. + +#### Retenção + +A política `Retain` permite a recuperação de forma manual do recurso. Quando a PersistentVolumeClaim é deletada, ela continua existindo e o volume é considerado "livre". Mas ele ainda não está disponível para outra requisição porque os dados da requisição anterior ainda permanecem no volume. Um administrador pode manualmente recuperar o volume executando os seguintes passos: + + +1. Deletar o PersistentVolume. O armazenamento associado à infraestrutura externa (AWS EBS, GCE PD, Azure Disk ou Cinder volume) ainda continuará existindo após o PV ser deletado. +1. Limpar os dados de forma manual no armazenamento associado. +1. Deletar manualmente o armazenamento associado. Caso você queira utilizar o mesmo armazenamento, crie um novo PersistentVolume com esse armazenamento. + +#### Deletar + +Para plugins de volume que suportam a política de recuperação `Delete`, a deleção vai remover o tanto o PersistentVolume do Kubernetes, quanto o armazenamento associado à infraestrutura externa, como AWS EBS, GCE PD, Azure Disk, ou Cinder volume. 
Volumes que foram provisionados dinamicamente herdam a [política de retenção da sua StorageClass](#política-de-retenção), que por padrão é `Delete`. O administrador precisa configurar a StorageClass de acordo com as necessidades dos usuários. Caso contrário, o PV deve ser editado ou reparado após sua criação. Veja [Alterar a política de retenção de um PersistentVolume](/docs/tasks/administer-cluster/change-pv-reclaim-policy/). + +#### Reciclar + +{{< warning >}} +A política de retenção `Recycle` está depreciada. Ao invés disso, recomendamos a utilização de provisionamento dinâmico. +{{< /warning >}} + +Em caso do volume plugin ter suporte a essa operação, a política de retenção `Recycle` faz uma limpeza básica (`rm -rf /thevolume/*`) no volume e torna ele disponível novamente para outra requisição. + +Contudo, um administrador pode configurar um template personalizado de um Pod reciclador utilizando a linha de comando do gerenciamento de controle do Kubernetes como descrito em [referência](/docs/reference/command-line-tools-reference/kube-controller-manager/). +O Pod reciclador personalizado deve conter a spec `volume` como é mostrado no exemplo abaixo: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: pv-recycler + namespace: default +spec: + restartPolicy: Never + volumes: + - name: vol + hostPath: + path: /any/path/it/will/be/replaced + containers: + - name: pv-recycler + image: "k8s.gcr.io/busybox" + command: ["/bin/sh", "-c", "test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \"$(ls -A /scrub)\" || exit 1"] + volumeMounts: + - name: vol + mountPath: /scrub +``` + +Contudo, o caminho especificado no Pod reciclador personalizado em `volumes` é substituído pelo caminho do volume que está sendo reciclado. + +### Reservando um PersistentVolume + +A camada de gerenciamento pode [fazer o bind de um PersistentVolumeClaims com PersistentVolumes equivalentes](#binding) no cluster. 
Contudo, se você quer que uma PVC faça um bind com um PV específico, é preciso fazer o pré-bind deles. + +Especificando um PersistentVolume na PersistentVolumeClaim, você declara um bind entre uma PVC e um PV específico. O bind ocorrerá se o PersistentVolume existir e não estiver reservado por uma PersistentVolumeClaims através do seu campo `claimRef`. + +O bind ocorre independentemente se algum volume atender ao critério, incluindo afinidade de nó. A camada de gerenciamento verifica se a [classe de armazenamento](/docs/concepts/storage/storage-classes/), modo de acesso e tamanho do armazenamento solicitado ainda são válidos. + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: foo-pvc + namespace: foo +spec: + storageClassName: "" # Empty string must be explicitly set otherwise default StorageClass will be set + volumeName: foo-pv + ... +``` + +Esse método não garante nenhum privilégio de bind no PersistentVolume. Para evitar que alguma outra PersistentVolumeClaims possa usar o PV que você especificar, você precisa primeiro reservar esse volume de armazenamento. Especifique sua PersistentVolumeClaim no campo `claimRef` do PV para que outras PVCs não façam bind nele. + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: foo-pv +spec: + storageClassName: "" + claimRef: + name: foo-pvc + namespace: foo + ... +``` + +Isso é útil se você deseja utilizar PersistentVolumes que possuem suas `claimPolicy` configuradas para `Retain`, incluindo situações onde você estiver reutilizando um PV existente. + +### Expandindo Requisições de Volumes Persistentes + +{{< feature-state for_k8s_version="v1.11" state="beta" >}} + +Agora, o suporte à expansão de PersistentVolumeClaims (PVCs) já é habilitado por padrão. 
Você pode expandir os tipos de volumes abaixo: + +* gcePersistentDisk +* awsElasticBlockStore +* Cinder +* glusterfs +* rbd +* Azure File +* Azure Disk +* Portworx +* FlexVolumes +* {{< glossary_tooltip text="CSI" term_id="csi" >}} + +Você só pode expandir uma PVC se o campo da classe de armazenamento `allowVolumeExpansion` é `true`. + +``` yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: gluster-vol-default +provisioner: kubernetes.io/glusterfs +parameters: + resturl: "http://192.168.10.100:8080" + restusuário: "" + secretNamespace: "" + secretName: "" +allowVolumeExpansion: true +``` + +Para solicitar um volume maior para uma PVC, edite a PVC e especifique um tamanho maior. Isso irá fazer com o que volume atrelado ao respectivo PersistentVolume seja expandido. Nunca um PersistentVolume é criado para satisfazer a requisição. Ao invés disso, um volume existente é redimensionado. + +#### Expansão de volume CSI + +{{< feature-state for_k8s_version="v1.16" state="beta" >}} + +O suporte à expansão de volumes CSI é habilitada por padrão, porém é necessário um driver CSI específico para suportar a expansão do volume. Verifique a documentação do driver CSI específico para mais informações. + +#### Redimensionando um volume que contém um sistema de arquivo + +Só podem ser redimensionados os volumes que contém os seguintes sistemas de arquivo: XFS, Ext3 ou Ext4. + +Quando um volume contém um sistema de arquivo, o sistema de arquivo somente é redimensionado quando um novo Pod está utilizando a PersistentVolumeClaim no modo `ReadWrite`. A expansão de sistema de arquivo é feita quando um Pod estiver inicializando ou quando um Pod estiver em execução e o respectivo sistema de arquivo tenha suporte para expansão a quente. + +FlexVolumes permitem redimensionamento se o `RequiresFSResize` do drive é configurado como `true`. O FlexVolume pode ser redimensionado na reinicialização do Pod. 
+ +#### Redimensionamento de uma PersistentVolumeClaim em uso + +{{< feature-state for_k8s_version="v1.15" state="beta" >}} + +{{< note >}} +A Expansão de PVCs em uso está disponível como beta desde o Kubernetes 1.15, e como alpha desde a versão 1.11. A funcionalidade `ExpandInUsePersistentVolumes` precisa ser habilitada, o que já está automático para vários clusters que possuem funcionalidades beta. Verifique a documentação [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) para mais informações. +{{< /note >}} + +Neste caso, você não precisa deletar e recriar um Pod ou um deployment que está sendo utilizado por uma PVC existente. +Automaticamente, qualquer PVC em uso fica disponível para o Pod assim que o sistema de arquivo for expandido. +Essa funcionalidade não tem efeito em PVCs que não estão em uso por um Pod ou deployment. Você deve criar um Pod que utilize a PVC antes que a expansão seja completada. + +Da mesma forma que outros tipos de volumes - volumes FlexVolume também podem ser expandidos quando estiverem em uso por um Pod. + +{{< note >}} +Redimensionamento de FlexVolume somente é possível quando o respectivo driver suportar essa operação. +{{< /note >}} + +{{< note >}} +Expandir volumes do tipo EBS é uma operação que toma muito tempo. Além disso, só é possível fazer uma modificação por volume a cada 6 horas. +{{< /note >}} + +#### Recuperação em caso de falha na expansão de volumes + +Se a expansão do respectivo armazenamento falhar, o administrador do cluster pode recuperar manualmente o estado da Persistent Volume Claim (PVC) e cancelar as solicitações de redimensionamento. Caso contrário, as tentativas de solicitação de redimensionamento ocorrerão de forma contínua pelo controlador sem nenhuma intervenção do administrador. + +1. Marque o PersistentVolume(PV) que estiver atrelado à PersistentVolumeClaim(PVC) com a política de recuperação `Retain`. +2. Delete a PVC. 
Desde que o PV tenha a política de recuperação `Retain` - nenhum dado será perdido quando a PVC for recriada. +3. Delete a entrada `claimRef` da especificação do PV para que uma PVC possa fazer bind com ele. Isso deve tornar o PV `Available`. +4. Recrie a PVC com um tamanho menor que o PV e configure o campo `volumeName` da PVC com o nome do PV. Isso deve fazer o bind de uma nova PVC a um PV existente. +5. Não esqueça de restaurar a política de recuperação do PV. + +## Tipos de volumes persistentes + +Tipos de PersistentVolume são implementados como plugins. Atualmente o Kubernetes suporta os plugins abaixo: + +* [`awsElasticBlockStore`](/docs/concepts/storage/volumes/#awselasticblockstore) - AWS Elastic Block Store (EBS) +* [`azureDisk`](/docs/concepts/storage/volumes/#azuredisk) - Azure Disk +* [`azureFile`](/docs/concepts/storage/volumes/#azurefile) - Azure File +* [`cephfs`](/docs/concepts/storage/volumes/#cephfs) - CephFS volume +* [`cinder`](/docs/concepts/storage/volumes/#cinder) - Cinder (OpenStack block storage) + (**depreciado**) +* [`csi`](/docs/concepts/storage/volumes/#csi) - Container Storage Interface (CSI) +* [`fc`](/docs/concepts/storage/volumes/#fc) - Fibre Channel (FC) storage +* [`flexVolume`](/docs/concepts/storage/volumes/#flexVolume) - FlexVolume +* [`flocker`](/docs/concepts/storage/volumes/#flocker) - Flocker storage +* [`gcePersistentDisk`](/docs/concepts/storage/volumes/#gcepersistentdisk) - GCE Persistent Disk +* [`glusterfs`](/docs/concepts/storage/volumes/#glusterfs) - Glusterfs volume +* [`hostPath`](/docs/concepts/storage/volumes/#hostpath) - HostPath volume + (somente para teste de nó único; ISSO NÃO FUNCIONARÁ num cluster multi-nós; ao invés disso, considere a utilização de volume `local`.) +* [`iscsi`](/docs/concepts/storage/volumes/#iscsi) - iSCSI (SCSI over IP) storage +* [`local`](/docs/concepts/storage/volumes/#local) - storage local montados nos nós.
+* [`nfs`](/docs/concepts/storage/volumes/#nfs) - Network File System (NFS) storage +* `photonPersistentDisk` - Controlador Photon para disco persistente. + (Esse tipo de volume não funciona mais desde a remoção do provedor de cloud correspondente.) +* [`portworxVolume`](/docs/concepts/storage/volumes/#portworxvolume) - Volume Portworx +* [`quobyte`](/docs/concepts/storage/volumes/#quobyte) - Volume Quobyte +* [`rbd`](/docs/concepts/storage/volumes/#rbd) - Volume Rados Block Device (RBD) +* [`scaleIO`](/docs/concepts/storage/volumes/#scaleio) - Volume ScaleIO + (**depreciado**) +* [`storageos`](/docs/concepts/storage/volumes/#storageos) - Volume StorageOS +* [`vsphereVolume`](/docs/concepts/storage/volumes/#vspherevolume) - Volume vSphere VMDK + +## Volumes Persistentes + +Cada PV contém uma `spec` e um status, que é a especificação e o status do volume. O nome do PersistentVolume deve ser um [DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) válido. + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: pv0003 +spec: + capacity: + storage: 5Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: slow + mountOptions: + - hard + - nfsvers=4.1 + nfs: + path: /tmp + server: 172.17.0.2 +``` + +{{< note >}} +Talvez sejam necessários programas auxiliares para um determinado tipo de volume utilizar um PersistentVolume no cluster. Neste exemplo, o PersistentVolume é do tipo NFS e o programa auxiliar _/sbin/mount.nfs_ é necessário para suportar a montagem dos sistemas de arquivos NFS. +{{< /note >}} + +### Capacidade + +Geralmente, um PV terá uma capacidade de armazenamento específica. Isso é configurado usando o atributo `capacity` do PV. Veja o [Modelo de Recurso](https://git.k8s.io/community/contributors/design-proposals/scheduling/resources.md) do Kubernetes para entender as unidades aceitas pelo atributo `capacity`.
+ +Atualmente, o tamanho do armazenamento é o único recurso que pode ser configurado ou solicitado. Os futuros atributos podem incluir IOPS, throughput, etc. + +### Modo do Volume + +{{< feature-state for_k8s_version="v1.18" state="stable" >}} + +O Kubernetes suporta dois `volumeModes` de PersistentVolumes: `Filesystem` e `Block`. + +`volumeMode` é um parâmetro opcional da API. +`Filesystem` é o modo padrão utilizado quando o parâmetro `volumeMode` é omitido. + +Um volume com `volumeMode: Filesystem` é *montado* em um diretório nos Pods. Se o volume for de um dispositivo de bloco e ele estiver vazio, o Kubernetes cria o sistema de arquivo no dispositivo antes de fazer a montagem pela primeira vez. + +Você pode configurar o valor do `volumeMode` para `Block` para utilizar um disco bruto como volume. Esse volume é apresentado num Pod como um dispositivo de bloco, sem nenhum sistema de arquivo. Esse modo é útil para prover ao Pod a forma mais rápida para acessar um volume, sem nenhuma camada de sistema de arquivo entre o Pod e o volume. Por outro lado, a aplicação que estiver rodando no Pod deverá saber como tratar um dispositivo de bloco. Veja [Suporte a Volume de Bloco Bruto](#raw-block-volume-support) para um exemplo de como utilizar o volume como `volumeMode: Block` num Pod. + +### Modos de Acesso + +Um PersistentVolume pode ser montado num host das mais variadas formas suportadas pelo provedor. Como mostrado na tabela abaixo, os provedores terão diferentes capacidades e cada modo de acesso do PV são configurados nos modos específicos suportados para cada volume em particular. Por exemplo, o NFS pode suportar múltiplos clientes read/write, mas um PV NFS específico pode ser exportado no server como read-only. Cada PV recebe seu próprio modo de acesso que descreve suas capacidades específicas. 
+ +Os modos de acesso são: + +* ReadWriteOnce -- o volume pode ser montado como leitura-escrita por um nó único +* ReadOnlyMany -- o volume pode ser montado como somente-leitura por vários nós +* ReadWriteMany -- o volume pode ser montado como leitura-escrita por vários nós + +Na linha de comando, os modos de acesso ficam abreviados: + +* RWO - ReadWriteOnce +* ROX - ReadOnlyMany +* RWX - ReadWriteMany + +> __Importante!__ Um volume somente pode ser montado utilizando um único modo de acesso por vez, independente se ele suportar mais de um. Por exemplo, um GCEPersistentDisk pode ser montado como ReadWriteOnce por um único nó ou ReadOnlyMany por vários nós, porém não simultaneamente. + + +| Plugin de Volume | ReadWriteOnce | ReadOnlyMany | ReadWriteMany| +| :--- | :---: | :---: | :---: | +| AWSElasticBlockStore | ✓ | - | - | +| AzureFile | ✓ | ✓ | ✓ | +| AzureDisk | ✓ | - | - | +| CephFS | ✓ | ✓ | ✓ | +| Cinder | ✓ | - | - | +| CSI | depende do driver | depende do driver | depende do driver | +| FC | ✓ | ✓ | - | +| FlexVolume | ✓ | ✓ | depende do driver | +| Flocker | ✓ | - | - | +| GCEPersistentDisk | ✓ | ✓ | - | +| Glusterfs | ✓ | ✓ | ✓ | +| HostPath | ✓ | - | - | +| iSCSI | ✓ | ✓ | - | +| Quobyte | ✓ | ✓ | ✓ | +| NFS | ✓ | ✓ | ✓ | +| RBD | ✓ | ✓ | - | +| VsphereVolume | ✓ | - | (funcionam quando os Pods são do tipo collocated) | +| PortworxVolume | ✓ | - | ✓ | +| ScaleIO | ✓ | ✓ | - | +| StorageOS | ✓ | - | - | + +### Classe + +Um PV pode ter uma classe, que é especificada na configuração do atributo `storageClassName` com o nome da [StorageClass](/docs/concepts/storage/storage-classes/). Um PV de uma classe específica só pode ser atrelado a requisições PVCs dessa mesma classe. Um PV sem `storageClassName` não possui nenhuma classe e pode ser montado somente a PVCs que não solicitem nenhuma classe em específico. + +No passado, a notação `volume.beta.kubernetes.io/storage-class` era utilizada no lugar do atributo `storageClassName`. Essa notação ainda funciona. 
Contudo, ela será totalmente depreciada numa futura versão do Kubernetes. + +### Política de Retenção + +Atualmente as políticas de retenção são: + +* Retain -- recuperação manual +* Recycle -- limpeza básica (`rm -rf /thevolume/*`) +* Delete -- o volume de armazenamento associado, como AWS EBS, GCE PD, Azure Disk ou OpenStack Cinder é deletado + +Atualmente, somente NFS e HostPath suportam reciclagem. Volumes AWS EBS, GCE PD, Azure Disk e Cinder suportam delete. + +### Opções de Montagem + +Um administrador do Kubernetes pode especificar opções de montagem adicionais quando um Volume Persistente é montado num nó. + +{{< note >}} +Nem todos os tipos de Volume Persistente suportam opções de montagem. +{{< /note >}} + +Seguem os tipos de volumes que suportam opções de montagem. + +* AWSElasticBlockStore +* AzureDisk +* AzureFile +* CephFS +* Cinder (OpenStack block storage) +* GCEPersistentDisk +* Glusterfs +* NFS +* Quobyte Volumes +* RBD (Ceph Block Device) +* StorageOS +* VsphereVolume +* iSCSI + +Não há validação em relação às opções de montagem. A montagem irá falhar se houver alguma opção inválida. + +No passado, a notação `volume.beta.kubernetes.io/mount-options` era usada no lugar do atributo `mountOptions`. Essa notação ainda funciona. Contudo, ela será totalmente depreciada numa futura versão do Kubernetes. + +### Afinidade de Nó + +{{< note >}} +Para a maioria dos tipos de volume, a configuração desse campo não se faz necessária. Isso é automaticamente populado pelos seguintes volumes de bloco do tipo: [AWS EBS](/docs/concepts/storage/volumes/#awselasticblockstore), [GCE PD](/docs/concepts/storage/volumes/#gcepersistentdisk) e [Azure Disk](/docs/concepts/storage/volumes/#azuredisk). Você precisa deixar isso configurado para volumes do tipo [local](/docs/concepts/storage/volumes/#local). 
+{{< /note >}} + +Um PV pode especificar uma [afinidade de nó](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volumenodeaffinity-v1-core) para definir restrições em relação ao limite de nós que podem acessar esse volume. Pods que utilizam um PV serão somente reservados para nós selecionados pela afinidade de nó. + +### Estado + +Um volume sempre estará em um dos seguintes estados: + +* Available -- um recurso que está livre e ainda não foi atrelado a nenhuma requisição +* Bound -- um volume atrelado a uma requisição +* Released -- a requisição foi deletada, mas o recurso ainda não foi recuperado pelo cluster +* Failed -- o volume fracassou na sua recuperação automática + + +A CLI mostrará o nome do PV que foi atrelado à PVC + +## PersistentVolumeClaims + +Cada PVC contém uma `spec` e um status, que é a especificação e estado de uma requisição. O nome de um objeto PersistentVolumeClaim precisa ser um [DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) válido. + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: myclaim +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 8Gi + storageClassName: slow + selector: + matchLabels: + release: "stable" + matchExpressions: + - {key: environment, operator: In, values: [dev]} +``` + +### Modos de Acesso + +As requisições usam as mesmas convenções que os volumes quando eles solicitam um armazenamento com um modo de acesso específico. + +### Modos de Volume + +As requisições usam as mesmas convenções que os volumes quando eles indicam o tipo de volume, seja ele um sistema de arquivo ou dispositivo de bloco. + +### Recursos + + +Assim como Pods, as requisições podem solicitar quantidades específicas de recurso. Neste caso, a solicitação é por armazenamento. O mesmo [modelo de recurso](https://git.k8s.io/community/contributors/design-proposals/scheduling/resources.md) vale para volumes e requisições.
+ +### Seletor + +Requisições podem especificar um [seletor de rótulo](/docs/concepts/overview/working-with-objects/labels/#label-selectors) para posteriormente filtrar um grupo de volumes. Somente os volumes que possuam rótulos que satisfaçam os critérios do seletor podem ser atrelados à requisição. O seletor pode conter dois campos: + +* `matchLabels` - o volume deve ter um rótulo com esse valor +* `matchExpressions` - uma lista de requisitos, como chave, lista de valores e operador relacionado aos valores e chaves. São operadores válidos: In, NotIn, Exists e DoesNotExist. + +Todos os requisitos de `matchLabels` e `matchExpressions`, são do tipo AND - todos eles juntos devem ser atendidos. + +### Classe + +Uma requisição pode solicitar uma classe específica através da [StorageClass](/docs/concepts/storage/storage-classes/) utilizando o atributo `storageClassName`. Neste caso o bind ocorrerá somente com os PVs que possuírem a mesma classe do `storageClassName` dos PVCs. + +As PVCs não precisam necessariamente solicitar uma classe. Uma PVC com sua `storageClassName` configurada como `""` sempre solicitará um PV sem classe, dessa forma ela sempre será atrelada a um PV sem classe (que não tenha nenhuma notação, ou seja, igual a `""`). Uma PVC sem `storageClassName` não é a mesma coisa e será tratada pelo cluster de forma diferente, porém isso dependerá se o [plugin de admissão](/docs/reference/access-authn-authz/admission-controllers/#defaultstorageclass) `DefaultStorageClass` estiver habilitado. + +* Se o plugin de admissão estiver habilitado, o administrador poderá especificar a StorageClass padrão. Todas as PVCs que não tiverem `storageClassName` podem ser atreladas somente a PVs que atendam a esse padrão. A especificação de uma StorageClass padrão é feita através da notação `storageclass.kubernetes.io/is-default-class` recebendo o valor `true` no objeto da StorageClass.
Se o administrador não especificar nenhum padrão, o cluster vai tratar a criação de uma PVC como se o plugin de admissão estivesse desabilitado. Se mais de um valor padrão for especificado, o plugin de admissão proíbe a criação de todas as PVCs. +* Se o plugin de admissão estiver desabilitado, não haverá nenhuma notação para a StorageClass padrão. Todas as PVCs que não tiverem `storageClassName` poderão ser atreladas somente aos PVs que não possuem classe. Neste caso, as PVCs que não tiverem `storageClassName` são tratadas da mesma forma como as PVCs que possuem suas `storageClassName` configuradas como `""`. + +Dependendo do modo de instalação, uma StorageClass padrão pode ser implantada num cluster Kubernetes durante a instalação pelo addon manager. + +Quando uma PVC especifica um `selector` para solicitar uma StorageClass, os requisitos são do tipo AND: somente um PV com a classe solicitada e com o rótulo requisitado pode ser atrelado à PVC. + +{{< note >}} +Atualmente, uma PVC que tenha `selector` não pode ter um PV dinamicamente provisionado. +{{< /note >}} + +No passado, a notação `volume.beta.kubernetes.io/storage-class` era usada no lugar do atributo `storageClassName`. Essa notação ainda funciona. Contudo, ela será totalmente depreciada numa futura versão do Kubernetes. + +## Requisições como Volumes + +Os Pods podem ter acesso ao armazenamento utilizando a requisição como um volume. Para isso, a requisição tem que estar no mesmo namespace que o Pod. Ao localizar a requisição no namespace do Pod, o cluster passa o PersistentVolume para a requisição.
+ +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: mypod +spec: + containers: + - name: myfrontend + image: nginx + volumeMounts: + - mountPath: "/var/www/html" + name: mypd + volumes: + - name: mypd + persistentVolumeClaim: + claimName: myclaim +``` + +### Sobre Namespaces + +Os binds dos PersistentVolumes são exclusivos e, desde que as PersistentVolumeClaims são objetos do namespace, fazer a montagem das requisições com "Muitos" nós (`ROX`, `RWX`) é possível somente para um namespace. + +### PersistentVolumes do tipo `hostPath` + +Um PersistentVolume do tipo `hostPath` utiliza um arquivo ou diretório no nó para emular um network-attached storage (NAS). Veja [um exemplo de volume do tipo `hostPath`](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolume). + +## Suporte a Volume de Bloco Bruto + +{{< feature-state for_k8s_version="v1.18" state="stable" >}} + +Os plugins de volume abaixo suportam volumes de bloco bruto, incluindo provisionamento dinâmico onde for aplicável: + +* AWSElasticBlockStore +* AzureDisk +* CSI +* FC (Fibre Channel) +* GCEPersistentDisk +* iSCSI +* Local volume +* OpenStack Cinder +* RBD (Ceph Block Device) +* VsphereVolume + +### Utilização de PersistentVolume com Volume de Bloco Bruto {#persistent-volume-using-a-raw-block-volume} + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: block-pv +spec: + capacity: + storage: 10Gi + accessModes: + - ReadWriteOnce + volumeMode: Block + persistentVolumeReclaimPolicy: Retain + fc: + targetWWNs: ["50060e801049cfd1"] + lun: 0 + readOnly: false +``` + +### Requisição de PersistentVolumeClaim com Volume de Bloco Bruto {#persistent-volume-claim-requesting-a-raw-block-volume} + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: block-pvc +spec: + accessModes: + - ReadWriteOnce + volumeMode: Block + resources: + requests: + storage: 10Gi +``` + +### Especificação de Pod com Dispositivo de Bloco Bruto no contêiner + 
+```yaml +apiVersion: v1 +kind: Pod +metadata: + name: pod-with-block-volume +spec: + containers: + - name: fc-container + image: fedora:26 + command: ["/bin/sh", "-c"] + args: [ "tail -f /dev/null" ] + volumeDevices: + - name: data + devicePath: /dev/xvda + volumes: + - name: data + persistentVolumeClaim: + claimName: block-pvc +``` + +{{< note >}} +Quando adicionar um dispositivo de bloco bruto num Pod, você especifica o caminho do dispositivo no contêiner ao invés de um ponto de montagem. +{{< /note >}} + +### Bind de Volumes de Bloco + +Se um usuário solicita um volume de bloco bruto através do campo `volumeMode` na `spec` da PersistentVolumeClaim, as regras de bind agora têm uma pequena diferença em relação às versões anteriores que não consideravam esse modo como parte da `spec`. +A tabela abaixo mostra as possíveis combinações que um usuário e um administrador pode especificar para requisitar um dispositivo de bloco bruto. A tabela indica se o volume será ou não atrelado com base nas combinações: +Matriz de bind de volume para provisionamento estático de volumes: + +| PV volumeMode | PVC volumeMode | Result | +| --------------|:---------------:| ----------------:| +| unspecified | unspecified | BIND | +| unspecified | Block | NO BIND | +| unspecified | Filesystem | BIND | +| Block | unspecified | NO BIND | +| Block | Block | BIND | +| Block | Filesystem | NO BIND | +| Filesystem | Filesystem | BIND | +| Filesystem | Block | NO BIND | +| Filesystem | unspecified | BIND | + +{{< note >}} +O provisionamento estático de volumes é suportado somente na versão alpha. Os administradores devem tomar cuidado ao considerar esses valores quando estiverem trabalhando com dispositivos de bloco bruto. +{{< /note >}} + +## Snapshot de Volume e Restauração de Volume a partir de um Snapshot + +{{< feature-state for_k8s_version="v1.20" state="stable" >}} + +O snapshot de volume é suportado somente pelo plugin de volume CSI. 
Veja [Snapshot de Volume](/docs/concepts/storage/volume-snapshots/) para mais detalhes. +Plugins de volume in-tree estão depreciados. Você pode consultar sobre os plugins de volume depreciados em [Perguntas Frequentes sobre Plugins de Volume](https://github.com/kubernetes/community/blob/master/sig-storage/volume-plugin-faq.md). + +### Criar uma PersistentVolumeClaim a partir de um Snapshot de Volume {#create-persistent-volume-claim-from-volume-snapshot} + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: restore-pvc +spec: + storageClassName: csi-hostpath-sc + dataSource: + name: new-snapshot-test + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi +``` + +## Clonagem de Volume + +A [Clonagem de Volume](/docs/concepts/storage/volume-pvc-datasource/) é possível somente com plugins de volume CSI. + +### Criação de PersistentVolumeClaim a partir de uma PVC já existente {#create-persistent-volume-claim-from-an-existing-pvc} + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: cloned-pvc +spec: + storageClassName: my-csi-plugin + dataSource: + name: existing-src-pvc-name + kind: PersistentVolumeClaim + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi +``` + +## Boas Práticas de Configuração + + +Se você está criando templates ou exemplos que rodam numa grande quantidade de clusters e que precisam de armazenamento persistente, recomendamos que utilize o padrão abaixo: + +- Inclua objetos PersistentVolumeClaim em seu pacote de configuração (com Deployments, ConfigMaps, etc.). +- Não inclua objetos PersistentVolume na configuração, pois o usuário que irá instanciar a configuração talvez não tenha permissão para criar PersistentVolume. +- Dê ao usuário a opção dele informar o nome de uma classe de armazenamento quando instanciar o template. 
+ - Se o usuário informar o nome de uma classe de armazenamento, coloque esse valor no campo `persistentVolumeClaim.storageClassName`. Isso fará com que a PVC encontre a classe de armazenamento correta se o cluster tiver a StorageClasses habilitado pelo administrador. + - Se o usuário não informar o nome da classe de armazenamento, deixe o campo `persistentVolumeClaim.storageClassName` sem nenhum valor (vazio). Isso fará com que o PV seja provisionado automaticamente no cluster para o usuário com o StorageClass padrão. Muitos ambientes de cluster já possuem uma StorageClass padrão, ou então os administradores podem criar suas StorageClass de acordo com seus critérios. +- Durante suas tarefas de administração, busque por PVCs que após um tempo não estão sendo atreladas, pois, isso talvez indique que o cluster não tem provisionamento dinâmico (onde o usuário deveria criar um PV que satisfaça os critérios da PVC) ou cluster não tem um sistema de armazenamento (onde usuário não pode realizar um deploy solicitando PVCs). + + ## {{% heading "whatsnext" %}} + + +* Saiba mais sobre [Criando um PersistentVolume](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolume). +* Saiba mais sobre [Criando um PersistentVolumeClaim](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim). +* Leia a [documentação sobre planejamento de Armazenamento Persistente](https://git.k8s.io/community/contributors/design-proposals/storage/persistent-storage.md). 
+ +### Referência + +* [PersistentVolume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolume-v1-core) +* [PersistentVolumeSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumespec-v1-core) +* [PersistentVolumeClaim](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core) diff --git a/content/pt-br/docs/concepts/workloads/controllers/_index.md b/content/pt-br/docs/concepts/workloads/controllers/_index.md old mode 100755 new mode 100644 diff --git a/content/pt-br/docs/concepts/workloads/controllers/cron-jobs.md b/content/pt-br/docs/concepts/workloads/controllers/cron-jobs.md index 669d3276d4..19c7cd8604 100644 --- a/content/pt-br/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/pt-br/docs/concepts/workloads/controllers/cron-jobs.md @@ -1,54 +1,109 @@ --- -reviewers: - - erictune - - soltysh - - janetkuo title: CronJob content_type: concept weight: 80 --- - + -{{< feature-state for_k8s_version="v1.8" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} -Um _Cron Job_ cria [Jobs](/docs/concepts/workloads/controllers/jobs-run-to-completion/) em um cronograma baseado em tempo. +Um _CronJob_ cria {{< glossary_tooltip term_id="job" text="Jobs" >}} em um cronograma recorrente. -Um objeto CronJob é como um arquivo _crontab_ (tabela cron). Executa um job periodicamente em um determinado horário, escrito no formato [Cron](https://en.wikipedia.org/wiki/Cron). +Um objeto CronJob é como uma linha em um arquivo _crontab_ (tabela cron). Executa uma tarefa periodicamente em um determinado cronograma, escrito no formato [Cron](https://en.wikipedia.org/wiki/Cron). -{{< note >}} -Todos os **CronJob** `schedule (horários):` são indicados em UTC. 
-{{< /note >}} +{{< caution >}} -Ao criar o manifesto para um recurso CronJob, verifique se o nome que você fornece é um [nome de subdomínio DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) válido. -O nome não deve ter mais que 52 caracteres. Isso ocorre porque o controlador do CronJob anexará automaticamente 11 caracteres ao nome da tarefa fornecido e há uma restrição de que o comprimento máximo de um nome da tarefa não pode ultrapassar 63 caracteres. +Todos os horários da propriedade `schedule:` do *CronJob* são baseadas no fuso horário do {{< glossary_tooltip term_id="kube-controller-manager" >}}. -Para obter instruções sobre como criar e trabalhar com tarefas cron, e para obter um exemplo de arquivo de especificação para uma tarefa cron, consulte [Executando tarefas automatizadas com tarefas cron](/docs/tasks/job/automated-tasks-with-cron-jobs). +Se a camada de gerenciamento do cluster executa o kube-controller-manager em Pods ou contêineres avulsos, o fuso horário configurado para o contêiner executando o kube-controller-manager determina o fuso horário que o controlador dos objetos CronJob utiliza. +{{< /caution >}} +Ao criar o manifesto para um objeto CronJob, verifique se o nome que você forneceu é um [nome de subdomínio DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) válido. +O nome não pode ter mais que 52 caracteres. Esta limitação existe porque o controlador do CronJob adicionará automaticamente 11 caracteres ao final do nome escolhido para a tarefa, e o tamanho máximo de um nome de tarefa não pode ultrapassar 63 caracteres. -## Limitações do Cron Job +## CronJob -Um trabalho cron cria um objeto de trabalho _about_ uma vez por tempo de execução de seu planejamento, Dizemos "about" porque há certas circunstâncias em que duas tarefas podem ser criadas ou nenhum trabalho pode ser criado. Tentamos torná-los únicos, mas não os impedimos completamente. Portanto, os trabalhos devem ser _idempotente_. 
+CronJobs são úteis para criar tarefas periódicas e recorrentes, como a execução de _backups_ ou o envio de mensagens de e-mail. CronJobs também permitem o agendamento de tarefas individuais para um horário específico, como por exemplo uma tarefa que é executada em um período maior de ociosidade do cluster. -Se `startingDeadlineSeconds` estiver definido como um valor grande ou não definido (o padrão) e se `concurrencyPolicy` estiver definido como `Allow(Permitir)` os trabalhos sempre serão executados pelo menos uma vez. +### Exemplo -Para cada CronJob, o CronJob {{< glossary_tooltip term_id="controller" >}} verifica quantas agendas faltou na duração, desde o último horário agendado até agora. Se houver mais de 100 agendamentos perdidos, ele não iniciará o trabalho e registrará o erro +Este manifesto de CronJob de exemplo imprime a data e horário atuais, seguidos da mensagem "Hello from the Kubernetes cluster", uma vez por minuto: + +{{< codenew file="application/job/cronjob.yaml" >}} + +(O artigo [Running Automated Tasks with a CronJob](/docs/tasks/job/automated-tasks-with-cron-jobs/) demonstra este exemplo com maiores detalhes). 
+ +### Sintaxe do cronograma cron + +``` +# ┌───────────── minuto (0 - 59) +# │ ┌───────────── hora (0 - 23) +# │ │ ┌───────────── dia do mês (1 - 31) +# │ │ │ ┌───────────── mês (1 - 12) +# │ │ │ │ ┌───────────── dia da semana (0 - 6) (domingo a sábado; +# │ │ │ │ │ 7 também representa domingo em alguns sistemas operacionais) +# │ │ │ │ │ +# │ │ │ │ │ +# * * * * * +``` + +| Expressão | Descrição | Equivalente a | +| ------------- | ------------- |------------- | +| @yearly (ou @annually) | Executa uma vez por ano, à meia-noite de 1º de janeiro | 0 0 1 1 * | +| @monthly | Executa uma vez por mês, à meia-noite do primeiro dia do mês| 0 0 1 * * | +| @weekly | Executa uma vez por semana, à meia-noite de domingo | 0 0 * * 0 | +| @daily (ou @midnight) | Executa uma vez por dia, à meia-noite | 0 0 * * * | +| @hourly | Executa uma vez por hora, no minuto zero | 0 * * * * | + +Por exemplo, a linha abaixo determina que a tarefa deve iniciar toda sexta-feira à meia-noite, bem como em todo dia 13 do mês à meia-noite: + +`0 0 13 * 5` + +É também possível gerar expressões de cronograma para CronJobs utilizando ferramentas da _web_ como o [crontab.guru](https://crontab.guru/). + +## Limitações do CronJob + +Um CronJob cria uma tarefa _aproximadamente_ uma vez por tempo de execução de seu cronograma. Dizemos "aproximadamente" porque existem circunstâncias em que duas tarefas podem ser criadas, e outras circunstâncias em que nenhuma tarefa será criada. Tentamos tornar estas situações raras, mas não é possível preveni-las completamente. Portanto, as tarefas devem ser _idempotentes_. + +Se o valor da propriedade `startingDeadlineSeconds` (limite de tempo de inicialização, em segundos) estiver definido como um valor grande, ou não definido (o padrão), e se a propriedade `concurrencyPolicy` (política de concorrência) estiver definido como `Allow` (permitir), as tarefas sempre serão executadas pelo menos uma vez. 
+ +{{< caution >}} + +Se a propriedade `startingDeadlineSeconds` estiver definida com um valor menor que 10 segundos, a tarefa cron poderá não ser agendada. Isso ocorre porque o cronograma de execução do {{< glossary_tooltip term_id="controller" text="controlador" >}} do CronJob verifica tarefas a cada 10 segundos. + +{{< /caution >}} + +Para cada CronJob, o {{< glossary_tooltip term_id="controller" text="controlador" >}} do CronJob verifica quantos agendamentos foram perdidos no tempo entre o último horário agendado e o horário atual. Se houver mais de 100 agendamentos perdidos no período, o controlador não iniciará o trabalho e gerará a seguinte mensagem de erro: ``` Cannot determine if job needs to be started. Too many missed start time (> 100). Set or decrease .spec.startingDeadlineSeconds or check clock skew. ``` -É importante observar que, se o campo `startingDeadlineSeconds` estiver definido (não `nil`), o controlador contará quantas tarefas perdidas ocorreram a partir do valor de `startingDeadlineSeconds` até agora, e não do último horário programado até agora. Por exemplo, se `startingDeadlineSeconds` for `200`, o controlador contará quantas tarefas perdidas ocorreram nos últimos 200 segundos. +É importante observar que, se o campo `startingDeadlineSeconds` estiver definido (não `nil`), o controlador contará quantas tarefas perdidas ocorreram a partir do valor de `startingDeadlineSeconds` até agora, e não do último horário agendado até agora. Por exemplo, se `startingDeadlineSeconds` for `200`, o controlador contará quantas tarefas perdidas ocorreram nos últimos 200 segundos. -Um CronJob é contado como perdido se não tiver sido criado no horário agendado. Por exemplo, se `concurrencyPolicy` estiver definido como `Forbid` e um CronJob tiver sido tentado ser agendado quando havia um agendamento anterior ainda em execução, será contabilizado como perdido. +Um CronJob é considerado perdido se não for criado no horário agendado. 
Por exemplo, se `concurrencyPolicy` estiver definido como `Forbid` (proibir) e uma tentativa de agendamento de um novo CronJob ocorreu quando havia um agendamento anterior ainda em execução, o novo agendamento será contabilizado como perdido. -Por exemplo, suponha que um CronJob esteja definido para agendar um novo trabalho a cada minuto, começando em `08:30:00`, e seu campo `startingDeadlineSeconds` não esteja defindo. Se o controlador CronJob estiver baixo de `08:29:00` para `10:21:00`, o trabalho não será iniciado, pois o número de trabalhos perdidos que perderam o cronograma é maior que 100. +Por exemplo, suponha que um CronJob esteja definido para agendar uma nova tarefa a cada minuto, começando às `08:30:00`, e seu campo `startingDeadlineSeconds` não esteja definido. Se o controlador do CronJob estiver inativo das `08:29:00` até as `10:21:00`, a tarefa não será iniciada, pois o número de tarefas que perderam seus horários agendados é maior que 100. -Para ilustrar ainda mais esse conceito, suponha que um CronJob esteja definido para agendar um novo trabalho a cada minuto, começando em `08:30:00`, e seu `startingDeadlineSeconds` está definido em 200 segundos. Se o controlador CronJob estiver inativo no mesmo período do exemplo anterior (`08:29:00` a `10:21:00`), o trabalho ainda será iniciado às 10:22:00. Isso acontece pois o controlador agora verifica quantos agendamentos perdidos ocorreram nos últimos 200 segundos (ou seja, 3 agendamentos perdidos), em vez do último horário agendado até agora. +Para ilustrar melhor este conceito, suponha que um CronJob esteja definido para agendar uma nova tarefa a cada minuto, começando às `08:30:00`, e seu `startingDeadlineSeconds` esteja definido em 200 segundos. Se o controlador do CronJob estiver inativo no mesmo período do exemplo anterior (das `08:29:00` às `10:21:00`), a tarefa ainda será iniciada às 10:22:00. 
Isso acontece pois o controlador agora verifica quantos agendamentos perdidos ocorreram nos últimos 200 segundos (ou seja, 3 agendamentos perdidos), ao invés de verificar o período entre o último horário agendado e o horário atual. -O CronJob é responsável apenas pela criação de trabalhos que correspondem à sua programação, e o trabalho, por sua vez, é responsável pelo gerenciamento dos Pods que ele representa. +O CronJob é responsável apenas pela criação das tarefas que correspondem à sua programação, e a tarefa, por sua vez, é responsável pelo gerenciamento dos Pods que ela representa. + +## Versão do controlador + +A partir da versão 1.21 do Kubernetes, a segunda versão do controlador do CronJob é a implementação ativada por padrão. Para desativar o controlador do CronJob padrão e utilizar a versão original do controlador do CronJob, é necessário adicionar o _flag_ de [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) `CronJobControllerV2` à chamada do {{< glossary_tooltip term_id="kube-controller-manager" >}} com o valor `false` (falso). Por exemplo: +``` +--feature-gates="CronJobControllerV2=false" +``` +## {{% heading "whatsnext" %}} + +A página [Cron expression format](https://en.wikipedia.org/wiki/Cron) documenta o formato dos campos de agendamento do CronJob. + +Para instruções sobre criação e utilização de tarefas cron, e para um exemplo de manifesto de CronJob, veja +[Running automated tasks with cron jobs](/docs/tasks/job/automated-tasks-with-cron-jobs). 
diff --git a/content/pt-br/docs/contribute/_index.md b/content/pt-br/docs/contribute/_index.md index 86c4d92967..e5a227c2f1 100644 --- a/content/pt-br/docs/contribute/_index.md +++ b/content/pt-br/docs/contribute/_index.md @@ -1,31 +1,41 @@ --- content_type: concept -title: Contribua com o Kubernetes docs -linktitle: Contribute +title: Contribua com a documentação do Kubernetes +linktitle: Contribuir main_menu: true weight: 80 +no_list: true +card: + name: contribuir + weight: 10 + title: Comece a contribuir para o K8s +--- --- -Caso você gostaria de contribuir com a documentação ou o site do Kubernetes, -ficamos felizes em ter sua ajuda! Qualquer pessoa pode contribuir, seja você novo no -projeto ou se você já esta no mercado há muito tempo. Além disso, Se você se identifica como -desenvolvedor, usuário final ou alguém que simplesmente não suporta ver erros de digitação. +*O Kubernetes agradece as melhorias de todos os contribuidores, novos e experientes!* +{{< note >}} +Para saber mais sobre como contribuir o Kubernetes em geral, veja a +[documentação para contribuidor](https://www.kubernetes.dev/docs/). +{{< /note >}} + +Este site é mantido pelo [Kubernetes SIG Docs](/docs/contribute/#get-involved-with-sig-docs). + +Contribuidores da documentação do Kubernetes podem: + - Melhorar o conteúdo existente + - Criar novo conteúdo + - Traduzir a documentação + - Gerenciar e publicar a documentação como parte do ciclo de lançamento do Kubernetes ## Começando -Qualquer pessoa pode abrir uma issue descrevendo o problema ou melhorias desejadas com a documentação ou contribuir com uma alteração e uma solicitação de mudança (Pull Request - PR). -Algumas tarefas exigem mais confiança e precisam de mais acesso na organização Kubernetes. -Veja [Participando do SIG Docs](/docs/contribute/participating/) para mais detalhes sobre -as funções e permissões. - -A documentação do Kubernetes reside em um repositório do GitHub. 
Nós damos as boas-vindas -a todas as contribuições, mas você vai precisa estar familiarizado com o uso básico de git e GitHub para -operar efetivamente na comunidade Kubernetes. +Qualquer pessoa pode abrir uma issue sobre a documentação, ou contribuir com uma mudança por meio de um pull request (PR) para o [repositório do Github `kubernetes/website`](https://github.com/kubernetes/website). +É recomendável que você se sinta confortável com [git](https://git-scm.com/) e +[Github](https://lab.github.com/) para trabalhar efetivamente na comunidade Kubernetes. Para se envolver com a documentação: @@ -33,30 +43,42 @@ Para se envolver com a documentação: 2. Familiarize-se com o [repositório de documentação](https://github.com/kubernetes/website) e o [gerador de site estático](https://gohugo.io) hugo. 3. Certifique-se de entender os processos básicos para [melhorar o conteúdo](https://kubernetes.io/docs/contribute/start/#improve-existing-content) e [revisar alterações](https://kubernetes.io/docs/contribute/start/#review-docs-pull-requests). -## Melhores Práticas recomendadas para contribuições +Algumas tarefas requerem mais confiança e mais acessos na organização do Kubernetes. +Veja [Participando no SIG Docs](/docs/contribute/participate/) para mais detalhes +sobre funções e permissões. -- Escreva mensagens GIT claras e significativas. -- Certifique-se de incluir _Github Special Keywords_ que faz referência a issue e o fecha automaticamente quando o PR é mergeado. -- Quando você faz uma pequena alteração em um PR, como corrigir um erro de digitação, qualquer alteração de estilo ou gramática, certifique-se de esmagar seus commits (squash) para não obter um grande número de commits por uma alteração relativamente pequena. -- Certifique-se de incluir uma boa descrição de PR explicando as alterações no código, o motivo de alterar um trecho de código e garantir que haja informações suficientes para o revisor entender seu PR. 
-- Leituras adicionais: - - [chris.beams.io/posts/git-commit/](https://chris.beams.io/posts/git-commit/) - - [github.com/blog/1506-closing-issues-via-pull-requests ](https://github.com/blog/1506-closing-issues-via-pull-requests ) - - [davidwalsh.name/squash-commits-git ](https://davidwalsh.name/squash-commits-git ) +## Sua primeira contribuição +- Leia sobre [visão geral para contribuição](/docs/contribute/new-content/overview/) para saber mais sobre diferentes formas para você contribuir. +- Veja a [lista de issues em `kubernetes/website`](https://github.com/kubernetes/website/issues/) para identificar issues que sejam um bom ponto de partida. +- [Abra um pull request usando o Github](/docs/contribute/new-content/open-a-pr/#changes-using-github) para documentações existentes e aprenda mais sobre resolver issues no Github. +- Leia sobre o [guia de conteúdo](/docs/contribute/style/content-guide/) e [guias de estilo](/docs/contribute/style/style-guide/). +- Leia sobre [tipos de conteúdo de páginas](/docs/contribute/style/page-content-types/) e [shortcodes do Hugo](/docs/contribute/style/hugo-shortcodes/). -## Outras maneiras de contribuir +## Próximos passos -- Para contribuir com a comunidade Kubernetes por meio de fóruns on-line, como Twitter ou Stack Overflow, ou aprender sobre encontros locais e eventos do Kubernetes, visite o a area de [comunidade Kubernetes](/community/). -- Para contribuir com o desenvolvimento de novas funções, leia o [cheatsheet do colaborador](https://github.com/kubernetes/community/tree/master/contributors/guide/contributor-cheatsheet) para começar. + - Aprenda a [trabalhar com um clone local](/docs/contribute/new-content/open-a-pr/#fork-the-repo) de um repositório. + - Documente [funcionalidades em uma release](/docs/contribute/new-content/new-features/). + - Participe do [SIG Docs](/docs/contribute/participate/), e se torne um + [membro ou revisor](/docs/contribute/participate/roles-and-responsibilities/). 
 + - Comece ou ajude com uma [localização](/docs/contribute/localization/). + +## Se envolva com o SIG Docs + +O [SIG Docs](/docs/contribute/participate/) é um grupo de contribuidores que publica e mantém +a documentação e o site do Kubernetes. Se envolver com o SIG Docs é uma ótima forma de contribuidores Kubernetes (pessoas desenvolvedoras de features ou outros) terem um grande impacto dentro do projeto Kubernetes. + +A comunicação do SIG Docs é feita de diferentes formas: + - [Entre em `#sig-docs` no slack do Kubernetes](https://slack.k8s.io/). + - [Se inscreva na lista de email `kubernetes-sig-docs`](https://groups.google.com/forum/#!forum/kubernetes-sig-docs), onde acontecem discussões e + decisões oficiais são registradas. + - [Participe do encontro semanal do SIG Docs](https://github.com/kubernetes/community/tree/master/sig-docs). Os encontros são sempre anunciados no `#sig-docs` e adicionados ao [calendário de eventos de comunidade do Kubernetes](https://calendar.google.com/calendar/embed?src=cgnt364vd8s86hr2phapfjc6uk%40group.calendar.google.com&ctz=America/Los_Angeles). Você precisa baixar o [cliente do Zoom](https://zoom.us/download) ou usar um telefone. + +## Outras formas de contribuir + +- Para contribuir com a comunidade Kubernetes por meio de fóruns on-line, como Twitter ou Stack Overflow, ou aprender sobre encontros locais e eventos do Kubernetes, visite a área de [comunidade Kubernetes](/community/). +- Para contribuir com o desenvolvimento de novas funcionalidades, leia o [cheatsheet do colaborador](https://github.com/kubernetes/community/tree/master/contributors/guide/contributor-cheatsheet) para começar. +- Leia o [cheatsheet de contribuidor](https://github.com/kubernetes/community/tree/master/contributors/guide/contributor-cheatsheet) para saber mais sobre as funcionalidades de desenvolvimento do Kubernetes. +- Submeta [um post de blog ou um caso de estudo](/docs/contribute/new-content/blogs-case-studies/). 
-## {{% heading "whatsnext" %}} - - -- Para obter mais informações sobre os conceitos básicos de contribuição para a documentação, leia [Comece a contribuir](/docs/contribute/start/). -- Siga o [Guia de estilo de documentação do Kubernetes](/docs/contribute/style/style-guide/) ao propor mudanças. -- Para mais informações sobre o SIG Docs, leia [Participando do SIG Docs](/docs/contribute/participating/). -- Para mais informações sobre a localização de documentos do Kubernetes, leia [Localização da documentação do Kubernetes](/docs/contribute/localization/). - - diff --git a/content/pt-br/docs/contribute/analytics.md b/content/pt-br/docs/contribute/analytics.md new file mode 100644 index 0000000000..305e0804cc --- /dev/null +++ b/content/pt-br/docs/contribute/analytics.md @@ -0,0 +1,28 @@ +--- +title: Visualizando Analytics do Site +content_type: concept +weight: 100 +card: + name: contribuir + weight: 100 +--- + + + +Essa página contém informações sobre a dashboard de analytics do kubernetes.io. + + + +Essa [dashboard](https://datastudio.google.com/reporting/fede2672-b2fd-402a-91d2-7473bdb10f04) foi feita usando +o Google Data Studio e possui informações coletadas do +kubernetes.io usando o Google Analytics. + +### Usando a dashboard + +Por padrão, a dashboard mostra todos os analytics coletados nos últimos 30 dias. Use o seletor de data +para ver dados de outros intervalos de data. Outras +opções de filtros permitem que você veja dados baseados +em localização do usuário para acessar o site, a tradução +da documentação usada e outros. + +Se você identificar um problema com essa dashboard ou quer solicitar qualquer melhoria, [abra uma issue](https://github.com/kubernetes/website/issues/new/choose) no repositório. 
diff --git a/content/pt-br/docs/reference/glossary/controller.md b/content/pt-br/docs/reference/glossary/controller.md old mode 100755 new mode 100644 diff --git a/content/pt-br/docs/reference/glossary/customresourcedefinition.md b/content/pt-br/docs/reference/glossary/customresourcedefinition.md old mode 100755 new mode 100644 diff --git a/content/pt-br/docs/reference/glossary/job.md b/content/pt-br/docs/reference/glossary/job.md new file mode 100644 index 0000000000..47a5b451c2 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/job.md @@ -0,0 +1,19 @@ +--- +title: Job +id: job +date: 2021-07-14 +full_link: /docs/concepts/workloads/controllers/job +short_description: > + Uma tarefa finita ou em lotes que executa até finalizar. + +aka: +tags: +- fundamental +- core-object +- workload +--- +Uma tarefa finita ou em lotes que executa até finalizar. + + + +Cria um ou mais objetos do tipo {{< glossary_tooltip term_id="pod" >}} e garante que um número determinado destes finaliza sua execução com sucesso. Conforme os Pods finalizam com sucesso, o Job observa as execuções bem-sucedidas. diff --git a/content/pt-br/docs/reference/glossary/kubelet.md b/content/pt-br/docs/reference/glossary/kubelet.md old mode 100755 new mode 100644 diff --git a/content/pt-br/docs/reference/glossary/node.md b/content/pt-br/docs/reference/glossary/node.md old mode 100755 new mode 100644 diff --git a/content/pt-br/docs/reference/glossary/pod.md b/content/pt-br/docs/reference/glossary/pod.md old mode 100755 new mode 100644 diff --git a/content/pt-br/docs/tasks/configmap-secret/_index.md b/content/pt-br/docs/tasks/configmap-secret/_index.md new file mode 100755 index 0000000000..b12622f4fd --- /dev/null +++ b/content/pt-br/docs/tasks/configmap-secret/_index.md @@ -0,0 +1,6 @@ +--- +title: "Gerenciando Secrets" +weight: 28 +description: Gerenciando dados de configurações usando Secrets. 
+--- + diff --git a/content/pt-br/docs/tasks/configmap-secret/managing-secret-using-config-file.md b/content/pt-br/docs/tasks/configmap-secret/managing-secret-using-config-file.md new file mode 100644 index 0000000000..0bac8410fa --- /dev/null +++ b/content/pt-br/docs/tasks/configmap-secret/managing-secret-using-config-file.md @@ -0,0 +1,193 @@ +--- +title: Gerenciando Secret usando Arquivo de Configuração +content_type: task +weight: 20 +description: Criando objetos Secret usando arquivos de configuração de recursos. +--- + + + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + +## Crie o arquivo de configuração + +Você pode criar um Secret primeiramente em um arquivo, no formato JSON ou YAML, e depois +criar o objeto. O recurso [Secret](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core) +contém dois mapas: `data` e `stringData`. +O campo `data` é usado para armazenar dados arbitrários, codificados usando base64. O +campo `stringData` é usado por conveniência, e permite que você use dados para um Secret +como *strings* não codificadas. +As chaves para `data` e `stringData` precisam ser compostas por caracteres alfanuméricos, +`_`, `-` ou `.`. + +Por exemplo, para armazenar duas strings em um Secret usando o campo `data`, converta +as strings para base64 da seguinte forma: + +```shell +echo -n 'admin' | base64 +``` +A saída deve ser similar a: + +``` +YWRtaW4= +``` + +```shell +echo -n '1f2d1e2e67df' | base64 +``` + +A saída deve ser similar a: + +``` +MWYyZDFlMmU2N2Rm +``` + +Escreva o arquivo de configuração do Secret, que será parecido com: +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mysecret +type: Opaque +data: + username: YWRtaW4= + password: MWYyZDFlMmU2N2Rm +``` + +Perceba que o nome do objeto Secret precisa ser um +[nome de subdomínio DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-name) válido. 
+ +{{< note >}} +Os valores serializados dos dados JSON e YAML de um Secret são codificados em strings +base64. Novas linhas não são válidas com essas strings e devem ser omitidas. Quando +usar o utilitário `base64` em Darwin/MacOS, os usuários devem evitar usar a opção `-b` +para separar linhas grandes. Por outro lado, usuários de Linux *devem* adicionar a opção +`-w 0` ao comando `base64` ou o *pipe* `base64 | tr -d '\n'` se a opção `w` não estiver disponível +{{< /note >}} + +Para cenários específicos, você pode querer usar o campo `stringData` ao invés de `data`. +Esse campo permite que você use strings não-base64 diretamente dentro do Secret, +e a string vai ser codificada para você quando o Secret for criado ou atualizado. + +Um exemplo prático para isso pode ser quando você esteja fazendo *deploy* de uma aplicação +que usa um Secret para armazenar um arquivo de configuração, e você quer popular partes desse +arquivo de configuração durante o processo de implantação. + +Por exemplo, se sua aplicação usa o seguinte arquivo de configuração: + +```yaml +apiUrl: "https://my.api.com/api/v1" +username: "" +password: "" +``` + +Você pode armazenar isso em um Secret usando a seguinte definição: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mysecret +type: Opaque +stringData: + config.yaml: | + apiUrl: "https://my.api.com/api/v1" + username: + password: +``` + +## Crie o objeto Secret + +Agora, crie o Secret usando [`kubectl apply`](/docs/reference/generated/kubectl/kubectl-commands#apply): + +```shell +kubectl apply -f ./secret.yaml +``` + +A saída deve ser similar a: + +``` +secret/mysecret created +``` + +## Verifique o Secret + +O campo `stringData` é um campo de conveniência apenas de leitura. Ele nunca vai ser exibido +ao buscar um Secret. 
Por exemplo, se você executar o seguinte comando: + +```shell +kubectl get secret mysecret -o yaml +``` + +A saída deve ser similar a: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + creationTimestamp: 2018-11-15T20:40:59Z + name: mysecret + namespace: default + resourceVersion: "7225" + uid: c280ad2e-e916-11e8-98f2-025000000001 +type: Opaque +data: + config.yaml: YXBpVXJsOiAiaHR0cHM6Ly9teS5hcGkuY29tL2FwaS92MSIKdXNlcm5hbWU6IHt7dXNlcm5hbWV9fQpwYXNzd29yZDoge3twYXNzd29yZH19 +``` + +Os comandos `kubectl get` e `kubectl describe` omitem o conteúdo de um `Secret` por padrão. +Isso para proteger o `Secret` de ser exposto acidentalmente para uma pessoa não autorizada, +ou ser armazenado em um log de terminal. +Para verificar o conteúdo atual de um dado codificado, veja [decodificando secret](/docs/tasks/configmap-secret/managing-secret-using-kubectl/#decoding-secret). + +Se um campo, como `username`, é especificado em `data` e `stringData`, +o valor de `stringData` é o usado. Por exemplo, dada a seguinte definição do Secret: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mysecret +type: Opaque +data: + username: YWRtaW4= +stringData: + username: administrator +``` + +Resulta no seguinte Secret: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + creationTimestamp: 2018-11-15T20:46:46Z + name: mysecret + namespace: default + resourceVersion: "7579" + uid: 91460ecb-e917-11e8-98f2-025000000001 +type: Opaque +data: + username: YWRtaW5pc3RyYXRvcg== +``` + +Onde `YWRtaW5pc3RyYXRvcg==` é decodificado em `administrator`. 
+ +## Limpeza + +Para apagar o Secret que você criou: + +```shell +kubectl delete secret mysecret +``` + +## {{% heading "whatsnext" %}} + +- Leia mais sobre o [conceito do Secret](/docs/concepts/configuration/secret/) +- Leia sobre como [gerenciar Secret com o comando `kubectl`](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) +- Leia sobre como [gerenciar Secret usando kustomize](/docs/tasks/configmap-secret/managing-secret-using-kustomize/) + diff --git a/content/pt-br/docs/tasks/configmap-secret/managing-secret-using-kubectl.md b/content/pt-br/docs/tasks/configmap-secret/managing-secret-using-kubectl.md new file mode 100644 index 0000000000..7e6ca6dc7c --- /dev/null +++ b/content/pt-br/docs/tasks/configmap-secret/managing-secret-using-kubectl.md @@ -0,0 +1,152 @@ +--- +title: Gerenciando Secret usando kubectl +content_type: task +weight: 10 +description: Criando objetos Secret usando a linha de comando kubectl. +--- + + + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + +## Criando um Secret + +Um `Secret` pode conter credenciais de usuário requeridas por Pods para acesso a um banco de dados. +Por exemplo, uma string de conexão de banco de dados é composta por um usuário e senha. +Você pode armazenar o usuário em um arquivo `./username.txt` e a senha em um +arquivo `./password.txt` na sua máquina local. + +```shell +echo -n 'admin' > ./username.txt +echo -n '1f2d1e2e67df' > ./password.txt +``` + +A opção `-n` nos comandos acima garante que os arquivos criados não vão conter +uma nova linha extra no final do arquivo de texto. Isso é importante porque +quando o `kubectl` lê um arquivo e codifica o conteúdo em uma string base64, +o caractere da nova linha extra também é codificado. + +O comando `kubectl create secret` empacota os arquivos em um Secret e cria um +objeto no API server. 
+ + +```shell +kubectl create secret generic db-user-pass \ + --from-file=./username.txt \ + --from-file=./password.txt +``` + +A saída deve ser similar a: + +``` +secret/db-user-pass created +``` + +O nome da chave padrão é o nome do arquivo. Opcionalmente, você pode definir +o nome da chave usando `--from-file=[key=]source`. Por exemplo: + +```shell +kubectl create secret generic db-user-pass \ + --from-file=username=./username.txt \ + --from-file=password=./password.txt +``` +Você não precisa escapar o caractere especial em senhas a partir de arquivos (`--from-file`). + +Você também pode prover dados para Secret usando a tag `--from-literal==`. +Essa tag pode ser especificada mais de uma vez para prover múltiplos pares de chave-valor. +Observe que caracteres especiais como `$`, `\`, `*`, `=`, e `!` vão ser interpretados +pelo seu [shell](https://en.wikipedia.org/wiki/Shell_(computing)) e precisam ser escapados. +Na maioria dos shells, a forma mais fácil de escapar as senhas é usar aspas simples (`'`). +Por exemplo, se sua senha atual é `S!B\*d$zDsb=`, você precisa executar o comando dessa forma: + +```shell +kubectl create secret generic dev-db-secret \ + --from-literal=username=devuser \ + --from-literal=password='S!B\*d$zDsb=' +``` + +## Verificando o Secret + +Você pode verificar se o secret foi criado: + +```shell +kubectl get secrets +``` + +A saída deve ser similar a: + +``` +NAME TYPE DATA AGE +db-user-pass Opaque 2 51s +``` + +Você pode ver a descrição do `Secret`: + +```shell +kubectl describe secrets/db-user-pass +``` +A saída deve ser similar a: + +``` +Name: db-user-pass +Namespace: default +Labels: +Annotations: + +Type: Opaque + +Data +==== +password: 12 bytes +username: 5 bytes +``` + +Os comandos `kubectl get` e `kubectl describe` omitem o conteúdo de um `Secret` por padrão. +Isso para proteger o `Secret` de ser exposto acidentalmente para uma pessoa não autorizada, +ou ser armazenado em um log de terminal. 
 + +## Decodificando o Secret {#decoding-secret} + +Para ver o conteúdo de um Secret que você criou, execute o seguinte comando: + +```shell +kubectl get secret db-user-pass -o jsonpath='{.data}' +``` + +A saída deve ser similar a: + +```json +{"password":"MWYyZDFlMmU2N2Rm","username":"YWRtaW4="} +``` + +Agora, você pode decodificar os dados de `password`: + +```shell +echo 'MWYyZDFlMmU2N2Rm' | base64 --decode +``` + +A saída deve ser similar a: + +``` +1f2d1e2e67df +``` + +## Limpeza + +Para apagar o Secret que você criou: + +```shell +kubectl delete secret db-user-pass +``` + + + +## {{% heading "whatsnext" %}} + +- Leia mais sobre o [conceito do Secret](/docs/concepts/configuration/secret/) +- Leia sobre como [gerenciar Secret usando arquivo de configuração](/docs/tasks/configmap-secret/managing-secret-using-config-file/) +- Leia sobre como [gerenciar Secret usando kustomize](/docs/tasks/configmap-secret/managing-secret-using-kustomize/) diff --git a/content/pt-br/docs/tasks/configmap-secret/managing-secret-using-kustomize.md b/content/pt-br/docs/tasks/configmap-secret/managing-secret-using-kustomize.md new file mode 100644 index 0000000000..1658afc3de --- /dev/null +++ b/content/pt-br/docs/tasks/configmap-secret/managing-secret-using-kustomize.md @@ -0,0 +1,122 @@ +--- +title: Gerenciando Secret usando Kustomize +content_type: task +weight: 30 +description: Criando objetos Secret usando o arquivo kustomization.yaml +--- + + + +Desde o Kubernetes v1.14, o `kubectl` provê suporte para [gerenciamento de objetos usando Kustomize](/docs/tasks/manage-kubernetes-objects/kustomization/). +O Kustomize provê geradores de recursos para criar Secrets e ConfigMaps. +Os geradores Kustomize devem ser especificados em um arquivo `kustomization.yaml` dentro +de um diretório. Depois de gerar o Secret, você pode criar o Secret com `kubectl apply`. 
+## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + +## Criando um arquivo de Kustomization +Você pode criar um Secret definindo um `secretGenerator` em um +arquivo `kustomization.yaml` que referencia outros arquivos existentes. +Por exemplo, o seguinte arquivo kustomization referencia os +arquivos `./username.txt` e `./password.txt`: + +```yaml +secretGenerator: +- name: db-user-pass + files: + - username.txt + - password.txt +``` + +Você também pode definir o `secretGenerator` no arquivo `kustomization.yaml` +por meio de alguns *literais*. +Por exemplo, o seguinte arquivo `kustomization.yaml` contém dois literais +para `username` e `password` respectivamente: + +```yaml +secretGenerator: +- name: db-user-pass + literals: + - username=admin + - password=1f2d1e2e67df +``` + +Observe que nos dois casos, você não precisa codificar os valores em base64. + +## Criando o Secret + +Aplique o diretório que contém o arquivo `kustomization.yaml` para criar o Secret. + +```shell +kubectl apply -k . +``` + +A saída deve ser similar a: + +``` +secret/db-user-pass-96mffmfh4k created +``` + +Observe que quando um Secret é gerado, o nome do segredo é criado usando o hash +dos dados do Secret mais o valor do hash. Isso garante que +um novo Secret é gerado cada vez que os dados são modificados. + +## Verifique o Secret criado + +Você pode verificar que o secret foi criado: + +```shell +kubectl get secrets +``` + +A saída deve ser similar a: + +``` +NAME TYPE DATA AGE +db-user-pass-96mffmfh4k Opaque 2 51s +``` + +Você pode ver a descrição de um secret: + +```shell +kubectl describe secrets/db-user-pass-96mffmfh4k +``` +A saída deve ser similar a: + +``` +Name: db-user-pass-96mffmfh4k +Namespace: default +Labels: +Annotations: + +Type: Opaque + +Data +==== +password.txt: 12 bytes +username.txt: 5 bytes +``` + +Os comandos `kubectl get` e `kubectl describe` omitem o conteúdo de um `Secret` por padrão. 
+Isso para proteger o `Secret` de ser exposto acidentalmente para uma pessoa não autorizada, +ou ser armazenado em um log de terminal. +Para verificar o conteúdo atual de um dado codificado, veja [decodificando secret](/docs/tasks/configmap-secret/managing-secret-using-kubectl/#decoding-secret). + +## Limpeza + +Para apagar o Secret que você criou: + +```shell +kubectl delete secret db-user-pass-96mffmfh4k +``` + + +## {{% heading "whatsnext" %}} + +- Leia mais sobre o [conceito do Secret](/docs/concepts/configuration/secret/) +- Leia sobre como [gerenciar Secret com o comando `kubectl`](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) +- Leia sobre como [gerenciar Secret usando arquivo de configuração](/docs/tasks/configmap-secret/managing-secret-using-config-file/) + diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/_index.html b/content/pt-br/docs/tutorials/kubernetes-basics/_index.html index b397afba37..10b721c3c7 100644 --- a/content/pt-br/docs/tutorials/kubernetes-basics/_index.html +++ b/content/pt-br/docs/tutorials/kubernetes-basics/_index.html @@ -27,10 +27,10 @@ card:

    Este tutorial fornece instruções básicas sobre o sistema de orquestração de cluster do Kubernetes. Cada módulo contém algumas informações básicas sobre os principais recursos e conceitos do Kubernetes e inclui um tutorial online interativo. Esses tutoriais interativos permitem que você mesmo gerencie um cluster simples e seus aplicativos em contêineres.

    Usando os tutoriais interativos, você pode aprender a:

      -
    • Implante um aplicativo em contêiner em um cluster.
    • -
    • Dimensione a implantação.
    • -
    • Atualize o aplicativo em contêiner com uma nova versão do software.
    • -
    • Depure o aplicativo em contêiner.
    • +
    • Implantar um aplicativo em contêiner em um cluster.
    • +
    • Dimensionar a implantação.
    • +
    • Atualizar o aplicativo em contêiner com uma nova versão do software.
    • +
    • Depurar o aplicativo em contêiner.

    Os tutoriais usam Katacoda para executar um terminal virtual em seu navegador da Web, executado em Minikube, uma implantação local em pequena escala do Kubernetes que pode ser executada em qualquer lugar. Não há necessidade de instalar nenhum software ou configurar nada; cada tutorial interativo é executado diretamente no navegador da web.

    diff --git a/content/pt-br/examples/application/job/cronjob.yaml b/content/pt-br/examples/application/job/cronjob.yaml new file mode 100644 index 0000000000..da905a9048 --- /dev/null +++ b/content/pt-br/examples/application/job/cronjob.yaml @@ -0,0 +1,19 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: hello +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: hello + image: busybox + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + restartPolicy: OnFailure diff --git a/content/pt-br/examples/service/networking/network-policy-allow-all-egress.yaml b/content/pt-br/examples/service/networking/network-policy-allow-all-egress.yaml new file mode 100644 index 0000000000..42b2a2a296 --- /dev/null +++ b/content/pt-br/examples/service/networking/network-policy-allow-all-egress.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all-egress +spec: + podSelector: {} + egress: + - {} + policyTypes: + - Egress diff --git a/content/pt-br/examples/service/networking/network-policy-allow-all-ingress.yaml b/content/pt-br/examples/service/networking/network-policy-allow-all-ingress.yaml new file mode 100644 index 0000000000..462912dae4 --- /dev/null +++ b/content/pt-br/examples/service/networking/network-policy-allow-all-ingress.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all-ingress +spec: + podSelector: {} + ingress: + - {} + policyTypes: + - Ingress diff --git a/content/pt-br/examples/service/networking/network-policy-default-deny-all.yaml b/content/pt-br/examples/service/networking/network-policy-default-deny-all.yaml new file mode 100644 index 0000000000..5c0086bd71 --- /dev/null +++ b/content/pt-br/examples/service/networking/network-policy-default-deny-all.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy 
+metadata: + name: default-deny-all +spec: + podSelector: {} + policyTypes: + - Ingress + - Egress diff --git a/content/pt-br/examples/service/networking/network-policy-default-deny-egress.yaml b/content/pt-br/examples/service/networking/network-policy-default-deny-egress.yaml new file mode 100644 index 0000000000..a4659e1417 --- /dev/null +++ b/content/pt-br/examples/service/networking/network-policy-default-deny-egress.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-deny-egress +spec: + podSelector: {} + policyTypes: + - Egress diff --git a/content/pt-br/examples/service/networking/network-policy-default-deny-ingress.yaml b/content/pt-br/examples/service/networking/network-policy-default-deny-ingress.yaml new file mode 100644 index 0000000000..e823802487 --- /dev/null +++ b/content/pt-br/examples/service/networking/network-policy-default-deny-ingress.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-deny-ingress +spec: + podSelector: {} + policyTypes: + - Ingress diff --git a/content/pt-br/includes/task-tutorial-prereqs.md b/content/pt-br/includes/task-tutorial-prereqs.md new file mode 100644 index 0000000000..eb4177b4fd --- /dev/null +++ b/content/pt-br/includes/task-tutorial-prereqs.md @@ -0,0 +1,6 @@ +Você precisa de um cluster Kubernetes e a ferramenta de linha de comando kubectl +precisa estar configurada para acessar o seu cluster. Se você ainda não tem um +cluster, pode criar um usando o [minikube](/docs/tasks/tools/#minikube) +ou você pode usar um dos seguintes ambientes: +* [Katacoda](https://www.katacoda.com/courses/kubernetes/playground) +* [Play with Kubernetes](http://labs.play-with-k8s.com/) diff --git a/content/ru/_index.html b/content/ru/_index.html index aaf9f136f5..a460376f12 100644 --- a/content/ru/_index.html +++ b/content/ru/_index.html @@ -41,12 +41,12 @@ Kubernetes — это проект с открытым исходным кодо

    - Посетите KubeCon NA онлайн, 17-20 ноября 2020 + Посетите KubeCon в Северной Америке, 11-15 октября 2021 года



    - Посетите KubeCon EU онлайн, 4 – 7 мая 2021 + Посетите KubeCon в Европе, 17-20 мая 2022 года
    @@ -56,4 +56,4 @@ Kubernetes — это проект с открытым исходным кодо {{< blocks/kubernetes-features >}} -{{< blocks/case-studies >}} \ No newline at end of file +{{< blocks/case-studies >}} diff --git a/content/ru/docs/concepts/architecture/cloud-controller.md b/content/ru/docs/concepts/architecture/cloud-controller.md index 287afad287..5bb9c3d4fa 100644 --- a/content/ru/docs/concepts/architecture/cloud-controller.md +++ b/content/ru/docs/concepts/architecture/cloud-controller.md @@ -1,5 +1,5 @@ --- -title: Диспетчер облочных контроллеров +title: Диспетчер облачных контроллеров content_type: concept weight: 40 --- @@ -8,11 +8,11 @@ weight: 40 {{< feature-state state="beta" for_k8s_version="v1.11" >}} -Технологии облочной инфраструктуры позволяет запускать Kubernetes в общедоступных, частных и гибритных облоках. Kubernetes верит в автоматизированную,управляемую API инфраструктуру без жесткой связи между компонентами. +Технологии облачной инфраструктуры позволяет запускать Kubernetes в общедоступных, частных и гибридных облаках. Kubernetes верит в автоматизированную, управляемую API инфраструктуру без жесткой связи между компонентами. -{{< glossary_definition term_id="cloud-controller-manager" length="all" prepend="Диспетчер облочных контроллеров">}} +{{< glossary_definition term_id="cloud-controller-manager" length="all" prepend="Диспетчер облачных контроллеров">}} -Диспетчер облочных контроллеров структурирован с использованием механизма плагинов, которые позволяют различным облочным провайдерам интегрировать свои платформы с Kubernetes. +Диспетчер облачных контроллеров спроектирован с использованием механизма плагинов, которые позволяют различным облачным провайдерам интегрировать свои платформы с Kubernetes. @@ -22,44 +22,42 @@ weight: 40 ![Kubernetes components](/images/docs/components-of-kubernetes.svg) -Диспетчер облочных контроллеров работает в панели управления как реплицированный набот процессов (обычно это контейнер в Pod-ах). 
Каждый диспетчер облочных контроллеров реализует многоразовые {{< glossary_tooltip text="контроллеры" term_id="controller" >}} в единственном процессе. +Диспетчер облачных контроллеров работает в панели управления как реплицированный набор процессов (обычно это контейнер в Pod-ах). Каждый диспетчер облачных контроллеров реализует множество {{< glossary_tooltip text="контроллеров" term_id="controller" >}} в единственном процессе. {{< note >}} -Вы так же можете запустить диспетчер облочных контроллеров как {{< glossary_tooltip text="дополнение" term_id="addons" >}} Kubernetes, а некак часть панели управления. +Вы также можете запустить диспетчер облачных контроллеров как {{< glossary_tooltip text="дополнение" term_id="addons" >}} Kubernetes, а не как часть панели управления. {{< /note >}} -## Функции диспетчера облочных контроллеров {#functions-of-the-ccm} +## Функции диспетчера облачных контроллеров {#functions-of-the-ccm} -Контроллеры внутри диспетчера облочных контроллеров включают в себя: +Контроллеры внутри диспетчера облачных контроллеров включают в себя: ### Контролер узла -Контроллер узла отвечает за создание объектов {{< glossary_tooltip text="узла" term_id="node" >}} при создании новых серверов в вашей облочной инфраструктуре. Контроллер узла получает информацию -о работающих хостах внутри вашего арендуемого облочного провайдера. +Контроллер узла отвечает за создание объектов {{< glossary_tooltip text="узла" term_id="node" >}} при создании новых серверов в вашей облачной инфраструктуре. Контроллер узла получает информацию о работающих хостах внутри вашей арендуемой инфраструктуры облачного провайдера. Контроллер узла выполняет следующие функции: -1. Инициализация объектов узла для каждого сервера, контроллер которого через API облочного провайдера. -2. Аннотирование и маркировка объеко узла специфичной для облока информацией, такой как регион, в котором развернут узел и доступные ему ресурсы (процессор, память и т.д.). +1. 
Инициализация объектов узла для каждого сервера, которые контроллер получает через API облачного провайдера. +2. Аннотирование и маркировка объектов узла специфичной для облака информацией, такой как регион узла и доступные ему ресурсы (процессор, память и т.д.). 3. Получение имени хоста и сетевых адресов. -4. Проверка работоспособности ущла. В случае, если узел перестает отвечать на запросы, этот контроллер проверяется с помощью API вашего облочного провайдера, был ли сервер деактевирован / удален / прекращен. - Если узел был удален из облока, контроллер удлаяет объект узла из вашего Kubernetes кластера.. +4. Проверка работоспособности узла. В случае, если узел перестает отвечать на запросы, этот контроллер проверяет с помощью API вашего облачного провайдера, был ли сервер деактивирован / удален / прекращен. Если узел был удален из облака, контроллер удлаяет объект узла из вашего Kubernetes кластера. -Некоторые облочные провайдеры реализуют его разделение на контроллер узла и отдельный контроллер жизненного цикла узла. +Некоторые облачные провайдеры реализуют его разделение на контроллер узла и отдельный контроллер жизненного цикла узла. ### Контролер маршрута -Контролер маршрута отвечае за соответствующую настройку маршрутов облоке, чтобы контейнеры на разных узлах кластера Kubernetes могли взаимодействовать друг с другом. +Контролер маршрута отвечает за соответствующую настройку маршрутов в облаке, чтобы контейнеры на разных узлах кластера Kubernetes могли взаимодействовать друг с другом. -В зависимости от облочного провайдера, контроллер маршрута способен также выделять блоки IP адресов для сети Pod. +В зависимости от облачного провайдера, контроллер маршрута способен также выделять блоки IP-адресов для сети Pod-ов. 
-### Сервисный контроллер +### Контроллер сервисов -{{< glossary_tooltip text="Службы" term_id="service" >}} интегрируются с компонентами облочной инфраструктуры, такими как управляемые балансировщики нагрузки, IP адреса, фильтрация сетевых пакетов и проверка работоспособности целевых объектов. Сервисный контроллер взаимодействует с API вашего облочного провайдера для настройки балансировщиков нагрузки и других компонентов инфраструктуры, когда вы объявляете ресурсные службы которые он требует. +{{< glossary_tooltip text="Сервисы" term_id="service" >}} интегрируются с компонентами облачной инфраструктуры, такими как управляемые балансировщики нагрузки, IP-адреса, фильтрация сетевых пакетов и проверка работоспособности целевых объектов. Контроллер сервисов взаимодействует с API вашего облачного провайдера для настройки требуемых балансировщиков нагрузки и других компонентов инфраструктуры, когда вы объявляете ресурсы сервисов. ## Авторизация -В этом разделе разбирается доступ, который нужен для управления облочным контроллером к различным объектам API для выполнения своих операций. +В этом разделе разбирается доступ к различным объектам API, который нужен облачным контроллерам для выполнения своих операций. ### Контроллер узла {#authorization-node-controller} @@ -77,19 +75,19 @@ weight: 40 ### Контролер маршрута {#authorization-route-controller} -Контролер маршрута прослушивает создание объектов узла и соответствующим образом настраивает маршруты. Для этого требуется получить доступ к объектам узла. +Контролер маршрута прослушивает создание объектов узла и соответствующим образом настраивает маршруты. Для этого требуется получить доступ к объектам узла. `v1/Node`: - Get -### Сервисный контроллер {#authorization-service-controller} +### Контроллер сервисов {#authorization-service-controller} -Сервисный контроллер прослушивает события Create, Update и Delete объектов службы, а затем соответствующим образом настраивает конечные точки для этих соответствующих сервисов. 
+Контроллер сервисов прослушивает события Create, Update и Delete объектов служб, а затем соответствующим образом настраивает конечные точки для соответствующих сервисов. -Для доступа к сервисам, требуется доступ к событиям List и Watch. Для обновления сервисов, требуется доступ к событиям Patch и Update. +Для доступа к сервисам требуется доступ к событиям List и Watch. Для обновления сервисов требуется доступ к событиям Patch и Update. -Чтобы настроить ресурсы конечных точек для сервисов, требуется доступ к событиям Create, List, Get, Watch, и Update. +Чтобы настроить ресурсы конечных точек для сервисов, требуется доступ к событиям Create, List, Get, Watch и Update. `v1/Service`: @@ -101,7 +99,7 @@ weight: 40 ### Другие {#authorization-miscellaneous} -Реализация ядра диспетчера облочных контроллеров требует доступ для создания создания объектов события, а для обеспечения безопасной работы требуется доступ для создания учетных записей сервисов (ServiceAccounts). +Реализация ядра диспетчера облачных контроллеров требует доступ для создания объектов событий, а для обеспечения безопасной работы требуется доступ к созданию сервисных учетных записей (ServiceAccounts). `v1/Event`: @@ -113,7 +111,7 @@ weight: 40 - Create -The {{< glossary_tooltip term_id="rbac" text="RBAC" >}} ClusterRole для диспетчера облочных контроллеров выглядить так: +{{< glossary_tooltip term_id="rbac" text="RBAC" >}} ClusterRole для диспетчера облачных контроллеров выглядит так: ```yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -180,13 +178,13 @@ rules: ## {{% heading "whatsnext" %}} -[Администрирование диспетчера облочных контроллеров](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager) -содержить инструкции по запуску и управлению диспетером облочных контроллеров. 
+[Администрирование диспетчера облачных контроллеров](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager) +содержит инструкции по запуску и управлению диспетчером облачных контроллеров. -Хотите знать как реализовать свой собственный диспетчер облочных контроллеров или расширить проект? +Хотите знать, как реализовать свой собственный диспетчер облачных контроллеров или расширить проект? -Диспетчер облочных контроллеров использует интерфейс Go, который позволяет реализовать подключение из любого облока. В частности, он использует `CloudProvider` интерфейс, который определен в [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.17/cloud.go#L42-L62) из [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider). +Диспетчер облачных контроллеров использует интерфейсы Go, которые позволяют реализовать подключение из любого облака. В частности, он использует интерфейс `CloudProvider`, который определен в [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.21/cloud.go#L42-L69) из [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider). -Реализация общих контроллеров выделенных в этом документе (Node, Route, и Service),а так же некоторые возведения вместе с общим облочным провайдерским интерфейсом являются частью ядра Kubernetes. особые реализации, для облочных провайдеров находятся вне ядра Kubernetes и реализуют интерфейс `CloudProvider`. +Реализация общих контроллеров, описанных в этом документе (Node, Route, и Service), а также некоторые другие вспомогательные конструкции, вместе с общим интерфейсом облачного провайдера являются частью ядра Kubernetes. Особые реализации для облачных провайдеров находятся вне ядра Kubernetes и реализуют интерфейс `CloudProvider`. -Дополнительные сведения о разработке плагинов см. в разделе [Разработка диспетчера облочных контроллеров](/docs/tasks/administer-cluster/developing-cloud-controller-manager/). 
+Дополнительные сведения о разработке плагинов см. в разделе [Разработка диспетчера облачных контроллеров](/docs/tasks/administer-cluster/developing-cloud-controller-manager/). diff --git a/content/ru/docs/concepts/architecture/garbage-collection.md b/content/ru/docs/concepts/architecture/garbage-collection.md new file mode 100644 index 0000000000..f7ded8b44e --- /dev/null +++ b/content/ru/docs/concepts/architecture/garbage-collection.md @@ -0,0 +1,134 @@ +--- +title: Сборщик мусора +content_type: concept +weight: 50 +--- + + +{{}} Это позволить очистить ресурсы, такие как: + + * [Неудачные pod-ы](/docs/concepts/workloads/pods/pod-lifecycle/#pod-garbage-collection) + * [Завершенные задания](/docs/concepts/workloads/controllers/ttlafterfinished/) + * [Объекты без ссылок на владельца Objects](#owners-dependents) + * [Не используемые контейнеры и образы контейнеров](#containers-images) + * [Dynamically provisioned PersistentVolumes with a StorageClass reclaim policy of Delete](/docs/concepts/storage/persistent-volumes/#delete) + * [Устаревшие или просроченные запросы подписания сертификатов (CSR)](/reference/access-authn-authz/certificate-signing-requests/#request-signing-process) + * {{}} удалено в следующих сценариях: + * В облаке, когда кластер использует [диспетчер облачных контроллеров](/docs/concepts/architecture/cloud-controller/) + * Локально когда кластер использует дополнение, аналогичное диспетчер облачных контроллеров + * [Объекты аренды узлов](/docs/concepts/architecture/nodes/#heartbeats) + +## Владельцы и зависимости {#owners-dependents} + +Многие объекты в Kubernetes ссылаются друг на друга через [*ссылки владельцев*](/docs/concepts/overview/working-with-objects/owners-dependents/). +Ссылки владельцев сообщают плоскости управления какие объекты зависят от других. +Kubernetes использует ссылки владельцев, чтобы предоставить плоскости управления и другим API +клиентам, возможность очистить связанные ресурсы передудалением объекта. 
В большинстве случаев, Kubernetes автоматический управляет ссылками владельцев. + +Владелец отличается от [меток и селекторов](/docs/concepts/overview/working-with-objects/labels/) +которые также используют некоторые ресурсы. Например, рассмотрим +{{}} которая создает объект +`EndpointSlice`. Служба использует *метки* чтобы позволить плоскости управления определить какие `EndpointSlice` объекты используются для этой службы. В дополнение +к меткам, каждый `EndpointSlice` управляет ои имени службы, имеет +ссылку владельца. Ссылки владельцев помогают различным частям Kubernetes избегать +вмешательства в объекты, которые они не контролируют. + +{{< note >}} +Ссылки на владельцев перекрестных пространств имен запрещены по дизайну. +Зависимости пространства имен могут указывать на область действия кластера или владельцев пространства имен. +Владелец пространства имен **должен** быть в том же пространстве имен что и зависимости. +Если это не возможно, cсылка владельца считается отсутствующей и зависимый объект подлежит удалению, как только будет проверено отсутствие всех владельцев. + +Зависимости области действия кластер может указывать только владельцев области действия кластера. +В версии v1.20+, если зависимость с областью действия кластера указывает на пространство имен как владелец, +тогда он рассматривается как имеющий неразрешимую ссылку на владельца и не может быть обработан сборщиком мусора. + +В версии v1.20+, если сборщик мусора обнаружит недопустимое перекрестное пространство имен `ownerReference`, +или зависящие от облости действия кластера `ownerReference` ссылка на тип пространства имен, предупреждающее событие с причиной `OwnerRefInvalidNamespace` и `involvedObject` сообщающеся о не действительной зависимости. +Вы можете проверить наличие такого рода событий, выполнив `kubectl get events -A --field-selector=reason=OwnerRefInvalidNamespace`. 
+{{< /note >}} + +## Каскадное удаление {#cascading-deletion} + +Kubernetes проверяет и удаляет объекты, на которые больше нет ссылок владельцев, так же как и pod-ов, оставленных после удаления ReplicaSet. Когда Вы удаляете объект, вы можете контролировать автоматический ли Kubernetes удаляет зависимые объекты автоматически в процессе вызова *каскадного удаления*. Существует два типа каскадного удаления, а именно: + + * Каскадное удалени Foreground + * Каскадное удаление Background + +Вы так же можете управлять как и когда сборщик мусора удаляет ресурсы, на которые ссылаются владельцы с помощью Kubernetes {{}}. + +### Каскадное удалени Foreground {#foreground-deletion} + +В Каскадном удалени Foreground, объект владельца, который вы удаляете, сначало переходить в состояние *в процессе удаления*. В этом состоянии с объектом-владельцем происходить следующее: + + * Сервер Kubernetes API устанавливает полю объекта `metadata.deletionTimestamp` + время, когда объект был помечен для удаления. + * Сервер Kubernetes API так же устанавливает метку `metadata.finalizers`для поля + `foregroundDeletion`. + * Объект остается видимым блогодоря Kubernetes API пока процесс удаления не завершиться + +После того, как владелец объекта переходит в состояние прогресса удаления, контроллер удаляет зависимые объекты. После удаления всех зависимых объектов, контроллер удаляет объект владельца. На этом этапе, объект больше не отображается в Kubernetes API. + +Во время каскадного удаления foreground, единственным зависимым, которые блокируют удаления владельца, являются те, у кого имеется поле `ownerReference.blockOwnerDeletion=true`. +Чтобы узнать больше. Смотрите [Использование каскадного удаления foreground](/docs/tasks/administer-cluster/use-cascading-deletion/#use-foreground-cascading-deletion). 
+ +### Каскадное удаление Background {#background-deletion} + +В каскадном удалении background, сервер Kubernetes API немедленно удаляет владельца объекта, а контроллер очищает зависимые объекты в фоновом режиме. По умолчанию, Kubernetes использует каскадное удаление background, если вы вручную не используете удаление foreground или не решите отключить зависимые объекты. + +Чтобы узнать больше, смотрите [Использование каскадного удаления background](/docs/tasks/administer-cluster/use-cascading-deletion/#use-background-cascading-deletion). + +### Осиротевшие зависимости + +Когда Kubernetes удаляет владельца объекта, оставшиеся зависимости называются *осиротевшими* объектами. По умолчанию, Kubernetes удаляет зависимые объекты. Чтобы узнать, как переопределить это поведение, смотрите [Удаление объектов владельца и осиротевших зависимостей](/docs/tasks/administer-cluster/use-cascading-deletion/#set-orphan-deletion-policy). + +## Сбор мусора из неиспользуемых контейнеров и образов {#containers-images} + +{{}} выполняет сбор мусора для неиспользуемых образов каждые пять минут и для неиспользуемых контейнеров каждую минуту. Вам следует избегать использования внешних инструментов для сборки мусора, так как они могут +нарушить поведение kubelet и удалить контейнеры, которые должны существовать. + +Чтобы настроить параметры для сборщика мусора для неиспользуемого контейнера и сборки мусора образа, подстройте +kubelet используя [конфигурационный файл](/docs/tasks/administer-cluster/kubelet-config-file/) +и измените параметры, связанные со сборщиком мусора, используя тип ресурса +[`KubeletConfiguration`](/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration). + +### Жизненный цикл контейнерных образов + +Kubernetes управляет жизненным циклом всех образов с помощью своего *менеджера образов*, который является частью kubelet, в сотрудничестве с cadvisor. 
При принятии решений о сборке мусора, kubelet учитывает следующие ограничения использования диска: + + * `HighThresholdPercent` + * `LowThresholdPercent` + +Использование диска выше настроенного значения `HighThresholdPercent` запускает сборку мусора, которая удаляет образы в порядке, основанном на последнем использовании, начиная с самого старого. kubelet удаляет образы до тех пор, пока использование диска не достигнет значения `LowThresholdPercent`. + +### Сборщик мусора контейнерных образов {#container-image-garbage-collection} + +kubelet собирает неиспользуемые контейнеры на основе следующих переменных, которые вы можете определить: + + * `MinAge`: минимальный возраст, при котором kubelet может начать собирать мусор контейнеров. Отключить, установив значение `0`. + * `MaxPerPodContainer`: максимальное количество неактивных контейнеров, которое может быть у каждой пары Pod-ов. Отключить, установив значение меньше чем `0`. + * `MaxContainers`: максимальное количество неиспользуемых контейнеров, которые могут быть в кластере. Отключить, установив значение меньше чем `0`. + +В дополнение к этим переменным, kubelet собирает неопознанные и удаленные контейнеры, обычно начиная с самого старого. + +`MaxPerPodContainer` и `MaxContainers` могут потенциально конфликтовать друг с другом в ситуациях, когда требуется максимальное количество контейнеров в Pod-е (`MaxPerPodContainer`) выйдет за пределы допустимого общего количества глобальных неиспользуемых контейнеров (`MaxContainers`). В этой ситуации kubelet регулирует `MaxPerPodContainer` для устранения конфликта. Наихудшим сценарием было бы понизить `MaxPerPodContainer` до `1` и изгнать самые старые контейнеры. +Кроме того, владельцы контейнеров в pod-е могут быть удалены, как только они становятся старше чем `MinAge`. + +{{}} +Kubelet собирает мусор только у контейнеров, которыми он управляет. 
+{{}} + +## Настройка сборщик мусора {#configuring-gc} + +Вы можете настроить сборку мусора ресурсов, настроив параметры, специфичные для контроллеров, управляющих этими ресурсами. В последующих страницах показанно, как настроить сборку мусора: + + * [Настройка каскадного удаления объектов Kubernetes](/docs/tasks/administer-cluster/use-cascading-deletion/) + * [Настройка очистки завершенных заданий](/docs/concepts/workloads/controllers/ttlafterfinished/) + + + +## {{% heading "whatsnext" %}} + +* Узнайте больше о [ownership of Kubernetes objects](/docs/concepts/overview/working-with-objects/owners-dependents/). +* Узнайте больше о Kubernetes [finalizers](/docs/concepts/overview/working-with-objects/finalizers/). +* Узнать о [TTL контроллере](/docs/concepts/workloads/controllers/ttlafterfinished/) (beta) that cleans up finished Jobs. \ No newline at end of file diff --git a/content/ru/docs/concepts/overview/working-with-objects/names.md b/content/ru/docs/concepts/overview/working-with-objects/names.md index e73d436533..73477a1475 100644 --- a/content/ru/docs/concepts/overview/working-with-objects/names.md +++ b/content/ru/docs/concepts/overview/working-with-objects/names.md @@ -37,7 +37,7 @@ weight: 20 Некоторые типы ресурсов должны соответствовать стандарту меток DNS, который описан в [RFC 1123](https://tools.ietf.org/html/rfc1123). Таким образом, имя должно: - содержать не более 63 символов -- содержать только строчные буквенно-цифровые символы или '.' 
+- содержать только строчные буквенно-цифровые символы или '-' - начинаться с буквенно-цифрового символа - заканчивается буквенно-цифровым символом diff --git a/content/ru/docs/contribute/generate-ref-docs/contribute-upstream.md b/content/ru/docs/contribute/generate-ref-docs/contribute-upstream.md index 223aba429f..0834caa522 100644 --- a/content/ru/docs/contribute/generate-ref-docs/contribute-upstream.md +++ b/content/ru/docs/contribute/generate-ref-docs/contribute-upstream.md @@ -113,7 +113,6 @@ On branch master hack/update-generated-swagger-docs.sh hack/update-openapi-spec.sh hack/update-generated-protobuf.sh -hack/update-api-reference-docs.sh ``` Выполните команду `git status`, чтобы посмотреть, какие файлы изменились. @@ -122,8 +121,6 @@ hack/update-api-reference-docs.sh On branch master ... modified: api/openapi-spec/swagger.json - modified: api/swagger-spec/apps_v1.json - modified: docs/api-reference/apps/v1/definitions.html modified: staging/src/k8s.io/api/apps/v1/generated.proto modified: staging/src/k8s.io/api/apps/v1/types.go modified: staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go diff --git a/content/ru/docs/contribute/generate-ref-docs/kubernetes-api.md b/content/ru/docs/contribute/generate-ref-docs/kubernetes-api.md index 883fc8d16c..e96bc29ea7 100644 --- a/content/ru/docs/contribute/generate-ref-docs/kubernetes-api.md +++ b/content/ru/docs/contribute/generate-ref-docs/kubernetes-api.md @@ -82,8 +82,8 @@ git clone https://github.com/kubernetes/kubernetes $GOPATH/src/k8s.io/kubernetes Примеры: ```shell -export K8S_WEBROOT=$(GOPATH)/src/github.com//website -export K8S_ROOT=$(GOPATH)/src/k8s.io/kubernetes +export K8S_WEBROOT=${GOPATH}/src/github.com//website +export K8S_ROOT=${GOPATH}/src/k8s.io/kubernetes export K8S_RELEASE=1.17.0 ``` diff --git a/content/ru/docs/reference/glossary/garbage-collection.md b/content/ru/docs/reference/glossary/garbage-collection.md new file mode 100644 index 0000000000..defcd10476 --- /dev/null +++ 
b/content/ru/docs/reference/glossary/garbage-collection.md @@ -0,0 +1,23 @@ +--- +title: Сборщик мусора +id: garbage-collection +date: 2021-07-07 +full_link: /docs/concepts/workloads/controllers/garbage-collection/ +short_description: > + A collective term for the various mechanisms Kubernetes uses to clean up cluster + resources. + +aka: +tags: +- fundamental +- operation +--- + Сборщик мусора - это собирательный термин для различных механизмов, используемых Kubernetes для очистки ресурсов кластера. + + + +Kubernetes использует сборку мусора для очистки таких ресурсов, как [неиспользуемые контейнеры и образы](/docs/concepts/workloads/controllers/garbage-collection/#containers-images), +[неудачные Pod-ы](/docs/concepts/workloads/pods/pod-lifecycle/#pod-garbage-collection), +[объекты, принадлежащие целевому ресурсу](/docs/concepts/overview/working-with-objects/owners-dependents/), +[завершенные задачи](/docs/concepts/workloads/controllers/ttlafterfinished/), а также ресурсы +с истекшим сроком действия или завершившиеся сбоем. \ No newline at end of file diff --git a/content/uk/_index.html b/content/uk/_index.html index ae7873ea7a..ebf3f6ea18 100644 --- a/content/uk/_index.html +++ b/content/uk/_index.html @@ -62,12 +62,12 @@ Kubernetes - проект з відкритим вихідним кодом. В

    - Відвідайте KubeCon NA онлайн, 17-20 листопада 2020 року + Відвідайте KubeCon у Північній Америці, 11-15 жовтня 2021 року



    - Відвідайте KubeCon EU онлайн, 17-20 травня 2021 року + Відвідайте KubeCon в Європі, 17-20 травня 2022 року
    diff --git a/content/uk/docs/reference/glossary/service.md b/content/uk/docs/reference/glossary/service.md old mode 100755 new mode 100644 diff --git a/content/uk/docs/setup/release/_index.md b/content/uk/docs/setup/release/_index.md old mode 100755 new mode 100644 diff --git a/content/vi/docs/tutorials/kubernetes-basics/_index.html b/content/vi/docs/tutorials/kubernetes-basics/_index.html index 2440ca5e67..30e370294f 100644 --- a/content/vi/docs/tutorials/kubernetes-basics/_index.html +++ b/content/vi/docs/tutorials/kubernetes-basics/_index.html @@ -10,7 +10,7 @@ card: - + diff --git a/content/vi/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html b/content/vi/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html index 76fd8004ee..4c2ea13af3 100644 --- a/content/vi/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html +++ b/content/vi/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/vi/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/vi/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html index 9fc822147c..c5d8e13ad9 100644 --- a/content/vi/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html +++ b/content/vi/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html @@ -5,7 +5,7 @@ weight: 10 - + diff --git a/content/vi/docs/tutorials/kubernetes-basics/explore/explore-interactive.html b/content/vi/docs/tutorials/kubernetes-basics/explore/explore-interactive.html index 2a8d45f170..8169a3b989 100644 --- a/content/vi/docs/tutorials/kubernetes-basics/explore/explore-interactive.html +++ b/content/vi/docs/tutorials/kubernetes-basics/explore/explore-interactive.html @@ -5,7 +5,7 @@ weight: 20 - + diff --git a/content/vi/docs/tutorials/kubernetes-basics/explore/explore-intro.html b/content/vi/docs/tutorials/kubernetes-basics/explore/explore-intro.html index 
fbde8b0797..7a27af279a 100644 --- a/content/vi/docs/tutorials/kubernetes-basics/explore/explore-intro.html +++ b/content/vi/docs/tutorials/kubernetes-basics/explore/explore-intro.html @@ -5,7 +5,7 @@ weight: 10 - + diff --git a/content/zh/blog/_posts/2020-12-08-kubernetes-release-1.20.md b/content/zh/blog/_posts/2020-12-08-kubernetes-release-1.20.md new file mode 100644 index 0000000000..b8ab648317 --- /dev/null +++ b/content/zh/blog/_posts/2020-12-08-kubernetes-release-1.20.md @@ -0,0 +1,236 @@ +--- +layout: blog +title: 'Kubernetes 1.20: 最新版本' +date: 2020-12-08 +slug: kubernetes-1-20-release-announcement +--- + + + +**作者:** [Kubernetes 1.20 发布团队](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.20/release_team.md) + + +我们很高兴地宣布 Kubernetes 1.20 的发布,这是我们 2020 年的第三个也是最后一个版本!此版本包含 42 项增强功能:11 项增强功能已升级到稳定版,15 项增强功能正在进入测试版,16 项增强功能正在进入 Alpha 版。 + + +1.20 发布周期在上一个延长的发布周期之后恢复到 11 周的正常节奏。这是一段时间以来功能最密集的版本之一:Kubernetes 创新周期仍呈上升趋势。此版本具有更多的 Alpha 而非稳定的增强功能,表明云原生生态系统仍有许多需要探索的地方。 + + +## 主题 {#major-themes} + + +### Volume 快照操作变得稳定 {#volume-snapshot-operations-goes-stable} + + +此功能提供了触发卷快照操作的标准方法,并允许用户以可移植的方式在任何 Kubernetes 环境和支持的存储提供程序上合并快照操作。 + + +此外,这些 Kubernetes 快照原语充当基本构建块,解锁为 Kubernetes 开发高级企业级存储管理功能的能力,包括应用程序或集群级备份解决方案。 + + +请注意,快照支持要求 Kubernetes 分销商捆绑 Snapshot 控制器、Snapshot CRD 和验证 webhook。还必须在集群上部署支持快照功能的 CSI 驱动程序。 + + + + +### Kubectl Debug 功能升级到 Beta {#kubectl-debug-graduates-to-beta} + + +`kubectl alpha debug` 功能在 1.20 中升级到测试版,成为 `kubectl debug`. 
该功能直接从 kubectl 提供对常见调试工作流的支持。此版本的 kubectl 支持的故障排除场景包括: + + +* 通过创建使用不同容器映像或命令的 pod 副本,对在启动时崩溃的工作负载进行故障排除。 +* 通过在 pod 的新副本或使用临时容器中添加带有调试工具的新容器来对 distroless 容器进行故障排除。(临时容器是默认未启用的 alpha 功能。) +* 通过创建在主机命名空间中运行并可以访问主机文件系统的容器来对节点进行故障排除。 + + +请注意,作为新的内置命令,`kubectl debug` 优先于任何名为 “debug” 的 kubectl 插件。你必须重命名受影响的插件。 + + +`kubectl alpha debug` 现在不推荐使用,并将在后续版本中删除。更新你的脚本以使用 `kubectl debug`。 有关更多信息 `kubectl debug`,请参阅[调试正在运行的 Pod]((https://kubernetes.io/zh/docs/tasks/debug-application-cluster/debug-running-pod/)。 + + +### 测试版:API 优先级和公平性 {#beta-api-priority-and-fairness) + + +Kubernetes 1.20 由 1.18 引入,现在默认启用 API 优先级和公平性 (APF)。这允许 `kube-apiserver` 按优先级对传入请求进行分类。 + + +### Alpha 更新:IPV4/IPV6 {#alpha-with-updates-ipv4-ipv6} + + +基于用户和社区反馈,重新实现了 IPv4/IPv6 双栈以支持双栈服务。 +这允许将 IPv4 和 IPv6 服务集群 IP 地址分配给单个服务,还允许服务从单 IP 堆栈转换为双 IP 堆栈,反之亦然。 + + +### GA:进程 PID 稳定性限制 {#ga-process-pid-limiting-for-stability} + + +进程 ID (pid) 是 Linux 主机上的基本资源。达到任务限制而不达到任何其他资源限制并导致主机不稳定是很可能发生的。 + + + +管理员需要机制来确保用户 pod 不会导致 pid 耗尽,从而阻止主机守护程序(运行时、kubelet 等)运行。此外,重要的是要确保 pod 之间的 pid 受到限制,以确保它们对节点上的其他工作负载的影响有限。 +默认启用一年后,SIG Node 在 `SupportNodePidsLimit`(节点到 Pod PID 隔离)和 `SupportPodPidsLimit`(限制每个 Pod 的 PID 的能力)上都将 PID 限制升级为 GA。 + + +### Alpha:节点体面地关闭 {#alpha-graceful-node-shutdown} + + +用户和集群管理员希望 Pod 遵守预期的 Pod 生命周期,包括 Pod 终止。目前,当一个节点关闭时,Pod 不会遵循预期的 Pod 终止生命周期,也不会正常终止,这可能会导致某些工作负载出现问题。 +该 `GracefulNodeShutdown` 功能现在处于 Alpha 阶段。`GracefulNodeShutdown` 使 kubelet 知道节点系统关闭,从而在系统关闭期间正常终止 pod。 + + +## 主要变化 {#major-changes} + + +### Dockershim 弃用 {#dockershim-deprecation} + + +Dockershim,Docker 的容器运行时接口 (CRI) shim 已被弃用。不推荐使用对 Docker 的支持,并将在未来版本中删除。由于 Docker 映像遵循开放容器计划 (OCI) 映像规范,因此 Docker 生成的映像将继续在具有所有 CRI 兼容运行时的集群中工作。 +Kubernetes 社区写了一篇关于弃用的详细[博客文章](https://blog.k8s.io/2020/12/02/dont-panic-kubernetes-and-docker/),并为其提供了一个专门的常见问题[解答页面](https://blog.k8s.io/2020/12/02/dockershim-faq/)。 + + +### Exec 探测超时处理 {#exec-probe-timeout-handling} + + +一个关于 exec 探测超时的长期错误可能会影响现有的 pod 定义,已得到修复。在此修复之前,exec 探测器不考虑 `timeoutSeconds` 
字段。相反,探测将无限期运行,甚至超过其配置的截止日期,直到返回结果。 +通过此更改,如果未指定值,将应用默认值 `1 second`,并且如果探测时间超过一秒,现有 pod 定义可能不再足够。 +新引入的 `ExecProbeTimeout` 特性门控所提供的修复使集群操作员能够恢复到以前的行为,但这种行为将在后续版本中锁定并删除。为了恢复到以前的行为,集群运营商应该将此特性门控设置为 `false`。 + + +有关更多详细信息,请查看有关配置探针的[更新文档](/zh/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes)。 + + +## 其他更新 {#other-updates} + + +### 稳定版 {#graduated-to-stable} + + +* [RuntimeClass](https://github.com/kubernetes/enhancements/issues/585) +* [内置 API 类型默认值](https://github.com/kubernetes/enhancements/issues/1929) +* [添加了对 Pod 层面启动探针和活跃性探针的扼制](https://github.com/kubernetes/enhancements/issues/950) +* [在 Windows 上支持 CRI-ContainerD](https://github.com/kubernetes/enhancements/issues/1001) +* [SCTP 对 Services 的支持](https://github.com/kubernetes/enhancements/issues/614) +* [将 AppProtocol 添加到 Services 和 Endpoints 上](https://github.com/kubernetes/enhancements/issues/1507) + + +### 值得注意的功能更新 {#notable-feature-updates} + + +* [CronJobs](https://github.com/kubernetes/enhancements/issues/19) + + +# 发行说明 {#release-notes} + + +你可以在[发行说明](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.20.md)中查看 1.20 发行版的完整详细信息。 + + +# 可用的发布 {#availability-of-release} + + +Kubernetes 1.20 可在 [GitHub](https://github.com/kubernetes/kubernetes/releases/tag/v1.20.0) 上下载。有一些很棒的资源可以帮助你开始使用 Kubernetes。你可以在 Kubernetes 主站点上查看一些[交互式教程](https://kubernetes.io/docs/tutorials/),或者使用 [kind](https://kind.sigs.k8s.io) 的 Docker 容器在你的机器上运行本地集群。如果你想尝试从头开始构建集群,请查看 Kelsey Hightower 的 [Kubernetes the Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) 教程。 + + +# 发布团队 {#release-team} + + +这个版本是由一群非常敬业的人促成的,他们在世界上发生的许多事情的时段作为一个团队走到了一起。 +非常感谢发布负责人 Jeremy Rickard 以及发布团队中的其他所有人,感谢他们相互支持,并努力为社区发布 1.20 版本。 + + +# 发布 Logo {#release-logo} + +![Kubernetes 1.20 Release Logo](/images/blog/2020-12-08-kubernetes-1.20-release-announcement/laser.png) + +[raddest](https://www.dictionary.com/browse/rad): *adjective*, Slang. 
excellent; wonderful; cool: + + +> Kubernetes 1.20 版本是迄今为止最激动人心的版本。 + + +2020 年对我们中的许多人来说都是充满挑战的一年,但 Kubernetes 贡献者在此版本中提供了创纪录的增强功能。这是一项了不起的成就,因此发布负责人希望以一点轻松的方式结束这一年,并向 [Kubernetes 1.14 - Caturnetes](https://github.com/kubernetes/sig-release/tree/master/releases/release-1.14) 和一只名叫 Humphrey 的 “rad” 猫致敬。 + + +Humphrey是发布负责人的猫,有一个永久的 `blep`. 在 1990 年代,*Rad* 是美国非常普遍的俚语,激光背景也是如此。Humphrey 在 1990 年代风格的学校照片中感觉像是结束这一年的有趣方式。希望 Humphrey 和它的 *blep* 在 2020 年底给你带来一点快乐! + + +发布标志由 [Henry Hsu - @robotdancebattle](https://www.instagram.com/robotdancebattle/) 创建。 + + +# 用户亮点 {#user-highlights} + + +- Apple 正在世界各地的数据中心运行数千个节点的 Kubernetes 集群。观看 [Alena Prokarchyk](https://youtu.be/Tx8qXC-U3KM) 的 KubeCon NA 主题演讲,了解有关他们的云原生之旅的更多信息。 + + +# 项目速度 {#project-velocity} + + +[CNCF K8S DevStats 项目](https://k8s.devstats.cncf.io/)聚集了许多有关Kubernetes和各分项目的速度有趣的数据点。这包括从个人贡献到做出贡献的公司数量的所有内容,并且清楚地说明了为发展这个生态系统所做的努力的深度和广度。 + + +在持续 11 周(9 月 25 日至 12 月 9 日)的 v1.20 发布周期中,我们看到了来自 [26 个国家/地区](https://k8s.devstats.cncf.io/d/50/countries-stats?orgId=1&from=1601006400000&to=1607576399000&var-period_name=Quarter&var-countries=All&var-repogroup_name=Kubernetes&var-metric=rcommitters&var-cum=countries) 的 [967 家公司](https://k8s.devstats.cncf.io/d/9/companies-table?orgId=1&var-period_name=v1.19.0%20-%20now&var-metric=contributions) 和 [1335 名个人](https://k8s.devstats.cncf.io/d/66/developer-activity-counts-by-companies?orgId=1&var-period_name=v1.19.0%20-%20now&var-metric=contributions&var-repogroup_name=Kubernetes&var-country_name=All&var-companies=All)(其中 [44 人](https://k8s.devstats.cncf.io/d/52/new-contributors?orgId=1&from=1601006400000&to=1607576399000&var-repogroup_name=Kubernetes)首次为 Kubernetes 做出贡献)的贡献。 + + +# 生态系统更新 {#ecosystem-updates} + + +- KubeCon North America 三周前刚刚结束,这是第二个虚拟的此类活动!现在所有演讲都可以[点播](https://www.youtube.com/playlist?list=PLj6h78yzYM2Pn8RxfLh2qrXBDftr6Qjut),供任何需要赶上的人使用! 
+- 6 月,Kubernetes 社区成立了一个新的工作组,作为对美国各地发生的 Black Lives Matter 抗议活动的直接回应。WG Naming 的目标是尽可能彻底地删除 Kubernetes 项目中有害和不清楚的语言,并以可移植到其他 CNCF 项目的方式进行。在 [KubeCon 2020 North America](https://sched.co/eukp) 上就这项重要工作及其如何进行进行了精彩的介绍性演讲,这项工作的初步影响[实际上可以在 v1.20 版本中看到](https://github.com/kubernetes/enhancements/issues/2067)。 +- 此前于今年夏天宣布,在 Kubecon NA 期间发布了经认证的 [Kubernetes 安全专家 (CKS) 认证](https://www.cncf.io/announcements/2020/11/17/kubernetes-security-specialist-certification-now-available/) ,以便立即安排!遵循 CKA 和 CKAD 的模型,CKS 是一项基于性能的考试,侧重于以安全为主题的能力和领域。该考试面向当前的 CKA 持有者,尤其是那些想要完善其在保护云工作负载方面的基础知识的人(这是我们所有人,对吧?)。 + + + + +# 活动更新 {#event-updates} + + +KubeCon + CloudNativeCon Europe 2021 将于 2021 年 5 月 4 日至 7 日举行!注册将于 1 月 11 日开放。你可以在[此处](https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/)找到有关会议的更多信息。 +请记住,[CFP](https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/program/cfp/) 将于太平洋标准时间 12 月 13 日星期日晚上 11:59 关闭! + + +# 即将发布的网络研讨会 {#upcoming-release-webinar} + + +请继续关注今年 1 月即将举行的发布网络研讨会。 + + +# 参与其中 {#get-involved} + + +如果你有兴趣为 Kubernetes 社区做出贡献,那么特别兴趣小组 (SIG) 是一个很好的起点。其中许多可能符合你的兴趣!如果你有什么想与社区分享的内容,你可以参加每周的社区会议,或使用以下任一渠道: + + + +* 在新的 [Kubernetes Contributor 网站](https://www.kubernetes.dev/)上了解更多关于为Kubernetes 做出贡献的信息 +* 在 Twitter [@Kubernetesio](https://twitter.com/kubernetesio) 上关注我们以获取最新更新 +* 加入关于讨论的[社区](https://discuss.kubernetes.io/)讨论 +* 加入 [Slack 社区](http://slack.k8s.io/) +* 分享你的 [Kubernetes 故事](https://docs.google.com/a/linuxfoundation.org/forms/d/e/1FAIpQLScuI7Ye3VQHQTwBASrgkjQDSS5TP0g3AXfFhwSM9YpHgxRKFA/viewform) +* 在[博客](https://kubernetes.io/blog/)上阅读更多关于 Kubernetes 发生的事情 +* 了解有关 [Kubernetes 发布团队](https://github.com/kubernetes/sig-release/tree/master/release-team)的更多信息 diff --git a/content/zh/docs/concepts/architecture/cloud-controller.md b/content/zh/docs/concepts/architecture/cloud-controller.md index f97922ec17..3a0a6c42f7 100644 --- a/content/zh/docs/concepts/architecture/cloud-controller.md +++ b/content/zh/docs/concepts/architecture/cloud-controller.md @@ 
-326,11 +326,11 @@ Want to know how to implement your own cloud controller manager, or extend an ex 想要了解如何实现自己的云控制器管理器,或者对现有项目进行扩展么? 云控制器管理器使用 Go 语言的接口,从而使得针对各种云平台的具体实现都可以接入。 其中使用了在 [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider) -项目中 [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.17/cloud.go#L42-L62) +项目中 [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.21/cloud.go#L42-L69) 文件所定义的 `CloudProvider` 接口。 +{{< warning >}} +只使用来源可靠的 kubeconfig 文件。使用特制的 kubeconfig 文件可能会导致恶意代码执行或文件暴露。 +如果必须使用不受信任的 kubeconfig 文件,请首先像检查 shell 脚本一样仔细检查它。 +{{< /warning>}} + -## 一般配置提示 +## 一般配置提示 {#general-configuration-tips} -## “Naked”Pods 与 ReplicaSet,Deployment 和 Jobs +## “Naked” Pods 与 ReplicaSet,Deployment 和 Jobs - - - -{{< feature-state for_k8s_version="v1.14" state="stable" >}} - - -[Pods](/zh/docs/concepts/workloads/pods/) 可以有*优先级(Priority)*。 -优先级体现的是当前 Pod 与其他 Pod 相比的重要程度。如果 Pod 无法被调度,则 -调度器会尝试抢占(逐出)低优先级的 Pod,从而使得悬决的 Pod 可被调度。 - - - - -{{< warning >}} -在一个并非所有用户都可信任的集群中,一个有恶意的用户可能创建优先级最高的 -Pod,从而导致其他 Pod 被逐出或者无法调度。 -管理员可以使用 ResourceQuota 来避免用户创建高优先级的 Pod。 - -参考[限制默认使用的优先级类](/zh/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default) -以了解更多细节。 -{{< /warning >}} - - -## 如何使用优先级和抢占 - -要使用优先级和抢占特性: - -1. 添加一个或多个 [PriorityClasses](#priorityclass) 对象 - -1. 创建 Pod 时设置其 [`priorityClassName`](#pod-priority) 为所添加的 PriorityClass 之一。 - 当然你也不必一定要直接创建 Pod;通常你会在一个集合对象(如 Deployment)的 Pod - 模板中添加 `priorityClassName`。 - -关于这些步骤的详细信息,请继续阅读。 - - -{{< note >}} -Kubernetes 发行时已经带有两个 PriorityClasses:`system-cluster-critical` 和 `system-node-critical`。 -这些优先级类是公共的,用来 -[确保关键组件总是能够先被调度](/zh/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/). 
-{{< /note >}} - - -## 如何禁用抢占 {#how-to-disable-preemption} - - -{{< caution >}} -关键 Pod 依赖调度器抢占机制以在集群资源压力较大时得到调度。 -因此,不建议禁用抢占。 -{{< /caution >}} - - -{{< note >}} -在 Kubernetes 1.15 及之后版本中,如果特性门控 `NonPreemptingPriority` 被启用, -则 PriorityClass 对象可以选择设置 `preemptionPolicy: Never`。 -这样就会避免属于该 PriorityClass 的 Pod 抢占其他 Pod。 -{{< /note >}} - - -抢占能力是通过 `kube-scheduler` 的标志 `disablePreemption` -来控制的,该标志默认为 `false`。 -如果你在了解上述提示的前提下仍希望禁用抢占,可以将 `disablePreemption` -设置为`true`。 - -这一选项只能通过组件配置来设置,无法通过命令行选项这种较老的形式设置。 -下面是禁用抢占的组件配置示例: - -```yaml -apiVersion: kubescheduler.config.k8s.io/v1alpha1 -kind: KubeSchedulerConfiguration -algorithmSource: - provider: DefaultProvider - -... - -disablePreemption: true -``` - -## PriorityClass - - -PriorityClass 是一种不属于任何名字空间的对象,定义的是从优先级类名向优先级整数值的映射。 -优先级类名称用 PriorityClass 对象的元数据的 `name` 字段指定。 -优先级整数值在必须提供的 `value` 字段中指定。 -优先级值越大,优先级越高。 -PriorityClass 对象的名称必须是合法的 -[DNS 子域名](/zh/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) -且不可包含 `system-` 前缀。 - - -PriorityClass 对象可以设置数值小于等于 10 亿的 32 位整数。 -更大的数值保留给那些通常不可被抢占或逐出的系统 Pod。 -集群管理员应该为每个优先级值映射创建一个 PriorityClass 对象。 - - -PriorityClass 对象还有两个可选字段:`globalDefault` 和 `description`。 -前者用来表明此 PriorityClass 的数值应该用于未设置 `priorityClassName` 的 Pod。 -系统中只能存在一个 `globalDefault` 设为真的 PriorityClass 对象。 -如果没有 PriorityClass 对象的 `globalDefault` 被设置,则未设置 -`priorityClassName` 的 Pod 的优先级为 0。 - -`description` 字段可以设置任意字符串值。其目的是告诉用户何时该使用该 -PriorityClass。 - - -### 关于 Pod 优先级与现有集群的说明 - -- 如果你要升级一个不支持 Pod 优先级的集群,现有 Pod 的有效优先级都被视为 0。 - -- 向集群中添加 `globalDefault` 设置为 `true` 的 PriorityClass 不会改变现有 - Pod 的优先级。新添加的 PriorityClass 值仅适用于 PriorityClass 被添加之后 - 新建的 Pod。 - -- 如果你要删除 PriorityClass,则使用所删除的 PriorityClass 名称的现有 Pod 都 - 不会受影响,但是你不可以再创建使用该 PriorityClass 名称的新 Pod。 - - -### PriorityClass 示例 - -```yaml -apiVersion: scheduling.k8s.io/v1 -kind: PriorityClass -metadata: - name: high-priority -value: 1000000 -globalDefault: false -description: "This priority class should be used for XYZ service pods only." 
-``` - - -## 非抢占式的 PriorityClass {#non-preempting-priority-class} - -{{< feature-state for_k8s_version="v1.15" state="alpha" >}} - - -配置 `preemptionPolicy: Never` 的 Pod 在调度队列中会被放在低优先级的 Pod -的前面,但是它们不可以抢占其他 Pod。 -非抢占 Pod 会在调度队列中等待调度,直到有足够空闲资源时才被调度。 -非抢占 Pod 与其他 Pod 一样,也受调度器回退(Back-off)机制影响。 -换言之,如果调度器尝试调度这些 Pod 时发现它们无法调度,它们会被再次尝试,并且 -重试的频率会被降低,这样可以使得其他优先级较低的 Pod 有机会在它们之前被调度。 - - -非抢占 Pod 仍有可能被其他高优先级的 Pod 抢占。 - -`preemptionPolicy` 默认取值为 `PreemptLowerPriority`,这会使得该 PriorityClass -的 Pod 能够抢占低优先级的 Pod(这也是当前的默认行为)。 -如果 `preemptionPolicy` 被设置为 `Never`,则该 PriorityClass 下的 Pod 都是非抢占的。 - - -使用 `preemptionPolicy` 字段要求启用 `NonPreemptingPriority` -[特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/)。 - -一种示例应用场景是数据科学负载。 -用户可能希望所提交的 Job 比其他负载的优先级都高,但又不希望因为抢占运行中的 -Pod 而丢弃现有工作。 -只要集群中"自然地"释放出足够的资源,配置了 `preemptionPolicy: Never` -的高优先级 Job 可以在队列中其他 Pod 之前获得调度机会。 - - -### 非抢占 PriorityClass 示例 - -```yaml -apiVersion: scheduling.k8s.io/v1 -kind: PriorityClass -metadata: - name: high-priority-nonpreempting -value: 1000000 -preemptionPolicy: Never -globalDefault: false -description: "This priority class will not cause other pods to be preempted." 
-``` - - -## Pod 优先级 {#pod-priority} - -在已经创建了一个或多个 PriorityClass 对象之后,你就可以创建 Pod 并在其规约中 -指定这些 PriorityClass 的名字之一。优先级准入控制器使用 `priorityClassName` -字段来填充优先级整数值。如果所指定优先级类不存在,则 Pod 被拒绝。 - -下面的 YAML 是一个 Pod 配置,使用了前面例子中创建的 PriorityClass。 -优先级准入控制器检查 Pod 的规约并将 Pod 优先级解析为 1000000。 - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: nginx - labels: - env: test -spec: - containers: - - name: nginx - image: nginx - imagePullPolicy: IfNotPresent - priorityClassName: high-priority -``` - - -### 优先级对 Pod 调度顺序的影响 - -当集群启用了 Pod 优先级时,调度器会基于 Pod 的优先级来排序悬决的 Pod。 -新 Pod 会被放在调度队列中较低优先级的其他悬决 Pod 前面。 -因此,优先级较高的 Pod 在其调度需求被满足的前提下会比优先级低的 Pod 先被调度。 -如果优先级较高的 Pod 无法被调度,调度器会继续尝试调度其他较低优先级的 Pod。 - - -## 抢占 {#preemption} - -Pod 被创建时会被放入一个队列中等待调度。调度器从队列中选择 Pod,尝试将其调度到某 Node 上。 -如果找不到能够满足 Pod 所设置需求的 Node,就会触发悬决 Pod 的抢占逻辑。 -假定 P 是悬决的 Pod,抢占逻辑会尝试找到一个这样的节点,在该节点上移除一个或者多个 -优先级比 P 低的 Pod 后,P 就可以被调度到该节点。如果调度器能够找到这样的节点, -该节点上的一个或者多个优先级较低的 Pod 就会被逐出。当被逐出的 Pod 从该节点上 -消失时,P 就可以调度到此节点。 - - -### 暴露给用户的信息 {#user-exposed-information} - -当 Pod P 在节点 N 上抢占了一个或多个 Pod 时,Pod P 的状态中的`nominatedNodeName` 字段 -会被设置为节点 N 的名字。此字段有助于调度器跟踪为 P -所预留的资源,同时也给用户提供了其集群中发生的抢占的信息。 - - -请注意,Pod P 不一定会被调度到其 "nominated node(提名节点)"。 -当选定的 Pod 被抢占时,它们都会有其体面终止时限(Graceful Termination Period)。 -如果在调度器等待选定的(被牺牲的)Pod 终止期间有新的节点可用,调度器会使用其他 -节点来调度 Pod P。因此,Pod 中的 `nominatedNodeName` 和 `nodeName` 并不总是相同。 -此外,如果调度器抢占了节点 N 上的 Pod,但接下来出现优先级比 P 还高的 Pod 要被 -调度,则调度器会把节点 N 让给新的优先级更高的 Pod。如果发生了这种情况,调度器 -会清除 Pod P 的 `nominatedNodeName`。通过清除操作,调度器使得 Pod P 可以尝试 -抢占别的节点上的 Pod。 - - -### 抢占的局限性 {#limitations-of-preemption} - -#### 抢占牺牲者的体面终止期限 - -当 Pod 被抢占时,做出牺牲的 Pod 仍有各自的 -[体面终止期限](/zh/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)。 -这些 Pod 可以在给定的期限内结束其工作并退出。如果它们不能及时退出则会被杀死。 -这一体面终止期限带来了一个时间空隙,跨度从调度器开始抢占 Pod 的那一刻到悬决 Pod -(P)可以被调度到节点(N)上的那一刻。 -与此同时,调度器还要继续调度其他悬决的 Pod。 -随着被抢占的 Pod 退出或终止,调度器尝试继续尝试调度悬决队列中的 Pod。 -因此,从调度器抢占被牺牲的 Pod 到 Pod P 被调度,中间通常存在一个时间间隔。 -为了缩短此时间间隔,用户可以将低优先级的 Pod 的体面终止期限设置为 0 -或者较小的数字。 - - -#### PodDisruptionBudget 
是被支持的,但不提供保证 - -[PodDisruptionBudget](/zh/docs/concepts/workloads/pods/disruptions/) (PDB) -的存在使得应用的属主能够限制多副本应用因主动干扰而同时离线的 Pod 的个数。 -Kubernetes 在抢占 Pod 时是可以支持 PDB 的,但对 PDB 的约束也仅限于尽力而为。 -调度器会尝试寻找不会因为抢占而违反其 PDB 约束的 Pod 作为牺牲品,不过如果 -找不到这样的待逐出 Pod,抢占行为仍会发生,低优先级的 Pod 仍会被逐出而不管 -是否违反其 PDB 约束。 - - -#### 低优先级 Pod 间的亲和性 - -只有对下面的问题的回答是肯定的的时候,才会考虑在节点上执行抢占操作: -"如果所有优先级低于悬决 Pod 的 Pod 都从节点上逐出,悬决 Pod -可以调度到此节点么?" - - -{{< note >}} -抢占操作不一定要逐出所有优先级较低的 Pod。 -如果少逐出几个 Pod 而不是逐出所有较低优先级的 Pod 即可令悬决 Pod -被调度,则优先级较低的 Pod 中只有一部分会被逐出。 -即便如此,对上述问题的回答仍须是肯定的。如果回答是否定的,Kubernetes -不会考虑在该节点上执行抢占操作。 -{{< /note >}} - - -如果悬决 Pod 与节点上一个或多个较低优先级的 Pod 之间存在 Pod 间亲和性关系, -那些对应的低优先级 Pod 若被逐出则无法满足此亲和性规则。 -在这种场合下,调度器不会抢占节点上的任何 Pod。相反,它会尝试寻找其他节点。 -调度器可能能找到也可能找不到合适的节点。 -Kubernetes 并不保证悬决的 Pod 最终会被调度。 - -对此问题的一种解决方案是仅针对优先级相同或更高的 Pod 设置 Pod 间亲和性。 - - -#### 跨节点的抢占 {#cross-node-preemption} - -假定当前正在考虑在节点 N 上执行抢占操作以便 Pod P 能够被调度到 N 上执行。 -可是只有当另一个节点上的某个 Pod 被抢占,P 才有可能在 N 上调度执行。例如: - - -* Pod P 正在考虑被调度到节点 N。 -* Pod Q 正运行在节点 N 所处区域(Zone)的另一个节点上。 -* Pod P 设置了区域范畴的与 Pod Q 的反亲和性 - (`topologyKey: topology.kubernetes.io/zone`)。 -* Pod P 与区域中的其他 Pod 之间都不存在反亲和性关系。 -* 为了将 P 调度到节点 N 上,Pod Q 可以被抢占,但是调度器不会执行跨节点的 - 抢占操作。因此,Pod P 会被视为无法调度到节点 N 上执行。 - - -如果 Pod Q 真的被从其节点上移除,Pod 间反亲和性的规则就会得到满足,Pod P -就有可能被调度到节点 N 上执行。 - -我们可能在将来版本中考虑添加跨节点的抢占能力。前提是在这方面有足够多的需求, -并且我们找到了性能可接受的算法。 - - -## 故障排查 {#troubleshooting} - -Pod 优先级和抢占机制可能产生一些不想看到的副作用。 -下面是一些可能存在的问题以及相应的处理方法。 - - -### Pod 被不必要地抢占 - -抢占操作会在集群中资源压力较大,进而无法为高优先级的悬决 Pod 腾出空间时发生。 -如果你不小心给某些 Pod 赋予了较高优先级,这些意外获得高优先级的 Pod 可能导致 -集群中出现抢占行为。Pod 优先级是通过在其规约中的 `priorityClassName` 来设定的。 -优先级的整数值被解析出来后会添加到 Pod 规约的 `priority` 字段。 - - -要解决这一问题,你可以修改这些 Pod 的 `priorityClassName` 设置,使用优先级 -较低的优先级类,或者将该字段留空。空的 `priorityClassName` 默认解析为优先级 0。 - -Pod 被抢占时,被抢占的 Pod 会有对应的事件被记录下来。 -只有集群中无法为某 Pod 提供足够资源的时候才会发生抢占。 -在出现这种情况时,也只有悬决 Pod(抢占者)的优先级高于被牺牲的 Pod -的优先级时,才会发生抢占现象。 -当没有悬决 Pod,或者悬决 Pod 的优先级等于或者低于现有 Pod 时,都不应发生抢占行为。 -如果在这种条件下仍然发生了抢占,请登记一个 Issue。 - - -### Pod 被抢占但抢占者未被调度 - -当有 Pod 被抢占时,它们会得到各自的体面终止期限(默认为 30 
秒)。 -如果被牺牲的 Pod 在此限期内未能终止,则 Pod 会被强制终止 -一旦所有被牺牲的 Pod 都已消失不见,抢占者 Pod 就可被调度。 - - -在抢占者 Pod 等待被牺牲的 Pod 消失期间,可能有更高优先级的 Pod 被创建,且适合 -调度到同一节点。如果是这种情况,调度器会调度优先级更高的 Pod 而不是抢占者。 - -这是期望发生的行为:优先级更高的 Pod 应该取代优先级较低的 Pod。 - - -### 高优先级的 Pod 比低优先级的 Pod 先被抢占 - -调度器尝试寻找可以运行悬决 Pod 的节点。如果找不到这样的节点,调度器会尝试从任一 -节点上逐出优先级较低的 Pod 以运行悬决 Pod。 -如果包含低优先级 Pod 的节点不适合用来运行悬决 Pod,调度器可能会选择其他的、 -运行着较高优先级(相对之前所评估的节点上的 Pod 而言)的 Pod 的节点来执行抢占操作。 -即使如此,被牺牲的 Pod 的优先级也必须比抢占者 Pod 的优先级低。 - - -当有多个节点可供抢占时,调度器会选择 Pod 集合的优先级最低的节点。不过如果这些 -Pod 上定义了 PodDisruptionBudget(PDB)而且如果被抢占了的话就会违反 PDB, -则调度器会选择另一个 Pod 集合优先级稍高的节点。 - -当存在多个节点可供抢占,但以上场景都不适用,则调度器会选择优先级最低的节点。 - - -## Pod 优先级与服务质量间关系 {#interactions-of-pod-priority-and-qos} - -Pod 优先级与 {{< glossary_tooltip text="QoS 类" term_id="qos-class" >}} 是两个 -相互独立的功能特性,其间交互之处很少,并且不存在基于 Pod QoS 类来为其设置 -优先级方面的默认限制。 -调度器的抢占逻辑在选择抢占目标时不会考虑 QoS 因素。 -抢占考虑的是 Pod 优先级,并选择优先级最低的 Pod 作为抢占目标。 -只有移除最低优先级的 Pod 尚不足以允许调度器调度抢占者 Pod 或者最低优先级的 Pod -受到 Pod 干扰预算(PDB)保护时,才会考虑抢占优先级稍高的 Pod。 - - -唯一同时考虑 QoS 和 Pod 优先级的组件是 `kubelet`,体现在其 -[资源不足时的逐出](/zh/docs/tasks/administer-cluster/out-of-resource/)操作。 -`kubelet` 首先根据 Pod 对濒危资源的使用是否超出其请求值来选择要被逐出的 Pod, -接下来对这些 Pod 按优先级排序,再按其相对 Pod 的调度请求所耗用的濒危资源的用量 -排序。更多细节可参阅 -[逐出最终用户的 Pod](/zh/docs/tasks/administer-cluster/out-of-resource/#evicting-end-user-pods)。 - - -`kubelet` 资源不足时的逐出操作不会逐出 Pod 资源用量未超出其请求值的 Pod。 -如果优先级较低的 Pod 未超出其请求值,它们不会被逐出。其他优先级较高的 -且用量超出请求值的 Pod 则可能被逐出。 - -## {{% heading "whatsnext" %}} - - -* 阅读结合 PriorityClass 来使用 ResourceQuota 的介绍: - [限制默认可使用的优先级类](/zh/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default) - diff --git a/content/zh/docs/concepts/containers/runtime-class.md b/content/zh/docs/concepts/containers/runtime-class.md index c45df4e62b..d1ed733804 100644 --- a/content/zh/docs/concepts/containers/runtime-class.md +++ b/content/zh/docs/concepts/containers/runtime-class.md @@ -313,6 +313,4 @@ Pod 开销通过 RuntimeClass 的 `overhead` 字段定义。 - [RuntimeClass 
设计](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md) - [RuntimeClass 调度设计](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md#runtimeclass-scheduling) - 阅读关于 [Pod 开销](/zh/docs/concepts/scheduling-eviction/pod-overhead/) 的概念 -- [PodOverhead 特性设计](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md) - - +- [PodOverhead 特性设计](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/688-pod-overhead) diff --git a/content/zh/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md b/content/zh/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md index f11620acf2..738ba23e06 100644 --- a/content/zh/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md +++ b/content/zh/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md @@ -52,7 +52,7 @@ The aggregation layer runs in-process with the kube-apiserver. 
Until an extensio -APIService 的最常见实现方式是在集群中某 Pod 内运行 *扩展 API 服务器*。 +APIService 的最常见实现方式是在集群中某 Pod 内运行 *扩展 API 服务器*。 如果你在使用扩展 API 服务器来管理集群中的资源,该扩展 API 服务器(也被写成“extension-apiserver”) 一般需要和一个或多个{{< glossary_tooltip text="控制器" term_id="controller" >}}一起使用。 apiserver-builder 库同时提供构造扩展 API 服务器和控制器框架代码。 @@ -71,20 +71,21 @@ If your extension API server cannot achieve that latency requirement, consider m 扩展 API 服务器与 kube-apiserver 之间需要存在低延迟的网络连接。 发现请求需要在五秒钟或更短的时间内完成到 kube-apiserver 的往返。 -如果你的扩展 API 服务器无法满足这一延迟要求,应考虑如何更改配置已满足需要。 +如果你的扩展 API 服务器无法满足这一延迟要求,应考虑如何更改配置以满足需要。 ## {{% heading "whatsnext" %}} * 阅读[配置聚合层](/zh/docs/tasks/extend-kubernetes/configure-aggregation-layer/) 文档, 了解如何在自己的环境中启用聚合器。 * 接下来,了解[安装扩展 API 服务器](/zh/docs/tasks/extend-kubernetes/setup-extension-api-server/), 开始使用聚合层。 -* 也可以学习怎样[使用自定义资源定义扩展 Kubernetes API](/zh/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/)。 -* 阅读 [APIService](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#apiservice-v1-apiregistration-k8s-io) 的规范 +* 从 API 参考资料中研究关于 [APIService](/docs/reference/kubernetes-api/cluster-resources/api-service-v1/) 的内容。 +或者,学习如何[使用自定义资源定义扩展 Kubernetes API](/zh/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/)。 diff --git a/content/zh/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/zh/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index 5094d8910b..65bf222ff9 100644 --- a/content/zh/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/zh/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -324,7 +324,7 @@ making them available to all of its clients. 
通常,Kubernetes API 中的每个都需要处理 REST 请求和管理对象持久性存储的代码。 Kubernetes API 主服务器能够处理诸如 *pods* 和 *services* 这些内置资源,也可以 -按通用的方式通过 CRD {#customresourcedefinitions} 来处理定制资源。 +按通用的方式通过 [CRD](#customresourcedefinitions) 来处理定制资源。 [聚合层(Aggregation Layer)](/zh/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) 使得你可以通过编写和部署你自己的独立的 API 服务器来为定制资源提供特殊的实现。 diff --git a/content/zh/docs/concepts/overview/what-is-kubernetes.md b/content/zh/docs/concepts/overview/what-is-kubernetes.md index 761d4736c1..97110f4f51 100644 --- a/content/zh/docs/concepts/overview/what-is-kubernetes.md +++ b/content/zh/docs/concepts/overview/what-is-kubernetes.md @@ -107,7 +107,7 @@ Containers are becoming popular because they have many benefits. Some of the con * Agile application creation and deployment: increased ease and efficiency of container image creation compared to VM image use. * Continuous development, integration, and deployment: provides for reliable and frequent container image build and deployment with quick and easy rollbacks (due to image immutability). * Dev and Ops separation of concerns: create application container images at build/release time rather than deployment time, thereby decoupling applications from infrastructure. -* Observability not only surfaces OS-level information and metrics, but also application health and other signals. +* Observability: not only surfaces OS-level information and metrics, but also application health and other signals. * Environmental consistency across development, testing, and production: Runs the same on a laptop as it does in the cloud. * Cloud and OS distribution portability: Runs on Ubuntu, RHEL, CoreOS, on-prem, Google Kubernetes Engine, and anywhere else. * Application-centric management: Raises the level of abstraction from running an OS on virtual hardware to running an application on an OS using logical resources. @@ -120,7 +120,7 @@ Containers are becoming popular because they have many benefits. 
Some of the con 容器镜像构建和部署。 * 关注开发与运维的分离:在构建/发布时而不是在部署时创建应用程序容器镜像, 从而将应用程序与基础架构分离。 -* 可观察性不仅可以显示操作系统级别的信息和指标,还可以显示应用程序的运行状况和其他指标信号。 +* 可观察性:不仅可以显示操作系统级别的信息和指标,还可以显示应用程序的运行状况和其他指标信号。 * 跨开发、测试和生产的环境一致性:在便携式计算机上与在云中相同地运行。 * 跨云和操作系统发行版本的可移植性:可在 Ubuntu、RHEL、CoreOS、本地、 Google Kubernetes Engine 和其他任何地方运行。 diff --git a/content/zh/docs/concepts/overview/working-with-objects/namespaces.md b/content/zh/docs/concepts/overview/working-with-objects/namespaces.md index 6966d7afb8..5d43643cdf 100644 --- a/content/zh/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/zh/docs/concepts/overview/working-with-objects/namespaces.md @@ -21,6 +21,7 @@ These virtual clusters are called namespaces. --> Kubernetes 支持多个虚拟集群,它们底层依赖于同一个物理集群。 这些虚拟集群被称为名字空间。 +在一些文档里名字空间也称为命名空间。 diff --git a/content/zh/docs/concepts/policy/node-resource-managers.md b/content/zh/docs/concepts/policy/node-resource-managers.md index 0651a66f73..73f28da383 100644 --- a/content/zh/docs/concepts/policy/node-resource-managers.md +++ b/content/zh/docs/concepts/policy/node-resource-managers.md @@ -38,7 +38,7 @@ The configuration of individual managers is elaborated in dedicated documents: - [CPU 管理器策略](/zh/docs/tasks/administer-cluster/cpu-management-policies/) - [设备管理器](/zh/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#device-plugin-integration-with-the-topology-manager) diff --git a/content/zh/docs/concepts/policy/pod-security-policy.md b/content/zh/docs/concepts/policy/pod-security-policy.md index fe4bcc8021..46db85215e 100644 --- a/content/zh/docs/concepts/policy/pod-security-policy.md +++ b/content/zh/docs/concepts/policy/pod-security-policy.md @@ -17,7 +17,7 @@ weight: 30 -PodSecurityPolicy 在 Kubernetes v1.21 版本中被启用,将在 v1.25 中删除。 +PodSecurityPolicy 在 Kubernetes v1.21 版本中被弃用,将在 v1.25 中删除。 #### 名字空间选择算符 -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} +{{< feature-state for_k8s_version="v1.22" state="beta" >}} -此功能特性是 Alpha 版本的,默认是被禁用的。你可以通过针对 kube-apiserver 和 
+此功能特性是 Beta 版本的,默认是被启用的。你可以通过针对 kube-apiserver 和 kube-scheduler 设置 [特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/) -`PodAffinityNamespaceSelector` 来启用此特性。 +`PodAffinityNamespaceSelector` 来禁用此特性。 - - - -本页提供 Kubernetes 驱逐策略的概览。 - - - - -## 驱逐策略 {#eviction-policy} - -{{< glossary_tooltip text="Kubelet" term_id="kubelet" >}} 主动监测和防止 -计算资源的全面短缺。在资源短缺时,`kubelet` 可以主动地结束一个或多个 Pod -以回收短缺的资源。 -当 `kubelet` 结束一个 Pod 时,它将终止 Pod 中的所有容器,而 Pod 的 `Phase` -将变为 `Failed`。 -如果被驱逐的 Pod 由 Deployment 管理,这个 Deployment 会创建另一个 Pod 给 -Kubernetes 来调度。 - -## {{% heading "whatsnext" %}} - - -- 阅读[配置资源不足的处理](/zh/docs/tasks/administer-cluster/out-of-resource/), - 进一步了解驱逐信号和阈值。 - diff --git a/content/zh/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/zh/docs/concepts/scheduling-eviction/kube-scheduler.md index 65306db584..c950fb1e20 100644 --- a/content/zh/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/zh/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -95,7 +95,7 @@ the API server about this decision in a process called _binding_. kube-apiserver,这个过程叫做 _绑定_。 + +{{}}
    + + +{{}} +监控集群节点的 CPU、内存、磁盘空间和文件系统的 inode 等资源。 +当这些资源中的一个或者多个达到特定的消耗水平, +kubelet 可以主动地使节点上一个或者多个 Pod 失效,以回收资源防止饥饿。 + +在节点压力驱逐期间,kubelet 将所选 Pod 的 `PodPhase` 设置为 `Failed`。这将终止 Pod。 + +节点压力驱逐不同于 [API 发起的驱逐](/zh/docs/concepts/scheduling-eviction/api-eviction/)。 + + +kubelet 并不理会你配置的 `PodDisruptionBudget` 或者是 Pod 的 `terminationGracePeriodSeconds`。 +如果你使用了[软驱逐条件](#soft-eviction-thresholds),kubelet 会考虑你所配置的 +`eviction-max-pod-grace-period`。 +如果你使用了[硬驱逐条件](#hard-eviction-thresholds),它使用 `0s` 宽限期来终止 Pod。 + +如果 Pod 是由替换失败 Pod 的{{< glossary_tooltip text="工作负载" term_id="workload" >}}资源 +(例如 {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}} +或者 {{< glossary_tooltip text="Deployment" term_id="deployment" >}})管理, +则控制平面或 `kube-controller-manager` 会创建新的 Pod 来代替被驱逐的 Pod。 + +{{}} + +kubelet 在终止最终用户 Pod 之前会尝试[回收节点级资源](#reclaim-node-resources)。 +例如,它会在磁盘资源不足时删除未使用的容器镜像。 +{{}} + + +kubelet 使用各种参数来做出驱逐决定,如下所示: + + * 驱逐信号 + * 驱逐条件 + * 监控间隔 + + +### 驱逐信号 {#eviction-signals} + +驱逐信号是特定资源在特定时间点的当前状态。 +kubelet 使用驱逐信号,通过将信号与驱逐条件进行比较来做出驱逐决定, +驱逐条件是节点上应该可用资源的最小量。 + +kubelet 使用以下驱逐信号: + +| 驱逐信号 | 描述 | +|----------------------|---------------------------------------------------------------------------------------| +| `memory.available` | `memory.available` := `node.status.capacity[memory]` - `node.stats.memory.workingSet` | +| `nodefs.available` | `nodefs.available` := `node.stats.fs.available` | +| `nodefs.inodesFree` | `nodefs.inodesFree` := `node.stats.fs.inodesFree` | +| `imagefs.available` | `imagefs.available` := `node.stats.runtime.imagefs.available` | +| `imagefs.inodesFree` | `imagefs.inodesFree` := `node.stats.runtime.imagefs.inodesFree` | +| `pid.available` | `pid.available` := `node.stats.rlimit.maxpid` - `node.stats.rlimit.curproc` | + + +在上表中,`描述`列显示了 kubelet 如何获取信号的值。每个信号支持百分比值或者是字面值。 +kubelet 计算相对于与信号有关的总量的百分比值。 + + +`memory.available` 的值来自 cgroupfs,而不是像 `free -m` 这样的工具。 +这很重要,因为 `free -m` 在容器中不起作用,如果用户使用 
+[节点可分配资源](/zh/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) +这一功能特性,资源不足的判定是基于 CGroup 层次结构中的用户 Pod 所处的局部及 CGroup 根节点作出的。 +这个[脚本](/zh/examples/admin/resource/memory-available.sh) +重现了 kubelet 为计算 `memory.available` 而执行的相同步骤。 +kubelet 在其计算中排除了 inactive_file(即非活动 LRU 列表上基于文件来虚拟的内存的字节数), +因为它假定在压力下内存是可回收的。 + + +kubelet 支持以下文件系统分区: + +1. `nodefs`:节点的主要文件系统,用于本地磁盘卷、emptyDir、日志存储等。 + 例如,`nodefs` 包含 `/var/lib/kubelet/`。 +1. `imagefs`:可选文件系统,供容器运行时存储容器镜像和容器可写层。 + +kubelet 会自动发现这些文件系统并忽略其他文件系统。kubelet 不支持其他配置。 + +{{}} + +一些 kubelet 垃圾收集功能已被弃用,以支持驱逐。 +有关已弃用功能的列表,请参阅 +[kubelet 垃圾收集弃用](/zh/docs/concepts/cluster-administration/kubelet-garbage-collection/#deprecation)。 +{{}} + + +### 驱逐条件 {#eviction-thresholds} + +你可以为 kubelet 指定自定义驱逐条件,以便在作出驱逐决定时使用。 + +驱逐条件的形式为 `[eviction-signal][operator][quantity]`,其中: + +* `eviction-signal` 是要使用的[驱逐信号](#eviction-signals)。 +* `operator` 是你想要的[关系运算符](https://en.wikipedia.org/wiki/Relational_operator#Standard_relational_operators), + 比如 `<`(小于)。 +* `quantity` 是驱逐条件数量,例如 `1Gi`。 + `quantity` 的值必须与 Kubernetes 使用的数量表示相匹配。 + 你可以使用文字值或百分比(`%`)。 + + +例如,如果一个节点的总内存为 10Gi 并且你希望在可用内存低于 1Gi 时触发驱逐, +则可以将驱逐条件定义为 `memory.available<10%` 或 `memory.available<1Gi`。 +你不能同时使用二者。 + +你可以配置软和硬驱逐条件。 + + +#### 软驱逐条件 {#soft-eviction-thresholds} + +软驱逐条件将驱逐条件与管理员所必须指定的宽限期配对。 +在超过宽限期之前,kubelet 不会驱逐 Pod。 +如果没有指定的宽限期,kubelet 会在启动时返回错误。 + + +你可以既指定软驱逐条件宽限期,又指定 Pod 终止宽限期的上限,给 kubelet 在驱逐期间使用。 +如果你指定了宽限期的上限并且 Pod 满足软驱逐条件,则 kubelet 将使用两个宽限期中的较小者。 +如果你没有指定宽限期上限,kubelet 会立即杀死被驱逐的 Pod,不允许其体面终止。 + + +你可以使用以下标志来配置软驱逐条件: + +* `eviction-soft`:一组驱逐条件,如 `memory.available<1.5Gi`, + 如果驱逐条件持续时长超过指定的宽限期,可以触发 Pod 驱逐。 +* `eviction-soft-grace-period`:一组驱逐宽限期, + 如 `memory.available=1m30s`,定义软驱逐条件在触发 Pod 驱逐之前必须保持多长时间。 +* `eviction-max-pod-grace-period`:在满足软驱逐条件而终止 Pod 时使用的最大允许宽限期(以秒为单位)。 + + +#### 硬驱逐条件 {#hard-eviction-thresholds} + +硬驱逐条件没有宽限期。当达到硬驱逐条件时, +kubelet 会立即杀死 pod,而不会正常终止以回收紧缺的资源。 + +你可以使用 `eviction-hard` 标志来配置一组硬驱逐条件, +例如 `memory.available<1Gi`。 + + 
+kubelet 具有以下默认硬驱逐条件: + +* `memory.available<100Mi` +* `nodefs.available<10%` +* `imagefs.available<15%` +* `nodefs.inodesFree<5%`(Linux 节点) + + +### 驱逐监测间隔 + +kubelet 根据其配置的 `housekeeping-interval`(默认为 `10s`)评估驱逐条件。 + + +### 节点条件 {#node-conditions} + +kubelet 报告节点状况以反映节点处于压力之下,因为满足硬或软驱逐条件,与配置的宽限期无关。 + + +kubelet 根据下表将驱逐信号映射为节点状况: + +| 节点条件 | 驱逐信号 | 描述 | +|---------|--------|------| +| `MemoryPressure` | `memory.available` | 节点上的可用内存已满足驱逐条件 | +| `DiskPressure` | `nodefs.available`、`nodefs.inodesFree`、`imagefs.available` 或 `imagefs.inodesFree` | 节点的根文件系统或映像文件系统上的可用磁盘空间和 inode 已满足驱逐条件 | +| `PIDPressure` | `pid.available` | (Linux) 节点上的可用进程标识符已低于驱逐条件 | + +kubelet 根据配置的 `--node-status-update-frequency` 更新节点条件,默认为 `10s`。 + + +#### 节点条件振荡 + +在某些情况下,节点在软驱逐条件上下振荡,而没有保持定义的宽限期。 +这会导致报告的节点条件在 `true` 和 `false` 之间不断切换,从而导致错误的驱逐决策。 + +为了防止振荡,你可以使用 `eviction-pressure-transition-period` 标志, +该标志控制 kubelet 在将节点条件转换为不同状态之前必须等待的时间。 +过渡期的默认值为 `5m`。 + + +### 回收节点级资源 {#reclaim-node-resources} + +kubelet 在驱逐最终用户 Pod 之前会先尝试回收节点级资源。 + +当报告 `DiskPressure` 节点状况时,kubelet 会根据节点上的文件系统回收节点级资源。 + + +#### 有 `imagefs` + +如果节点有一个专用的 `imagefs` 文件系统供容器运行时使用,kubelet 会执行以下操作: + + * 如果 `nodefs` 文件系统满足驱逐条件,kubelet 垃圾收集死亡 Pod 和容器。 + * 如果 `imagefs` 文件系统满足驱逐条件,kubelet 将删除所有未使用的镜像。 + + +#### 没有 `imagefs` + +如果节点只有一个满足驱逐条件的 `nodefs` 文件系统, +kubelet 按以下顺序释放磁盘空间: + +1. 对死亡的 Pod 和容器进行垃圾收集 +1. 删除未使用的镜像 + + +### kubelet 驱逐时 Pod 的选择 + +如果 kubelet 回收节点级资源的尝试没有使驱逐信号低于条件, +则 kubelet 开始驱逐最终用户 Pod。 + +kubelet 使用以下参数来确定 Pod 驱逐顺序: + +1. Pod 的资源使用是否超过其请求 +1. [Pod 优先级](/zh/docs/concepts/scheduling-eviction/pod-priority-preemption/) +1. Pod 相对于请求的资源使用情况 + + +因此,kubelet 按以下顺序排列和驱逐 Pod: + +1. 首先考虑资源使用量超过其请求的 `BestEffort` 或 `Burstable` Pod。 + 这些 Pod 会根据它们的优先级以及它们的资源使用级别超过其请求的程度被逐出。 +1. 
资源使用量少于请求量的 `Guaranteed` Pod 和 `Burstable` Pod 根据其优先级被最后驱逐。 + +{{}} + +kubelet 不使用 Pod 的 QoS 类来确定驱逐顺序。 +在回收内存等资源时,你可以使用 QoS 类来估计最可能的 Pod 驱逐顺序。 +QoS 不适用于临时存储(EphemeralStorage)请求, +因此如果节点在 `DiskPressure` 下,则上述场景将不适用。 +{{}} + + +仅当 `Guaranteed` Pod 中所有容器都被指定了请求和限制并且二者相等时,才保证 Pod 不被驱逐。 +这些 Pod 永远不会因为另一个 Pod 的资源消耗而被驱逐。 +如果系统守护进程(例如 `kubelet`、`docker` 和 `journald`) +消耗的资源比通过 `system-reserved` 或 `kube-reserved` 分配保留的资源多, +并且该节点只有 `Guaranteed` 或 `Burstable` Pod 使用的资源少于其上剩余的请求, +那么 kubelet 必须选择驱逐这些 Pod 中的一个以保持节点稳定性并减少资源匮乏对其他 Pod 的影响。 +在这种情况下,它会选择首先驱逐最低优先级的 Pod。 + + +当 kubelet 因 inode 或 PID 不足而驱逐 pod 时, +它使用优先级来确定驱逐顺序,因为 inode 和 PID 没有请求。 + +kubelet 根据节点是否具有专用的 `imagefs` 文件系统对 Pod 进行不同的排序: + + +#### 有 `imagefs` + +如果 `nodefs` 触发驱逐, +kubelet 会根据 `nodefs` 使用情况(`本地卷 + 所有容器的日志`)对 Pod 进行排序。 + +如果 `imagefs` 触发驱逐,kubelet 会根据所有容器的可写层使用情况对 Pod 进行排序。 + +#### 没有 `imagefs` + +如果 `nodefs` 触发驱逐, +kubelet 会根据磁盘总用量(`本地卷 + 日志和所有容器的可写层`)对 Pod 进行排序。 + + +### 最小驱逐回收 {#minimum-eviction-reclaim} + +在某些情况下,驱逐 Pod 只会回收少量的紧俏资源。 +这可能导致 kubelet 反复达到配置的驱逐条件并触发多次驱逐。 + + +你可以使用 `--eviction-minimum-reclaim` 标志或 +[kubelet 配置文件](/zh/docs/tasks/administer-cluster/kubelet-config-file/) +为每个资源配置最小回收量。 +当 kubelet 注意到某个资源耗尽时,它会继续回收该资源,直到回收到你所指定的数量为止。 + +例如,以下配置设置最小回收量: + +```yaml +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + memory.available: "500Mi" + nodefs.available: "1Gi" + imagefs.available: "100Gi" +evictionMinimumReclaim: + memory.available: "0Mi" + nodefs.available: "500Mi" + imagefs.available: "2Gi" +``` + + +在这个例子中,如果 `nodefs.available` 信号满足驱逐条件, +kubelet 会回收资源,直到信号达到 `1Gi` 的条件, +然后继续回收至少 `500Mi` 直到信号达到 `1.5Gi`。 + +类似地,kubelet 会回收 `imagefs` 资源,直到 `imagefs.available` 信号达到 `102Gi`。 + +对于所有资源,默认的 `eviction-minimum-reclaim` 为 `0`。 + + +### 节点内存不足行为 + +如果节点在 kubelet 能够回收内存之前遇到内存不足(OOM)事件, +则节点依赖 [oom_killer](https://lwn.net/Articles/391222/) 来响应。 + +kubelet 根据 Pod 的服务质量(QoS)为每个容器设置一个 `oom_score_adj` 值。 + +| 服务质量 | oom_score_adj | 
+|--------------------|-----------------------------------------------------------------------------------| +| `Guaranteed` | -997 | +| `BestEffort` | 1000 | +| `Burstable` | min(max(2, 1000 - (1000 * memoryRequestBytes) / machineMemoryCapacityBytes), 999) | + +{{}} + +kubelet 还将具有 `system-node-critical` +{{}} +的 Pod 中的容器 `oom_score_adj` 值设为 `-997`。 +{{}} + + +如果 kubelet 在节点遇到 OOM 之前无法回收内存, +则 `oom_killer` 根据它在节点上使用的内存百分比计算 `oom_score`, +然后加上 `oom_score_adj` 得到每个容器有效的 `oom_score`。 +然后它会杀死得分最高的容器。 + +这意味着低 QoS Pod 中相对于其调度请求消耗内存较多的容器,将首先被杀死。 + +与 Pod 驱逐不同,如果容器被 OOM 杀死, +`kubelet` 可以根据其 `RestartPolicy` 重新启动它。 + + +### 最佳实践 {#node-pressure-eviction-good-practices} + +以下部分描述了驱逐配置的最佳实践。 + + +#### 可调度的资源和驱逐策略 + +当你为 kubelet 配置驱逐策略时, +你应该确保调度程序不会在 Pod 触发驱逐时对其进行调度,因为这类 Pod 会立即引起内存压力。 + + +考虑以下场景: + +* 节点内存容量:`10Gi` +* 操作员希望为系统守护进程(内核、`kubelet` 等)保留 10% 的内存容量 +* 操作员希望驱逐内存利用率为 95% 的Pod,以减少系统 OOM 的概率。 + + +为此,kubelet 启动设置如下: + +``` +--eviction-hard=memory.available<500Mi +--system-reserved=memory=1.5Gi +``` + + +在此配置中,`--system-reserved` 标志为系统预留了 `1.5Gi` 的内存, +即 `总内存的 10% + 驱逐条件量`。 + +如果 Pod 使用的内存超过其请求值或者系统使用的内存超过 `1Gi`, +则节点可以达到驱逐条件,这使得 `memory.available` 信号低于 `500Mi` 并触发条件。 + + +### DaemonSet + +Pod 优先级是做出驱逐决定的主要因素。 +如果你不希望 kubelet 驱逐属于 `DaemonSet` 的 Pod, +请在 Pod 规约中为这些 Pod 提供足够高的 `priorityClass`。 +你还可以使用优先级较低的 `priorityClass` 或默认配置, +仅在有足够资源时才运行 `DaemonSet` Pod。 + + +### 已知问题 + +以下部分描述了与资源不足处理相关的已知问题。 + + +#### kubelet 可能不会立即观察到内存压力 + +默认情况下,kubelet 轮询 `cAdvisor` 以定期收集内存使用情况统计信息。 +如果该轮询时间窗口内内存使用量迅速增加,kubelet 可能无法足够快地观察到 `MemoryPressure`, +但是 `OOMKiller` 仍将被调用。 + + +你可以使用 `--kernel-memcg-notification` +标志在 kubelet 上启用 `memcg` 通知 API,以便在超过条件时立即收到通知。 + +如果你不是追求极端利用率,而是要采取合理的过量使用措施, +则解决此问题的可行方法是使用 `--kube-reserved` 和 `--system-reserved` 标志为系统分配内存。 + + +#### active_file 内存未被视为可用内存 + +在 Linux 上,内核跟踪活动 LRU 列表上的基于文件所虚拟的内存字节数作为 `active_file` 统计信息。 +kubelet 将 `active_file` 内存区域视为不可回收。 +对于大量使用块设备形式的本地存储(包括临时本地存储)的工作负载, +文件和块数据的内核级缓存意味着许多最近访问的缓存页面可能被计为 `active_file`。 +如果这些内核块缓冲区中在活动 
LRU 列表上有足够多, +kubelet 很容易将其视为资源用量过量并为节点设置内存压力污点,从而触发 Pod 驱逐。 + + +更多细节请参见 [https://github.com/kubernetes/kubernetes/issues/43916](https://github.com/kubernetes/kubernetes/issues/43916) + +你可以通过为可能执行 I/O 密集型活动的容器设置相同的内存限制和内存请求来应对该行为。 +你将需要估计或测量该容器的最佳内存限制值。 + +## {{% heading "whatsnext" %}} + + +* 了解 [API 发起的驱逐](/zh/docs/concepts/scheduling-eviction/api-eviction/) +* 了解 [Pod 优先级和驱逐](/zh/docs/concepts/scheduling-eviction/pod-priority-preemption/) +* 了解 [PodDisruptionBudgets](/docs/tasks/run-application/configure-pdb/) +* 了解[服务质量](/zh/docs/tasks/configure-pod-container/quality-service-pod/)(QoS) +* 查看[驱逐 API](/docs/reference/generated/kubernetes-api/{{}}/#create-eviction-pod-v1-core) \ No newline at end of file diff --git a/content/zh/docs/concepts/scheduling-eviction/pod-priority-preemption.md b/content/zh/docs/concepts/scheduling-eviction/pod-priority-preemption.md new file mode 100644 index 0000000000..7c9fe009c3 --- /dev/null +++ b/content/zh/docs/concepts/scheduling-eviction/pod-priority-preemption.md @@ -0,0 +1,667 @@ +--- +title: Pod 优先级和抢占 +content_type: concept +weight: 50 +--- + + + + + +{{< feature-state for_k8s_version="v1.14" state="stable" >}} + + +[Pod](/zh/docs/concepts/workloads/pods/) 可以有 _优先级_。 +优先级表示一个 Pod 相对于其他 Pod 的重要性。 +如果一个 Pod 无法被调度,调度程序会尝试抢占(驱逐)较低优先级的 Pod, +以使悬决 Pod 可以被调度。 + + + +{{< warning >}} + +在一个并非所有用户都是可信的集群中,恶意用户可能以最高优先级创建 Pod, +导致其他 Pod 被驱逐或者无法被调度。 +管理员可以使用 ResourceQuota 来阻止用户创建高优先级的 Pod。 +参见[默认限制优先级消费](/zh/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default)。 + +{{< /warning >}} + + +## 如何使用优先级和抢占 + +要使用优先级和抢占: + +1. 新增一个或多个 [PriorityClass](#priorityclass)。 + +1. 
创建 Pod,并将其 [`priorityClassName`](#pod-priority) 设置为新增的 PriorityClass。 + 当然你不需要直接创建 Pod;通常,你将会添加 `priorityClassName` 到集合对象(如 Deployment) + 的 Pod 模板中。 + +继续阅读以获取有关这些步骤的更多信息。 + +{{< note >}} + +Kubernetes 已经提供了 2 个 PriorityClass: +`system-cluster-critical` 和 `system-node-critical`。 +这些是常见的类,用于[确保始终优先调度关键组件](/zh/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/)。 +{{< /note >}} + + +## PriorityClass {#priorityclass} + +PriorityClass 是一个无名称空间对象,它定义了从优先级类名称到优先级整数值的映射。 +名称在 PriorityClass 对象元数据的 `name` 字段中指定。 +值在必填的 `value` 字段中指定。值越大,优先级越高。 +PriorityClass 对象的名称必须是有效的 +[DNS 子域名](/zh/docs/concepts/overview/working-with-objects/names#dns-subdomain-names), +并且它不能以 `system-` 为前缀。 + + +PriorityClass 对象可以设置任何小于或等于 10 亿的 32 位整数值。 +较大的数字是为通常不应被抢占或驱逐的关键的系统 Pod 所保留的。 +集群管理员应该为这类映射分别创建独立的 PriorityClass 对象。 + +PriorityClass 还有两个可选字段:`globalDefault` 和 `description`。 +`globalDefault` 字段表示这个 PriorityClass 的值应该用于没有 `priorityClassName` 的 Pod。 +系统中只能存在一个 `globalDefault` 设置为 true 的 PriorityClass。 +如果不存在设置了 `globalDefault` 的 PriorityClass, +则没有 `priorityClassName` 的 Pod 的优先级为零。 + +`description` 字段是一个任意字符串。 +它用来告诉集群用户何时应该使用此 PriorityClass。 + + +### 关于 PodPriority 和现有集群的注意事项 + +- 如果你升级一个已经存在的但尚未使用此特性的集群,该集群中已经存在的 Pod 的优先级等效于零。 + +- 添加一个将 `globalDefault` 设置为 `true` 的 PriorityClass 不会改变现有 Pod 的优先级。 + 此类 PriorityClass 的值仅用于添加 PriorityClass 后创建的 Pod。 + +- 如果你删除了某个 PriorityClass 对象,则使用被删除的 PriorityClass 名称的现有 Pod 保持不变, + 但是你不能再创建使用已删除的 PriorityClass 名称的 Pod。 + + +### PriorityClass 示例 + +```yaml +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: high-priority +value: 1000000 +globalDefault: false +description: "此优先级类应仅用于 XYZ 服务 Pod。" +``` + + +## 非抢占式 PriorityClass {#non-preempting-priority-class} + +{{< feature-state for_k8s_version="v1.19" state="beta" >}} + +配置了 `PreemptionPolicy: Never` 的 Pod 将被放置在调度队列中较低优先级 Pod 之前, +但它们不能抢占其他 Pod。等待调度的非抢占式 Pod 将留在调度队列中,直到有足够的可用资源, +它才可以被调度。非抢占式 Pod,像其他 Pod 一样,受调度程序回退的影响。 +这意味着如果调度程序尝试这些 Pod 并且无法调度它们,它们将以更低的频率被重试, 
+从而允许其他优先级较低的 Pod 排在它们之前。 + +非抢占式 Pod 仍可能被其他高优先级 Pod 抢占。 + + +`PreemptionPolicy` 默认为 `PreemptLowerPriority`, +这将允许该 PriorityClass 的 Pod 抢占较低优先级的 Pod(现有默认行为也是如此)。 +如果 `PreemptionPolicy` 设置为 `Never`,则该 PriorityClass 中的 Pod 将是非抢占式的。 + +数据科学工作负载是一个示例用例。用户可以提交他们希望优先于其他工作负载的作业, +但不希望因为抢占运行中的 Pod 而导致现有工作被丢弃。 +设置为 `PreemptionPolicy: Never` 的高优先级作业将在其他排队的 Pod 之前被调度, +只要足够的集群资源“自然地”变得可用。 + + +### 非抢占式 PriorityClass 示例 + +```yaml +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: high-priority-nonpreempting +value: 1000000 +preemptionPolicy: Never +globalDefault: false +description: "This priority class will not cause other pods to be preempted." +``` + + +## Pod 优先级 {#pod-priority} + +在你拥有一个或多个 PriorityClass 对象之后, +你可以创建在其规约中指定这些 PriorityClass 名称之一的 Pod。 +优先级准入控制器使用 `priorityClassName` 字段并填充优先级的整数值。 +如果未找到所指定的优先级类,则拒绝 Pod。 + +以下 YAML 是 Pod 配置的示例,它使用在前面的示例中创建的 PriorityClass。 +优先级准入控制器检查 Pod 规约并将其优先级解析为 1000000。 + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx + labels: + env: test +spec: + containers: + - name: nginx + image: nginx + imagePullPolicy: IfNotPresent + priorityClassName: high-priority +``` + + +### Pod 优先级对调度顺序的影响 + +当启用 Pod 优先级时,调度程序会按优先级对悬决 Pod 进行排序, +并且每个悬决的 Pod 会被放置在调度队列中其他优先级较低的悬决 Pod 之前。 +因此,如果满足调度要求,较高优先级的 Pod 可能会比具有较低优先级的 Pod 更早调度。 +如果无法调度此类 Pod,调度程序将继续并尝试调度其他较低优先级的 Pod。 + + +## 抢占 {#preemption} + +Pod 被创建后会进入队列等待调度。 +调度器从队列中挑选一个 Pod 并尝试将它调度到某个节点上。 +如果没有找到满足 Pod 的所指定的所有要求的节点,则触发对悬决 Pod 的抢占逻辑。 +让我们将悬决 Pod 称为 P。抢占逻辑试图找到一个节点, +在该节点中删除一个或多个优先级低于 P 的 Pod,则可以将 P 调度到该节点上。 +如果找到这样的节点,一个或多个优先级较低的 Pod 会被从节点中驱逐。 +被驱逐的 Pod 消失后,P 可以被调度到该节点上。 + + +### 用户暴露的信息 + +当 Pod P 抢占节点 N 上的一个或多个 Pod 时, +Pod P 状态的 `nominatedNodeName` 字段被设置为节点 N 的名称。 +该字段帮助调度程序跟踪为 Pod P 保留的资源,并为用户提供有关其集群中抢占的信息。 + +请注意,Pod P 不一定会调度到“被提名的节点(Nominated Node)”。 +在 Pod 因抢占而牺牲时,它们将获得体面终止期。 +如果调度程序正在等待牺牲者 Pod 终止时另一个节点变得可用, +则调度程序将使用另一个节点来调度 Pod P。 +因此,Pod 规约中的 `nominatedNodeName` 和 `nodeName` 并不总是相同。 +此外,如果调度程序抢占节点 N 上的 Pod,但随后比 Pod P 更高优先级的 Pod 到达, +则调度程序可能会将节点 N 
分配给新的更高优先级的 Pod。 +在这种情况下,调度程序会清除 Pod P 的 `nominatedNodeName`。 +通过这样做,调度程序使 Pod P 有资格抢占另一个节点上的 Pod。 + + +### 抢占的限制 + +#### 被抢占牺牲者的体面终止 + +当 Pod 被抢占时,牺牲者会得到他们的 +[体面终止期](/zh/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)。 +它们可以在体面终止期内完成工作并退出。如果它们不这样做就会被杀死。 +这个体面终止期在调度程序抢占 Pod 的时间点和待处理的 Pod (P) +可以在节点 (N) 上调度的时间点之间划分出了一个时间跨度。 +同时,调度器会继续调度其他待处理的 Pod。当牺牲者退出或被终止时, +调度程序会尝试在待处理队列中调度 Pod。 +因此,调度器抢占牺牲者的时间点与 Pod P 被调度的时间点之间通常存在时间间隔。 +为了最小化这个差距,可以将低优先级 Pod 的体面终止时间设置为零或一个小数字。 + + +#### 支持 PodDisruptionBudget,但不保证 + +[PodDisruptionBudget](/zh/docs/concepts/workloads/pods/disruptions/) +(PDB) 允许多副本应用程序的所有者限制因自愿性质的干扰而同时终止的 Pod 数量。 +Kubernetes 在抢占 Pod 时支持 PDB,但对 PDB 的支持是基于尽力而为原则的。 +调度器会尝试寻找不会因被抢占而违反 PDB 的牺牲者,但如果没有找到这样的牺牲者, +抢占仍然会发生,并且即使违反了 PDB 约束也会删除优先级较低的 Pod。 + + +#### 与低优先级 Pod 之间的 Pod 间亲和性 + +只有当这个问题的答案是肯定的时,才考虑在一个节点上执行抢占操作: +“如果从此节点上删除优先级低于悬决 Pod 的所有 Pod,悬决 Pod 是否可以在该节点上调度?” + +{{< note >}} +抢占并不一定会删除所有较低优先级的 Pod。 +如果悬决 Pod 可以通过删除少于所有较低优先级的 Pod 来调度, +那么只有一部分较低优先级的 Pod 会被删除。 +即便如此,上述问题的答案必须是肯定的。 +如果答案是否定的,则不考虑在该节点上执行抢占。 +{{< /note >}} + + +如果悬决 Pod 与节点上的一个或多个较低优先级 Pod 具有 Pod 间{{< glossary_tooltip text="亲和性" term_id="affinity" >}}, +则在没有这些较低优先级 Pod 的情况下,无法满足 Pod 间亲和性规则。 +在这种情况下,调度程序不会抢占节点上的任何 Pod。 +相反,它寻找另一个节点。调度程序可能会找到合适的节点, +也可能不会。无法保证悬决 Pod 可以被调度。 + +我们针对此问题推荐的解决方案是仅针对同等或更高优先级的 Pod 设置 Pod 间亲和性。 + + +#### 跨节点抢占 + +假设正在考虑在一个节点 N 上执行抢占,以便可以在 N 上调度待处理的 Pod P。 +只有当另一个节点上的 Pod 被抢占时,P 才可能在 N 上变得可行。 +下面是一个例子: + +* 正在考虑将 Pod P 调度到节点 N 上。 +* Pod Q 正在与节点 N 位于同一区域的另一个节点上运行。 +* Pod P 与 Pod Q 具有 Zone 维度的反亲和(`topologyKey:topology.kubernetes.io/zone`)。 +* Pod P 与 Zone 中的其他 Pod 之间没有其他反亲和性设置。 +* 为了在节点 N 上调度 Pod P,可以抢占 Pod Q,但调度器不会进行跨节点抢占。 + 因此,Pod P 将被视为在节点 N 上不可调度。 + +如果将 Pod Q 从所在节点中移除,则不会违反 Pod 间反亲和性约束, +并且 Pod P 可能会被调度到节点 N 上。 + +如果有足够的需求,并且如果我们找到性能合理的算法, +我们可能会考虑在未来版本中添加跨节点抢占。 + + +## 故障排除 + +Pod 优先级和抢占可能会产生不必要的副作用。以下是一些潜在问题的示例以及处理这些问题的方法。 + + +### Pod 被不必要地抢占 + +抢占在资源压​​力较大时从集群中删除现有 Pod,为更高优先级的悬决 Pod 腾出空间。 +如果你错误地为某些 Pod 设置了高优先级,这些无意的高优先级 Pod 可能会导致集群中出现抢占行为。 +Pod 优先级是通过设置 
Pod 规约中的 `priorityClassName` 字段来指定的。 +优先级的整数值然后被解析并填充到 `podSpec` 的 `priority` 字段。 + +为了解决这个问题,你可以将这些 Pod 的 `priorityClassName` 更改为使用较低优先级的类, +或者将该字段留空。默认情况下,空的 `priorityClassName` 解析为零。 + +当 Pod 被抢占时,集群会为被抢占的 Pod 记录事件。只有当集群没有足够的资源用于 Pod 时, +才会发生抢占。在这种情况下,只有当悬决 Pod(抢占者)的优先级高于受害 Pod 时才会发生抢占。 +当没有悬决 Pod,或者悬决 Pod 的优先级等于或低于牺牲者时,不得发生抢占。 +如果在这种情况下发生抢占,请提出问题。 + + +### 有 Pod 被抢占,但抢占者并没有被调度 + +当 Pod 被抢占时,它们会收到请求的体面终止期,默认为 30 秒。 +如果受害 Pod 在此期限内没有终止,它们将被强制终止。 +一旦所有牺牲者都离开,就可以调度抢占者 Pod。 + +在抢占者 Pod 等待牺牲者离开的同时,可能某个适合同一个节点的更高优先级的 Pod 被创建。 +在这种情况下,调度器将调度优先级更高的 Pod 而不是抢占者。 + +这是预期的行为:具有较高优先级的 Pod 应该取代具有较低优先级的 Pod。 + + +### 优先级较高的 Pod 在优先级较低的 Pod 之前被抢占 + +调度程序尝试查找可以运行悬决 Pod 的节点。如果没有找到这样的节点, +调度程序会尝试从任意节点中删除优先级较低的 Pod,以便为悬决 Pod 腾出空间。 +如果具有低优先级 Pod 的节点无法运行悬决 Pod, +调度器可能会选择另一个具有更高优先级 Pod 的节点(与其他节点上的 Pod 相比)进行抢占。 +牺牲者的优先级必须仍然低于抢占者 Pod。 + +当有多个节点可供执行抢占操作时,调度器会尝试选择具有一组优先级最低的 Pod 的节点。 +但是,如果此类 Pod 具有 PodDisruptionBudget,当它们被抢占时, +则会违反 PodDisruptionBudget,那么调度程序可能会选择另一个具有更高优先级 Pod 的节点。 + +当存在多个节点抢占且上述场景均不适用时,调度器会选择优先级最低的节点。 + + +## Pod 优先级和服务质量之间的相互作用 {#interactions-of-pod-priority-and-qos} + +Pod 优先级和 {{}} +是两个正交特征,交互很少,并且对基于 QoS 类设置 Pod 的优先级没有默认限制。 +调度器的抢占逻辑在选择抢占目标时不考虑 QoS。 +抢占会考虑 Pod 优先级并尝试选择一组优先级最低的目标。 +仅当移除优先级最低的 Pod 不足以让调度程序调度抢占式 Pod, +或者最低优先级的 Pod 受 PodDisruptionBudget 保护时,才会考虑优先级较高的 Pod。 + + +kubelet 使用优先级来确定 +[节点压力驱逐](/zh/docs/concepts/scheduling-eviction/node-pressure-eviction/) Pod 的顺序。 +你可以使用 QoS 类来估计 Pod 最有可能被驱逐的顺序。kubelet 根据以下因素对 Pod 进行驱逐排名: + + 1. 对紧俏资源的使用是否超过请求值 + 1. Pod 优先级 + 1. 
相对于请求的资源使用量 + +有关更多详细信息,请参阅 +[kubelet 驱逐时 Pod 的选择](/zh/docs/concepts/scheduling-eviction/node-pressure-eviction/#pod-selection-for-kubelet-eviction)。 + +当某 Pod 的资源用量未超过其请求时,kubelet 节点压力驱逐不会驱逐该 Pod。 +如果优先级较低的 Pod 没有超过其请求,则不会被驱逐。 +另一个优先级高于其请求的 Pod 可能会被驱逐。 + +## {{% heading "whatsnext" %}} + + +* 阅读有关将 ResourceQuota 与 PriorityClass 结合使用的信息: + [默认限制优先级消费](/zh/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default) +* 了解 [Pod 干扰](/zh/docs/concepts/workloads/pods/disruptions/) +* 了解 [API 发起的驱逐](/zh/docs/concepts/scheduling-eviction/api-eviction/) +* 了解[节点压力驱逐](/zh/docs/concepts/scheduling-eviction/node-pressure-eviction/) \ No newline at end of file diff --git a/content/zh/docs/concepts/scheduling-eviction/resource-bin-packing.md b/content/zh/docs/concepts/scheduling-eviction/resource-bin-packing.md index b8c097e5df..08eb73003a 100644 --- a/content/zh/docs/concepts/scheduling-eviction/resource-bin-packing.md +++ b/content/zh/docs/concepts/scheduling-eviction/resource-bin-packing.md @@ -32,60 +32,70 @@ The kube-scheduler can be configured to enable bin packing of resources along wi ## 使用 RequestedToCapacityRatioResourceAllocation 启用装箱 -在 Kubernetes 1.15 之前,Kube-scheduler 通常允许根据对主要资源(如 CPU 和内存) -的请求数量和可用容量 之比率对节点评分。 -Kubernetes 1.16 在优先级函数中添加了一个新参数,该参数允许用户指定资源以及每类资源的权重, +Kubernetes 允许用户指定资源以及每类资源的权重, 以便根据请求数量与可用容量之比率为节点评分。 这就使得用户可以通过使用适当的参数来对扩展资源执行装箱操作,从而提高了大型集群中稀缺资源的利用率。 `RequestedToCapacityRatioResourceAllocation` 优先级函数的行为可以通过名为 -`requestedToCapacityRatioArguments` 的配置选项进行控制。 +`RequestedToCapacityRatioArgs` 的配置选项进行控制。 该标志由两个参数 `shape` 和 `resources` 组成。 -`shape` 允许用户根据 `utilization` 和 `score` 值将函数调整为最少请求 -(least requested)或 -最多请求(most requested)计算。 +`shape` 允许用户根据 `utilization` 和 `score` 值将函数调整为 +最少请求(least requested)或最多请求(most requested)计算。 `resources` 包含由 `name` 和 `weight` 组成,`name` 指定评分时要考虑的资源, `weight` 指定每种资源的权重。 以下是一个配置示例,该配置将 `requestedToCapacityRatioArguments` 设置为对扩展资源 `intel.com/foo` 和 `intel.com/bar` 的装箱行为 -```json -{ - "kind": 
"Policy", - "apiVersion": "v1", - ... - "priorities": [ - ... - { - "name": "RequestedToCapacityRatioPriority", - "weight": 2, - "argument": { - "requestedToCapacityRatioArguments": { - "shape": [ - {"utilization": 0, "score": 0}, - {"utilization": 100, "score": 10} - ], - "resources": [ - {"name": "intel.com/foo", "weight": 3}, - {"name": "intel.com/bar", "weight": 5} - ] - } - } - } - ], -} +```yaml +apiVersion: kubescheduler.config.k8s.io/v1beta1 +kind: KubeSchedulerConfiguration +profiles: +# ... + pluginConfig: + - name: RequestedToCapacityRatio + args: + shape: + - utilization: 0 + score: 10 + - utilization: 100 + score: 0 + resources: + - name: intel.com/foo + weight: 3 + - name: intel.com/bar + weight: 5 ``` + +使用 kube-scheduler 标志 `--config=/path/to/config/file` +引用 `KubeSchedulerConfiguration` 文件将配置传递给调度器。 + diff --git a/content/zh/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md b/content/zh/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md index 8a43385d13..398a06f18d 100644 --- a/content/zh/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md +++ b/content/zh/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md @@ -81,11 +81,11 @@ kube-scheduler 的表现等价于设置值为 100。 -要修改这个值,先编辑 [kube-scheduler 的配置文件](/zh/docs/reference/config-api/kube-scheduler-config.v1beta1/) +要修改这个值,先编辑 [kube-scheduler 的配置文件](/zh/docs/reference/config-api/kube-scheduler-config.v1beta2/) 然后重启调度器。 大多数情况下,这个配置文件是 `/etc/kubernetes/config/kube-scheduler.yaml`。 @@ -298,6 +298,6 @@ After going over all the Nodes, it goes back to Node 1. 
## {{% heading "whatsnext" %}} - + -* 参见 [kube-scheduler 配置参考 (v1beta2)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta2/) diff --git a/content/zh/docs/concepts/scheduling-eviction/scheduling-framework.md b/content/zh/docs/concepts/scheduling-eviction/scheduling-framework.md index 1107c19565..303b707a2f 100644 --- a/content/zh/docs/concepts/scheduling-eviction/scheduling-framework.md +++ b/content/zh/docs/concepts/scheduling-eviction/scheduling-framework.md @@ -16,7 +16,7 @@ weight: 90 -{{< feature-state for_k8s_version="1.15" state="alpha" >}} +{{< feature-state for_k8s_version="1.19" state="stable" >}} -节点亲和性(详见[这里](/zh/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity)) +[_节点亲和性_](/zh/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) 是 {{< glossary_tooltip text="Pod" term_id="pod" >}} 的一种属性,它使 Pod -被吸引到一类特定的{{< glossary_tooltip text="节点" term_id="node" >}}。 -这可能出于一种偏好,也可能是硬性要求。 -Taint(污点)则相反,它使节点能够排斥一类特定的 Pod。 +被吸引到一类特定的{{< glossary_tooltip text="节点" term_id="node" >}} +(这可能出于一种偏好,也可能是硬性要求)。 +_污点_(Taint)则相反——它使节点能够排斥一类特定的 Pod。 -容忍度(Tolerations)是应用于 Pod 上的,允许(但并不要求)Pod +容忍度(Toleration)是应用于 Pod 上的,允许(但并不要求)Pod 调度到带有与之匹配的污点的节点上。 污点和容忍度(Toleration)相互配合,可以用来避免 Pod 被分配到不合适的节点上。 @@ -312,7 +312,7 @@ manually add tolerations to your pods. 来表示特殊硬件,给配置了特殊硬件的节点添加污点时包含扩展资源名称, 然后运行一个 [ExtendedResourceToleration](/zh/docs/reference/access-authn-authz/admission-controllers/#extendedresourcetoleration) 准入控制器。此时,因为节点已经被设置污点了,没有对应容忍度的 Pod - 会被调度到这些节点。但当你创建一个使用了扩展资源的 Pod 时, + 不会被调度到这些节点。但当你创建一个使用了扩展资源的 Pod 时, `ExtendedResourceToleration` 准入控制器会自动给 Pod 加上正确的容忍度, 这样 Pod 就会被自动调度到这些配置了特殊硬件件的节点上。 这样就能够确保这些配置了特殊硬件的节点专门用于运行需要使用这些硬件的 Pod, @@ -476,10 +476,43 @@ This ensures that DaemonSet pods are never evicted due to these problems.
## 基于节点状态添加污点 + +控制平面使用节点{{}}自动创建 +与[节点状况](/zh/docs/concepts/scheduling-eviction/node-pressure-eviction/#node-conditions)对应的带有 `NoSchedule` 效应的污点。 + +调度器在进行调度时检查污点,而不是检查节点状况。这确保节点状况不会直接影响调度。 +例如,如果 `DiskPressure` 节点状况处于活跃状态,则控制平面 +添加 `node.kubernetes.io/disk-pressure` 污点并且不会调度新的 pod +到受影响的节点。如果 `MemoryPressure` 节点状况处于活跃状态,则 +控制平面添加 `node.kubernetes.io/memory-pressure` 污点。 + + + +对于新创建的 Pod,可以通过添加相应的 Pod 容忍度来忽略节点状况。 +控制平面还在具有除 `BestEffort` 之外的 {{}}的 pod 上 +添加 `node.kubernetes.io/memory-pressure` 容忍度。 +这是因为 Kubernetes 将 `Guaranteed` 或 `Burstable` QoS 类中的 Pod(甚至没有设置内存请求的 Pod) +视为能够应对内存压力,而新创建的 `BestEffort` Pod 不会被调度到受影响的节点上。 + + -Node 生命周期控制器会自动创建与 Node 条件相对应的带有 `NoSchedule` 效应的污点。 -同样,调度器不检查节点条件,而是检查节点污点。这确保了节点条件不会影响调度到节点上的内容。 -用户可以通过添加适当的 Pod 容忍度来选择忽略某些 Node 的问题(表示为 Node 的调度条件)。 DaemonSet 控制器自动为所有守护进程添加如下 `NoSchedule` 容忍度以防 DaemonSet 崩溃: @@ -512,8 +542,8 @@ arbitrary tolerations to DaemonSets. ## {{% heading "whatsnext" %}} -* 阅读[资源耗尽的处理](/zh/docs/tasks/administer-cluster/out-of-resource/),以及如何配置其行为 -* 阅读 [Pod 优先级](/zh/docs/concepts/configuration/pod-priority-preemption/) +* 阅读[节点压力驱逐](/zh/docs/concepts/scheduling-eviction/node-pressure-eviction/),以及如何配置其行为 +* 阅读 [Pod 优先级](/zh/docs/concepts/scheduling-eviction/pod-priority-preemption/) diff --git a/content/zh/docs/concepts/services-networking/dns-pod-service.md b/content/zh/docs/concepts/services-networking/dns-pod-service.md index 50a6c47d86..7864aad7d8 100644 --- a/content/zh/docs/concepts/services-networking/dns-pod-service.md +++ b/content/zh/docs/concepts/services-networking/dns-pod-service.md @@ -92,10 +92,10 @@ options ndots:5 概括起来,名字空间 `test` 中的 Pod 可以成功地解析 `data.prod` 或者 -`data.prod.cluster.local`。 +`data.prod.svc.cluster.local`。 ### Pod 的 setHostnameAsFQDN 字段 {#pod-sethostnameasfqdn-field} -{{< feature-state for_k8s_version="v1.20" state="beta" >}} +{{< feature-state for_k8s_version="v1.22" state="stable" >}} ### Pod 的 DNS 配置 {#pod-dns-config} +{{< feature-state for_k8s_version="v1.14" state="stable" >}} + 
Pod 的 DNS 配置可让用户对 Pod 的 DNS 设置进行更多控制。 `dnsConfig` 字段是可选的,它可以与任何 `dnsPolicy` 设置一起使用。 @@ -541,6 +545,28 @@ search default.svc.cluster-domain.example svc.cluster-domain.example cluster-dom options ndots:5 ``` + +#### 扩展 DNS 配置 {#expanded-dns-configuration} + +{{< feature-state for_k8s_version="1.22" state="alpha" >}} + +对于 Pod DNS 配置,Kubernetes 默认允许最多 6 个 搜索域( Search Domain) +以及一个最多 256 个字符的搜索域列表。 + +如果启用 kube-apiserver 和 kubelet 的特性门控 `ExpandedDNSConfig`,Kubernetes 将可以有最多 32 个 +搜索域以及一个最多 2048 个字符的搜索域列表。 + -* 了解[启用 EndpointSlice](/zh/docs/tasks/administer-cluster/enabling-endpointslices) * 阅读[使用服务连接应用](/zh/docs/concepts/services-networking/connect-applications-service/) diff --git a/content/zh/docs/concepts/services-networking/ingress-controllers.md b/content/zh/docs/concepts/services-networking/ingress-controllers.md index a2c9eaaae3..54286d81a4 100644 --- a/content/zh/docs/concepts/services-networking/ingress-controllers.md +++ b/content/zh/docs/concepts/services-networking/ingress-controllers.md @@ -69,6 +69,7 @@ Kubernetes 作为一个项目,目前支持和维护 的 Ingress 控制器。 * [EnRoute](https://getenroute.io/) 是一个基于 [Envoy](https://www.envoyproxy.io) API 网关, 可以作为 Ingress 控制器来执行。 +* [Easegress IngressController](https://github.com/megaease/easegress/blob/main/doc/ingresscontroller.md) 是一个基于 [Easegress](https://megaease.com/easegress/) API 网关,可以作为 Ingress 控制器来执行。 #### 名字空间域的参数 -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} +{{< feature-state for_k8s_version="v1.22" state="beta" >}} `parameters` 字段有一个 `scope` 和 `namespace` 字段,可用来引用特定 于名字空间的资源,对 Ingress 类进行配置。 @@ -436,6 +441,9 @@ will reference a parameters resource in a specific namespace: 将 `scope` 设置为 `Namespace` 并设置 `namespace` 字段就可以引用某特定 名字空间中的参数资源。 +有了名字空间域的参数,就不再需要为一个参数资源配置集群范围的 CustomResourceDefinition。 +除此之外,之前对访问集群范围的资源进行授权,需要用到 RBAC 相关的资源,现在也不再需要了。 + {{< codenew file="service/networking/namespaced-params.yaml" >}} ## SCTP 支持 -{{< feature-state for_k8s_version="v1.19" state="beta" >}} +{{< feature-state 
for_k8s_version="v1.20" state="stable" >}} -作为一个 Beta 特性,SCTP 支持默认是被启用的。 +作为一个稳定特性,SCTP 支持默认是被启用的。 要在集群层面禁用 SCTP,你(或你的集群管理员)需要为 API 服务器指定 `--feature-gates=SCTPSupport=false,...` 来禁用 `SCTPSupport` [特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/)。 @@ -439,7 +440,7 @@ You must be using a {{< glossary_tooltip text="CNI" term_id="cni" >}} plugin tha --> ## 针对某个端口范围 {#targeting-a-range-of-ports} -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} +{{< feature-state for_k8s_version="v1.22" state="beta" >}} 上面的规则允许名字空间 `default` 中所有带有标签 `db` 的 Pod 使用 TCP 协议 与 `10.0.0.0/24` 范围内的 IP 通信,只要目标端口介于 32000 和 32768 之间就可以。 使用此字段时存在以下限制: -* 作为一种 Alpha 阶段的特性,端口范围设定默认是被禁用的。要在整个集群 - 范围内允许使用 `endPort` 字段,你(或者你的集群管理员)需要为 API - 服务器设置 `-feature-gates=NetworkPolicyEndPort=true,...` 以启用 +* 作为一种 Beta 阶段的特性,端口范围设定默认是被启用的。要在整个集群 + 范围内禁止使用 `endPort` 字段,你(或者你的集群管理员)需要为 API + 服务器设置 `-feature-gates=NetworkPolicyEndPort=false,...` 以禁用 `NetworkPolicyEndPort` [特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/)。 * `endPort` 字段必须等于或者大于 `port` 字段的值。 @@ -499,9 +502,15 @@ The following restrictions apply when using this field: 你的集群所使用的 {{< glossary_tooltip text="CNI" term_id="cni" >}} 插件 必须支持在 NetworkPolicy 规约中使用 `endPort` 字段。 +如果你的[网络插件](/zh/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) +不支持 `endPort` 字段,而你指定了一个包含 `endPort` 字段的 NetworkPolicy, +策略只对单个 `port` 字段生效。 {{< /note >}} ### 超出容量的 Endpoints {#over-capacity-endpoints} -如果某个 Endpoints 资源中包含的端点个数超过 1000,则 Kubernetes v1.21 版本 +如果某个 Endpoints 资源中包含的端点个数超过 1000,则 Kubernetes v1.22 版本 (及更新版本)的集群会将为该 Endpoints 添加注解 -`endpoints.kubernetes.io/over-capacity: warning`。 -这一注解表明所影响到的 Endpoints 对象已经超出容量。 +`endpoints.kubernetes.io/over-capacity: truncated`。 +这一注解表明所影响到的 Endpoints 对象已经超出容量,此外 Endpoints 控制器还会将 Endpoints 对象数量截断到 1000。 +## 流量策略 {#traffic-policies} + + +### 外部流量策略 {#external-traffic-policy} + + + +你可以通过设置 `spec.externalTrafficPolicy` 字段来控制来自于外部的流量是如何路由的。 +可选值有 `Cluster` 和 
`Local`。字段设为 `Cluster` 会将外部流量路由到所有就绪的端点, +设为 `Local` 会只路由到当前节点上就绪的端点。 +如果流量策略设置为 `Local`,而且当前节点上没有就绪的端点,kube-proxy 不会转发请求相关服务的任何流量。 + +{{< note >}} +{{< feature-state for_k8s_version="v1.22" state="alpha" >}} + + + +如果你启用了 kube-proxy 的 `ProxyTerminatingEndpoints` +[特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/), +kube-proxy 会检查节点是否有本地的端点,以及是否所有的本地端点都被标记为终止中。 + + + +如果本地有端点,而且所有端点处于终止中的状态,那么 kube-proxy 会忽略任何设为 `Local` 的外部流量策略。 +在所有本地端点处于终止中的状态的同时,kube-proxy 将请求指定服务的流量转发到位于其它节点的 +状态健康的端点,如同外部流量策略设为 `Cluster`。 + + +针对处于正被终止状态的端点这一转发行为使得外部负载均衡器可以优雅地排出由 +`NodePort` 服务支持的连接,就算是健康检查节点端口开始失败也是如此。 +否则,当节点还在负载均衡器的节点池内,在 Pod 终止过程中的流量会被丢掉,这些流量可能会丢失。 + +{{< /note >}} + + +### 内部流量策略 {#internal-traffic-policy} + +{{< feature-state for_k8s_version="v1.22" state="beta" >}} + + +你可以设置 `spec.internalTrafficPolicy` 字段来控制内部来源的流量是如何转发的。可设置的值有 `Cluster` 和 `Local`。 +将字段设置为 `Cluster` 会将内部流量路由到所有就绪端点,设置为 `Local` 只会路由到当前节点上就绪的端点。 +如果流量策略是 `Local`,而且当前节点上没有就绪的端点,那么 kube-proxy 会丢弃流量。 + #### 设置负载均衡器实现的类别 {#load-balancer-class} -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} +{{< feature-state for_k8s_version="v1.22" state="beta" >}} -从 v1.21 开始,你可以有选择地为 `LoadBalancer` 类型的服务设置字段 -`.spec.loadBalancerClass`,以指定其负载均衡器实现的类别。 -默认情况下,`.spec.loadBalancerClass` 的取值是 `nil`,`LoadBalancer` 类型 -服务会使用云提供商的默认负载均衡器实现。 +`spec.loadBalancerClass` 允许你不使用云提供商的默认负载均衡器实现,转而使用指定的负载均衡器实现。 +这个特性从 v1.21 版本开始可以使用,你在 v1.21 版本中使用这个字段必须启用 `ServiceLoadBalancerClass` +特性门控,这个特性门控从 v1.22 版本及以后默认打开。 +默认情况下,`.spec.loadBalancerClass` 的取值是 `nil`,如果集群使用 `--cloud-provider` 配置了云提供商, +`LoadBalancer` 类型服务会使用云提供商的默认负载均衡器实现。 如果设置了 `.spec.loadBalancerClass`,则假定存在某个与所指定的类相匹配的 负载均衡器实现在监视服务变化。 所有默认的负载均衡器实现(例如,由云提供商所提供的)都会忽略设置了此字段 @@ -1152,12 +1230,10 @@ Once set, it cannot be changed. The value of `spec.loadBalancerClass` must be a label-style identifier, with an optional prefix such as "`internal-vip`" or "`example.com/internal-vip`". Unprefixed names are reserved for end-users. 
-You must enable the `ServiceLoadBalancerClass` feature gate to use this field. --> `.spec.loadBalancerClass` 的值必须是一个标签风格的标识符, 可以有选择地带有类似 "`internal-vip`" 或 "`example.com/internal-vip`" 这类 前缀。没有前缀的名字是保留给最终用户的。 -你必须启用 `ServiceLoadBalancerClass` 特性门控才能使用此字段。 你也可以看到当 PV 对象的状态为 `Terminating` 且其 `Finalizers` 列表中包含 -`kubernetes.io/pvc-protection` 时,PV 对象是处于被保护状态的。 +`kubernetes.io/pv-protection` 时,PV 对象是处于被保护状态的。 ```shell kubectl describe pv task-pv-volume diff --git a/content/zh/docs/concepts/storage/storage-classes.md b/content/zh/docs/concepts/storage/storage-classes.md index 9cf05702b5..28ca72dd42 100644 --- a/content/zh/docs/concepts/storage/storage-classes.md +++ b/content/zh/docs/concepts/storage/storage-classes.md @@ -1038,12 +1038,12 @@ metadata: provisioner: kubernetes.io/azure-disk parameters: storageaccounttype: Standard_LRS - kind: Shared + kind: managed ``` * `storageaccounttype`:Azure 存储帐户 Sku 层。默认为空。 -* `kind`:可能的值是 `shared`(默认)、`dedicated` 和 `managed`。 +* `kind`:可能的值是 `shared`、`dedicated` 和 `managed`(默认)。 当 `kind` 的值是 `shared` 时,所有非托管磁盘都在集群的同一个资源组中的几个共享存储帐户中创建。 当 `kind` 的值是 `dedicated` 时,将为在集群的同一个资源组中新的非托管磁盘创建新的专用存储帐户。 * `resourceGroup`: 指定要创建 Azure 磁盘所属的资源组。必须是已存在的资源组名称。 diff --git a/content/zh/docs/concepts/workloads/controllers/statefulset.md b/content/zh/docs/concepts/workloads/controllers/statefulset.md index 4cd6606a38..56c27ae3ea 100644 --- a/content/zh/docs/concepts/workloads/controllers/statefulset.md +++ b/content/zh/docs/concepts/workloads/controllers/statefulset.md @@ -163,7 +163,7 @@ The name of a StatefulSet object must be a valid * `volumeClaimTemplates` 将通过 PersistentVolumes 驱动提供的 [PersistentVolumes](/zh/docs/concepts/storage/persistent-volumes/) 来提供稳定的存储。 -StatefulSet 的命名需要遵循[DNS 子域名](zh/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)规范。 +StatefulSet 的命名需要遵循[DNS 子域名](/zh/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)规范。 -## 容器的特权模式 {#rivileged-mode-for-containers} +## 容器的特权模式 
{#privileged-mode-for-containers} Pod 中的任何容器都可以使用容器规约中的 [安全性上下文](/zh/docs/tasks/configure-pod-container/security-context/)中的 diff --git a/content/zh/docs/concepts/workloads/pods/disruptions.md b/content/zh/docs/concepts/workloads/pods/disruptions.md index c200a51757..e146d66bc5 100644 --- a/content/zh/docs/concepts/workloads/pods/disruptions.md +++ b/content/zh/docs/concepts/workloads/pods/disruptions.md @@ -170,7 +170,7 @@ in your pod spec can also cause voluntary (and involuntary) disruptions. 实现可能导致碎片整理和紧缩节点的自愿干扰。集群 管理员或托管提供商应该已经记录了各级别的自愿干扰(如果有的话)。 有些配置选项,例如在 pod spec 中 -[使用 PriorityClasses](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) +[使用 PriorityClasses](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) 也会产生自愿(和非自愿)的干扰。 -### 临时容器 API {#ephemeral-containers-api}」 +### 临时容器 API {#ephemeral-containers-api} {{< note >}} -你可以在 [调度方案(Schedulingg Profile)](/zh/docs/reference/scheduling/config/#profiles) +你可以在 [调度方案(Scheduling Profile)](/zh/docs/reference/scheduling/config/#profiles) 中将默认约束作为 `PodTopologySpread` 插件参数的一部分来设置。 约束的设置采用[如前所述的 API](#api),只是 `labelSelector` 必须为空。 选择算符是根据 Pod 所属的服务、副本控制器、ReplicaSet 或 StatefulSet 来设置的。 diff --git a/content/zh/docs/contribute/analytics.md b/content/zh/docs/contribute/analytics.md new file mode 100644 index 0000000000..b6175a2670 --- /dev/null +++ b/content/zh/docs/contribute/analytics.md @@ -0,0 +1,51 @@ +--- +title: 查看站点分析 +content_type: concept +weight: 100 +card: + name: contribute + weight: 100 +--- + + + + + + +此页面包含有关 kubernetes.io 分析仪表板的信息。 + + + + +[查看仪表板](https://datastudio.google.com/reporting/fede2672-b2fd-402a-91d2-7473bdb10f04)。 + +此仪表板使用 Google Data Studio 构建,显示使用 Google Analytics 在 kubernetes.io 上收集的信息。 + + +### 使用仪表板 + +默认情况下,仪表板显示过去 30 天收集的所有分析。 +使用日期选择器查看来自不同日期范围的数据。 +其他过滤选项允许你根据用户位置、用于访问站点的设备、所用文档的翻译等查看数据。 + +如果你发现此仪表板存在问题,或者想要请求任何改进, +请[开启一个问题](https://github.com/kubernetes/website/issues/new/choose)。 diff --git 
a/content/zh/docs/contribute/generate-ref-docs/contribute-upstream.md b/content/zh/docs/contribute/generate-ref-docs/contribute-upstream.md index cf7b51cbaf..d16879d7e7 100644 --- a/content/zh/docs/contribute/generate-ref-docs/contribute-upstream.md +++ b/content/zh/docs/contribute/generate-ref-docs/contribute-upstream.md @@ -159,12 +159,12 @@ will be different in your situation. 以下在 Kubernetes 源代码中编辑注释的示例。 -在您本地的 kubernetes/kubernetes 代码仓库中,检出 master 分支,并确保它是最新的: +在您本地的 kubernetes/kubernetes 代码仓库中,检出默认分支,并确保它是最新的: ```shell cd @@ -173,9 +173,9 @@ git pull https://github.com/kubernetes/kubernetes master ``` -假设 master 分支中的下面源文件中包含拼写错误 "atmost": +假设默认分支中的下面源文件中包含拼写错误 "atmost": [kubernetes/kubernetes/staging/src/k8s.io/api/apps/v1/types.go](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/api/apps/v1/types.go) @@ -228,7 +228,6 @@ Go to `` and run these scripts: hack/update-generated-swagger-docs.sh hack/update-openapi-spec.sh hack/update-generated-protobuf.sh -hack/update-api-reference-docs.sh ``` @@ -238,8 +237,6 @@ hack/update-api-reference-docs.sh On branch master ... modified: api/openapi-spec/swagger.json - modified: api/swagger-spec/apps_v1.json - modified: docs/api-reference/apps/v1/definitions.html modified: staging/src/k8s.io/api/apps/v1/generated.proto modified: staging/src/k8s.io/api/apps/v1/types.go modified: staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -310,24 +307,27 @@ In the preceding section, you edited a file in the master branch and then ran sc to generate an OpenAPI spec and related files. Then you submitted your changes in a pull request to the master branch of the kubernetes/kubernetes repository. Now suppose you want to backport your change into a release branch. For example, suppose the master branch is being used to develop -Kubernetes version 1.10, and you want to backport your change into the release-1.9 branch. 
+Kubernetes version {{< skew latestVersion >}}, and you want to backport your change into the +release-{{< skew prevMinorVersion >}} branch. --> ### 将你的提交 Cherrypick 到发布分支 在上一节中,你在 master 分支中编辑了一个文件,然后运行了脚本用来生成 OpenAPI 规范和相关文件。 然后用 PR 将你的更改提交到 kubernetes/kubernetes 代码仓库的 master 分支中。 现在,需要将你的更改反向移植到已经发布的分支。 -例如,假设 master 分支被用来开发 Kubernetes 1.10 版,并且你想将更改反向移植到 release-1.9 分支。 +例如,假设 master 分支被用来开发 Kubernetes {{< skew latestVersion >}} 版, +并且你想将更改反向移植到 release-{{< skew prevMinorVersion >}} 分支。 回想一下,您的 PR 有两个提交:一个用于编辑 `types.go`,一个用于由脚本生成的文件。 -下一步是将你的第一次提交 cherrypick 到 release-1.9 分支。这样做的原因是仅 cherrypick 编辑了 types.go 的提交, +下一步是将你的第一次提交 cherrypick 到 release-{{< skew prevMinorVersion >}} 分支。 +这样做的原因是仅 cherrypick 编辑了 types.go 的提交, 而不是具有脚本运行结果的提交。 有关说明,请参见[提出 Cherry Pick](https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md)。 @@ -337,16 +337,17 @@ pull request. If you don't have those permissions, you will need to work with so and milestone for you. --> {{< note >}} -提出 Cherry Pick 要求你有权在 PR 中设置标签和里程碑。如果您没有这些权限, +提出 Cherry Pick 要求你有权在 PR 中设置标签和里程碑。如果你没有这些权限, 则需要与可以为你设置标签和里程碑的人员合作。 {{< /note >}} -当你发起 PR 将你的一个提交 cherry pick 到 release-1.9 分支中时,下一步是在本地环境的 release-1.9 -分支中运行如下脚本。 +当你发起 PR 将你的一个提交 cherry pick 到 release-{{< skew prevMinorVersion >}} 分支中时, +下一步是在本地环境的 release-{{< skew prevMinorVersion >}} 分支中运行如下脚本。 ```shell hack/update-generated-swagger-docs.sh @@ -357,24 +358,29 @@ hack/update-api-reference-docs.sh 现在将提交添加到您的 Cherry-Pick PR 中,该 PR 中包含最新生成的 OpenAPI 规范和相关文件。 -关注你的 PR,直到其合并到 release-1.9 分支中为止。 +关注你的 PR,直到其合并到 release-{{< skew prevMinorVersion >}} 分支中为止。 -此时,master 分支和 release-1.9 分支都具有更新的 `types.go` 文件和一组生成的文件, +此时,master 分支和 release-{{< skew prevMinorVersion >}} +分支都具有更新的 `types.go` 文件和一组生成的文件, 这些文件反映了对 `types.go` 所做的更改。 -请注意,生成的 OpenAPI 规范和其他 release-1.9 分支中生成的文件不一定与 master 分支中生成的文件相同。 -release-1.9 分支中生成的文件仅包含来自 Kubernetes 1.9 的 API 元素。 -master 分支中生成的文件可能包含不在 1.9 中但正在为 1.10 开发的 API 元素。 +请注意,生成的 OpenAPI 规范和其他 release-{{< skew 
prevMinorVersion >}} +分支中生成的文件不一定与 master 分支中生成的文件相同。 +release-{{< skew prevMinorVersion >}} 分支中生成的文件仅包含来自 +Kubernetes {{< skew prevMinorVersion >}} 的 API 元素。 +master 分支中生成的文件可能包含不在 {{< skew prevMinorVersion >}} +中但正在为 {{< skew latestVersion >}} 开发的 API 元素。 在本地的 k8s.io/kubernetes 仓库中,检出感兴趣的分支并确保它是最新的。例如, -如果你想要生成 Kubernetes 1.17 的文档,可以使用以下命令: +如果你想要生成 Kubernetes {{< skew prevMinorVersion >}}.0 的文档,可以使用以下命令: ```shell cd -git checkout v1.17.0 -git pull https://github.com/kubernetes/kubernetes v1.17.0 +git checkout v{{< skew prevMinorVersion >}}.0 +git pull https://github.com/kubernetes/kubernetes {{< skew prevMinorVersion >}}.0 ``` [PR 56673](https://github.com/kubernetes/kubernetes/pull/56673/files) 是一个对 kubectl 源码中的笔误进行修复的 PR 示例。 -跟踪你的 PR,并回应评审人的评论。继续跟踪你的 PR,直到它合入到 kubernetes/kubernetes 仓库的 master 分支中。 +跟踪你的 PR,并回应评审人的评论。继续跟踪你的 PR,直到它合入到 kubernetes/kubernetes 仓库的目标分支中。 -例如,假设 master 分支正用于开发 Kubernetes 1.16 版本,而你希望将修改合入到已发布的 1.15 版本分支。 +例如,假设 master 分支正用于开发 Kubernetes {{< skew currentVersion >}} 版本, +而你希望将修改合入到 release-{{< skew prevMinorVersion >}} 版本分支。 相关的操作指南,请参见 [提议一个 cherry-pick](https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md)。 @@ -233,21 +235,22 @@ Go to ``, and open the `Makefile` for editing: * Set `K8S_ROOT` to ``. * Set `K8S_WEBROOT` to ``. * Set `K8S_RELEASE` to the version of the docs you want to build. - For example, if you want to build docs for Kubernetes 1.17, set `K8S_RELEASE` to 1.17. + For example, if you want to build docs for Kubernetes {{< skew prevMinorVersion >}}, set `K8S_RELEASE` to {{< skew prevMinorVersion >}}. 
For example, update the following variables: --> * 设置 `K8S_ROOT` 为 ``。 * 设置 `K8S_WEBROOT` 为 ``。 * 设置 `K8S_RELEASE` 为要构建文档的版本。 - 例如,如果您想为 Kubernetes 1.17 构建文档,请将 `K8S_RELEASE` 设置为 1.17。 + 例如,如果您想为 Kubernetes {{< skew prevMinorVersion >}} 构建文档, + 请将 `K8S_RELEASE` 设置为 {{< skew prevMinorVersion >}}。 例如: ``` export K8S_WEBROOT=$(GOPATH)/src/github.com//website export K8S_ROOT=$(GOPATH)/src/k8s.io/kubernetes -export K8S_RELEASE=1.17 +export K8S_RELEASE={{< skew prevMinorVersion >}} ``` ## 从 kubernetes/kubernetes 检出一个分支 在本地 `` 仓库中,检出你想要生成文档的、包含 Kubernetes 版本的分支。 -例如,如果希望为 Kubernetes 1.17 版本生成文档,请检出 `v1.17.0` 标记。 +例如,如果希望为 Kubernetes {{< skew prevMinorVersion >}}.0 版本生成文档, +请检出 `v{{< skew prevMinorVersion >}}` 标记。 确保本地分支是最新的。 ```shell cd -git checkout v1.17.0 -git pull https://github.com/kubernetes/kubernetes v1.17.0 +git checkout v{{< skew prevMinorVersion >}}.0 +git pull https://github.com/kubernetes/kubernetes v{{< skew prevMinorVersion >}}.0 ``` -本页讨论如何使用 `update-imported-docs` 脚本来生成 Kubernetes 参考文档。 +本页讨论如何使用 `update-imported-docs.py` 脚本来生成 Kubernetes 参考文档。 此脚本将构建的配置过程自动化,并为某个发行版本生成参考文档。 ## {{% heading "prerequisites" %}} @@ -27,13 +27,13 @@ the build setup and generates the reference documentation for a release. 
## 获取文档仓库 {#getting-the-docs-repository} -确保你的 `website` 派生仓库与 `kubernetes/website` 主分支一致,并克隆 -你的派生仓库。 +确保你的 `website` 派生仓库与 GitHub 上的 `kubernetes/website` 远程仓库(`main` 分支)保持同步, +并克隆你的派生仓库。 ```shell mkdir github.com @@ -63,7 +63,7 @@ see the [contributing upstream guide](/docs/contribute/generate-ref-docs/contrib ## update-imported-docs 的概述 -脚本 `update-imported-docs` 位于 `/update-imported-docs/` 目录下, +脚本 `update-imported-docs.py` 位于 `/update-imported-docs/` 目录下, 能够生成以下参考文档: * Kubernetes 组件和工具的参考页面 @@ -82,7 +82,7 @@ The script builds the following references: * Kubernetes API 参考文档 -脚本 `update-imported-docs` 基于 Kubernetes 源代码生成参考文档。 +脚本 `update-imported-docs.py` 基于 Kubernetes 源代码生成参考文档。 过程中会在你的机器的 `/tmp` 目录下创建临时目录,克隆所需要的仓库 `kubernetes/kubernetes` 和 `kubernetes-sigs/reference-docs` 到此临时目录。 脚本会将 `GOPATH` 环境变量设置为指向此临时目录。 @@ -124,7 +124,7 @@ determines the version of the release. 变量 `K8S_RELEASE` 用来确定所针对的发行版本。 -脚本 `update-imported-docs` 执行以下步骤: +脚本 `update-imported-docs.py` 执行以下步骤: 1. 克隆配置文件中所指定的相关仓库。就生成参考文档这一目的而言,要克隆的 仓库默认为 `kubernetes-sigs/reference-docs`。 @@ -260,22 +260,22 @@ For example: ## 运行 update-imported-docs 工具 -你可以用如下方式运行 `update-imported-docs` 工具: +你可以用如下方式运行 `update-imported-docs.py` 工具: ```shell cd /update-imported-docs -./update-imported-docs +./update-imported-docs.py ``` 例如: ```shell -./update-imported-docs reference.yml 1.17 +./update-imported-docs.py reference.yml 1.17 ``` @@ -284,13 +284,13 @@ cd /update-imported-docs The `release.yml` configuration file contains instructions to fix relative links. To fix relative links within your imported files, set the`gen-absolute-links` property to `true`. You can find an example of this in -[`release.yml`](https://github.com/kubernetes/website/blob/master/update-imported-docs/release.yml). +[`release.yml`](https://github.com/kubernetes/website/blob/main/update-imported-docs/release.yml). 
--> ## 修复链接 配置文件 `release.yml` 中包含用来修复相对链接的指令。 若要修复导入文件中的相对链接,将 `gen-absolute-links` 属性设置为 `true`。 -你可以在 [`release.yml`](https://github.com/kubernetes/website/blob/master/update-imported-docs/release.yml) +你可以在 [`release.yml`](https://github.com/kubernetes/website/blob/main/update-imported-docs/release.yml) 文件中找到示例。 -## 起步 +## 为现有的本地化做出贡献 -由于贡献者无法批准他们自己的请求,因此您至少需要两个贡献者才能开始本地化。 +你可以帮助添加或改进现有本地化的内容。在 [Kubernetes Slack](https://slack.k8s.io/) 中, +你能找到每个本地化的频道。还有一个通用的 +[SIG Docs Localizations Slack 频道](https://kubernetes.slack.com/messages/sig-docs-localizations), +你可以在这里打个招呼。 -所有本地化团队必须使用自身的资源持续工作。我们很高兴托管你的产出,但无法为你翻译。 +{{< note >}} + +如果你想处理已经存在的本地化,请在该本地化(如果存在)中检查此页面,而不是英文原版。 +你可能会在那里看到额外的详细信息。 +{{< /note >}} ### 找到两个字母的语言代码 -首先,有关本地化的两个字母的国家代码,请参考 +首先,有关本地化的两个字母的语言代码,请参考 [ISO 639-1 标准](https://www.loc.gov/standards/iso639-2/php/code_list.php)。 例如,韩国的两个字母代码是 `ko`。 @@ -74,23 +85,110 @@ git clone https://github.com//website cd website ``` +网站内容目录包括每种语言的子目录。你想要助力的本地化位于 `content/` 中。 + -### 发起拉取请求(PR){#open-a-pull-request} +### 建议更改 {#suggest-changes} -接下来,[提交 PR 请求](/zh/docs/contribute/new-content/open-a-pr/#open-a-pr), -将本地化添加到 `kubernetes/website` 仓库。 +根据英文原件创建或更新你选择的本地化页面。 +有关更多详细信息,请参阅[翻译内容](#translating-content)。 -该 PR 必须包含所有[最低要求的内容](#minimum-required-content),然后才能被批准。 +如果你发现上游(英文)文档存在技术错误或其他问题, +你应该先修复上游文档,然后通过更新你正在处理的本地化来重复等效的修复。 -有关添加新本地化的示例,请参见添加[法语文档](https://github.com/kubernetes/website/pull/12548) 的 PR。 +请将拉取请求限制为单个本地化,因为在多个本地化中更改内容的拉取请求可能难以审查。 + +按照[内容改进建议](/zh/docs/contribute/suggest-improvements/)提出对该本地化的更改。 +该过程与提议更改上游(英文)内容非常相似。 + + +## 开始新的本地化 + +如果你希望将 Kubernetes 文档本地化为一种新语言,你需要执行以下操作。 + +因为贡献者不能批准他们自己的拉取请求,你需要 _至少两个贡献者_ 来开始本地化。 + +所有本地化团队都必须能够自我维持。 +Kubernetes 网站很乐意托管你的作品,但要由你来翻译它并使现有的本地化内容保持最新。 + + +你需要知道你的语言的两个字母的语言代码。 +请查阅 [ISO 639-1 标准](https://www.loc.gov/standards/iso639-2/php/code_list.php) +以查找你的本地化的两字母语言代码。例如,韩语的两字母代码是`ko`。 + +当你开始新的本地化时,你必须先本地化所有[最少要求的内容](#minimum-required-content), +Kubernetes 项目才能将你的更改发布到当前网站。 + +SIG Docs 
可以帮助你在单独的分支上工作,以便你可以逐步实现该目标。 + + +### 找到社区 + +让 Kubernetes SIG Docs 知道你有兴趣创建本地化! +加入 [SIG Docs Slack 频道](https://kubernetes.slack.com/messages/sig-docs) +和 [SIG Docs Localizations Slack 频道](https://kubernetes.slack.com/messages/sig-docs-localizations)。 +其他本地化团队很乐意帮助你入门并回答你的任何问题。 + + +也请考虑参加 +[SIG Docs 本地化小组的会议](https://github.com/kubernetes/community/tree/master/sig-docs)。 +SIG Docs 本地化小组的任务是与 SIG Docs 本地化团队合作, +共同定义和记录创建本地化贡献指南的流程。 +此外,SIG Docs 本地化小组将寻找机会在本地化团队中创建和共享通用工具, +并为 SIG Docs 领导团队确定新要求。如果你对本次会议有任何疑问, +请在 [SIG Docs Localizations Slack 频道](https://kubernetes.slack.com/messages/sig-docs-localizations) +中提问。 + +你还可以在 `kubernetes/community` 仓库中为你的本地化创建一个 Slack 频道。 +有关添加 Slack 频道的示例,请参阅 +[为波斯语添加频道](https://github.com/kubernetes/community/pull/4980)的 PR。 ### 配置工作流程 {#configure-the-workflow} -接下来,在 `kubernetes/test-infra` 仓库中为您的本地化添加一个 GitHub 标签。 -标签可让您过滤 issues 和针对特定语言的 PR。 +接下来,在 `kubernetes/test-infra` 仓库中为你的本地化添加一个 GitHub 标签。 +标签可让你过滤 issues 和针对特定语言的 PR。 有关添加标签的示例,请参见添加[意大利语标签](https://github.com/kubernetes/test-infra/pull/11316)的 PR。 - -### 寻找社区 - -让 Kubernetes SIG Docs 知道你对创建本地化感兴趣! 
-加入[SIG Docs Slack 频道](https://kubernetes.slack.com/messages/C1J0BPD2M/)。 -其他本地化团队很乐意帮助你起步并回答你的任何问题。 - 你还可以在 `kubernetes/community` 仓库中为你的本地化创建一个 Slack 频道。 有关添加 Slack 频道的示例,请参见[为印尼语和葡萄牙语添加频道](https://github.com/kubernetes/community/pull/3605)的 PR。 @@ -175,8 +258,8 @@ Add a configuration block for the new language to `config.toml`, under the exist ### 修改站点配置 Kubernetes 网站使用 Hugo 作为其 Web 框架。网站的 Hugo 配置位于 -[`config.toml`](https://github.com/kubernetes/website/tree/master/config.toml)文件中。 -为了支持新的本地化,您需要修改 `config.toml`。 +[`config.toml`](https://github.com/kubernetes/website/tree/main/config.toml)文件中。 +为了支持新的本地化,你需要修改 `config.toml`。 在现有的 `[languages]` 下,将新语言的配置添加到 `config.toml` 中。 例如,下面是德语的配置示例: @@ -207,7 +290,7 @@ Add a language-specific subdirectory to the [`content`](https://github.com/kuber ### 添加一个新的本地化目录 将特定语言的子目录添加到仓库中的 -[`content`](https://github.com/kubernetes/website/tree/master/content) 文件夹下。 +[`content`](https://github.com/kubernetes/website/tree/main/content) 文件夹下。 例如,德语的两个字母的代码是 `de`: ```shell @@ -215,7 +298,27 @@ mkdir content/de ``` +你还需要在 `data/i18n` 中为 [localized strings](#site-strings-in-i18n) 创建一个目录; +以现有的本地化为例。要使用这些新字符串, +你还必须创建从 `i18n/.toml` +到 `data/i18n//.toml` +中实际字符串配置的符号链接(记得提交符号链接关联)。 + +例如,对于德语,字符串位于 `data/i18n/de/de.toml` 中, +而 `i18n/de.toml` 是指向 `data/i18n/de/de.toml` 的符号链接。 + + -### 添加本地化的 README 文件 - -为了指导其他本地化贡献者,请在 k/website 的根目录添加一个新的 -[`README-**.md`](https://help.github.com/articles/about-readmes/), -其中 `**` 是两个字母的语言代码。例如,德语 README 文件为 `README-de.md`。 - -在本地化的 `README-**.md` 文件中为本地化贡献者提供指导。包含 `README.md` 中包含的相同信息,以及: - -- 本地化项目的联系人 -- 任何特定于本地化的信息 - - -创建本地化的 README 文件后,请在英语版文件 `README.md` 中添加指向该文件的链接, -并给出英文形式的联系信息。你可以提供 GitHub ID、电子邮件地址、 -[Slack 频道](https://slack.com/)或其他联系方式。你还必须提供指向本地化的社区行为准则的链接。 - +### 打开拉取请求 {#open-a-pull-request} + +接下来,[打开拉取请求](/zh/docs/contribute/new-content/open-a-pr/#open-a-pr)(PR) +将本地化添加到 `kubernetes/website` 存储库。 + +PR 必须包含所有[最低要求内容](#minimum-required-content)才能获得批准。 + +有关添加新本地化的示例, +请参阅 PR 
以启用[法语文档](https://github.com/kubernetes/website/pull/12548)。 + + +### 添加本地化的 README 文件 + +为了指导其他本地化贡献者,请在 [k/website](https://github.com/kubernetes/website/) +的根目录添加一个新的 [`README-**.md`](https://help.github.com/articles/about-readmes/), +其中 `**` 是两个字母的语言代码。例如,德语 README 文件为 `README-de.md`。 + +在本地化的 `README-**.md` 文件中为本地化贡献者提供指导。包含 `README.md` 中包含的相同信息,以及: + +- 本地化项目的联系人 +- 任何特定于本地化的信息 + + +创建本地化的 README 文件后,请在英语版文件 `README.md` 中添加指向该文件的链接, +并给出英文形式的联系信息。你可以提供 GitHub ID、电子邮件地址、 +[Slack 频道](https://slack.com/)或其他联系方式。你还必须提供指向本地化的社区行为准则的链接。 + + +### 启动你的新本地化 + +一旦本地化满足工作流程和最小输出的要求,SIG Docs 将: + +- 在网站上启用语言选择 +- 通过[云原生计算基金会](https://www.cncf.io/about/)(CNCF)渠道, + 包括 [Kubernetes 博客](https://kubernetes.io/blog/),来宣传本地化的可用性。 + -## 翻译文档 +## 翻译文档 {#translating-content} 本地化*所有* Kubernetes 文档是一项艰巨的任务。从小做起,循序渐进。 @@ -352,15 +491,15 @@ Description | URLs -----|----- Home | [All heading and subheading URLs](/docs/home/) Setup | [All heading and subheading URLs](/docs/setup/) -Tutorials | [Kubernetes Basics](/docs/tutorials/kubernetes-basics/), [Hello Minikube](/docs/tutorials/stateless-application/hello-minikube/) -Site strings | [All site strings in a new localized TOML file](https://github.com/kubernetes/website/tree/master/i18n) +Tutorials | [Kubernetes Basics](/docs/tutorials/kubernetes-basics/), [Hello Minikube](/docs/tutorials/hello-minikube/) +Site strings | [All site strings](#Site-strings-in-i18n) in a new localized TOML file --> 描述 | 网址 -----|----- 主页 | [所有标题和副标题网址](/zh/docs/home/) 安装 | [所有标题和副标题网址](/zh/docs/setup/) 教程 | [Kubernetes 基础](/zh/docs/tutorials/kubernetes-basics/), [Hello Minikube](/zh/docs/tutorials/hello-minikube/) -网站字符串 | [新的本地化 TOML 文件中的所有网站字符串](https://github.com/kubernetes/website/tree/master/i18n) +网站字符串 | [所有网站字符串](#Site-strings-in-i18n) ### 源文件 @@ -421,39 +560,45 @@ The `master` branch holds content for the current release `{{< latest-version >} 目标版本 | 分支 -----|----- + 最新版本 | [`main`](https://github.com/kubernetes/website/tree/main) + 上一个版本 | 
[`release-{{< skew prevMinorVersion >}}`](https://github.com/kubernetes/website/tree/release-{{< skew prevMinorVersion >}}) 下一个版本 | [`dev-{{< skew nextMinorVersion >}}`](https://github.com/kubernetes/website/tree/dev-{{< skew nextMinorVersion >}}) - 最新版本 | [`master`](https://github.com/kubernetes/website/tree/master) - 之前的版本 | `release-*.**` -`master` 分支中保存的是当前发行版本 `{{< latest-version >}}` 的内容。 +`main` 分支中保存的是当前发行版本 `{{< latest-version >}}` 的内容。 发行团队会在下一个发行版本 v{{< skew nextMinorVersion >}} 出现之前创建 `{{< release-branch >}}` 分支。 -### i18n/ 中的网站字符串 +### i18n/ 中的网站字符串 {#site-strings-in-i18n} 本地化必须在新的语言特定文件中包含 -[`i18n/en.toml`](https://github.com/kubernetes/website/blob/master/i18n/en.toml) -的内容。以德语为例:`i18n/de.toml`。 +[`data/i18n/en/en.toml`](https://github.com/kubernetes/website/blob/master/i18n/en.toml) +的内容。以德语为例:`data/i18n/de/de.toml`。 将新的本地化文件添加到 `i18n/`。例如德语 (`de`): -```shell -cp i18n/en.toml i18n/de.toml +```bash +mkdir -p data/i18n/de +cp data/i18n/en/en.toml data/i18n/de/de.toml ``` -然后翻译每个字符串的值: + +修改文件顶部的注释以适合你的本地化, +然后翻译每个字符串的值。例如,这是搜索表单的德语占位符文本: -```TOML -[docs_label_i_am] -other = "ICH BIN..." +```toml +[ui_search_placeholder] +other = "Suchen" ``` ### 分支策略 {#branching-strategy} 因为本地化项目是高度协同的工作,所以我们鼓励团队基于共享的本地化分支工作。 +- 特别是在开始并且本地化尚未生效时。 在本地化分支上协作需要: @@ -536,37 +684,38 @@ Teams must merge localized content into the same branch from which the content w For example: -- a localization branch sourced from `master` must be merged into `master`. -- a localization branch sourced from `release-1.19` must be merged into `release-1.19`. +- a localization branch sourced from `main` must be merged into `main`. +- a localization branch sourced from `release-{{ skew "prevMinorVersion" }}` must be merged into `release-{{ skew "prevMinorVersion" }}`. 
{{< note >}} -If your localization branch was created from `master` branch but it is not merged into `master` before new release branch `{{< release-branch >}}` created, merge it into both `master` and new release branch `{{< release-branch >}}`. To merge your localization branch into new release branch `{{< release-branch >}}`, you need to switch upstream branch of your localization branch to `{{< release-branch >}}`. +If your localization branch was created from `main` branch but it is not merged into `main` before new release branch `{{< release-branch >}}` created, merge it into both `main` and new release branch `{{< release-branch >}}`. To merge your localization branch into new release branch `{{< release-branch >}}`, you need to switch upstream branch of your localization branch to `{{< release-branch >}}`. {{< /note >}} --> 团队必须将本地化内容合入到发布分支中,该发布分支是内容的来源。 例如: -- 源于 `master` 分支的本地化分支必须被合并到 `master`。 -- 源于 `release-1.19` 的本地化分支必须被合并到 `release-1.19`。 +- 源于 `main` 分支的本地化分支必须被合并到 `main`。 +- 源于 `release-{{ skew "prevMinorVersion" }}` + 的本地化分支必须被合并到 `release-{{ skew "prevMinorVersion" }}`。 -如果你的本地化分支是基于 `master` 分支创建的,但最终没有在新的发行 -分支 `{{< release-branch >}}` 被创建之前合并到 `master` 中,需要将其 -同时将其合并到 `master` 和新的发行分支 `{{< release-branch >}}` 中。 +如果你的本地化分支是基于 `main` 分支创建的,但最终没有在新的发行 +分支 `{{< release-branch >}}` 被创建之前合并到 `main` 中,需要将其 +同时将其合并到 `main` 和新的发行分支 `{{< release-branch >}}` 中。 要将本地化分支合并到新的发行分支 `{{< release-branch >}}` 中,你需要 将你本地化分支的上游分支切换到 `{{< release-branch >}}`。 在团队每个里程碑的开始时段,创建一个 issue 来比较先前的本地化分支 和当前的本地化分支之间的上游变化很有帮助。 现在有两个脚本用来比较上游的变化。 -[`upstream_changes.py`](https://github.com/kubernetes/website/tree/master/scripts#upstream_changespy) +[`upstream_changes.py`](https://github.com/kubernetes/website/tree/main/scripts#upstream_changespy) 对于检查对某个文件的变更很有用。 -[`diff_l10n_branches.py`](https://github.com/kubernetes/website/tree/master/scripts#diff_l10n_branchespy) +[`diff_l10n_branches.py`](https://github.com/kubernetes/website/tree/main/scripts#diff_l10n_branchespy) 
可以用来为某个特定本地化分支创建过时文件的列表。 虽然只有批准人才能创建新的本地化分支并合并 PR,任何人都可以 @@ -585,32 +734,3 @@ SIG Docs welcomes upstream contributions and corrections to the English source. ### 上游贡献 {#upstream-contributions} Sig Docs 欢迎对英文原文的上游贡献和修正。 - - -## 帮助现有的本地化 - -您还可以向现有本地化添加或改进内容提供帮助。 -加入本地化团队的 [Slack 频道](https://kubernetes.slack.com/messages/C1J0BPD2M/), -然后开始新建 PR 来提供帮助。 -请限制每个 PR 只涉及一种语言,这是因为更改多种语言版本内容的 PR -可能非常难审阅。 - -## {{% heading "whatsnext" %}} - - -本地化满足工作流程和最低输出要求后,SIG 文档将: - -- 在网站上启用语言选择 -- 通过[Cloud Native Computing Foundation](https://www.cncf.io/about/) (CNCF) 频道, - 包括[ Kubernetes 博客](https://kubernetes.io/blog/)公开本地化的可用性。 - diff --git a/content/zh/docs/contribute/localization_zh.md b/content/zh/docs/contribute/localization_zh.md index 38e5ee8000..8b157d4a18 100644 --- a/content/zh/docs/contribute/localization_zh.md +++ b/content/zh/docs/contribute/localization_zh.md @@ -6,8 +6,7 @@ content_type: concept 本节详述文档中文本地化过程中须注意的事项。 -这里列举的内容包含了*中文本地化小组*早期给出的指导性建议和后续实践过程中 -积累的经验。 +这里列举的内容包含了*中文本地化小组*早期给出的指导性建议和后续实践过程中积累的经验。 在阅读、贡献、评阅中文本地化文档的过程中,如果对本文的指南有任何改进建议, 都请直接提出 PR。我们欢迎任何形式的补充和更正! @@ -167,8 +166,8 @@ English text {{}} ``` -评阅人应该不难理解中英文段落的对应关系。但是如果采用下面的方式,则会出现 -两个 `note`,因此需要避免。这是因为被注释起来的短代码仍会起作用! +评阅人应该不难理解中英文段落的对应关系。但是如果采用下面的方式, +则会出现两个 `note`,因此需要避免。这是因为被注释起来的短代码仍会起作用! 
``` - 博客文章应该是原创内容。 - 官方博客的目的不是将某第三方已发表的内容重新作为新内容发表。 - - 博客的[授权协议](https://github.com/kubernetes/website/blob/master/LICENSE) + - 博客的[授权协议](https://github.com/kubernetes/website/blob/main/LICENSE) 的确允许出于商业目的来使用博客内容;但并不是所有可以商用的内容都适合在这里发表。 - 博客文章的内容应该在一段时间内不过期。 - 考虑到项目的开发速度,我们希望读者看到的是不必更新就能保持长期准确的内容。 @@ -144,7 +144,7 @@ SIG Docs [博客子项目](https://github.com/kubernetes/community/tree/master/s 要提交博文,你可以遵从以下指南: - [发起一个包含博文的 PR](/zh/docs/contribute/new-content/open-a-pr/#fork-the-repo)。 - 新博文要创建于 [`content/en/blog/_posts`](https://github.com/kubernetes/website/tree/master/content/en/blog/_posts) 目录下。 + 新博文要创建于 [`content/en/blog/_posts`](https://github.com/kubernetes/website/tree/main/content/en/blog/_posts) 目录下。 - 确保你的博文遵从合适的命名规范,并带有下面的引言(元数据)信息: @@ -205,7 +205,7 @@ Case studies highlight how organizations are using Kubernetes to solve real-world problems. The Kubernetes marketing team and members of the {{< glossary_tooltip text="CNCF" term_id="cncf" >}} collaborate with you on all case studies. Have a look at the source for the -[existing case studies](https://github.com/kubernetes/website/tree/master/content/en/case-studies). +[existing case studies](https://github.com/kubernetes/website/tree/main/content/en/case-studies). Refer to the [case study guidelines](https://github.com/cncf/foundation/blob/master/case-study-guidelines.md) and submit your request as outlined in the guidelines. 
--> @@ -216,7 +216,7 @@ Kubernetes 市场化团队和 {{< glossary_tooltip text="CNCF" term_id="cncf" >} 会与你一起工作,撰写所有的案例分析。 请查看 -[现有案例分析](https://github.com/kubernetes/website/tree/master/content/en/case-studies) +[现有案例分析](https://github.com/kubernetes/website/tree/main/content/en/case-studies) 的源码。 参考[案例分析指南](https://github.com/cncf/foundation/blob/master/case-study-guidelines.md) diff --git a/content/zh/docs/contribute/new-content/open-a-pr.md b/content/zh/docs/contribute/new-content/open-a-pr.md index ee5a2fa503..7d2919b713 100644 --- a/content/zh/docs/contribute/new-content/open-a-pr.md +++ b/content/zh/docs/contribute/new-content/open-a-pr.md @@ -222,9 +222,9 @@ Make sure you have [git](https://git-scm.com/book/en/v2/Getting-Started-Installi upstream https://github.com/kubernetes/website.git (push) ``` -6. 从你的克隆副本取回 `origin/master` 分支,从 `kubernetes/website` 取回 `upstream/master`: +6. 从你的克隆副本取回 `origin/master` 分支,从 `kubernetes/website` 取回 `upstream/main`: ```bash git fetch origin @@ -236,10 +236,11 @@ Make sure you have [git](https://git-scm.com/book/en/v2/Getting-Started-Installi 这样可以确保你本地的仓库在开始工作前是最新的。 {{< note >}} - 此工作流程与 [Kubernetes 社区 GitHub 工作流](https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md)有所不同。在推送你的变更到你的远程派生副本库之前,你不需要将你本地的 `master` 与 `upstream/master` 合并。 + 此工作流程与 [Kubernetes 社区 GitHub 工作流](https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md)有所不同。 + 在推送你的变更到你的远程派生副本库之前,你不需要将你本地的 `main` 与 `upstream/main` 合并。 {{< /note >}} 2. 基于第一步中选定的分支,创建新分支。 - 下面的例子假定基础分支是 `upstream/master`: + 下面的例子假定基础分支是 `upstream/main`: ```bash - git checkout -b upstream/master + git checkout -b upstream/main ``` -1. 安装 [`website/netlify.toml`](https://raw.githubusercontent.com/kubernetes/website/master/netlify.toml) +1. 安装 [`website/netlify.toml`](https://raw.githubusercontent.com/kubernetes/website/main/netlify.toml) 文件中指定的 [Hugo](https://gohugo.io/getting-started/installing/) 版本。 2. 
启动一个终端窗口,进入 Kubernetes 网站仓库目录,启动 Hugo 服务器: @@ -651,13 +652,13 @@ If another contributor commits changes to the same file in another PR, it can cr git push --force-with-lease origin ``` -2. 从 `kubernetes/website` 的 `upstream/master` 分支取回更改,然后重设本地分支的基线: +2. 从 `kubernetes/website` 的 `upstream/main` 分支取回更改,然后重设本地分支的基线: ```bash git fetch upstream - git rebase upstream/master + git rebase upstream/main ``` ## 为贡献者提供的工具 `kubernetes/website` 仓库的 -[文档贡献者工具](https://github.com/kubernetes/website/tree/master/content/en/docs/doc-contributor-tools) +[文档贡献者工具](https://github.com/kubernetes/website/tree/main/content/en/docs/doc-contributor-tools) 目录中包含了一些工具,能够助你的贡献过程更为顺畅。 diff --git a/content/zh/docs/contribute/participate/_index.md b/content/zh/docs/contribute/participate/_index.md index 1fcab79b2a..ba6ab0c738 100644 --- a/content/zh/docs/contribute/participate/_index.md +++ b/content/zh/docs/contribute/participate/_index.md @@ -136,14 +136,14 @@ Kubernetes 项目使用名为 prow 的自动化工具来自动处理 GitHub issu 这两个插件使用位于 `kubernetes/website` 仓库顶层的 -[OWNERS](https://github.com/kubernetes/website/blob/master/OWNERS) 文件和 -[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES) +[OWNERS](https://github.com/kubernetes/website/blob/main/OWNERS) 文件和 +[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/main/OWNERS_ALIASES) 文件来控制 prow 在仓库范围的工作方式。 - [未签署 CLA,不可合并的 PR](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3A%22cncf-cla%3A+no%22+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3Alanguage%2Fen): 提醒贡献者签署 CLA。如果机器人和审阅者都已经提醒他们,请关闭 PR,并提醒他们在签署 CLA 后可以重新提交。 @@ -105,11 +105,11 @@ These queries exclude localization PRs. 
All queries are against the main branch - [已有 LGTM标签,需要 Docs 团队批准](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+-label%3Ado-not-merge%2Fwork-in-progress+-label%3Ado-not-merge%2Fhold+label%3Alanguage%2Fen+label%3Algtm+): 列举需要 `/approve` 评论来合并的 PR。 -- [快速批阅](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+base%3Amaster+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3A%22cncf-cla%3A+yes%22+label%3A%22size%2FXS%22+label%3A%22language%2Fen%22): +- [快速批阅](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+base%3Amain+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3A%22cncf-cla%3A+yes%22+label%3A%22size%2FXS%22+label%3A%22language%2Fen%22): 列举针对主分支的、没有明确合并障碍的 PR。 在浏览 PR 时,可以将 "XS" 尺寸标签更改为 "S"、"M"、"L"、"XL"、"XXL"。 -- [非主分支的 PR](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3Alanguage%2Fen+-base%3Amaster): If the PR is against a `dev-` branch, it's for an upcoming release. Assign the [docs release manager](https://github.com/kubernetes/sig-release/tree/master/release-team#kubernetes-release-team-roles) using: `/assign @`. If the PR is against an old branch, help the author figure out whether it's targeted against the best branch. +- [非主分支的 PR](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3Alanguage%2Fen+-base%3Amain): 如果 PR 针对 `dev-` 分支,则表示它适用于即将发布的版本。 请添加带有 `/assign @<负责人的 github 账号>`,将其指派给 [发行版本负责人](https://github.com/kubernetes/sig-release/tree/master/release-team#kubernetes-release-team-roles)。 diff --git a/content/zh/docs/contribute/participate/roles-and-responsibilities.md b/content/zh/docs/contribute/participate/roles-and-responsibilities.md index 51b4399857..2b91550a60 100644 --- a/content/zh/docs/contribute/participate/roles-and-responsibilities.md +++ b/content/zh/docs/contribute/participate/roles-and-responsibilities.md @@ -269,7 +269,7 @@ To apply: 1. 
发起 PR,将你的 GitHub 用户名添加到 `kubernetes/website` 仓库中 - [OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS) + [OWNERS_ALIASES](https://github.com/kubernetes/website/blob/main/OWNERS) 文件的特定节。 {{< note >}} @@ -383,7 +383,7 @@ When you meet the [requirements](https://github.com/kubernetes/community/blob/ma -词汇术语的原始数据保存在 [https://github.com/kubernetes/website/tree/master/content/en/docs/reference/glossary](https://github.com/kubernetes/website/tree/master/content/en/docs/reference/glossary),每个内容文件对应相应的术语解释。 +词汇术语的原始数据保存在 [https://github.com/kubernetes/website/tree/main/content/en/docs/reference/glossary](https://github.com/kubernetes/website/tree/main/content/en/docs/reference/glossary),每个内容文件对应相应的术语解释。 ### 避免使用隐含用户对某技术有一定理解的词汇 @@ -1261,8 +1261,8 @@ These simple steps ... | These steps ... :--| :----- 在 ... 中包含一个命令 | 只需要在... 中包含一个命令 运行容器 ... | 只需运行该容器... -你可以很容易地移除... | 你可以移除... -这些简单的步骤... | 这些步骤... +你可以移除... | 你可以很容易地移除... +这些步骤... | 这些简单的步骤... {{< /table >}} ## {{% heading "whatsnext" %}} diff --git a/content/zh/docs/contribute/suggesting-improvements.md b/content/zh/docs/contribute/suggesting-improvements.md index fe5b469f84..cdc033d966 100644 --- a/content/zh/docs/contribute/suggesting-improvements.md +++ b/content/zh/docs/contribute/suggesting-improvements.md @@ -20,13 +20,13 @@ card: -如果你发现 Kubernetes 文档中存在问题,或者你有一个关于新内容的想法,可以考虑 +如果你发现 Kubernetes 文档中存在问题或者你有一个关于新内容的想法,可以考虑 提出一个问题(issue)。你只需要具有 [GitHub 账号](https://github.com/join)和 Web 浏览器就可以完成这件事。 @@ -40,7 +40,7 @@ Kubernetes 贡献者会审阅这些问题并根据需要对其分类、打标签 ## 创建问题 {#opening-an-issue} -如果你希望就改进已有内容提出建议,或者在文档中发现了错误,请创建一个问题(issue)。 +如果你希望就改进已有内容提出建议或者在文档中发现了错误,请创建一个问题(issue)。 1. 
滚动到页面底部,点击“报告问题”按钮。浏览器会重定向到一个 GitHub 问题页面,其中 包含了一些预先填充的内容。 diff --git a/content/zh/docs/reference/_index.md b/content/zh/docs/reference/_index.md index d1c1c93035..f28e5b1e5d 100644 --- a/content/zh/docs/reference/_index.md +++ b/content/zh/docs/reference/_index.md @@ -15,6 +15,7 @@ linkTitle: "Reference" main_menu: true weight: 70 content_type: concept +no_list: true --> @@ -29,16 +30,26 @@ This section of the Kubernetes documentation contains references. ## API 参考 +* [术语表](/zh/docs/reference/glossary/) - 一个全面的标准化的 Kubernetes 术语表 + +* [Kubernetes API 单页参考](/zh/docs/reference/kubernetes-api/) * [Kubernetes API 参考 {{< param "version" >}}](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/)。 * [使用 Kubernetes API ](/zh/docs/reference/using-api/) - Kubernetes 的 API 概述 +* [API 的访问控制](/zh/docs/reference/access-authn-authz/) - 关于 Kubernetes 如何控制 API 访问的详细信息 +* [常见的标签、注解和污点](/zh/docs/reference/labels-annotations-taints/) -## API 客户端库 +## 官方支持的客户端库 如果您需要通过编程语言调用 Kubernetes API,您可以使用 [客户端库](/zh/docs/reference/using-api/client-libraries/)。以下是官方支持的客户端库: @@ -58,16 +71,17 @@ client libraries: - [Kubernetes Python 语言客户端库](https://github.com/kubernetes-client/python) - [Kubernetes Java 语言客户端库](https://github.com/kubernetes-client/java) - [Kubernetes JavaScript 语言客户端库](https://github.com/kubernetes-client/javascript) +- [Kubernetes Dotnet 语言客户端库](https://github.com/kubernetes-client/csharp) +- [Kubernetes Haskell 语言客户端库](https://github.com/kubernetes-client/haskell) -## CLI 参考 +## CLI * [kubectl](/zh/docs/reference/kubectl/overview/) - 主要的 CLI 工具,用于运行命令和管理 Kubernetes 集群。 * [JSONPath](/zh/docs/reference/kubectl/jsonpath/) - 通过 kubectl 使用 @@ -75,29 +89,75 @@ client libraries: * [kubeadm](/zh/docs/reference/setup-tools/kubeadm/) - 此 CLI 工具可轻松配置安全的 Kubernetes 集群。 -## 组件参考 +## 组件 + +* [kubelet](/zh/docs/reference/command-line-tools-reference/kubelet/) - + 在每个节点上运行的主代理。kubelet 接收一组 PodSpecs 并确保其所描述的容器健康地运行。 +* 
[kube-apiserver](/zh/docs/reference/command-line-tools-reference/kube-apiserver/) - + REST API,用于验证和配置 API 对象(如 Pod、服务或副本控制器等)的数据。 +* [kube-controller-manager](/zh/docs/reference/command-line-tools-reference/kube-controller-manager/) - + 一个守护进程,其中包含 Kubernetes 所附带的核心控制回路。 +* [kube-proxy](/zh/docs/reference/command-line-tools-reference/kube-proxy/) - + 可进行简单的 TCP/UDP 流转发或针对一组后端执行轮流 TCP/UDP 转发。 +* [kube-scheduler](/zh/docs/reference/command-line-tools-reference/kube-scheduler/) - + 一个调度程序,用于管理可用性、性能和容量。 + + * [调度策略](/zh/docs/reference/scheduling/policies) + * [调度配置](/zh/docs/reference/scheduling/config#profiles) + + +## 配置 API + +本节包含用于配置 kubernetes 组件或工具的 "未发布" API 的文档。 +尽管这些 API 对于用户或操作者使用或管理集群来说是必不可少的, +它们大都没有以 RESTful 的方式在 API 服务器上公开。 + +* [kubelet 配置 (v1beta1)](/zh/docs/reference/config-api/kubelet-config.v1beta1/) +* [kube-scheduler 配置 (v1beta1)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta1/) +* [kube-scheduler 策略参考 (v1)](/zh/docs/reference/config-api/kube-scheduler-policy-config.v1/) +* [kube-proxy 配置 (v1alpha1)](/zh/docs/reference/config-api/kube-proxy-config.v1alpha1/) +* [`audit.k8s.io/v1` API](/zh/docs/reference/config-api/apiserver-audit.v1/) +* [客户端认证 API (v1beta1)](/zh/docs/reference/config-api/client-authentication.v1beta1/) +* [WebhookAdmission 配置 (v1)](/zh/docs/reference/config-api/apiserver-webhookadmission.v1/) -* [kubelet](/zh/docs/reference/command-line-tools-reference/kubelet/) - 在每个节点上运行的主 *节点代理* 。kubelet 采用一组 PodSpecs 并确保所描述的容器健康地运行。 -* [kube-apiserver](/zh/docs/reference/command-line-tools-reference/kube-apiserver/) - REST API,用于验证和配置 API 对象(如 Pod、服务或副本控制器等)的数据。 -* [kube-controller-manager](/zh/docs/reference/command-line-tools-reference/kube-controller-manager/) - 一个守护进程,它嵌入到了 Kubernetes 的附带的核心控制循环。 -* [kube-proxy](/zh/docs/reference/command-line-tools-reference/kube-proxy/) - 可进行简单的 TCP/UDP 流转发或针对一组后端执行轮流 TCP/UDP 转发。 -* [kube-scheduler](/zh/docs/reference/command-line-tools-reference/kube-scheduler/) - 一个调度程序,用于管理可用性、性能和容量。 - 
* [kube-scheduler 策略](/zh/docs/reference/scheduling/policies) - * [kube-scheduler 配置](/zh/docs/reference/scheduling/config#profiles) ## 设计文档 diff --git a/content/zh/docs/reference/access-authn-authz/admission-controllers.md b/content/zh/docs/reference/access-authn-authz/admission-controllers.md index 80d1256504..d7d612af16 100644 --- a/content/zh/docs/reference/access-authn-authz/admission-controllers.md +++ b/content/zh/docs/reference/access-authn-authz/admission-controllers.md @@ -1351,7 +1351,7 @@ PVC/PV 不会被删除。 ### TaintNodesByCondition {#taintnodesbycondition} -{{< feature-state for_k8s_version="v1.12" state="beta" >}} +{{< feature-state for_k8s_version="v1.17" state="stable" >}} ## 签名者 {#signers} +也可以指定自定义 signerName。 所有签名者都应该提供自己工作方式的信息, 以便客户端可以预期到他们的 CSR 将发生什么。 此类信息包括: @@ -423,8 +424,8 @@ O is the group that this user will belong to. You can refer to 你可以参考 [RBAC](/zh/docs/reference/access-authn-authz/rbac/) 了解标准组的信息。 ```shell -openssl genrsa -out john.key 2048 -openssl req -new -key john.key -out john.csr +openssl genrsa -out myuser.key 2048 +openssl req -new -key myuser.key -out myuser.csr ``` 需要注意的几点: - `usage` 字段必须是 '`client auth`' - `request` 字段是 CSR 文件内容的 base64 编码值。 - 要得到该值,可以执行命令 `cat john.csr | base64 | tr -d "\n"`。 + 要得到该值,可以执行命令 `cat myuser.csr | base64 | tr -d "\n"`。 证书的内容使用 base64 编码,存放在字段 `status.certificate`。 +从 CertificateSigningRequest 导出颁发的证书。 + +``` +kubectl get csr myuser -o jsonpath='{.status.certificate}'| base64 -d > myuser.crt +``` + @@ -555,7 +564,7 @@ First, we need to add new credentials: 首先,我们需要添加新的凭据: ```shell -kubectl config set-credentials john --client-key=/home/vagrant/work/john.key --client-certificate=/home/vagrant/work/john.crt --embed-certs=true +kubectl config set-credentials myuser --client-key=myuser.key --client-certificate=myuser.crt --embed-certs=true ``` @@ -565,16 +574,16 @@ Then, you need to add the context: 然后,你需要添加上下文: ```shell -kubectl config set-context john --cluster=kubernetes --user=john +kubectl config 
set-context myuser --cluster=kubernetes --user=myuser ``` -来测试一下,把上下文切换为 `john`: +来测试一下,把上下文切换为 `myuser`: ```shell -kubectl config use-context john +kubectl config use-context myuser ``` `status.conditions.reason` 字段通常设置为一个首字母大写的对机器友好的原因码; 这是一个命名约定,但你也可以随你的个人喜好设置。 -如果你想添加一个仅供人类使用的注释,那就用 `status.conditions.message` 字段。 +如果你想添加一个供人类使用的注释,那就用 `status.conditions.message` 字段。 1. 针对不同角色的绑定是完全不一样的绑定。要求通过删除/重建绑定来更改 `roleRef`, - 这样可以确保要赋予绑定的所有主体会被授予新的角色(而不是在允许修改 - `roleRef` 的情况下导致所有现有主体未经验证即被授予新角色对应的权限)。 + 这样可以确保要赋予绑定的所有主体会被授予新的角色(而不是在允许或者不小心修改 + 了 `roleRef` 的情况下导致所有现有主体未经验证即被授予新角色对应的权限)。 1. 将 `roleRef` 设置为不可以改变,这使得可以为用户授予对现有绑定对象的 `update` 权限, 这样可以让他们管理主体列表,同时不能更改被授予这些主体的角色。 @@ -503,7 +503,7 @@ as a cluster administrator, include rules for custom resources, such as those se or aggregated API servers, to extend the default roles. For example: the following ClusterRoles let the "admin" and "edit" default roles manage the custom resource -named CronTab, whereas the "view" role can perform just read actions on CronTab resources. +named CronTab, whereas the "view" role can perform only read actions on CronTab resources. You can assume that CronTab objects are named `"crontabs"` in URLs as seen by the API server. --> 默认的[面向用户的角色](#default-roles-and-role-bindings) 使用 ClusterRole 聚合。 @@ -870,7 +870,7 @@ Auto-reconciliation is enabled by default if the RBAC authorizer is active. 
### 自动协商 {#auto-reconciliation} 在每次启动时,API 服务器都会更新默认 ClusterRole 以添加缺失的各种权限,并更新 -默认的 ClusterRoleBinding 以增加缺失的的各类主体。 +默认的 ClusterRoleBinding 以增加缺失的各类主体。 这种自动协商机制允许集群去修复一些不小心发生的修改,并且有助于保证角色和角色绑定 在新的发行版本中有权限或主体变更时仍然保持最新。 diff --git a/content/zh/docs/reference/access-authn-authz/service-accounts-admin.md b/content/zh/docs/reference/access-authn-authz/service-accounts-admin.md index 82b37c9043..451c8d4b3b 100644 --- a/content/zh/docs/reference/access-authn-authz/service-accounts-admin.md +++ b/content/zh/docs/reference/access-authn-authz/service-accounts-admin.md @@ -1,5 +1,5 @@ --- -title: 管理 Service Accounts +title: 管理服务账号 content_type: concept weight: 50 --- @@ -20,7 +20,7 @@ weight: 50 This is a Cluster Administrator guide to service accounts. You should be familiar with [configuring Kubernetes service accounts](/docs/tasks/configure-pod-container/configure-service-account/). -Support for authorization and user accounts is planned but incomplete. Sometimes +Support for authorization and user accounts is planned but incomplete. Sometimes incomplete features are referred to in order to better describe service accounts. --> 这是一篇针对服务账号的集群管理员指南。你应该熟悉 @@ -102,41 +102,86 @@ It acts synchronously to modify pods as they are created or updated. When this p 或更新时它会进行以下操作: -1. 如果该 Pod 没有设置 `serviceAccountName`,将其 `serviceAccountName` 设为 - `default`。 -1. 保证 Pod 所引用的 `serviceAccountName` 确实存在,否则拒绝该 Pod。 -1. 如果 Pod 不包含 `imagePullSecrets` 设置,将 `serviceAccountName` 所引用 - 的服务账号中的 `imagePullSecrets` 信息添加到 Pod 中。 +1. 如果该 Pod 没有设置 `ServiceAccount`,将其 `ServiceAccount` 设为 `default`。 +1. 保证 Pod 所引用的 `ServiceAccount` 确实存在,否则拒绝该 Pod。 1. 如果服务账号的 `automountServiceAccountToken` 或 Pod 的 - `automountServiceAccountToken` 都为设置为 `false`,则为 Pod 创建一个 + `automountServiceAccountToken` 都未显式设置为 `false`,则为 Pod 创建一个 `volume`,在其中包含用来访问 API 的令牌。 1. 如果前一步中为服务账号令牌创建了卷,则为 Pod 中的每个容器添加一个 `volumeSource`,挂载在其 `/var/run/secrets/kubernetes.io/serviceaccount` 目录下。 +1. 
如果 Pod 不包含 `imagePullSecrets` 设置,将 `ServiceAccount` 所引用 + 的服务账号中的 `imagePullSecrets` 信息添加到 Pod 中。 -当 `BoundServiceAccountTokenVolume` 特性门控被启用时,你可以将服务账号卷迁移到投射卷。 -服务账号令牌会在 1 小时后或者 Pod 被删除之后过期。 -更多信息可参阅[投射卷](/zh/docs/tasks/configure-pod-container/configure-projected-volume-storage/)。 +#### 绑定的服务账号令牌卷 {#bound-service-account-token-volume} + +{{< feature-state for_k8s_version="v1.22" state="stable" >}} + + +当 `BoundServiceAccountTokenVolume` +ServiceAccount 准入控制器将添加如下投射卷,而不是为令牌控制器 +所生成的不过期的服务账号令牌而创建的基于 Secret 的卷。 + +```yaml +- name: kube-api-access-<随机后缀> + projected: + defaultMode: 420 # 0644 + sources: + - serviceAccountToken: + expirationSeconds: 3600 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace +``` + + +此投射卷有三个数据源: + +1. 通过 TokenRequest API 从 kube-apiserver 处获得的 ServiceAccountToken。 + 这一令牌默认会在一个小时之后或者 Pod 被删除时过期。 + 该令牌绑定到 Pod 实例上,并将 kube-apiserver 作为其受众(audience)。 +1. 包含用来验证与 kube-apiserver 连接的 CA 证书包的 ConfigMap 对象。 + 这一特性依赖于 `RootCAConfigMap` 特性门控。该特性被启用时, + 控制面会公开一个名为 `kube-root-ca.crt` 的 ConfigMap 给所有名字空间。 + `RootCAConfigMap` 在 1.21 版本中进入 GA 状态,默认被启用, + 该特性门控会在 1.22 版本中从 `--feature-gate` 参数中删除。 +1. 
引用 Pod 名字空间的一个 DownwardAPI。 + + +参阅[投射卷](/zh/docs/tasks/configure-pod-container/configure-projected-volume-storage/) +了解进一步的细节。 diff --git a/content/zh/docs/reference/command-line-tools-reference/feature-gates.md b/content/zh/docs/reference/command-line-tools-reference/feature-gates.md index ff81c38d6b..dd038fccaf 100644 --- a/content/zh/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/zh/docs/reference/command-line-tools-reference/feature-gates.md @@ -2,12 +2,18 @@ title: 特性门控 weight: 10 content_type: concept +card: + name: reference + weight: 60 --- @@ -47,7 +53,7 @@ To set feature gates for a component, such as kubelet, use the `--feature-gates` 传递一个特性设置键值对列表: ```shell ---feature-gates="...,DynamicKubeletConfig=true" +--feature-gates="...,GracefulNodeShutdown=true" ``` - `APIListChunking`:启用 API 客户端以块的形式从 API 服务器检索(“LIST” 或 “GET”)资源。 - `APIPriorityAndFairness`: 在每个服务器上启用优先级和公平性来管理请求并发。(由 `RequestManagement` 重命名而来) - `APIResponseCompression`:压缩 “LIST” 或 “GET” 请求的 API 响应。 - `APIServerIdentity`:为集群中的每个 API 服务器赋予一个 ID。 +- `APIServerTracing`: 为集群中的每个 API 服务器添加对分布式跟踪的支持。 - `AttachVolumeLimit`:启用卷插件用于报告可连接到节点的卷数限制。有关更多详细信息,请参阅 [动态卷限制](/zh/docs/concepts/storage/storage-limits/#dynamic-volume-limits)。 @@ -609,9 +701,15 @@ Each feature gate is designed for enabling/disabling a specific feature: `--service-account-extend-token-expiration=false` 参数关闭扩展令牌。查看 [绑定服务账号令牌](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md) 获取更多详细信息。 +- `ControllerManagerLeaderMigration`: 为 + [kube-controller-manager](/zh/docs/tasks/administer-cluster/controller-manager-leader-migration/#initial-leader-migration-configuration) 和 + [cloud-controller-manager](/zh/docs/tasks/administer-cluster/controller-manager-leader-migration/#deploy-cloud-controller-manager) + 启用 Leader 迁移,它允许集群管理者在没有停机的高可用集群环境下,实时 + 把 kube-controller-manager 迁移迁移到外部的 controller-manager (例如 cloud-controller-manager) 中。 - `CPUManager`:启用容器级别的 
CPU 亲和性支持,有关更多详细信息,请参见 [CPU 管理策略](/zh/docs/tasks/administer-cluster/cpu-management-policies/)。 -- `CRIContainerLogRotation`:为 CRI 容器运行时启用容器日志轮换。日志文件的默认最大大小为10MB,缺省情况下,一个容器允许的最大日志文件数为5。这些值可以在kubelet配置中配置。 - 更多细节请参见[日志架构]( /zh/docs/concepts/cluster-administration/logging/#logging-at-the-node-level)。 +- `CRIContainerLogRotation`:为 CRI 容器运行时启用容器日志轮换。日志文件的默认最大大小为 + 10MB,缺省情况下,一个容器允许的最大日志文件数为5。这些值可以在kubelet配置中配置。 + 更多细节请参见 [日志架构](/zh/docs/concepts/cluster-administration/logging/#logging-at-the-node-level)。 +- `CPUManagerPolicyOptions`: 允许微调 CPU 管理策略。 - `CSIBlockVolume`:启用外部 CSI 卷驱动程序用于支持块存储。有关更多详细信息,请参见 [`csi` 原始块卷支持](/zh/docs/concepts/storage/volumes/#csi-raw-block-volume-support)。 - `CSIDriverRegistry`:在 csi.storage.k8s.io 中启用与 CSIDriver API 对象有关的所有逻辑。 @@ -642,7 +742,9 @@ Each feature gate is designed for enabling/disabling a specific feature: kubelet and volume controllers and enables shims and translation logic to route volume operations from the AWS-EBS in-tree plugin to EBS CSI plugin. Requires CSIMigration and CSIMigrationAWS feature flags enabled and EBS CSI - plugin installed and configured on all nodes in the cluster. + plugin installed and configured on all nodes in the cluster. This flag has + been deprecated in favor of the `InTreePluginAWSUnregister` feature flag + which prevents the registration of in-tree EBS plugin. - `CSIMigrationAzureDisk`: Enables shims and translation logic to route volume operations from the Azure-Disk in-tree plugin to AzureDisk CSI plugin. Supports falling back to in-tree AzureDisk plugin if a node does not have @@ -653,7 +755,9 @@ Each feature gate is designed for enabling/disabling a specific feature: logic to route volume operations from the Azure-Disk in-tree plugin to AzureDisk CSI plugin. Requires CSIMigration and CSIMigrationAzureDisk feature flags enabled and AzureDisk CSI plugin installed and configured on all nodes - in the cluster. + in the cluster. 
This flag has been deprecated in favor of the + `InTreePluginAzureFileUnregister` feature flag which prevents the registration + of in-tree AzureFile plugin. --> - `CSIMigrationAWS`:确保填充和转换逻辑能够将卷操作从 AWS-EBS 内嵌插件路由到 EBS CSI 插件。 如果节点未安装和配置 EBS CSI 插件,则支持回退到内嵌 EBS 插件。 @@ -661,14 +765,15 @@ Each feature gate is designed for enabling/disabling a specific feature: - `CSIMigrationAWSComplete`:停止在 kubelet 和卷控制器中注册 EBS 内嵌插件, 并启用 shims 和转换逻辑将卷操作从AWS-EBS 内嵌插件路由到 EBS CSI 插件。 这需要启用 CSIMigration 和 CSIMigrationAWS 特性标志,并在集群中的所有节点上安装和配置 - EBS CSI 插件。 + EBS CSI 插件。该特性标志已被废弃,取而代之的是 `InTreePluginAWSUnregister` ,这会阻止注册 EBS 内嵌插件。 - `CSIMigrationAzureDisk`:确保填充和转换逻辑能够将卷操作从 Azure 磁盘内嵌插件路由到 Azure 磁盘 CSI 插件。如果节点未安装和配置 AzureDisk CSI 插件, 支持回退到内建 AzureDisk 插件。这需要启用 CSIMigration 特性标志。 - `CSIMigrationAzureDiskComplete`:停止在 kubelet 和卷控制器中注册 Azure 磁盘内嵌插件, 并启用 shims 和转换逻辑以将卷操作从 Azure 磁盘内嵌插件路由到 AzureDisk CSI 插件。 这需要启用 CSIMigration 和 CSIMigrationAzureDisk 特性标志, - 并在集群中的所有节点上安装和配置 AzureDisk CSI 插件。 + 并在集群中的所有节点上安装和配置 AzureDisk CSI 插件。该特性标志已被废弃,取而代之的是 + 能防止注册内嵌 AzureDisk 插件的 `InTreePluginAzureDiskUnregister` 特性标志。 - `CSIMigrationAzureFile`:确保封装和转换逻辑能够将卷操作从 Azure 文件内嵌插件路由到 Azure 文件 CSI 插件。如果节点未安装和配置 AzureFile CSI 插件, @@ -688,7 +794,8 @@ Each feature gate is designed for enabling/disabling a specific feature: - `CSIMigrationAzureFileComplete`:停止在 kubelet 和卷控制器中注册 Azure-File 内嵌插件, 并启用 shims 和转换逻辑以将卷操作从 Azure-File 内嵌插件路由到 AzureFile CSI 插件。 这需要启用 CSIMigration 和 CSIMigrationAzureFile 特性标志, - 并在集群中的所有节点上安装和配置 AzureFile CSI 插件。 + 并在集群中的所有节点上安装和配置 AzureFile CSI 插件。该特性标志已被废弃,取而代之的是 + 能防止注册内嵌 AzureDisk 插件的 `InTreePluginAzureFileUnregister` 特性标志。 - `CSIMigrationGCE`:启用 shims 和转换逻辑,将卷操作从 GCE-PD 内嵌插件路由到 PD CSI 插件。如果节点未安装和配置 PD CSI 插件,支持回退到内嵌 GCE 插件。 @@ -706,7 +814,8 @@ Each feature gate is designed for enabling/disabling a specific feature: - `CSIMigrationGCEComplete`:停止在 kubelet 和卷控制器中注册 GCE-PD 内嵌插件, 并启用 shims 和转换逻辑以将卷操作从 GCE-PD 内嵌插件路由到 PD CSI 插件。 这需要启用 CSIMigration 和 CSIMigrationGCE 特性标志,并在集群中的所有节点上 - 安装和配置 PD 
CSI 插件。 + 安装和配置 PD CSI 插件。该特性标志已被废弃,取而代之的是 + 能防止注册内嵌 GCE PD 插件的 `InTreePluginGCEUnregister` 特性标志。 - `CSIMigrationOpenStack`:确保填充和转换逻辑能够将卷操作从 Cinder 内嵌插件路由到 Cinder CSI 插件。如果节点未安装和配置 Cinder CSI 插件,支持回退到内嵌 Cinder 插件。 @@ -724,7 +834,8 @@ Each feature gate is designed for enabling/disabling a specific feature: - `CSIMigrationOpenStackComplete`:停止在 kubelet 和卷控制器中注册 Cinder 内嵌插件, 并启用 shims 和转换逻辑将卷操作从 Cinder 内嵌插件路由到 Cinder CSI 插件。 这需要启用 CSIMigration 和 CSIMigrationOpenStack 特性标志,并在集群中的所有节点上 - 安装和配置 Cinder CSI 插件。 + 安装和配置 Cinder CSI 插件。该特性标志已被废弃,取而代之的是 + 能防止注册内嵌 openstack cinder 插件的 `InTreePluginOpenStackUnregister` 特性标志。 - `CSIMigrationvSphere`: 允许封装和转换逻辑将卷操作从 vSphere 内嵌插件路由到 vSphere CSI 插件。如果节点未安装和配置 vSphere CSI 插件,则支持回退到 @@ -742,7 +854,8 @@ Each feature gate is designed for enabling/disabling a specific feature: - `CSIMigrationvSphereComplete`: 停止在 kubelet 和卷控制器中注册 vSphere 内嵌插件, 并启用 shims 和转换逻辑以将卷操作从 vSphere 内嵌插件路由到 vSphere CSI 插件。 这需要启用 CSIMigration 和 CSIMigrationvSphere 特性标志,并在集群中的所有节点上 - 安装和配置 vSphere CSI 插件。 + 安装和配置 vSphere CSI 插件。该特性标志已被废弃,取而代之的是 + 能防止注册内嵌 vsphere 插件的 `InTreePluginvSphereUnregister` 特性标志。 - `CSIVolumeFSGroupPolicy`: 允许 CSIDrivers 使用 `fsGroupPolicy` 字段. 该字段能控制由 CSIDriver 创建的卷在挂载这些卷时是否支持卷所有权和权限修改。 - `CSIVolumeHealth`: 启用对节点上的 CSI volume 运行状况监控的支持 +- `CSRDuration`: 允许客户端来通过请求 Kubernetes CSR API 签署的证书的持续时间。 - `ConfigurableFSGroupPolicy`:在 Pod 中挂载卷时,允许用户为 fsGroup 配置卷访问权限和属主变更策略。请参见 [为 Pod 配置卷访问权限和属主变更策略](/zh/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods)。 +- `ControllerManagerLeaderMigration`: 为 `kube-controller-manager` 和 `cloud-controller-manager` + 开启 leader 迁移功能。 - `CronJobControllerV2`:使用 {{< glossary_tooltip text="CronJob" term_id="cronjob" >}} 控制器的一种替代实现。否则,系统会选择同一控制器的 v1 版本。 - 控制器的 v2 版本提供试验性的性能改进。 - `CustomCPUCFSQuotaPeriod`:使节点能够更改 [kubelet 配置](/zh/docs/tasks/administer-cluster/kubelet-config-file/). 
@@ -821,13 +941,20 @@ Each feature gate is designed for enabling/disabling a specific feature: - `CustomResourceWebhookConversion`:对于用 [CustomResourceDefinition](/zh/docs/concepts/extend-kubernetes/api-extension/custom-resources/) 创建的资源启用基于 Webhook 的转换。 +- `DaemonSetUpdateSurge`: 使 DaemonSet 工作负载在每个节点的更新期间保持可用性。 - `DefaultPodTopologySpread`: 启用 `PodTopologySpread` 调度插件来完成 [默认的调度传播](/zh/docs/concepts/workloads/pods/pod-topology-spread-constraints/#internal-default-constraints). +- `DelegateFSGroupToCSIDriver`: 如果 CSI 驱动程序支持,则通过 NodeStageVolume 和 + NodePublishVolume CSI 调用传递 `fsGroup` ,将应用 `fsGroup` 从 Pod 的 + `securityContext` 的角色委托给驱动。 - `DevicePlugins`:在节点上启用基于 [设备插件](/zh/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/)的 资源制备。 - `DisableAcceleratorUsageMetrics`: [禁用 kubelet 收集加速器指标](/zh/docs/concepts/cluster-administration/system-metrics/#disable-accelerator-metrics). +- `DisableCloudProviders`: 禁用 `kube-apiserver`, + `kube-controller-manager` 和 `kubelet` 组件的 `--cloud-provider` 标志相关 + 的所有功能。 - `DownwardAPIHugePages`:允许在 [下行(Downward)API](/zh/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information) 中使用巨页信息。 @@ -916,6 +1049,9 @@ Each feature gate is designed for enabling/disabling a specific feature: 参阅[就绪态探针](/zh/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). - `ExpandCSIVolumes`: 启用扩展 CSI 卷。 +- `ExpandedDNSConfig`: 在 kubelet 和 kube-apiserver 上启用后, + 允许更多的 DNS 搜索域和搜索域列表。 参阅 + [扩展 DNS 配置](/zh/docs/concepts/services-networking/dns-pod-service/#expanded-dns-configuration). - `ExpandInUsePersistentVolumes`:启用扩充使用中的 PVC 的尺寸。请查阅 [调整使用中的 PersistentVolumeClaim 的大小](/zh/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim)。 - `ExpandPersistentVolumes`:允许扩充持久卷。请查阅 @@ -988,8 +1127,18 @@ Each feature gate is designed for enabling/disabling a specific feature: for Windows containers. 
- `ImmutableEphemeralVolumes`: Allows for marking individual Secrets and ConfigMaps as immutable for better safety and performance. -- `IPv6DualStack`: Enable [dual stack](/docs/concepts/services-networking/dual-stack/) - support for IPv6. +- `InTreePluginAWSUnregister`: Stops registering the aws-ebs in-tree plugin in kubelet + and volume controllers. +- `InTreePluginAzureDiskUnregister`: Stops registering the azuredisk in-tree plugin in kubelet + and volume controllers. +- `InTreePluginAzureFileUnregister`: Stops registering the azurefile in-tree plugin in kubelet + and volume controllers. +- `InTreePluginGCEUnregister`: Stops registering the gce-pd in-tree plugin in kubelet + and volume controllers. +- `InTreePluginOpenStackUnregister`: Stops registering the OpenStack cinder in-tree plugin in kubelet + and volume controllers. +- `InTreePluginvSphereUnregister`: Stops registering the vSphere in-tree plugin in kubelet + and volume controllers. - `IndexedJob`: Allows the [Job](/docs/concepts/workloads/controllers/job/) controller to manage Pod completions per completion index. - `IngressClassNamespacedParams`: Allow namespace-scoped parameters reference in @@ -997,11 +1146,19 @@ Each feature gate is designed for enabling/disabling a specific feature: to `IngressClass.spec.parameters`. - `Initializers`: Allow asynchronous coordination of object creation using the Initializers admission plugin. +- `IPv6DualStack`: Enable [dual stack](/docs/concepts/services-networking/dual-stack/) + support for IPv6. +- `JobTrackingWithFinalizers`: Enables tracking [Job](/docs/concepts/workloads/controllers/job) + completions without relying on Pods remaining in the cluster indefinitely. + The Job controller uses Pod finalizers and a field in the Job status to keep + track of the finished Pods to count towards completion. - `KubeletConfigFile`: Enable loading kubelet configuration from a file specified using a config file. 
See [setting kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file/) for more details. - `KubeletCredentialProviders`: Enable kubelet exec credential providers for image pull credentials. +- `KubeletInUserNamespace`: Enables support for running kubelet in a {{}}. + See [Running Kubernetes Node Components as a Non-root User](/docs/tasks/administer-cluster/kubelet-in-userns/). - `KubeletPluginsWatcher`: Enable probe-based plugin watcher utility to enable kubelet to discover plugins such as [CSI volume drivers](/docs/concepts/storage/volumes/#csi). --> @@ -1009,16 +1166,27 @@ Each feature gate is designed for enabling/disabling a specific feature: [Hyper-V 隔离](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container)。 - `ImmutableEphemeralVolumes`:允许将各个 Secret 和 ConfigMap 标记为不可变更的, 以提高安全性和性能。 -- `IPv6DualStack`:启用[双协议栈](/zh/docs/concepts/services-networking/dual-stack/) - 以支持 IPv6。 +- `InTreePluginAWSUnregister`: 在 kubelet 和 卷控制器上关闭注册 aws-ebs 内嵌插件。 +- `InTreePluginAzureDiskUnregister`: 在 kubelet 和 卷控制器上关闭注册 azuredisk 内嵌插件。 +- `InTreePluginAzureFileUnregister`: 在 kubelet 和 卷控制器上关闭注册 azurefile 内嵌插件。 +- `InTreePluginGCEUnregister`: 在 kubelet 和 卷控制器上关闭注册 gce-pd 内嵌插件。 +- `InTreePluginOpenStackUnregister`: 在 kubelet 和 卷控制器上关闭注册 OpenStack cinder 内嵌插件。 +- `InTreePluginvSphereUnregister`: 在 kubelet 和 卷控制器上关闭注册 vSphere 内嵌插件。 - `IndexedJob`:允许 [Job](/zh/docs/concepts/workloads/controllers/job/) 控制器按每个完成的索引去管理 Pod 完成。 - `IngressClassNamespacedParams`:允许引用命名空间范围的参数引用 `IngressClass`资源。该特性增加了两个字段 —— `Scope` 和 `Namespace` 到 `IngressClass.spec.parameters`。 - `Initializers`: 使用 Initializers 准入插件允许异步协调对象创建。 +- `IPv6DualStack`:启用 [双协议栈](/zh/docs/concepts/services-networking/dual-stack/) + 以支持 IPv6。 +- `JobTrackingWithFinalizers`: 启用跟踪 [Job](/zh/docs/concepts/workloads/controllers/job) + 完成情况,而不是永远从集群剩余 pod 来获取信息判断完成情况。Job 控制器使 + 用 Pod finalizers 和 Job 状态中的一个字段来跟踪已完成的 Pod 以计算完成。 - 
`KubeletConfigFile`:启用从使用配置文件指定的文件中加载 kubelet 配置。 有关更多详细信息,请参见 [通过配置文件设置 kubelet 参数](/zh/docs/tasks/administer-cluster/kubelet-config-file/)。 - `KubeletCredentialProviders`:允许使用 kubelet exec 凭据提供程序来设置 镜像拉取凭据。 +- `KubeletInUserNamespace`: 支持在 {{}} 里运行 kubelet 。 + 请参见 [使用非 Root 用户来运行 Kubernetes 节点组件](/zh/docs/tasks/administer-cluster/kubelet-in-userns/). - `KubeletPluginsWatcher`:启用基于探针的插件监视应用程序,使 kubelet 能够发现 类似 [CSI 卷驱动程序](/zh/docs/concepts/storage/volumes/#csi)这类插件。 -- `KubeletPodResources`:启用 kubelet 的 Pod 资源 GRPC 端点。更多详细信息,请参见 +- `KubeletPodResources`:启用 kubelet 上 Pod 资源 GRPC 端点。更多详细信息,请参见 [支持设备监控](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/compute-device-assignment.md)。 -- `KubeletPodResourcesGetAllocatable`:启用 kubelet 的 pod 资源 `GetAllocatableResources` 功能。 +- `KubeletPodResourcesGetAllocatable`:启用 kubelet 的 pod 资源 + 的 `GetAllocatableResources` 功能。 该 API 增强了[资源分配报告](/zh/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources) 包含有关可分配资源的信息,使客户端能够正确跟踪节点上的可用计算资源。 - `LegacyNodeRoleBehavior`:禁用此门控时,服务负载均衡器中和节点干扰中的原先行为 @@ -1054,6 +1223,9 @@ Each feature gate is designed for enabling/disabling a specific feature: filesystem walk for better performance and accuracy. - `LogarithmicScaleDown`: Enable semi-random selection of pods to evict on controller scaledown based on logarithmic bucketing of pod timestamps. +- `MemoryManager`: Allows setting memory affinity for a container based on + NUMA topology. +- `MemoryQoS`: Enable memory protection and usage throttle on pod / container using cgroup v2 memory controller. - `MixedProtocolLBService`: Enable using different protocols in the same `LoadBalancer` type Service instance. 
- `MountContainers`: Enable using utility containers on host as @@ -1069,7 +1241,9 @@ Each feature gate is designed for enabling/disabling a specific feature: 的后备文件系统支持项目配额,并且启用了这些配额,将使用项目配额来监视 [emptyDir 卷](/zh/docs/concepts/storage/volumes/#emptydir)的存储消耗 而不是遍历文件系统,以此获得更好的性能和准确性。 -- `LogarithmicScaleDown`:启用Pod的半随机(semi-random)选择,控制器将根据 Pod 时间戳的对数桶按比例缩小去驱逐 Pod。 +- `LogarithmicScaleDown`:启用 Pod 的半随机(semi-random)选择,控制器将根据 Pod 时间戳的对数桶按比例缩小去驱逐 Pod。 +- `MemoryManager`: 允许基于 NUMA 拓扑为容器设置内存亲和性。 +- `MemoryQoS`: 使用 cgroup v2 内存控制器在 pod / 容器上启用内存保护和使用限制。 - `MixedProtocolLBService`:允许在同一 `LoadBalancer` 类型的 Service 实例中使用不同 的协议。 - `MountContainers`:允许使用主机上的工具容器作为卷挂载程序。 @@ -1082,6 +1256,9 @@ Each feature gate is designed for enabling/disabling a specific feature: - `NodeDisruptionExclusion`: Enable use of the node label `node.kubernetes.io/exclude-disruption` which prevents nodes from being evacuated during zone failures. - `NodeLease`: Enable the new Lease API to report node heartbeats, which could be used as a node health signal. +- `NodeSwap`: Enable the kubelet to allocate swap memory for Kubernetes workloads on a node. + Must be used with `KubeletConfiguration.failSwapOn` set to false. + For more details, please see [swap memory](/docs/concepts/architecture/nodes/#swap-memory) - `NonPreemptingPriority`: Enable `preemptionPolicy` field for PriorityClass and Pod. - `PVCProtection`: Enable the prevention of a PersistentVolumeClaim (PVC) from being deleted when it is still used by any Pod. 
@@ -1102,53 +1279,70 @@ Each feature gate is designed for enabling/disabling a specific feature: - `NodeDisruptionExclusion`:启用节点标签 `node.kubernetes.io/exclude-disruption`, 以防止在可用区发生故障期间驱逐节点。 - `NodeLease`:启用新的 Lease(租期)API 以报告节点心跳,可用作节点运行状况信号。 +- `NodeSwap`: 启用 kubelet 为节点上的 Kubernetes 工作负载分配交换内存的能力。 + 必须将 `KubeletConfiguration.failSwapOn` 设置为 false 的情况下才能使用。 + 更多详细信息,请参见 [交换内存](/zh/docs/concepts/architecture/nodes/#swap-memory)。 - `NonPreemptingPriority`:为 PriorityClass 和 Pod 启用 `preemptionPolicy` 选项。 - `PVCProtection`:启用防止仍被某 Pod 使用的 PVC 被删除的特性。 -- `PodDeletionCost`:启用[Pod 删除成本](/zh/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost)功能。 +- `PodDeletionCost`:启用 [Pod 删除成本](/zh/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost) 功能。 该功能使用户可以影响 ReplicaSet 的降序顺序。 -- `PersistentLocalVolumes`:允许在 Pod 中使用 `local(本地)`卷类型。 +- `PersistentLocalVolumes`:允许在 Pod 中使用 `local(本地)` 卷类型。 如果请求 `local` 卷,则必须指定 Pod 亲和性属性。 - `PodDisruptionBudget`:启用 [PodDisruptionBudget](/zh/docs/tasks/run-application/configure-pdb/) 特性。 -- `PodAffinityNamespaceSelector`:启用[Pod 亲和性名称空间选择器](/zh/docs/concepts/scheduling-eviction/assign-pod-node/#namespace-selector) - 和[CrossNamespacePodAffinity](/zh/docs/concepts/policy/resource-quotas/#cross-namespace-pod-affinity-quota)资源配额功能。 +- `PodAffinityNamespaceSelector`:启用 [Pod 亲和性名称空间选择器](/zh/docs/concepts/scheduling-eviction/assign-pod-node/#namespace-selector) + 和 [CrossNamespacePodAffinity](/zh/docs/concepts/policy/resource-quotas/#cross-namespace-pod-affinity-quota) 资源配额功能。 - `PodOverhead`:启用 [PodOverhead](/zh/docs/concepts/scheduling-eviction/pod-overhead/) 特性以考虑 Pod 开销。 -- `PodPriority`:根据[优先级](/zh/docs/concepts/configuration/pod-priority-preemption/) +- `PodPriority`:根据 [优先级](/zh/docs/concepts/scheduling-eviction/pod-priority-preemption/) 启用 Pod 的调度和抢占。 - `PodReadinessGates`:启用 `podReadinessGate` 字段的设置以扩展 Pod 准备状态评估。 有关更多详细信息,请参见 [Pod 就绪状态判别](/zh/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate)。 +- 
`PodSecurity`: 开启 `PodSecurity` 准入控制插件。 - `PodShareProcessNamespace`:在 Pod 中启用 `shareProcessNamespace` 的设置, 以便在 Pod 中运行的容器之间共享同一进程名字空间。更多详细信息,请参见 [在 Pod 中的容器间共享同一进程名字空间](/zh/docs/tasks/configure-pod-container/share-process-namespace/)。 +- `PreferNominatedNode`: 这个标志告诉调度器在循环遍历集群中的所有其他节点 + 之前,是否首先检查指定的节点。 - `ProbeTerminationGracePeriod`:在 Pod 上 启用 [设置探测器级别 `terminationGracePeriodSeconds`](/zh/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#probe-level-terminationgraceperiodseconds)。 有关更多信息,请参见 [enhancement proposal](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2238-liveness-probe-grace-period)。 - `ProcMountType`:允许容器通过设置 SecurityContext 的 `procMount` 字段来控制 对 proc 文件系统的挂载方式。 +- `ProxyTerminatingEndpoints`: 当 `ExternalTrafficPolicy=Local` 时, + 允许 kube-proxy 来处理终止过程中的端点。 - `QOSReserved`:允许在 QoS 级别进行资源预留,以防止处于较低 QoS 级别的 Pod 突发进入处于较高 QoS 级别的请求资源(目前仅适用于内存)。 +- `ReadWriteOncePod`: 允许使用 `ReadWriteOncePod` 访问模式的 PersistentVolume。 - `RemainingItemCount`:允许 API 服务器在 [分块列表请求](/zh/docs/reference/using-api/api-concepts/#retrieving-large-results-sets-in-chunks) 的响应中显示剩余条目的个数。 @@ -1209,6 +1403,10 @@ Each feature gate is designed for enabling/disabling a specific feature: - `SCTPSupport`:在 Pod、Service、Endpoints、NetworkPolicy 定义中 允许将 _SCTP_ 用作 `protocol` 值。 +- `SeccompDefault`: 允许将所有工作负载的默认 seccomp 配置文件为 `RuntimeDefault`。 + seccomp 配置在 Pod 或者容器的 `securityContext` 字段中指定。 +- `SelectorIndex`: 允许在 API 服务器 watch 的缓存中基于标签和字段的索引来加速 list 的操作。 - `ServerSideApply`:在 API 服务器上启用 [服务器端应用(SSA)](/zh/docs/reference/using-api/server-side-apply/) 。 - `ServiceAccountIssuerDiscovery`:在 API 服务器中为服务帐户颁发者启用 OIDC 发现端点 @@ -1263,6 +1464,8 @@ Each feature gate is designed for enabling/disabling a specific feature: - `StartupProbe`: Enable the [startup](/docs/concepts/workloads/pods/pod-lifecycle/#when-should-you-use-a-startup-probe) probe in the kubelet. 
+- `StatefulSetMinReadySeconds`: Allows `minReadySeconds` to be respected by + the StatefulSet controller. - `StorageObjectInUseProtection`: Postpone the deletion of PersistentVolume or PersistentVolumeClaim objects if they are still being used. - `StorageVersionAPI`: Enable the @@ -1275,6 +1478,7 @@ Each feature gate is designed for enabling/disabling a specific feature: --> - `StartupProbe`:在 kubelet 中启用 [启动探针](/zh/docs/concepts/workloads/pods/pod-lifecycle/#when-should-you-use-a-startup-probe)。 +- `StatefulSetMinReadySeconds`: 允许 StatefulSet 控制器采纳 `minReadySeconds` 设置。 - `StorageObjectInUseProtection`:如果仍在使用 PersistentVolume 或 PersistentVolumeClaim 对象,则将其删除操作推迟。 - `StorageVersionAPI`: 启用 @@ -1342,14 +1546,14 @@ Each feature gate is designed for enabling/disabling a specific feature: [`projected` 卷](/zh/docs/concepts/storage/volumes/#projected) 将服务帐户令牌注入到 Pod 中的特性。 - `TopologyAwareHints`: 在 EndpointSlices 中启用基于拓扑提示的拓扑感知路由。 - 更多详细信息可参见[Topology Aware Hints](/docs/concepts/services-networking/topology-aware-hints/) + 更多详细信息可参见[Topology Aware Hints](/zh/docs/concepts/services-networking/topology-aware-hints/) - `TopologyManager`:启用一种机制来协调 Kubernetes 不同组件的细粒度硬件资源分配。 详见[控制节点上的拓扑管理策略](/zh/docs/tasks/administer-cluster/topology-manager/)。 - `ValidateProxyRedirects`: 这个标志控制 API 服务器是否应该验证只跟随到相同的主机的重定向。 仅在启用 `StreamingProxyRedirects` 标志时被使用。 -- 'VolumeCapacityPriority`: 基于可用 PV 容量的拓扑,启用对不同节点的优先级支持。 +- `VolumeCapacityPriority`: 基于可用 PV 容量的拓扑,启用对不同节点的优先级支持。 - `VolumePVCDataSource`:启用对将现有 PVC 指定数据源的支持。 - `VolumeScheduling`:启用卷拓扑感知调度,并使 PersistentVolumeClaim(PVC) 绑定能够了解调度决策;当与 PersistentLocalVolumes 特性门控一起使用时, - 还允许使用 [`local`](/docs/concepts/storage/volumes/#local) 卷类型。 + 还允许使用 [`local`](/zh/docs/concepts/storage/volumes/#local) 卷类型。 - `VolumeSnapshotDataSource`:启用卷快照数据源支持。 - `VolumeSubpath`: 允许在容器中挂载卷的子路径。 - `VolumeSubpathEnvExpansion`:启用 `subPathExpr` 字段用于将环境变量在 `subPath` 中展开。 @@ -1391,13 +1596,14 @@ Each feature gate is designed for enabling/disabling a specific feature: - 
`WatchBookmark`:启用对 watch 操作中 bookmark 事件的支持。 - `WinDSR`:允许 kube-proxy 为 Windows 创建 DSR 负载均衡。 - `WinOverlay`:允许 kube-proxy 在 Windows 的覆盖网络模式下运行。 +- `WindowsEndpointSliceProxying`: 当启用时,运行在 Windows 上的 kube-proxy + 将使用 EndpointSlices 而不是 Endpoints 作为主要数据源,从而实现可伸缩性和并改进性能。 + 详情请参见[启用端点切片](/zh/docs/tasks/administer-cluster/enabling-endpointslices/). - `WindowsGMSA`:允许将 GMSA 凭据规范从 Pod 传递到容器运行时。 +- `WindowsHostProcessContainers`: 启用对 Windows HostProcess 容器的支持。 - `WindowsRunAsUserName`:提供使用非默认用户在 Windows 容器中运行应用程序的支持。 详情请参见 [配置 RunAsUserName](/zh/docs/tasks/configure-pod-container/configure-runasusername)。 -- `WindowsEndpointSliceProxying`:启用此特性门控后,Windows 上运行的 kube-proxy - 将使用 EndpointSlices 取代 Endpoints 作为主要数据源,进而提高扩展性和性能。参见 - [启用 EndpointSlice](/zh/docs/tasks/administer-cluster/enabling-endpointslices/)。 ## {{% heading "whatsnext" %}} diff --git a/content/zh/docs/reference/command-line-tools-reference/kube-apiserver.md b/content/zh/docs/reference/command-line-tools-reference/kube-apiserver.md index ac4d11b631..96d0229b43 100644 --- a/content/zh/docs/reference/command-line-tools-reference/kube-apiserver.md +++ b/content/zh/docs/reference/command-line-tools-reference/kube-apiserver.md @@ -96,7 +96,8 @@ the host's default interface will be used. The map from metric-label to value allow-list of this label. The key's format is <MetricName>,<LabelName>. The value's format is <allowed_value>,<allowed_value>...e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'. --> 允许使用的指标标签到指标值的映射列表。键的格式为 <MetricName>,<LabelName>. -值得格式为 <allowed_value>,<allowed_value>...。 例如:metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'。 +值的格式为 <allowed_value>,<allowed_value>...。 +例如:metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'

    @@ -2251,7 +2252,7 @@ are permanently removed in the release after that. --> 你要显示隐藏指标的先前版本。仅先前的次要版本有意义,不允许其他值。 格式为 <major>.<minor>,例如:"1.16"。 -这种格式的目的是确保您有机会注意到下一个版本是否隐藏了其他指标, +这种格式的目的是确保你有机会注意到下一个版本是否隐藏了其他指标, 而不是在此之后将它们从发行版中永久删除时感到惊讶。 diff --git a/content/zh/docs/reference/command-line-tools-reference/kube-controller-manager.md b/content/zh/docs/reference/command-line-tools-reference/kube-controller-manager.md index 0e456cff71..7cb8e9778d 100644 --- a/content/zh/docs/reference/command-line-tools-reference/kube-controller-manager.md +++ b/content/zh/docs/reference/command-line-tools-reference/kube-controller-manager.md @@ -342,7 +342,7 @@ The instance prefix for the cluster. Filename containing a PEM-encoded X509 CA certificate used to issue cluster-scoped certificates. If specified, no more specific --cluster-signing-* flag may be specified. --> 包含 PEM 编码格式的 X509 CA 证书的文件名。该证书用来发放集群范围的证书。 -如果设置了此标志,则不需要锦衣设置 --cluster-signing-* 标志。 +如果设置了此标志,则不能指定更具体的--cluster-signing-* 标志。 diff --git a/content/zh/docs/reference/command-line-tools-reference/kube-proxy.md b/content/zh/docs/reference/command-line-tools-reference/kube-proxy.md index 1c98ff35da..feeedae04c 100644 --- a/content/zh/docs/reference/command-line-tools-reference/kube-proxy.md +++ b/content/zh/docs/reference/command-line-tools-reference/kube-proxy.md @@ -4,17 +4,24 @@ content_type: tool-reference weight: 30 --- + + ## {{% heading "synopsis" %}} - - -Kubernetes 网络代理在每个节点上运行。网络代理反映了每个节点上 Kubernetes API 中定义的服务,并且可以执行简单的 TCP、UDP 和 SCTP 流转发,或者在一组后端进行循环 TCP、UDP 和 SCTP 转发。当前可通过 Docker-links-compatible 环境变量找到服务集群 IP 和端口,这些环境变量指定了服务代理打开的端口。有一个可选的插件,可以为这些集群 IP 提供集群 DNS。用户必须使用 apiserver API 创建服务才能配置代理。 +Kubernetes 网络代理在每个节点上运行。网络代理反映了每个节点上 Kubernetes API +中定义的服务,并且可以执行简单的 TCP、UDP 和 SCTP 流转发,或者在一组后端进行 +循环 TCP、UDP 和 SCTP 转发。 +当前可通过 Docker-links-compatible 环境变量找到服务集群 IP 和端口, +这些环境变量指定了服务代理打开的端口。 +有一个可选的插件,可以为这些集群 IP 提供集群 DNS。 +用户必须使用 apiserver API 创建服务才能配置代理。 ``` kube-proxy [flags] ``` - - ## {{% heading "options" %}} 
@@ -42,61 +53,92 @@ kube-proxy [flags] + +--add-dir-header + + +

    + +若此标志为 true,则将文件目录添加到日志消息的头部。 +

    + + + +--alsologtostderr + + +

    + +将日志输出到文件时也输出到标准错误输出(stderr)。 +

    + + --azure-container-registry-config string - +

    包含 Azure 容器仓库配置信息的文件的路径。 +

    - - ---bind-address 0.0.0.0     默认值: 0.0.0.0 - +--bind-address 0.0.0.0     默认值:0.0.0.0 - +

    -代理服务器要使用的 IP 地址(对于所有 IPv4 接口设置为 0.0.0.0,对于所有 IPv6 接口设置为 ::) +代理服务器要使用的 IP 地址(设置为 '0.0.0.0' 表示要使用所有 IPv4 接口; +设置为 '::' 表示使用所有 IPv6 接口)。 +

    + +--bind-address-hard-fail + + +

    + +若此标志为 true,kube-proxy 会将无法绑定端口的失败操作视为致命错误并退出。 +

    + + + +--boot-id-file string     默认值:"/proc/sys/kernel/random/boot_id" + + +

    + +用来检查 Boot-ID 的文件名,用逗号隔开。 +第一个存在的文件会被使用。 +

    + + --cleanup - +

    如果为 true,清理 iptables 和 ipvs 规则并退出。 - - - - - - ---cleanup-ipvs     默认值: true - - - - - -如果设置为 true 并指定了 --cleanup,则 kube-proxy 除了常规清理外,还将刷新 IPVS 规则。 +

    @@ -104,11 +146,14 @@ If true and --cleanup is specified, kube-proxy will also flush IPVS rules, in ad --cluster-cidr string - +

    -集群中 Pod 的 CIDR 范围。配置后,将从该范围之外发送到服务集群 IP 的流量被伪装,从 Pod 发送到外部 LoadBalancer IP 的流量将被重定向到相应的集群 IP。 +集群中 Pod 的 CIDR 范围。配置后,将从该范围之外发送到服务集群 IP +的流量被伪装,从 Pod 发送到外部 LoadBalancer IP 的流量将被重定向 +到相应的集群 IP。 +

    @@ -116,96 +161,80 @@ The CIDR range of pods in the cluster. When configured, traffic sent to a Servic --config string - +

    配置文件的路径。 +

    - - ---config-sync-period duration     默认值: 15m0s - +--config-sync-period duration     默认值:15m0s - +

    来自 apiserver 的配置的刷新频率。必须大于 0。 +

    - - ---conntrack-max-per-core int32     默认值: 32768 - +--conntrack-max-per-core int32     默认值:32768 - +

    -每个 CPU 核跟踪的最大 NAT 连接数(0 表示保留原样限制并忽略 conntrack-min)。 +每个 CPU 核跟踪的最大 NAT 连接数(0 表示保留当前限制并忽略 conntrack-min 设置)。 +

    - - ---conntrack-min int32     默认值: 131072 - +--conntrack-min int32     默认值:131072 - +

    -无论 conntrack-max-per-core 多少,要分配的 conntrack 条目的最小数量(将 conntrack-max-per-core 设置为 0 即可保持原样的限制)。 +无论 conntrack-max-per-core 多少,要分配的 conntrack +条目的最小数量(将 conntrack-max-per-core 设置为 0 即可 +保持当前的限制)。 +

    - - ---conntrack-tcp-timeout-close-wait duration     默认值: 1h0m0s +--conntrack-tcp-timeout-close-wait duration     默认值:1h0m0s - +

    -处于 CLOSE_WAIT 状态的 TCP 连接的 NAT 超时 +处于 CLOSE_WAIT 状态的 TCP 连接的 NAT 超时。 +

    - - ---conntrack-tcp-timeout-established duration     默认值: 24h0m0s - +--conntrack-tcp-timeout-established duration     默认值:24h0m0s - +

    -已建立的 TCP 连接的空闲超时(0 保持原样) +已建立的 TCP 连接的空闲超时(0 保持当前设置)。 +

    @@ -213,228 +242,233 @@ Idle timeout for established TCP connections (0 to leave as-is) --detect-local-mode LocalMode - +

    -用于检测本地流量的模式 +用于检测本地流量的模式。 +

    ---feature-gates mapStringBool +--feature-gates <逗号分隔的 'key=True|False' 对’> - +

    一组键=值(key=value)对,描述了 alpha/experimental 的特性。可选项有: -
    APIListChunking=true|false (BETA - 默认值=true) -
    APIPriorityAndFairness=true|false (ALPHA - 默认值=false) -
    APIResponseCompression=true|false (BETA - 默认值=true) -
    AllAlpha=true|false (ALPHA - 默认值=false) -
    AllBeta=true|false (BETA - 默认值=false) -
    AllowInsecureBackendProxy=true|false (BETA - 默认值=true) -
    AnyVolumeDataSource=true|false (ALPHA - 默认值=false) -
    AppArmor=true|false (BETA - 默认值=true) -
    BalanceAttachedNodeVolumes=true|false (ALPHA - 默认值=false) -
    BoundServiceAccountTokenVolume=true|false (ALPHA - 默认值=false) -
    CPUManager=true|false (BETA - 默认值=true) -
    CRIContainerLogRotation=true|false (BETA - 默认值=true) -
    CSIInlineVolume=true|false (BETA - 默认值=true) -
    CSIMigration=true|false (BETA - 默认值=true) -
    CSIMigrationAWS=true|false (BETA - 默认值=false) -
    CSIMigrationAWSComplete=true|false (ALPHA - 默认值=false) -
    CSIMigrationAzureDisk=true|false (BETA - 默认值=false) -
    CSIMigrationAzureDiskComplete=true|false (ALPHA - 默认值=false) -
    CSIMigrationAzureFile=true|false (ALPHA - 默认值=false) -
    CSIMigrationAzureFileComplete=true|false (ALPHA - 默认值=false) -
    CSIMigrationGCE=true|false (BETA - 默认值=false) -
    CSIMigrationGCEComplete=true|false (ALPHA - 默认值=false) -
    CSIMigrationOpenStack=true|false (BETA - 默认值=false) -
    CSIMigrationOpenStackComplete=true|false (ALPHA - 默认值=false) -
    CSIMigrationvSphere=true|false (BETA - 默认值=false) -
    CSIMigrationvSphereComplete=true|false (BETA - 默认值=false) -
    CSIStorageCapacity=true|false (ALPHA - 默认值=false) -
    CSIVolumeFSGroupPolicy=true|false (ALPHA - 默认值=false) -
    ConfigurableFSGroupPolicy=true|false (ALPHA - 默认值=false) -
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - 默认值=false) -
    DefaultPodTopologySpread=true|false (ALPHA - 默认值=false) -
    DevicePlugins=true|false (BETA - 默认值=true) -
    DisableAcceleratorUsageMetrics=true|false (ALPHA - 默认值=false) -
    DynamicKubeletConfig=true|false (BETA - 默认值=true) -
    EndpointSlice=true|false (BETA - 默认值=true) -
    EndpointSliceProxying=true|false (BETA - 默认值=true) -
    EphemeralContainers=true|false (ALPHA - 默认值=false) -
    ExpandCSIVolumes=true|false (BETA - 默认值=true) -
    ExpandInUsePersistentVolumes=true|false (BETA - 默认值=true) -
    ExpandPersistentVolumes=true|false (BETA - 默认值=true) -
    ExperimentalHostUserNamespace默认值ing=true|false (BETA - 默认值=false) -
    GenericEphemeralVolume=true|false (ALPHA - 默认值=false) -
    HPAScaleToZero=true|false (ALPHA - 默认值=false) -
    HugePageStorageMediumSize=true|false (BETA - 默认值=true) -
    HyperVContainer=true|false (ALPHA - 默认值=false) -
    IPv6DualStack=true|false (ALPHA - 默认值=false) -
    ImmutableEphemeralVolumes=true|false (BETA - 默认值=true) -
    KubeletPodResources=true|false (BETA - 默认值=true) -
    LegacyNodeRoleBehavior=true|false (BETA - 默认值=true) -
    LocalStorageCapacityIsolation=true|false (BETA - 默认值=true) -
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - 默认值=false) -
    NodeDisruptionExclusion=true|false (BETA - 默认值=true) -
    NonPreemptingPriority=true|false (BETA - 默认值=true) -
    PodDisruptionBudget=true|false (BETA - 默认值=true) -
    PodOverhead=true|false (BETA - 默认值=true) -
    ProcMountType=true|false (ALPHA - 默认值=false) -
    QOSReserved=true|false (ALPHA - 默认值=false) -
    RemainingItemCount=true|false (BETA - 默认值=true) -
    RemoveSelfLink=true|false (ALPHA - 默认值=false) -
    RotateKubeletServerCertificate=true|false (BETA - 默认值=true) -
    RunAsGroup=true|false (BETA - 默认值=true) -
    RuntimeClass=true|false (BETA - 默认值=true) -
    SCTPSupport=true|false (BETA - 默认值=true) -
    SelectorIndex=true|false (BETA - 默认值=true) -
    ServerSideApply=true|false (BETA - 默认值=true) -
    ServiceAccountIssuerDiscovery=true|false (ALPHA - 默认值=false) -
    ServiceAppProtocol=true|false (BETA - 默认值=true) -
    ServiceNodeExclusion=true|false (BETA - 默认值=true) -
    ServiceTopology=true|false (ALPHA - 默认值=false) -
    SetHostnameAsFQDN=true|false (ALPHA - 默认值=false) -
    StartupProbe=true|false (BETA - 默认值=true) -
    StorageVersionHash=true|false (BETA - 默认值=true) -
    SupportNodePidsLimit=true|false (BETA - 默认值=true) -
    SupportPodPidsLimit=true|false (BETA - 默认值=true) -
    Sysctls=true|false (BETA - 默认值=true) -
    TTLAfterFinished=true|false (ALPHA - 默认值=false) -
    TokenRequest=true|false (BETA - 默认值=true) -
    TokenRequestProjection=true|false (BETA - 默认值=true) -
    TopologyManager=true|false (BETA - 默认值=true) -
    ValidateProxyRedirects=true|false (BETA - 默认值=true) -
    VolumeSnapshotDataSource=true|false (BETA - 默认值=true) -
    WarningHeaders=true|false (BETA - 默认值=true) -
    WinDSR=true|false (ALPHA - 默认值=false) -
    WinOverlay=true|false (ALPHA - 默认值=false) -
    WindowsEndpointSliceProxying=true|false (ALPHA - 默认值=false) +APIListChunking=true|false (BETA - 默认值=true)
    +APIPriorityAndFairness=true|false (BETA - 默认值=true)
    +APIResponseCompression=true|false (BETA - 默认值=true)
    +APIServerIdentity=true|false (ALPHA - 默认值=false)
    +AllAlpha=true|false (ALPHA - 默认值=false)
    +AllBeta=true|false (BETA - 默认值=false)
    +AnyVolumeDataSource=true|false (ALPHA - 默认值=false)
    +AppArmor=true|false (BETA - 默认值=true)
    +BalanceAttachedNodeVolumes=true|false (ALPHA - 默认值=false)
    +BoundServiceAccountTokenVolume=true|false (BETA - 默认值=true)
    +CPUManager=true|false (BETA - 默认值=true)
    +CSIInlineVolume=true|false (BETA - 默认值=true)
    +CSIMigration=true|false (BETA - 默认值=true)
    +CSIMigrationAWS=true|false (BETA - 默认值=false)
    +CSIMigrationAzureDisk=true|false (BETA - 默认值=false)
    +CSIMigrationAzureFile=true|false (BETA - 默认值=false)
    +CSIMigrationGCE=true|false (BETA - 默认值=false)
    +CSIMigrationOpenStack=true|false (BETA - 默认值=true)
    +CSIMigrationvSphere=true|false (BETA - 默认值=false)
    +CSIMigrationvSphereComplete=true|false (BETA - 默认值=false)
    +CSIServiceAccountToken=true|false (BETA - 默认值=true)
    +CSIStorageCapacity=true|false (BETA - 默认值=true)
    +CSIVolumeFSGroupPolicy=true|false (BETA - 默认值=true)
    +CSIVolumeHealth=true|false (ALPHA - 默认值=false)
    +ConfigurableFSGroupPolicy=true|false (BETA - 默认值=true)
    +ControllerManagerLeaderMigration=true|false (ALPHA - 默认值=false)
    +CronJobControllerV2=true|false (BETA - 默认值=true)
    +CustomCPUCFSQuotaPeriod=true|false (ALPHA - 默认值=false)
    +DaemonSetUpdateSurge=true|false (ALPHA - 默认值=false)
    +DefaultPodTopologySpread=true|false (BETA - 默认值=true)
    +DevicePlugins=true|false (BETA - 默认值=true)
    +DisableAcceleratorUsageMetrics=true|false (BETA - 默认值=true)
    +DownwardAPIHugePages=true|false (BETA - 默认值=false)
    +DynamicKubeletConfig=true|false (BETA - 默认值=true)
    +EfficientWatchResumption=true|false (BETA - 默认值=true)
    +EndpointSliceProxying=true|false (BETA - 默认值=true)
    +EndpointSliceTerminatingCondition=true|false (ALPHA - 默认值=false)
    +EphemeralContainers=true|false (ALPHA - 默认值=false)
    +ExpandCSIVolumes=true|false (BETA - 默认值=true)
    +ExpandInUsePersistentVolumes=true|false (BETA - 默认值=true)
    +ExpandPersistentVolumes=true|false (BETA - 默认值=true)
    +ExperimentalHostUserNamespaceDefaulting=true|false (BETA - 默认值=false)
    +GenericEphemeralVolume=true|false (BETA - 默认值=true)
    +GracefulNodeShutdown=true|false (BETA - 默认值=true)
    +HPAContainerMetrics=true|false (ALPHA - 默认值=false)
    +HPAScaleToZero=true|false (ALPHA - 默认值=false)
    +HugePageStorageMediumSize=true|false (BETA - 默认值=true)
    +IPv6DualStack=true|false (BETA - 默认值=true)
    +InTreePluginAWSUnregister=true|false (ALPHA - 默认值=false)
    +InTreePluginAzureDiskUnregister=true|false (ALPHA - 默认值=false)
    +InTreePluginAzureFileUnregister=true|false (ALPHA - 默认值=false)
    +InTreePluginGCEUnregister=true|false (ALPHA - 默认值=false)
    +InTreePluginOpenStackUnregister=true|false (ALPHA - 默认值=false)
    +InTreePluginvSphereUnregister=true|false (ALPHA - 默认值=false)
    +IndexedJob=true|false (ALPHA - 默认值=false)
    +IngressClassNamespacedParams=true|false (ALPHA - 默认值=false)
    +KubeletCredentialProviders=true|false (ALPHA - 默认值=false)
    +KubeletPodResources=true|false (BETA - 默认值=true)
    +KubeletPodResourcesGetAllocatable=true|false (ALPHA - 默认值=false)
    +LocalStorageCapacityIsolation=true|false (BETA - 默认值=true)
    +LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - 默认值=false)
    +LogarithmicScaleDown=true|false (ALPHA - 默认值=false)
    +MemoryManager=true|false (ALPHA - 默认值=false)
    +MixedProtocolLBService=true|false (ALPHA - 默认值=false)
    +NamespaceDefaultLabelName=true|false (BETA - 默认值=true)
    +NetworkPolicyEndPort=true|false (ALPHA - 默认值=false)
    +NonPreemptingPriority=true|false (BETA - 默认值=true)
    +PodAffinityNamespaceSelector=true|false (ALPHA - 默认值=false)
    +PodDeletionCost=true|false (ALPHA - 默认值=false)
    +PodOverhead=true|false (BETA - 默认值=true)
    +PreferNominatedNode=true|false (ALPHA - 默认值=false)
    +ProbeTerminationGracePeriod=true|false (ALPHA - 默认值=false)
    +ProcMountType=true|false (ALPHA - 默认值=false)
    +QOSReserved=true|false (ALPHA - 默认值=false)
    +RemainingItemCount=true|false (BETA - 默认值=true)
    +RemoveSelfLink=true|false (BETA - 默认值=true)
    +RotateKubeletServerCertificate=true|false (BETA - 默认值=true)
    +ServerSideApply=true|false (BETA - 默认值=true)
    +ServiceInternalTrafficPolicy=true|false (ALPHA - 默认值=false)
    +ServiceLBNodePortControl=true|false (ALPHA - 默认值=false)
    +ServiceLoadBalancerClass=true|false (ALPHA - 默认值=false)
    +ServiceTopology=true|false (ALPHA - 默认值=false)
    +SetHostnameAsFQDN=true|false (BETA - 默认值=true)
    +SizeMemoryBackedVolumes=true|false (ALPHA - 默认值=false)
    +StorageVersionAPI=true|false (ALPHA - 默认值=false)
    +StorageVersionHash=true|false (BETA - 默认值=true)
    +SuspendJob=true|false (ALPHA - 默认值=false)
    +TTLAfterFinished=true|false (BETA - 默认值=true)
    +TopologyAwareHints=true|false (ALPHA - 默认值=false)
    +TopologyManager=true|false (BETA - 默认值=true)
    +ValidateProxyRedirects=true|false (BETA - 默认值=true)
    +VolumeCapacityPriority=true|false (ALPHA - 默认值=false)
    +WarningHeaders=true|false (BETA - 默认值=true)
    +WinDSR=true|false (ALPHA - 默认值=false)
    +WinOverlay=true|false (BETA - 默认值=true)
    +WindowsEndpointSliceProxying=true|false (BETA - 默认值=true) +

    - - ---healthz-bind-address 0.0.0.0     默认值: 0.0.0.0:10256 - +--healthz-bind-address 0.0.0.0     默认值:0.0.0.0:10256 - +

    -服务健康检查的 IP 地址和端口(对于所有 IPv4 接口设置为 '0.0.0.0:10256',对于所有 IPv6 接口设置为 '[::]:10256') +服务健康状态检查的 IP 地址和端口(设置为 '0.0.0.0:10256' 表示使用所有 +IPv4 接口,设置为 '[::]:10256' 表示使用所有 IPv6 接口); 设置为空则禁用。 - - - - - - ---healthz-bind-address 0.0.0.0     默认值: 0.0.0.0:10256 - - - - - -服务健康检查的 IP 地址和端口(设置为 0.0.0.0 表示使用所有 IPv4 接口,设置为 :: 表示使用所有 IPv6 接口) +

    @@ -442,11 +476,12 @@ The IP address for the health check server to serve on (set to 0.0.0.0 for all I -h, --help - +

    -kube-proxy 操作的帮助命令 +kube-proxy 操作的帮助命令。 +

    @@ -454,74 +489,65 @@ kube-proxy 操作的帮助命令 --hostname-override string - +

    -如果非空,将使用此字符串作为标识而不是实际的主机名。 +如果非空,将使用此字符串而不是实际的主机名作为标识。 +

    - - ---iptables-masquerade-bit int32     默认值: 14 - +--iptables-masquerade-bit int32     默认值:14 - +

    -如果使用纯 iptables 代理,则 fwmark 空间的 bit 用于标记需要 SNAT 的数据包。必须在 [0,31] 范围内。 +在使用纯 iptables 代理时,用来设置 fwmark 空间的 bit,标记需要 +SNAT 的数据包。必须在 [0,31] 范围内。 +

    - - - --iptables-min-sync-period duration     默认值:1s - +--iptables-min-sync-period duration     默认值:1s - +

    iptables 规则可以随着端点和服务的更改而刷新的最小间隔(例如 '5s'、'1m'、'2h22m')。 +

    - - ---iptables-sync-period duration     默认值: 30s - +--iptables-sync-period duration     默认值:30s - +

    刷新 iptables 规则的最大间隔(例如 '5s'、'1m'、'2h22m')。必须大于 0。 +

    ---ipvs-exclude-cidrs stringSlice +--ipvs-exclude-cidrs strings - +

    -逗号分隔的 CIDR 列表,ipvs 代理在清理 IPVS 规则时不应使用此列表。 +逗号分隔的 CIDR 列表,ipvs 代理在清理 IPVS 规则时不会改动此列表中的地址范围。 +

    @@ -529,11 +555,12 @@ A comma-separated list of CIDR's which the ipvs proxier should not touch when cl --ipvs-min-sync-period duration - +

    ipvs 规则可以随着端点和服务的更改而刷新的最小间隔(例如 '5s'、'1m'、'2h22m')。 +

    @@ -541,11 +568,12 @@ ipvs 规则可以随着端点和服务的更改而刷新的最小间隔(例如 --ipvs-scheduler string - +

    -代理模式为 ipvs 时的 ipvs 调度器类型 +代理模式为 ipvs 时所选的 ipvs 调度器类型。 +

    @@ -553,28 +581,26 @@ The ipvs scheduler type when proxy mode is ipvs --ipvs-strict-arp - +

    -通过将 arp_ignore 设置为 1 并将 arp_announce 设置为 2 启用严格的 ARP +通过将 arp_ignore 设置为 1 并将 arp_announce +设置为 2 启用严格的 ARP。 +

    - - ---ipvs-sync-period duration     默认值: 30s - +--ipvs-sync-period duration     默认值:30s - +

    刷新 ipvs 规则的最大间隔(例如 '5s'、'1m'、'2h22m')。必须大于 0。 +

    @@ -583,11 +609,12 @@ The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h --ipvs-tcp-timeout duration - +

    空闲 IPVS TCP 连接的超时时间,0 保持连接(例如 '5s'、'1m'、'2h22m')。 +

    @@ -595,11 +622,12 @@ The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', ' --ipvs-tcpfin-timeout duration - +

    -收到 FIN 数据包后,IPVS TCP 连接的超时,0 保持连接不变(例如 '5s'、'1m'、'2h22m')。 +收到 FIN 数据包后,IPVS TCP 连接的超时,0 保持当前设置不变。(例如 '5s'、'1m'、'2h22m')。 +

    @@ -607,62 +635,51 @@ The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as --ipvs-udp-timeout duration - +

    -IPVS UDP 数据包的超时,0 保持连接不动(例如 '5s'、'1m'、'2h22m')。 +IPVS UDP 数据包的超时,0 保持当前设置不变。(例如 '5s'、'1m'、'2h22m')。 +

    - - ---kube-api-burst int32     默认值: 10 - +--kube-api-burst int32     默认值:10 - +

    -与 kubernetes apiserver 通信的数量 +与 kubernetes apiserver 通信的突发数量。 +

    - - ---kube-api-content-type string     默认值: "application/vnd.kubernetes.protobuf" - +--kube-api-content-type string     默认值:"application/vnd.kubernetes.protobuf" - +

    发送到 apiserver 的请求的内容类型。 +

    - - ---kube-api-qps float32     默认值: 5 - +--kube-api-qps float32     默认值:5 - +

    -与 kubernetes apiserver 交互时使用的 QPS +与 kubernetes apiserver 交互时使用的 QPS。 +

    @@ -670,20 +687,69 @@ QPS to use while talking with kubernetes apiserver --kubeconfig string - +

    -包含授权信息的 kubeconfig 文件的路径(master 位置由 master 标志设置)。 +包含鉴权信息的 kubeconfig 文件的路径(主控节点位置由 master 标志设置)。 +

    - +--log-backtrace-at <形式为 'file:N' 的字符串>     Default: :0 + + +

    ---log-flush-frequency duration     默认值: 5s +当日志逻辑执行到文件 file 的第 N 行时,输出调用堆栈跟踪。 +

    + + + + +--log-dir string + + +

    + +若此标志非空,则将日志文件写入到此标志所给的目录下。 +

    + + + + +--log-file string + + +

    + +若此标志非空,则该字符串作为日志文件名。 +

    + + + +--log-file-max-size uint     默认值:1800 + + +

    + +定义日志文件可增长到的最大尺寸。单位是兆字节(MB)。 +如果此值为 0,则最大文件大小无限制。 +

    + + + + +--log-flush-frequency duration     默认值:5s @@ -691,19 +757,34 @@ Path to kubeconfig file with authorization information (the master location is s -两次日志刷新之间的最大秒数 +两次日志刷新之间的最大秒数。 + +--machine-id-file string     默认值:"/etc/machine-id,/var/lib/dbus/machine-id" + + +

    + +用来检查 Machine-ID 的文件列表,用逗号分隔。 +使用找到的第一个文件。 +

    + + --masquerade-all - +

    -如果使用纯 iptables 代理,则对通过服务集群 IP 发送的所有流量进行 SNAT(通常不需要) +如果使用纯 iptables 代理,则对通过服务集群 IP 发送的所有流量 +进行 SNAT(通常不需要)。 +

    @@ -711,78 +792,70 @@ If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs --master string - +

    -Kubernetes API 服务器的地址(覆盖 kubeconfig 中的任何值) +Kubernetes API 服务器的地址(覆盖 kubeconfig 中的相关值)。 +

    - - ---metrics-bind-address ipport 0.0.0.0     默认值: 127.0.0.1:10249 - +--metrics-bind-address ipport     默认值:127.0.0.1:10249 - +

    metrics 服务器要使用的 IP 地址和端口 -(设置为 '0.0.0.0:10249' 则使用 IPv4 接口,设置为 '[::]:10249' 则使用所有 IPv6 接口) +(设置为 '0.0.0.0:10249' 则使用所有 IPv4 接口,设置为 '[::]:10249' 则使用所有 IPv6 接口) 设置为空则禁用。 +

    - - ---metrics-port int32     默认值: 10249 - +--nodeport-addresses strings - - -绑定 metrics 服务器的端口。使用 0 表示禁用。 - - - - ---nodeport-addresses stringSlice - - - +

    -一个字符串值,指定用于 NodePorts 的地址。值可以是有效的 IP 块(例如 1.2.3.0/24, 1.2.3.4/32)。默认的空字符串切片([])表示使用所有本地地址。 +一个字符串值,指定用于 NodePort 服务的地址。 +值可以是有效的 IP 块(例如 1.2.3.0/24, 1.2.3.4/32)。 +默认的空字符串切片([])表示使用所有本地地址。 +

    - - ---oom-score-adj int32     默认值: -999 - +--one-output - +

    + +若此标志为 true,则仅将日志写入到其原本的严重性级别 +(而不是同时将其写入到所有更低严重性级别中)。 +

    + + + +--oom-score-adj int32     默认值:-999 + + +

    -kube-proxy 进程中的 oom-score-adj 值必须在 [-1000,1000] 范围内 +kube-proxy 进程中的 oom-score-adj 值,必须在 [-1000,1000] 范围内。 +

    @@ -790,23 +863,28 @@ kube-proxy 进程中的 oom-score-adj 值必须在 [-1000,1000] 范围内 --profiling - +

    -如果为 true,则通过 Web 接口 /debug/pprof 启用性能分析。 +如果为 true,则通过 Web 接口 /debug/pprof 启用性能分析。 +

    ---proxy-mode ProxyMode +--proxy-mode string - +

    -使用哪种代理模式:'userspace'(较旧)或 'iptables'(较快)或 'ipvs'(实验)。如果为空,使用最佳可用代理(当前为 iptables)。如果选择了 iptables 代理,无论如何,但系统的内核或 iptables 版本较低,这总是会回退到用户空间代理。 +使用哪种代理模式:'userspace'(较旧)或 'iptables'(较快)或 'ipvs'。 +如果为空,使用最佳可用代理(当前为 iptables)。 +如果选择了 iptables 代理(无论是否为显式设置),但系统的内核或 +iptables 版本较低,总是会回退到 userspace 代理。 +

    @@ -814,11 +892,14 @@ Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. I --proxy-port-range port-range - +

    -可以使用代理服务流量的主机端口(包括 beginPort-endPort、single port、beginPort+offset)的范围。如果(未指定,0 或 0-0)则随机选择端口。 +可以用来代理服务流量的主机端口范围(包括'起始端口-结束端口'、 +'单个端口'、'起始端口+偏移'几种形式)。 +如果未指定或者设置为 0(或 0-0),则随机选择端口。 +

    @@ -826,56 +907,117 @@ Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusi --show-hidden-metrics-for-version string - +

    -你要显示隐藏指标的先前版本。 +要显示隐藏指标的先前版本。 仅先前的次要版本有意义,不允许其他值。 格式为 <major>.<minor> ,例如:'1.16'。 这种格式的目的是确保你有机会注意到下一个发行版是否隐藏了其他指标, 而不是在之后将其永久删除时感到惊讶。 +

    - - ---udp-timeout duration     默认值: 250ms - +--skip-headers - +

    + +若此标志为 true,则避免在日志消息中包含头部前缀。 +

    + + + +--skip-log-headers + + +

    + +如果此标志为 true,则避免在打开日志文件时使用头部。 +

    + + + +--stderrthreshold int     默认值:2 + + +

    + +如果日志消息处于或者高于此阈值所设置的级别,则将其输出到标准错误输出(stderr)。 +

    + + + +--udp-timeout duration     默认值:250ms + + +

    -空闲 UDP 连接将保持打开的时长(例如 '250ms','2s')。必须大于 0。仅适用于 proxy-mode=userspace +空闲 UDP 连接将保持打开的时长(例如 '250ms','2s')。必须大于 0。 +仅适用于 proxy-mode=userspace。 +

    + +-v, --v int + + +

    + +用来设置日志详细程度的数值。 +

    + + --version version[=true] - +

    -打印版本信息并退出 +打印版本信息并退出。 +

    + +--vmodule &lt;逗号分隔的 'pattern=N' 设置&gt; + + +

    + +用逗号分隔的列表,其中每一项为 'pattern=N' 格式。 +用来支持基于文件过滤的日志机制。 +

    + + --write-config-to string - +

    -如果设置,将配置值写入此文件并退出。 +如果设置,将默认配置信息写入此文件并退出。 +

    diff --git a/content/zh/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md b/content/zh/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md index 60f3772c07..4151e1bc99 100644 --- a/content/zh/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md +++ b/content/zh/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md @@ -299,8 +299,7 @@ A kubelet authenticating using bootstrap tokens is authenticated as a user in th @@ -354,7 +353,7 @@ If you want to use bootstrap tokens, you must enable it on kube-apiserver with t 如果你希望使用启动引导令牌,你必须在 kube-apiserver 上使用下面的标志启用之: -``` +```console --enable-bootstrap-token-auth=true ``` @@ -373,7 +372,7 @@ kube-apiserver 能够将令牌视作身份认证依据。 至少 128 位混沌数据。这里的随机数生成器可以是现代 Linux 系统上的 `/dev/urandom`。生成令牌的方式有很多种。例如: -``` +```shell head -c 16 /dev/urandom | od -An -t x | tr -d ' ' ``` @@ -388,7 +387,7 @@ values can be anything and the quoted group name should be as depicted: 令牌文件看起来是下面的例子这样,其中前面三个值可以是任何值,用引号括起来 的组名称则只能用例子中给的值。 -``` +```console 02b50b05283e98dd0fd71db496ef01e8,kubelet-bootstrap,10001,"system:bootstrappers" ``` @@ -406,9 +405,13 @@ further details. 
### 授权 kubelet 创建 CSR {#authorize-kubelet-to-create-csr} @@ -420,7 +423,7 @@ To do this, you just need to create a `ClusterRoleBinding` that binds the `syste 为了实现这一点,你只需要创建 `ClusterRoleBinding`,将 `system:bootstrappers` 组绑定到集群角色 `system:node-bootstrapper`。 -``` +```yaml # 允许启动引导节点创建 CSR apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -495,7 +498,7 @@ kubelet 身份认证,很重要的一点是为控制器管理器所提供的 CA 要将 Kubernetes CA 密钥和证书提供给 kube-controller-manager,可使用以下标志: -``` +```shell --cluster-signing-cert-file="/etc/path/to/kubernetes/ca/ca.crt" --cluster-signing-key-file="/etc/path/to/kubernetes/ca/ca.key" ``` @@ -504,7 +507,7 @@ For example: --> 例如: -``` +```shell --cluster-signing-cert-file="/var/lib/kubernetes/ca.pem" --cluster-signing-key-file="/var/lib/kubernetes/ca-key.pem" ``` @@ -513,7 +516,7 @@ The validity duration of signed certificates can be configured with flag: --> 所签名的证书的合法期限可以通过下面的标志来配置: -``` +```shell --cluster-signing-duration ``` @@ -602,7 +605,7 @@ collection. --> 作为 [kube-controller-manager](/zh/docs/reference/generated/kube-controller-manager/) 的一部分的 `csrapproving` 控制器是自动被启用的。 -该控制器使用 [`SubjectAccessReview` API](/docs/reference/access-authn-authz/authorization/#checking-api-access) +该控制器使用 [`SubjectAccessReview` API](/zh/docs/reference/access-authn-authz/authorization/#checking-api-access) 来确定是否某给定用户被授权请求 CSR,之后基于鉴权结果执行批复操作。 为了避免与其它批复组件发生冲突,内置的批复组件不会显式地拒绝任何 CSRs。 该组件仅是忽略未被授权的请求。 @@ -682,7 +685,7 @@ The important elements to note are: diff --git a/content/zh/docs/reference/command-line-tools-reference/kubelet.md b/content/zh/docs/reference/command-line-tools-reference/kubelet.md index 6d7d2e757a..f542978d01 100644 --- a/content/zh/docs/reference/command-line-tools-reference/kubelet.md +++ b/content/zh/docs/reference/command-line-tools-reference/kubelet.md @@ -8,13 +8,15 @@ weight: 28 kubelet 是在每个 Node 节点上运行的主要 “节点代理”。它可以使用以下之一向 apiserver 注册: 主机名(hostname);覆盖主机名的参数;某云驱动的特定逻辑。 kubelet 是基于 PodSpec 来工作的。每个 PodSpec 是一个描述 Pod 的 YAML 或 JSON 对象。 kubelet 
接受通过各种机制(主要是通过 apiserver)提供的一组 PodSpec,并确保这些 @@ -431,9 +433,9 @@ kubelet 将从此标志所指的文件中加载其初始配置。此路径可以 -<警告:beta 特性> 设置容器的日志文件个数上限。此值必须不小于 2。 +设置容器的日志文件个数上限。此值必须不小于 2。 此标志只能与 --container-runtime=remote 标志一起使用。 已弃用:应在 --config 所给的配置文件中进行设置。 (进一步了解) @@ -446,10 +448,9 @@ kubelet 将从此标志所指的文件中加载其初始配置。此路径可以 -<警告:beta 特性> 设置容器日志文件在轮换生成新文件时之前的最大值 -(例如,10Mi)。 +设置容器日志文件在轮换生成新文件时之前的最大值(例如,10Mi)。 此标志只能与 --container-runtime=remote 标志一起使用。 已弃用:应在 --config 所给的配置文件中进行设置。 (进一步了解) @@ -892,7 +893,6 @@ AppArmor=true|false (BETA - default=true)
    BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
    BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
    CPUManager=true|false (BETA - default=true)
    -CRIContainerLogRotation=true|false (BETA - default=true)
    CSIInlineVolume=true|false (BETA - default=true)
    CSIMigration=true|false (BETA - default=true)
    CSIMigrationAWS=true|false (BETA - default=false)
    @@ -984,7 +984,6 @@ AppArmor=true|false (BETA - 默认值为 true)
    BalanceAttachedNodeVolumes=true|false (ALPHA - 默认值为 false)
    BoundServiceAccountTokenVolume=true|false (ALPHA - 默认值为 false)
    CPUManager=true|false (BETA - 默认值为 true)
    -CRIContainerLogRotation=true|false (BETA - 默认值为 true)
    CSIInlineVolume=true|false (BETA - 默认值为 true)
    CSIMigration=true|false (BETA - 默认值为 true)
    CSIMigrationAWS=true|false (BETA - 默认值为 false)
    @@ -1814,10 +1813,12 @@ The CIDR to use for pod IP addresses, only used in standalone mode. In cluster m -指定基础设施镜像,Pod 内所有容器与其共享网络和 IPC 命名空间。 -仅当容器运行环境设置为 docker 时,此特定于 docker 的参数才有效。 +所指定的镜像不会被镜像垃圾收集器删除。 +当容器运行环境设置为 docker 时,各个 Pod 中的所有容器都会 +使用此镜像中的网络和 IPC 名字空间。 +其他 CRI 实现有自己的配置来设置此镜像。 diff --git a/content/zh/docs/reference/glossary/cloud-controller-manager.md b/content/zh/docs/reference/glossary/cloud-controller-manager.md index e2f10617e2..3d33b8c7fe 100644 --- a/content/zh/docs/reference/glossary/cloud-controller-manager.md +++ b/content/zh/docs/reference/glossary/cloud-controller-manager.md @@ -2,9 +2,9 @@ title: 云控制器管理器(Cloud Controller Manager) id: cloud-controller-manager date: 2018-04-12 -full_link: /zh/docs/tasks/administer-cluster/running-cloud-controller/ +full_link: /zh/docs/concepts/architecture/cloud-controller/ short_description: > - 云控制器管理器是 1.8 的 alpha 特性。在未来发布的版本中,这是将 Kubernetes 与任何其他云集成的最佳方式。 + 将 Kubernetes 与第三方云提供商进行集成的控制面组件。 aka: tags: @@ -12,36 +12,31 @@ tags: - architecture - operation --- - - - 云控制器管理器是指嵌入特定云的控制逻辑的 {{< glossary_tooltip text="控制平面" term_id="control-plane" >}}组件。 -云控制器管理器允许您链接聚合到云提供商的应用编程接口中, -并分离出相互作用的组件与您的集群交互的组件。 +云控制器管理器使得你可以将你的集群连接到云提供商的 API 之上, +并将与该云平台交互的组件同与你的集群交互的组件分离开来。 @@ -51,4 +46,5 @@ infrastructure, the cloud-controller-manager component enables cloud providers t features at a different pace compared to the main Kubernetes project. 
--> 通过分离 Kubernetes 和底层云基础设置之间的互操作性逻辑, -云控制器管理器组件使云提供商能够以不同于 Kubernetes 主项目的速度进行发布新特征。 \ No newline at end of file +云控制器管理器组件使云提供商能够以不同于 Kubernetes 主项目的 +步调发布新特征。 diff --git a/content/zh/docs/reference/glossary/cloud-provider.md b/content/zh/docs/reference/glossary/cloud-provider.md old mode 100755 new mode 100644 diff --git a/content/zh/docs/reference/glossary/disruption.md b/content/zh/docs/reference/glossary/disruption.md index 29c7e1ebe6..2b59797a39 100644 --- a/content/zh/docs/reference/glossary/disruption.md +++ b/content/zh/docs/reference/glossary/disruption.md @@ -42,6 +42,6 @@ Kubernetes terms that an _involuntary disruption_. See [Disruptions](/docs/concepts/workloads/pods/disruptions/) for more information. --> 如果您作为一个集群操作人员,销毁了一个从属于某个应用的 Pod, Kubernetes 视之为 _自愿干扰(Voluntary Disruption)_。如果由于节点故障 -或者影响更大区域故障的断电导致 Pod 离线,Kubrenetes 视之为 _非愿干扰(Involuntary Disruption)_。 +或者影响更大区域故障的断电导致 Pod 离线,kubernetes 视之为 _非愿干扰(Involuntary Disruption)_。 更多信息请查阅[Disruptions](/zh/docs/concepts/workloads/pods/disruptions/) \ No newline at end of file diff --git a/content/zh/docs/reference/glossary/index.md b/content/zh/docs/reference/glossary/index.md index d5d593d062..2c265c92a5 100644 --- a/content/zh/docs/reference/glossary/index.md +++ b/content/zh/docs/reference/glossary/index.md @@ -1,5 +1,5 @@ --- -title: 标准化词汇表 +title: 词汇表 layout: glossary noedit: true default_active_tag: fundamental diff --git a/content/zh/docs/reference/glossary/kube-controller-manager.md b/content/zh/docs/reference/glossary/kube-controller-manager.md index 5d18857fde..43aa192d17 100644 --- a/content/zh/docs/reference/glossary/kube-controller-manager.md +++ b/content/zh/docs/reference/glossary/kube-controller-manager.md @@ -29,9 +29,9 @@ tags: --> -在主节点上运行 {{< glossary_tooltip text="控制器" term_id="controller" >}} 的组件。 +运行{{< glossary_tooltip text="控制器" term_id="controller" >}}进程的控制平面组件。 diff --git a/content/zh/docs/reference/glossary/manifest.md b/content/zh/docs/reference/glossary/manifest.md index 
041cc0c0d1..811e59a7cf 100644 --- a/content/zh/docs/reference/glossary/manifest.md +++ b/content/zh/docs/reference/glossary/manifest.md @@ -29,4 +29,4 @@ tags: -清单指定了在应用该清单时 Kubrenetes 将维护的对象的期望状态。每个配置文件可包含多个清单。 +清单指定了在应用该清单时 kubernetes 将维护的对象的期望状态。每个配置文件可包含多个清单。 diff --git a/content/zh/docs/reference/glossary/namespace.md b/content/zh/docs/reference/glossary/namespace.md index 257f411e6b..27e2e5018f 100644 --- a/content/zh/docs/reference/glossary/namespace.md +++ b/content/zh/docs/reference/glossary/namespace.md @@ -39,3 +39,4 @@ Namespaces are used to organize objects in a cluster and provide a way to divide --> 名字空间用来组织集群中对象,并为集群资源划分提供了一种方法。同一名字空间内的资源名称必须唯一,但跨名字空间时不作要求。 +在一些文档里名字空间也称为命名空间。 diff --git a/content/zh/docs/reference/glossary/node-pressure-eviction.md b/content/zh/docs/reference/glossary/node-pressure-eviction.md new file mode 100644 index 0000000000..d1336d57d2 --- /dev/null +++ b/content/zh/docs/reference/glossary/node-pressure-eviction.md @@ -0,0 +1,50 @@ +--- +title: 节点压力驱逐 +id: node-pressure-eviction +date: 2021-05-13 +full_link: /zh/docs/concepts/scheduling-eviction/node-pressure-eviction/ +short_description: > + 节点压力驱逐是 kubelet 主动使 Pod 失败以回收节点上的资源的过程。 +aka: +- kubelet eviction +tags: +- operation +--- + + + +节点压力驱逐是 {{}} 主动终止 Pod 以回收节点上资源的过程。 + + + + +kubelet 监控集群节点上的 CPU、内存、磁盘空间和文件系统 inode 等资源。 +当这些资源中的一个或多个达到特定消耗水平时, +kubelet 可以主动使节点上的一个或多个 Pod 失效,以回收资源并防止饥饿。 + + +节点压力驱逐不用于 [API 发起的驱逐](/zh/docs/concepts/scheduling-eviction/api-eviction/)。 diff --git a/content/zh/docs/reference/glossary/object.md b/content/zh/docs/reference/glossary/object.md old mode 100755 new mode 100644 diff --git a/content/zh/docs/reference/glossary/platform-developer.md b/content/zh/docs/reference/glossary/platform-developer.md index 0f533b170c..41e8b99995 100644 --- a/content/zh/docs/reference/glossary/platform-developer.md +++ b/content/zh/docs/reference/glossary/platform-developer.md @@ -44,7 +44,7 @@ Others develop closed-source commercial or site-specific 
extensions. 平台开发人员可以使用[定制资源](/zh/docs/concepts/extend-kubernetes/api-extension/custom-resources/) 或[使用汇聚层扩展 Kubernetes API](/zh/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) 来为其 Kubernetes 实例增加功能,特别是为其应用程序添加功能。 -一些平台开发人员也是 Kubrenetes {{< glossary_tooltip text="贡献者" term_id="contributor" >}}, +一些平台开发人员也是 kubernetes {{< glossary_tooltip text="贡献者" term_id="contributor" >}}, 他们会开发贡献给 Kubernetes 社区的扩展。 另一些平台开发人员则开发封闭源代码的商业扩展或用于特定网站的扩展。 diff --git a/content/zh/docs/reference/glossary/pod.md b/content/zh/docs/reference/glossary/pod.md index 71a1494e0a..873ec90e62 100644 --- a/content/zh/docs/reference/glossary/pod.md +++ b/content/zh/docs/reference/glossary/pod.md @@ -40,4 +40,4 @@ tags: A Pod is typically set up to run a single primary container. It can also run optional sidecar containers that add supplementary features like logging. Pods are commonly managed by a {{< glossary_tooltip term_id="deployment" >}}. --> -通常创建 Pod 是为了运行单个主容器。Pod 还可以运行可选的挂斗(sidecar)容器,以添加诸如日志记录之类的补充特性。通常用 {{< glossary_tooltip term_id="deployment" >}} 来管理 Pod。 +通常创建 Pod 是为了运行单个主容器。Pod 还可以运行可选的边车(sidecar)容器,以添加诸如日志记录之类的补充特性。通常用 {{< glossary_tooltip term_id="deployment" >}} 来管理 Pod。 diff --git a/content/zh/docs/reference/glossary/wg.md b/content/zh/docs/reference/glossary/wg.md index 563ebacd69..2eedaaf356 100644 --- a/content/zh/docs/reference/glossary/wg.md +++ b/content/zh/docs/reference/glossary/wg.md @@ -36,11 +36,11 @@ tags: -工作组可以将人们组织起来,一起完成一项分散的任务。它组建简单,完成任务即可解散。 +工作组可以将人们组织起来,一起完成一项分散的任务。 更多信息请参考 [kubernetes/community](https://github.com/kubernetes/community) 代码库和当前的 [SIGs 和工作组](https://github.com/kubernetes/community/blob/master/sig-list.md) 列表。 diff --git a/content/zh/docs/reference/issues-security/_index.md b/content/zh/docs/reference/issues-security/_index.md index 7d3da26e97..51ca47cd3c 100644 --- a/content/zh/docs/reference/issues-security/_index.md +++ b/content/zh/docs/reference/issues-security/_index.md @@ -1,4 +1,4 @@ --- title: Kubernetes 问题和安全 
-weight: 10 +weight: 40 --- diff --git a/content/zh/docs/reference/kubectl/_index.md b/content/zh/docs/reference/kubectl/_index.md index 3049dcb1c1..5c679220a6 100644 --- a/content/zh/docs/reference/kubectl/_index.md +++ b/content/zh/docs/reference/kubectl/_index.md @@ -1,4 +1,4 @@ --- -title: "kubectl 命令行界面" +title: "kubectl" weight: 60 --- diff --git a/content/zh/docs/reference/kubectl/cheatsheet.md b/content/zh/docs/reference/kubectl/cheatsheet.md index 88397aa4ec..97db36722b 100644 --- a/content/zh/docs/reference/kubectl/cheatsheet.md +++ b/content/zh/docs/reference/kubectl/cheatsheet.md @@ -332,6 +332,9 @@ kubectl get pods --show-labels JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' \ && kubectl get nodes -o jsonpath="$JSONPATH" | grep "Ready=True" +# Output decoded secrets without external tools +kubectl get secret my-secret -o go-template='{{range $k,$v := .data}}{{"### "}}{{$k}}{{"\n"}}{{$v|base64decode}}{{"\n\n"}}{{end}}' + # List all Secrets currently in use by a pod kubectl get pods -o json | jq '.items[].spec.containers[].env[]?.valueFrom.secretKeyRef.name' | grep -v null | sort | uniq @@ -352,6 +355,9 @@ kubectl get nodes -o json | jq -c 'path(..)|[.[]|tostring]|join(".")' # Produce a period-delimited tree of all keys returned for pods, etc kubectl get pods -o json | jq -c 'path(..)|[.[]|tostring]|join(".")' +# Produce ENV for all pods, assuming you have a default container for the pods, default namespace and the `env` command is supported. 
+# Helpful when running any supported command across all pods, not just `env` +for pod in $(kubectl get po --output=jsonpath={.items..metadata.name}); do echo $pod && kubectl exec -it $pod env; done ``` --> ```bash @@ -405,11 +411,14 @@ kubectl get pods --show-labels JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' \ && kubectl get nodes -o jsonpath="$JSONPATH" | grep "Ready=True" +# 不使用外部工具来输出解码后的 Secret +kubectl get secret my-secret -o go-template='{{range $k,$v := .data}}{{"### "}}{{$k}}{{"\n"}}{{$v|base64decode}}{{"\n\n"}}{{end}}' + # 列出被一个 Pod 使用的全部 Secret kubectl get pods -o json | jq '.items[].spec.containers[].env[]?.valueFrom.secretKeyRef.name' | grep -v null | sort | uniq # 列举所有 Pods 中初始化容器的容器 ID(containerID) -# Helpful when cleaning up stopped containers, while avoiding removal of initContainers. +# 可用于在清理已停止的容器时避免删除初始化容器 kubectl get pods --all-namespaces -o jsonpath='{range .items[*].status.initContainerStatuses[*]}{.containerID}{"\n"}{end}' | cut -d/ -f3 # 列出事件(Events),按时间戳排序 @@ -425,6 +434,9 @@ kubectl get nodes -o json | jq -c 'path(..)|[.[]|tostring]|join(".")' # 生成一个句点分隔的树,其中包含为pod等返回的所有键 kubectl get pods -o json | jq -c 'path(..)|[.[]|tostring]|join(".")' +# 假设你的 Pods 有默认的容器和默认的名字空间,并且支持 'env' 命令,可以使用以下脚本为所有 Pods 生成 ENV 变量。 +# 该脚本也可用于在所有的 Pods 里运行任何受支持的命令,而不仅仅是 'env'。 +for pod in $(kubectl get po --output=jsonpath={.items..metadata.name}); do echo $pod && kubectl exec -it $pod env; done ``` ```bash @@ -632,6 +645,35 @@ kubectl exec my-pod -- ls / # 在已有的 Pod 中运行 kubectl exec --stdin --tty my-pod -- /bin/sh # 使用交互 shell 访问正在运行的 Pod (一个容器场景) kubectl exec my-pod -c my-container -- ls / # 在已有的 Pod 中运行命令(多容器场景) kubectl top pod POD_NAME --containers # 显示给定 Pod 和其中容器的监控数据 +kubectl top pod POD_NAME --sort-by=cpu # 显示给定 Pod 的指标并且按照 'cpu' 或者 'memory' 排序 +``` + + +## 与 Deployments 和 Services 进行交互 + + +```bash +kubectl logs deploy/my-deployment # 获取一个 Deployment 的 Pod 的日志(单容器例子) +kubectl logs 
deploy/my-deployment -c my-container # 获取一个 Deployment 的 Pod 的日志(多容器例子) + +kubectl port-forward svc/my-service 5000 # 侦听本地端口 5000 并转发到 Service 后端端口 5000 +kubectl port-forward svc/my-service 5000:my-service-port # 侦听本地端口 5000 并转发到名字为 <my-service-port> 的 Service 目标端口 + +kubectl port-forward deploy/my-deployment 5000:6000 # 侦听本地端口 5000 并转发到 <my-deployment> 创建的 Pod 里的端口 6000 +kubectl exec deploy/my-deployment -- ls # 在 Deployment 里的第一个 Pod 的第一个容器里运行命令(单容器和多容器例子) ``` -列出所支持的全部资源类型和它们的简称、[API 组](/zh/docs/concepts/overview/kubernetes-api/#api-groups), 是否是[名字空间作用域](/zh/docs/concepts/overview/working-with-objects/namespaces) 和 [Kind](/zh/docs/concepts/overview/working-with-objects/kubernetes-objects)。 +列出所支持的全部资源类型和它们的简称、[API 组](/zh/docs/concepts/overview/kubernetes-api/#api-groups-and-versioning), 是否是[名字空间作用域](/zh/docs/concepts/overview/working-with-objects/namespaces) 和 [Kind](/zh/docs/concepts/overview/working-with-objects/kubernetes-objects)。 ```bash kubectl api-resources @@ -689,7 +731,7 @@ Other operations for exploring API resources: ```bash kubectl api-resources --namespaced=true # All namespaced resources kubectl api-resources --namespaced=false # All non-namespaced resources -kubectl api-resources -o name # All resources with simple output (just the resource name) +kubectl api-resources -o name # All resources with simple output (only the resource name) kubectl api-resources -o wide # All resources with expanded (aka "wide") output kubectl api-resources --verbs=list,get # All resources that support the "list" and "get" request verbs kubectl api-resources --api-group=extensions # All resources in the "extensions" API group @@ -743,7 +785,10 @@ Examples using `-o=custom-columns`: # All images running in a cluster kubectl get pods -A -o=custom-columns='DATA:spec.containers[*].image' - # All images excluding "k8s.gcr.io/coredns:1.6.2" +# All images running in namespace: default, grouped by Pod +kubectl get pods --namespace default 
--output=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image" + +# All images excluding "k8s.gcr.io/coredns:1.6.2" kubectl get pods -A -o=custom-columns='DATA:spec.containers[?(@.image!="k8s.gcr.io/coredns:1.6.2")].image' # All fields under metadata regardless of name @@ -758,7 +803,10 @@ More examples in the kubectl [reference documentation](/docs/reference/kubectl/o # 集群中运行着的所有镜像 kubectl get pods -A -o=custom-columns='DATA:spec.containers[*].image' - # 除 "k8s.gcr.io/coredns:1.6.2" 之外的所有镜像 +# 列举 default 名字空间中运行的所有镜像,按 Pod 分组 +kubectl get pods --namespace default --output=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image" + +# 除 "k8s.gcr.io/coredns:1.6.2" 之外的所有镜像 kubectl get pods -A -o=custom-columns='DATA:spec.containers[?(@.image!="k8s.gcr.io/coredns:1.6.2")].image' # 输出 metadata 下面的所有字段,无论 Pod 名字为何 diff --git a/content/zh/docs/reference/kubectl/jsonpath.md b/content/zh/docs/reference/kubectl/jsonpath.md index c1302ec3d6..9d9c82b974 100644 --- a/content/zh/docs/reference/kubectl/jsonpath.md +++ b/content/zh/docs/reference/kubectl/jsonpath.md @@ -3,7 +3,7 @@ title: JSONPath 支持 content_type: concept weight: 25 --- - Kubectl 支持 JSONPath 模板。 - JSONPath 模板由 {} 包起来的 JSONPath 表达式组成。Kubectl 使用 JSONPath 表达式来过滤 JSON 对象中的特定字段并格式化输出。除了原始的 JSONPath 模板语法,以下函数和语法也是有效的: - 1. 使用双引号将 JSONPath 表达式内的文本引起来。 2. 使用 `range`,`end` 运算符来迭代列表。 3. 
使用负数索引从列表末尾反向访问元素。负数索引不会“环绕”列表,并且只要 `-index + listLength >= 0` 就有效。 {{< note >}} - - `$` 运算符是可选的,因为默认情况下表达式总是从根对象开始。 @@ -48,8 +48,8 @@ JSONPath 模板由 {} 包起来的 JSONPath 表达式组成。Kubectl 使用 JSO {{< /note >}} - 给定 JSON 输入: @@ -90,7 +90,7 @@ Given the JSON input: } ``` - 函数 | 描述 | 示例 | 结果 --------------------|---------------------------|-----------------------------------------------------------------|------------------ @@ -117,8 +117,8 @@ Function | Description | Example `range`, `end` | 迭代列表 | `{range .items[*]}[{.metadata.name}, {.status.capacity}] {end}` | `[127.0.0.1, map[cpu:4]] [127.0.0.2, map[cpu:8]]` `''` | 引用解释执行字符串 | `{range .items[*]}{.metadata.name}{'\t'}{end}` | `127.0.0.1 127.0.0.2` - 使用 `kubectl` 和 JSONPath 表达式的示例: @@ -131,7 +131,7 @@ kubectl get pods -o=jsonpath="{.items[*]['metadata.name', 'status.capacity']}" kubectl get pods -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.startTime}{"\n"}{end}' ``` - {{< note >}} -在 Windows 上,您必须用双引号把任何包含空格的 JSONPath 模板(不是上面 bash 所示的单引号)。 +在 Windows 上,对于任何包含空格的 JSONPath 模板,您必须使用双引号(不是上面 bash 所示的单引号)。 反过来,这意味着您必须在模板中的所有文字周围使用单引号或转义的双引号。 例如: @@ -176,4 +176,3 @@ kubectl get pods -o jsonpath='{.items[?(@.metadata.name=~/^test$/)].metadata.nam kubectl get pods -o json | jq -r '.items[] | select(.metadata.name | test("test-")).spec.containers[].image' ``` {{< /note >}} - diff --git a/content/zh/docs/reference/kubectl/overview.md b/content/zh/docs/reference/kubectl/overview.md index cd740388d6..ddc83e8536 100644 --- a/content/zh/docs/reference/kubectl/overview.md +++ b/content/zh/docs/reference/kubectl/overview.md @@ -139,9 +139,9 @@ Flags that you specify from the command line override default values and any cor {{< /caution >}} -如果你需要帮助,只需从终端窗口运行 ` kubectl help ` 即可。 +如果你需要帮助,从终端窗口运行 `kubectl help` 。 Kubelet 用 Go 定义的 `runtime.GOOS` 生成该标签的键值。在混合使用异构操作系统场景下(例如:混合使用 Linux 和 Windows 节点),此键值可以带来极大便利。 +## kubernetes.io/metadata.name + +示例:`kubernetes.io/metadata.name=mynamespace` + +用于:Namespaces + + +当 
`NamespaceDefaultLabelName` [特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/) +被启用时,Kubernetes API 服务器会在所有命名空间上设置此标签。标签值被设置为命名空间的名称。 + +如果你想使用标签 {{< glossary_tooltip text="选择器" term_id="selector" >}} 来指向特定的命名空间,这很有用。 + ## beta.kubernetes.io/arch (deprecated) +该注解用于设置 [Pod 删除开销](/zh/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost), +允许用户影响 ReplicaSet 的缩减顺序。该注解解析为 `int32` 类型。 + ## beta.kubernetes.io/instance-type (deprecated) {{< note >}} @@ -124,6 +157,22 @@ Starting in v1.17, this label is deprecated in favor of [topology.kubernetes.io/ 从 v1.17 开始,此标签被弃用,取而代之的是 [topology.kubernetes.io/zone](#topologykubernetesiozone). {{< /note >}} +## statefulset.kubernetes.io/pod-name {#statefulsetkubernetesiopod-name} + +示例:`statefulset.kubernetes.io/pod-name=mystatefulset-7` + + +当 StatefulSet 控制器为 StatefulSet 创建 Pod 时,控制平面会在该 Pod 上设置此标签。 +标签的值是正在创建的 Pod 的名称。 + +更多细节请参见 StatefulSet 文章中的 [Pod 名称标签](/zh/docs/concepts/workloads/controllers/statefulset/#pod-name-label)。 + ## topology.kubernetes.io/region {#topologykubernetesioregion} 示例 @@ -316,6 +365,17 @@ Starting in v1.18, this annotation is deprecated in favor of `spec.ingressClassN 从 v1.18 开始,此注解被弃用,取而代之的是 `spec.ingressClassName`。 {{< /note >}} +## storageclass.kubernetes.io/is-default-class + +示例:`storageclass.kubernetes.io/is-default-class=true` + +用于:StorageClass + + +当单个的 StorageClass 资源将这个注解设置为 `"true"` 时,新的持久卷申领(PVC) +资源若未指定类别,将被设定为此默认类别。 ## alpha.kubernetes.io/provided-node-ip @@ -327,14 +387,50 @@ Starting in v1.18, this annotation is deprecated in favor of `spec.ingressClassN The kubelet can set this annotation on a Node to denote its configured IPv4 address. When kubelet is started with the "external" cloud provider, it sets this annotation on the Node to denote an IP address set from the command line flag (`--node-ip`). This IP is verified with the cloud provider as valid by the cloud-controller-manager. 
- -**The taints listed below are always used on Nodes** --> kubectl 在 Node 上设置此注解,表示它的 IPv4 地址。 当 kubectl 由外部的云供应商启动时,在 Node 上设置此注解,表示由命令行标记(`--node-ip`)设置的 IP 地址。 cloud-controller-manager 向云供应商验证此 IP 是否有效。 +## batch.kubernetes.io/job-completion-index + +示例:`batch.kubernetes.io/job-completion-index: "3"` + +用于:Pod + + +kube-controller-manager 中的 Job 控制器给创建使用索引 +[完成模式](/zh/docs/concepts/workloads/controllers/job/#completion-mode) +的 Pod 设置此注解。 + +## kubectl.kubernetes.io/default-container + +示例:`kubectl.kubernetes.io/default-container: "front-end-app"` + + +注解的值是此 Pod 的默认容器名称。 +例如,`kubectl logs` 或 `kubectl exec` 没有 `-c` 或 `--container` 参数时,将使用这个默认的容器。 + +## endpoints.kubernetes.io/over-capacity + +示例:`endpoints.kubernetes.io/over-capacity:warning` + +用于:Endpoints + + +在 Kubernetes 集群 v1.21(或更高版本)中,如果 Endpoint 超过 1000 个,Endpoint 控制器 +就会向其添加这个注解。该注解表示 Endpoint 资源已超过容量。 + **以下列出的污点只能用于 Node** ## node.kubernetes.io/not-ready diff --git a/content/zh/docs/reference/setup-tools/kubeadm/_index.md b/content/zh/docs/reference/setup-tools/kubeadm/_index.md index 7b8c2ac158..2c6e1be1e9 100644 --- a/content/zh/docs/reference/setup-tools/kubeadm/_index.md +++ b/content/zh/docs/reference/setup-tools/kubeadm/_index.md @@ -69,7 +69,7 @@ To install kubeadm, see the [installation guide](/docs/setup/production-environm 用于管理 `kubeadm join` 使用的令牌 * [kubeadm reset](/zh/docs/reference/setup-tools/kubeadm/kubeadm-reset) 用于恢复通过 `kubeadm init` 或者 `kubeadm join` 命令对节点进行的任何变更 -* [kubeadm certs](/docs/reference/setup-tools/kubeadm/kubeadm-certs) +* [kubeadm certs](/zh/docs/reference/setup-tools/kubeadm/kubeadm-certs) 用于管理 Kubernetes 证书 * [kubeadm kubeconfig](/docs/reference/setup-tools/kubeadm/kubeadm-kubeconfig) 用于管理 kubeconfig 文件 diff --git a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init.md index 637024a9cd..6914bdee27 100644 --- a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init.md +++ 
b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init.md @@ -293,10 +293,10 @@ For information about passing flags to control plane components see: 默认情况下, kubeadm 会从 `k8s.gcr.io` 仓库拉取镜像。如果请求的 Kubernetes 版本是 CI 标签 -(例如 `ci/latest`),则使用 `gcr.io/kubernetes-ci-images`。 +(例如 `ci/latest`),则使用 `gcr.io/k8s-staging-ci-images`。 + + + +Kubernetes 包含多个内置工具来帮助你使用 Kubernetes 系统。 + + + + + +## Minikube + +[`minikube`](https://minikube.sigs.k8s.io/docs/) +是一种在你的工作站上本地运行单节点 Kubernetes 集群的工具,用于开发和测试。 + + +## 仪表盘 + +[`Dashboard`](/zh/docs/tasks/access-application-cluster/web-ui-dashboard/), +基于 Web 的 Kubernetes 用户界面, +允许你将容器化的应用程序部署到 Kubernetes 集群, +对它们进行故障排查,并管理集群及其资源本身。 + + +## Helm + +[`Kubernetes Helm`](https://github.com/kubernetes/helm) +是一个用于管理预配置 Kubernetes 资源包的工具,也就是 Kubernetes 图表。 + + +使用 Helm 来: + +* 查找和使用打包为 Kubernetes 图表的流行软件 +* 将你自己的应用程序共享为 Kubernetes 图表 +* 为你的 Kubernetes 应用程序创建可重现的构建 +* 智能管理你的 Kubernetes 清单文件 +* 管理 Helm 包的发布 + + +## Kompose + +[`Kompose`](https://github.com/kubernetes/kompose) +是一个帮助 Docker Compose 用户迁移到 Kubernetes 的工具。 + + + +使用 Kompose: + +* 将 Docker Compose 文件翻译成 Kubernetes 对象 +* 从本地 Docker 开发转到通过 Kubernetes 管理你的应用程序 +* 转换 Docker Compose v1 或 v2 版本的 `yaml` 文件或[分布式应用程序包](https://docs.docker.com/compose/bundles/) \ No newline at end of file diff --git a/content/zh/docs/reference/using-api/_index.md b/content/zh/docs/reference/using-api/_index.md index 303f2152ac..837adf9dc6 100644 --- a/content/zh/docs/reference/using-api/_index.md +++ b/content/zh/docs/reference/using-api/_index.md @@ -111,7 +111,7 @@ Here's a summary of each level: 特性默认开启。 - 尽管一些特性会发生细节上的变化,但它们将会被长期支持。 - + --> - 在随后的 Beta 版或稳定版中,对象的模式和(或)语义可能以不兼容的方式改变。 当这种情况发生时,将提供迁移说明。 模式更改可能需要删除、编辑和重建 API 对象。 @@ -130,10 +130,10 @@ Here's a summary of each level: 后续发布版本可能会有不兼容的变动。 如果你有多个集群可以独立升级,可以放宽这一限制。 - + --> {{< note >}} 请试用测试版特性时并提供反馈。特性完成 Beta 阶段测试后, 就可能不会有太多的变更了。 diff --git a/content/zh/docs/reference/using-api/api-concepts.md b/content/zh/docs/reference/using-api/api-concepts.md 
index d44feae85c..4637ef108f 100644 --- a/content/zh/docs/reference/using-api/api-concepts.md +++ b/content/zh/docs/reference/using-api/api-concepts.md @@ -463,7 +463,7 @@ Accept: application/json;as=Table;g=meta.k8s.io;v=v1beta1, application/json GET 和 LIST 操作的语义含义如下: @@ -1142,7 +1142,7 @@ reply with a `410 Gone` HTTP response. ### 不可用的资源版本 {#unavailable-resource-versions} diff --git a/content/zh/docs/reference/using-api/client-libraries.md b/content/zh/docs/reference/using-api/client-libraries.md index cf01bcc56e..470328e777 100644 --- a/content/zh/docs/reference/using-api/client-libraries.md +++ b/content/zh/docs/reference/using-api/client-libraries.md @@ -5,13 +5,11 @@ weight: 30 --- @@ -58,22 +56,21 @@ The following client libraries are officially maintained by -| 语言 | 客户端库 | 样例程序 | -|----------|----------------|-----------------| -| Go | [github.com/kubernetes/client-go/](https://github.com/kubernetes/client-go/) | [浏览](https://github.com/kubernetes/client-go/tree/master/examples) -| Python | [github.com/kubernetes-client/python/](https://github.com/kubernetes-client/python/) | [浏览](https://github.com/kubernetes-client/python/tree/master/examples) -| Java | [github.com/kubernetes-client/java](https://github.com/kubernetes-client/java/) | [浏览](https://github.com/kubernetes-client/java#installation) +| 语言 | 客户端库 | 样例程序 | +|---------|-----------------|-----------------| | dotnet | [github.com/kubernetes-client/csharp](https://github.com/kubernetes-client/csharp) | [浏览](https://github.com/kubernetes-client/csharp/tree/master/examples/simple) -| JavaScript | [github.com/kubernetes-client/javascript](https://github.com/kubernetes-client/javascript) | [浏览](https://github.com/kubernetes-client/javascript/tree/master/examples) +| Go | [github.com/kubernetes/client-go/](https://github.com/kubernetes/client-go/) | [浏览](https://github.com/kubernetes/client-go/tree/master/examples) | Haskell | 
[github.com/kubernetes-client/haskell](https://github.com/kubernetes-client/haskell) | [浏览](https://github.com/kubernetes-client/haskell/tree/master/kubernetes-client/example) - +| Java | [github.com/kubernetes-client/java](https://github.com/kubernetes-client/java/) | [浏览](https://github.com/kubernetes-client/java#installation) +| JavaScript | [github.com/kubernetes-client/javascript](https://github.com/kubernetes-client/javascript) | [浏览](https://github.com/kubernetes-client/javascript/tree/master/examples) +| Python | [github.com/kubernetes-client/python/](https://github.com/kubernetes-client/python/) | [浏览](https://github.com/kubernetes-client/python/tree/master/examples) | 语言 | 客户端库 | | -------------------- | ---------------------------------------- | | Clojure | [github.com/yanatan16/clj-kubernetes-api](https://github.com/yanatan16/clj-kubernetes-api) | +| DotNet | [github.com/tonnyeremin/kubernetes_gen](https://github.com/tonnyeremin/kubernetes_gen) | +| DotNet (RestSharp) | [github.com/masroorhasan/Kubernetes.DotNet](https://github.com/masroorhasan/Kubernetes.DotNet) | +| Elixir | [github.com/obmarg/kazan](https://github.com/obmarg/kazan/) | +| Elixir | [github.com/coryodaniel/k8s](https://github.com/coryodaniel/k8s) | | Go | [github.com/ericchiang/k8s](https://github.com/ericchiang/k8s) | | Java (OSGi) | [bitbucket.org/amdatulabs/amdatu-kubernetes](https://bitbucket.org/amdatulabs/amdatu-kubernetes) | | Java (Fabric8, OSGi) | [github.com/fabric8io/kubernetes-client](https://github.com/fabric8io/kubernetes-client) | @@ -142,23 +143,17 @@ their authors, not the Kubernetes team. 
| PHP | [github.com/maclof/kubernetes-client](https://github.com/maclof/kubernetes-client) | | PHP | [github.com/travisghansen/kubernetes-client-php](https://github.com/travisghansen/kubernetes-client-php) | | PHP | [github.com/renoki-co/php-k8s](https://github.com/renoki-co/php-k8s) | -| Python | [github.com/eldarion-gondor/pykube](https://github.com/eldarion-gondor/pykube) | | Python | [github.com/fiaas/k8s](https://github.com/fiaas/k8s) | | Python | [github.com/mnubo/kubernetes-py](https://github.com/mnubo/kubernetes-py) | | Python | [github.com/tomplus/kubernetes_asyncio](https://github.com/tomplus/kubernetes_asyncio) | | Python | [github.com/Frankkkkk/pykorm](https://github.com/Frankkkkk/pykorm) | | Ruby | [github.com/abonas/kubeclient](https://github.com/abonas/kubeclient) | | Ruby | [github.com/Ch00k/kuber](https://github.com/Ch00k/kuber) | +| Ruby | [github.com/k8s-ruby/k8s-ruby](https://github.com/k8s-ruby/k8s-ruby) | | Ruby | [github.com/kontena/k8s-client](https://github.com/kontena/k8s-client) | | Rust | [github.com/clux/kube-rs](https://github.com/clux/kube-rs) | | Rust | [github.com/ynqa/kubernetes-rust](https://github.com/ynqa/kubernetes-rust) | | Scala | [github.com/hagay3/skuber](https://github.com/hagay3/skuber) | | Scala | [github.com/joan38/kubernetes-client](https://github.com/joan38/kubernetes-client) | | Swift | [github.com/swiftkube/client](https://github.com/swiftkube/client) | -| DotNet | [github.com/tonnyeremin/kubernetes_gen](https://github.com/tonnyeremin/kubernetes_gen) | -| DotNet (RestSharp) | [github.com/masroorhasan/Kubernetes.DotNet](https://github.com/masroorhasan/Kubernetes.DotNet) | -| Elixir | [github.com/obmarg/kazan](https://github.com/obmarg/kazan/) | -| Elixir | [github.com/coryodaniel/k8s](https://github.com/coryodaniel/k8s) | - - diff --git a/content/zh/docs/reference/using-api/deprecation-policy.md b/content/zh/docs/reference/using-api/deprecation-policy.md index 27c2a5b636..50d77df4cf 100644 --- 
a/content/zh/docs/reference/using-api/deprecation-policy.md +++ b/content/zh/docs/reference/using-api/deprecation-policy.md @@ -171,7 +171,8 @@ This covers the [maximum supported version skew of 2 releases](/docs/setup/relea * **Beta: 9 个月或者 3 个发布版本(取其较长者)** * **Alpha: 0 个发布版本** -这里也包含了关于[最大支持 2 个发布版本的版本偏差](/zh/docs/setup/release/version-skew-policy/)的约定。 +这里也包含了关于[最大支持 2 个发布版本的版本偏差](/zh/docs/setup/release/version-skew-policy/) +的约定。 {{< note >}} -在[#52185](https://github.com/kubernetes/kubernetes/issues/52185)被解决之前, +在 [#52185](https://github.com/kubernetes/kubernetes/issues/52185) 被解决之前, 已经被保存到持久性存储中的 API 版本都不可以被去除。 你可以禁止这些版本所对应的 REST 末端(在符合本文中弃用时间线的前提下), 但是 API 服务器必须仍能解析和转换存储中以前写入的数据。 @@ -699,6 +700,14 @@ therefore the rules for deprecation are as follows: 特性门控的版本管理与之前讨论的组件版本管理不同,因此其对应的弃用策略如下: + **规则 #8:特性门控所对应的功能特性经历下面所列的成熟性阶段转换时,特性门控 必须被弃用。特性门控弃用时必须在以下时长内保持其功能可用:** @@ -730,8 +739,7 @@ this impacts removal of a metric during a Kubernetes release. These classes are determined by the perceived importance of the metric. 
The rules for deprecating and removing a metric are as follows: --> - -### 弃用度量值 {#Deprecating a metric} +### 弃用度量值 {#deprecating-a-metric} Kubernetes 控制平面的每个组件都公开度量值(通常是 `/metrics` 端点),它们通常由集群管理员使用。 并不是所有的度量值都是同样重要的:一些度量值通常用作 SLIs 或被使用来确定 SLOs,这些往往比较重要。 @@ -755,20 +763,25 @@ Kubernetes 控制平面的每个组件都公开度量值(通常是 `/metrics` --> **规则 #9a: 对于相应的稳定性类别,度量值起作用的周期必须不小于:** - * **STABLE: 4 个发布版本或者 12 个月 (取其较长者)** - * **ALPHA: 0 个发布版本** +* **STABLE: 4 个发布版本或者 12 个月 (取其较长者)** +* **ALPHA: 0 个发布版本** **规则 #9b: 在度量值被宣布启用之后,它起作用的周期必须不小于:** - * **STABLE: 3 个发布版本或者 9 个月 (取其较长者)** - * **ALPHA: 0 个发布版本** +* **STABLE: 3 个发布版本或者 9 个月 (取其较长者)** +* **ALPHA: 0 个发布版本** +已弃用的度量值将在其描述文本前加上一个已弃用通知字符串 '(Deprecated from x.y)', +并将在度量值被记录期间发出警告日志。就像稳定的、未被弃用的度量指标一样, +被弃用的度量值将自动注册到 metrics 端点,因此被弃用的度量值也是可见的。 + -已弃用的度量值将在其描述文本前加上一个已弃用通知字符串 '(Deprecated from x.y)', -并将在度量值被记录期间发出警告日志。就像稳定的、未被弃用的度量指标一样, -被弃用的度量值将自动注册到 metrics 端点,因此被弃用的度量值也是可见的。 - 在随后的版本中(当度量值 `deprecatedVersion` 等于_当前 Kubernetes 版本 - 3_), 被弃用的度量值将变成 _隐藏(Hidden)_ metric 度量值。 与被弃用的度量值不同,隐藏的度量值将不再被自动注册到 metrics 端点(因此被隐藏)。 -但是,它们可以通过可执行文件的命令行标志显式启用(`--show-hidden-metrics-for-version=`)。 - +但是,它们可以通过可执行文件的命令行标志显式启用 +(`--show-hidden-metrics-for-version=`)。 如果集群管理员不能对早期的弃用警告作出反应,这一设计就为他们提供了抓紧迁移弃用度量值的途径。 隐藏的度量值应该在再过一个发行版本后被删除。 diff --git a/content/zh/docs/reference/using-api/server-side-apply.md b/content/zh/docs/reference/using-api/server-side-apply.md index 207f8b0796..aa0b3fef20 100644 --- a/content/zh/docs/reference/using-api/server-side-apply.md +++ b/content/zh/docs/reference/using-api/server-side-apply.md @@ -5,7 +5,6 @@ weight: 25 min-kubernetes-server-version: 1.16 --- @@ -25,15 +23,15 @@ min-kubernetes-server-version: 1.16 ## 简介 {#introduction} 服务器端应用协助用户、控制器通过声明式配置的方式管理他们的资源。 -它发送完整描述的目标(A fully specified intent), +客户端可以发送完整描述的目标(A fully specified intent), 声明式地创建和/或修改 [对象](/zh/docs/concepts/overview/working-with-objects/kubernetes-objects/)。 @@ -84,7 +82,7 @@ Server side apply is meant both as a replacement for the original `kubectl 
apply` and as a simpler mechanism for controllers to enact their changes. If you have Server Side Apply enabled, the control plane tracks managed fields -for all newlly created objects. +for all newly created objects. --> 服务器端应用既是原有 `kubectl apply` 的替代品, 也是控制器发布自身变化的一个简化机制。 @@ -133,7 +131,7 @@ the appliers, results in a conflict. Shared field owners may give up ownership of a field by removing it from their configuration. Field management is stored in a`managedFields` field that is part of an object's -[`metadata`](/docs/reference/generated/kubernetes-api/{{< latest-version >}}/#objectmeta-v1-meta). +[`metadata`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#objectmeta-v1-meta). A simple example of an object created by Server Side Apply could look like this: --> @@ -142,7 +140,8 @@ A simple example of an object created by Server Side Apply could look like this: 共享字段的所有者可以放弃字段的所有权,这只需从配置文件中删除该字段即可。 字段管理的信息存储在 `managedFields` 字段中,该字段是对象的 -[`metadata`](/docs/reference/generated/kubernetes-api/{{< latest-version >}}/#objectmeta-v1-meta)中的一部分。 +[`metadata`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#objectmeta-v1-meta) +中的一部分。 服务器端应用创建对象的简单示例如下: @@ -356,15 +355,14 @@ would have failed due to conflicting ownership. The merging strategy, implemented with Server Side Apply, provides a generally more stable object lifecycle. Server Side Apply tries to merge fields based on -the fact who manages them instead of overruling just based on values. This way -it is intended to make it easier and more stable for multiple actors updating -the same object by causing less unexpected interference. +the actor who manages them instead of overruling based on values. This way +multiple actors can update the same object without causing unexpected interference. 
--> ## 合并策略 {#merge-strategy} 由服务器端应用实现的合并策略,提供了一个总体更稳定的对象生命周期。 -服务器端应用试图依据谁管理它们来合并字段,而不只是根据值来否决。 -这么做是为了多个参与者可以更简单、更稳定的更新同一个对象,且避免引起意外干扰。 +服务器端应用试图依据负责管理它们的主体来合并字段,而不是根据值来否决。 +这么做是为了多个主体可以更新同一个对象,且不会引起意外的相互干扰。 Kubernetes 1.16 和 1.17 中添加了一些标记, @@ -399,18 +397,116 @@ Kubernetes 1.16 和 1.17 中添加了一些标记, | Golang 标记 | OpenAPI extension | 可接受的值 | 描述 | 引入版本 | |---|---|---|---|---| -| `//+listType` | `x-kubernetes-list-type` | `atomic`/`set`/`map` | 适用于 list。 `atomic` 和 `set` 适用于只包含标量元素的 list。 `map` 适用于只包含嵌套类型的 list。 如果配置为 `atomic`, 合并时整个列表会被替换掉; 任何时候,唯一的管理器都把列表作为一个整体来管理。如果是 `set` 或 `map` ,不同的管理器也可以分开管理条目。 | 1.16 | -| `//+listMapKey` | `x-kubernetes-list-map-keys` | 用来唯一标识条目的 map keys 切片,例如 `["port", "protocol"]` | 仅当 `+listType=map` 时适用。组合值的字符串切片必须唯一标识列表中的条目。尽管有多个 key,`listMapKey` 是单数的,这是因为 key 需要在 Go 类型中单独的指定。 | 1.16 | +| `//+listType` | `x-kubernetes-list-type` | `atomic`/`set`/`map` | 适用于 list。`set` 适用于仅包含标量元素的列表。这些元素必须是不重复的。`map` 仅适用于包含嵌套类型的列表。列表中的键(参见 `listMapKey`)不可以重复。`atomic` 适用于任何类型的列表。如果配置为 `atomic`,则合并时整个列表会被替换掉。任何时候,只有一个管理器负责管理指定列表。如果配置为 `set` 或 `map`,不同的管理器也可以分开管理条目。 | 1.16 | +| `//+listMapKey` | `x-kubernetes-list-map-keys` | 字段名称的列表,例如,`["port", "protocol"]` | 仅当 `+listType=map` 时适用。取值为字段名称的列表,这些字段值的组合能够唯一标识列表中的条目。尽管可以存在多个键,`listMapKey` 是单数的,这是因为键名需要在 Go 类型中各自独立指定。键字段必须是标量。 | 1.16 | | `//+mapType` | `x-kubernetes-map-type` | `atomic`/`granular` | 适用于 map。 `atomic` 指 map 只能被单个的管理器整个的替换。 `granular` 指 map 支持多个管理器各自更新自己的字段。 | 1.17 | | `//+structType` | `x-kubernetes-map-type` | `atomic`/`granular` | 适用于 structs;否则就像 `//+mapType` 有相同的用法和 openapi 注释.| 1.17 | + +若未指定 `listType`,API 服务器将 `patchMergeStrategy=merge` 标记解释为 +`listType=map` 并且视对应的 `patchMergeKey` 标记为 `listMapKey` 取值。 + +`atomic` 列表类型是递归的。 + +这些标记都是用源代码注释的方式给出的,不必作为字段标签(tag)再重复。 + + +### 拓扑变化时的兼容性 {#compatibility-across-toplogy-changes} + + +在极少的情况下,CRD 或者内置类型的作者可能希望更改其资源中的某个字段的 +拓扑配置,同时又不提升版本号。 +通过升级集群或者更新 CRD 来更改类型的拓扑信息与更新现有对象的结果不同。 +变更的类型有两种:一种是将字段从 `map`/`set`/`granular` 更改为 `atomic`, +另一种是做逆向改变。 + + 
+当 `listType`、`mapType` 或 `structType` 从 `map`/`set`/`granular` 改为 +`atomic` 时,现有对象的整个列表、映射或结构的属主都会变为这些类型的 +元素之一的属主。这意味着,对这些对象的进一步变更会引发冲突。 + + +当一个列表、映射或结构从 `atomic` 改为 `map`/`set`/`granular` 之一 +时,API 服务器无法推导这些字段的新的属主。因此,当对象的这些字段 +再次被更新时不会引发冲突。出于这一原因,不建议将某类型从 `atomic` 改为 +`map`/`set`/`granular`。 + +以下面的自定义资源为例: + +```yaml +apiVersion: example.com/v1 +kind: Foo +metadata: + name: foo-sample + managedFields: + - manager: manager-one + operation: Apply + apiVersion: example.com/v1 + fields: + f:spec: + f:data: {} +spec: + data: + key1: val1 + key2: val2 +``` + + +在 `spec.data` 从 `atomic` 改为 `granular` 之前,`manager-one` 是 +`spec.data` 字段及其所包含字段(`key1` 和 `key2`)的属主。 +当对应的 CRD 被更改,使得 `spec.data` 变为 `granular` 拓扑时, +`manager-one` 继续拥有顶层字段 `spec.data`(这意味着其他管理者想 +删除名为 `data` 的映射而不引起冲突是不可能的),但不再拥有 +`key1` 和 `key2`。因此,其他管理者可以在不引起冲突的情况下更改 +或删除这些字段。 + -### 在控制器中使用服务器端应用 {#using-server-side-apply-in-controller} +## 在控制器中使用服务器端应用 {#using-server-side-apply-in-controller} 控制器的开发人员可以把服务器端应用作为简化控制器的更新逻辑的方式。 读-改-写 和/或 patch 的主要区别如下所示: @@ -463,7 +559,7 @@ might not be able to resolve or act on these conflicts. 强烈推荐:设置控制器在冲突时强制执行,这是因为冲突发生时,它们没有其他解决方案或措施。 -### 转移所有权 {#transferring-ownership} +## 转移所有权 {#transferring-ownership} 除了通过[冲突解决方案](#conflicts)提供的并发控制, 服务器端应用提供了一些协作方式来将字段所有权从用户转移到控制器。 @@ -526,7 +622,7 @@ is not what the user wants to happen, even temporarily. 这里有两个解决方案: -- (容易) 把 `replicas` 留在配置文件中;当 HPA 最终写入那个字段, +- (基本操作)把 `replicas` 留在配置文件中;当 HPA 最终写入那个字段, 系统基于此事件告诉用户:冲突发生了。在这个时间点,可以安全的删除配置文件。 -- (高级)然而,如果用户不想等待,比如他们想为合作伙伴保持集群清晰, +- (高级操作)然而,如果用户不想等待,比如他们想为合作伙伴保持集群清晰, 那他们就可以执行以下步骤,安全的从配置文件中删除 `replicas`。 首先,用户新定义一个只包含 `replicas` 字段的配置文件: @@ -561,13 +657,13 @@ kubectl apply -f https://k8s.io/examples/application/ssa/nginx-deployment-replic 如果应用操作和 HPA 控制器产生冲突,那什么都不做。 -冲突只是表明控制器在更早的流程中已经对字段声明过所有权。 +冲突表明控制器在更早的流程中已经对字段声明过所有权。 在此时间点,用户可以从配置文件中删除 `replicas` 。 @@ -583,7 +679,7 @@ automatically deleted. No clean up is required. 
这里不需要执行清理工作。 -## 在用户之间转移所有权 {#transferring-ownership-between-users} +### 在用户之间转移所有权 {#transferring-ownership-between-users} 通过在配置文件中把一个字段设置为相同的值,用户可以在他们之间转移字段的所有权, 从而共享了字段的所有权。 @@ -763,7 +859,7 @@ Data: [{"op": "replace", "path": "/metadata/managedFields", "value": [{}]}] -这一操作将用只包含一个空条目的 list 覆写 managedFields, +这一操作将用只包含一个空条目的列表覆写 managedFields, 来实现从对象中整个的去除 managedFields。 -注意,只把 managedFields 设置为空 list 并不会重置字段。 +注意,只把 managedFields 设置为空列表并不会重置字段。 这么做是有目的的,所以 managedFields 将永远不会被与该字段无关的客户删除。 在重置操作结合 managedFields 以外其他字段更改的场景中, @@ -804,7 +900,8 @@ should have the same flag setting. --> ## 禁用此功能 {#disabling-the-feature} -服务器端应用是一个 beta 版特性,默认启用。 +服务器端应用是一个 Beta 版特性,默认启用。 要关闭此[特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates), 你需要在启动 `kube-apiserver` 时包含参数 `--feature-gates ServerSideApply=false`。 -如果你有多个 `kube-apiserver` 副本,他们都应该有相同的标记设置。 \ No newline at end of file +如果你有多个 `kube-apiserver` 副本,它们的标志设置应该都相同。 + diff --git a/content/zh/docs/setup/_index.md b/content/zh/docs/setup/_index.md index d7828c8ac3..27b2487087 100644 --- a/content/zh/docs/setup/_index.md +++ b/content/zh/docs/setup/_index.md @@ -1,9 +1,9 @@ --- -no_issue: true title: 入门 main_menu: true weight: 20 content_type: concept +no_list: true card: name: setup weight: 20 @@ -19,11 +19,11 @@ reviewers: - brendandburns - erictune - mikedanese -no_issue: true title: Getting started main_menu: true weight: 20 content_type: concept +no_list: true card: name: setup weight: 20 @@ -48,10 +48,17 @@ control, available resources, and expertise required to operate and manage a clu 安装 Kubernetes 时,请根据以下条件选择安装类型:易于维护、安全性、可控制性、可用资源以及操作和管理 Kubernetes 集群所需的专业知识。 -可以在本地机器、云、本地数据中心上部署 Kubernetes 集群,或选择一个托管的 Kubernetes 集群。还可以跨各种云提供商或裸机环境创建自定义解决方案。 +You can [download Kubernetes](/releases/download/) to deploy a Kubernetes cluster +on a local machine, into the cloud, or for your own datacenter. 
+If you don't want to manage a Kubernetes cluster yourself, you could pick a managed service, including +[certified platforms](/docs/setup/production-environment/turnkey-solutions/). +There are also other standardized and custom solutions across a wide range of cloud and +bare metal environments. +-->。 +可以[下载 Kubernetes](/releases/download/),在本地机器、云或你自己的数据中心上部署 Kubernetes 集群。 +如果你不想自己管理 Kubernetes 集群,则可以选择托管服务,包括[经过认证的平台](/zh/docs/setup/production-environment/turnkey-solutions/)。 +在各种云和裸机环境中,还有其他标准化和定制的解决方案。 -如果正打算学习 Kubernetes,请使用 Kubernetes 社区支持或生态系统中的工具在本地计算机上设置 Kubernetes 集群。 +如果正打算学习 Kubernetes,请使用 Kubernetes 社区支持 +或生态系统中的工具在本地计算机上设置 Kubernetes 集群。 +请参阅[安装工具](/zh/docs/tasks/tools/)。 -在评估生产环境的解决方案时,请考虑要管理自己 Kubernetes 集群(_抽象层面_)的哪些方面或将其转移给提供商。 +在评估[生产环境](/zh/docs/setup/production-environment/)的解决方案时, +请考虑要自己管理 Kubernetes 集群(或相关抽象)的哪些方面,将哪些托付给提供商。 + +对于你自己管理的集群,官方支持的用于部署 Kubernetes 的工具是 +[kubeadm](/zh/docs/setup/production-environment/tools/kubeadm/)。 -[Kubernetes 合作伙伴](https://kubernetes.io/zh/partners/#kcsp) 包括一个 -[已认证的 Kubernetes](https://github.com/cncf/k8s-conformance/#certified-kubernetes) 提供商列表。 +## {{% heading "whatsnext" %}} + +- [下载 Kubernetes](/releases/download/) +- 下载并[安装工具](/zh/docs/tasks/tools/),包括 kubectl 在内 +- 为新集群选择[容器运行时](/zh/docs/setup/production-environment/container-runtimes/) +- 了解集群设置的[最佳实践](/zh/docs/setup/best-practices/) + +Kubernetes 的设计是让其{{< glossary_tooltip term_id="control-plane" text="控制平面" >}}在 Linux 上运行的。 +在集群中,你可以在 Linux 或其他操作系统(包括 Windows)上运行应用程序。 +- 学习[配置包含 Windows 节点的集群](/zh/docs/setup/production-environment/windows/) diff --git a/content/zh/docs/setup/best-practices/cluster-large.md b/content/zh/docs/setup/best-practices/cluster-large.md index d2c6d8fcb9..7e40a387ea 100644 --- a/content/zh/docs/setup/best-practices/cluster-large.md +++ b/content/zh/docs/setup/best-practices/cluster-large.md @@ -25,12 +25,12 @@ Kubernetes {{< param "version" >}} 支持的最大节点数为 5000。 更具体地说,Kubernetes旨在适应满足以下*所有*标准的配置: -* 每个节点的 Pod 数量不超过 100 +* 
每个节点的 Pod 数量不超过 110 * 节点数不超过 5000 * Pod 总数不超过 150000 * 容器总数不超过 300000 @@ -46,7 +46,7 @@ on how your cluster is deployed. To avoid running into cloud provider quota issues, when creating a cluster with many nodes, consider: -* Request a quota increase for cloud resources such as: +* Requesting a quota increase for cloud resources such as: * Computer instances * CPUs * Storage volumes @@ -55,7 +55,7 @@ consider: * Number of load balancers * Network subnets * Log streams -* Gate the cluster scaling actions to brings up new nodes in batches, with a pause +* Gating the cluster scaling actions to brings up new nodes in batches, with a pause between batches, because some cloud providers rate limit the creation of new instances. --> ## 云供应商资源配额 {#quota-issues} @@ -132,6 +132,15 @@ When creating a cluster, you can (using custom tooling): * 启动并配置额外的 etcd 实例 * 配置 {{< glossary_tooltip term_id="kube-apiserver" text="API 服务器" >}},将它用于存储事件 + +有关为大型集群配置和管理 etcd 的详细信息,请参阅 +[为 Kubernetes 运行 etcd 集群](/zh/docs/tasks/administer-cluster/configure-upgrade-etcd/) +和使用 [kubeadm 创建一个高可用 etcd 集群](/zh/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/)。 + @@ -226,4 +235,12 @@ nodes for the level of resource demand in your cluster. 
以及如何使用它来扩展集群组件(包括对集群至关重要的插件)的信息。 [集群自动扩缩器](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#readme) -与许多云供应商集成在一起,帮助你在你的集群中,按照资源需求级别运行正确数量的节点。 \ No newline at end of file +与许多云供应商集成在一起,帮助你在你的集群中,按照资源需求级别运行正确数量的节点。 + + + +[addon resizer](https://github.com/kubernetes/autoscaler/tree/master/addon-resizer#readme) +可帮助你在集群规模变化时自动调整插件的大小。 diff --git a/content/zh/docs/setup/production-environment/_index.md b/content/zh/docs/setup/production-environment/_index.md index 8ee244e33d..b16ed481b8 100644 --- a/content/zh/docs/setup/production-environment/_index.md +++ b/content/zh/docs/setup/production-environment/_index.md @@ -1,4 +1,638 @@ --- title: 生产环境 weight: 30 +no_list: true --- + + + + +生产质量的 Kubernetes 集群需要规划和准备。 +如果你的 Kubernetes 集群是用来运行关键负载的,该集群必须被配置为弹性的(Resilient)。 +本页面阐述你在安装生产就绪的集群或将现有集群升级为生产用途时可以遵循的步骤。 +如果你已经熟悉生产环境安装,因此只关注一些链接,则可以跳到[接下来](#what-s-next)节。 + + + + +## 生产环境考量 {#production-considerations} + +通常,一个生产用 Kubernetes 集群环境与个人学习、开发或测试环境所使用的 +Kubernetes 相比有更多的需求。生产环境可能需要被很多用户安全地访问,需要 +提供一致的可用性,以及能够与需求变化相适配的资源。 + + +在你决定在何处运行你的生产用 Kubernetes 环境(在本地或者在云端),以及 +你希望承担或交由他人承担的管理工作量时,需要考察以下因素如何影响你对 +Kubernetes 集群的需求: + + +- *可用性*:一个单机的 Kubernetes [学习环境](/zh/docs/setup/#学习环境) + 具有单点失效特点。创建高可用的集群则意味着需要考虑: + - 将控制面与工作节点分开 + - 在多个节点上提供控制面组件的副本 + - 为针对集群的 {{< glossary_tooltip term_id="kube-apiserver" text="API 服务器" >}} + 的流量提供负载均衡 + - 随着负载的合理需要,提供足够的可用的(或者能够迅速变为可用的)工作节点 + + +- *规模*:如果你预期你的生产用 Kubernetes 环境要承受固定量的请求, + 你可能可以针对所需要的容量来一次性完成安装。 + 不过,如果你预期服务请求会随着时间增长,或者因为类似季节或者特殊事件的 + 原因而发生剧烈变化,你就需要规划如何处理请求上升时对控制面和工作节点 + 的压力,或者如何缩减集群规模以减少未使用资源的消耗。 + + +- *安全性与访问管理*:在你自己的学习环境 Kubernetes 集群上,你拥有完全的管理员特权。 + 但是针对运行着重要工作负载的共享集群,用户账户不止一两个时,就需要更细粒度 + 的方案来确定谁或者哪些主体可以访问集群资源。 + 你可以使用基于角色的访问控制([RBAC](/zh/docs/reference/access-authn-authz/rbac/)) + 和其他安全机制来确保用户和负载能够访问到所需要的资源,同时确保工作负载及集群 + 自身仍然是安全的。 + 你可以通过管理[策略](/zh/docs/concepts/policy/)和 + [容器资源](/zh/docs/concepts/configuration/manage-resources-containers)来 + 针对用户和工作负载所可访问的资源设置约束, + + +在自行构造 Kubernetes 
生产环境之前，请考虑将这一任务的部分或者全部交给 +[云方案承包服务](/zh/docs/setup/production-environment/turnkey-solutions) +提供商或者其他 [Kubernetes 合作伙伴](https://kubernetes.io/partners/)。 +选项有: + + +- *无服务*:仅是在第三方设备上运行负载，完全不必管理集群本身。你需要为 + CPU 用量、内存和磁盘请求等付费。 +- *托管控制面*:让供应商决定集群控制面的规模和可用性，并负责打补丁和升级等操作。 +- *托管工作节点*:配置一个节点池来满足你的需要，由供应商来确保节点始终可用， + 并在需要的时候完成升级。 +- *集成*:有一些供应商能够将 Kubernetes 与一些你可能需要的其他服务集成, + 这类服务包括存储、容器镜像仓库、身份认证方法以及开发工具等。 + + +无论你是自行构造一个生产用 Kubernetes 集群还是与合作伙伴一起协作，请审阅 +下面章节以评估你的需求，因为这关系到你的集群的 *控制面*、*工作节点*、 +*用户访问* 以及 *负载资源*。 + + +## 生产用集群安装 {#production-cluster-setup} + +在生产质量的 Kubernetes 集群中，控制面用不同的方式来管理集群和可以 +分布到多个计算机上的服务。每个工作节点则代表的是一个可配置来运行 +Kubernetes Pods 的实体。 + + +### 生产用控制面 {#production-control-plane} + +最简单的 Kubernetes 集群中，整个控制面和工作节点服务都运行在同一台机器上。 +你可以通过添加工作节点来提升环境能力，正如 +[Kubernetes 组件](/zh/docs/concepts/overview/components/)示意图所示。 +如果只需要集群在很短的一段时间内可用，或者可以在某些事物出现严重问题时直接丢弃, +这种配置可能符合你的需要。 + + +如果你需要一个更为持久的、高可用的集群，那么你就需要考虑扩展控制面的方式。 +根据设计，运行在一台机器上的单机控制面服务不是高可用的。 +如果保持集群处于运行状态并且需要确保在出现问题时能够被修复这点很重要, +可以考虑以下步骤: + + +- *选择部署工具*:你可以使用类似 kubeadm、kops 和 kubespray 这类工具来部署控制面。 + 参阅[使用部署工具安装 Kubernetes](/zh/docs/setup/production-environment/tools/) + 以了解使用这类部署方法来完成生产就绪部署的技巧。 + 存在不同的[容器运行时](/zh/docs/setup/production-environment/container-runtimes/) + 可供你的部署采用。 + +- *管理证书*:控制面服务之间的安全通信是通过证书来完成的。证书是在部署期间 + 自动生成的，或者你也可以使用你自己的证书机构来生成它们。 + 参阅 [PKI 证书和需求](/zh/docs/setup/best-practices/certificates/)了解细节。 + +- *为 API 服务器配置负载均衡*:配置负载均衡器来将外部的 API 请求散布给运行在 + 不同节点上的 API 服务实例。参阅 + [创建外部负载均衡器](/zh/docs/tasks/access-application-cluster/create-external-load-balancer/) + 了解细节。 + +- *分离并备份 etcd 服务*:etcd 服务可以运行于其他控制面服务所在的机器上, + 也可以运行在不同的机器上以获得更好的安全性和可用性。 + 因为 etcd 存储着集群的配置数据，应该经常性地对 etcd 数据库进行备份, + 以确保在需要的时候你可以修复该数据库。与配置和使用 etcd 相关的细节可参阅 + [etcd FAQ](https://etcd.io/docs/v3.4/faq/)。 + 更多的细节可参阅[为 Kubernetes 运维 etcd 集群](/zh/docs/tasks/administer-cluster/configure-upgrade-etcd/) + 和[使用 kubeadm 配置高可用的 etcd 集群](/zh/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/)。 + +-
*创建多控制面系统*:为了实现高可用性,控制面不应被限制在一台机器上。 + 如果控制面服务是使用某 init 服务(例如 systemd)来运行的,每个服务应该 + 至少运行在三台机器上。不过,将控制面作为服务运行在 Kubernetes Pods + 中可以确保你所请求的个数的服务始终保持可用。 + 调度器应该是可容错的,但不是高可用的。 + 某些部署工具会安装 [Raft](https://raft.github.io/) 票选算法来对 Kubernetes + 服务执行领导者选举。如果主节点消失,另一个服务会被选中并接手相应服务。 + +- *跨多个可用区*:如果保持你的集群一直可用这点非常重要,可以考虑创建一个跨 + 多个数据中心的集群;在云环境中,这些数据中心被视为可用区。 + 若干个可用区在一起可构成地理区域。 + 通过将集群分散到同一区域中的多个可用区内,即使某个可用区不可用,整个集群 + 能够继续工作的机会也大大增加。 + 更多的细节可参阅[跨多个可用区运行](/zh/docs/setup/best-practices/multiple-zones/)。 + +- *管理演进中的特性*:如果你计划长时间保留你的集群,就需要执行一些维护其 + 健康和安全的任务。例如,如果你采用 kubeadm 安装的集群,则有一些可以帮助你完成 + [证书管理](/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/) + 和[升级 kubeadm 集群](/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade) + 的指令。 + 参见[管理集群](/zh/docs/tasks/administer-cluster)了解一个 Kubernetes + 管理任务的较长列表。 + + +要了解运行控制面服务时可使用的选项,可参阅 +[kube-apiserver](/zh/docs/reference/command-line-tools-reference/kube-apiserver/)、 +[kube-controller-manager](/zh/docs/reference/command-line-tools-reference/kube-controller-manager/) 和 +[kube-scheduler](/zh/docs/reference/command-line-tools-reference/kube-scheduler/) +组件参考页面。 +如要了解高可用控制面的例子,可参阅 +[高可用拓扑结构选项](/zh/docs/setup/production-environment/tools/kubeadm/ha-topology/)、 +[使用 kubeadm 创建高可用集群](/zh/docs/setup/production-environment/tools/kubeadm/high-availability/) 以及[为 Kubernetes 运维 etcd 集群](/zh/docs/tasks/administer-cluster/configure-upgrade-etcd/)。 +关于制定 etcd 备份计划,可参阅 +[对 etcd 集群执行备份](/zh/docs/tasks/administer-cluster/configure-upgrade-etcd/#backing-up-an-etcd-cluster)。 + + +### 生产用工作节点 + +生产质量的工作负载需要是弹性的;它们所依赖的其他组件(例如 CoreDNS)也需要是弹性的。 +无论你是自行管理控制面还是让云供应商来管理,你都需要考虑如何管理工作节点 +(有时也简称为*节点*)。 + + +- *配置节点*:节点可以是物理机或者虚拟机。如果你希望自行创建和管理节点, + 你可以安装一个受支持的操作系统,之后添加并运行合适的 + [节点服务](/zh/docs/concepts/overview/components/#node-components)。 + 考虑: + + - 在安装节点时要通过配置适当的内存、CPU 和磁盘速度、存储容量来满足 + 你的负载的需求。 + - 是否通用的计算机系统即足够,还是你有负载需要使用 GPU 处理器、Windows 节点 + 或者 VM 隔离。 + +- *验证节点*:参阅[验证节点配置](/zh/docs/setup/best-practices/node-conformance/) + 以了解如何确保节点满足加入到 Kubernetes 
集群的需求。 + +- *添加节点到集群中*:如果你自行管理你的集群,你可以通过安装配置你的机器, + 之后或者手动加入集群,或者让它们自动注册到集群的 API 服务器。参阅 + [节点](/zh/docs/concepts/architecture/nodes/)节,了解如何配置 Kubernetes + 以便以这些方式来添加节点。 + +- *向集群中添加 Windows 节点*:Kubernetes 提供对 Windows 工作节点的支持; + 这使得你可以运行实现于 Windows 容器内的工作负载。参阅 + [Kubernetes 中的 Windows](/zh/docs/setup/production-environment/windows/) + 了解进一步的详细信息。 + +- *扩缩节点*:制定一个扩充集群容量的规划,你的集群最终会需要这一能力。 + 参阅[大规模集群考察事项](/zh/docs/setup/best-practices/cluster-large/) + 以确定你所需要的节点数;这一规模是基于你要运行的 Pod 和容器个数来确定的。 + 如果你自行管理集群节点,这可能意味着要购买和安装你自己的物理设备。 + +- *节点自动扩缩容*:大多数云供应商支持 + [集群自动扩缩器(Cluster Autoscaler)](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#readme) + 以便替换不健康的节点、根据需求来增加或缩减节点个数。参阅 + [常见问题](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md) + 了解自动扩缩器的工作方式,并参阅 + [Deployment](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#deployment) + 了解不同云供应商是如何实现集群自动扩缩器的。 + 对于本地集群,有一些虚拟化平台可以通过脚本来控制按需启动新节点。 + +- *安装节点健康检查*:对于重要的工作负载,你会希望确保节点以及在节点上 + 运行的 Pod 处于健康状态。通过使用 + [Node Problem Detector](/zh/docs/tasks/debug-application-cluster/monitor-node-health/), + 你可以确保你的节点是健康的。 + + +### 生产级用户环境 + +在生产环境中,情况可能不再是你或者一小组人在访问集群,而是几十 +上百人需要访问集群。在学习环境或者平台原型环境中,你可能具有一个 +可以执行任何操作的管理账号。在生产环境中,你可需要对不同名字空间 +具有不同访问权限级别的很多账号。 + + +建立一个生产级别的集群意味着你需要决定如何有选择地允许其他用户访问集群。 +具体而言,你需要选择验证尝试访问集群的人的身份标识(身份认证),并确定 +他们是否被许可执行他们所请求的操作(鉴权): + + +- *认证(Authentication)*:API 服务器可以使用客户端证书、持有者令牌、身份 + 认证代理或者 HTTP 基本认证机制来完成身份认证操作。 + 你可以选择你要使用的认证方法。通过使用插件,API 服务器可以充分利用你所在 + 组织的现有身份认证方法,例如 LDAP 或者 Kerberos。 + 关于认证 Kubernetes 用户身份的不同方法的描述,可参阅 + [身份认证](/zh/docs/reference/access-authn-authz/authentication/)。 + +- *鉴权(Authorization)*:当你准备为一般用户执行权限判定时,你可能会需要 + 在 RBAC 和 ABAC 鉴权机制之间做出选择。参阅 + [鉴权概述](/zh/docs/reference/access-authn-authz/authorization/),了解 + 对用户账户(以及访问你的集群的服务账户)执行鉴权的不同模式。 + + - *基于角色的访问控制*([RBAC](/zh/docs/reference/access-authn-authz/rbac/)): + 让你通过为通过身份认证的用户授权特定的许可集合来控制集群访问。 + 访问许可可以针对某特定名字空间(Role)或者针对整个集群(CLusterRole)。 + 通过使用 RoleBinding 和 
ClusterRoleBinding 对象,这些访问许可可以被 + 关联到特定的用户身上。 + + - *基于属性的访问控制*([ABAC](/zh/docs/reference/access-authn-authz/abac/)): + 让你能够基于集群中资源的属性来创建访问控制策略,基于对应的属性来决定 + 允许还是拒绝访问。策略文件的每一行都给出版本属性(apiVersion 和 kind) + 以及一个规约属性的映射,用来匹配主体(用户或组)、资源属性、非资源属性 + (/version 或 /apis)和只读属性。 + 参阅[示例](/zh/docs/reference/access-authn-authz/abac/#examples)以了解细节。 + + +作为在你的生产用 Kubernetes 集群中安装身份认证和鉴权机制的负责人, +要考虑的事情如下: + + +- *设置鉴权模式*:当 Kubernetes API 服务器 + ([kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/)) + 启动时,所支持的鉴权模式必须使用 `--authorization-mode` 标志配置。 + 例如,`kube-apiserver.yaml`(位于 `/etc/kubernetes/manifests` 下)中对应的 + 标志可以设置为 `Node,RBAC`。这样就会针对已完成身份认证的请求执行 Node 和 RBAC + 鉴权。 + +- *创建用户证书和角色绑定(RBAC)*:如果你在使用 RBAC 鉴权,用户可以创建 + 由集群 CA 签名的 CertificateSigningRequest(CSR)。接下来你就可以将 Role + 和 ClusterRole 绑定到每个用户身上。 + 参阅[证书签名请求](/zh/docs/reference/access-authn-authz/certificate-signing-requests/) + 了解细节。 + +- *创建组合属性的策略(ABAC)*:如果你在使用 ABAC 鉴权,你可以设置属性组合 + 以构造策略对所选用户或用户组执行鉴权,判定他们是否可访问特定的资源 + (例如 Pod)、名字空间或者 apiGroup。进一步的详细信息可参阅 + [示例](/zh/docs/reference/access-authn-authz/abac/#examples)。 + +- *考虑准入控制器*:针对指向 API 服务器的请求的其他鉴权形式还包括 + [Webhook 令牌认证](/zh/docs/reference/access-authn-authz/authentication/#webhook-token-authentication)。 + Webhook 和其他特殊的鉴权类型需要通过向 API 服务器添加 + [准入控制器](/zh/docs/reference/access-authn-authz/admission-controllers/) + 来启用。 + + +## 为负载资源设置约束 {#set-limits-on-workload-resources} + +生产环境负载的需求可能对 Kubernetes 的控制面内外造成压力。 +在针对你的集群的负载执行配置时,要考虑以下条目: + + +- *设置名字空间限制*:为每个名字空间的内存和 CPU 设置配额。 + 参阅[管理内存、CPU 和 API 资源](/zh/docs/tasks/administer-cluster/manage-resources/) + 以了解细节。你也可以设置 + [层次化名字空间](/blog/2020/08/14/introducing-hierarchical-namespaces/) + 来继承这类约束。 + +- *为 DNS 请求做准备*:如果你希望工作负载能够完成大规模扩展,你的 DNS 服务 + 也必须能够扩大规模。参阅 + [自动扩缩集群中 DNS 服务](/zh/docs/tasks/administer-cluster/dns-horizontal-autoscaling/)。 + +- *创建额外的服务账户*:用户账户决定用户可以在集群上执行的操作,服务账号则定义的 + 是在特定名字空间中 Pod 的访问权限。 + 默认情况下,Pod 使用所在名字空间中的 default 服务账号。 + 
参阅[管理服务账号](/zh/docs/reference/access-authn-authz/service-accounts-admin/) + 以了解如何创建新的服务账号。例如,你可能需要: + + - 为 Pod 添加 Secret,以便 Pod 能够从某特定的容器镜像仓库拉取镜像。 + 参阅[为 Pod 配置服务账号](/zh/docs/tasks/configure-pod-container/configure-service-account/) + 以获得示例。 + - 为服务账号设置 RBAC 访问许可。参阅 + [服务账号访问许可](/zh/docs/reference/access-authn-authz/rbac/#service-account-permissions) + 了解细节。 + +## {{% heading "whatsnext" %}} + + +- 决定你是想自行构造自己的生产用 Kubernetes 还是从某可用的 + [云服务外包厂商](/zh/docs/setup/production-environment/turnkey-solutions/) + 或 [Kubernetes 合作伙伴](https://kubernetes.io/partners/)获得集群。 +- 如果你决定自行构造集群,则需要规划如何处理 + [证书](/zh/docs/setup/best-practices/certificates/) + 并为类似 + [etcd](/zh/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/) + 和 + [API 服务器](/zh/docs/setup/production-environment/tools/kubeadm/ha-topology/) + 这些功能组件配置高可用能力。 + +- 选择使用 [kubeadm](/zh/docs/setup/production-environment/tools/kubeadm/)、 + [kops](/zh/docs/setup/production-environment/tools/kops/) 或 + [Kubespray](/zh/docs/setup/production-environment/tools/kubespray/) + 作为部署方法。 + +- 通过决定[身份认证](/zh/docs/reference/access-authn-authz/authentication/)和 + [鉴权](/zh/docs/reference/access-authn-authz/authorization/)方法来配置用户管理。 + +- 通过配置[资源限制](/zh/docs/tasks/administer-cluster/manage-resources/)、 + [DNS 自动扩缩](/zh/docs/tasks/administer-cluster/dns-horizontal-autoscaling/) + 和[服务账号](/zh/docs/reference/access-authn-authz/service-accounts-admin/) + 来为应用负载作准备。 + diff --git a/content/zh/docs/setup/production-environment/container-runtimes.md b/content/zh/docs/setup/production-environment/container-runtimes.md index 8536b7a74b..655db08c84 100644 --- a/content/zh/docs/setup/production-environment/container-runtimes.md +++ b/content/zh/docs/setup/production-environment/container-runtimes.md @@ -106,6 +106,64 @@ configuration, or reinstall it using automation. 
如果你有切实可行的自动化方案，使用其他已更新配置的节点来替换该节点, 或者使用自动化方案来重新安装。 + +## Cgroup v2 +Cgroup v2 是 cgroup Linux API 的下一个版本。与 cgroup v1 不同的是, +Cgroup v2 只有一个层次结构，而不是每个控制器有一个不同的层次结构。 + + +新版本对 cgroup v1 进行了多项改进，其中一些改进是: + +- 更简洁、更易于使用的 API +- 可将安全子树委派给容器 +- 更新的功能，如压力失速信息(Pressure Stall Information) + + +尽管内核支持混合配置，即其中一些控制器由 cgroup v1 管理，另一些由 cgroup v2 管理, +Kubernetes 仅支持使用同一 cgroup 版本来管理所有控制器。 + +如果 systemd 默认不使用 cgroup v2，你可以通过在内核命令行中添加 +`systemd.unified_cgroup_hierarchy=1` 来配置系统去使用它。 + +```shell +# dnf install -y grubby && \ + sudo grubby \ + --update-kernel=ALL \ + --args="systemd.unified_cgroup_hierarchy=1" +``` + + +要应用配置，必须重新启动节点。 + +切换到 cgroup v2 时，用户体验不应有任何明显差异, +除非用户直接在节点上或在容器内访问 cgroup 文件系统。 +为了使用它，CRI 运行时也必须支持 cgroup v2。 + @@ -169,7 +227,10 @@ Install containerd: {{% tab name="Linux" %}} 1. 从官方Docker仓库安装 `containerd.io` 软件包。可以在 [安装 Docker 引擎](https://docs.docker.com/engine/install/#server) @@ -199,9 +260,10 @@ Install containerd: {{% tab name="Windows (PowerShell)" %}} -启动 Powershell 会话，将 `$Version` 设置为所需的版本(例如:`$ Version=1.4.3`), +启动 Powershell 会话，将 `$Version` 设置为所需的版本(例如:`$Version=1.4.3`), 然后运行以下命令: 1. 在每个节点上，根据[安装 Docker 引擎](https://docs.docker.com/engine/install/#server) 为你的 Linux 发行版安装 Docker。 @@ -597,7 +664,8 @@ in sync.
{{< note >}} 对于运行 Linux 内核版本 4.0 或更高版本，或使用 3.10.0-51 及更高版本的 RHEL 或 CentOS 的系统，`overlay2`是首选的存储驱动程序。 diff --git a/content/zh/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md b/content/zh/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md index 5ee1199d64..771888f4ab 100644 --- a/content/zh/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md +++ b/content/zh/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md @@ -1,5 +1,5 @@ --- -title: 使用 kubeadm 定制控制平面配置 +title: 使用 kubeadm API 定制组件 content_type: concept weight: 40 --- @@ -7,7 +7,7 @@ weight: 40 --- reviewers: - sig-cluster-lifecycle -title: Customizing control plane configuration with kubeadm +title: Customizing components with the kubeadm API content_type: concept weight: 40 --- @@ -15,54 +15,109 @@ weight: 40 + +本页面介绍了如何自定义 kubeadm 部署的组件。 +你可以使用 `ClusterConfiguration` 结构中定义的参数，或者在每个节点上应用补丁来定制控制平面组件。 +你可以使用 `KubeletConfiguration` 和 `KubeProxyConfiguration` 结构分别定制 kubelet 和 kube-proxy 组件。 + +所有这些选项都可以通过 kubeadm 配置 API 实现。 +有关配置中的每个字段的详细信息，你可以导航到我们的 +[API 参考页面](/docs/reference/config-api/kubeadm-config.v1beta3/) 。 + +{{< note >}} + +kubeadm 目前不支持对 CoreDNS 部署进行定制。 +你必须手动更新 `kube-system/coredns` {{< glossary_tooltip text="ConfigMap" term_id="configmap" >}} +并在更新后重新创建 CoreDNS {{< glossary_tooltip text="Pods" term_id="pod" >}}。 +或者，你可以跳过默认的 CoreDNS 部署并部署你自己的 CoreDNS 变种。 +有关更多详细信息，请参阅[在 kubeadm 中使用 init phases](/zh/docs/reference/setup-tools/kubeadm/kubeadm-init/#init-phases).
+{{< /note >}} + + + {{< feature-state for_k8s_version="1.12" state="stable" >}} -kubeadm `ClusterConfiguration` 对象公开了 `extraArgs` 字段,它可以覆盖传递给控制平面组件(如 APIServer、ControllerManager 和 Scheduler)的默认参数。各组件配置使用如下字段定义: +## 使用 `ClusterConfiguration` 中的标志自定义控制平面 {#customizing-the-control-plane-with-flags-in-clusterconfiguration} + +kubeadm `ClusterConfiguration` 对象为用户提供了一种方法, +用以覆盖传递给控制平面组件(如 APIServer、ControllerManager、Scheduler 和 Etcd)的默认参数。 +各组件配置使用如下字段定义: - `apiServer` - `controllerManager` - `scheduler` +- `etcd` -`extraArgs` 字段由 `key: value` 对组成。 -要覆盖控制平面组件的参数: +这些结构包含一个通用的 `extraArgs` 字段,该字段由 `key: value` 组成。 +要覆盖控制平面组件的参数: -1. 将适当的字段添加到配置中。 -2. 向字段添加要覆盖的参数值。 +1. 将适当的字段 `extraArgs` 添加到配置中。 +2. 向字段 `extraArgs` 添加要覆盖的参数值。 3. 用 `--config ` 运行 `kubeadm init`。 - -有关配置中的每个字段的详细信息,您可以导航到我们的 [API 参考页面](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#ClusterConfiguration)。 - {{< note >}} -您可以通过运行 `kubeadm config print init-defaults` 并将输出保存到您选择的文件中,以默认值形式生成 `ClusterConfiguration` 对象。 +你可以通过运行 `kubeadm config print init-defaults` 并将输出保存到你所选的文件中, +以默认值形式生成 `ClusterConfiguration` 对象。 {{< /note >}} +{{< note >}} + +`ClusterConfiguration` 对象目前在 kubeadm 集群中是全局的。 +这意味着你添加的任何标志都将应用于同一组件在不同节点上的所有实例。 +要在不同节点上为每个组件应用单独的配置,您可以使用[补丁](#patches)。 +{{< /note >}} - - +{{< note >}} + +当前不支持重复的参数(keys)或多次传递相同的参数 `--foo`。 +要解决此问题,你必须使用[补丁](#patches)。 +{{< /note >}} -## APIServer 参数 +### APIServer 参数 {#apiserver-flags} 使用示例: ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration kubernetesVersion: v1.16.0 apiServer: extraArgs: - advertise-address: 192.168.0.103 anonymous-auth: "false" enable-admission-plugins: AlwaysPullImages,DefaultStorageClass audit-log-path: /home/johndoe/audit.log @@ -88,7 +142,7 @@ apiServer: -## ControllerManager 参数 +### ControllerManager 参数 {#controllermanager-flags} 使用示例: ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration kubernetesVersion: 
v1.16.0 controllerManager: extraArgs: cluster-signing-key-file: /home/johndoe/keys/ca.key - bind-address: 0.0.0.0 deployment-controller-sync-period: "50" ``` -## Scheduler 参数 +## Scheduler 参数 {#scheduler-flags} 使用示例: ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration kubernetesVersion: v1.16.0 scheduler: extraArgs: - bind-address: 0.0.0.0 - config: /home/johndoe/schedconfig.yaml - kubeconfig: /home/johndoe/kubeconfig.yaml + config: /etc/kubernetes/scheduler-config.yaml + extraVolumes: + - name: schedulerconfig + hostPath: /home/johndoe/schedconfig.yaml + mountPath: /etc/kubernetes/scheduler-config.yaml + readOnly: true + pathType: "File" +``` + +### Etcd 参数 {#etcd-flags} + +有关详细信息,请参阅 [etcd 服务文档](https://etcd.io/docs/). + +使用示例: + +```yaml +apiVersion: kubeadm.k8s.io/v1beta3 +kind: ClusterConfiguration +etcd: + local: + extraArgs: + election-timeout: 1000 +``` + +## 使用补丁定制控制平面 {#patches} + +{{< feature-state for_k8s_version="v1.22" state="beta" >}} + +Kubeadm 允许将包含补丁文件的目录传递给各个节点上的 `InitConfiguration` 和 `JoinConfiguration`。 +这些补丁可被用作控制平面组件清单写入磁盘之前的最后一个自定义步骤。 + +可以使用 `--config <你的 YAML 格式控制文件>` 将配置文件传递给 `kubeadm init`: + +```yaml +apiVersion: kubeadm.k8s.io/v1beta3 +kind: InitConfiguration +nodeRegistration: + patches: + directory: /home/user/somedir ``` +{{< note >}} + +对于 `kubeadm init`,你可以传递一个包含 `ClusterConfiguration` 和 `InitConfiguration` 的文件,以 `---` 分隔。 +{{< /note >}} + + +你可以使用 `--config <你的 YAML 格式配置文件>` 将配置文件传递给 `kubeadm join`: + +```yaml +apiVersion: kubeadm.k8s.io/v1beta3 +kind: JoinConfiguration +nodeRegistration: + patches: + directory: /home/user/somedir +``` + + +补丁目录必须包含名为 `target[suffix][+patchtype].extension` 的文件。 +例如,`kube-apiserver0+merge.yaml` 或只是 `etcd.json`。 + + +- `target` 可以是 `kube-apiserver`、`kube-controller-manager`、`kube-scheduler` 和 `etcd` 之一。 +- `patchtype` 可以是 `strategy`、`merge` 或 `json` 之一,并且这些必须匹配 + [kubectl 
支持](/zh/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch) 的补丁格式。 + 默认补丁类型是 `strategic` 的。 +- `extension` 必须是 `json` 或 `yaml`。 +- `suffix` 是一个可选字符串,可用于确定首先按字母数字应用哪些补丁。 + +{{< note >}} + +如果你使用 `kubeadm upgrade` 升级 kubeadm 节点,你必须再次提供相同的补丁,以便在升级后保留自定义配置。 +为此,你可以使用 `--patches` 参数,该参数必须指向同一目录。 `kubeadm upgrade` 目前不支持用于相同目的的 API 结构配置。 +{{< /note >}} + + +## 自定义 kubelet {#customizing-the-kubelet} + +要自定义 kubelet,你可以在同一配置文件中的 `ClusterConfiguration` 或 `InitConfiguration` +之外添加一个 `KubeletConfiguration`,用 `---` 分隔。 +然后可以将此文件传递给 `kubeadm init`。 + +{{< note >}} + +kubeadm 将相同的 `KubeletConfiguration` 配置应用于集群中的所有节点。 +要应用节点特定设置,你可以使用 `kubelet` 参数进行覆盖,方法是将它们传递到 `InitConfiguration` 和 `JoinConfiguration` +支持的 `nodeRegistration.kubeletExtraArgs` 字段中。一些 kubelet 参数已被弃用, +因此在使用这些参数之前,请在 [kubelet 参考文档](/zh/docs/reference/command-line-tools-reference/kubelet) 中检查它们的状态。 +{{< /note >}} + + +更多详情,请参阅[使用 kubeadm 配置集群中的每个 kubelet](/zh/docs/setup/production-environment/tools/kubeadm/kubelet-integration) + + +## 自定义 kube-proxy {#customizing-kube-proxy} + +要自定义 kube-proxy,你可以在 `ClusterConfiguration` 或 `InitConfiguration` 之外添加一个 +由 `---` 分隔的 `KubeProxyConfiguration`, 传递给 `kubeadm init`。 + +可以导航到 [API 参考页面](/docs/reference/config-api/kubeadm-config.v1beta3/) 查看更多详情, + +{{< note >}} + +kubeadm 将 kube-proxy 部署为 {{< glossary_tooltip text="DaemonSet" term_id="daemonset" >}}, +这意味着 `KubeProxyConfiguration` 将应用于集群中的所有 kube-proxy 实例。 +{{< /note >}} + diff --git a/content/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index 9f7ac07584..9c6137c1d6 100644 --- a/content/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -743,7 +743,7 @@ Talking to the control-plane node with the appropriate credentials, run: 使用适当的凭证与控制平面节点通信,运行: ```bash -kubectl drain 
--delete-local-data --force --ignore-daemonsets +kubectl drain --delete-emptydir-data --force --ignore-daemonsets ``` 标志 `kubeadm init`、`--config` 和 `--certificate-key` 不能混合使用, 因此如果你要使用 - [kubeadm 配置](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2), + [kubeadm 配置](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3), 你必须在相应的配置文件 (位于 `InitConfiguration` 和 `JoinConfiguration: controlPlane`)添加 `certificateKey` 字段。 {{< /note >}} @@ -418,7 +418,7 @@ in the kubeadm config file. 1. Create a file called `kubeadm-config.yaml` with the following contents: ```yaml - apiVersion: kubeadm.k8s.io/v1beta2 + apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration kubernetesVersion: stable controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" diff --git a/content/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index e2640977be..6bd7a1994b 100644 --- a/content/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -21,7 +21,7 @@ card: 本页面显示如何安装 `kubeadm` 工具箱。 有关在执行此安装过程后如何使用 kubeadm 创建集群的信息,请参见 @@ -414,8 +414,9 @@ Install CNI plugins (required for most pod network): ```bash CNI_VERSION="v0.8.2" +ARCH="amd64" sudo mkdir -p /opt/cni/bin -curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | sudo tar -C /opt/cni/bin -xz +curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz" | sudo tar -C /opt/cni/bin -xz ``` 这可能是由许多问题引起的。最常见的是: - 网络连接问题。在继续之前,请检查你的计算机是否具有全部联通的网络连接。 -- kubelet 的默认 cgroup 驱动程序配置不同于 Docker 使用的配置。 - 检查系统日志文件 (例如 `/var/log/message`) 或检查 `journalctl -u kubelet` 的输出。 如果你看见以下内容: - - ```shell - error: failed to run Kubelet: failed to create kubelet: - 
misconfiguration: kubelet cgroup driver: "systemd" is different from docker cgroup driver: "cgroupfs" - ``` - - 有两种常见方法可解决 cgroup 驱动程序问题: - - 1. 按照[此处](/zh/docs/setup/production-environment/container-runtimes/#docker) 的说明 - 重新安装 Docker。 - - 1. 更改 kubelet 配置以手动匹配 Docker cgroup 驱动程序,你可以参考 - [在主节点上配置 kubelet 要使用的 cgroup 驱动程序](/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node) - +- 容器运行时的 cgroup 驱动不同于 kubelet 使用的 cgroup 驱动。要了解如何正确配置 cgroup 驱动, + 请参阅[配置 cgroup 驱动](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/)。 - 控制平面上的 Docker 容器持续进入崩溃状态或(因其他原因)挂起。你可以运行 `docker ps` 命令来检查以及 `docker logs` 命令来检视每个容器的运行日志。 + 对于其他容器运行时,请参阅[使用 crictl 对 Kubernetes 节点进行调试](/zh/docs/tasks/debug-application-cluster/crictl/)。 ## 无法通过其服务 IP 访问 Pod -- 许多网络附加组件尚未启用 [hairpin 模式](/zh/docs/tasks/debug-application-cluster/debug-service/#a-pod-cannot-reach-itself-via-service-ip) +- 许多网络附加组件尚未启用 [hairpin 模式](/zh/docs/tasks/debug-application-cluster/debug-service/#a-pod-fails-to-reach-itself-via-the-service-ip) 该模式允许 Pod 通过其服务 IP 进行访问。这是与 [CNI](https://github.com/containernetworking/cni/issues/476) 有关的问题。 请与网络附加组件提供商联系,以获取他们所提供的 hairpin 模式的最新状态。 @@ -378,6 +353,51 @@ Error from server (NotFound): the server could not find the requested resource This may lead to problems with flannel, which defaults to the first interface on a host. This leads to all hosts thinking they have the same public IP address. To prevent this, pass the `-iface eth1` flag to flannel so that the second interface is chosen. --> + + +## Kubelet 客户端证书轮换失败 {#kubelet-client-cert} + +默认情况下,kubeadm 使用 `/etc/kubernetes/kubelet.conf` 中指定的 `/var/lib/kubelet/pki/kubelet-client-current.pem` 符号链接 +来配置 kubelet 自动轮换客户端证书。如果此轮换过程失败,你可能会在 kube-apiserver 日志中看到 +诸如 `x509: certificate has expired or is not yet valid` 之类的错误。要解决此问题,你必须执行以下步骤: + +1. 从故障节点备份和删除 `/etc/kubernetes/kubelet.conf` 和 `/var/lib/kubelet/pki/kubelet-client*`。 +2. 
在集群中具有 `/etc/kubernetes/pki/ca.key` 的、正常工作的控制平面节点上 + 执行 `kubeadm kubeconfig user --org system:nodes --client-name system:node:$NODE > kubelet.conf`。 + `$NODE` 必须设置为集群中现有故障节点的名称。 + 手动修改生成的 `kubelet.conf` 以调整集群名称和服务器端点, + 或传递 `kubeconfig user --config`(此命令接受 `InitConfiguration`)。 + 如果你的集群没有 `ca.key`,你必须在外部对 `kubelet.conf` 中的嵌入式证书进行签名。 + +3. 将得到的 `kubelet.conf` 文件复制到故障节点上,作为 `/etc/kubernetes/kubelet.conf`。 +4. 在故障节点上重启 kubelet(`systemctl restart kubelet`),等待 `/var/lib/kubelet/pki/kubelet-client-current.pem` 重新创建。 + +5. 在故障节点上运行 `kubeadm init phase kubelet-finalize all`。 + 这将使新的 `kubelet.conf` 文件使用 `/var/lib/kubelet/pki/kubelet-client-current.pem` 并将重新启动 kubelet。 +6. 确保节点状况变为 `Ready`。 + ## 在 Vagrant 中使用 flannel 作为 pod 网络时的默认 NIC 以下错误可能表明 Pod 网络中出现问题: @@ -411,8 +431,13 @@ Error from server: Get https://10.19.0.41:10250/containerLogs/default/mysql-ddc6 curl http://169.254.169.254/metadata/v1/interfaces/public/0/anchor_ipv4/address ``` - The workaround is to tell `kubelet` which IP to use using `-node-ip`. When using Digital Ocean, it can be the public one (assigned to `eth0`) or the private one (assigned to `eth1`) should you want to use the optional private network. The [`KubeletExtraArgs` section of the kubeadm `NodeRegistrationOptions` structure](https://github.com/kubernetes/kubernetes/blob/release-1.13/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go) can be used for this. - + The workaround is to tell `kubelet` which IP to use using `--node-ip`. + When using DigitalOcean, it can be the public one (assigned to `eth0`) or + the private one (assigned to `eth1`) should you want to use the optional + private network. The `kubeletExtraArgs` section of the kubeadm + [`NodeRegistrationOptions` structure](/docs/reference/config-api/kubeadm-config.v1beta2/#kubeadm-k8s-io-v1beta2-NodeRegistrationOptions) + can be used for this. 
+ Then restart `kubelet`: ```sh @@ -443,7 +468,8 @@ Error from server: Get https://10.19.0.41:10250/containerLogs/default/mysql-ddc6 解决方法是通知 `kubelet` 使用哪个 `--node-ip`。当使用 Digital Ocean 时，可以是公网IP(分配给 `eth0`的), 或者是私网IP(分配给 `eth1` 的)。私网 IP 是可选的。 - [kubadm `NodeRegistrationOptions` 结构的 `KubeletExtraArgs` 部分](https://github.com/kubernetes/kubernetes/blob/release-1.13/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go) 被用来处理这种情况。 + [kubeadm `NodeRegistrationOptions` 结构](/zh/docs/reference/config-api/kubeadm-config.v1beta2/#kubeadm-k8s-io-v1beta2-NodeRegistrationOptions) + 的 `KubeletExtraArgs` 部分被用来处理这种情况。 然后重启 `kubelet`: @@ -569,7 +595,7 @@ Alternatively, you can try separating the `key=value` pairs like so: `-apiserver-extra-args "enable-admission-plugins=LimitRanger,enable-admission-plugins=NamespaceExists"` but this will result in the key `enable-admission-plugins` only having the value of `NamespaceExists`. -A known workaround is to use the kubeadm [configuration file](/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#apiserver-flags). +A known workaround is to use the kubeadm [configuration file](/docs/reference/config-api/kubeadm-config.v1beta2/). --> ## 无法将以逗号分隔的值列表传递给 `--component-extra-args` 标志内的参数 @@ -587,7 +613,7 @@ kube-apiserver 这样的控制平面组件。然而，由于解析 (`mapStringSt 但这将导致键 `enable-admission-plugins` 仅有值 `NamespaceExists`。 已知的解决方法是使用 kubeadm -[配置文件](/zh/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#apiserver-flags)。 +[配置文件](/zh/docs/reference/config-api/kubeadm-config.v1beta2/)。 -## NodeRegistration.Taints 字段在编组 kubeadm 配置时丢失 - -*注意:这个 [问题](https://github.com/kubernetes/kubeadm/issues/1358) -仅适用于操控 kubeadm 数据类型的工具(例如，YAML 配置文件)。它将在 kubeadm API v1beta2 修复。* - -默认情况下，kubeadm 将 `node-role.kubernetes.io/master:NoSchedule` 污点应用于控制平面节点。 -如果你希望 kubeadm 不污染控制平面节点，并将 `InitConfiguration.NodeRegistration.Taints` 设置成空切片，则应在编组时省略该字段。 -如果省略该字段，则 kubeadm 将应用默认污点。 - -至少有两种解决方法: - -1.
使用 `node-role.kubernetes.io/master:PreferNoSchedule` 污点代替空切片。 - 除非其他节点具有容量,[否则将在主节点上调度 Pods](/zh/docs/concepts/scheduling-eviction/taint-and-toleration/)。 - -2. 在 kubeadm init 退出后删除污点: - - ```shell - kubectl taint nodes NODE_NAME node-role.kubernetes.io/master:NoSchedule- - ``` - -为了解决这个问题,你可以使用 kubeadm 的[配置文件](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) +为了解决这个问题,你可以使用 kubeadm 的[配置文件](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3) 来配置 FlexVolume 的目录。 在(使用 `kubeadm init` 创建的)主控制节点上,使用 `-config` 参数传入如下文件: ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: InitConfiguration nodeRegistration: kubeletExtraArgs: volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/" --- -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration controllerManager: extraArgs: @@ -722,7 +710,7 @@ On joining Nodes: 在加入到集群中的节点上,使用下面的文件: ```yaml -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: JoinConfiguration nodeRegistration: kubeletExtraArgs: diff --git a/content/zh/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/zh/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index ce14ee6354..286ede85a4 100644 --- a/content/zh/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/zh/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -5,8 +5,10 @@ weight: 65 --- 在很多组织中,其服务和应用的很大比例是 Windows 应用。 [Windows 容器](https://aka.ms/windowscontainers)提供了一种对进程和包依赖关系 @@ -32,16 +46,27 @@ Windows 容器调度到 Kubernetes 集群中 Windows 节点上的生产级支持 ## kubernetes 中的 Windows 容器 {#windows-containers-in-kubernetes} -若要在 Kubernetes 中启用对 Windows 容器的编排,只需在现有的 Linux 集群中 +若要在 Kubernetes 中启用对 Windows 容器的编排,可以在现有的 Linux 集群中 包含 Windows 节点。在 Kubernetes 上调度 {{< glossary_tooltip text="Pods" term_id="pod" >}} -中的 Windows 容器与调用基于 Linux 的容器一样简单、一样容易。 +中的 
Windows 容器与调用基于 Linux 的容器类似。 为了运行 Windows 容器,你的 Kubernetes 集群必须包含多个操作系统,控制面 节点运行 Linux,工作节点则可以根据负载需要运行 Windows 或 Linux。 @@ -52,15 +77,22 @@ Windows Server 2019 是唯一被支持的 Windows 操作系统,在 Windows 上 [Microsoft 文档](https://docs.microsoft.com/en-us/windows-server/get-started-19/servicing-channels-19)。 {{< note >}} -Kubernetes 控制面,包括[主控组件](/zh/docs/concepts/overview/components/),继续 -在 Linux 上运行。目前没有支持完全是 Windows 节点的 Kubernetes 集群的计划。 +Kubernetes 控制面,包括[主控组件](/zh/docs/concepts/overview/components/), +继续在 Linux 上运行。 +目前没有支持完全是 Windows 节点的 Kubernetes 集群的计划。 {{< /note >}} {{< note >}} 在本文中,当我们讨论 Windows 容器时,我们所指的是具有进程隔离能力的 Windows @@ -75,7 +107,10 @@ In this document, when we talk about Windows containers we mean Windows containe #### Windows OS Version Support -Refer to the following table for Windows operating system support in Kubernetes. A single heterogeneous Kubernetes cluster can have both Windows and Linux worker nodes. Windows containers have to be scheduled on Windows nodes and Linux containers on Linux nodes. +Refer to the following table for Windows operating system support in +Kubernetes. A single heterogeneous Kubernetes cluster can have both Windows +and Linux worker nodes. Windows containers have to be scheduled on Windows +nodes and Linux containers on Linux nodes. 
--> ## 支持的功能与局限性 {#supported-functionality-and-limitations} @@ -89,17 +124,14 @@ Windows 容器仅能调度到 Windows 节点,Linux 容器则只能调度到 Li | Kubernetes 版本 | Windows Server LTSC 版本 | Windows Server SAC 版本 | | --- | --- | --- | --- | -| *Kubernetes v1.14* | Windows Server 2019 | Windows Server ver 1809 | -| *Kubernetes v1.15* | Windows Server 2019 | Windows Server ver 1809 | -| *Kubernetes v1.16* | Windows Server 2019 | Windows Server ver 1809 | -| *Kubernetes v1.17* | Windows Server 2019 | Windows Server ver 1809 | -| *Kubernetes v1.18* | Windows Server 2019 | Windows Server ver 1809, Windows Server ver 1903, Windows Server ver 1909 | -| *Kubernetes v1.19* | Windows Server 2019 | Windows Server ver 1909, Windows Server ver 2004 | +| *Kubernetes v1.20* | Windows Server 2019 | Windows Server ver 1909, Windows Server ver 2004 | +| *Kubernetes v1.21* | Windows Server 2019 | Windows Server ver 2004, Windows Server ver 20H2 | +| *Kubernetes v1.22* | Windows Server 2019 | Windows Server ver 2004, Windows Server ver 20H2 | 关于不同的 Windows Server 版本的服务渠道,包括其支持模式等相关信息可以在 [Windows Server servicing channels](https://docs.microsoft.com/en-us/windows-server/get-started-19/servicing-channels-19) @@ -113,8 +145,8 @@ chose to upgrade their operating system for containers running on Kubernetes, we will offer guidance and step-by-step instructions when we add support for a new operating system version. This guidance will include recommended upgrade procedures for upgrading user applications together with cluster nodes. -Windows nodes adhere to Kubernetes [version-skew -policy](/docs/setup/release/version-skew-policy/) (node to control plane +Windows nodes adhere to Kubernetes +[version-skew policy](/docs/setup/release/version-skew-policy/) (node to control plane versioning) the same way as Linux nodes do today. 
--> 我们并不指望所有 Windows 客户都为其应用频繁地更新操作系统。 @@ -126,20 +158,59 @@ Windows 节点遵从 Kubernetes [版本偏差策略](/zh/docs/setup/release/version-skew-policy/)(节点到控制面的 版本控制),与 Linux 节点的现行策略相同。 -Windows Server 主机操作系统会受 [Windows Server](https://www.microsoft.com/en-us/cloud-platform/windows-server-pricing) + +Windows Server 主机操作系统会受 +[Windows Server](https://www.microsoft.com/en-us/cloud-platform/windows-server-pricing) 授权策略控制。Windows 容器镜像则遵从 [Windows 容器的补充授权条款](https://docs.microsoft.com/en-us/virtualization/windowscontainers/images-eula) 约定。 + 带进程隔离的 Windows 容器受一些严格的兼容性规则约束, [其中宿主 OS 版本必须与容器基准镜像的 OS 版本相同](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility)。 一旦我们在 Kubernetes 中支持带 Hyper-V 隔离的 Windows 容器, 这一约束和兼容性规则也会发生改变。 + +#### Pause 镜像 {#pause-image} + +Kubernetes 维护着一个多体系结构镜像,其中包括对 Windows 的支持。 +对于 Kubernetes v1.22,推荐的 pause 镜像是 `k8s.gcr.io/pause:3.5`。 +[源代码](https://github.com/kubernetes/kubernetes/tree/master/build/pause)可在 GitHub 上找到。 + +Microsoft 维护了一个支持 Linux 和 Windows amd64 的多体系结构镜像: `mcr.microsoft.com/oss/kubernetes/pause:3.5`。 +此镜像与 Kubernetes 维护的镜像是从同一来源构建,但所有 Windows 二进制文件 +均由 Microsoft [签名](https://docs.microsoft.com/en-us/windows-hardware/drivers/install/authenticode)。 +当生产环境需要被签名的二进制文件时,建议使用 Microsoft 维护的镜像。 + #### 计算 {#compute} @@ -192,7 +263,8 @@ to Windows. * [控制器(Controllers)](/zh/docs/concepts/workloads/controllers/) Kubernetes 控制器处理 Pod 的期望状态。Windows 容器支持以下负载控制器: @@ -207,7 +279,10 @@ to Windows. * [服务(Services)](/zh/docs/concepts/services-networking/service/) Kubernetes Service 是一种抽象对象,用来定义 Pod 的一个逻辑集合及用来访问这些 Pod 的策略。Service 有时也称作微服务(Micro-service)。你可以使用服务来实现 @@ -221,7 +296,10 @@ to Windows. 
* 无头(Headless)服务 Docker EE-basic 19.03+ 是建议所有 Windows Server 版本采用的容器运行时。 该容器运行时能够与 kubelet 中的 dockershim 代码协同工作。 ##### CRI-ContainerD -{{< feature-state for_k8s_version="v1.19" state="beta" >}} +{{< feature-state for_k8s_version="v1.20" state="stable" >}} -{{< caution >}} -在 ContainerD 上使用 GMSA 访问 Windows 网络共享资源时,有一个 -[已知的局限](/zh/docs/tasks/configure-pod-container/configure-gmsa/#gmsa-limitations), -需要内核补丁来解决。 -你可以在关注 [Microsoft Windows Containers 问题跟踪](https://github.com/microsoft/Windows-Containers/issues/44) -来跟进相关的更新。 -{{< /caution >}} - - -{{< glossary_tooltip term_id="containerd" text="ContainerD" >}} 1.4.0-beta.2+ -也可在 Windows Kubernetes 节点上用作容器运行时。 - - -在 Windows 对 ContainerD 的最初支持是在 Kubernetes v1.18 加入的。 -Windows 上 ContainerD 的进展可以在 -[enhancements#1001](https://github.com/kubernetes/enhancements/issues/1001) -跟进。 - -你可以进一步了解如何[在 Windows 上安装 ContainerD](/zh/docs/setup/production-environment/container-runtimes/#install-containerd). +{{< glossary_tooltip term_id="containerd" text="ContainerD" >}} 1.4.0+ +也可作为 Windows Kubernetes 节点上的容器运行时。 #### 持久性存储 {#persistent-storage} @@ -310,7 +375,13 @@ Windows 支持以下大类的 Kubernetes 卷插件: ##### 树内卷插件 {#in-tree-volume-plugins} @@ -329,11 +400,20 @@ Code associated with in-tree volume plugins ship as part of the core Kubernetes ##### FlexVolume 插件 {#flexvolume-plugins} -与 [FlexVolume](/docs/concepts/storage/volumes/#flexVolume) 插件相关的代码是作为 +与 [FlexVolume](/zh/docs/concepts/storage/volumes/#flexVolume) 插件相关的代码是作为 树外(Out-of-tree)脚本或可执行文件来发布的,因此需要在宿主系统上直接部署。 FlexVolume 插件处理将卷挂接到 Kubernetes 节点或从其上解挂、将卷挂载到 Pod 中 各个容器上或从其上卸载等操作。对于与 FlexVolume 插件相关联的持久卷的配备和 @@ -350,10 +430,17 @@ FlexVolume 插件处理将卷挂接到 Kubernetes 节点或从其上解挂、将 --> ##### CSI 插件 {#csi-plugins} -{{< feature-state for_k8s_version="v1.16" state="alpha" >}} +{{< feature-state for_k8s_version="v1.22" state="stable" >}} 与 {{< glossary_tooltip text="CSI" term_id="csi" >}} 插件相关联的代码作为 树外脚本和可执行文件来发布且通常发布为容器镜像形式,并使用 DaemonSet 和 @@ -361,24 +448,38 @@ StatefulSet 这类标准的 Kubernetes 构造体来部署。 
CSI 插件处理 Kubernetes 中的很多卷管理操作:对卷的配备、去配和调整大小, 将卷挂接到 Kubernetes 节点或从节点上解除挂接,将卷挂载到需要持久数据的 Pod 中的某容器或从容器上卸载,使用快照和克隆来备份或恢复持久数据。 -CSI 插件通常包含节点插件(以 DaemonSet 形式运行于各节点上)和控制器插件。 -CSI 节点插件(尤其是那些通过块设备或者共享文件系统形式来提供持久卷的插件) -需要执行很多特权级操作,例如扫描磁盘设备、挂载文件系统等等。 -这些操作在不同的宿主操作系统上差别较大。对于 Linux 工作节点而言,容器化的 -CSI 节点插件通常部署为特权级的容器。对于 Windows 工作节点而言,容器化的 -CSI 节点插件的特权操作通过 -[csi-proxy](https://github.com/kubernetes-csi/csi-proxy) 来支持;csi-proxy 是一个社区管理的、独立的可执行文件,需要预安装在每个 Windows 节点之上。请参考你要部署的 CSI 插件的部署指南以进一步了解其细节。 +CSI 插件与执行本地存储操作的 CSI 节点插件通信。 +在 Windows 节点上,CSI 节点插件通常调用处理本地存储操作的 [csi-proxy](https://github.com/kubernetes-csi/csi-proxy) +公开的 API, csi-proxy 由社区管理。 + +有关安装的更多详细信息,请参阅你要部署的 Windows CSI 插件的环境部署指南。 +你也可以参考以下[安装步骤](https://github.com/kubernetes-csi/csi-proxy#installation) 。 + #### 联网 {#networking} @@ -412,7 +513,12 @@ The following service spec types are supported: ##### 网络模式 {#network-modes} @@ -421,26 +527,193 @@ Windows 支持五种不同的网络驱动/模式:二层桥接(L2bridge)、 在一个包含 Windows 和 Linux 工作节点的异构集群中,你需要选择一种对 Windows 和 Linux 兼容的联网方案。下面是 Windows 上支持的一些树外插件及何时使用某种 CNI 插件的建议: - -| 网络驱动 | 描述 | 容器报文修改 | 网络插件 | 网络插件特点 | -| ----------- | ---------- | -------------- | ---------- | ---------------| -| L2bridge | 容器挂接到外部 vSwitch 上。容器挂接到下层网络之上,但由于容器的 MAC 地址在入站和出站时被重写,物理网络不需要这些地址。 | MAC 地址被重写为宿主系统的 MAC 地址,IP 地址也可能依据 HNS OutboundNAT 策略重写为宿主的 IP 地址。 | [win-bridge](https://github.com/containernetworking/plugins/tree/master/plugins/main/windows/win-bridge)、[Azure-CNI](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md);Flannel 宿主网关(host-gateway)使用 win-bridge | win-bridge 使用二层桥接(L2bridge)网络模式,将容器连接到下层宿主系统上,从而提供最佳性能。需要用户定义的路由(User-Defined Routes,UDR)才能实现节点间的连接。 | -| L2Tunnel | 这是二层桥接的一种特殊情形,但仅被用于 Azure 上。所有报文都被发送到虚拟化环境中的宿主机上并根据 SDN 策略进行处理。 | MAC 地址被改写,IP 地址在下层网络上可见。 | [Azure-CNI](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md) | Azure-CNI 使得容器能够与 Azure vNET 集成,并允许容器利用 [Azure 虚拟网络](https://azure.microsoft.com/en-us/services/virtual-network/)所提供的功能特性集合。例如,可以安全地连接到 
Azure 服务上或者使用 Azure NSG。你可以参考 [azure-cni](https://docs.microsoft.com/en-us/azure/aks/concepts-network#azure-cni-advanced-networking) 所提供的一些示例。 | -| 覆盖网络(Kubernetes 中为 Windows 提供的覆盖网络支持处于 *alpha* 阶段) | 每个容器会获得一个连接到外部 vSwitch 的虚拟网卡(vNIC)。每个覆盖网络都有自己的、通过定制 IP 前缀来定义的 IP 子网。覆盖网络驱动使用 VXLAN 封装。 | 封装于外层包头内。 | [Win-overlay](https://github.com/containernetworking/plugins/tree/master/plugins/main/windows/win-overlay)、Flannel VXLAN(使用 win-overlay) | 当(比如出于安全原因)期望虚拟容器网络与下层宿主网络隔离时,应该使用 win-overlay。如果你的数据中心可用 IP 地址受限,覆盖网络允许你在不同的网络中复用 IP 地址(每个覆盖网络有不同的 VNID 标签)。这一选项要求在 Windows Server 2009 上安装 [KB4489899](https://support.microsoft.com/help/4489899) 补丁。 | -| 透明网络([ovn-kubernetes](https://github.com/openvswitch/ovn-kubernetes) 的特殊用例) | 需要一个外部 vSwitch。容器挂接到某外部 vSwitch 上,该 vSwitch 通过逻辑网络(逻辑交换机和路由器)允许 Pod 间通信。 | 报文或者通过 [GENEVE](https://datatracker.ietf.org/doc/draft-gross-geneve/) 来封装,或者通过 [STT](https://datatracker.ietf.org/doc/draft-davie-stt/) 隧道来封装,以便能够到达不在同一宿主系统上的每个 Pod。
    报文通过 OVN 网络控制器所提供的隧道元数据信息来判定是转发还是丢弃。
    北-南向通信通过 NAT 网络地址转译来实现。 | [ovn-kubernetes](https://github.com/openvswitch/ovn-kubernetes) | [通过 Ansible 来部署](https://github.com/openvswitch/ovn-kubernetes/tree/master/contrib)。所发布的 ACL 可以通过 Kubernetes 策略来应用实施。支持 IPAM 。负载均衡能力不依赖 kube-proxy。网络地址转译(NAT)也不需要 iptables 或 netsh。 | -| NAT(*未在 Kubernetes 中使用*) | 容器获得一个连接到某内部 vSwitch 的 vNIC 接口。DNS/DHCP 服务通过名为 [WinNAT](https://blogs.technet.microsoft.com/virtualization/2016/05/25/windows-nat-winnat-capabilities-and-limitations/) 的内部组件来提供。 | MAC 地址和 IP 地址都被重写为宿主系统的 MAC 地址和 IP 地址。| [nat](https://github.com/Microsoft/windows-container-networking/tree/master/plugins/nat) | 列在此表中仅出于完整性考虑 | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    网络驱动描述容器报文更改网络插件网络插件特点
    L2bridge + 容器挂接到外部 vSwitch 上。容器挂接到下层网络之上,但由于容器的 MAC + 地址在入站和出站时被重写,物理网络不需要这些地址。 + + + MAC 地址被重写为宿主系统的 MAC 地址,IP 地址也可能依据 HNS OutboundNAT + 策略重写为宿主的 IP 地址。 + + win-bridge、 + Azure-CNI、 + + Flannel 宿主网关(host-gateway)使用 win-bridge + + + win-bridge 使用二层桥接(L2bridge)网络模式,将容器连接到下层宿主系统上, + 从而提供最佳性能。需要用户定义的路由(User-Defined Routes,UDR)才能 + 实现节点间的连接。 +
    L2Tunnel + + 这是二层桥接的一种特殊情形,但仅被用于 Azure 上。 + 所有报文都被发送到虚拟化环境中的宿主机上并根据 SDN 策略进行处理。 + + + MAC 地址被改写,IP 地址在下层网络上可见。 + + Azure-CNI + + + Azure-CNI 使得容器能够与 Azure vNET 集成,并允许容器利用 + [Azure 虚拟网络](https://azure.microsoft.com/en-us/services/virtual-network/) + 所提供的功能特性集合。例如,可以安全地连接到 Azure 服务上或者使用 Azure NSG。 + 你可以参考 + [azure-cni](https://docs.microsoft.com/en-us/azure/aks/concepts-network#azure-cni-advanced-networking) + 所提供的一些示例。 +
    覆盖网络(Kubernetes 中为 Windows 提供的覆盖网络支持处于 *alpha* 阶段) + + 每个容器会获得一个连接到外部 vSwitch 的虚拟网卡(vNIC)。 + 每个覆盖网络都有自己的、通过定制 IP 前缀来定义的 IP 子网。 + 覆盖网络驱动使用 VxLAN 封装。 + + + 封装于外层包头内。 + + Win-overlay、 + Flannel VXLAN(使用 win-overlay) + + + 当(比如出于安全原因)期望虚拟容器网络与下层宿主网络隔离时, + 应该使用 win-overlay。如果你的数据中心可用 IP 地址受限, + 覆盖网络允许你在不同的网络中复用 IP 地址(每个覆盖网络有不同的 VNID 标签)。 + 这一选项要求在 Windows Server 2009 上安装 + [KB4489899](https://support.microsoft.com/help/4489899) 补丁。 +
    + + 透明网络([ovn-kubernetes](https://github.com/openvswitch/ovn-kubernetes) 的特殊用例) + + + 需要一个外部 vSwitch。容器挂接到某外部 vSwitch 上,该 vSwitch + 通过逻辑网络(逻辑交换机和路由器)允许 Pod 间通信。 + + + 报文或者通过 [GENEVE](https://datatracker.ietf.org/doc/draft-gross-geneve/) 来封装, + 或者通过 [STT](https://datatracker.ietf.org/doc/draft-davie-stt/) 隧道来封装, + 以便能够到达不在同一宿主系统上的每个 Pod。
    + 报文通过 OVN 网络控制器所提供的隧道元数据信息来判定是转发还是丢弃。
    + 北-南向通信通过 NAT 网络地址转译来实现。 +
    + ovn-kubernetes + + + [通过 Ansible 来部署](https://github.com/openvswitch/ovn-kubernetes/tree/master/contrib)。 + 所发布的 ACL 可以通过 Kubernetes 策略来应用实施。支持 IPAM 。 + 负载均衡能力不依赖 kube-proxy。 + 网络地址转译(NAT)也不需要 iptables 或 netsh。 +
    NAT(未在 Kubernetes 中使用 + + 容器获得一个连接到某内部 vSwitch 的 vNIC 接口。 + DNS/DHCP 服务通过名为 + [WinNAT](https://blogs.technet.microsoft.com/virtualization/2016/05/25/windows-nat-winnat-capabilities-and-limitations/) + 的内部组件来提供。 + + + MAC 地址和 IP 地址都被重写为宿主系统的 MAC 地址和 IP 地址。 + + nat + + + 列在此表中仅出于完整性考虑 +
    如前所述,[Flannel](https://github.com/coreos/flannel) CNI [meta 插件](https://github.com/containernetworking/plugins/tree/master/plugins/meta/flannel) @@ -458,7 +731,8 @@ As outlined above, the [Flannel](https://github.com/coreos/flannel) CNI [meta pl 并将包含节点所被分配的子网信息的正确配置发送给 IPAM 插件(例如 host-local)。 ##### 负载均衡与服务 {#load-balancing-and-services} 在 Windows 系统上,你可以使用以下配置来设定服务和负载均衡行为: +{{< table caption="Windows 服务设置" >}} - -{{< table caption="Windows 服务配置" >}} - -| 功能特性 | 描述 | 支持的 Kubernetes 版本 | 支持的 Windows OS 版本 | 如何启用 | -| -------- | --------| ---------------------- | ---------------------- | ---------- | -| 会话亲和性 | 确保来自特定客户的连接每次都被交给同一 Pod。 | v1.19+ | [Windows Server vNext Insider Preview Build 19551](https://blogs.windows.com/windowsexperience/2020/01/28/announcing-windows-server-vnext-insider-preview-build-19551/) 或更高版本 | 将 `service.spec.sessionAffinity` 设置为 "ClientIP" | -| 直接服务器返回 | 这是一种负载均衡模式,IP 地址的修正和负载均衡地址转译(LBNAT)直接在容器的 vSwitch 端口上处理;服务流量到达时,其源端 IP 地址设置为来源 Pod 的 IP。这种方案的延迟很低且可扩缩性好。 | v1.15+ | Windows Server 2004 版 | 为 kube-proxy 设置标志:`--feature-gates="WinDSR=true" --enable-dsr=true` | -| 保留目标地址 | 对服务流量略过 DNAT 步骤,这样就可以在到达后端 Pod 的报文中保留目标服务的虚拟 IP 地址。这一配置也会确保入站报文的客户端 IP 地址也被保留下来。 | v1.15+ | Windows Server 1903 或更高版本 | 在服务注解中设置 `"preserve-destination": "true"` 并启用 kube-proxy 中的 DSR 标志。 | -| IPv4/IPv6 双栈网络 | 在集群内外同时支持原生的 IPv4-到-IPv4 和 IPv6-到-IPv6 通信。 | v1.19+ | Windows Server vNext Insider Preview Build 19603 或更高版本 | 参见 [IPv4/IPv6 dual-stack](#ipv4ipv6-dual-stack) | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    功能特性描述所支持的 Kubernetes 版本所支持的 Windows OS 版本如何启用
    会话亲和性 + + 确保来自特定客户的连接每次都被交给同一 Pod。 + v1.20+ + + [Windows Server vNext Insider Preview Build 19551](https://blogs.windows.com/windowsexperience/2020/01/28/announcing-windows-server-vnext-insider-preview-build-19551/) + 或更高版本 + + + 将 service.spec.sessionAffinitys 设置为 "ClientIP" +
    直接服务器返回(DSR) + + 这是一种负载均衡模式,IP 地址的修正和负载均衡地址转译(LBNAT) + 直接在容器的 vSwitch 端口上处理;服务流量到达时,其源端 IP 地址 + 设置为来源 Pod 的 IP。 + v1.20+ + Windows Server 2019 + + + 为 kube-proxy 设置标志:`--feature-gates="WinDSR=true" --enable-dsr=true` +
    保留目标地址 + + 对服务流量略过 DNAT 步骤,这样就可以在到达后端 Pod 的报文中保留目标服务的 + 虚拟 IP 地址。还要禁止节点之间的转发。 + v1.20+Windows Server 1903 或更高版本 + + 在服务注解中设置 `"preserve-destination": "true"` 并启用 + kube-proxy 中的 DSR 标志。 +
    IPv4/IPv6 双栈网络 + + 在集群内外同时支持原生的 IPv4-到-IPv4 和 IPv6-到-IPv6 通信。 + v1.19+Windows Server 2004 或更高版本 + + 参见 [IPv4/IPv6 双栈网络](#ipv4ipv6-dual-stack) +
    保留客户端 IP + + 确保入站流量的源 IP 地址被保留。同样要禁止节点之间的转发。 + v1.20+Windows Server 2019 或更高版本 + + 将 service.spec.externalTrafficPolicy 设置为 "Local", + 并在 kube-proxy 上启用 DSR。 +
    {{< /table >}} #### IPv4/IPv6 双栈支持 {#ipv4ipv6-dual-stack} 你可以通过使用 `IPv6DualStack` [特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/) 来为 `l2bridge` 网络启用 IPv4/IPv6 双栈联网支持。 -进一步的细节可参见[启用 IPv4/IPv6 双协议栈](/zh/docs/concepts/services-networking/dual-stack#enable-ipv4ipv6-dual-stack)。 +进一步的细节可参见 +[启用 IPv4/IPv6 双协议栈](/zh/docs/concepts/services-networking/dual-stack#enable-ipv4ipv6-dual-stack)。 对 Windows 而言,在 Kubernetes 中使用 IPv6 需要 -Windows Server vNext Insider Preview Build 19603 或更高版本。 +Windows Server 2004 (内核版本 10.0.19041.610)或更高版本。 目前 Windows 上的覆盖网络(VXLAN)还不支持双协议栈联网。 ### 局限性 {#limitations} -#### 控制面 {#control-plane} - 在 Kubernetes 架构和节点阵列中仅支持将 Windows 作为工作节点使用。 这意味着 Kubernetes 集群必须总是包含 Linux 主控节点,零个或者多个 Linux 工作节点以及零个或者多个 Windows 工作节点。 -#### 计算 {#compute} - -##### 资源管理与进程隔离 {#resource-management-and-process-isolation} +#### 资源处理 {#resource-handling} Linux 上使用 Linux 控制组(CGroups)作为 Pod 的边界,以实现资源控制。 容器都创建于这一边界之内,从而实现网络、进程和文件系统的隔离。 @@ -587,16 +955,82 @@ Linux 上使用 Linux 控制组(CGroups)作为 Pod 的边界,以实现资 获得宿主系统上的任何身份标识。 -##### 操作系统限制 {#operating-system-restrictions} +#### 资源预留 {#resource-reservations} -Windows 有着严格的兼容性规则,宿主 OS 的版本必须与容器基准镜像 OS 的版本匹配。 -目前仅支持容器操作系统为 Windows Server 2019 的 Windows 容器。 -对于容器的 Hyper-V 隔离、允许一定程度上的 Windows 容器镜像版本向后兼容性等等, -都是将来版本计划的一部分。 +##### 内存预留 {#memory-reservations} + +Windows 不像 Linux 一样有一个内存耗尽(Out-of-memory)进程杀手(Process +Killer)机制。Windows 总是将用户态的内存分配视为虚拟请求,页面文件(Pagefile) +是必需的。这一差异的直接结果是 Windows 不会像 Linux 那样出现内存耗尽的状况, +系统会将进程内存页面写入磁盘而不会因内存耗尽而终止进程。 +当内存被过量使用且所有物理内存都被用光时,系统的换页行为会导致性能下降。 + + +使用 kubelet 参数 `--kubelet-reserve` 与/或 `-system-reserve` 可以统计 +节点上的内存用量(各容器之外),进而可能将内存用量限制在一个合理的范围,。 +这样做会减少节点可分配内存 +([NodeAllocatable](/zh/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable))。 + + +在你部署工作负载时,对容器使用资源限制(必须仅设置 limits 或者让 limits 等于 +requests 值)。这也会从 NodeAllocatable 中耗掉部分内存量,从而避免在节点 +负荷已满时调度器继续向节点添加 Pods。 + + +避免过量分配的最佳实践是为 kubelet 配置至少 2 GB 的系统预留内存,以供 +Windows、Docker 和 Kubernetes 进程使用。 + + +##### CPU 预留 
{#cpu-reservations} + +为了统计 Windows、Docker 和其他 Kubernetes 宿主进程的 CPU 用量,建议 +预留一定比例的 CPU,以便对事件作出相应。此值需要根据 Windows 节点上 +CPU 核的个数来调整,要确定此百分比值,用户需要为其所有节点确定 Pod +密度的上线,并监控系统服务的 CPU 用量,从而选择一个符合其负载需求的值。 + + +使用 kubelet 参数 `--kubelet-reserve` 与/或 `-system-reserve` 可以统计 +节点上的 CPU 用量(各容器之外),进而可能将 CPU 用量限制在一个合理的范围,。 +这样做会减少节点可分配 CPU +([NodeAllocatable](/zh/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable))。 ##### 功能特性限制 {#feature-restrictions} @@ -621,48 +1058,36 @@ Windows 有着严格的兼容性规则,宿主 OS 的版本必须与容器基 * 并非支持共享名字空间的所有功能特性(参见 API 节以了解详细信息) -##### 内存预留与处理 {#memory-reservations-and-handling} +The behavior of the following kubelet flags is different on Windows nodes as described below: -Windows 不像 Linux 一样有一个内存耗尽(Out-of-memory)进程杀手(Process -Killer)机制。Windows 总是将用户态的内存分配视为虚拟请求,页面文件(Pagefile) -是必需的。这一差异的直接结果是 Windows 不会像 Linux 那样出现内存耗尽的状况, -系统会将进程内存页面写入磁盘而不会因内存耗尽而终止进程。 -当内存被过量使用且所有物理内存都被用光时,系统的换页行为会导致性能下降。 - - -通过一个两步的过程是有可能将内存用量限制在一个合理的范围的。 -首先,使用 kubelet 参数 `--kubelet-reserve` 与/或 `--system-reserve` -来划分节点上的内存用量(各容器之外)。 -这样做会减少节点可分配内存 -([NodeAllocatable](/zh/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable))。 -在你部署工作负载时,对容器使用资源限制(必须仅设置 limits 或者让 limits 等于 -requests 值)。这也会从 NodeAllocatable 中耗掉部分内存量,从而避免在节点 -负荷已满时调度器继续向节点添加 Pods。 - -避免过量分配的最佳实践是为 kubelet 配置至少 2 GB 的系统预留内存,以供 -Windows、Docker 和 Kubernetes 进程使用。 - - -参数的不同行为描述如下: +#### 与 Linux 相比参数行为的差别 -* `--kubelet-reserve`、`--system-reserve` 和 `--eviction-hard` 标志会更新节点可分配内存量 +以下 kubelet 参数的行为在 Windows 节点上有些不同,描述如下: + +* `--kubelet-reserve`、`--system-reserve` 和 `--eviction-hard` 标志 + 会更新节点可分配资源量 * 未实现通过使用 `--enforce-node-allocable` 来完成的 Pod 驱逐 * 未实现通过使用 `--eviction-hard` 和 `--eviction-soft` 来完成的 Pod 驱逐 * `MemoryPressure` 状况未实现 @@ -671,24 +1096,42 @@ The behavior of the flags behave differently as described below: `--kubelet-reserve` 和 `--system-reserve` 不会为 kubelet 或宿主系统上运行 的进程设限。这意味着 kubelet 或宿主系统上的进程可能导致内存资源紧张, 而这一情况既不受节点可分配量影响,也不会被调度器感知。 +* 在 Windows 节点上存在一个额外的参数用来设置 kubelet 
进程的优先级,称作 + `--windows-priorityclass`。此参数允许 kubelet 进程获得与 Windows 宿主上 + 其他进程相比更多的 CPU 时间片。 + 关于可用参数值及其含义的进一步信息可参考 + [Windows Priority Classes](https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities#priority-class)。 + 为了让 kubelet 总能够获得足够的 CPU 周期,建议将此参数设置为 + `ABOVE_NORMAL_PRIORITY_CLASS` 或更高。 #### 存储 {#storage} Windows 上包含一个分层的文件系统来挂载容器的分层,并会基于 NTFS 来创建一个 拷贝文件系统。容器中的所有文件路径都仅在该容器的上下文内完成解析。 -* 卷挂载仅可针对容器中的目录进行,不可针对独立的文件 -* 卷挂载无法将文件或目录投射回宿主文件系统 +* Docker 卷挂载仅可针对容器中的目录进行,不可针对独立的文件。 + 这一限制不适用于 CRI-containerD。 +* 卷挂载无法将文件或目录投射回宿主文件系统。 * 不支持只读文件系统,因为 Windows 注册表和 SAM 数据库总是需要写访问权限。 不过,Windows 支持只读的卷。 * 不支持卷的用户掩码和访问许可,因为宿主与容器之间并不共享 SAM,二者之间不存在 @@ -708,25 +1151,27 @@ As a result, the following storage functionality is not supported on Windows nod * NFS based storage/volume support * Expanding the mounted volume (resizefs) --> -因此,Windows 节点上不支持以下存储功能: +因此,Windows 节点上不支持以下存储功能特性: * 卷的子路径挂载;只能在 Windows 容器上挂载整个卷。 -* 为 Secret 执行子路径挂载 -* 宿主挂载投射 -* 默认访问模式(因为该特性依赖 UID/GID) -* 只读的根文件系统;映射的卷仍然支持 `readOnly` -* 块设备映射 -* 将内存作为存储介质 -* 类似 UUID/GUID、每用户不同的 Linux 文件系统访问许可等文件系统特性 -* 基于 NFS 的存储和卷支持 -* 扩充已挂载卷(resizefs) +* 为 Secret 执行子路径挂载; +* 宿主挂载投射; +* 默认访问模式 defaultMode(因为该特性依赖 UID/GID); +* 只读的根文件系统;映射的卷仍然支持 `readOnly`; +* 块设备映射; +* 将内存作为存储介质; +* 类似 UUID/GUID、每用户不同的 Linux 文件系统访问许可等文件系统特性; +* 基于 NFS 的存储和卷支持; +* 扩充已挂载卷(resizefs)。 -#### 联网 {#networking} +#### 联网 {#networking-limitations} Windows 容器联网与 Linux 联网有着非常重要的差别。 [Microsoft documentation for Windows Container Networking](https://docs.microsoft.com/en-us/virtualization/windowscontainers/container-networking/architecture) @@ -757,35 +1202,67 @@ Windows 为容器提供的注册表与宿主系统的注册表是分离的,因 The following networking functionality is not supported on Windows nodes * Host networking mode is not available for Windows pods -* Local NodePort access from the node itself fails (works for other nodes or external clients) -* Accessing service VIPs from nodes will be available with a future release of Windows Server -* Overlay networking 
support in kube-proxy is an alpha release. In addition, it requires [KB4482887](https://support.microsoft.com/en-us/help/4482887/windows-10-update-kb4482887) to be installed on Windows Server 2019 -* Local Traffic Policy and DSR mode -* Windows containers connected to l2bridge, l2tunnel, or overlay networks do not support communicating over the IPv6 stack. There is outstanding Windows platform work required to enable these network drivers to consume IPv6 addresses and subsequent Kubernetes work in kubelet, kube-proxy, and CNI plugins. -* Outbound communication using the ICMP protocol via the win-overlay, win-bridge, and Azure-CNI plugin. Specifically, the Windows data plane ([VFP](https://www.microsoft.com/en-us/research/project/azure-virtual-filtering-platform/)) doesn't support ICMP packet transpositions. This means: - * ICMP packets directed to destinations within the same network (e.g. pod to pod communication via ping) work as expected and without any limitations - * TCP/UDP packets work as expected and without any limitations - * ICMP packets directed to pass through a remote network (e.g. pod to external internet communication via ping) cannot be transposed and thus will not be routed back to their source - * Since TCP/UDP packets can still be transposed, one can substitute `ping ` with `curl ` to be able to debug connectivity to the outside world. + +* Local NodePort access from the node itself fails (works for other nodes or + external clients) + +* Accessing service VIPs from nodes will be available with a future release of + Windows Server + +* A single service can only support up to 64 backend pods / unique destination IPs + +* Overlay networking support in kube-proxy is a beta feature. 
In addition, it + requires + [KB4482887](https://support.microsoft.com/en-us/help/4482887/windows-10-update-kb4482887) + to be installed on Windows Server 2019 + +* Local Traffic Policy in non-DSR mode +* Windows containers connected to overlay networks do not support + communicating over the IPv6 stack. There is outstanding Windows platform + work required to enable this network driver to consume IPv6 addresses and + subsequent Kubernetes work in kubelet, kube-proxy, and CNI plugins. --> Windows 节点不支持以下联网功能: -* Windows Pod 不能使用宿主网络模式 +* Windows Pod 不能使用宿主网络模式; * 从节点本地访问 NodePort 会失败(但从其他节点或外部客户端可访问) -* Windows Server 的未来版本中会支持从节点访问服务的 VIPs -* kube-proxy 的覆盖网络支持是 Alpha 特性。此外,它要求在 Windows Server 2019 上安装 - [KB4482887](https://support.microsoft.com/en-us/help/4482887/windows-10-update-kb4482887) 补丁 -* 本地流量策略和 DSR(保留目标地址)模式 -* 连接到 l2bridge、l2tunnel 或覆盖网络的 Windows 容器不支持使用 IPv6 协议栈通信。 - 要使得这些网络驱动能够支持 IPv6 地址需要在 Windows 平台上开展大量的工作, +* Windows Server 的未来版本中会支持从节点访问服务的 VIP; +* 每个服务最多支持 64 个后端 Pod 或独立的目标 IP 地址; +* kube-proxy 的覆盖网络支持是 Beta 特性。此外,它要求在 Windows Server 2019 上安装 + [KB4482887](https://support.microsoft.com/en-us/help/4482887/windows-10-update-kb4482887) 补丁; +* 非 DSR(保留目标地址)模式下的本地流量策略; +* 连接到覆盖网络的 Windows 容器不支持使用 IPv6 协议栈通信。 + 要使得这一网络驱动支持 IPv6 地址需要在 Windows 平台上开展大量的工作, 还需要在 Kubernetes 侧修改 kubelet、kube-proxy 以及 CNI 插件。 + * 通过 win-overlay、win-bridge 和 Azure-CNI 插件使用 ICMP 协议向集群外通信。 - 尤其是,Windows 数据面([VFP](https://www.microsoft.com/en-us/research/project/azure-virtual-filtering-platform/)) + 尤其是,Windows 数据面 + ([VFP](https://www.microsoft.com/en-us/research/project/azure-virtual-filtering-platform/)) 不支持转换 ICMP 报文。这意味着: - * 指向同一网络内目标地址的 ICMP 报文(例如 Pod 之间的 ping 通信)是可以工作的,没有局限性 - * TCP/UDP 报文可以正常工作,没有局限性 + + * 指向同一网络内目标地址的 ICMP 报文(例如 Pod 之间的 ping 通信)是可以工作的, + 没有局限性; + * TCP/UDP 报文可以正常工作,没有局限性; * 指向远程网络的 ICMP 报文(例如,从 Pod 中 ping 外部互联网的通信)无法被转换, - 因此也无法被路由回到其源点。 + 因此也无法被路由回到其源点; * 由于 TCP/UDP 包仍可被转换,用户可以将 `ping <目标>` 操作替换为 `curl <目标>` 以便能够调试与外部世界的网络连接。 @@ -801,17 +1278,25 @@ 
Kubernetes v1.15 中添加了以下功能特性: ##### CNI 插件 {#cni-plugins} * Windows 参考网络插件 win-bridge 和 win-overlay 当前未实现 [CNI spec](https://github.com/containernetworking/cni/blob/master/SPEC.md) v0.4.0, - 原因是缺少检查用(CHECK)的实现。 + 原因是缺少检查(CHECK)用的实现。 + * Windows 上的 Flannel VXLAN CNI 有以下局限性: 1. 其设计上不支持从节点到 Pod 的连接。 @@ -824,12 +1309,25 @@ Kubernetes v1.15 中添加了以下功能特性: ##### DNS {#dns-limitations} * 不支持 DNS 的 ClusterFirstWithHostNet 配置。Windows 将所有包含 “.” 的名字 视为全限定域名(FQDN),因而不会对其执行部分限定域名(PQDN)解析。 + * 在 Linux 上,你可以有一个 DNS 后缀列表供解析部分限定域名时使用。 在 Windows 上,我们只有一个 DNS 后缀,即与该 Pod 名字空间相关联的 DNS 后缀(例如 `mydns.svc.cluster.local`)。 @@ -838,12 +1336,17 @@ Kubernetes v1.15 中添加了以下功能特性: `default.svc.cluster.local`。在 Windows Pod 中,你可以解析 `kubernetes.default.svc.cluster.local` 和 `kubernetes`,但无法解析二者 之间的形式,如 `kubernetes.default` 或 `kubernetes.default.svc`。 + * 在 Windows 上,可以使用的 DNS 解析程序有很多。由于这些解析程序彼此之间 会有轻微的行为差别,建议使用 `Resolve-DNSName` 工具来完成名字查询解析。 ##### IPv6 @@ -853,7 +1356,9 @@ Windows 上的 Kubernetes 不支持单协议栈的“只用 IPv6”联网选项 ##### 会话亲和性 {#session-affinity} @@ -863,10 +1368,12 @@ Windows 服务设置最大会话粘滞时间。 ##### 安全性 {#security} @@ -874,19 +1381,25 @@ Secret 以明文形式写入节点的卷中(而不是像 Linux 那样写入内 这意味着客户必须做以下两件事: 1. 使用文件访问控制列表来保护 Secret 文件所在的位置 -2. 使用 [BitLocker](https://docs.microsoft.com/en-us/windows/security/information-protection/bitlocker/bitlocker-how-to-deploy-on-windows-server) +1. 
使用 [BitLocker](https://docs.microsoft.com/en-us/windows/security/information-protection/bitlocker/bitlocker-how-to-deploy-on-windows-server) 来执行卷层面的加密 -Windows 上目前不支持 [`RunAsUser`](/zh/docs/concepts/policy/pod-security-policy/#users-and-groups)。 -一种替代方案是在为容器打包时创建本地账号。 -将来的版本中可能会添加对 `RunAsUser` 的支持。 +用户可以为 Windows Pods 或 Container 设置 +[`RunAsUserName`](/zh/docs/tasks/configure-pod-container/configure-runasusername) +以便以非节点默认用户来执行容器中的进程。这大致等价于设置 +[`RunAsUser`](/zh/docs/concepts/policy/pod-security-policy/#users-and-groups)。 不支持特定于 Linux 的 Pod 安全上下文特权,例如 SELinux、AppArmor、Seccomp、 权能字(POSIX 权能字)等等。 @@ -896,7 +1409,11 @@ Windows 上目前不支持 [`RunAsUser`](/zh/docs/concepts/policy/pod-security-p @@ -910,29 +1427,57 @@ At a high level, these OS concepts are different: 在较高层面,不同的 OS 概念有: * 身份标识 - Linux 使用证书类型来表示用户 ID(UID)和组 ID(GID)。用户和组名 - 没有特定标准,它们仅是 `/etc/groups` 或 `/etc/passwd` 中的别名表项,会映射回 + 没有特定标准,它们是 `/etc/groups` 或 `/etc/passwd` 中的别名表项,会映射回 UID+GID。Windows 使用一个更大的二进制安全标识符(SID),保存在 Windows 安全访问管理器(Security Access Manager,SAM)数据库中。此数据库并不在宿主系统 与容器间,或者任意两个容器之间共享。 + * 文件许可 - Windows 使用基于 SID 的访问控制列表,而不是基于 UID+GID 的访问权限位掩码。 -* 文件路径 - Windows 上的习惯是使用 `\` 而非 `/`。Go 语言的 IO 库通常能够同时接受二者, - 并做出正确判断。不过当你在指定要在容器内解析的路径或命令行时,可能需要使用 `\`。 -* 信号 - Windows 交互式应用以不同方式来处理终止事件,并可实现以下方式之一或组合: - * UI 线程处理包含 WM_CLOSE 在内的良定的消息 - * 控制台应用使用控制处理程序来处理 Ctrl-C 或 Ctrl-Break - * 服务会注册服务控制处理程序,接受 SERVICE_CONTROL_STOP 控制代码 +* 文件路径 - Windows 上的习惯是使用 `\` 而非 `/`。Go 语言的 IO + 库同时接受这两种文件路径分隔符。不过,当你在指定要在容器内解析的路径或命令行时, + 可能需要使用 `\`。 + +* 信号(Signal) - Windows 交互式应用以不同方式来处理终止事件,并可实现以下方式之一或组合: + + * UI 线程处理包含 `WM_CLOSE` 在内的良定的消息 + + * 控制台应用使用控制处理程序来处理 Ctrl-C 或 Ctrl-Break + + * 服务会注册服务控制处理程序,接受 `SERVICE_CONTROL_STOP` 控制代码 + + 退出代码遵从相同的习惯,0 表示成功,非 0 值表示失败。 特定的错误代码在 Windows 和 Linux 上可能会不同。不过,从 Kubernetes 组件 @@ -941,23 +1486,21 @@ Exit Codes follow the same convention where 0 is success, nonzero is failure. 
Th -##### V1.Container +* V1.Container.ResourceRequirements.limits.cpu and + V1.Container.ResourceRequirements.limits.memory - Windows doesn't use hard + limits for CPU allocations. Instead, a share system is used. The existing + fields based on millicores are scaled into relative shares that are followed + by the Windows scheduler. + See [kuberuntime/helpers_windows.go](https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kuberuntime/helpers_windows.go), + and [resource controls in Microsoft docs](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/resource-controls) -* `v1.Container.ResourceRequirements.limits.cpu` 和 `v1.Container.ResourceRequirements.limits.memory` - Windows + * Huge pages are not implemented in the Windows container runtime, and are + not available. They require + [asserting a user privilege](https://docs.microsoft.com/en-us/windows/desktop/Memory/large-page-support) + that's not configurable for containers. +--> +* `v1.Container.ResourceRequirements.limits.cpu` 和 + `v1.Container.ResourceRequirements.limits.memory` - Windows 不对 CPU 分配设置硬性的限制。与之相反,Windows 使用一个份额(share)系统。 基于毫核(millicores)的现有字段值会被缩放为相对的份额值,供 Windows 调度器使用。 参见 [kuberuntime/helpers_windows.go](https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kuberuntime/helpers_windows.go) 和 @@ -967,22 +1510,75 @@ Exit Codes follow the same convention where 0 is success, nonzero is failure. 
Th 巨页支持需要[判定用户的特权](https://docs.microsoft.com/en-us/windows/desktop/Memory/large-page-support) 而这一特性无法在容器级别配置。 -* `v1.Container.ResourceRequirements.requests.cpu` 和 `v1.Container.ResourceRequirements.requests.memory` - 请求 + +* `v1.Container.ResourceRequirements.requests.cpu` 和 + `v1.Container.ResourceRequirements.requests.memory` - 请求 值会从节点可分配资源中扣除,从而可用来避免节点上的资源过量分配。 但是,它们无法用来在一个已经过量分配的节点上提供资源保障。 如果操作员希望彻底避免过量分配,作为最佳实践,他们就需要为所有容器设置资源请求值。 -* `v1.Container.SecurityContext.allowPrivilegeEscalation` - 在 Windows 上无法实现,对应的权能 - 无一可在 Windows 上生效。 + +* `v1.Container.SecurityContext.allowPrivilegeEscalation` - 在 Windows + 上无法实现,对应的权能无一可在 Windows 上生效。 + + * `v1.Container.SecurityContext.Capabilities` - Windows 上未实现 POSIX 权能机制 * `v1.Container.SecurityContext.privileged` - Windows 不支持特权容器 * `v1.Container.SecurityContext.procMount` - Windows 不包含 `/proc` 文件系统 * `v1.Container.SecurityContext.readOnlyRootFilesystem` - 在 Windows 上无法实现, 要在容器内使用注册表或运行系统进程就必需写访问权限。 + + * `v1.Container.SecurityContext.runAsGroup` - 在 Windows 上无法实现,没有 GID 支持 + * `v1.Container.SecurityContext.runAsNonRoot` - Windows 上没有 root 用户。 与之最接近的等价用户是 `ContainerAdministrator`,而该身份标识在节点上并不存在。 -* `v1.Container.SecurityContext.runAsUser` - 在 Windows 上无法实现,因为没有作为整数支持的 GID。 -* `v1.Container.SecurityContext.seLinuxOptions` - 在 Windows 上无法实现,因为没有 SELinux + +* `v1.Container.SecurityContext.runAsUser` - 在 Windows 上无法实现, + 因为没有作为整数支持的 GID。 + +* `v1.Container.SecurityContext.seLinuxOptions` - 在 Windows 上无法实现, + 因为没有 SELinux + * `V1.Container.terminationMessagePath` - 因为 Windows 不支持单个文件的映射,这一功能 在 Windows 上也受限。默认值 `/dev/termination-log` 在 Windows 上也无法使用因为 对应路径在 Windows 上不存在。 @@ -991,15 +1587,18 @@ Exit Codes follow the same convention where 0 is success, nonzero is failure. 
Th ##### V1.Pod * V1.Pod.hostIPC, v1.pod.hostpid - host namespace sharing is not possible on Windows + * V1.Pod.hostNetwork - There is no Windows OS support to share the host network -* V1.Pod.dnsPolicy - ClusterFirstWithHostNet - is not supported because Host Networking is not supported on Windows. + +* V1.Pod.dnsPolicy - ClusterFirstWithHostNet - is not supported because Host + Networking is not supported on Windows. + * V1.Pod.podSecurityContext - see V1.PodSecurityContext below -* V1.Pod.shareProcessNamespace - this is a beta feature, and depends on Linux namespaces which are not implemented on Windows. Windows cannot share process namespaces or the container's root filesystem. Only the network can be shared. -* V1.Pod.terminationGracePeriodSeconds - this is not fully implemented in Docker on Windows, see: [reference](https://github.com/moby/moby/issues/25982). The behavior today is that the ENTRYPOINT process is sent CTRL_SHUTDOWN_EVENT, then Windows waits 5 seconds by default, and finally shuts down all processes using the normal Windows shutdown behavior. The 5 second default is actually in the Windows registry [inside the container](https://github.com/moby/moby/issues/25982#issuecomment-426441183), so it can be overridden when the container is built. -* V1.Pod.volumeDevices - this is a beta feature, and is not implemented on Windows. Windows cannot attach raw block devices to pods. -* V1.Pod.volumes - EmptyDir, Secret, ConfigMap, HostPath - all work and have tests in TestGrid - * V1.emptyDirVolumeSource - the Node default medium is disk on Windows. Memory is not supported, as Windows does not have a built-in RAM disk. -* V1.VolumeMount.mountPropagation - mount propagation is not supported on Windows. + +* V1.Pod.shareProcessNamespace - this is a beta feature, and depends on Linux + namespaces which are not implemented on Windows. Windows cannot share + process namespaces or the container's root filesystem. Only the network can be + shared. 
--> ##### V1.Pod @@ -1007,51 +1606,117 @@ Exit Codes follow the same convention where 0 is success, nonzero is failure. Th * `v1.Pod.hostNetwork` - Windows 操作系统不支持共享宿主网络 * `v1.Pod.dnsPolicy` - 不支持 `ClusterFirstWithHostNet`,因为 Windows 不支持宿主网络 * `v1.Pod.podSecurityContext` - 参见下面的 `v1.PodSecurityContext` -* `v1.Pod.shareProcessNamespace` - 此为 Beta 特性且依赖于 Windows 上未实现的 Linux - 名字空间。Windows 无法共享进程名字空间或者容器的根文件系统。只能共享网络。 +* `v1.Pod.shareProcessNamespace` - 此为 Beta 特性且依赖于 Windows 上未实现 + 的 Linux 名字空间。 + Windows 无法共享进程名字空间或者容器的根文件系统。只能共享网络。 + + * `v1.Pod.terminationGracePeriodSeconds` - 这一特性未在 Windows 版本的 Docker 中完全实现。 参见[问题报告](https://github.com/moby/moby/issues/25982)。 - 目前实现的行为是向 ENTRYPOINT 进程发送 CTRL_SHUTDOWN_EVENT 时间,之后 Windows 默认 + 目前实现的行为是向 `ENTRYPOINT` 进程发送 `CTRL_SHUTDOWN_EVENT` 事件,之后 Windows 默认 等待 5 秒钟,并最终使用正常的 Windows 关机行为关闭所有进程。 - 这里的 5 秒钟默认值实际上保存在[容器内](https://github.com/moby/moby/issues/25982#issuecomment-426441183) + 这里的 5 秒钟默认值实际上保存在 + [容器内](https://github.com/moby/moby/issues/25982#issuecomment-426441183) 的 Windows 注册表中,因此可以在构造容器时重载。 + * `v1.Pod.volumeDevices` - 此为 Beta 特性且未在 Windows 上实现。Windows 无法挂接 原生的块设备到 Pod 中。 -* `v1.Pod.volumes` - `emptyDir`、`secret`、`configMap` 和 `hostPath` 都可正常工作且在 - TestGrid 中测试。 - * `v1.emptyDir.volumeSource` - Windows 上节点的默认介质是磁盘。不支持将内存作为介质, - 因为 Windows 不支持内置的 RAM 磁盘。 + + +* `v1.Pod.volumes` - `emptyDir`、`secret`、`configMap` 和 `hostPath` + 都可正常工作且在 TestGrid 中测试。 + + * `v1.emptyDir.volumeSource` - Windows 上节点的默认介质是磁盘。 + 不支持将内存作为介质,因为 Windows 不支持内置的 RAM 磁盘。 + * `v1.VolumeMount.mountPropagation` - Windows 上不支持挂载传播。 ##### V1.PodSecurityContext PodSecurityContext 的所有选项在 Windows 上都无法工作。这些选项列在下面仅供参考。 * `v1.PodSecurityContext.seLinuxOptions` - Windows 上无 SELinux + * `v1.PodSecurityContext.runAsUser` - 提供 UID;Windows 不支持 + * `v1.PodSecurityContext.runAsGroup` - 提供 GID;Windows 不支持 + * `v1.PodSecurityContext.runAsNonRoot` - Windows 上没有 root 用户 最接近的等价账号是 `ContainerAdministrator`,而该身份标识在节点上不存在 + * `v1.PodSecurityContext.supplementalGroups` - 
提供 GID;Windows 不支持 + * `v1.PodSecurityContext.sysctls` - 这些是 Linux sysctl 接口的一部分;Windows 上 没有等价机制。 + +#### 操作系统版本限制 {#operating-system-version-restrictions} + +Windows 有着严格的兼容性规则,宿主 OS 的版本必须与容器基准镜像 OS 的版本匹配。 +目前仅支持容器操作系统为 Windows Server 2019 的 Windows 容器。 +对于容器的 Hyper-V 隔离、允许一定程度上的 Windows 容器镜像版本向后兼容性等等, +都是将来版本计划的一部分。 + ## 获取帮助和故障排查 {#troubleshooting} @@ -1059,453 +1724,528 @@ Your main source of help for troubleshooting your Kubernetes cluster should star [这份文档](/docs/tasks/debug-application-cluster/troubleshooting/)。 该文档中包含了一些额外的、特定于 Windows 系统的故障排查帮助信息。 Kubernetes 中日志是故障排查的一个重要元素。确保你在尝试从其他贡献者那里获得 -故障排查帮助时提供日志信息。 -你可以按照 SIG-Windows [贡献指南和收集日志](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs) +故障排查帮助时提供日志信息。你可以按照 SIG-Windows +[贡献指南和收集日志](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs) 所给的指令来操作。 -1. 我怎样知道 `start.ps1` 是否已成功完成? +* 我怎样知道 `start.ps1` 是否已成功完成? - 你应该能看到节点上运行的 kubelet、kube-proxy 和(如果你选择 Flannel - 作为联网方案)flanneld 宿主代理进程,它们的运行日志显示在不同的 - PowerShell 窗口中。此外,你的 Windows 节点应该在你的 Kubernetes 集群 - 列举为 "Ready" 节点。 + 你应该能看到节点上运行的 kubelet、kube-proxy 和(如果你选择 Flannel + 作为联网方案)flanneld 宿主代理进程,它们的运行日志显示在不同的 + PowerShell 窗口中。此外,你的 Windows 节点应该在你的 Kubernetes 集群 + 列举为 "Ready" 节点。 -2. 我可以将 Kubernetes 节点进程配置为服务运行在后台么? +* 我可以将 Kubernetes 节点进程配置为服务运行在后台么? - kubelet 和 kube-proxy 都已经被配置为以本地 Windows 服务运行, - 并且在出现失效事件(例如进程意外结束)时通过自动重启服务来提供一定的弹性。 - 你有两种办法将这些节点组件配置为服务。 + kubelet 和 kube-proxy 都已经被配置为以本地 Windows 服务运行, + 并且在出现失效事件(例如进程意外结束)时通过自动重启服务来提供一定的弹性。 + 你有两种办法将这些节点组件配置为服务。 - - 1. 
以本地 Windows 服务的形式 - - Kubelet 和 kube-proxy 可以用 `sc.exe` 以本地 Windows 服务的形式运行: - - ```powershell - # 用两个单独的命令为 kubelet 和 kube-proxy 创建服务 - sc.exe create <组件名称> binPath= "<可执行文件路径> -service <其它参数>" - - # 请注意如果参数中包含空格,必须使用转义 - sc.exe create kubelet binPath= "C:\kubelet.exe --service --hostname-override 'minion' <其它参数>" - - # 启动服务 - Start-Service kubelet - Start-Service kube-proxy - - # 停止服务 - Stop-Service kubelet (-Force) - Stop-Service kube-proxy (-Force) - - # 查询服务状态 - Get-Service kubelet - Get-Service kube-proxy - ``` - - - 2. 使用 nssm.exe - - 你也总是可以使用替代的服务管理器,例如[nssm.exe](https://nssm.cc/),来为你在后台运行 - 这些进程(`flanneld`、`kubelet` 和 `kube-proxy`)。你可以使用这一 - [示例脚本](https://github.com/Microsoft/SDN/tree/master/Kubernetes/flannel/register-svc.ps1), - 利用 `nssm.exe` 将 `kubelet`、`kube-proxy` 和 `flanneld.exe` 注册为要在后台运行的 - Windows 服务。 - - ```powershell - register-svc.ps1 -NetworkMode <网络模式> -ManagementIP -ClusterCIDR <集群子网> -KubeDnsServiceIP -LogDir <日志目录> - - # NetworkMode = 网络模式 l2bridge(flannel host-gw,也是默认值)或 overlay(flannel vxlan)选做网络方案 - # ManagementIP = 分配给 Windows 节点的 IP 地址。你可以使用 ipconfig 得到此值 - # ClusterCIDR = 集群子网范围(默认值为 10.244.0.0/16) - # KubeDnsServiceIP = Kubernetes DNS 服务 IP(默认值为 10.96.0.10) - # LogDir = kubelet 和 kube-proxy 的日志会被重定向到这一目录中的对应输出文件,默认值为 `C:\k`。 - ``` - - 若以上所引用的脚本不适合,你可以使用下面的例子手动配置 `nssm.exe`。 - - ```powershell - # 注册 flanneld.exe - nssm install flanneld C:\flannel\flanneld.exe - nssm set flanneld AppParameters --kubeconfig-file=c:\k\config --iface= --ip-masq=1 --kube-subnet-mgr=1 - nssm set flanneld AppEnvironmentExtra NODE_NAME=<主机名> - nssm set flanneld AppDirectory C:\flannel - nssm start flanneld - - # 注册 kubelet.exe - # Microsoft 在 mcr.microsoft.com/k8s/core/pause:1.2.0 发布其 pause 基础设施容器 - nssm install kubelet C:\k\kubelet.exe - nssm set kubelet AppParameters --hostname-override= --v=6 --pod-infra-container-image=mcr.microsoft.com/k8s/core/pause:1.2.0 --resolv-conf="" --allow-privileged=true --enable-debugging-handlers --cluster-dns= 
--cluster-domain=cluster.local --kubeconfig=c:\k\config --hairpin-mode=promiscuous-bridge --image-pull-progress-deadline=20m --cgroups-per-qos=false --log-dir= --logtostderr=false --enforce-node-allocatable="" --network-plugin=cni --cni-bin-dir=c:\k\cni --cni-conf-dir=c:\k\cni\config - nssm set kubelet AppDirectory C:\k - nssm start kubelet - - # 注册 kube-proxy.exe (l2bridge / host-gw) - nssm install kube-proxy C:\k\kube-proxy.exe - nssm set kube-proxy AppDirectory c:\k - nssm set kube-proxy AppParameters --v=4 --proxy-mode=kernelspace --hostname-override=<主机名>--kubeconfig=c:\k\config --enable-dsr=false --log-dir=<日志目录> --logtostderr=false - nssm.exe set kube-proxy AppEnvironmentExtra KUBE_NETWORK=cbr0 - nssm set kube-proxy DependOnService kubelet - nssm start kube-proxy - - # 注册 kube-proxy.exe (overlay / vxlan) - nssm install kube-proxy C:\k\kube-proxy.exe - nssm set kube-proxy AppDirectory c:\k - nssm set kube-proxy AppParameters --v=4 --proxy-mode=kernelspace --feature-gates="WinOverlay=true" --hostname-override=<主机名> --kubeconfig=c:\k\config --network-name=vxlan0 --source-vip=<源端 VIP> --enable-dsr=false --log-dir=<日志目录> --logtostderr=false - nssm set kube-proxy DependOnService kubelet - nssm start kube-proxy - ``` - - - 作为初始的故障排查操作,你可以使用在 [nssm.exe](https://nssm.cc/) 中使用下面的标志 - 以便将标准输出和标准错误输出重定向到一个输出文件: - - ```powershell - nssm set <服务名称> AppStdout C:\k\mysvc.log - nssm set <服务名称> AppStderr C:\k\mysvc.log - ``` - - 要了解更多的细节,可参见官方的 [nssm 用法](https://nssm.cc/usage)文档。 - - -3. 我的 Windows Pods 无发连接网络 - - 如果你在使用虚拟机,请确保 VM 网络适配器均已开启 MAC 侦听(Spoofing)。 - - -4. 
我的 Windows Pods 无法 ping 外部资源 - - Windows Pods 目前没有为 ICMP 协议提供出站规则。不过 TCP/UDP 是支持的。 - 尝试与集群外资源连接时,可以将 `ping ` 命令替换为对应的 `curl ` 命令。 - - 如果你还遇到问题,很可能你在 - [cni.conf](https://github.com/Microsoft/SDN/blob/master/Kubernetes/flannel/l2bridge/cni/config/cni.conf) - 中的网络配置值得额外的注意。你总是可以编辑这一静态文件。 - 配置的更新会应用到所有新创建的 Kubernetes 资源上。 - - Kubernetes 网络的需求之一(参见[Kubernetes 模型](/zh/docs/concepts/cluster-administration/networking/)) - 是集群内部无需网络地址转译(NAT)即可实现通信。为了符合这一要求,对所有我们不希望出站时发生 NAT - 的通信都存在一个 [ExceptionList](https://github.com/Microsoft/SDN/blob/master/Kubernetes/flannel/l2bridge/cni/config/cni.conf#L20)。 - 然而这也意味着你需要将你要查询的外部 IP 从 ExceptionList 中移除。 - 只有这时,从你的 Windows Pod 发起的网络请求才会被正确地通过 SNAT 转换以接收到 - 来自外部世界的响应。 - 就此而言,你在 `cni.conf` 中的 `ExceptionList` 应该看起来像这样: - - ```conf - "ExceptionList": [ - "10.244.0.0/16", # 集群子网 - "10.96.0.0/12", # 服务子网 - "10.127.130.0/24" # 管理(主机)子网 - ] - ``` - - -5. 我的 Windows 节点无法访问 NodePort 服务 - - 从节点自身发起的本地 NodePort 请求会失败。这是一个已知的局限。 - NodePort 服务的访问从其他节点或者外部客户端都可正常进行。 - - -6. 容器的 vNICs 和 HNS 端点被删除了 - - 这一问题可能因为 `hostname-override` 参数未能传递给 - [kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) 而导致。 - 解决这一问题时,用户需要按如下方式将主机名传递给 kube-proxy: + Kubelet & kube-proxy can be run as native Windows Services using `sc.exe`. ```powershell - C:\k\kube-proxy.exe --hostname-override=$(hostname) + # Create the services for kubelet and kube-proxy in two separate commands + sc.exe create binPath= " -service " + + # Please note that if the arguments contain spaces, they must be escaped. + sc.exe create kubelet binPath= "C:\kubelet.exe --service --hostname-override 'minion' " + + # Start the services + Start-Service kubelet + Start-Service kube-proxy + + # Stop the service + Stop-Service kubelet (-Force) + Stop-Service kube-proxy (-Force) + + # Query the service status + Get-Service kubelet + Get-Service kube-proxy ``` + --> + * 以本地 Windows 服务的形式 - -7. 
使用 Flannel 时,我的节点在重新加入集群时遇到问题 - - 无论何时,当一个之前被删除的节点被重新添加到集群时,flannelD 都会将为节点分配 - 一个新的 Pod 子网。 - 用户需要将将下面路径中的老的 Pod 子网配置文件删除: - - ```powershell - Remove-Item C:\k\SourceVip.json - Remove-Item C:\k\SourceVipRequest.json - ``` - - -8. 在启动了 `start.ps1` 之后,flanneld 一直停滞在 "Waiting for the Network to be created" 状态 + # 用两个单独的命令为 kubelet 和 kube-proxy 创建服务 + sc.exe create <组件名称> binPath="<可执行文件路径> -service <其它参数>" - 关于这一[正在被分析的问题](https://github.com/coreos/flannel/issues/1066)有很多的报告; - 最可能的一种原因是关于何时设置 Flannel 网络的管理 IP 的时间问题。 - 一种解决办法是重新启动 `start.ps1` 或者按如下方式手动重启之: + # 请注意如果参数中包含空格,必须使用转义 + sc.exe create kubelet binPath= "C:\kubelet.exe --service --hostname-override 'minion' <其它参数>" - ```powershell - PS C:> [Environment]::SetEnvironmentVariable("NODE_NAME", "") - PS C:> C:\flannel\flanneld.exe --kubeconfig-file=c:\k\config --iface= --ip-masq=1 --kube-subnet-mgr=1 - ``` + # 启动服务 + Start-Service kubelet + Start-Service kube-proxy - -9. 我的 Windows Pods 无法启动,因为缺少 `/run/flannel/subnet.env` 文件 - - 这表明 Flannel 网络未能正确启动。你可以尝试重启 flanneld.exe 或者将文件手动地 - 从 Kubernetes 主控节点的 `/run/flannel/subnet.env` 路径复制到 Windows 工作 - 节点的 `C:\run\flannel\subnet.env` 路径,并将 `FLANNEL_SUBNET` 行改为一个 - 不同的数值。例如,如果期望节点子网为 `10.244.4.1/24`: - - ```env - FLANNEL_NETWORK=10.244.0.0/16 - FLANNEL_SUBNET=10.244.4.1/24 - FLANNEL_MTU=1500 - FLANNEL_IPMASQ=true + # 查询服务状态 + Get-Service kubelet + Get-Service kube-proxy ``` - -10. 我的 Windows 节点无法使用服务 IP 访问我的服务 + You can also always use alternative service managers like + [nssm.exe](https://nssm.cc/) to run these processes (flanneld, kubelet & + kube-proxy) in the background for you. You can use this [sample + script](https://github.com/Microsoft/SDN/tree/master/Kubernetes/flannel/register-svc.ps1), + leveraging `nssm.exe` to register kubelet, kube-proxy, and `flanneld.exe` to run + as Windows services in the background. + --> + * 使用 nssm.exe - 这是 Windows 上当前网络协议栈的一个已知的限制。 - Windows Pods 能够访问服务 IP。 - - -11. 
启动 kubelet 时找不到网络适配器 - - Windows 网络堆栈需要一个虚拟的适配器,这样 Kubernetes 网络才能工作。 - 如果下面的命令(在管理员 Shell 中)没有任何返回结果,证明虚拟网络创建 - (kubelet 正常工作的必要前提之一)失败了: + 你也总是可以使用替代的服务管理器,例如[nssm.exe](https://nssm.cc/),来为你在后台运行 + 这些进程(`flanneld`、`kubelet` 和 `kube-proxy`)。你可以使用这一 + [示例脚本](https://github.com/Microsoft/SDN/tree/master/Kubernetes/flannel/register-svc.ps1), + 利用 `nssm.exe` 将 `kubelet`、`kube-proxy` 和 `flanneld.exe` 注册为要在后台运行的 + Windows 服务。 + + ```powershell + register-svc.ps1 -NetworkMode <网络模式> -ManagementIP -ClusterCIDR <集群子网> -KubeDnsServiceIP -LogDir <日志目录> ``` - 当宿主系统的网络适配器名称不是 "Ethernet" 时,通常值得更改 `start.ps1` 脚本中的 - [InterfaceName](https://github.com/microsoft/SDN/blob/master/Kubernetes/flannel/start.ps1#L7) - 参数来重试。否则可以查验 `start-kubelet.ps1` 的输出,看看是否在虚拟网络创建 - 过程中报告了其他错误。 + 这里的参数解释如下: - + - `NetworkMode`:网络模式 l2bridge(flannel host-gw,也是默认值)或 + overlay(flannel vxlan)选做网络方案 + - `ManagementIP`:分配给 Windows 节点的 IP 地址。你可以使用 ipconfig 得到此值 + - `ClusterCIDR`:集群子网范围(默认值为 10.244.0.0/16) + - `KubeDnsServiceIP`:Kubernetes DNS 服务 IP(默认值为 10.96.0.10) + - `LogDir`:kubelet 和 kube-proxy 的日志会被重定向到这一目录中的对应输出文件, + 默认值为 `C:\k`。 - Check that your pause image is compatible with your OS version. The [instructions](https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/deploying-resources) assume that both the OS and the containers are version 1803. If you have a later version of Windows, such as an Insider build, you need to adjust the images accordingly. Please refer to the Microsoft's [Docker repository](https://hub.docker.com/u/microsoft/) for images. Regardless, both the pause image Dockerfile and the sample service expect the image to be tagged as :latest. + -12. 
我的 Pods 停滞在 "Container Creating" 状态或者反复重启 + Register flanneld.exe: + --> + 若以上所引用的脚本不适合,你可以使用下面的例子手动配置 `nssm.exe`。 - 检查你的 pause 镜像是与你的 OS 版本兼容的。 - [这里的指令](https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/deploying-resources) - 假定你的 OS 和容器版本都是 1803。如果你安装的是更新版本的 Windows,比如说 - 某个 Insider 构造版本,你需要相应地调整要使用的镜像。 - 请参照 Microsoft 的 [Docker 仓库](https://hub.docker.com/u/microsoft/) - 了解镜像。不管怎样,pause 镜像的 Dockerfile 和示例服务都期望镜像的标签 - 为 `:latest`。 + 注册 flanneld.exe: - 从 Kubernetes v1.14 版本起,Microsoft 开始在 `mcr.microsoft.com/k8s/core/pause:1.2.0` - 发布其 pause 基础设施容器。 - - -13. DNS 解析无法正常工作 - - 参阅 Windows 上 [DNS 相关的局限](#dns-limitations) 节。 - - -14. `kubectl port-forward` 失败,错误信息为 "unable to do port forwarding: wincat not found" - - 此功能是在 Kubernetes v1.15 中实现的,pause 基础设施容器为 `mcr.microsoft.com/k8s/core/pause:1.2.0`。 - 请确保你使用的是这些版本或者更新版本。 - 如果你想要自行构造你自己的 pause 基础设施容器,要确保其中包含了 - [wincat](https://github.com/kubernetes-sigs/sig-windows-tools/tree/master/cmd/wincat) - - -15. 我的 Kubernetes 安装失败,因为我的 Windows Server 节点在防火墙后面 - - 如果你处于防火墙之后,那么必须定义如下 PowerShell 环境变量: - - ```PowerShell - [Environment]::SetEnvironmentVariable("HTTP_PROXY", "http://proxy.example.com:80/", [EnvironmentVariableTarget]::Machine) - [Environment]::SetEnvironmentVariable("HTTPS_PROXY", "http://proxy.example.com:443/", [EnvironmentVariableTarget]::Machine) + ```powershell + nssm install flanneld C:\flannel\flanneld.exe + nssm set flanneld AppParameters --kubeconfig-file=c:\k\config --iface= --ip-masq=1 --kube-subnet-mgr=1 + nssm set flanneld AppEnvironmentExtra NODE_NAME= + nssm set flanneld AppDirectory C:\flannel + nssm start flanneld ``` + + 注册 kubelet.exe: + + ```powershell + nssm install kubelet C:\k\kubelet.exe + nssm set kubelet AppParameters --hostname-override= --v=6 --pod-infra-container-image=k8s.gcr.io/pause:3.5 --resolv-conf="" --allow-privileged=true --enable-debugging-handlers --cluster-dns= --cluster-domain=cluster.local --kubeconfig=c:\k\config --hairpin-mode=promiscuous-bridge 
--image-pull-progress-deadline=20m --cgroups-per-qos=false --log-dir= --logtostderr=false --enforce-node-allocatable="" --network-plugin=cni --cni-bin-dir=c:\k\cni --cni-conf-dir=c:\k\cni\config + nssm set kubelet AppDirectory C:\k + nssm start kubelet + ``` + + + 注册 kube-proxy.exe(二层网桥模式和主机网关模式) + + ```powershell + nssm install kube-proxy C:\k\kube-proxy.exe + nssm set kube-proxy AppDirectory c:\k + nssm set kube-proxy AppParameters --v=4 --proxy-mode=kernelspace --hostname-override=--kubeconfig=c:\k\config --enable-dsr=false --log-dir= --logtostderr=false + nssm.exe set kube-proxy AppEnvironmentExtra KUBE_NETWORK=cbr0 + nssm set kube-proxy DependOnService kubelet + nssm start kube-proxy + ``` + + + 注册 kube-proxy.exe(覆盖网络模式或 VxLAN 模式) + + ```powershell + nssm install kube-proxy C:\k\kube-proxy.exe + nssm set kube-proxy AppDirectory c:\k + nssm set kube-proxy AppParameters --v=4 --proxy-mode=kernelspace --feature-gates="WinOverlay=true" --hostname-override= --kubeconfig=c:\k\config --network-name=vxlan0 --source-vip= --enable-dsr=false --log-dir= --logtostderr=false + nssm set kube-proxy DependOnService kubelet + nssm start kube-proxy + ``` + + + 作为初始的故障排查操作,你可以使用在 [nssm.exe](https://nssm.cc/) 中使用下面的标志 + 以便将标准输出和标准错误输出重定向到一个输出文件: + + ```powershell + nssm set <服务名称> AppStdout C:\k\mysvc.log + nssm set <服务名称> AppStderr C:\k\mysvc.log + ``` + + 要了解更多的细节,可参见官方的 [nssm 用法](https://nssm.cc/usage)文档。 + -15. `pause` 容器是什么? 
+* 我的 Windows Pods 无法连接网络
在启动了 `start.ps1` 之后,flanneld 一直停滞在 "Waiting for the Network + to be created" 状态 + + 关于这一[问题](https://github.com/coreos/flannel/issues/1066)有很多的报告; + 最可能的一种原因是关于何时设置 Flannel 网络的管理 IP 的时间问题。 + 一种解决办法是重新启动 `start.ps1` 或者按如下方式手动重启之: + + ```powershell + [Environment]::SetEnvironmentVariable("NODE_NAME", "") + C:\flannel\flanneld.exe --kubeconfig-file=c:\k\config --iface= --ip-masq=1 --kube-subnet-mgr=1 + ``` + + +* 我的 Windows Pods 无法启动,因为缺少 `/run/flannel/subnet.env` 文件 + + 这表明 Flannel 网络未能正确启动。你可以尝试重启 flanneld.exe 或者将文件手动地 + 从 Kubernetes 主控节点的 `/run/flannel/subnet.env` 路径复制到 Windows 工作 + 节点的 `C:\run\flannel\subnet.env` 路径,并将 `FLANNEL_SUBNET` 行改为一个 + 不同的数值。例如,如果期望节点子网为 `10.244.4.1/24`: + + ```none + FLANNEL_NETWORK=10.244.0.0/16 + FLANNEL_SUBNET=10.244.4.1/24 + FLANNEL_MTU=1500 + FLANNEL_IPMASQ=true + ``` + +* 我的 Windows 节点无法使用服务 IP 访问我的服务 + + 这是 Windows 上当前网络协议栈的一个已知的限制。 + Windows Pods 能够访问服务 IP。 + + +* 启动 kubelet 时找不到网络适配器 + + Windows 网络堆栈需要一个虚拟的适配器,这样 Kubernetes 网络才能工作。 + 如果下面的命令(在管理员 Shell 中)没有任何返回结果,证明虚拟网络创建 + (kubelet 正常工作的必要前提之一)失败了: + + ```powershell + Get-HnsNetwork | ? Name -ieq "cbr0" + Get-NetAdapter | ? 
Name -Like "vEthernet (Ethernet*" + ``` + + + 当宿主系统的网络适配器名称不是 "Ethernet" 时,通常值得更改 `start.ps1` 脚本中的 + [InterfaceName](https://github.com/microsoft/SDN/blob/master/Kubernetes/flannel/start.ps1#L7) + 参数来重试。否则可以查验 `start-kubelet.ps1` 的输出,看看是否在虚拟网络创建 + 过程中报告了其他错误。 + + +* 我的 Pods 停滞在 "Container Creating" 状态或者反复重启 + + 检查你的 pause 镜像是与你的 OS 版本兼容的。 + [这里的指令](https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/deploying-resources) + 假定你的 OS 和容器版本都是 1803。如果你安装的是更新版本的 Windows,比如说 + 某个 Insider 构造版本,你需要相应地调整要使用的镜像。 + 请参照 Microsoft 的 [Docker 仓库](https://hub.docker.com/u/microsoft/) + 了解镜像。不管怎样,pause 镜像的 Dockerfile 和示例服务都期望镜像的标签 + 为 `:latest`。 + + +* DNS 解析无法正常工作 + + 参阅 Windows 上 [DNS 相关的局限](#dns-limitations) 节。 + + +* `kubectl port-forward` 失败,错误信息为 "unable to do port forwarding: wincat not found" + + 此功能是在 Kubernetes v1.15 中实现的,pause 基础设施容器 + `mcr.microsoft.com/oss/kubernetes/pause:3.4.1` 中包含了 wincat.exe。 + 请确保你使用的是这些版本或者更新版本。 + 如果你想要自行构造你自己的 pause 基础设施容器,要确保其中包含了 + [wincat](https://github.com/kubernetes-sigs/sig-windows-tools/tree/master/cmd/wincat) + + Windows 的端口转发支持需要在 [pause 基础设施容器](#pause-image) 中提供 wincat.exe。 + 确保你使用的是与你的 Windows 操作系统版本兼容的受支持镜像。 + 如果你想构建自己的 pause 基础架构容器,请确保包含 [wincat](https://github.com/kubernetes/kubernetes/tree/master/build/pause/windows/wincat).。 + + +* 我的 Kubernetes 安装失败,因为我的 Windows Server 节点在防火墙后面 + + 如果你处于防火墙之后,那么必须定义如下 PowerShell 环境变量: + + ```PowerShell + [Environment]::SetEnvironmentVariable("HTTP_PROXY", "http://proxy.example.com:80/", [EnvironmentVariableTarget]::Machine) + [Environment]::SetEnvironmentVariable("HTTPS_PROXY", "http://proxy.example.com:443/", [EnvironmentVariableTarget]::Machine) + ``` + + +* `pause` 容器是什么? 
+ + 在一个 Kubernetes Pod 中,一个基础设施容器,或称 "pause" 容器,会被首先创建出来, + 用以托管容器端点。属于同一 Pod 的容器,包括基础设施容器和工作容器,会共享相同的 + 网络名字空间和端点(相同的 IP 和端口空间)。我们需要 pause 容器来工作容器崩溃或 + 重启的状况,以确保不会丢失任何网络配置。 + + 请参阅 [pause 镜像](#pause-image) 部分以查找 pause 镜像的推荐版本。 ### 进一步探究 {#further-investigation} @@ -1520,7 +2260,15 @@ If these steps don't resolve your problem, you can get help running Windows cont ## 报告问题和功能需求 {#reporting-issues-and-feature-requests} @@ -1533,13 +2281,16 @@ If you have what looks like a bug, or you would like to make a feature request, 生成新的 Ticket 之前对一些想法进行故障分析。 在登记软件缺陷时,请给出如何重现该问题的详细信息,例如: @@ -1553,7 +2304,11 @@ If filing a bug, please include detailed information about how to reproduce the ## {{% heading "whatsnext" %}} 在我们的未来蓝图中包含很多功能特性(要实现)。下面是一个浓缩的简要列表,不过我们 鼓励你查看我们的 [roadmap 项目](https://github.com/orgs/kubernetes/projects/8)并 @@ -1563,11 +2318,16 @@ We have a lot of features in our roadmap. An abbreviated high level list is incl ### Hyper-V 隔离 {#hyper-v-isolation} @@ -1575,52 +2335,20 @@ Hyper-V isolation is requried to enable the following use cases for Windows cont 要满足 Kubernetes 中 Windows 容器的如下用例,需要利用 Hyper-V 隔离: * 在 Pod 之间实施基于监管程序(Hypervisor)的隔离,以增强安全性 -* 出于向后兼容需要,允许添加运行新 Windows Server 版本的节点时不必重新创建容器 +* 出于向后兼容需要,允许添加运行新 Windows Server 版本的节点时不必 + 重新创建容器 * 为 Pod 设置特定的 CPU/NUMA 配置 * 实施内存隔离与预留 - -现有的 Hyper-V 隔离支持是添加自 v1.10 版本的实验性功能特性,会在未来版本中弃用, -向前文所提到的 CRI-ContainerD 和 RuntimeClass 特性倾斜。 -要使用当前的功能特性并创建 Hyper-V 隔离的容器,需要在启动 kubelet 时设置特性门控 -`HyperVContainer=true`,同时为 Pod 添加注解 -`experimental.windows.kubernetes.io/isolation-type=hyperv`。 -在实验性实现版本中,此功能特性限制每个 Pod 中只能包含一个容器。 - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: iis -spec: - selector: - matchLabels: - app: iis - replicas: 3 - template: - metadata: - labels: - app: iis - annotations: - experimental.windows.kubernetes.io/isolation-type: hyperv - spec: - containers: - - name: iis - image: microsoft/iis - ports: - - containerPort: 80 -``` - ### 使用 kubeadm 和 Cluster API 来部署 
{#deployment-with-kubeadm-and-cluster-api} @@ -1629,15 +2357,3 @@ kubeadm 对 Windows 节点的支持目前还在开发过程中,不过你可以 [指南](/zh/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes/)。 我们也在投入资源到 Cluster API,以确保 Windows 节点被正确配置。 - -### 若干其他关键功能 {#a-few-other-key-features} - -* 为组管理的服务账号(Group Managed Service Accounts,GMSA)提供 Beta 支持 -* 添加更多的 CNI 支持 -* 实现更多的存储插件 - diff --git a/content/zh/docs/setup/production-environment/windows/user-guide-windows-containers.md b/content/zh/docs/setup/production-environment/windows/user-guide-windows-containers.md index 93eae4e7d6..88c23e548f 100644 --- a/content/zh/docs/setup/production-environment/windows/user-guide-windows-containers.md +++ b/content/zh/docs/setup/production-environment/windows/user-guide-windows-containers.md @@ -1,12 +1,14 @@ --- -title: Kubernetes 中调度 Windows 容器的指南 +title: Kubernetes 中 Windows 容器的调度指南 content_type: concept weight: 75 --- Windows 应用程序构成了许多组织中运行的服务和应用程序的很大一部分。 本指南将引导您完成在 Kubernetes 中配置和部署 Windows 容器的步骤。 @@ -36,20 +39,28 @@ Windows 应用程序构成了许多组织中运行的服务和应用程序的很 ## 在你开始之前 -* 创建一个 Kubernetes 集群,其中包括一个 - [运行 Windows 服务器的主节点和工作节点](/zh/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes/) -* 重要的是要注意,对于 Linux 和 Windows 容器,在 Kubernetes 上创建和部署服务和工作负载的行为几乎相同。 - 与集群接口的 [Kubectl 命令](/zh/docs/reference/kubectl/overview/)相同。提供以下部分中的示例只是为了快速启动 Windows 容器的使用体验。 +* 创建一个 Kubernetes 集群,其中包括一个控制平面和 + [运行 Windows 服务器的工作节点](/zh/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes/) +* 重要的是要注意,对于 Linux 和 Windows 容器,在 Kubernetes + 上创建和部署服务和工作负载的行为几乎相同。 + 与集群接口的 [kubectl 命令](/zh/docs/reference/kubectl/overview/)相同。 + 提供以下部分中的示例只是为了快速启动 Windows 容器的使用体验。 ## 入门:部署 Windows 容器 @@ -102,7 +113,8 @@ spec: ``` {{< note >}} 端口映射也是支持的,但为简单起见,在此示例中容器端口 80 直接暴露给服务。 @@ -127,12 +139,15 @@ Port mapping is also supported, but for simplicity in this example the container 1. Check that the deployment succeeded. 
To verify: * Two containers per pod on the Windows node, use `docker ps` - * Two pods listed from the Linux master, use `kubectl get pods` - * Node-to-pod communication across the network, `curl` port 80 of your pod IPs from the Linux master to check for a web server response - * Pod-to-pod communication, ping between pods (and across hosts, if you have more than one Windows node) using docker exec or kubectl exec - * Service-to-pod communication, `curl` the virtual service IP (seen under `kubectl get services`) from the Linux master and from individual pods + * Two pods listed from the Linux control plane node, use `kubectl get pods` + * Node-to-pod communication across the network, `curl` port 80 of your pod IPs from the Linux control plane node + to check for a web server response + * Pod-to-pod communication, ping between pods (and across hosts, if you have more than one Windows node) + using docker exec or kubectl exec + * Service-to-pod communication, `curl` the virtual service IP (seen under `kubectl get services`) + from the Linux control plane node and from individual pods * Service discovery, `curl` the service name with the Kubernetes [default DNS suffix](/docs/concepts/services-networking/dns-pod-service/#services) - * Inbound connectivity, `curl` the NodePort from the Linux master or machines outside of the cluster + * Inbound connectivity, `curl` the NodePort from the Linux control plane node or machines outside of the cluster * Outbound connectivity, `curl` external IPs from inside the pod using kubectl exec --> 1. 检查所有节点是否健康: @@ -153,36 +168,92 @@ Port mapping is also supported, but for simplicity in this example the container 1. 
检查部署是否成功。验证: * Windows 节点上每个 Pod 有两个容器,使用 `docker ps` - * Linux 主机列出两个 Pod,使用 `kubectl get pods` - * 跨网络的节点到 Pod 通信,从 Linux 主服务器 `curl` 您的 pod IPs 的端口80,以检查 Web 服务器响应 - * Pod 到 Pod 的通信,使用 docker exec 或 kubectl exec 在 pod 之间(以及跨主机,如果您有多个 Windows 节点)进行 ping 操作 - * 服务到 Pod 的通信,从 Linux 主服务器和各个 Pod 中 `curl` 虚拟服务 IP(在 `kubectl get services` 下可见) - * 服务发现,使用 Kubernetes `curl` 服务名称[默认 DNS 后缀](/zh/docs/concepts/services-networking/dns-pod-service/#services) - * 入站连接,从 Linux 主服务器或集群外部的计算机 `curl` NodePort + * Linux 控制平面节点列出两个 Pod,使用 `kubectl get pods` + * 跨网络的节点到 Pod 通信,从 Linux 控制平面节点 `curl` 您的 pod IPs 的端口80,以检查 Web 服务器响应 + * Pod 到 Pod 的通信,使用 docker exec 或 kubectl exec 在 Pod 之间 + (以及跨主机,如果你有多个 Windows 节点)进行 ping 操作 + * 服务到 Pod 的通信,从 Linux 控制平面节点和各个 Pod 中 `curl` 虚拟服务 IP + (在 `kubectl get services` 下可见) + * 服务发现,使用 Kubernetes `curl` 服务名称 + [默认 DNS 后缀](/zh/docs/concepts/services-networking/dns-pod-service/#services) + * 入站连接,从 Linux 控制平面节点或集群外部的计算机 `curl` NodePort * 出站连接,使用 kubectl exec 从 Pod 内部 curl 外部 IP {{< note >}} 由于当前平台对 Windows 网络堆栈的限制,Windows 容器主机无法访问在其上调度的服务的 IP。只有 Windows pods 才能访问服务 IP。 {{< /note >}} + +## 可观测性 {#observability} + +### 抓取来自工作负载的日志 + + +日志是可观测性的重要一环;使用日志用户可以获得对负载运行状况的洞察, +因而日志是故障排查的一个重要手法。 +因为 Windows 容器中的 Windows 容器和负载与 Linux 容器的行为不同, +用户很难收集日志,因此运行状态的可见性很受限。 +例如,Windows 工作负载通常被配置为将日志输出到 Windows 事件跟踪 +(Event Tracing for Windows,ETW),或者将日志条目推送到应用的事件日志中。 +[LogMonitor](https://github.com/microsoft/windows-container-tools/tree/master/LogMonitor) +是 Microsoft 提供的一个开源工具,是监视 Windows 容器中所配置的日志源 +的推荐方式。 +LogMonitor 支持监视时间日志、ETW 提供者模块以及自定义的应用日志, +并使用管道的方式将其输出到标准输出(stdout),以便 `kubectl logs ` +这类命令能够读取这些数据。 + + +请遵照 LogMonitor GitHub 页面上的指令,将其可执行文件和配置文件复制到 +你的所有容器中,并为其添加必要的入口点(Entrypoint),以便 LogMonitor +能够将你的日志输出推送到标准输出(stdout)。 + + ## 使用可配置的容器用户名 -从 Kubernetes v1.16 开始,可以为 Windows 容器配置与其镜像默认值不同的用户名来运行其入口点和进程。 +从 Kubernetes v1.16 开始,可以为 Windows 容器配置与其镜像默认值不同的用户名 +来运行其入口点和进程。 此能力的实现方式和 Linux 容器有些不同。 
-在[此处](/zh/docs/tasks/configure-pod-container/configure-runasusername/)可了解更多信息。 +在[此处](/zh/docs/tasks/configure-pod-container/configure-runasusername/) +可了解更多信息。 ## 使用组托管服务帐户管理工作负载身份 @@ -190,7 +261,8 @@ Starting with Kubernetes v1.14, Windows container workloads can be configured to 组托管服务帐户是 Active Directory 帐户的一种特定类型,它提供自动密码管理, 简化的服务主体名称(SPN)管理以及将管理委派给跨多台服务器的其他管理员的功能。 配置了 GMSA 的容器可以访问外部 Active Directory 域资源,同时携带通过 GMSA 配置的身份。 -在[此处](/zh/docs/tasks/configure-pod-container/configure-gmsa/)了解有关为 Windows 容器配置和使用 GMSA 的更多信息。 +在[此处](/zh/docs/tasks/configure-pod-container/configure-gmsa/)了解有关为 +Windows 容器配置和使用 GMSA 的更多信息。 目前,用户需要将 Linux 和 Windows 工作负载运行在各自特定的操作系统的节点上, 因而需要结合使用污点和节点选择算符。 这可能仅给 Windows 用户造成不便。 @@ -210,7 +285,8 @@ Users today need to use some combination of taints and node selectors in order t ### 确保特定操作系统的工作负载落在适当的容器主机上 用户可以使用污点和容忍度确保 Windows 容器可以调度在适当的主机上。目前所有 Kubernetes 节点都具有以下默认标签: @@ -218,7 +294,10 @@ Users can ensure Windows containers can be scheduled on the appropriate host usi * kubernetes.io/arch = [amd64|arm64|...] 如果 Pod 规范未指定诸如 `"kubernetes.io/os": windows` 之类的 nodeSelector,则该 Pod 可能会被调度到任何主机(Windows 或 Linux)上。 @@ -226,7 +305,11 @@ If a Pod specification does not specify a nodeSelector like `"kubernetes.io/os": 最佳实践是使用 nodeSelector。 但是,我们了解到,在许多情况下,用户都有既存的大量的 Linux 容器部署,以及一个现成的配置生态系统, 例如社区 Helm charts,以及程序化 Pod 生成案例,例如 Operators。 @@ -239,11 +322,12 @@ For example: `--register-with-taints='os=windows:NoSchedule'` 例如:`--register-with-taints='os=windows:NoSchedule'` 向所有 Windows 节点添加污点后,Kubernetes 将不会在它们上调度任何负载(包括现有的 Linux Pod)。 -为了使某 Windows Pod 调度到 Windows 节点上,该 Pod 既需要 nodeSelector 选择 Windows, -也需要合适的匹配的容忍度设置。 +为了使某 Windows Pod 调度到 Windows 节点上,该 Pod 需要 nodeSelector 和合适的匹配的容忍度设置来选择 Windows, ```yaml nodeSelector: @@ -266,18 +350,22 @@ The Windows Server version used by each pod must match that of the node. If you Server versions in the same cluster, then you should set additional node labels and nodeSelectors. 
--> 每个 Pod 使用的 Windows Server 版本必须与该节点的 Windows Server 版本相匹配。 -如果要在同一集群中使用多个 Windows Server 版本,则应该设置其他节点标签和 nodeSelector。 +如果要在同一集群中使用多个 Windows Server 版本,则应该设置其他节点标签和 +nodeSelector。 Kubernetes 1.17 自动添加了一个新标签 `node.kubernetes.io/windows-build` 来简化此操作。 如果您运行的是旧版本,则建议手动将此标签添加到 Windows 节点。 -此标签反映了需要兼容的 Windows 主要、次要和内部版本号。以下是当前每个 Windows Server 版本使用的值。 +此标签反映了需要兼容的 Windows 主要、次要和内部版本号。以下是当前每个 +Windows Server 版本使用的值。 | 产品名称 | 内部编号 | |--------------------------------------|------------------------| @@ -292,15 +380,19 @@ This label reflects the Windows major, minor, and build number that need to matc ### 使用 RuntimeClass 简化 -[RuntimeClass](/zh/docs/concepts/containers/runtime-class/) 可用于简化使用污点和容忍度的过程。 +[RuntimeClass](/zh/docs/concepts/containers/runtime-class/) 可用于 +简化使用污点和容忍度的过程。 集群管理员可以创建 `RuntimeClass` 对象,用于封装这些污点和容忍度。 -1. 将此文件保存到 `runtimeClasses.yml` 文件。它包括适用于 Windows 操作系统、体系结构和版本的 `nodeSelector`。 +1. 将此文件保存到 `runtimeClasses.yml` 文件。 + 它包括适用于 Windows 操作系统、体系结构和版本的 `nodeSelector`。 ```yaml apiVersion: node.k8s.io/v1 @@ -324,7 +416,7 @@ This label reflects the Windows major, minor, and build number that need to matc 1. Run `kubectl create -f runtimeClasses.yml` using as a cluster administrator 1. Add `runtimeClassName: windows-2019` as appropriate to Pod specs --> -2. 集群管理员运行 `kubectl create -f runtimeClasses.yml` 操作 +2. 集群管理员执行 `kubectl create -f runtimeClasses.yml` 操作 3. 
根据需要向 Pod 规约中添加 `runtimeClassName: windows-2019` - -# v1.18.0 - -[Documentation](https://docs.k8s.io) - -## Downloads for v1.18.0 - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes.tar.gz) | `cd5b86a3947a4f2cea6d857743ab2009be127d782b6f2eb4d37d88918a5e433ad2c7ba34221c34089ba5ba13701f58b657f0711401e51c86f4007cb78744dee7` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-src.tar.gz) | `fb42cf133355ef18f67c8c4bb555aa1f284906c06e21fa41646e086d34ece774e9d547773f201799c0c703ce48d4d0e62c6ba5b2a4d081e12a339a423e111e52` - -### Client Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-darwin-386.tar.gz) | `26df342ef65745df12fa52931358e7f744111b6fe1e0bddb8c3c6598faf73af997c00c8f9c509efcd7cd7e82a0341a718c08fbd96044bfb58e80d997a6ebd3c2` -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-darwin-amd64.tar.gz) | `803a0fed122ef6b85f7a120b5485723eaade765b7bc8306d0c0da03bd3df15d800699d15ea2270bb7797fa9ce6a81da90e730dc793ea4ed8c0149b63d26eca30` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-386.tar.gz) | `110844511b70f9f3ebb92c15105e6680a05a562cd83f79ce2d2e25c2dd70f0dbd91cae34433f61364ae1ce4bd573b635f2f632d52de8f72b54acdbc95a15e3f0` -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-amd64.tar.gz) | `594ca3eadc7974ec4d9e4168453e36ca434812167ef8359086cd64d048df525b7bd46424e7cc9c41e65c72bda3117326ba1662d1c9d739567f10f5684fd85bee` -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-arm.tar.gz) | `d3627b763606557a6c9a5766c34198ec00b3a3cd72a55bc2cb47731060d31c4af93543fb53f53791062bb5ace2f15cbaa8592ac29009641e41bd656b0983a079` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-arm64.tar.gz) | 
`ba9056eff1452cbdaef699efbf88f74f5309b3f7808d372ebf6918442d0c9fea1653c00b9db3b7626399a460eef9b1fa9e29b827b7784f34561cbc380554e2ea` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-ppc64le.tar.gz) | `f80fb3769358cb20820ff1a1ce9994de5ed194aabe6c73fb8b8048bffc394d1b926de82c204f0e565d53ffe7562faa87778e97a3ccaaaf770034a992015e3a86` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-s390x.tar.gz) | `a9b658108b6803d60fa3cd4e76d9e58bf75201017164fe54054b7ccadbb68c4ad7ba7800746940bc518d90475e6c0a96965a26fa50882f4f0e56df404f4ae586` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-windows-386.tar.gz) | `18adffab5d1be146906fd8531f4eae7153576aac235150ce2da05aee5ae161f6bd527e8dec34ae6131396cd4b3771e0d54ce770c065244ad3175a1afa63c89e1` -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-windows-amd64.tar.gz) | `162396256429cef07154f817de2a6b67635c770311f414e38b1e2db25961443f05d7b8eb1f8da46dec8e31c5d1d2cd45f0c95dad1bc0e12a0a7278a62a0b9a6b` - -### Server Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-server-linux-amd64.tar.gz) | `a92f8d201973d5dfa44a398e95fcf6a7b4feeb1ef879ab3fee1c54370e21f59f725f27a9c09ace8c42c96ac202e297fd458e486c489e05f127a5cade53b8d7c4` -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-server-linux-arm.tar.gz) | `62fbff3256bc0a83f70244b09149a8d7870d19c2c4b6dee8ca2714fc7388da340876a0f540d2ae9bbd8b81fdedaf4b692c72d2840674db632ba2431d1df1a37d` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-server-linux-arm64.tar.gz) | `842910a7013f61a60d670079716b207705750d55a9e4f1f93696d19d39e191644488170ac94d8740f8e3aa3f7f28f61a4347f69d7e93d149c69ac0efcf3688fe` -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-server-linux-ppc64le.tar.gz) | 
`95c5b952ac1c4127a5c3b519b664972ee1fb5e8e902551ce71c04e26ad44b39da727909e025614ac1158c258dc60f504b9a354c5ab7583c2ad769717b30b3836` -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-server-linux-s390x.tar.gz) | `a46522d2119a0fd58074564c1fa95dd8a929a79006b82ba3c4245611da8d2db9fd785c482e1b61a9aa361c5c9a6d73387b0e15e6a7a3d84fffb3f65db3b9deeb` - -### Node Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-linux-amd64.tar.gz) | `f714f80feecb0756410f27efb4cf4a1b5232be0444fbecec9f25cb85a7ccccdcb5be588cddee935294f460046c0726b90f7acc52b20eeb0c46a7200cf10e351a` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-linux-arm.tar.gz) | `806000b5f6d723e24e2f12d19d1b9b3d16c74b855f51c7063284adf1fcc57a96554a3384f8c05a952c6f6b929a05ed12b69151b1e620c958f74c9600f3db0fcb` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-linux-arm64.tar.gz) | `c207e9ab60587d135897b5366af79efe9d2833f33401e469b2a4e0d74ecd2cf6bb7d1e5bc18d80737acbe37555707f63dd581ccc6304091c1d98dafdd30130b7` -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-linux-ppc64le.tar.gz) | `a542ed5ed02722af44ef12d1602f363fcd4e93cf704da2ea5d99446382485679626835a40ae2ba47a4a26dce87089516faa54479a1cfdee2229e8e35aa1c17d7` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-linux-s390x.tar.gz) | `651e0db73ee67869b2ae93cb0574168e4bd7918290fc5662a6b12b708fa628282e3f64be2b816690f5a2d0f4ff8078570f8187e65dee499a876580a7a63d1d19` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-windows-amd64.tar.gz) | `d726ed904f9f7fe7e8831df621dc9094b87e767410a129aa675ee08417b662ddec314e165f29ecb777110fbfec0dc2893962b6c71950897ba72baaa7eb6371ed` - -## Changelog since v1.17.0 - -A complete changelog for the release notes is now hosted in a customizable -format at [https://relnotes.k8s.io][1]. 
Check it out and please give us your -feedback! - -[1]: https://relnotes.k8s.io/?releaseVersions=1.18.0 - -## What’s New (Major Themes) - -### Kubernetes Topology Manager Moves to Beta - Align Up! - -A beta feature of Kubernetes in release 1.18, the [Topology Manager feature](https://github.com/nolancon/website/blob/f4200307260ea3234540ef13ed80de325e1a7267/content/en/docs/tasks/administer-cluster/topology-manager.md) enables NUMA alignment of CPU and devices (such as SR-IOV VFs) that will allow your workload to run in an environment optimized for low-latency. Prior to the introduction of the Topology Manager, the CPU and Device Manager would make resource allocation decisions independent of each other. This could result in undesirable allocations on multi-socket systems, causing degraded performance on latency critical applications. - -### Serverside Apply - Beta 2 - -Server-side Apply was promoted to Beta in 1.16, but is now introducing a second Beta in 1.18. This new version will track and manage changes to fields of all new Kubernetes objects, allowing you to know what changed your resources and when. - -### Extending Ingress with and replacing a deprecated annotation with IngressClass - -In Kubernetes 1.18, there are two significant additions to Ingress: A new `pathType` field and a new `IngressClass` resource. The `pathType` field allows specifying how paths should be matched. In addition to the default `ImplementationSpecific` type, there are new `Exact` and `Prefix` path types. - -The `IngressClass` resource is used to describe a type of Ingress within a Kubernetes cluster. Ingresses can specify the class they are associated with by using a new `ingressClassName` field on Ingresses. This new resource and field replace the deprecated `kubernetes.io/ingress.class` annotation. - -### SIG CLI introduces kubectl debug - -SIG CLI was debating the need for a debug utility for quite some time already. 
With the development of [ephemeral containers](/zh/docs/concepts/workloads/pods/ephemeral-containers/), it became more obvious how we can support developers with tooling built on top of `kubectl exec`. The addition of the `kubectl debug` [command](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cli/20190805-kubectl-debug.md) (it is alpha but your feedback is more than welcome), allows developers to easily debug their Pods inside the cluster. We think this addition is invaluable. This command allows one to create a temporary container which runs next to the Pod one is trying to examine, but also attaches to the console for interactive troubleshooting. - -### Introducing Windows CSI support alpha for Kubernetes - -With the release of Kubernetes 1.18, an alpha version of CSI Proxy for Windows is getting released. CSI proxy enables non-privileged (pre-approved) containers to perform privileged storage operations on Windows. CSI drivers can now be supported in Windows by leveraging CSI proxy. -SIG Storage made a lot of progress in the 1.18 release. -In particular, the following storage features are moving to GA in Kubernetes 1.18: -- Raw Block Support: Allow volumes to be surfaced as block devices inside containers instead of just mounted filesystems. -- Volume Cloning: Duplicate a PersistentVolumeClaim and underlying storage volume using the Kubernetes API via CSI. -- CSIDriver Kubernetes API Object: Simplifies CSI driver discovery and allows CSI Drivers to customize Kubernetes behavior. - -SIG Storage is also introducing the following new storage features as alpha in Kubernetes 1.18: -- Windows CSI Support: Enabling containerized CSI node plugins in Windows via new [CSIProxy](https://github.com/kubernetes-csi/csi-proxy) -- Recursive Volume Ownership OnRootMismatch Option: Add a new “OnRootMismatch” policy that can help shorten the mount time for volumes that require ownership change and have many directories and files. 
- -### Other notable announcements - -SIG Network is moving IPv6 to Beta in Kubernetes 1.18, after incrementing significantly the test coverage with new CI jobs. - -NodeLocal DNSCache is an add-on that runs a dnsCache pod as a daemonset to improve clusterDNS performance and reliability. The feature has been in Alpha since 1.13 release. The SIG Network is announcing the GA graduation of Node Local DNSCache [#1351](https://github.com/kubernetes/enhancements/pull/1351) - -## Known Issues - -No Known Issues Reported - -## Urgent Upgrade Notes - -### (No, really, you MUST read this before you upgrade) - -#### kube-apiserver: -- in an `--encryption-provider-config` config file, an explicit `cacheSize: 0` parameter previously silently defaulted to caching 1000 keys. In Kubernetes 1.18, this now returns a config validation error. To disable caching, you can specify a negative cacheSize value in Kubernetes 1.18+. -- consumers of the 'certificatesigningrequests/approval' API must now have permission to 'approve' CSRs for the specific signer requested by the CSR. More information on the new signerName field and the required authorization can be found at https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests#authorization ([#88246](https://github.com/kubernetes/kubernetes/pull/88246), [@munnerz](https://github.com/munnerz)) [SIG API Machinery, Apps, Auth, CLI, Node and Testing] -- The following features are unconditionally enabled and the corresponding `--feature-gates` flags have been removed: `PodPriority`, `TaintNodesByCondition`, `ResourceQuotaScopeSelectors` and `ScheduleDaemonSetPods` ([#86210](https://github.com/kubernetes/kubernetes/pull/86210), [@draveness](https://github.com/draveness)) [SIG Apps and Scheduling] - -#### kubelet: -- `--enable-cadvisor-endpoints` is now disabled by default. If you need access to the cAdvisor v1 Json API please enable it explicitly in the kubelet command line. 
Please note that this flag was deprecated in 1.15 and will be removed in 1.19. ([#87440](https://github.com/kubernetes/kubernetes/pull/87440), [@dims](https://github.com/dims)) [SIG Instrumentation, Node and Testing] -- Promote CSIMigrationOpenStack to Beta (off by default since it requires installation of the OpenStack Cinder CSI Driver. The in-tree AWS OpenStack Cinder driver "kubernetes.io/cinder" was deprecated in 1.16 and will be removed in 1.20. Users should enable CSIMigration + CSIMigrationOpenStack features and install the OpenStack Cinder CSI Driver (https://github.com/kubernetes-sigs/cloud-provider-openstack) to avoid disruption to existing Pod and PVC objects at that time. Users should start using the OpenStack Cinder CSI Driver directly for any new volumes. ([#85637](https://github.com/kubernetes/kubernetes/pull/85637), [@dims](https://github.com/dims)) [SIG Cloud Provider] - -#### kubectl: -- `kubectl` and k8s.io/client-go no longer default to a server address of `http://localhost:8080`. If you own one of these legacy clusters, you are *strongly* encouraged to secure your server. If you cannot secure your server, you can set the `$KUBERNETES_MASTER` environment variable to `http://localhost:8080` to continue defaulting the server address. `kubectl` users can also set the server address using the `--server` flag, or in a kubeconfig file specified via `--kubeconfig` or `$KUBECONFIG`. ([#86173](https://github.com/kubernetes/kubernetes/pull/86173), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, CLI and Testing] -- `kubectl run` has removed the previously deprecated generators, along with flags unrelated to creating pods. `kubectl run` now only creates pods. See specific `kubectl create` subcommands to create objects other than pods. 
-([#87077](https://github.com/kubernetes/kubernetes/pull/87077), [@soltysh](https://github.com/soltysh)) [SIG Architecture, CLI and Testing] -- The deprecated command `kubectl rolling-update` has been removed ([#88057](https://github.com/kubernetes/kubernetes/pull/88057), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG Architecture, CLI and Testing] - -#### client-go: -- Signatures on methods in generated clientsets, dynamic, metadata, and scale clients have been modified to accept `context.Context` as a first argument. Signatures of Create, Update, and Patch methods have been updated to accept CreateOptions, UpdateOptions and PatchOptions respectively. Signatures of Delete and DeleteCollection methods now accept DeleteOptions by value instead of by reference. Generated clientsets with the previous interface have been added in new "deprecated" packages to allow incremental migration to the new APIs. The deprecated packages will be removed in the 1.21 release. A tool is available at http://sigs.k8s.io/clientgofix to rewrite method invocations to the new signatures. 
- -- The following deprecated metrics are removed, please convert to the corresponding metrics: - - The following replacement metrics are available from v1.14.0: - - `rest_client_request_latency_seconds` -> `rest_client_request_duration_seconds` - - `scheduler_scheduling_latency_seconds` -> `scheduler_scheduling_duration_seconds ` - - `docker_operations` -> `docker_operations_total` - - `docker_operations_latency_microseconds` -> `docker_operations_duration_seconds` - - `docker_operations_errors` -> `docker_operations_errors_total` - - `docker_operations_timeout` -> `docker_operations_timeout_total` - - `network_plugin_operations_latency_microseconds` -> `network_plugin_operations_duration_seconds` - - `kubelet_pod_worker_latency_microseconds` -> `kubelet_pod_worker_duration_seconds` - - `kubelet_pod_start_latency_microseconds` -> `kubelet_pod_start_duration_seconds` - - `kubelet_cgroup_manager_latency_microseconds` -> `kubelet_cgroup_manager_duration_seconds` - - `kubelet_pod_worker_start_latency_microseconds` -> `kubelet_pod_worker_start_duration_seconds` - - `kubelet_pleg_relist_latency_microseconds` -> `kubelet_pleg_relist_duration_seconds` - - `kubelet_pleg_relist_interval_microseconds` -> `kubelet_pleg_relist_interval_seconds` - - `kubelet_eviction_stats_age_microseconds` -> `kubelet_eviction_stats_age_seconds` - - `kubelet_runtime_operations` -> `kubelet_runtime_operations_total` - - `kubelet_runtime_operations_latency_microseconds` -> `kubelet_runtime_operations_duration_seconds` - - `kubelet_runtime_operations_errors` -> `kubelet_runtime_operations_errors_total` - - `kubelet_device_plugin_registration_count` -> `kubelet_device_plugin_registration_total` - - `kubelet_device_plugin_alloc_latency_microseconds` -> `kubelet_device_plugin_alloc_duration_seconds` - - `scheduler_e2e_scheduling_latency_microseconds` -> `scheduler_e2e_scheduling_duration_seconds` - - `scheduler_scheduling_algorithm_latency_microseconds` -> 
`scheduler_scheduling_algorithm_duration_seconds` - - `scheduler_scheduling_algorithm_predicate_evaluation` -> `scheduler_scheduling_algorithm_predicate_evaluation_seconds` - - `scheduler_scheduling_algorithm_priority_evaluation` -> `scheduler_scheduling_algorithm_priority_evaluation_seconds` - - `scheduler_scheduling_algorithm_preemption_evaluation` -> `scheduler_scheduling_algorithm_preemption_evaluation_seconds` - - `scheduler_binding_latency_microseconds` -> `scheduler_binding_duration_seconds` - - `kubeproxy_sync_proxy_rules_latency_microseconds` -> `kubeproxy_sync_proxy_rules_duration_seconds` - - `apiserver_request_latencies` -> `apiserver_request_duration_seconds` - - `apiserver_dropped_requests` -> `apiserver_dropped_requests_total` - - `etcd_request_latencies_summary` -> `etcd_request_duration_seconds` - - `apiserver_storage_transformation_latencies_microseconds ` -> `apiserver_storage_transformation_duration_seconds` - - `apiserver_storage_data_key_generation_latencies_microseconds` -> `apiserver_storage_data_key_generation_duration_seconds` - - `apiserver_request_count` -> `apiserver_request_total` - - `apiserver_request_latencies_summary` - - The following replacement metrics are available from v1.15.0: - - `apiserver_storage_transformation_failures_total` -> `apiserver_storage_transformation_operations_total` ([#76496](https://github.com/kubernetes/kubernetes/pull/76496), [@danielqsj](https://github.com/danielqsj)) [SIG API Machinery, Cluster Lifecycle, Instrumentation, Network, Node and Scheduling] - -## Changes by Kind - -### Deprecation - -#### kube-apiserver: -- the following deprecated APIs can no longer be served: - - All resources under `apps/v1beta1` and `apps/v1beta2` - use `apps/v1` instead - - `daemonsets`, `deployments`, `replicasets` resources under `extensions/v1beta1` - use `apps/v1` instead - - `networkpolicies` resources under `extensions/v1beta1` - use `networking.k8s.io/v1` instead - - `podsecuritypolicies` resources under 
`extensions/v1beta1` - use `policy/v1beta1` instead ([#85903](https://github.com/kubernetes/kubernetes/pull/85903), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps, Cluster Lifecycle, Instrumentation and Testing] - -#### kube-controller-manager: -- Azure service annotation service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset has been deprecated. Its support would be removed in a future release. ([#88462](https://github.com/kubernetes/kubernetes/pull/88462), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] - -#### kubelet: -- The StreamingProxyRedirects feature and `--redirect-container-streaming` flag are deprecated, and will be removed in a future release. The default behavior (proxy streaming requests through the kubelet) will be the only supported option. If you are setting `--redirect-container-streaming=true`, then you must migrate off this configuration. The flag will no longer be able to be enabled starting in v1.20. If you are not setting the flag, no action is necessary. ([#88290](https://github.com/kubernetes/kubernetes/pull/88290), [@tallclair](https://github.com/tallclair)) [SIG API Machinery and Node] -- resource metrics endpoint `/metrics/resource/v1alpha1` as well as all metrics under this endpoint have been deprecated. Please convert to the following metrics emitted by endpoint `/metrics/resource`: - - scrape_error --> scrape_error - - node_cpu_usage_seconds_total --> node_cpu_usage_seconds - - node_memory_working_set_bytes --> node_memory_working_set_bytes - - container_cpu_usage_seconds_total --> container_cpu_usage_seconds - - container_memory_working_set_bytes --> container_memory_working_set_bytes - - scrape_error --> scrape_error - ([#86282](https://github.com/kubernetes/kubernetes/pull/86282), [@RainbowMango](https://github.com/RainbowMango)) [SIG Node] -- In a future release, kubelet will no longer create the CSI NodePublishVolume target directory, in accordance with the CSI specification. 
CSI drivers may need to be updated accordingly to properly create and process the target path. ([#75535](https://github.com/kubernetes/kubernetes/issues/75535)) [SIG Storage] - -#### kube-proxy: -- `--healthz-port` and `--metrics-port` flags are deprecated, please use `--healthz-bind-address` and `--metrics-bind-address` instead ([#88512](https://github.com/kubernetes/kubernetes/pull/88512), [@SataQiu](https://github.com/SataQiu)) [SIG Network] -- a new `EndpointSliceProxying` feature gate has been added to control the use of EndpointSlices in kube-proxy. The EndpointSlice feature gate that used to control this behavior no longer affects kube-proxy. This feature has been disabled by default. ([#86137](https://github.com/kubernetes/kubernetes/pull/86137), [@robscott](https://github.com/robscott)) - -#### kubeadm: -- command line option "kubelet-version" for `kubeadm upgrade node` has been deprecated and will be removed in a future release. ([#87942](https://github.com/kubernetes/kubernetes/pull/87942), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- deprecate the usage of the experimental flag '--use-api' under the 'kubeadm alpha certs renew' command. ([#88827](https://github.com/kubernetes/kubernetes/pull/88827), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- kube-dns is deprecated and will not be supported in a future version ([#86574](https://github.com/kubernetes/kubernetes/pull/86574), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- the `ClusterStatus` struct present in the kubeadm-config ConfigMap is deprecated and will be removed in a future version. It is going to be maintained by kubeadm until it gets removed. The same information can be found on `etcd` and `kube-apiserver` pod annotations, `kubeadm.kubernetes.io/etcd.advertise-client-urls` and `kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint` respectively. 
([#87656](https://github.com/kubernetes/kubernetes/pull/87656), [@ereslibre](https://github.com/ereslibre)) [SIG Cluster Lifecycle] - -#### kubectl: -- the boolean and unset values for the --dry-run flag are deprecated and a value --dry-run=server|client|none will be required in a future version. ([#87580](https://github.com/kubernetes/kubernetes/pull/87580), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI] -- `kubectl apply --server-dry-run` is deprecated and replaced with --dry-run=server ([#87580](https://github.com/kubernetes/kubernetes/pull/87580), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI] - -#### add-ons: -- Remove cluster-monitoring addon ([#85512](https://github.com/kubernetes/kubernetes/pull/85512), [@serathius](https://github.com/serathius)) [SIG Cluster Lifecycle, Instrumentation, Scalability and Testing] - -#### kube-scheduler: -- The `scheduling_duration_seconds` summary metric is deprecated ([#86586](https://github.com/kubernetes/kubernetes/pull/86586), [@xiaoanyunfei](https://github.com/xiaoanyunfei)) [SIG Scheduling] -- The `scheduling_algorithm_predicate_evaluation_seconds` and - `scheduling_algorithm_priority_evaluation_seconds` metrics are deprecated, replaced by `framework_extension_point_duration_seconds[extension_point="Filter"]` and `framework_extension_point_duration_seconds[extension_point="Score"]`. ([#86584](https://github.com/kubernetes/kubernetes/pull/86584), [@xiaoanyunfei](https://github.com/xiaoanyunfei)) [SIG Scheduling] -- `AlwaysCheckAllPredicates` is deprecated in scheduler Policy API. ([#86369](https://github.com/kubernetes/kubernetes/pull/86369), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] - -#### Other deprecations: -- The k8s.io/node-api component is no longer updated. 
Instead, use the RuntimeClass types located within k8s.io/api, and the generated clients located within k8s.io/client-go ([#87503](https://github.com/kubernetes/kubernetes/pull/87503), [@liggitt](https://github.com/liggitt)) [SIG Node and Release] -- Removed the 'client' label from apiserver_request_total. ([#87669](https://github.com/kubernetes/kubernetes/pull/87669), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery and Instrumentation] - -### API Change - -#### New API types/versions: -- A new IngressClass resource has been added to enable better Ingress configuration. ([#88509](https://github.com/kubernetes/kubernetes/pull/88509), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, CLI, Network, Node and Testing] -- The CSIDriver API has graduated to storage.k8s.io/v1, and is now available for use. ([#84814](https://github.com/kubernetes/kubernetes/pull/84814), [@huffmanca](https://github.com/huffmanca)) [SIG Storage] - -#### New API fields: -- autoscaling/v2beta2 HorizontalPodAutoscaler added a `spec.behavior` field that allows scale behavior to be configured. Behaviors are specified separately for scaling up and down. In each direction a stabilization window can be specified as well as a list of policies and how to select amongst them. Policies can limit the absolute number of pods added or removed, or the percentage of pods added or removed. ([#74525](https://github.com/kubernetes/kubernetes/pull/74525), [@gliush](https://github.com/gliush)) [SIG API Machinery, Apps, Autoscaling and CLI] -- Ingress: - - `spec.ingressClassName` replaces the deprecated `kubernetes.io/ingress.class` annotation, and allows associating an Ingress object with a particular controller. - - path definitions added a `pathType` field to allow indicating how the specified path should be matched against incoming requests. 
Valid values are `Exact`, `Prefix`, and `ImplementationSpecific` ([#88587](https://github.com/kubernetes/kubernetes/pull/88587), [@cmluciano](https://github.com/cmluciano)) [SIG Apps, Cluster Lifecycle and Network] -- The alpha feature `AnyVolumeDataSource` enables PersistentVolumeClaim objects to use the spec.dataSource field to reference a custom type as a data source ([#88636](https://github.com/kubernetes/kubernetes/pull/88636), [@bswartz](https://github.com/bswartz)) [SIG Apps and Storage] -- The alpha feature `ConfigurableFSGroupPolicy` enables v1 Pods to specify a spec.securityContext.fsGroupChangePolicy policy to control how file permissions are applied to volumes mounted into the pod. ([#88488](https://github.com/kubernetes/kubernetes/pull/88488), [@gnufied](https://github.com/gnufied)) [SIG Storage] -- The alpha feature `ServiceAppProtocol` enables setting an `appProtocol` field in ServicePort and EndpointPort definitions. ([#88503](https://github.com/kubernetes/kubernetes/pull/88503), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- The alpha feature `ImmutableEphemeralVolumes` enables an `immutable` field in both Secret and ConfigMap objects to mark their contents as immutable. ([#86377](https://github.com/kubernetes/kubernetes/pull/86377), [@wojtek-t](https://github.com/wojtek-t)) [SIG Apps, CLI and Testing] - -#### Other API changes: -- The beta feature `ServerSideApply` enables tracking and managing changed fields for all new objects, which means there will be `managedFields` in `metadata` with the list of managers and their owned fields. -- The alpha feature `ServiceAccountIssuerDiscovery` enables publishing OIDC discovery information and service account token verification keys at `/.well-known/openid-configuration` and `/openid/v1/jwks` endpoints by API servers configured to issue service account tokens. 
([#80724](https://github.com/kubernetes/kubernetes/pull/80724), [@cceckman](https://github.com/cceckman)) [SIG API Machinery, Auth, Cluster Lifecycle and Testing] -- CustomResourceDefinition schemas that use `x-kubernetes-list-map-keys` to specify properties that uniquely identify list items must make those properties required or have a default value, to ensure those properties are present for all list items. See https://kubernetes.io/docs/reference/using-api/api-concepts/#merge-strategy for details. ([#88076](https://github.com/kubernetes/kubernetes/pull/88076), [@eloyekunle](https://github.com/eloyekunle)) [SIG API Machinery and Testing] -- CustomResourceDefinition schemas that use `x-kubernetes-list-type: map` or `x-kubernetes-list-type: set` now enable validation that the list items in the corresponding custom resources are unique. ([#84920](https://github.com/kubernetes/kubernetes/pull/84920), [@sttts](https://github.com/sttts)) [SIG API Machinery] - -#### Configuration file changes: - -#### kube-apiserver: -- The `--egress-selector-config-file` configuration file now accepts an apiserver.k8s.io/v1beta1 EgressSelectorConfiguration configuration object, and has been updated to allow specifying HTTP or GRPC connections to the network proxy ([#87179](https://github.com/kubernetes/kubernetes/pull/87179), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery, Cloud Provider and Cluster Lifecycle] - -#### kube-scheduler: -- A kubescheduler.config.k8s.io/v1alpha2 configuration file version is now accepted, with support for multiple scheduling profiles ([#87628](https://github.com/kubernetes/kubernetes/pull/87628), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] - - HardPodAffinityWeight moved from a top level ComponentConfig parameter to a PluginConfig parameter of InterPodAffinity Plugin in `kubescheduler.config.k8s.io/v1alpha2` ([#88002](https://github.com/kubernetes/kubernetes/pull/88002), 
[@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing] - - Kube-scheduler can run more than one scheduling profile. Given a pod, the profile is selected by using its `.spec.schedulerName`. ([#88285](https://github.com/kubernetes/kubernetes/pull/88285), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps, Scheduling and Testing] - - Scheduler Extenders can now be configured in the v1alpha2 component config ([#88768](https://github.com/kubernetes/kubernetes/pull/88768), [@damemi](https://github.com/damemi)) [SIG Release, Scheduling and Testing] - - The PostFilter of scheduler framework is renamed to PreScore in kubescheduler.config.k8s.io/v1alpha2. ([#87751](https://github.com/kubernetes/kubernetes/pull/87751), [@skilxn-go](https://github.com/skilxn-go)) [SIG Scheduling and Testing] - -#### kube-proxy: -- Added kube-proxy flags `--ipvs-tcp-timeout`, `--ipvs-tcpfin-timeout`, `--ipvs-udp-timeout` to configure IPVS connection timeouts. ([#85517](https://github.com/kubernetes/kubernetes/pull/85517), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cluster Lifecycle and Network] -- Added optional `--detect-local-mode` flag to kube-proxy. Valid values are "ClusterCIDR" (default matching previous behavior) and "NodeCIDR" ([#87748](https://github.com/kubernetes/kubernetes/pull/87748), [@satyasm](https://github.com/satyasm)) [SIG Cluster Lifecycle, Network and Scheduling] -- Kube-controller-manager and kube-scheduler expose profiling by default to match the kube-apiserver. Use `--enable-profiling=false` to disable. ([#88663](https://github.com/kubernetes/kubernetes/pull/88663), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Cloud Provider and Scheduling] -- Kubelet pod resources API now provides the information about active pods only. 
([#79409](https://github.com/kubernetes/kubernetes/pull/79409), [@takmatsu](https://github.com/takmatsu)) [SIG Node] -- New flag `--endpointslice-updates-batch-period` in kube-controller-manager can be used to reduce the number of endpointslice updates generated by pod changes. ([#88745](https://github.com/kubernetes/kubernetes/pull/88745), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Apps and Network] -- New flag `--show-hidden-metrics-for-version` in kube-proxy, kubelet, kube-controller-manager, and kube-scheduler can be used to show all hidden metrics that are deprecated in the previous minor release. ([#85279](https://github.com/kubernetes/kubernetes/pull/85279), [@RainbowMango](https://github.com/RainbowMango)) [SIG Cluster Lifecycle and Network] - -#### Features graduated to beta: - - StartupProbe ([#83437](https://github.com/kubernetes/kubernetes/pull/83437), [@matthyx](https://github.com/matthyx)) [SIG Node, Scalability and Testing] - -#### Features graduated to GA: - - VolumePVCDataSource ([#88686](https://github.com/kubernetes/kubernetes/pull/88686), [@j-griffith](https://github.com/j-griffith)) [SIG Storage] - - TaintBasedEvictions ([#87487](https://github.com/kubernetes/kubernetes/pull/87487), [@skilxn-go](https://github.com/skilxn-go)) [SIG API Machinery, Apps, Node, Scheduling and Testing] - - BlockVolume and CSIBlockVolume ([#88673](https://github.com/kubernetes/kubernetes/pull/88673), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] - - Windows RunAsUserName ([#87790](https://github.com/kubernetes/kubernetes/pull/87790), [@marosset](https://github.com/marosset)) [SIG Apps and Windows] -- The following feature gates are removed, because the associated features were unconditionally enabled in previous releases: CustomResourceValidation, CustomResourceSubresources, CustomResourceWebhookConversion, CustomResourcePublishOpenAPI, CustomResourceDefaulting ([#87475](https://github.com/kubernetes/kubernetes/pull/87475), 
[@liggitt](https://github.com/liggitt)) [SIG API Machinery] - -### Feature - -- API request throttling (due to a high rate of requests) is now reported in client-go logs at log level 2. The messages are of the form:`Throttling request took 1.50705208s, request: GET:` The presence of these messages may indicate to the administrator the need to tune the cluster accordingly. ([#87740](https://github.com/kubernetes/kubernetes/pull/87740), [@jennybuckley](https://github.com/jennybuckley)) [SIG API Machinery] -- Add support for mount options to the FC volume plugin ([#87499](https://github.com/kubernetes/kubernetes/pull/87499), [@ejweber](https://github.com/ejweber)) [SIG Storage] -- Added a config-mode flag in azure auth module to enable getting AAD token without spn: prefix in audience claim. When it's not specified, the default behavior doesn't change. ([#87630](https://github.com/kubernetes/kubernetes/pull/87630), [@weinong](https://github.com/weinong)) [SIG API Machinery, Auth, CLI and Cloud Provider] -- Allow for configuration of CoreDNS replica count ([#85837](https://github.com/kubernetes/kubernetes/pull/85837), [@pickledrick](https://github.com/pickledrick)) [SIG Cluster Lifecycle] -- Allow user to specify resource using --filename flag when invoking kubectl exec ([#88460](https://github.com/kubernetes/kubernetes/pull/88460), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] -- Apiserver added a new flag --goaway-chance which is the fraction of requests that will be closed gracefully(GOAWAY) to prevent HTTP/2 clients from getting stuck on a single apiserver. ([#88567](https://github.com/kubernetes/kubernetes/pull/88567), [@answer1991](https://github.com/answer1991)) [SIG API Machinery] -- Azure Cloud Provider now supports using Azure network resources (Virtual Network, Load Balancer, Public IP, Route Table, Network Security Group, etc.) in different AAD Tenant and Subscription than those for the Kubernetes cluster. 
To use the feature, please reference https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/cloud-provider-config.md#host-network-resources-in-different-aad-tenant-and-subscription. ([#88384](https://github.com/kubernetes/kubernetes/pull/88384), [@bowen5](https://github.com/bowen5)) [SIG Cloud Provider] -- Azure VMSS/VMSSVM clients now suppress requests on throttling ([#86740](https://github.com/kubernetes/kubernetes/pull/86740), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Azure cloud provider cache TTL is configurable, list of the azure cloud provider is as following: - - "availabilitySetNodesCacheTTLInSeconds" - - "vmssCacheTTLInSeconds" - - "vmssVirtualMachinesCacheTTLInSeconds" - - "vmCacheTTLInSeconds" - - "loadBalancerCacheTTLInSeconds" - - "nsgCacheTTLInSeconds" - - "routeTableCacheTTLInSeconds" - ([#86266](https://github.com/kubernetes/kubernetes/pull/86266), [@zqingqing1](https://github.com/zqingqing1)) [SIG Cloud Provider] -- Azure global rate limit is switched to per-client. A set of new rate limit configure options are introduced, including routeRateLimit, SubnetsRateLimit, InterfaceRateLimit, RouteTableRateLimit, LoadBalancerRateLimit, PublicIPAddressRateLimit, SecurityGroupRateLimit, VirtualMachineRateLimit, StorageAccountRateLimit, DiskRateLimit, SnapshotRateLimit, VirtualMachineScaleSetRateLimit and VirtualMachineSizeRateLimit. The original rate limit options would be default values for those new client's rate limiter. 
([#86515](https://github.com/kubernetes/kubernetes/pull/86515), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Azure network and VM clients now suppress requests on throttling ([#87122](https://github.com/kubernetes/kubernetes/pull/87122), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Azure storage clients now suppress requests on throttling ([#87306](https://github.com/kubernetes/kubernetes/pull/87306), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Azure: add support for single stack IPv6 ([#88448](https://github.com/kubernetes/kubernetes/pull/88448), [@aramase](https://github.com/aramase)) [SIG Cloud Provider] -- DefaultConstraints can be specified for PodTopologySpread Plugin in the scheduler’s ComponentConfig ([#88671](https://github.com/kubernetes/kubernetes/pull/88671), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- DisableAvailabilitySetNodes is added to avoid VM list for VMSS clusters. It should only be used when vmType is "vmss" and all the nodes (including control plane nodes) are VMSS virtual machines. ([#87685](https://github.com/kubernetes/kubernetes/pull/87685), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Elasticsearch supports automatically setting the advertise address ([#85944](https://github.com/kubernetes/kubernetes/pull/85944), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle and Instrumentation] -- EndpointSlices will now be enabled by default. A new `EndpointSliceProxying` feature gate determines if kube-proxy will use EndpointSlices, this is disabled by default. ([#86137](https://github.com/kubernetes/kubernetes/pull/86137), [@robscott](https://github.com/robscott)) [SIG Network] -- Kube-proxy: Added dual-stack IPv4/IPv6 support to the iptables proxier. 
([#82462](https://github.com/kubernetes/kubernetes/pull/82462), [@vllry](https://github.com/vllry)) [SIG Network] -- Kubeadm now supports automatic calculations of dual-stack node cidr masks to kube-controller-manager. ([#85609](https://github.com/kubernetes/kubernetes/pull/85609), [@Arvinderpal](https://github.com/Arvinderpal)) [SIG Cluster Lifecycle] -- Kubeadm: add an upgrade health check that deploys a Job ([#81319](https://github.com/kubernetes/kubernetes/pull/81319), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: add the experimental feature gate PublicKeysECDSA that can be used to create a - cluster with ECDSA certificates from "kubeadm init". Renewal of existing ECDSA certificates is also supported using "kubeadm alpha certs renew", but not switching between the RSA and ECDSA algorithms on the fly or during upgrades. ([#86953](https://github.com/kubernetes/kubernetes/pull/86953), [@rojkov](https://github.com/rojkov)) [SIG API Machinery, Auth and Cluster Lifecycle] -- Kubeadm: implemented structured output of 'kubeadm config images list' command in JSON, YAML, Go template and JsonPath formats ([#86810](https://github.com/kubernetes/kubernetes/pull/86810), [@bart0sh](https://github.com/bart0sh)) [SIG Cluster Lifecycle] -- Kubeadm: on kubeconfig certificate renewal, keep the embedded CA in sync with the one on disk ([#88052](https://github.com/kubernetes/kubernetes/pull/88052), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: reject a node joining the cluster if a node with the same name already exists ([#81056](https://github.com/kubernetes/kubernetes/pull/81056), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: support Windows specific kubelet flags in kubeadm-flags.env ([#88287](https://github.com/kubernetes/kubernetes/pull/88287), [@gab-satchi](https://github.com/gab-satchi)) [SIG Cluster Lifecycle and Windows] -- Kubeadm: support automatic retry after failing to
pull image ([#86899](https://github.com/kubernetes/kubernetes/pull/86899), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubeadm: upgrade supports fallback to the nearest known etcd version if an unknown k8s version is passed ([#88373](https://github.com/kubernetes/kubernetes/pull/88373), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubectl/drain: add disable-eviction option. Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, and should be used with caution. ([#85571](https://github.com/kubernetes/kubernetes/pull/85571), [@michaelgugino](https://github.com/michaelgugino)) [SIG CLI] -- Kubectl/drain: add skip-wait-for-delete-timeout option. If a pod’s `DeletionTimestamp` is older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip. ([#85577](https://github.com/kubernetes/kubernetes/pull/85577), [@michaelgugino](https://github.com/michaelgugino)) [SIG CLI] -- Option `preConfiguredBackendPoolLoadBalancerTypes` is added to azure cloud provider for the pre-configured load balancers, possible values: `""`, `"internal"`, `"external"`, `"all"` ([#86338](https://github.com/kubernetes/kubernetes/pull/86338), [@gossion](https://github.com/gossion)) [SIG Cloud Provider] -- PodTopologySpread plugin now excludes terminatingPods when making scheduling decisions. ([#87845](https://github.com/kubernetes/kubernetes/pull/87845), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Provider/azure: Network security groups can now be in a separate resource group.
([#87035](https://github.com/kubernetes/kubernetes/pull/87035), [@CecileRobertMichon](https://github.com/CecileRobertMichon)) [SIG Cloud Provider] -- SafeSysctlWhitelist: add net.ipv4.ping_group_range ([#85463](https://github.com/kubernetes/kubernetes/pull/85463), [@AkihiroSuda](https://github.com/AkihiroSuda)) [SIG Auth] -- Scheduler framework permit plugins now run at the end of the scheduling cycle, after reserve plugins. Waiting on permit will remain in the beginning of the binding cycle. ([#88199](https://github.com/kubernetes/kubernetes/pull/88199), [@mateuszlitwin](https://github.com/mateuszlitwin)) [SIG Scheduling] -- Scheduler: Add DefaultBinder plugin ([#87430](https://github.com/kubernetes/kubernetes/pull/87430), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing] -- Skip default spreading scoring plugin for pods that define TopologySpreadConstraints ([#87566](https://github.com/kubernetes/kubernetes/pull/87566), [@skilxn-go](https://github.com/skilxn-go)) [SIG Scheduling] -- The kubectl --dry-run flag now accepts the values 'client', 'server', and 'none', to support client-side and server-side dry-run strategies. The boolean and unset values for the --dry-run flag are deprecated and a value will be required in a future version. ([#87580](https://github.com/kubernetes/kubernetes/pull/87580), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI] -- Support server-side dry-run in kubectl with --dry-run=server for commands including apply, patch, create, run, annotate, label, set, autoscale, drain, rollout undo, and expose. 
([#87714](https://github.com/kubernetes/kubernetes/pull/87714), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG API Machinery, CLI and Testing] -- Add --dry-run=server|client to kubectl delete, taint, replace ([#88292](https://github.com/kubernetes/kubernetes/pull/88292), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI and Testing] -- The feature PodTopologySpread (feature gate `EvenPodsSpread`) has been enabled by default in 1.18. ([#88105](https://github.com/kubernetes/kubernetes/pull/88105), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] -- The kubelet and the default docker runtime now support running ephemeral containers in the Linux process namespace of a target container. Other container runtimes must implement support for this feature before it will be available for that runtime. ([#84731](https://github.com/kubernetes/kubernetes/pull/84731), [@verb](https://github.com/verb)) [SIG Node] -- The underlying format of the `CPUManager` state file has changed. Upgrades should be seamless, but any third-party tools that rely on reading the previous format need to be updated. ([#84462](https://github.com/kubernetes/kubernetes/pull/84462), [@klueska](https://github.com/klueska)) [SIG Node and Testing] -- Update CNI version to v0.8.5 ([#78819](https://github.com/kubernetes/kubernetes/pull/78819), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Cluster Lifecycle, Network, Release and Testing] -- Webhooks have alpha support for network proxy ([#85870](https://github.com/kubernetes/kubernetes/pull/85870), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery, Auth and Testing] -- When client certificate files are provided, reload files for new connections, and close connections when a certificate changes. 
([#79083](https://github.com/kubernetes/kubernetes/pull/79083), [@jackkleeman](https://github.com/jackkleeman)) [SIG API Machinery, Auth, Node and Testing] -- When deleting objects using kubectl with the --force flag, you are no longer required to also specify --grace-period=0. ([#87776](https://github.com/kubernetes/kubernetes/pull/87776), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- Windows nodes on GCE can use virtual TPM-based authentication to the control plane. ([#85466](https://github.com/kubernetes/kubernetes/pull/85466), [@pjh](https://github.com/pjh)) [SIG Cluster Lifecycle] -- You can now pass "--node-ip ::" to kubelet to indicate that it should autodetect an IPv6 address to use as the node's primary address. ([#85850](https://github.com/kubernetes/kubernetes/pull/85850), [@danwinship](https://github.com/danwinship)) [SIG Cloud Provider, Network and Node] -- `kubectl` now contains a `kubectl alpha debug` command. This command allows attaching an ephemeral container to a running pod for the purposes of debugging. ([#88004](https://github.com/kubernetes/kubernetes/pull/88004), [@verb](https://github.com/verb)) [SIG CLI] -- TLS Server Name overrides can now be specified in a kubeconfig file and via --tls-server-name in kubectl ([#88769](https://github.com/kubernetes/kubernetes/pull/88769), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and CLI] - -#### Metrics: -- Add `rest_client_rate_limiter_duration_seconds` metric to component-base to track client side rate limiter latency in seconds. Broken down by verb and URL. ([#88134](https://github.com/kubernetes/kubernetes/pull/88134), [@jennybuckley](https://github.com/jennybuckley)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] -- Added two client certificate metrics for exec auth: - - `rest_client_certificate_expiration_seconds` a gauge reporting the lifetime of the current client certificate. Reports the time of expiry in seconds since January 1, 1970 UTC. 
- - `rest_client_certificate_rotation_age` a histogram reporting the age of a just rotated client certificate in seconds. ([#84382](https://github.com/kubernetes/kubernetes/pull/84382), [@sambdavidson](https://github.com/sambdavidson)) [SIG API Machinery, Auth, Cluster Lifecycle and Instrumentation] -- Controller manager serve workqueue metrics ([#87967](https://github.com/kubernetes/kubernetes/pull/87967), [@zhan849](https://github.com/zhan849)) [SIG API Machinery] -- Following metrics have been turned off: - - kubelet_pod_worker_latency_microseconds - - kubelet_pod_start_latency_microseconds - - kubelet_cgroup_manager_latency_microseconds - - kubelet_pod_worker_start_latency_microseconds - - kubelet_pleg_relist_latency_microseconds - - kubelet_pleg_relist_interval_microseconds - - kubelet_eviction_stats_age_microseconds - - kubelet_runtime_operations - - kubelet_runtime_operations_latency_microseconds - - kubelet_runtime_operations_errors - - kubelet_device_plugin_registration_count - - kubelet_device_plugin_alloc_latency_microseconds - - kubelet_docker_operations - - kubelet_docker_operations_latency_microseconds - - kubelet_docker_operations_errors - - kubelet_docker_operations_timeout - - network_plugin_operations_latency_microseconds ([#83841](https://github.com/kubernetes/kubernetes/pull/83841), [@RainbowMango](https://github.com/RainbowMango)) [SIG Network and Node] -- Kube-apiserver metrics will now include request counts, latencies, and response sizes for /healthz, /livez, and /readyz requests. ([#83598](https://github.com/kubernetes/kubernetes/pull/83598), [@jktomer](https://github.com/jktomer)) [SIG API Machinery] -- Kubelet now exports a `server_expiration_renew_failure` and `client_expiration_renew_failure` metric counter if the certificate rotations cannot be performed. 
([#84614](https://github.com/kubernetes/kubernetes/pull/84614), [@rphillips](https://github.com/rphillips)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node and Release] -- Kubelet: the metric process_start_time_seconds is now marked with the ALPHA stability level. ([#85446](https://github.com/kubernetes/kubernetes/pull/85446), [@RainbowMango](https://github.com/RainbowMango)) [SIG API Machinery, Cluster Lifecycle, Instrumentation and Node] -- New metric `kubelet_pleg_last_seen_seconds` to aid diagnosis of PLEG not healthy issues. ([#86251](https://github.com/kubernetes/kubernetes/pull/86251), [@bboreham](https://github.com/bboreham)) [SIG Node] - -### Other (Bug, Cleanup or Flake) - -- Fixed a regression with clients prior to 1.15 not being able to update podIP in pod status, or podCIDR in node spec, against >= 1.16 API servers ([#88505](https://github.com/kubernetes/kubernetes/pull/88505), [@liggitt](https://github.com/liggitt)) [SIG Apps and Network] -- Fixed "kubectl describe statefulsets.apps" printing garbage for rolling update partition ([#85846](https://github.com/kubernetes/kubernetes/pull/85846), [@phil9909](https://github.com/phil9909)) [SIG CLI] -- Add an event to PV when filesystem on PV does not match actual filesystem on disk ([#86982](https://github.com/kubernetes/kubernetes/pull/86982), [@gnufied](https://github.com/gnufied)) [SIG Storage] -- Add azure disk WriteAccelerator support ([#87945](https://github.com/kubernetes/kubernetes/pull/87945), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Add delays between goroutines for vm instance update ([#88094](https://github.com/kubernetes/kubernetes/pull/88094), [@aramase](https://github.com/aramase)) [SIG Cloud Provider] -- Add init containers log to cluster dump info.
([#88324](https://github.com/kubernetes/kubernetes/pull/88324), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Addons: elasticsearch discovery supports IPv6 ([#85543](https://github.com/kubernetes/kubernetes/pull/85543), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle and Instrumentation] -- Adds "volume.beta.kubernetes.io/migrated-to" annotation to PV's and PVC's when they are migrated to signal external provisioners to pick up those objects for Provisioning and Deleting. ([#87098](https://github.com/kubernetes/kubernetes/pull/87098), [@davidz627](https://github.com/davidz627)) [SIG Storage] -- All api-server log request lines in a more greppable format. ([#87203](https://github.com/kubernetes/kubernetes/pull/87203), [@lavalamp](https://github.com/lavalamp)) [SIG API Machinery] -- Azure VMSS LoadBalancerBackendAddressPools updating has been improved with sequential-sync + concurrent-async requests. ([#88699](https://github.com/kubernetes/kubernetes/pull/88699), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Azure cloud provider now obtains AAD token whose audience claim will not have spn: prefix ([#87590](https://github.com/kubernetes/kubernetes/pull/87590), [@weinong](https://github.com/weinong)) [SIG Cloud Provider] -- AzureFile and CephFS use the new Mount library that prevents logging of sensitive mount options.
([#88684](https://github.com/kubernetes/kubernetes/pull/88684), [@saad-ali](https://github.com/saad-ali)) [SIG Storage] -- Bind dns-horizontal containers to linux nodes to avoid Windows scheduling on kubernetes cluster includes linux nodes and windows nodes ([#83364](https://github.com/kubernetes/kubernetes/pull/83364), [@wawa0210](https://github.com/wawa0210)) [SIG Cluster Lifecycle and Windows] -- Bind kube-dns containers to linux nodes to avoid Windows scheduling ([#83358](https://github.com/kubernetes/kubernetes/pull/83358), [@wawa0210](https://github.com/wawa0210)) [SIG Cluster Lifecycle and Windows] -- Bind metadata-agent containers to linux nodes to avoid Windows scheduling on kubernetes cluster includes linux nodes and windows nodes ([#83363](https://github.com/kubernetes/kubernetes/pull/83363), [@wawa0210](https://github.com/wawa0210)) [SIG Cluster Lifecycle, Instrumentation and Windows] -- Bind metrics-server containers to linux nodes to avoid Windows scheduling on kubernetes cluster includes linux nodes and windows nodes ([#83362](https://github.com/kubernetes/kubernetes/pull/83362), [@wawa0210](https://github.com/wawa0210)) [SIG Cluster Lifecycle, Instrumentation and Windows] -- Bug fixes: Make sure we include latest packages node #351 (@caseydavenport) ([#84163](https://github.com/kubernetes/kubernetes/pull/84163), [@david-tigera](https://github.com/david-tigera)) [SIG Cluster Lifecycle] -- CPU limits are now respected for Windows containers. If a node is over-provisioned, no weighting is used, only limits are respected. ([#86101](https://github.com/kubernetes/kubernetes/pull/86101), [@PatrickLang](https://github.com/PatrickLang)) [SIG Node, Testing and Windows] -- Changed core_pattern on COS nodes to be an absolute path. 
([#86329](https://github.com/kubernetes/kubernetes/pull/86329), [@mml](https://github.com/mml)) [SIG Cluster Lifecycle and Node] -- Client-go certificate manager rotation gained the ability to preserve optional intermediate chains accompanying issued certificates ([#88744](https://github.com/kubernetes/kubernetes/pull/88744), [@jackkleeman](https://github.com/jackkleeman)) [SIG API Machinery and Auth] -- Cloud provider config CloudProviderBackoffMode has been removed since it won't be used anymore. ([#88463](https://github.com/kubernetes/kubernetes/pull/88463), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Conformance image now depends on stretch-slim instead of debian-hyperkube-base as that image is being deprecated and removed. ([#88702](https://github.com/kubernetes/kubernetes/pull/88702), [@dims](https://github.com/dims)) [SIG Cluster Lifecycle, Release and Testing] -- Deprecate --generator flag from kubectl create commands ([#88655](https://github.com/kubernetes/kubernetes/pull/88655), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- During initialization phase (preflight), kubeadm now verifies the presence of the conntrack executable ([#85857](https://github.com/kubernetes/kubernetes/pull/85857), [@hnanni](https://github.com/hnanni)) [SIG Cluster Lifecycle] -- EndpointSlice should not contain endpoints for terminating pods ([#89056](https://github.com/kubernetes/kubernetes/pull/89056), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network] -- Evictions due to pods breaching their ephemeral storage limits are now recorded by the `kubelet_evictions` metric and can be alerted on. ([#87906](https://github.com/kubernetes/kubernetes/pull/87906), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node] -- Filter published OpenAPI schema by making nullable, required fields non-required in order to avoid kubectl to wrongly reject null values. 
([#85722](https://github.com/kubernetes/kubernetes/pull/85722), [@sttts](https://github.com/sttts)) [SIG API Machinery] -- Fix /readyz to return error immediately after a shutdown is initiated, before the --shutdown-delay-duration has elapsed. ([#88911](https://github.com/kubernetes/kubernetes/pull/88911), [@tkashem](https://github.com/tkashem)) [SIG API Machinery] -- Fix API Server potential memory leak issue in processing watch request. ([#85410](https://github.com/kubernetes/kubernetes/pull/85410), [@answer1991](https://github.com/answer1991)) [SIG API Machinery] -- Fix EndpointSlice controller race condition and ensure that it handles external changes to EndpointSlices. ([#85703](https://github.com/kubernetes/kubernetes/pull/85703), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Fix IPv6 addresses lost issue in pure ipv6 vsphere environment ([#86001](https://github.com/kubernetes/kubernetes/pull/86001), [@hubv](https://github.com/hubv)) [SIG Cloud Provider] -- Fix LoadBalancer rule checking so that no unexpected LoadBalancer updates are made ([#85990](https://github.com/kubernetes/kubernetes/pull/85990), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix a bug in kube-proxy that caused it to crash when using load balancers with a different IP family ([#87117](https://github.com/kubernetes/kubernetes/pull/87117), [@aojea](https://github.com/aojea)) [SIG Network] -- Fix a bug in port-forward: named port not working with service ([#85511](https://github.com/kubernetes/kubernetes/pull/85511), [@oke-py](https://github.com/oke-py)) [SIG CLI] -- Fix a bug in the dual-stack IPVS proxier where stale IPv6 endpoints were not being cleaned up ([#87695](https://github.com/kubernetes/kubernetes/pull/87695), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network] -- Fix a bug that orphan revision cannot be adopted and statefulset cannot be synced ([#86801](https://github.com/kubernetes/kubernetes/pull/86801), 
[@likakuli](https://github.com/likakuli)) [SIG Apps] -- Fix a bug where ExternalTrafficPolicy is not applied to service ExternalIPs. ([#88786](https://github.com/kubernetes/kubernetes/pull/88786), [@freehan](https://github.com/freehan)) [SIG Network] -- Fix a bug where kubenet fails to parse the tc output. ([#83572](https://github.com/kubernetes/kubernetes/pull/83572), [@chendotjs](https://github.com/chendotjs)) [SIG Network] -- Fix a regression in kubenet that prevent pods to obtain ip addresses ([#85993](https://github.com/kubernetes/kubernetes/pull/85993), [@chendotjs](https://github.com/chendotjs)) [SIG Network and Node] -- Fix azure file AuthorizationFailure ([#85475](https://github.com/kubernetes/kubernetes/pull/85475), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix bug where EndpointSlice controller would attempt to modify shared objects. ([#85368](https://github.com/kubernetes/kubernetes/pull/85368), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps and Network] -- Fix handling of aws-load-balancer-security-groups annotation. Security-Groups assigned with this annotation are no longer modified by kubernetes which is the expected behaviour of most users. Also no unnecessary Security-Groups are created anymore if this annotation is used. ([#83446](https://github.com/kubernetes/kubernetes/pull/83446), [@Elias481](https://github.com/Elias481)) [SIG Cloud Provider] -- Fix invalid VMSS updates due to incorrect cache ([#89002](https://github.com/kubernetes/kubernetes/pull/89002), [@ArchangelSDY](https://github.com/ArchangelSDY)) [SIG Cloud Provider] -- Fix isCurrentInstance for Windows by removing the dependency of hostname. ([#89138](https://github.com/kubernetes/kubernetes/pull/89138), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix issue #85805 about a resource not found in azure cloud provider when LoadBalancer specified in another resource group. 
([#86502](https://github.com/kubernetes/kubernetes/pull/86502), [@levimm](https://github.com/levimm)) [SIG Cloud Provider] -- Fix kubectl annotate error when local=true is set ([#86952](https://github.com/kubernetes/kubernetes/pull/86952), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Fix kubectl create deployment image name ([#86636](https://github.com/kubernetes/kubernetes/pull/86636), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Fix `kubectl drain ignore` daemonsets and others. ([#87361](https://github.com/kubernetes/kubernetes/pull/87361), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Fix missing "apiVersion" for "involvedObject" in Events for Nodes. ([#87537](https://github.com/kubernetes/kubernetes/pull/87537), [@uthark](https://github.com/uthark)) [SIG Apps and Node] -- Fix nil pointer dereference in azure cloud provider ([#85975](https://github.com/kubernetes/kubernetes/pull/85975), [@ldx](https://github.com/ldx)) [SIG Cloud Provider] -- Fix regression in statefulset conversion which prevents applying a statefulset multiple times. ([#87706](https://github.com/kubernetes/kubernetes/pull/87706), [@liggitt](https://github.com/liggitt)) [SIG Apps and Testing] -- Fix route conflicted operations when updating multiple routes together ([#88209](https://github.com/kubernetes/kubernetes/pull/88209), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix that prevents repeated fetching of PVC/PV objects by kubelet when processing of pod volumes fails. While this prevents hammering API server in these error scenarios, it means that some errors in processing volume(s) for a pod could now take up to 2-3 minutes before retry. ([#88141](https://github.com/kubernetes/kubernetes/pull/88141), [@tedyu](https://github.com/tedyu)) [SIG Node and Storage] -- Fix the bug PIP's DNS is deleted if no DNS label service annotation isn't set. 
([#87246](https://github.com/kubernetes/kubernetes/pull/87246), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Fix control plane hosts rolling upgrade causing thundering herd of LISTs on etcd leading to control plane unavailability. ([#86430](https://github.com/kubernetes/kubernetes/pull/86430), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, Node and Testing] -- Fix: add azure disk migration support for CSINode ([#88014](https://github.com/kubernetes/kubernetes/pull/88014), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix: add non-retriable errors in azure clients ([#87941](https://github.com/kubernetes/kubernetes/pull/87941), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: add remediation in azure disk attach/detach ([#88444](https://github.com/kubernetes/kubernetes/pull/88444), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: azure data disk should use same key as os disk by default ([#86351](https://github.com/kubernetes/kubernetes/pull/86351), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: azure disk could not mounted on Standard_DC4s/DC2s instances ([#86612](https://github.com/kubernetes/kubernetes/pull/86612), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix: azure file mount timeout issue ([#88610](https://github.com/kubernetes/kubernetes/pull/88610), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix: check disk status before disk azure disk ([#88360](https://github.com/kubernetes/kubernetes/pull/88360), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: corrupted mount point in csi driver ([#88569](https://github.com/kubernetes/kubernetes/pull/88569), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Fix: get azure disk lun timeout issue ([#88158](https://github.com/kubernetes/kubernetes/pull/88158), 
[@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix: update azure disk max count ([#88201](https://github.com/kubernetes/kubernetes/pull/88201), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fixed "requested device X but found Y" attach error on AWS. ([#85675](https://github.com/kubernetes/kubernetes/pull/85675), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider and Storage] -- Fixed NetworkPolicy validation that `Except` values are accepted when they are outside the CIDR range. ([#86578](https://github.com/kubernetes/kubernetes/pull/86578), [@tnqn](https://github.com/tnqn)) [SIG Network] -- Fixed a bug in the TopologyManager. Previously, the TopologyManager would only guarantee alignment if container creation was serialized in some way. Alignment is now guaranteed under all scenarios of container creation. ([#87759](https://github.com/kubernetes/kubernetes/pull/87759), [@klueska](https://github.com/klueska)) [SIG Node] -- Fixed a bug which could prevent a provider ID from ever being set for node if an error occurred determining the provider ID when the node was added. ([#87043](https://github.com/kubernetes/kubernetes/pull/87043), [@zjs](https://github.com/zjs)) [SIG Apps and Cloud Provider] -- Fixed a data race in the kubelet image manager that can cause static pod workers to silently stop working. ([#88915](https://github.com/kubernetes/kubernetes/pull/88915), [@roycaihw](https://github.com/roycaihw)) [SIG Node] -- Fixed a panic in the kubelet cleaning up pod volumes ([#86277](https://github.com/kubernetes/kubernetes/pull/86277), [@tedyu](https://github.com/tedyu)) [SIG Storage] -- Fixed a regression where the kubelet would fail to update the ready status of pods. 
([#84951](https://github.com/kubernetes/kubernetes/pull/84951), [@tedyu](https://github.com/tedyu)) [SIG Node] -- Fixed an issue that could cause the kubelet to incorrectly run concurrent pod reconciliation loops and crash. ([#89055](https://github.com/kubernetes/kubernetes/pull/89055), [@tedyu](https://github.com/tedyu)) [SIG Node] -- Fixed block CSI volume cleanup after timeouts. ([#88660](https://github.com/kubernetes/kubernetes/pull/88660), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] -- Fixed cleaning of CSI raw block volumes. ([#87978](https://github.com/kubernetes/kubernetes/pull/87978), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] -- Fixed AWS Cloud Provider attempting to delete LoadBalancer security group it didn’t provision, and fixed AWS Cloud Provider creating a default LoadBalancer security group even if annotation `service.beta.kubernetes.io/aws-load-balancer-security-groups` is present because the intended behavior of aws-load-balancer-security-groups is to replace all security groups assigned to the load balancer. ([#84265](https://github.com/kubernetes/kubernetes/pull/84265), [@bhagwat070919](https://github.com/bhagwat070919)) [SIG Cloud Provider] -- Fixed two scheduler metrics (pending_pods and schedule_attempts_total) not being recorded ([#87692](https://github.com/kubernetes/kubernetes/pull/87692), [@everpeace](https://github.com/everpeace)) [SIG Scheduling] -- Fixes an issue with kubelet-reported pod status on deleted/recreated pods. ([#86320](https://github.com/kubernetes/kubernetes/pull/86320), [@liggitt](https://github.com/liggitt)) [SIG Node] -- Fixes conversion error in multi-version custom resources that could cause metadata.generation to increment on no-op patches or updates of a custom resource. ([#88995](https://github.com/kubernetes/kubernetes/pull/88995), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] -- Fixes issue where AAD token obtained by kubectl is incompatible with on-behalf-of flow and oidc. 
The audience claim before this fix has "spn:" prefix. After this fix, "spn:" prefix is omitted. ([#86412](https://github.com/kubernetes/kubernetes/pull/86412), [@weinong](https://github.com/weinong)) [SIG API Machinery, Auth and Cloud Provider] -- Fixes an issue where you can't attach more than 15 GCE Persistent Disks to c2, n2, m1, m2 machine types. ([#88602](https://github.com/kubernetes/kubernetes/pull/88602), [@yuga711](https://github.com/yuga711)) [SIG Storage] -- Fixes kube-proxy when EndpointSlice feature gate is enabled on Windows. ([#86016](https://github.com/kubernetes/kubernetes/pull/86016), [@robscott](https://github.com/robscott)) [SIG Auth and Network] -- Fixes kubelet crash in client certificate rotation cases ([#88079](https://github.com/kubernetes/kubernetes/pull/88079), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Auth and Node] -- Fixes service account token admission error in clusters that do not run the service account token controller ([#87029](https://github.com/kubernetes/kubernetes/pull/87029), [@liggitt](https://github.com/liggitt)) [SIG Auth] -- Fixes v1.17.0 regression in --service-cluster-ip-range handling with IPv4 ranges larger than 65536 IP addresses ([#86534](https://github.com/kubernetes/kubernetes/pull/86534), [@liggitt](https://github.com/liggitt)) [SIG Network] -- Fixes wrong validation result of NetworkPolicy PolicyTypes ([#85747](https://github.com/kubernetes/kubernetes/pull/85747), [@tnqn](https://github.com/tnqn)) [SIG Network] -- For subprotocol negotiation, both client and server protocol is required now. ([#86646](https://github.com/kubernetes/kubernetes/pull/86646), [@tedyu](https://github.com/tedyu)) [SIG API Machinery and Node] -- For volumes that allow attaches across multiple nodes, attach and detach operations across different nodes are now executed in parallel. 
([#88678](https://github.com/kubernetes/kubernetes/pull/88678), [@verult](https://github.com/verult)) [SIG Storage] -- Garbage collector now can correctly orphan ControllerRevisions when StatefulSets are deleted with orphan propagation policy. ([#84984](https://github.com/kubernetes/kubernetes/pull/84984), [@cofyc](https://github.com/cofyc)) [SIG Apps] -- `Get-kube.sh` uses the gcloud's current local GCP service account for auth when the provider is GCE or GKE instead of the metadata server default ([#88383](https://github.com/kubernetes/kubernetes/pull/88383), [@BenTheElder](https://github.com/BenTheElder)) [SIG Cluster Lifecycle] -- Golang/x/net has been updated to bring in fixes for CVE-2020-9283 ([#88381](https://github.com/kubernetes/kubernetes/pull/88381), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] -- If a serving certificate’s param specifies a name that is an IP for an SNI certificate, it will have priority for replying to server connections. ([#85308](https://github.com/kubernetes/kubernetes/pull/85308), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] -- Improved yaml parsing performance ([#85458](https://github.com/kubernetes/kubernetes/pull/85458), [@cjcullen](https://github.com/cjcullen)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- Improves performance of the node authorizer ([#87696](https://github.com/kubernetes/kubernetes/pull/87696), [@liggitt](https://github.com/liggitt)) [SIG Auth] -- In GKE alpha clusters it will be possible to use the service annotation `cloud.google.com/network-tier: Standard` ([#88487](https://github.com/kubernetes/kubernetes/pull/88487), [@zioproto](https://github.com/zioproto)) [SIG Cloud Provider] -- Includes FSType when describing CSI persistent volumes. 
([#85293](https://github.com/kubernetes/kubernetes/pull/85293), [@huffmanca](https://github.com/huffmanca)) [SIG CLI and Storage] -- Iptables/userspace proxy: improve performance by getting local addresses only once per sync loop, instead of for every external IP ([#85617](https://github.com/kubernetes/kubernetes/pull/85617), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Network] -- Kube-aggregator: always sets unavailableGauge metric to reflect the current state of a service. ([#87778](https://github.com/kubernetes/kubernetes/pull/87778), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery] -- Kube-apiserver: fixed a conflict error encountered attempting to delete a pod with gracePeriodSeconds=0 and a resourceVersion precondition ([#85516](https://github.com/kubernetes/kubernetes/pull/85516), [@michaelgugino](https://github.com/michaelgugino)) [SIG API Machinery] -- Kube-proxy no longer modifies shared EndpointSlices. 
([#86092](https://github.com/kubernetes/kubernetes/pull/86092), [@robscott](https://github.com/robscott)) [SIG Network] -- Kube-proxy: on dual-stack mode, if it is not able to get the IP Family of an endpoint, logs it with level InfoV(4) instead of Warning, avoiding flooding the logs for endpoints without addresses ([#88934](https://github.com/kubernetes/kubernetes/pull/88934), [@aojea](https://github.com/aojea)) [SIG Network] -- Kubeadm allows to configure single-stack clusters if dual-stack is enabled ([#87453](https://github.com/kubernetes/kubernetes/pull/87453), [@aojea](https://github.com/aojea)) [SIG API Machinery, Cluster Lifecycle and Network] -- Kubeadm now includes CoreDNS version 1.6.7 ([#86260](https://github.com/kubernetes/kubernetes/pull/86260), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] -- Kubeadm upgrades always persist the etcd backup for stacked ([#86861](https://github.com/kubernetes/kubernetes/pull/86861), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubeadm: 'kubeadm alpha kubelet config download' has been removed, please use 'kubeadm upgrade node phase kubelet-config' instead ([#87944](https://github.com/kubernetes/kubernetes/pull/87944), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubeadm: Forward cluster name to the controller-manager arguments ([#85817](https://github.com/kubernetes/kubernetes/pull/85817), [@ereslibre](https://github.com/ereslibre)) [SIG Cluster Lifecycle] -- Kubeadm: add support for the "ci/k8s-master" version label as a replacement for "ci-cross/*", which no longer exists. ([#86609](https://github.com/kubernetes/kubernetes/pull/86609), [@Pensu](https://github.com/Pensu)) [SIG Cluster Lifecycle] -- Kubeadm: apply further improvements to the tentative support for concurrent etcd member join. Fixes a bug where multiple members can receive the same hostname. Increase the etcd client dial timeout and retry timeout for add/remove/... operations. 
([#87505](https://github.com/kubernetes/kubernetes/pull/87505), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: don't write the kubelet environment file on "upgrade apply" ([#85412](https://github.com/kubernetes/kubernetes/pull/85412), [@boluisa](https://github.com/boluisa)) [SIG Cluster Lifecycle] -- Kubeadm: fix potential panic when executing "kubeadm reset" with a corrupted kubelet.conf file ([#86216](https://github.com/kubernetes/kubernetes/pull/86216), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: fix the bug that 'kubeadm upgrade' hangs in single node cluster ([#88434](https://github.com/kubernetes/kubernetes/pull/88434), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubeadm: make sure images are pre-pulled even if a tag did not change but their contents changed ([#85603](https://github.com/kubernetes/kubernetes/pull/85603), [@bart0sh](https://github.com/bart0sh)) [SIG Cluster Lifecycle] -- Kubeadm: remove 'kubeadm upgrade node config' command since it was deprecated in v1.15, please use 'kubeadm upgrade node phase kubelet-config' instead ([#87975](https://github.com/kubernetes/kubernetes/pull/87975), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubeadm: remove the deprecated CoreDNS feature-gate. It was set to "true" since v1.11 when the feature went GA. In v1.13 it was marked as deprecated and hidden from the CLI. ([#87400](https://github.com/kubernetes/kubernetes/pull/87400), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: retry `kubeadm-config` ConfigMap creation or mutation if the apiserver is not responding. This will improve resiliency when joining new control plane nodes. 
([#85763](https://github.com/kubernetes/kubernetes/pull/85763), [@ereslibre](https://github.com/ereslibre)) [SIG Cluster Lifecycle] -- Kubeadm: tolerate whitespace when validating certificate authority PEM data in kubeconfig files ([#86705](https://github.com/kubernetes/kubernetes/pull/86705), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: use bind-address option to configure the kube-controller-manager and kube-scheduler http probes ([#86493](https://github.com/kubernetes/kubernetes/pull/86493), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] -- Kubeadm: uses the api-server AdvertiseAddress IP family to choose the etcd endpoint IP family for non external etcd clusters ([#85745](https://github.com/kubernetes/kubernetes/pull/85745), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] -- Kubectl cluster-info dump --output-directory=xxx now generates files with an extension depending on the output format. ([#82070](https://github.com/kubernetes/kubernetes/pull/82070), [@olivierlemasle](https://github.com/olivierlemasle)) [SIG CLI] -- `Kubectl describe ` and `kubectl top pod` will return a message saying `"No resources found"` or `"No resources found in namespace"` if there are no results to display. ([#87527](https://github.com/kubernetes/kubernetes/pull/87527), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- `Kubectl drain node --dry-run` will list pods that would be evicted or deleted ([#82660](https://github.com/kubernetes/kubernetes/pull/82660), [@sallyom](https://github.com/sallyom)) [SIG CLI] -- `Kubectl set resources` will no longer return an error if passed an empty change for a resource. `kubectl set subject` will no longer return an error if passed an empty change for a resource. 
([#85490](https://github.com/kubernetes/kubernetes/pull/85490), [@sallyom](https://github.com/sallyom)) [SIG CLI] -- Kubelet metrics gathered through metrics-server or prometheus should no longer timeout for Windows nodes running more than 3 pods. ([#87730](https://github.com/kubernetes/kubernetes/pull/87730), [@marosset](https://github.com/marosset)) [SIG Node, Testing and Windows] -- Kubelet metrics have been changed to buckets. For example the `exec/{podNamespace}/{podID}/{containerName}` is now just exec. ([#87913](https://github.com/kubernetes/kubernetes/pull/87913), [@cheftako](https://github.com/cheftako)) [SIG Node] -- Kubelets perform fewer unnecessary pod status update operations on the API server. ([#88591](https://github.com/kubernetes/kubernetes/pull/88591), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node and Scalability] -- Kubernetes will try to acquire the iptables lock every 100 msec during 5 seconds instead of every second. This is especially useful for environments using kube-proxy in iptables mode with a high churn rate of services. ([#85771](https://github.com/kubernetes/kubernetes/pull/85771), [@aojea](https://github.com/aojea)) [SIG Network] -- Limit number of instances in a single update to GCE target pool to 1000. 
([#87881](https://github.com/kubernetes/kubernetes/pull/87881), [@wojtek-t](https://github.com/wojtek-t)) [SIG Cloud Provider, Network and Scalability] -- Make Azure clients only retry on specified HTTP status codes ([#88017](https://github.com/kubernetes/kubernetes/pull/88017), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Make error message and service event message more clear ([#86078](https://github.com/kubernetes/kubernetes/pull/86078), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Minimize AWS NLB health check timeout when externalTrafficPolicy set to Local ([#73363](https://github.com/kubernetes/kubernetes/pull/73363), [@kellycampbell](https://github.com/kellycampbell)) [SIG Cloud Provider] -- Pause image contains "Architecture" in non-amd64 images ([#87954](https://github.com/kubernetes/kubernetes/pull/87954), [@BenTheElder](https://github.com/BenTheElder)) [SIG Release] -- Pause image upgraded to 3.2 in kubelet and kubeadm. ([#88173](https://github.com/kubernetes/kubernetes/pull/88173), [@BenTheElder](https://github.com/BenTheElder)) [SIG CLI, Cluster Lifecycle, Node and Testing] -- Plugin/PluginConfig and Policy APIs are mutually exclusive when running the scheduler ([#88864](https://github.com/kubernetes/kubernetes/pull/88864), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Remove `FilteredNodesStatuses` argument from `PreScore`'s interface. ([#88189](https://github.com/kubernetes/kubernetes/pull/88189), [@skilxn-go](https://github.com/skilxn-go)) [SIG Scheduling and Testing] -- Resolved a performance issue in the node authorizer index maintenance. 
([#87693](https://github.com/kubernetes/kubernetes/pull/87693), [@liggitt](https://github.com/liggitt)) [SIG Auth] -- Resolved regression in admission, authentication, and authorization webhook performance in v1.17.0-rc.1 ([#85810](https://github.com/kubernetes/kubernetes/pull/85810), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Testing] -- Resolves performance regression in `kubectl get all` and in client-go discovery clients constructed using `NewDiscoveryClientForConfig` or `NewDiscoveryClientForConfigOrDie`. ([#86168](https://github.com/kubernetes/kubernetes/pull/86168), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] -- Reverted a kubectl azure auth module change where oidc claim spn: prefix was omitted resulting a breaking behavior with existing Azure AD OIDC enabled api-server ([#87507](https://github.com/kubernetes/kubernetes/pull/87507), [@weinong](https://github.com/weinong)) [SIG API Machinery, Auth and Cloud Provider] -- Shared informers are now more reliable in the face of network disruption. ([#86015](https://github.com/kubernetes/kubernetes/pull/86015), [@squeed](https://github.com/squeed)) [SIG API Machinery] -- Specifying PluginConfig for the same plugin more than once fails scheduler startup. - Specifying extenders and configuring .ignoredResources for the NodeResourcesFit plugin fails ([#88870](https://github.com/kubernetes/kubernetes/pull/88870), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Terminating a restartPolicy=Never pod no longer has a chance to report the pod succeeded when it actually failed. 
([#88440](https://github.com/kubernetes/kubernetes/pull/88440), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node and Testing] -- The CSR signing cert/key pairs will be reloaded from disk like the kube-apiserver cert/key pairs ([#86816](https://github.com/kubernetes/kubernetes/pull/86816), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps and Auth] -- The EventRecorder from k8s.io/client-go/tools/events will now create events in the default namespace (instead of kube-system) when the related object does not have it set. ([#88815](https://github.com/kubernetes/kubernetes/pull/88815), [@enj](https://github.com/enj)) [SIG API Machinery] -- The audit event sourceIPs list will now always end with the IP that sent the request directly to the API server. ([#87167](https://github.com/kubernetes/kubernetes/pull/87167), [@tallclair](https://github.com/tallclair)) [SIG API Machinery and Auth] -- The sample-apiserver aggregated conformance test has updated to use the Kubernetes v1.17.0 sample apiserver ([#84735](https://github.com/kubernetes/kubernetes/pull/84735), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Architecture, CLI and Testing] -- To reduce chances of throttling, VM cache is set to nil when Azure node provisioning state is deleting ([#87635](https://github.com/kubernetes/kubernetes/pull/87635), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- VMSS cache is added so that there are fewer chances of VMSS GET throttling ([#85885](https://github.com/kubernetes/kubernetes/pull/85885), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Wait for kubelet & kube-proxy to be ready on Windows node within 10s ([#85228](https://github.com/kubernetes/kubernetes/pull/85228), [@YangLu1031](https://github.com/YangLu1031)) [SIG Cluster Lifecycle] -- `kubectl apply -f <file> --prune -n <namespace>` should prune all resources not defined in the file in the cli specified namespace.
([#85613](https://github.com/kubernetes/kubernetes/pull/85613), [@MartinKaburu](https://github.com/MartinKaburu)) [SIG CLI] -- `kubectl create clusterrolebinding` creates rbac.authorization.k8s.io/v1 object ([#85889](https://github.com/kubernetes/kubernetes/pull/85889), [@oke-py](https://github.com/oke-py)) [SIG CLI] -- `kubectl diff` now returns 1 only on diff finding changes, and >1 on kubectl errors. The "exit status code 1" message has also been muted. ([#87437](https://github.com/kubernetes/kubernetes/pull/87437), [@apelisse](https://github.com/apelisse)) [SIG CLI and Testing] - -## Dependencies - -- Update Calico to v3.8.4 ([#84163](https://github.com/kubernetes/kubernetes/pull/84163), [@david-tigera](https://github.com/david-tigera))[SIG Cluster Lifecycle] -- Update aws-sdk-go dependency to v1.28.2 ([#87253](https://github.com/kubernetes/kubernetes/pull/87253), [@SaranBalaji90](https://github.com/SaranBalaji90))[SIG API Machinery and Cloud Provider] -- Update CNI version to v0.8.5 ([#78819](https://github.com/kubernetes/kubernetes/pull/78819), [@justaugustus](https://github.com/justaugustus))[SIG Release, Testing, Network, Cluster Lifecycle and API Machinery] -- Update cri-tools to v1.17.0 ([#86305](https://github.com/kubernetes/kubernetes/pull/86305), [@saschagrunert](https://github.com/saschagrunert))[SIG Release and Cluster Lifecycle] -- Pause image upgraded to 3.2 in kubelet and kubeadm ([#88173](https://github.com/kubernetes/kubernetes/pull/88173), [@BenTheElder](https://github.com/BenTheElder))[SIG CLI, Node, Testing and Cluster Lifecycle] -- Update CoreDNS version to 1.6.7 in kubeadm ([#86260](https://github.com/kubernetes/kubernetes/pull/86260), [@rajansandeep](https://github.com/rajansandeep))[SIG Cluster Lifecycle] -- Update golang.org/x/crypto to fix CVE-2020-9283 ([#88381](https://github.com/kubernetes/kubernetes/pull/88381), [@BenTheElder](https://github.com/BenTheElder))[SIG CLI, Instrumentation, API Machinery, Cluster Lifecycle and Cloud
Provider] -- Update Go to 1.13.8 ([#87648](https://github.com/kubernetes/kubernetes/pull/87648), [@ialidzhikov](https://github.com/ialidzhikov))[SIG Release and Testing] -- Update Cluster-Autoscaler to 1.18.0 ([#89095](https://github.com/kubernetes/kubernetes/pull/89095), [@losipiuk](https://github.com/losipiuk))[SIG Autoscaling and Cluster Lifecycle] - - - -# v1.18.0-rc.1 - -[Documentation](https://docs.k8s.io) - -## Downloads for v1.18.0-rc.1 - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes.tar.gz) | `c17231d5de2e0677e8af8259baa11a388625821c79b86362049f2edb366404d6f4b4587b8f13ccbceeb2f32c6a9fe98607f779c0f3e1caec438f002e3a2c8c21` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-src.tar.gz) | `e84ffad57c301f5d6e90f916b996d5abb0c987928c3ca6b1565f7b042588f839b994ca12c43fc36f0ffb63f9fabc15110eb08be253b8939f49cd951e956da618` - -### Client Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-darwin-386.tar.gz) | `1aea99923d492436b3eb91aaecffac94e5d0aa2b38a0930d266fda85c665bbc4569745c409aa302247df3b578ce60324e7a489eb26240e97d4e65a67428ea3d1` -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-darwin-amd64.tar.gz) | `07fa7340a959740bd52b83ff44438bbd988e235277dad1e43f125f08ac85230a24a3b755f4e4c8645743444fa2b66a3602fc445d7da6d2fc3770e8c21ba24b33` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-386.tar.gz) | `48cebd26448fdd47aa36257baa4c716a98fda055bbf6a05230f2a3fe3c1b99b4e483668661415392190f3eebb9cb6e15c784626b48bb2541d93a37902f0e3974` -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-amd64.tar.gz) | `c3a5fedf263f07a07f59c01fea6c63c1e0b76ee8dc67c45b6c134255c28ed69171ccc2f91b6a45d6a8ec5570a0a7562e24c33b9d7b0d1a864f4dc04b178b3c04` 
-[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-arm.tar.gz) | `a6b11a55bd38583bbaac14931a6862f8ce6493afe30947ba29e5556654a571593358278df59412bbeb6888fa127e9ae4c0047a9d46cb59394995010796df6b14` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-arm64.tar.gz) | `9e15331ac8010154a9b64f5488969fc8ee2f21059639896cb84c5cf4f05f4c9d1d8970cb6f9831de6b34013848227c1972c12a698d07aac1ecc056e972fe6f79` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-ppc64le.tar.gz) | `f828fe6252678de9d4822e482f5873309ae9139b2db87298ab3273ce45d38aa07b6b9b42b76c140705f27ba71e101d58b43e59ac7259d7c08dc647ea809e207c` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-s390x.tar.gz) | `19da4b45f0666c063934af616f3e7ed3caa99d4ee1e46d53efadc7a8a4d38e43a36ced7249acd7ad3dcc4b4f60d8451b4f7ec7727e478ee2fadd14d353228bce` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-windows-386.tar.gz) | `775c9afb6cb3e7c4ba53e9f48a5df2cf207234a33059bd74448bc9f177dd120fb3f9c58ab45048a566326acc43bc8a67e886e10ef99f20780c8f63bb17426ebd` -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-windows-amd64.tar.gz) | `208d2595a5b57ac97aac75b4a2a6130f0c937f781a030bde1a432daf4bc51f2fa523fca2eb84c38798489c4b536ee90aad22f7be8477985d9691d51ad8e1c4dc` - -### Server Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-server-linux-amd64.tar.gz) | `dcf832eae04f9f52ff473754ef5cfe697b35f4dc1a282622c94fa10943c8c35f4a8777a0c58c7de871c3c428c8973bf72d6bcd8751416d4c682125268b8fcefe` -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-server-linux-arm.tar.gz) | 
`a04e34bea28eb1c8b492e8b1dd3c0dd87ebee71a7dbbef72be10a335e553361af7e48296e504f9844496b04e66350871114d20cfac3f3b49550d8be60f324ba3` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-server-linux-arm64.tar.gz) | `a6af086b07a8c2e498f32b43e6511bf6a5e6baf358c572c6910c8df17cd6cae94f562f459714fcead1595767cb14c7f639c5735f1411173bbd38d5604c082a77` -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-server-linux-ppc64le.tar.gz) | `5a960ef5ba0c255f587f2ac0b028cd03136dc91e4efc5d1becab46417852e5524d18572b6f66259531ec6fea997da3c4d162ac153a9439672154375053fec6c7` -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-server-linux-s390x.tar.gz) | `0f32c7d9b14bc238b9a5764d8f00edc4d3bf36bcf06b340b81061424e6070768962425194a8c2025c3a7ffb97b1de551d3ad23d1591ae34dd4e3ba25ab364c33` - -### Node Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-linux-amd64.tar.gz) | `27d8955d535d14f3f4dca501fd27e4f06fad84c6da878ea5332a5c83b6955667f6f731bfacaf5a3a23c09f14caa400f9bee927a0f269f5374de7f79cd1919b3b` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-linux-arm.tar.gz) | `0d56eccad63ba608335988e90b377fe8ae978b177dc836cdb803a5c99d99e8f3399a666d9477ca9cfe5964944993e85c416aec10a99323e3246141efc0b1cc9e` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-linux-arm64.tar.gz) | `79bb9be66f9e892d866b28e5cc838245818edb9706981fab6ccbff493181b341c1fcf6fe5d2342120a112eb93af413f5ba191cfba1ab4c4a8b0546a5ad8ec220` -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-linux-ppc64le.tar.gz) | `3e9e2c6f9a2747d828069511dce8b4034c773c2d122f005f4508e22518055c1e055268d9d86773bbd26fbd2d887d783f408142c6c2f56ab2f2365236fd4d2635` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-linux-s390x.tar.gz) | 
`4f96e018c336fa13bb6df6f7217fe46a2b5c47f806f786499c429604ccba2ebe558503ab2c72f63250aa25b61dae2d166e4b80ae10f6ab37d714f87c1dcf6691` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-windows-amd64.tar.gz) | `ab110d76d506746af345e5897ef4f6993d5f53ac818ba69a334f3641047351aa63bfb3582841a9afca51dd0baff8b9010077d9c8ec85d2d69e4172b8d4b338b0` - -## Changelog since v1.18.0-beta.2 - -## Changes by Kind - -### API Change - -- Removes ConfigMap as suggestion for IngressClass parameters ([#89093](https://github.com/kubernetes/kubernetes/pull/89093), [@robscott](https://github.com/robscott)) [SIG Network] - -### Other (Bug, Cleanup or Flake) - -- EndpointSlice should not contain endpoints for terminating pods ([#89056](https://github.com/kubernetes/kubernetes/pull/89056), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network] -- Fix a bug where ExternalTrafficPolicy is not applied to service ExternalIPs. ([#88786](https://github.com/kubernetes/kubernetes/pull/88786), [@freehan](https://github.com/freehan)) [SIG Network] -- Fix invalid VMSS updates due to incorrect cache ([#89002](https://github.com/kubernetes/kubernetes/pull/89002), [@ArchangelSDY](https://github.com/ArchangelSDY)) [SIG Cloud Provider] -- Fix isCurrentInstance for Windows by removing the dependency of hostname. ([#89138](https://github.com/kubernetes/kubernetes/pull/89138), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fixed a data race in kubelet image manager that can cause static pod workers to silently stop working. ([#88915](https://github.com/kubernetes/kubernetes/pull/88915), [@roycaihw](https://github.com/roycaihw)) [SIG Node] -- Fixed an issue that could cause the kubelet to incorrectly run concurrent pod reconciliation loops and crash. 
([#89055](https://github.com/kubernetes/kubernetes/pull/89055), [@tedyu](https://github.com/tedyu)) [SIG Node] -- Kube-proxy: on dual-stack mode, if it is not able to get the IP Family of an endpoint, logs it with level InfoV(4) instead of Warning, avoiding flooding the logs for endpoints without addresses ([#88934](https://github.com/kubernetes/kubernetes/pull/88934), [@aojea](https://github.com/aojea)) [SIG Network] -- Update Cluster Autoscaler to 1.18.0; changelog: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.18.0 ([#89095](https://github.com/kubernetes/kubernetes/pull/89095), [@losipiuk](https://github.com/losipiuk)) [SIG Autoscaling and Cluster Lifecycle] - - -# v1.18.0-beta.2 - -[Documentation](https://docs.k8s.io) - -## Downloads for v1.18.0-beta.2 - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes.tar.gz) | `3017430ca17f8a3523669b4a02c39cedfc6c48b07281bc0a67a9fbe9d76547b76f09529172cc01984765353a6134a43733b7315e0dff370bba2635dd2a6289af` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-src.tar.gz) | `c5fd60601380a99efff4458b1c9cf4dc02195f6f756b36e590e54dff68f7064daf32cf63980dddee13ef9dec7a60ad4eeb47a288083fdbbeeef4bc038384e9ea` - -### Client Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-darwin-386.tar.gz) | `7e49ede167b9271d4171e477fa21d267b2fb35f80869337d5b323198dc12f71b61441975bf925ad6e6cd7b61cbf6372d386417dc1e5c9b3c87ae651021c37237` -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-darwin-amd64.tar.gz) | `3f5cdf0e85eee7d0773e0ae2df1c61329dea90e0da92b02dae1ffd101008dc4bade1c4951fc09f0cad306f0bcb7d16da8654334ddee43d5015913cc4ac8f3eda` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-386.tar.gz) | 
`b67b41c11bfecb88017c33feee21735c56f24cf6f7851b63c752495fc0fb563cd417a67a81f46bca091f74dc00fca1f296e483d2e3dfe2004ea4b42e252d30b9` -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-amd64.tar.gz) | `1fef2197cb80003e3a5c26f05e889af9d85fbbc23e27747944d2997ace4bfa28f3670b13c08f5e26b7e274176b4e2df89c1162aebd8b9506e63b39b311b2d405` -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-arm.tar.gz) | `84e5f4d9776490219ee94a84adccd5dfc7c0362eb330709771afcde95ec83f03d96fe7399eec218e47af0a1e6445e24d95e6f9c66c0882ef8233a09ff2022420` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-arm64.tar.gz) | `ba613b114e0cca32fa21a3d10f845aa2f215d3af54e775f917ff93919f7dd7075efe254e4047a85a1f4b817fc2bd78006c2e8873885f1208cbc02db99e2e2e25` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-ppc64le.tar.gz) | `502a6938d8c4bbe04abbd19b59919d86765058ff72334848be4012cec493e0e7027c6cd950cf501367ac2026eea9f518110cb72d1c792322b396fc2f73d23217` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-s390x.tar.gz) | `c24700e0ed2ef5c1d2dd282d638c88d90392ae90ea420837b39fd8e1cfc19525017325ccda71d8472fdaea174762208c09e1bba9bbc77c89deef6fac5e847ba2` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-windows-386.tar.gz) | `0d4c5a741b052f790c8b0923c9586ee9906225e51cf4dc8a56fc303d4d61bb5bf77fba9e65151dec7be854ff31da8fc2dcd3214563e1b4b9951e6af4aa643da4` -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-windows-amd64.tar.gz) | `841ef2e306c0c9593f04d9528ee019bf3b667761227d9afc1d6ca8bf1aa5631dc25f5fe13ff329c4bf0c816b971fd0dec808f879721e0f3bf51ce49772b38010` - -### Server Binaries - -filename | sha512 hash --------- | ----------- 
-[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-server-linux-amd64.tar.gz) | `b373df2e6ef55215e712315a5508e85a39126bd81b7b93c6b6305238919a88c740077828a6f19bcd97141951048ef7a19806ef6b1c3e1772dbc45715c5fcb3af` -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-server-linux-arm.tar.gz) | `b8103cb743c23076ce8dd7c2da01c8dd5a542fbac8480e82dc673139c8ee5ec4495ca33695e7a18dd36412cf1e18ed84c8de05042525ddd8e869fbdfa2766569` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-server-linux-arm64.tar.gz) | `8f8f05cf64fb9c8d80cdcb4935b2d3e3edc48bdd303231ae12f93e3f4d979237490744a11e24ba7f52dbb017ca321a8e31624dcffa391b8afda3d02078767fa0` -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-server-linux-ppc64le.tar.gz) | `b313b911c46f2ec129537407af3f165f238e48caeb4b9e530783ffa3659304a544ed02bef8ece715c279373b9fb2c781bd4475560e02c4b98a6d79837bc81938` -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-server-linux-s390x.tar.gz) | `a1b6b06571141f507b12e5ef98efb88f4b6b9aba924722b2a74f11278d29a2972ab8290608360151d124608e6e24da0eb3516d484cb5fa12ff2987562f15964a` - -### Node Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-linux-amd64.tar.gz) | `20e02ca327543cddb2568ead3d5de164cbfb2914ab6416106d906bf12fcfbc4e55b13bea4d6a515e8feab038e2c929d72c4d6909dfd7881ba69fd1e8c772ab99` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-linux-arm.tar.gz) | `ecd817ef05d6284f9c6592b84b0a48ea31cf4487030c9fb36518474b2a33dad11b9c852774682e60e4e8b074e6bea7016584ca281dddbe2994da5eaf909025c0` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-linux-arm64.tar.gz) | 
`0020d32b7908ffd5055c8b26a8b3033e4702f89efcfffe3f6fcdb8a9921fa8eaaed4193c85597c24afd8c523662454f233521bb7055841a54c182521217ccc9d` -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-linux-ppc64le.tar.gz) | `e065411d66d486e7793449c1b2f5a412510b913bf7f4e728c0a20e275642b7668957050dc266952cdff09acc391369ae6ac5230184db89af6823ba400745f2fc` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-linux-s390x.tar.gz) | `082ee90413beaaea41d6cbe9a18f7d783a95852607f3b94190e0ca12aacdd97d87e233b87117871bfb7d0a4b6302fbc7688549492a9bc50a2f43a5452504d3ce` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-windows-amd64.tar.gz) | `fb5aca0cc36be703f9d4033eababd581bac5de8399c50594db087a99ed4cb56e4920e960eb81d0132d696d094729254eeda2a5c0cb6e65e3abca6c8d61da579e` - -## Changelog since v1.18.0-beta.1 - -## Urgent Upgrade Notes - -### (No, really, you MUST read this before you upgrade) - -- `kubectl` no longer defaults to `http://localhost:8080`. If you own one of these legacy clusters, you are *strongly* encouraged to secure your server. If you cannot secure your server, you can set `KUBERNETES_MASTER` if you were relying on that behavior and you're a client-go user. Set `--server`, `--kubeconfig` or `KUBECONFIG` to make it work in `kubectl`.
([#86173](https://github.com/kubernetes/kubernetes/pull/86173), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, CLI and Testing] - -## Changes by Kind - -### Deprecation - -- AlgorithmSource is removed from v1alpha2 Scheduler ComponentConfig ([#87999](https://github.com/kubernetes/kubernetes/pull/87999), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- Kube-proxy: deprecate `--healthz-port` and `--metrics-port` flag, please use `--healthz-bind-address` and `--metrics-bind-address` instead ([#88512](https://github.com/kubernetes/kubernetes/pull/88512), [@SataQiu](https://github.com/SataQiu)) [SIG Network] -- Kubeadm: deprecate the usage of the experimental flag '--use-api' under the 'kubeadm alpha certs renew' command. ([#88827](https://github.com/kubernetes/kubernetes/pull/88827), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] - -### API Change - -- A new IngressClass resource has been added to enable better Ingress configuration. ([#88509](https://github.com/kubernetes/kubernetes/pull/88509), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, CLI, Network, Node and Testing] -- Added GenericPVCDataSource feature gate to enable using arbitrary custom resources as the data source for a PVC. ([#88636](https://github.com/kubernetes/kubernetes/pull/88636), [@bswartz](https://github.com/bswartz)) [SIG Apps and Storage] -- Allow user to specify fsgroup permission change policy for pods ([#88488](https://github.com/kubernetes/kubernetes/pull/88488), [@gnufied](https://github.com/gnufied)) [SIG Apps and Storage] -- BlockVolume and CSIBlockVolume features are now GA. 
([#88673](https://github.com/kubernetes/kubernetes/pull/88673), [@jsafrane](https://github.com/jsafrane)) [SIG Apps, Node and Storage] -- CustomResourceDefinition schemas that use `x-kubernetes-list-map-keys` to specify properties that uniquely identify list items must make those properties required or have a default value, to ensure those properties are present for all list items. See https://kubernetes.io/docs/reference/using-api/api-concepts/#merge-strategy for details. ([#88076](https://github.com/kubernetes/kubernetes/pull/88076), [@eloyekunle](https://github.com/eloyekunle)) [SIG API Machinery and Testing] -- Fixes a regression with clients prior to 1.15 not being able to update podIP in pod status, or podCIDR in node spec, against >= 1.16 API servers ([#88505](https://github.com/kubernetes/kubernetes/pull/88505), [@liggitt](https://github.com/liggitt)) [SIG Apps and Network] -- Ingress: Add Exact and Prefix matching to Ingress PathTypes ([#88587](https://github.com/kubernetes/kubernetes/pull/88587), [@cmluciano](https://github.com/cmluciano)) [SIG Apps, Cluster Lifecycle and Network] -- Ingress: Add alternate backends via TypedLocalObjectReference ([#88775](https://github.com/kubernetes/kubernetes/pull/88775), [@cmluciano](https://github.com/cmluciano)) [SIG Apps and Network] -- Ingress: allow wildcard hosts in IngressRule ([#88858](https://github.com/kubernetes/kubernetes/pull/88858), [@cmluciano](https://github.com/cmluciano)) [SIG Network] -- Kube-controller-manager and kube-scheduler expose profiling by default to match the kube-apiserver. Use `--enable-profiling=false` to disable.
([#88663](https://github.com/kubernetes/kubernetes/pull/88663), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Cloud Provider and Scheduling] -- Move TaintBasedEvictions feature gates to GA ([#87487](https://github.com/kubernetes/kubernetes/pull/87487), [@skilxn-go](https://github.com/skilxn-go)) [SIG API Machinery, Apps, Node, Scheduling and Testing] -- New flag --endpointslice-updates-batch-period in kube-controller-manager can be used to reduce number of endpointslice updates generated by pod changes. ([#88745](https://github.com/kubernetes/kubernetes/pull/88745), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Apps and Network] -- Scheduler Extenders can now be configured in the v1alpha2 component config ([#88768](https://github.com/kubernetes/kubernetes/pull/88768), [@damemi](https://github.com/damemi)) [SIG Release, Scheduling and Testing] -- The apiserver/v1alpha1#EgressSelectorConfiguration API is now beta. ([#88502](https://github.com/kubernetes/kubernetes/pull/88502), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery] -- The storage.k8s.io/CSIDriver has moved to GA, and is now available for use. ([#84814](https://github.com/kubernetes/kubernetes/pull/84814), [@huffmanca](https://github.com/huffmanca)) [SIG API Machinery, Apps, Auth, Node, Scheduling, Storage and Testing] -- VolumePVCDataSource moves to GA in 1.18 release ([#88686](https://github.com/kubernetes/kubernetes/pull/88686), [@j-griffith](https://github.com/j-griffith)) [SIG Apps, CLI and Cluster Lifecycle] - -### Feature - -- Add `rest_client_rate_limiter_duration_seconds` metric to component-base to track client side rate limiter latency in seconds. Broken down by verb and URL.
([#88134](https://github.com/kubernetes/kubernetes/pull/88134), [@jennybuckley](https://github.com/jennybuckley)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] -- Allow user to specify resource using --filename flag when invoking kubectl exec ([#88460](https://github.com/kubernetes/kubernetes/pull/88460), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] -- Apiserver add a new flag --goaway-chance which is the fraction of requests that will be closed gracefully(GOAWAY) to prevent HTTP/2 clients from getting stuck on a single apiserver. - After the connection closed(received GOAWAY), the client's other in-flight requests won't be affected, and the client will reconnect. - The flag min value is 0 (off), max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point. - Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. ([#88567](https://github.com/kubernetes/kubernetes/pull/88567), [@answer1991](https://github.com/answer1991)) [SIG API Machinery] -- Azure: add support for single stack IPv6 ([#88448](https://github.com/kubernetes/kubernetes/pull/88448), [@aramase](https://github.com/aramase)) [SIG Cloud Provider] -- DefaultConstraints can be specified for the PodTopologySpread plugin in the component config ([#88671](https://github.com/kubernetes/kubernetes/pull/88671), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Kubeadm: support Windows specific kubelet flags in kubeadm-flags.env ([#88287](https://github.com/kubernetes/kubernetes/pull/88287), [@gab-satchi](https://github.com/gab-satchi)) [SIG Cluster Lifecycle and Windows] -- Kubectl cluster-info dump changed to only display a message telling you the location where the output was written when the output is not standard output. ([#88765](https://github.com/kubernetes/kubernetes/pull/88765), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- Print NotReady when pod is not ready based on its conditions. 
([#88240](https://github.com/kubernetes/kubernetes/pull/88240), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Scheduler Extender API is now located under k8s.io/kube-scheduler/extender ([#88540](https://github.com/kubernetes/kubernetes/pull/88540), [@damemi](https://github.com/damemi)) [SIG Release, Scheduling and Testing] -- Signatures on scale client methods have been modified to accept `context.Context` as a first argument. Signatures of Get, Update, and Patch methods have been updated to accept GetOptions, UpdateOptions and PatchOptions respectively. ([#88599](https://github.com/kubernetes/kubernetes/pull/88599), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG API Machinery, Apps, Autoscaling and CLI] -- Signatures on the dynamic client methods have been modified to accept `context.Context` as a first argument. Signatures of Delete and DeleteCollection methods now accept DeleteOptions by value instead of by reference. ([#88906](https://github.com/kubernetes/kubernetes/pull/88906), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps, CLI, Cluster Lifecycle, Storage and Testing] -- Signatures on the metadata client methods have been modified to accept `context.Context` as a first argument. Signatures of Delete and DeleteCollection methods now accept DeleteOptions by value instead of by reference. ([#88910](https://github.com/kubernetes/kubernetes/pull/88910), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] -- Webhooks will have alpha support for network proxy ([#85870](https://github.com/kubernetes/kubernetes/pull/85870), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery, Auth and Testing] -- When client certificate files are provided, reload files for new connections, and close connections when a certificate changes. 
([#79083](https://github.com/kubernetes/kubernetes/pull/79083), [@jackkleeman](https://github.com/jackkleeman)) [SIG API Machinery, Auth, Node and Testing] -- When deleting objects using kubectl with the --force flag, you are no longer required to also specify --grace-period=0. ([#87776](https://github.com/kubernetes/kubernetes/pull/87776), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- `kubectl` now contains a `kubectl alpha debug` command. This command allows attaching an ephemeral container to a running pod for the purposes of debugging. ([#88004](https://github.com/kubernetes/kubernetes/pull/88004), [@verb](https://github.com/verb)) [SIG CLI] - -### Documentation - -- Update Japanese translation for kubectl help ([#86837](https://github.com/kubernetes/kubernetes/pull/86837), [@inductor](https://github.com/inductor)) [SIG CLI and Docs] -- `kubectl plugin` now prints a note on how to install krew ([#88577](https://github.com/kubernetes/kubernetes/pull/88577), [@corneliusweig](https://github.com/corneliusweig)) [SIG CLI] - -### Other (Bug, Cleanup or Flake) - -- Azure VMSS LoadBalancerBackendAddressPools updating has been improved with sequential-sync + concurrent-async requests. ([#88699](https://github.com/kubernetes/kubernetes/pull/88699), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- AzureFile and CephFS use new Mount library that prevents logging of sensitive mount options.
([#88684](https://github.com/kubernetes/kubernetes/pull/88684), [@saad-ali](https://github.com/saad-ali)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Storage] -- Build: Enable kube-cross image-building on K8s Infra ([#88562](https://github.com/kubernetes/kubernetes/pull/88562), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing] -- Client-go certificate manager rotation gained the ability to preserve optional intermediate chains accompanying issued certificates ([#88744](https://github.com/kubernetes/kubernetes/pull/88744), [@jackkleeman](https://github.com/jackkleeman)) [SIG API Machinery and Auth] -- Conformance image now depends on stretch-slim instead of debian-hyperkube-base as that image is being deprecated and removed. ([#88702](https://github.com/kubernetes/kubernetes/pull/88702), [@dims](https://github.com/dims)) [SIG Cluster Lifecycle, Release and Testing] -- Deprecate --generator flag from kubectl create commands ([#88655](https://github.com/kubernetes/kubernetes/pull/88655), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- FIX: prevent apiserver from panicking when failing to load audit webhook config file ([#88879](https://github.com/kubernetes/kubernetes/pull/88879), [@JoshVanL](https://github.com/JoshVanL)) [SIG API Machinery and Auth] -- Fix /readyz to return error immediately after a shutdown is initiated, before the --shutdown-delay-duration has elapsed. ([#88911](https://github.com/kubernetes/kubernetes/pull/88911), [@tkashem](https://github.com/tkashem)) [SIG API Machinery] -- Fix a bug where kubenet fails to parse the tc output. ([#83572](https://github.com/kubernetes/kubernetes/pull/83572), [@chendotjs](https://github.com/chendotjs)) [SIG Network] -- Fix describe ingress annotations not sorted. ([#88394](https://github.com/kubernetes/kubernetes/pull/88394), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Fix handling of aws-load-balancer-security-groups annotation. 
Security-Groups assigned with this annotation are no longer modified by kubernetes which is the expected behaviour of most users. Also no unnecessary Security-Groups are created anymore if this annotation is used. ([#83446](https://github.com/kubernetes/kubernetes/pull/83446), [@Elias481](https://github.com/Elias481)) [SIG Cloud Provider] -- Fix kubectl create deployment image name ([#86636](https://github.com/kubernetes/kubernetes/pull/86636), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Fix missing "apiVersion" for "involvedObject" in Events for Nodes. ([#87537](https://github.com/kubernetes/kubernetes/pull/87537), [@uthark](https://github.com/uthark)) [SIG Apps and Node] -- Fix that prevents repeated fetching of PVC/PV objects by kubelet when processing of pod volumes fails. While this prevents hammering API server in these error scenarios, it means that some errors in processing volume(s) for a pod could now take up to 2-3 minutes before retry. ([#88141](https://github.com/kubernetes/kubernetes/pull/88141), [@tedyu](https://github.com/tedyu)) [SIG Node and Storage] -- Fix: azure file mount timeout issue ([#88610](https://github.com/kubernetes/kubernetes/pull/88610), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix: corrupted mount point in csi driver ([#88569](https://github.com/kubernetes/kubernetes/pull/88569), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Fixed a bug in the TopologyManager. Previously, the TopologyManager would only guarantee alignment if container creation was serialized in some way. Alignment is now guaranteed under all scenarios of container creation. ([#87759](https://github.com/kubernetes/kubernetes/pull/87759), [@klueska](https://github.com/klueska)) [SIG Node] -- Fixed block CSI volume cleanup after timeouts. 
([#88660](https://github.com/kubernetes/kubernetes/pull/88660), [@jsafrane](https://github.com/jsafrane)) [SIG Node and Storage] -- Fixes issue where you can't attach more than 15 GCE Persistent Disks to c2, n2, m1, m2 machine types. ([#88602](https://github.com/kubernetes/kubernetes/pull/88602), [@yuga711](https://github.com/yuga711)) [SIG Storage] -- For volumes that allow attaches across multiple nodes, attach and detach operations across different nodes are now executed in parallel. ([#88678](https://github.com/kubernetes/kubernetes/pull/88678), [@verult](https://github.com/verult)) [SIG Apps, Node and Storage] -- Hide kubectl.kubernetes.io/last-applied-configuration in describe command ([#88758](https://github.com/kubernetes/kubernetes/pull/88758), [@soltysh](https://github.com/soltysh)) [SIG Auth and CLI] -- In GKE alpha clusters it will be possible to use the service annotation `cloud.google.com/network-tier: Standard` ([#88487](https://github.com/kubernetes/kubernetes/pull/88487), [@zioproto](https://github.com/zioproto)) [SIG Cloud Provider] -- Kubelets perform fewer unnecessary pod status update operations on the API server. ([#88591](https://github.com/kubernetes/kubernetes/pull/88591), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node and Scalability] -- Plugin/PluginConfig and Policy APIs are mutually exclusive when running the scheduler ([#88864](https://github.com/kubernetes/kubernetes/pull/88864), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Specifying PluginConfig for the same plugin more than once fails scheduler startup. 
- - Specifying extenders and configuring .ignoredResources for the NodeResourcesFit plugin fails ([#88870](https://github.com/kubernetes/kubernetes/pull/88870), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Support TLS Server Name overrides in kubeconfig file and via --tls-server-name in kubectl ([#88769](https://github.com/kubernetes/kubernetes/pull/88769), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and CLI] -- Terminating a restartPolicy=Never pod no longer has a chance to report the pod succeeded when it actually failed. ([#88440](https://github.com/kubernetes/kubernetes/pull/88440), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node and Testing] -- The EventRecorder from k8s.io/client-go/tools/events will now create events in the default namespace (instead of kube-system) when the related object does not have it set. ([#88815](https://github.com/kubernetes/kubernetes/pull/88815), [@enj](https://github.com/enj)) [SIG API Machinery] -- The audit event sourceIPs list will now always end with the IP that sent the request directly to the API server. 
([#87167](https://github.com/kubernetes/kubernetes/pull/87167), [@tallclair](https://github.com/tallclair)) [SIG API Machinery and Auth] -- Update to use golang 1.13.8 ([#87648](https://github.com/kubernetes/kubernetes/pull/87648), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Release and Testing] -- Validate kube-proxy flags --ipvs-tcp-timeout, --ipvs-tcpfin-timeout, --ipvs-udp-timeout ([#88657](https://github.com/kubernetes/kubernetes/pull/88657), [@chendotjs](https://github.com/chendotjs)) [SIG Network] - - -# v1.18.0-beta.1 - -[Documentation](https://docs.k8s.io) - -## Downloads for v1.18.0-beta.1 - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes.tar.gz) | `7c182ca905b3a31871c01ab5fdaf46f074547536c7975e069ff230af0d402dfc0346958b1d084bd2c108582ffc407484e6a15a1cd93e9affbe34b6e99409ef1f` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-src.tar.gz) | `d104b8c792b1517bd730787678c71c8ee3b259de81449192a49a1c6e37a6576d28f69b05c2019cc4a4c40ddeb4d60b80138323df3f85db8682caabf28e67c2de` - -### Client Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-darwin-386.tar.gz) | `bc337bb8f200a789be4b97ce99b9d7be78d35ebd64746307c28339dc4628f56d9903e0818c0888aaa9364357a528d1ac6fd34f74377000f292ec502fbea3837e` -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-darwin-amd64.tar.gz) | `38dfa5e0b0cfff39942c913a6bcb2ad8868ec43457d35cffba08217bb6e7531720e0731f8588505f4c81193ce5ec0e5fe6870031cf1403fbbde193acf7e53540` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-386.tar.gz) | `8e63ec7ce29c69241120c037372c6c779e3f16253eabd612c7cbe6aa89326f5160eb5798004d723c5cd72d458811e98dac3574842eb6a57b2798ecd2bbe5bcf9` -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-amd64.tar.gz) | 
`c1be9f184a7c3f896a785c41cd6ece9d90d8cb9b1f6088bdfb5557d8856c55e455f6688f5f54c2114396d5ae7adc0361e34ebf8e9c498d0187bd785646ccc1d0` -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-arm.tar.gz) | `8eab02453cfd9e847632a774a0e0cf3a33c7619fb4ced7f1840e1f71444e8719b1c8e8cbfdd1f20bb909f3abe39cdcac74f14cb9c878c656d35871b7c37c7cbe` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-arm64.tar.gz) | `f7df0ec02d2e7e63278d5386e8153cfe2b691b864f17b6452cc824a5f328d688976c975b076e60f1c6b3c859e93e477134fbccc53bb49d9e846fb038b34eee48` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-ppc64le.tar.gz) | `36dd5b10addca678a518e6d052c9d6edf473e3f87388a2f03f714c93c5fbfe99ace16cf3b382a531be20a8fe6f4160f8d891800dd2cff5f23c9ca12c2f4a151b` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-s390x.tar.gz) | `5bdbb44b996ab4ccf3a383780270f5cfdbf174982c300723c8bddf0a48ae5e459476031c1d51b9d30ffd621d0a126c18a5de132ef1d92fca2f3e477665ea10cc` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-windows-386.tar.gz) | `5dea3d4c4e91ef889850143b361974250e99a3c526f5efee23ff9ccdcd2ceca4a2247e7c4f236bdfa77d2150157da5d676ac9c3ba26cf3a2f1e06d8827556f77` -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-windows-amd64.tar.gz) | `db298e698391368703e6aea7f4345aec5a4b8c69f9d8ff6c99fb5804a6cea16d295fb01e70fe943ade3d4ce9200a081ad40da21bd331317ec9213f69b4d6c48f` - -### Server Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-server-linux-amd64.tar.gz) | `c6284929dd5940e750b48db72ffbc09f73c5ec31ab3db283babb8e4e07cd8cbb27642f592009caae4717981c0db82c16312849ef4cbafe76acc4264c7d5864ac` 
-[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-server-linux-arm.tar.gz) | `6fc9552cf082c54cc0833b19876117c87ba7feb5a12c7e57f71b52208daf03eaef3ca56bd22b7bce2d6e81b5a23537cf6f5497a6eaa356c0aab1d3de26c309f9` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-server-linux-arm64.tar.gz) | `b794b9c399e548949b5bfb2fe71123e86c2034847b2c99aca34b6de718a35355bbecdae9dc2a81c49e3c82fb4b5862526a3f63c2862b438895e12c5ea884f22e` -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-server-linux-ppc64le.tar.gz) | `fddaed7a54f97046a91c29534645811c6346e973e22950b2607b8c119c2377e9ec2d32144f81626078cdaeca673129cc4016c1a3dbd3d43674aa777089fb56ac` -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-server-linux-s390x.tar.gz) | `65951a534bb55069c7419f41cbcdfe2fae31541d8a3f9eca11fc2489addf281c5ad2d13719212657da0be5b898f22b57ac39446d99072872fbacb0a7d59a4f74` - -### Node Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-linux-amd64.tar.gz) | `992059efb5cae7ed0ef55820368d854bad1c6d13a70366162cd3b5111ce24c371c7c87ded2012f055e08b2ff1b4ef506e1f4e065daa3ac474fef50b5efa4fb07` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-linux-arm.tar.gz) | `c63ae0f8add5821ad267774314b8c8c1ffe3b785872bf278e721fd5dfdad1a5db1d4db3720bea0a36bf10d9c6dd93e247560162c0eac6e1b743246f587d3b27a` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-linux-arm64.tar.gz) | `47adb9ddf6eaf8f475b89f59ee16fbd5df183149a11ad1574eaa645b47a6d58aec2ca70ba857ce9f1a5793d44cf7a61ebc6874793bb685edaf19410f4f76fd13` -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-linux-ppc64le.tar.gz) | 
`a3bc4a165567c7b76a3e45ab7b102d6eb3ecf373eb048173f921a4964cf9be8891d0d5b8dafbd88c3af7b0e21ef3d41c1e540c3347ddd84b929b3a3d02ceb7b2` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-linux-s390x.tar.gz) | `109ddf37c748f69584c829db57107c3518defe005c11fcd2a1471845c15aae0a3c89aafdd734229f4069ed18856cc650c80436684e1bdc43cfee3149b0324746` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-windows-amd64.tar.gz) | `a3a75d2696ad3136476ad7d811e8eabaff5111b90e592695e651d6111f819ebf0165b8b7f5adc05afb5f7f01d1e5fb64876cb696e492feb20a477a5800382b7a` - -## Changelog since v1.18.0-beta.0 - -## Urgent Upgrade Notes - -### (No, really, you MUST read this before you upgrade) - -- The StreamingProxyRedirects feature and `--redirect-container-streaming` flag are deprecated, and will be removed in a future release. The default behavior (proxy streaming requests through the kubelet) will be the only supported option. - If you are setting `--redirect-container-streaming=true`, then you must migrate off this configuration. The flag will no longer be able to be enabled starting in v1.20. If you are not setting the flag, no action is necessary. ([#88290](https://github.com/kubernetes/kubernetes/pull/88290), [@tallclair](https://github.com/tallclair)) [SIG API Machinery and Node] - -- Yes. - - Feature Name: Support using network resources (VNet, LB, IP, etc.) in different AAD Tenant and Subscription than those for the cluster. - - Changes in Pull Request: - - 1. Add properties `networkResourceTenantID` and `networkResourceSubscriptionID` in cloud provider auth config section, which indicates the location of network resources. - 2. Add function `GetMultiTenantServicePrincipalToken` to fetch multi-tenant service principal token, which will be used by Azure VM/VMSS Clients in this feature. - 3. 
Add function `GetNetworkResourceServicePrincipalToken` to fetch network resource service principal token, which will be used by Azure Network Resource (Load Balancer, Public IP, Route Table, Network Security Group and their sub level resources) Clients in this feature. - 4. Related unit tests. - - None. - - User Documentation: In PR https://github.com/kubernetes-sigs/cloud-provider-azure/pull/301 ([#88384](https://github.com/kubernetes/kubernetes/pull/88384), [@bowen5](https://github.com/bowen5)) [SIG Cloud Provider] - -## Changes by Kind - -### Deprecation - -- Azure service annotation service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset has been deprecated. Its support would be removed in a future release. ([#88462](https://github.com/kubernetes/kubernetes/pull/88462), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] - -### API Change - -- API additions to apiserver types ([#87179](https://github.com/kubernetes/kubernetes/pull/87179), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery, Cloud Provider and Cluster Lifecycle] -- Add Scheduling Profiles to kubescheduler.config.k8s.io/v1alpha2 ([#88087](https://github.com/kubernetes/kubernetes/pull/88087), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing] -- Added support for multiple sizes huge pages on a container level ([#84051](https://github.com/kubernetes/kubernetes/pull/84051), [@bart0sh](https://github.com/bart0sh)) [SIG Apps, Node and Storage] -- AppProtocol is a new field on Service and Endpoints resources, enabled with the ServiceAppProtocol feature gate. ([#88503](https://github.com/kubernetes/kubernetes/pull/88503), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Fixed missing validation of uniqueness of list items in lists with `x-kubernetes-list-type: map` or `x-kubernetes-list-type: set` in CustomResources.
([#84920](https://github.com/kubernetes/kubernetes/pull/84920), [@sttts](https://github.com/sttts)) [SIG API Machinery] -- Introduces optional --detect-local flag to kube-proxy. - Currently the only supported value is "cluster-cidr", - which is the default if not specified. ([#87748](https://github.com/kubernetes/kubernetes/pull/87748), [@satyasm](https://github.com/satyasm)) [SIG Cluster Lifecycle, Network and Scheduling] -- Kube-scheduler can run more than one scheduling profile. Given a pod, the profile is selected by using its `.spec.SchedulerName`. ([#88285](https://github.com/kubernetes/kubernetes/pull/88285), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps, Scheduling and Testing] -- Moving Windows RunAsUserName feature to GA ([#87790](https://github.com/kubernetes/kubernetes/pull/87790), [@marosset](https://github.com/marosset)) [SIG Apps and Windows] - -### Feature - -- Add --dry-run to kubectl delete, taint, replace ([#88292](https://github.com/kubernetes/kubernetes/pull/88292), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI and Testing] -- Add huge page stats to Allocated resources in "kubectl describe node" ([#80605](https://github.com/kubernetes/kubernetes/pull/80605), [@odinuge](https://github.com/odinuge)) [SIG CLI] -- Kubeadm: The ClusterStatus struct present in the kubeadm-config ConfigMap is deprecated and will be removed on a future version. It is going to be maintained by kubeadm until it gets removed. The same information can be found on `etcd` and `kube-apiserver` pod annotations, `kubeadm.kubernetes.io/etcd.advertise-client-urls` and `kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint` respectively. ([#87656](https://github.com/kubernetes/kubernetes/pull/87656), [@ereslibre](https://github.com/ereslibre)) [SIG Cluster Lifecycle] -- Kubeadm: add the experimental feature gate PublicKeysECDSA that can be used to create a - cluster with ECDSA certificates from "kubeadm init". 
Renewal of existing ECDSA certificates is - also supported using "kubeadm alpha certs renew", but not switching between the RSA and - ECDSA algorithms on the fly or during upgrades. ([#86953](https://github.com/kubernetes/kubernetes/pull/86953), [@rojkov](https://github.com/rojkov)) [SIG API Machinery, Auth and Cluster Lifecycle] -- Kubeadm: on kubeconfig certificate renewal, keep the embedded CA in sync with the one on disk ([#88052](https://github.com/kubernetes/kubernetes/pull/88052), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: upgrade supports fallback to the nearest known etcd version if an unknown k8s version is passed ([#88373](https://github.com/kubernetes/kubernetes/pull/88373), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- New flag `--show-hidden-metrics-for-version` in kube-scheduler can be used to show all hidden metrics that deprecated in the previous minor release. ([#84913](https://github.com/kubernetes/kubernetes/pull/84913), [@serathius](https://github.com/serathius)) [SIG Instrumentation and Scheduling] -- Scheduler framework permit plugins now run at the end of the scheduling cycle, after reserve plugins. Waiting on permit will remain in the beginning of the binding cycle. ([#88199](https://github.com/kubernetes/kubernetes/pull/88199), [@mateuszlitwin](https://github.com/mateuszlitwin)) [SIG Scheduling] -- The kubelet and the default docker runtime now support running ephemeral containers in the Linux process namespace of a target container. Other container runtimes must implement this feature before it will be available in that runtime. 
([#84731](https://github.com/kubernetes/kubernetes/pull/84731), [@verb](https://github.com/verb)) [SIG Node] - -### Other (Bug, Cleanup or Flake) - -- Add delays between goroutines for vm instance update ([#88094](https://github.com/kubernetes/kubernetes/pull/88094), [@aramase](https://github.com/aramase)) [SIG Cloud Provider] -- Add init containers log to cluster dump info. ([#88324](https://github.com/kubernetes/kubernetes/pull/88324), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- CPU limits are now respected for Windows containers. If a node is over-provisioned, no weighting is used - only limits are respected. ([#86101](https://github.com/kubernetes/kubernetes/pull/86101), [@PatrickLang](https://github.com/PatrickLang)) [SIG Node, Testing and Windows] -- Cloud provider config CloudProviderBackoffMode has been removed since it won't be used anymore. ([#88463](https://github.com/kubernetes/kubernetes/pull/88463), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Evictions due to pods breaching their ephemeral storage limits are now recorded by the `kubelet_evictions` metric and can be alerted on. ([#87906](https://github.com/kubernetes/kubernetes/pull/87906), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node] -- Fix: add remediation in azure disk attach/detach ([#88444](https://github.com/kubernetes/kubernetes/pull/88444), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: check disk status before disk azure disk ([#88360](https://github.com/kubernetes/kubernetes/pull/88360), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fixed cleaning of CSI raw block volumes. 
([#87978](https://github.com/kubernetes/kubernetes/pull/87978), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] -- Get-kube.sh uses the gcloud's current local GCP service account for auth when the provider is GCE or GKE instead of the metadata server default ([#88383](https://github.com/kubernetes/kubernetes/pull/88383), [@BenTheElder](https://github.com/BenTheElder)) [SIG Cluster Lifecycle] -- Golang/x/net has been updated to bring in fixes for CVE-2020-9283 ([#88381](https://github.com/kubernetes/kubernetes/pull/88381), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] -- Kubeadm now includes CoreDNS version 1.6.7 ([#86260](https://github.com/kubernetes/kubernetes/pull/86260), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] -- Kubeadm: fix the bug that 'kubeadm upgrade' hangs in single node cluster ([#88434](https://github.com/kubernetes/kubernetes/pull/88434), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Optimize kubectl version help info ([#88313](https://github.com/kubernetes/kubernetes/pull/88313), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Removes the deprecated command `kubectl rolling-update` ([#88057](https://github.com/kubernetes/kubernetes/pull/88057), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG Architecture, CLI and Testing] - - -# v1.18.0-alpha.5 - -[Documentation](https://docs.k8s.io) - -## Downloads for v1.18.0-alpha.5 - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes.tar.gz) | `6452cac2b80721e9f577cb117c29b9ac6858812b4275c2becbf74312566f7d016e8b34019bd1bf7615131b191613bf9b973e40ad9ac8f6de9007d41ef2d7fd70` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-src.tar.gz) | `e41d9d4dd6910a42990051fcdca4bf5d3999df46375abd27ffc56aae9b455ae984872302d590da6aa85bba6079334fb5fe511596b415ee79843dee1c61c137da` - -### 
Client Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-darwin-386.tar.gz) | `5c95935863492b31d4aaa6be93260088dafea27663eb91edca980ca3a8485310e60441bc9050d4d577e9c3f7ffd96db516db8d64321124cec1b712e957c9fe1c` -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-darwin-amd64.tar.gz) | `868faa578b3738604d8be62fae599ccc556799f1ce54807f1fe72599f20f8a1f98ad8152fac14a08a463322530b696d375253ba3653325e74b587df6e0510da3` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-386.tar.gz) | `76a89d1d30b476b47f8fb808e342f89608e5c1c1787c4c06f2d7e763f9482e2ae8b31e6ad26541972e2b9a3a7c28327e3150cdd355e8b8d8b050a801bbf08d49` -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-amd64.tar.gz) | `07ad96a09b44d1c707d7c68312c5d69b101a3424bf1e6e9400b2e7a3fba78df04302985d473ddd640d8f3f0257be34110dbe1304b9565dd9d7a4639b7b7b85fd` -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-arm.tar.gz) | `c04fed9fa370a75c1b8e18b2be0821943bb9befcc784d14762ea3278e73600332a9b324d5eeaa1801d20ad6be07a553c41dcf4fa7ab3eadd0730ab043d687c8c` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-arm64.tar.gz) | `4199147dea9954333df26d34248a1cb7b02ebbd6380ffcd42d9f9ed5fdabae45a59215474dab3c11436c82e60bd27cbd03b3dde288bf611cd3e78b87c783c6a9` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-ppc64le.tar.gz) | `4f6d4d61d1c52d3253ca19031ebcd4bad06d19b68bbaaab5c8e8c590774faea4a5ceab1f05f2706b61780927e1467815b3479342c84d45df965aba78414727c4` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-s390x.tar.gz) | 
`e2a454151ae5dd891230fb516a3f73f73ab97832db66fd3d12e7f1657a569f58a9fe2654d50ddd7d8ec88a5ff5094199323a4c6d7d44dcf7edb06cca11dd4de1` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-windows-386.tar.gz) | `14b262ba3b71c41f545db2a017cf1746075ada5745a858d2a62bc9df7c5dc10607220375db85e2c4cb85307b09709e58bc66a407488e0961191e3249dc7742b0` -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-windows-amd64.tar.gz) | `26353c294755a917216664364b524982b7f5fc6aa832ce90134bb178df8a78604963c68873f121ea5f2626ff615bdbf2ffe54e00578739cde6df42ffae034732` - -### Server Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-server-linux-amd64.tar.gz) | `ba77e0e7c610f59647c1b2601f82752964a0f54b7ad609a89b00fcfd553d0f0249f6662becbabaa755bb769b36a2000779f08022c40fb8cc61440337481317a1` -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-server-linux-arm.tar.gz) | `45e87b3e844ea26958b0b489e8c9b90900a3253000850f5ff9e87ffdcafba72ab8fd17b5ba092051a58a4bc277912c047a85940ec7f093dff6f9e8bf6fed3b42` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-server-linux-arm64.tar.gz) | `155e136e3124ead69c594eead3398d6cfdbb8f823c324880e8a7bbd1b570b05d13a77a69abd0a6758cfcc7923971cc6da4d3e0c1680fd519b632803ece00d5ce` -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-server-linux-ppc64le.tar.gz) | `3fa0fb8221da19ad9d03278961172b7fa29a618b30abfa55e7243bb937dede8df56658acf02e6b61e7274fbc9395e237f49c62f2a83017eca2a69f67af31c01c` -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-server-linux-s390x.tar.gz) | `db3199c3d7ba0b326d71dc8b80f50b195e79e662f71386a3b2976d47d13d7b0136887cc21df6f53e70a3d733da6eac7bbbf3bab2df8a1909a3cee4b44c32dd0b` - -### Node Binaries - -filename | sha512 hash --------- | ----------- 
-[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-linux-amd64.tar.gz) | `addcdfbad7f12647e6babb8eadf853a374605c8f18bf63f416fa4d3bf1b903aa206679d840433206423a984bb925e7983366edcdf777cf5daef6ef88e53d6dfa` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-linux-arm.tar.gz) | `b2ac54e0396e153523d116a2aaa32c919d6243931e0104cd47a23f546d710e7abdaa9eae92d978ce63c92041e63a9b56f5dd8fd06c812a7018a10ecac440f768` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-linux-arm64.tar.gz) | `7aab36f2735cba805e4fd109831a1af0f586a88db3f07581b6dc2a2aab90076b22c96b490b4f6461a8fb690bf78948b6d514274f0d6fb0664081de2d44dc48e1` -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-linux-ppc64le.tar.gz) | `a579936f07ebf86f69f297ac50ba4c34caf2c0b903f73190eb581c78382b05ef36d41ade5bfd25d7b1b658cfcbee3d7125702a18e7480f9b09a62733a512a18a` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-linux-s390x.tar.gz) | `58fa0359ddd48835192fab1136a2b9b45d1927b04411502c269cda07cb8a8106536973fb4c7fedf1d41893a524c9fe2e21078fdf27bfbeed778273d024f14449` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-windows-amd64.tar.gz) | `9086c03cd92b440686cea6d8c4e48045cc46a43ab92ae0e70350b3f51804b9e2aaae7178142306768bae00d9ef6dd938167972bfa90b12223540093f735a45db` - -## Changelog since v1.18.0-alpha.3 - -### Deprecation - -- Kubeadm: command line option "kubelet-version" for `kubeadm upgrade node` has been deprecated and will be removed in a future release. ([#87942](https://github.com/kubernetes/kubernetes/pull/87942), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] - -### API Change - -- Kubelet podresources API now provides the information about active pods only. 
([#79409](https://github.com/kubernetes/kubernetes/pull/79409), [@takmatsu](https://github.com/takmatsu)) [SIG Node] -- Remove deprecated fields from .leaderElection in kubescheduler.config.k8s.io/v1alpha2 ([#87904](https://github.com/kubernetes/kubernetes/pull/87904), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Signatures on generated clientset methods have been modified to accept `context.Context` as a first argument. Signatures of generated Create, Update, and Patch methods have been updated to accept CreateOptions, UpdateOptions and PatchOptions respectively. Clientsets that with the previous interface have been added in new "deprecated" packages to allow incremental migration to the new APIs. The deprecated packages will be removed in the 1.21 release. ([#87299](https://github.com/kubernetes/kubernetes/pull/87299), [@mikedanese](https://github.com/mikedanese)) [SIG API Machinery, Apps, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Scheduling, Storage, Testing and Windows] -- The k8s.io/node-api component is no longer updated. Instead, use the RuntimeClass types located within k8s.io/api, and the generated clients located within k8s.io/client-go ([#87503](https://github.com/kubernetes/kubernetes/pull/87503), [@liggitt](https://github.com/liggitt)) [SIG Node and Release] - -### Feature - -- Add indexer for storage cacher ([#85445](https://github.com/kubernetes/kubernetes/pull/85445), [@shaloulcy](https://github.com/shaloulcy)) [SIG API Machinery] -- Add support for mount options to the FC volume plugin ([#87499](https://github.com/kubernetes/kubernetes/pull/87499), [@ejweber](https://github.com/ejweber)) [SIG Storage] -- Added a config-mode flag in azure auth module to enable getting AAD token without spn: prefix in audience claim. When it's not specified, the default behavior doesn't change. 
([#87630](https://github.com/kubernetes/kubernetes/pull/87630), [@weinong](https://github.com/weinong)) [SIG API Machinery, Auth, CLI and Cloud Provider] -- Introduced BackoffManager interface for backoff management ([#87829](https://github.com/kubernetes/kubernetes/pull/87829), [@zhan849](https://github.com/zhan849)) [SIG API Machinery] -- PodTopologySpread plugin now excludes terminatingPods when making scheduling decisions. ([#87845](https://github.com/kubernetes/kubernetes/pull/87845), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Promote CSIMigrationOpenStack to Beta (off by default since it requires installation of the OpenStack Cinder CSI Driver) - The in-tree AWS OpenStack Cinder "kubernetes.io/cinder" was already deprecated a while ago and will be removed in 1.20. Users should enable CSIMigration + CSIMigrationOpenStack features and install the OpenStack Cinder CSI Driver (https://github.com/kubernetes-sigs/cloud-provider-openstack) to avoid disruption to existing Pod and PVC objects at that time. - Users should start using the OpenStack Cinder CSI Driver directly for any new volumes. ([#85637](https://github.com/kubernetes/kubernetes/pull/85637), [@dims](https://github.com/dims)) [SIG Cloud Provider] - -### Design - -- The scheduler Permit extension point doesn't return a boolean value in its Allow() and Reject() functions. ([#87936](https://github.com/kubernetes/kubernetes/pull/87936), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] - -### Other (Bug, Cleanup or Flake) - -- Adds "volume.beta.kubernetes.io/migrated-to" annotation to PV's and PVC's when they are migrated to signal external provisioners to pick up those objects for Provisioning and Deleting. 
([#87098](https://github.com/kubernetes/kubernetes/pull/87098), [@davidz627](https://github.com/davidz627)) [SIG Apps and Storage] -- Fix a bug in the dual-stack IPVS proxier where stale IPv6 endpoints were not being cleaned up ([#87695](https://github.com/kubernetes/kubernetes/pull/87695), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network] -- Fix kubectl drain ignore daemonsets and others. ([#87361](https://github.com/kubernetes/kubernetes/pull/87361), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Fix: add azure disk migration support for CSINode ([#88014](https://github.com/kubernetes/kubernetes/pull/88014), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix: add non-retriable errors in azure clients ([#87941](https://github.com/kubernetes/kubernetes/pull/87941), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fixed NetworkPolicy validation that Except values are accepted when they are outside the CIDR range. ([#86578](https://github.com/kubernetes/kubernetes/pull/86578), [@tnqn](https://github.com/tnqn)) [SIG Network] -- Improves performance of the node authorizer ([#87696](https://github.com/kubernetes/kubernetes/pull/87696), [@liggitt](https://github.com/liggitt)) [SIG Auth] -- Iptables/userspace proxy: improve performance by getting local addresses only once per sync loop, instead of for every external IP ([#85617](https://github.com/kubernetes/kubernetes/pull/85617), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Network] -- Kube-aggregator: always sets unavailableGauge metric to reflect the current state of a service. 
([#87778](https://github.com/kubernetes/kubernetes/pull/87778), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery] -- Kubeadm allows to configure single-stack clusters if dual-stack is enabled ([#87453](https://github.com/kubernetes/kubernetes/pull/87453), [@aojea](https://github.com/aojea)) [SIG API Machinery, Cluster Lifecycle and Network] -- Kubeadm: 'kubeadm alpha kubelet config download' has been removed, please use 'kubeadm upgrade node phase kubelet-config' instead ([#87944](https://github.com/kubernetes/kubernetes/pull/87944), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubeadm: remove 'kubeadm upgrade node config' command since it was deprecated in v1.15, please use 'kubeadm upgrade node phase kubelet-config' instead ([#87975](https://github.com/kubernetes/kubernetes/pull/87975), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubectl describe and kubectl top pod will return a message saying "No resources found" or "No resources found in namespace" if there are no results to display. ([#87527](https://github.com/kubernetes/kubernetes/pull/87527), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- Kubelet metrics gathered through metrics-server or prometheus should no longer timeout for Windows nodes running more than 3 pods. ([#87730](https://github.com/kubernetes/kubernetes/pull/87730), [@marosset](https://github.com/marosset)) [SIG Node, Testing and Windows] -- Kubelet metrics have been changed to buckets. - For example the exec/{podNamespace}/{podID}/{containerName} is now just exec. ([#87913](https://github.com/kubernetes/kubernetes/pull/87913), [@cheftako](https://github.com/cheftako)) [SIG Node] -- Limit number of instances in a single update to GCE target pool to 1000. 
([#87881](https://github.com/kubernetes/kubernetes/pull/87881), [@wojtek-t](https://github.com/wojtek-t)) [SIG Cloud Provider, Network and Scalability] -- Make Azure clients only retry on specified HTTP status codes ([#88017](https://github.com/kubernetes/kubernetes/pull/88017), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Pause image contains "Architecture" in non-amd64 images ([#87954](https://github.com/kubernetes/kubernetes/pull/87954), [@BenTheElder](https://github.com/BenTheElder)) [SIG Release] -- Pods that are considered for preemption and haven't started don't produce an error log. ([#87900](https://github.com/kubernetes/kubernetes/pull/87900), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Prevent error message from being displayed when running kubectl plugin list and your path includes an empty string ([#87633](https://github.com/kubernetes/kubernetes/pull/87633), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- `kubectl create clusterrolebinding` creates rbac.authorization.k8s.io/v1 object ([#85889](https://github.com/kubernetes/kubernetes/pull/85889), [@oke-py](https://github.com/oke-py)) [SIG CLI] - -# v1.18.0-alpha.4 - -[Documentation](https://docs.k8s.io) - -## Important note about manual tag - -Due to a [tagging bug in our Release Engineering tooling](https://github.com/kubernetes/release/issues/1080) during `v1.18.0-alpha.3`, we needed to push a manual tag (`v1.18.0-alpha.4`). - -**No binaries have been produced or will be provided for `v1.18.0-alpha.4`.** - -The changelog for `v1.18.0-alpha.4` is included as part of the [changelog since v1.18.0-alpha.3](#changelog-since-v1180-alpha3) section. 
- -# v1.18.0-alpha.3 - -[Documentation](https://docs.k8s.io) - -## Downloads for v1.18.0-alpha.3 - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes.tar.gz) | `60bf3bfc23b428f53fd853bac18a4a905b980fcc0bacd35ccd6357a89cfc26e47de60975ea6b712e65980e6b9df82a22331152d9f08ed4dba44558ba23a422d4` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-src.tar.gz) | `8adf1016565a7c93713ab6fa4293c2d13b4f6e4e1ec4dcba60bd71e218b4dbe9ef5eb7dbb469006743f498fc7ddeb21865cd12bec041af60b1c0edce8b7aecd5` - -### Client Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-darwin-386.tar.gz) | `abb32e894e8280c772e96227b574da81cd1eac374b8d29158b7f222ed550087c65482eef4a9817dfb5f2baf0d9b85fcdfa8feced0fbc1aacced7296853b57e1f` -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-darwin-amd64.tar.gz) | `5e4b1a993264e256ec1656305de7c306094cae9781af8f1382df4ce4eed48ce030827fde1a5e757d4ad57233d52075c9e4e93a69efbdc1102e4ba810705ccddc` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-386.tar.gz) | `68da39c2ae101d2b38f6137ceda07eb0c2124794982a62ef483245dbffb0611c1441ca085fa3127e7a9977f45646788832a783544ff06954114548ea0e526e46` -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-amd64.tar.gz) | `dc236ffa8ad426620e50181419e9bebe3c161e953dbfb8a019f61b11286e1eb950b40d7cc03423bdf3e6974973bcded51300f98b55570c29732fa492dcde761d` -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-arm.tar.gz) | `ab0a8bd6dc31ea160b731593cdc490b3cc03668b1141cf95310bd7060dcaf55c7ee9842e0acae81063fdacb043c3552ccdd12a94afd71d5310b3ce056fdaa06c` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-arm64.tar.gz) | 
`159ea083c601710d0d6aea423eeb346c99ffaf2abd137d35a53e87a07f5caf12fca8790925f3196f67b768fa92a024f83b50325dbca9ccd4dde6c59acdce3509` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-ppc64le.tar.gz) | `16b0459adfa26575d13be49ab53ac7f0ffd05e184e4e13d2dfbfe725d46bb8ac891e1fd8aebe36ecd419781d4cc5cf3bd2aaaf5263cf283724618c4012408f40` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-s390x.tar.gz) | `d5aa1f5d89168995d2797eb839a04ce32560f405b38c1c0baaa0e313e4771ae7bb3b28e22433ad5897d36aadf95f73eb69d8d411d31c4115b6b0adf5fe041f85` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-windows-386.tar.gz) | `374e16a1e52009be88c94786f80174d82dff66399bf294c9bee18a2159c42251c5debef1109a92570799148b08024960c6c50b8299a93fd66ebef94f198f34e9` -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-windows-amd64.tar.gz) | `5a94c1068c19271f810b994adad8e62fae03b3d4473c7c9e6d056995ff7757ea61dd8d140c9267dd41e48808876673ce117826d35a3c1bb5652752f11a044d57` - -### Server Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-server-linux-amd64.tar.gz) | `a677bec81f0eba75114b92ff955bac74512b47e53959d56a685dae5edd527283d91485b1e86ad74ef389c5405863badf7eb22e2f0c9a568a4d0cb495c6a5c32f` -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-server-linux-arm.tar.gz) | `2fb696f86ff13ebeb5f3cf2b254bf41303644c5ea84a292782eac6123550702655284d957676d382698c091358e5c7fe73f32803699c19be7138d6530fe413b6` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-server-linux-arm64.tar.gz) | `738e95da9cfb8f1309479078098de1c38cef5e1dd5ee1129b77651a936a412b7cd0cf15e652afc7421219646a98846ab31694970432e48dea9c9cafa03aa59cf` 
-[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-server-linux-ppc64le.tar.gz) | `7a85bfcbb2aa636df60c41879e96e788742ecd72040cb0db2a93418439c125218c58a4cfa96d01b0296c295793e94c544e87c2d98d50b49bc4cb06b41f874376` -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-server-linux-s390x.tar.gz) | `1f1cdb2efa3e7cac857203d8845df2fdaa5cf1f20df764efffff29371945ec58f6deeba06f8fbf70b96faf81b0c955bf4cb84e30f9516cb2cc1ed27c2d2185a6` - -### Node Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-linux-amd64.tar.gz) | `4ccfced3f5ba4adfa58f4a9d1b2c5bdb3e89f9203ab0e27d11eb1c325ac323ebe63c015d2c9d070b233f5d1da76cab5349da3528511c1cd243e66edc9af381c4` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-linux-arm.tar.gz) | `d695a69d18449062e4c129e54ec8384c573955f8108f4b78adc2ec929719f2196b995469c728dd6656c63c44cda24315543939f85131ebc773cfe0de689df55b` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-linux-arm64.tar.gz) | `21df1da88c89000abc22f97e482c3aaa5ce53ec9628d83dda2e04a1d86c4d53be46c03ed6f1f211df3ee5071bce39d944ff7716b5b6ada3b9c4821d368b0a898` -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-linux-ppc64le.tar.gz) | `ff77e3aacb6ed9d89baed92ef542c8b5cec83151b6421948583cf608bca3b779dce41fc6852961e00225d5e1502f6a634bfa61a36efa90e1aee90dedb787c2d2` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-linux-s390x.tar.gz) | `57d75b7977ec1a0f6e7ed96a304dbb3b8664910f42ca19aab319a9ec33535ff5901dfca4abcb33bf5741cde6d152acd89a5f8178f0efe1dc24430e0c1af5b98f` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-windows-amd64.tar.gz) | 
`63fdbb71773cfd73a914c498e69bb9eea3fc314366c99ffb8bd42ec5b4dae807682c83c1eb5cfb1e2feb4d11d9e49cc85ba644e954241320a835798be7653d61` - -## Changelog since v1.18.0-alpha.2 - -### Deprecation - -- Remove all the generators from kubectl run. It will now only create pods. Additionally, deprecates all the flags that are not relevant anymore. ([#87077](https://github.com/kubernetes/kubernetes/pull/87077), [@soltysh](https://github.com/soltysh)) [SIG Architecture, SIG CLI, and SIG Testing] -- kubeadm: kube-dns is deprecated and will not be supported in a future version ([#86574](https://github.com/kubernetes/kubernetes/pull/86574), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] - -### API Change - -- Add kubescheduler.config.k8s.io/v1alpha2 ([#87628](https://github.com/kubernetes/kubernetes/pull/87628), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- --enable-cadvisor-endpoints is now disabled by default. If you need access to the cAdvisor v1 Json API please enable it explicitly in the kubelet command line. Please note that this flag was deprecated in 1.15 and will be removed in 1.19. ([#87440](https://github.com/kubernetes/kubernetes/pull/87440), [@dims](https://github.com/dims)) [SIG Instrumentation, SIG Node, and SIG Testing] -- The following feature gates are removed, because the associated features were unconditionally enabled in previous releases: CustomResourceValidation, CustomResourceSubresources, CustomResourceWebhookConversion, CustomResourcePublishOpenAPI, CustomResourceDefaulting ([#87475](https://github.com/kubernetes/kubernetes/pull/87475), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] - -### Feature - -- aggregation api will have alpha support for network proxy ([#87515](https://github.com/kubernetes/kubernetes/pull/87515), [@Sh4d1](https://github.com/Sh4d1)) [SIG API Machinery] -- API request throttling (due to a high rate of requests) is now reported in client-go logs at log level 2. 
The messages are of the form - - Throttling request took 1.50705208s, request: GET: - - The presence of these messages, may indicate to the administrator the need to tune the cluster accordingly. ([#87740](https://github.com/kubernetes/kubernetes/pull/87740), [@jennybuckley](https://github.com/jennybuckley)) [SIG API Machinery] -- kubeadm: reject a node joining the cluster if a node with the same name already exists ([#81056](https://github.com/kubernetes/kubernetes/pull/81056), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- disableAvailabilitySetNodes is added to avoid VM list for VMSS clusters. It should only be used when vmType is "vmss" and all the nodes (including masters) are VMSS virtual machines. ([#87685](https://github.com/kubernetes/kubernetes/pull/87685), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- The kubectl --dry-run flag now accepts the values 'client', 'server', and 'none', to support client-side and server-side dry-run strategies. The boolean and unset values for the --dry-run flag are deprecated and a value will be required in a future version. 
([#87580](https://github.com/kubernetes/kubernetes/pull/87580), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI] -- Add support for pre-allocated hugepages for more than one page size ([#82820](https://github.com/kubernetes/kubernetes/pull/82820), [@odinuge](https://github.com/odinuge)) [SIG Apps] -- Update CNI version to v0.8.5 ([#78819](https://github.com/kubernetes/kubernetes/pull/78819), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, SIG Cluster Lifecycle, SIG Network, SIG Release, and SIG Testing] -- Skip default spreading scoring plugin for pods that define TopologySpreadConstraints ([#87566](https://github.com/kubernetes/kubernetes/pull/87566), [@skilxn-go](https://github.com/skilxn-go)) [SIG Scheduling] -- Added more details to taint toleration errors ([#87250](https://github.com/kubernetes/kubernetes/pull/87250), [@starizard](https://github.com/starizard)) [SIG Apps, and SIG Scheduling] -- Scheduler: Add DefaultBinder plugin ([#87430](https://github.com/kubernetes/kubernetes/pull/87430), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling, and SIG Testing] -- Kube-apiserver metrics will now include request counts, latencies, and response sizes for /healthz, /livez, and /readyz requests. ([#83598](https://github.com/kubernetes/kubernetes/pull/83598), [@jktomer](https://github.com/jktomer)) [SIG API Machinery] - -### Other (Bug, Cleanup or Flake) - -- Fix the masters rolling upgrade causing thundering herd of LISTs on etcd leading to control plane unavailability. ([#86430](https://github.com/kubernetes/kubernetes/pull/86430), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, SIG Node, and SIG Testing] -- `kubectl diff` now returns 1 only on diff finding changes, and >1 on kubectl errors. The "exit status code 1" message has also been muted. 
([#87437](https://github.com/kubernetes/kubernetes/pull/87437), [@apelisse](https://github.com/apelisse)) [SIG CLI, and SIG Testing] -- To reduce chances of throttling, VM cache is set to nil when Azure node provisioning state is deleting ([#87635](https://github.com/kubernetes/kubernetes/pull/87635), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix regression in statefulset conversion which prevented applying a statefulset multiple times. ([#87706](https://github.com/kubernetes/kubernetes/pull/87706), [@liggitt](https://github.com/liggitt)) [SIG Apps, and SIG Testing] -- fixed two scheduler metrics (pending_pods and schedule_attempts_total) not being recorded ([#87692](https://github.com/kubernetes/kubernetes/pull/87692), [@everpeace](https://github.com/everpeace)) [SIG Scheduling] -- Resolved a performance issue in the node authorizer index maintenance. ([#87693](https://github.com/kubernetes/kubernetes/pull/87693), [@liggitt](https://github.com/liggitt)) [SIG Auth] -- Removed the 'client' label from apiserver_request_total. ([#87669](https://github.com/kubernetes/kubernetes/pull/87669), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, and SIG Instrumentation] -- `(*"k8s.io/client-go/rest".Request).{Do,DoRaw,Stream,Watch}` now require callers to pass a `context.Context` as an argument. The context is used for timeout and cancellation signaling and to pass supplementary information to round trippers in the wrapped transport chain. If you don't need any of this functionality, it is sufficient to pass a context created with `context.Background()` to these functions. The `(*"k8s.io/client-go/rest".Request).Context` method is removed now that all methods that execute a request accept a context directly. 
([#87597](https://github.com/kubernetes/kubernetes/pull/87597), [@mikedanese](https://github.com/mikedanese)) [SIG API Machinery, SIG Apps, SIG Auth, SIG Autoscaling, SIG CLI, SIG Cloud Provider, SIG Cluster Lifecycle, SIG Instrumentation, SIG Network, SIG Node, SIG Scheduling, SIG Storage, and SIG Testing] -- For volumes that allow attaches across multiple nodes, attach and detach operations across different nodes are now executed in parallel. ([#87258](https://github.com/kubernetes/kubernetes/pull/87258), [@verult](https://github.com/verult)) [SIG Apps, SIG Node, and SIG Storage] -- kubeadm: apply further improvements to the tentative support for concurrent etcd member join. Fixes a bug where multiple members can receive the same hostname. Increase the etcd client dial timeout and retry timeout for add/remove/... operations. ([#87505](https://github.com/kubernetes/kubernetes/pull/87505), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Reverted a kubectl azure auth module change where oidc claim spn: prefix was omitted resulting a breaking behavior with existing Azure AD OIDC enabled api-server ([#87507](https://github.com/kubernetes/kubernetes/pull/87507), [@weinong](https://github.com/weinong)) [SIG API Machinery, SIG Auth, and SIG Cloud Provider] -- Update cri-tools to v1.17.0 ([#86305](https://github.com/kubernetes/kubernetes/pull/86305), [@saschagrunert](https://github.com/saschagrunert)) [SIG Cluster Lifecycle, and SIG Release] -- kubeadm: remove the deprecated CoreDNS feature-gate. It was set to "true" since v1.11 when the feature went GA. In v1.13 it was marked as deprecated and hidden from the CLI. ([#87400](https://github.com/kubernetes/kubernetes/pull/87400), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Shared informers are now more reliable in the face of network disruption. 
([#86015](https://github.com/kubernetes/kubernetes/pull/86015), [@squeed](https://github.com/squeed)) [SIG API Machinery] -- the CSR signing cert/key pairs will be reloaded from disk like the kube-apiserver cert/key pairs ([#86816](https://github.com/kubernetes/kubernetes/pull/86816), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, SIG Apps, and SIG Auth] -- "kubectl describe statefulsets.apps" prints garbage for rolling update partition ([#85846](https://github.com/kubernetes/kubernetes/pull/85846), [@phil9909](https://github.com/phil9909)) [SIG CLI] - - - - - -# v1.18.0-alpha.2 - -[Documentation](https://docs.k8s.io) - -## Downloads for v1.18.0-alpha.2 - - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes.tar.gz) | `7af83386b4b35353f0aa1bdaf73599eb08b1d1ca11ecc2c606854aff754db69f3cd3dc761b6d7fc86f01052f615ca53185f33dbf9e53b2f926b0f02fc103fbd3` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-src.tar.gz) | `a14b02a0a0bde97795a836a8f5897b0ee6b43e010e13e43dd4cca80a5b962a1ef3704eedc7916fed1c38ec663a71db48c228c91e5daacba7d9370df98c7ddfb6` - -### Client Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-darwin-386.tar.gz) | `427f214d47ded44519007de2ae87160c56c2920358130e474b768299751a9affcbc1b1f0f936c39c6138837bca2a97792a6700896976e98c4beee8a1944cfde1` -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | `861fd81ac3bd45765575bedf5e002a2294aba48ef9e15980fc7d6783985f7d7fcde990ea0aef34690977a88df758722ec0a2e170d5dcc3eb01372e64e5439192` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-386.tar.gz) | `7d59b05d6247e2606a8321c72cd239713373d876dbb43b0fb7f1cb857fa6c998038b41eeed78d9eb67ce77b0b71776ceed428cce0f8d2203c5181b473e0bd86c` 
-[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | `7cdefb4e32bad9d2df5bb8e7e0a6f4dab2ae6b7afef5d801ac5c342d4effdeacd799081fa2dec699ecf549200786c7623c3176252010f12494a95240dd63311d` -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | `6212bbf0fa1d01ced77dcca2c4b76b73956cd3c6b70e0701c1fe0df5ff37160835f6b84fa2481e0e6979516551b14d8232d1c72764a559a3652bfe2a1e7488ff` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | `1f0d9990700510165ee471acb2f88222f1b80e8f6deb351ce14cf50a70a9840fb99606781e416a13231c74b2bd7576981b5348171aa33b628d2666e366cd4629` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | `77e00ba12a32db81e96f8de84609de93f32c61bb3f53875a57496d213aa6d1b92c09ad5a6de240a78e1a5bf77fac587ff92874f34a10f8909ae08ca32fda45d2` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | `a39ec2044bed5a4570e9c83068e0fc0ce923ccffa44380f8bbc3247426beaff79c8a84613bcb58b05f0eb3afbc34c79fe3309aa2e0b81abcfd0aa04770e62e05` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-windows-386.tar.gz) | `1a0ab88f9b7e34b60ab31d5538e97202a256ad8b7b7ed5070cae5f2f12d5d4edeae615db7a34ebbe254004b6393c6b2480100b09e30e59c9139492a3019a596a` -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | `1966eb5dfb78c1bc33aaa6389f32512e3aa92584250a0164182f3566c81d901b59ec78ee4e25df658bc1dd221b5a9527d6ce3b6c487ca3e3c0b319a077caa735` - -### Server Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | 
`f814d6a3872e4572aa4da297c29def4c1fad8eba0903946780b6bf9788c72b99d71085c5aef9e12c01133b26fa4563c1766ba724ad2a8af2670a24397951a94d` -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-server-linux-arm.tar.gz) | `56aa08225e546c92c2ff88ac57d3db7dd5e63640772ea72a429f080f7069827138cbc206f6f5fe3a0c01bfca043a9eda305ecdc1dcb864649114893e46b6dc84` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | `fb87128d905211ba097aa860244a376575ae2edbaca6e51402a24bc2964854b9b273e09df3d31a2bcffc91509f7eecb2118b183fb0e0eb544f33403fa235c274` -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | `6d21fbf39b9d3a0df9642407d6f698fabdc809aca83af197bceb58a81b25846072f407f8fb7caae2e02dc90912e3e0f5894f062f91bcb69f8c2329625d3dfeb7` -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | `ddcda4dc360ca97705f71bf2a18ddacd7b7ddf77535b62e699e97a1b2dd24843751313351d0112e238afe69558e8271eba4d27ab77bb67b4b9e3fbde6eec85c9` - -### Node Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | `78915a9bde35c70c67014f0cea8754849db4f6a84491a3ad9678fd3bc0203e43af5a63cfafe104ae1d56b05ce74893a87a6dcd008d7859e1af6b3bce65425b5d` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-linux-arm.tar.gz) | `3218e811abcb0cb09d80742def339be3916db5e9bbc62c0dc8e6d87085f7e3d9eeed79dea081906f1de78ddd07b7e3acdbd7765fdb838d262bb35602fd1df106` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | `fa22de9c4440b8fb27f4e77a5a63c5e1c8aa8aa30bb79eda843b0f40498c21b8c0ad79fff1d841bb9fef53fe20da272506de9a86f81a0b36d028dbeab2e482ce` 
-[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | `bbda9b5cc66e8f13d235703b2a85e2c4f02fa16af047be4d27a3e198e11eb11706e4a0fbb6c20978c770b069cd4cd9894b661f09937df9d507411548c36576e0` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | `b2ed1eda013069adce2aac00b86d75b84e006cfce9bafac0b5a2bafcb60f8f2cb346b5ea44eafa72d777871abef1ea890eb3a2a05de28968f9316fa88886a8ed` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | `bd8eb23dba711f31b5148257076b1bbe9629f2a75de213b2c779bd5b29279e9bf22f8bde32f4bc814f4c0cc49e19671eb8b24f4105f0fe2c1490c4b78ec3c704` - -## Changelog since v1.18.0-alpha.1 - -### Other notable changes - -* Bump golang/mock version to v1.3.1 ([#87326](https://github.com/kubernetes/kubernetes/pull/87326), [@wawa0210](https://github.com/wawa0210)) -* fix a bug that orphan revision cannot be adopted and statefulset cannot be synced ([#86801](https://github.com/kubernetes/kubernetes/pull/86801), [@likakuli](https://github.com/likakuli)) -* Azure storage clients now suppress requests on throttling ([#87306](https://github.com/kubernetes/kubernetes/pull/87306), [@feiskyer](https://github.com/feiskyer)) -* Introduce Alpha field `Immutable` in both Secret and ConfigMap objects to mark their contents as immutable. The implementation is hidden behind feature gate `ImmutableEphemeralVolumes` (currently in Alpha stage). ([#86377](https://github.com/kubernetes/kubernetes/pull/86377), [@wojtek-t](https://github.com/wojtek-t)) -* EndpointSlices will now be enabled by default. A new `EndpointSliceProxying` feature gate determines if kube-proxy will use EndpointSlices, this is disabled by default. 
([#86137](https://github.com/kubernetes/kubernetes/pull/86137), [@robscott](https://github.com/robscott)) -* kubeadm upgrades always persist the etcd backup for stacked etcd ([#86861](https://github.com/kubernetes/kubernetes/pull/86861), [@SataQiu](https://github.com/SataQiu)) -* Fix the bug where the PIP's DNS record is deleted if the DNS label service annotation is not set. ([#87246](https://github.com/kubernetes/kubernetes/pull/87246), [@nilo19](https://github.com/nilo19)) -* New flag `--show-hidden-metrics-for-version` in kube-controller-manager can be used to show all hidden metrics that were deprecated in the previous minor release. ([#85281](https://github.com/kubernetes/kubernetes/pull/85281), [@RainbowMango](https://github.com/RainbowMango)) -* Azure network and VM clients now suppress requests on throttling ([#87122](https://github.com/kubernetes/kubernetes/pull/87122), [@feiskyer](https://github.com/feiskyer)) -* `kubectl apply -f --prune -n ` should prune all resources not defined in the file in the CLI-specified namespace. ([#85613](https://github.com/kubernetes/kubernetes/pull/85613), [@MartinKaburu](https://github.com/MartinKaburu)) -* Fixes service account token admission error in clusters that do not run the service account token controller ([#87029](https://github.com/kubernetes/kubernetes/pull/87029), [@liggitt](https://github.com/liggitt)) -* CustomResourceDefinition status fields are no longer required for client validation when submitting manifests. ([#87213](https://github.com/kubernetes/kubernetes/pull/87213), [@hasheddan](https://github.com/hasheddan)) -* All apiservers log request lines in a more greppable format. ([#87203](https://github.com/kubernetes/kubernetes/pull/87203), [@lavalamp](https://github.com/lavalamp)) -* provider/azure: Network security groups can now be in a separate resource group. 
([#87035](https://github.com/kubernetes/kubernetes/pull/87035), [@CecileRobertMichon](https://github.com/CecileRobertMichon)) -* Cleaned up the output from `kubectl describe CSINode `. ([#85283](https://github.com/kubernetes/kubernetes/pull/85283), [@huffmanca](https://github.com/huffmanca)) -* Fixed the following ([#84265](https://github.com/kubernetes/kubernetes/pull/84265), [@bhagwat070919](https://github.com/bhagwat070919)) - * - AWS Cloud Provider attempts to delete LoadBalancer security group it didn’t provision - * - AWS Cloud Provider creates default LoadBalancer security group even if annotation [service.beta.kubernetes.io/aws-load-balancer-security-groups] is present -* kubelet: resource metrics endpoint `/metrics/resource/v1alpha1` as well as all metrics under this endpoint have been deprecated. ([#86282](https://github.com/kubernetes/kubernetes/pull/86282), [@RainbowMango](https://github.com/RainbowMango)) - * Please convert to the following metrics emitted by endpoint `/metrics/resource`: - * - scrape_error --> scrape_error - * - node_cpu_usage_seconds_total --> node_cpu_usage_seconds - * - node_memory_working_set_bytes --> node_memory_working_set_bytes - * - container_cpu_usage_seconds_total --> container_cpu_usage_seconds - * - container_memory_working_set_bytes --> container_memory_working_set_bytes - * - scrape_error --> scrape_error -* You can now pass "--node-ip ::" to kubelet to indicate that it should autodetect an IPv6 address to use as the node's primary address. 
([#85850](https://github.com/kubernetes/kubernetes/pull/85850), [@danwinship](https://github.com/danwinship)) -* kubeadm: support automatic retry after failing to pull image ([#86899](https://github.com/kubernetes/kubernetes/pull/86899), [@SataQiu](https://github.com/SataQiu)) -* TODO ([#87044](https://github.com/kubernetes/kubernetes/pull/87044), [@jennybuckley](https://github.com/jennybuckley)) -* Improved yaml parsing performance ([#85458](https://github.com/kubernetes/kubernetes/pull/85458), [@cjcullen](https://github.com/cjcullen)) -* Fixed a bug which could prevent a provider ID from ever being set for node if an error occurred determining the provider ID when the node was added. ([#87043](https://github.com/kubernetes/kubernetes/pull/87043), [@zjs](https://github.com/zjs)) -* fix a regression in kubenet that prevented pods from obtaining IP addresses ([#85993](https://github.com/kubernetes/kubernetes/pull/85993), [@chendotjs](https://github.com/chendotjs)) -* Bind kube-dns containers to linux nodes to avoid Windows scheduling ([#83358](https://github.com/kubernetes/kubernetes/pull/83358), [@wawa0210](https://github.com/wawa0210)) -* The following features are unconditionally enabled and the corresponding `--feature-gates` flags have been removed: `PodPriority`, `TaintNodesByCondition`, `ResourceQuotaScopeSelectors` and `ScheduleDaemonSetPods` ([#86210](https://github.com/kubernetes/kubernetes/pull/86210), [@draveness](https://github.com/draveness)) -* Bind dns-horizontal containers to linux nodes to avoid Windows scheduling on Kubernetes clusters that include both Linux and Windows nodes ([#83364](https://github.com/kubernetes/kubernetes/pull/83364), [@wawa0210](https://github.com/wawa0210)) -* fix kubectl annotate error when local=true is set ([#86952](https://github.com/kubernetes/kubernetes/pull/86952), [@zhouya0](https://github.com/zhouya0)) -* Bug fixes: ([#84163](https://github.com/kubernetes/kubernetes/pull/84163), 
[@david-tigera](https://github.com/david-tigera)) - * Make sure we include latest packages node #351 ([@caseydavenport](https://github.com/caseydavenport)) -* fix kubectl apply set-last-applied namespaces error ([#86474](https://github.com/kubernetes/kubernetes/pull/86474), [@zhouya0](https://github.com/zhouya0)) -* Add VolumeBinder method to FrameworkHandle interface, which allows users to get the volume binder when implementing scheduler framework plugins. ([#86940](https://github.com/kubernetes/kubernetes/pull/86940), [@skilxn-go](https://github.com/skilxn-go)) -* elasticsearch supports automatically setting the advertise address ([#85944](https://github.com/kubernetes/kubernetes/pull/85944), [@SataQiu](https://github.com/SataQiu)) -* If a serving certificates param specifies a name that is an IP for an SNI certificate, it will have priority for replying to server connections. ([#85308](https://github.com/kubernetes/kubernetes/pull/85308), [@deads2k](https://github.com/deads2k)) -* kube-proxy: Added dual-stack IPv4/IPv6 support to the iptables proxier. ([#82462](https://github.com/kubernetes/kubernetes/pull/82462), [@vllry](https://github.com/vllry)) -* Azure VMSS/VMSSVM clients now suppress requests on throttling ([#86740](https://github.com/kubernetes/kubernetes/pull/86740), [@feiskyer](https://github.com/feiskyer)) -* New metric kubelet_pleg_last_seen_seconds to aid diagnosis of PLEG not healthy issues. ([#86251](https://github.com/kubernetes/kubernetes/pull/86251), [@bboreham](https://github.com/bboreham)) -* For subprotocol negotiation, both client and server protocols are required now. 
([#86646](https://github.com/kubernetes/kubernetes/pull/86646), [@tedyu](https://github.com/tedyu)) -* kubeadm: use bind-address option to configure the kube-controller-manager and kube-scheduler http probes ([#86493](https://github.com/kubernetes/kubernetes/pull/86493), [@aojea](https://github.com/aojea)) -* Marked scheduler's metrics scheduling_algorithm_predicate_evaluation_seconds and ([#86584](https://github.com/kubernetes/kubernetes/pull/86584), [@xiaoanyunfei](https://github.com/xiaoanyunfei)) - * scheduling_algorithm_priority_evaluation_seconds as deprecated. Those are replaced by framework_extension_point_duration_seconds[extenstion_point="Filter"] and framework_extension_point_duration_seconds[extenstion_point="Score"] respectively. -* Marked scheduler's scheduling_duration_seconds Summary metric as deprecated ([#86586](https://github.com/kubernetes/kubernetes/pull/86586), [@xiaoanyunfei](https://github.com/xiaoanyunfei)) -* Add instructions about how to bring up e2e test cluster ([#85836](https://github.com/kubernetes/kubernetes/pull/85836), [@YangLu1031](https://github.com/YangLu1031)) -* If a required flag is not provided to a command, the user will only see the required flag error message, instead of the entire usage menu. ([#86693](https://github.com/kubernetes/kubernetes/pull/86693), [@sallyom](https://github.com/sallyom)) -* kubeadm: tolerate whitespace when validating certificate authority PEM data in kubeconfig files ([#86705](https://github.com/kubernetes/kubernetes/pull/86705), [@neolit123](https://github.com/neolit123)) -* kubeadm: add support for the "ci/k8s-master" version label as a replacement for "ci-cross/*", which no longer exists. ([#86609](https://github.com/kubernetes/kubernetes/pull/86609), [@Pensu](https://github.com/Pensu)) -* Fix EndpointSlice controller race condition and ensure that it handles external changes to EndpointSlices. 
([#85703](https://github.com/kubernetes/kubernetes/pull/85703), [@robscott](https://github.com/robscott)) -* Fix nil pointer dereference in azure cloud provider ([#85975](https://github.com/kubernetes/kubernetes/pull/85975), [@ldx](https://github.com/ldx)) -* fix: azure disk could not mounted on Standard_DC4s/DC2s instances ([#86612](https://github.com/kubernetes/kubernetes/pull/86612), [@andyzhangx](https://github.com/andyzhangx)) -* Fixes v1.17.0 regression in --service-cluster-ip-range handling with IPv4 ranges larger than 65536 IP addresses ([#86534](https://github.com/kubernetes/kubernetes/pull/86534), [@liggitt](https://github.com/liggitt)) -* Adds back support for AlwaysCheckAllPredicates flag. ([#86496](https://github.com/kubernetes/kubernetes/pull/86496), [@ahg-g](https://github.com/ahg-g)) -* Azure global rate limit is switched to per-client. A set of new rate limit configure options are introduced, including routeRateLimit, SubnetsRateLimit, InterfaceRateLimit, RouteTableRateLimit, LoadBalancerRateLimit, PublicIPAddressRateLimit, SecurityGroupRateLimit, VirtualMachineRateLimit, StorageAccountRateLimit, DiskRateLimit, SnapshotRateLimit, VirtualMachineScaleSetRateLimit and VirtualMachineSizeRateLimit. ([#86515](https://github.com/kubernetes/kubernetes/pull/86515), [@feiskyer](https://github.com/feiskyer)) - * The original rate limit options would be default values for those new client's rate limiter. -* Fix issue [#85805](https://github.com/kubernetes/kubernetes/pull/85805) about resource not found in azure cloud provider when lb specified in other resource group. ([#86502](https://github.com/kubernetes/kubernetes/pull/86502), [@levimm](https://github.com/levimm)) -* `AlwaysCheckAllPredicates` is deprecated in scheduler Policy API. 
([#86369](https://github.com/kubernetes/kubernetes/pull/86369), [@Huang-Wei](https://github.com/Huang-Wei)) -* Kubernetes KMS provider for data encryption now supports disabling the in-memory data encryption key (DEK) cache by setting cachesize to a negative value. ([#86294](https://github.com/kubernetes/kubernetes/pull/86294), [@enj](https://github.com/enj)) -* option `preConfiguredBackendPoolLoadBalancerTypes` is added to azure cloud provider for the pre-configured load balancers, possible values: `""`, `"internal"`, `"external"`, `"all"` ([#86338](https://github.com/kubernetes/kubernetes/pull/86338), [@gossion](https://github.com/gossion)) -* Promote StartupProbe to beta for 1.18 release ([#83437](https://github.com/kubernetes/kubernetes/pull/83437), [@matthyx](https://github.com/matthyx)) -* Fixes issue where AAD token obtained by kubectl is incompatible with on-behalf-of flow and oidc. ([#86412](https://github.com/kubernetes/kubernetes/pull/86412), [@weinong](https://github.com/weinong)) - * The audience claim before this fix has "spn:" prefix. After this fix, "spn:" prefix is omitted. 
-* change CounterVec to Counter about PLEGDiscardEvent ([#86167](https://github.com/kubernetes/kubernetes/pull/86167), [@yiyang5055](https://github.com/yiyang5055)) -* hollow-node do not use remote CRI anymore ([#86425](https://github.com/kubernetes/kubernetes/pull/86425), [@jkaniuk](https://github.com/jkaniuk)) -* hollow-node use fake CRI ([#85879](https://github.com/kubernetes/kubernetes/pull/85879), [@gongguan](https://github.com/gongguan)) - - - -# v1.18.0-alpha.1 - -[Documentation](https://docs.k8s.io) - -## Downloads for v1.18.0-alpha.1 - - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes.tar.gz) | `0c4904efc7f4f1436119c91dc1b6c93b3bd9c7490362a394bff10099c18e1e7600c4f6e2fcbaeb2d342a36c4b20692715cf7aa8ada6dfac369f44cc9292529d7` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-src.tar.gz) | `0a50fc6816c730ca5ae4c4f26d5ad7b049607d29f6a782a4e5b4b05ac50e016486e269dafcc6a163bd15e1a192780a9a987f1bb959696993641c603ed1e841c8` - -### Client Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-darwin-386.tar.gz) | `c6d75f7f3f20bef17fc7564a619b54e6f4a673d041b7c9ec93663763a1cc8dd16aecd7a2af70e8d54825a0eecb9762cf2edfdade840604c9a32ecd9cc2d5ac3c` -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | `ca1f19db289933beace6daee6fc30af19b0e260634ef6e89f773464a05e24551c791be58b67da7a7e2a863e28b7cbcc7b24b6b9bf467113c26da76ac8f54fdb6` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-386.tar.gz) | `af2e673653eb39c3f24a54efc68e1055f9258bdf6cf8fea42faf42c05abefc2da853f42faac3b166c37e2a7533020b8993b98c0d6d80a5b66f39e91d8ae0a3fb` -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | 
`9009032c3f94ac8a78c1322a28e16644ce3b20989eb762685a1819148aed6e883ca8e1200e5ec37ec0853f115c67e09b5d697d6cf5d4c45f653788a2d3a2f84f` -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | `afba9595b37a3f2eead6e3418573f7ce093b55467dce4da0b8de860028576b96b837a2fd942f9c276e965da694e31fbd523eeb39aefb902d7e7a2f169344d271` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | `04fc3b2fe3f271807f0bc6c61be52456f26a1af904964400be819b7914519edc72cbab9afab2bb2e2ba1a108963079367cedfb253c9364c0175d1fcc64d52f5c` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | `04c7edab874b33175ff7bebfff5b3a032bc6eb088fcd7387ffcd5b3fa71395ca8c5f9427b7ddb496e92087dfdb09eaf14a46e9513071d3bd73df76c182922d38` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | `499287dbbc33399a37b9f3b35e0124ff20b17b6619f25a207ee9c606ef261af61fa0c328dde18c7ce2d3dfb2eea2376623bc3425d16bc8515932a68b44f8bede` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-windows-386.tar.gz) | `cf84aeddf00f126fb13c0436b116dd0464a625659e44c84bf863517db0406afb4eefd86807e7543c4f96006d275772fbf66214ae7d582db5865c84ac3545b3e6` -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | `69f20558ccd5cd6dbaccf29307210db4e687af21f6d71f68c69d3a39766862686ac1333ab8a5012010ca5c5e3c11676b45e498e3d4c38773da7d24bcefc46d95` - -### Server Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | `3f29df2ce904a0f10db4c1d7a425a36f420867b595da3fa158ae430bfead90def2f2139f51425b349faa8a9303dcf20ea01657cb6ea28eb6ad64f5bb32ce2ed1` 
-[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-server-linux-arm.tar.gz) | `4a21073b2273d721fbf062c254840be5c8471a010bcc0c731b101729e36e61f637cb7fcb521a22e8d24808510242f4fff8a6ca40f10e9acd849c2a47bf135f27` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | `7f1cb6d721bedc90e28b16f99bea7e59f5ad6267c31ef39c14d34db6ad6aad87ee51d2acdd01b6903307c1c00b58ff6b785a03d5a491cc3f8a4df9a1d76d406c` -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | `8f2b552030b5274b1c2c7c166eacd5a14b0c6ca0f23042f4c52efe87e22a167ba4460dcd66615a5ecd26d9e88336be1fb555548392e70efe59070dd2c314da98` -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) | `8d9f2c96f66edafb7c8b3aa90960d29b41471743842aede6b47b3b2e61f4306fb6fc60b9ebc18820c547ee200bfedfe254c1cde962d447c791097dd30e79abdb` - -### Node Binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | `84194cb081d1502f8ca68143569f9707d96f1a28fcf0c574ebd203321463a8b605f67bb2a365eaffb14fbeb8d55c8d3fa17431780b242fb9cba3a14426a0cd4a` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-linux-arm.tar.gz) | `0091e108ab94fd8683b89c597c4fdc2fbf4920b007cfcd5297072c44bc3a230dfe5ceed16473e15c3e6cf5edab866d7004b53edab95be0400cc60e009eee0d9d` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | `b7e85682cc2848a35d52fd6f01c247f039ee1b5dd03345713821ea10a7fa9939b944f91087baae95eaa0665d11857c1b81c454f720add077287b091f9f19e5d3` -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | 
`cd1f0849e9c62b5d2c93ff0cebf58843e178d8a88317f45f76de0db5ae020b8027e9503a5fccc96445184e0d77ecdf6f57787176ac31dbcbd01323cd0a190cbb` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | `e1e697a34424c75d75415b613b81c8af5f64384226c5152d869f12fd7db1a3e25724975b73fa3d89e56e4bf78d5fd07e68a709ba8566f53691ba6a88addc79ea` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | `c725a19a4013c74e22383ad3fb4cb799b3e161c4318fdad066daf806730a89bc3be3ff0f75678d02b3cbe52b2ef0c411c0639968e200b9df470be40bb2c015cc` - -## Changelog since v1.17.0 - -### Action Required - -* action required ([#85363](https://github.com/kubernetes/kubernetes/pull/85363), [@immutableT](https://github.com/immutableT)) - * 1. Currently, if users were to explicitly specify CacheSize of 0 for KMS provider, they would end-up with a provider that caches up to 1000 keys. This PR changes this behavior. - * Post this PR, when users supply 0 for CacheSize this will result in a validation error. - * 2. CacheSize type was changed from int32 to *int32. This allows defaulting logic to differentiate between cases where users explicitly supplied 0 vs. not supplied any value. - * 3. KMS Provider's endpoint (path to Unix socket) is now validated when the EncryptionConfiguration files is loaded. This used to be handled by the GRPCService. - -### Other notable changes - -* fix: azure data disk should use same key as os disk by default ([#86351](https://github.com/kubernetes/kubernetes/pull/86351), [@andyzhangx](https://github.com/andyzhangx)) -* New flag `--show-hidden-metrics-for-version` in kube-proxy can be used to show all hidden metrics that deprecated in the previous minor release. 
([#85279](https://github.com/kubernetes/kubernetes/pull/85279), [@RainbowMango](https://github.com/RainbowMango)) -* Remove cluster-monitoring addon ([#85512](https://github.com/kubernetes/kubernetes/pull/85512), [@serathius](https://github.com/serathius)) -* Changed core_pattern on COS nodes to be an absolute path. ([#86329](https://github.com/kubernetes/kubernetes/pull/86329), [@mml](https://github.com/mml)) -* Track mount operations as uncertain if operation fails with non-final error ([#82492](https://github.com/kubernetes/kubernetes/pull/82492), [@gnufied](https://github.com/gnufied)) -* add kube-proxy flags --ipvs-tcp-timeout, --ipvs-tcpfin-timeout, --ipvs-udp-timeout to configure IPVS connection timeouts. ([#85517](https://github.com/kubernetes/kubernetes/pull/85517), [@andrewsykim](https://github.com/andrewsykim)) -* The sample-apiserver aggregated conformance test has updated to use the Kubernetes v1.17.0 sample apiserver ([#84735](https://github.com/kubernetes/kubernetes/pull/84735), [@liggitt](https://github.com/liggitt)) -* The underlying format of the `CPUManager` state file has changed. Upgrades should be seamless, but any third-party tools that rely on reading the previous format need to be updated. ([#84462](https://github.com/kubernetes/kubernetes/pull/84462), [@klueska](https://github.com/klueska)) -* kubernetes will try to acquire the iptables lock every 100 msec during 5 seconds instead of every second. This specially useful for environments using kube-proxy in iptables mode with a high churn rate of services. 
([#85771](https://github.com/kubernetes/kubernetes/pull/85771), [@aojea](https://github.com/aojea)) -* Fixed a panic in the kubelet cleaning up pod volumes ([#86277](https://github.com/kubernetes/kubernetes/pull/86277), [@tedyu](https://github.com/tedyu)) -* azure cloud provider cache TTL is configurable, list of the azure cloud provider is as following: ([#86266](https://github.com/kubernetes/kubernetes/pull/86266), [@zqingqing1](https://github.com/zqingqing1)) - * - "availabilitySetNodesCacheTTLInSeconds" - * - "vmssCacheTTLInSeconds" - * - "vmssVirtualMachinesCacheTTLInSeconds" - * - "vmCacheTTLInSeconds" - * - "loadBalancerCacheTTLInSeconds" - * - "nsgCacheTTLInSeconds" - * - "routeTableCacheTTLInSeconds" -* Fixes kube-proxy when EndpointSlice feature gate is enabled on Windows. ([#86016](https://github.com/kubernetes/kubernetes/pull/86016), [@robscott](https://github.com/robscott)) -* Fixes wrong validation result of NetworkPolicy PolicyTypes ([#85747](https://github.com/kubernetes/kubernetes/pull/85747), [@tnqn](https://github.com/tnqn)) -* Fixes an issue with kubelet-reported pod status on deleted/recreated pods. 
([#86320](https://github.com/kubernetes/kubernetes/pull/86320), [@liggitt](https://github.com/liggitt)) -* kube-apiserver no longer serves the following deprecated APIs: ([#85903](https://github.com/kubernetes/kubernetes/pull/85903), [@liggitt](https://github.com/liggitt)) - * All resources under `apps/v1beta1` and `apps/v1beta2` - use `apps/v1` instead - * `daemonsets`, `deployments`, `replicasets` resources under `extensions/v1beta1` - use `apps/v1` instead - * `networkpolicies` resources under `extensions/v1beta1` - use `networking.k8s.io/v1` instead - * `podsecuritypolicies` resources under `extensions/v1beta1` - use `policy/v1beta1` instead -* kubeadm: fix potential panic when executing "kubeadm reset" with a corrupted kubelet.conf file ([#86216](https://github.com/kubernetes/kubernetes/pull/86216), [@neolit123](https://github.com/neolit123)) -* Fix a bug in port-forward: named port not working with service ([#85511](https://github.com/kubernetes/kubernetes/pull/85511), [@oke-py](https://github.com/oke-py)) -* kube-proxy no longer modifies shared EndpointSlices. ([#86092](https://github.com/kubernetes/kubernetes/pull/86092), [@robscott](https://github.com/robscott)) -* allow for configuration of CoreDNS replica count ([#85837](https://github.com/kubernetes/kubernetes/pull/85837), [@pickledrick](https://github.com/pickledrick)) -* Fixed a regression where the kubelet would fail to update the ready status of pods. ([#84951](https://github.com/kubernetes/kubernetes/pull/84951), [@tedyu](https://github.com/tedyu)) -* Resolves performance regression in client-go discovery clients constructed using `NewDiscoveryClientForConfig` or `NewDiscoveryClientForConfigOrDie`. 
([#86168](https://github.com/kubernetes/kubernetes/pull/86168), [@liggitt](https://github.com/liggitt)) -* Make error message and service event message more clear ([#86078](https://github.com/kubernetes/kubernetes/pull/86078), [@feiskyer](https://github.com/feiskyer)) -* e2e-test-framework: add e2e test namespace dump if all tests succeed but the cleanup fails. ([#85542](https://github.com/kubernetes/kubernetes/pull/85542), [@schrodit](https://github.com/schrodit)) -* SafeSysctlWhitelist: add net.ipv4.ping_group_range ([#85463](https://github.com/kubernetes/kubernetes/pull/85463), [@AkihiroSuda](https://github.com/AkihiroSuda)) -* kubelet: the metric process_start_time_seconds be marked as with the ALPHA stability level. ([#85446](https://github.com/kubernetes/kubernetes/pull/85446), [@RainbowMango](https://github.com/RainbowMango)) -* API request throttling (due to a high rate of requests) is now reported in the kubelet (and other component) logs by default. The messages are of the form ([#80649](https://github.com/kubernetes/kubernetes/pull/80649), [@RobertKrawitz](https://github.com/RobertKrawitz)) - * Throttling request took 1.50705208s, request: GET: - * The presence of large numbers of these messages, particularly with long delay times, may indicate to the administrator the need to tune the cluster accordingly. -* Fix API Server potential memory leak issue in processing watch request. ([#85410](https://github.com/kubernetes/kubernetes/pull/85410), [@answer1991](https://github.com/answer1991)) -* Verify kubelet & kube-proxy can recover after being killed on Windows nodes ([#84886](https://github.com/kubernetes/kubernetes/pull/84886), [@YangLu1031](https://github.com/YangLu1031)) -* Fixed an issue that the scheduler only returns the first failure reason. ([#86022](https://github.com/kubernetes/kubernetes/pull/86022), [@Huang-Wei](https://github.com/Huang-Wei)) -* kubectl/drain: add skip-wait-for-delete-timeout option. 
([#85577](https://github.com/kubernetes/kubernetes/pull/85577), [@michaelgugino](https://github.com/michaelgugino)) - * If pod DeletionTimestamp older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip. -* Following metrics have been turned off: ([#83841](https://github.com/kubernetes/kubernetes/pull/83841), [@RainbowMango](https://github.com/RainbowMango)) - * - kubelet_pod_worker_latency_microseconds - * - kubelet_pod_start_latency_microseconds - * - kubelet_cgroup_manager_latency_microseconds - * - kubelet_pod_worker_start_latency_microseconds - * - kubelet_pleg_relist_latency_microseconds - * - kubelet_pleg_relist_interval_microseconds - * - kubelet_eviction_stats_age_microseconds - * - kubelet_runtime_operations - * - kubelet_runtime_operations_latency_microseconds - * - kubelet_runtime_operations_errors - * - kubelet_device_plugin_registration_count - * - kubelet_device_plugin_alloc_latency_microseconds - * - kubelet_docker_operations - * - kubelet_docker_operations_latency_microseconds - * - kubelet_docker_operations_errors - * - kubelet_docker_operations_timeout - * - network_plugin_operations_latency_microseconds -* - Renamed Kubelet metric certificate_manager_server_expiration_seconds to certificate_manager_server_ttl_seconds and changed to report the second until expiration at read time rather than absolute time of expiry. ([#85874](https://github.com/kubernetes/kubernetes/pull/85874), [@sambdavidson](https://github.com/sambdavidson)) - * - Improved accuracy of Kubelet metric rest_client_exec_plugin_ttl_seconds. 
-* Bind metadata-agent containers to linux nodes to avoid Windows scheduling on kubernetes cluster includes linux nodes and windows nodes ([#83363](https://github.com/kubernetes/kubernetes/pull/83363), [@wawa0210](https://github.com/wawa0210)) -* Bind metrics-server containers to linux nodes to avoid Windows scheduling on kubernetes cluster includes linux nodes and windows nodes ([#83362](https://github.com/kubernetes/kubernetes/pull/83362), [@wawa0210](https://github.com/wawa0210)) -* During initialization phase (preflight), kubeadm now verifies the presence of the conntrack executable ([#85857](https://github.com/kubernetes/kubernetes/pull/85857), [@hnanni](https://github.com/hnanni)) -* VMSS cache is added so that less chances of VMSS GET throttling ([#85885](https://github.com/kubernetes/kubernetes/pull/85885), [@nilo19](https://github.com/nilo19)) -* Update go-winio module version from 0.4.11 to 0.4.14 ([#85739](https://github.com/kubernetes/kubernetes/pull/85739), [@wawa0210](https://github.com/wawa0210)) -* Fix LoadBalancer rule checking so that no unexpected LoadBalancer updates are made ([#85990](https://github.com/kubernetes/kubernetes/pull/85990), [@feiskyer](https://github.com/feiskyer)) -* kubectl drain node --dry-run will list pods that would be evicted or deleted ([#82660](https://github.com/kubernetes/kubernetes/pull/82660), [@sallyom](https://github.com/sallyom)) -* Windows nodes on GCE can use TPM-based authentication to the master. ([#85466](https://github.com/kubernetes/kubernetes/pull/85466), [@pjh](https://github.com/pjh)) -* kubectl/drain: add disable-eviction option. ([#85571](https://github.com/kubernetes/kubernetes/pull/85571), [@michaelgugino](https://github.com/michaelgugino)) - * Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, and should be used with caution. 
-* kubeadm now errors out whenever a not supported component config version is supplied for the kubelet and kube-proxy ([#85639](https://github.com/kubernetes/kubernetes/pull/85639), [@rosti](https://github.com/rosti)) -* Fixed issue with addon-resizer using deprecated extensions APIs ([#85793](https://github.com/kubernetes/kubernetes/pull/85793), [@bskiba](https://github.com/bskiba)) -* Includes FSType when describing CSI persistent volumes. ([#85293](https://github.com/kubernetes/kubernetes/pull/85293), [@huffmanca](https://github.com/huffmanca)) -* kubelet now exports a "server_expiration_renew_failure" and "client_expiration_renew_failure" metric counter if the certificate rotations cannot be performed. ([#84614](https://github.com/kubernetes/kubernetes/pull/84614), [@rphillips](https://github.com/rphillips)) -* kubeadm: don't write the kubelet environment file on "upgrade apply" ([#85412](https://github.com/kubernetes/kubernetes/pull/85412), [@boluisa](https://github.com/boluisa)) -* fix azure file AuthorizationFailure ([#85475](https://github.com/kubernetes/kubernetes/pull/85475), [@andyzhangx](https://github.com/andyzhangx)) -* Resolved regression in admission, authentication, and authorization webhook performance in v1.17.0-rc.1 ([#85810](https://github.com/kubernetes/kubernetes/pull/85810), [@liggitt](https://github.com/liggitt)) -* kubeadm: uses the apiserver AdvertiseAddress IP family to choose the etcd endpoint IP family for non external etcd clusters ([#85745](https://github.com/kubernetes/kubernetes/pull/85745), [@aojea](https://github.com/aojea)) -* kubeadm: Forward cluster name to the controller-manager arguments ([#85817](https://github.com/kubernetes/kubernetes/pull/85817), [@ereslibre](https://github.com/ereslibre)) -* Fixed "requested device X but found Y" attach error on AWS. 
([#85675](https://github.com/kubernetes/kubernetes/pull/85675), [@jsafrane](https://github.com/jsafrane)) -* addons: elasticsearch discovery supports IPv6 ([#85543](https://github.com/kubernetes/kubernetes/pull/85543), [@SataQiu](https://github.com/SataQiu)) -* kubeadm: retry `kubeadm-config` ConfigMap creation or mutation if the apiserver is not responding. This will improve resiliency when joining new control plane nodes. ([#85763](https://github.com/kubernetes/kubernetes/pull/85763), [@ereslibre](https://github.com/ereslibre)) -* Update Cluster Autoscaler to 1.17.0; changelog: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.17.0 ([#85610](https://github.com/kubernetes/kubernetes/pull/85610), [@losipiuk](https://github.com/losipiuk)) -* Filter published OpenAPI schema by making nullable, required fields non-required in order to avoid kubectl to wrongly reject null values. ([#85722](https://github.com/kubernetes/kubernetes/pull/85722), [@sttts](https://github.com/sttts)) -* kubectl set resources will no longer return an error if passed an empty change for a resource. ([#85490](https://github.com/kubernetes/kubernetes/pull/85490), [@sallyom](https://github.com/sallyom)) - * kubectl set subject will no longer return an error if passed an empty change for a resource. 
-* kube-apiserver: fixed a conflict error encountered attempting to delete a pod with gracePeriodSeconds=0 and a resourceVersion precondition ([#85516](https://github.com/kubernetes/kubernetes/pull/85516), [@michaelgugino](https://github.com/michaelgugino)) -* kubeadm: add a upgrade health check that deploys a Job ([#81319](https://github.com/kubernetes/kubernetes/pull/81319), [@neolit123](https://github.com/neolit123)) -* kubeadm: make sure images are pre-pulled even if a tag did not change but their contents changed ([#85603](https://github.com/kubernetes/kubernetes/pull/85603), [@bart0sh](https://github.com/bart0sh)) -* kube-apiserver: Fixes a bug that hidden metrics can not be enabled by the command-line option `--show-hidden-metrics-for-version`. ([#85444](https://github.com/kubernetes/kubernetes/pull/85444), [@RainbowMango](https://github.com/RainbowMango)) -* kubeadm now supports automatic calculations of dual-stack node cidr masks to kube-controller-manager. ([#85609](https://github.com/kubernetes/kubernetes/pull/85609), [@Arvinderpal](https://github.com/Arvinderpal)) -* Fix bug where EndpointSlice controller would attempt to modify shared objects. ([#85368](https://github.com/kubernetes/kubernetes/pull/85368), [@robscott](https://github.com/robscott)) -* Use context to check client closed instead of http.CloseNotifier in processing watch request which will reduce 1 goroutine for each request if proto is HTTP/2.x . 
([#85408](https://github.com/kubernetes/kubernetes/pull/85408), [@answer1991](https://github.com/answer1991)) -* kubeadm: reset raises warnings if it cannot delete folders ([#85265](https://github.com/kubernetes/kubernetes/pull/85265), [@SataQiu](https://github.com/SataQiu)) -* Wait for kubelet & kube-proxy to be ready on Windows node within 10s ([#85228](https://github.com/kubernetes/kubernetes/pull/85228), [@YangLu1031](https://github.com/YangLu1031)) diff --git a/content/zh/docs/setup/release/version-skew-policy.md b/content/zh/docs/setup/release/version-skew-policy.md deleted file mode 100644 index b2fbebfbd1..0000000000 --- a/content/zh/docs/setup/release/version-skew-policy.md +++ /dev/null @@ -1,365 +0,0 @@ ---- -title: Kubernetes 版本及版本偏差支持策略 -content_type: concept -weight: 30 ---- - - - -本文描述 Kubernetes 各组件之间版本偏差支持策略。 -特定的集群部署工具可能会有额外的限制。 - - - - - -## 版本支持策略 - - -Kubernetes 版本号格式为 **x.y.z**,其中 **x** 为大版本号,**y** 为小版本号,**z** 为补丁版本号。 -版本号格式遵循 [Semantic Versioning](https://semver.org/) 规则。 -更多信息,请参阅 -[Kubernetes 发布版本](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md#kubernetes-release-versioning)。 - - -Kubernetes 项目会维护最近的三个小版本分支({{< skew latestVersion >}}, -{{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}})。 -Kubernetes 1.19 及更高的版本将获得大约1年的补丁支持。 -Kubernetes 1.18 及更早的版本获得大约9个月的补丁支持。 - - -一些 bug 修复,包括安全修复,取决于其严重性和可行性,有可能会反向合并到这三个发布分支。 -补丁版本会[定期](https://git.k8s.io/sig-release/releases/patch-releases.md#cadence) -或根据需要从这些分支中发布。 -最终是否发布是由 -[发布管理者](https://github.com/kubernetes/sig-release/blob/master/release-managers.md) -来决定的。 -如需了解更多信息,请查看 Kubernetes -[补丁发布](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md)。 - - -## 版本偏差策略 - -### kube-apiserver - - -在 [高可用(HA)集群](/zh/docs/setup/production-environment/tools/kubeadm/high-availability/) 中, -多个 `kube-apiserver` 实例小版本号最多差1。 - - -例如: - - -* 最新的 `kube-apiserver` 版本号如果是 **{{< skew latestVersion >}}** -* 其他受支持的 
`kube-apiserver` 版本号包括 **{{< skew latestVersion >}}** 和 - **{{< skew prevMinorVersion >}}** - -### kubelet - - -`kubelet` 版本号不能高于 `kube-apiserver`,最多可以比 `kube-apiserver` 低两个小版本。 - - -例如: - -* `kube-apiserver` 版本号如果是 **{{< skew latestVersion >}}** -* 受支持的的 `kubelet` 版本将包括 **{{< skew latestVersion >}}**、 - **{{< skew prevMinorVersion >}}** 和 **{{< skew oldestMinorVersion >}}** - - -{{< note >}} -如果 HA 集群中多个 `kube-apiserver` 实例版本号不一致,相应的 `kubelet` 版本号可选范围也要减小。 -{{}} - - -例如: - -* 如果 `kube-apiserver` 实例同时存在 **{{< skew latestVersion >}}** 和 - **{{< skew prevMinorVersion >}}** -* `kubelet` 的受支持版本将是 **{{< skew prevMinorVersion >}}** 和 - **{{< skew oldestMinorVersion >}}** - (**{{< skew latestVersion >}}** 不再支持,因为它比 - **{{< skew prevMinorVersion >}}** 版本的 `kube-apiserver` 更新) - - -### kube-controller-manager、 kube-scheduler 和 cloud-controller-manager - - -`kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` -版本不能高于 `kube-apiserver` 版本号。 -最好它们的版本号与 `kube-apiserver` 保持一致,但允许比 `kube-apiserver` -低一个小版本(为了支持在线升级)。 - - -例如: - -* 如果 `kube-apiserver` 版本号为 **{{< skew latestVersion >}}** -* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` - 版本支持 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** - - -{{< note >}} -如果在 HA 集群中,多个 `kube-apiserver` 实例版本号不一致,他们也可以跟 -任意一个 `kube-apiserver` 实例通信(例如,通过 load balancer), -但 `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` -版本可用范围会相应的减小。 -{{< /note >}} - - -例如: - -* `kube-apiserver` 实例同时存在 **{{< skew latestVersion >}}** 和 - **{{< skew prevMinorVersion >}}** 版本 -* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` - 可以通过 load balancer 与所有的 `kube-apiserver` 通信 -* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` - 可选版本为 **{{< skew prevMinorVersion >}}** - (**{{< skew latestVersion >}}** 不再支持,因为它比 **{{< skew prevMinorVersion >}}** - 版本的 `kube-apiserver` 更新) - -### kubectl - - -`kubectl` 可以比 `kube-apiserver` 高一个小版本,也可以低一个小版本。 - 
- -例如: - -* 如果 `kube-apiserver` 当前是 **{{< skew latestVersion >}}** 版本 -* `kubectl` 则支持 **{{< skew nextMinorVersion >}}**、**{{< skew latestVersion >}}** - 和 **{{< skew prevMinorVersion >}}** - - -{{< note >}} -如果 HA 集群中的多个 `kube-apiserver` 实例版本号不一致,相应的 `kubectl` 可用版本范围也会减小。 -{{< /note >}} - - -例如: - -* `kube-apiserver` 多个实例同时存在 **{{< skew latestVersion >}}** 和 - **{{< skew prevMinorVersion >}}** -* `kubectl` 可选的版本为 **{{< skew latestVersion >}}** 和 - **{{< skew prevMinorVersion >}}**(其他版本不再支持, - 因为它会比其中某个 `kube-apiserver` 实例高或低一个小版本) - - -## 支持的组件升级次序 - - -组件之间支持的版本偏差会影响组件升级的顺序。 -本节描述组件从版本 **{{< skew prevMinorVersion >}}** 到 **{{< skew latestVersion >}}** -的升级次序。 - -### kube-apiserver - - -前提条件: - - -* 单实例集群中,`kube-apiserver` 实例版本号须是 **{{< skew prevMinorVersion >}}** -* 高可用(HA)集群中,所有的 `kube-apiserver` 实例版本号必须是 - **{{< skew prevMinorVersion >}}** 或 **{{< skew latestVersion >}}** - (确保满足最新和最旧的实例小版本号相差不大于1) -* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` - 版本号必须为 **{{< skew prevMinorVersion >}}** - (确保不高于 API server 的版本,且版本号相差不大于1) -* `kubelet` 实例版本号必须是 **{{< skew prevMinorVersion >}}** 或 - **{{< skew oldestMinorVersion >}}**(确保版本号不高于 API server,且版本号相差不大于2) -* 注册的 admission 插件必须能够处理新的 `kube-apiserver` 实例发送过来的数据: - * `ValidatingWebhookConfiguration` 和 `MutatingWebhookConfiguration` 对象必须升级到可以处理 - **{{< skew latestVersion >}}** 版本新加的 REST 资源(或使用 1.15 版本提供的 - [`matchPolicy: Equivalent` 选项](/zh/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy)) - * 插件可以处理任何 **{{< skew latestVersion >}}** 版本新的 REST 资源数据和新加的字段 - - -升级 `kube-apiserver` 到 **{{< skew latestVersion >}}** - -{{< note >}} - -根据 [API 弃用策略](/zh/docs/reference/using-api/deprecation-policy/) 和 -[API 变更指南](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api_changes.md), -`kube-apiserver` 不能跨小版本号升级,即使是单实例集群也不可以。 - -{{< /note >}} - - -### kube-controller-manager、kube-scheduler 和 cloud-controller-manager - - 
-前提条件: - -* `kube-apiserver` 实例必须为 **{{< skew latestVersion >}}** - (HA 集群中,所有的`kube-apiserver` 实例必须在组件升级前完成升级) - - -升级 `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` -到 **{{< skew latestVersion >}}** - -### kubelet - - -前提条件: - -* `kube-apiserver` 实例必须为 **{{< skew latestVersion >}}** 版本 - -`kubelet` 可以升级到 **{{< skew latestVersion >}}**(或者停留在 -**{{< skew prevMinorVersion >}}** 或 **{{< skew oldestMinorVersion >}}**) - -{{< note >}} - -在对 `kubelet` 执行次版本升级时,先[腾空](/zh/docs/tasks/administer-cluster/safely-drain-node/) -节点上的 Pods。 -目前不支持原地升级 `kubelet` 的次版本。 -{{}} - -{{< warning >}} - -集群中 `kubelet` 版本号不建议比 `kube-apiserver` 低两个版本号: - -* 它们必须升级到与 `kube-apiserver` 相差不超过 1 个小版本,才可以升级其他控制面组件 -* 有可能使用低于 3 个在维护的小版本 -{{}} - - -### kube-proxy - - -* `kube-proxy` 必须与节点上的 `kubelet` 的小版本相同 -* `kube-proxy` 一定不能比 `kube-apiserver` 小版本更新 -* `kube-proxy` 最多只能比 `kube-apiserver` 早两个小版本 - - -例如: - -如果 `kube-proxy` 的版本是 **{{< skew oldestMinorVersion >}}**: - -* `kubelet` 版本必须相同,也是 **{{< skew oldestMinorVersion >}}** -* `kube-apiserver` 版本必须在 **{{< skew oldestMinorVersion >}}** 到 - **{{< skew latestVersion >}}** 之间(闭区间) diff --git a/content/zh/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md b/content/zh/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md index 66c4200ea8..867063c0f6 100644 --- a/content/zh/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md +++ b/content/zh/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md @@ -34,6 +34,19 @@ It does not mean that there is a file named `kubeconfig`. 这是一种引用配置文件的通用方式,并不意味着存在一个名为 `kubeconfig` 的文件。 {{< /note >}} + + +{{< warning >}} +只使用来源可靠的 kubeconfig 文件。使用特制的 kubeconfig 文件可能会导致恶意代码执行或文件暴露。 +如果必须使用不受信任的 kubeconfig 文件,请首先像检查 shell 脚本一样仔细检查它。 +{{< /warning>}} + + ## {{% heading "prerequisites" %}} {{< include "task-tutorial-prereqs.md" >}} @@ -41,13 +54,13 @@ It does not mean that there is a file named `kubeconfig`. 
要检查 {{< glossary_tooltip text="kubectl" term_id="kubectl" >}} 是否安装, 执行 `kubectl version --client` 命令。 kubectl 的版本应该与集群的 API 服务器 -[使用同一次版本号](/zh/docs/setup/release/version-skew-policy/#kubectl)。 +[使用同一次版本号](/zh/releases/version-skew-policy/#kubectl)。 -输出应该类似于: - -``` -Forwarding from 127.0.0.1:63753 -> 27017 -Forwarding from [::1]:63753 -> 27017 -``` - - **容器镜像**(必填):公共镜像仓库上的 Docker [容器镜像](/zh/docs/concepts/containers/images/) 或者私有镜像仓库 - (通常是 Google Container Registery 或者 Docker Hub)的 URL。容器镜像参数说明必须以冒号结尾。 + (通常是 Google Container Registry 或者 Docker Hub)的 URL。容器镜像参数说明必须以冒号结尾。 -许多[样例](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/) +许多[样例](https://github.com/kubernetes/examples/tree/master/) 提供了使用 kubectl 的介绍。完整文档请见 [kubectl 手册](/zh/docs/reference/kubectl/overview/)。 如果该应用程序部署为集群中的一个 -Pod,请参阅[下一节](#accessing-the-api-from-within-accessing-the-api-from-within-a-pod)。 +Pod,请参阅[从 Pod 内访问 API](/zh/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod)。 #### Python 客户端 {#python-client} diff --git a/content/zh/docs/tasks/administer-cluster/change-pv-reclaim-policy.md b/content/zh/docs/tasks/administer-cluster/change-pv-reclaim-policy.md index 1499ecfc1b..950e493f52 100644 --- a/content/zh/docs/tasks/administer-cluster/change-pv-reclaim-policy.md +++ b/content/zh/docs/tasks/administer-cluster/change-pv-reclaim-policy.md @@ -27,7 +27,7 @@ volume is automatically deleted when a user deletes the corresponding PersistentVolumeClaim. This automatic behavior might be inappropriate if the volume contains precious data. In that case, it is more appropriate to use the "Retain" policy. With the "Retain" policy, if a user deletes a PersistentVolumeClaim, -the corresponding PersistentVolume is not be deleted. Instead, it is moved to the +the corresponding PersistentVolume will not be deleted. Instead, it is moved to the Released phase, where all of its data can be manually recovered. 
--> ## 为什么要更改 PersistentVolume 的回收策略 diff --git a/content/zh/docs/tasks/administer-cluster/cluster-upgrade.md b/content/zh/docs/tasks/administer-cluster/cluster-upgrade.md index e9b9d0dec5..72535b27e8 100644 --- a/content/zh/docs/tasks/administer-cluster/cluster-upgrade.md +++ b/content/zh/docs/tasks/administer-cluster/cluster-upgrade.md @@ -62,14 +62,14 @@ If your cluster was deployed using the `kubeadm` tool, refer to for detailed information on how to upgrade the cluster. Once you have upgraded the cluster, remember to -[install the latest version of `kubectl`](/docs/tasks/tools/install-kubectl/). +[install the latest version of `kubectl`](/docs/tasks/tools/). --> 如果你的集群是使用 `kubeadm` 安装工具部署而来, 那么升级群集的详细信息,请参阅 [升级 kubeadm 集群](/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)。 升级集群之后,要记得 -[安装最新版本的 `kubectl`](/zh/docs/tasks/tools/install-kubectl/). +[安装最新版本的 `kubectl`](/zh/docs/tasks/tools/). ### 手动部署 {#manual-deployments} @@ -101,7 +101,7 @@ You should manually update the control plane following this sequence: 现在,你应该 -[安装最新版本的 `kubectl`](/zh/docs/tasks/tools/install-kubectl/). +[安装最新版本的 `kubectl`](/zh/docs/tasks/tools/). 
对于群集中的每个节点, [排空](/zh/docs/tasks/administer-cluster/safely-drain-node/) diff --git a/content/zh/docs/tasks/administer-cluster/controller-manager-leader-migration.md b/content/zh/docs/tasks/administer-cluster/controller-manager-leader-migration.md index f8ed2cdb4a..83a716ce44 100644 --- a/content/zh/docs/tasks/administer-cluster/controller-manager-leader-migration.md +++ b/content/zh/docs/tasks/administer-cluster/controller-manager-leader-migration.md @@ -17,7 +17,7 @@ content_type: task -{{< feature-state state="alpha" for_k8s_version="v1.21" >}} +{{< feature-state state="beta" for_k8s_version="v1.22" >}} {{< glossary_definition term_id="cloud-controller-manager" length="all" prepend="云管理控制器是">}} @@ -43,17 +43,14 @@ For a single-node control plane, or if unavailability of controller managers can 对于单节点控制平面,或者在升级过程中可以容忍控制器管理器不可用的情况,则不需要领导者迁移,并且可以忽略本指南。 -领导者迁移是一项 Alpha 阶段功能,默认情况下处于禁用状态,它需要设置控制器管理器的 `--enable-leader-migration` 参数。 -可以通过在 `kube-controller-manager` 或 `cloud-controller-manager` 上设置特性门控 -`ControllerManagerLeaderMigration` 和 `--enable-leader-migration` 来启用。 +领导者迁移可以通过在 `kube-controller-manager` 或 `cloud-controller-manager` 上设置 `--enable-leader-migration` 来启用。 领导者迁移仅在升级期间适用,并且可以安全地禁用,也可以在升级完成后保持启用状态。 本指南将引导你手动将控制平面从内置的云驱动的 `kube-controller-manager` 升级为 @@ -64,14 +61,14 @@ If you use a tool to administrator the cluster, please refer to the documentatio 假定控制平面正在运行 Kubernetes N 版本,并且要升级到 N+1 版本。 -尽管可以在同一版本中进行迁移,但理想情况下,迁移应作为升级的一部分执行,以便可以更改配置与发布保持一致。 +尽管可以在同一版本中进行迁移,但理想情况下,迁移应作为升级的一部分执行,以便可以更改配置与每个发布版本保持一致。 N 和 N+1的确切版本取决于各个云驱动。例如,如果云驱动构建了一个可与 Kubernetes 1.22 配合使用的 `cloud-controller-manager`, 则 N 可以为 1.21,N+1 可以为 1.22。 @@ -80,19 +77,21 @@ N 和 N+1的确切版本取决于各个云驱动。例如,如果云驱动构 树外云驱动必须已经构建了一个实现领导者迁移的 `cloud-controller-manager`。 如果云驱动导入了 v0.21.0 或更高版本的 `k8s.io/cloud-provider` 和 `k8s.io/controller-manager`, 则可以进行领导者迁移。 +但是,对 v0.22.0 以下的版本,领导者迁移是一项 Alpha 阶段功能,它需要启用特性门控 `ControllerManagerLeaderMigration`。 本指南假定每个控制平面节点的 kubelet 以静态 pod 的形式启动 `kube-controller-manager` 和 
`cloud-controller-manager`,静态 pod 的定义在清单文件中。 @@ -137,19 +136,21 @@ Do the same to the `system::leader-locking-cloud-controller-manager` role. ### 初始领导者迁移配置 -领导者迁移需要一个表示控制器到管理器分配状态的配置文件。 +领导者迁移可以选择使用一个表示控制器到管理器分配状态的配置文件。 目前,对于树内云驱动,`kube-controller-manager` 运行 `route`、`service` 和 `cloud-node-lifecycle`。 以下示例配置显示了分配。 +领导者迁移可以不指定配置来启用。请参阅 [默认配置](#default-configuration) 以获取更多详细信息。 + ```yaml kind: LeaderMigrationConfiguration -apiVersion: controllermanager.config.k8s.io/v1alpha1 +apiVersion: controllermanager.config.k8s.io/v1beta1 leaderName: cloud-provider-extraction-migration resourceLock: leases controllerLeaders: @@ -166,7 +167,6 @@ On each control plane node, save the content to `/etc/leadermigration.conf`, and update the manifest of `kube-controller-manager` so that the file is mounted inside the container at the same location. Also, update the same manifest to add the following arguments: -- `--feature-gates=ControllerManagerLeaderMigration=true` to enable Leader Migration which is an alpha feature - `--enable-leader-migration` to enable Leader Migration on the controller manager - `--leader-migration-config=/etc/leadermigration.conf` to set configuration file @@ -176,7 +176,6 @@ Restart `kube-controller-manager` on each node. 
At this moment, `kube-controller 并更新 `kube-controller-manager` 清单,以便将文件安装在容器内的同一位置。 另外,更新相同的清单,添加以下参数: -- `--feature-gates=ControllerManagerLeaderMigration=true` 启用领导者迁移(这是 Alpha 版功能) - `--enable-leader-migration` 在控制器管理器上启用领导者迁移 - `--leader-migration-config=/etc/leadermigration.conf` 设置配置文件 @@ -196,7 +195,7 @@ Please note `component` field of each `controllerLeaders` changing from `kube-co ```yaml kind: LeaderMigrationConfiguration -apiVersion: controllermanager.config.k8s.io/v1alpha1 +apiVersion: controllermanager.config.k8s.io/v1beta1 leaderName: cloud-provider-extraction-migration resourceLock: leases controllerLeaders: @@ -286,6 +285,22 @@ To re-enable Leader Migration, recreate the configuration file and add its mount 最后删除 `/etc/leadermigration.conf`。 要重新启用领导者迁移,请重新创建配置文件,并将其挂载和启用领导者迁移的标志添加回到 `cloud-controller-manager`。 + +### 默认配置 {#default-configuration} + +从 Kubernetes 1.22 开始,领导者迁移提供了一个默认配置,它适用于默认的控制器到管理器分配。 +可以通过设置 `--enable-leader-migration`,但不设置 `--leader-migration-config=` 来启用默认配置。 + +对于 `kube-controller-manager` 和 `cloud-controller-manager`,如果没有用参数来启用树内云驱动或者改变控制器属主, +则可以使用默认配置来避免手动创建配置文件。 + ## {{% heading "whatsnext" %}} +Static 策略的行为可以使用 `--cpu-manager-policy-options` 参数来微调。 +该参数采用一个逗号分隔的 `key=value` 策略选项列表。 + ### none 策略 `none` 策略显式地启用现有的默认 CPU 亲和方案,不提供操作系统调度器默认行为之外的亲和性策略。 通过 CFS 配额来实现 [Guaranteed pods](/zh/docs/tasks/configure-pod-container/quality-service-pod/) +和 [Burstable pods](/zh/docs/tasks/configure-pod-container/quality-service-pod/) 的 CPU 使用限制。 +#### Static 策略选项 + +如果使用 `full-pcpus-only` 策略选项,static 策略总是会分配完整的物理核心。 +你可以通过在 CPUManager 策略选项里加上 `full-pcups-only=true` 来启用该选项。 + +默认情况下,如果不使用该选项,static 策略会使用拓扑感知最适合的分配方法来分配 CPU。 +在启用了 SMT 的系统上,此策略所分配是与硬件线程对应的、独立的虚拟核。 +这会导致不同的容器共享相同的物理核心,该行为进而会导致 +[吵闹的邻居问题](https://en.wikipedia.org/wiki/Cloud_computing_issues#Performance_interference_and_noisy_neighbors)。 + +启用该选项之后,只有当一个 Pod 里所有容器的 CPU 请求都能够分配到完整的物理核心时,kubelet 才会接受该 Pod。 +如果 Pod 没有被准入,它会被置于 Failed 状态,错误消息是 `SMTAlignmentError`。 \ No newline at 
end of file diff --git a/content/zh/docs/tasks/administer-cluster/encrypt-data.md b/content/zh/docs/tasks/administer-cluster/encrypt-data.md index 6a645eb2ec..2b62f86725 100644 --- a/content/zh/docs/tasks/administer-cluster/encrypt-data.md +++ b/content/zh/docs/tasks/administer-cluster/encrypt-data.md @@ -253,11 +253,11 @@ program to retrieve the contents of your secret. ``` 其输出应该是 `mykey: bXlkYXRh`,`mydata` 数据是被加密过的,请参阅 - [解密 Secret](/zh/docs/concepts/configuration/secret#decoding-a-secret) + [解密 Secret](/zh/docs/tasks/configmap-secret/managing-secret-using-kubectl/#decoding-secret) 了解如何完全解码 Secret 内容。 {{< note >}} -上面的列表中没有包含 `kubelet.conf` 因为 kubeadm 将 kubelet 配置为自动更新证书。 +上面的列表中没有包含 `kubelet.conf`,因为 kubeadm 将 kubelet 配置为 +[自动更新证书](/docs/tasks/tls/certificate-rotation/)。 +轮换的证书位于目录 `/var/lib/kubelet/pki`。 +要修复过期的 kubelet 客户端证书,请参阅 +[kubelet 客户端证书轮换失败](/zh/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/#kubelet-client-cert)。 {{< /note >}} ## 手动更新证书 -你能随时通过 `kubeadm alpha certs renew` 命令手动更新你的证书。 +你能随时通过 `kubeadm certs renew` 命令手动更新你的证书。 `kubeadm certs renew`提供以下选项: @@ -303,10 +311,10 @@ Kubernetes 证书颁发机构不是开箱即用。 要激活内置签名者,请传递 `--cluster-signing-cert-file` 和 `--cluster-signing-key-file` 参数。 如果你正在创建一个新的集群,你可以使用 kubeadm 的 -[配置文件](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2)。 +[配置文件](/docs/reference/config-api/kubeadm-config.v1beta2/)。 ```yaml apiVersion: kubeadm.k8s.io/v1beta2 @@ -331,7 +339,7 @@ See [Create CertificateSigningRequest](/docs/reference/access-authn-authz/certif ## 通过外部 CA 更新证书 @@ -349,7 +357,7 @@ CSR 表示向 CA 请求客户的签名证书。 ### 创建证书签名请求 (CSR) -你可以通过 `kubeadm alpha certs renew --csr-only` 命令创建证书签名请求。 +你可以通过 `kubeadm certs renew --csr-only` 命令创建证书签名请求。 CSR 和随附的私钥都在输出中给出。 你可以传入一个带有 `--csr-dir` 的目录,将 CRS 输出到指定位置。 @@ -444,7 +452,7 @@ serverTLSBootstrap: true -此示例演示了一种限制名字空间中存储使用量的简便方法。 +此示例演示了如何限制一个名字空间中的存储使用量。 -{{< feature-state for_k8s_version="v1.11" state="beta" >}} +{{< feature-state 
for_k8s_version="v1.22" state="deprecated" >}} + + +{{< caution >}} +[动态 kubelet 配置](https://github.com/kubernetes/enhancements/issues/281) +已经废弃不建议使用。请选择其他方法将配置分发到集群中的节点。 +{{< /caution >}} [动态 kubelet 配置](https://github.com/kubernetes/enhancements/issues/281) -允许你在一个运行的 Kubernetes 集群上通过部署 ConfigMap -并配置每个节点来使用它来更改每个 kubelet 的配置,。 +允许你通过部署一个所有节点都会使用的 ConfigMap +达到在运行中的 Kubernetes 集群中更改 kubelet 配置的目的。 {{< warning >}} -所有 kubelet 配置参数都可以动态更改,但这对某些参数来说是不安全的。 -在决定动态更改参数之前,你需要深刻理解这种变化将如何影响你的集群的行为。 -在把一组变更推广到集群范围之前,需要在较小规模的节点集合上仔细地测试这些配置变化。 -与特定字段配置相关的建议可以在源码中 `KubeletConfiguration` -[类型文档](https://github.com/kubernetes/kubernetes/blob/release-1.11/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go)中找到。 +所有 kubelet 配置参数都可以被动态更改,但对某些参数来说这类更改是不安全的。 +在决定动态更改参数之前,你需要深刻理解这个改动将会如何影响集群的行为。 +在将变更扩散到整个集群之前,你需要先在小规模的节点集合上仔细地测试这些配置变动。 +特定字段相关的配置建议可以在文档 +[`KubeletConfiguration`](/docs/reference/config-api/kubelet-config.v1beta1/)中找到。 {{< /warning >}} ## {{% heading "prerequisites" %}} @@ -54,10 +62,10 @@ or v1.17; other combinations [aren't supported](/docs/setup/release/version-skew-policy/#kubectl). --> 你需要一个 Kubernetes 集群。 -你需要 v1.11 或更高版本的 kubectl,并以配置好与集群通信。 +你需要 v1.11 或更高版本的 kubectl,并配置好与集群的通信。 {{< version-check >}} -你的集群 API 服务器版本(如 v1.12)不能比你所用的 kubectl -的版本差不止一个小版本号。 +你的集群 API 服务器版本(如 v1.12)不能和你的 kubectl +版本相差超过一个小版本号。 例如,如果你的集群在运行 v1.16,那么你可以使用 v1.15、v1.16、v1.17 的 kubectl, 所有其他的组合都是 [不支持的](/zh/docs/setup/release/version-skew-policy/#kubectl)。 @@ -70,10 +78,10 @@ because there are manual alternatives. For each node that you're reconfiguring, you must set the kubelet `-dynamic-config-dir` flag to a writable directory. 
--> -某些例子中使用了命令行工具 [jq](https://stedolan.github.io/jq/)。 +在某些例子中使用了命令行工具 [jq](https://stedolan.github.io/jq/)。 你并不一定需要 `jq` 才能完成这些任务,因为总是有一些手工替代的方式。 -针对你所重新配置的每个节点,你必须设置 kubelet 的参数 +针对你重新配置的每个节点,你必须设置 kubelet 的标志 `-dynamic-config-dir`,使之指向一个可写的目录。 @@ -85,21 +93,21 @@ For each node that you're reconfiguring, you must set the kubelet --> ## 重配置 集群中运行节点上的 kubelet -### 基本工作流程概述 +### 基本工作流程概览 在运行中的集群中配置 kubelet 的基本工作流程如下: -1. 编写一个 YAML 或 JSON 的配置文件包含 kubelet 的配置。 +1. 编写一个包含 kubelet 配置的 YAML 或 JSON 文件。 2. 将此文件包装在 ConfigMap 中并将其保存到 Kubernetes 控制平面。 -3. 更新 kubelet 的相应节点对象以使用此 ConfigMap。 +3. 更新 kubelet 所在节点对象以使用此 ConfigMap。 -每个 kubelet 都会在其各自的节点对象上监测(Watch)配置引用。当引用更改时,kubelet 将下载新配置, -更新本地引用以引用该文件,然后退出。 -要想使该功能正常地工作,你必须运行操作系统级别的服务管理器(如 systemd), -在 kubelet 退出时将其重启。 +每个 kubelet 都会在其各自的节点对象上监测(Watch)配置引用。当引用更改时,kubelet 将下载新的配置文件, +更新本地引用指向该文件,然后退出。 +为了使该功能正常地工作,你必须运行操作系统级别的服务管理器(如 systemd), +它将会在 kubelet 退出后将其重启。 kubelet 重新启动时,将开始使用新配置。 -这个新配置完全地覆盖 `--config` 所提供的配置,并被命令行标志覆盖。 +新配置将会完全地覆盖 `--config` 所提供的配置,并被命令行标志覆盖。 新配置中未指定的值将收到适合配置版本的默认值 (e.g. `kubelet.config.k8s.io/v1beta1`),除非被命令行标志覆盖。 @@ -132,16 +140,16 @@ ConfigMap, you can observe this status to confirm that the Node is using the intended configuration. --> 节点 kubelet 配置状态可通过 `node.spec.status.config` 获取。 -一旦你已经改变了一个节点去使用新的 ConfigMap, -就可以观察此状态以确认该节点正在使用的预期配置。 +一旦你更新了一个节点去使用新的 ConfigMap, +就可以通过观察此状态来确认该节点是否正在使用预期配置。 -本文用命令 `kubectl edit` 描述节点的编辑,还有一些其他的方式去修改节点的规约, -包括更利于脚本化的工作流程的 `kubectl patch`。 +本文中使用命令 `kubectl edit` 来编辑节点,还有其他的方式可以修改节点的规约, +比如更利于脚本化工作流程的 `kubectl patch`。 {{< warning >}} -通过就地更新 ConfigMap 来更改配置是 *可能的*。 -尽管如此,这样做会导致所有配置为使用该 ConfigMap 的 kubelet 被同时更新。 +尽管通过就地更新 ConfigMap 来更改配置是 *可能的*。 +但是这样做会导致所有使用该 ConfigMap 配置的 kubelet 同时更新。 更安全的做法是按惯例将 ConfigMap 视为不可变更的,借助于 `kubectl` 的 `--append-hash` 选项逐步把更新推广到 `node.spec.configSource`。 {{< /warning >}} @@ -249,22 +257,22 @@ adapt the steps if you prefer to extract the `kubeletconfig` subobject manually. 1. 选择要重新配置的节点。在本例中,此节点的名称为 `NODE_NAME`。 2. 
使用以下命令在后台启动 kubectl 代理: - ```bash + ```shell kubectl proxy --port=8001 & ``` 3. 运行以下命令从 `configz` 端点中下载并解压配置。这个命令很长,因此在复制粘贴时要小心。 **如果你使用 zsh**,请注意常见的 zsh 配置要添加反斜杠转义 URL 中变量名称周围的大括号。 @@ -477,12 +485,12 @@ by eye. -如果发生错误,kubelet 会在 `node.status.config.error` 中显示出错误信息的结构体。 -可能的错误列在[了解节点配置错误信息](#understanding-node-config-status-errors)节。 +如果发生错误,kubelet 会在 `Node.Status.Config.Error` 中显示出错误信息的结构体。 +错误可能出现在列表[理解节点状态配置错误信息](#understanding-node-config-status-errors)中。 你可以在 kubelet 日志中搜索相同的文本以获取更多详细信息和有关错误的上下文。 -## 了解节点配置错误信息 {#understanding-node-config-status-errors} +## 理解 `Node.Status.Config.Error` 消息 {#understanding-node-config-status-errors} 下表描述了使用动态 kubelet 配置时可能发生的错误消息。 你可以在 kubelet 日志中搜索相同的文本来获取有关错误的其他详细信息和上下文。 @@ -646,11 +654,15 @@ internal failure, see Kubelet log for details | 在对配置进行同步的循 ## {{% heading "whatsnext" %}} - 关于如何通过配置文件来配置 kubelet 的更多细节信息,可参阅 [使用配置文件设置 kubelet 参数](/zh/docs/tasks/administer-cluster/kubelet-config-file). - 阅读 API 文档中 [`NodeConfigSource`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#nodeconfigsource-v1-core) 说明 - +- 查阅[`KubeletConfiguration`](/docs/reference/config-api/kubelet-config.v1beta1/)文献进一步了解 kubelet + 配置信息。 \ No newline at end of file diff --git a/content/zh/docs/tasks/configmap-secret/managing-secret-using-config-file.md b/content/zh/docs/tasks/configmap-secret/managing-secret-using-config-file.md index cce9f50f4f..7e155136ed 100644 --- a/content/zh/docs/tasks/configmap-secret/managing-secret-using-config-file.md +++ b/content/zh/docs/tasks/configmap-secret/managing-secret-using-config-file.md @@ -5,7 +5,7 @@ weight: 20 description: 使用资源配置文件创建 Secret 对象。 --- - 进一步阅读 [Secret 概念](/zh/docs/concepts/configuration/secret/) - 了解如何[使用 `kubectl` 命令管理 Secret](/zh/docs/tasks/configmap-secret/managing-secret-using-kubectl/) diff --git a/content/zh/docs/tasks/configmap-secret/managing-secret-using-kubectl.md b/content/zh/docs/tasks/configmap-secret/managing-secret-using-kubectl.md index a4be3c9d04..8e2dd7d202 100644 
--- a/content/zh/docs/tasks/configmap-secret/managing-secret-using-kubectl.md +++ b/content/zh/docs/tasks/configmap-secret/managing-secret-using-kubectl.md @@ -104,7 +104,7 @@ run the following command: 可以像下面一样执行命令: ```shell -kubectl create secret generic dev-db-secret \ +kubectl create secret generic db-user-pass \ --from-literal=username=devuser \ --from-literal=password='S!B\*d$zDsb=' ``` diff --git a/content/zh/docs/tasks/configmap-secret/managing-secret-using-kustomize.md b/content/zh/docs/tasks/configmap-secret/managing-secret-using-kustomize.md index e46a1d3ec2..de6967d438 100644 --- a/content/zh/docs/tasks/configmap-secret/managing-secret-using-kustomize.md +++ b/content/zh/docs/tasks/configmap-secret/managing-secret-using-kustomize.md @@ -5,7 +5,7 @@ weight: 30 description: 使用 kustomization.yaml 文件创建 Secret 对象。 --- - 进一步阅读 [Secret 概念](/zh/docs/concepts/configuration/secret/) - 了解如何[使用 `kubectl` 命令管理 Secret](/zh/docs/tasks/configmap-secret/managing-secret-using-kubectl/) diff --git a/content/zh/docs/tasks/debug-application-cluster/audit.md b/content/zh/docs/tasks/debug-application-cluster/audit.md index 595d4d66e2..6ba032c030 100644 --- a/content/zh/docs/tasks/debug-application-cluster/audit.md +++ b/content/zh/docs/tasks/debug-application-cluster/audit.md @@ -168,7 +168,7 @@ rules: 如果你在打磨自己的审计配置文件,你可以使用为 Google Container-Optimized OS 设计的审计配置作为出发点。你可以参考 -[configure-helper.sh](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh) +[configure-helper.sh](https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/gci/configure-helper.sh) 脚本,该脚本能够生成审计策略文件。你可以直接在脚本中看到审计策略的绝大部份内容。 你也可以参考 [`Policy` 配置参考](/zh/docs/reference/config-api/apiserver-audit.v1/#audit-k8s-io-v1-Policy) diff --git a/content/zh/docs/tasks/debug-application-cluster/debug-application.md b/content/zh/docs/tasks/debug-application-cluster/debug-application.md index d2e95ab2b4..2e71c22af0 100644 --- 
a/content/zh/docs/tasks/debug-application-cluster/debug-application.md +++ b/content/zh/docs/tasks/debug-application-cluster/debug-application.md @@ -124,7 +124,7 @@ Once your pod has been scheduled, the methods described in [Debug Running Pods]( #### Pod 处于 Crashing 或别的不健康状态 一旦 Pod 被调度,就可以采用 -[调试运行中的 Pod](/zh/docs/concepts/configuration/manage-resources-containers/) +[调试运行中的 Pod](/zh/docs/tasks/debug-application-cluster/debug-running-pod/) 中的方法来进一步调试。 ## 使用临时调试容器来进行调试 {#ephemeral-container} -{{< feature-state state="alpha" for_k8s_version="v1.18" >}} +{{< feature-state state="alpha" for_k8s_version="v1.22" >}} 当由于容器崩溃或容器镜像不包含调试程序(例如[无发行版镜像](https://github.com/GoogleContainerTools/distroless)等) 而导致 `kubectl exec` 无法运行时,{{< glossary_tooltip text="临时容器" term_id="ephemeral-container" >}}对于排除交互式故障很有用。 -从 'v1.18' 版本开始,'kubectl' 有一个可以创建用于调试的临时容器的 alpha 命令。 @@ -234,7 +231,8 @@ You can view the state of the newly created ephemeral container using `kubectl d {{< note >}} {{< glossary_tooltip text="容器运行时" term_id="container-runtime" >}}必须支持`--target`参数。 -如果不支持,则临时容器可能不会启动,或者可能使用隔离的进程命名空间启动。 +如果不支持,则临时容器可能不会启动,或者可能使用隔离的进程命名空间启动, +以便 `ps` 不显示其他容器内的进程。 {{< /note >}} 你可以使用 `kubectl describe` 查看新创建的临时容器的状态: diff --git a/content/zh/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md b/content/zh/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md index 93c6cfc31c..19f29d781b 100644 --- a/content/zh/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md +++ b/content/zh/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md @@ -54,11 +54,11 @@ the container starts. 
kubectl create -f https://k8s.io/examples/debug/termination.yaml ``` - - YAML 文件中,在 `cmd` 和 `args` 字段,你可以看到容器休眠 10 秒然后将 "Sleep expired" + YAML 文件中,在 `command` 和 `args` 字段,你可以看到容器休眠 10 秒然后将 "Sleep expired" 写入 `/dev/termination-log` 文件。 容器写完 "Sleep expired" 消息后就终止了。 diff --git a/content/zh/docs/tasks/debug-application-cluster/logging-stackdriver.md b/content/zh/docs/tasks/debug-application-cluster/logging-stackdriver.md deleted file mode 100644 index aacac3e5f0..0000000000 --- a/content/zh/docs/tasks/debug-application-cluster/logging-stackdriver.md +++ /dev/null @@ -1,629 +0,0 @@ ---- -title: 使用 Stackdriver 生成日志 -content_type: concept ---- - - - - - - -在阅读这篇文档之前,强烈建议你先熟悉一下 [Kubernetes 日志概况](/zh/docs/concepts/cluster-administration/logging) - - - -{{< note >}} -默认情况下,Stackdriver 日志机制仅收集容器的标准输出和标准错误流。 -如果要收集你的应用程序写入一个文件(例如)的任何日志,请参见 Kubernetes 日志概述中的 [sidecar 方式](/zh/docs/concepts/cluster-administration/logging#sidecar-container-with-a-logging-agent) -{{< /note >}} - - - - - -## 部署 {#deploying} - - -为了接收日志,你必须将 Stackdriver 日志代理部署到集群中的每个节点。 -此代理是一个已配置的 `fluentd`,其配置存在一个 `ConfigMap` 中,且实例使用 Kubernetes 的 `DaemonSet` 进行管理。 -`ConfigMap` 和 `DaemonSet` 的实际部署,取决你的集群设置。 - - - -### 部署到一个新的集群 - -#### Google Kubernetes Engine - - -对于部署在 Google Kubernetes Engine 上的集群,Stackdriver 是默认的日志解决方案。 -Stackdriver 日志机制会默认部署到你的新集群上,除非你明确地不选择。 - - - -#### 其他平台 - - -为了将 Stackdriver 日志机制部署到你正在使用 `kube-up.sh` 创建的*新*集群上,执行如下操作: - - -1. 设置环境变量 `KUBE_LOGGING_DESTINATION` 为 `gcp`。 -1. **如果不是跑在 GCE 上**,在 `KUBE_NODE_LABELS` 变量中包含 `beta.kubernetes.io/fluentd-ds-ready=true`。 - - - -集群启动后,每个节点都应该运行 Stackdriver 日志代理。 -`DaemonSet` 和 `ConfigMap` 作为附加组件进行配置。 -如果你不是使用 `kube-up.sh`,可以考虑不使用预先配置的日志方案启动集群,然后部署 Stackdriver 日志代理到正在运行的集群。 - - - -{{< warning >}} -除了 Google Kubernetes Engine,Stackdriver 日志守护进程在其他的平台有已知的问题。 -请自行承担风险。 -{{< /warning >}} - - -### 部署到一个已知集群 - - -1. 
在每个节点上打标签(如果尚未存在) - - - Stackdriver 日志代理部署使用节点标签来确定应该将其分配到给哪些节点。 - 引入这些标签是为了区分 Kubernetes 1.6 或更高版本的节点。 - 如果集群是在配置了 Stackdriver 日志机制的情况下创建的,并且节点的版本为 1.5.X 或更低版本,则它将使用 fluentd 用作静态容器。 - 节点最多只能有一个 fluentd 实例,因此只能将标签打在未分配过 fluentd pod 的节点上。 - 你可以通过运行 `kubectl describe` 来确保你的节点被正确标记,如下所示: - - ``` - kubectl describe node $NODE_NAME - ``` - - 输出应类似于如下内容: - - ``` - Name: NODE_NAME - Role: - Labels: beta.kubernetes.io/fluentd-ds-ready=true - ... - ``` - - 确保输出内容包含 `beta.kubernetes.io/fluentd-ds-ready=true` 标签。 - 如果不存在,则可以使用 `kubectl label` 命令添加,如下所示: - - ``` - kubectl label node $NODE_NAME beta.kubernetes.io/fluentd-ds-ready=true - ``` - - - {{< note >}} - 如果节点发生故障并且必须重新创建,则必须将标签重新打在重新创建了的节点。 - 为了让此操作更便捷,你可以在节点启动脚本中使用 Kubelet 的命令行参数给节点添加标签。 - {{< /note >}} - - -2. 通过运行以下命令,部署一个带有日志代理配置的 `ConfigMap`: - - ``` - kubectl apply -f https://k8s.io/examples/debug/fluentd-gcp-configmap.yaml - ``` - - 该命令在 `default` 命名空间中创建 `ConfigMap`。你可以在创建 `ConfigMap` 对象之前手动下载文件并进行更改。 - - -3. 通过运行以下命令,部署日志代理的 `DaemonSet`: - - ``` - kubectl apply -f https://k8s.io/examples/debug/fluentd-gcp-ds.yaml - ``` - - 你也可以在使用前下载和编辑此文件。 - - -## 验证日志代理部署 - - -部署 Stackdriver `DaemonSet` 之后,你可以通过运行以下命令来查看日志代理的部署状态: - -```shell -kubectl get ds --all-namespaces -``` - - -如果你的集群中有 3 个节点,则输出应类似于如下: - -``` -NAMESPACE NAME DESIRED CURRENT READY NODE-SELECTOR AGE -... -default fluentd-gcp-v2.0 3 3 3 beta.kubernetes.io/fluentd-ds-ready=true 5m -... 
-``` - - -要了解使用 Stackdriver 进行日志记录的工作方式,请考虑以下具有日志生成的 pod 定义 [counter-pod.yaml](/examples/debug/counter-pod.yaml): - -{{< codenew file="debug/counter-pod.yaml" >}} - - -这个 pod 定义里有一个容器,该容器运行一个 bash 脚本,脚本每秒写一次计数器的值和日期时间,并无限期地运行。 -让我们在默认命名空间中创建此 pod。 - -```shell -kubectl apply -f https://k8s.io/examples/debug/counter-pod.yaml -``` - - -你可以观察到正在运行的 pod: - -```shell -kubectl get pods -``` -``` -NAME READY STATUS RESTARTS AGE -counter 1/1 Running 0 5m -``` - - -在短时间内,你可以观察到 "pending" 的 pod 的状态,因为 kubelet 必须先下载容器镜像。 -当 pod 状态变为 `Running` 时,你可以使用 `kubectl logs` 命令查看此 counter pod 的输出。 - -```shell -kubectl logs counter -``` -``` -0: Mon Jan 1 00:00:00 UTC 2001 -1: Mon Jan 1 00:00:01 UTC 2001 -2: Mon Jan 1 00:00:02 UTC 2001 -... -``` - - -正如日志概览所述,此命令从容器日志文件中获取日志项。 -如果该容器被 Kubernetes 杀死然后重新启动,你仍然可以访问前一个容器的日志。 -但是,如果将 Pod 从节点中驱逐,则日志文件会丢失。让我们通过删除当前运行的 counter 容器来演示这一点: - -```shell -kubectl delete pod counter -``` -``` -pod "counter" deleted -``` - - -然后重建它: - -```shell -kubectl create -f https://k8s.io/examples/debug/counter-pod.yaml -``` -``` -pod/counter created -``` - - -一段时间后,你可以再次从 counter pod 访问日志: - -```shell -kubectl logs counter -``` -``` -0: Mon Jan 1 00:01:00 UTC 2001 -1: Mon Jan 1 00:01:01 UTC 2001 -2: Mon Jan 1 00:01:02 UTC 2001 -... 
-``` - - -如预期的那样,日志中仅出现最近的日志记录。 -但是,对于实际应用程序,你可能希望能够访问所有容器的日志,特别是出于调试的目的。 -这就是先前启用的 Stackdriver 日志机制可以提供帮助的地方。 - - -## 查看日志 - - -Stackdriver 日志代理为每个日志项关联元数据,供你在后续的查询中只选择感兴趣的消息: -例如,来自某个特定 Pod 的消息。 - - -元数据最重要的部分是资源类型和日志名称。 -容器日志的资源类型为 `container`,在用户界面中名为 `GKE Containers`(即使 Kubernetes 集群不在 Google Kubernetes Engine 上)。 -日志名称是容器的名称,因此,如果你有一个包含两个容器的 pod,在 spec 中名称定义为 `container_1` 和 `container_2`,则它们的日志的名称分别为 `container_1` 和 `container_2`。 - - -系统组件的资源类型为 `compute`,在接口中名为 `GCE VM Instance`。 -系统组件的日志名称是固定的。 -对于 Google Kubernetes Engine 节点,系统组件中的每个日志项都具有以下日志名称之一: - -* docker -* kubelet -* kube-proxy - - -你可以在[Stackdriver 专用页面](https://cloud.google.com/logging/docs/view/overview) -上了解有关查看日志的更多信息。 - - -查看日志的一种可能方法是使用 [Google Cloud SDK](https://cloud.google.com/sdk/) -中的 [`gcloud logging`](https://cloud.google.com/logging/docs/reference/tools/gcloud-logging) -命令行接口。 -它使用 Stackdriver 日志机制的 -[过滤语法](https://cloud.google.com/logging/docs/view/advanced_filters)查询特定日志。 -例如,你可以运行以下命令: - -```none -gcloud beta logging read 'logName="projects/$YOUR_PROJECT_ID/logs/count"' --format json | jq '.[].textPayload' -``` -``` -... -"2: Mon Jan 1 00:01:02 UTC 2001\n" -"1: Mon Jan 1 00:01:01 UTC 2001\n" -"0: Mon Jan 1 00:01:00 UTC 2001\n" -... 
-"2: Mon Jan 1 00:00:02 UTC 2001\n" -"1: Mon Jan 1 00:00:01 UTC 2001\n" -"0: Mon Jan 1 00:00:00 UTC 2001\n" -``` - - -如你所见,尽管 kubelet 已经删除了第一个容器的日志,日志中仍会包含 counter -容器第一次和第二次运行时输出的消息。 - - -### 导出日志 - - -你可以将日志导出到 [Google Cloud Storage](https://cloud.google.com/storage/) 或 -[BigQuery](https://cloud.google.com/bigquery/) 进行进一步的分析。 -Stackdriver 日志机制提供了接收器(Sink)的概念,你可以在其中指定日志项的存放地。 -可在 Stackdriver [导出日志页面](https://cloud.google.com/logging/docs/export/configure_export_v2) -上获得更多信息。 - - -## 配置 Stackdriver 日志代理 - - -有时默认的 Stackdriver 日志机制安装可能无法满足你的需求,例如: - - -* 你可能需要添加更多资源,因为默认的行为表现无法满足你的需求。 -* 你可能需要引入额外的解析机制以便从日志消息中提取更多元数据,例如严重性或源代码引用。 -* 你可能想要将日志不仅仅发送到 Stackdriver 或仅将部分日志发送到 Stackdriver。 - - -在这种情况下,你需要更改 `DaemonSet` 和 `ConfigMap` 的参数。 - - -### 先决条件 - - -如果使用的是 GKE,并且集群中启用了 Stackdriver 日志机制,则无法更改其配置, -因为它是由 GKE 管理和支持的。 -但是,你可以禁用默认集成的日志机制并部署自己的。 - - -{{< note >}} -你将需要自己支持和维护新部署的配置了:更新映像和配置、调整资源等等。 -{{< /note >}} - - -若要禁用默认的日志记录集成,请使用以下命令: - -``` -gcloud beta container clusters update --logging-service=none CLUSTER -``` - - -你可以在[部署部分](#deploying)中找到有关如何将 Stackdriver 日志代理安装到 -正在运行的集群中的说明。 - - -### 更改 `DaemonSet` 参数 {#changing-daemonset-parameters} - - -当集群中有 Stackdriver 日志机制的 `DaemonSet` 时,你只需修改其 spec 中的 -`template` 字段,DaemonSet 控制器将为你管理 Pod。 -例如,假设你按照上面的描述已经安装了 Stackdriver 日志机制。 -现在,你想更改内存限制,来给 fluentd 提供的更多内存,从而安全地处理更多日志。 - - -获取集群中运行的 `DaemonSet` 的 spec: - -```shell -kubectl get ds fluentd-gcp-v2.0 --namespace kube-system -o yaml > fluentd-gcp-ds.yaml -``` - - -然后在 spec 文件中编辑资源需求,并使用以下命令更新 apiserver 中的 `DaemonSet` 对象: - -```shell -kubectl replace -f fluentd-gcp-ds.yaml -``` - - -一段时间后,Stackdriver 日志代理的 pod 将使用新配置重新启动。 - - -### 更改 fluentd 参数 - - -Fluentd 的配置存在 `ConfigMap` 对象中。 -它实际上是一组合并在一起的配置文件。 -你可以在[官方网站](https://docs.fluentd.org)上了解 fluentd 的配置。 - - -假设你要向配置添加新的解析逻辑,以便 fluentd 可以理解默认的 Python 日志记录格式。 -一个合适的 fluentd 过滤器类似如下: - -``` - - type parser - format /^(?\w):(?\w):(?.*)/ - reserve_data true - suppress_parse_error_log true - key_name log - -``` - - 
-现在,你需要将其放入配置中,并使 Stackdriver 日志代理感知它。 -通过运行以下命令,获取集群中当前版本的 Stackdriver 日志机制的 `ConfigMap`: - -```shell -kubectl get cm fluentd-gcp-config --namespace kube-system -o yaml > fluentd-gcp-configmap.yaml -``` - - -然后在 `containers.input.conf` 键的值中,在 `source` 部分之后插入一个新的过滤器。 - - - -{{< note >}} -顺序很重要。 -{{< /note >}} - - -在 apiserver 中更新 `ConfigMap` 比更新 `DaemonSet` 更复杂。 -最好考虑 `ConfigMap` 是不可变的。 -如果是这样,要更新配置,你应该使用新名称创建 `ConfigMap`,然后使用 -[上面的指南](#changing-daemonset-parameters)将 `DaemonSet` 更改为指向它。 - - -### 添加 fluentd 插件 - - -Fluentd 用 Ruby 编写,并允许使用 [plugins](https://www.fluentd.org/plugins) 扩展其功能。 -如果要使用默认的 Stackdriver 日志机制容器镜像中未包含的插件,则必须构建自定义镜像。 -假设你要为来自特定容器添加 Kafka 信息接收器,以进行其他处理。 -你可以复用默认的[容器镜像源](https://git.k8s.io/contrib/fluentd/fluentd-gcp-image),并仅添加少量更改: - - -* 将 Makefile 更改为指向你的容器仓库,例如 `PREFIX=gcr.io/`。 -* 将你的依赖项添加到 Gemfile 中,例如 `gem 'fluent-plugin-kafka'`。 - - -然后在该目录运行 `make build push`。 -在更新 `DaemonSet` 以使用新镜像后,你就可以使用在 fluentd 配置中安装的插件了。 - diff --git a/content/zh/docs/tasks/debug-application-cluster/monitor-node-health.md b/content/zh/docs/tasks/debug-application-cluster/monitor-node-health.md index 4e81fd8567..5d4a265b10 100644 --- a/content/zh/docs/tasks/debug-application-cluster/monitor-node-health.md +++ b/content/zh/docs/tasks/debug-application-cluster/monitor-node-health.md @@ -22,7 +22,7 @@ To learn how to install and use Node Problem Detector, see [Node Problem Detector project documentation](https://github.com/kubernetes/node-problem-detector). 
--> -*节点问题检测器(Node Problem Detector)*是一个守护程序,用于监视和报告节点的健康状况。 +*节点问题检测器(Node Problem Detector)* 是一个守护程序,用于监视和报告节点的健康状况。 你可以将节点问题探测器以 `DaemonSet` 或独立守护程序运行。 节点问题检测器从各种守护进程收集节点问题,并以 [NodeCondition](/zh/docs/concepts/architecture/nodes/#condition) 和 @@ -203,7 +203,7 @@ Kernel monitor watches the kernel log and detects known kernel issues following --> ## 内核监视器 -*内核监视器(Kernel Monitor)*是节点问题检测器中支持的系统日志监视器守护进程。 +*内核监视器(Kernel Monitor)* 是节点问题检测器中支持的系统日志监视器守护进程。 内核监视器观察内核日志并根据预定义规则检测已知的内核问题。 {{< note >}} 要使聚合层在你的环境中正常工作以支持代理服务器和扩展 apiserver 之间的相互 TLS 身份验证, 需要满足一些设置要求。Kubernetes 和 kube-apiserver 具有多个 CA, -因此请确保代理是由聚合层 CA 签名的,而不是由主 CA 签名的。 +因此请确保代理是由聚合层 CA 签名的,而不是由 Kubernetes 通用 CA 签名的。 {{< /note >}} 如果转换失败,则 Webhook 应该返回包含以下字段的 `response` 节: -*`uid`,从发送到 Webhook 的 `request.uid` 复制而来 -*`result`,设置为 `{"status": "Failed"}` +* `uid`,从发送到 Webhook 的 `request.uid` 复制而来 +* `result`,设置为 `{"status": "Failed"}` {{< warning >}} 8. 确保你的扩展 apiserver 从该卷中加载了那些证书,并在 HTTPS 握手过程中使用它们。 -9. 在你的命令空间中创建一个 Kubernetes 服务账号。 +9. 在你的命名空间中创建一个 Kubernetes 服务账号。 10. 为资源允许的操作创建 Kubernetes 集群角色。 -11. 用你命令空间中的服务账号创建一个 Kubernetes 集群角色绑定,绑定到你创建的角色上。 -12. 用你命令空间中的服务账号创建一个 Kubernetes 集群角色绑定,绑定到 `system:auth-delegator` +11. 用你命名空间中的服务账号创建一个 Kubernetes 集群角色绑定,绑定到你创建的角色上。 +12. 用你命名空间中的服务账号创建一个 Kubernetes 集群角色绑定,绑定到 `system:auth-delegator` 集群角色,以将 auth 决策委派给 Kubernetes 核心 API 服务器。 -13. 以你命令空间中的服务账号创建一个 Kubernetes 集群角色绑定,绑定到 +13. 
以你命名空间中的服务账号创建一个 Kubernetes 集群角色绑定,绑定到 `extension-apiserver-authentication-reader` 角色。 这将让你的扩展 api-server 能够访问 `extension-apiserver-authentication` configmap。 @@ -114,4 +114,3 @@ Alternatively, you can use an existing 3rd party solution, such as [apiserver-bu 并启用 apiserver 的相关参数。 * 高级概述,请参阅[使用聚合层扩展 Kubernetes API](/zh/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation)。 * 了解如何[使用 Custom Resource Definition 扩展 Kubernetes API](/zh/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/)。 - diff --git a/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md index b4039ee281..0f2f5e90b6 100644 --- a/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md +++ b/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -230,9 +230,9 @@ It takes a [Cron](https://en.wikipedia.org/wiki/Cron) format string, such as `0 格式串,例如 `0 * * * *` or `@hourly` ,作为它的任务被创建和执行的调度时间。 -该格式也包含了扩展的 `vixie cron` 步长值。 +该格式也包含了扩展的 "Vixie cron" 步长值。 [FreeBSD 手册](https://www.freebsd.org/cgi/man.cgi?crontab%285%29)中解释如下: -Kubernets 接收清单文件并执行你所创建的 Job。 +Kubernetes 接收清单文件并执行你所创建的 Job。 + +{{< feature-state for_k8s_version="v1.20" state="alpha" >}} + + + + +从 Kubernetes v1.20 开始,kubelet 可以使用 exec 插件动态检索容器镜像注册中心的凭据。 +kubelet 和 exec 插件使用 Kubernetes 版本化 API 通过标准输入输出(标准输入、标准输出和标准错误)通信。 +这些插件允许 kubelet 动态请求容器注册中心的凭据,而不是将静态凭据存储在磁盘上。 +例如,插件可能会与本地元数据通信,以检索 kubelet 正在拉取的镜像的短期凭据。 + + +如果以下任一情况属实,你可能对此功能感兴趣: + +* 需要调用云提供商的 API 来检索注册中心的身份验证信息。 +* 凭据的到期时间很短,需要频繁请求新凭据。 +* 将注册中心凭据存储在磁盘或者 imagePullSecret 是不可接受的。 + +## {{% heading "prerequisites" %}} + + +* kubelet 镜像凭证提供程序在 v1.20 版本作为 alpha 功能引入。 + 与其他 alpha 功能一样,当前仅当在 kubelet 启动 `KubeletCredentialProviders` 特性门禁才能使该功能正常工作。 +* 凭据提供程序 exec 插件的工作实现。你可以构建自己的插件或使用云提供商提供的插件。 + + + + +## 在节点上安装插件 {#installing-plugins-on-nodes} + +凭据提供程序插件是将由 kubelet 运行的可执行二进制文件。 +确保插件二进制存在于你的集群的每个节点上,并存储在已知目录中。 +稍后配置 kubelet 标志需要该目录。 + + +## 配置 kubelet {#configuring-the-kubelet} + 
+为了使用这个特性,kubelet 需要设置以下两个标志: +* `--image-credential-provider-config` —— 凭据提供程序插件配置文件的路径。 +* `--image-credential-provider-bin-dir` —— 凭据提供程序插件二进制文件所在目录的路径。 + + +### 配置 kubelet 凭据提供程序 {#configure-a-kubelet-credential-provider} + +kubelet 会读取传入 `--image-credential-provider-config` 的配置文件文件, +以确定应该为哪些容器镜像调用哪些 exec 插件。 +如果你正在使用基于 [ECR](https://aws.amazon.com/ecr/) 插件, +这里有个样例配置文件你可能最终会使用到: + +```yaml +kind: CredentialProviderConfig +apiVersion: kubelet.config.k8s.io/v1alpha1 +# providers 是将由 kubelet 启用的凭证提供程序插件列表。 +# 多个提供程序可能与单个镜像匹配,在这种情况下,来自所有提供程序的凭据将返回到 kubelet。 +# 如果为单个镜像调用多个提供程序,则结果会合并。 +# 如果提供程序返回重叠的身份验证密钥,则使用提供程序列表中较早的值。 +providers: + # name 是凭据提供程序的必需名称。 + # 它必须与 kubelet 看到的提供程序可执行文件的名称相匹配。 + # 可执行文件必须在 kubelet 的 bin 目录中 + # (由 --image-credential-provider-bin-dir 标志设置)。 + - name: ecr + # matchImages 是一个必需的字符串列表,用于匹配镜像以确定是否应调用此提供程序。 + # 如果其中一个字符串与 kubelet 请求的镜像相匹配,则该插件将被调用并有机会提供凭据。 + # 镜像应包含注册域和 URL 路径。 + # + # matchImages 中的每个条目都是一个模式,可以选择包含端口和路径。 + # 通配符可以在域中使用,但不能在端口或路径中使用。 + # 支持通配符作为子域(例如“*.k8s.io”或“k8s.*.io”)和顶级域(例如“k8s.*”)。 + # 还支持匹配部分子域,如“app*.k8s.io”。 + # 每个通配符只能匹配一个子域段,因此 *.io 不匹配 *.k8s.io。 + # + # 当以下所有条件都为真时,镜像和 matchImage 之间存在匹配: + # - 两者都包含相同数量的域部分并且每个部分都匹配。 + # - imageMatch 的 URL 路径必须是目标镜像 URL 路径的前缀。 + # - 如果 imageMatch 包含端口,则该端口也必须在图像中匹配。 + # + # matchImages 的示例值: + # - 123456789.dkr.ecr.us-east-1.amazonaws.com + # - *.azurecr.io + # - gcr.io + # - *.*.registry.io + # - registry.io:8080/path + matchImages: + - "*.dkr.ecr.*.amazonaws.com" + - "*.dkr.ecr.*.amazonaws.cn" + - "*.dkr.ecr-fips.*.amazonaws.com" + - "*.dkr.ecr.us-iso-east-1.c2s.ic.gov" + - "*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov" + # defaultCacheDuration 是插件将在内存中缓存凭据的默认持续时间 + # 如果插件响应中未提供缓存持续时间。此字段是必需的。 + defaultCacheDuration: "12h" + # exec CredentialProviderRequest 的必需输入版本。 + # 返回的 CredentialProviderResponse 必须使用与输入相同的编码版本。当前支持的值为: + # - credentialprovider.kubelet.k8s.io/v1alpha1 + apiVersion: credentialprovider.kubelet.k8s.io/v1alpha1 + # 执行命令时传递给命令的参数。 + # +可选 + args: + - 
get-credentials + # env 定义了额外的环境变量以暴露给进程。 + # 这些与主机环境以及 client-go 用于将参数传递给插件的变量结合在一起。 + # +可选 + env: + - name: AWS_PROFILE + value: example_profile +``` + + +`providers` 字段是 kubelet 使用的已启用插件列表。每个条目都有几个必填字段: +* `name`:插件的名称,必须与传入`--image-credential-provider-bin-dir` + 的目录中存在的可执行二进制文件的名称相匹配。 +* `matchImages`:用于匹配图像以确定是否应调用此提供程序的字符串列表。更多相关信息如下。 +* `defaultCacheDuration`:如果插件未指定缓存持续时间,kubelet 将在内存中缓存凭据的默认持续时间。 +* `apiVersion`:kubelet 和 exec 插件在通信时将使用的 api 版本。 + +每个凭证提供程序也可以被赋予可选的参数和环境变量。 +咨询插件实现者以确定给定插件需要哪些参数和环境变量集。 + + +#### 配置镜像匹配 {#configure-image-matching} + +kubelet 使用每个凭证提供程序的 `matchImages` 字段来确定是否应该为 Pod 正在使用的给定镜像调用插件。 +`matchImages` 中的每个条目都是一个镜像模式,可以选择包含端口和路径。 +通配符可以在域中使用,但不能在端口或路径中使用。 +支持通配符作为子域,如 `*.k8s.io` 或 `k8s.*.io`,以及顶级域,如 `k8s.*`。 +还支持匹配部分子域,如 `app*.k8s.io`。每个通配符只能匹配一个子域段, +因此 `*.io` 不匹配 `*.k8s.io`。 + + +当以下所有条件都为真时,镜像名称和 `matchImage` 条目之间存在匹配: + +* 两者都包含相同数量的域部分并且每个部分都匹配。 +* 匹配图片的 URL 路径必须是目标图片 URL 路径的前缀。 +* 如果 imageMatch 包含端口,则该端口也必须在镜像中匹配。 + +`matchImages` 模式的一些示例值: +* `123456789.dkr.ecr.us-east-1.amazonaws.com` +* `*.azurecr.io` +* `gcr.io` +* `*.*.registry.io` +* `foo.registry.io:8080/path` diff --git a/content/zh/docs/tasks/manage-daemon/update-daemon-set.md b/content/zh/docs/tasks/manage-daemon/update-daemon-set.md index 1b889c0aa6..6539786c25 100644 --- a/content/zh/docs/tasks/manage-daemon/update-daemon-set.md +++ b/content/zh/docs/tasks/manage-daemon/update-daemon-set.md @@ -19,10 +19,7 @@ This page shows how to perform a rolling update on a DaemonSet. 
## {{% heading "prerequisites" %}} - -* Kubernetes 1.6 或者更高版本中才支持 DaemonSet 滚动更新功能。 +{{< include "task-tutorial-prereqs.md" >}} @@ -36,20 +33,20 @@ DaemonSet has two update strategy types: DaemonSet 有两种更新策略: -* OnDelete: 使用 `OnDelete` 更新策略时,在更新 DaemonSet 模板后,只有当你手动删除老的 +* `OnDelete`: 使用 `OnDelete` 更新策略时,在更新 DaemonSet 模板后,只有当你手动删除老的 DaemonSet pods 之后,新的 DaemonSet Pod *才会*被自动创建。跟 Kubernetes 1.6 以前的版本类似。 -* RollingUpdate: 这是默认的更新策略。使用 `RollingUpdate` 更新策略时,在更新 DaemonSet 模板后, +* `RollingUpdate`: 这是默认的更新策略。使用 `RollingUpdate` 更新策略时,在更新 DaemonSet 模板后, 老的 DaemonSet pods 将被终止,并且将以受控方式自动创建新的 DaemonSet pods。 更新期间,最多只能有 DaemonSet 的一个 Pod 运行于每个节点上。 @@ -64,12 +61,18 @@ To enable the rolling update feature of a DaemonSet, you must set its 要启用 DaemonSet 的滚动更新功能,必须设置 `.spec.updateStrategy.type` 为 `RollingUpdate`。 你可能想设置 -[`.spec.updateStrategy.rollingUpdate.maxUnavailable`](/zh/docs/concepts/workloads/controllers/deployment/#max-unavailable) (默认为 1) 和 -[`.spec.minReadySeconds`](/zh/docs/concepts/workloads/controllers/deployment/#min-ready-seconds) (默认为 0)。 +[`.spec.updateStrategy.rollingUpdate.maxUnavailable`](/zh/docs/concepts/workloads/controllers/deployment/#max-unavailable) (默认为 1), +[`.spec.minReadySeconds`](/zh/docs/concepts/workloads/controllers/deployment/#min-ready-seconds) (默认为 0) 和 +[`.spec.maxSurge`](/zh/docs/concepts/workloads/controllers/deployment/#max-surge) (一种 Beta 阶段的特性,默认为 25%) DaemonSet 滚动更新可能会卡住,其 Pod 至少在某个节点上无法调度运行。 -当节点上[可用资源耗尽](/zh/docs/tasks/administer-cluster/out-of-resource/)时, +当节点上[可用资源耗尽](/zh/docs/concepts/scheduling-eviction/node-pressure-eviction/)时, 这是可能的。 发生这种情况时,通过对 `kubectl get nodes` 和下面命令行的输出作比较, @@ -328,10 +331,9 @@ kubectl delete ds fluentd-elasticsearch -n kube-system ## {{% heading "whatsnext" %}} -* 查看[任务:在 DaemonSet 上执行回滚](/zh/docs/tasks/manage-daemon/rollback-daemon-set/) -* 查看[概念:创建 DaemonSet 以收养现有 DaemonSet Pod](/zh/docs/concepts/workloads/controllers/daemonset/) +* 查看[在 DaemonSet 
上执行回滚](/zh/docs/tasks/manage-daemon/rollback-daemon-set/) +* 查看[创建 DaemonSet 以收养现有 DaemonSet Pod](/zh/docs/concepts/workloads/controllers/daemonset/) diff --git a/content/zh/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/zh/docs/tasks/manage-kubernetes-objects/kustomization.md index 605e9113fe..b98ec00718 100644 --- a/content/zh/docs/tasks/manage-kubernetes-objects/kustomization.md +++ b/content/zh/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -257,7 +257,7 @@ spec: containers: - name: app image: my-app - volumeMount: + volumeMounts: - name: config mountPath: /config volumes: @@ -317,7 +317,7 @@ spec: containers: - image: my-app name: app - volumeMount: + volumeMounts: - mountPath: /config name: config volumes: @@ -428,7 +428,7 @@ spec: containers: - name: app image: my-app - volumeMount: + volumeMounts: - name: password mountPath: /secrets volumes: diff --git a/content/zh/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md b/content/zh/docs/tasks/network/customize-hosts-file-for-pods.md similarity index 99% rename from content/zh/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md rename to content/zh/docs/tasks/network/customize-hosts-file-for-pods.md index 4229689422..f2f5e7fc1b 100644 --- a/content/zh/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md +++ b/content/zh/docs/tasks/network/customize-hosts-file-for-pods.md @@ -10,7 +10,7 @@ reviewers: - rickypai - thockin title: Adding entries to Pod /etc/hosts with HostAliases -content_type: concept +content_type: task weight: 60 min-kubernetes-server-version: 1.7 --> @@ -29,7 +29,7 @@ Modification not using HostAliases is not suggested because the file is managed 建议通过使用 HostAliases 来进行修改,因为该文件由 Kubelet 管理,并且 可以在 Pod 创建/重启过程中被重写。 - + -验证节点是否检测到 IPv4 和 IPv6 接口(用集群中的有效节点替换节点名称。 -在此示例中,节点名称为 `k8s-linuxpool1-34450317-0`): +验证节点是否检测到 IPv4 和 IPv6 接口。用集群中的有效节点替换节点名称。 +在此示例中,节点名称为 
`k8s-linuxpool1-34450317-0`: ```shell kubectl get nodes k8s-linuxpool1-34450317-0 -o go-template --template='{{range .status.addresses}}{{printf "%s: %s \n" .type .address}}{{end}}' @@ -81,12 +81,12 @@ InternalIP: 2001:1234:5678:9abc::5 ### 验证 Pod 寻址 -验证 Pod 已分配了 IPv4 和 IPv6 地址。(用集群中的有效 Pod 替换 Pod 名称。 -在此示例中,Pod 名称为 pod01) +验证 Pod 已分配了 IPv4 和 IPv6 地址。用集群中的有效 Pod 替换 Pod 名称。 +在此示例中,Pod 名称为 `pod01`: ```shell kubectl get pods pod01 -o go-template --template='{{range .status.podIPs}}{{printf "%s \n" .ip}}{{end}}' @@ -209,7 +209,7 @@ Create the following Service that explicitly defines `IPv6` as the first array e Kubernetes 将 `service-cluster-ip-range` 配置的 IPv6 地址范围给 Service 分配集群 IP, 并将 `.spec.ipFamilyPolicy` 设置为 `SingleStack`。 -{{< codenew file="service/networking/dual-stack-ipv6-svc.yaml" >}} +{{< codenew file="service/networking/dual-stack-ipfamilies-ipv6.yaml" >}} ### 创建双协议栈负载均衡服务 -如果云提供商支持配置启用 IPv6 的外部负载均衡器,则将 `ipFamily` 字段设置为 -`IPv6` 并将 `type` 字段设置为 `LoadBalancer` 的方式创建以下服务: +如果云提供商支持配置启用 IPv6 的外部负载均衡器,则创建如下 Service 时将 +`.spec.ipFamilyPolicy` 设置为 `PreferDualStack`, 并将 `spec.ipFamilies` 字段 +的第一个元素设置为 `IPv6`,将 `type` 字段设置为 `LoadBalancer`: -{{< codenew file="service/networking/dual-stack-ipv6-lb-svc.yaml" >}} +{{< codenew file="service/networking/dual-stack-prefer-ipv6-lb-svc.yaml" >}} + + +检查服务: + +```shell +kubectl get svc -l app=MyApp +``` 当通过 `kubectl` 删除 StatefulSet 时,StatefulSet 会被缩容为 0。 属于该 StatefulSet 的所有 Pod 也被删除。 -如果你只想删除 StatefulSet 而不删除 Pod,使用 `--cascade=false`。 +如果你只想删除 StatefulSet 而不删除 Pod,使用 `--cascade=orphan`。 ```shell -kubectl delete -f --cascade=false +kubectl delete -f --cascade=orphan ``` -通过将 `--cascade=false` 传递给 `kubectl delete`,在删除 StatefulSet 对象之后, +通过将 `--cascade=orphan` 传递给 `kubectl delete`,在删除 StatefulSet 对象之后, StatefulSet 管理的 Pod 会被保留下来。如果 Pod 具有标签 `app=myapp`,则可以按照 如下方式删除它们: diff --git a/content/zh/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md 
b/content/zh/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index ca9931c7d1..f831b28fab 100644 --- a/content/zh/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/zh/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -304,7 +304,7 @@ First, get the YAML of your HorizontalPodAutoscaler in the `autoscaling/v2beta2` 首先,将 HorizontalPodAutoscaler 的 YAML 文件改为 `autoscaling/v2beta2` 格式: ```shell -kubectl get hpa.v2beta2.autoscaling -o yaml > /tmp/hpa-v2.yaml +kubectl get hpa php-apache -o yaml > /tmp/hpa-v2.yaml ``` ## 滚动升级时扩缩 {#autoscaling-during-rolling-update} -目前在 Kubernetes 中,可以针对 ReplicationController 或 Deployment 执行 -滚动更新,它们会为你管理底层副本数。 -Pod 水平扩缩只支持后一种:HPA 会被绑定到 Deployment 对象, -HPA 设置副本数量时,Deployment 会设置底层副本数。 +Kubernetes 允许你在 Deployment 上执行滚动更新。在这种情况下,Deployment 为你管理下层的 ReplicaSet。 +当你为一个 Deployment 配置自动扩缩时,你要为每个 Deployment 绑定一个 HorizontalPodAutoscaler。 +HorizontalPodAutoscaler 管理 Deployment 的 `replicas` 字段。 +Deployment Controller 负责设置下层 ReplicaSet 的 `replicas` 字段, +以便确保在上线及后续过程副本个数合适。 -通过直接操控副本控制器执行滚动升级时,HPA 不能工作, -也就是说你不能将 HPA 绑定到某个 RC 再执行滚动升级。 -HPA 不能工作的原因是它无法绑定到滚动更新时所新创建的副本控制器。 +如果你对一个副本个数被自动扩缩的 StatefulSet 执行滚动更新, 该 StatefulSet +会直接管理它的 Pod 集合 (不存在类似 ReplicaSet 这样的中间资源)。 输入 **Ctrl+C** 结束 watch 操作。 -如果你看不到任何进度,确保已启用[前提条件](#before-you-begin) +如果你看不到任何进度,确保已启用[前提条件](#准备开始) 中提到的动态 PersistentVolume 预配器。 -Kubernetes 1.8 版本中包含 beta 特性 +Kubernetes 包含特性 [kubelet 证书轮换](/zh/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/), 在当前证书即将过期时, 将自动生成新的秘钥,并从 Kubernetes API 申请新的证书。 一旦新的证书可用,它将被用于与 @@ -99,7 +99,7 @@ criteria, it will be auto approved by the controller manager, then it will have a status of `Approved`. Next, the controller manager will sign a certificate, issued for the duration specified by the `--cluster-signing-duration` parameter, and the signed certificate -will be attached to the certificate signing requests. +will be attached to the certificate signing request. 
--> 最初,来自节点上 kubelet 的证书签名请求处于 `Pending` 状态。 如果证书签名请求满足特定条件, 控制器管理器会自动批准,此时请求会处于 `Approved` 状态。 接下来,控制器管理器会签署证书, @@ -116,14 +116,16 @@ Kubelet 会从 Kubernetes API 取回签署的证书,并将其写入磁盘, -当签署的证书即将到期时,kubelet 会使用 Kubernetes API,发起新的证书签名请求。 +当签署的证书即将到期时,kubelet 会使用 Kubernetes API,自动发起新的证书签名请求。 +该请求会发生在证书的有效时间剩下 30% 到 10% 之间的任意时间点。 同样地,控制器管理器会自动批准证书请求,并将签署的证书附加到证书签名请求中。 Kubelet 会从 Kubernetes API 取回签署的证书,并将其写入磁盘。 然后它会更新与 Kubernetes API 的连接,使用新的证书重新连接到 Kubernetes API。 diff --git a/content/zh/docs/tasks/tools/included/kubectl-convert-overview.md b/content/zh/docs/tasks/tools/included/kubectl-convert-overview.md new file mode 100644 index 0000000000..f26827f2c2 --- /dev/null +++ b/content/zh/docs/tasks/tools/included/kubectl-convert-overview.md @@ -0,0 +1,24 @@ +--- +title: "kubectl-convert 概述" +description: >- + 一个 kubectl 插件,允许你将清单从一个 Kubernetes API 版本转换到不同的版本。 +headless: true +--- + + + +一个 Kubernetes 命令行工具 `kubectl` 的插件,允许你将清单在不同 API 版本间转换。 +在将清单迁移到具有较新 Kubernetes 版本的未弃用 API 版本时,这个插件特别有用。 +更多信息请访问 [迁移到非弃用 API](/zh/docs/reference/using-api/deprecation-guide/#migrate-to-non-deprecated-apis) diff --git a/content/zh/docs/tasks/tools/install-kubectl-linux.md b/content/zh/docs/tasks/tools/install-kubectl-linux.md index 2357358d97..91786b715d 100644 --- a/content/zh/docs/tasks/tools/install-kubectl-linux.md +++ b/content/zh/docs/tasks/tools/install-kubectl-linux.md @@ -44,12 +44,10 @@ The following methods exist for installing kubectl on Linux: - [Install kubectl binary with curl on Linux](#install-kubectl-binary-with-curl-on-linux) - [Install using native package management](#install-using-native-package-management) - [Install using other package management](#install-using-other-package-management) -- [Install on Linux as part of the Google Cloud SDK](#install-on-linux-as-part-of-the-google-cloud-sdk) --> - [用 curl 在 Linux 系统中安装 kubectl](#install-kubectl-binary-with-curl-on-linux) - [用原生包管理工具安装](#install-using-native-package-management) - 
[用其他包管理工具安装](#install-using-other-package-management) -- [作为谷歌云 SDK 的一部分,在 Linux 中安装](#install-on-linux-as-part-of-the-google-cloud-sdk) -### 作为谷歌云 SDK 的一部分,在 Linux 上安装 {#install-on-linux-as-part-of-the-google-cloud-sdk} - -{{< include "included/install-kubectl-gcloud.md" >}} - @@ -275,11 +267,11 @@ kubectl version --client {{< include "included/verify-kubectl.md" >}} -## kubectl 的可选配置 {#optional-kubectl-configurations} +## kubectl 的可选配置和插件 {#optional-kubectl-configurations} ### 启用 shell 自动补全功能 {#enable-shell-autocompletion} @@ -297,6 +289,91 @@ kubectl 为 Bash 和 Zsh 提供自动补全功能,可以减轻许多输入的 {{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}} {{< /tabs >}} + +### 安装 `kubectl convert` 插件 + +{{< include "included/kubectl-convert-overview.md" >}} + + +1. 用以下命令下载最新发行版: + + ```bash + curl -LO https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert + ``` + +1. 验证该可执行文件(可选步骤) + + 下载 kubectl-convert 校验和文件: + + ```bash + curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" + ``` + + + 基于校验和,验证 kubectl-convert 的可执行文件: + + ```bash + echo "$( + 验证通过时,输出为: + + ```console + kubectl-convert: OK + ``` + + + 验证失败时,`sha256` 将以非零值退出,并打印输出类似于: + + ```bash + kubectl-convert: FAILED + sha256sum: WARNING: 1 computed checksum did NOT match + ``` + {{< note >}} + + 下载相同版本的可执行文件和校验和。 + {{< /note >}} + + +1. 安装 kubectl-convert + + ```bash + sudo install -o root -g root -m 0755 kubectl-convert /usr/local/bin/kubectl-convert + ``` + + +1. 
验证插件是否安装成功 + + ```shell + kubectl convert --help + ``` + + + 如果你没有看到任何错误就代表插件安装成功了。 + ## {{% heading "whatsnext" %}} {{< include "included/kubectl-whats-next.md" >}} diff --git a/content/zh/docs/tasks/tools/install-kubectl-macos.md b/content/zh/docs/tasks/tools/install-kubectl-macos.md index ec9dcae5f8..bfae71d454 100644 --- a/content/zh/docs/tasks/tools/install-kubectl-macos.md +++ b/content/zh/docs/tasks/tools/install-kubectl-macos.md @@ -169,6 +169,13 @@ The following methods exist for installing kubectl on macOS: sudo chown root: /usr/local/bin/kubectl ``` + {{< note >}} + + 确保 `/usr/local/bin` 在你的 PATH 环境变量中。 + {{< /note >}} + @@ -257,11 +264,11 @@ If you are on macOS and using [Macports](https://macports.org/) package manager, {{< include "included/verify-kubectl.md" >}} -## 可选的 kubectl 配置 {#optional-kubectl-configurations} +## 可选的 kubectl 配置和插件 {#optional-kubectl-configurations-and-plugins} ### 启用 shell 自动补全功能 {#enable-shell-autocompletion} @@ -279,6 +286,120 @@ kubectl 为 Bash 和 Zsh 提供自动补全功能,这可以节省许多输入 {{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}} {{< /tabs >}} + +### 安装 `kubectl convert` 插件 + +{{< include "included/kubectl-convert-overview.md" >}} + + +1. 用以下命令下载最新发行版: + + {{< tabs name="download_convert_binary_macos" >}} + {{< tab name="Intel" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl-convert" + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl-convert" + {{< /tab >}} + {{< /tabs >}} + + +1. 
验证该可执行文件(可选步骤) + + 下载 kubectl-convert 校验和文件: + + {{< tabs name="download_convert_checksum_macos" >}} + {{< tab name="Intel" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl-convert.sha256" + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl-convert.sha256" + {{< /tab >}} + {{< /tabs >}} + + + 基于校验和,验证 kubectl-convert 的可执行文件: + + ```bash + echo "$( + 验证通过时,输出为: + + ```console + kubectl-convert: OK + ``` + + + 验证失败时,`sha256` 将以非零值退出,并打印输出类似于: + + ```bash + kubectl-convert: FAILED + shasum: WARNING: 1 computed checksum did NOT match + ``` + + {{< note >}} + + 下载相同版本的可执行文件和校验和。 + {{< /note >}} + + +1. 使 kubectl-convert 二进制文件可执行 + + ```bash + chmod +x ./kubectl-convert + ``` + + +1. 将 kubectl-convert 可执行文件移动到系统 `PATH` 环境变量中的一个位置。 + + ```bash + sudo mv ./kubectl-convert /usr/local/bin/kubectl-convert + sudo chown root: /usr/local/bin/kubectl-convert + ``` + + {{< note >}} + + 确保你的 PATH 环境变量中存在 `/usr/local/bin` + {{< /note >}} + + +1. 
验证插件是否安装成功 + + ```shell + kubectl convert --help + ``` + + + 如果你没有看到任何错误就代表插件安装成功了。 + ## {{% heading "whatsnext" %}} {{< include "included/kubectl-whats-next.md" >}} diff --git a/content/zh/docs/tasks/tools/install-kubectl-windows.md b/content/zh/docs/tasks/tools/install-kubectl-windows.md index 43d33e7510..d7f23a11dc 100644 --- a/content/zh/docs/tasks/tools/install-kubectl-windows.md +++ b/content/zh/docs/tasks/tools/install-kubectl-windows.md @@ -42,7 +42,6 @@ The following methods exist for installing kubectl on Windows: - [用 curl 在 Windows 上安装 kubectl](#install-kubectl-binary-with-curl-on-windows) - [在 Windows 上用 Chocolatey 或 Scoop 安装](#install-on-windows-using-chocolatey-or-scoop) -- [作为谷歌云 SDK 的一部分,在 Windows 上安装](#install-on-windows-as-part-of-the-google-cloud-sdk) -### 作为谷歌云 SDK 的一部分,在 Windows 上安装 {#install-on-windows-as-part-of-the-google-cloud-sdk} - -{{< include "included/install-kubectl-gcloud.md" >}} - @@ -225,11 +217,11 @@ Edit the config file with a text editor of your choice, such as Notepad. {{< include "included/verify-kubectl.md" >}} -## kubectl 可选配置 {#optional-kubectl-configurations} +## kubectl 可选配置和插件 {#optional-kubectl-configurations} ### 启用 shell 自动补全功能 {#enable-shell-autocompletion} @@ -244,7 +236,76 @@ kubectl 为 Bash 和 Zsh 提供自动补全功能,可以减轻许多输入的 {{< include "included/optional-kubectl-configs-zsh.md" >}} + +### 安装 `kubectl convert` 插件 + +{{< include "included/kubectl-convert-overview.md" >}} + + +1. 用以下命令下载最新发行版: + + ```powershell + curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl-convert.exe + ``` + + +1. 
验证该可执行文件(可选步骤) + + 下载 kubectl-convert 校验和文件: + + ```powershell + curl -LO https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubectl-convert.exe.sha256 + ``` + + + 基于校验和,验证 kubectl-convert 的可执行文件: + + - 用提示的命令对 `CertUtil` 的输出和下载的校验和文件进行手动比较。 + + ```cmd + CertUtil -hashfile kubectl-convert.exe SHA256 + type kubectl-convert.exe.sha256 + ``` + + + - 使用 PowerShell `-eq` 操作使验证自动化,获得 `True` 或者 `False` 的结果: + + ```powershell + $($(CertUtil -hashfile .\kubectl-convert.exe SHA256)[1] -replace " ", "") -eq $(type .\kubectl-convert.exe.sha256) + ``` + + +1. 将可执行文件添加到你的 `PATH` 环境变量。 + +1. 验证插件是否安装成功 + + ```shell + kubectl convert --help + ``` + + + 如果你没有看到任何错误就代表插件安装成功了。 + ## {{% heading "whatsnext" %}} {{< include "included/kubectl-whats-next.md" >}} - diff --git a/content/zh/docs/tutorials/_index.md b/content/zh/docs/tutorials/_index.md index 821855b712..d6c16b2e2c 100644 --- a/content/zh/docs/tutorials/_index.md +++ b/content/zh/docs/tutorials/_index.md @@ -72,7 +72,7 @@ Kubernetes 文档的这一部分包含教程。每个教程展示了如何完成 * [公开外部 IP 地址访问集群中的应用程序](/zh/docs/tutorials/stateless-application/expose-external-ip-address/) -* [示例:使用 MongoDB 部署 PHP 留言板应用程序](/zh/docs/tutorials/stateless-application/guestbook/) +* [示例:使用 Redis 部署 PHP 留言板应用程序](/zh/docs/tutorials/stateless-application/guestbook/) ## 集群 -* [AppArmor](/zh/docs/tutorials/clusters/apparmor/) +* [seccomp](/zh/docs/tutorials/clusters/seccomp/) ### 使用 PodSecurityPolicy 限制配置文件 +{{< note >}} + +PodSecurityPolicy 在 Kubernetes v1.21 版本中已被废弃,将在 v1.25 版本移除。 +查看 [PodSecurityPolicy 文档](/zh/docs/concepts/policy/pod-security-policy/)获取更多信息。 +{{< /note >}} + 如果启用了 PodSecurityPolicy 扩展,则可以应用群集范围的 AppArmor 限制。要启用 PodSecurityPolicy,必须在“apiserver”上设置以下标志: diff --git a/content/zh/docs/tutorials/clusters/seccomp.md b/content/zh/docs/tutorials/clusters/seccomp.md index 9f8d7dc2f3..d724d605b6 100644 --- a/content/zh/docs/tutorials/clusters/seccomp.md +++ b/content/zh/docs/tutorials/clusters/seccomp.md @@ -2,6 +2,7 @@ title: 使用 Seccomp 限制容器的系统调用 
content_type: tutorial weight: 20 +min-kubernetes-server-version: v1.22 --- @@ -10,7 +11,7 @@ weight: 20 为了完成本教程中的所有步骤,你必须安装 [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) -和 [kubectl](/zh/docs/tasks/tools/)。本教程将显示同时具有 alpha(v1.19 之前的版本) -和通常可用的 seccomp 功能的示例,因此请确保为所使用的版本[正确配置](https://kind.sigs.k8s.io/docs/user/quick-start/#setting-kubernetes-version)了集群。 +和 [kubectl](/zh/docs/tasks/tools/)。本教程将显示同时具有 alpha(v1.22 新版本) +和通常可用的 seccomp 功能的示例。 +你应该确保为所使用的版本[正确配置](https://kind.sigs.k8s.io/docs/user/quick-start/#setting-kubernetes-version)了集群。 + + +## 启用 `RuntimeDefault` 作为所有工作负载的默认 seccomp 配置文件 + +{{< feature-state state="alpha" for_k8s_version="v1.22" >}} + +`SeccompDefault` 是一个可选的 kubelet +[特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates), +相应地,`--seccomp-default` 是此特性门控的 +[命令行标志](/zh/docs/reference/command-line-tools-reference/kubelet)。 +必须同时启用两者才能使用该功能。 + + +如果启用,kubelet 将默认使用 `RuntimeDefault` seccomp 配置, +而不是使用 `Unconfined`(禁用 seccomp)模式,该配置由容器运行时定义。 +默认配置旨在提供一组强大的安全默认值设置,同时避免影响工作负载的功能。 +不同的容器运行时之间及其不同的发布版本之间的默认配置可能不同, +例如在比较 CRI-O 和 containerd 的配置文件时(就会发现这点)。 + + +某些工作负载可能相比其他工作负载需要更少的系统调用限制。 +这意味着即使使用 `RuntimeDefault` 配置文件,它们也可能在运行时失败。 +要处理此类失效,你可以: + +- 将工作负载显式运行为 `Unconfined`。 +- 禁用节点的 `SeccompDefault` 功能。 + 还要确保工作负载被安排在禁用该功能的节点上。 +- 为工作负载创建自定义 seccomp 配置文件。 + + +如果你将此功能引入到类似生产的集群中, +Kubernetes 项目建议你在节点的子集上启用此特性门控, +然后在集群范围内推出更改之前测试工作负载的执行情况。 + +有关可能的升级和降级策略的更多详细信息, +请参见[相关 Kubernetes 增强提案 (KEP)](https://github.com/kubernetes/enhancements/tree/a70cc18/keps/sig-node/2413-seccomp-by-default#upgrade--downgrade-strategy)。 + + +由于该功能处于 alpha 状态,因此默认情况下是被禁用的。要启用它, +请将标志 `--feature-gates=SeccompDefault=true --seccomp-default` +传递给 `kubelet` CLI 或通过 +[kubelet 配置文件](/zh/docs/tasks/administer-cluster/kubelet-config-file/)启用它。 +要在 [kind](https://kind.sigs.k8s.io) 中启用特性门控, +请确保 `kind` 提供所需的最低 Kubernetes 版本并 +[在 kind 配置中](https://kind.sigs.k8s.io/docs/user/quick-start/#enable-feature-gates-in-your-cluster) +启用 `SeccompDefault` 功能: + +```yaml 
+kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +featureGates: + SeccompDefault: true +``` + -## 使用 Seccomp 配置文件创建 Pod 以进行系统调用审核 +## 使用 seccomp 配置文件创建 Pod 以进行系统调用审核 首先,将 `audit.json` 配置文件应用到新的 Pod 中,该配置文件将记录该进程的所有系统调用。 @@ -297,14 +396,14 @@ kubectl delete svc/audit-pod ``` -## 使用导致违规的 Seccomp 配置文件创建 Pod +## 使用导致违规的 seccomp 配置文件创建 Pod 为了进行演示,请将不允许任何系统调用的配置文件应用于 Pod。 @@ -364,7 +463,7 @@ kubectl delete svc/violation-pod ``` -## 使用设置仅允许需要的系统调用的配置文件来创建 Pod +## 使用设置仅允许需要的系统调用的 seccomp 配置文件来创建 Pod 如果你看一下 `fine-pod.json` 文件,你会注意到在第一个示例中配置文件设置为 `"defaultAction": "SCMP_ACT_LOG"` 的一些系统调用。 现在,配置文件设置为 `"defaultAction": "SCMP_ACT_ERRNO"`,但是在 `"action": "SCMP_ACT_ALLOW"` 块中明确允许一组系统调用。 @@ -482,7 +581,7 @@ kubectl delete svc/fine-pod ``` -## 使用容器运行时默认的 Seccomp 配置文件创建 Pod +## 使用容器运行时默认的 seccomp 配置文件创建 Pod 大多数容器运行时都提供一组允许或不允许的默认系统调用。通过使用 `runtime/default` 注释 或将 Pod 或容器的安全上下文中的 seccomp 类型设置为 `RuntimeDefault`,可以轻松地在 Kubernetes 中应用默认值。 @@ -518,10 +617,10 @@ The default seccomp profile should provide adequate access for most workloads. 
额外的资源: -* [Seccomp 概要](https://lwn.net/Articles/656307/) +* [seccomp 概要](https://lwn.net/Articles/656307/) * [Seccomp 在 Docker 中的安全配置](https://docs.docker.com/engine/security/seccomp/) \ No newline at end of file diff --git a/content/zh/docs/tutorials/configuration/configure-java-microservice/_index.md b/content/zh/docs/tutorials/configuration/configure-java-microservice/_index.md old mode 100755 new mode 100644 diff --git a/content/zh/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice-interactive.html b/content/zh/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice-interactive.html index f453fc75cb..5a119fcd37 100644 --- a/content/zh/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice-interactive.html +++ b/content/zh/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice-interactive.html @@ -11,7 +11,7 @@ weight: 20 - + diff --git a/content/zh/docs/tutorials/configuration/configure-redis-using-configmap.md b/content/zh/docs/tutorials/configuration/configure-redis-using-configmap.md index 027771a28c..fc6f0fa2f1 100644 --- a/content/zh/docs/tutorials/configuration/configure-redis-using-configmap.md +++ b/content/zh/docs/tutorials/configuration/configure-redis-using-configmap.md @@ -79,7 +79,7 @@ Apply the ConfigMap created above, along with a Redis pod manifest: ```shell kubectl apply -f example-redis-config.yaml -kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml +kubectl apply -f https://k8s.io/examples/pods/config/redis-pod.yaml ``` {{< note >}} -`dashboard` 命令启用仪表板插件,并在默认的 Web 浏览器中打开代理。你可以在仪表板上创建 Kubernetes 资源,例如 Deployment 和 Service。 +`dashboard` 命令启用仪表板插件,并在默认的 Web 浏览器中打开代理。 +你可以在仪表板上创建 Kubernetes 资源,例如 Deployment 和 Service。 如果你以 root 用户身份在环境中运行, 请参见[使用 URL 打开仪表板](#open-dashboard-with-url)。 +默认情况下,仪表板只能从内部 Kubernetes 虚拟网络中访问。 +`dashboard` 命令创建一个临时代理,使仪表板可以从 
Kubernetes 虚拟网络外部访问。 + 要停止代理,请运行 `Ctrl+C` 退出该进程。仪表板仍在运行中。 +命令退出后,仪表板仍然在 Kubernetes 集群中运行。 +你可以再次运行 `dashboard` 命令创建另一个代理来访问仪表板。 + {{< /note >}} - +
    diff --git a/content/zh/docs/tutorials/kubernetes-basics/update/update-interactive.html b/content/zh/docs/tutorials/kubernetes-basics/update/update-interactive.html index 777d7515ad..9befc40bd7 100644 --- a/content/zh/docs/tutorials/kubernetes-basics/update/update-interactive.html +++ b/content/zh/docs/tutorials/kubernetes-basics/update/update-interactive.html @@ -12,7 +12,7 @@ weight: 20 - + diff --git a/content/zh/docs/tutorials/kubernetes-basics/update/update-intro.html b/content/zh/docs/tutorials/kubernetes-basics/update/update-intro.html index e4dab0b07c..8f53c752c3 100644 --- a/content/zh/docs/tutorials/kubernetes-basics/update/update-intro.html +++ b/content/zh/docs/tutorials/kubernetes-basics/update/update-intro.html @@ -12,7 +12,7 @@ weight: 10 - + diff --git a/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md b/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md index 0411d9c92e..8b7f166a69 100644 --- a/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md @@ -1239,16 +1239,16 @@ kubectl get pods -w -l app=nginx 使用 [`kubectl delete`](/zh/docs/reference/generated/kubectl/kubectl-commands/#delete) 删除 StatefulSet。 -请确保提供了 `--cascade=false` 参数给命令。这个参数告诉 Kubernetes 只删除 StatefulSet 而不要删除它的任何 Pod。 +请确保提供了 `--cascade=orphan` 参数给命令。这个参数告诉 Kubernetes 只删除 StatefulSet 而不要删除它的任何 Pod。 ```shell -kubectl delete statefulset web --cascade=false +kubectl delete statefulset web --cascade=orphan ``` ``` statefulset.apps "web" deleted @@ -1416,9 +1416,10 @@ kubectl get pods -w -l app=nginx -在另一个窗口中再次删除这个 StatefulSet。这次省略 `--cascade=false` 参数。 +在另一个窗口中再次删除这个 StatefulSet。这次省略 `--cascade=orphan` 参数。 ```shell kubectl delete statefulset web diff --git a/content/zh/docs/tutorials/stateful-application/cassandra.md b/content/zh/docs/tutorials/stateful-application/cassandra.md index 461e3d1632..461b050938 100644 --- 
a/content/zh/docs/tutorials/stateful-application/cassandra.md +++ b/content/zh/docs/tutorials/stateful-application/cassandra.md @@ -87,14 +87,14 @@ To complete this tutorial, you should already have a basic familiarity with ### Additional Minikube setup instructions {{< caution >}} -[Minikube](https://minikube.sigs.k8s.io/docs/) defaults to 1024MiB of memory and 1 CPU. +[Minikube](https://minikube.sigs.k8s.io/docs/) defaults to 2048MB of memory and 2 CPU. Running Minikube with the default resource configuration results in insufficient resource errors during this tutorial. To avoid these errors, start Minikube with the following settings: --> ### 额外的 Minikube 设置说明 {{< caution >}} -[Minikube](https://minikube.sigs.k8s.io/docs/)默认为 1024MiB 内存和 1 个 CPU。 +[Minikube](https://minikube.sigs.k8s.io/docs/)默认为 2048MB 内存和 2 个 CPU。 在本教程中,使用默认资源配置运行 Minikube 会导致资源不足的错误。为避免这些错误,请使用以下设置启动 Minikube: ```shell @@ -405,7 +405,7 @@ to also be deleted. Never assume you'll be able to access data if its volume cla The Pods in this tutorial use the [`gcr.io/google-samples/cassandra:v13`](https://github.com/kubernetes/examples/blob/master/cassandra/image/Dockerfile) image from Google's [container registry](https://cloud.google.com/container-registry/docs/). -The Docker image above is based on [debian-base](https://github.com/kubernetes/kubernetes/tree/master/build/debian-base) +The Docker image above is based on [debian-base](https://github.com/kubernetes/release/tree/master/images/build/debian-base) and includes OpenJDK 8. This image includes a standard Cassandra installation from the Apache Debian repo. 
@@ -414,7 +414,7 @@ By using environment variables you can change values that are inserted into `cas ## Cassandra 容器环境变量 本教程中的 Pod 使用来自 Google [container registry](https://cloud.google.com/container-registry/docs/) 的 [`gcr.io/google-samples/cassandra:v13`](https://github.com/kubernetes/examples/blob/master/cassandra/image/Dockerfile) 镜像。 -上面的 Docker 镜像基于 [debian-base](https://github.com/kubernetes/kubernetes/tree/master/build/debian-base),并且包含 OpenJDK 8。 +上面的 Docker 镜像基于 [debian-base](https://github.com/kubernetes/release/tree/master/images/build/debian-base),并且包含 OpenJDK 8。 该映像包括来自 Apache Debian 存储库的标准 Cassandra 安装。 通过使用环境变量,您可以更改插入到 `cassandra.yaml` 中的值。 diff --git a/content/zh/docs/tutorials/stateful-application/zookeeper.md b/content/zh/docs/tutorials/stateful-application/zookeeper.md index 3f5baf3c27..7f3a0e9548 100644 --- a/content/zh/docs/tutorials/stateful-application/zookeeper.md +++ b/content/zh/docs/tutorials/stateful-application/zookeeper.md @@ -1412,7 +1412,7 @@ drain the node on which the `zk-0` Pod is scheduled. 
来隔离和腾空 `zk-0` Pod 调度所在的节点。 ```shell -kubectl drain $(kubectl get pod zk-0 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-local-data +kubectl drain $(kubectl get pod zk-0 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-emptydir-data ``` ``` @@ -1453,7 +1453,7 @@ Keep watching the `StatefulSet`'s Pods in the first terminal and drain the node 在第一个终端中持续观察 StatefulSet 的 Pods 并腾空 `zk-1` 调度所在的节点。 ```shell -kubectl drain $(kubectl get pod zk-1 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-local-data "kubernetes-node-ixsl" cordoned +kubectl drain $(kubectl get pod zk-1 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-emptydir-data "kubernetes-node-ixsl" cordoned ``` ``` @@ -1504,7 +1504,7 @@ Continue to watch the Pods of the stateful set, and drain the node on which 继续观察 StatefulSet 中的 Pods 并腾空 `zk-2` 调度所在的节点。 ```shell -kubectl drain $(kubectl get pod zk-2 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-local-data +kubectl drain $(kubectl get pod zk-2 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-emptydir-data ``` ``` node "kubernetes-node-i4c4" cordoned @@ -1610,7 +1610,7 @@ Attempt to drain the node on which `zk-2` is scheduled.
尝试腾空 `zk-2` 调度所在的节点。 ```shell -kubectl drain $(kubectl get pod zk-2 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-local-data +kubectl drain $(kubectl get pod zk-2 --template {{.spec.nodeName}}) --ignore-daemonsets --force --delete-emptydir-data ``` @@ -31,244 +32,350 @@ This tutorial shows you how to build and deploy a simple _(not production ready) 一个简单的_(非面向生产)的_多层 web 应用程序。本例由以下组件组成: -* 单实例 [MongoDB](https://www.mongodb.com/) 以保存留言板条目 +* 单实例 [Redis](https://www.redis.io/) 以保存留言板条目 * 多个 web 前端实例 - - - ## {{% heading "objectives" %}} - - -* 启动 Mongo 数据库。 -* 启动留言板前端。 -* 公开并查看前端服务。 -* 清理。 - - +* 启动 Redis 领导者(Leader) +* 启动两个 Redis 跟随者(Follower) +* 公开并查看前端服务 +* 清理 ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - - -## 启动 Mongo 数据库 +## 启动 Redis 数据库 -留言板应用程序使用 MongoDB 存储数据。 +留言板应用程序使用 Redis 存储数据。 -### 创建 Mongo 的 Deployment +### 创建 Redis Deployment -下面包含的清单文件指定了一个 Deployment 控制器,该控制器运行一个 MongoDB Pod 副本。 +下面包含的清单文件指定了一个 Deployment 控制器,该控制器运行一个 Redis Pod 副本。 -{{< codenew file="application/guestbook/mongo-deployment.yaml" >}} +{{< codenew file="application/guestbook/redis-leader-deployment.yaml" >}} 1. 在下载清单文件的目录中启动终端窗口。 -2. 从 `mongo-deployment.yaml` 文件中应用 MongoDB Deployment: +2. 从 `redis-leader-deployment.yaml` 文件中应用 Redis Deployment: - + - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-deployment.yaml - ``` + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-leader-deployment.yaml + ``` -3. 查询 Pod 列表以验证 MongoDB Pod 是否正在运行: +3. 查询 Pod 列表以验证 Redis Pod 是否正在运行: - ```shell - kubectl get pods - ``` + ```shell + kubectl get pods + ``` + + + 响应应该与此类似: + + ```shell + NAME READY STATUS RESTARTS AGE + redis-leader-fb76b4755-xjr2n 1/1 Running 0 13s + ``` - 响应应该与此类似: +4. 
运行以下命令查看 Redis Deployment 中的日志: - ```shell - NAME READY STATUS RESTARTS AGE - mongo-5cfd459dd4-lrcjb 1/1 Running 0 28s - ``` + ```shell + kubectl logs -f deployment/redis-leader + ``` -4. 运行以下命令查看 MongoDB Deployment 中的日志: - - ```shell - kubectl logs -f deployment/mongo - ``` +### 创建 Redis 领导者服务 +留言板应用程序需要往 Redis 中写数据。因此,需要创建 +[Service](/zh/docs/concepts/services-networking/service/) 来转发 Redis Pod +的流量。Service 定义了访问 Pod 的策略。 -### 创建 MongoDB 服务 +{{< codenew file="application/guestbook/redis-leader-service.yaml" >}} -留言板应用程序需要往 MongoDB 中写数据。因此,需要创建 [Service](/zh/docs/concepts/services-networking/service/) 来代理 MongoDB Pod 的流量。Service 定义了访问 Pod 的策略。 +1. 使用下面的 `redis-leader-service.yaml` 文件创建 Redis的服务: -{{< codenew file="application/guestbook/mongo-service.yaml" >}} - - -1. 使用下面的 `mongo-service.yaml` 文件创建 MongoDB 的服务: - - + - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-service.yaml - ``` + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-leader-service.yaml + ``` -2. 查询服务列表验证 MongoDB 服务是否正在运行: +2. 查询服务列表验证 Redis 服务是否正在运行: - ```shell - kubectl get service - ``` + ```shell + kubectl get service + ``` + + + 响应应该与此类似: + + ```shell + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + kubernetes ClusterIP 10.0.0.1 443/TCP 1m + redis-leader ClusterIP 10.103.78.24 6379/TCP 16s + ``` - 响应应该与此类似: - - ```shell - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes ClusterIP 10.0.0.1 443/TCP 1m - mongo ClusterIP 10.0.0.151 27017/TCP 8s - ``` - - {{< note >}} -这个清单文件创建了一个名为 `mongo` 的 Service,其中包含一组与前面定义的标签匹配的标签,因此服务将网络流量路由到 MongoDB Pod 上。 +这个清单文件创建了一个名为 `redis-leader` 的 Service,其中包含一组 +与前面定义的标签匹配的标签,因此服务将网络流量路由到 Redis Pod 上。 {{< /note >}} + +### 设置 Redis 跟随者 + +尽管 Redis 领导者只有一个 Pod,你可以通过添加若干 Redis 跟随者来将其配置为高可用状态, +以满足流量需求。 + +{{< codenew file="application/guestbook/redis-follower-deployment.yaml" >}} + + +1. 
应用下面的 `redis-follower-deployment.yaml` 文件创建 Redis Deployment: + + + + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-follower-deployment.yaml + ``` + + +2. 通过查询 Pods 列表,验证两个 Redis 跟随者副本在运行: + + ```shell + kubectl get pods + ``` + + + 响应应该类似于这样: + + ``` + NAME READY STATUS RESTARTS AGE + redis-follower-dddfbdcc9-82sfr 1/1 Running 0 37s + redis-follower-dddfbdcc9-qrt5k 1/1 Running 0 38s + redis-leader-fb76b4755-xjr2n 1/1 Running 0 11m + ``` + + +### 创建 Redis 跟随者服务 + +Guestbook 应用需要与 Redis 跟随者通信以读取数据。 +为了让 Redis 跟随者可被发现,你必须创建另一个 +[Service](/zh/docs/concepts/services-networking/service/)。 + +{{< codenew file="application/guestbook/redis-follower-service.yaml" >}} + + +1. 应用如下所示 `redis-follower-service.yaml` 文件中的 Redis Service: + + + + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-follower-service.yaml + ``` + + +2. 查询 Service 列表,验证 Redis 服务在运行: + + ```shell + kubectl get service + ``` + + + 响应应该类似于这样: + + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + kubernetes ClusterIP 10.96.0.1 443/TCP 3d19h + redis-follower ClusterIP 10.110.162.42 6379/TCP 9s + redis-leader ClusterIP 10.103.78.24 6379/TCP 6m10s + ``` + +{{< note >}} + +清单文件创建了一个名为 `redis-follower` 的 Service,该 Service +具有一些与之前所定义的标签相匹配的标签,因此该 Service 能够将网络流量 +路由到 Redis Pod 之上。 +{{< /note >}} + + ## 设置并公开留言板前端 -留言板应用程序有一个 web 前端,服务于用 PHP 编写的 HTTP 请求。 -它被配置为连接到 `mongo` 服务以存储留言版条目。 +Now that you have the Redis storage of your guestbook up and running, start the guestbook web servers. Like the Redis followers, the frontend is deployed using a Kubernetes Deployment. + +The guestbook app uses a PHP frontend. It is configured to communicate with either the Redis follower or leader Services, depending on whether the request is a read or a write. The frontend exposes a JSON interface, and serves a jQuery-Ajax-based UX. 
+--> +现在你有了一个为 Guestbook 应用配置的 Redis 存储处于运行状态, +接下来可以启动 Guestbook 的 Web 服务器了。 +与 Redis 跟随者类似,前端也是使用 Kubernetes Deployment 来部署的。 + +Guestbook 应用使用 PHP 前端。该前端被配置成与后端的 Redis 跟随者或者 +领导者服务通信,具体选择哪个服务取决于请求是读操作还是写操作。 +前端对外暴露一个 JSON 接口,并提供基于 jQuery-Ajax 的用户体验。 - -### 创建留言板前端 Deployment +### 创建 Guestbook 前端 Deployment {{< codenew file="application/guestbook/frontend-deployment.yaml" >}} -1. 从 `frontend-deployment.yaml` 应用前端 Deployment 文件: +1. 应用来自 `frontend-deployment.yaml` 文件的前端 Deployment: - + - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-deployment.yaml + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-deployment.yaml ``` -2. 查询 Pod 列表,验证三个前端副本是否正在运行: +2. 查询 Pod 列表,验证三个前端副本正在运行: - ```shell - kubectl get pods -l app.kubernetes.io/name=guestbook -l app.kubernetes.io/component=frontend - ``` + ```shell + kubectl get pods -l app=guestbook -l tier=frontend + ``` - - 响应应该与此类似: + + 响应应该与此类似: - ``` - NAME READY STATUS RESTARTS AGE - frontend-3823415956-dsvc5 1/1 Running 0 54s - frontend-3823415956-k22zn 1/1 Running 0 54s - frontend-3823415956-w9gbt 1/1 Running 0 54s - ``` + ``` + NAME READY STATUS RESTARTS AGE + frontend-85595f5bf9-5tqhb 1/1 Running 0 47s + frontend-85595f5bf9-qbzwm 1/1 Running 0 47s + frontend-85595f5bf9-zchwc 1/1 Running 0 47s + ``` - ### 创建前端服务 -应用的 `mongo` 服务只能在 Kubernetes 集群中访问,因为服务的默认类型是 +应用的 `Redis` 服务只能在 Kubernetes 集群中访问,因为服务的默认类型是 [ClusterIP](/zh/docs/concepts/services-networking/service/#publishing-services-service-types)。 `ClusterIP` 为服务指向的 Pod 集提供一个 IP 地址。这个 IP 地址只能在集群中访问。 -如果您希望访客能够访问您的留言板,您必须将前端服务配置为外部可见的,以便客户端可以从 Kubernetes 集群之外请求服务。然而即便使用了 `ClusterIP` Kubernetes 用户仍可以通过 `kubectl port-forward` 访问服务。 +如果你希望访客能够访问你的 Guestbook,你必须将前端服务配置为外部可见的, +以便客户端可以从 Kubernetes 集群之外请求服务。 +然而即便使用了 `ClusterIP`,Kubernetes 用户仍可以通过 +`kubectl port-forward` 访问服务。 {{< note >}} -一些云提供商,如 Google Compute Engine 或 Google Kubernetes Engine,支持外部负载均衡器。如果您的云提供商支持负载均衡器,并且您希望使用它, -只需取消注释 `type: LoadBalancer` 
即可。 +一些云提供商,如 Google Compute Engine 或 Google Kubernetes Engine, +支持外部负载均衡器。如果你的云提供商支持负载均衡器,并且你希望使用它, +只需取消注释 `type: LoadBalancer`。 {{< /note >}} {{< codenew file="application/guestbook/frontend-service.yaml" >}} @@ -276,37 +383,38 @@ Some cloud providers, like Google Compute Engine or Google Kubernetes Engine, su -1. 从 `frontend-service.yaml` 文件中应用前端服务: +1. 应用来自 `frontend-service.yaml` 文件中的前端服务: - + - ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-service.yaml - ``` + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-service.yaml + ``` -2. 查询服务列表以验证前端服务正在运行: +2. 查询 Service 列表以验证前端服务正在运行: - ```shell - kubectl get services - ``` + ```shell + kubectl get services + ``` - - 响应应该与此类似: + + 响应应该与此类似: - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend ClusterIP 10.0.0.112 80/TCP 6s - kubernetes ClusterIP 10.0.0.1 443/TCP 4m - mongo ClusterIP 10.0.0.151 6379/TCP 2m - ``` + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + frontend ClusterIP 10.97.28.230 80/TCP 19s + kubernetes ClusterIP 10.96.0.1 443/TCP 3d19h + redis-follower ClusterIP 10.110.162.42 6379/TCP 5m48s + redis-leader ClusterIP 10.103.78.24 6379/TCP 11m + ``` 1. 运行以下命令将本机的 `8080` 端口转发到服务的 `80` 端口。 - ```shell - kubectl port-forward svc/frontend 8080:80 - ``` + ```shell + kubectl port-forward svc/frontend 8080:80 + ``` - - 响应应该与此类似: + + 响应应该与此类似: - ``` - Forwarding from 127.0.0.1:8080 -> 80 - Forwarding from [::1]:8080 -> 80 - ``` + ``` + Forwarding from 127.0.0.1:8080 -> 80 + Forwarding from [::1]:8080 -> 80 + ``` -2. 在浏览器中加载 [http://localhost:8080](http://localhost:8080) 页面以查看留言板。 +2. 在浏览器中加载 [http://localhost:8080](http://localhost:8080) +页面以查看 Guestbook。 - ### 通过 `LoadBalancer` 查看前端服务 -如果您部署了 `frontend-service.yaml`。你需要找到 IP 地址来查看你的留言板。 +如果你部署了 `frontend-service.yaml`,需要找到用来查看 Guestbook 的 +IP 地址。 1. 
运行以下命令以获取前端服务的 IP 地址。 - ```shell - kubectl get service frontend - ``` + ```shell + kubectl get service frontend + ``` - - 响应应该与此类似: + + 响应应该与此类似: - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend LoadBalancer 10.51.242.136 109.197.92.229 80:32372/TCP 1m - ``` + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + frontend LoadBalancer 10.51.242.136 109.197.92.229 80:32372/TCP 1m + ``` -2. 复制外部 IP 地址,然后在浏览器中加载页面以查看留言板。 +2. 复制这里的外部 IP 地址,然后在浏览器中加载页面以查看留言板。 + +{{< note >}} + +尝试通过输入消息并点击 Submit 来添加一些留言板条目。 +你所输入的消息会在前端显示。这一消息表明数据被通过你 +之前所创建的 Service 添加到 Redis 存储中。 +{{< /note >}} - ## 扩展 Web 前端 -伸缩很容易是因为服务器本身被定义为使用一个 Deployment 控制器的 Service。 +你可以根据需要执行伸缩操作,这是因为服务器本身被定义为使用一个 +Deployment 控制器的 Service。 1. 运行以下命令扩展前端 Pod 的数量: - ```shell - kubectl scale deployment frontend --replicas=5 - ``` + ```shell + kubectl scale deployment frontend --replicas=5 + ``` 2. 查询 Pod 列表验证正在运行的前端 Pod 的数量: - ```shell - kubectl get pods - ``` + ```shell + kubectl get pods + ``` - - 响应应该类似于这样: + + 响应应该类似于这样: - ``` - NAME READY STATUS RESTARTS AGE - frontend-3823415956-70qj5 1/1 Running 0 5s - frontend-3823415956-dsvc5 1/1 Running 0 54m - frontend-3823415956-k22zn 1/1 Running 0 54m - frontend-3823415956-w9gbt 1/1 Running 0 54m - frontend-3823415956-x2pld 1/1 Running 0 5s - mongo-1068406935-3lswp 1/1 Running 0 56m - ``` + ``` + NAME READY STATUS RESTARTS AGE + frontend-85595f5bf9-5df5m 1/1 Running 0 83s + frontend-85595f5bf9-7zmg5 1/1 Running 0 83s + frontend-85595f5bf9-cpskg 1/1 Running 0 15m + frontend-85595f5bf9-l2l54 1/1 Running 0 14m + frontend-85595f5bf9-l9c8z 1/1 Running 0 14m + redis-follower-dddfbdcc9-82sfr 1/1 Running 0 97m + redis-follower-dddfbdcc9-qrt5k 1/1 Running 0 97m + redis-leader-fb76b4755-xjr2n 1/1 Running 0 108m + ``` 3. 运行以下命令缩小前端 Pod 的数量: - ```shell - kubectl scale deployment frontend --replicas=2 - ``` + ```shell + kubectl scale deployment frontend --replicas=2 + ``` 4. 
查询 Pod 列表验证正在运行的前端 Pod 的数量: - ```shell - kubectl get pods - ``` - - - 响应应该类似于这样: - - ``` - NAME READY STATUS RESTARTS AGE - frontend-3823415956-k22zn 1/1 Running 0 1h - frontend-3823415956-w9gbt 1/1 Running 0 1h - mongo-1068406935-3lswp 1/1 Running 0 1h - ``` + ```shell + kubectl get pods + ``` + + 响应应该类似于这样: + ``` + NAME READY STATUS RESTARTS AGE + frontend-85595f5bf9-cpskg 1/1 Running 0 16m + frontend-85595f5bf9-l9c8z 1/1 Running 0 15m + redis-follower-dddfbdcc9-82sfr 1/1 Running 0 98m + redis-follower-dddfbdcc9-qrt5k 1/1 Running 0 98m + redis-leader-fb76b4755-xjr2n 1/1 Running 0 109m + ``` ## {{% heading "cleanup" %}} - -删除 Deployments 和服务还会删除正在运行的 Pod。使用标签用一个命令删除多个资源。 +删除 Deployments 和服务还会删除正在运行的 Pod。 +使用标签用一个命令删除多个资源。 1. 运行以下命令以删除所有 Pod,Deployments 和 Services。 - ```shell - kubectl delete deployment -l app.kubernetes.io/name=mongo - kubectl delete service -l app.kubernetes.io/name=mongo - kubectl delete deployment -l app.kubernetes.io/name=guestbook - kubectl delete service -l app.kubernetes.io/name=guestbook - ``` + ```shell + kubectl delete deployment -l app=redis + kubectl delete service -l app=redis + kubectl delete deployment frontend + kubectl delete service frontend + ``` - - 响应应该是: - - ``` - deployment.apps "mongo" deleted - service "mongo" deleted - deployment.apps "frontend" deleted - service "frontend" deleted - ``` + + 响应应该是: + ``` + deployment.apps "redis-follower" deleted + deployment.apps "redis-leader" deleted + deployment.apps "frontend" deleted + service "frontend" deleted + ``` 2. 查询 Pod 列表,确认没有 Pod 在运行: - ```shell - kubectl get pods - ``` + ```shell + kubectl get pods + ``` - - 响应应该是: - - ``` - No resources found. - ``` + + 响应应该是: + ``` + No resources found in default namespace. 
+ ``` ## {{% heading "whatsnext" %}} - - -* 完成 [Kubernetes Basics](/zh/docs/tutorials/kubernetes-basics/) 交互式教程 -* 使用 Kubernetes 创建一个博客,使用 [MySQL 和 Wordpress 的持久卷](/zh/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/#visit-your-new-wordpress-blog) -* 阅读更多关于[连接应用程序](/zh/docs/concepts/services-networking/connect-applications-service/) -* 阅读更多关于[管理资源](/zh/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively) - +* 完成 [Kubernetes 基础](/zh/docs/tutorials/kubernetes-basics/) 交互式教程 +* 使用 Kubernetes 创建一个博客,使用 + [MySQL 和 Wordpress 的持久卷](/zh/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/#visit-your-new-wordpress-blog) +* 进一步阅读[连接应用程序](/zh/docs/concepts/services-networking/connect-applications-service/) +* 进一步阅读[管理资源](/zh/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively) diff --git a/content/zh/examples/application/guestbook/frontend-deployment.yaml b/content/zh/examples/application/guestbook/frontend-deployment.yaml index 613c654aa9..f97f20dab6 100644 --- a/content/zh/examples/application/guestbook/frontend-deployment.yaml +++ b/content/zh/examples/application/guestbook/frontend-deployment.yaml @@ -1,32 +1,29 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook apiVersion: apps/v1 kind: Deployment metadata: name: frontend - labels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend spec: + replicas: 3 selector: matchLabels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend - replicas: 3 + app: guestbook + tier: frontend template: metadata: labels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend + app: guestbook + tier: frontend spec: containers: - - name: guestbook - image: paulczar/gb-frontend:v5 - # image: gcr.io/google-samples/gb-frontend:v4 + - name: php-redis + image: gcr.io/google_samples/gb-frontend:v5 + env: + - name: GET_HOSTS_FROM + value: "dns" resources: 
requests: cpu: 100m memory: 100Mi - env: - - name: GET_HOSTS_FROM - value: dns ports: - - containerPort: 80 + - containerPort: 80 \ No newline at end of file diff --git a/content/zh/examples/application/guestbook/frontend-service.yaml b/content/zh/examples/application/guestbook/frontend-service.yaml index 34ad3771d7..410c6bbaf2 100644 --- a/content/zh/examples/application/guestbook/frontend-service.yaml +++ b/content/zh/examples/application/guestbook/frontend-service.yaml @@ -1,16 +1,19 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook apiVersion: v1 kind: Service metadata: name: frontend labels: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend + app: guestbook + tier: frontend spec: # if your cluster supports it, uncomment the following to automatically create # an external load-balanced IP for the frontend service. # type: LoadBalancer + #type: LoadBalancer ports: + # the port that this service should serve on - port: 80 selector: - app.kubernetes.io/name: guestbook - app.kubernetes.io/component: frontend + app: guestbook + tier: frontend \ No newline at end of file diff --git a/content/zh/examples/application/guestbook/mongo-deployment.yaml b/content/zh/examples/application/guestbook/mongo-deployment.yaml deleted file mode 100644 index 04908ce25b..0000000000 --- a/content/zh/examples/application/guestbook/mongo-deployment.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: mongo - labels: - app.kubernetes.io/name: mongo - app.kubernetes.io/component: backend -spec: - selector: - matchLabels: - app.kubernetes.io/name: mongo - app.kubernetes.io/component: backend - replicas: 1 - template: - metadata: - labels: - app.kubernetes.io/name: mongo - app.kubernetes.io/component: backend - spec: - containers: - - name: mongo - image: mongo:4.2 - args: - - --bind_ip - - 0.0.0.0 - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 27017 diff --git 
a/content/zh/examples/application/guestbook/mongo-service.yaml b/content/zh/examples/application/guestbook/mongo-service.yaml deleted file mode 100644 index b9cef607bc..0000000000 --- a/content/zh/examples/application/guestbook/mongo-service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: mongo - labels: - app.kubernetes.io/name: mongo - app.kubernetes.io/component: backend -spec: - ports: - - port: 27017 - targetPort: 27017 - selector: - app.kubernetes.io/name: mongo - app.kubernetes.io/component: backend diff --git a/content/zh/examples/application/guestbook/redis-follower-deployment.yaml b/content/zh/examples/application/guestbook/redis-follower-deployment.yaml new file mode 100644 index 0000000000..c418cf7364 --- /dev/null +++ b/content/zh/examples/application/guestbook/redis-follower-deployment.yaml @@ -0,0 +1,30 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-follower + labels: + app: redis + role: follower + tier: backend +spec: + replicas: 2 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + role: follower + tier: backend + spec: + containers: + - name: follower + image: gcr.io/google_samples/gb-redis-follower:v2 + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 \ No newline at end of file diff --git a/content/zh/examples/application/guestbook/redis-follower-service.yaml b/content/zh/examples/application/guestbook/redis-follower-service.yaml new file mode 100644 index 0000000000..53283d35c4 --- /dev/null +++ b/content/zh/examples/application/guestbook/redis-follower-service.yaml @@ -0,0 +1,17 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: redis-follower + labels: + app: redis + role: follower + tier: backend +spec: + ports: + # the port that this service should serve 
on + - port: 6379 + selector: + app: redis + role: follower + tier: backend \ No newline at end of file diff --git a/content/zh/examples/application/guestbook/redis-leader-deployment.yaml b/content/zh/examples/application/guestbook/redis-leader-deployment.yaml new file mode 100644 index 0000000000..9c7547291c --- /dev/null +++ b/content/zh/examples/application/guestbook/redis-leader-deployment.yaml @@ -0,0 +1,30 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-leader + labels: + app: redis + role: leader + tier: backend +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + role: leader + tier: backend + spec: + containers: + - name: leader + image: "docker.io/redis:6.0.5" + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 \ No newline at end of file diff --git a/content/zh/examples/application/guestbook/redis-leader-service.yaml b/content/zh/examples/application/guestbook/redis-leader-service.yaml new file mode 100644 index 0000000000..e04cc183d0 --- /dev/null +++ b/content/zh/examples/application/guestbook/redis-leader-service.yaml @@ -0,0 +1,17 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: redis-leader + labels: + app: redis + role: leader + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis + role: leader + tier: backend \ No newline at end of file diff --git a/content/zh/examples/application/job/indexed-job-vol.yaml b/content/zh/examples/application/job/indexed-job-vol.yaml new file mode 100644 index 0000000000..ed40e1cc44 --- /dev/null +++ b/content/zh/examples/application/job/indexed-job-vol.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: 'indexed-job' +spec: + completions: 5 + parallelism: 3 + completionMode: Indexed + 
template: + spec: + restartPolicy: Never + containers: + - name: 'worker' + image: 'docker.io/library/busybox' + command: + - "rev" + - "/input/data.txt" + volumeMounts: + - mountPath: /input + name: input + volumes: + - name: input + downwardAPI: + items: + - path: "data.txt" + fieldRef: + fieldPath: metadata.annotations['batch.kubernetes.io/job-completion-index'] \ No newline at end of file diff --git a/content/zh/examples/application/job/indexed-job.yaml b/content/zh/examples/application/job/indexed-job.yaml new file mode 100644 index 0000000000..5b80d35264 --- /dev/null +++ b/content/zh/examples/application/job/indexed-job.yaml @@ -0,0 +1,35 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: 'indexed-job' +spec: + completions: 5 + parallelism: 3 + completionMode: Indexed + template: + spec: + restartPolicy: Never + initContainers: + - name: 'input' + image: 'docker.io/library/bash' + command: + - "bash" + - "-c" + - | + items=(foo bar baz qux xyz) + echo ${items[$JOB_COMPLETION_INDEX]} > /input/data.txt + volumeMounts: + - mountPath: /input + name: input + containers: + - name: 'worker' + image: 'docker.io/library/busybox' + command: + - "rev" + - "/input/data.txt" + volumeMounts: + - mountPath: /input + name: input + volumes: + - name: input + emptyDir: {} diff --git a/content/zh/examples/application/job/redis/worker.py b/content/zh/examples/application/job/redis/worker.py index b8abbee917..0e24f71f95 100644 --- a/content/zh/examples/application/job/redis/worker.py +++ b/content/zh/examples/application/job/redis/worker.py @@ -8,11 +8,11 @@ host="redis" # import os # host = os.getenv("REDIS_SERVICE_HOST") -q = rediswq.RedisWQ(name="job2", host="redis") +q = rediswq.RedisWQ(name="job2", host=host) print("Worker with sessionID: " + q.sessionID()) print("Initial queue state: empty=" + str(q.empty())) while not q.empty(): - item = q.lease(lease_secs=10, block=True, timeout=2) + item = q.lease(lease_secs=10, block=True, timeout=2) if item is not None: itemstr = 
item.decode("utf-8") print("Working on " + itemstr) diff --git a/content/zh/examples/controllers/daemonset.yaml b/content/zh/examples/controllers/daemonset.yaml index f291b750c1..685a137244 100644 --- a/content/zh/examples/controllers/daemonset.yaml +++ b/content/zh/examples/controllers/daemonset.yaml @@ -18,6 +18,7 @@ spec: # this toleration is to have the daemonset runnable on master nodes # remove it if your masters can't run pods - key: node-role.kubernetes.io/master + operator: Exists effect: NoSchedule containers: - name: fluentd-elasticsearch diff --git a/content/zh/examples/examples_test.go b/content/zh/examples/examples_test.go index db80be97fe..f868eb3d4a 100644 --- a/content/zh/examples/examples_test.go +++ b/content/zh/examples/examples_test.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/yaml" - // "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/apps" @@ -70,7 +69,6 @@ import ( _ "k8s.io/kubernetes/pkg/apis/networking/install" _ "k8s.io/kubernetes/pkg/apis/policy/install" _ "k8s.io/kubernetes/pkg/apis/rbac/install" - _ "k8s.io/kubernetes/pkg/apis/settings/install" _ "k8s.io/kubernetes/pkg/apis/storage/install" ) @@ -100,7 +98,6 @@ func (g TestGroup) Codec() runtime.Codec { func initGroups() { Groups = make(map[string]TestGroup) - groupNames := []string{ api.GroupName, apps.GroupName, @@ -109,7 +106,6 @@ func initGroups() { networking.GroupName, policy.GroupName, rbac.GroupName, - settings.GroupName, storage.GroupName, } @@ -152,6 +148,19 @@ func getCodecForObject(obj runtime.Object) (runtime.Codec, error) { } func validateObject(obj runtime.Object) (errors field.ErrorList) { + podValidationOptions := validation.PodValidationOptions{ + AllowMultipleHugePageResources: true, + AllowDownwardAPIHugePages: true, + } + + quotaValidationOptions := validation.ResourceQuotaValidationOptions{ + 
AllowPodAffinityNamespaceSelector: true, + } + + pspValidationOptions := policy_validation.PodSecurityPolicyValidationOptions{ + AllowEphemeralVolumeType: true, + } + // Enable CustomPodDNS for testing // feature.DefaultFeatureGate.Set("CustomPodDNS=true") switch t := obj.(type) { @@ -186,7 +195,7 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { opts := validation.PodValidationOptions{ AllowMultipleHugePageResources: true, } - errors = validation.ValidatePod(t, opts) + errors = validation.ValidatePodCreate(t, opts) case *api.PodList: for i := range t.Items { errors = append(errors, validateObject(&t.Items[i])...) @@ -195,12 +204,12 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = validation.ValidatePodTemplate(t) + errors = validation.ValidatePodTemplate(t, podValidationOptions) case *api.ReplicationController: if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = validation.ValidateReplicationController(t) + errors = validation.ValidateReplicationController(t, podValidationOptions) case *api.ReplicationControllerList: for i := range t.Items { errors = append(errors, validateObject(&t.Items[i])...) 
@@ -209,7 +218,7 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = validation.ValidateResourceQuota(t) + errors = validation.ValidateResourceQuota(t, quotaValidationOptions) case *api.Secret: if t.Namespace == "" { t.Namespace = api.NamespaceDefault @@ -219,7 +228,11 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = validation.ValidateService(t, true) + // handle clusterIPs, logic copied from service strategy + if len(t.Spec.ClusterIP) > 0 && len(t.Spec.ClusterIPs) == 0 { + t.Spec.ClusterIPs = []string{t.Spec.ClusterIP} + } + errors = validation.ValidateService(t) case *api.ServiceAccount: if t.Namespace == "" { t.Namespace = api.NamespaceDefault @@ -233,7 +246,7 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = apps_validation.ValidateStatefulSet(t) + errors = apps_validation.ValidateStatefulSet(t, podValidationOptions) case *autoscaling.HorizontalPodAutoscaler: if t.Namespace == "" { t.Namespace = api.NamespaceDefault @@ -254,12 +267,12 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = apps_validation.ValidateDaemonSet(t) + errors = apps_validation.ValidateDaemonSet(t, podValidationOptions) case *apps.Deployment: if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = apps_validation.ValidateDeployment(t) + errors = apps_validation.ValidateDeployment(t, podValidationOptions) case *networking.Ingress: if t.Namespace == "" { t.Namespace = api.NamespaceDefault @@ -269,18 +282,30 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { Version: legacyscheme.Scheme.PrioritizedVersionsForGroup(networking.GroupName)[0].Version, } errors = networking_validation.ValidateIngressCreate(t, 
gv) + case *networking.IngressClass: + /* + if t.Namespace == "" { + t.Namespace = api.NamespaceDefault + } + gv := schema.GroupVersion{ + Group: networking.GroupName, + Version: legacyscheme.Scheme.PrioritizedVersionsForGroup(networking.GroupName)[0].Version, + } + */ + errors = networking_validation.ValidateIngressClass(t) + case *policy.PodSecurityPolicy: - errors = policy_validation.ValidatePodSecurityPolicy(t) + errors = policy_validation.ValidatePodSecurityPolicy(t, pspValidationOptions) case *apps.ReplicaSet: if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = apps_validation.ValidateReplicaSet(t) + errors = apps_validation.ValidateReplicaSet(t, podValidationOptions) case *batch.CronJob: if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = batch_validation.ValidateCronJob(t) + errors = batch_validation.ValidateCronJob(t, podValidationOptions) case *networking.NetworkPolicy: if t.Namespace == "" { t.Namespace = api.NamespaceDefault @@ -291,6 +316,9 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { t.Namespace = api.NamespaceDefault } errors = policy_validation.ValidatePodDisruptionBudget(t) + case *rbac.ClusterRole: + // clusterole does not accept namespace + errors = rbac_validation.ValidateClusterRole(t) case *rbac.ClusterRoleBinding: // clusterolebinding does not accept namespace errors = rbac_validation.ValidateClusterRoleBinding(t) @@ -418,6 +446,7 @@ func TestExampleObjectSchemas(t *testing.T) { "storagelimits": {&api.LimitRange{}}, }, "admin/sched": { + "clusterrole": {&rbac.ClusterRole{}}, "my-scheduler": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &rbac.ClusterRoleBinding{}, &apps.Deployment{}}, "pod1": {&api.Pod{}}, "pod2": {&api.Pod{}}, @@ -441,12 +470,12 @@ func TestExampleObjectSchemas(t *testing.T) { "cassandra-statefulset": {&apps.StatefulSet{}, &storage.StorageClass{}}, }, "application/guestbook": { - "frontend-deployment": {&apps.Deployment{}}, - "frontend-service": 
{&api.Service{}}, - "redis-master-deployment": {&apps.Deployment{}}, - "redis-master-service": {&api.Service{}}, - "redis-slave-deployment": {&apps.Deployment{}}, - "redis-slave-service": {&api.Service{}}, + "frontend-deployment": {&apps.Deployment{}}, + "frontend-service": {&api.Service{}}, + "redis-follower-deployment": {&apps.Deployment{}}, + "redis-follower-service": {&api.Service{}}, + "redis-leader-deployment": {&apps.Deployment{}}, + "redis-leader-service": {&api.Service{}}, }, "application/hpa": { "php-apache": {&autoscaling.HorizontalPodAutoscaler{}}, @@ -456,8 +485,10 @@ func TestExampleObjectSchemas(t *testing.T) { "nginx-svc": {&api.Service{}}, }, "application/job": { - "cronjob": {&batch.CronJob{}}, - "job-tmpl": {&batch.Job{}}, + "cronjob": {&batch.CronJob{}}, + "job-tmpl": {&batch.Job{}}, + "indexed-job": {&batch.Job{}}, + "indexed-job-vol": {&batch.Job{}}, }, "application/job/rabbitmq": { "job": {&batch.Job{}}, @@ -536,13 +567,15 @@ func TestExampleObjectSchemas(t *testing.T) { "two-container-pod": {&api.Pod{}}, }, "pods/config": { - "redis-pod": {&api.Pod{}}, + "redis-pod": {&api.Pod{}}, + "example-redis-config": {&api.ConfigMap{}}, }, "pods/inject": { "dapi-envars-container": {&api.Pod{}}, "dapi-envars-pod": {&api.Pod{}}, "dapi-volume": {&api.Pod{}}, "dapi-volume-resources": {&api.Pod{}}, + "dependent-envars": {&api.Pod{}}, "envars": {&api.Pod{}}, "pod-multiple-secret-env-variable": {&api.Pod{}}, "pod-secret-envFrom": {&api.Pod{}}, @@ -588,10 +621,11 @@ func TestExampleObjectSchemas(t *testing.T) { "redis": {&api.Pod{}}, }, "policy": { - "baseline-psp": {&policy.PodSecurityPolicy{}}, - "example-psp": {&policy.PodSecurityPolicy{}}, - "privileged-psp": {&policy.PodSecurityPolicy{}}, - "restricted-psp": {&policy.PodSecurityPolicy{}}, + "baseline-psp": {&policy.PodSecurityPolicy{}}, + "example-psp": {&policy.PodSecurityPolicy{}}, + "priority-class-resourcequota": {&api.ResourceQuota{}}, + "privileged-psp": {&policy.PodSecurityPolicy{}}, + 
"restricted-psp": {&policy.PodSecurityPolicy{}}, "zookeeper-pod-disruption-budget-maxunavailable": {&policy.PodDisruptionBudget{}}, "zookeeper-pod-disruption-budget-minavailable": {&policy.PodDisruptionBudget{}}, }, @@ -600,29 +634,42 @@ func TestExampleObjectSchemas(t *testing.T) { "load-balancer-example": {&apps.Deployment{}}, }, "service/access": { - "frontend": {&api.Service{}, &apps.Deployment{}}, - "hello-application": {&apps.Deployment{}}, - "hello-service": {&api.Service{}}, - "hello": {&apps.Deployment{}}, + "backend-deployment": {&apps.Deployment{}}, + "backend-service": {&api.Service{}}, + "frontend-deployment": {&apps.Deployment{}}, + "frontend-service": {&api.Service{}}, + "hello-application": {&apps.Deployment{}}, }, "service/networking": { - "curlpod": {&apps.Deployment{}}, - "custom-dns": {&api.Pod{}}, - "dual-stack-default-svc": {&api.Service{}}, - "dual-stack-ipv4-svc": {&api.Service{}}, - "dual-stack-ipv6-lb-svc": {&api.Service{}}, - "dual-stack-ipv6-svc": {&api.Service{}}, - "hostaliases-pod": {&api.Pod{}}, - "ingress": {&networking.Ingress{}}, - "network-policy-allow-all-egress": {&networking.NetworkPolicy{}}, - "network-policy-allow-all-ingress": {&networking.NetworkPolicy{}}, - "network-policy-default-deny-egress": {&networking.NetworkPolicy{}}, - "network-policy-default-deny-ingress": {&networking.NetworkPolicy{}}, - "network-policy-default-deny-all": {&networking.NetworkPolicy{}}, - "nginx-policy": {&networking.NetworkPolicy{}}, - "nginx-secure-app": {&api.Service{}, &apps.Deployment{}}, - "nginx-svc": {&api.Service{}}, - "run-my-nginx": {&apps.Deployment{}}, + "curlpod": {&apps.Deployment{}}, + "custom-dns": {&api.Pod{}}, + "dual-stack-default-svc": {&api.Service{}}, + "dual-stack-ipfamilies-ipv6": {&api.Service{}}, + "dual-stack-ipv6-svc": {&api.Service{}}, + "dual-stack-prefer-ipv6-lb-svc": {&api.Service{}}, + "dual-stack-preferred-ipfamilies-svc": {&api.Service{}}, + "dual-stack-preferred-svc": {&api.Service{}}, + "external-lb": 
{&networking.IngressClass{}}, + "example-ingress": {&networking.Ingress{}}, + "hostaliases-pod": {&api.Pod{}}, + "ingress-resource-backend": {&networking.Ingress{}}, + "ingress-wildcard-host": {&networking.Ingress{}}, + "minimal-ingress": {&networking.Ingress{}}, + "name-virtual-host-ingress": {&networking.Ingress{}}, + "name-virtual-host-ingress-no-third-host": {&networking.Ingress{}}, + "namespaced-params": {&networking.IngressClass{}}, + "network-policy-allow-all-egress": {&networking.NetworkPolicy{}}, + "network-policy-allow-all-ingress": {&networking.NetworkPolicy{}}, + "network-policy-default-deny-egress": {&networking.NetworkPolicy{}}, + "network-policy-default-deny-ingress": {&networking.NetworkPolicy{}}, + "network-policy-default-deny-all": {&networking.NetworkPolicy{}}, + "nginx-policy": {&networking.NetworkPolicy{}}, + "nginx-secure-app": {&api.Service{}, &apps.Deployment{}}, + "nginx-svc": {&api.Service{}}, + "run-my-nginx": {&apps.Deployment{}}, + "simple-fanout-example": {&networking.Ingress{}}, + "test-ingress": {&networking.Ingress{}}, + "tls-example-ingress": {&networking.Ingress{}}, }, "windows": { "configmap-pod": {&api.ConfigMap{}, &api.Pod{}}, diff --git a/content/zh/examples/service/networking/dual-stack-ipv4-svc.yaml b/content/zh/examples/service/networking/dual-stack-ipfamilies-ipv6.yaml similarity index 73% rename from content/zh/examples/service/networking/dual-stack-ipv4-svc.yaml rename to content/zh/examples/service/networking/dual-stack-ipfamilies-ipv6.yaml index a875f44d6d..7c7239cae6 100644 --- a/content/zh/examples/service/networking/dual-stack-ipv4-svc.yaml +++ b/content/zh/examples/service/networking/dual-stack-ipfamilies-ipv6.yaml @@ -2,11 +2,13 @@ apiVersion: v1 kind: Service metadata: name: my-service + labels: + app: MyApp spec: - ipFamily: IPv4 + ipFamilies: + - IPv6 selector: app: MyApp ports: - protocol: TCP port: 80 - targetPort: 9376 \ No newline at end of file diff --git 
a/content/zh/examples/service/networking/dual-stack-ipv6-lb-svc.yaml b/content/zh/examples/service/networking/dual-stack-prefer-ipv6-lb-svc.yaml similarity index 76% rename from content/zh/examples/service/networking/dual-stack-ipv6-lb-svc.yaml rename to content/zh/examples/service/networking/dual-stack-prefer-ipv6-lb-svc.yaml index 2586ec9b39..0949a75428 100644 --- a/content/zh/examples/service/networking/dual-stack-ipv6-lb-svc.yaml +++ b/content/zh/examples/service/networking/dual-stack-prefer-ipv6-lb-svc.yaml @@ -5,11 +5,12 @@ metadata: labels: app: MyApp spec: - ipFamily: IPv6 + ipFamilyPolicy: PreferDualStack + ipFamilies: + - IPv6 type: LoadBalancer selector: app: MyApp ports: - protocol: TCP port: 80 - targetPort: 9376 \ No newline at end of file diff --git a/data/announcements/scheduled.yaml b/data/announcements/scheduled.yaml index eca320cc88..831ad1ebb6 100644 --- a/data/announcements/scheduled.yaml +++ b/data/announcements/scheduled.yaml @@ -71,4 +71,26 @@ announcements: KubeCon + CloudNativeCon EU 2021 virtual. message: | 4 days of incredible opportunities to collaborate, learn + share with the entire community!
    - May 4 - May 7, 2021. \ No newline at end of file + May 4 - May 7, 2021. + +- name: Kubecon 2021 NA + startTime: 2021-10-01T00:00:00 + endTime: 2021-10-16T01:00:00 + style: "background: linear-gradient(90deg, rgb(7, 132, 111) 0%, rgb(54, 214, 183) 100%)" + title: | + + KubeCon + CloudNativeCon North America 2021 Los Angeles, California + Virtual. + message: | + 5 days of incredible opportunites to collaborate, learn + share with the entire community!
    + October 11 - 15, 2021. + +- name: Kubecon 2021 China + startTime: 2021-11-30T00:00:00 + endTime: 2021-12-10T14:00:00 + style: "background: linear-gradient(90deg, rgb(253, 133, 1) 0%, rgb(128, 34, 196) 100%)" + title: | + + KubeCon + CloudNativeCon + Open Source Summit China 2021 Virtual. + message: | + 2 days of incredible opportunities to collaborate, learn + share with the entire community!
    + December 9 + 10, 2021. diff --git a/data/i18n/en/en.toml b/data/i18n/en/en.toml index 3eebea4ebb..e32d8819d2 100644 --- a/data/i18n/en/en.toml +++ b/data/i18n/en/en.toml @@ -60,6 +60,9 @@ other = "Older versions" [end_of_life] other = "End of Life:" +[envvars_heading] +other = "Environment variables" + [error_404_were_you_looking_for] other = "Were you looking for:" diff --git a/data/i18n/pl/pl.toml b/data/i18n/pl/pl.toml index 9621301a49..45069edd7a 100644 --- a/data/i18n/pl/pl.toml +++ b/data/i18n/pl/pl.toml @@ -1,6 +1,5 @@ # i18n strings for the Polish site. # NOTE: Please keep the entries in alphabetical order when editing - [caution] other = "Ostrzeżenie:" @@ -28,8 +27,11 @@ other = "Twitter" [community_youtube_name] other = "YouTube" +[deprecation_title] +other = "Teraz oglądasz dokumentację Kubernetesa w wersji:" + [deprecation_warning] -other = " dokumentacja nie jest już aktualizowana. Wyświetlona jest wersja archiwalna. Po aktualną dokumentację zajrzyj na" +other = " - dokumentacja nie jest już aktualizowana. Wyświetlona jest wersja archiwalna. Po aktualną dokumentację zajrzyj na" [deprecation_file_warning] other = "Przestarzały" @@ -46,6 +48,24 @@ other = "Jestem..." [docs_label_users] other = "Użytkownicy" +[docs_version_current] +other = "(ta dokumentacja)" + +[docs_version_latest_heading] +other = "Najnowsza wersja" + +[docs_version_other_heading] +other = "Starsze wersje" + +[end_of_life] +other = "Zakończenie wsparcia:" + +[error_404_were_you_looking_for] +other = "Czy chodziło o:" + +[examples_heading] +other = "Przykłady" + [feedback_heading] other = "Twoja opinia" @@ -58,11 +78,17 @@ other = "Czy ta strona była przydatna?" [feedback_yes] other = "Tak" +[inline_list_separator] +other = "," + [input_placeholder_email_address] other = "adres e-mail" +[latest_release] +other = "Najnowsze wydanie:" + [latest_version] -other = "to najnowsza wersja." +other = "najnowszą wersję." 
[layouts_blog_pager_prev] other = "<< Poprzedni" @@ -128,16 +154,10 @@ other = "Wnieś swój wkład" other = """The Linux Foundation ®. All rights reserved. The Linux Foundation has registered trademarks and uses trademarks. For a list of trademarks of The Linux Foundation, please see our Trademark Usage page""" [main_documentation_license] -other = """The Kubernetes Authors | Documentation Distributed under CC BY 4.0""" - -[main_edit_this_page] -other = "Edytuj stronę" - -[main_github_create_an_issue] -other = "Zgłoś problem" +other = """Autorzy Kubernetesa | Dokumentacja jest udostępniona w ramach licencji CC BY 4.0""" [main_github_invite] -other = "Chcesz zacząć współtworzyć kod Kubernetesa?" +other = "Chcesz zacząć zabawę z kodem Kubernetesa?" [main_github_view_on] other = "Zajrzyj na GitHub" @@ -172,26 +192,47 @@ other = "Informacja:" [objectives_heading] other = "Cele" +[options_heading] +other = "Opcje" + +[post_create_issue] +other = "Zgłoś problem" + [prerequisites_heading] other = "Nim zaczniesz" +[previous_patches] +other = "Poprawki:" + +[seealso_heading] +other = "Zobacz też" + [subscribe_button] other = "Subskrybuj" +[synopsis_heading] +other = "Streszczenie" + +[thirdparty_message] +other = """Ta sekcja przekierowuje do projektów osób trzecich, które udostępniają funkcjonalność wymaganą przez Kubernetesa. Autory projektu Kubernetes nie są odpowiedzialni za te projekty. Ta strona podąża za wytycznymi CNCF dla stron internetowych aby pokazać projekty w kolejności alfabetycznej. 
Aby dodać projekt na tę listę przeczytaj wytyczne dla zawartości przed wysyłaniem zmian.""" + [ui_search_placeholder] other = "Szukaj" [version_check_mustbe] -other = "Twój serwer Kubernetes musi być w wersji " +other = "Twój serwer Kubernetesa musi być w wersji " [version_check_mustbeorlater] -other = "Twój serwer Kubernetes musi być co najmniej w wersji " +other = "Twój serwer Kubernetesa musi być co najmniej w wersji " [version_check_tocheck] other = "Aby sprawdzić wersję, wpisz " +[version_menu] +other = "Wersje" + [warning] other = "Uwaga:" [whatsnext_heading] -other = "Następne:" +other = "Co dalej?" diff --git a/data/releases/schedule.yaml b/data/releases/schedule.yaml index 6b2c3c1a8f..99abec370e 100644 --- a/data/releases/schedule.yaml +++ b/data/releases/schedule.yaml @@ -1,22 +1,51 @@ schedules: -- release: 1.21 - next: 1.21.2 - cherryPickDeadline: 2021-06-12 - targetDate: 2021-06-16 - endOfLifeDate: 2022-04-30 +- release: 1.22 + next: 1.22.2 + cherryPickDeadline: 2021-09-10 + targetDate: 2021-09-15 + endOfLifeDate: 2022-10-28 previousPatches: + - release: 1.22.1 + cherryPickDeadline: 2021-08-16 + targetDate: 2021-08-19 +- release: 1.21 + next: 1.21.5 + cherryPickDeadline: 2021-09-10 + targetDate: 2021-09-15 + endOfLifeDate: 2022-06-28 + previousPatches: + - release: 1.21.4 + cherryPickDeadline: 2021-08-07 + targetDate: 2021-08-11 + - release: 1.21.3 + cherryPickDeadline: 2021-07-10 + targetDate: 2021-07-14 + - release: 1.21.2 + cherryPickDeadline: 2021-06-12 + targetDate: 2021-06-16 - release: 1.21.1 cherryPickDeadline: 2021-05-07 targetDate: 2021-05-12 + note: Regression https://groups.google.com/g/kubernetes-dev/c/KuF8s2zueFs - release: 1.20 - next: 1.20.8 - cherryPickDeadline: 2021-06-12 - targetDate: 2021-06-16 - endOfLifeDate: 2021-12-30 + next: 1.20.11 + cherryPickDeadline: 2021-09-10 + targetDate: 2021-09-15 + endOfLifeDate: 2022-02-28 previousPatches: + - release: 1.20.10 + cherryPickDeadline: 2021-08-07 + targetDate: 2021-08-11 + - release: 
1.20.9 + cherryPickDeadline: 2021-07-10 + targetDate: 2021-07-14 + - release: 1.20.8 + cherryPickDeadline: 2021-06-12 + targetDate: 2021-06-16 - release: 1.20.7 cherryPickDeadline: 2021-05-07 targetDate: 2021-05-12 + note: Regression https://groups.google.com/g/kubernetes-dev/c/KuF8s2zueFs - release: 1.20.6 cherryPickDeadline: 2021-04-09 targetDate: 2021-04-14 @@ -27,23 +56,35 @@ schedules: cherryPickDeadline: 2021-02-12 targetDate: 2021-02-18 - release: 1.20.3 - cherryPickDeadline: "Conformance Tests Issue https://groups.google.com/g/kubernetes-dev/c/oUpY9vWgzJo" + cherryPickDeadline: 2021-02-12 targetDate: 2021-02-17 + note: "Conformance Tests Issue https://groups.google.com/g/kubernetes-dev/c/oUpY9vWgzJo" - release: 1.20.2 cherryPickDeadline: 2021-01-08 targetDate: 2021-01-13 - release: 1.20.1 - cherryPickDeadline: "Tagging Issue https://groups.google.com/g/kubernetes-dev/c/dNH2yknlCBA" + cherryPickDeadline: 2020-12-11 targetDate: 2020-12-18 + note: "Tagging Issue https://groups.google.com/g/kubernetes-dev/c/dNH2yknlCBA" - release: 1.19 - next: 1.19.12 - cherryPickDeadline: 2021-06-12 - targetDate: 2021-06-16 - endOfLifeDate: 2021-09-30 + next: 1.19.15 + cherryPickDeadline: 2021-09-10 + targetDate: 2021-09-15 + endOfLifeDate: 2021-10-28 previousPatches: + - release: 1.19.14 + cherryPickDeadline: 2021-08-07 + targetDate: 2021-08-11 + - release: 1.19.13 + cherryPickDeadline: 2021-07-10 + targetDate: 2021-07-14 + - release: 1.19.12 + cherryPickDeadline: 2021-06-12 + targetDate: 2021-06-16 - release: 1.19.11 cherryPickDeadline: 2021-05-07 targetDate: 2021-05-12 + note: Regression https://groups.google.com/g/kubernetes-dev/c/KuF8s2zueFs - release: 1.19.10 cherryPickDeadline: 2021-04-09 targetDate: 2021-04-14 @@ -57,8 +98,9 @@ schedules: cherryPickDeadline: 2021-01-08 targetDate: 2021-01-13 - release: 1.19.6 - cherryPickDeadline: "Tagging Issue https://groups.google.com/g/kubernetes-dev/c/dNH2yknlCBA" + cherryPickDeadline: 2020-12-11 targetDate: 2020-12-18 + note: 
"Tagging Issue https://groups.google.com/g/kubernetes-dev/c/dNH2yknlCBA" - release: 1.19.5 cherryPickDeadline: 2020-12-04 targetDate: 2020-12-09 diff --git a/go.mod b/go.mod index b45ff242a4..a472ae517d 100644 --- a/go.mod +++ b/go.mod @@ -1,38 +1,40 @@ module k8s.io/website -go 1.15 +go 1.16 require ( - k8s.io/apimachinery v0.20.0 - k8s.io/kubernetes v1.20.0 + github.com/google/go-cmp v0.5.6 // indirect + k8s.io/apimachinery v0.22.0 + k8s.io/kubernetes v0.0.0 ) replace ( - k8s.io/api => k8s.io/api v0.20.0 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.0 - k8s.io/apimachinery => k8s.io/apimachinery v0.20.0 - k8s.io/apiserver => k8s.io/apiserver v0.20.0 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.0 - k8s.io/client-go => k8s.io/client-go v0.20.0 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.0 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.0 - k8s.io/code-generator => k8s.io/code-generator v0.20.0 - k8s.io/component-base => k8s.io/component-base v0.20.0 - k8s.io/component-helpers => k8s.io/component-helpers v0.20.0 - k8s.io/controller-manager => k8s.io/controller-manager v0.20.0 - k8s.io/cri-api => k8s.io/cri-api v0.20.0 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.0 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.0 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.0 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.0 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.0 - k8s.io/kubectl => k8s.io/kubectl v0.20.0 - k8s.io/kubelet => k8s.io/kubelet v0.20.0 - k8s.io/kubernetes => k8s.io/kubernetes v1.20.0 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.0 - k8s.io/metrics => k8s.io/metrics v0.20.0 - k8s.io/mount-utils => k8s.io/mount-utils v0.20.0 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.0 - k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.20.0 - k8s.io/sample-controller => k8s.io/sample-controller v0.20.0 + k8s.io/api 
=> k8s.io/api v0.22.0 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.22.0 + k8s.io/apimachinery => k8s.io/apimachinery v0.22.0 + k8s.io/apiserver => k8s.io/apiserver v0.22.0 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.22.0 + k8s.io/client-go => k8s.io/client-go v0.22.0 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.22.0 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.22.0 + k8s.io/code-generator => k8s.io/code-generator v0.22.0 + k8s.io/component-base => k8s.io/component-base v0.22.0 + k8s.io/component-helpers => k8s.io/component-helpers v0.22.0 + k8s.io/controller-manager => k8s.io/controller-manager v0.22.0 + k8s.io/cri-api => k8s.io/cri-api v0.22.0 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.22.0 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.22.0 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.22.0 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.22.0 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.22.0 + k8s.io/kubectl => k8s.io/kubectl v0.22.0 + k8s.io/kubelet => k8s.io/kubelet v0.22.0 + k8s.io/kubernetes => ../kubernetes + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.22.0 + k8s.io/metrics => k8s.io/metrics v0.22.0 + k8s.io/mount-utils => k8s.io/mount-utils v0.22.0 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.22.0 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.22.0 + k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.22.0 + k8s.io/sample-controller => k8s.io/sample-controller v0.22.0 ) diff --git a/go.sum b/go.sum index 723fccaf6a..c9bd6c742c 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690 h1:N9r8OBSXAgEUfho3SQtZLY8zo6E1OdOMvelvP22aVFc= bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -10,240 +11,330 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0 h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0 
h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037 h1:+PdD6GLKejR9DizMAKT5DpSAkKswvZrurk1/eEt9+pw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v55.0.0+incompatible h1:L4/vUGbg1Xkw5L20LZD+hJI5I+ibWSytqQ68lTCfLwY= +github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod 
h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod 
h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.1.0 h1:ISSNzGUh+ZSzizJWOWzs8bwpXIePbGLW4z/AmUFGH5A= github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= +github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317 h1:JhyuWIqYrstW7KHMjk/fTqU0xtMpBOHuiTA2FVc7L4E= github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= +github.com/JeffAshton/win_pdh 
v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.15 h1:qkLXKzb1QoVatRyd/YlXZ/Kg0m5K3SPuoD82jjSOaBc= github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.10-0.20200715222032-5eafd1556990 h1:1xpVY4dSUSbW3PcSGxZJhI8Z+CJiqbd933kM7HIinTc= github.com/Microsoft/hcsshim v0.8.10-0.20200715222032-5eafd1556990/go.mod h1:ay/0dTb7NsG8QMDfsRfLHgZo/6xAJShLe1+ePPflihk= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod 
h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af h1:wVe6/Ea46ZMeNkQjjBW6xcqyQA/j5e0D6GytH95g0gQ= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod 
h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= -github.com/aws/aws-sdk-go v1.6.10/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= -github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/auth0/go-jwt-middleware v1.0.1 
h1:/fsQ4vRr4zod1wKReUH+0A3ySRjGiT9G34kypO/EKwI= +github.com/auth0/go-jwt-middleware v1.0.1/go.mod h1:YSeUX3z6+TF2H+7padiEqNJ73Zy9vXW72U//IgN0BIM= github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= -github.com/bazelbuild/bazel-gazelle v0.18.2/go.mod h1:D0ehMSbS+vesFsLGiD6JXu3mVEzOlfUl8wNnq+x/9p0= -github.com/bazelbuild/bazel-gazelle v0.19.1-0.20191105222053-70208cbdc798/go.mod h1:rPwzNHUqEzngx1iVBfO/2X2npKaT3tqPqqHW6rVsn/A= -github.com/bazelbuild/buildtools v0.0.0-20190731111112-f720930ceb60/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= -github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= -github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M= +github.com/aws/aws-sdk-go v1.38.49 h1:E31vxjCe6a5I+mJLmUGaZobiWmg9KdWaud9IfceYeYQ= +github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= +github.com/bits-and-blooms/bitset v1.2.0 
h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod 
h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/checkpoint-restore/go-criu v0.0.0-20181120144056-17b0214f6c48/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= -github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0 h1:TW8f/UvntYoVDMN1K2HlT82qH1rb0sOjpGw3m6Ym+i4= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed/go.mod 
h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= -github.com/cilium/ebpf v0.0.0-20200601085316-9f1617e5c574/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= -github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2 h1:iHsfF/t4aW4heW2YKfeHrVPGdtYTL4C4KocpM8KTSnI= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313 h1:eIHD9GNM3Hp7kcRW5mvcz7WTR3ETeoYYKwpgA04kaXE= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= -github.com/container-storage-interface/spec v1.2.0/go.mod 
h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/container-storage-interface/spec v1.3.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo= +github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59 h1:qWj4qVYZ95vLWwqyNJCQg7rDsG5wPdze0UaPolH7DUk= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= -github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/containerd 
v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.4 h1:rtRG4N6Ct7GNssATwgpvMGfnjnwfjnu/Zs9W3Ikzq+M= +github.com/containerd/containerd v1.4.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3 h1:esQOJREg8nw8aXj6uCN5dfW5cKUBiEJ/+nni1Q/D/sw= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v1.0.0/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v1.0.0/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v1.0.1 h1:PvuK4E3D5S5q6IqsPDCy928FhP0LUIGcmZ/Yhgp5Djw= github.com/containerd/typeurl v1.0.1/go.mod 
h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/coredns/corefile-migration v1.0.6/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= -github.com/coredns/corefile-migration v1.0.10/go.mod h1:RMy/mXdeDlYwzt0vdMEJvT2hGJ2I86/eO0UdXmH9XNI= +github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0= +github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= +github.com/coredns/corefile-migration v1.0.11/go.mod h1:RMy/mXdeDlYwzt0vdMEJvT2hGJ2I86/eO0UdXmH9XNI= +github.com/coredns/corefile-migration v1.0.12 h1:TJGATo0YLQJVIKJZLajXE1IrhRFtYTR1cYsGIT1YNEk= +github.com/coredns/corefile-migration v1.0.12/go.mod h1:NJOI8ceUF/NTgEwtjD+TUq3/BnH/GF7WAM3RzCa3hBo= +github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 
h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd h1:uVsMphB1eRx7xB1njzL3fuMdWRN8HtVzoUOItHMwv5c= github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v1.4.2-0.20200309214505-aa6a9891b09c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/docker v20.10.2+incompatible h1:vFgEHPqWBTp4pTjdLwjAA4bSo3gvIGOYwuJTlEjVBCw= +github.com/docker/docker v20.10.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod 
h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90 h1:WXb3TSNmHp2vHoCroCIB1foO/yQ36swABL8aOVeDpgg= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 
h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fvbommel/sortorder v1.0.1 h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= -github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.9.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= +github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -252,16 +343,18 @@ 
github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2 github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod 
h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -270,64 +363,61 @@ github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCs github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag 
v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-ozzo/ozzo-validation v3.5.0+incompatible h1:sUy/in/P6askYr16XJgTKq/0SZhiWsdg4WZGaLsGQkM= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= 
-github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= @@ -338,285 +428,355 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= -github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= -github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= 
-github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.18.0/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg= -github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= -github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= -github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= -github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e h1:KhcknUwkWHKZPbFy2P7jH5LKJ3La+0ZeknkkmrSgqb0= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= 
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= -github.com/google/cadvisor v0.37.0/go.mod h1:OhDE+goNVel0eGY8mR7Ifq1QUI1in5vJBIgIpcajK/I= -github.com/google/cadvisor v0.38.5/go.mod h1:1OFB9sOOMkBdUBGCO/1SArawTnDscgMzTodacVDe8mA= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/cadvisor v0.39.0/go.mod h1:rjQFmK4jPCpxeUdLq9bYhNFFsjgGOtpnDmDeap0+nsw= +github.com/google/cadvisor v0.39.2 h1:SzgL5IYoMZEFVA9usi0xCy8SXSVXKQ6aL/rYs/kQjXE= +github.com/google/cadvisor v0.39.2/go.mod h1:kN93gpdevu+bpS227TyHVZyCU5bbqCzTj5T9drl34MI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3 h1:SRgJV+IoxM5MKyFdlSUeNy6/ycRUF2yBAKdAQswoHUk= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= 
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= 
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= 
+github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1 h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0 h1:WhIgCr5a7AaVH6jPUwjtRuuE7/RDufnUvzIr48smyxs= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= github.com/hashicorp/memberlist v0.1.3/go.mod 
h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= +github.com/heketi/heketi v10.2.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= +github.com/heketi/heketi v10.3.0+incompatible h1:X4DBFPzcyWZWhia32d94UhDECQJHH0M5kpRb1gxxUHk= +github.com/heketi/heketi v10.3.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= +github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6 h1:oJ/NLadJn5HoxvonA6VxG31lg0d6XOURNA09BTtM4fY= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5 h1:qPmlgoeRS18y2dT+iAH5vEKZgIqgiPi2Y8UCu/b7Aq8= github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= 
github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= +github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 
+github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty 
v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/libopenstorage/openstorage v1.0.0 h1:GLPam7/0mpdP8ZZtKjbfcXJBTIA/T1O6CBErVEFEyIM= github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lpabon/godbc v0.1.1 h1:ilqjArN1UOENJJdM34I2YHKmF/B0gGq4VLoSGy9iAao= github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= -github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= 
-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= +github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989 h1:PS1dLCGtD8bb9RPKJrc8bS7qHL6JnW1CZvwzH9dPoUs= github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= -github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= 
+github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0 h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= -github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= -github.com/moby/term 
v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb h1:e+l77LJOEqXTIQihQJVkA6ZxPOUmfPM5e4H7rcpgtSk= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= 
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= -github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= -github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/mrunalp/fileutils v0.5.0 h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mvdan/xurls v1.1.0 h1:OpuDelGQ1R1ueQ6sSryzi6P+1RtBpfQHM8fJwlE45ww= github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= 
-github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= -github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc90.0.20200616040943-82d2fa4eb069/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8= -github.com/opencontainers/runc v1.0.0-rc91.0.20200707015106-819fcc687efb/go.mod h1:ZuXhqlr4EiRYgDrBDNfSbE4+n9JX4+V107NwAmF7sZA= -github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE= -github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc 
v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= +github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs= +github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= -github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= -github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c 
h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021 h1:0XM1XL/OFFJjXsYXlG30spTkV/E9+gmd5GD1w2HE8xM= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod 
h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -624,71 +784,85 @@ github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs 
v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= +github.com/quobyte/api v0.1.8 h1:+sOX1gIlC/OaLipqVZWrHgly9Kh9Qo8OygeS0mWAg30= github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446 h1:/NRJ5vAYoqz+7sG51ubIDHXeWO8DlTSrToPu6q11ziA= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/robfig/cron v1.1.0 
h1:jk4/Hud3TTdcrJgUOBgsqrZBarcxl6ADIjSC2iniwLY= github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= +github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f h1:UFr9zpz4xgTnIE5yIMtWAMngCdZ9p/+q6lTbgelo80M= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob 
v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod 
h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.1.0 h1:MkTeG1DMwsrdH7QtLXy5W+fUxWq+vmb6cLmyJ7aRtF0= +github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 
-github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -696,105 +870,151 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper 
v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/storageos/go-api v2.2.0+incompatible h1:U0SablXoZIg06gvSlg8BCdzq1C/SkHVygOVX95Z2MU0= github.com/storageos/go-api v2.2.0+incompatible/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability 
v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 h1:ndzgwNDnKIqyCvHTXaCqh9KlOWKvBry6nuXMJmonVsE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/urfave/cli v1.20.0/go.mod 
h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= -github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20200520041808-52d707b772fe/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vmware/govmomi v0.20.3 h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= 
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod 
h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5 h1:Gqga3zA9tdAcfqobUGjSoCob5L3f8Dt5EuOp3ihNZko= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/pkg/v3 v3.5.0 h1:ntrg6vvKRW26JRmHTE0iNlDgYK6JX3hg/4cD62X0ixk= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0 h1:jk8D/lwGEDlQU9kZXUFMSANkE22Sg5+mW27ip8xcF9E= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod 
h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 h1:Q3C9yzW6I9jqEc8sawxzxZmY48fs9u220KXq6d5s3XU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod 
h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= 
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM= -golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -802,6 +1022,7 @@ golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= 
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= @@ -809,8 +1030,11 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20210220032938-85be41e4509f h1:GrkO5AtFUU9U/1f5ctbIBXtBGeSJbWwIYfIsTcFMaX4= +golang.org/x/exp v0.0.0-20210220032938-85be41e4509f/go.mod h1:I6l2HNBLBZEcrOoCpyKLdY2lHoRZ8lI4x60KMCQDft4= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -822,23 +1046,27 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod 
h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f h1:kgfVkAEEQXXQ0qc6dH7n6y37NAYmTFmz0YRwrRjgxKw= +golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod 
v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -850,7 +1078,6 @@ golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -859,7 
+1086,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -868,28 +1095,34 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/perf 
v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -897,8 +1130,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -908,20 +1139,21 @@ golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -938,57 +1170,64 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201110211018-35f3e6cf4a65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 
h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -997,18 +1236,19 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1018,19 +1258,26 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= 
+golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q= gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b h1:Qh4dB5D/WpoUUp3lSod7qgoyEHbDGPUWjIbnqdqqe1k= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= 
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1038,15 +1285,15 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.1-0.20200106000736-b8fc810ca6b5/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.1/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1054,7 +1301,6 @@ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto 
v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1068,22 +1314,31 @@ google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4 google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c 
h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1092,197 +1347,208 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= 
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.0 h1:0HIbH907iBTAntm+88IJV2qmJALDAh8sPekI9Vc1fm0= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.1 h1:XM28wIgFzaBmeZ5dNHIpWLQpt/9DGKxk+rCg/22nnYE= gopkg.in/warnings.v0 v0.1.1/go.mod 
h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -grpc.go4.org 
v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.18.4 h1:8x49nBRxuXGUlDlwlWd3RMY1SayZrzFfxea3UZSkFw4= -k8s.io/api v0.18.4/go.mod h1:lOIQAKYgai1+vz9J7YcDZwC26Z0zQewYOGWdyIPUUQ4= -k8s.io/api v0.19.0 h1:XyrFIJqTYZJ2DU7FBE/bSPz7b1HvbVBuBf07oeo6eTc= -k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= -k8s.io/api v0.20.0 h1:WwrYoZNM1W1aQEbyl8HNG+oWGzLpZQBlcerS9BQw9yI= -k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= -k8s.io/apiextensions-apiserver v0.18.4/go.mod h1:NYeyeYq4SIpFlPxSAB6jHPIdvu3hL0pc36wuRChybio= -k8s.io/apiextensions-apiserver v0.19.0/go.mod h1:znfQxNpjqz/ZehvbfMg5N6fvBJW5Lqu5HVLTJQdP4Fs= -k8s.io/apiextensions-apiserver v0.20.0/go.mod h1:ZH+C33L2Bh1LY1+HphoRmN1IQVLTShVcTojivK3N9xg= -k8s.io/apimachinery v0.18.4 h1:ST2beySjhqwJoIFk6p7Hp5v5O0hYY6Gngq/gUYXTPIA= -k8s.io/apimachinery v0.18.4/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ= 
-k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.20.0 h1:jjzbTJRXk0unNS71L7h3lxGDH/2HPxMPaQY+MjECKL8= -k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apiserver v0.18.4 h1:pn1jSQkfboPSirZopkVpEdLW4FcQLnYMaIY8LFxxj30= -k8s.io/apiserver v0.18.4/go.mod h1:q+zoFct5ABNnYkGIaGQ3bcbUNdmPyOCoEBcg51LChY8= -k8s.io/apiserver v0.19.0 h1:jLhrL06wGAADbLUUQm8glSLnAGP6c7y5R3p19grkBoY= -k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= -k8s.io/apiserver v0.20.0 h1:0MwO4xCoqZwhoLbFyyBSJdu55CScp4V4sAgX6z4oPBY= -k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8= -k8s.io/cli-runtime v0.18.4/go.mod h1:9/hS/Cuf7NVzWR5F/5tyS6xsnclxoPLVtwhnkJG1Y4g= -k8s.io/cli-runtime v0.19.0/go.mod h1:tun9l0eUklT8IHIM0jors17KmUjcrAxn0myoBYwuNuo= -k8s.io/cli-runtime v0.20.0/go.mod h1:C5tewU1SC1t09D7pmkk83FT4lMAw+bvMDuRxA7f0t2s= -k8s.io/client-go v0.18.4 h1:un55V1Q/B3JO3A76eS0kUSywgGK/WR3BQ8fHQjNa6Zc= -k8s.io/client-go v0.18.4/go.mod h1:f5sXwL4yAZRkAtzOxRWUhA/N8XzGCb+nPZI8PfobZ9g= -k8s.io/client-go v0.19.0 h1:1+0E0zfWFIWeyRhQYWzimJOyAk2UT7TiARaLNwJCf7k= -k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= -k8s.io/client-go v0.20.0 h1:Xlax8PKbZsjX4gFvNtt4F5MoJ1V5prDvCuoq9B7iax0= -k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY= -k8s.io/cloud-provider v0.18.4/go.mod h1:JdI6cuSFPSPANEciv0v5qfwztkeyFCVc1S3krLYrw0E= -k8s.io/cloud-provider v0.19.0 h1:Ae09nHr6BVPEzmAWbZedYC0gjsIPbt7YsIY0V/NHGr0= -k8s.io/cloud-provider v0.19.0/go.mod h1:TYh7b7kQ6wiqF7Ftb+u3lN4IwvgOPbBrcvC3TDAW4cw= -k8s.io/cloud-provider v0.20.0 h1:CVPQ66iyfNgeGomUq2jE/TWrfzE77bdCpemhFS8955U= -k8s.io/cloud-provider v0.20.0/go.mod h1:Lz/luSVD5BrHDDhtVdjFh0C2qQCRYdf0b9BHQ9L+bXc= -k8s.io/cluster-bootstrap v0.18.4/go.mod h1:hNG705ec9SMN2BGlJ81R2CnyJjNKfROtAxvI9JXZdiM= -k8s.io/cluster-bootstrap v0.19.0/go.mod 
h1:kBn1DKyqoM245wzz+AAnGkuysJ+9GqVbPYveTo4KiaA= -k8s.io/cluster-bootstrap v0.20.0/go.mod h1:6WZaNIBvcvL7MkPzSRKrZDIr4u+ePW2oIWoRsEFMjmE= -k8s.io/code-generator v0.18.4/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= -k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= -k8s.io/component-base v0.18.4 h1:Kr53Fp1iCGNsl9Uv4VcRvLy7YyIqi9oaJOQ7SXtKI98= -k8s.io/component-base v0.18.4/go.mod h1:7jr/Ef5PGmKwQhyAz/pjByxJbC58mhKAhiaDu0vXfPk= -k8s.io/component-base v0.19.0 h1:OueXf1q3RW7NlLlUCj2Dimwt7E1ys6ZqRnq53l2YuoE= -k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= -k8s.io/component-base v0.20.0 h1:BXGL8iitIQD+0NgW49UsM7MraNUUGDU3FBmrfUAtmVQ= -k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA= -k8s.io/component-helpers v0.20.0/go.mod h1:nx6NOtfSfGOxnSZsDJxpGbnsVuUA1UXpwDvZIrtigNk= -k8s.io/controller-manager v0.20.0/go.mod h1:nD4qym/pmCz2v1tpqvlEBVlHW9CAZwedloM8GrJTLpg= -k8s.io/cri-api v0.18.4/go.mod h1:OJtpjDvfsKoLGhvcc0qfygved0S0dGX56IJzPbqTG1s= -k8s.io/cri-api v0.19.0/go.mod h1:UN/iU9Ua0iYdDREBXNE9vqCJ7MIh/FW3VIL0d8pw7Fw= -k8s.io/cri-api v0.20.0/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/csi-translation-lib v0.18.4/go.mod h1:FTci2m8/3oN8E+8OyblBXei8w4mwbiH4boNPeob4piE= -k8s.io/csi-translation-lib v0.19.0/go.mod h1:zGS1YqV8U2So/t4Hz8SoRXMx5y5/KSKnA6BXXxGuo4A= -k8s.io/csi-translation-lib v0.20.0/go.mod h1:M4CdD66GxEI6ev8aTtsA2NkK9kIF9K5VZQMcw/SsoLs= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y= +k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= +k8s.io/api v0.22.0 h1:elCpMZ9UE8dLdYxr55E06TmSeji9I3KH494qH70/y+c= 
+k8s.io/api v0.22.0/go.mod h1:0AoXXqst47OI/L0oGKq9DG61dvGRPXs7X4/B7KyjBCU= +k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc= +k8s.io/apiextensions-apiserver v0.22.0 h1:QTuZIQggaE7N8FTjur+1zxLmEPziphK7nNm8t+VNO3g= +k8s.io/apiextensions-apiserver v0.22.0/go.mod h1:+9w/QQC/lwH2qTbpqndXXjwBgidlSmytvIUww16UACE= +k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA= +k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.22.0 h1:CqH/BdNAzZl+sr3tc0D3VsK3u6ARVSo3GWyLmfIjbP0= +k8s.io/apimachinery v0.22.0/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apiserver v0.21.0 h1:1hWMfsz+cXxB77k6/y0XxWxwl6l9OF26PC9QneUVn1Q= +k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= +k8s.io/apiserver v0.22.0 h1:KZh2asnRBjawLLfPOi6qiD+A2jaNt31HCnZG6AX3Qcs= +k8s.io/apiserver v0.22.0/go.mod h1:04kaIEzIQrTGJ5syLppQWvpkLJXQtJECHmae+ZGc/nc= +k8s.io/cli-runtime v0.21.0/go.mod h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo= +k8s.io/cli-runtime v0.22.0 h1:xM0UJ91iPKvPeooS/LS4U3sPVRAeUrUslJ0sUtE7a7Q= +k8s.io/cli-runtime v0.22.0/go.mod h1:An6zELQ7udUI0GaXvkuMqyopPA14dIgNqpH8cZu1vig= +k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= +k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/client-go v0.22.0 h1:sD6o9O6tCwUKCENw8v+HFsuAbq2jCu8cWC61/ydwA50= +k8s.io/client-go v0.22.0/go.mod h1:GUjIuXR5PiEv/RVK5OODUsm6eZk7wtSWZSaSJbpFdGg= +k8s.io/cloud-provider v0.21.0/go.mod h1:z17TQgu3JgUFjcgby8sj5X86YdVK5Pbt+jm/eYMZU9M= +k8s.io/cloud-provider v0.22.0 h1:eK0swLQ1TZCLefRbgwEo/ZS4ZDo6FkOJDkDIBITshyw= +k8s.io/cloud-provider v0.22.0/go.mod h1:UsQNOxrStwOXoDfVNgEbKgcQt2BYuHGKobixm0zKTis= +k8s.io/cluster-bootstrap v0.21.0/go.mod h1:rs7i1JpBCa56YNmnYxFJuoUghIwpMzDidY8ZmqiRnrQ= +k8s.io/cluster-bootstrap v0.22.0 h1:XYx5fIoYJuD0+EyKXA5HXU7yc9beVHSe5hy6XRdx5jU= +k8s.io/cluster-bootstrap v0.22.0/go.mod 
h1:VeZXiGfH+yfnC2KtvkSwNTAqahg6yiCV/szbWpoI+3k= +k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/code-generator v0.22.0 h1:wIo+6NuAEf+aP6dblF+fPJOkY/VnM6wqNHusiW/eQ3o= +k8s.io/code-generator v0.22.0/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/component-base v0.21.0 h1:tLLGp4BBjQaCpS/KiuWh7m2xqvAdsxLm4ATxHSe5Zpg= +k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= +k8s.io/component-base v0.22.0 h1:ZTmX8hUqH9T9gc0mM42O+KDgtwTYbVTt2MwmLP0eK8A= +k8s.io/component-base v0.22.0/go.mod h1:SXj6Z+V6P6GsBhHZVbWCw9hFjUdUYnJerlhhPnYCBCg= +k8s.io/component-helpers v0.21.0 h1:SoWLsd63LI5uwofcHVSO4jtlmZEJRycfwNBKU4eAGPQ= +k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U= +k8s.io/component-helpers v0.22.0 h1:OoTOtxTkg/T16FRS1K/WfABzxliTCq3RTbFHMBSod/o= +k8s.io/component-helpers v0.22.0/go.mod h1:YNIbQI59ayNiU8JHlPIxVkOUYycbKhk5Niy0pcyJOEY= +k8s.io/controller-manager v0.21.0/go.mod h1:Ohy0GRNRKPVjB8C8G+dV+4aPn26m8HYUI6ejloUBvUA= +k8s.io/controller-manager v0.22.0 h1:zFQx0Ji0IMv7z0gYC0Ruy0YQxtf1Lo2TQo9UqWNcKME= +k8s.io/controller-manager v0.22.0/go.mod h1:KCFcmFIjh512sVIm1EhAPJ+4miASDvbZA5eO/2nbr2M= +k8s.io/cri-api v0.21.0/go.mod h1:nJbXlTpXwYCYuGMR7v3PQb1Du4WOGj2I9085xMVjr3I= +k8s.io/cri-api v0.22.0 h1:YECUji0xxCTCWFO/TUkrL1b44Ip6mZJbiqP6Us/+Vys= +k8s.io/cri-api v0.22.0/go.mod h1:mj5DGUtElRyErU5AZ8EM0ahxbElYsaLAMTPhLPQ40Eg= +k8s.io/csi-translation-lib v0.21.0/go.mod h1:edq+UMpgqEx3roTuGF/03uIuSOsI986jtu65+ytLlkA= +k8s.io/csi-translation-lib v0.22.0 h1:mqyE5LVIn2jBEH1B9lSzgPwws3rzgJpflMPTbQJuXy8= +k8s.io/csi-translation-lib v0.22.0/go.mod h1:wb6bRqDth2jcHfty7mLdQc7nfknHhIkAlAZgSgplXhc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod 
h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= +k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-aggregator v0.18.4/go.mod h1:xOVy4wqhpivXCt07Diwdms2gonG+SONVx+1e7O+GfC0= -k8s.io/kube-aggregator v0.19.0/go.mod h1:1Ln45PQggFAG8xOqWPIYMxUq8WNtpPnYsbUJ39DpF/A= -k8s.io/kube-aggregator v0.20.0/go.mod h1:3Is/gzzWmhhG/rA3CpA1+eVye87lreBQDFGcAGT7gzo= -k8s.io/kube-controller-manager v0.18.4/go.mod h1:GrY1S0F7zA0LQlt0ApOLt4iMpphKTk3mFrQl1+usrfs= -k8s.io/kube-controller-manager v0.19.0/go.mod h1:uGZyiHK73NxNEN5EZv/Esm3fbCOzeq4ndttMexVZ1L0= -k8s.io/kube-controller-manager v0.20.0/go.mod h1:Pmli7dnwIVpwKJVeab97yBt35QEFdw65oqT5ti0ikUs= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-proxy v0.18.4/go.mod h1:h2c+ckQC1XpybDs53mWhLCvvM6txduWVLPQwwvGqR9M= 
-k8s.io/kube-proxy v0.19.0/go.mod h1:7NoJCFgsWb7iiMB1F6bW1St5rEXC+ir2aWiJehASmTU= -k8s.io/kube-proxy v0.20.0/go.mod h1:R97oobM6zSh3ZqFMXi5DzCH/qJXNzua/UzcDmuQRexM= -k8s.io/kube-scheduler v0.18.4/go.mod h1:vRFb/8Yi7hh670beaPrXttMpjt7H8EooDkgwFm8ts4k= -k8s.io/kube-scheduler v0.19.0/go.mod h1:1XGjJUgstM0/0x8to+bSGSyCs3Dp3dbCEr3Io/mvd4s= -k8s.io/kube-scheduler v0.20.0/go.mod h1:cRTGsJU3TfQvbMJBmpoPgq9rBF5cQLpLKoOafKwdZnI= -k8s.io/kubectl v0.18.4/go.mod h1:EzB+nfeUWk6fm6giXQ8P4Fayw3dsN+M7Wjy23mTRtB0= -k8s.io/kubectl v0.19.0/go.mod h1:gPCjjsmE6unJzgaUNXIFGZGafiUp5jh0If3F/x7/rRg= -k8s.io/kubectl v0.20.0/go.mod h1:8x5GzQkgikz7M2eFGGuu6yOfrenwnw5g4RXOUgbjR1M= -k8s.io/kubelet v0.18.4/go.mod h1:D0V9JYaTJRF+ry+9JfnM4uyg3ySRLQ02XjfQ5f2u4CM= -k8s.io/kubelet v0.19.0/go.mod h1:cGds22piF/LnFzfAaIT+efvOYBHVYdunqka6NVuNw9g= -k8s.io/kubelet v0.20.0/go.mod h1:lMdjO1NA+JZXSYtxb48pQmNERmC+vVIXIYkJIugVhl0= -k8s.io/kubernetes v1.18.4 h1:AYtJ24PIT91P1K8ekCrvay8LK8WctWhC5+NI0HZ8sqE= -k8s.io/kubernetes v1.18.4/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M= -k8s.io/kubernetes v1.19.0 h1:ir53YuXsfsuVABmtYHCTUa3xjD41Htxv3o+xoQjJdUo= -k8s.io/kubernetes v1.19.0/go.mod h1:yhT1/ltQajQsha3tnYc9QPFYSumGM45nlZdjf7WqE1A= -k8s.io/kubernetes v1.20.0 h1:mnc69esJC3PJgSptxNJomGz2gBthyGLSEy18WiyRH4U= -k8s.io/kubernetes v1.20.0/go.mod h1:/xrHGNfoQphtkhZvyd5bA1lRmz+QkDVmBZu+O8QMoek= -k8s.io/kubernetes v1.20.2 h1:EsQROw+yFsDMfjEHp52cKs4JVI6lAHA2SHGAF88cK7s= -k8s.io/legacy-cloud-providers v0.18.4/go.mod h1:Mnxtra7DxVrODfGZHPsrkLi22lwmZOlWkjyyO3vW+WM= -k8s.io/legacy-cloud-providers v0.19.0/go.mod h1:Q5czDCPnStdpFohMpcbnqL+MLR75kUhIDIsnmwEm0/o= -k8s.io/legacy-cloud-providers v0.20.0/go.mod h1:1jEkaU7h9+b1EYdfWDBvhFAr+QpRfUjQfK+dGhxPGfA= -k8s.io/metrics v0.18.4/go.mod h1:luze4fyI9JG4eLDZy0kFdYEebqNfi0QrG4xNEbPkHOs= -k8s.io/metrics v0.19.0/go.mod h1:WykpW8B60OeAJx1imdwUgyOID2kDljr/Q+1zrPJ98Wo= -k8s.io/metrics v0.20.0/go.mod h1:9yiRhfr8K8sjdj2EthQQE9WvpYDvsXIV3CjN4Ruq4Jw= -k8s.io/mount-utils v0.20.0/go.mod 
h1:Jv9NRZ5L2LF87A17GaGlArD+r3JAJdZFvo4XD1cG4Kc= -k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= -k8s.io/sample-apiserver v0.18.4/go.mod h1:j5XH5FUmMd/ztoz+9ch0+hL+lsvWdgxnTV7l3P3Ijoo= -k8s.io/sample-apiserver v0.19.0/go.mod h1:Bq9UulNoKnT72JqlkWF2JS14cXxJqcmvLtb5+EcwiNA= -k8s.io/sample-apiserver v0.20.0/go.mod h1:tScvbz/BcUG46IOsu2YLt4EjBP7XeUuMzMbQt2tQYWw= -k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= -k8s.io/system-validators v1.1.2/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= -k8s.io/system-validators v1.2.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-aggregator v0.21.0/go.mod h1:sIaa9L4QCBo9gjPyoGJns4cBjYVLq3s49FxF7m/1A0A= +k8s.io/kube-aggregator v0.22.0 h1:he3plI8vlaPJxR9vsy/lL5ga1V8CoA8M8x1Bn8eTCeM= +k8s.io/kube-aggregator v0.22.0/go.mod h1:zHTepg0Q4tKzru7Pwg1QYHWrU/wrvIXM8hUdDAH66qg= +k8s.io/kube-controller-manager v0.21.0/go.mod h1:QGJ1P7eU4FQq8evpCHN5e4QwPpcr2sbWFJBO/DKBUrw= +k8s.io/kube-controller-manager v0.22.0 h1:9IP8Q1JQE6jVv5Vy4Ay8BBFp1oqgZw2fGKV7c4Frp80= +k8s.io/kube-controller-manager v0.22.0/go.mod 
h1:E/EYMoCj8bbPRmu19JF4B9QLyQL8Tywg+9Q/rg+F80U= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-proxy v0.21.0/go.mod h1:36jW3e6+5iQql9tHrLjVrmwpPsbhTywoI6OCFL7MWRs= +k8s.io/kube-proxy v0.22.0 h1:0wiIlhvkujEI//2JgKbWvlfnd2xsOV9TjXA6R4sHR0k= +k8s.io/kube-proxy v0.22.0/go.mod h1:2ckKSCr8kZ0kNNCgxM7lt0g5CAaY767djotK4AEFPmI= +k8s.io/kube-scheduler v0.21.0/go.mod h1:wf1oi1NHSsFYfG7lKwxJVmnQNBnhL9vOMXztcKQu5IU= +k8s.io/kube-scheduler v0.22.0 h1:zk4+z/vyv9MAlppaYgv5PpPdvaq8bOYwIzUVu3dRVTs= +k8s.io/kube-scheduler v0.22.0/go.mod h1:n6tdYAiaoqXGLazCwIpOEg42qby0VMDs1KmN4DjQf50= +k8s.io/kubectl v0.21.0/go.mod h1:EU37NukZRXn1TpAkMUoy8Z/B2u6wjHDS4aInsDzVvks= +k8s.io/kubectl v0.22.0 h1:EBb7xLUaidG/YXAI5AXam3lK2VlnoFShhlMjnJVTbGA= +k8s.io/kubectl v0.22.0/go.mod h1:eeuP92uZbVL2UnOaf0nj9OjtI0hi/nekHU+0isURer0= +k8s.io/kubelet v0.21.0/go.mod h1:G5ZxMTVev9t4bhmsSxDAWhH6wXDYEVHVVFyYsw4laR4= +k8s.io/kubelet v0.22.0 h1:cVu1RWuikW9dMJSXDG2f6k81u7NuURrnzphgY/tQxZE= +k8s.io/kubelet v0.22.0/go.mod h1:CMdsuh9OFgbpeE+n46GpVMDecLlI0HxSRHMoNrTmJk4= +k8s.io/legacy-cloud-providers v0.21.0/go.mod h1:bNxo7gDg+PGkBmT/MFZswLTWdSWK9kAlS1s8DJca5q4= +k8s.io/legacy-cloud-providers v0.22.0 h1:CL+nxjE1o2KxV2l+ySYadvieJYA/jdYU8PHWDIdy0JU= +k8s.io/legacy-cloud-providers v0.22.0/go.mod h1:2tKlbeA9r0OYnBHyqHcnO1EoAeqYXw2IZH99DYwwErM= +k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ= +k8s.io/metrics v0.22.0 h1:fQ9Rc0ZAfTBevXSyjSk2yogoNHmS0ae+IFLVGHs8h/g= +k8s.io/metrics v0.22.0/go.mod h1:eYnwafAUNLLpVmY/msoq0RKIKH5C4TzfjKnMZ0Xrt3A= +k8s.io/mount-utils v0.21.0/go.mod h1:dwXbIPxKtTjrBEaX1aK/CMEf1KZ8GzMHpe3NEBfdFXI= +k8s.io/mount-utils v0.22.0 h1:yNUW+1HO+ZhYDEZ7a/14Un7nqW8Md4zeuLnenGCGDi4= 
+k8s.io/mount-utils v0.22.0/go.mod h1:gUi5ht+05KHYc/vJ9q9wbvG3MCYBeOsB5FdTyM60Pzo= +k8s.io/pod-security-admission v0.22.0 h1:WL+XUFyH++IyrFMMOKvL43Sx8hxH9GiawhV7ymOXAsc= +k8s.io/pod-security-admission v0.22.0/go.mod h1:xKTKO4nzxLDROM+RRndSU7kCZc2XcBYRKLYS+gYuqfo= +k8s.io/sample-apiserver v0.21.0/go.mod h1:yMffYq14yQZtuVPVBGaBJ+3Scb2xHT6QeqFfk3v+AEY= +k8s.io/sample-apiserver v0.22.0 h1:cpTwo4/nJgKczOBTE/o4Xa3qVhwO2Llnnjgs9YdM/58= +k8s.io/sample-apiserver v0.22.0/go.mod h1:Bkl0f9E1Moxwjvqct7kzDlTvNUTavsworU5FTPlVooA= +k8s.io/system-validators v1.4.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= +k8s.io/system-validators v1.5.0 h1:gGgluCTkpKc/zUszjamp4LFfMVM0wuYG2qjIFL4MMeQ= +k8s.io/system-validators v1.5.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM= +k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0 h1:93vKjrJopTPrtTNpZ8XIovER7iCIH1QU7wNbOQXC60I= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0 h1:XVFtQwFVwc02Wk+0L/Z/zDDXO81r5Lhe6iMKmGX3KhE= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0 h1:7ccXrupWZIS3twbUGrtKmHS2DXY6xegFua+6O3xgAFU= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -mvdan.cc/interfacer 
v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7 h1:uuHDyjllyzRyCIvvn0OBjiRB0SgBZGqHNYAmjR7fO50= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14 h1:TihvEz9MPj2u0KWds6E2OBUXfwaL4qRJ33c7HGiJpqk= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod 
h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY= +sigs.k8s.io/kustomize/api v0.8.11 h1:LzQzlq6Z023b+mBtc6v72N2mSHYmN8x7ssgbf/hv0H8= +sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g= +sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0= +sigs.k8s.io/kustomize/cmd/config v0.9.13 h1:lqOf0QcFhNvgZkgrPINNRs7TxEO7IGVtLMyUJId3oRE= +sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs= +sigs.k8s.io/kustomize/kustomize/v4 v4.0.5/go.mod h1:C7rYla7sI8EnxHE/xEhRBSHMNfcL91fx0uKmUlUhrBk= +sigs.k8s.io/kustomize/kustomize/v4 v4.2.0 h1:RKgbyHgzuHQZ35sBDzWcbnR3HBlJSYdSN0H+sx3tUkk= +sigs.k8s.io/kustomize/kustomize/v4 v4.2.0/go.mod h1:MOkR6fmhwG7hEDRXBYELTi5GSFcLwfqwzTRHW3kv5go= +sigs.k8s.io/kustomize/kyaml v0.10.15/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= +sigs.k8s.io/kustomize/kyaml v0.11.0 h1:9KhiCPKaVyuPcgOLJXkvytOvjMJLoxpjodiycb4gHsA= +sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod 
h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= -vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/layouts/docs/baseof.html b/layouts/docs/baseof.html index ec0b4d575a..cc6237938b 100644 --- a/layouts/docs/baseof.html +++ b/layouts/docs/baseof.html @@ -25,7 +25,7 @@ {{ if (and (not .Params.hide_feedback) (.Site.Params.ui.feedback.enable) (.Site.GoogleAnalytics)) }} {{ partial "feedback.html" .Site.Params.ui.feedback }} {{ end }} -
    {{ partial "page-meta-lastmod.html" . }}
    + {{ partial "page-meta-lastmod.html" . }} {{ if (.Site.DisqusShortname) }}
    {{ partial "disqus-comment.html" . }} @@ -40,4 +40,4 @@
    {{ partial "scripts.html" . }} - \ No newline at end of file + diff --git a/layouts/partials/head.html b/layouts/partials/head.html index 0fc1445b65..c15f9ba05d 100644 --- a/layouts/partials/head.html +++ b/layouts/partials/head.html @@ -87,7 +87,7 @@ {{ if .HasShortcode "mermaid" }} - + {{ end }} diff --git a/layouts/partials/hooks/body-end.html b/layouts/partials/hooks/body-end.html index 36e65f7791..e4442cb790 100644 --- a/layouts/partials/hooks/body-end.html +++ b/layouts/partials/hooks/body-end.html @@ -1,3 +1,6 @@ +{{ if .HasShortcode "kat-button" }} +
    +{{ end }} {{ with .Site.Params.algolia_docsearch }} {{ end }} diff --git a/layouts/partials/page-meta-links.html b/layouts/partials/page-meta-links.html index 97e84b55ae..529bcea29c 100644 --- a/layouts/partials/page-meta-links.html +++ b/layouts/partials/page-meta-links.html @@ -4,7 +4,7 @@ {{ $gh_repo := ($.Param "github_repo") }} {{ $gh_subdir := ($.Param "github_subdir") }} {{ $gh_project_repo := ($.Param "github_project_repo") }} - {{ $gh_branch := (default "master" ($.Param "github_branch")) }} + {{ $gh_branch := (default "main" ($.Param "github_branch")) }}
    {{ if $gh_repo }} {{ $gh_repo_path := printf "%s/content/%s" $gh_branch $pathFormatted }} diff --git a/layouts/partials/sidebar-tree.html b/layouts/partials/sidebar-tree.html index 4bc83737ae..f1a3c9926a 100644 --- a/layouts/partials/sidebar-tree.html +++ b/layouts/partials/sidebar-tree.html @@ -1,63 +1,94 @@ {{/* We cache this partial for bigger sites and set the active class client side. */}} -{{ $shouldDelayActive := ge (len .Site.Pages) 2000 }} +{{ $sidebarCacheLimit := cond (isset .Site.Params.ui "sidebar_cache_limit") .Site.Params.ui.sidebar_cache_limit 2000 -}} +{{ $shouldDelayActive := ge (len .Site.Pages) $sidebarCacheLimit -}}
    - {{ if not .Site.Params.ui.sidebar_search_disable }} + {{ if not .Site.Params.ui.sidebar_search_disable -}} - {{ end }} -
    -{{ define "section-tree-nav-section" }} -{{ $s := .section }} -{{ $p := .page }} -{{ $shouldDelayActive := .delayActive }} -{{ $active := eq $p.CurrentSection $s }} -{{ $show := or (eq $s $p.FirstSection) (and (not $p.Site.Params.ui.sidebar_menu_compact) ($p.IsDescendant $s)) }} -{{ $sid := $s.RelPermalink | anchorize }} -
      - {{ if (ne $s.File.Path "docs/_index.md") }} -
    • - - {{ $s.LinkTitle }} - +{{ define "section-tree-nav-section" -}} + {{ $s := .section -}} + {{ $p := .page -}} + {{ $shouldDelayActive := .shouldDelayActive -}} + {{ $sidebarMenuTruncate := .sidebarMenuTruncate -}} + {{ $treeRoot := cond (eq .ulNr 0) true false -}} + {{ $ulNr := .ulNr -}} + {{ $ulShow := .ulShow -}} + {{ $active := and (not $shouldDelayActive) (eq $s $p) -}} + {{ $activePath := and (not $shouldDelayActive) ($p.IsDescendant $s) -}} + {{ $show := cond (or (lt $ulNr $ulShow) $activePath (and (not $shouldDelayActive) (eq $s.Parent $p.Parent)) (and (not $shouldDelayActive) (eq $s.Parent $p)) (and (not $shouldDelayActive) ($p.IsDescendant $s.Parent))) true false -}} + {{ $mid := printf "m-%s" ($s.RelPermalink | anchorize) -}} + {{ $pages_tmp := where (union $s.Pages $s.Sections).ByWeight ".Params.toc_hide" "!=" true -}} + {{ $pages := $pages_tmp | first $sidebarMenuTruncate -}} + {{ $withChild := gt (len $pages) 0 -}} + {{ $manualLink := cond (isset $s.Params "manuallink") $s.Params.manualLink ( cond (isset $s.Params "manuallinkrelref") (relref $s $s.Params.manualLinkRelref) $s.RelPermalink) -}} + {{ $manualLinkTitle := cond (isset $s.Params "manuallinktitle") $s.Params.manualLinkTitle $s.Title -}} + +
    • + {{ if (and $p.Site.Params.ui.sidebar_menu_foldable (ge $ulNr 1)) -}} + + + {{ else -}} + {{ if not $treeRoot }} + {{ with $s.Params.Icon}}{{ end }}{{ $s.LinkTitle }} + {{ end -}} + {{ end -}} + {{ if $withChild -}} + {{ $ulNr := add $ulNr 1 -}} +
        + {{ $pages := where (union $s.Pages $s.Sections).ByWeight ".Params.toc_hide" "!=" true -}} + {{ with site.Params.language_alternatives -}} + {{ range . }} + {{ with (where $.section.Translations ".Lang" . ) -}} + {{ $p := index . 0 -}} + {{ $pages = $pages | lang.Merge (union $p.Pages $p.Sections) -}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ $pages := $pages | first 50 -}} + {{ range $pages -}} + {{ if (not (and (eq $s $p.Site.Home) (eq .Params.toc_root true)) ) -}} + {{ $mid := printf "m-%s" (.RelPermalink | anchorize) -}} + {{ $active := eq . $p -}} + {{ $isForeignLanguage := (ne (string .Lang) (string $.currentLang)) -}} + {{ if (and $isForeignLanguage ($p.IsDescendant $s)) -}} + + {{ .LinkTitle }}{{ if $isForeignLanguage }} ({{ .Lang | upper }}){{ end -}} + + {{ else -}} + {{ template "section-tree-nav-section" (dict "page" $p "section" . "currentLang" $.currentLang "shouldDelayActive" $shouldDelayActive "sidebarMenuTruncate" $sidebarMenuTruncate "ulNr" $ulNr "ulShow" $ulShow) }} + {{- end }} + {{- end }} + {{- end }} +
      + {{- end }}
    • - {{ end }} -
        -
      • - {{ $pages := where (union $s.Pages $s.Sections).ByWeight ".Params.toc_hide" "!=" true }} - {{ with site.Params.language_alternatives }} - {{ range . }} - {{ with (where $.section.Translations ".Lang" . ) }} - {{ $p := index . 0 }} - {{ $pages = $pages | lang.Merge (union $p.Pages $p.Sections) }} - {{ end }} - {{ end }} - {{ end }} - {{ $pages := $pages | first 50 }} - {{ range $pages }} - {{ if .IsPage }} - {{ $mid := printf "m-%s" (.RelPermalink | anchorize) }} - {{ $active := eq . $p }} - {{ $isForeignLanguage := (ne (string .Lang) (string $.currentLang)) }} - - {{ .LinkTitle }}{{ if $isForeignLanguage }} ({{ .Lang | upper }}){{ end }} - - {{ else }} - {{ template "section-tree-nav-section" (dict "page" $p "section" . "currentLang" $.currentLang) }} - {{ end }} - {{ end }} -
      • -
      -
    -{{ end }} +{{- end }} diff --git a/layouts/shortcodes/capture.html b/layouts/shortcodes/capture.html deleted file mode 100644 index cc762273c3..0000000000 --- a/layouts/shortcodes/capture.html +++ /dev/null @@ -1,8 +0,0 @@ -{{ $_hugo_config := `{ "version": 1 }`}} -{{- $id := .Get 0 -}} -{{- if not $id -}} -{{- errorf "missing id in capture" -}} -{{- end -}} -{{- $capture_id := printf "capture %s" $id -}} -{{- .Page.Scratch.Set $capture_id .Inner -}} -{{ warnf "Invalid shortcode: %s, in %q" $capture_id (relLangURL .Page.Path) }} \ No newline at end of file diff --git a/layouts/shortcodes/codenew.html b/layouts/shortcodes/codenew.html index e13e7b9ead..4c860be19d 100644 --- a/layouts/shortcodes/codenew.html +++ b/layouts/shortcodes/codenew.html @@ -4,7 +4,7 @@ {{ $fileDir := path.Split $file }} {{ $bundlePath := path.Join .Page.File.Dir $fileDir.Dir }} {{ $filename := printf "/content/%s/examples/%s" .Page.Lang $file | safeURL }} -{{ $ghlink := printf "https://%s/master%s" site.Params.githubwebsiteraw $filename | safeURL }} +{{ $ghlink := printf "https://%s/main%s" site.Params.githubwebsiteraw $filename | safeURL }} {{/* First assume this is a bundle and the file is inside it. 
*/}} {{ $resource := $p.Resources.GetMatch (printf "%s*" $file ) }} {{ with $resource }} diff --git a/layouts/shortcodes/example.html b/layouts/shortcodes/example.html new file mode 100644 index 0000000000..0d6eb7a370 --- /dev/null +++ b/layouts/shortcodes/example.html @@ -0,0 +1,14 @@ +{{ $file := .Get "file" }} +{{ $codelang := .Get "language" | default (path.Ext $file | strings.TrimPrefix ".") }} +{{ $fileDir := path.Split $file }} +{{ $bundlePath := path.Join .Page.File.Dir $fileDir.Dir }} +{{ $filename := printf "/content/%s/examples/%s" .Page.Lang $file | safeURL }} +{{ $ghlink := printf "https://%s/%s%s" site.Params.githubwebsiteraw (default "main" (site.Params.github_branch)) $filename | safeURL }} + + +{{- if gt (len .Inner) 0 -}} + {{- .Inner -}} +{{- else -}} + {{- $file -}} +{{- end -}} + diff --git a/layouts/shortcodes/kat-button b/layouts/shortcodes/kat-button index 3165e30150..4dcdfa5653 100644 --- a/layouts/shortcodes/kat-button +++ b/layouts/shortcodes/kat-button @@ -1,3 +1,2 @@ -
    - + diff --git a/netlify.toml b/netlify.toml index 978c34240a..981d398bad 100644 --- a/netlify.toml +++ b/netlify.toml @@ -7,9 +7,9 @@ functions = "functions" command = "git submodule update --init --recursive --depth 1 && make non-production-build" [build.environment] -HUGO_VERSION = "0.82.0" NODE_VERSION = "10.20.0" -RUBY_VERSION = "2.7.1" +HUGO_VERSION = "0.87.0" +RUBY_VERSION = "3.0.1" [context.production.environment] HUGO_BASEURL = "https://kubernetes.io/" diff --git a/static/_redirects b/static/_redirects index 0d95cef56f..3274c895a7 100644 --- a/static/_redirects +++ b/static/_redirects @@ -57,7 +57,7 @@ /docs/admin/node-conformance.md /docs/admin/node-conformance/ 301 /docs/admin/node-conformance/ /docs/setup/best-practices/node-conformance/ 301 /docs/admin/node-problem/ /docs/tasks/debug-application-cluster/monitor-node-health/ 301 -/docs/admin/out-of-resource/ /docs/tasks/administer-cluster/out-of-resource/ 301 +/docs/admin/out-of-resource/ /docs/concepts/scheduling-eviction/node-pressure-eviction/ 301 /docs/admin/rescheduler/ /docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ 301 /docs/admin/resourcequota/* /docs/concepts/policy/resource-quotas/ 301 /docs/admin/resourcequota/limitstorageconsumption/ /docs/tasks/administer-cluster/limit-storage-consumption/ 301 @@ -89,9 +89,10 @@ /docs/concepts/cluster-administration/device-plugins/ /docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/ 301 /docs/concepts/cluster-administration/etcd-upgrade/ /docs/tasks/administer-cluster/configure-upgrade-etcd/ 301 /docs/concepts/cluster-administration/guaranteed-scheduling-critical-addon-pods/ /docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ 301 +/docs/concepts/cluster-administration/kubelet-garbage-collection/ /docs/concepts/architecture/garbage-collection/#containers-images 301 /docs/concepts/cluster-administration/master-node-communication/ /docs/concepts/architecture/master-node-communication/ 301 
/docs/concepts/cluster-administration/network-plugins/ /docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/ 301 -/docs/concepts/cluster-administration/out-of-resource/ /docs/concepts/scheduling-eviction/node-pressure-eviction/ 301 +/docs/concepts/cluster-administration/out-of-resource/ /docs/concepts/scheduling-eviction/node-pressure-eviction/ 301 /docs/concepts/cluster-administration/resource-usage-monitoring /docs/tasks/debug-application-cluster/resource-usage-monitoring/ 301 /docs/concepts/cluster-administration/monitoring/ /docs/concepts/cluster-administration/system-metrics/ 301 /docs/concepts/cluster-administration/controller-metrics/ /docs/concepts/cluster-administration/system-metrics/ 301 @@ -115,6 +116,7 @@ /docs/concepts/extend-kubernetes/extend-cluster/ /docs/concepts/extend-kubernetes/ 301 /docs/concepts/jobs/cron-jobs/ /docs/concepts/workloads/controllers/cron-jobs/ 301 /docs/concepts/jobs/run-to-completion-finite-workloads/ /docs/concepts/workloads/controllers/job/ 301 +/id/docs/concepts/jobs/run-to-completion-finite-workloads/ /id/docs/concepts/workloads/controllers/job/ 301 /docs/concepts/nodes/node/ /docs/concepts/architecture/nodes/ 301 /docs/concepts/object-metadata/annotations/ /docs/concepts/overview/working-with-objects/annotations/ 301 /docs/concepts/overview/ /docs/concepts/overview/what-is-kubernetes/ 301 @@ -128,8 +130,11 @@ /docs/concepts/scheduling/scheduling-framework/ /docs/concepts/scheduling-eviction/scheduling-framework/ 301 /id/docs/concepts/scheduling/scheduling-framework/ /id/docs/concepts/scheduling-eviction/scheduling-framework/ 301 /docs/concepts/scheduling-eviction/eviction-policy/ /docs/concepts/scheduling-eviction/node-pressure-eviction/ 301 +/docs/concepts/scheduling-eviction/out-of-resource/ /docs/concepts/scheduling-eviction/node-pressure-eviction/ 301 +/docs/concepts/scheduling-eviction/pod-eviction/ /docs/concepts/scheduling-eviction/#pod-disruption 301 /docs/concepts/service-catalog/ 
/docs/concepts/extend-kubernetes/service-catalog/ 301 /docs/concepts/services-networking/networkpolicies/ /docs/concepts/services-networking/network-policies/ 301 +/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ /docs/tasks/network/customize-hosts-file-for-pods/ 301 /docs/concepts/storage/etcd-store-api-object/ /docs/tasks/administer-cluster/configure-upgrade-etcd/ 301 /docs/concepts/storage/volumes/emptyDirapiVersion/ /docs/concepts/storage/volumes/#emptydir/ 301 /docs/concepts/tools/kubectl/object-management-overview/ /docs/concepts/overview/object-management-kubectl/overview/ 301 @@ -145,8 +150,9 @@ /docs/concepts/workloads/controllers/cron-jobs/deployment/ /docs/concepts/workloads/controllers/cron-jobs/ 301 /docs/concepts/workloads/controllers/daemonset/docs/concepts/workloads/pods/pod/ /docs/concepts/workloads/pods/ 301 /docs/concepts/workloads/controllers/deployment/docs/concepts/workloads/pods/pod/ /docs/concepts/workloads/pods/ 301 - +/docs/concepts/workloads/controllers/garbage-collection/ /docs/concepts/architecture/garbage-collection/ 301 /docs/concepts/workloads/controllers/jobs-run-to-completion/ /docs/concepts/workloads/controllers/job/ 301 +/id/docs/concepts/workloads/controllers/jobs-run-to-completion/ /id/docs/concepts/workloads/controllers/job/ 301 /docs/concepts/workloads/controllers/statefulsets/ /docs/concepts/workloads/controllers/statefulset/ 301 /docs/concepts/workloads/controllers/statefulset.md /docs/concepts/workloads/controllers/statefulset/ 301! 
/docs/concepts/workloads/pods/pod/ /docs/concepts/workloads/pods/ 301 @@ -203,12 +209,12 @@ /docs/reference/generated/kube-scheduler/ /docs/reference/command-line-tools-reference/kube-scheduler/ 301 /docs/reference/generated/kubectl/kubectl-options/ /docs/reference/kubectl/kubectl/ 301 /docs/reference/generated/kubectl/kubectl/ /docs/reference/generated/kubectl/kubectl-commands/ 301 -/docs/reference/generated/kubectl/kubectl/kubectl_*.md /docs/reference/generated/kubectl/kubectl-commands#:splat 301 +/docs/reference/generated/kubectl/kubectl/kubectl_* /docs/reference/generated/kubectl/kubectl-commands#:splat 301 /docs/reference/glossary/maintainer/ /docs/reference/glossary/approver/ 301 /docs/reference/kubectl/kubectl-cmds/ /docs/reference/generated/kubectl/kubectl-commands/ 301! -/docs/reference/kubectl/kubectl/kubectl_*.md /docs/reference/generated/kubectl/kubectl-commands#:splat 301 +/docs/reference/kubectl/kubectl/kubectl_* /docs/reference/generated/kubectl/kubectl-commands#:splat 301 /docs/reference/scheduling/profiles/ /docs/reference/scheduling/config/#profiles 301 /docs/reference/generated/kubernetes-api/v1.15/ https://v1-15.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/ 301 @@ -263,7 +269,7 @@ /docs/tasks/administer-cluster/overview/ /docs/concepts/cluster-administration/ 301 /docs/tasks/administer-cluster/quota-memory-cpu-namespace/ /docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace/ 301 /docs/tasks/administer-cluster/quota-pod-namespace/ /docs/tasks/administer-cluster/manage-resources/quota-pod-namespace/ 301 -/docs/tasks/administer-cluster/reserve-compute-resources/out-of-resource.md /docs/tasks/administer-cluster/out-of-resource/ 301 +/docs/tasks/administer-cluster/reserve-compute-resources/out-of-resource.md /docs/concepts/scheduling-eviction/node-pressure-eviction/ 301 /docs/tasks/administer-cluster/out-of-resource/ /docs/concepts/scheduling-eviction/node-pressure-eviction/ 301 
/docs/tasks/administer-cluster/romana-network-policy/ /docs/tasks/administer-cluster/network-policy-provider/romana-network-policy/ 301 /docs/tasks/administer-cluster/running-cloud-controller.md /docs/tasks/administer-cluster/running-cloud-controller/ 301 @@ -290,6 +296,7 @@ /docs/tasks/configure-pod-container/weave-network-policy/ /docs/tasks/administer-cluster/weave-network-policy/ 301 /docs/tasks/debug-application-cluster/sematext-logging-monitoring/ https://sematext.com/kubernetes/ 301 /docs/tasks/job/work-queue-1/ /docs/concepts/workloads/controllers/job/ 301 +/id/docs/tasks/job/work-queue-1/ /id/docs/concepts/workloads/controllers/job/ 301 /docs/tasks/setup-konnectivity/setup-konnectivity/ /docs/tasks/extend-kubernetes/setup-konnectivity/ 301 /docs/tasks/kubectl/get-shell-running-container/ /docs/tasks/debug-application-cluster/get-shell-running-container/ 301 /docs/tasks/kubectl/install/ /docs/tasks/tools/ 301 @@ -383,6 +390,7 @@ /docs/user-guide/introspection-and-debugging/ /docs/tasks/debug-application-cluster/debug-application-introspection/ 301 /docs/user-guide/jsonpath/ /docs/reference/kubectl/jsonpath/ /docs/user-guide/jobs/ /docs/concepts/workloads/controllers/job/ 301 +/id/docs/user-guide/jobs/ /id/docs/concepts/workloads/controllers/job/ 301 /docs/user-guide/jobs/expansions/ /docs/tasks/job/parallel-processing-expansion/ 301 /docs/user-guide/jobs/work-queue-1/ /docs/tasks/job/coarse-parallel-processing-work-queue/ 301 /docs/user-guide/jobs/work-queue-2/ /docs/tasks/job/fine-parallel-processing-work-queue/ 301 @@ -392,7 +400,7 @@ /docs/user-guide/kubectl-conventions/ /docs/reference/kubectl/conventions/ /docs/user-guide/kubectl-cheatsheet/ /docs/reference/kubectl/cheatsheet/ /cheatsheet /docs/reference/kubectl/cheatsheet/ 302 -/docs/user-guide/kubectl/kubectl_*/ /docs/reference/generated/kubectl/kubectl-commands#:splat 301 +/docs/user-guide/kubectl/kubectl_* /docs/reference/generated/kubectl/kubectl-commands#:splat 301 /docs/user-guide/labels/ 
/docs/concepts/overview/working-with-objects/labels/ 301 /docs/user-guide/liveness/ /docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ 301 /docs/user-guide/load-balancer/ /docs/tasks/access-application-cluster/create-external-load-balancer/ 301 diff --git a/static/docs/reference/generated/kubectl/kubectl-commands.html b/static/docs/reference/generated/kubectl/kubectl-commands.html index 6d1b082c61..06cfbabb1d 100644 --- a/static/docs/reference/generated/kubectl/kubectl-commands.html +++ b/static/docs/reference/generated/kubectl/kubectl-commands.html @@ -28,17 +28,17 @@ inspect them.


    create

    -

    Create a pod using the data in pod.json.

    +

    Create a pod using the data in pod.json

    kubectl create -f ./pod.json
     
    -

    Create a pod based on the JSON passed into stdin.

    +

    Create a pod based on the JSON passed into stdin

    cat pod.json | kubectl create -f -
     
    -

    Edit the data in docker-registry.yaml in JSON then create the resource using the edited data.

    +

    Edit the data in docker-registry.yaml in JSON then create the resource using the edited data

    kubectl create -f docker-registry.yaml --edit -o json
     
    @@ -158,36 +158,36 @@ inspect them.


    clusterrole

    -

    Create a ClusterRole named "pod-reader" that allows user to perform "get", "watch" and "list" on pods

    +

    Create a cluster role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods

    kubectl create clusterrole pod-reader --verb=get,list,watch --resource=pods
     
    -

    Create a ClusterRole named "pod-reader" with ResourceName specified

    +

    Create a cluster role named "pod-reader" with ResourceName specified

    kubectl create clusterrole pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod
     
    -

    Create a ClusterRole named "foo" with API Group specified

    +

    Create a cluster role named "foo" with API Group specified

    kubectl create clusterrole foo --verb=get,list,watch --resource=rs.extensions
     
    -

    Create a ClusterRole named "foo" with SubResource specified

    +

    Create a cluster role named "foo" with SubResource specified

    kubectl create clusterrole foo --verb=get,list,watch --resource=pods,pods/status
     
    -

    Create a ClusterRole name "foo" with NonResourceURL specified

    +

    Create a cluster role name "foo" with NonResourceURL specified

    kubectl create clusterrole "foo" --verb=get --non-resource-url=/logs/*
     
    -

    Create a ClusterRole name "monitoring" with AggregationRule specified

    +

    Create a cluster role name "monitoring" with AggregationRule specified

    kubectl create clusterrole monitoring --aggregation-rule="rbac.example.com/aggregate-to-monitoring=true"
     
    -

    Create a ClusterRole.

    +

    Create a cluster role.

    Usage

    $ kubectl create clusterrole NAME --verb=verb --resource=resource.group [--resource-name=resourcename] [--dry-run=server|client|none]

    Flags

    @@ -284,11 +284,11 @@ inspect them.


    clusterrolebinding

    -

    Create a ClusterRoleBinding for user1, user2, and group1 using the cluster-admin ClusterRole

    +

    Create a cluster role binding for user1, user2, and group1 using the cluster-admin cluster role

    kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-admin --user=user1 --user=user2 --group=group1
     
    -

    Create a ClusterRoleBinding for a particular ClusterRole.

    +

    Create a cluster role binding for a particular cluster role.

    Usage

    $ kubectl create clusterrolebinding NAME --clusterrole=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run=server|client|none]

    Flags

    @@ -373,34 +373,34 @@ inspect them.


    configmap

    -

    Create a new configmap named my-config based on folder bar

    +

    Create a new config map named my-config based on folder bar

    kubectl create configmap my-config --from-file=path/to/bar
     
    -

    Create a new configmap named my-config with specified keys instead of file basenames on disk

    +

    Create a new config map named my-config with specified keys instead of file basenames on disk

    kubectl create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt
     
    -

    Create a new configmap named my-config with key1=config1 and key2=config2

    +

    Create a new config map named my-config with key1=config1 and key2=config2

    kubectl create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2
     
    -

    Create a new configmap named my-config from the key=value pairs in the file

    +

    Create a new config map named my-config from the key=value pairs in the file

    kubectl create configmap my-config --from-file=path/to/bar
     
    -

    Create a new configmap named my-config from an env file

    +

    Create a new config map named my-config from an env file

    kubectl create configmap my-config --from-env-file=path/to/bar.env
     
    -

    Create a configmap based on a file, directory, or specified literal value.

    -

    A single configmap may package one or more key/value pairs.

    -

    When creating a configmap based on a file, the key will default to the basename of the file, and the value will default to the file content. If the basename is an invalid key, you may specify an alternate key.

    -

    When creating a configmap based on a directory, each file whose basename is a valid key in the directory will be packaged into the configmap. Any directory entries except regular files are ignored (e.g. subdirectories, symlinks, devices, pipes, etc).

    +

    Create a config map based on a file, directory, or specified literal value.

    +

    A single config map may package one or more key/value pairs.

    +

    When creating a config map based on a file, the key will default to the basename of the file, and the value will default to the file content. If the basename is an invalid key, you may specify an alternate key.

    +

    When creating a config map based on a directory, each file whose basename is a valid key in the directory will be packaged into the config map. Any directory entries except regular files are ignored (e.g. subdirectories, symlinks, devices, pipes, etc).

    Usage

    $ kubectl create configmap NAME [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run=server|client|none]

    Flags

    @@ -491,16 +491,16 @@ inspect them.


    cronjob

    -

    Create a cronjob

    +

    Create a cron job

    kubectl create cronjob my-job --image=busybox --schedule="*/1 * * * *"
     
    -

    Create a cronjob with command

    +

    Create a cron job with a command

    kubectl create cronjob my-job --image=busybox --schedule="*/1 * * * *" -- date
     
    -

    Create a cronjob with the specified name.

    +

    Create a cron job with the specified name.

    Usage

    $ kubectl create cronjob NAME --image=image --schedule='0/5 * * * ?' -- [COMMAND] [args...]

    Flags

    @@ -585,22 +585,22 @@ inspect them.


    deployment

    -

    Create a deployment named my-dep that runs the busybox image.

    +

    Create a deployment named my-dep that runs the busybox image

    kubectl create deployment my-dep --image=busybox
     
    -

    Create a deployment with command

    +

    Create a deployment with a command

    kubectl create deployment my-dep --image=busybox -- date
     
    -

    Create a deployment named my-dep that runs the nginx image with 3 replicas.

    +

    Create a deployment named my-dep that runs the nginx image with 3 replicas

    kubectl create deployment my-dep --image=nginx --replicas=3
     
    -

    Create a deployment named my-dep that runs the busybox image and expose port 5701.

    +

    Create a deployment named my-dep that runs the busybox image and expose port 5701

    kubectl create deployment my-dep --image=busybox --port=5701
     
    @@ -637,12 +637,6 @@ inspect them.

    Name of the manager used to track field ownership. -generator - - -The name of the API generator to use. - - image [] @@ -841,12 +835,12 @@ inspect them.

    kubectl create job my-job --image=busybox
     
    -

    Create a job with command

    +

    Create a job with a command

    kubectl create job my-job --image=busybox -- date
     
    -

    Create a job from a CronJob named "a-cronjob"

    +

    Create a job from a cron job named "a-cronjob"

    kubectl create job test-job --from=cronjob/a-cronjob
     
    @@ -1000,16 +994,16 @@ inspect them.


    poddisruptionbudget

    -

    Create a pod disruption budget named my-pdb that will select all pods with the app=rails label # and require at least one of them being available at any point in time.

    +

    Create a pod disruption budget named my-pdb that will select all pods with the app=rails label # and require at least one of them being available at any point in time

    kubectl create poddisruptionbudget my-pdb --selector=app=rails --min-available=1
     
    -

    Create a pod disruption budget named my-pdb that will select all pods with the app=nginx label # and require at least half of the pods selected to be available at any point in time.

    +

    Create a pod disruption budget named my-pdb that will select all pods with the app=nginx label # and require at least half of the pods selected to be available at any point in time

    kubectl create pdb my-pdb --selector=app=nginx --min-available=50%
     
    -

    Create a pod disruption budget with the specified name, selector, and desired minimum available pods

    +

    Create a pod disruption budget with the specified name, selector, and desired minimum available pods.

    Usage

    $ kubectl create poddisruptionbudget NAME --selector=SELECTOR --min-available=N [--dry-run=server|client|none]

    Flags

    @@ -1094,21 +1088,21 @@ inspect them.


    priorityclass

    -

    Create a priorityclass named high-priority

    +

    Create a priority class named high-priority

    kubectl create priorityclass high-priority --value=1000 --description="high priority"
     
    -

    Create a priorityclass named default-priority that considered as the global default priority

    +

    Create a priority class named default-priority that is considered as the global default priority

    kubectl create priorityclass default-priority --value=1000 --global-default=true --description="default priority"
     
    -

    Create a priorityclass named high-priority that can not preempt pods with lower priority

    +

    Create a priority class named high-priority that cannot preempt pods with lower priority

    kubectl create priorityclass high-priority --value=1000 --description="high priority" --preemption-policy="Never"
     
    -

    Create a priorityclass with the specified name, value, globalDefault and description

    +

    Create a priority class with the specified name, value, globalDefault and description.

    Usage

    $ kubectl create priorityclass NAME --value=VALUE --global-default=BOOL [--dry-run=server|client|none]

    Flags

    @@ -1199,16 +1193,16 @@ inspect them.


    quota

    -

    Create a new resourcequota named my-quota

    +

    Create a new resource quota named my-quota

    kubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10
     
    -

    Create a new resourcequota named best-effort

    +

    Create a new resource quota named best-effort

    kubectl create quota best-effort --hard=pods=100 --scopes=BestEffort
     
    -

    Create a resourcequota with the specified name, hard limits and optional scopes

    +

    Create a resource quota with the specified name, hard limits, and optional scopes.

    Usage

    $ kubectl create quota NAME [--hard=key1=value1,key2=value2] [--scopes=Scope1,Scope2] [--dry-run=server|client|none]

    Flags

    @@ -1287,22 +1281,22 @@ inspect them.


    role

    -

    Create a Role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods

    +

    Create a role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods

    kubectl create role pod-reader --verb=get --verb=list --verb=watch --resource=pods
     
    -

    Create a Role named "pod-reader" with ResourceName specified

    +

    Create a role named "pod-reader" with ResourceName specified

    kubectl create role pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod
     
    -

    Create a Role named "foo" with API Group specified

    +

    Create a role named "foo" with API Group specified

    kubectl create role foo --verb=get,list,watch --resource=rs.extensions
     
    -

    Create a Role named "foo" with SubResource specified

    +

    Create a role named "foo" with SubResource specified

    kubectl create role foo --verb=get,list,watch --resource=pods,pods/status
     
    @@ -1391,11 +1385,11 @@ inspect them.


    rolebinding

    -

    Create a RoleBinding for user1, user2, and group1 using the admin ClusterRole

    +

    Create a role binding for user1, user2, and group1 using the admin cluster role

    kubectl create rolebinding admin --clusterrole=admin --user=user1 --user=user2 --group=group1
     
    -

    Create a RoleBinding for a particular Role or ClusterRole.

    +

    Create a role binding for a particular role or cluster role.

    Usage

    $ kubectl create rolebinding NAME --clusterrole=NAME|--role=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run=server|client|none]

    Flags

    @@ -1506,7 +1500,7 @@ inspect them.

    '$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.

    That produces a ~/.dockercfg file that is used by subsequent 'docker push' and 'docker pull' commands to authenticate to the registry. The email address is optional.

    When creating applications, you may have a Docker registry that requires authentication. In order for the - nodes to pull images on your behalf, they have to have the credentials. You can provide this information + nodes to pull images on your behalf, they must have the credentials. You can provide this information by creating a dockercfg secret and attaching it to your service account.

    Usage

    $ kubectl create docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-file=[key=]source] [--dry-run=server|client|none]

    @@ -1734,12 +1728,12 @@ inspect them.


    secret tls

    -

    Create a new TLS secret named tls-secret with the given key pair:

    +

    Create a new TLS secret named tls-secret with the given key pair

    kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key
     

    Create a TLS secret from the given public/private key pair.

    -

    The public/private key pair must exist before hand. The public key certificate must be .PEM encoded and match the given private key.

    +

    The public/private key pair must exist beforehand. The public key certificate must be .PEM encoded and match the given private key.

    Usage

    $ kubectl create tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run=server|client|none]

    Flags

    @@ -1823,7 +1817,7 @@ inspect them.


    service

    -

    Create a service using specified subcommand.

    +

    Create a service using a specified subcommand.

    Usage

    $ kubectl create service


    @@ -2232,67 +2226,67 @@ inspect them.


    get

    -

    List all pods in ps output format.

    +

    List all pods in ps output format

    kubectl get pods
     
    -

    List all pods in ps output format with more information (such as node name).

    +

    List all pods in ps output format with more information (such as node name)

    kubectl get pods -o wide
     
    -

    List a single replication controller with specified NAME in ps output format.

    +

    List a single replication controller with specified NAME in ps output format

    kubectl get replicationcontroller web
     
    -

    List deployments in JSON output format, in the "v1" version of the "apps" API group:

    +

    List deployments in JSON output format, in the "v1" version of the "apps" API group

    kubectl get deployments.v1.apps -o json
     
    -

    List a single pod in JSON output format.

    +

    List a single pod in JSON output format

    kubectl get -o json pod web-pod-13je7
     
    -

    List a pod identified by type and name specified in "pod.yaml" in JSON output format.

    +

    List a pod identified by type and name specified in "pod.yaml" in JSON output format

    kubectl get -f pod.yaml -o json
     
    -

    List resources from a directory with kustomization.yaml - e.g. dir/kustomization.yaml.

    +

    List resources from a directory with kustomization.yaml - e.g. dir/kustomization.yaml

    kubectl get -k dir/
     
    -

    Return only the phase value of the specified pod.

    +

    Return only the phase value of the specified pod

    kubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}
     
    -

    List resource information in custom columns.

    +

    List resource information in custom columns

    kubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0].name,IMAGE:.spec.containers[0].image
     
    -

    List all replication controllers and services together in ps output format.

    +

    List all replication controllers and services together in ps output format

    kubectl get rc,services
     
    -

    List one or more resources by their type and names.

    +

    List one or more resources by their type and names

    kubectl get rc/web service/frontend pods/web-pod-13je7
     
    -

    Display one or many resources

    +

    Display one or many resources.

    Prints a table of the most important information about the specified resources. You can filter the list using a label selector and the --selector flag. If the desired resource type is namespaced you will only see results in your current namespace unless you pass --all-namespaces.

    Uninitialized objects are not shown unless --include-uninitialized is passed.

    By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resources.

    Use "kubectl api-resources" for a complete list of supported resources.

    Usage

    -

    $ kubectl get [(-o|--output=)json|yaml|wide|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE[.VERSION][.GROUP] [NAME | -l label] | TYPE[.VERSION][.GROUP]/NAME ...) [flags]

    +

    $ kubectl get [(-o|--output=)json|yaml|name|go-template|go-template-file|template|templatefile|jsonpath|jsonpath-as-json|jsonpath-file|custom-columns-file|custom-columns|wide] (TYPE[.VERSION][.GROUP] [NAME | -l label] | TYPE[.VERSION][.GROUP]/NAME ...) [flags]

    Flags

    @@ -2362,7 +2356,7 @@ inspect them.

    - + @@ -2447,47 +2441,47 @@ inspect them.


    run

    -

    Start a nginx pod.

    +

    Start a nginx pod

    kubectl run nginx --image=nginx
     
    -

    Start a hazelcast pod and let the container expose port 5701.

    +

    Start a hazelcast pod and let the container expose port 5701

    kubectl run hazelcast --image=hazelcast/hazelcast --port=5701
     
    -

    Start a hazelcast pod and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container.

    +

    Start a hazelcast pod and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container

    kubectl run hazelcast --image=hazelcast/hazelcast --env="DNS_DOMAIN=cluster" --env="POD_NAMESPACE=default"
     
    -

    Start a hazelcast pod and set labels "app=hazelcast" and "env=prod" in the container.

    +

    Start a hazelcast pod and set labels "app=hazelcast" and "env=prod" in the container

    kubectl run hazelcast --image=hazelcast/hazelcast --labels="app=hazelcast,env=prod"
     
    -

    Dry run. Print the corresponding API objects without creating them.

    +

    Dry run; print the corresponding API objects without creating them

    kubectl run nginx --image=nginx --dry-run=client
     
    -

    Start a nginx pod, but overload the spec with a partial set of values parsed from JSON.

    +

    Start a nginx pod, but overload the spec with a partial set of values parsed from JSON

    kubectl run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
     
    -

    Start a busybox pod and keep it in the foreground, don't restart it if it exits.

    +

    Start a busybox pod and keep it in the foreground, don't restart it if it exits

    kubectl run -i -t busybox --image=busybox --restart=Never
     
    -

    Start the nginx pod using the default command, but use custom arguments (arg1 .. argN) for that command.

    +

    Start the nginx pod using the default command, but use custom arguments (arg1 .. argN) for that command

    kubectl run nginx --image=nginx -- <arg1> <arg2> ... <argN>
     
    -

    Start the nginx pod using a different command and custom arguments.

    +

    Start the nginx pod using a different command and custom arguments

    kubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>
     
    @@ -2738,12 +2732,12 @@ inspect them.


    expose

    -

    Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000.

    +

    Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000

    kubectl expose rc nginx --port=80 --target-port=8000
     
    -

    Create a service for a replication controller identified by type and name specified in "nginx-controller.yaml", which serves on port 80 and connects to the containers on port 8000.

    +

    Create a service for a replication controller identified by type and name specified in "nginx-controller.yaml", which serves on port 80 and connects to the containers on port 8000

    kubectl expose -f nginx-controller.yaml --port=80 --target-port=8000
     
    @@ -2763,12 +2757,12 @@ inspect them.

    kubectl expose rc streamer --port=4100 --protocol=UDP --name=video-stream
     
    -

    Create a service for a replicated nginx using replica set, which serves on port 80 and connects to the containers on port 8000.

    +

    Create a service for a replicated nginx using replica set, which serves on port 80 and connects to the containers on port 8000

    kubectl expose rs nginx --port=80 --target-port=8000
     
    -

    Create a service for an nginx deployment, which serves on port 80 and connects to the containers on port 8000.

    +

    Create a service for an nginx deployment, which serves on port 80 and connects to the containers on port 8000

    kubectl expose deployment nginx --port=80 --target-port=8000
     
    @@ -2944,17 +2938,17 @@ inspect them.


    delete

    -

    Delete a pod using the type and name specified in pod.json.

    +

    Delete a pod using the type and name specified in pod.json

    kubectl delete -f ./pod.json
     
    -

    Delete resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml.

    +

    Delete resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml

    kubectl delete -k dir
     
    -

    Delete a pod based on the type and name in the JSON passed into stdin.

    +

    Delete a pod based on the type and name in the JSON passed into stdin

    cat pod.json | kubectl delete -f -
     
    @@ -2964,7 +2958,7 @@ inspect them.

    kubectl delete pod,service baz foo
     
    -

    Delete pods and services with label name=myLabel.

    +

    Delete pods and services with label name=myLabel

    kubectl delete pods,services -l name=myLabel
     
    @@ -2983,10 +2977,10 @@ inspect them.

    kubectl delete pods --all
     
    -

    Delete resources by filenames, stdin, resources and names, or by resources and label selector.

    -

    JSON and YAML formats are accepted. Only one type of the arguments may be specified: filenames, resources and names, or resources and label selector.

    -

    Some resources, such as pods, support graceful deletion. These resources define a default period before they are forcibly terminated (the grace period) but you may override that value with the --grace-period flag, or pass --now to set a grace-period of 1. Because these resources often represent entities in the cluster, deletion may not be acknowledged immediately. If the node hosting a pod is down or cannot reach the API server, termination may take significantly longer than the grace period. To force delete a resource, you must specify the --force flag. Note: only a subset of resources support graceful deletion. In absence of the support, --grace-period is ignored.

    -

    IMPORTANT: Force deleting pods does not wait for confirmation that the pod's processes have been terminated, which can leave those processes running until the node detects the deletion and completes graceful deletion. If your processes use shared storage or talk to a remote API and depend on the name of the pod to identify themselves, force deleting those pods may result in multiple processes running on different machines using the same identification which may lead to data corruption or inconsistency. Only force delete pods when you are sure the pod is terminated, or if your application can tolerate multiple copies of the same pod running at once. Also, if you force delete pods the scheduler may place new pods on those nodes before the node has released those resources and causing those pods to be evicted immediately.

    +

    Delete resources by file names, stdin, resources and names, or by resources and label selector.

    +

    JSON and YAML formats are accepted. Only one type of argument may be specified: file names, resources and names, or resources and label selector.

    +

    Some resources, such as pods, support graceful deletion. These resources define a default period before they are forcibly terminated (the grace period) but you may override that value with the --grace-period flag, or pass --now to set a grace-period of 1. Because these resources often represent entities in the cluster, deletion may not be acknowledged immediately. If the node hosting a pod is down or cannot reach the API server, termination may take significantly longer than the grace period. To force delete a resource, you must specify the --force flag. Note: only a subset of resources support graceful deletion. In absence of the support, the --grace-period flag is ignored.

    +

    IMPORTANT: Force deleting pods does not wait for confirmation that the pod's processes have been terminated, which can leave those processes running until the node detects the deletion and completes graceful deletion. If your processes use shared storage or talk to a remote API and depend on the name of the pod to identify themselves, force deleting those pods may result in multiple processes running on different machines using the same identification which may lead to data corruption or inconsistency. Only force delete pods when you are sure the pod is terminated, or if your application can tolerate multiple copies of the same pod running at once. Also, if you force delete pods, the scheduler may place new pods on those nodes before the node has released those resources and causing those pods to be evicted immediately.

    Note that the delete command does NOT do resource version checks, so if someone submits an update to a resource right when you submit a delete, their update will be lost along with the rest of the resource.

    Usage

    $ kubectl delete ([-f FILENAME] | [-k DIRECTORY] | TYPE [(NAME | -l label | --all)])

    @@ -3111,31 +3105,31 @@ viewing your workloads in a Kubernetes cluster.


    apply

    -

    Apply the configuration in pod.json to a pod.

    +

    Apply the configuration in pod.json to a pod

    kubectl apply -f ./pod.json
     
    -

    Apply resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml.

    +

    Apply resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml

    kubectl apply -k dir/
     
    -

    Apply the JSON passed into stdin to a pod.

    +

    Apply the JSON passed into stdin to a pod

    cat pod.json | kubectl apply -f -
     
    -

    Note: --prune is still in Alpha # Apply the configuration in manifest.yaml that matches label app=nginx and delete all the other resources that are not in the file and match label app=nginx.

    +

    Note: --prune is still in Alpha # Apply the configuration in manifest.yaml that matches label app=nginx and delete all other resources that are not in the file and match label app=nginx

    kubectl apply --prune -f manifest.yaml -l app=nginx
     
    -

    Apply the configuration in manifest.yaml and delete all the other configmaps that are not in the file.

    +

    Apply the configuration in manifest.yaml and delete all the other config maps that are not in the file

    kubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/ConfigMap
     
    -

    Apply a configuration to a resource by filename or stdin. The resource name must be specified. This resource will be created if it doesn't exist yet. To use 'apply', always create the resource initially with either 'apply' or 'create --save-config'.

    +

    Apply a configuration to a resource by file name or stdin. The resource name must be specified. This resource will be created if it doesn't exist yet. To use 'apply', always create the resource initially with either 'apply' or 'create --save-config'.

    JSON and YAML formats are accepted.

    Alpha Disclaimer: the --prune functionality is not yet complete. Do not use unless you are aware of what the current state is. See https://issues.k8s.io/34274.

    Usage

    @@ -3300,17 +3294,17 @@ viewing your workloads in a Kubernetes cluster.


    edit-last-applied

    -

    Edit the last-applied-configuration annotations by type/name in YAML.

    +

    Edit the last-applied-configuration annotations by type/name in YAML

    kubectl apply edit-last-applied deployment/nginx
     
    -

    Edit the last-applied-configuration annotations by file in JSON.

    +

    Edit the last-applied-configuration annotations by file in JSON

    kubectl apply edit-last-applied -f deploy.yaml -o json
     

    Edit the latest last-applied-configuration annotations of resources from the default editor.

    -

    The edit-last-applied command allows you to directly edit any API resource you can retrieve via the command line tools. It will open the editor defined by your KUBE_EDITOR, or EDITOR environment variables, or fall back to 'vi' for Linux or 'notepad' for Windows. You can edit multiple objects, although changes are applied one at a time. The command accepts filenames as well as command line arguments, although the files you point to must be previously saved versions of resources.

    +

    The edit-last-applied command allows you to directly edit any API resource you can retrieve via the command-line tools. It will open the editor defined by your KUBE_EDITOR, or EDITOR environment variables, or fall back to 'vi' for Linux or 'notepad' for Windows. You can edit multiple objects, although changes are applied one at a time. The command accepts file names as well as command-line arguments, although the files you point to must be previously saved versions of resources.

    The default format is YAML. To edit in JSON, specify "-o json".

    The flag --windows-line-endings can be used to force Windows line endings, otherwise the default for your operating system will be used.

    In the event an error occurs while updating, a temporary file will be created on disk that contains your unapplied changes. The most common error when updating a resource is another editor changing the resource on the server. When this occurs, you will have to apply your changes to the newer version of the resource, or update your temporary saved copy to include the latest resource version.

    @@ -3392,17 +3386,17 @@ viewing your workloads in a Kubernetes cluster.


    set-last-applied

    -

    Set the last-applied-configuration of a resource to match the contents of a file.

    +

    Set the last-applied-configuration of a resource to match the contents of a file

    kubectl apply set-last-applied -f deploy.yaml
     
    -

    Execute set-last-applied against each configuration file in a directory.

    +

    Execute set-last-applied against each configuration file in a directory

    kubectl apply set-last-applied -f path/
     
    -

    Set the last-applied-configuration of a resource to match the contents of a file, will create the annotation if it does not already exist.

    +

    Set the last-applied-configuration of a resource to match the contents of a file; will create the annotation if it does not already exist

    kubectl apply set-last-applied -f deploy.yaml --create-annotation=true
     
    @@ -3467,7 +3461,7 @@ viewing your workloads in a Kubernetes cluster.


    view-last-applied

    -

    View the last-applied-configuration annotations by type/name in YAML.

    +

    View the last-applied-configuration annotations by type/name in YAML

    kubectl apply view-last-applied deployment/nginx
     
    @@ -3477,7 +3471,7 @@ viewing your workloads in a Kubernetes cluster.

    kubectl apply view-last-applied -f deploy.yaml -o json
     

    View the latest last-applied-configuration annotations by type/name or file.

    -

    The default output will be printed to stdout in YAML format. One can use -o option to change output format.

    +

    The default output will be printed to stdout in YAML format. You can use the -o option to change the output format.

    Usage

    $ kubectl apply view-last-applied (TYPE [NAME | -l label] | TYPE/NAME | -f FILENAME)

    Flags

    @@ -3532,7 +3526,7 @@ viewing your workloads in a Kubernetes cluster.


    annotate

    -

    Update pod 'foo' with the annotation 'description' and the value 'my frontend'. # If the same annotation is set multiple times, only the last value will be applied

    +

    Update pod 'foo' with the annotation 'description' and the value 'my frontend' # If the same annotation is set multiple times, only the last value will be applied

    kubectl annotate pods foo description='my frontend'
     
    @@ -3542,7 +3536,7 @@ viewing your workloads in a Kubernetes cluster.

    kubectl annotate -f pod.json description='my frontend'
     
    -

    Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value.

    +

    Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value

    kubectl annotate --overwrite pods foo description='my frontend running nginx'
     
    @@ -3552,16 +3546,16 @@ viewing your workloads in a Kubernetes cluster.

    kubectl annotate pods --all description='my frontend running nginx'
     
    -

    Update pod 'foo' only if the resource is unchanged from version 1.

    +

    Update pod 'foo' only if the resource is unchanged from version 1

    kubectl annotate pods foo description='my frontend running nginx' --resource-version=1
     
    -

    Update pod 'foo' by removing an annotation named 'description' if it exists. # Does not require the --overwrite flag.

    +

    Update pod 'foo' by removing an annotation named 'description' if it exists # Does not require the --overwrite flag

    kubectl annotate pods foo description-
     
    -

    Update the annotations on one or more resources

    +

    Update the annotations on one or more resources.

    All Kubernetes objects support the ability to store additional data with the object as annotations. Annotations are key/value pairs that can be larger than labels and include arbitrary string values such as structured JSON. Tools and system extensions may use annotations to store their own data.

    Attempting to set an annotation that already exists will fail unless --overwrite is set. If --resource-version is specified and does not match the current resource version on the server the command will fail.

    Use "kubectl api-resources" for a complete list of supported resources.

    @@ -3585,6 +3579,12 @@ viewing your workloads in a Kubernetes cluster.

    + + + + + + @@ -3685,17 +3685,17 @@ viewing your workloads in a Kubernetes cluster.


    autoscale

    -

    Auto scale a deployment "foo", with the number of pods between 2 and 10, no target CPU utilization specified so a default autoscaling policy will be used:

    +

    Auto scale a deployment "foo", with the number of pods between 2 and 10, no target CPU utilization specified so a default autoscaling policy will be used

    kubectl autoscale deployment foo --min=2 --max=10
     
    -

    Auto scale a replication controller "foo", with the number of pods between 1 and 5, target CPU utilization at 80%:

    +

    Auto scale a replication controller "foo", with the number of pods between 1 and 5, target CPU utilization at 80%

    kubectl autoscale rc foo --max=5 --cpu-percent=80
     
    -

    Creates an autoscaler that automatically chooses and sets the number of pods that run in a kubernetes cluster.

    -

    Looks up a Deployment, ReplicaSet, StatefulSet, or ReplicationController by name and creates an autoscaler that uses the given resource as a reference. An autoscaler can automatically increase or decrease number of pods deployed within the system as needed.

    +

    Creates an autoscaler that automatically chooses and sets the number of pods that run in a Kubernetes cluster.

    +

    Looks up a deployment, replica set, stateful set, or replication controller by name and creates an autoscaler that uses the given resource as a reference. An autoscaler can automatically increase or decrease number of pods deployed within the system as needed.

    Usage

    $ kubectl autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MAXPODS [--cpu-percent=CPU]

    Flags

    @@ -3740,12 +3740,6 @@ viewing your workloads in a Kubernetes cluster.

    - - - - - - @@ -3960,7 +3954,7 @@ viewing your workloads in a Kubernetes cluster.


    diff

    -

    Diff resources included in pod.json.

    +

    Diff resources included in pod.json

    kubectl diff -f pod.json
     
    @@ -3969,10 +3963,10 @@ viewing your workloads in a Kubernetes cluster.

    cat service.yaml | kubectl diff -f -
     
    -

    Diff configurations specified by filename or stdin between the current online configuration, and the configuration as it would be if applied.

    -

    Output is always YAML.

    +

    Diff configurations specified by file name or stdin between the current online configuration, and the configuration as it would be if applied.

    +

    The output is always YAML.

    KUBECTL_EXTERNAL_DIFF environment variable can be used to select your own diff command. Users can use external commands with params too, example: KUBECTL_EXTERNAL_DIFF="colordiff -N -u"

    -

    By default, the "diff" command available in your path will be run with "-u" (unified diff) and "-N" (treat absent files as empty) options.

    +

    By default, the "diff" command available in your path will be run with the "-u" (unified diff) and "-N" (treat absent files as empty) options.

    Exit status: 0 No differences were found. 1 Differences were found. >1 Kubectl or diff failed with an error.

    Note: KUBECTL_EXTERNAL_DIFF, if used, is expected to follow that convention.

    Usage

    @@ -4035,7 +4029,7 @@ viewing your workloads in a Kubernetes cluster.


    edit

    -

    Edit the service named 'docker-registry':

    +

    Edit the service named 'docker-registry'

    kubectl edit svc/docker-registry
     
    @@ -4045,17 +4039,17 @@ viewing your workloads in a Kubernetes cluster.

    KUBE_EDITOR="nano" kubectl edit svc/docker-registry
     
    -

    Edit the job 'myjob' in JSON using the v1 API format:

    +

    Edit the job 'myjob' in JSON using the v1 API format

    kubectl edit job.v1.batch/myjob -o json
     
    -

    Edit the deployment 'mydeployment' in YAML and save the modified config in its annotation:

    +

    Edit the deployment 'mydeployment' in YAML and save the modified config in its annotation

    kubectl edit deployment/mydeployment -o yaml --save-config
     

    Edit a resource from the default editor.

    -

    The edit command allows you to directly edit any API resource you can retrieve via the command line tools. It will open the editor defined by your KUBE_EDITOR, or EDITOR environment variables, or fall back to 'vi' for Linux or 'notepad' for Windows. You can edit multiple objects, although changes are applied one at a time. The command accepts filenames as well as command line arguments, although the files you point to must be previously saved versions of resources.

    +

    The edit command allows you to directly edit any API resource you can retrieve via the command-line tools. It will open the editor defined by your KUBE_EDITOR, or EDITOR environment variables, or fall back to 'vi' for Linux or 'notepad' for Windows. You can edit multiple objects, although changes are applied one at a time. The command accepts file names as well as command-line arguments, although the files you point to must be previously saved versions of resources.

    Editing is done with the API version used to fetch the resource. To edit using a specific API version, fully-qualify the resource, version, and group.

    The default format is YAML. To edit in JSON, specify "-o json".

    The flag --windows-line-endings can be used to force Windows line endings, otherwise the default for your operating system will be used.

    @@ -4185,10 +4179,10 @@ viewing your workloads in a Kubernetes cluster.

    - + - + @@ -4197,6 +4191,12 @@ viewing your workloads in a Kubernetes cluster.

    + + + + + + @@ -4209,6 +4209,12 @@ viewing your workloads in a Kubernetes cluster.

    + + + + + + @@ -4249,12 +4255,12 @@ viewing your workloads in a Kubernetes cluster.


    label

    -

    Update pod 'foo' with the label 'unhealthy' and the value 'true'.

    +

    Update pod 'foo' with the label 'unhealthy' and the value 'true'

    kubectl label pods foo unhealthy=true
     
    -

    Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value.

    +

    Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value

    kubectl label --overwrite pods foo status=unhealthy
     
    @@ -4269,19 +4275,19 @@ viewing your workloads in a Kubernetes cluster.

    kubectl label -f pod.json status=unhealthy
     
    -

    Update pod 'foo' only if the resource is unchanged from version 1.

    +

    Update pod 'foo' only if the resource is unchanged from version 1

    kubectl label pods foo status=unhealthy --resource-version=1
     
    -

    Update pod 'foo' by removing a label named 'bar' if it exists. # Does not require the --overwrite flag.

    +

    Update pod 'foo' by removing a label named 'bar' if it exists # Does not require the --overwrite flag

    kubectl label pods foo bar-
     

    Update the labels on a resource.

    • A label key and value must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to 63 characters each.
    • -
    • Optionally, the key can begin with a DNS subdomain prefix and a single '/', like example.com/my-app
    • +
    • Optionally, the key can begin with a DNS subdomain prefix and a single '/', like example.com/my-app.
    • If --overwrite is true, then existing labels can be overwritten, otherwise attempting to overwrite a label will result in an error.
    • If --resource-version is specified, then updates will use this resource version, otherwise the existing resource-version will be used.
    @@ -4305,6 +4311,12 @@ viewing your workloads in a Kubernetes cluster.

    + + + + + + @@ -4405,31 +4417,31 @@ viewing your workloads in a Kubernetes cluster.


    patch

    -

    Partially update a node using a strategic merge patch. Specify the patch as JSON.

    +

    Partially update a node using a strategic merge patch, specifying the patch as JSON

    kubectl patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}'
     
    -

    Partially update a node using a strategic merge patch. Specify the patch as YAML.

    +

    Partially update a node using a strategic merge patch, specifying the patch as YAML

    kubectl patch node k8s-node-1 -p $'spec:\n unschedulable: true'
     
    -

    Partially update a node identified by the type and name specified in "node.json" using strategic merge patch.

    +

    Partially update a node identified by the type and name specified in "node.json" using strategic merge patch

    kubectl patch -f node.json -p '{"spec":{"unschedulable":true}}'
     
    -

    Update a container's image; spec.containers[*].name is required because it's a merge key.

    +

    Update a container's image; spec.containers[*].name is required because it's a merge key

    kubectl patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}'
     
    -

    Update a container's image using a json patch with positional arrays.

    +

    Update a container's image using a JSON patch with positional arrays

    kubectl patch pod valid-pod --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]'
     
    -

    Update field(s) of a resource using strategic merge patch, a JSON merge patch, or a JSON patch.

    +

    Update fields of a resource using strategic merge patch, a JSON merge patch, or a JSON patch.

    JSON and YAML formats are accepted.

    Usage

    $ kubectl patch (-f FILENAME | TYPE NAME) [-p PATCH|--patch-file FILE]

    @@ -4533,12 +4545,12 @@ viewing your workloads in a Kubernetes cluster.


    replace

    -

    Replace a pod using the data in pod.json.

    +

    Replace a pod using the data in pod.json

    kubectl replace -f ./pod.json
     
    -

    Replace a pod based on the JSON passed into stdin.

    +

    Replace a pod based on the JSON passed into stdin

    cat pod.json | kubectl replace -f -
     
    @@ -4552,7 +4564,7 @@ viewing your workloads in a Kubernetes cluster.

    kubectl replace --force -f ./pod.json
     
    -

    Replace a resource by filename or stdin.

    +

    Replace a resource by file name or stdin.

    JSON and YAML formats are accepted. If replacing an existing resource, the complete resource spec must be provided. This can be obtained by

    $ kubectl get TYPE NAME -o yaml

    Usage

    @@ -4772,11 +4784,11 @@ viewing your workloads in a Kubernetes cluster.


    pause

    -

    Mark the nginx deployment as paused. Any current state of # the deployment will continue its function, new updates to the deployment will not # have an effect as long as the deployment is paused.

    +

    Mark the nginx deployment as paused # Any current state of the deployment will continue its function; new updates # to the deployment will not have an effect as long as the deployment is paused

    kubectl rollout pause deployment/nginx
     
    -

    Mark the provided resource as paused

    +

    Mark the provided resource as paused.

    Paused resources will not be reconciled by a controller. Use "kubectl rollout resume" to resume a paused resource. Currently only deployments support being paused.

    Usage

    $ kubectl rollout pause RESOURCE

    @@ -4849,12 +4861,12 @@ viewing your workloads in a Kubernetes cluster.

    kubectl rollout restart deployment/nginx
     
    -

    Restart a daemonset

    +

    Restart a daemon set

    kubectl rollout restart daemonset/abc
     

    Restart a resource.

    -

    Resource will be rollout restarted.

    +

    Resource rollout will be restarted.

    Usage

    $ kubectl rollout restart RESOURCE

    Flags

    @@ -4925,7 +4937,7 @@ viewing your workloads in a Kubernetes cluster.

    kubectl rollout resume deployment/nginx
     
    -

    Resume a paused resource

    +

    Resume a paused resource.

    Paused resources will not be reconciled by a controller. By resuming a resource, we allow it to be reconciled again. Currently only deployments support being resumed.

    Usage

    $ kubectl rollout resume RESOURCE

    @@ -5053,21 +5065,21 @@ viewing your workloads in a Kubernetes cluster.


    undo

    -

    Rollback to the previous deployment

    +

    Roll back to the previous deployment

    kubectl rollout undo deployment/abc
     
    -

    Rollback to daemonset revision 3

    +

    Roll back to daemonset revision 3

    kubectl rollout undo daemonset/abc --to-revision=3
     
    -

    Rollback to the previous deployment with dry-run

    +

    Roll back to the previous deployment with dry-run

    kubectl rollout undo --dry-run=server deployment/abc
     
    -

    Rollback to a previous rollout.

    +

    Roll back to a previous rollout.

    Usage

    $ kubectl rollout undo (TYPE NAME | TYPE/NAME) [flags]

    Flags

    @@ -5140,31 +5152,31 @@ viewing your workloads in a Kubernetes cluster.


    scale

    -

    Scale a replicaset named 'foo' to 3.

    +

    Scale a replica set named 'foo' to 3

    kubectl scale --replicas=3 rs/foo
     
    -

    Scale a resource identified by type and name specified in "foo.yaml" to 3.

    +

    Scale a resource identified by type and name specified in "foo.yaml" to 3

    kubectl scale --replicas=3 -f foo.yaml
     
    -

    If the deployment named mysql's current size is 2, scale mysql to 3.

    +

    If the deployment named mysql's current size is 2, scale mysql to 3

    kubectl scale --current-replicas=2 --replicas=3 deployment/mysql
     
    -

    Scale multiple replication controllers.

    +

    Scale multiple replication controllers

    kubectl scale --replicas=5 rc/foo rc/bar rc/baz
     
    -

    Scale statefulset named 'web' to 3.

    +

    Scale stateful set named 'web' to 3

    kubectl scale --replicas=3 statefulset/web
     
    -

    Set a new size for a Deployment, ReplicaSet, Replication Controller, or StatefulSet.

    +

    Set a new size for a deployment, replica set, replication controller, or stateful set.

    Scale also allows users to specify one or more preconditions for the scale action.

    If --current-replicas or --resource-version is specified, it is validated before the scale is attempted, and it is guaranteed that the precondition holds true when the scale is sent to the server.

    Usage

    @@ -5196,7 +5208,7 @@ viewing your workloads in a Kubernetes cluster.

    - + @@ -5274,7 +5286,7 @@ viewing your workloads in a Kubernetes cluster.

    output o Output format. One of: json|yaml|wide|name|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See custom columns [http://kubernetes.io/docs/user-guide/kubectl-overview/#custom-columns], golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://kubernetes.io/docs/user-guide/jsonpath]. Output format. One of: json|yaml|name|go-template|go-template-file|template|templatefile|jsonpath|jsonpath-as-json|jsonpath-file|custom-columns-file|custom-columns|wide See custom columns [https://kubernetes.io/docs/reference/kubectl/overview/#custom-columns], golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [https://kubernetes.io/docs/reference/kubectl/jsonpath/].
    output-watch-events Select all resources, including uninitialized ones, in the namespace of the specified resource types.
    all-namespacesAfalseIf true, check the specified action in all namespaces.
    allow-missing-template-keys true Filename, directory, or URL to files identifying the resource to autoscale.
    generatorhorizontalpodautoscaler/v1The name of the API generator to use. Currently there is only 1 generator.
    kustomize k
    allow-id-changesas-current-user falseenable changes to a resourceId use the uid and gid of the command executor to run the function in the container
    enable-alpha-plugins enable kustomize plugins
    enable-helmfalseEnable use of the Helm chart inflator generator.
    enable-managedby-label false a list of environment variables to be used by functions
    helm-commandhelmhelm command (path to executable)
    load-restrictor LoadRestrictionsRootOnly Select all resources, including uninitialized ones, in the namespace of the specified resource types
    all-namespacesAfalseIf true, check the specified action in all namespaces.
    allow-missing-template-keys true current-replicas -1Precondition for current size. Requires that the current size of the resource match this value in order to scale. Precondition for current size. Requires that the current size of the resource match this value in order to scale. -1 (default) for no condition.
    dry-run

    set

    -

    Configure application resources

    +

    Configure application resources.

    These commands help you make changes to existing application resources.

    Usage

    $ kubectl set SUBCOMMAND

    @@ -5339,7 +5351,7 @@ viewing your workloads in a Kubernetes cluster.

    List environment variable definitions in one or more pods, pod templates. Add, update, or remove container environment variable definitions in one or more pod templates (within replication controllers or deployment configurations). View or modify the environment variable definitions on all containers in the specified pods or pod templates, or just those that match a wildcard.

    If "--env -" is passed, environment variables can be read from STDIN using the standard env syntax.

    Possible resources include (case insensitive):

    -

    pod (po), replicationcontroller (rc), deployment (deploy), daemonset (ds), job, replicaset (rs)

    +

    pod (po), replicationcontroller (rc), deployment (deploy), daemonset (ds), statefulset (sts), cronjob (cj), replicaset (rs)

    Usage

    $ kubectl set env RESOURCE/NAME KEY_1=VAL_1 ... KEY_N=VAL_N

    Flags

    @@ -5478,7 +5490,7 @@ viewing your workloads in a Kubernetes cluster.


    image

    -

    Set a deployment's nginx container image to 'nginx:1.9.1', and its busybox container image to 'busybox'.

    +

    Set a deployment's nginx container image to 'nginx:1.9.1', and its busybox container image to 'busybox'

    kubectl set image deployment/nginx busybox=busybox nginx=nginx:1.9.1
     
    @@ -5499,7 +5511,7 @@ viewing your workloads in a Kubernetes cluster.

    Update existing container image(s) of resources.

    Possible resources include (case insensitive):

    -

    pod (po), replicationcontroller (rc), deployment (deploy), daemonset (ds), replicaset (rs)

    +

    pod (po), replicationcontroller (rc), deployment (deploy), daemonset (ds), statefulset (sts), cronjob (cj), replicaset (rs)

    Usage

    $ kubectl set image (-f FILENAME | TYPE NAME) CONTAINER_NAME_1=CONTAINER_IMAGE_1 ... CONTAINER_NAME_N=CONTAINER_IMAGE_N

    Flags

    @@ -5615,8 +5627,8 @@ viewing your workloads in a Kubernetes cluster.

    kubectl set resources -f path/to/file.yaml --limits=cpu=200m,memory=512Mi --local -o yaml
     
    -

    Specify compute resource requirements (cpu, memory) for any resource that defines a pod template. If a pod is successfully scheduled, it is guaranteed the amount of resource requested, but may burst up to its specified limits.

    -

    for each compute resource, if a limit is specified and a request is omitted, the request will default to the limit.

    +

    Specify compute resource requirements (CPU, memory) for any resource that defines a pod template. If a pod is successfully scheduled, it is guaranteed the amount of resource requested, but may burst up to its specified limits.

    +

    For each compute resource, if a limit is specified and a request is omitted, the request will default to the limit.

    Possible resources include (case insensitive): Use "kubectl api-resources" for a complete list of supported resources.

    Usage

    $ kubectl set resources (-f FILENAME | TYPE NAME) ([--limits=LIMITS & --requests=REQUESTS]

    @@ -5732,7 +5744,7 @@ viewing your workloads in a Kubernetes cluster.


    selector

    -

    set the labels and selector before creating a deployment/service pair.

    +

    Set the labels and selector before creating a deployment/service pair

    kubectl create service clusterip my-svc --clusterip="None" -o yaml --dry-run=client | kubectl set selector --local -f - 'environment=qa' -o yaml | kubectl create -f -
     kubectl create deployment my-dep -o yaml --dry-run=client | kubectl label --local -f - environment=qa -o yaml | kubectl create -f -
    @@ -5829,16 +5841,16 @@ kubectl create deployment my-dep -o yaml --dry-run<
     

    serviceaccount

    -

    Set Deployment nginx-deployment's ServiceAccount to serviceaccount1

    +

    Set deployment nginx-deployment's service account to serviceaccount1

    kubectl set serviceaccount deployment nginx-deployment serviceaccount1
     
    -

    Print the result (in yaml format) of updated nginx deployment with serviceaccount from local file, without hitting apiserver

    +

    Print the result (in YAML format) of updated nginx deployment with the service account from local file, without hitting the API server

    kubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-run=client -o yaml
     
    -

    Update ServiceAccount of pod template resources.

    +

    Update the service account of pod template resources.

    Possible resources (case insensitive) can be:

    replicationcontroller (rc), deployment (deploy), daemonset (ds), job, replicaset (rs), statefulset

    Usage

    @@ -5931,21 +5943,21 @@ kubectl create deployment my-dep -o yaml --dry-run<

    subject

    -

    Update a ClusterRoleBinding for serviceaccount1

    +

    Update a cluster role binding for serviceaccount1

    kubectl set subject clusterrolebinding admin --serviceaccount=namespace:serviceaccount1
     
    -

    Update a RoleBinding for user1, user2, and group1

    +

    Update a role binding for user1, user2, and group1

    kubectl set subject rolebinding admin --user=user1 --user=user2 --group=group1
     
    -

    Print the result (in yaml format) of updating rolebinding subjects from a local, without hitting the server

    +

    Print the result (in YAML format) of updating rolebinding subjects from a local file, without hitting the server

    kubectl create rolebinding admin --role=admin --user=admin -o yaml --dry-run=client | kubectl set subject --local -f - --user=foo -o yaml
     
    -

    Update User, Group or ServiceAccount in a RoleBinding/ClusterRoleBinding.

    +

    Update the user, group, or service account in a role binding or cluster role binding.

    Usage

    $ kubectl set subject (-f FILENAME | TYPE NAME) [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run=server|client|none]

    Flags

    @@ -6048,17 +6060,17 @@ kubectl create deployment my-dep -o yaml --dry-run<

    wait

    -

    Wait for the pod "busybox1" to contain the status condition of type "Ready".

    +

    Wait for the pod "busybox1" to contain the status condition of type "Ready"

    kubectl wait --for=condition=Ready pod/busybox1
     
    -

    The default value of status condition is true, you can set false.

    +

    The default value of status condition is true; you can set it to false

    kubectl wait --for=condition=Ready=false pod/busybox1
     
    -

    Wait for the pod "busybox1" to be deleted, with a timeout of 60s, after having issued the "delete" command.

    +

    Wait for the pod "busybox1" to be deleted, with a timeout of 60s, after having issued the "delete" command

    kubectl delete pod/busybox1
     kubectl wait --for=delete pod/busybox1 --timeout=60s
    @@ -6066,7 +6078,7 @@ kubectl wait --for=delete pod/busybox1 Experimental: Wait for a specific condition on one or many resources.

    The command takes multiple resources and waits until the specified condition is seen in the Status field of every given resource.

    Alternatively, the command can wait for the given set of resources to be deleted by providing the "delete" keyword as the value to the --for flag.

    -

    A successful message will be printed to stdout indicating when the specified condition has been met. One can use -o option to change to output destination.

    +

    A successful message will be printed to stdout indicating when the specified condition has been met. You can use the -o option to change the output destination.

    Usage

    $ kubectl wait ([-f FILENAME] | resource.group/resource.name | resource.group [(-l label | --all)]) [--for=delete|--for condition=available]

    Flags

    @@ -6171,7 +6183,7 @@ applications.


    attach

    -

    Get output from running pod mypod, use the kubectl.kubernetes.io/default-container annotation # for selecting the container to be attached or the first container in the pod will be chosen

    +

    Get output from running pod mypod; use the 'kubectl.kubernetes.io/default-container' annotation # for selecting the container to be attached or the first container in the pod will be chosen

    kubectl attach mypod
     
    @@ -6181,12 +6193,12 @@ applications.

    kubectl attach mypod -c ruby-container
     
    -

    Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod mypod # and sends stdout/stderr from 'bash' back to the client

    +

    Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod # and sends stdout/stderr from 'bash' back to the client

    kubectl attach mypod -c ruby-container -i -t
     
    -

    Get output from the first pod of a ReplicaSet named nginx

    +

    Get output from the first pod of a replica set named nginx

    kubectl attach rs/nginx
     
    @@ -6279,7 +6291,7 @@ applications.

    kubectl auth can-i --list --namespace=foo
     

    Check whether an action is allowed.

    -

    VERB is a logical Kubernetes API verb like 'get', 'list', 'watch', 'delete', etc. TYPE is a Kubernetes resource. Shortcuts and groups will be resolved. NONRESOURCEURL is a partial URL starts with "/". NAME is the name of a particular Kubernetes resource.

    +

    VERB is a logical Kubernetes API verb like 'get', 'list', 'watch', 'delete', etc. TYPE is a Kubernetes resource. Shortcuts and groups will be resolved. NONRESOURCEURL is a partial URL that starts with "/". NAME is the name of a particular Kubernetes resource.

    Usage

    $ kubectl auth can-i VERB [TYPE | TYPE/NAME | NONRESOURCEURL]

    Flags

    @@ -6328,11 +6340,11 @@ applications.


    reconcile

    -

    Reconcile rbac resources from a file

    +

    Reconcile RBAC resources from a file

    kubectl auth reconcile -f my-rbac-rules.yaml
     
    -

    Reconciles rules for RBAC Role, RoleBinding, ClusterRole, and ClusterRoleBinding objects.

    +

    Reconciles rules for RBAC role, role binding, cluster role, and cluster role binding objects.

    Missing objects are created, and the containing namespace is created for namespaced objects, if required.

    Existing roles are updated to include the permissions in the input objects, and remove extra permissions if --remove-extra-permissions is specified.

    Existing bindings are updated to include the subjects in the input objects, and remove extra subjects if --remove-extra-subjects is specified.

    @@ -6415,7 +6427,7 @@ applications.


    cp

    -

    !!!Important Note!!! # Requires that the 'tar' binary is present in your container # image. If 'tar' is not present, 'kubectl cp' will fail. # # For advanced use cases, such as symlinks, wildcard expansion or # file mode preservation consider using 'kubectl exec'. # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace

    +

    !!!Important Note!!! # Requires that the 'tar' binary is present in your container # image. If 'tar' is not present, 'kubectl cp' will fail. # # For advanced use cases, such as symlinks, wildcard expansion or # file mode preservation, consider using 'kubectl exec'. # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace

    tar cf - /tmp/foo | kubectl exec -i -n <some-namespace> <some-pod> -- tar xf - -C /tmp/bar
     
    @@ -6500,11 +6512,11 @@ applications.

    kubectl describe po -l name=myLabel
     
    -

    Describe all pods managed by the 'frontend' replication controller (rc-created pods # get the name of the rc as a prefix in the pod the name).

    +

    Describe all pods managed by the 'frontend' replication controller (rc-created pods # get the name of the rc as a prefix in the pod name)

    kubectl describe pods frontend
     
    -

    Show details of a specific resource or group of resources

    +

    Show details of a specific resource or group of resources.

    Print a detailed description of the selected resources, including related resources such as events or controllers. You may select a single object by name, all objects of that type, provide a name prefix, or label selector. For example:

    $ kubectl describe TYPE NAME_PREFIX

    will first check for an exact match on TYPE and NAME_PREFIX. If no such resource exists, it will output details for every resource that has a name prefixed with NAME_PREFIX.

    @@ -6529,6 +6541,12 @@ applications.

    If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. +chunk-size + +500 +Return large lists in chunks rather than all at once. Pass 0 to disable. This flag is beta and may change in the future. + + filename f [] @@ -6563,22 +6581,22 @@ applications.


    exec

    -

    Get output from running 'date' command from pod mypod, using the first container by default

    +

    Get output from running the 'date' command from pod mypod, using the first container by default

    kubectl exec mypod -- date
     
    -

    Get output from running 'date' command in ruby-container from pod mypod

    +

    Get output from running the 'date' command in ruby-container from pod mypod

    kubectl exec mypod -c ruby-container -- date
     
    -

    Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod mypod # and sends stdout/stderr from 'bash' back to the client

    +

    Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod # and sends stdout/stderr from 'bash' back to the client

    kubectl exec mypod -c ruby-container -i -t -- bash -il
     
    -

    List contents of /usr from the first container of pod mypod and sort by modification time. # If the command you want to execute in the pod has any flags in common (e.g. -i), # you must use two dashes (--) to separate your command's flags/arguments. # Also note, do not surround your command and its flags/arguments with quotes # unless that is how you would execute it normally (i.e., do ls -t /usr, not "ls -t /usr").

    +

    List contents of /usr from the first container of pod mypod and sort by modification time # If the command you want to execute in the pod has any flags in common (e.g. -i), # you must use two dashes (--) to separate your command's flags/arguments # Also note, do not surround your command and its flags/arguments with quotes # unless that is how you would execute it normally (i.e., do ls -t /usr, not "ls -t /usr")

    kubectl exec mypod -i -t -- ls -t /usr
     
    @@ -6659,7 +6677,7 @@ applications.

    Return snapshot logs from all containers in pods defined by label app=nginx

    -
    kubectl logs -lapp=nginx --all-containers=true
    +
    kubectl logs -l app=nginx --all-containers=true
     

    Return snapshot of previous terminated ruby container logs from pod web-1

    @@ -6674,7 +6692,7 @@ applications.

    Begin streaming the logs from all containers in pods defined by label app=nginx

    -
    kubectl logs -f -lapp=nginx --all-containers=true
    +
    kubectl logs -f -l app=nginx --all-containers=true
     

    Display only the most recent 20 lines of output in pod nginx

    @@ -6844,9 +6862,9 @@ applications.

    kubectl port-forward pod/mypod :5000
     
    -

    Forward one or more local ports to a pod. This command requires the node to have 'socat' installed.

    +

    Forward one or more local ports to a pod.

    Use resource type/name such as deployment/mydeployment to select a pod. Resource type defaults to 'pod' if omitted.

    -

    If there are multiple pods matching the criteria, a pod will be selected automatically. The forwarding session ends when the selected pod terminates, and rerun of the command is needed to resume forwarding.

    +

    If there are multiple pods matching the criteria, a pod will be selected automatically. The forwarding session ends when the selected pod terminates, and a rerun of the command is needed to resume forwarding.

    Usage

    $ kubectl port-forward TYPE/NAME [options] [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]

    Flags

    @@ -6877,36 +6895,36 @@ applications.


    proxy

    -

    To proxy all of the kubernetes api and nothing else.

    +

    To proxy all of the Kubernetes API and nothing else

    kubectl proxy --api-prefix=/
     
    -

    To proxy only part of the kubernetes api and also some static files. # You can get pods info with 'curl localhost:8001/api/v1/pods'

    +

    To proxy only part of the Kubernetes API and also some static files # You can get pods info with 'curl localhost:8001/api/v1/pods'

    kubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/
     
    -

    To proxy the entire kubernetes api at a different root. # You can get pods info with 'curl localhost:8001/custom/api/v1/pods'

    +

    To proxy the entire Kubernetes API at a different root # You can get pods info with 'curl localhost:8001/custom/api/v1/pods'

    kubectl proxy --api-prefix=/custom/
     
    -

    Run a proxy to kubernetes apiserver on port 8011, serving static content from ./local/www/

    +

    Run a proxy to the Kubernetes API server on port 8011, serving static content from ./local/www/

    kubectl proxy --port=8011 --www=./local/www/
     
    -

    Run a proxy to kubernetes apiserver on an arbitrary local port. # The chosen port for the server will be output to stdout.

    +

    Run a proxy to the Kubernetes API server on an arbitrary local port # The chosen port for the server will be output to stdout

    kubectl proxy --port=0
     
    -

    Run a proxy to kubernetes apiserver, changing the api prefix to k8s-api # This makes e.g. the pods api available at localhost:8001/k8s-api/v1/pods/

    +

    Run a proxy to the Kubernetes API server, changing the API prefix to k8s-api # This makes e.g. the pods API available at localhost:8001/k8s-api/v1/pods/

    kubectl proxy --api-prefix=/k8s-api
     
    -

    Creates a proxy server or application-level gateway between localhost and the Kubernetes API Server. It also allows serving static content over specified HTTP path. All incoming data enters through one port and gets forwarded to the remote kubernetes API Server port, except for the path matching the static content path.

    +

    Creates a proxy server or application-level gateway between localhost and the Kubernetes API server. It also allows serving static content over specified HTTP path. All incoming data enters through one port and gets forwarded to the remote Kubernetes API server port, except for the path matching the static content path.

    Usage

    $ kubectl proxy [--port=PORT] [--www=static-dir] [--www-prefix=prefix] [--api-prefix=prefix]

    Flags

    @@ -7013,7 +7031,7 @@ applications.

    kubectl top node NODE_NAME
     
    -

    Display Resource (CPU/Memory) usage of nodes.

    +

    Display resource (CPU/memory) usage of nodes.

    The top-node command allows you to see the resource consumption of nodes.

    Usage

    $ kubectl top node [NAME | -l label]

    @@ -7049,8 +7067,8 @@ applications.

    use-protocol-buffers -false -If present, protocol-buffers will be used to request metrics. +true +Enables using protocol-buffers to access Metrics API. @@ -7076,7 +7094,7 @@ applications.

    kubectl top pod -l name=myLabel
     
    -

    Display Resource (CPU/Memory) usage of pods.

    +

    Display resource (CPU/memory) usage of pods.

    The 'top pod' command allows you to see the resource consumption of pods.

    Due to the metrics pipeline delay, they may be unavailable for a few minutes since pod creation.

    Usage

    @@ -7105,6 +7123,12 @@ applications.

    If present, print usage of containers within a pod. +field-selector + + +Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type. + + no-headers false @@ -7125,8 +7149,8 @@ applications.

    use-protocol-buffers -false -If present, protocol-buffers will be used to request metrics. +true +Enables using protocol-buffers to access Metrics API. @@ -7138,7 +7162,7 @@ applications.

    kubectl api-versions
     
    -

    Print the supported API versions on the server, in the form of "group/version"

    +

    Print the supported API versions on the server, in the form of "group/version".

    Usage

    $ kubectl api-versions


    @@ -7148,6 +7172,11 @@ applications.

    $ kubectl certificate SUBCOMMAND


    approve

    +
    +

    Approve CSR 'csr-sqgzp'

    +
    +
    kubectl certificate approve csr-sqgzp
    +

    Approve a certificate signing request.

    kubectl certificate approve allows a cluster admin to approve a certificate signing request (CSR). This action tells a certificate signing controller to issue a certificate to the requestor with the attributes requested in the CSR.

    SECURITY NOTICE: Depending on the requested attributes, the issued certificate can potentially grant a requester access to cluster resources or to authenticate as a requested identity. Before approving a CSR, ensure you understand what the signed certificate can do.

    @@ -7216,6 +7245,11 @@ applications.


    deny

    +
    +

    Deny CSR 'csr-sqgzp'

    +
    +
    kubectl certificate deny csr-sqgzp
    +

    Deny a certificate signing request.

    kubectl certificate deny allows a cluster admin to deny a certificate signing request (CSR). This action tells a certificate signing controller to not to issue a certificate to the requestor.

    Usage

    @@ -7288,7 +7322,7 @@ applications.

    kubectl cluster-info
     
    -

    Display addresses of the control plane and services with label kubernetes.io/cluster-service=true To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

    +

    Display addresses of the control plane and services with label kubernetes.io/cluster-service=true. To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

    Usage

    $ kubectl cluster-info


    @@ -7313,8 +7347,8 @@ applications.

    kubectl cluster-info dump --namespaces default,kube-system --output-directory=/path/to/cluster-state
     
    -

    Dumps cluster info out suitable for debugging and diagnosing cluster problems. By default, dumps everything to stdout. You can optionally specify a directory with --output-directory. If you specify a directory, kubernetes will build a set of files in that directory. By default only dumps things in the 'kube-system' namespace, but you can switch to a different namespace with the --namespaces flag, or specify --all-namespaces to dump all namespaces.

    -

    The command also dumps the logs of all of the pods in the cluster, these logs are dumped into different directories based on namespace and pod name.

    +

    Dump cluster information out suitable for debugging and diagnosing cluster problems. By default, dumps everything to stdout. You can optionally specify a directory with --output-directory. If you specify a directory, Kubernetes will build a set of files in that directory. By default, only dumps things in the current namespace and 'kube-system' namespace, but you can switch to a different namespace with the --namespaces flag, or specify --all-namespaces to dump all namespaces.

    +

    The command also dumps the logs of all of the pods in the cluster; these logs are dumped into different directories based on namespace and pod name.

    Usage

    $ kubectl cluster-info dump

    Flags

    @@ -7381,7 +7415,7 @@ applications.


    cordon

    -

    Mark node "foo" as unschedulable.

    +

    Mark node "foo" as unschedulable

    kubectl cordon foo
     
    @@ -7416,20 +7450,20 @@ applications.


    drain

    -

    Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.

    +

    Drain node "foo", even if there are pods not managed by a replication controller, replica set, job, daemon set or stateful set on it

    -
    $ kubectl drain foo --force
    +
    kubectl drain foo --force
     
    -

    As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use a grace period of 15 minutes.

    +

    As above, but abort if there are pods not managed by a replication controller, replica set, job, daemon set or stateful set, and use a grace period of 15 minutes

    -
    $ kubectl drain foo --grace-period=900
    +
    kubectl drain foo --grace-period=900
     

    Drain node in preparation for maintenance.

    -

    The given node will be marked unschedulable to prevent new pods from arriving. 'drain' evicts the pods if the APIServer supports http://kubernetes.io/docs/admin/disruptions/ . Otherwise, it will use normal DELETE to delete the pods. The 'drain' evicts or deletes all pods except mirror pods (which cannot be deleted through the API server). If there are DaemonSet-managed pods, drain will not proceed without --ignore-daemonsets, and regardless it will not delete any DaemonSet-managed pods, because those pods would be immediately replaced by the DaemonSet controller, which ignores unschedulable markings. If there are any pods that are neither mirror pods nor managed by ReplicationController, ReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete any pods unless you use --force. --force will also allow deletion to proceed if the managing resource of one or more pods is missing.

    +

    The given node will be marked unschedulable to prevent new pods from arriving. 'drain' evicts the pods if the API server supports https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ . Otherwise, it will use normal DELETE to delete the pods. The 'drain' evicts or deletes all pods except mirror pods (which cannot be deleted through the API server). If there are daemon set-managed pods, drain will not proceed without --ignore-daemonsets, and regardless it will not delete any daemon set-managed pods, because those pods would be immediately replaced by the daemon set controller, which ignores unschedulable markings. If there are any pods that are neither mirror pods nor managed by a replication controller, replica set, daemon set, stateful set, or job, then drain will not delete any pods unless you use --force. --force will also allow deletion to proceed if the managing resource of one or more pods is missing.

    'drain' waits for graceful termination. You should not operate on the machine until the command completes.

    When you are ready to put the node back into service, use kubectl uncordon, which will make the node schedulable again.

    -

    http://kubernetes.io/images/docs/kubectl_drain.svg

    +

    https://kubernetes.io/images/docs/kubectl_drain.svg

    Usage

    $ kubectl drain NODE

    Flags

    @@ -7444,6 +7478,12 @@ applications.

    +chunk-size + +500 +Return large lists in chunks rather than all at once. Pass 0 to disable. This flag is beta and may change in the future. + + delete-emptydir-data false @@ -7520,12 +7560,12 @@ applications.


    taint

    -

    Update node 'foo' with a taint with key 'dedicated' and value 'special-user' and effect 'NoSchedule'. # If a taint with that key and effect already exists, its value is replaced as specified.

    +

    Update node 'foo' with a taint with key 'dedicated' and value 'special-user' and effect 'NoSchedule' # If a taint with that key and effect already exists, its value is replaced as specified

    kubectl taint nodes foo dedicated=special-user:NoSchedule
     
    -

    Remove from node 'foo' the taint with key 'dedicated' and effect 'NoSchedule' if one exists.

    +

    Remove from node 'foo' the taint with key 'dedicated' and effect 'NoSchedule' if one exists

    kubectl taint nodes foo dedicated:NoSchedule-
     
    @@ -7548,7 +7588,7 @@ applications.

    • A taint consists of a key, value, and effect. As an argument here, it is expressed as key=value:effect.
    • The key must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to 253 characters.
    • -
    • Optionally, the key can begin with a DNS subdomain prefix and a single '/', like example.com/my-app
    • +
    • Optionally, the key can begin with a DNS subdomain prefix and a single '/', like example.com/my-app.
    • The value is optional. If given, it must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to 63 characters.
    • The effect must be NoSchedule, PreferNoSchedule or NoExecute.
    • Currently taint can only apply to node.
    • @@ -7631,9 +7671,9 @@ applications.


      uncordon

      -

      Mark node "foo" as schedulable.

      +

      Mark node "foo" as schedulable

      -
      $ kubectl uncordon foo
      +
      kubectl uncordon foo
       

      Mark node as schedulable.

      Usage

      @@ -7672,17 +7712,17 @@ applications.


      api-resources

      -

      Print the supported API Resources

      +

      Print the supported API resources

      kubectl api-resources
       
      -

      Print the supported API Resources with more information

      +

      Print the supported API resources with more information

      kubectl api-resources -o wide
       
      -

      Print the supported API Resources sorted by a column

      +

      Print the supported API resources sorted by a column

      kubectl api-resources --sort-by=name
       
      @@ -7697,11 +7737,11 @@ applications.

      kubectl api-resources --namespaced=false
       
      -

      Print the supported API Resources with specific APIGroup

      +

      Print the supported API resources with a specific APIGroup

      kubectl api-resources --api-group=extensions
       
      -

      Print the supported API resources on the server

      +

      Print the supported API resources on the server.

      Usage

      $ kubectl api-resources

      Flags

      @@ -7772,12 +7812,12 @@ applications.

      brew install bash-completion@2
       
      -

      If kubectl is installed via homebrew, this should start working immediately. ## If you've installed via other means, you may need add the completion to your completion directory

      +

      If kubectl is installed via homebrew, this should start working immediately ## If you've installed via other means, you may need to add the completion to your completion directory

      kubectl completion bash > $(brew --prefix)/etc/bash_completion.d/kubectl
       
      -

      Installing bash completion on Linux ## If bash-completion is not installed on Linux, please install the 'bash-completion' package ## via your distribution's package manager. ## Load the kubectl completion code for bash into the current shell

      +

      Installing bash completion on Linux ## If bash-completion is not installed on Linux, install the 'bash-completion' package ## via your distribution's package manager. ## Load the kubectl completion code for bash into the current shell

      source <(kubectl completion bash)
       
      @@ -7805,8 +7845,14 @@ source $HOME/.bash_profile
      kubectl completion zsh > "${fpath[1]}/_kubectl"
       

      Output shell completion code for the specified shell (bash or zsh). The shell code must be evaluated to provide interactive completion of kubectl commands. This can be done by sourcing it from the .bash_profile.

      -

      Detailed instructions on how to do this are available here: https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion

      -

      Note for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2

      +

      Detailed instructions on how to do this are available here:

      +

      for macOS: + https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#enable-shell-autocompletion

      +

      for linux: + https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#enable-shell-autocompletion

      +

      for windows: + https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#enable-shell-autocompletion

      +

      Note for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2.

      Usage

      $ kubectl completion SHELL


      @@ -7827,7 +7873,7 @@ source $HOME/.bash_profile
      kubectl config current-context
       
      -

      Displays the current-context

      +

      Display the current-context.

      Usage

      $ kubectl config current-context


      @@ -7837,7 +7883,7 @@ source $HOME/.bash_profile
      kubectl config delete-cluster minikube
       
      -

      Delete the specified cluster from the kubeconfig

      +

      Delete the specified cluster from the kubeconfig.

      Usage

      $ kubectl config delete-cluster NAME


      @@ -7847,7 +7893,7 @@ source $HOME/.bash_profile
      kubectl config delete-context minikube
       
      -

      Delete the specified context from the kubeconfig

      +

      Delete the specified context from the kubeconfig.

      Usage

      $ kubectl config delete-context NAME


      @@ -7857,13 +7903,13 @@ source $HOME/.bash_profile
      kubectl config delete-user minikube
       
      -

      Delete the specified user from the kubeconfig

      +

      Delete the specified user from the kubeconfig.

      Usage

      $ kubectl config delete-user NAME


      get-clusters

      -

      List the clusters kubectl knows about

      +

      List the clusters that kubectl knows about

      kubectl config get-clusters
       
      @@ -7878,11 +7924,11 @@ source $HOME/.bash_profile
      kubectl config get-contexts
       
      -

      Describe one context in your kubeconfig file.

      +

      Describe one context in your kubeconfig file

      kubectl config get-contexts my-context
       
      -

      Displays one or many contexts from the kubeconfig file.

      +

      Display one or many contexts from the kubeconfig file.

      Usage

      $ kubectl config get-contexts [(-o|--output=)name)]

      Flags

      @@ -7913,7 +7959,7 @@ source $HOME/.bash_profile

      get-users

      -

      List the users kubectl knows about

      +

      List the users that kubectl knows about

      kubectl config get-users
       
      @@ -7928,37 +7974,37 @@ source $HOME/.bash_profile
      kubectl config rename-context old-name new-name
       

      Renames a context from the kubeconfig file.

      -

      CONTEXT_NAME is the context name that you wish to change.

      -

      NEW_NAME is the new name you wish to set.

      -

      Note: In case the context being renamed is the 'current-context', this field will also be updated.

      +

      CONTEXT_NAME is the context name that you want to change.

      +

      NEW_NAME is the new name you want to set.

      +

      Note: If the context being renamed is the 'current-context', this field will also be updated.

      Usage

      $ kubectl config rename-context CONTEXT_NAME NEW_NAME


      set

      -

      Set server field on the my-cluster cluster to https://1.2.3.4

      +

      Set the server field on the my-cluster cluster to https://1.2.3.4

      kubectl config set clusters.my-cluster.server https://1.2.3.4
       
      -

      Set certificate-authority-data field on the my-cluster cluster.

      +

      Set the certificate-authority-data field on the my-cluster cluster

      kubectl config set clusters.my-cluster.certificate-authority-data $(echo "cert_data_here" | base64 -i -)
       
      -

      Set cluster field in the my-context context to my-cluster.

      +

      Set the cluster field in the my-context context to my-cluster

      kubectl config set contexts.my-context.cluster my-cluster
       
      -

      Set client-key-data field in the cluster-admin user using --set-raw-bytes option.

      +

      Set the client-key-data field in the cluster-admin user using --set-raw-bytes option

      kubectl config set users.cluster-admin.client-key-data cert_data_here --set-raw-bytes=true
       
      -

      Sets an individual value in a kubeconfig file

      +

      Set an individual value in a kubeconfig file.

      PROPERTY_NAME is a dot delimited name where each token represents either an attribute name or a map key. Map keys may not contain dots.

      -

      PROPERTY_VALUE is the new value you wish to set. Binary fields such as 'certificate-authority-data' expect a base64 encoded string unless the --set-raw-bytes flag is used.

      -

      Specifying a attribute name that already exists will merge new fields on top of existing values.

      +

      PROPERTY_VALUE is the new value you want to set. Binary fields such as 'certificate-authority-data' expect a base64 encoded string unless the --set-raw-bytes flag is used.

      +

      Specifying an attribute name that already exists will merge new fields on top of existing values.

      Usage

      $ kubectl config set PROPERTY_NAME PROPERTY_VALUE

      Flags

      @@ -7983,7 +8029,7 @@ source $HOME/.bash_profile

      set-cluster

      -

      Set only the server field on the e2e cluster entry without touching other values.

      +

      Set only the server field on the e2e cluster entry without touching other values

      kubectl config set-cluster e2e --server=https://1.2.3.4
       
      @@ -8002,7 +8048,7 @@ source $HOME/.bash_profile
      kubectl config set-cluster e2e --tls-server-name=my-cluster-name
       
      -

      Sets a cluster entry in kubeconfig.

      +

      Set a cluster entry in kubeconfig.

      Specifying a name that already exists will merge new fields on top of existing values for those fields.

      Usage

      $ kubectl config set-cluster NAME [--server=server] [--certificate-authority=path/to/certificate/authority] [--insecure-skip-tls-verify=true] [--tls-server-name=example.com]

      @@ -8032,7 +8078,7 @@ source $HOME/.bash_profile
      kubectl config set-context gce --user=cluster-admin
       
      -

      Sets a context entry in kubeconfig

      +

      Set a context entry in kubeconfig.

      Specifying a name that already exists will merge new fields on top of existing values for those fields.

      Usage

      $ kubectl config set-context [NAME | --current] [--cluster=cluster_nickname] [--user=user_nickname] [--namespace=namespace]

      @@ -8058,7 +8104,7 @@ source $HOME/.bash_profile

      set-credentials

      -

      Set only the "client-key" field on the "cluster-admin" # entry, without touching other values:

      +

      Set only the "client-key" field on the "cluster-admin" # entry, without touching other values

      kubectl config set-credentials cluster-admin --client-key=~/.kube/admin.key
       
      @@ -8107,7 +8153,7 @@ source $HOME/.bash_profile
      kubectl config set-credentials cluster-admin --exec-env=var-to-remove-
       
      -

      Sets a user entry in kubeconfig

      +

      Set a user entry in kubeconfig.

      Specifying a name that already exists will merge new fields on top of existing values.

      Client-certificate flags: --client-certificate=certfile --client-key=keyfile

      @@ -8176,16 +8222,16 @@ source $HOME/.bash_profile

      unset

      -

      Unset the current-context.

      +

      Unset the current-context

      kubectl config unset current-context
       
      -

      Unset namespace in foo context.

      +

      Unset namespace in foo context

      kubectl config unset contexts.foo.namespace
       
      -

      Unsets an individual value in a kubeconfig file

      +

      Unset an individual value in a kubeconfig file.

      PROPERTY_NAME is a dot delimited name where each token represents either an attribute name or a map key. Map keys may not contain dots.

      Usage

      $ kubectl config unset PROPERTY_NAME

      @@ -8196,18 +8242,18 @@ source $HOME/.bash_profile
      kubectl config use-context minikube
       
      -

      Sets the current-context in a kubeconfig file

      +

      Set the current-context in a kubeconfig file.

      Usage

      $ kubectl config use-context CONTEXT_NAME


      view

      -

      Show merged kubeconfig settings.

      +

      Show merged kubeconfig settings

      kubectl config view
       
      -

      Show merged kubeconfig settings and raw certificate data.

      +

      Show merged kubeconfig settings and raw certificate data

      kubectl config view --raw
       
      @@ -8293,7 +8339,7 @@ source $HOME/.bash_profile
      kubectl explain pods.spec.containers
       
      -

      List the fields for supported resources

      +

      List the fields for supported resources.

      This command describes the fields associated with each supported API resource. Fields are identified via a simple JSONPath identifier:

      <type>.<fieldName>[.<fieldName>]

      Add the --recursive flag to display all of the fields at once without descriptions. Information about each field is retrieved from the server in OpenAPI format.

      @@ -8374,7 +8420,7 @@ source $HOME/.bash_profile
      kubectl version
       
      -

      Print the client and server version information for the current context

      +

      Print the client and server version information for the current context.

      Usage

      $ kubectl version

      Flags

      diff --git a/static/docs/reference/generated/kubernetes-api/v1.20/index.html b/static/docs/reference/generated/kubernetes-api/v1.20/index.html index 618eefa545..7476971f21 100644 --- a/static/docs/reference/generated/kubernetes-api/v1.20/index.html +++ b/static/docs/reference/generated/kubernetes-api/v1.20/index.html @@ -771,27 +771,27 @@
      - -
    + + + + + + + diff --git a/static/docs/reference/generated/kubernetes-api/v1.22/js/navData.js b/static/docs/reference/generated/kubernetes-api/v1.22/js/navData.js new file mode 100644 index 0000000000..cd194cf369 --- /dev/null +++ b/static/docs/reference/generated/kubernetes-api/v1.22/js/navData.js @@ -0,0 +1 @@ +(function(){navData={"toc":[{"section":"webhookclientconfig-v1-apiextensions-k8s-io","subsections":[]},{"section":"volumeerror-v1alpha1-storage-k8s-io","subsections":[]},{"section":"volumeattachmentsource-v1alpha1-storage-k8s-io","subsections":[]},{"section":"volumeattachment-v1alpha1-storage-k8s-io","subsections":[{"section":"-strong-read-operations-volumeattachment-v1alpha1-storage-k8s-io-strong-","subsections":[{"section":"watch-list-volumeattachment-v1alpha1-storage-k8s-io","subsections":[]},{"section":"watch-volumeattachment-v1alpha1-storage-k8s-io","subsections":[]},{"section":"list-volumeattachment-v1alpha1-storage-k8s-io","subsections":[]},{"section":"read-volumeattachment-v1alpha1-storage-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-volumeattachment-v1alpha1-storage-k8s-io-strong-","subsections":[{"section":"delete-collection-volumeattachment-v1alpha1-storage-k8s-io","subsections":[]},{"section":"delete-volumeattachment-v1alpha1-storage-k8s-io","subsections":[]},{"section":"replace-volumeattachment-v1alpha1-storage-k8s-io","subsections":[]},{"section":"patch-volumeattachment-v1alpha1-storage-k8s-io","subsections":[]},{"section":"create-volumeattachment-v1alpha1-storage-k8s-io","subsections":[]}]}]},{"section":"tokenrequest-v1-storage-k8s-io","subsections":[]},{"section":"subject-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"subject-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"servicereference-v1-apiregistration-k8s-io","subsections":[]},{"section":"servicereference-v1-apiextensions-k8s-io","subsections":[]},{"section":"scheduling-v1alpha1-node-k8s-io","subsections":[]},{"section":"schedulin
g-v1beta1-node-k8s-io","subsections":[]},{"section":"runtimeclass-v1alpha1-node-k8s-io","subsections":[{"section":"-strong-read-operations-runtimeclass-v1alpha1-node-k8s-io-strong-","subsections":[{"section":"watch-list-runtimeclass-v1alpha1-node-k8s-io","subsections":[]},{"section":"watch-runtimeclass-v1alpha1-node-k8s-io","subsections":[]},{"section":"list-runtimeclass-v1alpha1-node-k8s-io","subsections":[]},{"section":"read-runtimeclass-v1alpha1-node-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-runtimeclass-v1alpha1-node-k8s-io-strong-","subsections":[{"section":"delete-collection-runtimeclass-v1alpha1-node-k8s-io","subsections":[]},{"section":"delete-runtimeclass-v1alpha1-node-k8s-io","subsections":[]},{"section":"replace-runtimeclass-v1alpha1-node-k8s-io","subsections":[]},{"section":"patch-runtimeclass-v1alpha1-node-k8s-io","subsections":[]},{"section":"create-runtimeclass-v1alpha1-node-k8s-io","subsections":[]}]}]},{"section":"runtimeclass-v1beta1-node-k8s-io","subsections":[{"section":"-strong-read-operations-runtimeclass-v1beta1-node-k8s-io-strong-","subsections":[{"section":"watch-list-runtimeclass-v1beta1-node-k8s-io","subsections":[]},{"section":"watch-runtimeclass-v1beta1-node-k8s-io","subsections":[]},{"section":"list-runtimeclass-v1beta1-node-k8s-io","subsections":[]},{"section":"read-runtimeclass-v1beta1-node-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-runtimeclass-v1beta1-node-k8s-io-strong-","subsections":[{"section":"delete-collection-runtimeclass-v1beta1-node-k8s-io","subsections":[]},{"section":"delete-runtimeclass-v1beta1-node-k8s-io","subsections":[]},{"section":"replace-runtimeclass-v1beta1-node-k8s-io","subsections":[]},{"section":"patch-runtimeclass-v1beta1-node-k8s-io","subsections":[]},{"section":"create-runtimeclass-v1beta1-node-k8s-io","subsections":[]}]}]},{"section":"roleref-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"rolebinding-v1alpha1-rbac-authorization-k8s-io","subsec
tions":[{"section":"-strong-read-operations-rolebinding-v1alpha1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-rolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"watch-list-rolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"watch-rolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-all-namespaces-rolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-rolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"read-rolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-rolebinding-v1alpha1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"delete-collection-rolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"delete-rolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"replace-rolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"patch-rolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"create-rolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]}]}]},{"section":"role-v1alpha1-rbac-authorization-k8s-io","subsections":[{"section":"-strong-read-operations-role-v1alpha1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-role-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"watch-list-role-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"watch-role-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-all-namespaces-role-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-role-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"read-role-v1alpha1-rbac-authorization-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-role-v1alpha1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"delete-colle
ction-role-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"delete-role-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"replace-role-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"patch-role-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"create-role-v1alpha1-rbac-authorization-k8s-io","subsections":[]}]}]},{"section":"resourcemetricstatus-v2beta1-autoscaling","subsections":[]},{"section":"resourcemetricsource-v2beta1-autoscaling","subsections":[]},{"section":"priorityclass-v1alpha1-scheduling-k8s-io","subsections":[{"section":"-strong-read-operations-priorityclass-v1alpha1-scheduling-k8s-io-strong-","subsections":[{"section":"watch-list-priorityclass-v1alpha1-scheduling-k8s-io","subsections":[]},{"section":"watch-priorityclass-v1alpha1-scheduling-k8s-io","subsections":[]},{"section":"list-priorityclass-v1alpha1-scheduling-k8s-io","subsections":[]},{"section":"read-priorityclass-v1alpha1-scheduling-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-priorityclass-v1alpha1-scheduling-k8s-io-strong-","subsections":[{"section":"delete-collection-priorityclass-v1alpha1-scheduling-k8s-io","subsections":[]},{"section":"delete-priorityclass-v1alpha1-scheduling-k8s-io","subsections":[]},{"section":"replace-priorityclass-v1alpha1-scheduling-k8s-io","subsections":[]},{"section":"patch-priorityclass-v1alpha1-scheduling-k8s-io","subsections":[]},{"section":"create-priorityclass-v1alpha1-scheduling-k8s-io","subsections":[]}]}]},{"section":"policyrule-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"podsmetricstatus-v2beta1-autoscaling","subsections":[]},{"section":"podsmetricsource-v2beta1-autoscaling","subsections":[]},{"section":"poddisruptionbudget-v1beta1-policy","subsections":[{"section":"-strong-status-operations-poddisruptionbudget-v1beta1-policy-strong-","subsections":[{"section":"replace-status-poddisruptionbudget-v1beta1-policy","subsections":[]},{"section":"read-statu
s-poddisruptionbudget-v1beta1-policy","subsections":[]},{"section":"patch-status-poddisruptionbudget-v1beta1-policy","subsections":[]}]},{"section":"-strong-read-operations-poddisruptionbudget-v1beta1-policy-strong-","subsections":[{"section":"watch-list-all-namespaces-poddisruptionbudget-v1beta1-policy","subsections":[]},{"section":"watch-list-poddisruptionbudget-v1beta1-policy","subsections":[]},{"section":"watch-poddisruptionbudget-v1beta1-policy","subsections":[]},{"section":"list-all-namespaces-poddisruptionbudget-v1beta1-policy","subsections":[]},{"section":"list-poddisruptionbudget-v1beta1-policy","subsections":[]},{"section":"read-poddisruptionbudget-v1beta1-policy","subsections":[]}]},{"section":"-strong-write-operations-poddisruptionbudget-v1beta1-policy-strong-","subsections":[{"section":"delete-collection-poddisruptionbudget-v1beta1-policy","subsections":[]},{"section":"delete-poddisruptionbudget-v1beta1-policy","subsections":[]},{"section":"replace-poddisruptionbudget-v1beta1-policy","subsections":[]},{"section":"patch-poddisruptionbudget-v1beta1-policy","subsections":[]},{"section":"create-poddisruptionbudget-v1beta1-policy","subsections":[]}]}]},{"section":"overhead-v1alpha1-node-k8s-io","subsections":[]},{"section":"overhead-v1beta1-node-k8s-io","subsections":[]},{"section":"objectmetricstatus-v2beta1-autoscaling","subsections":[]},{"section":"objectmetricsource-v2beta1-autoscaling","subsections":[]},{"section":"metricstatus-v2beta1-autoscaling","subsections":[]},{"section":"metricspec-v2beta1-autoscaling","subsections":[]},{"section":"jobtemplatespec-v1beta1-batch","subsections":[]},{"section":"horizontalpodautoscalercondition-v2beta1-autoscaling","subsections":[]},{"section":"horizontalpodautoscaler-v2beta1-autoscaling","subsections":[{"section":"-strong-status-operations-horizontalpodautoscaler-v2beta1-autoscaling-strong-","subsections":[{"section":"replace-status-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]},{"section":"read-stat
us-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]},{"section":"patch-status-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]}]},{"section":"-strong-read-operations-horizontalpodautoscaler-v2beta1-autoscaling-strong-","subsections":[{"section":"watch-list-all-namespaces-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]},{"section":"watch-list-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]},{"section":"watch-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]},{"section":"list-all-namespaces-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]},{"section":"list-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]},{"section":"read-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]}]},{"section":"-strong-write-operations-horizontalpodautoscaler-v2beta1-autoscaling-strong-","subsections":[{"section":"delete-collection-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]},{"section":"delete-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]},{"section":"replace-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]},{"section":"patch-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]},{"section":"create-horizontalpodautoscaler-v2beta1-autoscaling","subsections":[]}]}]},{"section":"horizontalpodautoscaler-v2beta2-autoscaling","subsections":[{"section":"-strong-status-operations-horizontalpodautoscaler-v2beta2-autoscaling-strong-","subsections":[{"section":"replace-status-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]},{"section":"read-status-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]},{"section":"patch-status-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]}]},{"section":"-strong-read-operations-horizontalpodautoscaler-v2beta2-autoscaling-strong-","subsections":[{"section":"watch-list-all-namespaces-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]},{"section":"watch-list-horizontalpo
dautoscaler-v2beta2-autoscaling","subsections":[]},{"section":"watch-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]},{"section":"list-all-namespaces-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]},{"section":"list-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]},{"section":"read-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]}]},{"section":"-strong-write-operations-horizontalpodautoscaler-v2beta2-autoscaling-strong-","subsections":[{"section":"delete-collection-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]},{"section":"delete-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]},{"section":"replace-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]},{"section":"patch-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]},{"section":"create-horizontalpodautoscaler-v2beta2-autoscaling","subsections":[]}]}]},{"section":"forzone-v1beta1-discovery-k8s-io","subsections":[]},{"section":"externalmetricstatus-v2beta1-autoscaling","subsections":[]},{"section":"externalmetricsource-v2beta1-autoscaling","subsections":[]},{"section":"eventseries-v1beta1-events-k8s-io","subsections":[]},{"section":"eventseries-v1-core","subsections":[]},{"section":"event-v1beta1-events-k8s-io","subsections":[{"section":"-strong-read-operations-event-v1beta1-events-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-event-v1beta1-events-k8s-io","subsections":[]},{"section":"watch-list-event-v1beta1-events-k8s-io","subsections":[]},{"section":"watch-event-v1beta1-events-k8s-io","subsections":[]},{"section":"list-all-namespaces-event-v1beta1-events-k8s-io","subsections":[]},{"section":"list-event-v1beta1-events-k8s-io","subsections":[]},{"section":"read-event-v1beta1-events-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-event-v1beta1-events-k8s-io-strong-","subsections":[{"section":"delete-collection-event-v1beta1-events-k8s-io","subsections":[]},{"section":"delete
-event-v1beta1-events-k8s-io","subsections":[]},{"section":"replace-event-v1beta1-events-k8s-io","subsections":[]},{"section":"patch-event-v1beta1-events-k8s-io","subsections":[]},{"section":"create-event-v1beta1-events-k8s-io","subsections":[]}]}]},{"section":"event-v1-core","subsections":[{"section":"-strong-read-operations-event-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-event-v1-core","subsections":[]},{"section":"watch-list-event-v1-core","subsections":[]},{"section":"watch-event-v1-core","subsections":[]},{"section":"list-all-namespaces-event-v1-core","subsections":[]},{"section":"list-event-v1-core","subsections":[]},{"section":"read-event-v1-core","subsections":[]}]},{"section":"-strong-write-operations-event-v1-core-strong-","subsections":[{"section":"delete-collection-event-v1-core","subsections":[]},{"section":"delete-event-v1-core","subsections":[]},{"section":"replace-event-v1-core","subsections":[]},{"section":"patch-event-v1-core","subsections":[]},{"section":"create-event-v1-core","subsections":[]}]}]},{"section":"endpointslice-v1beta1-discovery-k8s-io","subsections":[{"section":"-strong-read-operations-endpointslice-v1beta1-discovery-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-endpointslice-v1beta1-discovery-k8s-io","subsections":[]},{"section":"watch-list-endpointslice-v1beta1-discovery-k8s-io","subsections":[]},{"section":"watch-endpointslice-v1beta1-discovery-k8s-io","subsections":[]},{"section":"list-all-namespaces-endpointslice-v1beta1-discovery-k8s-io","subsections":[]},{"section":"list-endpointslice-v1beta1-discovery-k8s-io","subsections":[]},{"section":"read-endpointslice-v1beta1-discovery-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-endpointslice-v1beta1-discovery-k8s-io-strong-","subsections":[{"section":"delete-collection-endpointslice-v1beta1-discovery-k8s-io","subsections":[]},{"section":"delete-endpointslice-v1beta1-discovery-k8s-io","subsections":[]},{"section":"r
eplace-endpointslice-v1beta1-discovery-k8s-io","subsections":[]},{"section":"patch-endpointslice-v1beta1-discovery-k8s-io","subsections":[]},{"section":"create-endpointslice-v1beta1-discovery-k8s-io","subsections":[]}]}]},{"section":"endpointport-v1beta1-discovery-k8s-io","subsections":[]},{"section":"endpointport-v1-discovery-k8s-io","subsections":[]},{"section":"endpointhints-v1beta1-discovery-k8s-io","subsections":[]},{"section":"endpointconditions-v1beta1-discovery-k8s-io","subsections":[]},{"section":"endpoint-v1beta1-discovery-k8s-io","subsections":[]},{"section":"crossversionobjectreference-v2beta1-autoscaling","subsections":[]},{"section":"crossversionobjectreference-v2beta2-autoscaling","subsections":[]},{"section":"cronjob-v1beta1-batch","subsections":[{"section":"-strong-status-operations-cronjob-v1beta1-batch-strong-","subsections":[{"section":"replace-status-cronjob-v1beta1-batch","subsections":[]},{"section":"read-status-cronjob-v1beta1-batch","subsections":[]},{"section":"patch-status-cronjob-v1beta1-batch","subsections":[]}]},{"section":"-strong-read-operations-cronjob-v1beta1-batch-strong-","subsections":[{"section":"watch-list-all-namespaces-cronjob-v1beta1-batch","subsections":[]},{"section":"watch-list-cronjob-v1beta1-batch","subsections":[]},{"section":"watch-cronjob-v1beta1-batch","subsections":[]},{"section":"list-all-namespaces-cronjob-v1beta1-batch","subsections":[]},{"section":"list-cronjob-v1beta1-batch","subsections":[]},{"section":"read-cronjob-v1beta1-batch","subsections":[]}]},{"section":"-strong-write-operations-cronjob-v1beta1-batch-strong-","subsections":[{"section":"delete-collection-cronjob-v1beta1-batch","subsections":[]},{"section":"delete-cronjob-v1beta1-batch","subsections":[]},{"section":"replace-cronjob-v1beta1-batch","subsections":[]},{"section":"patch-cronjob-v1beta1-batch","subsections":[]},{"section":"create-cronjob-v1beta1-batch","subsections":[]}]}]},{"section":"containerresourcemetricstatus-v2beta1-autoscaling","subse
ctions":[]},{"section":"containerresourcemetricsource-v2beta1-autoscaling","subsections":[]},{"section":"clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[{"section":"-strong-read-operations-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"watch-list-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"watch-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"read-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"delete-collection-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"delete-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"replace-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"patch-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"create-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","subsections":[]}]}]},{"section":"clusterrole-v1alpha1-rbac-authorization-k8s-io","subsections":[{"section":"-strong-read-operations-clusterrole-v1alpha1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"watch-list-clusterrole-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"watch-clusterrole-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-clusterrole-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"read-clusterrole-v1alpha1-rbac-authorization-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-clusterrole-v1alpha1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"delete-collection-clusterrole-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"delete-cluste
rrole-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"replace-clusterrole-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"patch-clusterrole-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"create-clusterrole-v1alpha1-rbac-authorization-k8s-io","subsections":[]}]}]},{"section":"csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[{"section":"-strong-read-operations-csistoragecapacity-v1alpha1-storage-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[]},{"section":"watch-list-csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[]},{"section":"watch-csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[]},{"section":"list-all-namespaces-csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[]},{"section":"list-csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[]},{"section":"read-csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-csistoragecapacity-v1alpha1-storage-k8s-io-strong-","subsections":[{"section":"delete-collection-csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[]},{"section":"delete-csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[]},{"section":"replace-csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[]},{"section":"patch-csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[]},{"section":"create-csistoragecapacity-v1alpha1-storage-k8s-io","subsections":[]}]}]},{"section":"aggregationrule-v1alpha1-rbac-authorization-k8s-io","subsections":[]},{"section":"-strong-old-api-versions-strong-","subsections":[]},{"section":"windowssecuritycontextoptions-v1-core","subsections":[]},{"section":"weightedpodaffinityterm-v1-core","subsections":[]},{"section":"webhookconversion-v1-apiextensions-k8s-io","subsections":[]},{"section":"webhookclientconfig-v1-admissionregistration-k8s-io","subsections":[]},{"section":"watchevent-v1-meta
","subsections":[]},{"section":"vspherevirtualdiskvolumesource-v1-core","subsections":[]},{"section":"volumeprojection-v1-core","subsections":[]},{"section":"volumenoderesources-v1-storage-k8s-io","subsections":[]},{"section":"volumenodeaffinity-v1-core","subsections":[]},{"section":"volumemount-v1-core","subsections":[]},{"section":"volumeerror-v1-storage-k8s-io","subsections":[]},{"section":"volumedevice-v1-core","subsections":[]},{"section":"volumeattachmentsource-v1-storage-k8s-io","subsections":[]},{"section":"validatingwebhook-v1-admissionregistration-k8s-io","subsections":[]},{"section":"usersubject-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"userinfo-v1-authentication-k8s-io","subsections":[]},{"section":"uncountedterminatedpods-v1-batch","subsections":[]},{"section":"typedlocalobjectreference-v1-core","subsections":[]},{"section":"topologyspreadconstraint-v1-core","subsections":[]},{"section":"topologyselectorterm-v1-core","subsections":[]},{"section":"topologyselectorlabelrequirement-v1-core","subsections":[]},{"section":"toleration-v1-core","subsections":[]},{"section":"time-v1-meta","subsections":[]},{"section":"taint-v1-core","subsections":[]},{"section":"tcpsocketaction-v1-core","subsections":[]},{"section":"sysctl-v1-core","subsections":[]},{"section":"supplementalgroupsstrategyoptions-v1beta1-policy","subsections":[]},{"section":"subjectrulesreviewstatus-v1-authorization-k8s-io","subsections":[]},{"section":"subject-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"storageversioncondition-v1alpha1-internal-apiserver-k8s-io","subsections":[]},{"section":"storageosvolumesource-v1-core","subsections":[]},{"section":"storageospersistentvolumesource-v1-core","subsections":[]},{"section":"statusdetails-v1-meta","subsections":[]},{"section":"statuscause-v1-meta","subsections":[]},{"section":"status-v1-meta","subsections":[]},{"section":"statefulsetupdatestrategy-v1-apps","subsections":[]},{"section":"statefulsetcond
ition-v1-apps","subsections":[]},{"section":"sessionaffinityconfig-v1-core","subsections":[]},{"section":"servicereference-v1-admissionregistration-k8s-io","subsections":[]},{"section":"serviceport-v1-core","subsections":[]},{"section":"servicebackendport-v1-networking-k8s-io","subsections":[]},{"section":"serviceaccounttokenprojection-v1-core","subsections":[]},{"section":"serviceaccountsubject-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"serverstorageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]},{"section":"serveraddressbyclientcidr-v1-meta","subsections":[]},{"section":"securitycontext-v1-core","subsections":[]},{"section":"secretvolumesource-v1-core","subsections":[]},{"section":"secretreference-v1-core","subsections":[]},{"section":"secretprojection-v1-core","subsections":[]},{"section":"secretkeyselector-v1-core","subsections":[]},{"section":"secretenvsource-v1-core","subsections":[]},{"section":"seccompprofile-v1-core","subsections":[]},{"section":"scopedresourceselectorrequirement-v1-core","subsections":[]},{"section":"scopeselector-v1-core","subsections":[]},{"section":"scheduling-v1-node-k8s-io","subsections":[]},{"section":"scaleiovolumesource-v1-core","subsections":[]},{"section":"scaleiopersistentvolumesource-v1-core","subsections":[]},{"section":"scale-v1-autoscaling","subsections":[]},{"section":"selinuxstrategyoptions-v1beta1-policy","subsections":[]},{"section":"selinuxoptions-v1-core","subsections":[]},{"section":"runtimeclassstrategyoptions-v1beta1-policy","subsections":[]},{"section":"runasuserstrategyoptions-v1beta1-policy","subsections":[]},{"section":"runasgroupstrategyoptions-v1beta1-policy","subsections":[]},{"section":"rulewithoperations-v1-admissionregistration-k8s-io","subsections":[]},{"section":"rollingupdatestatefulsetstrategy-v1-apps","subsections":[]},{"section":"roleref-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"resourcerule-v1-authorization-k8s-io","subsections":[]},{"section"
:"resourcerequirements-v1-core","subsections":[]},{"section":"resourcepolicyrule-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"resourcemetricstatus-v2beta2-autoscaling","subsections":[]},{"section":"resourcemetricsource-v2beta2-autoscaling","subsections":[]},{"section":"resourcefieldselector-v1-core","subsections":[]},{"section":"resourceattributes-v1-authorization-k8s-io","subsections":[]},{"section":"replicationcontrollercondition-v1-core","subsections":[]},{"section":"replicasetcondition-v1-apps","subsections":[]},{"section":"rbdvolumesource-v1-core","subsections":[]},{"section":"rbdpersistentvolumesource-v1-core","subsections":[]},{"section":"quobytevolumesource-v1-core","subsections":[]},{"section":"queuingconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"quantity-resource-core","subsections":[]},{"section":"projectedvolumesource-v1-core","subsections":[]},{"section":"probe-v1-core","subsections":[]},{"section":"prioritylevelconfigurationreference-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"prioritylevelconfigurationcondition-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"preferredschedulingterm-v1-core","subsections":[]},{"section":"preconditions-v1-meta","subsections":[]},{"section":"portworxvolumesource-v1-core","subsections":[]},{"section":"portstatus-v1-core","subsections":[]},{"section":"policyruleswithsubjects-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"policyrule-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"podsmetricstatus-v2beta2-autoscaling","subsections":[]},{"section":"podsmetricsource-v2beta2-autoscaling","subsections":[]},{"section":"podsecuritycontext-v1-core","subsections":[]},{"section":"podreadinessgate-v1-core","subsections":[]},{"section":"podip-v1-core","subsections":[]},{"section":"poddnsconfigoption-v1-core","subsections":[]},{"section":"poddnsconfig-v1-core","subsections":[]},{"section":"podcondition-
v1-core","subsections":[]},{"section":"podantiaffinity-v1-core","subsections":[]},{"section":"podaffinityterm-v1-core","subsections":[]},{"section":"podaffinity-v1-core","subsections":[]},{"section":"photonpersistentdiskvolumesource-v1-core","subsections":[]},{"section":"persistentvolumeclaimvolumesource-v1-core","subsections":[]},{"section":"persistentvolumeclaimtemplate-v1-core","subsections":[]},{"section":"persistentvolumeclaimcondition-v1-core","subsections":[]},{"section":"patch-v1-meta","subsections":[]},{"section":"ownerreference-v1-meta","subsections":[]},{"section":"overhead-v1-node-k8s-io","subsections":[]},{"section":"objectreference-v1-core","subsections":[]},{"section":"objectmetricstatus-v2beta2-autoscaling","subsections":[]},{"section":"objectmetricsource-v2beta2-autoscaling","subsections":[]},{"section":"objectmeta-v1-meta","subsections":[]},{"section":"objectfieldselector-v1-core","subsections":[]},{"section":"nonresourcerule-v1-authorization-k8s-io","subsections":[]},{"section":"nonresourcepolicyrule-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"nonresourceattributes-v1-authorization-k8s-io","subsections":[]},{"section":"nodesysteminfo-v1-core","subsections":[]},{"section":"nodeselectorterm-v1-core","subsections":[]},{"section":"nodeselectorrequirement-v1-core","subsections":[]},{"section":"nodeselector-v1-core","subsections":[]},{"section":"nodedaemonendpoints-v1-core","subsections":[]},{"section":"nodeconfigstatus-v1-core","subsections":[]},{"section":"nodeconfigsource-v1-core","subsections":[]},{"section":"nodecondition-v1-core","subsections":[]},{"section":"nodeaffinity-v1-core","subsections":[]},{"section":"nodeaddress-v1-core","subsections":[]},{"section":"networkpolicyport-v1-networking-k8s-io","subsections":[]},{"section":"networkpolicypeer-v1-networking-k8s-io","subsections":[]},{"section":"networkpolicyingressrule-v1-networking-k8s-io","subsections":[]},{"section":"networkpolicyegressrule-v1-networking-k8s-io","subs
ections":[]},{"section":"namespacecondition-v1-core","subsections":[]},{"section":"nfsvolumesource-v1-core","subsections":[]},{"section":"mutatingwebhook-v1-admissionregistration-k8s-io","subsections":[]},{"section":"microtime-v1-meta","subsections":[]},{"section":"metricvaluestatus-v2beta2-autoscaling","subsections":[]},{"section":"metrictarget-v2beta2-autoscaling","subsections":[]},{"section":"metricstatus-v2beta2-autoscaling","subsections":[]},{"section":"metricspec-v2beta2-autoscaling","subsections":[]},{"section":"metricidentifier-v2beta2-autoscaling","subsections":[]},{"section":"managedfieldsentry-v1-meta","subsections":[]},{"section":"localvolumesource-v1-core","subsections":[]},{"section":"localobjectreference-v1-core","subsections":[]},{"section":"loadbalancerstatus-v1-core","subsections":[]},{"section":"loadbalanceringress-v1-core","subsections":[]},{"section":"listmeta-v1-meta","subsections":[]},{"section":"limitedprioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"limitresponse-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"limitrangeitem-v1-core","subsections":[]},{"section":"lifecycle-v1-core","subsections":[]},{"section":"labelselectorrequirement-v1-meta","subsections":[]},{"section":"labelselector-v1-meta","subsections":[]},{"section":"keytopath-v1-core","subsections":[]},{"section":"jobtemplatespec-v1-batch","subsections":[]},{"section":"jobcondition-v1-batch","subsections":[]},{"section":"jsonschemapropsorbool-v1-apiextensions-k8s-io","subsections":[]},{"section":"jsonschemapropsorarray-v1-apiextensions-k8s-io","subsections":[]},{"section":"jsonschemaprops-v1-apiextensions-k8s-io","subsections":[]},{"section":"json-v1-apiextensions-k8s-io","subsections":[]},{"section":"ingresstls-v1-networking-k8s-io","subsections":[]},{"section":"ingressservicebackend-v1-networking-k8s-io","subsections":[]},{"section":"ingressrule-v1-networking-k8s-io","subsections":[]},{"section":"ingressclassparame
tersreference-v1-networking-k8s-io","subsections":[]},{"section":"ingressbackend-v1-networking-k8s-io","subsections":[]},{"section":"iscsivolumesource-v1-core","subsections":[]},{"section":"iscsipersistentvolumesource-v1-core","subsections":[]},{"section":"ipblock-v1-networking-k8s-io","subsections":[]},{"section":"idrange-v1beta1-policy","subsections":[]},{"section":"hostportrange-v1beta1-policy","subsections":[]},{"section":"hostpathvolumesource-v1-core","subsections":[]},{"section":"hostalias-v1-core","subsections":[]},{"section":"horizontalpodautoscalercondition-v2beta2-autoscaling","subsections":[]},{"section":"horizontalpodautoscalerbehavior-v2beta2-autoscaling","subsections":[]},{"section":"handler-v1-core","subsections":[]},{"section":"httpingressrulevalue-v1-networking-k8s-io","subsections":[]},{"section":"httpingresspath-v1-networking-k8s-io","subsections":[]},{"section":"httpheader-v1-core","subsections":[]},{"section":"httpgetaction-v1-core","subsections":[]},{"section":"hpascalingrules-v2beta2-autoscaling","subsections":[]},{"section":"hpascalingpolicy-v2beta2-autoscaling","subsections":[]},{"section":"groupversionfordiscovery-v1-meta","subsections":[]},{"section":"groupsubject-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"glusterfsvolumesource-v1-core","subsections":[]},{"section":"glusterfspersistentvolumesource-v1-core","subsections":[]},{"section":"gitrepovolumesource-v1-core","subsections":[]},{"section":"gcepersistentdiskvolumesource-v1-core","subsections":[]},{"section":"forzone-v1-discovery-k8s-io","subsections":[]},{"section":"flowschemacondition-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"flowdistinguishermethod-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"flockervolumesource-v1-core","subsections":[]},{"section":"flexvolumesource-v1-core","subsections":[]},{"section":"flexpersistentvolumesource-v1-core","subsections":[]},{"section":"fieldsv1-v1-meta","subsections":[]},{"sect
ion":"fsgroupstrategyoptions-v1beta1-policy","subsections":[]},{"section":"fcvolumesource-v1-core","subsections":[]},{"section":"externalmetricstatus-v2beta2-autoscaling","subsections":[]},{"section":"externalmetricsource-v2beta2-autoscaling","subsections":[]},{"section":"externaldocumentation-v1-apiextensions-k8s-io","subsections":[]},{"section":"execaction-v1-core","subsections":[]},{"section":"eviction-v1-policy","subsections":[]},{"section":"eventsource-v1-core","subsections":[]},{"section":"eventseries-v1-events-k8s-io","subsections":[]},{"section":"ephemeralvolumesource-v1-core","subsections":[]},{"section":"ephemeralcontainer-v1-core","subsections":[]},{"section":"envvarsource-v1-core","subsections":[]},{"section":"envvar-v1-core","subsections":[]},{"section":"envfromsource-v1-core","subsections":[]},{"section":"endpointsubset-v1-core","subsections":[]},{"section":"endpointport-v1-core","subsections":[]},{"section":"endpointhints-v1-discovery-k8s-io","subsections":[]},{"section":"endpointconditions-v1-discovery-k8s-io","subsections":[]},{"section":"endpointaddress-v1-core","subsections":[]},{"section":"endpoint-v1-discovery-k8s-io","subsections":[]},{"section":"emptydirvolumesource-v1-core","subsections":[]},{"section":"downwardapivolumesource-v1-core","subsections":[]},{"section":"downwardapivolumefile-v1-core","subsections":[]},{"section":"downwardapiprojection-v1-core","subsections":[]},{"section":"deploymentcondition-v1-apps","subsections":[]},{"section":"deleteoptions-v1-meta","subsections":[]},{"section":"daemonsetupdatestrategy-v1-apps","subsections":[]},{"section":"daemonsetcondition-v1-apps","subsections":[]},{"section":"daemonendpoint-v1-core","subsections":[]},{"section":"customresourcevalidation-v1-apiextensions-k8s-io","subsections":[]},{"section":"customresourcesubresources-v1-apiextensions-k8s-io","subsections":[]},{"section":"customresourcesubresourcestatus-v1-apiextensions-k8s-io","subsections":[]},{"section":"customresourcesubresourcescale-v
1-apiextensions-k8s-io","subsections":[]},{"section":"customresourcedefinitionversion-v1-apiextensions-k8s-io","subsections":[]},{"section":"customresourcedefinitionnames-v1-apiextensions-k8s-io","subsections":[]},{"section":"customresourcedefinitioncondition-v1-apiextensions-k8s-io","subsections":[]},{"section":"customresourceconversion-v1-apiextensions-k8s-io","subsections":[]},{"section":"customresourcecolumndefinition-v1-apiextensions-k8s-io","subsections":[]},{"section":"crossversionobjectreference-v1-autoscaling","subsections":[]},{"section":"containerstatewaiting-v1-core","subsections":[]},{"section":"containerstateterminated-v1-core","subsections":[]},{"section":"containerstaterunning-v1-core","subsections":[]},{"section":"containerstate-v1-core","subsections":[]},{"section":"containerresourcemetricstatus-v2beta2-autoscaling","subsections":[]},{"section":"containerresourcemetricsource-v2beta2-autoscaling","subsections":[]},{"section":"containerport-v1-core","subsections":[]},{"section":"containerimage-v1-core","subsections":[]},{"section":"configmapvolumesource-v1-core","subsections":[]},{"section":"configmapprojection-v1-core","subsections":[]},{"section":"configmapnodeconfigsource-v1-core","subsections":[]},{"section":"configmapkeyselector-v1-core","subsections":[]},{"section":"configmapenvsource-v1-core","subsections":[]},{"section":"condition-v1-meta","subsections":[]},{"section":"componentcondition-v1-core","subsections":[]},{"section":"clientipconfig-v1-core","subsections":[]},{"section":"cindervolumesource-v1-core","subsections":[]},{"section":"cinderpersistentvolumesource-v1-core","subsections":[]},{"section":"certificatesigningrequestcondition-v1-certificates-k8s-io","subsections":[]},{"section":"cephfsvolumesource-v1-core","subsections":[]},{"section":"cephfspersistentvolumesource-v1-core","subsections":[]},{"section":"capabilities-v1-core","subsections":[]},{"section":"csivolumesource-v1-core","subsections":[]},{"section":"csipersistentvolumesourc
e-v1-core","subsections":[]},{"section":"csinodedriver-v1-storage-k8s-io","subsections":[]},{"section":"boundobjectreference-v1-authentication-k8s-io","subsections":[]},{"section":"azurefilevolumesource-v1-core","subsections":[]},{"section":"azurefilepersistentvolumesource-v1-core","subsections":[]},{"section":"azurediskvolumesource-v1-core","subsections":[]},{"section":"attachedvolume-v1-core","subsections":[]},{"section":"allowedhostpath-v1beta1-policy","subsections":[]},{"section":"allowedflexvolume-v1beta1-policy","subsections":[]},{"section":"allowedcsidriver-v1beta1-policy","subsections":[]},{"section":"aggregationrule-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"affinity-v1-core","subsections":[]},{"section":"awselasticblockstorevolumesource-v1-core","subsections":[]},{"section":"apiversions-v1-meta","subsections":[]},{"section":"apiservicecondition-v1-apiregistration-k8s-io","subsections":[]},{"section":"apiresource-v1-meta","subsections":[]},{"section":"apigroup-v1-meta","subsections":[]},{"section":"-strong-definitions-strong-","subsections":[]},{"section":"networkpolicy-v1-networking-k8s-io","subsections":[{"section":"-strong-read-operations-networkpolicy-v1-networking-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-networkpolicy-v1-networking-k8s-io","subsections":[]},{"section":"watch-list-networkpolicy-v1-networking-k8s-io","subsections":[]},{"section":"watch-networkpolicy-v1-networking-k8s-io","subsections":[]},{"section":"list-all-namespaces-networkpolicy-v1-networking-k8s-io","subsections":[]},{"section":"list-networkpolicy-v1-networking-k8s-io","subsections":[]},{"section":"read-networkpolicy-v1-networking-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-networkpolicy-v1-networking-k8s-io-strong-","subsections":[{"section":"delete-collection-networkpolicy-v1-networking-k8s-io","subsections":[]},{"section":"delete-networkpolicy-v1-networking-k8s-io","subsections":[]},{"section":"replace-networkpoli
cy-v1-networking-k8s-io","subsections":[]},{"section":"patch-networkpolicy-v1-networking-k8s-io","subsections":[]},{"section":"create-networkpolicy-v1-networking-k8s-io","subsections":[]}]}]},{"section":"tokenreview-v1-authentication-k8s-io","subsections":[{"section":"-strong-write-operations-tokenreview-v1-authentication-k8s-io-strong-","subsections":[{"section":"create-tokenreview-v1-authentication-k8s-io","subsections":[]}]}]},{"section":"tokenrequest-v1-authentication-k8s-io","subsections":[]},{"section":"subjectaccessreview-v1-authorization-k8s-io","subsections":[{"section":"-strong-write-operations-subjectaccessreview-v1-authorization-k8s-io-strong-","subsections":[{"section":"create-subjectaccessreview-v1-authorization-k8s-io","subsections":[]}]}]},{"section":"storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[{"section":"-strong-status-operations-storageversion-v1alpha1-internal-apiserver-k8s-io-strong-","subsections":[{"section":"replace-status-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]},{"section":"read-status-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]},{"section":"patch-status-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]}]},{"section":"-strong-read-operations-storageversion-v1alpha1-internal-apiserver-k8s-io-strong-","subsections":[{"section":"watch-list-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]},{"section":"watch-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]},{"section":"list-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]},{"section":"read-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-storageversion-v1alpha1-internal-apiserver-k8s-io-strong-","subsections":[{"section":"delete-collection-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]},{"section":"delete-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]
},{"section":"replace-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]},{"section":"patch-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]},{"section":"create-storageversion-v1alpha1-internal-apiserver-k8s-io","subsections":[]}]}]},{"section":"serviceaccount-v1-core","subsections":[{"section":"-strong-read-operations-serviceaccount-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-serviceaccount-v1-core","subsections":[]},{"section":"watch-list-serviceaccount-v1-core","subsections":[]},{"section":"watch-serviceaccount-v1-core","subsections":[]},{"section":"list-all-namespaces-serviceaccount-v1-core","subsections":[]},{"section":"list-serviceaccount-v1-core","subsections":[]},{"section":"read-serviceaccount-v1-core","subsections":[]}]},{"section":"-strong-write-operations-serviceaccount-v1-core-strong-","subsections":[{"section":"delete-collection-serviceaccount-v1-core","subsections":[]},{"section":"delete-serviceaccount-v1-core","subsections":[]},{"section":"replace-serviceaccount-v1-core","subsections":[]},{"section":"patch-serviceaccount-v1-core","subsections":[]},{"section":"create-serviceaccount-v1-core","subsections":[]}]}]},{"section":"selfsubjectrulesreview-v1-authorization-k8s-io","subsections":[{"section":"-strong-write-operations-selfsubjectrulesreview-v1-authorization-k8s-io-strong-","subsections":[{"section":"create-selfsubjectrulesreview-v1-authorization-k8s-io","subsections":[]}]}]},{"section":"selfsubjectaccessreview-v1-authorization-k8s-io","subsections":[{"section":"-strong-write-operations-selfsubjectaccessreview-v1-authorization-k8s-io-strong-","subsections":[{"section":"create-selfsubjectaccessreview-v1-authorization-k8s-io","subsections":[]}]}]},{"section":"runtimeclass-v1-node-k8s-io","subsections":[{"section":"-strong-read-operations-runtimeclass-v1-node-k8s-io-strong-","subsections":[{"section":"watch-list-runtimeclass-v1-node-k8s-io","subsections":[]},{"section":"watch-runtimeclass-v
1-node-k8s-io","subsections":[]},{"section":"list-runtimeclass-v1-node-k8s-io","subsections":[]},{"section":"read-runtimeclass-v1-node-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-runtimeclass-v1-node-k8s-io-strong-","subsections":[{"section":"delete-collection-runtimeclass-v1-node-k8s-io","subsections":[]},{"section":"delete-runtimeclass-v1-node-k8s-io","subsections":[]},{"section":"replace-runtimeclass-v1-node-k8s-io","subsections":[]},{"section":"patch-runtimeclass-v1-node-k8s-io","subsections":[]},{"section":"create-runtimeclass-v1-node-k8s-io","subsections":[]}]}]},{"section":"rolebinding-v1-rbac-authorization-k8s-io","subsections":[{"section":"-strong-read-operations-rolebinding-v1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-rolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"watch-list-rolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"watch-rolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-all-namespaces-rolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-rolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"read-rolebinding-v1-rbac-authorization-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-rolebinding-v1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"delete-collection-rolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"delete-rolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"replace-rolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"patch-rolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"create-rolebinding-v1-rbac-authorization-k8s-io","subsections":[]}]}]},{"section":"role-v1-rbac-authorization-k8s-io","subsections":[{"section":"-strong-read-operations-role-v1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-role-v1-rbac-authori
zation-k8s-io","subsections":[]},{"section":"watch-list-role-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"watch-role-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-all-namespaces-role-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-role-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"read-role-v1-rbac-authorization-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-role-v1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"delete-collection-role-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"delete-role-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"replace-role-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"patch-role-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"create-role-v1-rbac-authorization-k8s-io","subsections":[]}]}]},{"section":"resourcequota-v1-core","subsections":[{"section":"-strong-status-operations-resourcequota-v1-core-strong-","subsections":[{"section":"replace-status-resourcequota-v1-core","subsections":[]},{"section":"read-status-resourcequota-v1-core","subsections":[]},{"section":"patch-status-resourcequota-v1-core","subsections":[]}]},{"section":"-strong-read-operations-resourcequota-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-resourcequota-v1-core","subsections":[]},{"section":"watch-list-resourcequota-v1-core","subsections":[]},{"section":"watch-resourcequota-v1-core","subsections":[]},{"section":"list-all-namespaces-resourcequota-v1-core","subsections":[]},{"section":"list-resourcequota-v1-core","subsections":[]},{"section":"read-resourcequota-v1-core","subsections":[]}]},{"section":"-strong-write-operations-resourcequota-v1-core-strong-","subsections":[{"section":"delete-collection-resourcequota-v1-core","subsections":[]},{"section":"delete-resourcequota-v1-core","subsections":[]},{"section":"replace-resourcequota-v1-core","subsections":[]},{"section":"patch-resourcequota-v1-cor
e","subsections":[]},{"section":"create-resourcequota-v1-core","subsections":[]}]}]},{"section":"prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[{"section":"-strong-status-operations-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io-strong-","subsections":[{"section":"replace-status-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"read-status-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"patch-status-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]}]},{"section":"-strong-read-operations-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io-strong-","subsections":[{"section":"watch-list-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"watch-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"list-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"read-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io-strong-","subsections":[{"section":"delete-collection-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"delete-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"replace-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"patch-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"create-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]}]}]},{"section":"persistentvolume-v1-core","subsections":[{"section":"-strong-status-operations-persistentvolume-v1-core-strong-","subsections":[{"section":"replace-status-persistentvolume-v1-
core","subsections":[]},{"section":"read-status-persistentvolume-v1-core","subsections":[]},{"section":"patch-status-persistentvolume-v1-core","subsections":[]}]},{"section":"-strong-read-operations-persistentvolume-v1-core-strong-","subsections":[{"section":"watch-list-persistentvolume-v1-core","subsections":[]},{"section":"watch-persistentvolume-v1-core","subsections":[]},{"section":"list-persistentvolume-v1-core","subsections":[]},{"section":"read-persistentvolume-v1-core","subsections":[]}]},{"section":"-strong-write-operations-persistentvolume-v1-core-strong-","subsections":[{"section":"delete-collection-persistentvolume-v1-core","subsections":[]},{"section":"delete-persistentvolume-v1-core","subsections":[]},{"section":"replace-persistentvolume-v1-core","subsections":[]},{"section":"patch-persistentvolume-v1-core","subsections":[]},{"section":"create-persistentvolume-v1-core","subsections":[]}]}]},{"section":"node-v1-core","subsections":[{"section":"-strong-proxy-operations-node-v1-core-strong-","subsections":[{"section":"replace-connect-proxy-path-node-v1-core","subsections":[]},{"section":"replace-connect-proxy-node-v1-core","subsections":[]},{"section":"head-connect-proxy-path-node-v1-core","subsections":[]},{"section":"head-connect-proxy-node-v1-core","subsections":[]},{"section":"get-connect-proxy-path-node-v1-core","subsections":[]},{"section":"get-connect-proxy-node-v1-core","subsections":[]},{"section":"delete-connect-proxy-path-node-v1-core","subsections":[]},{"section":"delete-connect-proxy-node-v1-core","subsections":[]},{"section":"create-connect-proxy-path-node-v1-core","subsections":[]},{"section":"create-connect-proxy-node-v1-core","subsections":[]}]},{"section":"-strong-status-operations-node-v1-core-strong-","subsections":[{"section":"replace-status-node-v1-core","subsections":[]},{"section":"read-status-node-v1-core","subsections":[]},{"section":"patch-status-node-v1-core","subsections":[]}]},{"section":"-strong-read-operations-node-v1-core-s
trong-","subsections":[{"section":"watch-list-node-v1-core","subsections":[]},{"section":"watch-node-v1-core","subsections":[]},{"section":"list-node-v1-core","subsections":[]},{"section":"read-node-v1-core","subsections":[]}]},{"section":"-strong-write-operations-node-v1-core-strong-","subsections":[{"section":"delete-collection-node-v1-core","subsections":[]},{"section":"delete-node-v1-core","subsections":[]},{"section":"replace-node-v1-core","subsections":[]},{"section":"patch-node-v1-core","subsections":[]},{"section":"create-node-v1-core","subsections":[]}]}]},{"section":"namespace-v1-core","subsections":[{"section":"-strong-status-operations-namespace-v1-core-strong-","subsections":[{"section":"replace-status-namespace-v1-core","subsections":[]},{"section":"read-status-namespace-v1-core","subsections":[]},{"section":"patch-status-namespace-v1-core","subsections":[]}]},{"section":"-strong-read-operations-namespace-v1-core-strong-","subsections":[{"section":"watch-list-namespace-v1-core","subsections":[]},{"section":"watch-namespace-v1-core","subsections":[]},{"section":"list-namespace-v1-core","subsections":[]},{"section":"read-namespace-v1-core","subsections":[]}]},{"section":"-strong-write-operations-namespace-v1-core-strong-","subsections":[{"section":"delete-namespace-v1-core","subsections":[]},{"section":"replace-namespace-v1-core","subsections":[]},{"section":"patch-namespace-v1-core","subsections":[]},{"section":"create-namespace-v1-core","subsections":[]}]}]},{"section":"localsubjectaccessreview-v1-authorization-k8s-io","subsections":[{"section":"-strong-write-operations-localsubjectaccessreview-v1-authorization-k8s-io-strong-","subsections":[{"section":"create-localsubjectaccessreview-v1-authorization-k8s-io","subsections":[]}]}]},{"section":"lease-v1-coordination-k8s-io","subsections":[{"section":"-strong-read-operations-lease-v1-coordination-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-lease-v1-coordination-k8s-io","subsection
s":[]},{"section":"watch-list-lease-v1-coordination-k8s-io","subsections":[]},{"section":"watch-lease-v1-coordination-k8s-io","subsections":[]},{"section":"list-all-namespaces-lease-v1-coordination-k8s-io","subsections":[]},{"section":"list-lease-v1-coordination-k8s-io","subsections":[]},{"section":"read-lease-v1-coordination-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-lease-v1-coordination-k8s-io-strong-","subsections":[{"section":"delete-collection-lease-v1-coordination-k8s-io","subsections":[]},{"section":"delete-lease-v1-coordination-k8s-io","subsections":[]},{"section":"replace-lease-v1-coordination-k8s-io","subsections":[]},{"section":"patch-lease-v1-coordination-k8s-io","subsections":[]},{"section":"create-lease-v1-coordination-k8s-io","subsections":[]}]}]},{"section":"flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[{"section":"-strong-status-operations-flowschema-v1beta1-flowcontrol-apiserver-k8s-io-strong-","subsections":[{"section":"replace-status-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"read-status-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"patch-status-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]}]},{"section":"-strong-read-operations-flowschema-v1beta1-flowcontrol-apiserver-k8s-io-strong-","subsections":[{"section":"watch-list-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"watch-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"list-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"read-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-flowschema-v1beta1-flowcontrol-apiserver-k8s-io-strong-","subsections":[{"section":"delete-collection-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"delete-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections
":[]},{"section":"replace-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"patch-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]},{"section":"create-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","subsections":[]}]}]},{"section":"componentstatus-v1-core","subsections":[{"section":"-strong-read-operations-componentstatus-v1-core-strong-","subsections":[{"section":"list-componentstatus-v1-core","subsections":[]},{"section":"read-componentstatus-v1-core","subsections":[]}]}]},{"section":"clusterrolebinding-v1-rbac-authorization-k8s-io","subsections":[{"section":"-strong-read-operations-clusterrolebinding-v1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"watch-list-clusterrolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"watch-clusterrolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-clusterrolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"read-clusterrolebinding-v1-rbac-authorization-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-clusterrolebinding-v1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"delete-collection-clusterrolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"delete-clusterrolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"replace-clusterrolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"patch-clusterrolebinding-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"create-clusterrolebinding-v1-rbac-authorization-k8s-io","subsections":[]}]}]},{"section":"clusterrole-v1-rbac-authorization-k8s-io","subsections":[{"section":"-strong-read-operations-clusterrole-v1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"watch-list-clusterrole-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"watch-clusterrole-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"list-clusterrole-v1-rbac-authorization-k8s-io"
,"subsections":[]},{"section":"read-clusterrole-v1-rbac-authorization-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-clusterrole-v1-rbac-authorization-k8s-io-strong-","subsections":[{"section":"delete-collection-clusterrole-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"delete-clusterrole-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"replace-clusterrole-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"patch-clusterrole-v1-rbac-authorization-k8s-io","subsections":[]},{"section":"create-clusterrole-v1-rbac-authorization-k8s-io","subsections":[]}]}]},{"section":"certificatesigningrequest-v1-certificates-k8s-io","subsections":[{"section":"-strong-status-operations-certificatesigningrequest-v1-certificates-k8s-io-strong-","subsections":[{"section":"replace-status-certificatesigningrequest-v1-certificates-k8s-io","subsections":[]},{"section":"read-status-certificatesigningrequest-v1-certificates-k8s-io","subsections":[]},{"section":"patch-status-certificatesigningrequest-v1-certificates-k8s-io","subsections":[]}]},{"section":"-strong-read-operations-certificatesigningrequest-v1-certificates-k8s-io-strong-","subsections":[{"section":"watch-list-certificatesigningrequest-v1-certificates-k8s-io","subsections":[]},{"section":"watch-certificatesigningrequest-v1-certificates-k8s-io","subsections":[]},{"section":"list-certificatesigningrequest-v1-certificates-k8s-io","subsections":[]},{"section":"read-certificatesigningrequest-v1-certificates-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-certificatesigningrequest-v1-certificates-k8s-io-strong-","subsections":[{"section":"delete-collection-certificatesigningrequest-v1-certificates-k8s-io","subsections":[]},{"section":"delete-certificatesigningrequest-v1-certificates-k8s-io","subsections":[]},{"section":"replace-certificatesigningrequest-v1-certificates-k8s-io","subsections":[]},{"section":"patch-certificatesigningrequest-v1-certificates-k8s-io","subsection
s":[]},{"section":"create-certificatesigningrequest-v1-certificates-k8s-io","subsections":[]}]}]},{"section":"binding-v1-core","subsections":[{"section":"-strong-write-operations-binding-v1-core-strong-","subsections":[{"section":"create-binding-v1-core","subsections":[]}]}]},{"section":"apiservice-v1-apiregistration-k8s-io","subsections":[{"section":"-strong-status-operations-apiservice-v1-apiregistration-k8s-io-strong-","subsections":[{"section":"replace-status-apiservice-v1-apiregistration-k8s-io","subsections":[]},{"section":"read-status-apiservice-v1-apiregistration-k8s-io","subsections":[]},{"section":"patch-status-apiservice-v1-apiregistration-k8s-io","subsections":[]}]},{"section":"-strong-read-operations-apiservice-v1-apiregistration-k8s-io-strong-","subsections":[{"section":"watch-list-apiservice-v1-apiregistration-k8s-io","subsections":[]},{"section":"watch-apiservice-v1-apiregistration-k8s-io","subsections":[]},{"section":"list-apiservice-v1-apiregistration-k8s-io","subsections":[]},{"section":"read-apiservice-v1-apiregistration-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-apiservice-v1-apiregistration-k8s-io-strong-","subsections":[{"section":"delete-collection-apiservice-v1-apiregistration-k8s-io","subsections":[]},{"section":"delete-apiservice-v1-apiregistration-k8s-io","subsections":[]},{"section":"replace-apiservice-v1-apiregistration-k8s-io","subsections":[]},{"section":"patch-apiservice-v1-apiregistration-k8s-io","subsections":[]},{"section":"create-apiservice-v1-apiregistration-k8s-io","subsections":[]}]}]},{"section":"-strong-cluster-apis-strong-","subsections":[]},{"section":"podsecuritypolicy-v1beta1-policy","subsections":[{"section":"-strong-read-operations-podsecuritypolicy-v1beta1-policy-strong-","subsections":[{"section":"watch-list-podsecuritypolicy-v1beta1-policy","subsections":[]},{"section":"watch-podsecuritypolicy-v1beta1-policy","subsections":[]},{"section":"list-podsecuritypolicy-v1beta1-policy","subsections":[]}
,{"section":"read-podsecuritypolicy-v1beta1-policy","subsections":[]}]},{"section":"-strong-write-operations-podsecuritypolicy-v1beta1-policy-strong-","subsections":[{"section":"delete-collection-podsecuritypolicy-v1beta1-policy","subsections":[]},{"section":"delete-podsecuritypolicy-v1beta1-policy","subsections":[]},{"section":"replace-podsecuritypolicy-v1beta1-policy","subsections":[]},{"section":"patch-podsecuritypolicy-v1beta1-policy","subsections":[]},{"section":"create-podsecuritypolicy-v1beta1-policy","subsections":[]}]}]},{"section":"priorityclass-v1-scheduling-k8s-io","subsections":[{"section":"-strong-read-operations-priorityclass-v1-scheduling-k8s-io-strong-","subsections":[{"section":"watch-list-priorityclass-v1-scheduling-k8s-io","subsections":[]},{"section":"watch-priorityclass-v1-scheduling-k8s-io","subsections":[]},{"section":"list-priorityclass-v1-scheduling-k8s-io","subsections":[]},{"section":"read-priorityclass-v1-scheduling-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-priorityclass-v1-scheduling-k8s-io-strong-","subsections":[{"section":"delete-collection-priorityclass-v1-scheduling-k8s-io","subsections":[]},{"section":"delete-priorityclass-v1-scheduling-k8s-io","subsections":[]},{"section":"replace-priorityclass-v1-scheduling-k8s-io","subsections":[]},{"section":"patch-priorityclass-v1-scheduling-k8s-io","subsections":[]},{"section":"create-priorityclass-v1-scheduling-k8s-io","subsections":[]}]}]},{"section":"poddisruptionbudget-v1-policy","subsections":[{"section":"-strong-status-operations-poddisruptionbudget-v1-policy-strong-","subsections":[{"section":"replace-status-poddisruptionbudget-v1-policy","subsections":[]},{"section":"read-status-poddisruptionbudget-v1-policy","subsections":[]},{"section":"patch-status-poddisruptionbudget-v1-policy","subsections":[]}]},{"section":"-strong-read-operations-poddisruptionbudget-v1-policy-strong-","subsections":[{"section":"watch-list-all-namespaces-poddisruptionbudget-v1-policy","su
bsections":[]},{"section":"watch-list-poddisruptionbudget-v1-policy","subsections":[]},{"section":"watch-poddisruptionbudget-v1-policy","subsections":[]},{"section":"list-all-namespaces-poddisruptionbudget-v1-policy","subsections":[]},{"section":"list-poddisruptionbudget-v1-policy","subsections":[]},{"section":"read-poddisruptionbudget-v1-policy","subsections":[]}]},{"section":"-strong-write-operations-poddisruptionbudget-v1-policy-strong-","subsections":[{"section":"delete-collection-poddisruptionbudget-v1-policy","subsections":[]},{"section":"delete-poddisruptionbudget-v1-policy","subsections":[]},{"section":"replace-poddisruptionbudget-v1-policy","subsections":[]},{"section":"patch-poddisruptionbudget-v1-policy","subsections":[]},{"section":"create-poddisruptionbudget-v1-policy","subsections":[]}]}]},{"section":"podtemplate-v1-core","subsections":[{"section":"-strong-read-operations-podtemplate-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-podtemplate-v1-core","subsections":[]},{"section":"watch-list-podtemplate-v1-core","subsections":[]},{"section":"watch-podtemplate-v1-core","subsections":[]},{"section":"list-all-namespaces-podtemplate-v1-core","subsections":[]},{"section":"list-podtemplate-v1-core","subsections":[]},{"section":"read-podtemplate-v1-core","subsections":[]}]},{"section":"-strong-write-operations-podtemplate-v1-core-strong-","subsections":[{"section":"delete-collection-podtemplate-v1-core","subsections":[]},{"section":"delete-podtemplate-v1-core","subsections":[]},{"section":"replace-podtemplate-v1-core","subsections":[]},{"section":"patch-podtemplate-v1-core","subsections":[]},{"section":"create-podtemplate-v1-core","subsections":[]}]}]},{"section":"validatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[{"section":"-strong-read-operations-validatingwebhookconfiguration-v1-admissionregistration-k8s-io-strong-","subsections":[{"section":"watch-list-validatingwebhookconfiguration-v1-admissionregistrati
on-k8s-io","subsections":[]},{"section":"watch-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"list-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"read-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-validatingwebhookconfiguration-v1-admissionregistration-k8s-io-strong-","subsections":[{"section":"delete-collection-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"delete-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"replace-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"patch-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"create-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]}]}]},{"section":"mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[{"section":"-strong-read-operations-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io-strong-","subsections":[{"section":"watch-list-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"watch-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"list-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"read-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io-strong-","subsections":[{"section":"delete-collection-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"delete-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"replace-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsec
tions":[]},{"section":"patch-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]},{"section":"create-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","subsections":[]}]}]},{"section":"horizontalpodautoscaler-v1-autoscaling","subsections":[{"section":"-strong-status-operations-horizontalpodautoscaler-v1-autoscaling-strong-","subsections":[{"section":"replace-status-horizontalpodautoscaler-v1-autoscaling","subsections":[]},{"section":"read-status-horizontalpodautoscaler-v1-autoscaling","subsections":[]},{"section":"patch-status-horizontalpodautoscaler-v1-autoscaling","subsections":[]}]},{"section":"-strong-read-operations-horizontalpodautoscaler-v1-autoscaling-strong-","subsections":[{"section":"watch-list-all-namespaces-horizontalpodautoscaler-v1-autoscaling","subsections":[]},{"section":"watch-list-horizontalpodautoscaler-v1-autoscaling","subsections":[]},{"section":"watch-horizontalpodautoscaler-v1-autoscaling","subsections":[]},{"section":"list-all-namespaces-horizontalpodautoscaler-v1-autoscaling","subsections":[]},{"section":"list-horizontalpodautoscaler-v1-autoscaling","subsections":[]},{"section":"read-horizontalpodautoscaler-v1-autoscaling","subsections":[]}]},{"section":"-strong-write-operations-horizontalpodautoscaler-v1-autoscaling-strong-","subsections":[{"section":"delete-collection-horizontalpodautoscaler-v1-autoscaling","subsections":[]},{"section":"delete-horizontalpodautoscaler-v1-autoscaling","subsections":[]},{"section":"replace-horizontalpodautoscaler-v1-autoscaling","subsections":[]},{"section":"patch-horizontalpodautoscaler-v1-autoscaling","subsections":[]},{"section":"create-horizontalpodautoscaler-v1-autoscaling","subsections":[]}]}]},{"section":"limitrange-v1-core","subsections":[{"section":"-strong-read-operations-limitrange-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-limitrange-v1-core","subsections":[]},{"section":"watch-list-limitrange-v1-core","subsections":[]},{"section":
"watch-limitrange-v1-core","subsections":[]},{"section":"list-all-namespaces-limitrange-v1-core","subsections":[]},{"section":"list-limitrange-v1-core","subsections":[]},{"section":"read-limitrange-v1-core","subsections":[]}]},{"section":"-strong-write-operations-limitrange-v1-core-strong-","subsections":[{"section":"delete-collection-limitrange-v1-core","subsections":[]},{"section":"delete-limitrange-v1-core","subsections":[]},{"section":"replace-limitrange-v1-core","subsections":[]},{"section":"patch-limitrange-v1-core","subsections":[]},{"section":"create-limitrange-v1-core","subsections":[]}]}]},{"section":"event-v1-events-k8s-io","subsections":[{"section":"-strong-read-operations-event-v1-events-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-event-v1-events-k8s-io","subsections":[]},{"section":"watch-list-event-v1-events-k8s-io","subsections":[]},{"section":"watch-event-v1-events-k8s-io","subsections":[]},{"section":"list-all-namespaces-event-v1-events-k8s-io","subsections":[]},{"section":"list-event-v1-events-k8s-io","subsections":[]},{"section":"read-event-v1-events-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-event-v1-events-k8s-io-strong-","subsections":[{"section":"delete-collection-event-v1-events-k8s-io","subsections":[]},{"section":"delete-event-v1-events-k8s-io","subsections":[]},{"section":"replace-event-v1-events-k8s-io","subsections":[]},{"section":"patch-event-v1-events-k8s-io","subsections":[]},{"section":"create-event-v1-events-k8s-io","subsections":[]}]}]},{"section":"customresourcedefinition-v1-apiextensions-k8s-io","subsections":[{"section":"-strong-status-operations-customresourcedefinition-v1-apiextensions-k8s-io-strong-","subsections":[{"section":"replace-status-customresourcedefinition-v1-apiextensions-k8s-io","subsections":[]},{"section":"read-status-customresourcedefinition-v1-apiextensions-k8s-io","subsections":[]},{"section":"patch-status-customresourcedefinition-v1-apiextensions-k8s-io","subse
ctions":[]}]},{"section":"-strong-read-operations-customresourcedefinition-v1-apiextensions-k8s-io-strong-","subsections":[{"section":"watch-list-customresourcedefinition-v1-apiextensions-k8s-io","subsections":[]},{"section":"watch-customresourcedefinition-v1-apiextensions-k8s-io","subsections":[]},{"section":"list-customresourcedefinition-v1-apiextensions-k8s-io","subsections":[]},{"section":"read-customresourcedefinition-v1-apiextensions-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-customresourcedefinition-v1-apiextensions-k8s-io-strong-","subsections":[{"section":"delete-collection-customresourcedefinition-v1-apiextensions-k8s-io","subsections":[]},{"section":"delete-customresourcedefinition-v1-apiextensions-k8s-io","subsections":[]},{"section":"replace-customresourcedefinition-v1-apiextensions-k8s-io","subsections":[]},{"section":"patch-customresourcedefinition-v1-apiextensions-k8s-io","subsections":[]},{"section":"create-customresourcedefinition-v1-apiextensions-k8s-io","subsections":[]}]}]},{"section":"controllerrevision-v1-apps","subsections":[{"section":"-strong-read-operations-controllerrevision-v1-apps-strong-","subsections":[{"section":"watch-list-all-namespaces-controllerrevision-v1-apps","subsections":[]},{"section":"watch-list-controllerrevision-v1-apps","subsections":[]},{"section":"watch-controllerrevision-v1-apps","subsections":[]},{"section":"list-all-namespaces-controllerrevision-v1-apps","subsections":[]},{"section":"list-controllerrevision-v1-apps","subsections":[]},{"section":"read-controllerrevision-v1-apps","subsections":[]}]},{"section":"-strong-write-operations-controllerrevision-v1-apps-strong-","subsections":[{"section":"delete-collection-controllerrevision-v1-apps","subsections":[]},{"section":"delete-controllerrevision-v1-apps","subsections":[]},{"section":"replace-controllerrevision-v1-apps","subsections":[]},{"section":"patch-controllerrevision-v1-apps","subsections":[]},{"section":"create-controllerrevision-v1-app
s","subsections":[]}]}]},{"section":"-strong-metadata-apis-strong-","subsections":[]},{"section":"volumeattachment-v1-storage-k8s-io","subsections":[{"section":"-strong-status-operations-volumeattachment-v1-storage-k8s-io-strong-","subsections":[{"section":"replace-status-volumeattachment-v1-storage-k8s-io","subsections":[]},{"section":"read-status-volumeattachment-v1-storage-k8s-io","subsections":[]},{"section":"patch-status-volumeattachment-v1-storage-k8s-io","subsections":[]}]},{"section":"-strong-read-operations-volumeattachment-v1-storage-k8s-io-strong-","subsections":[{"section":"watch-list-volumeattachment-v1-storage-k8s-io","subsections":[]},{"section":"watch-volumeattachment-v1-storage-k8s-io","subsections":[]},{"section":"list-volumeattachment-v1-storage-k8s-io","subsections":[]},{"section":"read-volumeattachment-v1-storage-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-volumeattachment-v1-storage-k8s-io-strong-","subsections":[{"section":"delete-collection-volumeattachment-v1-storage-k8s-io","subsections":[]},{"section":"delete-volumeattachment-v1-storage-k8s-io","subsections":[]},{"section":"replace-volumeattachment-v1-storage-k8s-io","subsections":[]},{"section":"patch-volumeattachment-v1-storage-k8s-io","subsections":[]},{"section":"create-volumeattachment-v1-storage-k8s-io","subsections":[]}]}]},{"section":"volume-v1-core","subsections":[]},{"section":"csistoragecapacity-v1beta1-storage-k8s-io","subsections":[{"section":"-strong-read-operations-csistoragecapacity-v1beta1-storage-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-csistoragecapacity-v1beta1-storage-k8s-io","subsections":[]},{"section":"watch-list-csistoragecapacity-v1beta1-storage-k8s-io","subsections":[]},{"section":"watch-csistoragecapacity-v1beta1-storage-k8s-io","subsections":[]},{"section":"list-all-namespaces-csistoragecapacity-v1beta1-storage-k8s-io","subsections":[]},{"section":"list-csistoragecapacity-v1beta1-storage-k8s-io","subsections":[]}
,{"section":"read-csistoragecapacity-v1beta1-storage-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-csistoragecapacity-v1beta1-storage-k8s-io-strong-","subsections":[{"section":"delete-collection-csistoragecapacity-v1beta1-storage-k8s-io","subsections":[]},{"section":"delete-csistoragecapacity-v1beta1-storage-k8s-io","subsections":[]},{"section":"replace-csistoragecapacity-v1beta1-storage-k8s-io","subsections":[]},{"section":"patch-csistoragecapacity-v1beta1-storage-k8s-io","subsections":[]},{"section":"create-csistoragecapacity-v1beta1-storage-k8s-io","subsections":[]}]}]},{"section":"storageclass-v1-storage-k8s-io","subsections":[{"section":"-strong-read-operations-storageclass-v1-storage-k8s-io-strong-","subsections":[{"section":"watch-list-storageclass-v1-storage-k8s-io","subsections":[]},{"section":"watch-storageclass-v1-storage-k8s-io","subsections":[]},{"section":"list-storageclass-v1-storage-k8s-io","subsections":[]},{"section":"read-storageclass-v1-storage-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-storageclass-v1-storage-k8s-io-strong-","subsections":[{"section":"delete-collection-storageclass-v1-storage-k8s-io","subsections":[]},{"section":"delete-storageclass-v1-storage-k8s-io","subsections":[]},{"section":"replace-storageclass-v1-storage-k8s-io","subsections":[]},{"section":"patch-storageclass-v1-storage-k8s-io","subsections":[]},{"section":"create-storageclass-v1-storage-k8s-io","subsections":[]}]}]},{"section":"persistentvolumeclaim-v1-core","subsections":[{"section":"-strong-status-operations-persistentvolumeclaim-v1-core-strong-","subsections":[{"section":"replace-status-persistentvolumeclaim-v1-core","subsections":[]},{"section":"read-status-persistentvolumeclaim-v1-core","subsections":[]},{"section":"patch-status-persistentvolumeclaim-v1-core","subsections":[]}]},{"section":"-strong-read-operations-persistentvolumeclaim-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-persistentvolumecla
im-v1-core","subsections":[]},{"section":"watch-list-persistentvolumeclaim-v1-core","subsections":[]},{"section":"watch-persistentvolumeclaim-v1-core","subsections":[]},{"section":"list-all-namespaces-persistentvolumeclaim-v1-core","subsections":[]},{"section":"list-persistentvolumeclaim-v1-core","subsections":[]},{"section":"read-persistentvolumeclaim-v1-core","subsections":[]}]},{"section":"-strong-write-operations-persistentvolumeclaim-v1-core-strong-","subsections":[{"section":"delete-collection-persistentvolumeclaim-v1-core","subsections":[]},{"section":"delete-persistentvolumeclaim-v1-core","subsections":[]},{"section":"replace-persistentvolumeclaim-v1-core","subsections":[]},{"section":"patch-persistentvolumeclaim-v1-core","subsections":[]},{"section":"create-persistentvolumeclaim-v1-core","subsections":[]}]}]},{"section":"secret-v1-core","subsections":[{"section":"-strong-read-operations-secret-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-secret-v1-core","subsections":[]},{"section":"watch-list-secret-v1-core","subsections":[]},{"section":"watch-secret-v1-core","subsections":[]},{"section":"list-all-namespaces-secret-v1-core","subsections":[]},{"section":"list-secret-v1-core","subsections":[]},{"section":"read-secret-v1-core","subsections":[]}]},{"section":"-strong-write-operations-secret-v1-core-strong-","subsections":[{"section":"delete-collection-secret-v1-core","subsections":[]},{"section":"delete-secret-v1-core","subsections":[]},{"section":"replace-secret-v1-core","subsections":[]},{"section":"patch-secret-v1-core","subsections":[]},{"section":"create-secret-v1-core","subsections":[]}]}]},{"section":"csinode-v1-storage-k8s-io","subsections":[{"section":"-strong-read-operations-csinode-v1-storage-k8s-io-strong-","subsections":[{"section":"watch-list-csinode-v1-storage-k8s-io","subsections":[]},{"section":"watch-csinode-v1-storage-k8s-io","subsections":[]},{"section":"list-csinode-v1-storage-k8s-io","subsections":[]},{"section":"
read-csinode-v1-storage-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-csinode-v1-storage-k8s-io-strong-","subsections":[{"section":"delete-collection-csinode-v1-storage-k8s-io","subsections":[]},{"section":"delete-csinode-v1-storage-k8s-io","subsections":[]},{"section":"replace-csinode-v1-storage-k8s-io","subsections":[]},{"section":"patch-csinode-v1-storage-k8s-io","subsections":[]},{"section":"create-csinode-v1-storage-k8s-io","subsections":[]}]}]},{"section":"csidriver-v1-storage-k8s-io","subsections":[{"section":"-strong-read-operations-csidriver-v1-storage-k8s-io-strong-","subsections":[{"section":"watch-list-csidriver-v1-storage-k8s-io","subsections":[]},{"section":"watch-csidriver-v1-storage-k8s-io","subsections":[]},{"section":"list-csidriver-v1-storage-k8s-io","subsections":[]},{"section":"read-csidriver-v1-storage-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-csidriver-v1-storage-k8s-io-strong-","subsections":[{"section":"delete-collection-csidriver-v1-storage-k8s-io","subsections":[]},{"section":"delete-csidriver-v1-storage-k8s-io","subsections":[]},{"section":"replace-csidriver-v1-storage-k8s-io","subsections":[]},{"section":"patch-csidriver-v1-storage-k8s-io","subsections":[]},{"section":"create-csidriver-v1-storage-k8s-io","subsections":[]}]}]},{"section":"configmap-v1-core","subsections":[{"section":"-strong-read-operations-configmap-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-configmap-v1-core","subsections":[]},{"section":"watch-list-configmap-v1-core","subsections":[]},{"section":"watch-configmap-v1-core","subsections":[]},{"section":"list-all-namespaces-configmap-v1-core","subsections":[]},{"section":"list-configmap-v1-core","subsections":[]},{"section":"read-configmap-v1-core","subsections":[]}]},{"section":"-strong-write-operations-configmap-v1-core-strong-","subsections":[{"section":"delete-collection-configmap-v1-core","subsections":[]},{"section":"delete-configmap-v1-core","subse
ctions":[]},{"section":"replace-configmap-v1-core","subsections":[]},{"section":"patch-configmap-v1-core","subsections":[]},{"section":"create-configmap-v1-core","subsections":[]}]}]},{"section":"-strong-config-and-storage-apis-strong-","subsections":[]},{"section":"service-v1-core","subsections":[{"section":"-strong-proxy-operations-service-v1-core-strong-","subsections":[{"section":"replace-connect-proxy-path-service-v1-core","subsections":[]},{"section":"replace-connect-proxy-service-v1-core","subsections":[]},{"section":"head-connect-proxy-path-service-v1-core","subsections":[]},{"section":"head-connect-proxy-service-v1-core","subsections":[]},{"section":"get-connect-proxy-path-service-v1-core","subsections":[]},{"section":"get-connect-proxy-service-v1-core","subsections":[]},{"section":"delete-connect-proxy-path-service-v1-core","subsections":[]},{"section":"delete-connect-proxy-service-v1-core","subsections":[]},{"section":"create-connect-proxy-path-service-v1-core","subsections":[]},{"section":"create-connect-proxy-service-v1-core","subsections":[]}]},{"section":"-strong-status-operations-service-v1-core-strong-","subsections":[{"section":"replace-status-service-v1-core","subsections":[]},{"section":"read-status-service-v1-core","subsections":[]},{"section":"patch-status-service-v1-core","subsections":[]}]},{"section":"-strong-read-operations-service-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-service-v1-core","subsections":[]},{"section":"watch-list-service-v1-core","subsections":[]},{"section":"watch-service-v1-core","subsections":[]},{"section":"list-all-namespaces-service-v1-core","subsections":[]},{"section":"list-service-v1-core","subsections":[]},{"section":"read-service-v1-core","subsections":[]}]},{"section":"-strong-write-operations-service-v1-core-strong-","subsections":[{"section":"delete-service-v1-core","subsections":[]},{"section":"replace-service-v1-core","subsections":[]},{"section":"patch-service-v1-core","subsectio
ns":[]},{"section":"create-service-v1-core","subsections":[]}]}]},{"section":"ingressclass-v1-networking-k8s-io","subsections":[{"section":"-strong-read-operations-ingressclass-v1-networking-k8s-io-strong-","subsections":[{"section":"watch-list-ingressclass-v1-networking-k8s-io","subsections":[]},{"section":"watch-ingressclass-v1-networking-k8s-io","subsections":[]},{"section":"list-ingressclass-v1-networking-k8s-io","subsections":[]},{"section":"read-ingressclass-v1-networking-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-ingressclass-v1-networking-k8s-io-strong-","subsections":[{"section":"delete-collection-ingressclass-v1-networking-k8s-io","subsections":[]},{"section":"delete-ingressclass-v1-networking-k8s-io","subsections":[]},{"section":"replace-ingressclass-v1-networking-k8s-io","subsections":[]},{"section":"patch-ingressclass-v1-networking-k8s-io","subsections":[]},{"section":"create-ingressclass-v1-networking-k8s-io","subsections":[]}]}]},{"section":"ingress-v1-networking-k8s-io","subsections":[{"section":"-strong-status-operations-ingress-v1-networking-k8s-io-strong-","subsections":[{"section":"replace-status-ingress-v1-networking-k8s-io","subsections":[]},{"section":"read-status-ingress-v1-networking-k8s-io","subsections":[]},{"section":"patch-status-ingress-v1-networking-k8s-io","subsections":[]}]},{"section":"-strong-read-operations-ingress-v1-networking-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-ingress-v1-networking-k8s-io","subsections":[]},{"section":"watch-list-ingress-v1-networking-k8s-io","subsections":[]},{"section":"watch-ingress-v1-networking-k8s-io","subsections":[]},{"section":"list-all-namespaces-ingress-v1-networking-k8s-io","subsections":[]},{"section":"list-ingress-v1-networking-k8s-io","subsections":[]},{"section":"read-ingress-v1-networking-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-ingress-v1-networking-k8s-io-strong-","subsections":[{"section":"delete-collection-ingre
ss-v1-networking-k8s-io","subsections":[]},{"section":"delete-ingress-v1-networking-k8s-io","subsections":[]},{"section":"replace-ingress-v1-networking-k8s-io","subsections":[]},{"section":"patch-ingress-v1-networking-k8s-io","subsections":[]},{"section":"create-ingress-v1-networking-k8s-io","subsections":[]}]}]},{"section":"endpointslice-v1-discovery-k8s-io","subsections":[{"section":"-strong-read-operations-endpointslice-v1-discovery-k8s-io-strong-","subsections":[{"section":"watch-list-all-namespaces-endpointslice-v1-discovery-k8s-io","subsections":[]},{"section":"watch-list-endpointslice-v1-discovery-k8s-io","subsections":[]},{"section":"watch-endpointslice-v1-discovery-k8s-io","subsections":[]},{"section":"list-all-namespaces-endpointslice-v1-discovery-k8s-io","subsections":[]},{"section":"list-endpointslice-v1-discovery-k8s-io","subsections":[]},{"section":"read-endpointslice-v1-discovery-k8s-io","subsections":[]}]},{"section":"-strong-write-operations-endpointslice-v1-discovery-k8s-io-strong-","subsections":[{"section":"delete-collection-endpointslice-v1-discovery-k8s-io","subsections":[]},{"section":"delete-endpointslice-v1-discovery-k8s-io","subsections":[]},{"section":"replace-endpointslice-v1-discovery-k8s-io","subsections":[]},{"section":"patch-endpointslice-v1-discovery-k8s-io","subsections":[]},{"section":"create-endpointslice-v1-discovery-k8s-io","subsections":[]}]}]},{"section":"endpoints-v1-core","subsections":[{"section":"-strong-read-operations-endpoints-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-endpoints-v1-core","subsections":[]},{"section":"watch-list-endpoints-v1-core","subsections":[]},{"section":"watch-endpoints-v1-core","subsections":[]},{"section":"list-all-namespaces-endpoints-v1-core","subsections":[]},{"section":"list-endpoints-v1-core","subsections":[]},{"section":"read-endpoints-v1-core","subsections":[]}]},{"section":"-strong-write-operations-endpoints-v1-core-strong-","subsections":[{"section":"delete-col
lection-endpoints-v1-core","subsections":[]},{"section":"delete-endpoints-v1-core","subsections":[]},{"section":"replace-endpoints-v1-core","subsections":[]},{"section":"patch-endpoints-v1-core","subsections":[]},{"section":"create-endpoints-v1-core","subsections":[]}]}]},{"section":"-strong-service-apis-strong-","subsections":[]},{"section":"statefulset-v1-apps","subsections":[{"section":"-strong-misc-operations-statefulset-v1-apps-strong-","subsections":[{"section":"patch-scale-statefulset-v1-apps","subsections":[]},{"section":"replace-scale-statefulset-v1-apps","subsections":[]},{"section":"read-scale-statefulset-v1-apps","subsections":[]}]},{"section":"-strong-status-operations-statefulset-v1-apps-strong-","subsections":[{"section":"replace-status-statefulset-v1-apps","subsections":[]},{"section":"read-status-statefulset-v1-apps","subsections":[]},{"section":"patch-status-statefulset-v1-apps","subsections":[]}]},{"section":"-strong-read-operations-statefulset-v1-apps-strong-","subsections":[{"section":"watch-list-all-namespaces-statefulset-v1-apps","subsections":[]},{"section":"watch-list-statefulset-v1-apps","subsections":[]},{"section":"watch-statefulset-v1-apps","subsections":[]},{"section":"list-all-namespaces-statefulset-v1-apps","subsections":[]},{"section":"list-statefulset-v1-apps","subsections":[]},{"section":"read-statefulset-v1-apps","subsections":[]}]},{"section":"-strong-write-operations-statefulset-v1-apps-strong-","subsections":[{"section":"delete-collection-statefulset-v1-apps","subsections":[]},{"section":"delete-statefulset-v1-apps","subsections":[]},{"section":"replace-statefulset-v1-apps","subsections":[]},{"section":"patch-statefulset-v1-apps","subsections":[]},{"section":"create-statefulset-v1-apps","subsections":[]}]}]},{"section":"replicationcontroller-v1-core","subsections":[{"section":"-strong-misc-operations-replicationcontroller-v1-core-strong-","subsections":[{"section":"patch-scale-replicationcontroller-v1-core","subsections":[]},{"
section":"replace-scale-replicationcontroller-v1-core","subsections":[]},{"section":"read-scale-replicationcontroller-v1-core","subsections":[]}]},{"section":"-strong-status-operations-replicationcontroller-v1-core-strong-","subsections":[{"section":"replace-status-replicationcontroller-v1-core","subsections":[]},{"section":"read-status-replicationcontroller-v1-core","subsections":[]},{"section":"patch-status-replicationcontroller-v1-core","subsections":[]}]},{"section":"-strong-read-operations-replicationcontroller-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-replicationcontroller-v1-core","subsections":[]},{"section":"watch-list-replicationcontroller-v1-core","subsections":[]},{"section":"watch-replicationcontroller-v1-core","subsections":[]},{"section":"list-all-namespaces-replicationcontroller-v1-core","subsections":[]},{"section":"list-replicationcontroller-v1-core","subsections":[]},{"section":"read-replicationcontroller-v1-core","subsections":[]}]},{"section":"-strong-write-operations-replicationcontroller-v1-core-strong-","subsections":[{"section":"delete-collection-replicationcontroller-v1-core","subsections":[]},{"section":"delete-replicationcontroller-v1-core","subsections":[]},{"section":"replace-replicationcontroller-v1-core","subsections":[]},{"section":"patch-replicationcontroller-v1-core","subsections":[]},{"section":"create-replicationcontroller-v1-core","subsections":[]}]}]},{"section":"replicaset-v1-apps","subsections":[{"section":"-strong-misc-operations-replicaset-v1-apps-strong-","subsections":[{"section":"patch-scale-replicaset-v1-apps","subsections":[]},{"section":"replace-scale-replicaset-v1-apps","subsections":[]},{"section":"read-scale-replicaset-v1-apps","subsections":[]}]},{"section":"-strong-status-operations-replicaset-v1-apps-strong-","subsections":[{"section":"replace-status-replicaset-v1-apps","subsections":[]},{"section":"read-status-replicaset-v1-apps","subsections":[]},{"section":"patch-status-replicaset-
v1-apps","subsections":[]}]},{"section":"-strong-read-operations-replicaset-v1-apps-strong-","subsections":[{"section":"watch-list-all-namespaces-replicaset-v1-apps","subsections":[]},{"section":"watch-list-replicaset-v1-apps","subsections":[]},{"section":"watch-replicaset-v1-apps","subsections":[]},{"section":"list-all-namespaces-replicaset-v1-apps","subsections":[]},{"section":"list-replicaset-v1-apps","subsections":[]},{"section":"read-replicaset-v1-apps","subsections":[]}]},{"section":"-strong-write-operations-replicaset-v1-apps-strong-","subsections":[{"section":"delete-collection-replicaset-v1-apps","subsections":[]},{"section":"delete-replicaset-v1-apps","subsections":[]},{"section":"replace-replicaset-v1-apps","subsections":[]},{"section":"patch-replicaset-v1-apps","subsections":[]},{"section":"create-replicaset-v1-apps","subsections":[]}]}]},{"section":"pod-v1-core","subsections":[{"section":"-strong-misc-operations-pod-v1-core-strong-","subsections":[{"section":"read-log-pod-v1-core","subsections":[]}]},{"section":"-strong-proxy-operations-pod-v1-core-strong-","subsections":[{"section":"replace-connect-proxy-path-pod-v1-core","subsections":[]},{"section":"replace-connect-proxy-pod-v1-core","subsections":[]},{"section":"head-connect-proxy-path-pod-v1-core","subsections":[]},{"section":"head-connect-proxy-pod-v1-core","subsections":[]},{"section":"get-connect-proxy-path-pod-v1-core","subsections":[]},{"section":"get-connect-proxy-pod-v1-core","subsections":[]},{"section":"get-connect-portforward-pod-v1-core","subsections":[]},{"section":"delete-connect-proxy-path-pod-v1-core","subsections":[]},{"section":"delete-connect-proxy-pod-v1-core","subsections":[]},{"section":"create-connect-proxy-path-pod-v1-core","subsections":[]},{"section":"create-connect-proxy-pod-v1-core","subsections":[]},{"section":"create-connect-portforward-pod-v1-core","subsections":[]}]},{"section":"-strong-ephemeralcontainers-operations-pod-v1-core-strong-","subsections":[{"section":"rep
lace-ephemeralcontainers-pod-v1-core","subsections":[]},{"section":"read-ephemeralcontainers-pod-v1-core","subsections":[]},{"section":"patch-ephemeralcontainers-pod-v1-core","subsections":[]}]},{"section":"-strong-status-operations-pod-v1-core-strong-","subsections":[{"section":"replace-status-pod-v1-core","subsections":[]},{"section":"read-status-pod-v1-core","subsections":[]},{"section":"patch-status-pod-v1-core","subsections":[]}]},{"section":"-strong-read-operations-pod-v1-core-strong-","subsections":[{"section":"watch-list-all-namespaces-pod-v1-core","subsections":[]},{"section":"watch-list-pod-v1-core","subsections":[]},{"section":"watch-pod-v1-core","subsections":[]},{"section":"list-all-namespaces-pod-v1-core","subsections":[]},{"section":"list-pod-v1-core","subsections":[]},{"section":"read-pod-v1-core","subsections":[]}]},{"section":"-strong-write-operations-pod-v1-core-strong-","subsections":[{"section":"delete-collection-pod-v1-core","subsections":[]},{"section":"delete-pod-v1-core","subsections":[]},{"section":"replace-pod-v1-core","subsections":[]},{"section":"patch-pod-v1-core","subsections":[]},{"section":"create-eviction-pod-v1-core","subsections":[]},{"section":"create-pod-v1-core","subsections":[]}]}]},{"section":"job-v1-batch","subsections":[{"section":"-strong-status-operations-job-v1-batch-strong-","subsections":[{"section":"replace-status-job-v1-batch","subsections":[]},{"section":"read-status-job-v1-batch","subsections":[]},{"section":"patch-status-job-v1-batch","subsections":[]}]},{"section":"-strong-read-operations-job-v1-batch-strong-","subsections":[{"section":"watch-list-all-namespaces-job-v1-batch","subsections":[]},{"section":"watch-list-job-v1-batch","subsections":[]},{"section":"watch-job-v1-batch","subsections":[]},{"section":"list-all-namespaces-job-v1-batch","subsections":[]},{"section":"list-job-v1-batch","subsections":[]},{"section":"read-job-v1-batch","subsections":[]}]},{"section":"-strong-write-operations-job-v1-batch-strong
-","subsections":[{"section":"delete-collection-job-v1-batch","subsections":[]},{"section":"delete-job-v1-batch","subsections":[]},{"section":"replace-job-v1-batch","subsections":[]},{"section":"patch-job-v1-batch","subsections":[]},{"section":"create-job-v1-batch","subsections":[]}]}]},{"section":"deployment-v1-apps","subsections":[{"section":"-strong-misc-operations-deployment-v1-apps-strong-","subsections":[{"section":"patch-scale-deployment-v1-apps","subsections":[]},{"section":"replace-scale-deployment-v1-apps","subsections":[]},{"section":"read-scale-deployment-v1-apps","subsections":[]}]},{"section":"-strong-status-operations-deployment-v1-apps-strong-","subsections":[{"section":"replace-status-deployment-v1-apps","subsections":[]},{"section":"read-status-deployment-v1-apps","subsections":[]},{"section":"patch-status-deployment-v1-apps","subsections":[]}]},{"section":"-strong-read-operations-deployment-v1-apps-strong-","subsections":[{"section":"watch-list-all-namespaces-deployment-v1-apps","subsections":[]},{"section":"watch-list-deployment-v1-apps","subsections":[]},{"section":"watch-deployment-v1-apps","subsections":[]},{"section":"list-all-namespaces-deployment-v1-apps","subsections":[]},{"section":"list-deployment-v1-apps","subsections":[]},{"section":"read-deployment-v1-apps","subsections":[]}]},{"section":"-strong-write-operations-deployment-v1-apps-strong-","subsections":[{"section":"delete-collection-deployment-v1-apps","subsections":[]},{"section":"delete-deployment-v1-apps","subsections":[]},{"section":"replace-deployment-v1-apps","subsections":[]},{"section":"patch-deployment-v1-apps","subsections":[]},{"section":"create-deployment-v1-apps","subsections":[]}]}]},{"section":"daemonset-v1-apps","subsections":[{"section":"-strong-status-operations-daemonset-v1-apps-strong-","subsections":[{"section":"replace-status-daemonset-v1-apps","subsections":[]},{"section":"read-status-daemonset-v1-apps","subsections":[]},{"section":"patch-status-daemonset-v1-a
pps","subsections":[]}]},{"section":"-strong-read-operations-daemonset-v1-apps-strong-","subsections":[{"section":"watch-list-all-namespaces-daemonset-v1-apps","subsections":[]},{"section":"watch-list-daemonset-v1-apps","subsections":[]},{"section":"watch-daemonset-v1-apps","subsections":[]},{"section":"list-all-namespaces-daemonset-v1-apps","subsections":[]},{"section":"list-daemonset-v1-apps","subsections":[]},{"section":"read-daemonset-v1-apps","subsections":[]}]},{"section":"-strong-write-operations-daemonset-v1-apps-strong-","subsections":[{"section":"delete-collection-daemonset-v1-apps","subsections":[]},{"section":"delete-daemonset-v1-apps","subsections":[]},{"section":"replace-daemonset-v1-apps","subsections":[]},{"section":"patch-daemonset-v1-apps","subsections":[]},{"section":"create-daemonset-v1-apps","subsections":[]}]}]},{"section":"cronjob-v1-batch","subsections":[{"section":"-strong-status-operations-cronjob-v1-batch-strong-","subsections":[{"section":"replace-status-cronjob-v1-batch","subsections":[]},{"section":"read-status-cronjob-v1-batch","subsections":[]},{"section":"patch-status-cronjob-v1-batch","subsections":[]}]},{"section":"-strong-read-operations-cronjob-v1-batch-strong-","subsections":[{"section":"watch-list-all-namespaces-cronjob-v1-batch","subsections":[]},{"section":"watch-list-cronjob-v1-batch","subsections":[]},{"section":"watch-cronjob-v1-batch","subsections":[]},{"section":"list-all-namespaces-cronjob-v1-batch","subsections":[]},{"section":"list-cronjob-v1-batch","subsections":[]},{"section":"read-cronjob-v1-batch","subsections":[]}]},{"section":"-strong-write-operations-cronjob-v1-batch-strong-","subsections":[{"section":"delete-collection-cronjob-v1-batch","subsections":[]},{"section":"delete-cronjob-v1-batch","subsections":[]},{"section":"replace-cronjob-v1-batch","subsections":[]},{"section":"patch-cronjob-v1-batch","subsections":[]},{"section":"create-cronjob-v1-batch","subsections":[]}]}]},{"section":"container-v1-core","subs
ections":[]},{"section":"-strong-workloads-apis-strong-","subsections":[]},{"section":"-strong-api-groups-strong-","subsections":[]},{"section":"-strong-api-overview-strong-","subsections":[]}],"flatToc":["webhookclientconfig-v1-apiextensions-k8s-io","volumeerror-v1alpha1-storage-k8s-io","volumeattachmentsource-v1alpha1-storage-k8s-io","watch-list-volumeattachment-v1alpha1-storage-k8s-io","watch-volumeattachment-v1alpha1-storage-k8s-io","list-volumeattachment-v1alpha1-storage-k8s-io","read-volumeattachment-v1alpha1-storage-k8s-io","-strong-read-operations-volumeattachment-v1alpha1-storage-k8s-io-strong-","delete-collection-volumeattachment-v1alpha1-storage-k8s-io","delete-volumeattachment-v1alpha1-storage-k8s-io","replace-volumeattachment-v1alpha1-storage-k8s-io","patch-volumeattachment-v1alpha1-storage-k8s-io","create-volumeattachment-v1alpha1-storage-k8s-io","-strong-write-operations-volumeattachment-v1alpha1-storage-k8s-io-strong-","volumeattachment-v1alpha1-storage-k8s-io","tokenrequest-v1-storage-k8s-io","subject-v1alpha1-rbac-authorization-k8s-io","subject-v1-rbac-authorization-k8s-io","servicereference-v1-apiregistration-k8s-io","servicereference-v1-apiextensions-k8s-io","scheduling-v1alpha1-node-k8s-io","scheduling-v1beta1-node-k8s-io","watch-list-runtimeclass-v1alpha1-node-k8s-io","watch-runtimeclass-v1alpha1-node-k8s-io","list-runtimeclass-v1alpha1-node-k8s-io","read-runtimeclass-v1alpha1-node-k8s-io","-strong-read-operations-runtimeclass-v1alpha1-node-k8s-io-strong-","delete-collection-runtimeclass-v1alpha1-node-k8s-io","delete-runtimeclass-v1alpha1-node-k8s-io","replace-runtimeclass-v1alpha1-node-k8s-io","patch-runtimeclass-v1alpha1-node-k8s-io","create-runtimeclass-v1alpha1-node-k8s-io","-strong-write-operations-runtimeclass-v1alpha1-node-k8s-io-strong-","runtimeclass-v1alpha1-node-k8s-io","watch-list-runtimeclass-v1beta1-node-k8s-io","watch-runtimeclass-v1beta1-node-k8s-io","list-runtimeclass-v1beta1-node-k8s-io","read-runtimeclass-v1beta1-node-k8s-io"
,"-strong-read-operations-runtimeclass-v1beta1-node-k8s-io-strong-","delete-collection-runtimeclass-v1beta1-node-k8s-io","delete-runtimeclass-v1beta1-node-k8s-io","replace-runtimeclass-v1beta1-node-k8s-io","patch-runtimeclass-v1beta1-node-k8s-io","create-runtimeclass-v1beta1-node-k8s-io","-strong-write-operations-runtimeclass-v1beta1-node-k8s-io-strong-","runtimeclass-v1beta1-node-k8s-io","roleref-v1alpha1-rbac-authorization-k8s-io","watch-list-all-namespaces-rolebinding-v1alpha1-rbac-authorization-k8s-io","watch-list-rolebinding-v1alpha1-rbac-authorization-k8s-io","watch-rolebinding-v1alpha1-rbac-authorization-k8s-io","list-all-namespaces-rolebinding-v1alpha1-rbac-authorization-k8s-io","list-rolebinding-v1alpha1-rbac-authorization-k8s-io","read-rolebinding-v1alpha1-rbac-authorization-k8s-io","-strong-read-operations-rolebinding-v1alpha1-rbac-authorization-k8s-io-strong-","delete-collection-rolebinding-v1alpha1-rbac-authorization-k8s-io","delete-rolebinding-v1alpha1-rbac-authorization-k8s-io","replace-rolebinding-v1alpha1-rbac-authorization-k8s-io","patch-rolebinding-v1alpha1-rbac-authorization-k8s-io","create-rolebinding-v1alpha1-rbac-authorization-k8s-io","-strong-write-operations-rolebinding-v1alpha1-rbac-authorization-k8s-io-strong-","rolebinding-v1alpha1-rbac-authorization-k8s-io","watch-list-all-namespaces-role-v1alpha1-rbac-authorization-k8s-io","watch-list-role-v1alpha1-rbac-authorization-k8s-io","watch-role-v1alpha1-rbac-authorization-k8s-io","list-all-namespaces-role-v1alpha1-rbac-authorization-k8s-io","list-role-v1alpha1-rbac-authorization-k8s-io","read-role-v1alpha1-rbac-authorization-k8s-io","-strong-read-operations-role-v1alpha1-rbac-authorization-k8s-io-strong-","delete-collection-role-v1alpha1-rbac-authorization-k8s-io","delete-role-v1alpha1-rbac-authorization-k8s-io","replace-role-v1alpha1-rbac-authorization-k8s-io","patch-role-v1alpha1-rbac-authorization-k8s-io","create-role-v1alpha1-rbac-authorization-k8s-io","-strong-write-operations-role-v1alpha
1-rbac-authorization-k8s-io-strong-","role-v1alpha1-rbac-authorization-k8s-io","resourcemetricstatus-v2beta1-autoscaling","resourcemetricsource-v2beta1-autoscaling","watch-list-priorityclass-v1alpha1-scheduling-k8s-io","watch-priorityclass-v1alpha1-scheduling-k8s-io","list-priorityclass-v1alpha1-scheduling-k8s-io","read-priorityclass-v1alpha1-scheduling-k8s-io","-strong-read-operations-priorityclass-v1alpha1-scheduling-k8s-io-strong-","delete-collection-priorityclass-v1alpha1-scheduling-k8s-io","delete-priorityclass-v1alpha1-scheduling-k8s-io","replace-priorityclass-v1alpha1-scheduling-k8s-io","patch-priorityclass-v1alpha1-scheduling-k8s-io","create-priorityclass-v1alpha1-scheduling-k8s-io","-strong-write-operations-priorityclass-v1alpha1-scheduling-k8s-io-strong-","priorityclass-v1alpha1-scheduling-k8s-io","policyrule-v1alpha1-rbac-authorization-k8s-io","podsmetricstatus-v2beta1-autoscaling","podsmetricsource-v2beta1-autoscaling","replace-status-poddisruptionbudget-v1beta1-policy","read-status-poddisruptionbudget-v1beta1-policy","patch-status-poddisruptionbudget-v1beta1-policy","-strong-status-operations-poddisruptionbudget-v1beta1-policy-strong-","watch-list-all-namespaces-poddisruptionbudget-v1beta1-policy","watch-list-poddisruptionbudget-v1beta1-policy","watch-poddisruptionbudget-v1beta1-policy","list-all-namespaces-poddisruptionbudget-v1beta1-policy","list-poddisruptionbudget-v1beta1-policy","read-poddisruptionbudget-v1beta1-policy","-strong-read-operations-poddisruptionbudget-v1beta1-policy-strong-","delete-collection-poddisruptionbudget-v1beta1-policy","delete-poddisruptionbudget-v1beta1-policy","replace-poddisruptionbudget-v1beta1-policy","patch-poddisruptionbudget-v1beta1-policy","create-poddisruptionbudget-v1beta1-policy","-strong-write-operations-poddisruptionbudget-v1beta1-policy-strong-","poddisruptionbudget-v1beta1-policy","overhead-v1alpha1-node-k8s-io","overhead-v1beta1-node-k8s-io","objectmetricstatus-v2beta1-autoscaling","objectmetricsource-v2beta1
-autoscaling","metricstatus-v2beta1-autoscaling","metricspec-v2beta1-autoscaling","jobtemplatespec-v1beta1-batch","horizontalpodautoscalercondition-v2beta1-autoscaling","replace-status-horizontalpodautoscaler-v2beta1-autoscaling","read-status-horizontalpodautoscaler-v2beta1-autoscaling","patch-status-horizontalpodautoscaler-v2beta1-autoscaling","-strong-status-operations-horizontalpodautoscaler-v2beta1-autoscaling-strong-","watch-list-all-namespaces-horizontalpodautoscaler-v2beta1-autoscaling","watch-list-horizontalpodautoscaler-v2beta1-autoscaling","watch-horizontalpodautoscaler-v2beta1-autoscaling","list-all-namespaces-horizontalpodautoscaler-v2beta1-autoscaling","list-horizontalpodautoscaler-v2beta1-autoscaling","read-horizontalpodautoscaler-v2beta1-autoscaling","-strong-read-operations-horizontalpodautoscaler-v2beta1-autoscaling-strong-","delete-collection-horizontalpodautoscaler-v2beta1-autoscaling","delete-horizontalpodautoscaler-v2beta1-autoscaling","replace-horizontalpodautoscaler-v2beta1-autoscaling","patch-horizontalpodautoscaler-v2beta1-autoscaling","create-horizontalpodautoscaler-v2beta1-autoscaling","-strong-write-operations-horizontalpodautoscaler-v2beta1-autoscaling-strong-","horizontalpodautoscaler-v2beta1-autoscaling","replace-status-horizontalpodautoscaler-v2beta2-autoscaling","read-status-horizontalpodautoscaler-v2beta2-autoscaling","patch-status-horizontalpodautoscaler-v2beta2-autoscaling","-strong-status-operations-horizontalpodautoscaler-v2beta2-autoscaling-strong-","watch-list-all-namespaces-horizontalpodautoscaler-v2beta2-autoscaling","watch-list-horizontalpodautoscaler-v2beta2-autoscaling","watch-horizontalpodautoscaler-v2beta2-autoscaling","list-all-namespaces-horizontalpodautoscaler-v2beta2-autoscaling","list-horizontalpodautoscaler-v2beta2-autoscaling","read-horizontalpodautoscaler-v2beta2-autoscaling","-strong-read-operations-horizontalpodautoscaler-v2beta2-autoscaling-strong-","delete-collection-horizontalpodautoscaler-v2beta2-autoscali
ng","delete-horizontalpodautoscaler-v2beta2-autoscaling","replace-horizontalpodautoscaler-v2beta2-autoscaling","patch-horizontalpodautoscaler-v2beta2-autoscaling","create-horizontalpodautoscaler-v2beta2-autoscaling","-strong-write-operations-horizontalpodautoscaler-v2beta2-autoscaling-strong-","horizontalpodautoscaler-v2beta2-autoscaling","forzone-v1beta1-discovery-k8s-io","externalmetricstatus-v2beta1-autoscaling","externalmetricsource-v2beta1-autoscaling","eventseries-v1beta1-events-k8s-io","eventseries-v1-core","watch-list-all-namespaces-event-v1beta1-events-k8s-io","watch-list-event-v1beta1-events-k8s-io","watch-event-v1beta1-events-k8s-io","list-all-namespaces-event-v1beta1-events-k8s-io","list-event-v1beta1-events-k8s-io","read-event-v1beta1-events-k8s-io","-strong-read-operations-event-v1beta1-events-k8s-io-strong-","delete-collection-event-v1beta1-events-k8s-io","delete-event-v1beta1-events-k8s-io","replace-event-v1beta1-events-k8s-io","patch-event-v1beta1-events-k8s-io","create-event-v1beta1-events-k8s-io","-strong-write-operations-event-v1beta1-events-k8s-io-strong-","event-v1beta1-events-k8s-io","watch-list-all-namespaces-event-v1-core","watch-list-event-v1-core","watch-event-v1-core","list-all-namespaces-event-v1-core","list-event-v1-core","read-event-v1-core","-strong-read-operations-event-v1-core-strong-","delete-collection-event-v1-core","delete-event-v1-core","replace-event-v1-core","patch-event-v1-core","create-event-v1-core","-strong-write-operations-event-v1-core-strong-","event-v1-core","watch-list-all-namespaces-endpointslice-v1beta1-discovery-k8s-io","watch-list-endpointslice-v1beta1-discovery-k8s-io","watch-endpointslice-v1beta1-discovery-k8s-io","list-all-namespaces-endpointslice-v1beta1-discovery-k8s-io","list-endpointslice-v1beta1-discovery-k8s-io","read-endpointslice-v1beta1-discovery-k8s-io","-strong-read-operations-endpointslice-v1beta1-discovery-k8s-io-strong-","delete-collection-endpointslice-v1beta1-discovery-k8s-io","delete-endpoints
lice-v1beta1-discovery-k8s-io","replace-endpointslice-v1beta1-discovery-k8s-io","patch-endpointslice-v1beta1-discovery-k8s-io","create-endpointslice-v1beta1-discovery-k8s-io","-strong-write-operations-endpointslice-v1beta1-discovery-k8s-io-strong-","endpointslice-v1beta1-discovery-k8s-io","endpointport-v1beta1-discovery-k8s-io","endpointport-v1-discovery-k8s-io","endpointhints-v1beta1-discovery-k8s-io","endpointconditions-v1beta1-discovery-k8s-io","endpoint-v1beta1-discovery-k8s-io","crossversionobjectreference-v2beta1-autoscaling","crossversionobjectreference-v2beta2-autoscaling","replace-status-cronjob-v1beta1-batch","read-status-cronjob-v1beta1-batch","patch-status-cronjob-v1beta1-batch","-strong-status-operations-cronjob-v1beta1-batch-strong-","watch-list-all-namespaces-cronjob-v1beta1-batch","watch-list-cronjob-v1beta1-batch","watch-cronjob-v1beta1-batch","list-all-namespaces-cronjob-v1beta1-batch","list-cronjob-v1beta1-batch","read-cronjob-v1beta1-batch","-strong-read-operations-cronjob-v1beta1-batch-strong-","delete-collection-cronjob-v1beta1-batch","delete-cronjob-v1beta1-batch","replace-cronjob-v1beta1-batch","patch-cronjob-v1beta1-batch","create-cronjob-v1beta1-batch","-strong-write-operations-cronjob-v1beta1-batch-strong-","cronjob-v1beta1-batch","containerresourcemetricstatus-v2beta1-autoscaling","containerresourcemetricsource-v2beta1-autoscaling","watch-list-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","watch-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","list-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","read-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","-strong-read-operations-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io-strong-","delete-collection-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","delete-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","replace-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","patch-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","create-clusterr
olebinding-v1alpha1-rbac-authorization-k8s-io","-strong-write-operations-clusterrolebinding-v1alpha1-rbac-authorization-k8s-io-strong-","clusterrolebinding-v1alpha1-rbac-authorization-k8s-io","watch-list-clusterrole-v1alpha1-rbac-authorization-k8s-io","watch-clusterrole-v1alpha1-rbac-authorization-k8s-io","list-clusterrole-v1alpha1-rbac-authorization-k8s-io","read-clusterrole-v1alpha1-rbac-authorization-k8s-io","-strong-read-operations-clusterrole-v1alpha1-rbac-authorization-k8s-io-strong-","delete-collection-clusterrole-v1alpha1-rbac-authorization-k8s-io","delete-clusterrole-v1alpha1-rbac-authorization-k8s-io","replace-clusterrole-v1alpha1-rbac-authorization-k8s-io","patch-clusterrole-v1alpha1-rbac-authorization-k8s-io","create-clusterrole-v1alpha1-rbac-authorization-k8s-io","-strong-write-operations-clusterrole-v1alpha1-rbac-authorization-k8s-io-strong-","clusterrole-v1alpha1-rbac-authorization-k8s-io","watch-list-all-namespaces-csistoragecapacity-v1alpha1-storage-k8s-io","watch-list-csistoragecapacity-v1alpha1-storage-k8s-io","watch-csistoragecapacity-v1alpha1-storage-k8s-io","list-all-namespaces-csistoragecapacity-v1alpha1-storage-k8s-io","list-csistoragecapacity-v1alpha1-storage-k8s-io","read-csistoragecapacity-v1alpha1-storage-k8s-io","-strong-read-operations-csistoragecapacity-v1alpha1-storage-k8s-io-strong-","delete-collection-csistoragecapacity-v1alpha1-storage-k8s-io","delete-csistoragecapacity-v1alpha1-storage-k8s-io","replace-csistoragecapacity-v1alpha1-storage-k8s-io","patch-csistoragecapacity-v1alpha1-storage-k8s-io","create-csistoragecapacity-v1alpha1-storage-k8s-io","-strong-write-operations-csistoragecapacity-v1alpha1-storage-k8s-io-strong-","csistoragecapacity-v1alpha1-storage-k8s-io","aggregationrule-v1alpha1-rbac-authorization-k8s-io","-strong-old-api-versions-strong-","windowssecuritycontextoptions-v1-core","weightedpodaffinityterm-v1-core","webhookconversion-v1-apiextensions-k8s-io","webhookclientconfig-v1-admissionregistration-k8s-io","watchev
ent-v1-meta","vspherevirtualdiskvolumesource-v1-core","volumeprojection-v1-core","volumenoderesources-v1-storage-k8s-io","volumenodeaffinity-v1-core","volumemount-v1-core","volumeerror-v1-storage-k8s-io","volumedevice-v1-core","volumeattachmentsource-v1-storage-k8s-io","validatingwebhook-v1-admissionregistration-k8s-io","usersubject-v1beta1-flowcontrol-apiserver-k8s-io","userinfo-v1-authentication-k8s-io","uncountedterminatedpods-v1-batch","typedlocalobjectreference-v1-core","topologyspreadconstraint-v1-core","topologyselectorterm-v1-core","topologyselectorlabelrequirement-v1-core","toleration-v1-core","time-v1-meta","taint-v1-core","tcpsocketaction-v1-core","sysctl-v1-core","supplementalgroupsstrategyoptions-v1beta1-policy","subjectrulesreviewstatus-v1-authorization-k8s-io","subject-v1beta1-flowcontrol-apiserver-k8s-io","storageversioncondition-v1alpha1-internal-apiserver-k8s-io","storageosvolumesource-v1-core","storageospersistentvolumesource-v1-core","statusdetails-v1-meta","statuscause-v1-meta","status-v1-meta","statefulsetupdatestrategy-v1-apps","statefulsetcondition-v1-apps","sessionaffinityconfig-v1-core","servicereference-v1-admissionregistration-k8s-io","serviceport-v1-core","servicebackendport-v1-networking-k8s-io","serviceaccounttokenprojection-v1-core","serviceaccountsubject-v1beta1-flowcontrol-apiserver-k8s-io","serverstorageversion-v1alpha1-internal-apiserver-k8s-io","serveraddressbyclientcidr-v1-meta","securitycontext-v1-core","secretvolumesource-v1-core","secretreference-v1-core","secretprojection-v1-core","secretkeyselector-v1-core","secretenvsource-v1-core","seccompprofile-v1-core","scopedresourceselectorrequirement-v1-core","scopeselector-v1-core","scheduling-v1-node-k8s-io","scaleiovolumesource-v1-core","scaleiopersistentvolumesource-v1-core","scale-v1-autoscaling","selinuxstrategyoptions-v1beta1-policy","selinuxoptions-v1-core","runtimeclassstrategyoptions-v1beta1-policy","runasuserstrategyoptions-v1beta1-policy","runasgroupstrategyoptions-v1bet
a1-policy","rulewithoperations-v1-admissionregistration-k8s-io","rollingupdatestatefulsetstrategy-v1-apps","roleref-v1-rbac-authorization-k8s-io","resourcerule-v1-authorization-k8s-io","resourcerequirements-v1-core","resourcepolicyrule-v1beta1-flowcontrol-apiserver-k8s-io","resourcemetricstatus-v2beta2-autoscaling","resourcemetricsource-v2beta2-autoscaling","resourcefieldselector-v1-core","resourceattributes-v1-authorization-k8s-io","replicationcontrollercondition-v1-core","replicasetcondition-v1-apps","rbdvolumesource-v1-core","rbdpersistentvolumesource-v1-core","quobytevolumesource-v1-core","queuingconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","quantity-resource-core","projectedvolumesource-v1-core","probe-v1-core","prioritylevelconfigurationreference-v1beta1-flowcontrol-apiserver-k8s-io","prioritylevelconfigurationcondition-v1beta1-flowcontrol-apiserver-k8s-io","preferredschedulingterm-v1-core","preconditions-v1-meta","portworxvolumesource-v1-core","portstatus-v1-core","policyruleswithsubjects-v1beta1-flowcontrol-apiserver-k8s-io","policyrule-v1-rbac-authorization-k8s-io","podsmetricstatus-v2beta2-autoscaling","podsmetricsource-v2beta2-autoscaling","podsecuritycontext-v1-core","podreadinessgate-v1-core","podip-v1-core","poddnsconfigoption-v1-core","poddnsconfig-v1-core","podcondition-v1-core","podantiaffinity-v1-core","podaffinityterm-v1-core","podaffinity-v1-core","photonpersistentdiskvolumesource-v1-core","persistentvolumeclaimvolumesource-v1-core","persistentvolumeclaimtemplate-v1-core","persistentvolumeclaimcondition-v1-core","patch-v1-meta","ownerreference-v1-meta","overhead-v1-node-k8s-io","objectreference-v1-core","objectmetricstatus-v2beta2-autoscaling","objectmetricsource-v2beta2-autoscaling","objectmeta-v1-meta","objectfieldselector-v1-core","nonresourcerule-v1-authorization-k8s-io","nonresourcepolicyrule-v1beta1-flowcontrol-apiserver-k8s-io","nonresourceattributes-v1-authorization-k8s-io","nodesysteminfo-v1-core","nodeselectorterm-v1-core","nodes
electorrequirement-v1-core","nodeselector-v1-core","nodedaemonendpoints-v1-core","nodeconfigstatus-v1-core","nodeconfigsource-v1-core","nodecondition-v1-core","nodeaffinity-v1-core","nodeaddress-v1-core","networkpolicyport-v1-networking-k8s-io","networkpolicypeer-v1-networking-k8s-io","networkpolicyingressrule-v1-networking-k8s-io","networkpolicyegressrule-v1-networking-k8s-io","namespacecondition-v1-core","nfsvolumesource-v1-core","mutatingwebhook-v1-admissionregistration-k8s-io","microtime-v1-meta","metricvaluestatus-v2beta2-autoscaling","metrictarget-v2beta2-autoscaling","metricstatus-v2beta2-autoscaling","metricspec-v2beta2-autoscaling","metricidentifier-v2beta2-autoscaling","managedfieldsentry-v1-meta","localvolumesource-v1-core","localobjectreference-v1-core","loadbalancerstatus-v1-core","loadbalanceringress-v1-core","listmeta-v1-meta","limitedprioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","limitresponse-v1beta1-flowcontrol-apiserver-k8s-io","limitrangeitem-v1-core","lifecycle-v1-core","labelselectorrequirement-v1-meta","labelselector-v1-meta","keytopath-v1-core","jobtemplatespec-v1-batch","jobcondition-v1-batch","jsonschemapropsorbool-v1-apiextensions-k8s-io","jsonschemapropsorarray-v1-apiextensions-k8s-io","jsonschemaprops-v1-apiextensions-k8s-io","json-v1-apiextensions-k8s-io","ingresstls-v1-networking-k8s-io","ingressservicebackend-v1-networking-k8s-io","ingressrule-v1-networking-k8s-io","ingressclassparametersreference-v1-networking-k8s-io","ingressbackend-v1-networking-k8s-io","iscsivolumesource-v1-core","iscsipersistentvolumesource-v1-core","ipblock-v1-networking-k8s-io","idrange-v1beta1-policy","hostportrange-v1beta1-policy","hostpathvolumesource-v1-core","hostalias-v1-core","horizontalpodautoscalercondition-v2beta2-autoscaling","horizontalpodautoscalerbehavior-v2beta2-autoscaling","handler-v1-core","httpingressrulevalue-v1-networking-k8s-io","httpingresspath-v1-networking-k8s-io","httpheader-v1-core","httpgetaction-v1-core","hpascali
ngrules-v2beta2-autoscaling","hpascalingpolicy-v2beta2-autoscaling","groupversionfordiscovery-v1-meta","groupsubject-v1beta1-flowcontrol-apiserver-k8s-io","glusterfsvolumesource-v1-core","glusterfspersistentvolumesource-v1-core","gitrepovolumesource-v1-core","gcepersistentdiskvolumesource-v1-core","forzone-v1-discovery-k8s-io","flowschemacondition-v1beta1-flowcontrol-apiserver-k8s-io","flowdistinguishermethod-v1beta1-flowcontrol-apiserver-k8s-io","flockervolumesource-v1-core","flexvolumesource-v1-core","flexpersistentvolumesource-v1-core","fieldsv1-v1-meta","fsgroupstrategyoptions-v1beta1-policy","fcvolumesource-v1-core","externalmetricstatus-v2beta2-autoscaling","externalmetricsource-v2beta2-autoscaling","externaldocumentation-v1-apiextensions-k8s-io","execaction-v1-core","eviction-v1-policy","eventsource-v1-core","eventseries-v1-events-k8s-io","ephemeralvolumesource-v1-core","ephemeralcontainer-v1-core","envvarsource-v1-core","envvar-v1-core","envfromsource-v1-core","endpointsubset-v1-core","endpointport-v1-core","endpointhints-v1-discovery-k8s-io","endpointconditions-v1-discovery-k8s-io","endpointaddress-v1-core","endpoint-v1-discovery-k8s-io","emptydirvolumesource-v1-core","downwardapivolumesource-v1-core","downwardapivolumefile-v1-core","downwardapiprojection-v1-core","deploymentcondition-v1-apps","deleteoptions-v1-meta","daemonsetupdatestrategy-v1-apps","daemonsetcondition-v1-apps","daemonendpoint-v1-core","customresourcevalidation-v1-apiextensions-k8s-io","customresourcesubresources-v1-apiextensions-k8s-io","customresourcesubresourcestatus-v1-apiextensions-k8s-io","customresourcesubresourcescale-v1-apiextensions-k8s-io","customresourcedefinitionversion-v1-apiextensions-k8s-io","customresourcedefinitionnames-v1-apiextensions-k8s-io","customresourcedefinitioncondition-v1-apiextensions-k8s-io","customresourceconversion-v1-apiextensions-k8s-io","customresourcecolumndefinition-v1-apiextensions-k8s-io","crossversionobjectreference-v1-autoscaling","containerstatewai
ting-v1-core","containerstateterminated-v1-core","containerstaterunning-v1-core","containerstate-v1-core","containerresourcemetricstatus-v2beta2-autoscaling","containerresourcemetricsource-v2beta2-autoscaling","containerport-v1-core","containerimage-v1-core","configmapvolumesource-v1-core","configmapprojection-v1-core","configmapnodeconfigsource-v1-core","configmapkeyselector-v1-core","configmapenvsource-v1-core","condition-v1-meta","componentcondition-v1-core","clientipconfig-v1-core","cindervolumesource-v1-core","cinderpersistentvolumesource-v1-core","certificatesigningrequestcondition-v1-certificates-k8s-io","cephfsvolumesource-v1-core","cephfspersistentvolumesource-v1-core","capabilities-v1-core","csivolumesource-v1-core","csipersistentvolumesource-v1-core","csinodedriver-v1-storage-k8s-io","boundobjectreference-v1-authentication-k8s-io","azurefilevolumesource-v1-core","azurefilepersistentvolumesource-v1-core","azurediskvolumesource-v1-core","attachedvolume-v1-core","allowedhostpath-v1beta1-policy","allowedflexvolume-v1beta1-policy","allowedcsidriver-v1beta1-policy","aggregationrule-v1-rbac-authorization-k8s-io","affinity-v1-core","awselasticblockstorevolumesource-v1-core","apiversions-v1-meta","apiservicecondition-v1-apiregistration-k8s-io","apiresource-v1-meta","apigroup-v1-meta","-strong-definitions-strong-","watch-list-all-namespaces-networkpolicy-v1-networking-k8s-io","watch-list-networkpolicy-v1-networking-k8s-io","watch-networkpolicy-v1-networking-k8s-io","list-all-namespaces-networkpolicy-v1-networking-k8s-io","list-networkpolicy-v1-networking-k8s-io","read-networkpolicy-v1-networking-k8s-io","-strong-read-operations-networkpolicy-v1-networking-k8s-io-strong-","delete-collection-networkpolicy-v1-networking-k8s-io","delete-networkpolicy-v1-networking-k8s-io","replace-networkpolicy-v1-networking-k8s-io","patch-networkpolicy-v1-networking-k8s-io","create-networkpolicy-v1-networking-k8s-io","-strong-write-operations-networkpolicy-v1-networking-k8s-io-strong-
","networkpolicy-v1-networking-k8s-io","create-tokenreview-v1-authentication-k8s-io","-strong-write-operations-tokenreview-v1-authentication-k8s-io-strong-","tokenreview-v1-authentication-k8s-io","tokenrequest-v1-authentication-k8s-io","create-subjectaccessreview-v1-authorization-k8s-io","-strong-write-operations-subjectaccessreview-v1-authorization-k8s-io-strong-","subjectaccessreview-v1-authorization-k8s-io","replace-status-storageversion-v1alpha1-internal-apiserver-k8s-io","read-status-storageversion-v1alpha1-internal-apiserver-k8s-io","patch-status-storageversion-v1alpha1-internal-apiserver-k8s-io","-strong-status-operations-storageversion-v1alpha1-internal-apiserver-k8s-io-strong-","watch-list-storageversion-v1alpha1-internal-apiserver-k8s-io","watch-storageversion-v1alpha1-internal-apiserver-k8s-io","list-storageversion-v1alpha1-internal-apiserver-k8s-io","read-storageversion-v1alpha1-internal-apiserver-k8s-io","-strong-read-operations-storageversion-v1alpha1-internal-apiserver-k8s-io-strong-","delete-collection-storageversion-v1alpha1-internal-apiserver-k8s-io","delete-storageversion-v1alpha1-internal-apiserver-k8s-io","replace-storageversion-v1alpha1-internal-apiserver-k8s-io","patch-storageversion-v1alpha1-internal-apiserver-k8s-io","create-storageversion-v1alpha1-internal-apiserver-k8s-io","-strong-write-operations-storageversion-v1alpha1-internal-apiserver-k8s-io-strong-","storageversion-v1alpha1-internal-apiserver-k8s-io","watch-list-all-namespaces-serviceaccount-v1-core","watch-list-serviceaccount-v1-core","watch-serviceaccount-v1-core","list-all-namespaces-serviceaccount-v1-core","list-serviceaccount-v1-core","read-serviceaccount-v1-core","-strong-read-operations-serviceaccount-v1-core-strong-","delete-collection-serviceaccount-v1-core","delete-serviceaccount-v1-core","replace-serviceaccount-v1-core","patch-serviceaccount-v1-core","create-serviceaccount-v1-core","-strong-write-operations-serviceaccount-v1-core-strong-","serviceaccount-v1-core","create-
selfsubjectrulesreview-v1-authorization-k8s-io","-strong-write-operations-selfsubjectrulesreview-v1-authorization-k8s-io-strong-","selfsubjectrulesreview-v1-authorization-k8s-io","create-selfsubjectaccessreview-v1-authorization-k8s-io","-strong-write-operations-selfsubjectaccessreview-v1-authorization-k8s-io-strong-","selfsubjectaccessreview-v1-authorization-k8s-io","watch-list-runtimeclass-v1-node-k8s-io","watch-runtimeclass-v1-node-k8s-io","list-runtimeclass-v1-node-k8s-io","read-runtimeclass-v1-node-k8s-io","-strong-read-operations-runtimeclass-v1-node-k8s-io-strong-","delete-collection-runtimeclass-v1-node-k8s-io","delete-runtimeclass-v1-node-k8s-io","replace-runtimeclass-v1-node-k8s-io","patch-runtimeclass-v1-node-k8s-io","create-runtimeclass-v1-node-k8s-io","-strong-write-operations-runtimeclass-v1-node-k8s-io-strong-","runtimeclass-v1-node-k8s-io","watch-list-all-namespaces-rolebinding-v1-rbac-authorization-k8s-io","watch-list-rolebinding-v1-rbac-authorization-k8s-io","watch-rolebinding-v1-rbac-authorization-k8s-io","list-all-namespaces-rolebinding-v1-rbac-authorization-k8s-io","list-rolebinding-v1-rbac-authorization-k8s-io","read-rolebinding-v1-rbac-authorization-k8s-io","-strong-read-operations-rolebinding-v1-rbac-authorization-k8s-io-strong-","delete-collection-rolebinding-v1-rbac-authorization-k8s-io","delete-rolebinding-v1-rbac-authorization-k8s-io","replace-rolebinding-v1-rbac-authorization-k8s-io","patch-rolebinding-v1-rbac-authorization-k8s-io","create-rolebinding-v1-rbac-authorization-k8s-io","-strong-write-operations-rolebinding-v1-rbac-authorization-k8s-io-strong-","rolebinding-v1-rbac-authorization-k8s-io","watch-list-all-namespaces-role-v1-rbac-authorization-k8s-io","watch-list-role-v1-rbac-authorization-k8s-io","watch-role-v1-rbac-authorization-k8s-io","list-all-namespaces-role-v1-rbac-authorization-k8s-io","list-role-v1-rbac-authorization-k8s-io","read-role-v1-rbac-authorization-k8s-io","-strong-read-operations-role-v1-rbac-authorization-k8s-io
-strong-","delete-collection-role-v1-rbac-authorization-k8s-io","delete-role-v1-rbac-authorization-k8s-io","replace-role-v1-rbac-authorization-k8s-io","patch-role-v1-rbac-authorization-k8s-io","create-role-v1-rbac-authorization-k8s-io","-strong-write-operations-role-v1-rbac-authorization-k8s-io-strong-","role-v1-rbac-authorization-k8s-io","replace-status-resourcequota-v1-core","read-status-resourcequota-v1-core","patch-status-resourcequota-v1-core","-strong-status-operations-resourcequota-v1-core-strong-","watch-list-all-namespaces-resourcequota-v1-core","watch-list-resourcequota-v1-core","watch-resourcequota-v1-core","list-all-namespaces-resourcequota-v1-core","list-resourcequota-v1-core","read-resourcequota-v1-core","-strong-read-operations-resourcequota-v1-core-strong-","delete-collection-resourcequota-v1-core","delete-resourcequota-v1-core","replace-resourcequota-v1-core","patch-resourcequota-v1-core","create-resourcequota-v1-core","-strong-write-operations-resourcequota-v1-core-strong-","resourcequota-v1-core","replace-status-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","read-status-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","patch-status-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","-strong-status-operations-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io-strong-","watch-list-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","watch-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","list-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","read-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","-strong-read-operations-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io-strong-","delete-collection-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","delete-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","replace-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","patch-prio
ritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","create-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","-strong-write-operations-prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io-strong-","prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io","replace-status-persistentvolume-v1-core","read-status-persistentvolume-v1-core","patch-status-persistentvolume-v1-core","-strong-status-operations-persistentvolume-v1-core-strong-","watch-list-persistentvolume-v1-core","watch-persistentvolume-v1-core","list-persistentvolume-v1-core","read-persistentvolume-v1-core","-strong-read-operations-persistentvolume-v1-core-strong-","delete-collection-persistentvolume-v1-core","delete-persistentvolume-v1-core","replace-persistentvolume-v1-core","patch-persistentvolume-v1-core","create-persistentvolume-v1-core","-strong-write-operations-persistentvolume-v1-core-strong-","persistentvolume-v1-core","replace-connect-proxy-path-node-v1-core","replace-connect-proxy-node-v1-core","head-connect-proxy-path-node-v1-core","head-connect-proxy-node-v1-core","get-connect-proxy-path-node-v1-core","get-connect-proxy-node-v1-core","delete-connect-proxy-path-node-v1-core","delete-connect-proxy-node-v1-core","create-connect-proxy-path-node-v1-core","create-connect-proxy-node-v1-core","-strong-proxy-operations-node-v1-core-strong-","replace-status-node-v1-core","read-status-node-v1-core","patch-status-node-v1-core","-strong-status-operations-node-v1-core-strong-","watch-list-node-v1-core","watch-node-v1-core","list-node-v1-core","read-node-v1-core","-strong-read-operations-node-v1-core-strong-","delete-collection-node-v1-core","delete-node-v1-core","replace-node-v1-core","patch-node-v1-core","create-node-v1-core","-strong-write-operations-node-v1-core-strong-","node-v1-core","replace-status-namespace-v1-core","read-status-namespace-v1-core","patch-status-namespace-v1-core","-strong-status-operations-namespace-v1-core-strong-","watch-list-namespace-v1-c
ore","watch-namespace-v1-core","list-namespace-v1-core","read-namespace-v1-core","-strong-read-operations-namespace-v1-core-strong-","delete-namespace-v1-core","replace-namespace-v1-core","patch-namespace-v1-core","create-namespace-v1-core","-strong-write-operations-namespace-v1-core-strong-","namespace-v1-core","create-localsubjectaccessreview-v1-authorization-k8s-io","-strong-write-operations-localsubjectaccessreview-v1-authorization-k8s-io-strong-","localsubjectaccessreview-v1-authorization-k8s-io","watch-list-all-namespaces-lease-v1-coordination-k8s-io","watch-list-lease-v1-coordination-k8s-io","watch-lease-v1-coordination-k8s-io","list-all-namespaces-lease-v1-coordination-k8s-io","list-lease-v1-coordination-k8s-io","read-lease-v1-coordination-k8s-io","-strong-read-operations-lease-v1-coordination-k8s-io-strong-","delete-collection-lease-v1-coordination-k8s-io","delete-lease-v1-coordination-k8s-io","replace-lease-v1-coordination-k8s-io","patch-lease-v1-coordination-k8s-io","create-lease-v1-coordination-k8s-io","-strong-write-operations-lease-v1-coordination-k8s-io-strong-","lease-v1-coordination-k8s-io","replace-status-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","read-status-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","patch-status-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","-strong-status-operations-flowschema-v1beta1-flowcontrol-apiserver-k8s-io-strong-","watch-list-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","watch-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","list-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","read-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","-strong-read-operations-flowschema-v1beta1-flowcontrol-apiserver-k8s-io-strong-","delete-collection-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","delete-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","replace-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","patch-flowschema-v1beta1-flowcontrol-apiserver-k8s-io","create-flowschema-v1beta1-flowcontrol-apiserver-k8s-
io","-strong-write-operations-flowschema-v1beta1-flowcontrol-apiserver-k8s-io-strong-","flowschema-v1beta1-flowcontrol-apiserver-k8s-io","list-componentstatus-v1-core","read-componentstatus-v1-core","-strong-read-operations-componentstatus-v1-core-strong-","componentstatus-v1-core","watch-list-clusterrolebinding-v1-rbac-authorization-k8s-io","watch-clusterrolebinding-v1-rbac-authorization-k8s-io","list-clusterrolebinding-v1-rbac-authorization-k8s-io","read-clusterrolebinding-v1-rbac-authorization-k8s-io","-strong-read-operations-clusterrolebinding-v1-rbac-authorization-k8s-io-strong-","delete-collection-clusterrolebinding-v1-rbac-authorization-k8s-io","delete-clusterrolebinding-v1-rbac-authorization-k8s-io","replace-clusterrolebinding-v1-rbac-authorization-k8s-io","patch-clusterrolebinding-v1-rbac-authorization-k8s-io","create-clusterrolebinding-v1-rbac-authorization-k8s-io","-strong-write-operations-clusterrolebinding-v1-rbac-authorization-k8s-io-strong-","clusterrolebinding-v1-rbac-authorization-k8s-io","watch-list-clusterrole-v1-rbac-authorization-k8s-io","watch-clusterrole-v1-rbac-authorization-k8s-io","list-clusterrole-v1-rbac-authorization-k8s-io","read-clusterrole-v1-rbac-authorization-k8s-io","-strong-read-operations-clusterrole-v1-rbac-authorization-k8s-io-strong-","delete-collection-clusterrole-v1-rbac-authorization-k8s-io","delete-clusterrole-v1-rbac-authorization-k8s-io","replace-clusterrole-v1-rbac-authorization-k8s-io","patch-clusterrole-v1-rbac-authorization-k8s-io","create-clusterrole-v1-rbac-authorization-k8s-io","-strong-write-operations-clusterrole-v1-rbac-authorization-k8s-io-strong-","clusterrole-v1-rbac-authorization-k8s-io","replace-status-certificatesigningrequest-v1-certificates-k8s-io","read-status-certificatesigningrequest-v1-certificates-k8s-io","patch-status-certificatesigningrequest-v1-certificates-k8s-io","-strong-status-operations-certificatesigningrequest-v1-certificates-k8s-io-strong-","watch-list-certificatesigningrequest-v1-certif
icates-k8s-io","watch-certificatesigningrequest-v1-certificates-k8s-io","list-certificatesigningrequest-v1-certificates-k8s-io","read-certificatesigningrequest-v1-certificates-k8s-io","-strong-read-operations-certificatesigningrequest-v1-certificates-k8s-io-strong-","delete-collection-certificatesigningrequest-v1-certificates-k8s-io","delete-certificatesigningrequest-v1-certificates-k8s-io","replace-certificatesigningrequest-v1-certificates-k8s-io","patch-certificatesigningrequest-v1-certificates-k8s-io","create-certificatesigningrequest-v1-certificates-k8s-io","-strong-write-operations-certificatesigningrequest-v1-certificates-k8s-io-strong-","certificatesigningrequest-v1-certificates-k8s-io","create-binding-v1-core","-strong-write-operations-binding-v1-core-strong-","binding-v1-core","replace-status-apiservice-v1-apiregistration-k8s-io","read-status-apiservice-v1-apiregistration-k8s-io","patch-status-apiservice-v1-apiregistration-k8s-io","-strong-status-operations-apiservice-v1-apiregistration-k8s-io-strong-","watch-list-apiservice-v1-apiregistration-k8s-io","watch-apiservice-v1-apiregistration-k8s-io","list-apiservice-v1-apiregistration-k8s-io","read-apiservice-v1-apiregistration-k8s-io","-strong-read-operations-apiservice-v1-apiregistration-k8s-io-strong-","delete-collection-apiservice-v1-apiregistration-k8s-io","delete-apiservice-v1-apiregistration-k8s-io","replace-apiservice-v1-apiregistration-k8s-io","patch-apiservice-v1-apiregistration-k8s-io","create-apiservice-v1-apiregistration-k8s-io","-strong-write-operations-apiservice-v1-apiregistration-k8s-io-strong-","apiservice-v1-apiregistration-k8s-io","-strong-cluster-apis-strong-","watch-list-podsecuritypolicy-v1beta1-policy","watch-podsecuritypolicy-v1beta1-policy","list-podsecuritypolicy-v1beta1-policy","read-podsecuritypolicy-v1beta1-policy","-strong-read-operations-podsecuritypolicy-v1beta1-policy-strong-","delete-collection-podsecuritypolicy-v1beta1-policy","delete-podsecuritypolicy-v1beta1-policy","replac
e-podsecuritypolicy-v1beta1-policy","patch-podsecuritypolicy-v1beta1-policy","create-podsecuritypolicy-v1beta1-policy","-strong-write-operations-podsecuritypolicy-v1beta1-policy-strong-","podsecuritypolicy-v1beta1-policy","watch-list-priorityclass-v1-scheduling-k8s-io","watch-priorityclass-v1-scheduling-k8s-io","list-priorityclass-v1-scheduling-k8s-io","read-priorityclass-v1-scheduling-k8s-io","-strong-read-operations-priorityclass-v1-scheduling-k8s-io-strong-","delete-collection-priorityclass-v1-scheduling-k8s-io","delete-priorityclass-v1-scheduling-k8s-io","replace-priorityclass-v1-scheduling-k8s-io","patch-priorityclass-v1-scheduling-k8s-io","create-priorityclass-v1-scheduling-k8s-io","-strong-write-operations-priorityclass-v1-scheduling-k8s-io-strong-","priorityclass-v1-scheduling-k8s-io","replace-status-poddisruptionbudget-v1-policy","read-status-poddisruptionbudget-v1-policy","patch-status-poddisruptionbudget-v1-policy","-strong-status-operations-poddisruptionbudget-v1-policy-strong-","watch-list-all-namespaces-poddisruptionbudget-v1-policy","watch-list-poddisruptionbudget-v1-policy","watch-poddisruptionbudget-v1-policy","list-all-namespaces-poddisruptionbudget-v1-policy","list-poddisruptionbudget-v1-policy","read-poddisruptionbudget-v1-policy","-strong-read-operations-poddisruptionbudget-v1-policy-strong-","delete-collection-poddisruptionbudget-v1-policy","delete-poddisruptionbudget-v1-policy","replace-poddisruptionbudget-v1-policy","patch-poddisruptionbudget-v1-policy","create-poddisruptionbudget-v1-policy","-strong-write-operations-poddisruptionbudget-v1-policy-strong-","poddisruptionbudget-v1-policy","watch-list-all-namespaces-podtemplate-v1-core","watch-list-podtemplate-v1-core","watch-podtemplate-v1-core","list-all-namespaces-podtemplate-v1-core","list-podtemplate-v1-core","read-podtemplate-v1-core","-strong-read-operations-podtemplate-v1-core-strong-","delete-collection-podtemplate-v1-core","delete-podtemplate-v1-core","replace-podtemplate-v1-core","pat
ch-podtemplate-v1-core","create-podtemplate-v1-core","-strong-write-operations-podtemplate-v1-core-strong-","podtemplate-v1-core","watch-list-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","watch-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","list-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","read-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","-strong-read-operations-validatingwebhookconfiguration-v1-admissionregistration-k8s-io-strong-","delete-collection-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","delete-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","replace-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","patch-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","create-validatingwebhookconfiguration-v1-admissionregistration-k8s-io","-strong-write-operations-validatingwebhookconfiguration-v1-admissionregistration-k8s-io-strong-","validatingwebhookconfiguration-v1-admissionregistration-k8s-io","watch-list-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","watch-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","list-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","read-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","-strong-read-operations-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io-strong-","delete-collection-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","delete-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","replace-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","patch-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","create-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","-strong-write-operations-mutatingwebhookconfiguration-v1-admissionregistration-k8s-io-strong-","mutatingwebhookconfiguration-v1-admissionregistration-k8s-io","replace-status-horizontalpodautoscaler-v1-autoscaling","r
ead-status-horizontalpodautoscaler-v1-autoscaling","patch-status-horizontalpodautoscaler-v1-autoscaling","-strong-status-operations-horizontalpodautoscaler-v1-autoscaling-strong-","watch-list-all-namespaces-horizontalpodautoscaler-v1-autoscaling","watch-list-horizontalpodautoscaler-v1-autoscaling","watch-horizontalpodautoscaler-v1-autoscaling","list-all-namespaces-horizontalpodautoscaler-v1-autoscaling","list-horizontalpodautoscaler-v1-autoscaling","read-horizontalpodautoscaler-v1-autoscaling","-strong-read-operations-horizontalpodautoscaler-v1-autoscaling-strong-","delete-collection-horizontalpodautoscaler-v1-autoscaling","delete-horizontalpodautoscaler-v1-autoscaling","replace-horizontalpodautoscaler-v1-autoscaling","patch-horizontalpodautoscaler-v1-autoscaling","create-horizontalpodautoscaler-v1-autoscaling","-strong-write-operations-horizontalpodautoscaler-v1-autoscaling-strong-","horizontalpodautoscaler-v1-autoscaling","watch-list-all-namespaces-limitrange-v1-core","watch-list-limitrange-v1-core","watch-limitrange-v1-core","list-all-namespaces-limitrange-v1-core","list-limitrange-v1-core","read-limitrange-v1-core","-strong-read-operations-limitrange-v1-core-strong-","delete-collection-limitrange-v1-core","delete-limitrange-v1-core","replace-limitrange-v1-core","patch-limitrange-v1-core","create-limitrange-v1-core","-strong-write-operations-limitrange-v1-core-strong-","limitrange-v1-core","watch-list-all-namespaces-event-v1-events-k8s-io","watch-list-event-v1-events-k8s-io","watch-event-v1-events-k8s-io","list-all-namespaces-event-v1-events-k8s-io","list-event-v1-events-k8s-io","read-event-v1-events-k8s-io","-strong-read-operations-event-v1-events-k8s-io-strong-","delete-collection-event-v1-events-k8s-io","delete-event-v1-events-k8s-io","replace-event-v1-events-k8s-io","patch-event-v1-events-k8s-io","create-event-v1-events-k8s-io","-strong-write-operations-event-v1-events-k8s-io-strong-","event-v1-events-k8s-io","replace-status-customresourcedefinition-v1-apiext
ensions-k8s-io","read-status-customresourcedefinition-v1-apiextensions-k8s-io","patch-status-customresourcedefinition-v1-apiextensions-k8s-io","-strong-status-operations-customresourcedefinition-v1-apiextensions-k8s-io-strong-","watch-list-customresourcedefinition-v1-apiextensions-k8s-io","watch-customresourcedefinition-v1-apiextensions-k8s-io","list-customresourcedefinition-v1-apiextensions-k8s-io","read-customresourcedefinition-v1-apiextensions-k8s-io","-strong-read-operations-customresourcedefinition-v1-apiextensions-k8s-io-strong-","delete-collection-customresourcedefinition-v1-apiextensions-k8s-io","delete-customresourcedefinition-v1-apiextensions-k8s-io","replace-customresourcedefinition-v1-apiextensions-k8s-io","patch-customresourcedefinition-v1-apiextensions-k8s-io","create-customresourcedefinition-v1-apiextensions-k8s-io","-strong-write-operations-customresourcedefinition-v1-apiextensions-k8s-io-strong-","customresourcedefinition-v1-apiextensions-k8s-io","watch-list-all-namespaces-controllerrevision-v1-apps","watch-list-controllerrevision-v1-apps","watch-controllerrevision-v1-apps","list-all-namespaces-controllerrevision-v1-apps","list-controllerrevision-v1-apps","read-controllerrevision-v1-apps","-strong-read-operations-controllerrevision-v1-apps-strong-","delete-collection-controllerrevision-v1-apps","delete-controllerrevision-v1-apps","replace-controllerrevision-v1-apps","patch-controllerrevision-v1-apps","create-controllerrevision-v1-apps","-strong-write-operations-controllerrevision-v1-apps-strong-","controllerrevision-v1-apps","-strong-metadata-apis-strong-","replace-status-volumeattachment-v1-storage-k8s-io","read-status-volumeattachment-v1-storage-k8s-io","patch-status-volumeattachment-v1-storage-k8s-io","-strong-status-operations-volumeattachment-v1-storage-k8s-io-strong-","watch-list-volumeattachment-v1-storage-k8s-io","watch-volumeattachment-v1-storage-k8s-io","list-volumeattachment-v1-storage-k8s-io","read-volumeattachment-v1-storage-k8s-io","-s
trong-read-operations-volumeattachment-v1-storage-k8s-io-strong-","delete-collection-volumeattachment-v1-storage-k8s-io","delete-volumeattachment-v1-storage-k8s-io","replace-volumeattachment-v1-storage-k8s-io","patch-volumeattachment-v1-storage-k8s-io","create-volumeattachment-v1-storage-k8s-io","-strong-write-operations-volumeattachment-v1-storage-k8s-io-strong-","volumeattachment-v1-storage-k8s-io","volume-v1-core","watch-list-all-namespaces-csistoragecapacity-v1beta1-storage-k8s-io","watch-list-csistoragecapacity-v1beta1-storage-k8s-io","watch-csistoragecapacity-v1beta1-storage-k8s-io","list-all-namespaces-csistoragecapacity-v1beta1-storage-k8s-io","list-csistoragecapacity-v1beta1-storage-k8s-io","read-csistoragecapacity-v1beta1-storage-k8s-io","-strong-read-operations-csistoragecapacity-v1beta1-storage-k8s-io-strong-","delete-collection-csistoragecapacity-v1beta1-storage-k8s-io","delete-csistoragecapacity-v1beta1-storage-k8s-io","replace-csistoragecapacity-v1beta1-storage-k8s-io","patch-csistoragecapacity-v1beta1-storage-k8s-io","create-csistoragecapacity-v1beta1-storage-k8s-io","-strong-write-operations-csistoragecapacity-v1beta1-storage-k8s-io-strong-","csistoragecapacity-v1beta1-storage-k8s-io","watch-list-storageclass-v1-storage-k8s-io","watch-storageclass-v1-storage-k8s-io","list-storageclass-v1-storage-k8s-io","read-storageclass-v1-storage-k8s-io","-strong-read-operations-storageclass-v1-storage-k8s-io-strong-","delete-collection-storageclass-v1-storage-k8s-io","delete-storageclass-v1-storage-k8s-io","replace-storageclass-v1-storage-k8s-io","patch-storageclass-v1-storage-k8s-io","create-storageclass-v1-storage-k8s-io","-strong-write-operations-storageclass-v1-storage-k8s-io-strong-","storageclass-v1-storage-k8s-io","replace-status-persistentvolumeclaim-v1-core","read-status-persistentvolumeclaim-v1-core","patch-status-persistentvolumeclaim-v1-core","-strong-status-operations-persistentvolumeclaim-v1-core-strong-","watch-list-all-namespaces-persistentvolume
claim-v1-core","watch-list-persistentvolumeclaim-v1-core","watch-persistentvolumeclaim-v1-core","list-all-namespaces-persistentvolumeclaim-v1-core","list-persistentvolumeclaim-v1-core","read-persistentvolumeclaim-v1-core","-strong-read-operations-persistentvolumeclaim-v1-core-strong-","delete-collection-persistentvolumeclaim-v1-core","delete-persistentvolumeclaim-v1-core","replace-persistentvolumeclaim-v1-core","patch-persistentvolumeclaim-v1-core","create-persistentvolumeclaim-v1-core","-strong-write-operations-persistentvolumeclaim-v1-core-strong-","persistentvolumeclaim-v1-core","watch-list-all-namespaces-secret-v1-core","watch-list-secret-v1-core","watch-secret-v1-core","list-all-namespaces-secret-v1-core","list-secret-v1-core","read-secret-v1-core","-strong-read-operations-secret-v1-core-strong-","delete-collection-secret-v1-core","delete-secret-v1-core","replace-secret-v1-core","patch-secret-v1-core","create-secret-v1-core","-strong-write-operations-secret-v1-core-strong-","secret-v1-core","watch-list-csinode-v1-storage-k8s-io","watch-csinode-v1-storage-k8s-io","list-csinode-v1-storage-k8s-io","read-csinode-v1-storage-k8s-io","-strong-read-operations-csinode-v1-storage-k8s-io-strong-","delete-collection-csinode-v1-storage-k8s-io","delete-csinode-v1-storage-k8s-io","replace-csinode-v1-storage-k8s-io","patch-csinode-v1-storage-k8s-io","create-csinode-v1-storage-k8s-io","-strong-write-operations-csinode-v1-storage-k8s-io-strong-","csinode-v1-storage-k8s-io","watch-list-csidriver-v1-storage-k8s-io","watch-csidriver-v1-storage-k8s-io","list-csidriver-v1-storage-k8s-io","read-csidriver-v1-storage-k8s-io","-strong-read-operations-csidriver-v1-storage-k8s-io-strong-","delete-collection-csidriver-v1-storage-k8s-io","delete-csidriver-v1-storage-k8s-io","replace-csidriver-v1-storage-k8s-io","patch-csidriver-v1-storage-k8s-io","create-csidriver-v1-storage-k8s-io","-strong-write-operations-csidriver-v1-storage-k8s-io-strong-","csidriver-v1-storage-k8s-io","watch-list-all-n
amespaces-configmap-v1-core","watch-list-configmap-v1-core","watch-configmap-v1-core","list-all-namespaces-configmap-v1-core","list-configmap-v1-core","read-configmap-v1-core","-strong-read-operations-configmap-v1-core-strong-","delete-collection-configmap-v1-core","delete-configmap-v1-core","replace-configmap-v1-core","patch-configmap-v1-core","create-configmap-v1-core","-strong-write-operations-configmap-v1-core-strong-","configmap-v1-core","-strong-config-and-storage-apis-strong-","replace-connect-proxy-path-service-v1-core","replace-connect-proxy-service-v1-core","head-connect-proxy-path-service-v1-core","head-connect-proxy-service-v1-core","get-connect-proxy-path-service-v1-core","get-connect-proxy-service-v1-core","delete-connect-proxy-path-service-v1-core","delete-connect-proxy-service-v1-core","create-connect-proxy-path-service-v1-core","create-connect-proxy-service-v1-core","-strong-proxy-operations-service-v1-core-strong-","replace-status-service-v1-core","read-status-service-v1-core","patch-status-service-v1-core","-strong-status-operations-service-v1-core-strong-","watch-list-all-namespaces-service-v1-core","watch-list-service-v1-core","watch-service-v1-core","list-all-namespaces-service-v1-core","list-service-v1-core","read-service-v1-core","-strong-read-operations-service-v1-core-strong-","delete-service-v1-core","replace-service-v1-core","patch-service-v1-core","create-service-v1-core","-strong-write-operations-service-v1-core-strong-","service-v1-core","watch-list-ingressclass-v1-networking-k8s-io","watch-ingressclass-v1-networking-k8s-io","list-ingressclass-v1-networking-k8s-io","read-ingressclass-v1-networking-k8s-io","-strong-read-operations-ingressclass-v1-networking-k8s-io-strong-","delete-collection-ingressclass-v1-networking-k8s-io","delete-ingressclass-v1-networking-k8s-io","replace-ingressclass-v1-networking-k8s-io","patch-ingressclass-v1-networking-k8s-io","create-ingressclass-v1-networking-k8s-io","-strong-write-operations-ingressclass-v1-
networking-k8s-io-strong-","ingressclass-v1-networking-k8s-io","replace-status-ingress-v1-networking-k8s-io","read-status-ingress-v1-networking-k8s-io","patch-status-ingress-v1-networking-k8s-io","-strong-status-operations-ingress-v1-networking-k8s-io-strong-","watch-list-all-namespaces-ingress-v1-networking-k8s-io","watch-list-ingress-v1-networking-k8s-io","watch-ingress-v1-networking-k8s-io","list-all-namespaces-ingress-v1-networking-k8s-io","list-ingress-v1-networking-k8s-io","read-ingress-v1-networking-k8s-io","-strong-read-operations-ingress-v1-networking-k8s-io-strong-","delete-collection-ingress-v1-networking-k8s-io","delete-ingress-v1-networking-k8s-io","replace-ingress-v1-networking-k8s-io","patch-ingress-v1-networking-k8s-io","create-ingress-v1-networking-k8s-io","-strong-write-operations-ingress-v1-networking-k8s-io-strong-","ingress-v1-networking-k8s-io","watch-list-all-namespaces-endpointslice-v1-discovery-k8s-io","watch-list-endpointslice-v1-discovery-k8s-io","watch-endpointslice-v1-discovery-k8s-io","list-all-namespaces-endpointslice-v1-discovery-k8s-io","list-endpointslice-v1-discovery-k8s-io","read-endpointslice-v1-discovery-k8s-io","-strong-read-operations-endpointslice-v1-discovery-k8s-io-strong-","delete-collection-endpointslice-v1-discovery-k8s-io","delete-endpointslice-v1-discovery-k8s-io","replace-endpointslice-v1-discovery-k8s-io","patch-endpointslice-v1-discovery-k8s-io","create-endpointslice-v1-discovery-k8s-io","-strong-write-operations-endpointslice-v1-discovery-k8s-io-strong-","endpointslice-v1-discovery-k8s-io","watch-list-all-namespaces-endpoints-v1-core","watch-list-endpoints-v1-core","watch-endpoints-v1-core","list-all-namespaces-endpoints-v1-core","list-endpoints-v1-core","read-endpoints-v1-core","-strong-read-operations-endpoints-v1-core-strong-","delete-collection-endpoints-v1-core","delete-endpoints-v1-core","replace-endpoints-v1-core","patch-endpoints-v1-core","create-endpoints-v1-core","-strong-write-operations-endpoints-v1-cor
e-strong-","endpoints-v1-core","-strong-service-apis-strong-","patch-scale-statefulset-v1-apps","replace-scale-statefulset-v1-apps","read-scale-statefulset-v1-apps","-strong-misc-operations-statefulset-v1-apps-strong-","replace-status-statefulset-v1-apps","read-status-statefulset-v1-apps","patch-status-statefulset-v1-apps","-strong-status-operations-statefulset-v1-apps-strong-","watch-list-all-namespaces-statefulset-v1-apps","watch-list-statefulset-v1-apps","watch-statefulset-v1-apps","list-all-namespaces-statefulset-v1-apps","list-statefulset-v1-apps","read-statefulset-v1-apps","-strong-read-operations-statefulset-v1-apps-strong-","delete-collection-statefulset-v1-apps","delete-statefulset-v1-apps","replace-statefulset-v1-apps","patch-statefulset-v1-apps","create-statefulset-v1-apps","-strong-write-operations-statefulset-v1-apps-strong-","statefulset-v1-apps","patch-scale-replicationcontroller-v1-core","replace-scale-replicationcontroller-v1-core","read-scale-replicationcontroller-v1-core","-strong-misc-operations-replicationcontroller-v1-core-strong-","replace-status-replicationcontroller-v1-core","read-status-replicationcontroller-v1-core","patch-status-replicationcontroller-v1-core","-strong-status-operations-replicationcontroller-v1-core-strong-","watch-list-all-namespaces-replicationcontroller-v1-core","watch-list-replicationcontroller-v1-core","watch-replicationcontroller-v1-core","list-all-namespaces-replicationcontroller-v1-core","list-replicationcontroller-v1-core","read-replicationcontroller-v1-core","-strong-read-operations-replicationcontroller-v1-core-strong-","delete-collection-replicationcontroller-v1-core","delete-replicationcontroller-v1-core","replace-replicationcontroller-v1-core","patch-replicationcontroller-v1-core","create-replicationcontroller-v1-core","-strong-write-operations-replicationcontroller-v1-core-strong-","replicationcontroller-v1-core","patch-scale-replicaset-v1-apps","replace-scale-replicaset-v1-apps","read-scale-replicaset-v1-ap
ps","-strong-misc-operations-replicaset-v1-apps-strong-","replace-status-replicaset-v1-apps","read-status-replicaset-v1-apps","patch-status-replicaset-v1-apps","-strong-status-operations-replicaset-v1-apps-strong-","watch-list-all-namespaces-replicaset-v1-apps","watch-list-replicaset-v1-apps","watch-replicaset-v1-apps","list-all-namespaces-replicaset-v1-apps","list-replicaset-v1-apps","read-replicaset-v1-apps","-strong-read-operations-replicaset-v1-apps-strong-","delete-collection-replicaset-v1-apps","delete-replicaset-v1-apps","replace-replicaset-v1-apps","patch-replicaset-v1-apps","create-replicaset-v1-apps","-strong-write-operations-replicaset-v1-apps-strong-","replicaset-v1-apps","read-log-pod-v1-core","-strong-misc-operations-pod-v1-core-strong-","replace-connect-proxy-path-pod-v1-core","replace-connect-proxy-pod-v1-core","head-connect-proxy-path-pod-v1-core","head-connect-proxy-pod-v1-core","get-connect-proxy-path-pod-v1-core","get-connect-proxy-pod-v1-core","get-connect-portforward-pod-v1-core","delete-connect-proxy-path-pod-v1-core","delete-connect-proxy-pod-v1-core","create-connect-proxy-path-pod-v1-core","create-connect-proxy-pod-v1-core","create-connect-portforward-pod-v1-core","-strong-proxy-operations-pod-v1-core-strong-","replace-ephemeralcontainers-pod-v1-core","read-ephemeralcontainers-pod-v1-core","patch-ephemeralcontainers-pod-v1-core","-strong-ephemeralcontainers-operations-pod-v1-core-strong-","replace-status-pod-v1-core","read-status-pod-v1-core","patch-status-pod-v1-core","-strong-status-operations-pod-v1-core-strong-","watch-list-all-namespaces-pod-v1-core","watch-list-pod-v1-core","watch-pod-v1-core","list-all-namespaces-pod-v1-core","list-pod-v1-core","read-pod-v1-core","-strong-read-operations-pod-v1-core-strong-","delete-collection-pod-v1-core","delete-pod-v1-core","replace-pod-v1-core","patch-pod-v1-core","create-eviction-pod-v1-core","create-pod-v1-core","-strong-write-operations-pod-v1-core-strong-","pod-v1-core","replace-status-job-v1-
batch","read-status-job-v1-batch","patch-status-job-v1-batch","-strong-status-operations-job-v1-batch-strong-","watch-list-all-namespaces-job-v1-batch","watch-list-job-v1-batch","watch-job-v1-batch","list-all-namespaces-job-v1-batch","list-job-v1-batch","read-job-v1-batch","-strong-read-operations-job-v1-batch-strong-","delete-collection-job-v1-batch","delete-job-v1-batch","replace-job-v1-batch","patch-job-v1-batch","create-job-v1-batch","-strong-write-operations-job-v1-batch-strong-","job-v1-batch","patch-scale-deployment-v1-apps","replace-scale-deployment-v1-apps","read-scale-deployment-v1-apps","-strong-misc-operations-deployment-v1-apps-strong-","replace-status-deployment-v1-apps","read-status-deployment-v1-apps","patch-status-deployment-v1-apps","-strong-status-operations-deployment-v1-apps-strong-","watch-list-all-namespaces-deployment-v1-apps","watch-list-deployment-v1-apps","watch-deployment-v1-apps","list-all-namespaces-deployment-v1-apps","list-deployment-v1-apps","read-deployment-v1-apps","-strong-read-operations-deployment-v1-apps-strong-","delete-collection-deployment-v1-apps","delete-deployment-v1-apps","replace-deployment-v1-apps","patch-deployment-v1-apps","create-deployment-v1-apps","-strong-write-operations-deployment-v1-apps-strong-","deployment-v1-apps","replace-status-daemonset-v1-apps","read-status-daemonset-v1-apps","patch-status-daemonset-v1-apps","-strong-status-operations-daemonset-v1-apps-strong-","watch-list-all-namespaces-daemonset-v1-apps","watch-list-daemonset-v1-apps","watch-daemonset-v1-apps","list-all-namespaces-daemonset-v1-apps","list-daemonset-v1-apps","read-daemonset-v1-apps","-strong-read-operations-daemonset-v1-apps-strong-","delete-collection-daemonset-v1-apps","delete-daemonset-v1-apps","replace-daemonset-v1-apps","patch-daemonset-v1-apps","create-daemonset-v1-apps","-strong-write-operations-daemonset-v1-apps-strong-","daemonset-v1-apps","replace-status-cronjob-v1-batch","read-status-cronjob-v1-batch","patch-status-cronjob-v
1-batch","-strong-status-operations-cronjob-v1-batch-strong-","watch-list-all-namespaces-cronjob-v1-batch","watch-list-cronjob-v1-batch","watch-cronjob-v1-batch","list-all-namespaces-cronjob-v1-batch","list-cronjob-v1-batch","read-cronjob-v1-batch","-strong-read-operations-cronjob-v1-batch-strong-","delete-collection-cronjob-v1-batch","delete-cronjob-v1-batch","replace-cronjob-v1-batch","patch-cronjob-v1-batch","create-cronjob-v1-batch","-strong-write-operations-cronjob-v1-batch-strong-","cronjob-v1-batch","container-v1-core","-strong-workloads-apis-strong-","-strong-api-groups-strong-","-strong-api-overview-strong-"]};})(); \ No newline at end of file diff --git a/static/images/announcements/kubecon-China-2021-white.svg b/static/images/announcements/kubecon-China-2021-white.svg new file mode 100644 index 0000000000..2d40cf9580 --- /dev/null +++ b/static/images/announcements/kubecon-China-2021-white.svg @@ -0,0 +1 @@ +KubeCon-China-2020-logos_white.svg diff --git a/static/images/announcements/kubecon-NA-2021-white.svg b/static/images/announcements/kubecon-NA-2021-white.svg new file mode 100644 index 0000000000..0e9840087d --- /dev/null +++ b/static/images/announcements/kubecon-NA-2021-white.svg @@ -0,0 +1 @@ +KubeCon_NA_2021_web_web-logo-white (1).svg diff --git a/static/images/blog/2021-08-04-kubernetes-release-1.22/kubernetes-1.22.png b/static/images/blog/2021-08-04-kubernetes-release-1.22/kubernetes-1.22.png new file mode 100644 index 0000000000..550b1ae811 Binary files /dev/null and b/static/images/blog/2021-08-04-kubernetes-release-1.22/kubernetes-1.22.png differ diff --git a/static/images/blog/2021-08-09-csi-windows-support-with-csi-proxy-reaches-ga/csi-proxy.png b/static/images/blog/2021-08-09-csi-windows-support-with-csi-proxy-reaches-ga/csi-proxy.png new file mode 100644 index 0000000000..7d1f42af94 Binary files /dev/null and b/static/images/blog/2021-08-09-csi-windows-support-with-csi-proxy-reaches-ga/csi-proxy.png differ diff --git 
a/static/images/blog/2021-08-11-memory-manager-moves-to-beta/MemoryManagerDiagram.svg b/static/images/blog/2021-08-11-memory-manager-moves-to-beta/MemoryManagerDiagram.svg new file mode 100644 index 0000000000..af22c48c52 --- /dev/null +++ b/static/images/blog/2021-08-11-memory-manager-moves-to-beta/MemoryManagerDiagram.svg @@ -0,0 +1,3 @@ + + +
    Kubelet
    Kubelet
    Topology Manager
    Topology Manager
    Memory
    Manager
    Memory...
    Memory Map
    Memory Map
    Admit()
    Admit()
    GetTopologyHints()
    GetTopologyHints()
    Calculates Affinity
    Calculates Affinity
    Hint
    Hint
    Allocate()
    Allocate()
    Updates Memory Map
    Updates Memory Map
    PreCreateContainer()
    PreCreateContainer()
    Gets Container Memory
     Allocation
    Gets Container Memory...
    Viewer does not support full SVG 1.1
    \ No newline at end of file diff --git a/static/images/blog/2021-08-11-memory-manager-moves-to-beta/ReservedMemory.svg b/static/images/blog/2021-08-11-memory-manager-moves-to-beta/ReservedMemory.svg new file mode 100644 index 0000000000..e89faf3156 --- /dev/null +++ b/static/images/blog/2021-08-11-memory-manager-moves-to-beta/ReservedMemory.svg @@ -0,0 +1,3 @@ + + +
    --kube-reserved memory=500Mi
    --system-reserved memory=500Mi
    --eviction-hard memory.available<100Mi
    --reserved-memory 0:memory=600Mi
    --reserved-memory 1:memory=500Mi
    --kube-reserved memory=500Mi--system-reserv...
    --kube-reserved memory=500Mi
    --system-reserved memory=500Mi
    --eviction-hard memory.available<100Mi
    --reserved-memory 0:memory=600Mi
    --reserved-memory 1:memory=600Mi
    --kube-reserved memory=500Mi--system-reserv...
    Viewer does not support full SVG 1.1
    \ No newline at end of file diff --git a/static/images/blog/2021-08-11-memory-manager-moves-to-beta/SingleCrossNUMAAllocation.svg b/static/images/blog/2021-08-11-memory-manager-moves-to-beta/SingleCrossNUMAAllocation.svg new file mode 100644 index 0000000000..3cc323311c --- /dev/null +++ b/static/images/blog/2021-08-11-memory-manager-moves-to-beta/SingleCrossNUMAAllocation.svg @@ -0,0 +1,3 @@ + + +
    NUMA 0
    8Gi

    NUMA 0...
    NUMA 1
    8Gi

    NUMA 1...
    container2
    requested: 10Gi
    container2...
    The container1 requested 5Gi of memory, but is using only 3Gi now
    The container1 requested 5...
    The container2 requested 10Gi of memory and is using 3.5Gi from the NUMA node 0 and 1Gi from the NUMA node 1
    The container2 requested 1...
    The container2
    uses the memory
    from
    the NUMA node 0
    that should be
    guaranteed for the
    container1
    The container2...
    uses: 1Gi
    uses: 1Gi
    uses: 3Gi
    uses: 3Gi
    uses: 3.5Gi
    uses: 3.5Gi
    container1
    requested: 5Gi

    container1...
    Viewer does not support full SVG 1.1
    \ No newline at end of file diff --git a/static/images/blog/2021-09-03-api-server-tracing/example-trace-1.png b/static/images/blog/2021-09-03-api-server-tracing/example-trace-1.png new file mode 100644 index 0000000000..659b765362 Binary files /dev/null and b/static/images/blog/2021-09-03-api-server-tracing/example-trace-1.png differ diff --git a/static/images/blog/2021-09-03-api-server-tracing/example-trace-2.png b/static/images/blog/2021-09-03-api-server-tracing/example-trace-2.png new file mode 100644 index 0000000000..4ca4644deb Binary files /dev/null and b/static/images/blog/2021-09-03-api-server-tracing/example-trace-2.png differ diff --git a/static/js/mermaid.min.js b/static/js/mermaid.min.js index fad6aa1f7d..a31c3e0dce 100644 --- a/static/js/mermaid.min.js +++ b/static/js/mermaid.min.js @@ -1,11 +1,4 @@ -!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.mermaid=e():t.mermaid=e()}("undefined"!=typeof self?self:this,(function(){return function(t){var e={};function n(r){if(e[r])return e[r].exports;var i=e[r]={i:r,l:!1,exports:{}};return t[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=t,n.c=e,n.d=function(t,e,r){n.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:r})},n.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},n.t=function(t,e){if(1&e&&(t=n(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var i in t)n.d(r,i,function(e){return t[e]}.bind(null,i));return r},n.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return n.d(e,"a",e),e},n.o=function(t,e){return 
Object.prototype.hasOwnProperty.call(t,e)},n.p="",n(n.s=901)}([function(t,e,n){"use strict";var r=function(t,e){return te?1:t>=e?0:NaN},i=function(t){var e;return 1===t.length&&(e=t,t=function(t,n){return r(e(t),n)}),{left:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[o],n)<0?r=o+1:i=o}return r},right:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[o],n)>0?i=o:r=o+1}return r}}};var o=i(r),a=o.right,u=o.left,s=a,c=function(t,e){null==e&&(e=f);for(var n=0,r=t.length-1,i=t[0],o=new Array(r<0?0:r);nt?1:e>=t?0:NaN},d=function(t){return null===t?NaN:+t},p=function(t,e){var n,r,i=t.length,o=0,a=-1,u=0,s=0;if(null==e)for(;++a1)return s/(o-1)},g=function(t,e){var n=p(t,e);return n?Math.sqrt(n):n},y=function(t,e){var n,r,i,o=t.length,a=-1;if(null==e){for(;++a=n)for(r=i=n;++an&&(r=n),i=n)for(r=i=n;++an&&(r=n),i0)return[t];if((r=e0)for(t=Math.ceil(t/a),e=Math.floor(e/a),o=new Array(i=Math.ceil(e-t+1));++u=0?(o>=k?10:o>=E?5:o>=A?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(o>=k?10:o>=E?5:o>=A?2:1)}function T(t,e,n){var r=Math.abs(e-t)/Math.max(0,n),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),o=r/i;return o>=k?i*=10:o>=E?i*=5:o>=A&&(i*=2),el;)h.pop(),--d;var p,g=new Array(d+1);for(i=0;i<=d;++i)(p=g[i]=[]).x0=i>0?h[i-1]:f,p.x1=i=1)return+n(t[r-1],r-1,t);var r,i=(r-1)*e,o=Math.floor(i),a=+n(t[o],o,t);return a+(+n(t[o+1],o+1,t)-a)*(i-o)}},N=function(t,e,n){return t=m.call(t,d).sort(r),Math.ceil((n-e)/(2*(C(t,.75)-C(t,.25))*Math.pow(t.length,-1/3)))},I=function(t,e,n){return Math.ceil((n-e)/(3.5*g(t)*Math.pow(t.length,-1/3)))},R=function(t,e){var n,r,i=t.length,o=-1;if(null==e){for(;++o=n)for(r=n;++or&&(r=n)}else for(;++o=n)for(r=n;++or&&(r=n);return r},j=function(t,e){var n,r=t.length,i=r,o=-1,a=0;if(null==e)for(;++o=0;)for(e=(r=t[i]).length;--e>=0;)n[--a]=r[e];return n},P=function(t,e){var n,r,i=t.length,o=-1;if(null==e){for(;++o=n)for(r=n;++on&&(r=n)}else for(;++o=n)for(r=n;++on&&(r=n);return r},F=function(t,e){for(var n=e.length,r=new 
Array(n);n--;)r[n]=t[e[n]];return r},q=function(t,e){if(n=t.length){var n,i,o=0,a=0,u=t[a];for(null==e&&(e=r);++ol&&T.push("'"+this.terminals_[A]+"'");D=p.showPosition?"Parse error on line "+(s+1)+":\n"+p.showPosition()+"\nExpecting "+T.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(s+1)+": Unexpected "+(_==h?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(D,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:b,expected:T})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+x+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),o.push(p.yylloc),n.push(k[1]),_=null,w?(_=w,w=null):(c=p.yyleng,u=p.yytext,s=p.yylineno,b=p.yylloc,f>0&&f--);break;case 2:if(S=this.productions_[k[1]][1],O.$=i[i.length-S],O._$={first_line:o[o.length-(S||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-(S||1)].first_column,last_column:o[o.length-1].last_column},v&&(O._$.range=[o[o.length-(S||1)].range[0],o[o.length-1].range[1]]),void 0!==(E=this.performAction.apply(O,[u,c,s,g.yy,k[1],i,o].concat(d))))return E;S&&(n=n.slice(0,-1*S*2),i=i.slice(0,-1*S),o=o.slice(0,-1*S)),n.push(this.productions_[k[1]][0]),i.push(O.$),o.push(O._$),M=a[n[n.length-2]][n[n.length-1]],n.push(M);break;case 3:return!0}}return!0}},S={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return 
this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var o in i)this[o]=i[o];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),o=0;oe[0].length)){if(e=n,r=o,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[o])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 5;case 1:case 2:case 3:case 4:break;case 5:return this.begin("ID"),10;case 6:return e.yytext=e.yytext.trim(),this.begin("ALIAS"),42;case 7:return this.popState(),this.popState(),this.begin("LINE"),12;case 8:return this.popState(),this.popState(),5;case 9:return this.begin("LINE"),21;case 10:return this.begin("LINE"),23;case 11:return this.begin("LINE"),24;case 12:return this.begin("LINE"),25;case 13:return this.begin("LINE"),30;case 14:return this.begin("LINE"),27;case 15:return this.begin("LINE"),29;case 16:return this.popState(),13;case 17:return 22;case 18:return 37;case 19:return 38;case 20:return 33;case 21:return 31;case 22:return this.begin("ID"),16;case 23:return this.begin("ID"),17;case 24:return 19;case 25:return 6;case 26:return 15;case 27:return 
36;case 28:return 5;case 29:return e.yytext=e.yytext.trim(),42;case 30:return 45;case 31:return 46;case 32:return 43;case 33:return 44;case 34:return 47;case 35:return 48;case 36:return 49;case 37:return 40;case 38:return 41;case 39:return 5;case 40:return"INVALID"}},rules:[/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:((?!\n)\s)+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:participant\b)/i,/^(?:[^\->:\n,;]+?(?=((?!\n)\s)+as(?!\n)\s|[#\n;]|$))/i,/^(?:as\b)/i,/^(?:(?:))/i,/^(?:loop\b)/i,/^(?:rect\b)/i,/^(?:opt\b)/i,/^(?:alt\b)/i,/^(?:else\b)/i,/^(?:par\b)/i,/^(?:and\b)/i,/^(?:[^#\n;]*)/i,/^(?:end\b)/i,/^(?:left of\b)/i,/^(?:right of\b)/i,/^(?:over\b)/i,/^(?:note\b)/i,/^(?:activate\b)/i,/^(?:deactivate\b)/i,/^(?:title\b)/i,/^(?:sequenceDiagram\b)/i,/^(?:autonumber\b)/i,/^(?:,)/i,/^(?:;)/i,/^(?:[^\+\->:\n,;]+)/i,/^(?:->>)/i,/^(?:-->>)/i,/^(?:->)/i,/^(?:-->)/i,/^(?:-[x])/i,/^(?:--[x])/i,/^(?::[^#\n;]+)/i,/^(?:\+)/i,/^(?:-)/i,/^(?:$)/i,/^(?:.)/i],conditions:{LINE:{rules:[2,3,16],inclusive:!1},ALIAS:{rules:[2,3,7,8],inclusive:!1},ID:{rules:[2,3,6],inclusive:!1},INITIAL:{rules:[0,1,3,4,5,9,10,11,12,13,14,15,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40],inclusive:!0}}};function M(){this.yy={}}return A.lexer=S,M.prototype=A,A.Parser=M,new M}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(54).readFileSync(n(55).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(17),n(14)(t))},function(t,e){"function"==typeof Object.create?t.exports=function(t,e){e&&(t.super_=e,t.prototype=Object.create(e.prototype,{constructor:{value:t,enumerable:!1,writable:!0,configurable:!0}}))}:t.exports=function(t,e){if(e){t.super_=e;var n=function(){};n.prototype=e.prototype,t.prototype=new n,t.prototype.constructor=t}}},function(t,e,n){var r=n(18),i=r.Buffer;function o(t,e){for(var n in t)e[n]=t[n]}function a(t,e,n){return 
i(t,e,n)}i.from&&i.alloc&&i.allocUnsafe&&i.allocUnsafeSlow?t.exports=r:(o(r,e),e.Buffer=a),a.prototype=Object.create(i.prototype),o(i,a),a.from=function(t,e,n){if("number"==typeof t)throw new TypeError("Argument must not be a number");return i(t,e,n)},a.alloc=function(t,e,n){if("number"!=typeof t)throw new TypeError("Argument must be a number");var r=i(t);return void 0!==e?"string"==typeof n?r.fill(e,n):r.fill(e):r.fill(0),r},a.allocUnsafe=function(t){if("number"!=typeof t)throw new TypeError("Argument must be a number");return i(t)},a.allocUnsafeSlow=function(t){if("number"!=typeof t)throw new TypeError("Argument must be a number");return r.SlowBuffer(t)}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=new Date,i=new Date;function o(t,e,n,a){function u(e){return t(e=0===arguments.length?new Date:new Date(+e)),e}return u.floor=function(e){return t(e=new Date(+e)),e},u.ceil=function(n){return t(n=new Date(n-1)),e(n,1),t(n),n},u.round=function(t){var e=u(t),n=u.ceil(t);return t-e0))return a;do{a.push(o=new Date(+n)),e(n,i),t(n)}while(o=e)for(;t(e),!n(e);)e.setTime(e-1)}),(function(t,r){if(t>=t)if(r<0)for(;++r<=0;)for(;e(t,-1),!n(t););else for(;--r>=0;)for(;e(t,1),!n(t););}))},n&&(u.count=function(e,o){return r.setTime(+e),i.setTime(+o),t(r),t(i),Math.floor(n(r,i))},u.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?u.filter(a?function(e){return a(e)%t==0}:function(e){return u.count(0,e)%t==0}):u:null}),u}},function(t,e,n){"use strict";n.d(e,"d",(function(){return r})),n.d(e,"c",(function(){return i})),n.d(e,"b",(function(){return o})),n.d(e,"a",(function(){return a})),n.d(e,"e",(function(){return u}));var r=1e3,i=6e4,o=36e5,a=864e5,u=6048e5},function(t,e,n){"use strict";n.d(e,"c",(function(){return o})),n.d(e,"b",(function(){return a})),n.d(e,"a",(function(){return u}));var r=n(115);function i(t,e){return function(n){return t+n*e}}function o(t,e){var n=e-t;return 
n?i(t,n>180||n<-180?n-360*Math.round(n/360):n):Object(r.a)(isNaN(t)?e:t)}function a(t){return 1==(t=+t)?u:function(e,n){return n-e?function(t,e,n){return t=Math.pow(t,n),e=Math.pow(e,n)-t,n=1/n,function(r){return Math.pow(t+r*e,n)}}(e,n,t):Object(r.a)(isNaN(e)?n:e)}}function u(t,e){var n=e-t;return n?i(t,n):Object(r.a)(isNaN(t)?e:t)}},function(t,e,n){var r;try{r={cloneDeep:n(681),constant:n(251),defaults:n(387),each:n(252),filter:n(361),find:n(682),flatten:n(389),forEach:n(359),forIn:n(687),has:n(258),isUndefined:n(372),last:n(688),map:n(373),mapValues:n(689),max:n(690),merge:n(692),min:n(697),minBy:n(698),now:n(699),pick:n(394),range:n(395),reduce:n(375),sortBy:n(706),uniqueId:n(396),values:n(380),zipObject:n(711)}}catch(t){}r||(r=window._),t.exports=r},function(t,e,n){var r;try{r={cloneDeep:n(752),constant:n(234),defaults:n(753),each:n(311),filter:n(314),find:n(754),flatten:n(403),forEach:n(312),forIn:n(759),has:n(325),isUndefined:n(326),last:n(760),map:n(327),mapValues:n(761),max:n(762),merge:n(764),min:n(770),minBy:n(771),now:n(772),pick:n(773),range:n(778),reduce:n(329),sortBy:n(781),uniqueId:n(786),values:n(334),zipObject:n(787)}}catch(t){}r||(r=window._),t.exports=r},function(t,e,n){"use strict";n.d(e,"g",(function(){return a})),n.d(e,"c",(function(){return u})),n.d(e,"k",(function(){return s})),n.d(e,"m",(function(){return c})),n.d(e,"i",(function(){return f})),n.d(e,"a",(function(){return l})),n.d(e,"e",(function(){return h})),n.d(e,"h",(function(){return d})),n.d(e,"d",(function(){return p})),n.d(e,"l",(function(){return g})),n.d(e,"n",(function(){return y})),n.d(e,"j",(function(){return b})),n.d(e,"b",(function(){return v})),n.d(e,"f",(function(){return m}));var r=n(4),i=n(5);function o(t){return Object(r.a)((function(e){e.setDate(e.getDate()-(e.getDay()+7-t)%7),e.setHours(0,0,0,0)}),(function(t,e){t.setDate(t.getDate()+7*e)}),(function(t,e){return(e-t-(e.getTimezoneOffset()-t.getTimezoneOffset())*i.c)/i.e}))}var 
a=o(0),u=o(1),s=o(2),c=o(3),f=o(4),l=o(5),h=o(6),d=a.range,p=u.range,g=s.range,y=c.range,b=f.range,v=l.range,m=h.range},function(t,e,n){"use strict";n.d(e,"g",(function(){return a})),n.d(e,"c",(function(){return u})),n.d(e,"k",(function(){return s})),n.d(e,"m",(function(){return c})),n.d(e,"i",(function(){return f})),n.d(e,"a",(function(){return l})),n.d(e,"e",(function(){return h})),n.d(e,"h",(function(){return d})),n.d(e,"d",(function(){return p})),n.d(e,"l",(function(){return g})),n.d(e,"n",(function(){return y})),n.d(e,"j",(function(){return b})),n.d(e,"b",(function(){return v})),n.d(e,"f",(function(){return m}));var r=n(4),i=n(5);function o(t){return Object(r.a)((function(e){e.setUTCDate(e.getUTCDate()-(e.getUTCDay()+7-t)%7),e.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+7*e)}),(function(t,e){return(e-t)/i.e}))}var a=o(0),u=o(1),s=o(2),c=o(3),f=o(4),l=o(5),h=o(6),d=a.range,p=u.range,g=s.range,y=c.range,b=f.range,v=l.range,m=h.range},function(t,e,n){"use strict";n.d(e,"a",(function(){return i})),n.d(e,"d",(function(){return o})),n.d(e,"c",(function(){return a})),n.d(e,"e",(function(){return _})),n.d(e,"h",(function(){return k})),n.d(e,"g",(function(){return E})),n.d(e,"b",(function(){return A})),n.d(e,"f",(function(){return C}));var r=n(24);function i(){}var o=.7,a=1/o,u="\\s*([+-]?\\d+)\\s*",s="\\s*([+-]?\\d*\\.?\\d+(?:[eE][+-]?\\d+)?)\\s*",c="\\s*([+-]?\\d*\\.?\\d+(?:[eE][+-]?\\d+)?)%\\s*",f=/^#([0-9a-f]{3,8})$/,l=new RegExp("^rgb\\("+[u,u,u]+"\\)$"),h=new RegExp("^rgb\\("+[c,c,c]+"\\)$"),d=new RegExp("^rgba\\("+[u,u,u,s]+"\\)$"),p=new RegExp("^rgba\\("+[c,c,c,s]+"\\)$"),g=new RegExp("^hsl\\("+[s,c,c]+"\\)$"),y=new 
RegExp("^hsla\\("+[s,c,c,s]+"\\)$"),b={aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:1675392
0,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074};function v(){return this.rgb().formatHex()}function m(){return this.rgb().formatRgb()}function _(t){var e,n;return t=(t+"").trim().toLowerCase(),(e=f.exec(t))?(n=e[1].length,e=parseInt(e[1],16),6===n?w(e):3===n?new A(e>>8&15|e>>4&240,e>>4&15|240&e,(15&e)<<4|15&e,1):8===n?new A(e>>24&255,e>>16&255,e>>8&255,(255&e)/255):4===n?new A(e>>12&15|e>>8&240,e>>8&15|e>>4&240,e>>4&15|240&e,((15&e)<<4|15&e)/255):null):(e=l.exec(t))?new A(e[1],e[2],e[3],1):(e=h.exec(t))?new A(255*e[1]/100,255*e[2]/100,255*e[3]/100,1):(e=d.exec(t))?x(e[1],e[2],e[3],e[4]):(e=p.exec(t))?x(255*e[1]/100,255*e[2]/100,255*e[3]/100,e[4]):(e=g.exec(t))?O(e[1],e[2]/100,e[3]/100,1):(e=y.exec(t))?O(e[1],e[2]/100,e[3]/100,e[4]):b.hasOwnProperty(t)?w(b[t]):"transparent"===t?new A(NaN,NaN,NaN,0):null}function w(t){return new A(t>>16&255,t>>8&255,255&t,1)}function x(t,e,n,r){return r<=0&&(t=e=n=NaN),new A(t,e,n,r)}function k(t){return t instanceof i||(t=_(t)),t?new A((t=t.rgb()).r,t.g,t.b,t.opacity):new A}function E(t,e,n,r){return 1===arguments.length?k(t):new A(t,e,n,null==r?1:r)}function A(t,e,n,r){this.r=+t,this.g=+e,this.b=+n,this.opacity=+r}function S(){return"#"+T(this.r)+T(this.g)+T(this.b)}function M(){var 
t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function T(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function O(t,e,n,r){return r<=0?t=e=n=NaN:n<=0||n>=1?t=e=NaN:e<=0&&(t=NaN),new N(t,e,n,r)}function D(t){if(t instanceof N)return new N(t.h,t.s,t.l,t.opacity);if(t instanceof i||(t=_(t)),!t)return new N;if(t instanceof N)return t;var e=(t=t.rgb()).r/255,n=t.g/255,r=t.b/255,o=Math.min(e,n,r),a=Math.max(e,n,r),u=NaN,s=a-o,c=(a+o)/2;return s?(u=e===a?(n-r)/s+6*(n0&&c<1?0:u,new N(u,s,c,t.opacity)}function C(t,e,n,r){return 1===arguments.length?D(t):new N(t,e,n,null==r?1:r)}function N(t,e,n,r){this.h=+t,this.s=+e,this.l=+n,this.opacity=+r}function I(t,e,n){return 255*(t<60?e+(n-e)*t/60:t<180?n:t<240?e+(n-e)*(240-t)/60:e)}Object(r.a)(i,_,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:v,formatHex:v,formatHsl:function(){return D(this).formatHsl()},formatRgb:m,toString:m}),Object(r.a)(A,E,Object(r.b)(i,{brighter:function(t){return t=null==t?a:Math.pow(a,t),new A(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?o:Math.pow(o,t),new A(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:S,formatHex:S,formatRgb:M,toString:M})),Object(r.a)(N,C,Object(r.b)(i,{brighter:function(t){return t=null==t?a:Math.pow(a,t),new N(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?o:Math.pow(o,t),new N(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var 
t=this.h%360+360*(this.h<0),e=isNaN(t)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*e,i=2*n-r;return new A(I(t>=240?t-240:t+120,i,r),I(t,i,r),I(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}))},function(t,e,n){(function(t){!function(t,e){"use strict";function r(t,e){if(!t)throw new Error(e||"Assertion failed")}function i(t,e){t.super_=e;var n=function(){};n.prototype=e.prototype,t.prototype=new n,t.prototype.constructor=t}function o(t,e,n){if(o.isBN(t))return t;this.negative=0,this.words=null,this.length=0,this.red=null,null!==t&&("le"!==e&&"be"!==e||(n=e,e=10),this._init(t||0,e||10,n||"be"))}var a;"object"==typeof t?t.exports=o:e.BN=o,o.BN=o,o.wordSize=26;try{a=n(849).Buffer}catch(t){}function u(t,e,n){for(var r=0,i=Math.min(t.length,n),o=e;o=49&&a<=54?a-49+10:a>=17&&a<=22?a-17+10:15&a}return r}function s(t,e,n,r){for(var i=0,o=Math.min(t.length,n),a=e;a=49?u-49+10:u>=17?u-17+10:u}return i}o.isBN=function(t){return t instanceof o||null!==t&&"object"==typeof t&&t.constructor.wordSize===o.wordSize&&Array.isArray(t.words)},o.max=function(t,e){return t.cmp(e)>0?t:e},o.min=function(t,e){return t.cmp(e)<0?t:e},o.prototype._init=function(t,e,n){if("number"==typeof t)return this._initNumber(t,e,n);if("object"==typeof t)return this._initArray(t,e,n);"hex"===e&&(e=16),r(e===(0|e)&&e>=2&&e<=36);var 
i=0;"-"===(t=t.toString().replace(/\s+/g,""))[0]&&i++,16===e?this._parseHex(t,i):this._parseBase(t,e,i),"-"===t[0]&&(this.negative=1),this.strip(),"le"===n&&this._initArray(this.toArray(),e,n)},o.prototype._initNumber=function(t,e,n){t<0&&(this.negative=1,t=-t),t<67108864?(this.words=[67108863&t],this.length=1):t<4503599627370496?(this.words=[67108863&t,t/67108864&67108863],this.length=2):(r(t<9007199254740992),this.words=[67108863&t,t/67108864&67108863,1],this.length=3),"le"===n&&this._initArray(this.toArray(),e,n)},o.prototype._initArray=function(t,e,n){if(r("number"==typeof t.length),t.length<=0)return this.words=[0],this.length=1,this;this.length=Math.ceil(t.length/3),this.words=new Array(this.length);for(var i=0;i=0;i-=3)a=t[i]|t[i-1]<<8|t[i-2]<<16,this.words[o]|=a<>>26-u&67108863,(u+=24)>=26&&(u-=26,o++);else if("le"===n)for(i=0,o=0;i>>26-u&67108863,(u+=24)>=26&&(u-=26,o++);return this.strip()},o.prototype._parseHex=function(t,e){this.length=Math.ceil((t.length-e)/6),this.words=new Array(this.length);for(var n=0;n=e;n-=6)i=u(t,n,n+6),this.words[r]|=i<>>26-o&4194303,(o+=24)>=26&&(o-=26,r++);n+6!==e&&(i=u(t,e,n+6),this.words[r]|=i<>>26-o&4194303),this.strip()},o.prototype._parseBase=function(t,e,n){this.words=[0],this.length=1;for(var r=0,i=1;i<=67108863;i*=e)r++;r--,i=i/e|0;for(var o=t.length-n,a=o%r,u=Math.min(o,o-a)+n,c=0,f=n;f1&&0===this.words[this.length-1];)this.length--;return this._normSign()},o.prototype._normSign=function(){return 1===this.length&&0===this.words[0]&&(this.negative=0),this},o.prototype.inspect=function(){return(this.red?""};var 
c=["","0","00","000","0000","00000","000000","0000000","00000000","000000000","0000000000","00000000000","000000000000","0000000000000","00000000000000","000000000000000","0000000000000000","00000000000000000","000000000000000000","0000000000000000000","00000000000000000000","000000000000000000000","0000000000000000000000","00000000000000000000000","000000000000000000000000","0000000000000000000000000"],f=[0,0,25,16,12,11,10,9,8,8,7,7,7,7,6,6,6,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5],l=[0,0,33554432,43046721,16777216,48828125,60466176,40353607,16777216,43046721,1e7,19487171,35831808,62748517,7529536,11390625,16777216,24137569,34012224,47045881,64e6,4084101,5153632,6436343,7962624,9765625,11881376,14348907,17210368,20511149,243e5,28629151,33554432,39135393,45435424,52521875,60466176];function h(t,e,n){n.negative=e.negative^t.negative;var r=t.length+e.length|0;n.length=r,r=r-1|0;var i=0|t.words[0],o=0|e.words[0],a=i*o,u=67108863&a,s=a/67108864|0;n.words[0]=u;for(var c=1;c>>26,l=67108863&s,h=Math.min(c,e.length-1),d=Math.max(0,c-t.length+1);d<=h;d++){var p=c-d|0;f+=(a=(i=0|t.words[p])*(o=0|e.words[d])+l)/67108864|0,l=67108863&a}n.words[c]=0|l,s=0|f}return 0!==s?n.words[c]=0|s:n.length--,n.strip()}o.prototype.toString=function(t,e){var n;if(e=0|e||1,16===(t=t||10)||"hex"===t){n="";for(var i=0,o=0,a=0;a>>24-i&16777215)||a!==this.length-1?c[6-s.length]+s+n:s+n,(i+=2)>=26&&(i-=26,a--)}for(0!==o&&(n=o.toString(16)+n);n.length%e!=0;)n="0"+n;return 0!==this.negative&&(n="-"+n),n}if(t===(0|t)&&t>=2&&t<=36){var h=f[t],d=l[t];n="";var p=this.clone();for(p.negative=0;!p.isZero();){var g=p.modn(d).toString(t);n=(p=p.idivn(d)).isZero()?g+n:c[h-g.length]+g+n}for(this.isZero()&&(n="0"+n);n.length%e!=0;)n="0"+n;return 0!==this.negative&&(n="-"+n),n}r(!1,"Base should be between 2 and 36")},o.prototype.toNumber=function(){var t=this.words[0];return 
2===this.length?t+=67108864*this.words[1]:3===this.length&&1===this.words[2]?t+=4503599627370496+67108864*this.words[1]:this.length>2&&r(!1,"Number can only safely store up to 53 bits"),0!==this.negative?-t:t},o.prototype.toJSON=function(){return this.toString(16)},o.prototype.toBuffer=function(t,e){return r(void 0!==a),this.toArrayLike(a,t,e)},o.prototype.toArray=function(t,e){return this.toArrayLike(Array,t,e)},o.prototype.toArrayLike=function(t,e,n){var i=this.byteLength(),o=n||Math.max(1,i);r(i<=o,"byte array longer than desired length"),r(o>0,"Requested array length <= 0"),this.strip();var a,u,s="le"===e,c=new t(o),f=this.clone();if(s){for(u=0;!f.isZero();u++)a=f.andln(255),f.iushrn(8),c[u]=a;for(;u=4096&&(n+=13,e>>>=13),e>=64&&(n+=7,e>>>=7),e>=8&&(n+=4,e>>>=4),e>=2&&(n+=2,e>>>=2),n+e},o.prototype._zeroBits=function(t){if(0===t)return 26;var e=t,n=0;return 0==(8191&e)&&(n+=13,e>>>=13),0==(127&e)&&(n+=7,e>>>=7),0==(15&e)&&(n+=4,e>>>=4),0==(3&e)&&(n+=2,e>>>=2),0==(1&e)&&n++,n},o.prototype.bitLength=function(){var t=this.words[this.length-1],e=this._countBits(t);return 26*(this.length-1)+e},o.prototype.zeroBits=function(){if(this.isZero())return 0;for(var t=0,e=0;et.length?this.clone().ior(t):t.clone().ior(this)},o.prototype.uor=function(t){return this.length>t.length?this.clone().iuor(t):t.clone().iuor(this)},o.prototype.iuand=function(t){var e;e=this.length>t.length?t:this;for(var n=0;nt.length?this.clone().iand(t):t.clone().iand(this)},o.prototype.uand=function(t){return this.length>t.length?this.clone().iuand(t):t.clone().iuand(this)},o.prototype.iuxor=function(t){var e,n;this.length>t.length?(e=this,n=t):(e=t,n=this);for(var r=0;rt.length?this.clone().ixor(t):t.clone().ixor(this)},o.prototype.uxor=function(t){return this.length>t.length?this.clone().iuxor(t):t.clone().iuxor(this)},o.prototype.inotn=function(t){r("number"==typeof t&&t>=0);var e=0|Math.ceil(t/26),n=t%26;this._expand(e),n>0&&e--;for(var 
i=0;i0&&(this.words[i]=~this.words[i]&67108863>>26-n),this.strip()},o.prototype.notn=function(t){return this.clone().inotn(t)},o.prototype.setn=function(t,e){r("number"==typeof t&&t>=0);var n=t/26|0,i=t%26;return this._expand(n+1),this.words[n]=e?this.words[n]|1<t.length?(n=this,r=t):(n=t,r=this);for(var i=0,o=0;o>>26;for(;0!==i&&o>>26;if(this.length=n.length,0!==i)this.words[this.length]=i,this.length++;else if(n!==this)for(;ot.length?this.clone().iadd(t):t.clone().iadd(this)},o.prototype.isub=function(t){if(0!==t.negative){t.negative=0;var e=this.iadd(t);return t.negative=1,e._normSign()}if(0!==this.negative)return this.negative=0,this.iadd(t),this.negative=1,this._normSign();var n,r,i=this.cmp(t);if(0===i)return this.negative=0,this.length=1,this.words[0]=0,this;i>0?(n=this,r=t):(n=t,r=this);for(var o=0,a=0;a>26,this.words[a]=67108863&e;for(;0!==o&&a>26,this.words[a]=67108863&e;if(0===o&&a>>13,d=0|a[1],p=8191&d,g=d>>>13,y=0|a[2],b=8191&y,v=y>>>13,m=0|a[3],_=8191&m,w=m>>>13,x=0|a[4],k=8191&x,E=x>>>13,A=0|a[5],S=8191&A,M=A>>>13,T=0|a[6],O=8191&T,D=T>>>13,C=0|a[7],N=8191&C,I=C>>>13,R=0|a[8],j=8191&R,L=R>>>13,B=0|a[9],P=8191&B,F=B>>>13,q=0|u[0],U=8191&q,z=q>>>13,Y=0|u[1],V=8191&Y,G=Y>>>13,H=0|u[2],W=8191&H,$=H>>>13,K=0|u[3],Z=8191&K,X=K>>>13,J=0|u[4],Q=8191&J,tt=J>>>13,et=0|u[5],nt=8191&et,rt=et>>>13,it=0|u[6],ot=8191&it,at=it>>>13,ut=0|u[7],st=8191&ut,ct=ut>>>13,ft=0|u[8],lt=8191&ft,ht=ft>>>13,dt=0|u[9],pt=8191&dt,gt=dt>>>13;n.negative=t.negative^e.negative,n.length=19;var yt=(c+(r=Math.imul(l,U))|0)+((8191&(i=(i=Math.imul(l,z))+Math.imul(h,U)|0))<<13)|0;c=((o=Math.imul(h,z))+(i>>>13)|0)+(yt>>>26)|0,yt&=67108863,r=Math.imul(p,U),i=(i=Math.imul(p,z))+Math.imul(g,U)|0,o=Math.imul(g,z);var 
bt=(c+(r=r+Math.imul(l,V)|0)|0)+((8191&(i=(i=i+Math.imul(l,G)|0)+Math.imul(h,V)|0))<<13)|0;c=((o=o+Math.imul(h,G)|0)+(i>>>13)|0)+(bt>>>26)|0,bt&=67108863,r=Math.imul(b,U),i=(i=Math.imul(b,z))+Math.imul(v,U)|0,o=Math.imul(v,z),r=r+Math.imul(p,V)|0,i=(i=i+Math.imul(p,G)|0)+Math.imul(g,V)|0,o=o+Math.imul(g,G)|0;var vt=(c+(r=r+Math.imul(l,W)|0)|0)+((8191&(i=(i=i+Math.imul(l,$)|0)+Math.imul(h,W)|0))<<13)|0;c=((o=o+Math.imul(h,$)|0)+(i>>>13)|0)+(vt>>>26)|0,vt&=67108863,r=Math.imul(_,U),i=(i=Math.imul(_,z))+Math.imul(w,U)|0,o=Math.imul(w,z),r=r+Math.imul(b,V)|0,i=(i=i+Math.imul(b,G)|0)+Math.imul(v,V)|0,o=o+Math.imul(v,G)|0,r=r+Math.imul(p,W)|0,i=(i=i+Math.imul(p,$)|0)+Math.imul(g,W)|0,o=o+Math.imul(g,$)|0;var mt=(c+(r=r+Math.imul(l,Z)|0)|0)+((8191&(i=(i=i+Math.imul(l,X)|0)+Math.imul(h,Z)|0))<<13)|0;c=((o=o+Math.imul(h,X)|0)+(i>>>13)|0)+(mt>>>26)|0,mt&=67108863,r=Math.imul(k,U),i=(i=Math.imul(k,z))+Math.imul(E,U)|0,o=Math.imul(E,z),r=r+Math.imul(_,V)|0,i=(i=i+Math.imul(_,G)|0)+Math.imul(w,V)|0,o=o+Math.imul(w,G)|0,r=r+Math.imul(b,W)|0,i=(i=i+Math.imul(b,$)|0)+Math.imul(v,W)|0,o=o+Math.imul(v,$)|0,r=r+Math.imul(p,Z)|0,i=(i=i+Math.imul(p,X)|0)+Math.imul(g,Z)|0,o=o+Math.imul(g,X)|0;var _t=(c+(r=r+Math.imul(l,Q)|0)|0)+((8191&(i=(i=i+Math.imul(l,tt)|0)+Math.imul(h,Q)|0))<<13)|0;c=((o=o+Math.imul(h,tt)|0)+(i>>>13)|0)+(_t>>>26)|0,_t&=67108863,r=Math.imul(S,U),i=(i=Math.imul(S,z))+Math.imul(M,U)|0,o=Math.imul(M,z),r=r+Math.imul(k,V)|0,i=(i=i+Math.imul(k,G)|0)+Math.imul(E,V)|0,o=o+Math.imul(E,G)|0,r=r+Math.imul(_,W)|0,i=(i=i+Math.imul(_,$)|0)+Math.imul(w,W)|0,o=o+Math.imul(w,$)|0,r=r+Math.imul(b,Z)|0,i=(i=i+Math.imul(b,X)|0)+Math.imul(v,Z)|0,o=o+Math.imul(v,X)|0,r=r+Math.imul(p,Q)|0,i=(i=i+Math.imul(p,tt)|0)+Math.imul(g,Q)|0,o=o+Math.imul(g,tt)|0;var 
wt=(c+(r=r+Math.imul(l,nt)|0)|0)+((8191&(i=(i=i+Math.imul(l,rt)|0)+Math.imul(h,nt)|0))<<13)|0;c=((o=o+Math.imul(h,rt)|0)+(i>>>13)|0)+(wt>>>26)|0,wt&=67108863,r=Math.imul(O,U),i=(i=Math.imul(O,z))+Math.imul(D,U)|0,o=Math.imul(D,z),r=r+Math.imul(S,V)|0,i=(i=i+Math.imul(S,G)|0)+Math.imul(M,V)|0,o=o+Math.imul(M,G)|0,r=r+Math.imul(k,W)|0,i=(i=i+Math.imul(k,$)|0)+Math.imul(E,W)|0,o=o+Math.imul(E,$)|0,r=r+Math.imul(_,Z)|0,i=(i=i+Math.imul(_,X)|0)+Math.imul(w,Z)|0,o=o+Math.imul(w,X)|0,r=r+Math.imul(b,Q)|0,i=(i=i+Math.imul(b,tt)|0)+Math.imul(v,Q)|0,o=o+Math.imul(v,tt)|0,r=r+Math.imul(p,nt)|0,i=(i=i+Math.imul(p,rt)|0)+Math.imul(g,nt)|0,o=o+Math.imul(g,rt)|0;var xt=(c+(r=r+Math.imul(l,ot)|0)|0)+((8191&(i=(i=i+Math.imul(l,at)|0)+Math.imul(h,ot)|0))<<13)|0;c=((o=o+Math.imul(h,at)|0)+(i>>>13)|0)+(xt>>>26)|0,xt&=67108863,r=Math.imul(N,U),i=(i=Math.imul(N,z))+Math.imul(I,U)|0,o=Math.imul(I,z),r=r+Math.imul(O,V)|0,i=(i=i+Math.imul(O,G)|0)+Math.imul(D,V)|0,o=o+Math.imul(D,G)|0,r=r+Math.imul(S,W)|0,i=(i=i+Math.imul(S,$)|0)+Math.imul(M,W)|0,o=o+Math.imul(M,$)|0,r=r+Math.imul(k,Z)|0,i=(i=i+Math.imul(k,X)|0)+Math.imul(E,Z)|0,o=o+Math.imul(E,X)|0,r=r+Math.imul(_,Q)|0,i=(i=i+Math.imul(_,tt)|0)+Math.imul(w,Q)|0,o=o+Math.imul(w,tt)|0,r=r+Math.imul(b,nt)|0,i=(i=i+Math.imul(b,rt)|0)+Math.imul(v,nt)|0,o=o+Math.imul(v,rt)|0,r=r+Math.imul(p,ot)|0,i=(i=i+Math.imul(p,at)|0)+Math.imul(g,ot)|0,o=o+Math.imul(g,at)|0;var 
kt=(c+(r=r+Math.imul(l,st)|0)|0)+((8191&(i=(i=i+Math.imul(l,ct)|0)+Math.imul(h,st)|0))<<13)|0;c=((o=o+Math.imul(h,ct)|0)+(i>>>13)|0)+(kt>>>26)|0,kt&=67108863,r=Math.imul(j,U),i=(i=Math.imul(j,z))+Math.imul(L,U)|0,o=Math.imul(L,z),r=r+Math.imul(N,V)|0,i=(i=i+Math.imul(N,G)|0)+Math.imul(I,V)|0,o=o+Math.imul(I,G)|0,r=r+Math.imul(O,W)|0,i=(i=i+Math.imul(O,$)|0)+Math.imul(D,W)|0,o=o+Math.imul(D,$)|0,r=r+Math.imul(S,Z)|0,i=(i=i+Math.imul(S,X)|0)+Math.imul(M,Z)|0,o=o+Math.imul(M,X)|0,r=r+Math.imul(k,Q)|0,i=(i=i+Math.imul(k,tt)|0)+Math.imul(E,Q)|0,o=o+Math.imul(E,tt)|0,r=r+Math.imul(_,nt)|0,i=(i=i+Math.imul(_,rt)|0)+Math.imul(w,nt)|0,o=o+Math.imul(w,rt)|0,r=r+Math.imul(b,ot)|0,i=(i=i+Math.imul(b,at)|0)+Math.imul(v,ot)|0,o=o+Math.imul(v,at)|0,r=r+Math.imul(p,st)|0,i=(i=i+Math.imul(p,ct)|0)+Math.imul(g,st)|0,o=o+Math.imul(g,ct)|0;var Et=(c+(r=r+Math.imul(l,lt)|0)|0)+((8191&(i=(i=i+Math.imul(l,ht)|0)+Math.imul(h,lt)|0))<<13)|0;c=((o=o+Math.imul(h,ht)|0)+(i>>>13)|0)+(Et>>>26)|0,Et&=67108863,r=Math.imul(P,U),i=(i=Math.imul(P,z))+Math.imul(F,U)|0,o=Math.imul(F,z),r=r+Math.imul(j,V)|0,i=(i=i+Math.imul(j,G)|0)+Math.imul(L,V)|0,o=o+Math.imul(L,G)|0,r=r+Math.imul(N,W)|0,i=(i=i+Math.imul(N,$)|0)+Math.imul(I,W)|0,o=o+Math.imul(I,$)|0,r=r+Math.imul(O,Z)|0,i=(i=i+Math.imul(O,X)|0)+Math.imul(D,Z)|0,o=o+Math.imul(D,X)|0,r=r+Math.imul(S,Q)|0,i=(i=i+Math.imul(S,tt)|0)+Math.imul(M,Q)|0,o=o+Math.imul(M,tt)|0,r=r+Math.imul(k,nt)|0,i=(i=i+Math.imul(k,rt)|0)+Math.imul(E,nt)|0,o=o+Math.imul(E,rt)|0,r=r+Math.imul(_,ot)|0,i=(i=i+Math.imul(_,at)|0)+Math.imul(w,ot)|0,o=o+Math.imul(w,at)|0,r=r+Math.imul(b,st)|0,i=(i=i+Math.imul(b,ct)|0)+Math.imul(v,st)|0,o=o+Math.imul(v,ct)|0,r=r+Math.imul(p,lt)|0,i=(i=i+Math.imul(p,ht)|0)+Math.imul(g,lt)|0,o=o+Math.imul(g,ht)|0;var 
At=(c+(r=r+Math.imul(l,pt)|0)|0)+((8191&(i=(i=i+Math.imul(l,gt)|0)+Math.imul(h,pt)|0))<<13)|0;c=((o=o+Math.imul(h,gt)|0)+(i>>>13)|0)+(At>>>26)|0,At&=67108863,r=Math.imul(P,V),i=(i=Math.imul(P,G))+Math.imul(F,V)|0,o=Math.imul(F,G),r=r+Math.imul(j,W)|0,i=(i=i+Math.imul(j,$)|0)+Math.imul(L,W)|0,o=o+Math.imul(L,$)|0,r=r+Math.imul(N,Z)|0,i=(i=i+Math.imul(N,X)|0)+Math.imul(I,Z)|0,o=o+Math.imul(I,X)|0,r=r+Math.imul(O,Q)|0,i=(i=i+Math.imul(O,tt)|0)+Math.imul(D,Q)|0,o=o+Math.imul(D,tt)|0,r=r+Math.imul(S,nt)|0,i=(i=i+Math.imul(S,rt)|0)+Math.imul(M,nt)|0,o=o+Math.imul(M,rt)|0,r=r+Math.imul(k,ot)|0,i=(i=i+Math.imul(k,at)|0)+Math.imul(E,ot)|0,o=o+Math.imul(E,at)|0,r=r+Math.imul(_,st)|0,i=(i=i+Math.imul(_,ct)|0)+Math.imul(w,st)|0,o=o+Math.imul(w,ct)|0,r=r+Math.imul(b,lt)|0,i=(i=i+Math.imul(b,ht)|0)+Math.imul(v,lt)|0,o=o+Math.imul(v,ht)|0;var St=(c+(r=r+Math.imul(p,pt)|0)|0)+((8191&(i=(i=i+Math.imul(p,gt)|0)+Math.imul(g,pt)|0))<<13)|0;c=((o=o+Math.imul(g,gt)|0)+(i>>>13)|0)+(St>>>26)|0,St&=67108863,r=Math.imul(P,W),i=(i=Math.imul(P,$))+Math.imul(F,W)|0,o=Math.imul(F,$),r=r+Math.imul(j,Z)|0,i=(i=i+Math.imul(j,X)|0)+Math.imul(L,Z)|0,o=o+Math.imul(L,X)|0,r=r+Math.imul(N,Q)|0,i=(i=i+Math.imul(N,tt)|0)+Math.imul(I,Q)|0,o=o+Math.imul(I,tt)|0,r=r+Math.imul(O,nt)|0,i=(i=i+Math.imul(O,rt)|0)+Math.imul(D,nt)|0,o=o+Math.imul(D,rt)|0,r=r+Math.imul(S,ot)|0,i=(i=i+Math.imul(S,at)|0)+Math.imul(M,ot)|0,o=o+Math.imul(M,at)|0,r=r+Math.imul(k,st)|0,i=(i=i+Math.imul(k,ct)|0)+Math.imul(E,st)|0,o=o+Math.imul(E,ct)|0,r=r+Math.imul(_,lt)|0,i=(i=i+Math.imul(_,ht)|0)+Math.imul(w,lt)|0,o=o+Math.imul(w,ht)|0;var 
Mt=(c+(r=r+Math.imul(b,pt)|0)|0)+((8191&(i=(i=i+Math.imul(b,gt)|0)+Math.imul(v,pt)|0))<<13)|0;c=((o=o+Math.imul(v,gt)|0)+(i>>>13)|0)+(Mt>>>26)|0,Mt&=67108863,r=Math.imul(P,Z),i=(i=Math.imul(P,X))+Math.imul(F,Z)|0,o=Math.imul(F,X),r=r+Math.imul(j,Q)|0,i=(i=i+Math.imul(j,tt)|0)+Math.imul(L,Q)|0,o=o+Math.imul(L,tt)|0,r=r+Math.imul(N,nt)|0,i=(i=i+Math.imul(N,rt)|0)+Math.imul(I,nt)|0,o=o+Math.imul(I,rt)|0,r=r+Math.imul(O,ot)|0,i=(i=i+Math.imul(O,at)|0)+Math.imul(D,ot)|0,o=o+Math.imul(D,at)|0,r=r+Math.imul(S,st)|0,i=(i=i+Math.imul(S,ct)|0)+Math.imul(M,st)|0,o=o+Math.imul(M,ct)|0,r=r+Math.imul(k,lt)|0,i=(i=i+Math.imul(k,ht)|0)+Math.imul(E,lt)|0,o=o+Math.imul(E,ht)|0;var Tt=(c+(r=r+Math.imul(_,pt)|0)|0)+((8191&(i=(i=i+Math.imul(_,gt)|0)+Math.imul(w,pt)|0))<<13)|0;c=((o=o+Math.imul(w,gt)|0)+(i>>>13)|0)+(Tt>>>26)|0,Tt&=67108863,r=Math.imul(P,Q),i=(i=Math.imul(P,tt))+Math.imul(F,Q)|0,o=Math.imul(F,tt),r=r+Math.imul(j,nt)|0,i=(i=i+Math.imul(j,rt)|0)+Math.imul(L,nt)|0,o=o+Math.imul(L,rt)|0,r=r+Math.imul(N,ot)|0,i=(i=i+Math.imul(N,at)|0)+Math.imul(I,ot)|0,o=o+Math.imul(I,at)|0,r=r+Math.imul(O,st)|0,i=(i=i+Math.imul(O,ct)|0)+Math.imul(D,st)|0,o=o+Math.imul(D,ct)|0,r=r+Math.imul(S,lt)|0,i=(i=i+Math.imul(S,ht)|0)+Math.imul(M,lt)|0,o=o+Math.imul(M,ht)|0;var Ot=(c+(r=r+Math.imul(k,pt)|0)|0)+((8191&(i=(i=i+Math.imul(k,gt)|0)+Math.imul(E,pt)|0))<<13)|0;c=((o=o+Math.imul(E,gt)|0)+(i>>>13)|0)+(Ot>>>26)|0,Ot&=67108863,r=Math.imul(P,nt),i=(i=Math.imul(P,rt))+Math.imul(F,nt)|0,o=Math.imul(F,rt),r=r+Math.imul(j,ot)|0,i=(i=i+Math.imul(j,at)|0)+Math.imul(L,ot)|0,o=o+Math.imul(L,at)|0,r=r+Math.imul(N,st)|0,i=(i=i+Math.imul(N,ct)|0)+Math.imul(I,st)|0,o=o+Math.imul(I,ct)|0,r=r+Math.imul(O,lt)|0,i=(i=i+Math.imul(O,ht)|0)+Math.imul(D,lt)|0,o=o+Math.imul(D,ht)|0;var 
Dt=(c+(r=r+Math.imul(S,pt)|0)|0)+((8191&(i=(i=i+Math.imul(S,gt)|0)+Math.imul(M,pt)|0))<<13)|0;c=((o=o+Math.imul(M,gt)|0)+(i>>>13)|0)+(Dt>>>26)|0,Dt&=67108863,r=Math.imul(P,ot),i=(i=Math.imul(P,at))+Math.imul(F,ot)|0,o=Math.imul(F,at),r=r+Math.imul(j,st)|0,i=(i=i+Math.imul(j,ct)|0)+Math.imul(L,st)|0,o=o+Math.imul(L,ct)|0,r=r+Math.imul(N,lt)|0,i=(i=i+Math.imul(N,ht)|0)+Math.imul(I,lt)|0,o=o+Math.imul(I,ht)|0;var Ct=(c+(r=r+Math.imul(O,pt)|0)|0)+((8191&(i=(i=i+Math.imul(O,gt)|0)+Math.imul(D,pt)|0))<<13)|0;c=((o=o+Math.imul(D,gt)|0)+(i>>>13)|0)+(Ct>>>26)|0,Ct&=67108863,r=Math.imul(P,st),i=(i=Math.imul(P,ct))+Math.imul(F,st)|0,o=Math.imul(F,ct),r=r+Math.imul(j,lt)|0,i=(i=i+Math.imul(j,ht)|0)+Math.imul(L,lt)|0,o=o+Math.imul(L,ht)|0;var Nt=(c+(r=r+Math.imul(N,pt)|0)|0)+((8191&(i=(i=i+Math.imul(N,gt)|0)+Math.imul(I,pt)|0))<<13)|0;c=((o=o+Math.imul(I,gt)|0)+(i>>>13)|0)+(Nt>>>26)|0,Nt&=67108863,r=Math.imul(P,lt),i=(i=Math.imul(P,ht))+Math.imul(F,lt)|0,o=Math.imul(F,ht);var It=(c+(r=r+Math.imul(j,pt)|0)|0)+((8191&(i=(i=i+Math.imul(j,gt)|0)+Math.imul(L,pt)|0))<<13)|0;c=((o=o+Math.imul(L,gt)|0)+(i>>>13)|0)+(It>>>26)|0,It&=67108863;var Rt=(c+(r=Math.imul(P,pt))|0)+((8191&(i=(i=Math.imul(P,gt))+Math.imul(F,pt)|0))<<13)|0;return c=((o=Math.imul(F,gt))+(i>>>13)|0)+(Rt>>>26)|0,Rt&=67108863,s[0]=yt,s[1]=bt,s[2]=vt,s[3]=mt,s[4]=_t,s[5]=wt,s[6]=xt,s[7]=kt,s[8]=Et,s[9]=At,s[10]=St,s[11]=Mt,s[12]=Tt,s[13]=Ot,s[14]=Dt,s[15]=Ct,s[16]=Nt,s[17]=It,s[18]=Rt,0!==c&&(s[19]=c,n.length++),n};function p(t,e,n){return(new g).mulp(t,e,n)}function g(t,e){this.x=t,this.y=e}Math.imul||(d=h),o.prototype.mulTo=function(t,e){var n=this.length+t.length;return 10===this.length&&10===t.length?d(this,t,e):n<63?h(this,t,e):n<1024?function(t,e,n){n.negative=e.negative^t.negative,n.length=t.length+e.length;for(var r=0,i=0,o=0;o>>26)|0)>>>26,a&=67108863}n.words[o]=u,r=a,a=i}return 0!==r?n.words[o]=r:n.length--,n.strip()}(this,t,e):p(this,t,e)},g.prototype.makeRBT=function(t){for(var e=new 
Array(t),n=o.prototype._countBits(t)-1,r=0;r>=1;return r},g.prototype.permute=function(t,e,n,r,i,o){for(var a=0;a>>=1)i++;return 1<>>=13,n[2*a+1]=8191&o,o>>>=13;for(a=2*e;a>=26,e+=i/67108864|0,e+=o>>>26,this.words[n]=67108863&o}return 0!==e&&(this.words[n]=e,this.length++),this},o.prototype.muln=function(t){return this.clone().imuln(t)},o.prototype.sqr=function(){return this.mul(this)},o.prototype.isqr=function(){return this.imul(this.clone())},o.prototype.pow=function(t){var e=function(t){for(var e=new Array(t.bitLength()),n=0;n>>i}return e}(t);if(0===e.length)return new o(1);for(var n=this,r=0;r=0);var e,n=t%26,i=(t-n)/26,o=67108863>>>26-n<<26-n;if(0!==n){var a=0;for(e=0;e>>26-n}a&&(this.words[e]=a,this.length++)}if(0!==i){for(e=this.length-1;e>=0;e--)this.words[e+i]=this.words[e];for(e=0;e=0),i=e?(e-e%26)/26:0;var o=t%26,a=Math.min((t-o)/26,this.length),u=67108863^67108863>>>o<a)for(this.length-=a,c=0;c=0&&(0!==f||c>=i);c--){var l=0|this.words[c];this.words[c]=f<<26-o|l>>>o,f=l&u}return s&&0!==f&&(s.words[s.length++]=f),0===this.length&&(this.words[0]=0,this.length=1),this.strip()},o.prototype.ishrn=function(t,e,n){return r(0===this.negative),this.iushrn(t,e,n)},o.prototype.shln=function(t){return this.clone().ishln(t)},o.prototype.ushln=function(t){return this.clone().iushln(t)},o.prototype.shrn=function(t){return this.clone().ishrn(t)},o.prototype.ushrn=function(t){return this.clone().iushrn(t)},o.prototype.testn=function(t){r("number"==typeof t&&t>=0);var e=t%26,n=(t-e)/26,i=1<=0);var e=t%26,n=(t-e)/26;if(r(0===this.negative,"imaskn works only with positive numbers"),this.length<=n)return this;if(0!==e&&n++,this.length=Math.min(n,this.length),0!==e){var i=67108863^67108863>>>e<=67108864;e++)this.words[e]-=67108864,e===this.length-1?this.words[e+1]=1:this.words[e+1]++;return this.length=Math.max(this.length,e+1),this},o.prototype.isubn=function(t){if(r("number"==typeof t),r(t<67108864),t<0)return this.iaddn(-t);if(0!==this.negative)return 
this.negative=0,this.iaddn(t),this.negative=1,this;if(this.words[0]-=t,1===this.length&&this.words[0]<0)this.words[0]=-this.words[0],this.negative=1;else for(var e=0;e>26)-(s/67108864|0),this.words[i+n]=67108863&o}for(;i>26,this.words[i+n]=67108863&o;if(0===u)return this.strip();for(r(-1===u),u=0,i=0;i>26,this.words[i]=67108863&o;return this.negative=1,this.strip()},o.prototype._wordDiv=function(t,e){var n=(this.length,t.length),r=this.clone(),i=t,a=0|i.words[i.length-1];0!==(n=26-this._countBits(a))&&(i=i.ushln(n),r.iushln(n),a=0|i.words[i.length-1]);var u,s=r.length-i.length;if("mod"!==e){(u=new o(null)).length=s+1,u.words=new Array(u.length);for(var c=0;c=0;l--){var h=67108864*(0|r.words[i.length+l])+(0|r.words[i.length+l-1]);for(h=Math.min(h/a|0,67108863),r._ishlnsubmul(i,h,l);0!==r.negative;)h--,r.negative=0,r._ishlnsubmul(i,1,l),r.isZero()||(r.negative^=1);u&&(u.words[l]=h)}return u&&u.strip(),r.strip(),"div"!==e&&0!==n&&r.iushrn(n),{div:u||null,mod:r}},o.prototype.divmod=function(t,e,n){return r(!t.isZero()),this.isZero()?{div:new o(0),mod:new o(0)}:0!==this.negative&&0===t.negative?(u=this.neg().divmod(t,e),"mod"!==e&&(i=u.div.neg()),"div"!==e&&(a=u.mod.neg(),n&&0!==a.negative&&a.iadd(t)),{div:i,mod:a}):0===this.negative&&0!==t.negative?(u=this.divmod(t.neg(),e),"mod"!==e&&(i=u.div.neg()),{div:i,mod:u.mod}):0!=(this.negative&t.negative)?(u=this.neg().divmod(t.neg(),e),"div"!==e&&(a=u.mod.neg(),n&&0!==a.negative&&a.isub(t)),{div:u.div,mod:a}):t.length>this.length||this.cmp(t)<0?{div:new o(0),mod:this}:1===t.length?"div"===e?{div:this.divn(t.words[0]),mod:null}:"mod"===e?{div:null,mod:new o(this.modn(t.words[0]))}:{div:this.divn(t.words[0]),mod:new o(this.modn(t.words[0]))}:this._wordDiv(t,e);var i,a,u},o.prototype.div=function(t){return this.divmod(t,"div",!1).div},o.prototype.mod=function(t){return this.divmod(t,"mod",!1).mod},o.prototype.umod=function(t){return this.divmod(t,"mod",!0).mod},o.prototype.divRound=function(t){var 
e=this.divmod(t);if(e.mod.isZero())return e.div;var n=0!==e.div.negative?e.mod.isub(t):e.mod,r=t.ushrn(1),i=t.andln(1),o=n.cmp(r);return o<0||1===i&&0===o?e.div:0!==e.div.negative?e.div.isubn(1):e.div.iaddn(1)},o.prototype.modn=function(t){r(t<=67108863);for(var e=(1<<26)%t,n=0,i=this.length-1;i>=0;i--)n=(e*n+(0|this.words[i]))%t;return n},o.prototype.idivn=function(t){r(t<=67108863);for(var e=0,n=this.length-1;n>=0;n--){var i=(0|this.words[n])+67108864*e;this.words[n]=i/t|0,e=i%t}return this.strip()},o.prototype.divn=function(t){return this.clone().idivn(t)},o.prototype.egcd=function(t){r(0===t.negative),r(!t.isZero());var e=this,n=t.clone();e=0!==e.negative?e.umod(t):e.clone();for(var i=new o(1),a=new o(0),u=new o(0),s=new o(1),c=0;e.isEven()&&n.isEven();)e.iushrn(1),n.iushrn(1),++c;for(var f=n.clone(),l=e.clone();!e.isZero();){for(var h=0,d=1;0==(e.words[0]&d)&&h<26;++h,d<<=1);if(h>0)for(e.iushrn(h);h-- >0;)(i.isOdd()||a.isOdd())&&(i.iadd(f),a.isub(l)),i.iushrn(1),a.iushrn(1);for(var p=0,g=1;0==(n.words[0]&g)&&p<26;++p,g<<=1);if(p>0)for(n.iushrn(p);p-- >0;)(u.isOdd()||s.isOdd())&&(u.iadd(f),s.isub(l)),u.iushrn(1),s.iushrn(1);e.cmp(n)>=0?(e.isub(n),i.isub(u),a.isub(s)):(n.isub(e),u.isub(i),s.isub(a))}return{a:u,b:s,gcd:n.iushln(c)}},o.prototype._invmp=function(t){r(0===t.negative),r(!t.isZero());var e=this,n=t.clone();e=0!==e.negative?e.umod(t):e.clone();for(var i,a=new o(1),u=new o(0),s=n.clone();e.cmpn(1)>0&&n.cmpn(1)>0;){for(var c=0,f=1;0==(e.words[0]&f)&&c<26;++c,f<<=1);if(c>0)for(e.iushrn(c);c-- >0;)a.isOdd()&&a.iadd(s),a.iushrn(1);for(var l=0,h=1;0==(n.words[0]&h)&&l<26;++l,h<<=1);if(l>0)for(n.iushrn(l);l-- >0;)u.isOdd()&&u.iadd(s),u.iushrn(1);e.cmp(n)>=0?(e.isub(n),a.isub(u)):(n.isub(e),u.isub(a))}return(i=0===e.cmpn(1)?a:u).cmpn(0)<0&&i.iadd(t),i},o.prototype.gcd=function(t){if(this.isZero())return t.abs();if(t.isZero())return this.abs();var e=this.clone(),n=t.clone();e.negative=0,n.negative=0;for(var 
r=0;e.isEven()&&n.isEven();r++)e.iushrn(1),n.iushrn(1);for(;;){for(;e.isEven();)e.iushrn(1);for(;n.isEven();)n.iushrn(1);var i=e.cmp(n);if(i<0){var o=e;e=n,n=o}else if(0===i||0===n.cmpn(1))break;e.isub(n)}return n.iushln(r)},o.prototype.invm=function(t){return this.egcd(t).a.umod(t)},o.prototype.isEven=function(){return 0==(1&this.words[0])},o.prototype.isOdd=function(){return 1==(1&this.words[0])},o.prototype.andln=function(t){return this.words[0]&t},o.prototype.bincn=function(t){r("number"==typeof t);var e=t%26,n=(t-e)/26,i=1<>>26,u&=67108863,this.words[a]=u}return 0!==o&&(this.words[a]=o,this.length++),this},o.prototype.isZero=function(){return 1===this.length&&0===this.words[0]},o.prototype.cmpn=function(t){var e,n=t<0;if(0!==this.negative&&!n)return-1;if(0===this.negative&&n)return 1;if(this.strip(),this.length>1)e=1;else{n&&(t=-t),r(t<=67108863,"Number is too big");var i=0|this.words[0];e=i===t?0:it.length)return 1;if(this.length=0;n--){var r=0|this.words[n],i=0|t.words[n];if(r!==i){ri&&(e=1);break}}return e},o.prototype.gtn=function(t){return 1===this.cmpn(t)},o.prototype.gt=function(t){return 1===this.cmp(t)},o.prototype.gten=function(t){return this.cmpn(t)>=0},o.prototype.gte=function(t){return this.cmp(t)>=0},o.prototype.ltn=function(t){return-1===this.cmpn(t)},o.prototype.lt=function(t){return-1===this.cmp(t)},o.prototype.lten=function(t){return this.cmpn(t)<=0},o.prototype.lte=function(t){return this.cmp(t)<=0},o.prototype.eqn=function(t){return 0===this.cmpn(t)},o.prototype.eq=function(t){return 0===this.cmp(t)},o.red=function(t){return new x(t)},o.prototype.toRed=function(t){return r(!this.red,"Already a number in reduction context"),r(0===this.negative,"red works only with positives"),t.convertTo(this)._forceRed(t)},o.prototype.fromRed=function(){return r(this.red,"fromRed works only with numbers in reduction context"),this.red.convertFrom(this)},o.prototype._forceRed=function(t){return this.red=t,this},o.prototype.forceRed=function(t){return 
r(!this.red,"Already a number in reduction context"),this._forceRed(t)},o.prototype.redAdd=function(t){return r(this.red,"redAdd works only with red numbers"),this.red.add(this,t)},o.prototype.redIAdd=function(t){return r(this.red,"redIAdd works only with red numbers"),this.red.iadd(this,t)},o.prototype.redSub=function(t){return r(this.red,"redSub works only with red numbers"),this.red.sub(this,t)},o.prototype.redISub=function(t){return r(this.red,"redISub works only with red numbers"),this.red.isub(this,t)},o.prototype.redShl=function(t){return r(this.red,"redShl works only with red numbers"),this.red.shl(this,t)},o.prototype.redMul=function(t){return r(this.red,"redMul works only with red numbers"),this.red._verify2(this,t),this.red.mul(this,t)},o.prototype.redIMul=function(t){return r(this.red,"redMul works only with red numbers"),this.red._verify2(this,t),this.red.imul(this,t)},o.prototype.redSqr=function(){return r(this.red,"redSqr works only with red numbers"),this.red._verify1(this),this.red.sqr(this)},o.prototype.redISqr=function(){return r(this.red,"redISqr works only with red numbers"),this.red._verify1(this),this.red.isqr(this)},o.prototype.redSqrt=function(){return r(this.red,"redSqrt works only with red numbers"),this.red._verify1(this),this.red.sqrt(this)},o.prototype.redInvm=function(){return r(this.red,"redInvm works only with red numbers"),this.red._verify1(this),this.red.invm(this)},o.prototype.redNeg=function(){return r(this.red,"redNeg works only with red numbers"),this.red._verify1(this),this.red.neg(this)},o.prototype.redPow=function(t){return r(this.red&&!t.red,"redPow(normalNum)"),this.red._verify1(this),this.red.pow(this,t)};var y={k256:null,p224:null,p192:null,p25519:null};function b(t,e){this.name=t,this.p=new o(e,16),this.n=this.p.bitLength(),this.k=new o(1).iushln(this.n).isub(this.p),this.tmp=this._tmp()}function v(){b.call(this,"k256","ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff fffffffe fffffc2f")}function 
m(){b.call(this,"p224","ffffffff ffffffff ffffffff ffffffff 00000000 00000000 00000001")}function _(){b.call(this,"p192","ffffffff ffffffff ffffffff fffffffe ffffffff ffffffff")}function w(){b.call(this,"25519","7fffffffffffffff ffffffffffffffff ffffffffffffffff ffffffffffffffed")}function x(t){if("string"==typeof t){var e=o._prime(t);this.m=e.p,this.prime=e}else r(t.gtn(1),"modulus must be greater than 1"),this.m=t,this.prime=null}function k(t){x.call(this,t),this.shift=this.m.bitLength(),this.shift%26!=0&&(this.shift+=26-this.shift%26),this.r=new o(1).iushln(this.shift),this.r2=this.imod(this.r.sqr()),this.rinv=this.r._invmp(this.m),this.minv=this.rinv.mul(this.r).isubn(1).div(this.m),this.minv=this.minv.umod(this.r),this.minv=this.r.sub(this.minv)}b.prototype._tmp=function(){var t=new o(null);return t.words=new Array(Math.ceil(this.n/13)),t},b.prototype.ireduce=function(t){var e,n=t;do{this.split(n,this.tmp),e=(n=(n=this.imulK(n)).iadd(this.tmp)).bitLength()}while(e>this.n);var r=e0?n.isub(this.p):n.strip(),n},b.prototype.split=function(t,e){t.iushrn(this.n,0,e)},b.prototype.imulK=function(t){return t.imul(this.k)},i(v,b),v.prototype.split=function(t,e){for(var n=Math.min(t.length,9),r=0;r>>22,i=o}i>>>=22,t.words[r-10]=i,0===i&&t.length>10?t.length-=10:t.length-=9},v.prototype.imulK=function(t){t.words[t.length]=0,t.words[t.length+1]=0,t.length+=2;for(var e=0,n=0;n>>=26,t.words[n]=i,e=r}return 0!==e&&(t.words[t.length++]=e),t},o._prime=function(t){if(y[t])return y[t];var e;if("k256"===t)e=new v;else if("p224"===t)e=new m;else if("p192"===t)e=new _;else{if("p25519"!==t)throw new Error("Unknown prime "+t);e=new w}return y[t]=e,e},x.prototype._verify1=function(t){r(0===t.negative,"red works only with positives"),r(t.red,"red works only with red numbers")},x.prototype._verify2=function(t,e){r(0==(t.negative|e.negative),"red works only with positives"),r(t.red&&t.red===e.red,"red works only with red numbers")},x.prototype.imod=function(t){return 
this.prime?this.prime.ireduce(t)._forceRed(this):t.umod(this.m)._forceRed(this)},x.prototype.neg=function(t){return t.isZero()?t.clone():this.m.sub(t)._forceRed(this)},x.prototype.add=function(t,e){this._verify2(t,e);var n=t.add(e);return n.cmp(this.m)>=0&&n.isub(this.m),n._forceRed(this)},x.prototype.iadd=function(t,e){this._verify2(t,e);var n=t.iadd(e);return n.cmp(this.m)>=0&&n.isub(this.m),n},x.prototype.sub=function(t,e){this._verify2(t,e);var n=t.sub(e);return n.cmpn(0)<0&&n.iadd(this.m),n._forceRed(this)},x.prototype.isub=function(t,e){this._verify2(t,e);var n=t.isub(e);return n.cmpn(0)<0&&n.iadd(this.m),n},x.prototype.shl=function(t,e){return this._verify1(t),this.imod(t.ushln(e))},x.prototype.imul=function(t,e){return this._verify2(t,e),this.imod(t.imul(e))},x.prototype.mul=function(t,e){return this._verify2(t,e),this.imod(t.mul(e))},x.prototype.isqr=function(t){return this.imul(t,t.clone())},x.prototype.sqr=function(t){return this.mul(t,t)},x.prototype.sqrt=function(t){if(t.isZero())return t.clone();var e=this.m.andln(3);if(r(e%2==1),3===e){var n=this.m.add(new o(1)).iushrn(2);return this.pow(t,n)}for(var i=this.m.subn(1),a=0;!i.isZero()&&0===i.andln(1);)a++,i.iushrn(1);r(!i.isZero());var u=new o(1).toRed(this),s=u.redNeg(),c=this.m.subn(1).iushrn(1),f=this.m.bitLength();for(f=new o(2*f*f).toRed(this);0!==this.pow(f,c).cmp(s);)f.redIAdd(s);for(var l=this.pow(f,i),h=this.pow(t,i.addn(1).iushrn(1)),d=this.pow(t,i),p=a;0!==d.cmp(u);){for(var g=d,y=0;0!==g.cmp(u);y++)g=g.redSqr();r(y=0;r--){for(var c=e.words[r],f=s-1;f>=0;f--){var l=c>>f&1;i!==n[0]&&(i=this.sqr(i)),0!==l||0!==a?(a<<=1,a|=l,(4===++u||0===r&&0===f)&&(i=this.mul(i,n[a]),u=0,a=0)):u=0}s=26}return i},x.prototype.convertTo=function(t){var e=t.umod(this.m);return e===t?e.clone():e},x.prototype.convertFrom=function(t){var e=t.clone();return e.red=null,e},o.mont=function(t){return new k(t)},i(k,x),k.prototype.convertTo=function(t){return 
this.imod(t.ushln(this.shift))},k.prototype.convertFrom=function(t){var e=this.imod(t.mul(this.rinv));return e.red=null,e},k.prototype.imul=function(t,e){if(t.isZero()||e.isZero())return t.words[0]=0,t.length=1,t;var n=t.imul(e),r=n.maskn(this.shift).mul(this.minv).imaskn(this.shift).mul(this.m),i=n.isub(r).iushrn(this.shift),o=i;return i.cmp(this.m)>=0?o=i.isub(this.m):i.cmpn(0)<0&&(o=i.iadd(this.m)),o._forceRed(this)},k.prototype.mul=function(t,e){if(t.isZero()||e.isZero())return new o(0)._forceRed(this);var n=t.mul(e),r=n.maskn(this.shift).mul(this.minv).imaskn(this.shift).mul(this.m),i=n.isub(r).iushrn(this.shift),a=i;return i.cmp(this.m)>=0?a=i.isub(this.m):i.cmpn(0)<0&&(a=i.iadd(this.m)),a._forceRed(this)},k.prototype.invm=function(t){return this.imod(t._invmp(this.m).mul(this.r2))._forceRed(this)}}(t,this)}).call(this,n(14)(t))},function(t,e,n){"use strict";var r=n(66),i=n(113),o=function(t){return Object(i.a)(Object(r.a)(t).call(document.documentElement))},a=0;function u(){return new s}function s(){this._="@"+(++a).toString(36)}s.prototype=u.prototype={constructor:s,get:function(t){for(var e=this._;!(e in t);)if(!(t=t.parentNode))return;return t[e]},set:function(t,e){return t[this._]=e},remove:function(t){return this._ in t&&delete t[this._]},toString:function(){return this._}};var c=n(203),f=n(285),l=n(105),h=n(68),d=n(67),p=n(49),g=function(t){return"string"==typeof t?new p.a([document.querySelectorAll(t)],[document.documentElement]):new p.a([null==t?[]:t],p.c)},y=n(106),b=n(204),v=n(205),m=n(284),_=n(112),w=function(t,e){null==e&&(e=Object(_.a)().touches);for(var n=0,r=e?e.length:0,i=new Array(r);n1)for(var n=1;n - * @license MIT - */ -var r=n(813),i=n(814),o=n(408);function a(){return s.TYPED_ARRAY_SUPPORT?2147483647:1073741823}function u(t,e){if(a()=a())throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+a().toString(16)+" bytes");return 0|t}function p(t,e){if(s.isBuffer(t))return t.length;if("undefined"!=typeof 
ArrayBuffer&&"function"==typeof ArrayBuffer.isView&&(ArrayBuffer.isView(t)||t instanceof ArrayBuffer))return t.byteLength;"string"!=typeof t&&(t=""+t);var n=t.length;if(0===n)return 0;for(var r=!1;;)switch(e){case"ascii":case"latin1":case"binary":return n;case"utf8":case"utf-8":case void 0:return q(t).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*n;case"hex":return n>>>1;case"base64":return U(t).length;default:if(r)return q(t).length;e=(""+e).toLowerCase(),r=!0}}function g(t,e,n){var r=!1;if((void 0===e||e<0)&&(e=0),e>this.length)return"";if((void 0===n||n>this.length)&&(n=this.length),n<=0)return"";if((n>>>=0)<=(e>>>=0))return"";for(t||(t="utf8");;)switch(t){case"hex":return O(this,e,n);case"utf8":case"utf-8":return S(this,e,n);case"ascii":return M(this,e,n);case"latin1":case"binary":return T(this,e,n);case"base64":return A(this,e,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return D(this,e,n);default:if(r)throw new TypeError("Unknown encoding: "+t);t=(t+"").toLowerCase(),r=!0}}function y(t,e,n){var r=t[e];t[e]=t[n],t[n]=r}function b(t,e,n,r,i){if(0===t.length)return-1;if("string"==typeof n?(r=n,n=0):n>2147483647?n=2147483647:n<-2147483648&&(n=-2147483648),n=+n,isNaN(n)&&(n=i?0:t.length-1),n<0&&(n=t.length+n),n>=t.length){if(i)return-1;n=t.length-1}else if(n<0){if(!i)return-1;n=0}if("string"==typeof e&&(e=s.from(e,r)),s.isBuffer(e))return 0===e.length?-1:v(t,e,n,r,i);if("number"==typeof e)return e&=255,s.TYPED_ARRAY_SUPPORT&&"function"==typeof Uint8Array.prototype.indexOf?i?Uint8Array.prototype.indexOf.call(t,e,n):Uint8Array.prototype.lastIndexOf.call(t,e,n):v(t,[e],n,r,i);throw new TypeError("val must be string, number or Buffer")}function v(t,e,n,r,i){var o,a=1,u=t.length,s=e.length;if(void 0!==r&&("ucs2"===(r=String(r).toLowerCase())||"ucs-2"===r||"utf16le"===r||"utf-16le"===r)){if(t.length<2||e.length<2)return-1;a=2,u/=2,s/=2,n/=2}function c(t,e){return 1===a?t[e]:t.readUInt16BE(e*a)}if(i){var 
f=-1;for(o=n;ou&&(n=u-s),o=n;o>=0;o--){for(var l=!0,h=0;hi&&(r=i):r=i;var o=e.length;if(o%2!=0)throw new TypeError("Invalid hex string");r>o/2&&(r=o/2);for(var a=0;a>8,i=n%256,o.push(i),o.push(r);return o}(e,t.length-n),t,n,r)}function A(t,e,n){return 0===e&&n===t.length?r.fromByteArray(t):r.fromByteArray(t.slice(e,n))}function S(t,e,n){n=Math.min(t.length,n);for(var r=[],i=e;i239?4:c>223?3:c>191?2:1;if(i+l<=n)switch(l){case 1:c<128&&(f=c);break;case 2:128==(192&(o=t[i+1]))&&(s=(31&c)<<6|63&o)>127&&(f=s);break;case 3:o=t[i+1],a=t[i+2],128==(192&o)&&128==(192&a)&&(s=(15&c)<<12|(63&o)<<6|63&a)>2047&&(s<55296||s>57343)&&(f=s);break;case 4:o=t[i+1],a=t[i+2],u=t[i+3],128==(192&o)&&128==(192&a)&&128==(192&u)&&(s=(15&c)<<18|(63&o)<<12|(63&a)<<6|63&u)>65535&&s<1114112&&(f=s)}null===f?(f=65533,l=1):f>65535&&(f-=65536,r.push(f>>>10&1023|55296),f=56320|1023&f),r.push(f),i+=l}return function(t){var e=t.length;if(e<=4096)return String.fromCharCode.apply(String,t);var n="",r=0;for(;r0&&(t=this.toString("hex",0,n).match(/.{2}/g).join(" "),this.length>n&&(t+=" ... 
")),""},s.prototype.compare=function(t,e,n,r,i){if(!s.isBuffer(t))throw new TypeError("Argument must be a Buffer");if(void 0===e&&(e=0),void 0===n&&(n=t?t.length:0),void 0===r&&(r=0),void 0===i&&(i=this.length),e<0||n>t.length||r<0||i>this.length)throw new RangeError("out of range index");if(r>=i&&e>=n)return 0;if(r>=i)return-1;if(e>=n)return 1;if(this===t)return 0;for(var o=(i>>>=0)-(r>>>=0),a=(n>>>=0)-(e>>>=0),u=Math.min(o,a),c=this.slice(r,i),f=t.slice(e,n),l=0;li)&&(n=i),t.length>0&&(n<0||e<0)||e>this.length)throw new RangeError("Attempt to write outside buffer bounds");r||(r="utf8");for(var o=!1;;)switch(r){case"hex":return m(this,t,e,n);case"utf8":case"utf-8":return _(this,t,e,n);case"ascii":return w(this,t,e,n);case"latin1":case"binary":return x(this,t,e,n);case"base64":return k(this,t,e,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return E(this,t,e,n);default:if(o)throw new TypeError("Unknown encoding: "+r);r=(""+r).toLowerCase(),o=!0}},s.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}};function M(t,e,n){var r="";n=Math.min(t.length,n);for(var i=e;ir)&&(n=r);for(var i="",o=e;on)throw new RangeError("Trying to access beyond buffer length")}function N(t,e,n,r,i,o){if(!s.isBuffer(t))throw new TypeError('"buffer" argument must be a Buffer instance');if(e>i||et.length)throw new RangeError("Index out of range")}function I(t,e,n,r){e<0&&(e=65535+e+1);for(var i=0,o=Math.min(t.length-n,2);i>>8*(r?i:1-i)}function R(t,e,n,r){e<0&&(e=4294967295+e+1);for(var i=0,o=Math.min(t.length-n,4);i>>8*(r?i:3-i)&255}function j(t,e,n,r,i,o){if(n+r>t.length)throw new RangeError("Index out of range");if(n<0)throw new RangeError("Index out of range")}function L(t,e,n,r,o){return o||j(t,0,n,4),i.write(t,e,n,r,23,4),n+4}function B(t,e,n,r,o){return o||j(t,0,n,8),i.write(t,e,n,r,52,8),n+8}s.prototype.slice=function(t,e){var n,r=this.length;if((t=~~t)<0?(t+=r)<0&&(t=0):t>r&&(t=r),(e=void 
0===e?r:~~e)<0?(e+=r)<0&&(e=0):e>r&&(e=r),e0&&(i*=256);)r+=this[t+--e]*i;return r},s.prototype.readUInt8=function(t,e){return e||C(t,1,this.length),this[t]},s.prototype.readUInt16LE=function(t,e){return e||C(t,2,this.length),this[t]|this[t+1]<<8},s.prototype.readUInt16BE=function(t,e){return e||C(t,2,this.length),this[t]<<8|this[t+1]},s.prototype.readUInt32LE=function(t,e){return e||C(t,4,this.length),(this[t]|this[t+1]<<8|this[t+2]<<16)+16777216*this[t+3]},s.prototype.readUInt32BE=function(t,e){return e||C(t,4,this.length),16777216*this[t]+(this[t+1]<<16|this[t+2]<<8|this[t+3])},s.prototype.readIntLE=function(t,e,n){t|=0,e|=0,n||C(t,e,this.length);for(var r=this[t],i=1,o=0;++o=(i*=128)&&(r-=Math.pow(2,8*e)),r},s.prototype.readIntBE=function(t,e,n){t|=0,e|=0,n||C(t,e,this.length);for(var r=e,i=1,o=this[t+--r];r>0&&(i*=256);)o+=this[t+--r]*i;return o>=(i*=128)&&(o-=Math.pow(2,8*e)),o},s.prototype.readInt8=function(t,e){return e||C(t,1,this.length),128&this[t]?-1*(255-this[t]+1):this[t]},s.prototype.readInt16LE=function(t,e){e||C(t,2,this.length);var n=this[t]|this[t+1]<<8;return 32768&n?4294901760|n:n},s.prototype.readInt16BE=function(t,e){e||C(t,2,this.length);var n=this[t+1]|this[t]<<8;return 32768&n?4294901760|n:n},s.prototype.readInt32LE=function(t,e){return e||C(t,4,this.length),this[t]|this[t+1]<<8|this[t+2]<<16|this[t+3]<<24},s.prototype.readInt32BE=function(t,e){return e||C(t,4,this.length),this[t]<<24|this[t+1]<<16|this[t+2]<<8|this[t+3]},s.prototype.readFloatLE=function(t,e){return e||C(t,4,this.length),i.read(this,t,!0,23,4)},s.prototype.readFloatBE=function(t,e){return e||C(t,4,this.length),i.read(this,t,!1,23,4)},s.prototype.readDoubleLE=function(t,e){return e||C(t,8,this.length),i.read(this,t,!0,52,8)},s.prototype.readDoubleBE=function(t,e){return e||C(t,8,this.length),i.read(this,t,!1,52,8)},s.prototype.writeUIntLE=function(t,e,n,r){(t=+t,e|=0,n|=0,r)||N(this,t,e,n,Math.pow(2,8*n)-1,0);var 
i=1,o=0;for(this[e]=255&t;++o=0&&(o*=256);)this[e+i]=t/o&255;return e+n},s.prototype.writeUInt8=function(t,e,n){return t=+t,e|=0,n||N(this,t,e,1,255,0),s.TYPED_ARRAY_SUPPORT||(t=Math.floor(t)),this[e]=255&t,e+1},s.prototype.writeUInt16LE=function(t,e,n){return t=+t,e|=0,n||N(this,t,e,2,65535,0),s.TYPED_ARRAY_SUPPORT?(this[e]=255&t,this[e+1]=t>>>8):I(this,t,e,!0),e+2},s.prototype.writeUInt16BE=function(t,e,n){return t=+t,e|=0,n||N(this,t,e,2,65535,0),s.TYPED_ARRAY_SUPPORT?(this[e]=t>>>8,this[e+1]=255&t):I(this,t,e,!1),e+2},s.prototype.writeUInt32LE=function(t,e,n){return t=+t,e|=0,n||N(this,t,e,4,4294967295,0),s.TYPED_ARRAY_SUPPORT?(this[e+3]=t>>>24,this[e+2]=t>>>16,this[e+1]=t>>>8,this[e]=255&t):R(this,t,e,!0),e+4},s.prototype.writeUInt32BE=function(t,e,n){return t=+t,e|=0,n||N(this,t,e,4,4294967295,0),s.TYPED_ARRAY_SUPPORT?(this[e]=t>>>24,this[e+1]=t>>>16,this[e+2]=t>>>8,this[e+3]=255&t):R(this,t,e,!1),e+4},s.prototype.writeIntLE=function(t,e,n,r){if(t=+t,e|=0,!r){var i=Math.pow(2,8*n-1);N(this,t,e,n,i-1,-i)}var o=0,a=1,u=0;for(this[e]=255&t;++o>0)-u&255;return e+n},s.prototype.writeIntBE=function(t,e,n,r){if(t=+t,e|=0,!r){var i=Math.pow(2,8*n-1);N(this,t,e,n,i-1,-i)}var o=n-1,a=1,u=0;for(this[e+o]=255&t;--o>=0&&(a*=256);)t<0&&0===u&&0!==this[e+o+1]&&(u=1),this[e+o]=(t/a>>0)-u&255;return e+n},s.prototype.writeInt8=function(t,e,n){return t=+t,e|=0,n||N(this,t,e,1,127,-128),s.TYPED_ARRAY_SUPPORT||(t=Math.floor(t)),t<0&&(t=255+t+1),this[e]=255&t,e+1},s.prototype.writeInt16LE=function(t,e,n){return t=+t,e|=0,n||N(this,t,e,2,32767,-32768),s.TYPED_ARRAY_SUPPORT?(this[e]=255&t,this[e+1]=t>>>8):I(this,t,e,!0),e+2},s.prototype.writeInt16BE=function(t,e,n){return t=+t,e|=0,n||N(this,t,e,2,32767,-32768),s.TYPED_ARRAY_SUPPORT?(this[e]=t>>>8,this[e+1]=255&t):I(this,t,e,!1),e+2},s.prototype.writeInt32LE=function(t,e,n){return 
t=+t,e|=0,n||N(this,t,e,4,2147483647,-2147483648),s.TYPED_ARRAY_SUPPORT?(this[e]=255&t,this[e+1]=t>>>8,this[e+2]=t>>>16,this[e+3]=t>>>24):R(this,t,e,!0),e+4},s.prototype.writeInt32BE=function(t,e,n){return t=+t,e|=0,n||N(this,t,e,4,2147483647,-2147483648),t<0&&(t=4294967295+t+1),s.TYPED_ARRAY_SUPPORT?(this[e]=t>>>24,this[e+1]=t>>>16,this[e+2]=t>>>8,this[e+3]=255&t):R(this,t,e,!1),e+4},s.prototype.writeFloatLE=function(t,e,n){return L(this,t,e,!0,n)},s.prototype.writeFloatBE=function(t,e,n){return L(this,t,e,!1,n)},s.prototype.writeDoubleLE=function(t,e,n){return B(this,t,e,!0,n)},s.prototype.writeDoubleBE=function(t,e,n){return B(this,t,e,!1,n)},s.prototype.copy=function(t,e,n,r){if(n||(n=0),r||0===r||(r=this.length),e>=t.length&&(e=t.length),e||(e=0),r>0&&r=this.length)throw new RangeError("sourceStart out of bounds");if(r<0)throw new RangeError("sourceEnd out of bounds");r>this.length&&(r=this.length),t.length-e=0;--i)t[i+e]=this[i+n];else if(o<1e3||!s.TYPED_ARRAY_SUPPORT)for(i=0;i>>=0,n=void 0===n?this.length:n>>>0,t||(t=0),"number"==typeof t)for(o=e;o55295&&n<57344){if(!i){if(n>56319){(e-=3)>-1&&o.push(239,191,189);continue}if(a+1===r){(e-=3)>-1&&o.push(239,191,189);continue}i=n;continue}if(n<56320){(e-=3)>-1&&o.push(239,191,189),i=n;continue}n=65536+(i-55296<<10|n-56320)}else i&&(e-=3)>-1&&o.push(239,191,189);if(i=null,n<128){if((e-=1)<0)break;o.push(n)}else if(n<2048){if((e-=2)<0)break;o.push(n>>6|192,63&n|128)}else if(n<65536){if((e-=3)<0)break;o.push(n>>12|224,n>>6&63|128,63&n|128)}else{if(!(n<1114112))throw new Error("Invalid code point");if((e-=4)<0)break;o.push(n>>18|240,n>>12&63|128,n>>6&63|128,63&n|128)}}return o}function U(t){return r.toByteArray(function(t){if((t=function(t){return t.trim?t.trim():t.replace(/^\s+|\s+$/g,"")}(t).replace(P,"")).length<2)return"";for(;t.length%4!=0;)t+="=";return t}(t))}function z(t,e,n,r){for(var i=0;i=e.length||i>=t.length);++i)e[i+n]=t[i];return i}}).call(this,n(25))},function(t,e,n){ +!function(t,e){"object"==typeof 
exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.mermaid=e():t.mermaid=e()}("undefined"!=typeof self?self:this,(function(){return function(t){var e={};function n(r){if(e[r])return e[r].exports;var i=e[r]={i:r,l:!1,exports:{}};return t[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=t,n.c=e,n.d=function(t,e,r){n.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:r})},n.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},n.t=function(t,e){if(1&e&&(t=n(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var i in t)n.d(r,i,function(e){return t[e]}.bind(null,i));return r},n.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return n.d(e,"a",e),e},n.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},n.p="",n(n.s=390)}([function(t,e,n){"use strict";n.r(e),n.d(e,"version",(function(){return r})),n.d(e,"bisect",(function(){return u})),n.d(e,"bisectRight",(function(){return s})),n.d(e,"bisectLeft",(function(){return c})),n.d(e,"ascending",(function(){return i})),n.d(e,"bisector",(function(){return a})),n.d(e,"cross",(function(){return f})),n.d(e,"descending",(function(){return d})),n.d(e,"deviation",(function(){return g})),n.d(e,"extent",(function(){return m})),n.d(e,"histogram",(function(){return N})),n.d(e,"thresholdFreedmanDiaconis",(function(){return D})),n.d(e,"thresholdScott",(function(){return L})),n.d(e,"thresholdSturges",(function(){return O})),n.d(e,"max",(function(){return I})),n.d(e,"mean",(function(){return R})),n.d(e,"median",(function(){return F})),n.d(e,"merge",(function(){return P})),n.d(e,"min",(function(){return j})),n.d(e,"pairs",(function(){return 
l})),n.d(e,"permute",(function(){return Y})),n.d(e,"quantile",(function(){return B})),n.d(e,"range",(function(){return w})),n.d(e,"scan",(function(){return z})),n.d(e,"shuffle",(function(){return U})),n.d(e,"sum",(function(){return $})),n.d(e,"ticks",(function(){return S})),n.d(e,"tickIncrement",(function(){return A})),n.d(e,"tickStep",(function(){return M})),n.d(e,"transpose",(function(){return q})),n.d(e,"variance",(function(){return y})),n.d(e,"zip",(function(){return H})),n.d(e,"axisTop",(function(){return et})),n.d(e,"axisRight",(function(){return nt})),n.d(e,"axisBottom",(function(){return rt})),n.d(e,"axisLeft",(function(){return it})),n.d(e,"brush",(function(){return Ci})),n.d(e,"brushX",(function(){return Ei})),n.d(e,"brushY",(function(){return Ti})),n.d(e,"brushSelection",(function(){return wi})),n.d(e,"chord",(function(){return Ii})),n.d(e,"ribbon",(function(){return Xi})),n.d(e,"nest",(function(){return Ji})),n.d(e,"set",(function(){return sa})),n.d(e,"map",(function(){return Qi})),n.d(e,"keys",(function(){return ca})),n.d(e,"values",(function(){return ua})),n.d(e,"entries",(function(){return la})),n.d(e,"color",(function(){return qe})),n.d(e,"rgb",(function(){return Ge})),n.d(e,"hsl",(function(){return en})),n.d(e,"lab",(function(){return ya})),n.d(e,"hcl",(function(){return wa})),n.d(e,"lch",(function(){return ka})),n.d(e,"gray",(function(){return pa})),n.d(e,"cubehelix",(function(){return Na})),n.d(e,"contours",(function(){return za})),n.d(e,"contourDensity",(function(){return Va})),n.d(e,"dispatch",(function(){return ht})),n.d(e,"drag",(function(){return to})),n.d(e,"dragDisable",(function(){return Ce})),n.d(e,"dragEnable",(function(){return Se})),n.d(e,"dsvFormat",(function(){return so})),n.d(e,"csvParse",(function(){return uo})),n.d(e,"csvParseRows",(function(){return lo})),n.d(e,"csvFormat",(function(){return ho})),n.d(e,"csvFormatBody",(function(){return fo})),n.d(e,"csvFormatRows",(function(){return po})),n.d(e,"csvFormatRow",(function(){return 
yo})),n.d(e,"csvFormatValue",(function(){return go})),n.d(e,"tsvParse",(function(){return vo})),n.d(e,"tsvParseRows",(function(){return bo})),n.d(e,"tsvFormat",(function(){return xo})),n.d(e,"tsvFormatBody",(function(){return _o})),n.d(e,"tsvFormatRows",(function(){return ko})),n.d(e,"tsvFormatRow",(function(){return wo})),n.d(e,"tsvFormatValue",(function(){return Eo})),n.d(e,"autoType",(function(){return To})),n.d(e,"easeLinear",(function(){return So})),n.d(e,"easeQuad",(function(){return Oo})),n.d(e,"easeQuadIn",(function(){return Ao})),n.d(e,"easeQuadOut",(function(){return Mo})),n.d(e,"easeQuadInOut",(function(){return Oo})),n.d(e,"easeCubic",(function(){return Vr})),n.d(e,"easeCubicIn",(function(){return Wr})),n.d(e,"easeCubicOut",(function(){return Hr})),n.d(e,"easeCubicInOut",(function(){return Vr})),n.d(e,"easePoly",(function(){return Do})),n.d(e,"easePolyIn",(function(){return No})),n.d(e,"easePolyOut",(function(){return Bo})),n.d(e,"easePolyInOut",(function(){return Do})),n.d(e,"easeSin",(function(){return Po})),n.d(e,"easeSinIn",(function(){return Ro})),n.d(e,"easeSinOut",(function(){return Fo})),n.d(e,"easeSinInOut",(function(){return Po})),n.d(e,"easeExp",(function(){return Uo})),n.d(e,"easeExpIn",(function(){return Yo})),n.d(e,"easeExpOut",(function(){return zo})),n.d(e,"easeExpInOut",(function(){return Uo})),n.d(e,"easeCircle",(function(){return Wo})),n.d(e,"easeCircleIn",(function(){return $o})),n.d(e,"easeCircleOut",(function(){return qo})),n.d(e,"easeCircleInOut",(function(){return Wo})),n.d(e,"easeBounce",(function(){return Vo})),n.d(e,"easeBounceIn",(function(){return Ho})),n.d(e,"easeBounceOut",(function(){return Vo})),n.d(e,"easeBounceInOut",(function(){return Go})),n.d(e,"easeBack",(function(){return Ko})),n.d(e,"easeBackIn",(function(){return Xo})),n.d(e,"easeBackOut",(function(){return Zo})),n.d(e,"easeBackInOut",(function(){return Ko})),n.d(e,"easeElastic",(function(){return ts})),n.d(e,"easeElasticIn",(function(){return 
Jo})),n.d(e,"easeElasticOut",(function(){return ts})),n.d(e,"easeElasticInOut",(function(){return es})),n.d(e,"blob",(function(){return rs})),n.d(e,"buffer",(function(){return as})),n.d(e,"dsv",(function(){return us})),n.d(e,"csv",(function(){return ls})),n.d(e,"tsv",(function(){return hs})),n.d(e,"image",(function(){return fs})),n.d(e,"json",(function(){return ps})),n.d(e,"text",(function(){return ss})),n.d(e,"xml",(function(){return gs})),n.d(e,"html",(function(){return ms})),n.d(e,"svg",(function(){return vs})),n.d(e,"forceCenter",(function(){return bs})),n.d(e,"forceCollide",(function(){return Bs})),n.d(e,"forceLink",(function(){return Is})),n.d(e,"forceManyBody",(function(){return Ys})),n.d(e,"forceRadial",(function(){return zs})),n.d(e,"forceSimulation",(function(){return js})),n.d(e,"forceX",(function(){return Us})),n.d(e,"forceY",(function(){return $s})),n.d(e,"formatDefaultLocale",(function(){return ac})),n.d(e,"format",(function(){return Ks})),n.d(e,"formatPrefix",(function(){return Qs})),n.d(e,"formatLocale",(function(){return ic})),n.d(e,"formatSpecifier",(function(){return Vs})),n.d(e,"FormatSpecifier",(function(){return Gs})),n.d(e,"precisionFixed",(function(){return oc})),n.d(e,"precisionPrefix",(function(){return sc})),n.d(e,"precisionRound",(function(){return cc})),n.d(e,"geoArea",(function(){return Jc})),n.d(e,"geoBounds",(function(){return Wu})),n.d(e,"geoCentroid",(function(){return rl})),n.d(e,"geoCircle",(function(){return pl})),n.d(e,"geoClipAntimeridian",(function(){return Cl})),n.d(e,"geoClipCircle",(function(){return Sl})),n.d(e,"geoClipExtent",(function(){return Bl})),n.d(e,"geoClipRectangle",(function(){return Al})),n.d(e,"geoContains",(function(){return Zl})),n.d(e,"geoDistance",(function(){return zl})),n.d(e,"geoGraticule",(function(){return Jl})),n.d(e,"geoGraticule10",(function(){return th})),n.d(e,"geoInterpolate",(function(){return ah})),n.d(e,"geoLength",(function(){return Pl})),n.d(e,"geoPath",(function(){return 
rf})),n.d(e,"geoAlbers",(function(){return wf})),n.d(e,"geoAlbersUsa",(function(){return Ef})),n.d(e,"geoAzimuthalEqualArea",(function(){return Af})),n.d(e,"geoAzimuthalEqualAreaRaw",(function(){return Sf})),n.d(e,"geoAzimuthalEquidistant",(function(){return Of})),n.d(e,"geoAzimuthalEquidistantRaw",(function(){return Mf})),n.d(e,"geoConicConformal",(function(){return Rf})),n.d(e,"geoConicConformalRaw",(function(){return If})),n.d(e,"geoConicEqualArea",(function(){return kf})),n.d(e,"geoConicEqualAreaRaw",(function(){return _f})),n.d(e,"geoConicEquidistant",(function(){return Yf})),n.d(e,"geoConicEquidistantRaw",(function(){return jf})),n.d(e,"geoEqualEarth",(function(){return Vf})),n.d(e,"geoEqualEarthRaw",(function(){return Hf})),n.d(e,"geoEquirectangular",(function(){return Pf})),n.d(e,"geoEquirectangularRaw",(function(){return Ff})),n.d(e,"geoGnomonic",(function(){return Xf})),n.d(e,"geoGnomonicRaw",(function(){return Gf})),n.d(e,"geoIdentity",(function(){return Zf})),n.d(e,"geoProjection",(function(){return vf})),n.d(e,"geoProjectionMutator",(function(){return bf})),n.d(e,"geoMercator",(function(){return Bf})),n.d(e,"geoMercatorRaw",(function(){return Nf})),n.d(e,"geoNaturalEarth1",(function(){return Qf})),n.d(e,"geoNaturalEarth1Raw",(function(){return Kf})),n.d(e,"geoOrthographic",(function(){return td})),n.d(e,"geoOrthographicRaw",(function(){return Jf})),n.d(e,"geoStereographic",(function(){return nd})),n.d(e,"geoStereographicRaw",(function(){return ed})),n.d(e,"geoTransverseMercator",(function(){return id})),n.d(e,"geoTransverseMercatorRaw",(function(){return rd})),n.d(e,"geoRotation",(function(){return hl})),n.d(e,"geoStream",(function(){return Wc})),n.d(e,"geoTransform",(function(){return af})),n.d(e,"cluster",(function(){return cd})),n.d(e,"hierarchy",(function(){return ld})),n.d(e,"pack",(function(){return Id})),n.d(e,"packSiblings",(function(){return Md})),n.d(e,"packEnclose",(function(){return gd})),n.d(e,"partition",(function(){return 
zd})),n.d(e,"stratify",(function(){return Hd})),n.d(e,"tree",(function(){return Jd})),n.d(e,"treemap",(function(){return ip})),n.d(e,"treemapBinary",(function(){return ap})),n.d(e,"treemapDice",(function(){return Yd})),n.d(e,"treemapSlice",(function(){return tp})),n.d(e,"treemapSliceDice",(function(){return op})),n.d(e,"treemapSquarify",(function(){return rp})),n.d(e,"treemapResquarify",(function(){return sp})),n.d(e,"interpolate",(function(){return Mn})),n.d(e,"interpolateArray",(function(){return bn})),n.d(e,"interpolateBasis",(function(){return on})),n.d(e,"interpolateBasisClosed",(function(){return sn})),n.d(e,"interpolateDate",(function(){return _n})),n.d(e,"interpolateDiscrete",(function(){return cp})),n.d(e,"interpolateHue",(function(){return up})),n.d(e,"interpolateNumber",(function(){return kn})),n.d(e,"interpolateNumberArray",(function(){return mn})),n.d(e,"interpolateObject",(function(){return wn})),n.d(e,"interpolateRound",(function(){return lp})),n.d(e,"interpolateString",(function(){return An})),n.d(e,"interpolateTransformCss",(function(){return fr})),n.d(e,"interpolateTransformSvg",(function(){return dr})),n.d(e,"interpolateZoom",(function(){return dp})),n.d(e,"interpolateRgb",(function(){return dn})),n.d(e,"interpolateRgbBasis",(function(){return yn})),n.d(e,"interpolateRgbBasisClosed",(function(){return gn})),n.d(e,"interpolateHsl",(function(){return yp})),n.d(e,"interpolateHslLong",(function(){return gp})),n.d(e,"interpolateLab",(function(){return mp})),n.d(e,"interpolateHcl",(function(){return bp})),n.d(e,"interpolateHclLong",(function(){return xp})),n.d(e,"interpolateCubehelix",(function(){return kp})),n.d(e,"interpolateCubehelixLong",(function(){return wp})),n.d(e,"piecewise",(function(){return Ep})),n.d(e,"quantize",(function(){return Tp})),n.d(e,"path",(function(){return $i})),n.d(e,"polygonArea",(function(){return Cp})),n.d(e,"polygonCentroid",(function(){return Sp})),n.d(e,"polygonHull",(function(){return 
Op})),n.d(e,"polygonContains",(function(){return Np})),n.d(e,"polygonLength",(function(){return Bp})),n.d(e,"quadtree",(function(){return Cs})),n.d(e,"randomUniform",(function(){return Lp})),n.d(e,"randomNormal",(function(){return Ip})),n.d(e,"randomLogNormal",(function(){return Rp})),n.d(e,"randomBates",(function(){return Pp})),n.d(e,"randomIrwinHall",(function(){return Fp})),n.d(e,"randomExponential",(function(){return jp})),n.d(e,"scaleBand",(function(){return Vp})),n.d(e,"scalePoint",(function(){return Xp})),n.d(e,"scaleIdentity",(function(){return uy})),n.d(e,"scaleLinear",(function(){return cy})),n.d(e,"scaleLog",(function(){return vy})),n.d(e,"scaleSymlog",(function(){return ky})),n.d(e,"scaleOrdinal",(function(){return Hp})),n.d(e,"scaleImplicit",(function(){return Wp})),n.d(e,"scalePow",(function(){return Sy})),n.d(e,"scaleSqrt",(function(){return Ay})),n.d(e,"scaleQuantile",(function(){return My})),n.d(e,"scaleQuantize",(function(){return Oy})),n.d(e,"scaleThreshold",(function(){return Ny})),n.d(e,"scaleTime",(function(){return pv})),n.d(e,"scaleUtc",(function(){return Ev})),n.d(e,"scaleSequential",(function(){return Sv})),n.d(e,"scaleSequentialLog",(function(){return Av})),n.d(e,"scaleSequentialPow",(function(){return Ov})),n.d(e,"scaleSequentialSqrt",(function(){return Nv})),n.d(e,"scaleSequentialSymlog",(function(){return Mv})),n.d(e,"scaleSequentialQuantile",(function(){return Bv})),n.d(e,"scaleDiverging",(function(){return Lv})),n.d(e,"scaleDivergingLog",(function(){return Iv})),n.d(e,"scaleDivergingPow",(function(){return Fv})),n.d(e,"scaleDivergingSqrt",(function(){return Pv})),n.d(e,"scaleDivergingSymlog",(function(){return Rv})),n.d(e,"tickFormat",(function(){return oy})),n.d(e,"schemeCategory10",(function(){return Yv})),n.d(e,"schemeAccent",(function(){return zv})),n.d(e,"schemeDark2",(function(){return Uv})),n.d(e,"schemePaired",(function(){return $v})),n.d(e,"schemePastel1",(function(){return qv})),n.d(e,"schemePastel2",(function(){return 
Wv})),n.d(e,"schemeSet1",(function(){return Hv})),n.d(e,"schemeSet2",(function(){return Vv})),n.d(e,"schemeSet3",(function(){return Gv})),n.d(e,"schemeTableau10",(function(){return Xv})),n.d(e,"interpolateBrBG",(function(){return Qv})),n.d(e,"schemeBrBG",(function(){return Kv})),n.d(e,"interpolatePRGn",(function(){return tb})),n.d(e,"schemePRGn",(function(){return Jv})),n.d(e,"interpolatePiYG",(function(){return nb})),n.d(e,"schemePiYG",(function(){return eb})),n.d(e,"interpolatePuOr",(function(){return ib})),n.d(e,"schemePuOr",(function(){return rb})),n.d(e,"interpolateRdBu",(function(){return ob})),n.d(e,"schemeRdBu",(function(){return ab})),n.d(e,"interpolateRdGy",(function(){return cb})),n.d(e,"schemeRdGy",(function(){return sb})),n.d(e,"interpolateRdYlBu",(function(){return lb})),n.d(e,"schemeRdYlBu",(function(){return ub})),n.d(e,"interpolateRdYlGn",(function(){return fb})),n.d(e,"schemeRdYlGn",(function(){return hb})),n.d(e,"interpolateSpectral",(function(){return pb})),n.d(e,"schemeSpectral",(function(){return db})),n.d(e,"interpolateBuGn",(function(){return gb})),n.d(e,"schemeBuGn",(function(){return yb})),n.d(e,"interpolateBuPu",(function(){return vb})),n.d(e,"schemeBuPu",(function(){return mb})),n.d(e,"interpolateGnBu",(function(){return xb})),n.d(e,"schemeGnBu",(function(){return bb})),n.d(e,"interpolateOrRd",(function(){return kb})),n.d(e,"schemeOrRd",(function(){return _b})),n.d(e,"interpolatePuBuGn",(function(){return Eb})),n.d(e,"schemePuBuGn",(function(){return wb})),n.d(e,"interpolatePuBu",(function(){return Cb})),n.d(e,"schemePuBu",(function(){return Tb})),n.d(e,"interpolatePuRd",(function(){return Ab})),n.d(e,"schemePuRd",(function(){return Sb})),n.d(e,"interpolateRdPu",(function(){return Ob})),n.d(e,"schemeRdPu",(function(){return Mb})),n.d(e,"interpolateYlGnBu",(function(){return Bb})),n.d(e,"schemeYlGnBu",(function(){return Nb})),n.d(e,"interpolateYlGn",(function(){return Lb})),n.d(e,"schemeYlGn",(function(){return 
Db})),n.d(e,"interpolateYlOrBr",(function(){return Rb})),n.d(e,"schemeYlOrBr",(function(){return Ib})),n.d(e,"interpolateYlOrRd",(function(){return Pb})),n.d(e,"schemeYlOrRd",(function(){return Fb})),n.d(e,"interpolateBlues",(function(){return Yb})),n.d(e,"schemeBlues",(function(){return jb})),n.d(e,"interpolateGreens",(function(){return Ub})),n.d(e,"schemeGreens",(function(){return zb})),n.d(e,"interpolateGreys",(function(){return qb})),n.d(e,"schemeGreys",(function(){return $b})),n.d(e,"interpolatePurples",(function(){return Hb})),n.d(e,"schemePurples",(function(){return Wb})),n.d(e,"interpolateReds",(function(){return Gb})),n.d(e,"schemeReds",(function(){return Vb})),n.d(e,"interpolateOranges",(function(){return Zb})),n.d(e,"schemeOranges",(function(){return Xb})),n.d(e,"interpolateCividis",(function(){return Kb})),n.d(e,"interpolateCubehelixDefault",(function(){return Qb})),n.d(e,"interpolateRainbow",(function(){return nx})),n.d(e,"interpolateWarm",(function(){return Jb})),n.d(e,"interpolateCool",(function(){return tx})),n.d(e,"interpolateSinebow",(function(){return ox})),n.d(e,"interpolateTurbo",(function(){return sx})),n.d(e,"interpolateViridis",(function(){return ux})),n.d(e,"interpolateMagma",(function(){return lx})),n.d(e,"interpolateInferno",(function(){return hx})),n.d(e,"interpolatePlasma",(function(){return fx})),n.d(e,"create",(function(){return dx})),n.d(e,"creator",(function(){return re})),n.d(e,"local",(function(){return yx})),n.d(e,"matcher",(function(){return gt})),n.d(e,"mouse",(function(){return Dn})),n.d(e,"namespace",(function(){return Et})),n.d(e,"namespaces",(function(){return wt})),n.d(e,"clientPoint",(function(){return Nn})),n.d(e,"select",(function(){return we})),n.d(e,"selectAll",(function(){return mx})),n.d(e,"selection",(function(){return ke})),n.d(e,"selector",(function(){return dt})),n.d(e,"selectorAll",(function(){return yt})),n.d(e,"style",(function(){return It})),n.d(e,"touch",(function(){return 
Bn})),n.d(e,"touches",(function(){return vx})),n.d(e,"window",(function(){return Nt})),n.d(e,"event",(function(){return ue})),n.d(e,"customEvent",(function(){return ye})),n.d(e,"arc",(function(){return jx})),n.d(e,"area",(function(){return Wx})),n.d(e,"line",(function(){return qx})),n.d(e,"pie",(function(){return Gx})),n.d(e,"areaRadial",(function(){return t_})),n.d(e,"radialArea",(function(){return t_})),n.d(e,"lineRadial",(function(){return Jx})),n.d(e,"radialLine",(function(){return Jx})),n.d(e,"pointRadial",(function(){return e_})),n.d(e,"linkHorizontal",(function(){return u_})),n.d(e,"linkVertical",(function(){return l_})),n.d(e,"linkRadial",(function(){return h_})),n.d(e,"symbol",(function(){return M_})),n.d(e,"symbols",(function(){return A_})),n.d(e,"symbolCircle",(function(){return f_})),n.d(e,"symbolCross",(function(){return d_})),n.d(e,"symbolDiamond",(function(){return g_})),n.d(e,"symbolSquare",(function(){return __})),n.d(e,"symbolStar",(function(){return x_})),n.d(e,"symbolTriangle",(function(){return w_})),n.d(e,"symbolWye",(function(){return S_})),n.d(e,"curveBasisClosed",(function(){return I_})),n.d(e,"curveBasisOpen",(function(){return F_})),n.d(e,"curveBasis",(function(){return D_})),n.d(e,"curveBundle",(function(){return j_})),n.d(e,"curveCardinalClosed",(function(){return q_})),n.d(e,"curveCardinalOpen",(function(){return H_})),n.d(e,"curveCardinal",(function(){return U_})),n.d(e,"curveCatmullRomClosed",(function(){return K_})),n.d(e,"curveCatmullRomOpen",(function(){return J_})),n.d(e,"curveCatmullRom",(function(){return X_})),n.d(e,"curveLinearClosed",(function(){return ek})),n.d(e,"curveLinear",(function(){return zx})),n.d(e,"curveMonotoneX",(function(){return uk})),n.d(e,"curveMonotoneY",(function(){return lk})),n.d(e,"curveNatural",(function(){return dk})),n.d(e,"curveStep",(function(){return yk})),n.d(e,"curveStepAfter",(function(){return mk})),n.d(e,"curveStepBefore",(function(){return gk})),n.d(e,"stack",(function(){return 
_k})),n.d(e,"stackOffsetExpand",(function(){return kk})),n.d(e,"stackOffsetDiverging",(function(){return wk})),n.d(e,"stackOffsetNone",(function(){return vk})),n.d(e,"stackOffsetSilhouette",(function(){return Ek})),n.d(e,"stackOffsetWiggle",(function(){return Tk})),n.d(e,"stackOrderAppearance",(function(){return Ck})),n.d(e,"stackOrderAscending",(function(){return Ak})),n.d(e,"stackOrderDescending",(function(){return Ok})),n.d(e,"stackOrderInsideOut",(function(){return Nk})),n.d(e,"stackOrderNone",(function(){return bk})),n.d(e,"stackOrderReverse",(function(){return Bk})),n.d(e,"timeInterval",(function(){return Ly})),n.d(e,"timeMillisecond",(function(){return yg})),n.d(e,"timeMilliseconds",(function(){return gg})),n.d(e,"utcMillisecond",(function(){return yg})),n.d(e,"utcMilliseconds",(function(){return gg})),n.d(e,"timeSecond",(function(){return fg})),n.d(e,"timeSeconds",(function(){return dg})),n.d(e,"utcSecond",(function(){return fg})),n.d(e,"utcSeconds",(function(){return dg})),n.d(e,"timeMinute",(function(){return ug})),n.d(e,"timeMinutes",(function(){return lg})),n.d(e,"timeHour",(function(){return og})),n.d(e,"timeHours",(function(){return sg})),n.d(e,"timeDay",(function(){return rg})),n.d(e,"timeDays",(function(){return ig})),n.d(e,"timeWeek",(function(){return Uy})),n.d(e,"timeWeeks",(function(){return Xy})),n.d(e,"timeSunday",(function(){return Uy})),n.d(e,"timeSundays",(function(){return Xy})),n.d(e,"timeMonday",(function(){return $y})),n.d(e,"timeMondays",(function(){return Zy})),n.d(e,"timeTuesday",(function(){return qy})),n.d(e,"timeTuesdays",(function(){return Ky})),n.d(e,"timeWednesday",(function(){return Wy})),n.d(e,"timeWednesdays",(function(){return Qy})),n.d(e,"timeThursday",(function(){return Hy})),n.d(e,"timeThursdays",(function(){return Jy})),n.d(e,"timeFriday",(function(){return Vy})),n.d(e,"timeFridays",(function(){return tg})),n.d(e,"timeSaturday",(function(){return Gy})),n.d(e,"timeSaturdays",(function(){return 
eg})),n.d(e,"timeMonth",(function(){return jy})),n.d(e,"timeMonths",(function(){return Yy})),n.d(e,"timeYear",(function(){return Ry})),n.d(e,"timeYears",(function(){return Fy})),n.d(e,"utcMinute",(function(){return kv})),n.d(e,"utcMinutes",(function(){return wv})),n.d(e,"utcHour",(function(){return bv})),n.d(e,"utcHours",(function(){return xv})),n.d(e,"utcDay",(function(){return Dg})),n.d(e,"utcDays",(function(){return Lg})),n.d(e,"utcWeek",(function(){return vg})),n.d(e,"utcWeeks",(function(){return Tg})),n.d(e,"utcSunday",(function(){return vg})),n.d(e,"utcSundays",(function(){return Tg})),n.d(e,"utcMonday",(function(){return bg})),n.d(e,"utcMondays",(function(){return Cg})),n.d(e,"utcTuesday",(function(){return xg})),n.d(e,"utcTuesdays",(function(){return Sg})),n.d(e,"utcWednesday",(function(){return _g})),n.d(e,"utcWednesdays",(function(){return Ag})),n.d(e,"utcThursday",(function(){return kg})),n.d(e,"utcThursdays",(function(){return Mg})),n.d(e,"utcFriday",(function(){return wg})),n.d(e,"utcFridays",(function(){return Og})),n.d(e,"utcSaturday",(function(){return Eg})),n.d(e,"utcSaturdays",(function(){return Ng})),n.d(e,"utcMonth",(function(){return gv})),n.d(e,"utcMonths",(function(){return mv})),n.d(e,"utcYear",(function(){return Rg})),n.d(e,"utcYears",(function(){return Fg})),n.d(e,"timeFormatDefaultLocale",(function(){return lv})),n.d(e,"timeFormat",(function(){return $g})),n.d(e,"timeParse",(function(){return qg})),n.d(e,"utcFormat",(function(){return Wg})),n.d(e,"utcParse",(function(){return Hg})),n.d(e,"timeFormatLocale",(function(){return zg})),n.d(e,"isoFormat",(function(){return Dk})),n.d(e,"isoParse",(function(){return Lk})),n.d(e,"now",(function(){return Un})),n.d(e,"timer",(function(){return Wn})),n.d(e,"timerFlush",(function(){return Hn})),n.d(e,"timeout",(function(){return Zn})),n.d(e,"interval",(function(){return Ik})),n.d(e,"transition",(function(){return Ur})),n.d(e,"active",(function(){return Kr})),n.d(e,"interrupt",(function(){return 
sr})),n.d(e,"voronoi",(function(){return xw})),n.d(e,"zoom",(function(){return Lw})),n.d(e,"zoomTransform",(function(){return Tw})),n.d(e,"zoomIdentity",(function(){return Ew}));var r="5.16.0",i=function(t,e){return te?1:t>=e?0:NaN},a=function(t){var e;return 1===t.length&&(e=t,t=function(t,n){return i(e(t),n)}),{left:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[a],n)<0?r=a+1:i=a}return r},right:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[a],n)>0?i=a:r=a+1}return r}}};var o=a(i),s=o.right,c=o.left,u=s,l=function(t,e){null==e&&(e=h);for(var n=0,r=t.length-1,i=t[0],a=new Array(r<0?0:r);nt?1:e>=t?0:NaN},p=function(t){return null===t?NaN:+t},y=function(t,e){var n,r,i=t.length,a=0,o=-1,s=0,c=0;if(null==e)for(;++o1)return c/(a-1)},g=function(t,e){var n=y(t,e);return n?Math.sqrt(n):n},m=function(t,e){var n,r,i,a=t.length,o=-1;if(null==e){for(;++o=n)for(r=i=n;++on&&(r=n),i=n)for(r=i=n;++on&&(r=n),i0)return[t];if((r=e0)for(t=Math.ceil(t/o),e=Math.floor(e/o),a=new Array(i=Math.ceil(e-t+1));++s=0?(a>=E?10:a>=T?5:a>=C?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(a>=E?10:a>=T?5:a>=C?2:1)}function M(t,e,n){var r=Math.abs(e-t)/Math.max(0,n),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),a=r/i;return a>=E?i*=10:a>=T?i*=5:a>=C&&(i*=2),eh;)f.pop(),--d;var p,y=new Array(d+1);for(i=0;i<=d;++i)(p=y[i]=[]).x0=i>0?f[i-1]:l,p.x1=i=1)return+n(t[r-1],r-1,t);var r,i=(r-1)*e,a=Math.floor(i),o=+n(t[a],a,t);return o+(+n(t[a+1],a+1,t)-o)*(i-a)}},D=function(t,e,n){return t=x.call(t,p).sort(i),Math.ceil((n-e)/(2*(B(t,.75)-B(t,.25))*Math.pow(t.length,-1/3)))},L=function(t,e,n){return Math.ceil((n-e)/(3.5*g(t)*Math.pow(t.length,-1/3)))},I=function(t,e){var n,r,i=t.length,a=-1;if(null==e){for(;++a=n)for(r=n;++ar&&(r=n)}else for(;++a=n)for(r=n;++ar&&(r=n);return r},R=function(t,e){var n,r=t.length,i=r,a=-1,o=0;if(null==e)for(;++a=0;)for(e=(r=t[i]).length;--e>=0;)n[--o]=r[e];return n},j=function(t,e){var 
n,r,i=t.length,a=-1;if(null==e){for(;++a=n)for(r=n;++an&&(r=n)}else for(;++a=n)for(r=n;++an&&(r=n);return r},Y=function(t,e){for(var n=e.length,r=new Array(n);n--;)r[n]=t[e[n]];return r},z=function(t,e){if(n=t.length){var n,r,a=0,o=0,s=t[o];for(null==e&&(e=i);++a=0&&(n=t.slice(r+1),t=t.slice(0,r)),t&&!e.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:n}}))}function ut(t,e){for(var n,r=0,i=t.length;r0)for(var n,r,i=new Array(n),a=0;ae?1:t>=e?0:NaN}var kt="http://www.w3.org/1999/xhtml",wt={svg:"http://www.w3.org/2000/svg",xhtml:kt,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"},Et=function(t){var e=t+="",n=e.indexOf(":");return n>=0&&"xmlns"!==(e=t.slice(0,n))&&(t=t.slice(n+1)),wt.hasOwnProperty(e)?{space:wt[e],local:t}:t};function Tt(t){return function(){this.removeAttribute(t)}}function Ct(t){return function(){this.removeAttributeNS(t.space,t.local)}}function St(t,e){return function(){this.setAttribute(t,e)}}function At(t,e){return function(){this.setAttributeNS(t.space,t.local,e)}}function Mt(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttribute(t):this.setAttribute(t,n)}}function Ot(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,n)}}var Nt=function(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView};function Bt(t){return function(){this.style.removeProperty(t)}}function Dt(t,e,n){return function(){this.style.setProperty(t,e,n)}}function Lt(t,e,n){return function(){var r=e.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,n)}}function It(t,e){return t.style.getPropertyValue(e)||Nt(t).getComputedStyle(t,null).getPropertyValue(e)}function Rt(t){return function(){delete this[t]}}function Ft(t,e){return function(){this[t]=e}}function Pt(t,e){return function(){var 
n=e.apply(this,arguments);null==n?delete this[t]:this[t]=n}}function jt(t){return t.trim().split(/^|\s+/)}function Yt(t){return t.classList||new zt(t)}function zt(t){this._node=t,this._names=jt(t.getAttribute("class")||"")}function Ut(t,e){for(var n=Yt(t),r=-1,i=e.length;++r=0&&(this._names.splice(e,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};function Vt(){this.textContent=""}function Gt(t){return function(){this.textContent=t}}function Xt(t){return function(){var e=t.apply(this,arguments);this.textContent=null==e?"":e}}function Zt(){this.innerHTML=""}function Kt(t){return function(){this.innerHTML=t}}function Qt(t){return function(){var e=t.apply(this,arguments);this.innerHTML=null==e?"":e}}function Jt(){this.nextSibling&&this.parentNode.appendChild(this)}function te(){this.previousSibling&&this.parentNode.insertBefore(this,this.parentNode.firstChild)}function ee(t){return function(){var e=this.ownerDocument,n=this.namespaceURI;return n===kt&&e.documentElement.namespaceURI===kt?e.createElement(t):e.createElementNS(n,t)}}function ne(t){return function(){return this.ownerDocument.createElementNS(t.space,t.local)}}var re=function(t){var e=Et(t);return(e.local?ne:ee)(e)};function ie(){return null}function ae(){var t=this.parentNode;t&&t.removeChild(this)}function oe(){var t=this.cloneNode(!1),e=this.parentNode;return e?e.insertBefore(t,this.nextSibling):t}function se(){var t=this.cloneNode(!0),e=this.parentNode;return e?e.insertBefore(t,this.nextSibling):t}var ce={},ue=null;"undefined"!=typeof document&&("onmouseenter"in document.documentElement||(ce={mouseenter:"mouseover",mouseleave:"mouseout"}));function le(t,e,n){return t=he(t,e,n),function(e){var n=e.relatedTarget;n&&(n===this||8&n.compareDocumentPosition(this))||t.call(this,e)}}function he(t,e,n){return function(r){var i=ue;ue=r;try{t.call(this,this.__data__,e,n)}finally{ue=i}}}function fe(t){return 
t.trim().split(/^|\s+/).map((function(t){var e="",n=t.indexOf(".");return n>=0&&(e=t.slice(n+1),t=t.slice(0,n)),{type:t,name:e}}))}function de(t){return function(){var e=this.__on;if(e){for(var n,r=0,i=-1,a=e.length;r=_&&(_=x+1);!(b=m[_])&&++_=0;)(r=i[a])&&(o&&4^r.compareDocumentPosition(o)&&o.parentNode.insertBefore(r,o),o=r);return this},sort:function(t){function e(e,n){return e&&n?t(e.__data__,n.__data__):!e-!n}t||(t=_t);for(var n=this._groups,r=n.length,i=new Array(r),a=0;a1?this.each((null==e?Bt:"function"==typeof e?Lt:Dt)(t,e,null==n?"":n)):It(this.node(),t)},property:function(t,e){return arguments.length>1?this.each((null==e?Rt:"function"==typeof e?Pt:Ft)(t,e)):this.node()[t]},classed:function(t,e){var n=jt(t+"");if(arguments.length<2){for(var r=Yt(this.node()),i=-1,a=n.length;++i>8&15|e>>4&240,e>>4&15|240&e,(15&e)<<4|15&e,1):8===n?He(e>>24&255,e>>16&255,e>>8&255,(255&e)/255):4===n?He(e>>12&15|e>>8&240,e>>8&15|e>>4&240,e>>4&15|240&e,((15&e)<<4|15&e)/255):null):(e=Ie.exec(t))?new Xe(e[1],e[2],e[3],1):(e=Re.exec(t))?new Xe(255*e[1]/100,255*e[2]/100,255*e[3]/100,1):(e=Fe.exec(t))?He(e[1],e[2],e[3],e[4]):(e=Pe.exec(t))?He(255*e[1]/100,255*e[2]/100,255*e[3]/100,e[4]):(e=je.exec(t))?Je(e[1],e[2]/100,e[3]/100,1):(e=Ye.exec(t))?Je(e[1],e[2]/100,e[3]/100,e[4]):ze.hasOwnProperty(t)?We(ze[t]):"transparent"===t?new Xe(NaN,NaN,NaN,0):null}function We(t){return new Xe(t>>16&255,t>>8&255,255&t,1)}function He(t,e,n,r){return r<=0&&(t=e=n=NaN),new Xe(t,e,n,r)}function Ve(t){return t instanceof Oe||(t=qe(t)),t?new Xe((t=t.rgb()).r,t.g,t.b,t.opacity):new Xe}function Ge(t,e,n,r){return 1===arguments.length?Ve(t):new Xe(t,e,n,null==r?1:r)}function Xe(t,e,n,r){this.r=+t,this.g=+e,this.b=+n,this.opacity=+r}function Ze(){return"#"+Qe(this.r)+Qe(this.g)+Qe(this.b)}function Ke(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", 
"+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function Qe(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function Je(t,e,n,r){return r<=0?t=e=n=NaN:n<=0||n>=1?t=e=NaN:e<=0&&(t=NaN),new nn(t,e,n,r)}function tn(t){if(t instanceof nn)return new nn(t.h,t.s,t.l,t.opacity);if(t instanceof Oe||(t=qe(t)),!t)return new nn;if(t instanceof nn)return t;var e=(t=t.rgb()).r/255,n=t.g/255,r=t.b/255,i=Math.min(e,n,r),a=Math.max(e,n,r),o=NaN,s=a-i,c=(a+i)/2;return s?(o=e===a?(n-r)/s+6*(n0&&c<1?0:o,new nn(o,s,c,t.opacity)}function en(t,e,n,r){return 1===arguments.length?tn(t):new nn(t,e,n,null==r?1:r)}function nn(t,e,n,r){this.h=+t,this.s=+e,this.l=+n,this.opacity=+r}function rn(t,e,n){return 255*(t<60?e+(n-e)*t/60:t<180?n:t<240?e+(n-e)*(240-t)/60:e)}function an(t,e,n,r,i){var a=t*t,o=a*t;return((1-3*t+3*a-o)*e+(4-6*a+3*o)*n+(1+3*t+3*a-3*o)*r+o*i)/6}Ae(Oe,qe,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:Ue,formatHex:Ue,formatHsl:function(){return tn(this).formatHsl()},formatRgb:$e,toString:$e}),Ae(Xe,Ge,Me(Oe,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new Xe(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new Xe(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:Ze,formatHex:Ze,formatRgb:Ke,toString:Ke})),Ae(nn,en,Me(Oe,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new nn(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new nn(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),e=isNaN(t)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*e,i=2*n-r;return new 
Xe(rn(t>=240?t-240:t+120,i,r),rn(t,i,r),rn(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));var on=function(t){var e=t.length-1;return function(n){var r=n<=0?n=0:n>=1?(n=1,e-1):Math.floor(n*e),i=t[r],a=t[r+1],o=r>0?t[r-1]:2*i-a,s=r180||n<-180?n-360*Math.round(n/360):n):cn(isNaN(t)?e:t)}function hn(t){return 1==(t=+t)?fn:function(e,n){return n-e?function(t,e,n){return t=Math.pow(t,n),e=Math.pow(e,n)-t,n=1/n,function(r){return Math.pow(t+r*e,n)}}(e,n,t):cn(isNaN(e)?n:e)}}function fn(t,e){var n=e-t;return n?un(t,n):cn(isNaN(t)?e:t)}var dn=function t(e){var n=hn(e);function r(t,e){var r=n((t=Ge(t)).r,(e=Ge(e)).r),i=n(t.g,e.g),a=n(t.b,e.b),o=fn(t.opacity,e.opacity);return function(e){return t.r=r(e),t.g=i(e),t.b=a(e),t.opacity=o(e),t+""}}return r.gamma=t,r}(1);function pn(t){return function(e){var n,r,i=e.length,a=new Array(i),o=new Array(i),s=new Array(i);for(n=0;na&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(n=n[0])===(r=r[0])?s[o]?s[o]+=r:s[++o]=r:(s[++o]=null,c.push({i:o,x:kn(n,r)})),a=Tn.lastIndex;return a=0&&e._call.call(null,t),e=e._next;--Ln}function Vn(){Pn=(Fn=Yn.now())+jn,Ln=In=0;try{Hn()}finally{Ln=0,function(){var t,e,n=Cn,r=1/0;for(;n;)n._call?(r>n._time&&(r=n._time),t=n,n=n._next):(e=n._next,n._next=null,n=t?t._next=e:Cn=e);Sn=t,Xn(r)}(),Pn=0}}function Gn(){var t=Yn.now(),e=t-Fn;e>1e3&&(jn-=e,Fn=t)}function Xn(t){Ln||(In&&(In=clearTimeout(In)),t-Pn>24?(t<1/0&&(In=setTimeout(Vn,t-Yn.now()-jn)),Rn&&(Rn=clearInterval(Rn))):(Rn||(Fn=Yn.now(),Rn=setInterval(Gn,1e3)),Ln=1,zn(Vn)))}qn.prototype=Wn.prototype={constructor:qn,restart:function(t,e,n){if("function"!=typeof t)throw new TypeError("callback is not a 
function");n=(null==n?Un():+n)+(null==e?0:+e),this._next||Sn===this||(Sn?Sn._next=this:Cn=this,Sn=this),this._call=t,this._time=n,Xn()},stop:function(){this._call&&(this._call=null,this._time=1/0,Xn())}};var Zn=function(t,e,n){var r=new qn;return e=null==e?0:+e,r.restart((function(n){r.stop(),t(n+e)}),e,n),r},Kn=ht("start","end","cancel","interrupt"),Qn=[],Jn=function(t,e,n,r,i,a){var o=t.__transition;if(o){if(n in o)return}else t.__transition={};!function(t,e,n){var r,i=t.__transition;function a(c){var u,l,h,f;if(1!==n.state)return s();for(u in i)if((f=i[u]).name===n.name){if(3===f.state)return Zn(a);4===f.state?(f.state=6,f.timer.stop(),f.on.call("interrupt",t,t.__data__,f.index,f.group),delete i[u]):+u0)throw new Error("too late; already scheduled");return n}function er(t,e){var n=nr(t,e);if(n.state>3)throw new Error("too late; already running");return n}function nr(t,e){var n=t.__transition;if(!n||!(n=n[e]))throw new Error("transition not found");return n}var rr,ir,ar,or,sr=function(t,e){var n,r,i,a=t.__transition,o=!0;if(a){for(i in e=null==e?null:e+"",a)(n=a[i]).name===e?(r=n.state>2&&n.state<5,n.state=6,n.timer.stop(),n.on.call(r?"interrupt":"cancel",t,t.__data__,n.index,n.group),delete a[i]):o=!1;o&&delete t.__transition}},cr=180/Math.PI,ur={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1},lr=function(t,e,n,r,i,a){var o,s,c;return(o=Math.sqrt(t*t+e*e))&&(t/=o,e/=o),(c=t*n+e*r)&&(n-=t*c,r-=e*c),(s=Math.sqrt(n*n+r*r))&&(n/=s,r/=s,c/=s),t*r180?e+=360:e-t>180&&(t+=360),a.push({i:n.push(i(n)+"rotate(",null,r)-2,x:kn(t,e)})):e&&n.push(i(n)+"rotate("+e+r)}(a.rotate,o.rotate,s,c),function(t,e,n,a){t!==e?a.push({i:n.push(i(n)+"skewX(",null,r)-2,x:kn(t,e)}):e&&n.push(i(n)+"skewX("+e+r)}(a.skewX,o.skewX,s,c),function(t,e,n,r,a,o){if(t!==n||e!==r){var s=a.push(i(a)+"scale(",null,",",null,")");o.push({i:s-4,x:kn(t,n)},{i:s-2,x:kn(e,r)})}else 
1===n&&1===r||a.push(i(a)+"scale("+n+","+r+")")}(a.scaleX,a.scaleY,o.scaleX,o.scaleY,s,c),a=o=null,function(t){for(var e,n=-1,r=c.length;++n=0&&(t=t.slice(0,e)),!t||"start"===t}))}(e)?tr:er;return function(){var o=a(this,t),s=o.on;s!==r&&(i=(r=s).copy()).on(e,n),o.on=i}}var Lr=ke.prototype.constructor;function Ir(t){return function(){this.style.removeProperty(t)}}function Rr(t,e,n){return function(r){this.style.setProperty(t,e.call(this,r),n)}}function Fr(t,e,n){var r,i;function a(){var a=e.apply(this,arguments);return a!==i&&(r=(i=a)&&Rr(t,a,n)),r}return a._value=e,a}function Pr(t){return function(e){this.textContent=t.call(this,e)}}function jr(t){var e,n;function r(){var r=t.apply(this,arguments);return r!==n&&(e=(n=r)&&Pr(r)),e}return r._value=t,r}var Yr=0;function zr(t,e,n,r){this._groups=t,this._parents=e,this._name=n,this._id=r}function Ur(t){return ke().transition(t)}function $r(){return++Yr}var qr=ke.prototype;function Wr(t){return t*t*t}function Hr(t){return--t*t*t+1}function Vr(t){return((t*=2)<=1?t*t*t:(t-=2)*t*t+2)/2}zr.prototype=Ur.prototype={constructor:zr,select:function(t){var e=this._name,n=this._id;"function"!=typeof t&&(t=dt(t));for(var r=this._groups,i=r.length,a=new Array(i),o=0;o1&&n.name===e)return new zr([[t]],Zr,e,+r);return null},Qr=function(t){return function(){return t}},Jr=function(t,e,n){this.target=t,this.type=e,this.selection=n};function ti(){ue.stopImmediatePropagation()}var ei=function(){ue.preventDefault(),ue.stopImmediatePropagation()},ni={name:"drag"},ri={name:"space"},ii={name:"handle"},ai={name:"center"};function oi(t){return[+t[0],+t[1]]}function si(t){return[oi(t[0]),oi(t[1])]}function ci(t){return function(e){return Bn(e,ue.touches,t)}}var ui={name:"x",handles:["w","e"].map(mi),input:function(t,e){return null==t?null:[[+t[0],e[0][1]],[+t[1],e[1][1]]]},output:function(t){return t&&[t[0][0],t[1][0]]}},li={name:"y",handles:["n","s"].map(mi),input:function(t,e){return 
null==t?null:[[e[0][0],+t[0]],[e[1][0],+t[1]]]},output:function(t){return t&&[t[0][1],t[1][1]]}},hi={name:"xy",handles:["n","w","e","s","nw","ne","sw","se"].map(mi),input:function(t){return null==t?null:si(t)},output:function(t){return t}},fi={overlay:"crosshair",selection:"move",n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},di={e:"w",w:"e",nw:"ne",ne:"nw",se:"sw",sw:"se"},pi={n:"s",s:"n",nw:"sw",ne:"se",se:"ne",sw:"nw"},yi={overlay:1,selection:1,n:null,e:1,s:null,w:-1,nw:-1,ne:1,se:1,sw:-1},gi={overlay:1,selection:1,n:-1,e:null,s:1,w:null,nw:-1,ne:-1,se:1,sw:1};function mi(t){return{type:t}}function vi(){return!ue.ctrlKey&&!ue.button}function bi(){var t=this.ownerSVGElement||this;return t.hasAttribute("viewBox")?[[(t=t.viewBox.baseVal).x,t.y],[t.x+t.width,t.y+t.height]]:[[0,0],[t.width.baseVal.value,t.height.baseVal.value]]}function xi(){return navigator.maxTouchPoints||"ontouchstart"in this}function _i(t){for(;!t.__brush;)if(!(t=t.parentNode))return;return t.__brush}function ki(t){return t[0][0]===t[1][0]||t[0][1]===t[1][1]}function wi(t){var e=t.__brush;return e?e.dim.output(e.selection):null}function Ei(){return Si(ui)}function Ti(){return Si(li)}var Ci=function(){return Si(hi)};function Si(t){var e,n=bi,r=vi,i=xi,a=!0,o=ht("start","brush","end"),s=6;function c(e){var n=e.property("__brush",y).selectAll(".overlay").data([mi("overlay")]);n.enter().append("rect").attr("class","overlay").attr("pointer-events","all").attr("cursor",fi.overlay).merge(n).each((function(){var t=_i(this).extent;we(this).attr("x",t[0][0]).attr("y",t[0][1]).attr("width",t[1][0]-t[0][0]).attr("height",t[1][1]-t[0][1])})),e.selectAll(".selection").data([mi("selection")]).enter().append("rect").attr("class","selection").attr("cursor",fi.selection).attr("fill","#777").attr("fill-opacity",.3).attr("stroke","#fff").attr("shape-rendering","crispEdges");var r=e.selectAll(".handle").data(t.handles,(function(t){return 
t.type}));r.exit().remove(),r.enter().append("rect").attr("class",(function(t){return"handle handle--"+t.type})).attr("cursor",(function(t){return fi[t.type]})),e.each(u).attr("fill","none").attr("pointer-events","all").on("mousedown.brush",f).filter(i).on("touchstart.brush",f).on("touchmove.brush",d).on("touchend.brush touchcancel.brush",p).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function u(){var t=we(this),e=_i(this).selection;e?(t.selectAll(".selection").style("display",null).attr("x",e[0][0]).attr("y",e[0][1]).attr("width",e[1][0]-e[0][0]).attr("height",e[1][1]-e[0][1]),t.selectAll(".handle").style("display",null).attr("x",(function(t){return"e"===t.type[t.type.length-1]?e[1][0]-s/2:e[0][0]-s/2})).attr("y",(function(t){return"s"===t.type[0]?e[1][1]-s/2:e[0][1]-s/2})).attr("width",(function(t){return"n"===t.type||"s"===t.type?e[1][0]-e[0][0]+s:s})).attr("height",(function(t){return"e"===t.type||"w"===t.type?e[1][1]-e[0][1]+s:s}))):t.selectAll(".selection,.handle").style("display","none").attr("x",null).attr("y",null).attr("width",null).attr("height",null)}function l(t,e,n){var r=t.__brush.emitter;return!r||n&&r.clean?new h(t,e,n):r}function h(t,e,n){this.that=t,this.args=e,this.state=t.__brush,this.active=0,this.clean=n}function f(){if((!e||ue.touches)&&r.apply(this,arguments)){var n,i,o,s,c,h,f,d,p,y,g,m=this,v=ue.target.__data__.type,b="selection"===(a&&ue.metaKey?v="overlay":v)?ni:a&&ue.altKey?ai:ii,x=t===li?null:yi[v],_=t===ui?null:gi[v],k=_i(m),w=k.extent,E=k.selection,T=w[0][0],C=w[0][1],S=w[1][0],A=w[1][1],M=0,O=0,N=x&&_&&a&&ue.shiftKey,B=ue.touches?ci(ue.changedTouches[0].identifier):Dn,D=B(m),L=D,I=l(m,arguments,!0).beforestart();"overlay"===v?(E&&(p=!0),k.selection=E=[[n=t===li?T:D[0],o=t===ui?C:D[1]],[c=t===li?S:n,f=t===ui?A:o]]):(n=E[0][0],o=E[0][1],c=E[1][0],f=E[1][1]),i=n,s=o,h=c,d=f;var 
R=we(m).attr("pointer-events","none"),F=R.selectAll(".overlay").attr("cursor",fi[v]);if(ue.touches)I.moved=j,I.ended=z;else{var P=we(ue.view).on("mousemove.brush",j,!0).on("mouseup.brush",z,!0);a&&P.on("keydown.brush",U,!0).on("keyup.brush",$,!0),Ce(ue.view)}ti(),sr(m),u.call(m),I.start()}function j(){var t=B(m);!N||y||g||(Math.abs(t[0]-L[0])>Math.abs(t[1]-L[1])?g=!0:y=!0),L=t,p=!0,ei(),Y()}function Y(){var t;switch(M=L[0]-D[0],O=L[1]-D[1],b){case ri:case ni:x&&(M=Math.max(T-n,Math.min(S-c,M)),i=n+M,h=c+M),_&&(O=Math.max(C-o,Math.min(A-f,O)),s=o+O,d=f+O);break;case ii:x<0?(M=Math.max(T-n,Math.min(S-n,M)),i=n+M,h=c):x>0&&(M=Math.max(T-c,Math.min(S-c,M)),i=n,h=c+M),_<0?(O=Math.max(C-o,Math.min(A-o,O)),s=o+O,d=f):_>0&&(O=Math.max(C-f,Math.min(A-f,O)),s=o,d=f+O);break;case ai:x&&(i=Math.max(T,Math.min(S,n-M*x)),h=Math.max(T,Math.min(S,c+M*x))),_&&(s=Math.max(C,Math.min(A,o-O*_)),d=Math.max(C,Math.min(A,f+O*_)))}h0&&(n=i-M),_<0?f=d-O:_>0&&(o=s-O),b=ri,F.attr("cursor",fi.selection),Y());break;default:return}ei()}function $(){switch(ue.keyCode){case 16:N&&(y=g=N=!1,Y());break;case 18:b===ai&&(x<0?c=h:x>0&&(n=i),_<0?f=d:_>0&&(o=s),b=ii,Y());break;case 32:b===ri&&(ue.altKey?(x&&(c=h-M*x,n=i+M*x),_&&(f=d-O*_,o=s+O*_),b=ai):(x<0?c=h:x>0&&(n=i),_<0?f=d:_>0&&(o=s),b=ii),F.attr("cursor",fi[v]),Y());break;default:return}ei()}}function d(){l(this,arguments).moved()}function p(){l(this,arguments).ended()}function y(){var e=this.__brush||{selection:null};return e.extent=si(n.apply(this,arguments)),e.dim=t,e}return c.move=function(e,n){e.selection?e.on("start.brush",(function(){l(this,arguments).beforestart().start()})).on("interrupt.brush end.brush",(function(){l(this,arguments).end()})).tween("brush",(function(){var e=this,r=e.__brush,i=l(e,arguments),a=r.selection,o=t.input("function"==typeof n?n.apply(this,arguments):n,r.extent),s=Mn(a,o);function c(t){r.selection=1===t&&null===o?null:s(t),u.call(e),i.brush()}return null!==a&&null!==o?c:c(1)})):e.each((function(){var 
e=this,r=arguments,i=e.__brush,a=t.input("function"==typeof n?n.apply(e,r):n,i.extent),o=l(e,r).beforestart();sr(e),i.selection=null===a?null:a,u.call(e),o.start().brush().end()}))},c.clear=function(t){c.move(t,null)},h.prototype={beforestart:function(){return 1==++this.active&&(this.state.emitter=this,this.starting=!0),this},start:function(){return this.starting?(this.starting=!1,this.emit("start")):this.emit("brush"),this},brush:function(){return this.emit("brush"),this},end:function(){return 0==--this.active&&(delete this.state.emitter,this.emit("end")),this},emit:function(e){ye(new Jr(c,e,t.output(this.state.selection)),o.apply,o,[e,this.that,this.args])}},c.extent=function(t){return arguments.length?(n="function"==typeof t?t:Qr(si(t)),c):n},c.filter=function(t){return arguments.length?(r="function"==typeof t?t:Qr(!!t),c):r},c.touchable=function(t){return arguments.length?(i="function"==typeof t?t:Qr(!!t),c):i},c.handleSize=function(t){return arguments.length?(s=+t,c):s},c.keyModifiers=function(t){return arguments.length?(a=!!t,c):a},c.on=function(){var t=o.on.apply(o,arguments);return t===o?c:t},c}var Ai=Math.cos,Mi=Math.sin,Oi=Math.PI,Ni=Oi/2,Bi=2*Oi,Di=Math.max;function Li(t){return function(e,n){return t(e.source.value+e.target.value,n.source.value+n.target.value)}}var Ii=function(){var t=0,e=null,n=null,r=null;function i(i){var a,o,s,c,u,l,h=i.length,f=[],d=w(h),p=[],y=[],g=y.groups=new Array(h),m=new Array(h*h);for(a=0,u=-1;++u1e-6)if(Math.abs(l*s-c*u)>1e-6&&i){var f=n-a,d=r-o,p=s*s+c*c,y=f*f+d*d,g=Math.sqrt(p),m=Math.sqrt(h),v=i*Math.tan((Pi-Math.acos((p+h-y)/(2*g*m)))/2),b=v/m,x=v/g;Math.abs(b-1)>1e-6&&(this._+="L"+(t+b*u)+","+(e+b*l)),this._+="A"+i+","+i+",0,0,"+ +(l*f>u*d)+","+(this._x1=t+x*s)+","+(this._y1=e+x*c)}else this._+="L"+(this._x1=t)+","+(this._y1=e);else;},arc:function(t,e,n,r,i,a){t=+t,e=+e,a=!!a;var o=(n=+n)*Math.cos(r),s=n*Math.sin(r),c=t+o,u=e+s,l=1^a,h=a?r-i:i-r;if(n<0)throw new Error("negative radius: 
"+n);null===this._x1?this._+="M"+c+","+u:(Math.abs(this._x1-c)>1e-6||Math.abs(this._y1-u)>1e-6)&&(this._+="L"+c+","+u),n&&(h<0&&(h=h%ji+ji),h>Yi?this._+="A"+n+","+n+",0,1,"+l+","+(t-o)+","+(e-s)+"A"+n+","+n+",0,1,"+l+","+(this._x1=c)+","+(this._y1=u):h>1e-6&&(this._+="A"+n+","+n+",0,"+ +(h>=Pi)+","+l+","+(this._x1=t+n*Math.cos(i))+","+(this._y1=e+n*Math.sin(i))))},rect:function(t,e,n,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+e)+"h"+ +n+"v"+ +r+"h"+-n+"Z"},toString:function(){return this._}};var $i=Ui;function qi(t){return t.source}function Wi(t){return t.target}function Hi(t){return t.radius}function Vi(t){return t.startAngle}function Gi(t){return t.endAngle}var Xi=function(){var t=qi,e=Wi,n=Hi,r=Vi,i=Gi,a=null;function o(){var o,s=Ri.call(arguments),c=t.apply(this,s),u=e.apply(this,s),l=+n.apply(this,(s[0]=c,s)),h=r.apply(this,s)-Ni,f=i.apply(this,s)-Ni,d=l*Ai(h),p=l*Mi(h),y=+n.apply(this,(s[0]=u,s)),g=r.apply(this,s)-Ni,m=i.apply(this,s)-Ni;if(a||(a=o=$i()),a.moveTo(d,p),a.arc(0,0,l,h,f),h===g&&f===m||(a.quadraticCurveTo(0,0,y*Ai(g),y*Mi(g)),a.arc(0,0,y,g,m)),a.quadraticCurveTo(0,0,d,p),a.closePath(),o)return a=null,o+""||null}return o.radius=function(t){return arguments.length?(n="function"==typeof t?t:Fi(+t),o):n},o.startAngle=function(t){return arguments.length?(r="function"==typeof t?t:Fi(+t),o):r},o.endAngle=function(t){return arguments.length?(i="function"==typeof t?t:Fi(+t),o):i},o.source=function(e){return arguments.length?(t=e,o):t},o.target=function(t){return arguments.length?(e=t,o):e},o.context=function(t){return arguments.length?(a=null==t?null:t,o):a},o};function Zi(){}function Ki(t,e){var n=new Zi;if(t instanceof Zi)t.each((function(t,e){n.set(e,t)}));else if(Array.isArray(t)){var r,i=-1,a=t.length;if(null==e)for(;++i=r.length)return null!=t&&n.sort(t),null!=e?e(n):n;for(var c,u,l,h=-1,f=n.length,d=r[i++],p=Qi(),y=o();++hr.length)return n;var o,s=i[a-1];return 
null!=e&&a>=r.length?o=n.entries():(o=[],n.each((function(e,n){o.push({key:n,values:t(e,a)})}))),null!=s?o.sort((function(t,e){return s(t.key,e.key)})):o}(a(t,0,na,ra),0)},key:function(t){return r.push(t),n},sortKeys:function(t){return i[r.length-1]=t,n},sortValues:function(e){return t=e,n},rollup:function(t){return e=t,n}}};function ta(){return{}}function ea(t,e,n){t[e]=n}function na(){return Qi()}function ra(t,e,n){t.set(e,n)}function ia(){}var aa=Qi.prototype;function oa(t,e){var n=new ia;if(t instanceof ia)t.each((function(t){n.add(t)}));else if(t){var r=-1,i=t.length;if(null==e)for(;++r6/29*(6/29)*(6/29)?Math.pow(t,1/3):t/(6/29*3*(6/29))+4/29}function va(t){return t>6/29?t*t*t:6/29*3*(6/29)*(t-4/29)}function ba(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function xa(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function _a(t){if(t instanceof Ea)return new Ea(t.h,t.c,t.l,t.opacity);if(t instanceof ga||(t=da(t)),0===t.a&&0===t.b)return new Ea(NaN,0r!=d>r&&n<(f-u)*(r-l)/(d-l)+u&&(i=-i)}return i}function Pa(t,e,n){var r,i,a,o;return function(t,e,n){return(e[0]-t[0])*(n[1]-t[1])==(n[0]-t[0])*(e[1]-t[1])}(t,e,n)&&(i=t[r=+(t[0]===e[0])],a=n[r],o=e[r],i<=a&&a<=o||o<=a&&a<=i)}var ja=function(){},Ya=[[],[[[1,1.5],[.5,1]]],[[[1.5,1],[1,1.5]]],[[[1.5,1],[.5,1]]],[[[1,.5],[1.5,1]]],[[[1,1.5],[.5,1]],[[1,.5],[1.5,1]]],[[[1,.5],[1,1.5]]],[[[1,.5],[.5,1]]],[[[.5,1],[1,.5]]],[[[1,1.5],[1,.5]]],[[[.5,1],[1,.5]],[[1.5,1],[1,1.5]]],[[[1.5,1],[1,.5]]],[[[.5,1],[1.5,1]]],[[[1,1.5],[1.5,1]]],[[[.5,1],[1,1.5]]],[]],za=function(){var t=1,e=1,n=O,r=s;function i(t){var e=n(t);if(Array.isArray(e))e=e.slice().sort(La);else{var r=m(t),i=r[0],o=r[1];e=M(i,o,e),e=w(Math.floor(i/e)*e,Math.floor(o/e)*e,e)}return e.map((function(e){return a(t,e)}))}function a(n,i){var a=[],s=[];return function(n,r,i){var a,s,c,u,l,h,f=new Array,d=new 
Array;a=s=-1,u=n[0]>=r,Ya[u<<1].forEach(p);for(;++a=r,Ya[c|u<<1].forEach(p);Ya[u<<0].forEach(p);for(;++s=r,l=n[s*t]>=r,Ya[u<<1|l<<2].forEach(p);++a=r,h=l,l=n[s*t+a+1]>=r,Ya[c|u<<1|l<<2|h<<3].forEach(p);Ya[u|l<<3].forEach(p)}a=-1,l=n[s*t]>=r,Ya[l<<2].forEach(p);for(;++a=r,Ya[l<<2|h<<3].forEach(p);function p(t){var e,n,r=[t[0][0]+a,t[0][1]+s],c=[t[1][0]+a,t[1][1]+s],u=o(r),l=o(c);(e=d[u])?(n=f[l])?(delete d[e.end],delete f[n.start],e===n?(e.ring.push(c),i(e.ring)):f[e.start]=d[n.end]={start:e.start,end:n.end,ring:e.ring.concat(n.ring)}):(delete d[e.end],e.ring.push(c),d[e.end=l]=e):(e=f[l])?(n=d[u])?(delete f[e.start],delete d[n.end],e===n?(e.ring.push(c),i(e.ring)):f[n.start]=d[e.end]={start:n.start,end:e.end,ring:n.ring.concat(e.ring)}):(delete f[e.start],e.ring.unshift(r),f[e.start=u]=e):f[u]=d[l]={start:u,end:l,ring:[r,c]}}Ya[l<<3].forEach(p)}(n,i,(function(t){r(t,n,i),function(t){for(var e=0,n=t.length,r=t[n-1][1]*t[0][0]-t[n-1][0]*t[0][1];++e0?a.push([t]):s.push(t)})),s.forEach((function(t){for(var e,n=0,r=a.length;n0&&o0&&s0&&a>0))throw new Error("invalid size");return t=r,e=a,i},i.thresholds=function(t){return arguments.length?(n="function"==typeof t?t:Array.isArray(t)?Ia(Da.call(t)):Ia(t),i):n},i.smooth=function(t){return arguments.length?(r=t?s:ja,i):r===s},i};function Ua(t,e,n){for(var r=t.width,i=t.height,a=1+(n<<1),o=0;o=n&&(s>=a&&(c-=t.data[s-a+o*r]),e.data[s-n+o*r]=c/Math.min(s+1,r-1+a-s,a))}function $a(t,e,n){for(var r=t.width,i=t.height,a=1+(n<<1),o=0;o=n&&(s>=a&&(c-=t.data[o+(s-a)*r]),e.data[o+(s-n)*r]=c/Math.min(s+1,i-1+a-s,a))}function qa(t){return t[0]}function Wa(t){return t[1]}function Ha(){return 1}var Va=function(){var t=qa,e=Wa,n=Ha,r=960,i=500,a=20,o=2,s=3*a,c=r+2*s>>o,u=i+2*s>>o,l=Ia(20);function h(r){var i=new Float32Array(c*u),h=new Float32Array(c*u);r.forEach((function(r,a,l){var 
h=+t(r,a,l)+s>>o,f=+e(r,a,l)+s>>o,d=+n(r,a,l);h>=0&&h=0&&f>o),$a({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o),Ua({width:c,height:u,data:i},{width:c,height:u,data:h},a>>o),$a({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o),Ua({width:c,height:u,data:i},{width:c,height:u,data:h},a>>o),$a({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o);var d=l(i);if(!Array.isArray(d)){var p=I(i);d=M(0,p,d),(d=w(0,Math.floor(p/d)*d,d)).shift()}return za().thresholds(d).size([c,u])(i).map(f)}function f(t){return t.value*=Math.pow(2,-2*o),t.coordinates.forEach(d),t}function d(t){t.forEach(p)}function p(t){t.forEach(y)}function y(t){t[0]=t[0]*Math.pow(2,o)-s,t[1]=t[1]*Math.pow(2,o)-s}function g(){return c=r+2*(s=3*a)>>o,u=i+2*s>>o,h}return h.x=function(e){return arguments.length?(t="function"==typeof e?e:Ia(+e),h):t},h.y=function(t){return arguments.length?(e="function"==typeof t?t:Ia(+t),h):e},h.weight=function(t){return arguments.length?(n="function"==typeof t?t:Ia(+t),h):n},h.size=function(t){if(!arguments.length)return[r,i];var e=Math.ceil(t[0]),n=Math.ceil(t[1]);if(!(e>=0||e>=0))throw new Error("invalid size");return r=e,i=n,g()},h.cellSize=function(t){if(!arguments.length)return 1<=1))throw new Error("invalid cell size");return o=Math.floor(Math.log(t)/Math.LN2),g()},h.thresholds=function(t){return arguments.length?(l="function"==typeof t?t:Array.isArray(t)?Ia(Da.call(t)):Ia(t),h):l},h.bandwidth=function(t){if(!arguments.length)return Math.sqrt(a*(a+1));if(!((t=+t)>=0))throw new Error("invalid bandwidth");return a=Math.round((Math.sqrt(4*t*t+1)-1)/2),g()},h},Ga=function(t){return function(){return t}};function Xa(t,e,n,r,i,a,o,s,c,u){this.target=t,this.type=e,this.subject=n,this.identifier=r,this.active=i,this.x=a,this.y=o,this.dx=s,this.dy=c,this._=u}function Za(){return!ue.ctrlKey&&!ue.button}function Ka(){return this.parentNode}function Qa(t){return null==t?{x:ue.x,y:ue.y}:t}function Ja(){return navigator.maxTouchPoints||"ontouchstart"in 
this}Xa.prototype.on=function(){var t=this._.on.apply(this._,arguments);return t===this._?this:t};var to=function(){var t,e,n,r,i=Za,a=Ka,o=Qa,s=Ja,c={},u=ht("start","drag","end"),l=0,h=0;function f(t){t.on("mousedown.drag",d).filter(s).on("touchstart.drag",g).on("touchmove.drag",m).on("touchend.drag touchcancel.drag",v).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function d(){if(!r&&i.apply(this,arguments)){var o=b("mouse",a.apply(this,arguments),Dn,this,arguments);o&&(we(ue.view).on("mousemove.drag",p,!0).on("mouseup.drag",y,!0),Ce(ue.view),Ee(),n=!1,t=ue.clientX,e=ue.clientY,o("start"))}}function p(){if(Te(),!n){var r=ue.clientX-t,i=ue.clientY-e;n=r*r+i*i>h}c.mouse("drag")}function y(){we(ue.view).on("mousemove.drag mouseup.drag",null),Se(ue.view,n),Te(),c.mouse("end")}function g(){if(i.apply(this,arguments)){var t,e,n=ue.changedTouches,r=a.apply(this,arguments),o=n.length;for(t=0;t9999?"+"+ao(e,6):ao(e,4))+"-"+ao(t.getUTCMonth()+1,2)+"-"+ao(t.getUTCDate(),2)+(a?"T"+ao(n,2)+":"+ao(r,2)+":"+ao(i,2)+"."+ao(a,3)+"Z":i?"T"+ao(n,2)+":"+ao(r,2)+":"+ao(i,2)+"Z":r||n?"T"+ao(n,2)+":"+ao(r,2)+"Z":"")}var so=function(t){var e=new RegExp('["'+t+"\n\r]"),n=t.charCodeAt(0);function r(t,e){var r,i=[],a=t.length,o=0,s=0,c=a<=0,u=!1;function l(){if(c)return no;if(u)return u=!1,eo;var e,r,i=o;if(34===t.charCodeAt(i)){for(;o++=a?c=!0:10===(r=t.charCodeAt(o++))?u=!0:13===r&&(u=!0,10===t.charCodeAt(o)&&++o),t.slice(i+1,e-1).replace(/""/g,'"')}for(;o=(a=(y+m)/2))?y=a:m=a,(l=n>=(o=(g+v)/2))?g=o:v=o,i=d,!(d=d[h=l<<1|u]))return i[h]=p,t;if(s=+t._x.call(null,d.data),c=+t._y.call(null,d.data),e===s&&n===c)return p.next=d,i?i[h]=p:t._root=p,t;do{i=i?i[h]=new Array(4):t._root=new Array(4),(u=e>=(a=(y+m)/2))?y=a:m=a,(l=n>=(o=(g+v)/2))?g=o:v=o}while((h=l<<1|u)==(f=(c>=o)<<1|s>=a));return i[f]=d,i[h]=p,t}var ws=function(t,e,n,r,i){this.node=t,this.x0=e,this.y0=n,this.x1=r,this.y1=i};function Es(t){return t[0]}function Ts(t){return t[1]}function 
Cs(t,e,n){var r=new Ss(null==e?Es:e,null==n?Ts:n,NaN,NaN,NaN,NaN);return null==t?r:r.addAll(t)}function Ss(t,e,n,r,i,a){this._x=t,this._y=e,this._x0=n,this._y0=r,this._x1=i,this._y1=a,this._root=void 0}function As(t){for(var e={data:t.data},n=e;t=t.next;)n=n.next={data:t.data};return e}var Ms=Cs.prototype=Ss.prototype;function Os(t){return t.x+t.vx}function Ns(t){return t.y+t.vy}Ms.copy=function(){var t,e,n=new Ss(this._x,this._y,this._x0,this._y0,this._x1,this._y1),r=this._root;if(!r)return n;if(!r.length)return n._root=As(r),n;for(t=[{source:r,target:n._root=new Array(4)}];r=t.pop();)for(var i=0;i<4;++i)(e=r.source[i])&&(e.length?t.push({source:e,target:r.target[i]=new Array(4)}):r.target[i]=As(e));return n},Ms.add=function(t){var e=+this._x.call(null,t),n=+this._y.call(null,t);return ks(this.cover(e,n),e,n,t)},Ms.addAll=function(t){var e,n,r,i,a=t.length,o=new Array(a),s=new Array(a),c=1/0,u=1/0,l=-1/0,h=-1/0;for(n=0;nl&&(l=r),ih&&(h=i));if(c>l||u>h)return this;for(this.cover(c,u).cover(l,h),n=0;nt||t>=i||r>e||e>=a;)switch(s=(ef||(a=c.y0)>d||(o=c.x1)=m)<<1|t>=g)&&(c=p[p.length-1],p[p.length-1]=p[p.length-1-u],p[p.length-1-u]=c)}else{var v=t-+this._x.call(null,y.data),b=e-+this._y.call(null,y.data),x=v*v+b*b;if(x=(s=(p+g)/2))?p=s:g=s,(l=o>=(c=(y+m)/2))?y=c:m=c,e=d,!(d=d[h=l<<1|u]))return this;if(!d.length)break;(e[h+1&3]||e[h+2&3]||e[h+3&3])&&(n=e,f=h)}for(;d.data!==t;)if(r=d,!(d=d.next))return this;return(i=d.next)&&delete d.next,r?(i?r.next=i:delete r.next,this):e?(i?e[h]=i:delete e[h],(d=e[0]||e[1]||e[2]||e[3])&&d===(e[3]||e[2]||e[1]||e[0])&&!d.length&&(n?n[f]=d:this._root=d),this):(this._root=i,this)},Ms.removeAll=function(t){for(var e=0,n=t.length;ec+d||iu+d||as.index){var p=c-o.x-o.vx,y=u-o.y-o.vy,g=p*p+y*y;gt.r&&(t.r=t[e].r)}function s(){if(e){var r,i,a=e.length;for(n=new Array(a),r=0;r1?(null==n?s.remove(t):s.set(t,d(n)),e):s.get(t)},find:function(e,n,r){var i,a,o,s,c,u=0,l=t.length;for(null==r?r=1/0:r*=r,u=0;u1?(u.on(t,n),e):u.on(t)}}},Ys=function(){var 
t,e,n,r,i=xs(-30),a=1,o=1/0,s=.81;function c(r){var i,a=t.length,o=Cs(t,Rs,Fs).visitAfter(l);for(n=r,i=0;i=o)){(t.data!==e||t.next)&&(0===l&&(d+=(l=_s())*l),0===h&&(d+=(h=_s())*h),d1?r[0]+r.slice(2):r,+t.slice(n+1)]}var Ws=function(t){return(t=qs(Math.abs(t)))?t[1]:NaN},Hs=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Vs(t){if(!(e=Hs.exec(t)))throw new Error("invalid format: "+t);var e;return new Gs({fill:e[1],align:e[2],sign:e[3],symbol:e[4],zero:e[5],width:e[6],comma:e[7],precision:e[8]&&e[8].slice(1),trim:e[9],type:e[10]})}function Gs(t){this.fill=void 0===t.fill?" ":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}Vs.prototype=Gs.prototype,Gs.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};var Xs,Zs,Ks,Qs,Js=function(t,e){var n=qs(t,e);if(!n)return t+"";var r=n[0],i=n[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")},tc={"%":function(t,e){return(100*t).toFixed(e)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.abs(t=Math.round(t))>=1e21?t.toLocaleString("en").replace(/,/g,""):t.toString(10)},e:function(t,e){return t.toExponential(e)},f:function(t,e){return t.toFixed(e)},g:function(t,e){return t.toPrecision(e)},o:function(t){return Math.round(t).toString(8)},p:function(t,e){return Js(100*t,e)},r:Js,s:function(t,e){var n=qs(t,e);if(!n)return t+"";var 
r=n[0],i=n[1],a=i-(Xs=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,o=r.length;return a===o?r:a>o?r+new Array(a-o+1).join("0"):a>0?r.slice(0,a)+"."+r.slice(a):"0."+new Array(1-a).join("0")+qs(t,Math.max(0,e+a-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}},ec=function(t){return t},nc=Array.prototype.map,rc=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"],ic=function(t){var e,n,r=void 0===t.grouping||void 0===t.thousands?ec:(e=nc.call(t.grouping,Number),n=t.thousands+"",function(t,r){for(var i=t.length,a=[],o=0,s=e[0],c=0;i>0&&s>0&&(c+s+1>r&&(s=Math.max(1,r-c)),a.push(t.substring(i-=s,i+s)),!((c+=s+1)>r));)s=e[o=(o+1)%e.length];return a.reverse().join(n)}),i=void 0===t.currency?"":t.currency[0]+"",a=void 0===t.currency?"":t.currency[1]+"",o=void 0===t.decimal?".":t.decimal+"",s=void 0===t.numerals?ec:function(t){return function(e){return e.replace(/[0-9]/g,(function(e){return t[+e]}))}}(nc.call(t.numerals,String)),c=void 0===t.percent?"%":t.percent+"",u=void 0===t.minus?"-":t.minus+"",l=void 0===t.nan?"NaN":t.nan+"";function h(t){var e=(t=Vs(t)).fill,n=t.align,h=t.sign,f=t.symbol,d=t.zero,p=t.width,y=t.comma,g=t.precision,m=t.trim,v=t.type;"n"===v?(y=!0,v="g"):tc[v]||(void 0===g&&(g=12),m=!0,v="g"),(d||"0"===e&&"="===n)&&(d=!0,e="0",n="=");var b="$"===f?i:"#"===f&&/[boxX]/.test(v)?"0"+v.toLowerCase():"",x="$"===f?a:/[%p]/.test(v)?c:"",_=tc[v],k=/[defgprs%]/.test(v);function w(t){var i,a,c,f=b,w=x;if("c"===v)w=_(t)+w,t="";else{var E=(t=+t)<0||1/t<0;if(t=isNaN(t)?l:_(Math.abs(t),g),m&&(t=function(t){t:for(var e,n=t.length,r=1,i=-1;r0&&(i=0)}return i>0?t.slice(0,i)+t.slice(e+1):t}(t)),E&&0==+t&&"+"!==h&&(E=!1),f=(E?"("===h?h:u:"-"===h||"("===h?"":h)+f,w=("s"===v?rc[8+Xs/3]:"")+w+(E&&"("===h?")":""),k)for(i=-1,a=t.length;++i(c=t.charCodeAt(i))||c>57){w=(46===c?o+t.slice(i+1):t.slice(i))+w,t=t.slice(0,i);break}}y&&!d&&(t=r(t,1/0));var 
T=f.length+t.length+w.length,C=T>1)+f+t+w+C.slice(T);break;default:t=C+f+t+w}return s(t)}return g=void 0===g?6:/[gprs]/.test(v)?Math.max(1,Math.min(21,g)):Math.max(0,Math.min(20,g)),w.toString=function(){return t+""},w}return{format:h,formatPrefix:function(t,e){var n=h(((t=Vs(t)).type="f",t)),r=3*Math.max(-8,Math.min(8,Math.floor(Ws(e)/3))),i=Math.pow(10,-r),a=rc[8+r/3];return function(t){return n(i*t)+a}}}};function ac(t){return Zs=ic(t),Ks=Zs.format,Qs=Zs.formatPrefix,Zs}ac({decimal:".",thousands:",",grouping:[3],currency:["$",""],minus:"-"});var oc=function(t){return Math.max(0,-Ws(Math.abs(t)))},sc=function(t,e){return Math.max(0,3*Math.max(-8,Math.min(8,Math.floor(Ws(e)/3)))-Ws(Math.abs(t)))},cc=function(t,e){return t=Math.abs(t),e=Math.abs(e)-t,Math.max(0,Ws(e)-Ws(t))+1},uc=function(){return new lc};function lc(){this.reset()}lc.prototype={constructor:lc,reset:function(){this.s=this.t=0},add:function(t){fc(hc,t,this.t),fc(this,hc.s,this.s),this.s?this.t+=hc.t:this.s=hc.t},valueOf:function(){return this.s}};var hc=new lc;function fc(t,e,n){var r=t.s=e+n,i=r-e,a=r-i;t.t=e-a+(n-i)}var dc=Math.PI,pc=dc/2,yc=dc/4,gc=2*dc,mc=180/dc,vc=dc/180,bc=Math.abs,xc=Math.atan,_c=Math.atan2,kc=Math.cos,wc=Math.ceil,Ec=Math.exp,Tc=(Math.floor,Math.log),Cc=Math.pow,Sc=Math.sin,Ac=Math.sign||function(t){return t>0?1:t<0?-1:0},Mc=Math.sqrt,Oc=Math.tan;function Nc(t){return t>1?0:t<-1?dc:Math.acos(t)}function Bc(t){return t>1?pc:t<-1?-pc:Math.asin(t)}function Dc(t){return(t=Sc(t/2))*t}function Lc(){}function Ic(t,e){t&&Fc.hasOwnProperty(t.type)&&Fc[t.type](t,e)}var Rc={Feature:function(t,e){Ic(t.geometry,e)},FeatureCollection:function(t,e){for(var n=t.features,r=-1,i=n.length;++r=0?1:-1,i=r*n,a=kc(e=(e*=vc)/2+yc),o=Sc(e),s=qc*o,c=$c*a+s*kc(i),u=s*r*Sc(i);Hc.add(_c(u,c)),Uc=t,$c=a,qc=o}var Jc=function(t){return Vc.reset(),Wc(t,Gc),2*Vc};function tu(t){return[_c(t[1],t[0]),Bc(t[2])]}function eu(t){var e=t[0],n=t[1],r=kc(n);return[r*kc(e),r*Sc(e),Sc(n)]}function nu(t,e){return 
t[0]*e[0]+t[1]*e[1]+t[2]*e[2]}function ru(t,e){return[t[1]*e[2]-t[2]*e[1],t[2]*e[0]-t[0]*e[2],t[0]*e[1]-t[1]*e[0]]}function iu(t,e){t[0]+=e[0],t[1]+=e[1],t[2]+=e[2]}function au(t,e){return[t[0]*e,t[1]*e,t[2]*e]}function ou(t){var e=Mc(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]);t[0]/=e,t[1]/=e,t[2]/=e}var su,cu,uu,lu,hu,fu,du,pu,yu,gu,mu=uc(),vu={point:bu,lineStart:_u,lineEnd:ku,polygonStart:function(){vu.point=wu,vu.lineStart=Eu,vu.lineEnd=Tu,mu.reset(),Gc.polygonStart()},polygonEnd:function(){Gc.polygonEnd(),vu.point=bu,vu.lineStart=_u,vu.lineEnd=ku,Hc<0?(su=-(uu=180),cu=-(lu=90)):mu>1e-6?lu=90:mu<-1e-6&&(cu=-90),gu[0]=su,gu[1]=uu},sphere:function(){su=-(uu=180),cu=-(lu=90)}};function bu(t,e){yu.push(gu=[su=t,uu=t]),elu&&(lu=e)}function xu(t,e){var n=eu([t*vc,e*vc]);if(pu){var r=ru(pu,n),i=ru([r[1],-r[0],0],r);ou(i),i=tu(i);var a,o=t-hu,s=o>0?1:-1,c=i[0]*mc*s,u=bc(o)>180;u^(s*hulu&&(lu=a):u^(s*hu<(c=(c+360)%360-180)&&clu&&(lu=e)),u?tCu(su,uu)&&(uu=t):Cu(t,uu)>Cu(su,uu)&&(su=t):uu>=su?(tuu&&(uu=t)):t>hu?Cu(su,t)>Cu(su,uu)&&(uu=t):Cu(t,uu)>Cu(su,uu)&&(su=t)}else yu.push(gu=[su=t,uu=t]);elu&&(lu=e),pu=n,hu=t}function _u(){vu.point=xu}function ku(){gu[0]=su,gu[1]=uu,vu.point=bu,pu=null}function wu(t,e){if(pu){var n=t-hu;mu.add(bc(n)>180?n+(n>0?360:-360):n)}else fu=t,du=e;Gc.point(t,e),xu(t,e)}function Eu(){Gc.lineStart()}function Tu(){wu(fu,du),Gc.lineEnd(),bc(mu)>1e-6&&(su=-(uu=180)),gu[0]=su,gu[1]=uu,pu=null}function Cu(t,e){return(e-=t)<0?e+360:e}function Su(t,e){return t[0]-e[0]}function Au(t,e){return t[0]<=t[1]?t[0]<=e&&e<=t[1]:eCu(r[0],r[1])&&(r[1]=i[1]),Cu(i[0],r[1])>Cu(r[0],r[1])&&(r[0]=i[0])):a.push(r=i);for(o=-1/0,e=0,r=a[n=a.length-1];e<=n;r=i,++e)i=a[e],(s=Cu(r[1],i[0]))>o&&(o=s,su=i[0],uu=r[1])}return yu=gu=null,su===1/0||cu===1/0?[[NaN,NaN],[NaN,NaN]]:[[su,cu],[uu,lu]]},Hu={sphere:Lc,point:Vu,lineStart:Xu,lineEnd:Qu,polygonStart:function(){Hu.lineStart=Ju,Hu.lineEnd=tl},polygonEnd:function(){Hu.lineStart=Xu,Hu.lineEnd=Qu}};function Vu(t,e){t*=vc;var 
n=kc(e*=vc);Gu(n*kc(t),n*Sc(t),Sc(e))}function Gu(t,e,n){++Mu,Nu+=(t-Nu)/Mu,Bu+=(e-Bu)/Mu,Du+=(n-Du)/Mu}function Xu(){Hu.point=Zu}function Zu(t,e){t*=vc;var n=kc(e*=vc);Uu=n*kc(t),$u=n*Sc(t),qu=Sc(e),Hu.point=Ku,Gu(Uu,$u,qu)}function Ku(t,e){t*=vc;var n=kc(e*=vc),r=n*kc(t),i=n*Sc(t),a=Sc(e),o=_c(Mc((o=$u*a-qu*i)*o+(o=qu*r-Uu*a)*o+(o=Uu*i-$u*r)*o),Uu*r+$u*i+qu*a);Ou+=o,Lu+=o*(Uu+(Uu=r)),Iu+=o*($u+($u=i)),Ru+=o*(qu+(qu=a)),Gu(Uu,$u,qu)}function Qu(){Hu.point=Vu}function Ju(){Hu.point=el}function tl(){nl(Yu,zu),Hu.point=Vu}function el(t,e){Yu=t,zu=e,t*=vc,e*=vc,Hu.point=nl;var n=kc(e);Uu=n*kc(t),$u=n*Sc(t),qu=Sc(e),Gu(Uu,$u,qu)}function nl(t,e){t*=vc;var n=kc(e*=vc),r=n*kc(t),i=n*Sc(t),a=Sc(e),o=$u*a-qu*i,s=qu*r-Uu*a,c=Uu*i-$u*r,u=Mc(o*o+s*s+c*c),l=Bc(u),h=u&&-l/u;Fu+=h*o,Pu+=h*s,ju+=h*c,Ou+=l,Lu+=l*(Uu+(Uu=r)),Iu+=l*($u+($u=i)),Ru+=l*(qu+(qu=a)),Gu(Uu,$u,qu)}var rl=function(t){Mu=Ou=Nu=Bu=Du=Lu=Iu=Ru=Fu=Pu=ju=0,Wc(t,Hu);var e=Fu,n=Pu,r=ju,i=e*e+n*n+r*r;return i<1e-12&&(e=Lu,n=Iu,r=Ru,Ou<1e-6&&(e=Nu,n=Bu,r=Du),(i=e*e+n*n+r*r)<1e-12)?[NaN,NaN]:[_c(n,e)*mc,Bc(r/Mc(i))*mc]},il=function(t){return function(){return t}},al=function(t,e){function n(n,r){return n=t(n,r),e(n[0],n[1])}return t.invert&&e.invert&&(n.invert=function(n,r){return(n=e.invert(n,r))&&t.invert(n[0],n[1])}),n};function ol(t,e){return[bc(t)>dc?t+Math.round(-t/gc)*gc:t,e]}function sl(t,e,n){return(t%=gc)?e||n?al(ul(t),ll(e,n)):ul(t):e||n?ll(e,n):ol}function cl(t){return function(e,n){return[(e+=t)>dc?e-gc:e<-dc?e+gc:e,n]}}function ul(t){var e=cl(t);return e.invert=cl(-t),e}function ll(t,e){var n=kc(t),r=Sc(t),i=kc(e),a=Sc(e);function o(t,e){var o=kc(e),s=kc(t)*o,c=Sc(t)*o,u=Sc(e),l=u*n+s*r;return[_c(c*i-l*a,s*n-u*r),Bc(l*i+c*a)]}return o.invert=function(t,e){var o=kc(e),s=kc(t)*o,c=Sc(t)*o,u=Sc(e),l=u*i-c*a;return[_c(c*i+u*a,s*n+l*r),Bc(l*n-s*r)]},o}ol.invert=ol;var hl=function(t){function e(e){return(e=t(e[0]*vc,e[1]*vc))[0]*=mc,e[1]*=mc,e}return 
t=sl(t[0]*vc,t[1]*vc,t.length>2?t[2]*vc:0),e.invert=function(e){return(e=t.invert(e[0]*vc,e[1]*vc))[0]*=mc,e[1]*=mc,e},e};function fl(t,e,n,r,i,a){if(n){var o=kc(e),s=Sc(e),c=r*n;null==i?(i=e+r*gc,a=e-c/2):(i=dl(o,i),a=dl(o,a),(r>0?ia)&&(i+=r*gc));for(var u,l=i;r>0?l>a:l1&&e.push(e.pop().concat(e.shift()))},result:function(){var n=e;return e=[],t=null,n}}},gl=function(t,e){return bc(t[0]-e[0])<1e-6&&bc(t[1]-e[1])<1e-6};function ml(t,e,n,r){this.x=t,this.z=e,this.o=n,this.e=r,this.v=!1,this.n=this.p=null}var vl=function(t,e,n,r,i){var a,o,s=[],c=[];if(t.forEach((function(t){if(!((e=t.length-1)<=0)){var e,n,r=t[0],o=t[e];if(gl(r,o)){if(!r[2]&&!o[2]){for(i.lineStart(),a=0;a=0;--a)i.point((l=u[a])[0],l[1]);else r(f.x,f.p.x,-1,i);f=f.p}u=(f=f.o).z,d=!d}while(!f.v);i.lineEnd()}}};function bl(t){if(e=t.length){for(var e,n,r=0,i=t[0];++r=0?1:-1,T=E*w,C=T>dc,S=y*_;if(xl.add(_c(S*E*Sc(T),g*k+S*kc(T))),o+=C?w+E*gc:w,C^d>=n^b>=n){var A=ru(eu(f),eu(v));ou(A);var M=ru(a,A);ou(M);var O=(C^w>=0?-1:1)*Bc(M[2]);(r>O||r===O&&(A[0]||A[1]))&&(s+=C^w>=0?1:-1)}}return(o<-1e-6||o<1e-6&&xl<-1e-6)^1&s},wl=function(t,e,n,r){return function(i){var a,o,s,c=e(i),u=yl(),l=e(u),h=!1,f={point:d,lineStart:y,lineEnd:g,polygonStart:function(){f.point=m,f.lineStart=v,f.lineEnd=b,o=[],a=[]},polygonEnd:function(){f.point=d,f.lineStart=y,f.lineEnd=g,o=P(o);var t=kl(a,r);o.length?(h||(i.polygonStart(),h=!0),vl(o,Tl,t,n,i)):t&&(h||(i.polygonStart(),h=!0),i.lineStart(),n(null,null,1,i),i.lineEnd()),h&&(i.polygonEnd(),h=!1),o=a=null},sphere:function(){i.polygonStart(),i.lineStart(),n(null,null,1,i),i.lineEnd(),i.polygonEnd()}};function d(e,n){t(e,n)&&i.point(e,n)}function p(t,e){c.point(t,e)}function y(){f.point=p,c.lineStart()}function g(){f.point=d,c.lineEnd()}function m(t,e){s.push([t,e]),l.point(t,e)}function v(){l.lineStart(),s=[]}function b(){m(s[0][0],s[0][1]),l.lineEnd();var 
t,e,n,r,c=l.clean(),f=u.result(),d=f.length;if(s.pop(),a.push(s),s=null,d)if(1&c){if((e=(n=f[0]).length-1)>0){for(h||(i.polygonStart(),h=!0),i.lineStart(),t=0;t1&&2&c&&f.push(f.pop().concat(f.shift())),o.push(f.filter(El))}return f}};function El(t){return t.length>1}function Tl(t,e){return((t=t.x)[0]<0?t[1]-pc-1e-6:pc-t[1])-((e=e.x)[0]<0?e[1]-pc-1e-6:pc-e[1])}var Cl=wl((function(){return!0}),(function(t){var e,n=NaN,r=NaN,i=NaN;return{lineStart:function(){t.lineStart(),e=1},point:function(a,o){var s=a>0?dc:-dc,c=bc(a-n);bc(c-dc)<1e-6?(t.point(n,r=(r+o)/2>0?pc:-pc),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(s,r),t.point(a,r),e=0):i!==s&&c>=dc&&(bc(n-i)<1e-6&&(n-=1e-6*i),bc(a-s)<1e-6&&(a-=1e-6*s),r=function(t,e,n,r){var i,a,o=Sc(t-n);return bc(o)>1e-6?xc((Sc(e)*(a=kc(r))*Sc(n)-Sc(r)*(i=kc(e))*Sc(t))/(i*a*o)):(e+r)/2}(n,r,a,o),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(s,r),e=0),t.point(n=a,r=o),i=s},lineEnd:function(){t.lineEnd(),n=r=NaN},clean:function(){return 2-e}}}),(function(t,e,n,r){var i;if(null==t)i=n*pc,r.point(-dc,i),r.point(0,i),r.point(dc,i),r.point(dc,0),r.point(dc,-i),r.point(0,-i),r.point(-dc,-i),r.point(-dc,0),r.point(-dc,i);else if(bc(t[0]-e[0])>1e-6){var a=t[0]0,i=bc(e)>1e-6;function a(t,n){return kc(t)*kc(n)>e}function o(t,n,r){var i=[1,0,0],a=ru(eu(t),eu(n)),o=nu(a,a),s=a[0],c=o-s*s;if(!c)return!r&&t;var u=e*o/c,l=-e*s/c,h=ru(i,a),f=au(i,u);iu(f,au(a,l));var d=h,p=nu(f,d),y=nu(d,d),g=p*p-y*(nu(f,f)-1);if(!(g<0)){var m=Mc(g),v=au(d,(-p-m)/y);if(iu(v,f),v=tu(v),!r)return v;var b,x=t[0],_=n[0],k=t[1],w=n[1];_0^v[1]<(bc(v[0]-x)<1e-6?k:w):k<=v[1]&&v[1]<=w:E>dc^(x<=v[0]&&v[0]<=_)){var C=au(d,(-p+m)/y);return iu(C,f),[v,tu(C)]}}}function s(e,n){var i=r?t:dc-t,a=0;return e<-i?a|=1:e>i&&(a|=2),n<-i?a|=4:n>i&&(a|=8),a}return wl(a,(function(t){var e,n,c,u,l;return{lineStart:function(){u=c=!1,l=1},point:function(h,f){var 
d,p=[h,f],y=a(h,f),g=r?y?0:s(h,f):y?s(h+(h<0?dc:-dc),f):0;if(!e&&(u=c=y)&&t.lineStart(),y!==c&&(!(d=o(e,p))||gl(e,d)||gl(p,d))&&(p[2]=1),y!==c)l=0,y?(t.lineStart(),d=o(p,e),t.point(d[0],d[1])):(d=o(e,p),t.point(d[0],d[1],2),t.lineEnd()),e=d;else if(i&&e&&r^y){var m;g&n||!(m=o(p,e,!0))||(l=0,r?(t.lineStart(),t.point(m[0][0],m[0][1]),t.point(m[1][0],m[1][1]),t.lineEnd()):(t.point(m[1][0],m[1][1]),t.lineEnd(),t.lineStart(),t.point(m[0][0],m[0][1],3)))}!y||e&&gl(e,p)||t.point(p[0],p[1]),e=p,c=y,n=g},lineEnd:function(){c&&t.lineEnd(),e=null},clean:function(){return l|(u&&c)<<1}}}),(function(e,r,i,a){fl(a,t,n,i,e,r)}),r?[0,-t]:[-dc,t-dc])};function Al(t,e,n,r){function i(i,a){return t<=i&&i<=n&&e<=a&&a<=r}function a(i,a,s,u){var l=0,h=0;if(null==i||(l=o(i,s))!==(h=o(a,s))||c(i,a)<0^s>0)do{u.point(0===l||3===l?t:n,l>1?r:e)}while((l=(l+s+4)%4)!==h);else u.point(a[0],a[1])}function o(r,i){return bc(r[0]-t)<1e-6?i>0?0:3:bc(r[0]-n)<1e-6?i>0?2:1:bc(r[1]-e)<1e-6?i>0?1:0:i>0?3:2}function s(t,e){return c(t.x,e.x)}function c(t,e){var n=o(t,1),r=o(e,1);return n!==r?n-r:0===n?e[1]-t[1]:1===n?t[0]-e[0]:2===n?t[1]-e[1]:e[0]-t[0]}return function(o){var c,u,l,h,f,d,p,y,g,m,v,b=o,x=yl(),_={point:k,lineStart:function(){_.point=w,u&&u.push(l=[]);m=!0,g=!1,p=y=NaN},lineEnd:function(){c&&(w(h,f),d&&g&&x.rejoin(),c.push(x.result()));_.point=k,g&&b.lineEnd()},polygonStart:function(){b=x,c=[],u=[],v=!0},polygonEnd:function(){var e=function(){for(var e=0,n=0,i=u.length;nr&&(f-a)*(r-o)>(d-o)*(t-a)&&++e:d<=r&&(f-a)*(r-o)<(d-o)*(t-a)&&--e;return e}(),n=v&&e,i=(c=P(c)).length;(n||i)&&(o.polygonStart(),n&&(o.lineStart(),a(null,null,1,o),o.lineEnd()),i&&vl(c,s,e,a,o),o.polygonEnd());b=o,c=u=l=null}};function k(t,e){i(t,e)&&b.point(t,e)}function w(a,o){var s=i(a,o);if(u&&l.push([a,o]),m)h=a,f=o,d=s,m=!1,s&&(b.lineStart(),b.point(a,o));else if(s&&g)b.point(a,o);else{var 
c=[p=Math.max(-1e9,Math.min(1e9,p)),y=Math.max(-1e9,Math.min(1e9,y))],x=[a=Math.max(-1e9,Math.min(1e9,a)),o=Math.max(-1e9,Math.min(1e9,o))];!function(t,e,n,r,i,a){var o,s=t[0],c=t[1],u=0,l=1,h=e[0]-s,f=e[1]-c;if(o=n-s,h||!(o>0)){if(o/=h,h<0){if(o0){if(o>l)return;o>u&&(u=o)}if(o=i-s,h||!(o<0)){if(o/=h,h<0){if(o>l)return;o>u&&(u=o)}else if(h>0){if(o0)){if(o/=f,f<0){if(o0){if(o>l)return;o>u&&(u=o)}if(o=a-c,f||!(o<0)){if(o/=f,f<0){if(o>l)return;o>u&&(u=o)}else if(f>0){if(o0&&(t[0]=s+u*h,t[1]=c+u*f),l<1&&(e[0]=s+l*h,e[1]=c+l*f),!0}}}}}(c,x,t,e,n,r)?s&&(b.lineStart(),b.point(a,o),v=!1):(g||(b.lineStart(),b.point(c[0],c[1])),b.point(x[0],x[1]),s||b.lineEnd(),v=!1)}p=a,y=o,g=s}return _}}var Ml,Ol,Nl,Bl=function(){var t,e,n,r=0,i=0,a=960,o=500;return n={stream:function(n){return t&&e===n?t:t=Al(r,i,a,o)(e=n)},extent:function(s){return arguments.length?(r=+s[0][0],i=+s[0][1],a=+s[1][0],o=+s[1][1],t=e=null,n):[[r,i],[a,o]]}}},Dl=uc(),Ll={sphere:Lc,point:Lc,lineStart:function(){Ll.point=Rl,Ll.lineEnd=Il},lineEnd:Lc,polygonStart:Lc,polygonEnd:Lc};function Il(){Ll.point=Ll.lineEnd=Lc}function Rl(t,e){Ml=t*=vc,Ol=Sc(e*=vc),Nl=kc(e),Ll.point=Fl}function Fl(t,e){t*=vc;var n=Sc(e*=vc),r=kc(e),i=bc(t-Ml),a=kc(i),o=r*Sc(i),s=Nl*n-Ol*r*a,c=Ol*n+Nl*r*a;Dl.add(_c(Mc(o*o+s*s),c)),Ml=t,Ol=n,Nl=r}var Pl=function(t){return Dl.reset(),Wc(t,Ll),+Dl},jl=[null,null],Yl={type:"LineString",coordinates:jl},zl=function(t,e){return jl[0]=t,jl[1]=e,Pl(Yl)},Ul={Feature:function(t,e){return ql(t.geometry,e)},FeatureCollection:function(t,e){for(var n=t.features,r=-1,i=n.length;++r0&&(i=zl(t[a],t[a-1]))>0&&n<=i&&r<=i&&(n+r-i)*(1-Math.pow((n-r)/i,2))<1e-12*i)return!0;n=r}return!1}function Vl(t,e){return!!kl(t.map(Gl),Xl(e))}function Gl(t){return(t=t.map(Xl)).pop(),t}function Xl(t){return[t[0]*vc,t[1]*vc]}var Zl=function(t,e){return(t&&Ul.hasOwnProperty(t.type)?Ul[t.type]:ql)(t,e)};function Kl(t,e,n){var r=w(t,e-1e-6,n).concat(e);return function(t){return r.map((function(e){return[t,e]}))}}function 
Ql(t,e,n){var r=w(t,e-1e-6,n).concat(e);return function(t){return r.map((function(e){return[e,t]}))}}function Jl(){var t,e,n,r,i,a,o,s,c,u,l,h,f=10,d=f,p=90,y=360,g=2.5;function m(){return{type:"MultiLineString",coordinates:v()}}function v(){return w(wc(r/p)*p,n,p).map(l).concat(w(wc(s/y)*y,o,y).map(h)).concat(w(wc(e/f)*f,t,f).filter((function(t){return bc(t%p)>1e-6})).map(c)).concat(w(wc(a/d)*d,i,d).filter((function(t){return bc(t%y)>1e-6})).map(u))}return m.lines=function(){return v().map((function(t){return{type:"LineString",coordinates:t}}))},m.outline=function(){return{type:"Polygon",coordinates:[l(r).concat(h(o).slice(1),l(n).reverse().slice(1),h(s).reverse().slice(1))]}},m.extent=function(t){return arguments.length?m.extentMajor(t).extentMinor(t):m.extentMinor()},m.extentMajor=function(t){return arguments.length?(r=+t[0][0],n=+t[1][0],s=+t[0][1],o=+t[1][1],r>n&&(t=r,r=n,n=t),s>o&&(t=s,s=o,o=t),m.precision(g)):[[r,s],[n,o]]},m.extentMinor=function(n){return arguments.length?(e=+n[0][0],t=+n[1][0],a=+n[0][1],i=+n[1][1],e>t&&(n=e,e=t,t=n),a>i&&(n=a,a=i,i=n),m.precision(g)):[[e,a],[t,i]]},m.step=function(t){return arguments.length?m.stepMajor(t).stepMinor(t):m.stepMinor()},m.stepMajor=function(t){return arguments.length?(p=+t[0],y=+t[1],m):[p,y]},m.stepMinor=function(t){return arguments.length?(f=+t[0],d=+t[1],m):[f,d]},m.precision=function(f){return arguments.length?(g=+f,c=Kl(a,i,90),u=Ql(e,t,g),l=Kl(s,o,90),h=Ql(r,n,g),m):g},m.extentMajor([[-180,-89.999999],[180,89.999999]]).extentMinor([[-180,-80.000001],[180,80.000001]])}function th(){return Jl()()}var eh,nh,rh,ih,ah=function(t,e){var n=t[0]*vc,r=t[1]*vc,i=e[0]*vc,a=e[1]*vc,o=kc(r),s=Sc(r),c=kc(a),u=Sc(a),l=o*kc(n),h=o*Sc(n),f=c*kc(i),d=c*Sc(i),p=2*Bc(Mc(Dc(a-r)+o*c*Dc(i-n))),y=Sc(p),g=p?function(t){var e=Sc(t*=p)/y,n=Sc(p-t)/y,r=n*l+e*f,i=n*h+e*d,a=n*s+e*u;return[_c(i,r)*mc,_c(a,Mc(r*r+i*i))*mc]}:function(){return[n*mc,r*mc]};return g.distance=p,g},oh=function(t){return 
t},sh=uc(),ch=uc(),uh={point:Lc,lineStart:Lc,lineEnd:Lc,polygonStart:function(){uh.lineStart=lh,uh.lineEnd=dh},polygonEnd:function(){uh.lineStart=uh.lineEnd=uh.point=Lc,sh.add(bc(ch)),ch.reset()},result:function(){var t=sh/2;return sh.reset(),t}};function lh(){uh.point=hh}function hh(t,e){uh.point=fh,eh=rh=t,nh=ih=e}function fh(t,e){ch.add(ih*t-rh*e),rh=t,ih=e}function dh(){fh(eh,nh)}var ph=uh,yh=1/0,gh=yh,mh=-yh,vh=mh;var bh,xh,_h,kh,wh={point:function(t,e){tmh&&(mh=t);evh&&(vh=e)},lineStart:Lc,lineEnd:Lc,polygonStart:Lc,polygonEnd:Lc,result:function(){var t=[[yh,gh],[mh,vh]];return mh=vh=-(gh=yh=1/0),t}},Eh=0,Th=0,Ch=0,Sh=0,Ah=0,Mh=0,Oh=0,Nh=0,Bh=0,Dh={point:Lh,lineStart:Ih,lineEnd:Ph,polygonStart:function(){Dh.lineStart=jh,Dh.lineEnd=Yh},polygonEnd:function(){Dh.point=Lh,Dh.lineStart=Ih,Dh.lineEnd=Ph},result:function(){var t=Bh?[Oh/Bh,Nh/Bh]:Mh?[Sh/Mh,Ah/Mh]:Ch?[Eh/Ch,Th/Ch]:[NaN,NaN];return Eh=Th=Ch=Sh=Ah=Mh=Oh=Nh=Bh=0,t}};function Lh(t,e){Eh+=t,Th+=e,++Ch}function Ih(){Dh.point=Rh}function Rh(t,e){Dh.point=Fh,Lh(_h=t,kh=e)}function Fh(t,e){var n=t-_h,r=e-kh,i=Mc(n*n+r*r);Sh+=i*(_h+t)/2,Ah+=i*(kh+e)/2,Mh+=i,Lh(_h=t,kh=e)}function Ph(){Dh.point=Lh}function jh(){Dh.point=zh}function Yh(){Uh(bh,xh)}function zh(t,e){Dh.point=Uh,Lh(bh=_h=t,xh=kh=e)}function Uh(t,e){var n=t-_h,r=e-kh,i=Mc(n*n+r*r);Sh+=i*(_h+t)/2,Ah+=i*(kh+e)/2,Mh+=i,Oh+=(i=kh*t-_h*e)*(_h+t),Nh+=i*(kh+e),Bh+=3*i,Lh(_h=t,kh=e)}var $h=Dh;function qh(t){this._context=t}qh.prototype={_radius:4.5,pointRadius:function(t){return this._radius=t,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._context.moveTo(t,e),this._point=1;break;case 1:this._context.lineTo(t,e);break;default:this._context.moveTo(t+this._radius,e),this._context.arc(t,e,this._radius,0,gc)}},result:Lc};var 
Wh,Hh,Vh,Gh,Xh,Zh=uc(),Kh={point:Lc,lineStart:function(){Kh.point=Qh},lineEnd:function(){Wh&&Jh(Hh,Vh),Kh.point=Lc},polygonStart:function(){Wh=!0},polygonEnd:function(){Wh=null},result:function(){var t=+Zh;return Zh.reset(),t}};function Qh(t,e){Kh.point=Jh,Hh=Gh=t,Vh=Xh=e}function Jh(t,e){Gh-=t,Xh-=e,Zh.add(Mc(Gh*Gh+Xh*Xh)),Gh=t,Xh=e}var tf=Kh;function ef(){this._string=[]}function nf(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}ef.prototype={_radius:4.5,_circle:nf(4.5),pointRadius:function(t){return(t=+t)!==this._radius&&(this._radius=t,this._circle=null),this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._string.push("Z"),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._string.push("M",t,",",e),this._point=1;break;case 1:this._string.push("L",t,",",e);break;default:null==this._circle&&(this._circle=nf(this._radius)),this._string.push("M",t,",",e,this._circle)}},result:function(){if(this._string.length){var t=this._string.join("");return this._string=[],t}return null}};var rf=function(t,e){var n,r,i=4.5;function a(t){return t&&("function"==typeof i&&r.pointRadius(+i.apply(this,arguments)),Wc(t,n(r))),r.result()}return a.area=function(t){return Wc(t,n(ph)),ph.result()},a.measure=function(t){return Wc(t,n(tf)),tf.result()},a.bounds=function(t){return Wc(t,n(wh)),wh.result()},a.centroid=function(t){return Wc(t,n($h)),$h.result()},a.projection=function(e){return arguments.length?(n=null==e?(t=null,oh):(t=e).stream,a):t},a.context=function(t){return arguments.length?(r=null==t?(e=null,new ef):new qh(e=t),"function"!=typeof i&&r.pointRadius(i),a):e},a.pointRadius=function(t){return arguments.length?(i="function"==typeof t?t:(r.pointRadius(+t),+t),a):i},a.projection(t).context(e)},af=function(t){return{stream:of(t)}};function of(t){return function(e){var n=new sf;for(var r in t)n[r]=t[r];return 
n.stream=e,n}}function sf(){}function cf(t,e,n){var r=t.clipExtent&&t.clipExtent();return t.scale(150).translate([0,0]),null!=r&&t.clipExtent(null),Wc(n,t.stream(wh)),e(wh.result()),null!=r&&t.clipExtent(r),t}function uf(t,e,n){return cf(t,(function(n){var r=e[1][0]-e[0][0],i=e[1][1]-e[0][1],a=Math.min(r/(n[1][0]-n[0][0]),i/(n[1][1]-n[0][1])),o=+e[0][0]+(r-a*(n[1][0]+n[0][0]))/2,s=+e[0][1]+(i-a*(n[1][1]+n[0][1]))/2;t.scale(150*a).translate([o,s])}),n)}function lf(t,e,n){return uf(t,[[0,0],e],n)}function hf(t,e,n){return cf(t,(function(n){var r=+e,i=r/(n[1][0]-n[0][0]),a=(r-i*(n[1][0]+n[0][0]))/2,o=-i*n[0][1];t.scale(150*i).translate([a,o])}),n)}function ff(t,e,n){return cf(t,(function(n){var r=+e,i=r/(n[1][1]-n[0][1]),a=-i*n[0][0],o=(r-i*(n[1][1]+n[0][1]))/2;t.scale(150*i).translate([a,o])}),n)}sf.prototype={constructor:sf,point:function(t,e){this.stream.point(t,e)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}};var df=kc(30*vc),pf=function(t,e){return+e?function(t,e){function n(r,i,a,o,s,c,u,l,h,f,d,p,y,g){var m=u-r,v=l-i,b=m*m+v*v;if(b>4*e&&y--){var x=o+f,_=s+d,k=c+p,w=Mc(x*x+_*_+k*k),E=Bc(k/=w),T=bc(bc(k)-1)<1e-6||bc(a-h)<1e-6?(a+h)/2:_c(_,x),C=t(T,E),S=C[0],A=C[1],M=S-r,O=A-i,N=v*M-m*O;(N*N/b>e||bc((m*M+v*O)/b-.5)>.3||o*f+s*d+c*p2?t[2]%360*vc:0,M()):[g*mc,m*mc,v*mc]},S.angle=function(t){return arguments.length?(b=t%360*vc,M()):b*mc},S.reflectX=function(t){return arguments.length?(x=t?-1:1,M()):x<0},S.reflectY=function(t){return arguments.length?(_=t?-1:1,M()):_<0},S.precision=function(t){return arguments.length?(o=pf(s,C=t*t),O()):Mc(C)},S.fitExtent=function(t,e){return uf(S,t,e)},S.fitSize=function(t,e){return lf(S,t,e)},S.fitWidth=function(t,e){return hf(S,t,e)},S.fitHeight=function(t,e){return ff(S,t,e)},function(){return 
e=t.apply(this,arguments),S.invert=e.invert&&A,M()}}function xf(t){var e=0,n=dc/3,r=bf(t),i=r(e,n);return i.parallels=function(t){return arguments.length?r(e=t[0]*vc,n=t[1]*vc):[e*mc,n*mc]},i}function _f(t,e){var n=Sc(t),r=(n+Sc(e))/2;if(bc(r)<1e-6)return function(t){var e=kc(t);function n(t,n){return[t*e,Sc(n)/e]}return n.invert=function(t,n){return[t/e,Bc(n*e)]},n}(t);var i=1+n*(2*r-n),a=Mc(i)/r;function o(t,e){var n=Mc(i-2*r*Sc(e))/r;return[n*Sc(t*=r),a-n*kc(t)]}return o.invert=function(t,e){var n=a-e,o=_c(t,bc(n))*Ac(n);return n*r<0&&(o-=dc*Ac(t)*Ac(n)),[o/r,Bc((i-(t*t+n*n)*r*r)/(2*r))]},o}var kf=function(){return xf(_f).scale(155.424).center([0,33.6442])},wf=function(){return kf().parallels([29.5,45.5]).scale(1070).translate([480,250]).rotate([96,0]).center([-.6,38.7])};var Ef=function(){var t,e,n,r,i,a,o=wf(),s=kf().rotate([154,0]).center([-2,58.5]).parallels([55,65]),c=kf().rotate([157,0]).center([-3,19.9]).parallels([8,18]),u={point:function(t,e){a=[t,e]}};function l(t){var e=t[0],o=t[1];return a=null,n.point(e,o),a||(r.point(e,o),a)||(i.point(e,o),a)}function h(){return t=e=null,l}return l.invert=function(t){var e=o.scale(),n=o.translate(),r=(t[0]-n[0])/e,i=(t[1]-n[1])/e;return(i>=.12&&i<.234&&r>=-.425&&r<-.214?s:i>=.166&&i<.234&&r>=-.214&&r<-.115?c:o).invert(t)},l.stream=function(n){return t&&e===n?t:(r=[o.stream(e=n),s.stream(n),c.stream(n)],i=r.length,t={point:function(t,e){for(var n=-1;++n0?e<1e-6-pc&&(e=1e-6-pc):e>pc-1e-6&&(e=pc-1e-6);var n=i/Cc(Lf(e),r);return[n*Sc(r*t),i-n*kc(r*t)]}return a.invert=function(t,e){var n=i-e,a=Ac(r)*Mc(t*t+n*n),o=_c(t,bc(n))*Ac(n);return n*r<0&&(o-=dc*Ac(t)*Ac(n)),[o/r,2*xc(Cc(i/a,1/r))-pc]},a}var Rf=function(){return xf(If).scale(109.5).parallels([30,30])};function Ff(t,e){return[t,e]}Ff.invert=Ff;var Pf=function(){return vf(Ff).scale(152.63)};function jf(t,e){var n=kc(t),r=t===e?Sc(t):(n-kc(e))/(e-t),i=n/r+t;if(bc(r)<1e-6)return Ff;function a(t,e){var n=i-e,a=r*t;return[n*Sc(a),i-n*kc(a)]}return 
a.invert=function(t,e){var n=i-e,a=_c(t,bc(n))*Ac(n);return n*r<0&&(a-=dc*Ac(t)*Ac(n)),[a/r,i-Ac(r)*Mc(t*t+n*n)]},a}var Yf=function(){return xf(jf).scale(131.154).center([0,13.9389])},zf=1.340264,Uf=-.081106,$f=893e-6,qf=.003796,Wf=Mc(3)/2;function Hf(t,e){var n=Bc(Wf*Sc(e)),r=n*n,i=r*r*r;return[t*kc(n)/(Wf*(zf+3*Uf*r+i*(7*$f+9*qf*r))),n*(zf+Uf*r+i*($f+qf*r))]}Hf.invert=function(t,e){for(var n,r=e,i=r*r,a=i*i*i,o=0;o<12&&(a=(i=(r-=n=(r*(zf+Uf*i+a*($f+qf*i))-e)/(zf+3*Uf*i+a*(7*$f+9*qf*i)))*r)*i*i,!(bc(n)<1e-12));++o);return[Wf*t*(zf+3*Uf*i+a*(7*$f+9*qf*i))/kc(r),Bc(Sc(r)/Wf)]};var Vf=function(){return vf(Hf).scale(177.158)};function Gf(t,e){var n=kc(e),r=kc(t)*n;return[n*Sc(t)/r,Sc(e)/r]}Gf.invert=Cf(xc);var Xf=function(){return vf(Gf).scale(144.049).clipAngle(60)},Zf=function(){var t,e,n,r,i,a,o,s=1,c=0,u=0,l=1,h=1,f=0,d=null,p=1,y=1,g=of({point:function(t,e){var n=b([t,e]);this.stream.point(n[0],n[1])}}),m=oh;function v(){return p=s*l,y=s*h,a=o=null,b}function b(n){var r=n[0]*p,i=n[1]*y;if(f){var a=i*t-r*e;r=r*t+i*e,i=a}return[r+c,i+u]}return b.invert=function(n){var r=n[0]-c,i=n[1]-u;if(f){var a=i*t+r*e;r=r*t-i*e,i=a}return[r/p,i/y]},b.stream=function(t){return a&&o===t?a:a=g(m(o=t))},b.postclip=function(t){return arguments.length?(m=t,d=n=r=i=null,v()):m},b.clipExtent=function(t){return arguments.length?(m=null==t?(d=n=r=i=null,oh):Al(d=+t[0][0],n=+t[0][1],r=+t[1][0],i=+t[1][1]),v()):null==d?null:[[d,n],[r,i]]},b.scale=function(t){return arguments.length?(s=+t,v()):s},b.translate=function(t){return arguments.length?(c=+t[0],u=+t[1],v()):[c,u]},b.angle=function(n){return arguments.length?(e=Sc(f=n%360*vc),t=kc(f),v()):f*mc},b.reflectX=function(t){return arguments.length?(l=t?-1:1,v()):l<0},b.reflectY=function(t){return arguments.length?(h=t?-1:1,v()):h<0},b.fitExtent=function(t,e){return uf(b,t,e)},b.fitSize=function(t,e){return lf(b,t,e)},b.fitWidth=function(t,e){return hf(b,t,e)},b.fitHeight=function(t,e){return ff(b,t,e)},b};function Kf(t,e){var 
n=e*e,r=n*n;return[t*(.8707-.131979*n+r*(r*(.003971*n-.001529*r)-.013791)),e*(1.007226+n*(.015085+r*(.028874*n-.044475-.005916*r)))]}Kf.invert=function(t,e){var n,r=e,i=25;do{var a=r*r,o=a*a;r-=n=(r*(1.007226+a*(.015085+o*(.028874*a-.044475-.005916*o)))-e)/(1.007226+a*(.045255+o*(.259866*a-.311325-.005916*11*o)))}while(bc(n)>1e-6&&--i>0);return[t/(.8707+(a=r*r)*(a*(a*a*a*(.003971-.001529*a)-.013791)-.131979)),r]};var Qf=function(){return vf(Kf).scale(175.295)};function Jf(t,e){return[kc(e)*Sc(t),Sc(e)]}Jf.invert=Cf(Bc);var td=function(){return vf(Jf).scale(249.5).clipAngle(90.000001)};function ed(t,e){var n=kc(e),r=1+kc(t)*n;return[n*Sc(t)/r,Sc(e)/r]}ed.invert=Cf((function(t){return 2*xc(t)}));var nd=function(){return vf(ed).scale(250).clipAngle(142)};function rd(t,e){return[Tc(Oc((pc+e)/2)),-t]}rd.invert=function(t,e){return[-e,2*xc(Ec(t))-pc]};var id=function(){var t=Df(rd),e=t.center,n=t.rotate;return t.center=function(t){return arguments.length?e([-t[1],t[0]]):[(t=e())[1],-t[0]]},t.rotate=function(t){return arguments.length?n([t[0],t[1],t.length>2?t[2]+90:90]):[(t=n())[0],t[1],t[2]-90]},n([0,0,90]).scale(159.155)};function ad(t,e){return t.parent===e.parent?1:2}function od(t,e){return t+e.x}function sd(t,e){return Math.max(t,e.y)}var cd=function(){var t=ad,e=1,n=1,r=!1;function i(i){var a,o=0;i.eachAfter((function(e){var n=e.children;n?(e.x=function(t){return t.reduce(od,0)/t.length}(n),e.y=function(t){return 1+t.reduce(sd,0)}(n)):(e.x=a?o+=t(e,a):0,e.y=0,a=e)}));var s=function(t){for(var e;e=t.children;)t=e[0];return t}(i),c=function(t){for(var e;e=t.children;)t=e[e.length-1];return t}(i),u=s.x-t(s,c)/2,l=c.x+t(c,s)/2;return i.eachAfter(r?function(t){t.x=(t.x-i.x)*e,t.y=(i.y-t.y)*n}:function(t){t.x=(t.x-u)/(l-u)*e,t.y=(1-(i.y?t.y/i.y:1))*n})}return i.separation=function(e){return arguments.length?(t=e,i):t},i.size=function(t){return arguments.length?(r=!1,e=+t[0],n=+t[1],i):r?null:[e,n]},i.nodeSize=function(t){return 
arguments.length?(r=!0,e=+t[0],n=+t[1],i):r?[e,n]:null},i};function ud(t){var e=0,n=t.children,r=n&&n.length;if(r)for(;--r>=0;)e+=n[r].value;else e=1;t.value=e}function ld(t,e){var n,r,i,a,o,s=new pd(t),c=+t.value&&(s.value=t.value),u=[s];for(null==e&&(e=hd);n=u.pop();)if(c&&(n.value=+n.data.value),(i=e(n.data))&&(o=i.length))for(n.children=new Array(o),a=o-1;a>=0;--a)u.push(r=n.children[a]=new pd(i[a])),r.parent=n,r.depth=n.depth+1;return s.eachBefore(dd)}function hd(t){return t.children}function fd(t){t.data=t.data.data}function dd(t){var e=0;do{t.height=e}while((t=t.parent)&&t.height<++e)}function pd(t){this.data=t,this.depth=this.height=0,this.parent=null}pd.prototype=ld.prototype={constructor:pd,count:function(){return this.eachAfter(ud)},each:function(t){var e,n,r,i,a=this,o=[a];do{for(e=o.reverse(),o=[];a=e.pop();)if(t(a),n=a.children)for(r=0,i=n.length;r=0;--n)i.push(e[n]);return this},sum:function(t){return this.eachAfter((function(e){for(var n=+t(e.data)||0,r=e.children,i=r&&r.length;--i>=0;)n+=r[i].value;e.value=n}))},sort:function(t){return this.eachBefore((function(e){e.children&&e.children.sort(t)}))},path:function(t){for(var e=this,n=function(t,e){if(t===e)return t;var n=t.ancestors(),r=e.ancestors(),i=null;t=n.pop(),e=r.pop();for(;t===e;)i=t,t=n.pop(),e=r.pop();return i}(e,t),r=[e];e!==n;)e=e.parent,r.push(e);for(var i=r.length;t!==n;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,e=[t];t=t.parent;)e.push(t);return e},descendants:function(){var t=[];return this.each((function(e){t.push(e)})),t},leaves:function(){var t=[];return this.eachBefore((function(e){e.children||t.push(e)})),t},links:function(){var t=this,e=[];return t.each((function(n){n!==t&&e.push({source:n.parent,target:n})})),e},copy:function(){return ld(this).eachBefore(fd)}};var yd=Array.prototype.slice;var gd=function(t){for(var e,n,r=0,i=(t=function(t){for(var e,n,r=t.length;r;)n=Math.random()*r--|0,e=t[r],t[r]=t[n],t[n]=e;return 
t}(yd.call(t))).length,a=[];r0&&n*n>r*r+i*i}function xd(t,e){for(var n=0;n(o*=o)?(r=(u+o-i)/(2*u),a=Math.sqrt(Math.max(0,o/u-r*r)),n.x=t.x-r*s-a*c,n.y=t.y-r*c+a*s):(r=(u+i-o)/(2*u),a=Math.sqrt(Math.max(0,i/u-r*r)),n.x=e.x+r*s-a*c,n.y=e.y+r*c+a*s)):(n.x=e.x+n.r,n.y=e.y)}function Td(t,e){var n=t.r+e.r-1e-6,r=e.x-t.x,i=e.y-t.y;return n>0&&n*n>r*r+i*i}function Cd(t){var e=t._,n=t.next._,r=e.r+n.r,i=(e.x*n.r+n.x*e.r)/r,a=(e.y*n.r+n.y*e.r)/r;return i*i+a*a}function Sd(t){this._=t,this.next=null,this.previous=null}function Ad(t){if(!(i=t.length))return 0;var e,n,r,i,a,o,s,c,u,l,h;if((e=t[0]).x=0,e.y=0,!(i>1))return e.r;if(n=t[1],e.x=-n.r,n.x=e.r,n.y=0,!(i>2))return e.r+n.r;Ed(n,e,r=t[2]),e=new Sd(e),n=new Sd(n),r=new Sd(r),e.next=r.previous=n,n.next=e.previous=r,r.next=n.previous=e;t:for(s=3;s0)throw new Error("cycle");return a}return n.id=function(e){return arguments.length?(t=Nd(e),n):t},n.parentId=function(t){return arguments.length?(e=Nd(t),n):e},n};function Vd(t,e){return t.parent===e.parent?1:2}function Gd(t){var e=t.children;return e?e[0]:t.t}function Xd(t){var e=t.children;return e?e[e.length-1]:t.t}function Zd(t,e,n){var r=n/(e.i-t.i);e.c-=r,e.s+=n,t.c+=r,e.z+=n,e.m+=n}function Kd(t,e,n){return t.a.parent===e.parent?t.a:n}function Qd(t,e){this._=t,this.parent=null,this.children=null,this.A=null,this.a=this,this.z=0,this.m=0,this.c=0,this.s=0,this.t=null,this.i=e}Qd.prototype=Object.create(pd.prototype);var Jd=function(){var t=Vd,e=1,n=1,r=null;function i(i){var c=function(t){for(var e,n,r,i,a,o=new Qd(t,0),s=[o];e=s.pop();)if(r=e._.children)for(e.children=new Array(a=r.length),i=a-1;i>=0;--i)s.push(n=e.children[i]=new Qd(r[i],i)),n.parent=e;return(o.parent=new Qd(null,0)).children=[o],o}(i);if(c.eachAfter(a),c.parent.m=-c.z,c.eachBefore(o),r)i.eachBefore(s);else{var u=i,l=i,h=i;i.eachBefore((function(t){t.xl.x&&(l=t),t.depth>h.depth&&(h=t)}));var 
f=u===l?1:t(u,l)/2,d=f-u.x,p=e/(l.x+f+d),y=n/(h.depth||1);i.eachBefore((function(t){t.x=(t.x+d)*p,t.y=t.depth*y}))}return i}function a(e){var n=e.children,r=e.parent.children,i=e.i?r[e.i-1]:null;if(n){!function(t){for(var e,n=0,r=0,i=t.children,a=i.length;--a>=0;)(e=i[a]).z+=n,e.m+=n,n+=e.s+(r+=e.c)}(e);var a=(n[0].z+n[n.length-1].z)/2;i?(e.z=i.z+t(e._,i._),e.m=e.z-a):e.z=a}else i&&(e.z=i.z+t(e._,i._));e.parent.A=function(e,n,r){if(n){for(var i,a=e,o=e,s=n,c=a.parent.children[0],u=a.m,l=o.m,h=s.m,f=c.m;s=Xd(s),a=Gd(a),s&&a;)c=Gd(c),(o=Xd(o)).a=e,(i=s.z+h-a.z-u+t(s._,a._))>0&&(Zd(Kd(s,e,r),e,i),u+=i,l+=i),h+=s.m,u+=a.m,f+=c.m,l+=o.m;s&&!Xd(o)&&(o.t=s,o.m+=h-l),a&&!Gd(c)&&(c.t=a,c.m+=u-f,r=e)}return r}(e,i,e.parent.A||r[0])}function o(t){t._.x=t.z+t.parent.m,t.m+=t.parent.m}function s(t){t.x*=e,t.y=t.depth*n}return i.separation=function(e){return arguments.length?(t=e,i):t},i.size=function(t){return arguments.length?(r=!1,e=+t[0],n=+t[1],i):r?null:[e,n]},i.nodeSize=function(t){return arguments.length?(r=!0,e=+t[0],n=+t[1],i):r?[e,n]:null},i},tp=function(t,e,n,r,i){for(var a,o=t.children,s=-1,c=o.length,u=t.value&&(i-n)/t.value;++sf&&(f=s),g=l*l*y,(d=Math.max(f/g,g/h))>p){l-=s;break}p=d}m.push(o={value:l,dice:c1?e:1)},n}(ep),ip=function(){var t=rp,e=!1,n=1,r=1,i=[0],a=Bd,o=Bd,s=Bd,c=Bd,u=Bd;function l(t){return t.x0=t.y0=0,t.x1=n,t.y1=r,t.eachBefore(h),i=[0],e&&t.eachBefore(jd),t}function h(e){var n=i[e.depth],r=e.x0+n,l=e.y0+n,h=e.x1-n,f=e.y1-n;h=n-1){var l=s[e];return l.x0=i,l.y0=a,l.x1=o,void(l.y1=c)}var h=u[e],f=r/2+h,d=e+1,p=n-1;for(;d>>1;u[y]c-a){var v=(i*m+o*g)/r;t(e,d,g,i,a,v,c),t(d,n,m,v,a,o,c)}else{var b=(a*m+c*g)/r;t(e,d,g,i,a,o,b),t(d,n,m,i,b,o,c)}}(0,c,t.value,e,n,r,i)},op=function(t,e,n,r,i){(1&t.depth?tp:Yd)(t,e,n,r,i)},sp=function t(e){function n(t,n,r,i,a){if((o=t._squarify)&&o.ratio===e)for(var o,s,c,u,l,h=-1,f=o.length,d=t.value;++h1?e:1)},n}(ep),cp=function(t){var e=t.length;return function(n){return 
t[Math.max(0,Math.min(e-1,Math.floor(n*e)))]}},up=function(t,e){var n=ln(+t,+e);return function(t){var e=n(t);return e-360*Math.floor(e/360)}},lp=function(t,e){return t=+t,e=+e,function(n){return Math.round(t*(1-n)+e*n)}},hp=Math.SQRT2;function fp(t){return((t=Math.exp(t))+1/t)/2}var dp=function(t,e){var n,r,i=t[0],a=t[1],o=t[2],s=e[0],c=e[1],u=e[2],l=s-i,h=c-a,f=l*l+h*h;if(f<1e-12)r=Math.log(u/o)/hp,n=function(t){return[i+t*l,a+t*h,o*Math.exp(hp*t*r)]};else{var d=Math.sqrt(f),p=(u*u-o*o+4*f)/(2*o*2*d),y=(u*u-o*o-4*f)/(2*u*2*d),g=Math.log(Math.sqrt(p*p+1)-p),m=Math.log(Math.sqrt(y*y+1)-y);r=(m-g)/hp,n=function(t){var e,n=t*r,s=fp(g),c=o/(2*d)*(s*(e=hp*n+g,((e=Math.exp(2*e))-1)/(e+1))-function(t){return((t=Math.exp(t))-1/t)/2}(g));return[i+c*l,a+c*h,o*s/fp(hp*n+g)]}}return n.duration=1e3*r,n};function pp(t){return function(e,n){var r=t((e=en(e)).h,(n=en(n)).h),i=fn(e.s,n.s),a=fn(e.l,n.l),o=fn(e.opacity,n.opacity);return function(t){return e.h=r(t),e.s=i(t),e.l=a(t),e.opacity=o(t),e+""}}}var yp=pp(ln),gp=pp(fn);function mp(t,e){var n=fn((t=ya(t)).l,(e=ya(e)).l),r=fn(t.a,e.a),i=fn(t.b,e.b),a=fn(t.opacity,e.opacity);return function(e){return t.l=n(e),t.a=r(e),t.b=i(e),t.opacity=a(e),t+""}}function vp(t){return function(e,n){var r=t((e=wa(e)).h,(n=wa(n)).h),i=fn(e.c,n.c),a=fn(e.l,n.l),o=fn(e.opacity,n.opacity);return function(t){return e.h=r(t),e.c=i(t),e.l=a(t),e.opacity=o(t),e+""}}}var bp=vp(ln),xp=vp(fn);function _p(t){return function e(n){function r(e,r){var i=t((e=Na(e)).h,(r=Na(r)).h),a=fn(e.s,r.s),o=fn(e.l,r.l),s=fn(e.opacity,r.opacity);return function(t){return e.h=i(t),e.s=a(t),e.l=o(Math.pow(t,n)),e.opacity=s(t),e+""}}return n=+n,r.gamma=e,r}(1)}var kp=_p(ln),wp=_p(fn);function Ep(t,e){for(var n=0,r=e.length-1,i=e[0],a=new Array(r<0?0:r);n1&&(e=t[a[o-2]],n=t[a[o-1]],r=t[s],(n[0]-e[0])*(r[1]-e[1])-(n[1]-e[1])*(r[0]-e[0])<=0);)--o;a[o++]=s}return a.slice(0,o)}var Op=function(t){if((n=t.length)<3)return null;var e,n,r=new Array(n),i=new 
Array(n);for(e=0;e=0;--e)u.push(t[r[a[e]][2]]);for(e=+s;es!=u>s&&o<(c-n)*(s-r)/(u-r)+n&&(l=!l),c=n,u=r;return l},Bp=function(t){for(var e,n,r=-1,i=t.length,a=t[i-1],o=a[0],s=a[1],c=0;++r1);return t+n*a*Math.sqrt(-2*Math.log(i)/i)}}return n.source=t,n}(Dp),Rp=function t(e){function n(){var t=Ip.source(e).apply(this,arguments);return function(){return Math.exp(t())}}return n.source=t,n}(Dp),Fp=function t(e){function n(t){return function(){for(var n=0,r=0;rr&&(e=n,n=r,r=e),function(t){return Math.max(n,Math.min(r,t))}}function ey(t,e,n){var r=t[0],i=t[1],a=e[0],o=e[1];return i2?ny:ey,i=a=null,h}function h(e){return isNaN(e=+e)?n:(i||(i=r(o.map(t),s,c)))(t(u(e)))}return h.invert=function(n){return u(e((a||(a=r(s,o.map(t),kn)))(n)))},h.domain=function(t){return arguments.length?(o=$p.call(t,Zp),u===Qp||(u=ty(o)),l()):o.slice()},h.range=function(t){return arguments.length?(s=qp.call(t),l()):s.slice()},h.rangeRound=function(t){return s=qp.call(t),c=lp,l()},h.clamp=function(t){return arguments.length?(u=t?ty(o):Qp,h):u!==Qp},h.interpolate=function(t){return arguments.length?(c=t,l()):c},h.unknown=function(t){return arguments.length?(n=t,h):n},function(n,r){return t=n,e=r,l()}}function ay(t,e){return iy()(t,e)}var oy=function(t,e,n,r){var i,a=M(t,e,n);switch((r=Vs(null==r?",f":r)).type){case"s":var o=Math.max(Math.abs(t),Math.abs(e));return null!=r.precision||isNaN(i=sc(a,o))||(r.precision=i),Qs(r,o);case"":case"e":case"g":case"p":case"r":null!=r.precision||isNaN(i=cc(a,Math.max(Math.abs(t),Math.abs(e))))||(r.precision=i-("e"===r.type));break;case"f":case"%":null!=r.precision||isNaN(i=oc(a))||(r.precision=i-2*("%"===r.type))}return Ks(r)};function sy(t){var e=t.domain;return t.ticks=function(t){var n=e();return S(n[0],n[n.length-1],null==t?10:t)},t.tickFormat=function(t,n){var r=e();return oy(r[0],r[r.length-1],null==t?10:t,n)},t.nice=function(n){null==n&&(n=10);var r,i=e(),a=0,o=i.length-1,s=i[a],c=i[o];return 
c0?r=A(s=Math.floor(s/r)*r,c=Math.ceil(c/r)*r,n):r<0&&(r=A(s=Math.ceil(s*r)/r,c=Math.floor(c*r)/r,n)),r>0?(i[a]=Math.floor(s/r)*r,i[o]=Math.ceil(c/r)*r,e(i)):r<0&&(i[a]=Math.ceil(s*r)/r,i[o]=Math.floor(c*r)/r,e(i)),t},t}function cy(){var t=ay(Qp,Qp);return t.copy=function(){return ry(t,cy())},Yp.apply(t,arguments),sy(t)}function uy(t){var e;function n(t){return isNaN(t=+t)?e:t}return n.invert=n,n.domain=n.range=function(e){return arguments.length?(t=$p.call(e,Zp),n):t.slice()},n.unknown=function(t){return arguments.length?(e=t,n):e},n.copy=function(){return uy(t).unknown(e)},t=arguments.length?$p.call(t,Zp):[0,1],sy(n)}var ly=function(t,e){var n,r=0,i=(t=t.slice()).length-1,a=t[r],o=t[i];return o0){for(;fc)break;y.push(h)}}else for(;f=1;--l)if(!((h=u*l)c)break;y.push(h)}}else y=S(f,d,Math.min(d-f,p)).map(n);return r?y.reverse():y},r.tickFormat=function(t,i){if(null==i&&(i=10===a?".0e":","),"function"!=typeof i&&(i=Ks(i)),t===1/0)return i;null==t&&(t=10);var o=Math.max(1,a*t/r.ticks().length);return function(t){var r=t/n(Math.round(e(t)));return r*a0?r[i-1]:e[0],i=r?[i[r-1],n]:[i[o-1],i[o]]},o.unknown=function(e){return arguments.length?(t=e,o):o},o.thresholds=function(){return i.slice()},o.copy=function(){return Oy().domain([e,n]).range(a).unknown(t)},Yp.apply(sy(o),arguments)}function Ny(){var t,e=[.5],n=[0,1],r=1;function i(i){return i<=i?n[u(e,i,0,r)]:t}return i.domain=function(t){return arguments.length?(e=qp.call(t),r=Math.min(e.length,n.length-1),i):e.slice()},i.range=function(t){return arguments.length?(n=qp.call(t),r=Math.min(e.length,n.length-1),i):n.slice()},i.invertExtent=function(t){var r=n.indexOf(t);return[e[r-1],e[r]]},i.unknown=function(e){return arguments.length?(t=e,i):t},i.copy=function(){return Ny().domain(e).range(n).unknown(t)},Yp.apply(i,arguments)}var By=new Date,Dy=new Date;function Ly(t,e,n,r){function i(e){return t(e=0===arguments.length?new Date:new Date(+e)),e}return i.floor=function(e){return t(e=new 
Date(+e)),e},i.ceil=function(n){return t(n=new Date(n-1)),e(n,1),t(n),n},i.round=function(t){var e=i(t),n=i.ceil(t);return t-e0))return s;do{s.push(o=new Date(+n)),e(n,a),t(n)}while(o=e)for(;t(e),!n(e);)e.setTime(e-1)}),(function(t,r){if(t>=t)if(r<0)for(;++r<=0;)for(;e(t,-1),!n(t););else for(;--r>=0;)for(;e(t,1),!n(t););}))},n&&(i.count=function(e,r){return By.setTime(+e),Dy.setTime(+r),t(By),t(Dy),Math.floor(n(By,Dy))},i.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?i.filter(r?function(e){return r(e)%t==0}:function(e){return i.count(0,e)%t==0}):i:null}),i}var Iy=Ly((function(t){t.setMonth(0,1),t.setHours(0,0,0,0)}),(function(t,e){t.setFullYear(t.getFullYear()+e)}),(function(t,e){return e.getFullYear()-t.getFullYear()}),(function(t){return t.getFullYear()}));Iy.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Ly((function(e){e.setFullYear(Math.floor(e.getFullYear()/t)*t),e.setMonth(0,1),e.setHours(0,0,0,0)}),(function(e,n){e.setFullYear(e.getFullYear()+n*t)})):null};var Ry=Iy,Fy=Iy.range,Py=Ly((function(t){t.setDate(1),t.setHours(0,0,0,0)}),(function(t,e){t.setMonth(t.getMonth()+e)}),(function(t,e){return e.getMonth()-t.getMonth()+12*(e.getFullYear()-t.getFullYear())}),(function(t){return t.getMonth()})),jy=Py,Yy=Py.range;function zy(t){return Ly((function(e){e.setDate(e.getDate()-(e.getDay()+7-t)%7),e.setHours(0,0,0,0)}),(function(t,e){t.setDate(t.getDate()+7*e)}),(function(t,e){return(e-t-6e4*(e.getTimezoneOffset()-t.getTimezoneOffset()))/6048e5}))}var Uy=zy(0),$y=zy(1),qy=zy(2),Wy=zy(3),Hy=zy(4),Vy=zy(5),Gy=zy(6),Xy=Uy.range,Zy=$y.range,Ky=qy.range,Qy=Wy.range,Jy=Hy.range,tg=Vy.range,eg=Gy.range,ng=Ly((function(t){t.setHours(0,0,0,0)}),(function(t,e){t.setDate(t.getDate()+e)}),(function(t,e){return(e-t-6e4*(e.getTimezoneOffset()-t.getTimezoneOffset()))/864e5}),(function(t){return 
t.getDate()-1})),rg=ng,ig=ng.range,ag=Ly((function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds()-6e4*t.getMinutes())}),(function(t,e){t.setTime(+t+36e5*e)}),(function(t,e){return(e-t)/36e5}),(function(t){return t.getHours()})),og=ag,sg=ag.range,cg=Ly((function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds())}),(function(t,e){t.setTime(+t+6e4*e)}),(function(t,e){return(e-t)/6e4}),(function(t){return t.getMinutes()})),ug=cg,lg=cg.range,hg=Ly((function(t){t.setTime(t-t.getMilliseconds())}),(function(t,e){t.setTime(+t+1e3*e)}),(function(t,e){return(e-t)/1e3}),(function(t){return t.getUTCSeconds()})),fg=hg,dg=hg.range,pg=Ly((function(){}),(function(t,e){t.setTime(+t+e)}),(function(t,e){return e-t}));pg.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?Ly((function(e){e.setTime(Math.floor(e/t)*t)}),(function(e,n){e.setTime(+e+n*t)}),(function(e,n){return(n-e)/t})):pg:null};var yg=pg,gg=pg.range;function mg(t){return Ly((function(e){e.setUTCDate(e.getUTCDate()-(e.getUTCDay()+7-t)%7),e.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+7*e)}),(function(t,e){return(e-t)/6048e5}))}var vg=mg(0),bg=mg(1),xg=mg(2),_g=mg(3),kg=mg(4),wg=mg(5),Eg=mg(6),Tg=vg.range,Cg=bg.range,Sg=xg.range,Ag=_g.range,Mg=kg.range,Og=wg.range,Ng=Eg.range,Bg=Ly((function(t){t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+e)}),(function(t,e){return(e-t)/864e5}),(function(t){return t.getUTCDate()-1})),Dg=Bg,Lg=Bg.range,Ig=Ly((function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCFullYear(t.getUTCFullYear()+e)}),(function(t,e){return e.getUTCFullYear()-t.getUTCFullYear()}),(function(t){return t.getUTCFullYear()}));Ig.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Ly((function(e){e.setUTCFullYear(Math.floor(e.getUTCFullYear()/t)*t),e.setUTCMonth(0,1),e.setUTCHours(0,0,0,0)}),(function(e,n){e.setUTCFullYear(e.getUTCFullYear()+n*t)})):null};var Rg=Ig,Fg=Ig.range;function Pg(t){if(0<=t.y&&t.y<100){var e=new 
Date(-1,t.m,t.d,t.H,t.M,t.S,t.L);return e.setFullYear(t.y),e}return new Date(t.y,t.m,t.d,t.H,t.M,t.S,t.L)}function jg(t){if(0<=t.y&&t.y<100){var e=new Date(Date.UTC(-1,t.m,t.d,t.H,t.M,t.S,t.L));return e.setUTCFullYear(t.y),e}return new Date(Date.UTC(t.y,t.m,t.d,t.H,t.M,t.S,t.L))}function Yg(t,e,n){return{y:t,m:e,d:n,H:0,M:0,S:0,L:0}}function zg(t){var e=t.dateTime,n=t.date,r=t.time,i=t.periods,a=t.days,o=t.shortDays,s=t.months,c=t.shortMonths,u=Jg(i),l=tm(i),h=Jg(a),f=tm(a),d=Jg(o),p=tm(o),y=Jg(s),g=tm(s),m=Jg(c),v=tm(c),b={a:function(t){return o[t.getDay()]},A:function(t){return a[t.getDay()]},b:function(t){return c[t.getMonth()]},B:function(t){return s[t.getMonth()]},c:null,d:_m,e:_m,f:Cm,g:Fm,G:jm,H:km,I:wm,j:Em,L:Tm,m:Sm,M:Am,p:function(t){return i[+(t.getHours()>=12)]},q:function(t){return 1+~~(t.getMonth()/3)},Q:cv,s:uv,S:Mm,u:Om,U:Nm,V:Dm,w:Lm,W:Im,x:null,X:null,y:Rm,Y:Pm,Z:Ym,"%":sv},x={a:function(t){return o[t.getUTCDay()]},A:function(t){return a[t.getUTCDay()]},b:function(t){return c[t.getUTCMonth()]},B:function(t){return s[t.getUTCMonth()]},c:null,d:zm,e:zm,f:Hm,g:rv,G:av,H:Um,I:$m,j:qm,L:Wm,m:Vm,M:Gm,p:function(t){return i[+(t.getUTCHours()>=12)]},q:function(t){return 1+~~(t.getUTCMonth()/3)},Q:cv,s:uv,S:Xm,u:Zm,U:Km,V:Jm,w:tv,W:ev,x:null,X:null,y:nv,Y:iv,Z:ov,"%":sv},_={a:function(t,e,n){var r=d.exec(e.slice(n));return r?(t.w=p[r[0].toLowerCase()],n+r[0].length):-1},A:function(t,e,n){var r=h.exec(e.slice(n));return r?(t.w=f[r[0].toLowerCase()],n+r[0].length):-1},b:function(t,e,n){var r=m.exec(e.slice(n));return r?(t.m=v[r[0].toLowerCase()],n+r[0].length):-1},B:function(t,e,n){var r=y.exec(e.slice(n));return r?(t.m=g[r[0].toLowerCase()],n+r[0].length):-1},c:function(t,n,r){return E(t,e,n,r)},d:hm,e:hm,f:mm,g:sm,G:om,H:dm,I:dm,j:fm,L:gm,m:lm,M:pm,p:function(t,e,n){var r=u.exec(e.slice(n));return r?(t.p=l[r[0].toLowerCase()],n+r[0].length):-1},q:um,Q:bm,s:xm,S:ym,u:nm,U:rm,V:im,w:em,W:am,x:function(t,e,r){return E(t,n,e,r)},X:function(t,e,n){return 
E(t,r,e,n)},y:sm,Y:om,Z:cm,"%":vm};function k(t,e){return function(n){var r,i,a,o=[],s=-1,c=0,u=t.length;for(n instanceof Date||(n=new Date(+n));++s53)return null;"w"in a||(a.w=1),"Z"in a?(i=(r=jg(Yg(a.y,0,1))).getUTCDay(),r=i>4||0===i?bg.ceil(r):bg(r),r=Dg.offset(r,7*(a.V-1)),a.y=r.getUTCFullYear(),a.m=r.getUTCMonth(),a.d=r.getUTCDate()+(a.w+6)%7):(i=(r=Pg(Yg(a.y,0,1))).getDay(),r=i>4||0===i?$y.ceil(r):$y(r),r=rg.offset(r,7*(a.V-1)),a.y=r.getFullYear(),a.m=r.getMonth(),a.d=r.getDate()+(a.w+6)%7)}else("W"in a||"U"in a)&&("w"in a||(a.w="u"in a?a.u%7:"W"in a?1:0),i="Z"in a?jg(Yg(a.y,0,1)).getUTCDay():Pg(Yg(a.y,0,1)).getDay(),a.m=0,a.d="W"in a?(a.w+6)%7+7*a.W-(i+5)%7:a.w+7*a.U-(i+6)%7);return"Z"in a?(a.H+=a.Z/100|0,a.M+=a.Z%100,jg(a)):Pg(a)}}function E(t,e,n,r){for(var i,a,o=0,s=e.length,c=n.length;o=c)return-1;if(37===(i=e.charCodeAt(o++))){if(i=e.charAt(o++),!(a=_[i in Vg?e.charAt(o++):i])||(r=a(t,n,r))<0)return-1}else if(i!=n.charCodeAt(r++))return-1}return r}return b.x=k(n,b),b.X=k(r,b),b.c=k(e,b),x.x=k(n,x),x.X=k(r,x),x.c=k(e,x),{format:function(t){var e=k(t+="",b);return e.toString=function(){return t},e},parse:function(t){var e=w(t+="",!1);return e.toString=function(){return t},e},utcFormat:function(t){var e=k(t+="",x);return e.toString=function(){return t},e},utcParse:function(t){var e=w(t+="",!0);return e.toString=function(){return t},e}}}var Ug,$g,qg,Wg,Hg,Vg={"-":"",_:" ",0:"0"},Gg=/^\s*\d+/,Xg=/^%/,Zg=/[\\^$*+?|[\]().{}]/g;function Kg(t,e,n){var r=t<0?"-":"",i=(r?-t:t)+"",a=i.length;return r+(a68?1900:2e3),n+r[0].length):-1}function cm(t,e,n){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(e.slice(n,n+6));return r?(t.Z=r[1]?0:-(r[2]+(r[3]||"00")),n+r[0].length):-1}function um(t,e,n){var r=Gg.exec(e.slice(n,n+1));return r?(t.q=3*r[0]-3,n+r[0].length):-1}function lm(t,e,n){var r=Gg.exec(e.slice(n,n+2));return r?(t.m=r[0]-1,n+r[0].length):-1}function hm(t,e,n){var r=Gg.exec(e.slice(n,n+2));return r?(t.d=+r[0],n+r[0].length):-1}function fm(t,e,n){var 
r=Gg.exec(e.slice(n,n+3));return r?(t.m=0,t.d=+r[0],n+r[0].length):-1}function dm(t,e,n){var r=Gg.exec(e.slice(n,n+2));return r?(t.H=+r[0],n+r[0].length):-1}function pm(t,e,n){var r=Gg.exec(e.slice(n,n+2));return r?(t.M=+r[0],n+r[0].length):-1}function ym(t,e,n){var r=Gg.exec(e.slice(n,n+2));return r?(t.S=+r[0],n+r[0].length):-1}function gm(t,e,n){var r=Gg.exec(e.slice(n,n+3));return r?(t.L=+r[0],n+r[0].length):-1}function mm(t,e,n){var r=Gg.exec(e.slice(n,n+6));return r?(t.L=Math.floor(r[0]/1e3),n+r[0].length):-1}function vm(t,e,n){var r=Xg.exec(e.slice(n,n+1));return r?n+r[0].length:-1}function bm(t,e,n){var r=Gg.exec(e.slice(n));return r?(t.Q=+r[0],n+r[0].length):-1}function xm(t,e,n){var r=Gg.exec(e.slice(n));return r?(t.s=+r[0],n+r[0].length):-1}function _m(t,e){return Kg(t.getDate(),e,2)}function km(t,e){return Kg(t.getHours(),e,2)}function wm(t,e){return Kg(t.getHours()%12||12,e,2)}function Em(t,e){return Kg(1+rg.count(Ry(t),t),e,3)}function Tm(t,e){return Kg(t.getMilliseconds(),e,3)}function Cm(t,e){return Tm(t,e)+"000"}function Sm(t,e){return Kg(t.getMonth()+1,e,2)}function Am(t,e){return Kg(t.getMinutes(),e,2)}function Mm(t,e){return Kg(t.getSeconds(),e,2)}function Om(t){var e=t.getDay();return 0===e?7:e}function Nm(t,e){return Kg(Uy.count(Ry(t)-1,t),e,2)}function Bm(t){var e=t.getDay();return e>=4||0===e?Hy(t):Hy.ceil(t)}function Dm(t,e){return t=Bm(t),Kg(Hy.count(Ry(t),t)+(4===Ry(t).getDay()),e,2)}function Lm(t){return t.getDay()}function Im(t,e){return Kg($y.count(Ry(t)-1,t),e,2)}function Rm(t,e){return Kg(t.getFullYear()%100,e,2)}function Fm(t,e){return Kg((t=Bm(t)).getFullYear()%100,e,2)}function Pm(t,e){return Kg(t.getFullYear()%1e4,e,4)}function jm(t,e){var n=t.getDay();return Kg((t=n>=4||0===n?Hy(t):Hy.ceil(t)).getFullYear()%1e4,e,4)}function Ym(t){var e=t.getTimezoneOffset();return(e>0?"-":(e*=-1,"+"))+Kg(e/60|0,"0",2)+Kg(e%60,"0",2)}function zm(t,e){return Kg(t.getUTCDate(),e,2)}function Um(t,e){return Kg(t.getUTCHours(),e,2)}function 
$m(t,e){return Kg(t.getUTCHours()%12||12,e,2)}function qm(t,e){return Kg(1+Dg.count(Rg(t),t),e,3)}function Wm(t,e){return Kg(t.getUTCMilliseconds(),e,3)}function Hm(t,e){return Wm(t,e)+"000"}function Vm(t,e){return Kg(t.getUTCMonth()+1,e,2)}function Gm(t,e){return Kg(t.getUTCMinutes(),e,2)}function Xm(t,e){return Kg(t.getUTCSeconds(),e,2)}function Zm(t){var e=t.getUTCDay();return 0===e?7:e}function Km(t,e){return Kg(vg.count(Rg(t)-1,t),e,2)}function Qm(t){var e=t.getUTCDay();return e>=4||0===e?kg(t):kg.ceil(t)}function Jm(t,e){return t=Qm(t),Kg(kg.count(Rg(t),t)+(4===Rg(t).getUTCDay()),e,2)}function tv(t){return t.getUTCDay()}function ev(t,e){return Kg(bg.count(Rg(t)-1,t),e,2)}function nv(t,e){return Kg(t.getUTCFullYear()%100,e,2)}function rv(t,e){return Kg((t=Qm(t)).getUTCFullYear()%100,e,2)}function iv(t,e){return Kg(t.getUTCFullYear()%1e4,e,4)}function av(t,e){var n=t.getUTCDay();return Kg((t=n>=4||0===n?kg(t):kg.ceil(t)).getUTCFullYear()%1e4,e,4)}function ov(){return"+0000"}function sv(){return"%"}function cv(t){return+t}function uv(t){return Math.floor(+t/1e3)}function lv(t){return Ug=zg(t),$g=Ug.format,qg=Ug.parse,Wg=Ug.utcFormat,Hg=Ug.utcParse,Ug}lv({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S %p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});function hv(t){return new Date(t)}function fv(t){return t instanceof Date?+t:+new Date(+t)}function dv(t,e,n,r,i,o,s,c,u){var l=ay(Qp,Qp),h=l.invert,f=l.domain,d=u(".%L"),p=u(":%S"),y=u("%I:%M"),g=u("%I %p"),m=u("%a %d"),v=u("%b 
%d"),b=u("%B"),x=u("%Y"),_=[[s,1,1e3],[s,5,5e3],[s,15,15e3],[s,30,3e4],[o,1,6e4],[o,5,3e5],[o,15,9e5],[o,30,18e5],[i,1,36e5],[i,3,108e5],[i,6,216e5],[i,12,432e5],[r,1,864e5],[r,2,1728e5],[n,1,6048e5],[e,1,2592e6],[e,3,7776e6],[t,1,31536e6]];function k(a){return(s(a)1)&&(t-=Math.floor(t));var e=Math.abs(t-.5);return ex.h=360*t-100,ex.s=1.5-1.5*e,ex.l=.8-.9*e,ex+""},rx=Ge(),ix=Math.PI/3,ax=2*Math.PI/3,ox=function(t){var e;return t=(.5-t)*Math.PI,rx.r=255*(e=Math.sin(t))*e,rx.g=255*(e=Math.sin(t+ix))*e,rx.b=255*(e=Math.sin(t+ax))*e,rx+""},sx=function(t){return t=Math.max(0,Math.min(1,t)),"rgb("+Math.max(0,Math.min(255,Math.round(34.61+t*(1172.33-t*(10793.56-t*(33300.12-t*(38394.49-14825.05*t)))))))+", "+Math.max(0,Math.min(255,Math.round(23.31+t*(557.33+t*(1225.33-t*(3574.96-t*(1073.77+707.56*t)))))))+", "+Math.max(0,Math.min(255,Math.round(27.2+t*(3211.1-t*(15327.97-t*(27814-t*(22569.18-6838.66*t)))))))+")"};function cx(t){var e=t.length;return function(n){return t[Math.max(0,Math.min(e-1,Math.floor(n*e)))]}}var 
ux=cx(jv("44015444025645045745055946075a46085c460a5d460b5e470d60470e6147106347116447136548146748166848176948186a481a6c481b6d481c6e481d6f481f70482071482173482374482475482576482677482878482979472a7a472c7a472d7b472e7c472f7d46307e46327e46337f463480453581453781453882443983443a83443b84433d84433e85423f854240864241864142874144874045884046883f47883f48893e49893e4a893e4c8a3d4d8a3d4e8a3c4f8a3c508b3b518b3b528b3a538b3a548c39558c39568c38588c38598c375a8c375b8d365c8d365d8d355e8d355f8d34608d34618d33628d33638d32648e32658e31668e31678e31688e30698e306a8e2f6b8e2f6c8e2e6d8e2e6e8e2e6f8e2d708e2d718e2c718e2c728e2c738e2b748e2b758e2a768e2a778e2a788e29798e297a8e297b8e287c8e287d8e277e8e277f8e27808e26818e26828e26828e25838e25848e25858e24868e24878e23888e23898e238a8d228b8d228c8d228d8d218e8d218f8d21908d21918c20928c20928c20938c1f948c1f958b1f968b1f978b1f988b1f998a1f9a8a1e9b8a1e9c891e9d891f9e891f9f881fa0881fa1881fa1871fa28720a38620a48621a58521a68522a78522a88423a98324aa8325ab8225ac8226ad8127ad8128ae8029af7f2ab07f2cb17e2db27d2eb37c2fb47c31b57b32b67a34b67935b77937b87838b9773aba763bbb753dbc743fbc7340bd7242be7144bf7046c06f48c16e4ac16d4cc26c4ec36b50c46a52c56954c56856c66758c7655ac8645cc8635ec96260ca6063cb5f65cb5e67cc5c69cd5b6ccd5a6ece5870cf5773d05675d05477d1537ad1517cd2507fd34e81d34d84d44b86d54989d5488bd6468ed64590d74393d74195d84098d83e9bd93c9dd93ba0da39a2da37a5db36a8db34aadc32addc30b0dd2fb2dd2db5de2bb8de29bade28bddf26c0df25c2df23c5e021c8e020cae11fcde11dd0e11cd2e21bd5e21ad8e219dae319dde318dfe318e2e418e5e419e7e419eae51aece51befe51cf1e51df4e61ef6e620f8e621fbe723fde725")),lx=cx(jv("00000401000501010601010802010902020b02020d03030f03031204041405041606051806051a07061c08071e0907200a08220b09240c09260d0a290e0b2b100b2d110c2f120d31130d34140e36150e38160f3b180f3d19103f1a10421c10441d11471e114920114b21114e22115024125325125527125829115a2a115c2c115f2d11612f116331116533106734106936106b38106c390f6e3b0f703d0f713f0f72400f74420f75440f764510774710784910784a10794c117a4e117b4f127b51127c52137c54137d56147d57157e59157e5a167e5c167f5d177f5f
187f601880621980641a80651a80671b80681c816a1c816b1d816d1d816e1e81701f81721f817320817521817621817822817922827b23827c23827e24828025828125818326818426818627818827818928818b29818c29818e2a81902a81912b81932b80942c80962c80982d80992d809b2e7f9c2e7f9e2f7fa02f7fa1307ea3307ea5317ea6317da8327daa337dab337cad347cae347bb0357bb2357bb3367ab5367ab73779b83779ba3878bc3978bd3977bf3a77c03a76c23b75c43c75c53c74c73d73c83e73ca3e72cc3f71cd4071cf4070d0416fd2426fd3436ed5446dd6456cd8456cd9466bdb476adc4869de4968df4a68e04c67e24d66e34e65e44f64e55064e75263e85362e95462ea5661eb5760ec5860ed5a5fee5b5eef5d5ef05f5ef1605df2625df2645cf3655cf4675cf4695cf56b5cf66c5cf66e5cf7705cf7725cf8745cf8765cf9785df9795df97b5dfa7d5efa7f5efa815ffb835ffb8560fb8761fc8961fc8a62fc8c63fc8e64fc9065fd9266fd9467fd9668fd9869fd9a6afd9b6bfe9d6cfe9f6dfea16efea36ffea571fea772fea973feaa74feac76feae77feb078feb27afeb47bfeb67cfeb77efeb97ffebb81febd82febf84fec185fec287fec488fec68afec88cfeca8dfecc8ffecd90fecf92fed194fed395fed597fed799fed89afdda9cfddc9efddea0fde0a1fde2a3fde3a5fde5a7fde7a9fde9aafdebacfcecaefceeb0fcf0b2fcf2b4fcf4b6fcf6b8fcf7b9fcf9bbfcfbbdfcfdbf")),hx=cx(jv("00000401000501010601010802010a02020c02020e03021004031204031405041706041907051b08051d09061f0a07220b07240c08260d08290e092b10092d110a30120a32140b34150b37160b39180c3c190c3e1b0c411c0c431e0c451f0c48210c4a230c4c240c4f260c51280b53290b552b0b572d0b592f0a5b310a5c320a5e340a5f3609613809623909633b09643d09653e0966400a67420a68440a68450a69470b6a490b6a4a0c6b4c0c6b4d0d6c4f0d6c510e6c520e6d540f6d550f6d57106e59106e5a116e5c126e5d126e5f136e61136e62146e64156e65156e67166e69166e6a176e6c186e6d186e6f196e71196e721a6e741a6e751b6e771c6d781c6d7a1d6d7c1d6d7d1e6d7f1e6c801f6c82206c84206b85216b87216b88226a8a226a8c23698d23698f24699025689225689326679526679727669827669a28659b29649d29649f2a63a02a63a22b62a32c61a52c60a62d60a82e5fa92e5eab2f5ead305dae305cb0315bb1325ab3325ab43359b63458b73557b93556ba3655bc3754bd3853bf3952c03a51c13a50c33b4fc43c4ec63d4dc73e4cc83f4bca404acb4149cc4248ce4347cf4446d04545d24644d34743d44842d54a41d7
4b3fd84c3ed94d3dda4e3cdb503bdd513ade5238df5337e05536e15635e25734e35933e45a31e55c30e65d2fe75e2ee8602de9612bea632aeb6429eb6628ec6726ed6925ee6a24ef6c23ef6e21f06f20f1711ff1731df2741cf3761bf37819f47918f57b17f57d15f67e14f68013f78212f78410f8850ff8870ef8890cf98b0bf98c0af98e09fa9008fa9207fa9407fb9606fb9706fb9906fb9b06fb9d07fc9f07fca108fca309fca50afca60cfca80dfcaa0ffcac11fcae12fcb014fcb216fcb418fbb61afbb81dfbba1ffbbc21fbbe23fac026fac228fac42afac62df9c72ff9c932f9cb35f8cd37f8cf3af7d13df7d340f6d543f6d746f5d949f5db4cf4dd4ff4df53f4e156f3e35af3e55df2e661f2e865f2ea69f1ec6df1ed71f1ef75f1f179f2f27df2f482f3f586f3f68af4f88ef5f992f6fa96f8fb9af9fc9dfafda1fcffa4")),fx=cx(jv("0d088710078813078916078a19068c1b068d1d068e20068f2206902406912605912805922a05932c05942e05952f059631059733059735049837049938049a3a049a3c049b3e049c3f049c41049d43039e44039e46039f48039f4903a04b03a14c02a14e02a25002a25102a35302a35502a45601a45801a45901a55b01a55c01a65e01a66001a66100a76300a76400a76600a76700a86900a86a00a86c00a86e00a86f00a87100a87201a87401a87501a87701a87801a87a02a87b02a87d03a87e03a88004a88104a78305a78405a78606a68707a68808a68a09a58b0aa58d0ba58e0ca48f0da4910ea3920fa39410a29511a19613a19814a099159f9a169f9c179e9d189d9e199da01a9ca11b9ba21d9aa31e9aa51f99a62098a72197a82296aa2395ab2494ac2694ad2793ae2892b02991b12a90b22b8fb32c8eb42e8db52f8cb6308bb7318ab83289ba3388bb3488bc3587bd3786be3885bf3984c03a83c13b82c23c81c33d80c43e7fc5407ec6417dc7427cc8437bc9447aca457acb4679cc4778cc4977cd4a76ce4b75cf4c74d04d73d14e72d24f71d35171d45270d5536fd5546ed6556dd7566cd8576bd9586ada5a6ada5b69db5c68dc5d67dd5e66de5f65de6164df6263e06363e16462e26561e26660e3685fe4695ee56a5de56b5de66c5ce76e5be76f5ae87059e97158e97257ea7457eb7556eb7655ec7754ed7953ed7a52ee7b51ef7c51ef7e50f07f4ff0804ef1814df1834cf2844bf3854bf3874af48849f48948f58b47f58c46f68d45f68f44f79044f79143f79342f89441f89540f9973ff9983ef99a3efa9b3dfa9c3cfa9e3bfb9f3afba139fba238fca338fca537fca636fca835fca934fdab33fdac33fdae32fdaf31fdb130fdb22ffdb42ffdb52efeb72dfeb82cfeba2cfebb2bfebd2afebe2afec029fdc229fd
c328fdc527fdc627fdc827fdca26fdcb26fccd25fcce25fcd025fcd225fbd324fbd524fbd724fad824fada24f9dc24f9dd25f8df25f8e125f7e225f7e425f6e626f6e826f5e926f5eb27f4ed27f3ee27f3f027f2f227f1f426f1f525f0f724f0f921")),dx=function(t){return we(re(t).call(document.documentElement))},px=0;function yx(){return new gx}function gx(){this._="@"+(++px).toString(36)}gx.prototype=yx.prototype={constructor:gx,get:function(t){for(var e=this._;!(e in t);)if(!(t=t.parentNode))return;return t[e]},set:function(t,e){return t[this._]=e},remove:function(t){return this._ in t&&delete t[this._]},toString:function(){return this._}};var mx=function(t){return"string"==typeof t?new xe([document.querySelectorAll(t)],[document.documentElement]):new xe([null==t?[]:t],be)},vx=function(t,e){null==e&&(e=On().touches);for(var n=0,r=e?e.length:0,i=new Array(r);n1?0:t<-1?Sx:Math.acos(t)}function Nx(t){return t>=1?Ax:t<=-1?-Ax:Math.asin(t)}function Bx(t){return t.innerRadius}function Dx(t){return t.outerRadius}function Lx(t){return t.startAngle}function Ix(t){return t.endAngle}function Rx(t){return t&&t.padAngle}function Fx(t,e,n,r,i,a,o,s){var c=n-t,u=r-e,l=o-i,h=s-a,f=h*c-l*u;if(!(f*f<1e-12))return[t+(f=(l*(e-a)-h*(t-i))/f)*c,e+f*u]}function Px(t,e,n,r,i,a,o){var s=t-n,c=e-r,u=(o?a:-a)/Cx(s*s+c*c),l=u*c,h=-u*s,f=t+l,d=e+h,p=n+l,y=r+h,g=(f+p)/2,m=(d+y)/2,v=p-f,b=y-d,x=v*v+b*b,_=i-a,k=f*y-p*d,w=(b<0?-1:1)*Cx(wx(0,_*_*x-k*k)),E=(k*b-v*w)/x,T=(-k*v-b*w)/x,C=(k*b+v*w)/x,S=(-k*v+b*w)/x,A=E-g,M=T-m,O=C-g,N=S-m;return A*A+M*M>O*O+N*N&&(E=C,T=S),{cx:E,cy:T,x01:-l,y01:-h,x11:E*(i/_-1),y11:T*(i/_-1)}}var jx=function(){var t=Bx,e=Dx,n=bx(0),r=null,i=Lx,a=Ix,o=Rx,s=null;function c(){var c,u,l=+t.apply(this,arguments),h=+e.apply(this,arguments),f=i.apply(this,arguments)-Ax,d=a.apply(this,arguments)-Ax,p=xx(d-f),y=d>f;if(s||(s=c=$i()),h1e-12)if(p>Mx-1e-12)s.moveTo(h*kx(f),h*Tx(f)),s.arc(0,0,h,f,d,!y),l>1e-12&&(s.moveTo(l*kx(d),l*Tx(d)),s.arc(0,0,l,d,f,y));else{var 
g,m,v=f,b=d,x=f,_=d,k=p,w=p,E=o.apply(this,arguments)/2,T=E>1e-12&&(r?+r.apply(this,arguments):Cx(l*l+h*h)),C=Ex(xx(h-l)/2,+n.apply(this,arguments)),S=C,A=C;if(T>1e-12){var M=Nx(T/l*Tx(E)),O=Nx(T/h*Tx(E));(k-=2*M)>1e-12?(x+=M*=y?1:-1,_-=M):(k=0,x=_=(f+d)/2),(w-=2*O)>1e-12?(v+=O*=y?1:-1,b-=O):(w=0,v=b=(f+d)/2)}var N=h*kx(v),B=h*Tx(v),D=l*kx(_),L=l*Tx(_);if(C>1e-12){var I,R=h*kx(b),F=h*Tx(b),P=l*kx(x),j=l*Tx(x);if(p1e-12?A>1e-12?(g=Px(P,j,N,B,h,A,y),m=Px(R,F,D,L,h,A,y),s.moveTo(g.cx+g.x01,g.cy+g.y01),A1e-12&&k>1e-12?S>1e-12?(g=Px(D,L,R,F,l,-S,y),m=Px(N,B,P,j,l,-S,y),s.lineTo(g.cx+g.x01,g.cy+g.y01),S=l;--h)s.point(g[h],m[h]);s.lineEnd(),s.areaEnd()}y&&(g[u]=+t(f,u,c),m[u]=+n(f,u,c),s.point(e?+e(f,u,c):g[u],r?+r(f,u,c):m[u]))}if(d)return s=null,d+""||null}function u(){return qx().defined(i).curve(o).context(a)}return c.x=function(n){return arguments.length?(t="function"==typeof n?n:bx(+n),e=null,c):t},c.x0=function(e){return arguments.length?(t="function"==typeof e?e:bx(+e),c):t},c.x1=function(t){return arguments.length?(e=null==t?null:"function"==typeof t?t:bx(+t),c):e},c.y=function(t){return arguments.length?(n="function"==typeof t?t:bx(+t),r=null,c):n},c.y0=function(t){return arguments.length?(n="function"==typeof t?t:bx(+t),c):n},c.y1=function(t){return arguments.length?(r=null==t?null:"function"==typeof t?t:bx(+t),c):r},c.lineX0=c.lineY0=function(){return u().x(t).y(n)},c.lineY1=function(){return u().x(t).y(r)},c.lineX1=function(){return u().x(e).y(n)},c.defined=function(t){return arguments.length?(i="function"==typeof t?t:bx(!!t),c):i},c.curve=function(t){return arguments.length?(o=t,null!=a&&(s=o(a)),c):o},c.context=function(t){return arguments.length?(null==t?a=s=null:s=o(a=t),c):a},c},Hx=function(t,e){return et?1:e>=t?0:NaN},Vx=function(t){return t},Gx=function(){var t=Vx,e=Hx,n=null,r=bx(0),i=bx(Mx),a=bx(0);function o(o){var s,c,u,l,h,f=o.length,d=0,p=new Array(f),y=new 
Array(f),g=+r.apply(this,arguments),m=Math.min(Mx,Math.max(-Mx,i.apply(this,arguments)-g)),v=Math.min(Math.abs(m)/f,a.apply(this,arguments)),b=v*(m<0?-1:1);for(s=0;s0&&(d+=h);for(null!=e?p.sort((function(t,n){return e(y[t],y[n])})):null!=n&&p.sort((function(t,e){return n(o[t],o[e])})),s=0,u=d?(m-f*b)/d:0;s0?h*u:0)+b,y[c]={data:o[c],index:s,value:h,startAngle:g,endAngle:l,padAngle:v};return y}return o.value=function(e){return arguments.length?(t="function"==typeof e?e:bx(+e),o):t},o.sortValues=function(t){return arguments.length?(e=t,n=null,o):e},o.sort=function(t){return arguments.length?(n=t,e=null,o):n},o.startAngle=function(t){return arguments.length?(r="function"==typeof t?t:bx(+t),o):r},o.endAngle=function(t){return arguments.length?(i="function"==typeof t?t:bx(+t),o):i},o.padAngle=function(t){return arguments.length?(a="function"==typeof t?t:bx(+t),o):a},o},Xx=Kx(zx);function Zx(t){this._curve=t}function Kx(t){function e(e){return new Zx(t(e))}return e._curve=t,e}function Qx(t){var e=t.curve;return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t.curve=function(t){return arguments.length?e(Kx(t)):e()._curve},t}Zx.prototype={areaStart:function(){this._curve.areaStart()},areaEnd:function(){this._curve.areaEnd()},lineStart:function(){this._curve.lineStart()},lineEnd:function(){this._curve.lineEnd()},point:function(t,e){this._curve.point(e*Math.sin(t),e*-Math.cos(t))}};var Jx=function(){return Qx(qx().curve(Xx))},t_=function(){var t=Wx().curve(Xx),e=t.curve,n=t.lineX0,r=t.lineX1,i=t.lineY0,a=t.lineY1;return t.angle=t.x,delete t.x,t.startAngle=t.x0,delete t.x0,t.endAngle=t.x1,delete t.x1,t.radius=t.y,delete t.y,t.innerRadius=t.y0,delete t.y0,t.outerRadius=t.y1,delete t.y1,t.lineStartAngle=function(){return Qx(n())},delete t.lineX0,t.lineEndAngle=function(){return Qx(r())},delete t.lineX1,t.lineInnerRadius=function(){return Qx(i())},delete t.lineY0,t.lineOuterRadius=function(){return Qx(a())},delete t.lineY1,t.curve=function(t){return 
arguments.length?e(Kx(t)):e()._curve},t},e_=function(t,e){return[(e=+e)*Math.cos(t-=Math.PI/2),e*Math.sin(t)]},n_=Array.prototype.slice;function r_(t){return t.source}function i_(t){return t.target}function a_(t){var e=r_,n=i_,r=Ux,i=$x,a=null;function o(){var o,s=n_.call(arguments),c=e.apply(this,s),u=n.apply(this,s);if(a||(a=o=$i()),t(a,+r.apply(this,(s[0]=c,s)),+i.apply(this,s),+r.apply(this,(s[0]=u,s)),+i.apply(this,s)),o)return a=null,o+""||null}return o.source=function(t){return arguments.length?(e=t,o):e},o.target=function(t){return arguments.length?(n=t,o):n},o.x=function(t){return arguments.length?(r="function"==typeof t?t:bx(+t),o):r},o.y=function(t){return arguments.length?(i="function"==typeof t?t:bx(+t),o):i},o.context=function(t){return arguments.length?(a=null==t?null:t,o):a},o}function o_(t,e,n,r,i){t.moveTo(e,n),t.bezierCurveTo(e=(e+r)/2,n,e,i,r,i)}function s_(t,e,n,r,i){t.moveTo(e,n),t.bezierCurveTo(e,n=(n+i)/2,r,n,r,i)}function c_(t,e,n,r,i){var a=e_(e,n),o=e_(e,n=(n+i)/2),s=e_(r,n),c=e_(r,i);t.moveTo(a[0],a[1]),t.bezierCurveTo(o[0],o[1],s[0],s[1],c[0],c[1])}function u_(){return a_(o_)}function l_(){return a_(s_)}function h_(){var t=a_(c_);return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t}var f_={draw:function(t,e){var n=Math.sqrt(e/Sx);t.moveTo(n,0),t.arc(0,0,n,0,Mx)}},d_={draw:function(t,e){var n=Math.sqrt(e/5)/2;t.moveTo(-3*n,-n),t.lineTo(-n,-n),t.lineTo(-n,-3*n),t.lineTo(n,-3*n),t.lineTo(n,-n),t.lineTo(3*n,-n),t.lineTo(3*n,n),t.lineTo(n,n),t.lineTo(n,3*n),t.lineTo(-n,3*n),t.lineTo(-n,n),t.lineTo(-3*n,n),t.closePath()}},p_=Math.sqrt(1/3),y_=2*p_,g_={draw:function(t,e){var n=Math.sqrt(e/y_),r=n*p_;t.moveTo(0,-n),t.lineTo(r,0),t.lineTo(0,n),t.lineTo(-r,0),t.closePath()}},m_=Math.sin(Sx/10)/Math.sin(7*Sx/10),v_=Math.sin(Mx/10)*m_,b_=-Math.cos(Mx/10)*m_,x_={draw:function(t,e){var n=Math.sqrt(.8908130915292852*e),r=v_*n,i=b_*n;t.moveTo(0,-n),t.lineTo(r,i);for(var a=1;a<5;++a){var 
o=Mx*a/5,s=Math.cos(o),c=Math.sin(o);t.lineTo(c*n,-s*n),t.lineTo(s*r-c*i,c*r+s*i)}t.closePath()}},__={draw:function(t,e){var n=Math.sqrt(e),r=-n/2;t.rect(r,r,n,n)}},k_=Math.sqrt(3),w_={draw:function(t,e){var n=-Math.sqrt(e/(3*k_));t.moveTo(0,2*n),t.lineTo(-k_*n,-n),t.lineTo(k_*n,-n),t.closePath()}},E_=Math.sqrt(3)/2,T_=1/Math.sqrt(12),C_=3*(T_/2+1),S_={draw:function(t,e){var n=Math.sqrt(e/C_),r=n/2,i=n*T_,a=r,o=n*T_+n,s=-a,c=o;t.moveTo(r,i),t.lineTo(a,o),t.lineTo(s,c),t.lineTo(-.5*r-E_*i,E_*r+-.5*i),t.lineTo(-.5*a-E_*o,E_*a+-.5*o),t.lineTo(-.5*s-E_*c,E_*s+-.5*c),t.lineTo(-.5*r+E_*i,-.5*i-E_*r),t.lineTo(-.5*a+E_*o,-.5*o-E_*a),t.lineTo(-.5*s+E_*c,-.5*c-E_*s),t.closePath()}},A_=[f_,d_,g_,__,x_,w_,S_],M_=function(){var t=bx(f_),e=bx(64),n=null;function r(){var r;if(n||(n=r=$i()),t.apply(this,arguments).draw(n,+e.apply(this,arguments)),r)return n=null,r+""||null}return r.type=function(e){return arguments.length?(t="function"==typeof e?e:bx(e),r):t},r.size=function(t){return arguments.length?(e="function"==typeof t?t:bx(+t),r):e},r.context=function(t){return arguments.length?(n=null==t?null:t,r):n},r},O_=function(){};function N_(t,e,n){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+e)/6,(t._y0+4*t._y1+n)/6)}function B_(t){this._context=t}B_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){switch(this._point){case 3:N_(this,this._x1,this._y1);case 2:this._context.lineTo(this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;break;case 
2:this._point=3,this._context.lineTo((5*this._x0+this._x1)/6,(5*this._y0+this._y1)/6);default:N_(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var D_=function(t){return new B_(t)};function L_(t){this._context=t}L_.prototype={areaStart:O_,areaEnd:O_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._y0=this._y1=this._y2=this._y3=this._y4=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x2,this._y2),this._context.closePath();break;case 2:this._context.moveTo((this._x2+2*this._x3)/3,(this._y2+2*this._y3)/3),this._context.lineTo((this._x3+2*this._x2)/3,(this._y3+2*this._y2)/3),this._context.closePath();break;case 3:this.point(this._x2,this._y2),this.point(this._x3,this._y3),this.point(this._x4,this._y4)}},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._x2=t,this._y2=e;break;case 1:this._point=2,this._x3=t,this._y3=e;break;case 2:this._point=3,this._x4=t,this._y4=e,this._context.moveTo((this._x0+4*this._x1+t)/6,(this._y0+4*this._y1+e)/6);break;default:N_(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var I_=function(t){return new L_(t)};function R_(t){this._context=t}R_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3;var n=(this._x0+4*this._x1+t)/6,r=(this._y0+4*this._y1+e)/6;this._line?this._context.lineTo(n,r):this._context.moveTo(n,r);break;case 3:this._point=4;default:N_(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var F_=function(t){return new R_(t)};function P_(t,e){this._basis=new 
B_(t),this._beta=e}P_.prototype={lineStart:function(){this._x=[],this._y=[],this._basis.lineStart()},lineEnd:function(){var t=this._x,e=this._y,n=t.length-1;if(n>0)for(var r,i=t[0],a=e[0],o=t[n]-i,s=e[n]-a,c=-1;++c<=n;)r=c/n,this._basis.point(this._beta*t[c]+(1-this._beta)*(i+r*o),this._beta*e[c]+(1-this._beta)*(a+r*s));this._x=this._y=null,this._basis.lineEnd()},point:function(t,e){this._x.push(+t),this._y.push(+e)}};var j_=function t(e){function n(t){return 1===e?new B_(t):new P_(t,e)}return n.beta=function(e){return t(+e)},n}(.85);function Y_(t,e,n){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-e),t._y2+t._k*(t._y1-n),t._x2,t._y2)}function z_(t,e){this._context=t,this._k=(1-e)/6}z_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:Y_(this,this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2,this._x1=t,this._y1=e;break;case 2:this._point=3;default:Y_(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var U_=function t(e){function n(t){return new z_(t,e)}return n.tension=function(e){return t(+e)},n}(0);function $_(t,e){this._context=t,this._k=(1-e)/6}$_.prototype={areaStart:O_,areaEnd:O_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 
2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._x3=t,this._y3=e;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=e);break;case 2:this._point=3,this._x5=t,this._y5=e;break;default:Y_(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var q_=function t(e){function n(t){return new $_(t,e)}return n.tension=function(e){return t(+e)},n}(0);function W_(t,e){this._context=t,this._k=(1-e)/6}W_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:Y_(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var H_=function t(e){function n(t){return new W_(t,e)}return n.tension=function(e){return t(+e)},n}(0);function V_(t,e,n){var r=t._x1,i=t._y1,a=t._x2,o=t._y2;if(t._l01_a>1e-12){var s=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,c=3*t._l01_a*(t._l01_a+t._l12_a);r=(r*s-t._x0*t._l12_2a+t._x2*t._l01_2a)/c,i=(i*s-t._y0*t._l12_2a+t._y2*t._l01_2a)/c}if(t._l23_a>1e-12){var u=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,l=3*t._l23_a*(t._l23_a+t._l12_a);a=(a*u+t._x1*t._l23_2a-e*t._l12_2a)/l,o=(o*u+t._y1*t._l23_2a-n*t._l12_2a)/l}t._context.bezierCurveTo(r,i,a,o,t._x2,t._y2)}function 
G_(t,e){this._context=t,this._alpha=e}G_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:this.point(this._x2,this._y2)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;break;case 2:this._point=3;default:V_(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var X_=function t(e){function n(t){return e?new G_(t,e):new z_(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function Z_(t,e){this._context=t,this._alpha=e}Z_.prototype={areaStart:O_,areaEnd:O_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=t,this._y3=e;break;case 
1:this._point=2,this._context.moveTo(this._x4=t,this._y4=e);break;case 2:this._point=3,this._x5=t,this._y5=e;break;default:V_(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var K_=function t(e){function n(t){return e?new Z_(t,e):new $_(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function Q_(t,e){this._context=t,this._alpha=e}Q_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:V_(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var J_=function t(e){function n(t){return e?new Q_(t,e):new W_(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function tk(t){this._context=t}tk.prototype={areaStart:O_,areaEnd:O_,lineStart:function(){this._point=0},lineEnd:function(){this._point&&this._context.closePath()},point:function(t,e){t=+t,e=+e,this._point?this._context.lineTo(t,e):(this._point=1,this._context.moveTo(t,e))}};var ek=function(t){return new tk(t)};function nk(t){return t<0?-1:1}function rk(t,e,n){var 
r=t._x1-t._x0,i=e-t._x1,a=(t._y1-t._y0)/(r||i<0&&-0),o=(n-t._y1)/(i||r<0&&-0),s=(a*i+o*r)/(r+i);return(nk(a)+nk(o))*Math.min(Math.abs(a),Math.abs(o),.5*Math.abs(s))||0}function ik(t,e){var n=t._x1-t._x0;return n?(3*(t._y1-t._y0)/n-e)/2:e}function ak(t,e,n){var r=t._x0,i=t._y0,a=t._x1,o=t._y1,s=(a-r)/3;t._context.bezierCurveTo(r+s,i+s*e,a-s,o-s*n,a,o)}function ok(t){this._context=t}function sk(t){this._context=new ck(t)}function ck(t){this._context=t}function uk(t){return new ok(t)}function lk(t){return new sk(t)}function hk(t){this._context=t}function fk(t){var e,n,r=t.length-1,i=new Array(r),a=new Array(r),o=new Array(r);for(i[0]=0,a[0]=2,o[0]=t[0]+2*t[1],e=1;e=0;--e)i[e]=(o[e]-i[e+1])/a[e];for(a[r-1]=(t[r]+i[r-1])/2,e=0;e=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,e),this._context.lineTo(t,e);else{var n=this._x*(1-this._t)+t*this._t;this._context.lineTo(n,this._y),this._context.lineTo(n,e)}}this._x=t,this._y=e}};var yk=function(t){return new pk(t,.5)};function gk(t){return new pk(t,0)}function mk(t){return new pk(t,1)}var vk=function(t,e){if((i=t.length)>1)for(var n,r,i,a=1,o=t[e[0]],s=o.length;a=0;)n[e]=e;return n};function xk(t,e){return t[e]}var _k=function(){var t=bx([]),e=bk,n=vk,r=xk;function i(i){var a,o,s=t.apply(this,arguments),c=i.length,u=s.length,l=new Array(u);for(a=0;a0){for(var n,r,i,a=0,o=t[0].length;a0)for(var n,r,i,a,o,s,c=0,u=t[e[0]].length;c0?(r[0]=a,r[1]=a+=i):i<0?(r[1]=o,r[0]=o+=i):(r[0]=0,r[1]=i)},Ek=function(t,e){if((n=t.length)>0){for(var n,r=0,i=t[e[0]],a=i.length;r0&&(r=(n=t[e[0]]).length)>0){for(var n,r,i,a=0,o=1;oa&&(a=e,r=n);return r}var Ak=function(t){var e=t.map(Mk);return bk(t).sort((function(t,n){return e[t]-e[n]}))};function Mk(t){for(var 
e,n=0,r=-1,i=t.length;++r0)){if(a/=f,f<0){if(a0){if(a>h)return;a>l&&(l=a)}if(a=r-c,f||!(a<0)){if(a/=f,f<0){if(a>h)return;a>l&&(l=a)}else if(f>0){if(a0)){if(a/=d,d<0){if(a0){if(a>h)return;a>l&&(l=a)}if(a=i-u,d||!(a<0)){if(a/=d,d<0){if(a>h)return;a>l&&(l=a)}else if(d>0){if(a0||h<1)||(l>0&&(t[0]=[c+l*f,u+l*d]),h<1&&(t[1]=[c+h*f,u+h*d]),!0)}}}}}function Xk(t,e,n,r,i){var a=t[1];if(a)return!0;var o,s,c=t[0],u=t.left,l=t.right,h=u[0],f=u[1],d=l[0],p=l[1],y=(h+d)/2,g=(f+p)/2;if(p===f){if(y=r)return;if(h>d){if(c){if(c[1]>=i)return}else c=[y,n];a=[y,i]}else{if(c){if(c[1]1)if(h>d){if(c){if(c[1]>=i)return}else c=[(n-s)/o,n];a=[(i-s)/o,i]}else{if(c){if(c[1]=r)return}else c=[e,o*e+s];a=[r,o*r+s]}else{if(c){if(c[0]=-mw)){var d=c*c+u*u,p=l*l+h*h,y=(h*d-u*p)/f,g=(c*p-l*d)/f,m=tw.pop()||new ew;m.arc=t,m.site=i,m.x=y+o,m.y=(m.cy=g+s)+Math.sqrt(y*y+g*g),t.circle=m;for(var v=null,b=pw._;b;)if(m.ygw)s=s.L;else{if(!((i=a-hw(s,o))>gw)){r>-gw?(e=s.P,n=s):i>-gw?(e=s,n=s.N):e=n=s;break}if(!s.R){e=s;break}s=s.R}!function(t){dw[t.index]={site:t,halfedges:[]}}(t);var c=ow(t);if(fw.insert(e,c),e||n){if(e===n)return rw(e),n=ow(e.site),fw.insert(c,n),c.edge=n.edge=Wk(e.site,c.site),nw(e),void nw(n);if(n){rw(e),rw(n);var u=e.site,l=u[0],h=u[1],f=t[0]-l,d=t[1]-h,p=n.site,y=p[0]-l,g=p[1]-h,m=2*(f*g-d*y),v=f*f+d*d,b=y*y+g*g,x=[(g*v-d*b)/m+l,(f*b-y*v)/m+h];Vk(n.edge,u,p,x),c.edge=Wk(u,t,null,x),n.edge=Wk(t,p,null,x),nw(e),nw(n)}else c.edge=Wk(e.site,c.site)}}function lw(t,e){var n=t.site,r=n[0],i=n[1],a=i-e;if(!a)return r;var o=t.P;if(!o)return-1/0;var s=(n=o.site)[0],c=n[1],u=c-e;if(!u)return s;var l=s-r,h=1/a-1/u,f=l/u;return h?(-f+Math.sqrt(f*f-2*h*(l*l/(-2*u)-c+u/2+i-a/2)))/h+r:(r+s)/2}function hw(t,e){var n=t.N;if(n)return lw(n,e);var r=t.site;return r[1]===e?r[0]:1/0}var fw,dw,pw,yw,gw=1e-6,mw=1e-12;function vw(t,e){return e[1]-t[1]||e[0]-t[0]}function bw(t,e){var n,r,i,a=t.sort(vw).pop();for(yw=[],dw=new Array(t.length),fw=new qk,pw=new 
qk;;)if(i=Jk,a&&(!i||a[1]gw||Math.abs(i[0][1]-i[1][1])>gw)||delete yw[a]}(o,s,c,u),function(t,e,n,r){var i,a,o,s,c,u,l,h,f,d,p,y,g=dw.length,m=!0;for(i=0;igw||Math.abs(y-f)>gw)&&(c.splice(s,0,yw.push(Hk(o,d,Math.abs(p-t)gw?[t,Math.abs(h-t)gw?[Math.abs(f-r)gw?[n,Math.abs(h-n)gw?[Math.abs(f-e)=s)return null;var c=t-i.site[0],u=e-i.site[1],l=c*c+u*u;do{i=a.cells[r=o],o=null,i.halfedges.forEach((function(n){var r=a.edges[n],s=r.left;if(s!==i.site&&s||(s=r.right)){var c=t-s[0],u=e-s[1],h=c*c+u*u;hr?(r+i)/2:Math.min(0,r)||Math.max(0,i),o>a?(a+o)/2:Math.min(0,a)||Math.max(0,o))}var Lw=function(){var t,e,n=Aw,r=Mw,i=Dw,a=Nw,o=Bw,s=[0,1/0],c=[[-1/0,-1/0],[1/0,1/0]],u=250,l=dp,h=ht("start","zoom","end"),f=0;function d(t){t.property("__zoom",Ow).on("wheel.zoom",x).on("mousedown.zoom",_).on("dblclick.zoom",k).filter(o).on("touchstart.zoom",w).on("touchmove.zoom",E).on("touchend.zoom touchcancel.zoom",T).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function p(t,e){return(e=Math.max(s[0],Math.min(s[1],e)))===t.k?t:new ww(e,t.x,t.y)}function y(t,e,n){var r=e[0]-n[0]*t.k,i=e[1]-n[1]*t.k;return r===t.x&&i===t.y?t:new ww(t.k,r,i)}function g(t){return[(+t[0][0]+ +t[1][0])/2,(+t[0][1]+ +t[1][1])/2]}function m(t,e,n){t.on("start.zoom",(function(){v(this,arguments).start()})).on("interrupt.zoom end.zoom",(function(){v(this,arguments).end()})).tween("zoom",(function(){var t=this,i=arguments,a=v(t,i),o=r.apply(t,i),s=null==n?g(o):"function"==typeof n?n.apply(t,i):n,c=Math.max(o[1][0]-o[0][0],o[1][1]-o[0][1]),u=t.__zoom,h="function"==typeof e?e.apply(t,i):e,f=l(u.invert(s).concat(c/u.k),h.invert(s).concat(c/h.k));return function(t){if(1===t)t=h;else{var e=f(t),n=c/e[2];t=new ww(n,s[0]-e[0]*n,s[1]-e[1]*n)}a.zoom(null,t)}}))}function v(t,e,n){return!n&&t.__zooming||new b(t,e)}function b(t,e){this.that=t,this.args=e,this.active=0,this.extent=r.apply(t,e),this.taps=0}function x(){if(n.apply(this,arguments)){var 
t=v(this,arguments),e=this.__zoom,r=Math.max(s[0],Math.min(s[1],e.k*Math.pow(2,a.apply(this,arguments)))),o=Dn(this);if(t.wheel)t.mouse[0][0]===o[0]&&t.mouse[0][1]===o[1]||(t.mouse[1]=e.invert(t.mouse[0]=o)),clearTimeout(t.wheel);else{if(e.k===r)return;t.mouse=[o,e.invert(o)],sr(this),t.start()}Sw(),t.wheel=setTimeout(u,150),t.zoom("mouse",i(y(p(e,r),t.mouse[0],t.mouse[1]),t.extent,c))}function u(){t.wheel=null,t.end()}}function _(){if(!e&&n.apply(this,arguments)){var t=v(this,arguments,!0),r=we(ue.view).on("mousemove.zoom",u,!0).on("mouseup.zoom",l,!0),a=Dn(this),o=ue.clientX,s=ue.clientY;Ce(ue.view),Cw(),t.mouse=[a,this.__zoom.invert(a)],sr(this),t.start()}function u(){if(Sw(),!t.moved){var e=ue.clientX-o,n=ue.clientY-s;t.moved=e*e+n*n>f}t.zoom("mouse",i(y(t.that.__zoom,t.mouse[0]=Dn(t.that),t.mouse[1]),t.extent,c))}function l(){r.on("mousemove.zoom mouseup.zoom",null),Se(ue.view,t.moved),Sw(),t.end()}}function k(){if(n.apply(this,arguments)){var t=this.__zoom,e=Dn(this),a=t.invert(e),o=t.k*(ue.shiftKey?.5:2),s=i(y(p(t,o),e,a),r.apply(this,arguments),c);Sw(),u>0?we(this).transition().duration(u).call(m,s,e):we(this).call(d.transform,s)}}function w(){if(n.apply(this,arguments)){var e,r,i,a,o=ue.touches,s=o.length,c=v(this,arguments,ue.changedTouches.length===s);for(Cw(),r=0;rh&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:m,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 
2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,y.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},M={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return 
this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),58;case 1:return this.begin("type_directive"),59;case 2:return this.popState(),this.begin("arg_directive"),14;case 3:return this.popState(),this.popState(),61;case 4:return 60;case 5:return 5;case 6:case 7:case 8:case 9:case 10:break;case 11:return this.begin("ID"),16;case 12:return e.yytext=e.yytext.trim(),this.begin("ALIAS"),48;case 13:return this.popState(),this.popState(),this.begin("LINE"),18;case 14:return this.popState(),this.popState(),5;case 15:return this.begin("LINE"),27;case 16:return this.begin("LINE"),29;case 17:return this.begin("LINE"),30;case 18:return this.begin("LINE"),31;case 19:return this.begin("LINE"),36;case 20:return this.begin("LINE"),33;case 21:return this.begin("LINE"),35;case 22:return this.popState(),19;case 23:return 28;case 24:return 43;case 25:return 44;case 26:return 39;case 27:return 37;case 28:return this.begin("ID"),22;case 29:return this.begin("ID"),23;case 30:return 25;case 31:return 7;case 32:return 21;case 33:return 42;case 34:return 5;case 35:return e.yytext=e.yytext.trim(),48;case 36:return 51;case 37:return 52;case 38:return 49;case 39:return 50;case 40:return 53;case 41:return 54;case 42:return 55;case 
43:return 56;case 44:return 57;case 45:return 46;case 46:return 47;case 47:return 5;case 48:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:((?!\n)\s)+)/i,/^(?:#[^\n]*)/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:participant\b)/i,/^(?:[^\->:\n,;]+?(?=((?!\n)\s)+as(?!\n)\s|[#\n;]|$))/i,/^(?:as\b)/i,/^(?:(?:))/i,/^(?:loop\b)/i,/^(?:rect\b)/i,/^(?:opt\b)/i,/^(?:alt\b)/i,/^(?:else\b)/i,/^(?:par\b)/i,/^(?:and\b)/i,/^(?:(?:[:]?(?:no)?wrap)?[^#\n;]*)/i,/^(?:end\b)/i,/^(?:left of\b)/i,/^(?:right of\b)/i,/^(?:over\b)/i,/^(?:note\b)/i,/^(?:activate\b)/i,/^(?:deactivate\b)/i,/^(?:title\b)/i,/^(?:sequenceDiagram\b)/i,/^(?:autonumber\b)/i,/^(?:,)/i,/^(?:;)/i,/^(?:[^\+\->:\n,;]+((?!(-x|--x|-\)|--\)))[\-]*[^\+\->:\n,;]+)*)/i,/^(?:->>)/i,/^(?:-->>)/i,/^(?:->)/i,/^(?:-->)/i,/^(?:-[x])/i,/^(?:--[x])/i,/^(?:-[\)])/i,/^(?:--[\)])/i,/^(?::(?:(?:no)?wrap)?[^#\n;]+)/i,/^(?:\+)/i,/^(?:-)/i,/^(?:$)/i,/^(?:.)/i],conditions:{open_directive:{rules:[1,8],inclusive:!1},type_directive:{rules:[2,3,8],inclusive:!1},arg_directive:{rules:[3,4,8],inclusive:!1},ID:{rules:[7,8,12],inclusive:!1},ALIAS:{rules:[7,8,13,14],inclusive:!1},LINE:{rules:[7,8,22],inclusive:!1},INITIAL:{rules:[0,5,6,8,9,10,11,15,16,17,18,19,20,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48],inclusive:!0}}};function O(){this.yy={}}return A.lexer=M,O.prototype=A,A.Parser=O,new O}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(17).readFileSync(n(18).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(12),n(7)(t))},function(t,e,n){var r=n(203);t.exports={Graph:r.Graph,json:n(306),alg:n(307),version:r.version}},function(t,e,n){var 
r;try{r={cloneDeep:n(318),constant:n(88),defaults:n(154),each:n(89),filter:n(128),find:n(319),flatten:n(156),forEach:n(126),forIn:n(326),has:n(94),isUndefined:n(139),last:n(327),map:n(140),mapValues:n(328),max:n(329),merge:n(331),min:n(336),minBy:n(337),now:n(338),pick:n(161),range:n(162),reduce:n(142),sortBy:n(345),uniqueId:n(163),values:n(147),zipObject:n(350)}}catch(t){}r||(r=window._),t.exports=r},function(t,e){var n=Array.isArray;t.exports=n},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(99),i=n(178),a=n(179),o=n(180),s={format:{keyword:i.default,hex:r.default,rgb:a.default,rgba:a.default,hsl:o.default,hsla:o.default},parse:function(t){if("string"!=typeof t)return t;var e=r.default.parse(t)||a.default.parse(t)||o.default.parse(t)||i.default.parse(t);if(e)return e;throw new Error('Unsupported color format: "'+t+'"')},stringify:function(t){return!t.changed&&t.color?t.color:t.type.is(2)||void 0===t.data.r?o.default.stringify(t):t.a<1||!Number.isInteger(t.r)||!Number.isInteger(t.g)||!Number.isInteger(t.b)?a.default.stringify(t):r.default.stringify(t)}};e.default=s},function(t,e){t.exports=function(t){return t.webpackPolyfill||(t.deprecate=function(){},t.paths=[],t.children||(t.children=[]),Object.defineProperty(t,"loaded",{enumerable:!0,get:function(){return t.l}}),Object.defineProperty(t,"id",{enumerable:!0,get:function(){return t.i}}),t.webpackPolyfill=1),t}},function(t,e,n){ /** * @license * Copyright (c) 2012-2013 Chris Pettitt @@ -28,22 +21,14 @@ var r=n(813),i=n(814),o=n(408);function a(){return s.TYPED_ARRAY_SUPPORT?2147483 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ -t.exports={graphlib:n(566),dagre:n(386),intersect:n(736),render:n(738),util:n(31),version:n(750)}},function(t,e,n){"use strict";var r=n(11);n.d(e,"a",(function(){return r.e})),n.d(e,"h",(function(){return r.g})),n.d(e,"e",(function(){return r.f}));var i=n(121);n.d(e,"f",(function(){return i.a})),n.d(e,"d",(function(){return i.c})),n.d(e,"g",(function(){return i.d})),n.d(e,"c",(function(){return i.b}));var o=n(211);n.d(e,"b",(function(){return o.a}))},function(t,e,n){"use strict";var r=n(7),i=n(37).Graph;function o(t,e,n,i){var o;do{o=r.uniqueId(i)}while(t.hasNode(o));return n.dummy=e,t.setNode(o,n),o}function a(t){return r.max(r.map(t.nodes(),(function(e){var n=t.node(e).rank;if(!r.isUndefined(n))return n})))}t.exports={addDummyNode:o,simplify:function(t){var e=(new i).setGraph(t.graph());return r.forEach(t.nodes(),(function(n){e.setNode(n,t.node(n))})),r.forEach(t.edges(),(function(n){var r=e.edge(n.v,n.w)||{weight:0,minlen:1},i=t.edge(n);e.setEdge(n.v,n.w,{weight:r.weight+i.weight,minlen:Math.max(r.minlen,i.minlen)})})),e},asNonCompoundGraph:function(t){var e=new i({multigraph:t.isMultigraph()}).setGraph(t.graph());return r.forEach(t.nodes(),(function(n){t.children(n).length||e.setNode(n,t.node(n))})),r.forEach(t.edges(),(function(n){e.setEdge(n,t.edge(n))})),e},successorWeights:function(t){var e=r.map(t.nodes(),(function(e){var n={};return r.forEach(t.outEdges(e),(function(e){n[e.w]=(n[e.w]||0)+t.edge(e).weight})),n}));return r.zipObject(t.nodes(),e)},predecessorWeights:function(t){var e=r.map(t.nodes(),(function(e){var n={};return r.forEach(t.inEdges(e),(function(e){n[e.v]=(n[e.v]||0)+t.edge(e).weight})),n}));return r.zipObject(t.nodes(),e)},intersectRect:function(t,e){var n,r,i=t.x,o=t.y,a=e.x-i,u=e.y-o,s=t.width/2,c=t.height/2;if(!a&&!u)throw new Error("Not possible to find intersection inside of the rectangle");Math.abs(u)*s>Math.abs(a)*c?(u<0&&(c=-c),n=c*a/u,r=c):(a<0&&(s=-s),n=s,r=s*u/a);return{x:i+n,y:o+r}},buildLayerMatrix:function(t){var 
e=r.map(r.range(a(t)+1),(function(){return[]}));return r.forEach(t.nodes(),(function(n){var i=t.node(n),o=i.rank;r.isUndefined(o)||(e[o][i.order]=n)})),e},normalizeRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank})));r.forEach(t.nodes(),(function(n){var i=t.node(n);r.has(i,"rank")&&(i.rank-=e)}))},removeEmptyRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank}))),n=[];r.forEach(t.nodes(),(function(r){var i=t.node(r).rank-e;n[i]||(n[i]=[]),n[i].push(r)}));var i=0,o=t.graph().nodeRankFactor;r.forEach(n,(function(e,n){r.isUndefined(e)&&n%o!=0?--i:i&&r.forEach(e,(function(e){t.node(e).rank+=i}))}))},addBorderNode:function(t,e,n,r){var i={width:0,height:0};arguments.length>=4&&(i.rank=n,i.order=r);return o(t,"border",i,e)},maxRank:a,partition:function(t,e){var n={lhs:[],rhs:[]};return r.forEach(t,(function(t){e(t)?n.lhs.push(t):n.rhs.push(t)})),n},time:function(t,e){var n=r.now();try{return e()}finally{console.log(t+" time: "+(r.now()-n)+"ms")}},notime:function(t,e){return e()}}},function(t,e,n){"use strict";var r=n(8),i=n(38).Graph;function o(t,e,n,i){var o;do{o=r.uniqueId(i)}while(t.hasNode(o));return n.dummy=e,t.setNode(o,n),o}function a(t){return r.max(r.map(t.nodes(),(function(e){var n=t.node(e).rank;if(!r.isUndefined(n))return n})))}t.exports={addDummyNode:o,simplify:function(t){var e=(new i).setGraph(t.graph());return r.forEach(t.nodes(),(function(n){e.setNode(n,t.node(n))})),r.forEach(t.edges(),(function(n){var r=e.edge(n.v,n.w)||{weight:0,minlen:1},i=t.edge(n);e.setEdge(n.v,n.w,{weight:r.weight+i.weight,minlen:Math.max(r.minlen,i.minlen)})})),e},asNonCompoundGraph:function(t){var e=new i({multigraph:t.isMultigraph()}).setGraph(t.graph());return r.forEach(t.nodes(),(function(n){t.children(n).length||e.setNode(n,t.node(n))})),r.forEach(t.edges(),(function(n){e.setEdge(n,t.edge(n))})),e},successorWeights:function(t){var e=r.map(t.nodes(),(function(e){var n={};return 
r.forEach(t.outEdges(e),(function(e){n[e.w]=(n[e.w]||0)+t.edge(e).weight})),n}));return r.zipObject(t.nodes(),e)},predecessorWeights:function(t){var e=r.map(t.nodes(),(function(e){var n={};return r.forEach(t.inEdges(e),(function(e){n[e.v]=(n[e.v]||0)+t.edge(e).weight})),n}));return r.zipObject(t.nodes(),e)},intersectRect:function(t,e){var n,r,i=t.x,o=t.y,a=e.x-i,u=e.y-o,s=t.width/2,c=t.height/2;if(!a&&!u)throw new Error("Not possible to find intersection inside of the rectangle");Math.abs(u)*s>Math.abs(a)*c?(u<0&&(c=-c),n=c*a/u,r=c):(a<0&&(s=-s),n=s,r=s*u/a);return{x:i+n,y:o+r}},buildLayerMatrix:function(t){var e=r.map(r.range(a(t)+1),(function(){return[]}));return r.forEach(t.nodes(),(function(n){var i=t.node(n),o=i.rank;r.isUndefined(o)||(e[o][i.order]=n)})),e},normalizeRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank})));r.forEach(t.nodes(),(function(n){var i=t.node(n);r.has(i,"rank")&&(i.rank-=e)}))},removeEmptyRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank}))),n=[];r.forEach(t.nodes(),(function(r){var i=t.node(r).rank-e;n[i]||(n[i]=[]),n[i].push(r)}));var i=0,o=t.graph().nodeRankFactor;r.forEach(n,(function(e,n){r.isUndefined(e)&&n%o!=0?--i:i&&r.forEach(e,(function(e){t.node(e).rank+=i}))}))},addBorderNode:function(t,e,n,r){var i={width:0,height:0};arguments.length>=4&&(i.rank=n,i.order=r);return o(t,"border",i,e)},maxRank:a,partition:function(t,e){var n={lhs:[],rhs:[]};return r.forEach(t,(function(t){e(t)?n.lhs.push(t):n.rhs.push(t)})),n},time:function(t,e){var n=r.now();try{return e()}finally{console.log(t+" time: "+(r.now()-n)+"ms")}},notime:function(t,e){return e()}}},function(t,e,n){"use strict";e.a=function(t,e){return t=+t,e=+e,function(n){return t*(1-n)+e*n}}},function(t,e,n){"use strict";function r(t,e){var n=Object.create(t.prototype);for(var r in e)n[r]=e[r];return n}n.d(e,"b",(function(){return r})),e.a=function(t,e,n){t.prototype=e.prototype=n,n.constructor=t}},function(t,e){var 
n;n=function(){return this}();try{n=n||new Function("return this")()}catch(t){"object"==typeof window&&(n=window)}t.exports=n},function(t,e,n){"use strict";var r=n(108),i=n(207),o=n(104),a=n(201),u=n(206),s=function(t){var e=t.length;return function(n){return t[Math.max(0,Math.min(e-1,Math.floor(n*e)))]}},c=n(6),f=function(t,e){var n=Object(c.c)(+t,+e);return function(t){var e=n(t);return e-360*Math.floor(e/360)}},l=n(23),h=n(69),d=n(208),p=n(290),g=n(202),y=n(281),b=n(291),v=n(82),m=n(11);function _(t){return function(e,n){var r=t((e=Object(m.f)(e)).h,(n=Object(m.f)(n)).h),i=Object(c.a)(e.s,n.s),o=Object(c.a)(e.l,n.l),a=Object(c.a)(e.opacity,n.opacity);return function(t){return e.h=r(t),e.s=i(t),e.l=o(t),e.opacity=a(t),e+""}}}var w=_(c.c),x=_(c.a),k=n(121);function E(t,e){var n=Object(c.a)((t=Object(k.a)(t)).l,(e=Object(k.a)(e)).l),r=Object(c.a)(t.a,e.a),i=Object(c.a)(t.b,e.b),o=Object(c.a)(t.opacity,e.opacity);return function(e){return t.l=n(e),t.a=r(e),t.b=i(e),t.opacity=o(e),t+""}}function A(t){return function(e,n){var r=t((e=Object(k.c)(e)).h,(n=Object(k.c)(n)).h),i=Object(c.a)(e.c,n.c),o=Object(c.a)(e.l,n.l),a=Object(c.a)(e.opacity,n.opacity);return function(t){return e.h=r(t),e.c=i(t),e.l=o(t),e.opacity=a(t),e+""}}}var S=A(c.c),M=A(c.a),T=n(217);function O(t,e){for(var n=0,r=e.length-1,i=e[0],o=new Array(r<0?0:r);n(i>>1)-1?(i>>1)-s:s,o.isubn(u)):u=0,r[a]=u,o.iushrn(1)}return r},r.getJSF=function(t,e){var n=[[],[]];t=t.clone(),e=e.clone();for(var r=0,i=0;t.cmpn(-r)>0||e.cmpn(-i)>0;){var o,a,u,s=t.andln(3)+r&3,c=e.andln(3)+i&3;if(3===s&&(s=-1),3===c&&(c=-1),0==(1&s))o=0;else o=3!==(u=t.andln(7)+r&7)&&5!==u||2!==c?s:-s;if(n[0].push(o),0==(1&c))a=0;else a=3!==(u=e.andln(7)+i&7)&&5!==u||2!==s?c:-c;n[1].push(a),2*r===o+1&&(r=1-r),2*i===a+1&&(i=1-i),t.iushrn(1),e.iushrn(1)}return n},r.cachedProperty=function(t,e,n){var r="_"+e;t.prototype[e]=function(){return void 0!==this[r]?this[r]:this[r]=n.call(this)}},r.parseBytes=function(t){return"string"==typeof 
t?r.toArray(t,"hex"):t},r.intFromLE=function(t){return new i(t,"hex","le")}},function(t,e,n){var r=n(454);t.exports={Graph:r.Graph,json:n(556),alg:n(557),version:r.version}},function(t,e,n){var r=n(294),i="object"==typeof self&&self&&self.Object===Object&&self,o=r||i||Function("return this")();t.exports=o},function(t,e,n){var r=n(342),i="object"==typeof self&&self&&self.Object===Object&&self,o=r||i||Function("return this")();t.exports=o},function(t,e,n){var r;try{r=n(340)}catch(t){}r||(r=window.graphlib),t.exports=r},function(t,e,n){var r;try{r=n(34)}catch(t){}r||(r=window.graphlib),t.exports=r},function(t,e,n){"use strict";function r(){}function i(t,e){var n=new r;if(t instanceof r)t.each((function(t,e){n.set(e,t)}));else if(Array.isArray(t)){var i,o=-1,a=t.length;if(null==e)for(;++o=r.length)return null!=t&&n.sort(t),null!=e?e(n):n;for(var c,f,l,h=-1,d=n.length,p=r[i++],g=o(),y=u();++hr.length)return n;var a,u=i[o-1];return null!=e&&o>=r.length?a=n.entries():(a=[],n.each((function(e,n){a.push({key:n,values:t(e,o)})}))),null!=u?a.sort((function(t,e){return u(t.key,e.key)})):a}(a(t,0,c,f),0)},key:function(t){return r.push(t),n},sortKeys:function(t){return i[r.length-1]=t,n},sortValues:function(e){return t=e,n},rollup:function(t){return e=t,n}}};function u(){return{}}function s(t,e,n){t[e]=n}function c(){return o()}function f(t,e,n){t.set(e,n)}function l(){}var h=o.prototype;function d(t,e){var n=new l;if(t instanceof l)t.each((function(t){n.add(t)}));else if(t){var r=-1,i=t.length;if(null==e)for(;++r0)throw new Error("too late; already scheduled");return n}function l(t,e){var n=h(t,e);if(n.state>3)throw new Error("too late; already running");return n}function h(t,e){var n=t.__transition;if(!n||!(n=n[e]))throw new Error("transition not found");return n}var d=function(t,e){var n,r,i,o=t.__transition,a=!0;if(o){for(i in 
e=null==e?null:e+"",o)(n=o[i]).name===e?(r=n.state>2&&n.state<5,n.state=6,n.timer.stop(),n.on.call(r?"interrupt":"cancel",t,t.__data__,n.index,n.group),delete o[i]):a=!1;a&&delete t.__transition}},p=n(281),g=n(105);function y(t,e){var n,r;return function(){var i=l(this,t),o=i.tween;if(o!==n)for(var a=0,u=(r=n=o).length;a=0&&(t=t.slice(0,e)),!t||"start"===t}))}(e)?f:l;return function(){var a=o(this,t),u=a.on;u!==r&&(i=(r=u).copy()).on(e,n),a.on=i}}var U=n(106),z=n(204),Y=r.b.prototype.constructor,V=n(205);function G(t){return function(){this.style.removeProperty(t)}}function H(t,e,n){return function(r){this.style.setProperty(t,e.call(this,r),n)}}function W(t,e,n){var r,i;function o(){var o=e.apply(this,arguments);return o!==i&&(r=(i=o)&&H(t,o,n)),r}return o._value=e,o}function $(t){return function(e){this.textContent=t.call(this,e)}}function K(t){var e,n;function r(){var r=t.apply(this,arguments);return r!==n&&(e=(n=r)&&$(r)),e}return r._value=t,r}var Z=0;function X(t,e,n,r){this._groups=t,this._parents=e,this._name=n,this._id=r}function J(t){return Object(r.b)().transition(t)}function Q(){return++Z}var tt=r.b.prototype;X.prototype=J.prototype={constructor:X,select:function(t){var e=this._name,n=this._id;"function"!=typeof t&&(t=Object(U.a)(t));for(var r=this._groups,i=r.length,o=new Array(i),a=0;a1&&n.name===e)return new X([[t]],rt,e,+r);return null};n.d(e,"c",(function(){return J})),n.d(e,"a",(function(){return it})),n.d(e,"b",(function(){return d}))},function(t,e,n){"use strict";n.d(e,"b",(function(){return i}));var r=n(47);function i(){r.c.stopImmediatePropagation()}e.a=function(){r.c.preventDefault(),r.c.stopImmediatePropagation()}},function(t,e,n){"use strict";var r=n(286);n.d(e,"a",(function(){return r.a}))},function(t,e){t.exports=function(t){return null!=t&&"object"==typeof t}},function(t,e){t.exports=function(t){return null!=t&&"object"==typeof t}},function(t,e,n){"use strict";var r=n(32),i=n(2);function o(t,e){return 
55296==(64512&t.charCodeAt(e))&&(!(e<0||e+1>=t.length)&&56320==(64512&t.charCodeAt(e+1)))}function a(t){return(t>>>24|t>>>8&65280|t<<8&16711680|(255&t)<<24)>>>0}function u(t){return 1===t.length?"0"+t:t}function s(t){return 7===t.length?"0"+t:6===t.length?"00"+t:5===t.length?"000"+t:4===t.length?"0000"+t:3===t.length?"00000"+t:2===t.length?"000000"+t:1===t.length?"0000000"+t:t}e.inherits=i,e.toArray=function(t,e){if(Array.isArray(t))return t.slice();if(!t)return[];var n=[];if("string"==typeof t)if(e){if("hex"===e)for((t=t.replace(/[^a-z0-9]+/gi,"")).length%2!=0&&(t="0"+t),i=0;i>6|192,n[r++]=63&a|128):o(t,i)?(a=65536+((1023&a)<<10)+(1023&t.charCodeAt(++i)),n[r++]=a>>18|240,n[r++]=a>>12&63|128,n[r++]=a>>6&63|128,n[r++]=63&a|128):(n[r++]=a>>12|224,n[r++]=a>>6&63|128,n[r++]=63&a|128)}else for(i=0;i>>0}return a},e.split32=function(t,e){for(var n=new Array(4*t.length),r=0,i=0;r>>24,n[i+1]=o>>>16&255,n[i+2]=o>>>8&255,n[i+3]=255&o):(n[i+3]=o>>>24,n[i+2]=o>>>16&255,n[i+1]=o>>>8&255,n[i]=255&o)}return n},e.rotr32=function(t,e){return t>>>e|t<<32-e},e.rotl32=function(t,e){return t<>>32-e},e.sum32=function(t,e){return t+e>>>0},e.sum32_3=function(t,e,n){return t+e+n>>>0},e.sum32_4=function(t,e,n,r){return t+e+n+r>>>0},e.sum32_5=function(t,e,n,r,i){return t+e+n+r+i>>>0},e.sum64=function(t,e,n,r){var i=t[e],o=r+t[e+1]>>>0,a=(o>>0,t[e+1]=o},e.sum64_hi=function(t,e,n,r){return(e+r>>>0>>0},e.sum64_lo=function(t,e,n,r){return e+r>>>0},e.sum64_4_hi=function(t,e,n,r,i,o,a,u){var s=0,c=e;return s+=(c=c+r>>>0)>>0)>>0)>>0},e.sum64_4_lo=function(t,e,n,r,i,o,a,u){return e+r+o+u>>>0},e.sum64_5_hi=function(t,e,n,r,i,o,a,u,s,c){var f=0,l=e;return f+=(l=l+r>>>0)>>0)>>0)>>0)>>0},e.sum64_5_lo=function(t,e,n,r,i,o,a,u,s,c){return e+r+o+u+c>>>0},e.rotr64_hi=function(t,e,n){return(e<<32-n|t>>>n)>>>0},e.rotr64_lo=function(t,e,n){return(t<<32-n|e>>>n)>>>0},e.shr64_hi=function(t,e,n){return t>>>n},e.shr64_lo=function(t,e,n){return(t<<32-n|e>>>n)>>>0}},function(t,e,n){"use 
strict";n.d(e,"b",(function(){return i})),n.d(e,"c",(function(){return o})),n.d(e,"d",(function(){return a})),n.d(e,"e",(function(){return u})),n.d(e,"a",(function(){return c}));var r,i,o,a,u,s=n(209);function c(t){return r=Object(s.a)(t),i=r.format,o=r.parse,a=r.utcFormat,u=r.utcParse,r}c({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S %p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]})},function(t,e,n){"use strict";n.d(e,"c",(function(){return i})),n.d(e,"a",(function(){return f}));var r={},i=null;"undefined"!=typeof document&&("onmouseenter"in document.documentElement||(r={mouseenter:"mouseover",mouseleave:"mouseout"}));function o(t,e,n){return t=a(t,e,n),function(e){var n=e.relatedTarget;n&&(n===this||8&n.compareDocumentPosition(this))||t.call(this,e)}}function a(t,e,n){return function(r){var o=i;i=r;try{t.call(this,this.__data__,e,n)}finally{i=o}}}function u(t){return t.trim().split(/^|\s+/).map((function(t){var e="",n=t.indexOf(".");return n>=0&&(e=t.slice(n+1),t=t.slice(0,n)),{type:t,name:e}}))}function s(t){return function(){var e=this.__on;if(e){for(var n,r=0,i=-1,o=e.length;re?1:t>=e?0:NaN}var l=n(105);function h(t){return function(){this.removeAttribute(t)}}function d(t){return function(){this.removeAttributeNS(t.space,t.local)}}function p(t,e){return function(){this.setAttribute(t,e)}}function g(t,e){return function(){this.setAttributeNS(t.space,t.local,e)}}function y(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttribute(t):this.setAttribute(t,n)}}function b(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,n)}}var 
v=n(205);function m(t){return function(){delete this[t]}}function _(t,e){return function(){this[t]=e}}function w(t,e){return function(){var n=e.apply(this,arguments);null==n?delete this[t]:this[t]=n}}function x(t){return t.trim().split(/^|\s+/)}function k(t){return t.classList||new E(t)}function E(t){this._node=t,this._names=x(t.getAttribute("class")||"")}function A(t,e){for(var n=k(t),r=-1,i=e.length;++r=0&&(this._names.splice(e,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};function D(){this.textContent=""}function C(t){return function(){this.textContent=t}}function N(t){return function(){var e=t.apply(this,arguments);this.textContent=null==e?"":e}}function I(){this.innerHTML=""}function R(t){return function(){this.innerHTML=t}}function j(t){return function(){var e=t.apply(this,arguments);this.innerHTML=null==e?"":e}}function L(){this.nextSibling&&this.parentNode.appendChild(this)}function B(){this.previousSibling&&this.parentNode.insertBefore(this,this.parentNode.firstChild)}var P=n(66);function F(){return null}function q(){var t=this.parentNode;t&&t.removeChild(this)}function U(){var t=this.cloneNode(!1),e=this.parentNode;return e?e.insertBefore(t,this.nextSibling):t}function z(){var t=this.cloneNode(!0),e=this.parentNode;return e?e.insertBefore(t,this.nextSibling):t}var Y=n(47),V=n(107);function G(t,e,n){var r=Object(V.a)(t),i=r.CustomEvent;"function"==typeof i?i=new i(e,n):(i=r.document.createEvent("Event"),n?(i.initEvent(e,n.bubbles,n.cancelable),i.detail=n.detail):i.initEvent(e,!1,!1)),t.dispatchEvent(i)}function H(t,e){return function(){return G(this,t,e)}}function W(t,e){return function(){return G(this,t,e.apply(this,arguments))}}n.d(e,"c",(function(){return $})),n.d(e,"a",(function(){return K}));var $=[null];function K(t,e){this._groups=t,this._parents=e}function Z(){return new K([[document.documentElement]],$)}K.prototype=Z.prototype={constructor:K,select:function(t){"function"!=typeof 
t&&(t=Object(r.a)(t));for(var e=this._groups,n=e.length,i=new Array(n),o=0;o=k&&(k=x+1);!(w=m[k])&&++k=0;)(r=i[o])&&(a&&4^r.compareDocumentPosition(a)&&a.parentNode.insertBefore(r,a),a=r);return this},sort:function(t){function e(e,n){return e&&n?t(e.__data__,n.__data__):!e-!n}t||(t=f);for(var n=this._groups,r=n.length,i=new Array(r),o=0;o1?this.each((null==e?m:"function"==typeof e?w:_)(t,e)):this.node()[t]},classed:function(t,e){var n=x(t+"");if(arguments.length<2){for(var r=k(this.node()),i=-1,o=n.length;++i>>0,r=0;ryt(t)?(o=t+1,u-yt(t)):(o=t,u),{year:o,dayOfYear:a}}function jt(t,e,n){var r,i,o=It(t.year(),e,n),a=Math.floor((t.dayOfYear()-o-1)/7)+1;return a<1?r=a+Lt(i=t.year()-1,e,n):a>Lt(t.year(),e,n)?(r=a-Lt(t.year(),e,n),i=t.year()+1):(i=t.year(),r=a),{week:r,year:i}}function Lt(t,e,n){var r=It(t,e,n),i=It(t+1,e,n);return(yt(t)-r+i)/7}function Bt(t,e){return t.slice(e,7).concat(t.slice(0,e))}V("w",["ww",2],"wo","week"),V("W",["WW",2],"Wo","isoWeek"),R("week","w"),R("isoWeek","W"),P("week",5),P("isoWeek",5),ft("w",J),ft("ww",J,$),ft("W",J),ft("WW",J,$),gt(["w","ww","W","WW"],(function(t,e,n,r){e[r.substr(0,1)]=k(t)})),V("d",0,"do","day"),V("dd",0,0,(function(t){return this.localeData().weekdaysMin(this,t)})),V("ddd",0,0,(function(t){return this.localeData().weekdaysShort(this,t)})),V("dddd",0,0,(function(t){return this.localeData().weekdays(this,t)})),V("e",0,0,"weekday"),V("E",0,0,"isoWeekday"),R("day","d"),R("weekday","e"),R("isoWeekday","E"),P("day",11),P("weekday",11),P("isoWeekday",11),ft("d",J),ft("e",J),ft("E",J),ft("dd",(function(t,e){return e.weekdaysMinRegex(t)})),ft("ddd",(function(t,e){return e.weekdaysShortRegex(t)})),ft("dddd",(function(t,e){return e.weekdaysRegex(t)})),gt(["dd","ddd","dddd"],(function(t,e,n,r){var i=n._locale.weekdaysParse(t,r,n._strict);null!=i?e.d=i:p(n).invalidWeekday=t})),gt(["d","e","E"],(function(t,e,n,r){e[r]=k(t)}));var 
Pt="Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),Ft="Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),qt="Su_Mo_Tu_We_Th_Fr_Sa".split("_"),Ut=st,zt=st,Yt=st;function Vt(){function t(t,e){return e.length-t.length}var e,n,r,i,o,a=[],u=[],s=[],c=[];for(e=0;e<7;e++)n=d([2e3,1]).day(e),r=this.weekdaysMin(n,""),i=this.weekdaysShort(n,""),o=this.weekdays(n,""),a.push(r),u.push(i),s.push(o),c.push(r),c.push(i),c.push(o);for(a.sort(t),u.sort(t),s.sort(t),c.sort(t),e=0;e<7;e++)u[e]=ht(u[e]),s[e]=ht(s[e]),c[e]=ht(c[e]);this._weekdaysRegex=new RegExp("^("+c.join("|")+")","i"),this._weekdaysShortRegex=this._weekdaysRegex,this._weekdaysMinRegex=this._weekdaysRegex,this._weekdaysStrictRegex=new RegExp("^("+s.join("|")+")","i"),this._weekdaysShortStrictRegex=new RegExp("^("+u.join("|")+")","i"),this._weekdaysMinStrictRegex=new RegExp("^("+a.join("|")+")","i")}function Gt(){return this.hours()%12||12}function Ht(t,e){V(t,0,0,(function(){return this.localeData().meridiem(this.hours(),this.minutes(),e)}))}function Wt(t,e){return e._meridiemParse}V("H",["HH",2],0,"hour"),V("h",["hh",2],0,Gt),V("k",["kk",2],0,(function(){return this.hours()||24})),V("hmm",0,0,(function(){return""+Gt.apply(this)+F(this.minutes(),2)})),V("hmmss",0,0,(function(){return""+Gt.apply(this)+F(this.minutes(),2)+F(this.seconds(),2)})),V("Hmm",0,0,(function(){return""+this.hours()+F(this.minutes(),2)})),V("Hmmss",0,0,(function(){return""+this.hours()+F(this.minutes(),2)+F(this.seconds(),2)})),Ht("a",!0),Ht("A",!1),R("hour","h"),P("hour",13),ft("a",Wt),ft("A",Wt),ft("H",J),ft("h",J),ft("k",J),ft("HH",J,$),ft("hh",J,$),ft("kk",J,$),ft("hmm",Q),ft("hmmss",tt),ft("Hmm",Q),ft("Hmmss",tt),pt(["H","HH"],3),pt(["k","kk"],(function(t,e,n){var r=k(t);e[3]=24===r?0:r})),pt(["a","A"],(function(t,e,n){n._isPm=n._locale.isPM(t),n._meridiem=t})),pt(["h","hh"],(function(t,e,n){e[3]=k(t),p(n).bigHour=!0})),pt("hmm",(function(t,e,n){var 
r=t.length-2;e[3]=k(t.substr(0,r)),e[4]=k(t.substr(r)),p(n).bigHour=!0})),pt("hmmss",(function(t,e,n){var r=t.length-4,i=t.length-2;e[3]=k(t.substr(0,r)),e[4]=k(t.substr(r,2)),e[5]=k(t.substr(i)),p(n).bigHour=!0})),pt("Hmm",(function(t,e,n){var r=t.length-2;e[3]=k(t.substr(0,r)),e[4]=k(t.substr(r))})),pt("Hmmss",(function(t,e,n){var r=t.length-4,i=t.length-2;e[3]=k(t.substr(0,r)),e[4]=k(t.substr(r,2)),e[5]=k(t.substr(i))}));var $t,Kt=_t("Hours",!0),Zt={calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},longDateFormat:{LTS:"h:mm:ss A",LT:"h:mm A",L:"MM/DD/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY h:mm A",LLLL:"dddd, MMMM D, YYYY h:mm A"},invalidDate:"Invalid date",ordinal:"%d",dayOfMonthOrdinalParse:/\d{1,2}/,relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},months:At,monthsShort:St,week:{dow:0,doy:6},weekdays:Pt,weekdaysMin:qt,weekdaysShort:Ft,meridiemParse:/[ap]\.?m?\.?/i},Xt={},Jt={};function Qt(t){return t?t.toLowerCase().replace("_","-"):t}function te(e){var r=null;if(!Xt[e]&&void 0!==t&&t&&t.exports)try{r=$t._abbr,n(453)("./"+e),ee(r)}catch(e){}return Xt[e]}function ee(t,e){var n;return t&&((n=u(e)?re(t):ne(t,e))?$t=n:"undefined"!=typeof console&&console.warn&&console.warn("Locale "+t+" not found. Did you forget to load it?")),$t._abbr}function ne(t,e){if(null===e)return delete Xt[t],null;var n,r=Zt;if(e.abbr=t,null!=Xt[t])O("defineLocaleOverride","use moment.updateLocale(localeName, config) to change an existing locale. 
moment.defineLocale(localeName, config) should only be used for creating a new locale See http://momentjs.com/guides/#/warnings/define-locale/ for more info."),r=Xt[t]._config;else if(null!=e.parentLocale)if(null!=Xt[e.parentLocale])r=Xt[e.parentLocale]._config;else{if(null==(n=te(e.parentLocale)))return Jt[e.parentLocale]||(Jt[e.parentLocale]=[]),Jt[e.parentLocale].push({name:t,config:e}),null;r=n._config}return Xt[t]=new N(C(r,e)),Jt[t]&&Jt[t].forEach((function(t){ne(t.name,t.config)})),ee(t),Xt[t]}function re(t){var e;if(t&&t._locale&&t._locale._abbr&&(t=t._locale._abbr),!t)return $t;if(!o(t)){if(e=te(t))return e;t=[t]}return function(t){for(var e,n,r,i,o=0;o=e&&E(i,n,!0)>=e-1)break;e--}o++}return $t}(t)}function ie(t){var e,n=t._a;return n&&-2===p(t).overflow&&(e=n[1]<0||11kt(n[0],n[1])?2:n[3]<0||24Lt(n,o,a)?p(t)._overflowWeeks=!0:null!=s?p(t)._overflowWeekday=!0:(u=Rt(n,r,i,o,a),t._a[0]=u.year,t._dayOfYear=u.dayOfYear)}(t),null!=t._dayOfYear&&(a=oe(t._a[0],r[0]),(t._dayOfYear>yt(a)||0===t._dayOfYear)&&(p(t)._overflowDayOfYear=!0),n=Nt(a,0,t._dayOfYear),t._a[1]=n.getUTCMonth(),t._a[2]=n.getUTCDate()),e=0;e<3&&null==t._a[e];++e)t._a[e]=u[e]=r[e];for(;e<7;e++)t._a[e]=u[e]=null==t._a[e]?2===e?1:0:t._a[e];24===t._a[3]&&0===t._a[4]&&0===t._a[5]&&0===t._a[6]&&(t._nextDay=!0,t._a[3]=0),t._d=(t._useUTC?Nt:function(t,e,n,r,i,o,a){var u;return t<100&&0<=t?(u=new Date(t+400,e,n,r,i,o,a),isFinite(u.getFullYear())&&u.setFullYear(t)):u=new Date(t,e,n,r,i,o,a),u}).apply(null,u),o=t._useUTC?t._d.getUTCDay():t._d.getDay(),null!=t._tzm&&t._d.setUTCMinutes(t._d.getUTCMinutes()-t._tzm),t._nextDay&&(t._a[3]=24),t._w&&void 0!==t._w.d&&t._w.d!==o&&(p(t).weekdayMismatch=!0)}}var ue=/^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,se=/^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d))(?:(T| 
)(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,ce=/Z|[+-]\d\d(?::?\d\d)?/,fe=[["YYYYYY-MM-DD",/[+-]\d{6}-\d\d-\d\d/],["YYYY-MM-DD",/\d{4}-\d\d-\d\d/],["GGGG-[W]WW-E",/\d{4}-W\d\d-\d/],["GGGG-[W]WW",/\d{4}-W\d\d/,!1],["YYYY-DDD",/\d{4}-\d{3}/],["YYYY-MM",/\d{4}-\d\d/,!1],["YYYYYYMMDD",/[+-]\d{10}/],["YYYYMMDD",/\d{8}/],["GGGG[W]WWE",/\d{4}W\d{3}/],["GGGG[W]WW",/\d{4}W\d{2}/,!1],["YYYYDDD",/\d{7}/]],le=[["HH:mm:ss.SSSS",/\d\d:\d\d:\d\d\.\d+/],["HH:mm:ss,SSSS",/\d\d:\d\d:\d\d,\d+/],["HH:mm:ss",/\d\d:\d\d:\d\d/],["HH:mm",/\d\d:\d\d/],["HHmmss.SSSS",/\d\d\d\d\d\d\.\d+/],["HHmmss,SSSS",/\d\d\d\d\d\d,\d+/],["HHmmss",/\d\d\d\d\d\d/],["HHmm",/\d\d\d\d/],["HH",/\d\d/]],he=/^\/?Date\((\-?\d+)/i;function de(t){var e,n,r,i,o,a,u=t._i,s=ue.exec(u)||se.exec(u);if(s){for(p(t).iso=!0,e=0,n=fe.length;en.valueOf():n.valueOf()this.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()},an.isLocal=function(){return!!this.isValid()&&!this._isUTC},an.isUtcOffset=function(){return!!this.isValid()&&this._isUTC},an.isUtc=Ie,an.isUTC=Ie,an.zoneAbbr=function(){return this._isUTC?"UTC":""},an.zoneName=function(){return this._isUTC?"Coordinated Universal Time":""},an.dates=S("dates accessor is deprecated. Use date instead.",Qe),an.months=S("months accessor is deprecated. Use month instead",Tt),an.years=S("years accessor is deprecated. Use year instead",mt),an.zone=S("moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/",(function(t,e){return null!=t?("string"!=typeof t&&(t=-t),this.utcOffset(t,e),this):-this.utcOffset()})),an.isDSTShifted=S("isDSTShifted is deprecated. 
See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information",(function(){if(!u(this._isDSTShifted))return this._isDSTShifted;var t={};if(v(t,this),(t=ve(t))._a){var e=t._isUTC?d(t._a):_e(t._a);this._isDSTShifted=this.isValid()&&0=0;r--){var i=t[r];"."===i?t.splice(r,1):".."===i?(t.splice(r,1),n++):n&&(t.splice(r,1),n--)}if(e)for(;n--;n)t.unshift("..");return t}function r(t,e){if(t.filter)return t.filter(e);for(var n=[],r=0;r=-1&&!i;o--){var a=o>=0?arguments[o]:t.cwd();if("string"!=typeof a)throw new TypeError("Arguments to path.resolve must be strings");a&&(e=a+"/"+e,i="/"===a.charAt(0))}return(i?"/":"")+(e=n(r(e.split("/"),(function(t){return!!t})),!i).join("/"))||"."},e.normalize=function(t){var o=e.isAbsolute(t),a="/"===i(t,-1);return(t=n(r(t.split("/"),(function(t){return!!t})),!o).join("/"))||o||(t="."),t&&a&&(t+="/"),(o?"/":"")+t},e.isAbsolute=function(t){return"/"===t.charAt(0)},e.join=function(){var t=Array.prototype.slice.call(arguments,0);return e.normalize(r(t,(function(t,e){if("string"!=typeof t)throw new TypeError("Arguments to path.join must be strings");return t})).join("/"))},e.relative=function(t,n){function r(t){for(var e=0;e=0&&""===t[n];n--);return e>n?[]:t.slice(e,n-e+1)}t=e.resolve(t).substr(1),n=e.resolve(n).substr(1);for(var i=r(t.split("/")),o=r(n.split("/")),a=Math.min(i.length,o.length),u=a,s=0;s=1;--o)if(47===(e=t.charCodeAt(o))){if(!i){r=o;break}}else i=!1;return-1===r?n?"/":".":n&&1===r?"/":t.slice(0,r)},e.basename=function(t,e){var n=function(t){"string"!=typeof t&&(t+="");var e,n=0,r=-1,i=!0;for(e=t.length-1;e>=0;--e)if(47===t.charCodeAt(e)){if(!i){n=e+1;break}}else-1===r&&(i=!1,r=e+1);return-1===r?"":t.slice(n,r)}(t);return e&&n.substr(-1*e.length)===e&&(n=n.substr(0,n.length-e.length)),n},e.extname=function(t){"string"!=typeof t&&(t+="");for(var e=-1,n=0,r=-1,i=!0,o=0,a=t.length-1;a>=0;--a){var u=t.charCodeAt(a);if(47!==u)-1===r&&(i=!1,r=a+1),46===u?-1===e?e=a:1!==o&&(o=1):-1!==e&&(o=-1);else 
if(!i){n=a+1;break}}return-1===e||-1===r||0===o||1===o&&e===r-1&&e===n+1?"":t.slice(e,r)};var i="b"==="ab".substr(-1)?function(t,e,n){return t.substr(e,n)}:function(t,e,n){return e<0&&(e=t.length+e),t.substr(e,n)}}).call(this,n(17))},function(t,e,n){var r=n(93),i=n(245);t.exports=function(t){return null!=t&&i(t.length)&&!r(t)}},function(t,e,n){var r=n(624),i=n(634),o=n(79),a=n(16),u=n(641);t.exports=function(t){return"function"==typeof t?t:null==t?o:"object"==typeof t?a(t)?i(t[0],t[1]):r(t):u(t)}},function(t,e,n){"use strict";n.d(e,"b",(function(){return d})),n.d(e,"a",(function(){return g})),n.d(e,"c",(function(){return y})),n.d(e,"d",(function(){return b}));var r,i,o=0,a=0,u=0,s=0,c=0,f=0,l="object"==typeof performance&&performance.now?performance:Date,h="object"==typeof window&&window.requestAnimationFrame?window.requestAnimationFrame.bind(window):function(t){setTimeout(t,17)};function d(){return c||(h(p),c=l.now()+f)}function p(){c=0}function g(){this._call=this._time=this._next=null}function y(t,e,n){var r=new g;return r.restart(t,e,n),r}function b(){d(),++o;for(var t,e=r;e;)(t=c-e._time)>=0&&e._call.call(null,t),e=e._next;--o}function v(){c=(s=l.now())+f,o=a=0;try{b()}finally{o=0,function(){var t,e,n=r,o=1/0;for(;n;)n._call?(o>n._time&&(o=n._time),t=n,n=n._next):(e=n._next,n._next=null,n=t?t._next=e:r=e);i=t,_(o)}(),c=0}}function m(){var t=l.now(),e=t-s;e>1e3&&(f-=e,s=t)}function _(t){o||(a&&(a=clearTimeout(a)),t-c>24?(t<1/0&&(a=setTimeout(v,t-l.now()-f)),u&&(u=clearInterval(u))):(u||(s=l.now(),u=setInterval(m,1e3)),o=1,h(v)))}g.prototype=y.prototype={constructor:g,restart:function(t,e,n){if("function"!=typeof t)throw new TypeError("callback is not a function");n=(null==n?d():+n)+(null==e?0:+e),this._next||i===this||(i?i._next=this:r=this,i=this),this._call=t,this._time=n,_()},stop:function(){this._call&&(this._call=null,this._time=1/0,_())}}},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[6,8,10,11,12,13,14,15,16,18,20],n=[1,9],r=[1,10],i=[1,11],o=[1,12],a=[1,13],u=[1,14],s=[1,16],c=[1,17],f={trace:function(){},yy:{},symbols_:{error:2,start:3,gantt:4,document:5,EOF:6,line:7,SPACE:8,statement:9,NL:10,dateFormat:11,inclusiveEndDates:12,axisFormat:13,excludes:14,title:15,section:16,clickStatement:17,taskTxt:18,taskData:19,click:20,callbackname:21,callbackargs:22,href:23,clickStatementDebug:24,$accept:0,$end:1},terminals_:{2:"error",4:"gantt",6:"EOF",8:"SPACE",10:"NL",11:"dateFormat",12:"inclusiveEndDates",13:"axisFormat",14:"excludes",15:"title",16:"section",18:"taskTxt",19:"taskData",20:"click",21:"callbackname",22:"callbackargs",23:"href"},productions_:[0,[3,3],[5,0],[5,2],[7,2],[7,1],[7,1],[7,1],[9,1],[9,1],[9,1],[9,1],[9,1],[9,1],[9,1],[9,2],[17,2],[17,3],[17,3],[17,4],[17,3],[17,4],[17,2],[24,2],[24,3],[24,3],[24,4],[24,3],[24,4],[24,2]],performAction:function(t,e,n,r,i,o,a){var u=o.length-1;switch(i){case 1:return o[u-1];case 2:this.$=[];break;case 3:o[u-1].push(o[u]),this.$=o[u-1];break;case 4:case 5:this.$=o[u];break;case 6:case 7:this.$=[];break;case 8:r.setDateFormat(o[u].substr(11)),this.$=o[u].substr(11);break;case 9:r.enableInclusiveEndDates(),this.$=o[u].substr(18);break;case 10:r.setAxisFormat(o[u].substr(11)),this.$=o[u].substr(11);break;case 11:r.setExcludes(o[u].substr(9)),this.$=o[u].substr(9);break;case 12:r.setTitle(o[u].substr(6)),this.$=o[u].substr(6);break;case 13:r.addSection(o[u].substr(8)),this.$=o[u].substr(8);break;case 15:r.addTask(o[u-1],o[u]),this.$="task";break;case 16:this.$=o[u-1],r.setClickEvent(o[u-1],o[u],null);break;case 17:this.$=o[u-2],r.setClickEvent(o[u-2],o[u-1],o[u]);break;case 18:this.$=o[u-2],r.setClickEvent(o[u-2],o[u-1],null),r.setLink(o[u-2],o[u]);break;case 19:this.$=o[u-3],r.setClickEvent(o[u-3],o[u-2],o[u-1]),r.setLink(o[u-3],o[u]);break;case 20:this.$=o[u-2],r.setClickEvent(o[u-2],o[u],null),r.setLink(o[u-2],o[u-1]);break;case 
21:this.$=o[u-3],r.setClickEvent(o[u-3],o[u-1],o[u]),r.setLink(o[u-3],o[u-2]);break;case 22:this.$=o[u-1],r.setLink(o[u-1],o[u]);break;case 23:case 29:this.$=o[u-1]+" "+o[u];break;case 24:case 25:case 27:this.$=o[u-2]+" "+o[u-1]+" "+o[u];break;case 26:case 28:this.$=o[u-3]+" "+o[u-2]+" "+o[u-1]+" "+o[u]}},table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:[1,6],9:7,10:[1,8],11:n,12:r,13:i,14:o,15:a,16:u,17:15,18:s,20:c},t(e,[2,7],{1:[2,1]}),t(e,[2,3]),{9:18,11:n,12:r,13:i,14:o,15:a,16:u,17:15,18:s,20:c},t(e,[2,5]),t(e,[2,6]),t(e,[2,8]),t(e,[2,9]),t(e,[2,10]),t(e,[2,11]),t(e,[2,12]),t(e,[2,13]),t(e,[2,14]),{19:[1,19]},{21:[1,20],23:[1,21]},t(e,[2,4]),t(e,[2,15]),t(e,[2,16],{22:[1,22],23:[1,23]}),t(e,[2,22],{21:[1,24]}),t(e,[2,17],{23:[1,25]}),t(e,[2,18]),t(e,[2,20],{22:[1,26]}),t(e,[2,19]),t(e,[2,21])],defaultActions:{},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],o=[],a=this.table,u="",s=0,c=0,f=0,l=2,h=1,d=o.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var b=p.yylloc;o.push(b);var v=p.options&&p.options.ranges;function m(){var t;return"number"!=typeof(t=r.pop()||p.lex()||h)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,w,x,k,E,A,S,M,T,O={};;){if(x=n[n.length-1],this.defaultActions[x]?k=this.defaultActions[x]:(null==_&&(_=m()),k=a[x]&&a[x][_]),void 0===k||!k.length||!k[0]){var D="";for(A in T=[],a[x])this.terminals_[A]&&A>l&&T.push("'"+this.terminals_[A]+"'");D=p.showPosition?"Parse error on line "+(s+1)+":\n"+p.showPosition()+"\nExpecting "+T.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(s+1)+": 
Unexpected "+(_==h?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(D,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:b,expected:T})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+x+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),o.push(p.yylloc),n.push(k[1]),_=null,w?(_=w,w=null):(c=p.yyleng,u=p.yytext,s=p.yylineno,b=p.yylloc,f>0&&f--);break;case 2:if(S=this.productions_[k[1]][1],O.$=i[i.length-S],O._$={first_line:o[o.length-(S||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-(S||1)].first_column,last_column:o[o.length-1].last_column},v&&(O._$.range=[o[o.length-(S||1)].range[0],o[o.length-1].range[1]]),void 0!==(E=this.performAction.apply(O,[u,c,s,g.yy,k[1],i,o].concat(d))))return E;S&&(n=n.slice(0,-1*S*2),i=i.slice(0,-1*S),o=o.slice(0,-1*S)),n.push(this.productions_[k[1]][0]),i.push(O.$),o.push(O._$),M=a[n[n.length-2]][n[n.length-1]],n.push(M);break;case 3:return!0}}return!0}},l={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var o in i)this[o]=i[o];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),o=0;oe[0].length)){if(e=n,r=o,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[o])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 10;case 1:case 2:case 3:break;case 4:this.begin("href");break;case 5:this.popState();break;case 6:return 23;case 7:this.begin("callbackname");break;case 8:this.popState();break;case 9:this.popState(),this.begin("callbackargs");break;case 10:return 21;case 11:this.popState();break;case 12:return 22;case 13:this.begin("click");break;case 14:this.popState();break;case 15:return 20;case 16:return 4;case 17:return 11;case 18:return 12;case 19:return 13;case 20:return 14;case 21:return"date";case 22:return 15;case 23:return 16;case 24:return 18;case 25:return 19;case 26:return":";case 27:return 6;case 
28:return"INVALID"}},rules:[/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:href[\s]+["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:call[\s]+)/i,/^(?:\([\s]*\))/i,/^(?:\()/i,/^(?:[^(]*)/i,/^(?:\))/i,/^(?:[^)]*)/i,/^(?:click[\s]+)/i,/^(?:[\s\n])/i,/^(?:[^\s\n]*)/i,/^(?:gantt\b)/i,/^(?:dateFormat\s[^#\n;]+)/i,/^(?:inclusiveEndDates\b)/i,/^(?:axisFormat\s[^#\n;]+)/i,/^(?:excludes\s[^#\n;]+)/i,/^(?:\d\d\d\d-\d\d-\d\d\b)/i,/^(?:title\s[^#\n;]+)/i,/^(?:section\s[^#:\n;]+)/i,/^(?:[^#:\n;]+)/i,/^(?::[^#\n;]+)/i,/^(?::)/i,/^(?:$)/i,/^(?:.)/i],conditions:{callbackargs:{rules:[11,12],inclusive:!1},callbackname:{rules:[8,9,10],inclusive:!1},href:{rules:[5,6],inclusive:!1},click:{rules:[14,15],inclusive:!1},INITIAL:{rules:[0,1,2,3,4,7,13,16,17,18,19,20,21,22,23,24,25,26,27,28],inclusive:!0}}};function h(){this.yy={}}return f.lexer=l,h.prototype=f,f.Parser=h,new h}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(54).readFileSync(n(55).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(17),n(14)(t))},function(t,e,n){"use strict";n.d(e,"f",(function(){return o})),n.d(e,"g",(function(){return a})),n.d(e,"a",(function(){return u})),n.d(e,"b",(function(){return s})),n.d(e,"d",(function(){return c})),n.d(e,"c",(function(){return f})),n.d(e,"e",(function(){return l}));var r=n(110),i=Object(r.a)(","),o=i.parse,a=i.parseRows,u=i.format,s=i.formatBody,c=i.formatRows,f=i.formatRow,l=i.formatValue},function(t,e,n){"use strict";n.d(e,"f",(function(){return o})),n.d(e,"g",(function(){return a})),n.d(e,"a",(function(){return u})),n.d(e,"b",(function(){return s})),n.d(e,"d",(function(){return c})),n.d(e,"c",(function(){return f})),n.d(e,"e",(function(){return l}));var r=n(110),i=Object(r.a)("\t"),o=i.parse,a=i.parseRows,u=i.format,s=i.formatBody,c=i.formatRows,f=i.formatRow,l=i.formatValue},function(t,e,n){var 
r=n(297),i=n(230),o=n(52);t.exports=function(t){return o(t)?r(t):i(t)}},function(t,e,n){var r=n(345),i=n(247),o=n(56);t.exports=function(t){return o(t)?r(t):i(t)}},function(t,e,n){var r;if(!r)try{r=n(902)}catch(t){}r||(r=window.d3),t.exports=r},function(t,e,n){var r=n(3).Buffer,i=n(265).Transform,o=n(270).StringDecoder;function a(t){i.call(this),this.hashMode="string"==typeof t,this.hashMode?this[t]=this._finalOrDigest:this.final=this._finalOrDigest,this._final&&(this.__final=this._final,this._final=null),this._decoder=null,this._encoding=null}n(2)(a,i),a.prototype.update=function(t,e,n){"string"==typeof t&&(t=r.from(t,e));var i=this._update(t);return this.hashMode?this:(n&&(i=this._toString(i,n)),i)},a.prototype.setAutoPadding=function(){},a.prototype.getAuthTag=function(){throw new Error("trying to get auth tag in unsupported state")},a.prototype.setAuthTag=function(){throw new Error("trying to set auth tag in unsupported state")},a.prototype.setAAD=function(){throw new Error("trying to set aad in unsupported state")},a.prototype._transform=function(t,e,n){var r;try{this.hashMode?this._update(t):this.push(this._update(t))}catch(t){r=t}finally{n(r)}},a.prototype._flush=function(t){var e;try{this.push(this.__final())}catch(t){e=t}t(e)},a.prototype._finalOrDigest=function(t){var e=this.__final()||r.alloc(0);return t&&(e=this._toString(e,t,!0)),e},a.prototype._toString=function(t,e,n){if(this._decoder||(this._decoder=new o(e),this._encoding=e),this._encoding!==e)throw new Error("can't switch encodings");var r=this._decoder.write(t);return n&&(r+=this._decoder.end()),r},t.exports=a},function(t,e,n){"use strict";var r=n(105),i=n(68);function o(t){return function(){var e=this.ownerDocument,n=this.namespaceURI;return n===i.b&&e.documentElement.namespaceURI===i.b?e.createElement(t):e.createElementNS(n,t)}}function a(t){return function(){return this.ownerDocument.createElementNS(t.space,t.local)}}e.a=function(t){var 
e=Object(r.a)(t);return(e.local?a:o)(e)}},function(t,e,n){"use strict";e.a=function(t,e){var n=t.ownerSVGElement||t;if(n.createSVGPoint){var r=n.createSVGPoint();return r.x=e.clientX,r.y=e.clientY,[(r=r.matrixTransform(t.getScreenCTM().inverse())).x,r.y]}var i=t.getBoundingClientRect();return[e.clientX-i.left-t.clientLeft,e.clientY-i.top-t.clientTop]}},function(t,e,n){"use strict";n.d(e,"b",(function(){return r}));var r="http://www.w3.org/1999/xhtml";e.a={svg:"http://www.w3.org/2000/svg",xhtml:r,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"}},function(t,e,n){"use strict";function r(t){return ArrayBuffer.isView(t)&&!(t instanceof DataView)}n.d(e,"b",(function(){return r})),e.a=function(t,e){e||(e=[]);var n,r=t?Math.min(e.length,t.length):0,i=e.slice();return function(o){for(n=0;n0?Object(r.a)((function(e){e.setFullYear(Math.floor(e.getFullYear()/t)*t),e.setMonth(0,1),e.setHours(0,0,0,0)}),(function(e,n){e.setFullYear(e.getFullYear()+n*t)})):null},e.a=i;var o=i.range},function(t,e,n){"use strict";n.d(e,"b",(function(){return o}));var r=n(4),i=Object(r.a)((function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCFullYear(t.getUTCFullYear()+e)}),(function(t,e){return e.getUTCFullYear()-t.getUTCFullYear()}),(function(t){return t.getUTCFullYear()}));i.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Object(r.a)((function(e){e.setUTCFullYear(Math.floor(e.getUTCFullYear()/t)*t),e.setUTCMonth(0,1),e.setUTCHours(0,0,0,0)}),(function(e,n){e.setUTCFullYear(e.getUTCFullYear()+n*t)})):null},e.a=i;var o=i.range},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,4],n=[1,3],r=[1,5],i=[1,8,9,10,11,26,34,61,62,63,64,65,66,76,77,80,81,82,84,85,91,92,93,94,95,96],o=[2,2],a=[1,12],u=[1,13],s=[1,14],c=[1,15],f=[1,22],l=[1,46],h=[1,24],d=[1,25],p=[1,26],g=[1,27],y=[1,28],b=[1,40],v=[1,35],m=[1,37],_=[1,32],w=[1,36],x=[1,39],k=[1,43],E=[1,44],A=[1,45],S=[1,34],M=[1,38],T=[1,41],O=[1,42],D=[1,33],C=[1,51],N=[1,8,9,10,11,26,30,34,61,62,63,64,65,66,76,77,80,81,82,84,85,91,92,93,94,95,96],I=[1,55],R=[1,54],j=[1,56],L=[8,9,11,55,56],B=[8,9,10,11,55,56],P=[8,9,10,11,35,55,56],F=[8,9,10,11,28,34,35,37,39,41,43,45,47,48,50,55,56,66,76,77,80,81,82,84,85,91,92,93,94,95,96],q=[8,9,11,34,55,56,66,76,77,80,81,82,84,85,91,92,93,94,95,96],U=[34,66,76,77,80,81,82,84,85,91,92,93,94,95,96],z=[1,100],Y=[1,121],V=[1,122],G=[1,123],H=[1,124],W=[1,104],$=[1,95],K=[1,96],Z=[1,92],X=[1,116],J=[1,117],Q=[1,118],tt=[1,119],et=[1,120],nt=[1,125],rt=[1,126],it=[1,98],ot=[1,106],at=[1,109],ut=[1,107],st=[1,108],ct=[1,101],ft=[1,114],lt=[1,113],ht=[1,97],dt=[1,94],pt=[1,103],gt=[1,105],yt=[1,110],bt=[1,111],vt=[1,112],mt=[1,115],_t=[8,9,10,11,26,30,34,61,62,63,64,65,66,76,77,80,81,82,84,85,91,92,93,94,95,96],wt=[1,129],xt=[1,133],kt=[1,135],Et=[1,136],At=[8,9,10,11,12,13,26,28,29,30,34,38,40,42,44,46,47,49,51,55,56,57,61,62,63,64,65,66,67,70,76,77,80,81,82,84,85,86,87,91,92,93,94,95,96],St=[8,9,10,11,13,34,66,76,77,80,81,82,84,85,91,92,93,94,95,96],Mt=[10,77],Tt=[1,201],Ot=[1,205],Dt=[1,202],Ct=[1,199],Nt=[1,196],It=[1,197],Rt=[1,198],jt=[1,200],Lt=[1,203],Bt=[1,204],Pt=[1,206],Ft=[8,9,11],qt=[1,222],Ut=[8,9,11,77],zt=[8,9,10,11,61,73,76,77,80,81,82,83,84,85,86],Yt={trace:function(){},yy:{},symbols_:{error:2,mermaidDoc:3,graphConfig:4,document:5,line:6,statement:7,SEMI:8,NEWLINE:9,SPACE:10,EOF:11,GRAPH:12,DIR:13,FirstStmtSeperator:14,ending:15,endToken:16,spaceList:17,spaceListNewline:18,verticeStatement:19,separator:20,styleStatement:21,linkStyleStatement:22,classDefStatement:23,classStatement:24,clickStatement:25,subgraph:26,text:27,SQS:28,SQE:29,end:3
0,link:31,node:32,vertex:33,AMP:34,STYLE_SEPARATOR:35,idString:36,PS:37,PE:38,"(-":39,"-)":40,STADIUMSTART:41,STADIUMEND:42,CYLINDERSTART:43,CYLINDEREND:44,DIAMOND_START:45,DIAMOND_STOP:46,TAGEND:47,TRAPSTART:48,TRAPEND:49,INVTRAPSTART:50,INVTRAPEND:51,linkStatement:52,arrowText:53,TESTSTR:54,START_LINK:55,LINK:56,PIPE:57,textToken:58,STR:59,keywords:60,STYLE:61,LINKSTYLE:62,CLASSDEF:63,CLASS:64,CLICK:65,DOWN:66,UP:67,textNoTags:68,textNoTagsToken:69,DEFAULT:70,stylesOpt:71,alphaNum:72,HEX:73,numList:74,INTERPOLATE:75,NUM:76,COMMA:77,style:78,styleComponent:79,ALPHA:80,COLON:81,MINUS:82,UNIT:83,BRKT:84,DOT:85,PCT:86,TAGSTART:87,alphaNumToken:88,idStringToken:89,alphaNumStatement:90,PUNCTUATION:91,UNICODE_TEXT:92,PLUS:93,EQUALS:94,MULT:95,UNDERSCORE:96,graphCodeTokens:97,ARROW_CROSS:98,ARROW_POINT:99,ARROW_CIRCLE:100,ARROW_OPEN:101,QUOTE:102,$accept:0,$end:1},terminals_:{2:"error",8:"SEMI",9:"NEWLINE",10:"SPACE",11:"EOF",12:"GRAPH",13:"DIR",26:"subgraph",28:"SQS",29:"SQE",30:"end",34:"AMP",35:"STYLE_SEPARATOR",37:"PS",38:"PE",39:"(-",40:"-)",41:"STADIUMSTART",42:"STADIUMEND",43:"CYLINDERSTART",44:"CYLINDEREND",45:"DIAMOND_START",46:"DIAMOND_STOP",47:"TAGEND",48:"TRAPSTART",49:"TRAPEND",50:"INVTRAPSTART",51:"INVTRAPEND",54:"TESTSTR",55:"START_LINK",56:"LINK",57:"PIPE",59:"STR",61:"STYLE",62:"LINKSTYLE",63:"CLASSDEF",64:"CLASS",65:"CLICK",66:"DOWN",67:"UP",70:"DEFAULT",73:"HEX",75:"INTERPOLATE",76:"NUM",77:"COMMA",80:"ALPHA",81:"COLON",82:"MINUS",83:"UNIT",84:"BRKT",85:"DOT",86:"PCT",87:"TAGSTART",91:"PUNCTUATION",92:"UNICODE_TEXT",93:"PLUS",94:"EQUALS",95:"MULT",96:"UNDERSCORE",98:"ARROW_CROSS",99:"ARROW_POINT",100:"ARROW_CIRCLE",101:"ARROW_OPEN",102:"QUOTE"},productions_:[0,[3,2],[5,0],[5,2],[6,1],[6,1],[6,1],[6,1],[6,1],[4,2],[4,2],[4,3],[15,2],[15,1],[16,1],[16,1],[16,1],[14,1],[14,1],[14,2],[18,2],[18,2],[18,1],[18,1],[17,2],[17,1],[7,2],[7,2],[7,2],[7,2],[7,2],[7,2],[7,9],[7,6],[7,4],[20,1],[20,1],[20,1],[19,3],[19,4],[19,2],[19,1],[32,1],[32,5],[32,3],[33,4],[33
,6],[33,4],[33,4],[33,4],[33,4],[33,4],[33,6],[33,4],[33,4],[33,4],[33,4],[33,4],[33,1],[31,2],[31,3],[31,3],[31,1],[31,3],[52,1],[53,3],[27,1],[27,2],[27,1],[60,1],[60,1],[60,1],[60,1],[60,1],[60,1],[60,1],[60,1],[60,1],[60,1],[60,1],[68,1],[68,2],[23,5],[23,5],[24,5],[25,5],[25,7],[25,5],[25,7],[21,5],[21,5],[22,5],[22,5],[22,9],[22,9],[22,7],[22,7],[74,1],[74,3],[71,1],[71,3],[78,1],[78,2],[79,1],[79,1],[79,1],[79,1],[79,1],[79,1],[79,1],[79,1],[79,1],[79,1],[79,1],[58,1],[58,1],[58,1],[58,1],[58,1],[58,1],[69,1],[69,1],[69,1],[69,1],[36,1],[36,2],[72,1],[72,2],[90,1],[90,1],[90,1],[90,1],[88,1],[88,1],[88,1],[88,1],[88,1],[88,1],[88,1],[88,1],[88,1],[88,1],[88,1],[88,1],[88,1],[89,1],[89,1],[89,1],[89,1],[89,1],[89,1],[89,1],[89,1],[89,1],[89,1],[89,1],[89,1],[89,1],[89,1],[89,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1],[97,1]],performAction:function(t,e,n,r,i,o,a){var u=o.length-1;switch(i){case 2:this.$=[];break;case 3:o[u]!==[]&&o[u-1].push(o[u]),this.$=o[u-1];break;case 4:case 66:case 68:case 80:case 126:case 128:case 129:this.$=o[u];break;case 11:r.setDirection(o[u-1]),this.$=o[u-1];break;case 26:this.$=o[u-1].nodes;break;case 27:case 28:case 29:case 30:case 31:this.$=[];break;case 32:this.$=r.addSubGraph(o[u-6],o[u-1],o[u-4]);break;case 33:this.$=r.addSubGraph(o[u-3],o[u-1],o[u-3]);break;case 34:this.$=r.addSubGraph(void 0,o[u-1],void 0);break;case 38:r.addLink(o[u-2].stmt,o[u],o[u-1]),this.$={stmt:o[u],nodes:o[u].concat(o[u-2].nodes)};break;case 39:r.addLink(o[u-3].stmt,o[u-1],o[u-2]),this.$={stmt:o[u-1],nodes:o[u-1].concat(o[u-3].nodes)};break;case 40:this.$={stmt:o[u-1],nodes:o[u-1]};break;case 41:this.$={stmt:o[u],nodes:o[u]};break;case 42:this.$=[o[u]];break;case 43:this.$=o[u-4].concat(o[u]);break;case 44:this.$=[o[u-2]],r.setClass(o[u-2],o[u]);break;case 45:this.$=o[u-3],r.addVertex(o[u-3],o[u-1],"square");break;case 
46:this.$=o[u-5],r.addVertex(o[u-5],o[u-2],"circle");break;case 47:this.$=o[u-3],r.addVertex(o[u-3],o[u-1],"ellipse");break;case 48:this.$=o[u-3],r.addVertex(o[u-3],o[u-1],"stadium");break;case 49:this.$=o[u-3],r.addVertex(o[u-3],o[u-1],"cylinder");break;case 50:this.$=o[u-3],r.addVertex(o[u-3],o[u-1],"round");break;case 51:this.$=o[u-3],r.addVertex(o[u-3],o[u-1],"diamond");break;case 52:this.$=o[u-5],r.addVertex(o[u-5],o[u-2],"hexagon");break;case 53:this.$=o[u-3],r.addVertex(o[u-3],o[u-1],"odd");break;case 54:this.$=o[u-3],r.addVertex(o[u-3],o[u-1],"trapezoid");break;case 55:this.$=o[u-3],r.addVertex(o[u-3],o[u-1],"inv_trapezoid");break;case 56:this.$=o[u-3],r.addVertex(o[u-3],o[u-1],"lean_right");break;case 57:this.$=o[u-3],r.addVertex(o[u-3],o[u-1],"lean_left");break;case 58:this.$=o[u],r.addVertex(o[u]);break;case 59:o[u-1].text=o[u],this.$=o[u-1];break;case 60:case 61:o[u-2].text=o[u-1],this.$=o[u-2];break;case 62:this.$=o[u];break;case 63:var s=r.destructLink(o[u],o[u-2]);this.$={type:s.type,stroke:s.stroke,text:o[u-1]};break;case 64:s=r.destructLink(o[u]);this.$={type:s.type,stroke:s.stroke};break;case 65:this.$=o[u-1];break;case 67:case 81:case 127:this.$=o[u-1]+""+o[u];break;case 82:case 83:this.$=o[u-4],r.addClass(o[u-2],o[u]);break;case 84:this.$=o[u-4],r.setClass(o[u-2],o[u]);break;case 85:this.$=o[u-4],r.setClickEvent(o[u-2],o[u],void 0);break;case 86:this.$=o[u-6],r.setClickEvent(o[u-4],o[u-2],o[u]);break;case 87:this.$=o[u-4],r.setLink(o[u-2],o[u],void 0);break;case 88:this.$=o[u-6],r.setLink(o[u-4],o[u-2],o[u]);break;case 89:this.$=o[u-4],r.addVertex(o[u-2],void 0,void 0,o[u]);break;case 90:case 92:this.$=o[u-4],r.updateLink(o[u-2],o[u]);break;case 91:this.$=o[u-4],r.updateLink([o[u-2]],o[u]);break;case 93:this.$=o[u-8],r.updateLinkInterpolate([o[u-6]],o[u-2]),r.updateLink([o[u-6]],o[u]);break;case 94:this.$=o[u-8],r.updateLinkInterpolate(o[u-6],o[u-2]),r.updateLink(o[u-6],o[u]);break;case 
95:this.$=o[u-6],r.updateLinkInterpolate([o[u-4]],o[u]);break;case 96:this.$=o[u-6],r.updateLinkInterpolate(o[u-4],o[u]);break;case 97:case 99:this.$=[o[u]];break;case 98:case 100:o[u-2].push(o[u]),this.$=o[u-2];break;case 102:this.$=o[u-1]+o[u];break;case 124:this.$=o[u];break;case 125:this.$=o[u-1]+""+o[u];break;case 130:this.$="v";break;case 131:this.$="-"}},table:[{3:1,4:2,9:e,10:n,12:r},{1:[3]},t(i,o,{5:6}),{4:7,9:e,10:n,12:r},{4:8,9:e,10:n,12:r},{13:[1,9]},{1:[2,1],6:10,7:11,8:a,9:u,10:s,11:c,19:16,21:17,22:18,23:19,24:20,25:21,26:f,32:23,33:29,34:l,36:30,61:h,62:d,63:p,64:g,65:y,66:b,76:v,77:m,80:_,81:w,82:x,84:k,85:E,89:31,91:A,92:S,93:M,94:T,95:O,96:D},t(i,[2,9]),t(i,[2,10]),{8:[1,48],9:[1,49],10:C,14:47,17:50},t(N,[2,3]),t(N,[2,4]),t(N,[2,5]),t(N,[2,6]),t(N,[2,7]),t(N,[2,8]),{8:I,9:R,11:j,20:52,31:53,52:57,55:[1,58],56:[1,59]},{8:I,9:R,11:j,20:60},{8:I,9:R,11:j,20:61},{8:I,9:R,11:j,20:62},{8:I,9:R,11:j,20:63},{8:I,9:R,11:j,20:64},{8:I,9:R,10:[1,65],11:j,20:66},t(L,[2,41],{17:67,10:C}),{10:[1,68]},{10:[1,69]},{10:[1,70]},{10:[1,71]},{10:[1,72]},t(B,[2,42],{35:[1,73]}),t(P,[2,58],{89:83,28:[1,74],34:l,37:[1,75],39:[1,76],41:[1,77],43:[1,78],45:[1,79],47:[1,80],48:[1,81],50:[1,82],66:b,76:v,77:m,80:_,81:w,82:x,84:k,85:E,91:A,92:S,93:M,94:T,95:O,96:D}),t(F,[2,124]),t(F,[2,145]),t(F,[2,146]),t(F,[2,147]),t(F,[2,148]),t(F,[2,149]),t(F,[2,150]),t(F,[2,151]),t(F,[2,152]),t(F,[2,153]),t(F,[2,154]),t(F,[2,155]),t(F,[2,156]),t(F,[2,157]),t(F,[2,158]),t(F,[2,159]),t(i,[2,11]),t(i,[2,17]),t(i,[2,18]),{9:[1,84]},t(q,[2,25],{17:85,10:C}),t(N,[2,26]),{32:86,33:29,34:l,36:30,66:b,76:v,77:m,80:_,81:w,82:x,84:k,85:E,89:31,91:A,92:S,93:M,94:T,95:O,96:D},t(N,[2,35]),t(N,[2,36]),t(N,[2,37]),t(U,[2,62],{53:87,54:[1,88],57:[1,89]}),{10:z,12:Y,13:V,26:G,27:90,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},t([34,54,57,66,76,77,80,81,82,84,85,91,
92,93,94,95,96],[2,64]),t(N,[2,27]),t(N,[2,28]),t(N,[2,29]),t(N,[2,30]),t(N,[2,31]),{10:z,12:Y,13:V,26:G,27:127,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},t(_t,o,{5:128}),t(L,[2,40],{34:wt}),{13:xt,34:W,66:kt,72:130,73:[1,131],76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:132,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{70:[1,137],74:138,76:[1,139]},{13:xt,34:W,66:kt,70:[1,140],72:141,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:132,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{13:xt,34:W,66:kt,72:142,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:132,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{13:xt,34:W,66:kt,72:143,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:132,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{34:l,36:144,66:b,76:v,77:m,80:_,81:w,82:x,84:k,85:E,89:31,91:A,92:S,93:M,94:T,95:O,96:D},{10:z,12:Y,13:V,26:G,27:145,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,27:147,30:H,34:W,37:[1,146],47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,27:148,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,27:149,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,27:150,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,9
2:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,27:151,30:H,34:W,45:[1,152],47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,27:153,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,27:154,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,27:155,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},t(F,[2,125]),t(i,[2,19]),t(q,[2,24]),t(L,[2,38],{17:156,10:C}),t(U,[2,59],{10:[1,157]}),{10:[1,158]},{10:z,12:Y,13:V,26:G,27:159,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,30:H,34:W,47:$,55:K,56:[1,160],58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},t(At,[2,66]),t(At,[2,68]),t(At,[2,114]),t(At,[2,115]),t(At,[2,116]),t(At,[2,117]),t(At,[2,118]),t(At,[2,119]),t(At,[2,120]),t(At,[2,121]),t(At,[2,122]),t(At,[2,123]),t(At,[2,132]),t(At,[2,133]),t(At,[2,134]),t(At,[2,135]),t(At,[2,136]),t(At,[2,137]),t(At,[2,138]),t(At,[2,139]),t(At,[2,140]),t(At,[2,141]),t(At,[2,142]),t(At,[2,143]),t(At,[2,144]),t(At,[2,69]),t(At,[2,70]),t(At,[2,71]),t(At,[2,72]),t(At,[2,73]),t(At,[2,74]),t(At,[2,75]),t(At,[2,76]),t(At,[2,77]),t(At,[2,78]),t(At,[2,79]),{8:I,9:R,10:z,11:j,12:Y,13:V,20:163,26:G,28:[1,162],30:H,34:W,47:$,55:K,58:161,60:1
02,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{6:10,7:11,8:a,9:u,10:s,11:c,19:16,21:17,22:18,23:19,24:20,25:21,26:f,30:[1,164],32:23,33:29,34:l,36:30,61:h,62:d,63:p,64:g,65:y,66:b,76:v,77:m,80:_,81:w,82:x,84:k,85:E,89:31,91:A,92:S,93:M,94:T,95:O,96:D},{10:C,17:165},{10:[1,166],13:xt,34:W,66:kt,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:167,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:[1,168]},t(St,[2,126]),t(St,[2,128]),t(St,[2,129]),t(St,[2,130]),t(St,[2,131]),{10:[1,169]},{10:[1,170],77:[1,171]},t(Mt,[2,97]),{10:[1,172]},{10:[1,173],13:xt,34:W,66:kt,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:167,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:[1,174],13:xt,34:W,66:kt,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:167,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:[1,175],13:xt,34:W,66:kt,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:167,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},t(B,[2,44],{89:83,34:l,66:b,76:v,77:m,80:_,81:w,82:x,84:k,85:E,91:A,92:S,93:M,94:T,95:O,96:D}),{10:z,12:Y,13:V,26:G,29:[1,176],30:H,34:W,47:$,55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,27:177,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,30:H,34:W,38:[1,178],47:$,55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,30:H,34:W,40:[1,179],47:$,55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,30:H,34:W,42:[1,180],47
:$,55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,30:H,34:W,44:[1,181],47:$,55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,30:H,34:W,46:[1,182],47:$,55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,27:183,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,29:[1,184],30:H,34:W,47:$,55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,30:H,34:W,47:$,49:[1,185],51:[1,186],55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{10:z,12:Y,13:V,26:G,30:H,34:W,47:$,49:[1,188],51:[1,187],55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},t(L,[2,39],{34:wt}),t(U,[2,61]),t(U,[2,60]),{10:z,12:Y,13:V,26:G,30:H,34:W,47:$,55:K,57:[1,189],58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},t(U,[2,63]),t(At,[2,67]),{10:z,12:Y,13:V,26:G,27:190,30:H,34:W,47:$,55:K,58:91,59:Z,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},t(_t,o,{5:191}),t(N,[2,34])
,{33:192,34:l,36:30,66:b,76:v,77:m,80:_,81:w,82:x,84:k,85:E,89:31,91:A,92:S,93:M,94:T,95:O,96:D},{10:Tt,61:Ot,71:193,73:Dt,76:Ct,78:194,79:195,80:Nt,81:It,82:Rt,83:jt,84:Lt,85:Bt,86:Pt},t(St,[2,127]),{10:Tt,61:Ot,71:207,73:Dt,76:Ct,78:194,79:195,80:Nt,81:It,82:Rt,83:jt,84:Lt,85:Bt,86:Pt},{10:Tt,61:Ot,71:208,73:Dt,75:[1,209],76:Ct,78:194,79:195,80:Nt,81:It,82:Rt,83:jt,84:Lt,85:Bt,86:Pt},{10:Tt,61:Ot,71:210,73:Dt,75:[1,211],76:Ct,78:194,79:195,80:Nt,81:It,82:Rt,83:jt,84:Lt,85:Bt,86:Pt},{76:[1,212]},{10:Tt,61:Ot,71:213,73:Dt,76:Ct,78:194,79:195,80:Nt,81:It,82:Rt,83:jt,84:Lt,85:Bt,86:Pt},{10:Tt,61:Ot,71:214,73:Dt,76:Ct,78:194,79:195,80:Nt,81:It,82:Rt,83:jt,84:Lt,85:Bt,86:Pt},{13:xt,34:W,66:kt,72:215,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:132,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{13:xt,34:W,59:[1,217],66:kt,72:216,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:132,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},t(P,[2,45]),{10:z,12:Y,13:V,26:G,30:H,34:W,38:[1,218],47:$,55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},t(P,[2,50]),t(P,[2,47]),t(P,[2,48]),t(P,[2,49]),t(P,[2,51]),{10:z,12:Y,13:V,26:G,30:H,34:W,46:[1,219],47:$,55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},t(P,[2,53]),t(P,[2,54]),t(P,[2,56]),t(P,[2,55]),t(P,[2,57]),t([10,34,66,76,77,80,81,82,84,85,91,92,93,94,95,96],[2,65]),{10:z,12:Y,13:V,26:G,29:[1,220],30:H,34:W,47:$,55:K,58:161,60:102,61:X,62:J,63:Q,64:tt,65:et,66:nt,67:rt,69:93,70:it,76:ot,77:at,80:ut,81:st,82:ct,84:ft,85:lt,86:ht,87:dt,88:99,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{6:10,7:11,8:a,9:u,10:s,11:c,19:16,21:17,22:18,23:19,24:20,25:21,26:f,30:[1,221],32:23,33:29,34:l,36:30,61:h,62:d,63:p,64:g,65:y,66:b,76:v,77:m,80:_,81:w,82:x,84:k,85:E,89:31,91:A,92:S,93:M,94:T,95:O,96:D},t(B,[2,43]),t(Ft,[2,89],{77:qt}),
t(Ut,[2,99],{79:223,10:Tt,61:Ot,73:Dt,76:Ct,80:Nt,81:It,82:Rt,83:jt,84:Lt,85:Bt,86:Pt}),t(zt,[2,101]),t(zt,[2,103]),t(zt,[2,104]),t(zt,[2,105]),t(zt,[2,106]),t(zt,[2,107]),t(zt,[2,108]),t(zt,[2,109]),t(zt,[2,110]),t(zt,[2,111]),t(zt,[2,112]),t(zt,[2,113]),t(Ft,[2,90],{77:qt}),t(Ft,[2,91],{77:qt}),{10:[1,224]},t(Ft,[2,92],{77:qt}),{10:[1,225]},t(Mt,[2,98]),t(Ft,[2,82],{77:qt}),t(Ft,[2,83],{77:qt}),t(Ft,[2,84],{88:134,90:167,13:xt,34:W,66:kt,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt}),t(Ft,[2,85],{88:134,90:167,10:[1,226],13:xt,34:W,66:kt,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt}),t(Ft,[2,87],{10:[1,227]}),{38:[1,228]},{46:[1,229]},{8:I,9:R,11:j,20:230},t(N,[2,33]),{10:Tt,61:Ot,73:Dt,76:Ct,78:231,79:195,80:Nt,81:It,82:Rt,83:jt,84:Lt,85:Bt,86:Pt},t(zt,[2,102]),{13:xt,34:W,66:kt,72:232,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:132,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{13:xt,34:W,66:kt,72:233,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,88:134,90:132,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt},{59:[1,234]},{59:[1,235]},t(P,[2,46]),t(P,[2,52]),t(_t,o,{5:236}),t(Ut,[2,100],{79:223,10:Tt,61:Ot,73:Dt,76:Ct,80:Nt,81:It,82:Rt,83:jt,84:Lt,85:Bt,86:Pt}),t(Ft,[2,95],{88:134,90:167,10:[1,237],13:xt,34:W,66:kt,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt}),t(Ft,[2,96],{88:134,90:167,10:[1,238],13:xt,34:W,66:kt,76:ot,77:at,80:ut,81:st,82:Et,84:ft,85:lt,91:pt,92:gt,93:yt,94:bt,95:vt,96:mt}),t(Ft,[2,86]),t(Ft,[2,88]),{6:10,7:11,8:a,9:u,10:s,11:c,19:16,21:17,22:18,23:19,24:20,25:21,26:f,30:[1,239],32:23,33:29,34:l,36:30,61:h,62:d,63:p,64:g,65:y,66:b,76:v,77:m,80:_,81:w,82:x,84:k,85:E,89:31,91:A,92:S,93:M,94:T,95:O,96:D},{10:Tt,61:Ot,71:240,73:Dt,76:Ct,78:194,79:195,80:Nt,81:It,82:Rt,83:jt,84:Lt,85:Bt,86:Pt},{10:Tt,61:Ot,71:241,73:Dt,76:Ct,78:194,79:195,80:Nt,81:It,82:Rt,83:jt,84:Lt,85:Bt,86:Pt},t(N,[2,32]),t(Ft,[2,93],{77:qt}),t(Ft,[2,94],{77:qt})],defaultActions:{},parseError:func
tion(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],o=[],a=this.table,u="",s=0,c=0,f=0,l=2,h=1,d=o.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var b=p.yylloc;o.push(b);var v=p.options&&p.options.ranges;function m(){var t;return"number"!=typeof(t=r.pop()||p.lex()||h)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,w,x,k,E,A,S,M,T,O={};;){if(x=n[n.length-1],this.defaultActions[x]?k=this.defaultActions[x]:(null==_&&(_=m()),k=a[x]&&a[x][_]),void 0===k||!k.length||!k[0]){var D="";for(A in T=[],a[x])this.terminals_[A]&&A>l&&T.push("'"+this.terminals_[A]+"'");D=p.showPosition?"Parse error on line "+(s+1)+":\n"+p.showPosition()+"\nExpecting "+T.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(s+1)+": Unexpected "+(_==h?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(D,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:b,expected:T})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+x+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),o.push(p.yylloc),n.push(k[1]),_=null,w?(_=w,w=null):(c=p.yyleng,u=p.yytext,s=p.yylineno,b=p.yylloc,f>0&&f--);break;case 2:if(S=this.productions_[k[1]][1],O.$=i[i.length-S],O._$={first_line:o[o.length-(S||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-(S||1)].first_column,last_column:o[o.length-1].last_column},v&&(O._$.range=[o[o.length-(S||1)].range[0],o[o.length-1].range[1]]),void 0!==(E=this.performAction.apply(O,[u,c,s,g.yy,k[1],i,o].concat(d))))return 
E;S&&(n=n.slice(0,-1*S*2),i=i.slice(0,-1*S),o=o.slice(0,-1*S)),n.push(this.productions_[k[1]][0]),i.push(O.$),o.push(O._$),M=a[n[n.length-2]][n[n.length-1]],n.push(M);break;case 3:return!0}}return!0}},Vt={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var o in i)this[o]=i[o];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),o=0;oe[0].length)){if(e=n,r=o,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[o])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{},performAction:function(t,e,n,r){switch(n){case 0:break;case 1:this.begin("string");break;case 2:this.popState();break;case 3:return"STR";case 4:return 61;case 5:return 70;case 6:return 62;case 7:return 75;case 8:return 63;case 9:return 64;case 10:return 65;case 11:case 12:return t.lex.firstGraph()&&this.begin("dir"),12;case 13:return 26;case 14:return 30;case 15:case 16:case 17:case 18:case 19:case 20:case 21:case 22:case 23:case 24:return this.popState(),13;case 25:return 76;case 26:return 84;case 27:return 35;case 28:return 81;case 29:return 34;case 30:return 8;case 31:return 77;case 32:return 95;case 33:case 34:case 35:case 36:case 37:case 38:case 39:case 40:case 41:case 42:case 43:case 44:case 45:case 46:case 47:case 48:case 49:case 50:case 51:case 52:case 53:case 54:case 55:case 56:case 57:case 58:case 59:case 60:return 56;case 
61:case 62:case 63:case 64:case 65:case 66:case 67:case 68:case 69:case 70:case 71:case 72:return 55;case 73:return 39;case 74:return 40;case 75:return 41;case 76:return 42;case 77:return 43;case 78:return 44;case 79:return 82;case 80:return 85;case 81:return 96;case 82:return 93;case 83:return 86;case 84:case 85:return 94;case 86:return 87;case 87:return 47;case 88:return 67;case 89:return"SEP";case 90:return 66;case 91:return 80;case 92:return 49;case 93:return 48;case 94:return 51;case 95:return 50;case 96:return 91;case 97:return 92;case 98:return 57;case 99:return 37;case 100:return 38;case 101:return 28;case 102:return 29;case 103:return 45;case 104:return 46;case 105:return 102;case 106:return 9;case 107:return 10;case 108:return 11}},rules:[/^(?:%%[^\n]*\n*)/,/^(?:["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:style\b)/,/^(?:default\b)/,/^(?:linkStyle\b)/,/^(?:interpolate\b)/,/^(?:classDef\b)/,/^(?:class\b)/,/^(?:click\b)/,/^(?:graph\b)/,/^(?:flowchart\b)/,/^(?:subgraph\b)/,/^(?:end\b\s*)/,/^(?:\s*LR\b)/,/^(?:\s*RL\b)/,/^(?:\s*TB\b)/,/^(?:\s*BT\b)/,/^(?:\s*TD\b)/,/^(?:\s*BR\b)/,/^(?:\s*<)/,/^(?:\s*>)/,/^(?:\s*\^)/,/^(?:\s*v\b)/,/^(?:[0-9]+)/,/^(?:#)/,/^(?::::)/,/^(?::)/,/^(?:&)/,/^(?:;)/,/^(?:,)/,/^(?:\*)/,/^(?:\s*--[x]\s*)/,/^(?:\s*-->\s*)/,/^(?:\s*<-->\s*)/,/^(?:\s*[x]--[x]\s*)/,/^(?:\s*[o]--[o]\s*)/,/^(?:\s*[o]\.-[o]\s*)/,/^(?:\s*<==>\s*)/,/^(?:\s*[o]==[o]\s*)/,/^(?:\s*[x]==[x]\s*)/,/^(?:\s*[x].-[x]\s*)/,/^(?:\s*[x]-\.-[x]\s*)/,/^(?:\s*<\.->\s*)/,/^(?:\s*<-\.->\s*)/,/^(?:\s*[o]-\.-[o]\s*)/,/^(?:\s*--[o]\s*)/,/^(?:\s*---\s*)/,/^(?:\s*-\.-[x]\s*)/,/^(?:\s*-\.->\s*)/,/^(?:\s*-\.-[o]\s*)/,/^(?:\s*-\.-\s*)/,/^(?:\s*.-[x]\s*)/,/^(?:\s*\.->\s*)/,/^(?:\s*\.-[o]\s*)/,/^(?:\s*\.-\s*)/,/^(?:\s*==[x]\s*)/,/^(?:\s*==>\s*)/,/^(?:\s*==[o]\s*)/,/^(?:\s*==[\=]\s*)/,/^(?:\s*<--\s*)/,/^(?:\s*[x]--\s*)/,/^(?:\s*[o]--\s*)/,/^(?:\s*<-\.\s*)/,/^(?:\s*[x]-\.\s*)/,/^(?:\s*[o]-\.\s*)/,/^(?:\s*<==\s*)/,/^(?:\s*[x]==\s*)/,/^(?:\s*[o]==\s*)/,/^(?:\s*--\s*)/,/^(?:\s*-\.\s*)/,/^(?:\s*==\s*)/,/^(?:
\(-)/,/^(?:-\))/,/^(?:\(\[)/,/^(?:\]\))/,/^(?:\[\()/,/^(?:\)\])/,/^(?:-)/,/^(?:\.)/,/^(?:[\_])/,/^(?:\+)/,/^(?:%)/,/^(?:=)/,/^(?:=)/,/^(?:<)/,/^(?:>)/,/^(?:\^)/,/^(?:\\\|)/,/^(?:v\b)/,/^(?:[A-Za-z]+)/,/^(?:\\\])/,/^(?:\[\/)/,/^(?:\/\])/,/^(?:\[\\)/,/^(?:[!"#$%&'*+,-.`?\\_/])/,/^(?:[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6]|[\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377]|[\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5]|[\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA]|[\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE]|[\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA]|[\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0]|[\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977]|[\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2]|[\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A]|[\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39]|[\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8]|[\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C]|[\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C]|[\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99]|[\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0]|[\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D]|[\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3]|[\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10]|[\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1]|[\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81]|[\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3]|[\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6]|[\u0
EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A]|[\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081]|[\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D]|[\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0]|[\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310]|[\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C]|[\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711]|[\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7]|[\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C]|[\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16]|[\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF]|[\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC]|[\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D]|[\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D]|[\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3]|[\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F]|[\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128]|[\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184]|[\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3]|[\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6]|[\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE]|[\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C]|[\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D]|[\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC]|[\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B]|[\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788]|[\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805]|[\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB]|[\uA90A-\uA925\uA93
0-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28]|[\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5]|[\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4]|[\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E]|[\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D]|[\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36]|[\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D]|[\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC]|[\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF]|[\uFFD2-\uFFD7\uFFDA-\uFFDC])/,/^(?:\|)/,/^(?:\()/,/^(?:\))/,/^(?:\[)/,/^(?:\])/,/^(?:\{)/,/^(?:\})/,/^(?:")/,/^(?:(\r|\n|\r\n)+)/,/^(?:\s)/,/^(?:$)/],conditions:{vertex:{rules:[],inclusive:!1},dir:{rules:[15,16,17,18,19,20,21,22,23,24],inclusive:!1},string:{rules:[2,3],inclusive:!1},INITIAL:{rules:[0,1,4,5,6,7,8,9,10,11,12,13,14,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108],inclusive:!0}}};function Gt(){this.yy={}}return Yt.lexer=Vt,Gt.prototype=Yt,Yt.Parser=Gt,new Gt}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(54).readFileSync(n(55).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(17),n(14)(t))},function(t,e,n){"use strict";var r=n(4);n.d(e,"g",(function(){return r.a}));var i=n(123);n.d(e,"h",(function(){return i.a})),n.d(e,"i",(function(){return i.b})),n.d(e,"L",(function(){return i.a})),n.d(e,"M",(function(){return i.b}));var o=n(122);n.d(e,"r",(function(){return o.a})),n.d(e,"s",(function(){return o.b})),n.d(e,"V",(function(){return o.a})),n.d(e,"W",(function(){return 
o.b}));var a=n(220);n.d(e,"j",(function(){return a.a})),n.d(e,"k",(function(){return a.b}));var u=n(219);n.d(e,"e",(function(){return u.a})),n.d(e,"f",(function(){return u.b}));var s=n(141);n.d(e,"a",(function(){return s.b})),n.d(e,"b",(function(){return s.a}));var c=n(9);n.d(e,"B",(function(){return c.g})),n.d(e,"C",(function(){return c.h})),n.d(e,"t",(function(){return c.g})),n.d(e,"u",(function(){return c.h})),n.d(e,"l",(function(){return c.c})),n.d(e,"m",(function(){return c.d})),n.d(e,"x",(function(){return c.k})),n.d(e,"y",(function(){return c.l})),n.d(e,"z",(function(){return c.m})),n.d(e,"A",(function(){return c.n})),n.d(e,"v",(function(){return c.i})),n.d(e,"w",(function(){return c.j})),n.d(e,"c",(function(){return c.a})),n.d(e,"d",(function(){return c.b})),n.d(e,"p",(function(){return c.e})),n.d(e,"q",(function(){return c.f}));var f=n(218);n.d(e,"n",(function(){return f.a})),n.d(e,"o",(function(){return f.b}));var l=n(70);n.d(e,"D",(function(){return l.a})),n.d(e,"E",(function(){return l.b}));var h=n(223);n.d(e,"N",(function(){return h.a})),n.d(e,"O",(function(){return h.b}));var d=n(222);n.d(e,"J",(function(){return d.a})),n.d(e,"K",(function(){return d.b}));var p=n(142);n.d(e,"F",(function(){return p.a})),n.d(e,"G",(function(){return p.b}));var g=n(10);n.d(e,"fb",(function(){return g.g})),n.d(e,"gb",(function(){return g.h})),n.d(e,"X",(function(){return g.g})),n.d(e,"Y",(function(){return g.h})),n.d(e,"P",(function(){return g.c})),n.d(e,"Q",(function(){return g.d})),n.d(e,"bb",(function(){return g.k})),n.d(e,"cb",(function(){return g.l})),n.d(e,"db",(function(){return g.m})),n.d(e,"eb",(function(){return g.n})),n.d(e,"Z",(function(){return g.i})),n.d(e,"ab",(function(){return g.j})),n.d(e,"H",(function(){return g.a})),n.d(e,"I",(function(){return g.b})),n.d(e,"T",(function(){return g.e})),n.d(e,"U",(function(){return g.f}));var y=n(221);n.d(e,"R",(function(){return y.a})),n.d(e,"S",(function(){return y.b}));var b=n(71);n.d(e,"hb",(function(){return 
b.a})),n.d(e,"ib",(function(){return b.b}))},function(t,e,n){var r=n(466),i=n(471);t.exports=function(t,e){var n=i(t,e);return r(n)?n:void 0}},function(t,e,n){var r=n(87),i=n(467),o=n(468),a=r?r.toStringTag:void 0;t.exports=function(t){return null==t?void 0===t?"[object Undefined]":"[object Null]":a&&a in Object(t)?i(t):o(t)}},function(t,e){t.exports=function(t){return t}},function(t,e,n){var r=n(579),i=n(584);t.exports=function(t,e){var n=i(t,e);return r(n)?n:void 0}},function(t,e,n){var r=n(94),i=n(580),o=n(581),a=r?r.toStringTag:void 0;t.exports=function(t){return null==t?void 0===t?"[object Undefined]":"[object Null]":a&&a in Object(t)?i(t):o(t)}},function(t,e){t.exports=function(t){return t}},function(t,e,n){"use strict";var r=n(180),i=Object.keys||function(t){var e=[];for(var n in t)e.push(n);return e};t.exports=l;var o=Object.create(n(134));o.inherits=n(2);var a=n(410),u=n(269);o.inherits(l,a);for(var s=i(u.prototype),c=0;c1?r[0]+r.slice(2):r,+t.slice(n+1)]}},function(t,e,n){"use strict";var r=n(145);n.d(e,"c",(function(){return r.a})),n.d(e,"b",(function(){return r.b})),n.d(e,"e",(function(){return r.c}));var i=n(210);n.d(e,"d",(function(){return i.a}));var o=n(111);n.d(e,"f",(function(){return o.b})),n.d(e,"a",(function(){return o.a}));var a=n(287);n.d(e,"g",(function(){return a.a}));var u=n(288);n.d(e,"h",(function(){return u.a}));var s=n(289);n.d(e,"i",(function(){return s.a}))},function(t,e,n){"use strict";var r=n(445);n.d(e,"a",(function(){return r.a}))},function(t,e){t.exports=function(t,e){return t===e||t!=t&&e!=e}},function(t,e,n){var r=n(35).Symbol;t.exports=r},function(t,e,n){(function(t){var r=n(35),i=n(487),o=e&&!e.nodeType&&e,a=o&&"object"==typeof t&&t&&!t.nodeType&&t,u=a&&a.exports===o?r.Buffer:void 0,s=(u?u.isBuffer:void 0)||i;t.exports=s}).call(this,n(14)(t))},function(t,e,n){var r=n(297),i=n(491),o=n(52);t.exports=function(t){return o(t)?r(t,!0):i(t)}},function(t,e,n){var 
r=n(496),i=n(225),o=n(497),a=n(306),u=n(498),s=n(75),c=n(295),f=c(r),l=c(i),h=c(o),d=c(a),p=c(u),g=s;(r&&"[object DataView]"!=g(new r(new ArrayBuffer(1)))||i&&"[object Map]"!=g(new i)||o&&"[object Promise]"!=g(o.resolve())||a&&"[object Set]"!=g(new a)||u&&"[object WeakMap]"!=g(new u))&&(g=function(t){var e=s(t),n="[object Object]"==e?t.constructor:void 0,r=n?c(n):"";if(r)switch(r){case f:return"[object DataView]";case l:return"[object Map]";case h:return"[object Promise]";case d:return"[object Set]";case p:return"[object WeakMap]"}return e}),t.exports=g},function(t,e,n){var r=n(75),i=n(43);t.exports=function(t){return"symbol"==typeof t||i(t)&&"[object Symbol]"==r(t)}},function(t,e){t.exports=function(t,e){return t===e||t!=t&&e!=e}},function(t,e,n){var r=n(78),i=n(30);t.exports=function(t){if(!i(t))return!1;var e=r(t);return"[object Function]"==e||"[object GeneratorFunction]"==e||"[object AsyncFunction]"==e||"[object Proxy]"==e}},function(t,e,n){var r=n(36).Symbol;t.exports=r},function(t,e,n){(function(t){var r=n(36),i=n(600),o=e&&!e.nodeType&&e,a=o&&"object"==typeof t&&t&&!t.nodeType&&t,u=a&&a.exports===o?r.Buffer:void 0,s=(u?u.isBuffer:void 0)||i;t.exports=s}).call(this,n(14)(t))},function(t,e,n){var r=n(345),i=n(604),o=n(56);t.exports=function(t){return o(t)?r(t,!0):i(t)}},function(t,e,n){var r=n(609),i=n(242),o=n(610),a=n(354),u=n(611),s=n(78),c=n(343),f=c(r),l=c(i),h=c(o),d=c(a),p=c(u),g=s;(r&&"[object DataView]"!=g(new r(new ArrayBuffer(1)))||i&&"[object Map]"!=g(new i)||o&&"[object Promise]"!=g(o.resolve())||a&&"[object Set]"!=g(new a)||u&&"[object WeakMap]"!=g(new u))&&(g=function(t){var e=s(t),n="[object Object]"==e?t.constructor:void 0,r=n?c(n):"";if(r)switch(r){case f:return"[object DataView]";case l:return"[object Map]";case h:return"[object Promise]";case d:return"[object Set]";case p:return"[object WeakMap]"}return e}),t.exports=g},function(t,e,n){var r=n(78),i=n(44);t.exports=function(t){return"symbol"==typeof t||i(t)&&"[object 
Symbol]"==r(t)}},function(t,e,n){var r;try{r={defaults:n(387),each:n(252),isFunction:n(93),isPlainObject:n(391),pick:n(394),has:n(258),range:n(395),uniqueId:n(396)}}catch(t){}r||(r=window._),t.exports=r},function(t,e,n){"use strict";(function(e,r){var i=n(3).Buffer,o=e.crypto||e.msCrypto;o&&o.getRandomValues?t.exports=function(t,e){if(t>4294967295)throw new RangeError("requested too many random bytes");var n=i.allocUnsafe(t);if(t>0)if(t>65536)for(var a=0;a=this._finalSize&&(this._update(this._block),this._block.fill(0));var n=8*this._len;if(n<=4294967295)this._block.writeUInt32BE(n,this._blockSize-4);else{var r=(4294967295&n)>>>0,i=(n-r)/4294967296;this._block.writeUInt32BE(i,this._blockSize-8),this._block.writeUInt32BE(r,this._blockSize-4)}this._update(this._block);var o=this._hash();return t?o.toString(t):o},i.prototype._update=function(){throw new Error("_update must be implemented by subclass")},t.exports=i},function(t,e,n){"use strict";var r=n(282),i=n(47),o=n(285),a=n(113),u=n(284),s=n(109),c=n(41),f=function(t){return function(){return t}};function l(t,e,n,r,i,o,a,u,s,c){this.target=t,this.type=e,this.subject=n,this.identifier=r,this.active=i,this.x=o,this.y=a,this.dx=u,this.dy=s,this._=c}function h(){return!i.c.ctrlKey&&!i.c.button}function d(){return this.parentNode}function p(t){return null==t?{x:i.c.x,y:i.c.y}:t}function g(){return navigator.maxTouchPoints||"ontouchstart"in this}l.prototype.on=function(){var t=this._.on.apply(this._,arguments);return t===this._?this:t};var y=function(){var t,e,n,y,b=h,v=d,m=p,_=g,w={},x=Object(r.a)("start","drag","end"),k=0,E=0;function A(t){t.on("mousedown.drag",S).filter(_).on("touchstart.drag",O).on("touchmove.drag",D).on("touchend.drag touchcancel.drag",C).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function S(){if(!y&&b.apply(this,arguments)){var 
r=N("mouse",v.apply(this,arguments),o.a,this,arguments);r&&(Object(a.a)(i.c.view).on("mousemove.drag",M,!0).on("mouseup.drag",T,!0),Object(s.a)(i.c.view),Object(c.b)(),n=!1,t=i.c.clientX,e=i.c.clientY,r("start"))}}function M(){if(Object(c.a)(),!n){var r=i.c.clientX-t,o=i.c.clientY-e;n=r*r+o*o>E}w.mouse("drag")}function T(){Object(a.a)(i.c.view).on("mousemove.drag mouseup.drag",null),Object(s.b)(i.c.view,n),Object(c.a)(),w.mouse("end")}function O(){if(b.apply(this,arguments)){var t,e,n=i.c.changedTouches,r=v.apply(this,arguments),o=n.length;for(t=0;t=1?(n=1,e-1):Math.floor(n*e),o=t[i],a=t[i+1],u=i>0?t[i-1]:2*o-a,s=i=0&&"xmlns"!==(e=t.slice(0,n))&&(t=t.slice(n+1)),r.a.hasOwnProperty(e)?{space:r.a[e],local:t}:t}},function(t,e,n){"use strict";function r(){}e.a=function(t){return null==t?r:function(){return this.querySelector(t)}}},function(t,e,n){"use strict";e.a=function(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}},function(t,e,n){"use strict";var r=n(11),i=n(82),o=n(207),a=n(206),u=n(23),s=n(208),c=n(202),f=n(115),l=n(69);e.a=function(t,e){var n,h=typeof e;return null==e||"boolean"===h?Object(f.a)(e):("number"===h?u.a:"string"===h?(n=Object(r.e)(e))?(e=n,i.a):c.a:e instanceof r.e?i.a:e instanceof Date?a.a:Object(l.b)(e)?l.a:Array.isArray(e)?o.b:"function"!=typeof e.valueOf&&"function"!=typeof e.toString||isNaN(e)?s.a:u.a)(t,e)}},function(t,e,n){"use strict";n.d(e,"b",(function(){return o}));var r=n(113),i=n(41);function o(t,e){var n=t.document.documentElement,o=Object(r.a)(t).on("dragstart.drag",null);e&&(o.on("click.drag",i.a,!0),setTimeout((function(){o.on("click.drag",null)}),0)),"onselectstart"in n?o.on("selectstart.drag",null):(n.style.MozUserSelect=n.__noselect,delete n.__noselect)}e.a=function(t){var e=t.document.documentElement,n=Object(r.a)(t).on("dragstart.drag",i.a,!0);"onselectstart"in e?n.on("selectstart.drag",i.a,!0):(e.__noselect=e.style.MozUserSelect,e.style.MozUserSelect="none")}},function(t,e,n){"use 
strict";var r={},i={};function o(t){return new Function("d","return {"+t.map((function(t,e){return JSON.stringify(t)+": d["+e+'] || ""'})).join(",")+"}")}function a(t){var e=Object.create(null),n=[];return t.forEach((function(t){for(var r in t)r in e||n.push(e[r]=r)})),n}function u(t,e){var n=t+"",r=n.length;return r9999?"+"+u(e,6):u(e,4))+"-"+u(t.getUTCMonth()+1,2)+"-"+u(t.getUTCDate(),2)+(o?"T"+u(n,2)+":"+u(r,2)+":"+u(i,2)+"."+u(o,3)+"Z":i?"T"+u(n,2)+":"+u(r,2)+":"+u(i,2)+"Z":r||n?"T"+u(n,2)+":"+u(r,2)+"Z":"")}e.a=function(t){var e=new RegExp('["'+t+"\n\r]"),n=t.charCodeAt(0);function u(t,e){var o,a=[],u=t.length,s=0,c=0,f=u<=0,l=!1;function h(){if(f)return i;if(l)return l=!1,r;var e,o,a=s;if(34===t.charCodeAt(a)){for(;s++=u?f=!0:10===(o=t.charCodeAt(s++))?l=!0:13===o&&(l=!0,10===t.charCodeAt(s)&&++s),t.slice(a+1,e-1).replace(/""/g,'"')}for(;s=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function i(t){if(!(e=r.exec(t)))throw new Error("invalid format: "+t);var e;return new o({fill:e[1],align:e[2],sign:e[3],symbol:e[4],zero:e[5],width:e[6],comma:e[7],precision:e[8]&&e[8].slice(1),trim:e[9],type:e[10]})}function o(t){this.fill=void 0===t.fill?" 
":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}i.prototype=o.prototype,o.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type}},function(t,e,n){"use strict";var r=n(47);e.a=function(){for(var t,e=r.c;t=e.sourceEvent;)e=t;return e}},function(t,e,n){"use strict";var r=n(49);e.a=function(t){return"string"==typeof t?new r.a([[document.querySelector(t)]],[document.documentElement]):new r.a([[t]],r.c)}},function(t,e,n){t.exports={graphlib:n(38),layout:n(751),debug:n(810),util:{time:n(22).time,notime:n(22).notime},version:n(811)}},function(t,e,n){"use strict";e.a=function(t){return function(){return t}}},function(t,e,n){"use strict";n.d(e,"a",(function(){return r})),n.d(e,"b",(function(){return i}));var r=Math.PI/180,i=180/Math.PI},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,13],n=[1,16],r=[1,14],i=[1,15],o=[1,17],a=[1,18],u=[1,20],s=[1,21],c=[1,22],f=[6,8],l=[1,31],h=[1,32],d=[1,33],p=[1,34],g=[1,35],y=[1,36],b=[6,8,14,20,28,31,32,33,34,35,36],v=[6,8,12,14,20,24,28,31,32,33,34,35,36,52,53,54],m=[28,52,53,54],_=[28,35,36,52,53,54],w=[28,31,32,33,34,52,53,54],x=[6,8,14],k=[1,59],E={trace:function(){},yy:{},symbols_:{error:2,mermaidDoc:3,graphConfig:4,CLASS_DIAGRAM:5,NEWLINE:6,statements:7,EOF:8,statement:9,className:10,alphaNumToken:11,GENERICTYPE:12,relationStatement:13,LABEL:14,classStatement:15,methodStatement:16,annotationStatement:17,clickStatement:18,CLASS:19,STRUCT_START:20,members:21,STRUCT_STOP:22,ANNOTATION_START:23,ANNOTATION_END:24,MEMBER:25,SEPARATOR:26,relation:27,STR:28,relationType:29,lineType:30,AGGREGATION:31,EXTENSION:32,COMPOSITION:33,DEPENDENCY:34,LINE:35,DOTTED_LINE:36,CALLBACK:37,LINK:38,commentToken:39,textToken:40,graphCodeTokens:41,textNoTagsToken:42,TAGSTART:43,TAGEND:44,"==":45,"--":46,PCT:47,DEFAULT:48,SPACE:49,MINUS:50,keywords:51,UNICODE_TEXT:52,NUM:53,ALPHA:54,$accept:0,$end:1},terminals_:{2:"error",5:"CLASS_DIAGRAM",6:"NEWLINE",8:"EOF",12:"GENERICTYPE",14:"LABEL",19:"CLASS",20:"STRUCT_START",22:"STRUCT_STOP",23:"ANNOTATION_START",24:"ANNOTATION_END",25:"MEMBER",26:"SEPARATOR",28:"STR",31:"AGGREGATION",32:"EXTENSION",33:"COMPOSITION",34:"DEPENDENCY",35:"LINE",36:"DOTTED_LINE",37:"CALLBACK",38:"LINK",41:"graphCodeTokens",43:"TAGSTART",44:"TAGEND",45:"==",46:"--",47:"PCT",48:"DEFAULT",49:"SPACE",50:"MINUS",51:"keywords",52:"UNICODE_TEXT",53:"NUM",54:"ALPHA"},productions_:[0,[3,1],[4,4],[7,1],[7,2],[7,3],[10,2],[10,1],[10,3],[10,2],[9,1],[9,2],[9,1],[9,1],[9,1],[9,1],[15,2],[15,5],[17,4],[21,1],[21,2],[16,1],[16,2],[16,1],[16,1],[13,3],[13,4],[13,4],[13,5],[27,3],[27,2],[27,2],[27,1],[29,1],[29,1],[29,1],[29,1],[30,1],[30,1],[18,3],[18,4],[18,3],[18,4],[39,1],[39,1],[40,1],[40,1],[40,1],[40,1],[40,1],[40,1],[40,1],[42,1],[42,1],[42,1],[42,1],[11,1],[11,1],[11,1]],performAction:function(t,e,n,r,i,o,a){
var u=o.length-1;switch(i){case 6:this.$=o[u-1]+o[u];break;case 7:this.$=o[u];break;case 8:this.$=o[u-2]+"~"+o[u-1]+o[u];break;case 9:this.$=o[u-1]+"~"+o[u];break;case 10:r.addRelation(o[u]);break;case 11:o[u-1].title=r.cleanupLabel(o[u]),r.addRelation(o[u-1]);break;case 16:r.addClass(o[u]);break;case 17:r.addClass(o[u-3]),r.addMembers(o[u-3],o[u-1]);break;case 18:r.addAnnotation(o[u],o[u-2]);break;case 19:this.$=[o[u]];break;case 20:o[u].push(o[u-1]),this.$=o[u];break;case 21:break;case 22:r.addMember(o[u-1],r.cleanupLabel(o[u]));break;case 23:case 24:break;case 25:this.$={id1:o[u-2],id2:o[u],relation:o[u-1],relationTitle1:"none",relationTitle2:"none"};break;case 26:this.$={id1:o[u-3],id2:o[u],relation:o[u-1],relationTitle1:o[u-2],relationTitle2:"none"};break;case 27:this.$={id1:o[u-3],id2:o[u],relation:o[u-2],relationTitle1:"none",relationTitle2:o[u-1]};break;case 28:this.$={id1:o[u-4],id2:o[u],relation:o[u-2],relationTitle1:o[u-3],relationTitle2:o[u-1]};break;case 29:this.$={type1:o[u-2],type2:o[u],lineType:o[u-1]};break;case 30:this.$={type1:"none",type2:o[u],lineType:o[u-1]};break;case 31:this.$={type1:o[u-1],type2:"none",lineType:o[u]};break;case 32:this.$={type1:"none",type2:"none",lineType:o[u]};break;case 33:this.$=r.relationType.AGGREGATION;break;case 34:this.$=r.relationType.EXTENSION;break;case 35:this.$=r.relationType.COMPOSITION;break;case 36:this.$=r.relationType.DEPENDENCY;break;case 37:this.$=r.lineType.LINE;break;case 38:this.$=r.lineType.DOTTED_LINE;break;case 39:this.$=o[u-2],r.setClickEvent(o[u-1],o[u],void 0);break;case 40:this.$=o[u-3],r.setClickEvent(o[u-2],o[u-1],o[u]);break;case 41:this.$=o[u-2],r.setLink(o[u-1],o[u],void 0);break;case 
42:this.$=o[u-3],r.setLink(o[u-2],o[u-1],o[u])}},table:[{3:1,4:2,5:[1,3]},{1:[3]},{1:[2,1]},{6:[1,4]},{7:5,9:6,10:12,11:19,13:7,15:8,16:9,17:10,18:11,19:e,23:n,25:r,26:i,37:o,38:a,52:u,53:s,54:c},{8:[1,23]},{6:[1,24],8:[2,3]},t(f,[2,10],{14:[1,25]}),t(f,[2,12]),t(f,[2,13]),t(f,[2,14]),t(f,[2,15]),t(f,[2,21],{27:26,29:29,30:30,14:[1,28],28:[1,27],31:l,32:h,33:d,34:p,35:g,36:y}),{10:37,11:19,52:u,53:s,54:c},t(f,[2,23]),t(f,[2,24]),{11:38,52:u,53:s,54:c},{10:39,11:19,52:u,53:s,54:c},{10:40,11:19,52:u,53:s,54:c},t(b,[2,7],{11:19,10:41,12:[1,42],52:u,53:s,54:c}),t(v,[2,56]),t(v,[2,57]),t(v,[2,58]),{1:[2,2]},{7:43,8:[2,4],9:6,10:12,11:19,13:7,15:8,16:9,17:10,18:11,19:e,23:n,25:r,26:i,37:o,38:a,52:u,53:s,54:c},t(f,[2,11]),{10:44,11:19,28:[1,45],52:u,53:s,54:c},{27:46,29:29,30:30,31:l,32:h,33:d,34:p,35:g,36:y},t(f,[2,22]),{30:47,35:g,36:y},t(m,[2,32],{29:48,31:l,32:h,33:d,34:p}),t(_,[2,33]),t(_,[2,34]),t(_,[2,35]),t(_,[2,36]),t(w,[2,37]),t(w,[2,38]),t(f,[2,16],{20:[1,49]}),{24:[1,50]},{28:[1,51]},{28:[1,52]},t(b,[2,6]),t(b,[2,9],{11:19,10:53,52:u,53:s,54:c}),{8:[2,5]},t(x,[2,25]),{10:54,11:19,52:u,53:s,54:c},{10:55,11:19,28:[1,56],52:u,53:s,54:c},t(m,[2,31],{29:57,31:l,32:h,33:d,34:p}),t(m,[2,30]),{21:58,25:k},{10:60,11:19,52:u,53:s,54:c},t(f,[2,39],{28:[1,61]}),t(f,[2,41],{28:[1,62]}),t(b,[2,8]),t(x,[2,27]),t(x,[2,26]),{10:63,11:19,52:u,53:s,54:c},t(m,[2,29]),{22:[1,64]},{21:65,22:[2,19],25:k},t(f,[2,18]),t(f,[2,40]),t(f,[2,42]),t(x,[2,28]),t(f,[2,17]),{22:[2,20]}],defaultActions:{2:[2,1],23:[2,2],43:[2,5],65:[2,20]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],o=[],a=this.table,u="",s=0,c=0,f=0,l=2,h=1,d=o.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var b=p.yylloc;o.push(b);var 
v=p.options&&p.options.ranges;function m(){var t;return"number"!=typeof(t=r.pop()||p.lex()||h)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,w,x,k,E,A,S,M,T,O={};;){if(x=n[n.length-1],this.defaultActions[x]?k=this.defaultActions[x]:(null==_&&(_=m()),k=a[x]&&a[x][_]),void 0===k||!k.length||!k[0]){var D="";for(A in T=[],a[x])this.terminals_[A]&&A>l&&T.push("'"+this.terminals_[A]+"'");D=p.showPosition?"Parse error on line "+(s+1)+":\n"+p.showPosition()+"\nExpecting "+T.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(s+1)+": Unexpected "+(_==h?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(D,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:b,expected:T})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+x+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),o.push(p.yylloc),n.push(k[1]),_=null,w?(_=w,w=null):(c=p.yyleng,u=p.yytext,s=p.yylineno,b=p.yylloc,f>0&&f--);break;case 2:if(S=this.productions_[k[1]][1],O.$=i[i.length-S],O._$={first_line:o[o.length-(S||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-(S||1)].first_column,last_column:o[o.length-1].last_column},v&&(O._$.range=[o[o.length-(S||1)].range[0],o[o.length-1].range[1]]),void 0!==(E=this.performAction.apply(O,[u,c,s,g.yy,k[1],i,o].concat(d))))return E;S&&(n=n.slice(0,-1*S*2),i=i.slice(0,-1*S),o=o.slice(0,-1*S)),n.push(this.productions_[k[1]][0]),i.push(O.$),o.push(O._$),M=a[n[n.length-2]][n[n.length-1]],n.push(M);break;case 3:return!0}}return!0}},A={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return 
this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var o in i)this[o]=i[o];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),o=0;oe[0].length)){if(e=n,r=o,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[o])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{},performAction:function(t,e,n,r){switch(n){case 0:break;case 1:return 6;case 2:break;case 3:return 5;case 4:return this.begin("struct"),20;case 5:return"EOF_IN_STRUCT";case 6:return"OPEN_IN_STRUCT";case 7:return this.popState(),22;case 8:break;case 9:return"MEMBER";case 10:return 19;case 11:return 37;case 12:return 38;case 13:return 23;case 14:return 24;case 15:this.begin("generic");break;case 16:this.popState();break;case 17:return"GENERICTYPE";case 18:this.begin("string");break;case 19:this.popState();break;case 20:return"STR";case 21:case 22:return 32;case 23:case 24:return 34;case 25:return 33;case 26:return 31;case 27:return 35;case 28:return 36;case 29:return 14;case 30:return 50;case 31:return"DOT";case 32:return"PLUS";case 33:return 47;case 34:case 35:return"EQUALS";case 36:return 54;case 37:return"PUNCTUATION";case 38:return 
53;case 39:return 52;case 40:return 49;case 41:return 8}},rules:[/^(?:%%[^\n]*\n*)/,/^(?:\n+)/,/^(?:\s+)/,/^(?:classDiagram\b)/,/^(?:[\{])/,/^(?:$)/,/^(?:[\{])/,/^(?:\})/,/^(?:[\n])/,/^(?:[^\{\}\n]*)/,/^(?:class\b)/,/^(?:callback\b)/,/^(?:link\b)/,/^(?:<<)/,/^(?:>>)/,/^(?:[~])/,/^(?:[~])/,/^(?:[^~]*)/,/^(?:["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:\s*<\|)/,/^(?:\s*\|>)/,/^(?:\s*>)/,/^(?:\s*<)/,/^(?:\s*\*)/,/^(?:\s*o\b)/,/^(?:--)/,/^(?:\.\.)/,/^(?::[^\n;]+)/,/^(?:-)/,/^(?:\.)/,/^(?:\+)/,/^(?:%)/,/^(?:=)/,/^(?:=)/,/^(?:\w+)/,/^(?:[!"#$%&'*+,-.`?\\/])/,/^(?:[0-9]+)/,/^(?:[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6]|[\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377]|[\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5]|[\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA]|[\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE]|[\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA]|[\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0]|[\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977]|[\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2]|[\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A]|[\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39]|[\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8]|[\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C]|[\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C]|[\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99]|[\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0]|[\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D]|[\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3]|[\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10]|[\u0D12-\u0D3A\u0D3D\u0D4E\u0D6
0\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1]|[\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81]|[\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3]|[\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6]|[\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A]|[\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081]|[\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D]|[\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0]|[\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310]|[\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C]|[\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711]|[\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7]|[\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C]|[\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16]|[\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF]|[\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC]|[\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D]|[\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D]|[\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3]|[\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F]|[\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128]|[\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184]|[\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3]|[\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6]|[\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE]|[\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C]|[\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D]|[\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC]|[\uA000-\u
A48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B]|[\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788]|[\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805]|[\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB]|[\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28]|[\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5]|[\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4]|[\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E]|[\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D]|[\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36]|[\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D]|[\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC]|[\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF]|[\uFFD2-\uFFD7\uFFDA-\uFFDC])/,/^(?:\s)/,/^(?:$)/],conditions:{string:{rules:[19,20],inclusive:!1},generic:{rules:[16,17],inclusive:!1},struct:{rules:[5,6,7,8,9],inclusive:!1},INITIAL:{rules:[0,1,2,3,4,10,11,12,13,14,15,18,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41],inclusive:!0}}};function S(){this.yy={}}return E.lexer=A,S.prototype=E,E.Parser=S,new S}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(54).readFileSync(n(55).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(17),n(14)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,2],n=[1,3],r=[1,4],i=[2,4],o=[1,9],a=[1,11],u=[1,13],s=[1,14],c=[1,15],f=[1,16],l=[1,21],h=[1,17],d=[1,18],p=[1,19],g=[1,20],y=[1,22],b=[1,4,5,13,14,16,18,19,21,22,23,24,25,28],v=[1,4,5,11,12,13,14,16,18,19,21,22,23,24,25,28],m=[4,5,13,14,16,18,19,21,22,23,24,25,28],_={trace:function(){},yy:{},symbols_:{error:2,start:3,SPACE:4,NL:5,SD:6,document:7,line:8,statement:9,idStatement:10,DESCR:11,"--\x3e":12,HIDE_EMPTY:13,scale:14,WIDTH:15,COMPOSIT_STATE:16,STRUCT_START:17,STRUCT_STOP:18,STATE_DESCR:19,AS:20,ID:21,FORK:22,JOIN:23,CONCURRENT:24,note:25,notePosition:26,NOTE_TEXT:27,EDGE_STATE:28,left_of:29,right_of:30,$accept:0,$end:1},terminals_:{2:"error",4:"SPACE",5:"NL",6:"SD",11:"DESCR",12:"--\x3e",13:"HIDE_EMPTY",14:"scale",15:"WIDTH",16:"COMPOSIT_STATE",17:"STRUCT_START",18:"STRUCT_STOP",19:"STATE_DESCR",20:"AS",21:"ID",22:"FORK",23:"JOIN",24:"CONCURRENT",25:"note",27:"NOTE_TEXT",28:"EDGE_STATE",29:"left_of",30:"right_of"},productions_:[0,[3,2],[3,2],[3,2],[7,0],[7,2],[8,2],[8,1],[8,1],[9,1],[9,2],[9,3],[9,4],[9,1],[9,2],[9,1],[9,4],[9,3],[9,6],[9,1],[9,1],[9,1],[9,4],[9,4],[10,1],[10,1],[26,1],[26,1]],performAction:function(t,e,n,r,i,o,a){var u=o.length-1;switch(i){case 3:return r.setRootDoc(o[u]),o[u];case 4:this.$=[];break;case 5:"nl"!=o[u]&&(o[u-1].push(o[u]),this.$=o[u-1]);break;case 6:case 7:this.$=o[u];break;case 8:this.$="nl";break;case 9:this.$={stmt:"state",id:o[u],type:"default",description:""};break;case 10:this.$={stmt:"state",id:o[u-1],type:"default",description:o[u].trim()};break;case 11:this.$={stmt:"relation",state1:{stmt:"state",id:o[u-2],type:"default",description:""},state2:{stmt:"state",id:o[u],type:"default",description:""}};break;case 12:this.$={stmt:"relation",state1:{stmt:"state",id:o[u-3],type:"default",description:""},state2:{stmt:"state",id:o[u-1],type:"default",description:""},description:o[u].substr(1).trim()};break;case 16:this.$={stmt:"state",id:o[u-3],type:"default",description:"",doc:o[u-1]};break;case 17:var 
s=o[u],c=o[u-2].trim();if(o[u].match(":")){var f=o[u].split(":");s=f[0],c=[c,f[1]]}this.$={stmt:"state",id:s,type:"default",description:c};break;case 18:this.$={stmt:"state",id:o[u-3],type:"default",description:o[u-5],doc:o[u-1]};break;case 19:this.$={stmt:"state",id:o[u],type:"fork"};break;case 20:this.$={stmt:"state",id:o[u],type:"join"};break;case 21:this.$={stmt:"state",id:r.getDividerId(),type:"divider"};break;case 22:this.$={stmt:"state",id:o[u-1].trim(),note:{position:o[u-2].trim(),text:o[u].trim()}};break;case 24:case 25:this.$=o[u]}},table:[{3:1,4:e,5:n,6:r},{1:[3]},{3:5,4:e,5:n,6:r},{3:6,4:e,5:n,6:r},t([1,4,5,13,14,16,19,21,22,23,24,25,28],i,{7:7}),{1:[2,1]},{1:[2,2]},{1:[2,3],4:o,5:a,8:8,9:10,10:12,13:u,14:s,16:c,19:f,21:l,22:h,23:d,24:p,25:g,28:y},t(b,[2,5]),{9:23,10:12,13:u,14:s,16:c,19:f,21:l,22:h,23:d,24:p,25:g,28:y},t(b,[2,7]),t(b,[2,8]),t(b,[2,9],{11:[1,24],12:[1,25]}),t(b,[2,13]),{15:[1,26]},t(b,[2,15],{17:[1,27]}),{20:[1,28]},t(b,[2,19]),t(b,[2,20]),t(b,[2,21]),{26:29,27:[1,30],29:[1,31],30:[1,32]},t(v,[2,24]),t(v,[2,25]),t(b,[2,6]),t(b,[2,10]),{10:33,21:l,28:y},t(b,[2,14]),t(m,i,{7:34}),{21:[1,35]},{21:[1,36]},{20:[1,37]},{21:[2,26]},{21:[2,27]},t(b,[2,11],{11:[1,38]}),{4:o,5:a,8:8,9:10,10:12,13:u,14:s,16:c,18:[1,39],19:f,21:l,22:h,23:d,24:p,25:g,28:y},t(b,[2,17],{17:[1,40]}),{27:[1,41]},{21:[1,42]},t(b,[2,12]),t(b,[2,16]),t(m,i,{7:43}),t(b,[2,22]),t(b,[2,23]),{4:o,5:a,8:8,9:10,10:12,13:u,14:s,16:c,18:[1,44],19:f,21:l,22:h,23:d,24:p,25:g,28:y},t(b,[2,18])],defaultActions:{5:[2,1],6:[2,2],31:[2,26],32:[2,27]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],o=[],a=this.table,u="",s=0,c=0,f=0,l=2,h=1,d=o.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var 
b=p.yylloc;o.push(b);var v=p.options&&p.options.ranges;function m(){var t;return"number"!=typeof(t=r.pop()||p.lex()||h)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,w,x,k,E,A,S,M,T,O={};;){if(x=n[n.length-1],this.defaultActions[x]?k=this.defaultActions[x]:(null==_&&(_=m()),k=a[x]&&a[x][_]),void 0===k||!k.length||!k[0]){var D="";for(A in T=[],a[x])this.terminals_[A]&&A>l&&T.push("'"+this.terminals_[A]+"'");D=p.showPosition?"Parse error on line "+(s+1)+":\n"+p.showPosition()+"\nExpecting "+T.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(s+1)+": Unexpected "+(_==h?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(D,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:b,expected:T})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+x+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),o.push(p.yylloc),n.push(k[1]),_=null,w?(_=w,w=null):(c=p.yyleng,u=p.yytext,s=p.yylineno,b=p.yylloc,f>0&&f--);break;case 2:if(S=this.productions_[k[1]][1],O.$=i[i.length-S],O._$={first_line:o[o.length-(S||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-(S||1)].first_column,last_column:o[o.length-1].last_column},v&&(O._$.range=[o[o.length-(S||1)].range[0],o[o.length-1].range[1]]),void 0!==(E=this.performAction.apply(O,[u,c,s,g.yy,k[1],i,o].concat(d))))return E;S&&(n=n.slice(0,-1*S*2),i=i.slice(0,-1*S),o=o.slice(0,-1*S)),n.push(this.productions_[k[1]][0]),i.push(O.$),o.push(O._$),M=a[n[n.length-2]][n[n.length-1]],n.push(M);break;case 3:return!0}}return!0}},w={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return 
this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var o in i)this[o]=i[o];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),o=0;oe[0].length)){if(e=n,r=o,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[o])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 5;case 1:case 2:case 3:case 4:break;case 5:return this.pushState("SCALE"),14;case 6:return 15;case 7:this.popState();break;case 8:this.pushState("STATE");break;case 9:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),22;case 10:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),23;case 11:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),22;case 12:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),23;case 13:this.begin("STATE_STRING");break;case 14:return this.popState(),this.pushState("STATE_ID"),"AS";case 15:return this.popState(),"ID";case 16:this.popState();break;case 17:return"STATE_DESCR";case 18:return 16;case 19:this.popState();break;case 20:return this.popState(),this.pushState("struct"),17;case 21:return 
this.popState(),18;case 22:break;case 23:return this.begin("NOTE"),25;case 24:return this.popState(),this.pushState("NOTE_ID"),29;case 25:return this.popState(),this.pushState("NOTE_ID"),30;case 26:this.popState(),this.pushState("FLOATING_NOTE");break;case 27:return this.popState(),this.pushState("FLOATING_NOTE_ID"),"AS";case 28:break;case 29:return"NOTE_TEXT";case 30:return this.popState(),"ID";case 31:return this.popState(),this.pushState("NOTE_TEXT"),21;case 32:return this.popState(),e.yytext=e.yytext.substr(2).trim(),27;case 33:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),27;case 34:return 6;case 35:return 13;case 36:return 28;case 37:return 21;case 38:return e.yytext=e.yytext.trim(),11;case 39:return 12;case 40:return 24;case 41:return 5;case 42:return"INVALID"}},rules:[/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:((?!\n)\s)+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:scale\s+)/i,/^(?:\d+)/i,/^(?:\s+width\b)/i,/^(?:state\s+)/i,/^(?:.*<>)/i,/^(?:.*<>)/i,/^(?:.*\[\[fork\]\])/i,/^(?:.*\[\[join\]\])/i,/^(?:["])/i,/^(?:\s*as\s+)/i,/^(?:[^\n\{]*)/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[^\n\s\{]+)/i,/^(?:\n)/i,/^(?:\{)/i,/^(?:\})/i,/^(?:[\n])/i,/^(?:note\s+)/i,/^(?:left of\b)/i,/^(?:right of\b)/i,/^(?:")/i,/^(?:\s*as\s*)/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[^\n]*)/i,/^(?:\s*[^:\n\s\-]+)/i,/^(?:\s*:[^:\n;]+)/i,/^(?:\s*[^:;]+end note\b)/i,/^(?:stateDiagram\s+)/i,/^(?:hide empty 
description\b)/i,/^(?:\[\*\])/i,/^(?:[^:\n\s\-\{]+)/i,/^(?:\s*:[^:\n;]+)/i,/^(?:-->)/i,/^(?:--)/i,/^(?:$)/i,/^(?:.)/i],conditions:{LINE:{rules:[2,3],inclusive:!1},struct:{rules:[2,3,8,21,22,23,36,37,38,39,40],inclusive:!1},FLOATING_NOTE_ID:{rules:[30],inclusive:!1},FLOATING_NOTE:{rules:[27,28,29],inclusive:!1},NOTE_TEXT:{rules:[32,33],inclusive:!1},NOTE_ID:{rules:[31],inclusive:!1},NOTE:{rules:[24,25,26],inclusive:!1},SCALE:{rules:[6,7],inclusive:!1},ALIAS:{rules:[],inclusive:!1},STATE_ID:{rules:[15],inclusive:!1},STATE_STRING:{rules:[16,17],inclusive:!1},FORK_STATE:{rules:[],inclusive:!1},STATE:{rules:[2,3,9,10,11,12,13,14,18,19,20],inclusive:!1},ID:{rules:[2,3],inclusive:!1},INITIAL:{rules:[0,1,3,4,5,8,20,23,34,35,36,37,38,39,41,42],inclusive:!0}}};function x(){this.yy={}}return _.lexer=w,x.prototype=_,_.Parser=x,new x}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(54).readFileSync(n(55).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(17),n(14)(t))},function(t,e,n){(function(t,n){(function(){var r="Expected a function",i="__lodash_placeholder__",o=[["ary",128],["bind",1],["bindKey",2],["curry",8],["curryRight",16],["flip",512],["partial",32],["partialRight",64],["rearg",256]],a="[object Arguments]",u="[object Array]",s="[object Boolean]",c="[object Date]",f="[object Error]",l="[object Function]",h="[object GeneratorFunction]",d="[object Map]",p="[object Number]",g="[object Object]",y="[object RegExp]",b="[object Set]",v="[object String]",m="[object Symbol]",_="[object WeakMap]",w="[object ArrayBuffer]",x="[object DataView]",k="[object Float32Array]",E="[object Float64Array]",A="[object Int8Array]",S="[object Int16Array]",M="[object Int32Array]",T="[object Uint8Array]",O="[object Uint16Array]",D="[object Uint32Array]",C=/\b__p \+= '';/g,N=/\b(__p \+=) '' \+/g,I=/(__e\(.*?\)|\b__t\)) 
\+\n'';/g,R=/&(?:amp|lt|gt|quot|#39);/g,j=/[&<>"']/g,L=RegExp(R.source),B=RegExp(j.source),P=/<%-([\s\S]+?)%>/g,F=/<%([\s\S]+?)%>/g,q=/<%=([\s\S]+?)%>/g,U=/\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/,z=/^\w*$/,Y=/[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g,V=/[\\^$.*+?()[\]{}|]/g,G=RegExp(V.source),H=/^\s+|\s+$/g,W=/^\s+/,$=/\s+$/,K=/\{(?:\n\/\* \[wrapped with .+\] \*\/)?\n?/,Z=/\{\n\/\* \[wrapped with (.+)\] \*/,X=/,? & /,J=/[^\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\x7f]+/g,Q=/\\(\\)?/g,tt=/\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g,et=/\w*$/,nt=/^[-+]0x[0-9a-f]+$/i,rt=/^0b[01]+$/i,it=/^\[object .+?Constructor\]$/,ot=/^0o[0-7]+$/i,at=/^(?:0|[1-9]\d*)$/,ut=/[\xc0-\xd6\xd8-\xf6\xf8-\xff\u0100-\u017f]/g,st=/($^)/,ct=/['\n\r\u2028\u2029\\]/g,ft="\\u0300-\\u036f\\ufe20-\\ufe2f\\u20d0-\\u20ff",lt="\\xac\\xb1\\xd7\\xf7\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf\\u2000-\\u206f \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000",ht="[\\ud800-\\udfff]",dt="["+lt+"]",pt="["+ft+"]",gt="\\d+",yt="[\\u2700-\\u27bf]",bt="[a-z\\xdf-\\xf6\\xf8-\\xff]",vt="[^\\ud800-\\udfff"+lt+gt+"\\u2700-\\u27bfa-z\\xdf-\\xf6\\xf8-\\xffA-Z\\xc0-\\xd6\\xd8-\\xde]",mt="\\ud83c[\\udffb-\\udfff]",_t="[^\\ud800-\\udfff]",wt="(?:\\ud83c[\\udde6-\\uddff]){2}",xt="[\\ud800-\\udbff][\\udc00-\\udfff]",kt="[A-Z\\xc0-\\xd6\\xd8-\\xde]",Et="(?:"+bt+"|"+vt+")",At="(?:"+kt+"|"+vt+")",St="(?:"+pt+"|"+mt+")"+"?",Mt="[\\ufe0e\\ufe0f]?"+St+("(?:\\u200d(?:"+[_t,wt,xt].join("|")+")[\\ufe0e\\ufe0f]?"+St+")*"),Tt="(?:"+[yt,wt,xt].join("|")+")"+Mt,Ot="(?:"+[_t+pt+"?",pt,wt,xt,ht].join("|")+")",Dt=RegExp("['’]","g"),Ct=RegExp(pt,"g"),Nt=RegExp(mt+"(?="+mt+")|"+Ot+Mt,"g"),It=RegExp([kt+"?"+bt+"+(?:['’](?:d|ll|m|re|s|t|ve))?(?="+[dt,kt,"$"].join("|")+")",At+"+(?:['’](?:D|LL|M|RE|S|T|VE))?(?="+[dt,kt+Et,"$"].join("|")+")",kt+"?"+Et+"+(?:['’](?:d|ll|m|re|s|t|ve))?",kt+"+(?:['’](?
:D|LL|M|RE|S|T|VE))?","\\d*(?:1ST|2ND|3RD|(?![123])\\dTH)(?=\\b|[a-z_])","\\d*(?:1st|2nd|3rd|(?![123])\\dth)(?=\\b|[A-Z_])",gt,Tt].join("|"),"g"),Rt=RegExp("[\\u200d\\ud800-\\udfff"+ft+"\\ufe0e\\ufe0f]"),jt=/[a-z][A-Z]|[A-Z]{2}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]/,Lt=["Array","Buffer","DataView","Date","Error","Float32Array","Float64Array","Function","Int8Array","Int16Array","Int32Array","Map","Math","Object","Promise","RegExp","Set","String","Symbol","TypeError","Uint8Array","Uint8ClampedArray","Uint16Array","Uint32Array","WeakMap","_","clearTimeout","isFinite","parseInt","setTimeout"],Bt=-1,Pt={};Pt[k]=Pt[E]=Pt[A]=Pt[S]=Pt[M]=Pt[T]=Pt["[object Uint8ClampedArray]"]=Pt[O]=Pt[D]=!0,Pt[a]=Pt[u]=Pt[w]=Pt[s]=Pt[x]=Pt[c]=Pt[f]=Pt[l]=Pt[d]=Pt[p]=Pt[g]=Pt[y]=Pt[b]=Pt[v]=Pt[_]=!1;var Ft={};Ft[a]=Ft[u]=Ft[w]=Ft[x]=Ft[s]=Ft[c]=Ft[k]=Ft[E]=Ft[A]=Ft[S]=Ft[M]=Ft[d]=Ft[p]=Ft[g]=Ft[y]=Ft[b]=Ft[v]=Ft[m]=Ft[T]=Ft["[object Uint8ClampedArray]"]=Ft[O]=Ft[D]=!0,Ft[f]=Ft[l]=Ft[_]=!1;var qt={"\\":"\\","'":"'","\n":"n","\r":"r","\u2028":"u2028","\u2029":"u2029"},Ut=parseFloat,zt=parseInt,Yt="object"==typeof t&&t&&t.Object===Object&&t,Vt="object"==typeof self&&self&&self.Object===Object&&self,Gt=Yt||Vt||Function("return this")(),Ht=e&&!e.nodeType&&e,Wt=Ht&&"object"==typeof n&&n&&!n.nodeType&&n,$t=Wt&&Wt.exports===Ht,Kt=$t&&Yt.process,Zt=function(){try{var t=Wt&&Wt.require&&Wt.require("util").types;return t||Kt&&Kt.binding&&Kt.binding("util")}catch(t){}}(),Xt=Zt&&Zt.isArrayBuffer,Jt=Zt&&Zt.isDate,Qt=Zt&&Zt.isMap,te=Zt&&Zt.isRegExp,ee=Zt&&Zt.isSet,ne=Zt&&Zt.isTypedArray;function re(t,e,n){switch(n.length){case 0:return t.call(e);case 1:return t.call(e,n[0]);case 2:return t.call(e,n[0],n[1]);case 3:return t.call(e,n[0],n[1],n[2])}return t.apply(e,n)}function ie(t,e,n,r){for(var i=-1,o=null==t?0:t.length;++i-1}function fe(t,e,n){for(var r=-1,i=null==t?0:t.length;++r-1;);return n}function Ne(t,e){for(var n=t.length;n--&&me(e,t[n],0)>-1;);return n}function Ie(t,e){for(var 
n=t.length,r=0;n--;)t[n]===e&&++r;return r}var Re=Ee({"À":"A","Á":"A","Â":"A","Ã":"A","Ä":"A","Å":"A","à":"a","á":"a","â":"a","ã":"a","ä":"a","å":"a","Ç":"C","ç":"c","Ð":"D","ð":"d","È":"E","É":"E","Ê":"E","Ë":"E","è":"e","é":"e","ê":"e","ë":"e","Ì":"I","Í":"I","Î":"I","Ï":"I","ì":"i","í":"i","î":"i","ï":"i","Ñ":"N","ñ":"n","Ò":"O","Ó":"O","Ô":"O","Õ":"O","Ö":"O","Ø":"O","ò":"o","ó":"o","ô":"o","õ":"o","ö":"o","ø":"o","Ù":"U","Ú":"U","Û":"U","Ü":"U","ù":"u","ú":"u","û":"u","ü":"u","Ý":"Y","ý":"y","ÿ":"y","Æ":"Ae","æ":"ae","Þ":"Th","þ":"th","ß":"ss","Ā":"A","Ă":"A","Ą":"A","ā":"a","ă":"a","ą":"a","Ć":"C","Ĉ":"C","Ċ":"C","Č":"C","ć":"c","ĉ":"c","ċ":"c","č":"c","Ď":"D","Đ":"D","ď":"d","đ":"d","Ē":"E","Ĕ":"E","Ė":"E","Ę":"E","Ě":"E","ē":"e","ĕ":"e","ė":"e","ę":"e","ě":"e","Ĝ":"G","Ğ":"G","Ġ":"G","Ģ":"G","ĝ":"g","ğ":"g","ġ":"g","ģ":"g","Ĥ":"H","Ħ":"H","ĥ":"h","ħ":"h","Ĩ":"I","Ī":"I","Ĭ":"I","Į":"I","İ":"I","ĩ":"i","ī":"i","ĭ":"i","į":"i","ı":"i","Ĵ":"J","ĵ":"j","Ķ":"K","ķ":"k","ĸ":"k","Ĺ":"L","Ļ":"L","Ľ":"L","Ŀ":"L","Ł":"L","ĺ":"l","ļ":"l","ľ":"l","ŀ":"l","ł":"l","Ń":"N","Ņ":"N","Ň":"N","Ŋ":"N","ń":"n","ņ":"n","ň":"n","ŋ":"n","Ō":"O","Ŏ":"O","Ő":"O","ō":"o","ŏ":"o","ő":"o","Ŕ":"R","Ŗ":"R","Ř":"R","ŕ":"r","ŗ":"r","ř":"r","Ś":"S","Ŝ":"S","Ş":"S","Š":"S","ś":"s","ŝ":"s","ş":"s","š":"s","Ţ":"T","Ť":"T","Ŧ":"T","ţ":"t","ť":"t","ŧ":"t","Ũ":"U","Ū":"U","Ŭ":"U","Ů":"U","Ű":"U","Ų":"U","ũ":"u","ū":"u","ŭ":"u","ů":"u","ű":"u","ų":"u","Ŵ":"W","ŵ":"w","Ŷ":"Y","ŷ":"y","Ÿ":"Y","Ź":"Z","Ż":"Z","Ž":"Z","ź":"z","ż":"z","ž":"z","IJ":"IJ","ij":"ij","Œ":"Oe","œ":"oe","ʼn":"'n","ſ":"s"}),je=Ee({"&":"&","<":"<",">":">",'"':""","'":"'"});function Le(t){return"\\"+qt[t]}function Be(t){return Rt.test(t)}function Pe(t){var e=-1,n=Array(t.size);return t.forEach((function(t,r){n[++e]=[r,t]})),n}function Fe(t,e){return function(n){return t(e(n))}}function qe(t,e){for(var n=-1,r=t.length,o=0,a=[];++n",""":'"',"'":"'"});var He=function t(e){var 
n,ft=(e=null==e?Gt:He.defaults(Gt.Object(),e,He.pick(Gt,Lt))).Array,lt=e.Date,ht=e.Error,dt=e.Function,pt=e.Math,gt=e.Object,yt=e.RegExp,bt=e.String,vt=e.TypeError,mt=ft.prototype,_t=dt.prototype,wt=gt.prototype,xt=e["__core-js_shared__"],kt=_t.toString,Et=wt.hasOwnProperty,At=0,St=(n=/[^.]+$/.exec(xt&&xt.keys&&xt.keys.IE_PROTO||""))?"Symbol(src)_1."+n:"",Mt=wt.toString,Tt=kt.call(gt),Ot=Gt._,Nt=yt("^"+kt.call(Et).replace(V,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$"),Rt=$t?e.Buffer:void 0,qt=e.Symbol,Yt=e.Uint8Array,Vt=Rt?Rt.allocUnsafe:void 0,Ht=Fe(gt.getPrototypeOf,gt),Wt=gt.create,Kt=wt.propertyIsEnumerable,Zt=mt.splice,ye=qt?qt.isConcatSpreadable:void 0,Ee=qt?qt.iterator:void 0,We=qt?qt.toStringTag:void 0,$e=function(){try{var t=Qi(gt,"defineProperty");return t({},"",{}),t}catch(t){}}(),Ke=e.clearTimeout!==Gt.clearTimeout&&e.clearTimeout,Ze=lt&<.now!==Gt.Date.now&<.now,Xe=e.setTimeout!==Gt.setTimeout&&e.setTimeout,Je=pt.ceil,Qe=pt.floor,tn=gt.getOwnPropertySymbols,en=Rt?Rt.isBuffer:void 0,nn=e.isFinite,rn=mt.join,on=Fe(gt.keys,gt),an=pt.max,un=pt.min,sn=lt.now,cn=e.parseInt,fn=pt.random,ln=mt.reverse,hn=Qi(e,"DataView"),dn=Qi(e,"Map"),pn=Qi(e,"Promise"),gn=Qi(e,"Set"),yn=Qi(e,"WeakMap"),bn=Qi(gt,"create"),vn=yn&&new yn,mn={},_n=Mo(hn),wn=Mo(dn),xn=Mo(pn),kn=Mo(gn),En=Mo(yn),An=qt?qt.prototype:void 0,Sn=An?An.valueOf:void 0,Mn=An?An.toString:void 0;function Tn(t){if(Va(t)&&!Ia(t)&&!(t instanceof Nn)){if(t instanceof Cn)return t;if(Et.call(t,"__wrapped__"))return To(t)}return new Cn(t)}var On=function(){function t(){}return function(e){if(!Ya(e))return{};if(Wt)return Wt(e);t.prototype=e;var n=new t;return t.prototype=void 0,n}}();function Dn(){}function Cn(t,e){this.__wrapped__=t,this.__actions__=[],this.__chain__=!!e,this.__index__=0,this.__values__=void 0}function 
Nn(t){this.__wrapped__=t,this.__actions__=[],this.__dir__=1,this.__filtered__=!1,this.__iteratees__=[],this.__takeCount__=4294967295,this.__views__=[]}function In(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e=e?t:e)),t}function Zn(t,e,n,r,i,o){var u,f=1&e,_=2&e,C=4&e;if(n&&(u=i?n(t,r,i,o):n(t)),void 0!==u)return u;if(!Ya(t))return t;var N=Ia(t);if(N){if(u=function(t){var e=t.length,n=new t.constructor(e);e&&"string"==typeof t[0]&&Et.call(t,"index")&&(n.index=t.index,n.input=t.input);return n}(t),!f)return bi(t,u)}else{var I=no(t),R=I==l||I==h;if(Ba(t))return li(t,f);if(I==g||I==a||R&&!i){if(u=_||R?{}:io(t),!f)return _?function(t,e){return vi(t,eo(t),e)}(t,function(t,e){return t&&vi(e,wu(e),t)}(u,t)):function(t,e){return vi(t,to(t),e)}(t,Hn(u,t))}else{if(!Ft[I])return i?t:{};u=function(t,e,n){var r=t.constructor;switch(e){case w:return hi(t);case s:case c:return new r(+t);case x:return function(t,e){var n=e?hi(t.buffer):t.buffer;return new t.constructor(n,t.byteOffset,t.byteLength)}(t,n);case k:case E:case A:case S:case M:case T:case"[object Uint8ClampedArray]":case O:case D:return di(t,n);case d:return new r;case p:case v:return new r(t);case y:return function(t){var e=new t.constructor(t.source,et.exec(t));return e.lastIndex=t.lastIndex,e}(t);case b:return new r;case m:return i=t,Sn?gt(Sn.call(i)):{}}var i}(t,I,f)}}o||(o=new Bn);var j=o.get(t);if(j)return j;o.set(t,u),Ka(t)?t.forEach((function(r){u.add(Zn(r,e,n,r,t,o))})):Ga(t)&&t.forEach((function(r,i){u.set(i,Zn(r,e,n,i,t,o))}));var L=N?void 0:(C?_?Hi:Gi:_?wu:_u)(t);return oe(L||t,(function(r,i){L&&(r=t[i=r]),Yn(u,i,Zn(r,e,n,i,t,o))})),u}function Xn(t,e,n){var r=n.length;if(null==t)return!r;for(t=gt(t);r--;){var i=n[r],o=e[i],a=t[i];if(void 0===a&&!(i in t)||!o(a))return!1}return!0}function Jn(t,e,n){if("function"!=typeof t)throw new vt(r);return _o((function(){t.apply(void 0,n)}),e)}function Qn(t,e,n,r){var i=-1,o=ce,a=!0,u=t.length,s=[],c=e.length;if(!u)return 
s;n&&(e=le(e,Te(n))),r?(o=fe,a=!1):e.length>=200&&(o=De,a=!1,e=new Ln(e));t:for(;++i-1},Rn.prototype.set=function(t,e){var n=this.__data__,r=Vn(n,t);return r<0?(++this.size,n.push([t,e])):n[r][1]=e,this},jn.prototype.clear=function(){this.size=0,this.__data__={hash:new In,map:new(dn||Rn),string:new In}},jn.prototype.delete=function(t){var e=Xi(this,t).delete(t);return this.size-=e?1:0,e},jn.prototype.get=function(t){return Xi(this,t).get(t)},jn.prototype.has=function(t){return Xi(this,t).has(t)},jn.prototype.set=function(t,e){var n=Xi(this,t),r=n.size;return n.set(t,e),this.size+=n.size==r?0:1,this},Ln.prototype.add=Ln.prototype.push=function(t){return this.__data__.set(t,"__lodash_hash_undefined__"),this},Ln.prototype.has=function(t){return this.__data__.has(t)},Bn.prototype.clear=function(){this.__data__=new Rn,this.size=0},Bn.prototype.delete=function(t){var e=this.__data__,n=e.delete(t);return this.size=e.size,n},Bn.prototype.get=function(t){return this.__data__.get(t)},Bn.prototype.has=function(t){return this.__data__.has(t)},Bn.prototype.set=function(t,e){var n=this.__data__;if(n instanceof Rn){var r=n.__data__;if(!dn||r.length<199)return r.push([t,e]),this.size=++n.size,this;n=this.__data__=new jn(r)}return n.set(t,e),this.size=n.size,this};var tr=wi(sr),er=wi(cr,!0);function nr(t,e){var n=!0;return tr(t,(function(t,r,i){return n=!!e(t,r,i)})),n}function rr(t,e,n){for(var r=-1,i=t.length;++r0&&n(u)?e>1?or(u,e-1,n,r,i):he(i,u):r||(i[i.length]=u)}return i}var ar=xi(),ur=xi(!0);function sr(t,e){return t&&ar(t,e,_u)}function cr(t,e){return t&&ur(t,e,_u)}function fr(t,e){return se(e,(function(e){return qa(t[e])}))}function lr(t,e){for(var n=0,r=(e=ui(e,t)).length;null!=t&&ne}function gr(t,e){return null!=t&&Et.call(t,e)}function yr(t,e){return null!=t&&e in gt(t)}function br(t,e,n){for(var r=n?fe:ce,i=t[0].length,o=t.length,a=o,u=ft(o),s=1/0,c=[];a--;){var f=t[a];a&&e&&(f=le(f,Te(e))),s=un(f.length,s),u[a]=!n&&(e||i>=120&&f.length>=120)?new Ln(a&&f):void 
0}f=t[0];var l=-1,h=u[0];t:for(;++l=u)return s;var c=n[r];return s*("desc"==c?-1:1)}}return t.index-e.index}(t,e,n)}))}function Ir(t,e,n){for(var r=-1,i=e.length,o={};++r-1;)u!==t&&Zt.call(u,s,1),Zt.call(t,s,1);return t}function jr(t,e){for(var n=t?e.length:0,r=n-1;n--;){var i=e[n];if(n==r||i!==o){var o=i;ao(i)?Zt.call(t,i,1):Qr(t,i)}}return t}function Lr(t,e){return t+Qe(fn()*(e-t+1))}function Br(t,e){var n="";if(!t||e<1||e>9007199254740991)return n;do{e%2&&(n+=t),(e=Qe(e/2))&&(t+=t)}while(e);return n}function Pr(t,e){return wo(go(t,e,Hu),t+"")}function Fr(t){return Fn(Ou(t))}function qr(t,e){var n=Ou(t);return Eo(n,Kn(e,0,n.length))}function Ur(t,e,n,r){if(!Ya(t))return t;for(var i=-1,o=(e=ui(e,t)).length,a=o-1,u=t;null!=u&&++ii?0:i+e),(n=n>i?i:n)<0&&(n+=i),i=e>n?0:n-e>>>0,e>>>=0;for(var o=ft(i);++r>>1,a=t[o];null!==a&&!Xa(a)&&(n?a<=e:a=200){var c=e?null:Bi(t);if(c)return Ue(c);a=!1,i=De,s=new Ln}else s=e?[]:u;t:for(;++r=r?t:Gr(t,e,n)}var fi=Ke||function(t){return Gt.clearTimeout(t)};function li(t,e){if(e)return t.slice();var n=t.length,r=Vt?Vt(n):new t.constructor(n);return t.copy(r),r}function hi(t){var e=new t.constructor(t.byteLength);return new Yt(e).set(new Yt(t)),e}function di(t,e){var n=e?hi(t.buffer):t.buffer;return new t.constructor(n,t.byteOffset,t.length)}function pi(t,e){if(t!==e){var n=void 0!==t,r=null===t,i=t==t,o=Xa(t),a=void 0!==e,u=null===e,s=e==e,c=Xa(e);if(!u&&!c&&!o&&t>e||o&&a&&s&&!u&&!c||r&&a&&s||!n&&s||!i)return 1;if(!r&&!o&&!c&&t1?n[i-1]:void 0,a=i>2?n[2]:void 0;for(o=t.length>3&&"function"==typeof o?(i--,o):void 0,a&&uo(n[0],n[1],a)&&(o=i<3?void 0:o,i=1),e=gt(e);++r-1?i[o?e[a]:a]:void 0}}function Mi(t){return Vi((function(e){var n=e.length,i=n,o=Cn.prototype.thru;for(t&&e.reverse();i--;){var a=e[i];if("function"!=typeof a)throw new vt(r);if(o&&!u&&"wrapper"==$i(a))var u=new Cn([],!0)}for(i=u?i:n;++i1&&v.reverse(),f&&su))return!1;var c=o.get(t);if(c&&o.get(e))return c==e;var f=-1,l=!0,h=2&n?new Ln:void 
0;for(o.set(t,e),o.set(e,t);++f-1&&t%1==0&&t1?"& ":"")+e[r],e=e.join(n>2?", ":" "),t.replace(K,"{\n/* [wrapped with "+e+"] */\n")}(r,function(t,e){return oe(o,(function(n){var r="_."+n[0];e&n[1]&&!ce(t,r)&&t.push(r)})),t.sort()}(function(t){var e=t.match(Z);return e?e[1].split(X):[]}(r),n)))}function ko(t){var e=0,n=0;return function(){var r=sn(),i=16-(r-n);if(n=r,i>0){if(++e>=800)return arguments[0]}else e=0;return t.apply(void 0,arguments)}}function Eo(t,e){var n=-1,r=t.length,i=r-1;for(e=void 0===e?r:e;++n1?t[e-1]:void 0;return n="function"==typeof n?(t.pop(),n):void 0,$o(t,n)}));function ea(t){var e=Tn(t);return e.__chain__=!0,e}function na(t,e){return e(t)}var ra=Vi((function(t){var e=t.length,n=e?t[0]:0,r=this.__wrapped__,i=function(e){return $n(e,t)};return!(e>1||this.__actions__.length)&&r instanceof Nn&&ao(n)?((r=r.slice(n,+n+(e?1:0))).__actions__.push({func:na,args:[i],thisArg:void 0}),new Cn(r,this.__chain__).thru((function(t){return e&&!t.length&&t.push(void 0),t}))):this.thru(i)}));var ia=mi((function(t,e,n){Et.call(t,n)?++t[n]:Wn(t,n,1)}));var oa=Si(No),aa=Si(Io);function ua(t,e){return(Ia(t)?oe:tr)(t,Zi(e,3))}function sa(t,e){return(Ia(t)?ae:er)(t,Zi(e,3))}var ca=mi((function(t,e,n){Et.call(t,n)?t[n].push(e):Wn(t,n,[e])}));var fa=Pr((function(t,e,n){var r=-1,i="function"==typeof e,o=ja(t)?ft(t.length):[];return tr(t,(function(t){o[++r]=i?re(e,t,n):vr(t,e,n)})),o})),la=mi((function(t,e,n){Wn(t,n,e)}));function ha(t,e){return(Ia(t)?le:Mr)(t,Zi(e,3))}var da=mi((function(t,e,n){t[n?0:1].push(e)}),(function(){return[[],[]]}));var pa=Pr((function(t,e){if(null==t)return[];var n=e.length;return n>1&&uo(t,e[0],e[1])?e=[]:n>2&&uo(e[0],e[1],e[2])&&(e=[e[0]]),Nr(t,or(e,1),[])})),ga=Ze||function(){return Gt.Date.now()};function ya(t,e,n){return e=n?void 0:e,Fi(t,128,void 0,void 0,void 0,void 0,e=t&&null==e?t.length:e)}function ba(t,e){var n;if("function"!=typeof e)throw new vt(r);return t=ru(t),function(){return--t>0&&(n=e.apply(this,arguments)),t<=1&&(e=void 
0),n}}var va=Pr((function(t,e,n){var r=1;if(n.length){var i=qe(n,Ki(va));r|=32}return Fi(t,r,e,n,i)})),ma=Pr((function(t,e,n){var r=3;if(n.length){var i=qe(n,Ki(ma));r|=32}return Fi(e,r,t,n,i)}));function _a(t,e,n){var i,o,a,u,s,c,f=0,l=!1,h=!1,d=!0;if("function"!=typeof t)throw new vt(r);function p(e){var n=i,r=o;return i=o=void 0,f=e,u=t.apply(r,n)}function g(t){return f=t,s=_o(b,e),l?p(t):u}function y(t){var n=t-c;return void 0===c||n>=e||n<0||h&&t-f>=a}function b(){var t=ga();if(y(t))return v(t);s=_o(b,function(t){var n=e-(t-c);return h?un(n,a-(t-f)):n}(t))}function v(t){return s=void 0,d&&i?p(t):(i=o=void 0,u)}function m(){var t=ga(),n=y(t);if(i=arguments,o=this,c=t,n){if(void 0===s)return g(c);if(h)return fi(s),s=_o(b,e),p(c)}return void 0===s&&(s=_o(b,e)),u}return e=ou(e)||0,Ya(n)&&(l=!!n.leading,a=(h="maxWait"in n)?an(ou(n.maxWait)||0,e):a,d="trailing"in n?!!n.trailing:d),m.cancel=function(){void 0!==s&&fi(s),f=0,i=c=o=s=void 0},m.flush=function(){return void 0===s?u:v(ga())},m}var wa=Pr((function(t,e){return Jn(t,1,e)})),xa=Pr((function(t,e,n){return Jn(t,ou(e)||0,n)}));function ka(t,e){if("function"!=typeof t||null!=e&&"function"!=typeof e)throw new vt(r);var n=function(){var r=arguments,i=e?e.apply(this,r):r[0],o=n.cache;if(o.has(i))return o.get(i);var a=t.apply(this,r);return n.cache=o.set(i,a)||o,a};return n.cache=new(ka.Cache||jn),n}function Ea(t){if("function"!=typeof t)throw new vt(r);return function(){var e=arguments;switch(e.length){case 0:return!t.call(this);case 1:return!t.call(this,e[0]);case 2:return!t.call(this,e[0],e[1]);case 3:return!t.call(this,e[0],e[1],e[2])}return!t.apply(this,e)}}ka.Cache=jn;var Aa=si((function(t,e){var n=(e=1==e.length&&Ia(e[0])?le(e[0],Te(Zi())):le(or(e,1),Te(Zi()))).length;return Pr((function(r){for(var i=-1,o=un(r.length,n);++i=e})),Na=mr(function(){return arguments}())?mr:function(t){return Va(t)&&Et.call(t,"callee")&&!Kt.call(t,"callee")},Ia=ft.isArray,Ra=Xt?Te(Xt):function(t){return Va(t)&&dr(t)==w};function 
ja(t){return null!=t&&za(t.length)&&!qa(t)}function La(t){return Va(t)&&ja(t)}var Ba=en||os,Pa=Jt?Te(Jt):function(t){return Va(t)&&dr(t)==c};function Fa(t){if(!Va(t))return!1;var e=dr(t);return e==f||"[object DOMException]"==e||"string"==typeof t.message&&"string"==typeof t.name&&!Wa(t)}function qa(t){if(!Ya(t))return!1;var e=dr(t);return e==l||e==h||"[object AsyncFunction]"==e||"[object Proxy]"==e}function Ua(t){return"number"==typeof t&&t==ru(t)}function za(t){return"number"==typeof t&&t>-1&&t%1==0&&t<=9007199254740991}function Ya(t){var e=typeof t;return null!=t&&("object"==e||"function"==e)}function Va(t){return null!=t&&"object"==typeof t}var Ga=Qt?Te(Qt):function(t){return Va(t)&&no(t)==d};function Ha(t){return"number"==typeof t||Va(t)&&dr(t)==p}function Wa(t){if(!Va(t)||dr(t)!=g)return!1;var e=Ht(t);if(null===e)return!0;var n=Et.call(e,"constructor")&&e.constructor;return"function"==typeof n&&n instanceof n&&kt.call(n)==Tt}var $a=te?Te(te):function(t){return Va(t)&&dr(t)==y};var Ka=ee?Te(ee):function(t){return Va(t)&&no(t)==b};function Za(t){return"string"==typeof t||!Ia(t)&&Va(t)&&dr(t)==v}function Xa(t){return"symbol"==typeof t||Va(t)&&dr(t)==m}var Ja=ne?Te(ne):function(t){return Va(t)&&za(t.length)&&!!Pt[dr(t)]};var Qa=Ri(Sr),tu=Ri((function(t,e){return t<=e}));function eu(t){if(!t)return[];if(ja(t))return Za(t)?Ve(t):bi(t);if(Ee&&t[Ee])return function(t){for(var e,n=[];!(e=t.next()).done;)n.push(e.value);return n}(t[Ee]());var e=no(t);return(e==d?Pe:e==b?Ue:Ou)(t)}function nu(t){return t?(t=ou(t))===1/0||t===-1/0?17976931348623157e292*(t<0?-1:1):t==t?t:0:0===t?t:0}function ru(t){var e=nu(t),n=e%1;return e==e?n?e-n:e:0}function iu(t){return t?Kn(ru(t),0,4294967295):0}function ou(t){if("number"==typeof t)return t;if(Xa(t))return NaN;if(Ya(t)){var e="function"==typeof t.valueOf?t.valueOf():t;t=Ya(e)?e+"":e}if("string"!=typeof t)return 0===t?t:+t;t=t.replace(H,"");var n=rt.test(t);return n||ot.test(t)?zt(t.slice(2),n?2:8):nt.test(t)?NaN:+t}function 
au(t){return vi(t,wu(t))}function uu(t){return null==t?"":Xr(t)}var su=_i((function(t,e){if(lo(e)||ja(e))vi(e,_u(e),t);else for(var n in e)Et.call(e,n)&&Yn(t,n,e[n])})),cu=_i((function(t,e){vi(e,wu(e),t)})),fu=_i((function(t,e,n,r){vi(e,wu(e),t,r)})),lu=_i((function(t,e,n,r){vi(e,_u(e),t,r)})),hu=Vi($n);var du=Pr((function(t,e){t=gt(t);var n=-1,r=e.length,i=r>2?e[2]:void 0;for(i&&uo(e[0],e[1],i)&&(r=1);++n1),e})),vi(t,Hi(t),n),r&&(n=Zn(n,7,zi));for(var i=e.length;i--;)Qr(n,e[i]);return n}));var Au=Vi((function(t,e){return null==t?{}:function(t,e){return Ir(t,e,(function(e,n){return yu(t,n)}))}(t,e)}));function Su(t,e){if(null==t)return{};var n=le(Hi(t),(function(t){return[t]}));return e=Zi(e),Ir(t,n,(function(t,n){return e(t,n[0])}))}var Mu=Pi(_u),Tu=Pi(wu);function Ou(t){return null==t?[]:Oe(t,_u(t))}var Du=Ei((function(t,e,n){return e=e.toLowerCase(),t+(n?Cu(e):e)}));function Cu(t){return Fu(uu(t).toLowerCase())}function Nu(t){return(t=uu(t))&&t.replace(ut,Re).replace(Ct,"")}var Iu=Ei((function(t,e,n){return t+(n?"-":"")+e.toLowerCase()})),Ru=Ei((function(t,e,n){return t+(n?" ":"")+e.toLowerCase()})),ju=ki("toLowerCase");var Lu=Ei((function(t,e,n){return t+(n?"_":"")+e.toLowerCase()}));var Bu=Ei((function(t,e,n){return t+(n?" ":"")+Fu(e)}));var Pu=Ei((function(t,e,n){return t+(n?" 
":"")+e.toUpperCase()})),Fu=ki("toUpperCase");function qu(t,e,n){return t=uu(t),void 0===(e=n?void 0:e)?function(t){return jt.test(t)}(t)?function(t){return t.match(It)||[]}(t):function(t){return t.match(J)||[]}(t):t.match(e)||[]}var Uu=Pr((function(t,e){try{return re(t,void 0,e)}catch(t){return Fa(t)?t:new ht(t)}})),zu=Vi((function(t,e){return oe(e,(function(e){e=So(e),Wn(t,e,va(t[e],t))})),t}));function Yu(t){return function(){return t}}var Vu=Mi(),Gu=Mi(!0);function Hu(t){return t}function Wu(t){return kr("function"==typeof t?t:Zn(t,1))}var $u=Pr((function(t,e){return function(n){return vr(n,t,e)}})),Ku=Pr((function(t,e){return function(n){return vr(t,n,e)}}));function Zu(t,e,n){var r=_u(e),i=fr(e,r);null!=n||Ya(e)&&(i.length||!r.length)||(n=e,e=t,t=this,i=fr(e,_u(e)));var o=!(Ya(n)&&"chain"in n&&!n.chain),a=qa(t);return oe(i,(function(n){var r=e[n];t[n]=r,a&&(t.prototype[n]=function(){var e=this.__chain__;if(o||e){var n=t(this.__wrapped__),i=n.__actions__=bi(this.__actions__);return i.push({func:r,args:arguments,thisArg:t}),n.__chain__=e,n}return r.apply(t,he([this.value()],arguments))})})),t}function Xu(){}var Ju=Ci(le),Qu=Ci(ue),ts=Ci(ge);function es(t){return so(t)?ke(So(t)):function(t){return function(e){return lr(e,t)}}(t)}var ns=Ii(),rs=Ii(!0);function is(){return[]}function os(){return!1}var as=Di((function(t,e){return t+e}),0),us=Li("ceil"),ss=Di((function(t,e){return t/e}),1),cs=Li("floor");var fs,ls=Di((function(t,e){return t*e}),1),hs=Li("round"),ds=Di((function(t,e){return t-e}),0);return Tn.after=function(t,e){if("function"!=typeof e)throw new vt(r);return t=ru(t),function(){if(--t<1)return e.apply(this,arguments)}},Tn.ary=ya,Tn.assign=su,Tn.assignIn=cu,Tn.assignInWith=fu,Tn.assignWith=lu,Tn.at=hu,Tn.before=ba,Tn.bind=va,Tn.bindAll=zu,Tn.bindKey=ma,Tn.castArray=function(){if(!arguments.length)return[];var t=arguments[0];return Ia(t)?t:[t]},Tn.chain=ea,Tn.chunk=function(t,e,n){e=(n?uo(t,e,n):void 0===e)?1:an(ru(e),0);var 
r=null==t?0:t.length;if(!r||e<1)return[];for(var i=0,o=0,a=ft(Je(r/e));ii?0:i+n),(r=void 0===r||r>i?i:ru(r))<0&&(r+=i),r=n>r?0:iu(r);n>>0)?(t=uu(t))&&("string"==typeof e||null!=e&&!$a(e))&&!(e=Xr(e))&&Be(t)?ci(Ve(t),0,n):t.split(e,n):[]},Tn.spread=function(t,e){if("function"!=typeof t)throw new vt(r);return e=null==e?0:an(ru(e),0),Pr((function(n){var r=n[e],i=ci(n,0,e);return r&&he(i,r),re(t,this,i)}))},Tn.tail=function(t){var e=null==t?0:t.length;return e?Gr(t,1,e):[]},Tn.take=function(t,e,n){return t&&t.length?Gr(t,0,(e=n||void 0===e?1:ru(e))<0?0:e):[]},Tn.takeRight=function(t,e,n){var r=null==t?0:t.length;return r?Gr(t,(e=r-(e=n||void 0===e?1:ru(e)))<0?0:e,r):[]},Tn.takeRightWhile=function(t,e){return t&&t.length?ei(t,Zi(e,3),!1,!0):[]},Tn.takeWhile=function(t,e){return t&&t.length?ei(t,Zi(e,3)):[]},Tn.tap=function(t,e){return e(t),t},Tn.throttle=function(t,e,n){var i=!0,o=!0;if("function"!=typeof t)throw new vt(r);return Ya(n)&&(i="leading"in n?!!n.leading:i,o="trailing"in n?!!n.trailing:o),_a(t,e,{leading:i,maxWait:e,trailing:o})},Tn.thru=na,Tn.toArray=eu,Tn.toPairs=Mu,Tn.toPairsIn=Tu,Tn.toPath=function(t){return Ia(t)?le(t,So):Xa(t)?[t]:bi(Ao(uu(t)))},Tn.toPlainObject=au,Tn.transform=function(t,e,n){var r=Ia(t),i=r||Ba(t)||Ja(t);if(e=Zi(e,4),null==n){var o=t&&t.constructor;n=i?r?new o:[]:Ya(t)&&qa(o)?On(Ht(t)):{}}return(i?oe:sr)(t,(function(t,r,i){return e(n,t,r,i)})),n},Tn.unary=function(t){return ya(t,1)},Tn.union=Vo,Tn.unionBy=Go,Tn.unionWith=Ho,Tn.uniq=function(t){return t&&t.length?Jr(t):[]},Tn.uniqBy=function(t,e){return t&&t.length?Jr(t,Zi(e,2)):[]},Tn.uniqWith=function(t,e){return e="function"==typeof e?e:void 0,t&&t.length?Jr(t,void 0,e):[]},Tn.unset=function(t,e){return null==t||Qr(t,e)},Tn.unzip=Wo,Tn.unzipWith=$o,Tn.update=function(t,e,n){return null==t?t:ti(t,e,ai(n))},Tn.updateWith=function(t,e,n,r){return r="function"==typeof r?r:void 0,null==t?t:ti(t,e,ai(n),r)},Tn.values=Ou,Tn.valuesIn=function(t){return 
null==t?[]:Oe(t,wu(t))},Tn.without=Ko,Tn.words=qu,Tn.wrap=function(t,e){return Sa(ai(e),t)},Tn.xor=Zo,Tn.xorBy=Xo,Tn.xorWith=Jo,Tn.zip=Qo,Tn.zipObject=function(t,e){return ii(t||[],e||[],Yn)},Tn.zipObjectDeep=function(t,e){return ii(t||[],e||[],Ur)},Tn.zipWith=ta,Tn.entries=Mu,Tn.entriesIn=Tu,Tn.extend=cu,Tn.extendWith=fu,Zu(Tn,Tn),Tn.add=as,Tn.attempt=Uu,Tn.camelCase=Du,Tn.capitalize=Cu,Tn.ceil=us,Tn.clamp=function(t,e,n){return void 0===n&&(n=e,e=void 0),void 0!==n&&(n=(n=ou(n))==n?n:0),void 0!==e&&(e=(e=ou(e))==e?e:0),Kn(ou(t),e,n)},Tn.clone=function(t){return Zn(t,4)},Tn.cloneDeep=function(t){return Zn(t,5)},Tn.cloneDeepWith=function(t,e){return Zn(t,5,e="function"==typeof e?e:void 0)},Tn.cloneWith=function(t,e){return Zn(t,4,e="function"==typeof e?e:void 0)},Tn.conformsTo=function(t,e){return null==e||Xn(t,e,_u(e))},Tn.deburr=Nu,Tn.defaultTo=function(t,e){return null==t||t!=t?e:t},Tn.divide=ss,Tn.endsWith=function(t,e,n){t=uu(t),e=Xr(e);var r=t.length,i=n=void 0===n?r:Kn(ru(n),0,r);return(n-=e.length)>=0&&t.slice(n,i)==e},Tn.eq=Oa,Tn.escape=function(t){return(t=uu(t))&&B.test(t)?t.replace(j,je):t},Tn.escapeRegExp=function(t){return(t=uu(t))&&G.test(t)?t.replace(V,"\\$&"):t},Tn.every=function(t,e,n){var r=Ia(t)?ue:nr;return n&&uo(t,e,n)&&(e=void 0),r(t,Zi(e,3))},Tn.find=oa,Tn.findIndex=No,Tn.findKey=function(t,e){return be(t,Zi(e,3),sr)},Tn.findLast=aa,Tn.findLastIndex=Io,Tn.findLastKey=function(t,e){return be(t,Zi(e,3),cr)},Tn.floor=cs,Tn.forEach=ua,Tn.forEachRight=sa,Tn.forIn=function(t,e){return null==t?t:ar(t,Zi(e,3),wu)},Tn.forInRight=function(t,e){return null==t?t:ur(t,Zi(e,3),wu)},Tn.forOwn=function(t,e){return t&&sr(t,Zi(e,3))},Tn.forOwnRight=function(t,e){return t&&cr(t,Zi(e,3))},Tn.get=gu,Tn.gt=Da,Tn.gte=Ca,Tn.has=function(t,e){return null!=t&&ro(t,e,gr)},Tn.hasIn=yu,Tn.head=jo,Tn.identity=Hu,Tn.includes=function(t,e,n,r){t=ja(t)?t:Ou(t),n=n&&!r?ru(n):0;var i=t.length;return 
n<0&&(n=an(i+n,0)),Za(t)?n<=i&&t.indexOf(e,n)>-1:!!i&&me(t,e,n)>-1},Tn.indexOf=function(t,e,n){var r=null==t?0:t.length;if(!r)return-1;var i=null==n?0:ru(n);return i<0&&(i=an(r+i,0)),me(t,e,i)},Tn.inRange=function(t,e,n){return e=nu(e),void 0===n?(n=e,e=0):n=nu(n),function(t,e,n){return t>=un(e,n)&&t=-9007199254740991&&t<=9007199254740991},Tn.isSet=Ka,Tn.isString=Za,Tn.isSymbol=Xa,Tn.isTypedArray=Ja,Tn.isUndefined=function(t){return void 0===t},Tn.isWeakMap=function(t){return Va(t)&&no(t)==_},Tn.isWeakSet=function(t){return Va(t)&&"[object WeakSet]"==dr(t)},Tn.join=function(t,e){return null==t?"":rn.call(t,e)},Tn.kebabCase=Iu,Tn.last=Fo,Tn.lastIndexOf=function(t,e,n){var r=null==t?0:t.length;if(!r)return-1;var i=r;return void 0!==n&&(i=(i=ru(n))<0?an(r+i,0):un(i,r-1)),e==e?function(t,e,n){for(var r=n+1;r--;)if(t[r]===e)return r;return r}(t,e,i):ve(t,we,i,!0)},Tn.lowerCase=Ru,Tn.lowerFirst=ju,Tn.lt=Qa,Tn.lte=tu,Tn.max=function(t){return t&&t.length?rr(t,Hu,pr):void 0},Tn.maxBy=function(t,e){return t&&t.length?rr(t,Zi(e,2),pr):void 0},Tn.mean=function(t){return xe(t,Hu)},Tn.meanBy=function(t,e){return xe(t,Zi(e,2))},Tn.min=function(t){return t&&t.length?rr(t,Hu,Sr):void 0},Tn.minBy=function(t,e){return t&&t.length?rr(t,Zi(e,2),Sr):void 0},Tn.stubArray=is,Tn.stubFalse=os,Tn.stubObject=function(){return{}},Tn.stubString=function(){return""},Tn.stubTrue=function(){return!0},Tn.multiply=ls,Tn.nth=function(t,e){return t&&t.length?Cr(t,ru(e)):void 0},Tn.noConflict=function(){return Gt._===this&&(Gt._=Ot),this},Tn.noop=Xu,Tn.now=ga,Tn.pad=function(t,e,n){t=uu(t);var r=(e=ru(e))?Ye(t):0;if(!e||r>=e)return t;var i=(e-r)/2;return Ni(Qe(i),n)+t+Ni(Je(i),n)},Tn.padEnd=function(t,e,n){t=uu(t);var r=(e=ru(e))?Ye(t):0;return e&&re){var r=t;t=e,e=r}if(n||t%1||e%1){var i=fn();return un(t+i*(e-t+Ut("1e-"+((i+"").length-1))),e)}return Lr(t,e)},Tn.reduce=function(t,e,n){var r=Ia(t)?de:Ae,i=arguments.length<3;return r(t,Zi(e,4),n,i,tr)},Tn.reduceRight=function(t,e,n){var 
r=Ia(t)?pe:Ae,i=arguments.length<3;return r(t,Zi(e,4),n,i,er)},Tn.repeat=function(t,e,n){return e=(n?uo(t,e,n):void 0===e)?1:ru(e),Br(uu(t),e)},Tn.replace=function(){var t=arguments,e=uu(t[0]);return t.length<3?e:e.replace(t[1],t[2])},Tn.result=function(t,e,n){var r=-1,i=(e=ui(e,t)).length;for(i||(i=1,t=void 0);++r9007199254740991)return[];var n=4294967295,r=un(t,4294967295);t-=4294967295;for(var i=Me(r,e=Zi(e));++n=o)return t;var u=n-Ye(r);if(u<1)return r;var s=a?ci(a,0,u).join(""):t.slice(0,u);if(void 0===i)return s+r;if(a&&(u+=s.length-u),$a(i)){if(t.slice(u).search(i)){var c,f=s;for(i.global||(i=yt(i.source,uu(et.exec(i))+"g")),i.lastIndex=0;c=i.exec(f);)var l=c.index;s=s.slice(0,void 0===l?u:l)}}else if(t.indexOf(Xr(i),u)!=u){var h=s.lastIndexOf(i);h>-1&&(s=s.slice(0,h))}return s+r},Tn.unescape=function(t){return(t=uu(t))&&L.test(t)?t.replace(R,Ge):t},Tn.uniqueId=function(t){var e=++At;return uu(t)+e},Tn.upperCase=Pu,Tn.upperFirst=Fu,Tn.each=ua,Tn.eachRight=sa,Tn.first=jo,Zu(Tn,(fs={},sr(Tn,(function(t,e){Et.call(Tn.prototype,e)||(fs[e]=t)})),fs),{chain:!1}),Tn.VERSION="4.17.15",oe(["bind","bindKey","curry","curryRight","partial","partialRight"],(function(t){Tn[t].placeholder=Tn})),oe(["drop","take"],(function(t,e){Nn.prototype[t]=function(n){n=void 0===n?1:an(ru(n),0);var r=this.__filtered__&&!e?new Nn(this):this.clone();return r.__filtered__?r.__takeCount__=un(n,r.__takeCount__):r.__views__.push({size:un(n,4294967295),type:t+(r.__dir__<0?"Right":"")}),r},Nn.prototype[t+"Right"]=function(e){return this.reverse()[t](e).reverse()}})),oe(["filter","map","takeWhile"],(function(t,e){var n=e+1,r=1==n||3==n;Nn.prototype[t]=function(t){var e=this.clone();return e.__iteratees__.push({iteratee:Zi(t,3),type:n}),e.__filtered__=e.__filtered__||r,e}})),oe(["head","last"],(function(t,e){var n="take"+(e?"Right":"");Nn.prototype[t]=function(){return this[n](1).value()[0]}})),oe(["initial","tail"],(function(t,e){var n="drop"+(e?"":"Right");Nn.prototype[t]=function(){return 
this.__filtered__?new Nn(this):this[n](1)}})),Nn.prototype.compact=function(){return this.filter(Hu)},Nn.prototype.find=function(t){return this.filter(t).head()},Nn.prototype.findLast=function(t){return this.reverse().find(t)},Nn.prototype.invokeMap=Pr((function(t,e){return"function"==typeof t?new Nn(this):this.map((function(n){return vr(n,t,e)}))})),Nn.prototype.reject=function(t){return this.filter(Ea(Zi(t)))},Nn.prototype.slice=function(t,e){t=ru(t);var n=this;return n.__filtered__&&(t>0||e<0)?new Nn(n):(t<0?n=n.takeRight(-t):t&&(n=n.drop(t)),void 0!==e&&(n=(e=ru(e))<0?n.dropRight(-e):n.take(e-t)),n)},Nn.prototype.takeRightWhile=function(t){return this.reverse().takeWhile(t).reverse()},Nn.prototype.toArray=function(){return this.take(4294967295)},sr(Nn.prototype,(function(t,e){var n=/^(?:filter|find|map|reject)|While$/.test(e),r=/^(?:head|last)$/.test(e),i=Tn[r?"take"+("last"==e?"Right":""):e],o=r||/^find/.test(e);i&&(Tn.prototype[e]=function(){var e=this.__wrapped__,a=r?[1]:arguments,u=e instanceof Nn,s=a[0],c=u||Ia(e),f=function(t){var e=i.apply(Tn,he([t],a));return r&&l?e[0]:e};c&&n&&"function"==typeof s&&1!=s.length&&(u=c=!1);var l=this.__chain__,h=!!this.__actions__.length,d=o&&!l,p=u&&!h;if(!o&&c){e=p?e:new Nn(this);var g=t.apply(e,a);return g.__actions__.push({func:na,args:[f],thisArg:void 0}),new Cn(g,l)}return d&&p?t.apply(this,a):(g=this.thru(f),d?r?g.value()[0]:g.value():g)})})),oe(["pop","push","shift","sort","splice","unshift"],(function(t){var e=mt[t],n=/^(?:push|sort|unshift)$/.test(t)?"tap":"thru",r=/^(?:pop|shift)$/.test(t);Tn.prototype[t]=function(){var t=arguments;if(r&&!this.__chain__){var i=this.value();return e.apply(Ia(i)?i:[],t)}return this[n]((function(n){return e.apply(Ia(n)?n:[],t)}))}})),sr(Nn.prototype,(function(t,e){var n=Tn[e];if(n){var r=n.name+"";Et.call(mn,r)||(mn[r]=[]),mn[r].push({name:e,func:n})}})),mn[Ti(void 0,2).name]=[{name:"wrapper",func:void 0}],Nn.prototype.clone=function(){var t=new Nn(this.__wrapped__);return 
t.__actions__=bi(this.__actions__),t.__dir__=this.__dir__,t.__filtered__=this.__filtered__,t.__iteratees__=bi(this.__iteratees__),t.__takeCount__=this.__takeCount__,t.__views__=bi(this.__views__),t},Nn.prototype.reverse=function(){if(this.__filtered__){var t=new Nn(this);t.__dir__=-1,t.__filtered__=!0}else(t=this.clone()).__dir__*=-1;return t},Nn.prototype.value=function(){var t=this.__wrapped__.value(),e=this.__dir__,n=Ia(t),r=e<0,i=n?t.length:0,o=function(t,e,n){var r=-1,i=n.length;for(;++r=this.__values__.length;return{done:t,value:t?void 0:this.__values__[this.__index__++]}},Tn.prototype.plant=function(t){for(var e,n=this;n instanceof Dn;){var r=To(n);r.__index__=0,r.__values__=void 0,e?i.__wrapped__=r:e=r;var i=r;n=n.__wrapped__}return i.__wrapped__=t,e},Tn.prototype.reverse=function(){var t=this.__wrapped__;if(t instanceof Nn){var e=t;return this.__actions__.length&&(e=new Nn(this)),(e=e.reverse()).__actions__.push({func:na,args:[Yo],thisArg:void 0}),new Cn(e,this.__chain__)}return this.thru(Yo)},Tn.prototype.toJSON=Tn.prototype.valueOf=Tn.prototype.value=function(){return ni(this.__wrapped__,this.__actions__)},Tn.prototype.first=Tn.prototype.head,Ee&&(Tn.prototype[Ee]=function(){return this}),Tn}();"function"==typeof define&&"object"==typeof define.amd&&define.amd?(Gt._=He,define((function(){return He}))):Wt?((Wt.exports=He)._=He,Ht._=He):Gt._=He}).call(this)}).call(this,n(25),n(14)(t))},function(t,e,n){"use strict";function r(t){return t*t*t}function i(t){return--t*t*t+1}function o(t){return((t*=2)<=1?t*t*t:(t-=2)*t*t+2)/2}n.d(e,"a",(function(){return r})),n.d(e,"c",(function(){return i})),n.d(e,"b",(function(){return o}))},function(t,e,n){"use strict";n.d(e,"b",(function(){return c})),n.d(e,"a",(function(){return f})),n.d(e,"d",(function(){return b})),n.d(e,"c",(function(){return v}));var r=n(24),i=n(11),o=n(116),a=6/29,u=3*a*a;function s(t){if(t instanceof l)return new l(t.l,t.a,t.b,t.opacity);if(t instanceof m)return _(t);t instanceof 
i.b||(t=Object(i.h)(t));var e,n,r=g(t.r),o=g(t.g),a=g(t.b),u=h((.2225045*r+.7168786*o+.0606169*a)/1);return r===o&&o===a?e=n=u:(e=h((.4360747*r+.3850649*o+.1430804*a)/.96422),n=h((.0139322*r+.0971045*o+.7141733*a)/.82521)),new l(116*u-16,500*(e-u),200*(u-n),t.opacity)}function c(t,e){return new l(t,0,0,null==e?1:e)}function f(t,e,n,r){return 1===arguments.length?s(t):new l(t,e,n,null==r?1:r)}function l(t,e,n,r){this.l=+t,this.a=+e,this.b=+n,this.opacity=+r}function h(t){return t>.008856451679035631?Math.pow(t,1/3):t/u+4/29}function d(t){return t>a?t*t*t:u*(t-4/29)}function p(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function g(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function y(t){if(t instanceof m)return new m(t.h,t.c,t.l,t.opacity);if(t instanceof l||(t=s(t)),0===t.a&&0===t.b)return new m(NaN,00?t>1?Object(r.a)((function(e){e.setTime(Math.floor(e/t)*t)}),(function(e,n){e.setTime(+e+n*t)}),(function(e,n){return(n-e)/t})):i:null},e.a=i;var o=i.range},function(t,e,n){var r=n(75),i=n(28);t.exports=function(t){if(!i(t))return!1;var e=r(t);return"[object Function]"==e||"[object GeneratorFunction]"==e||"[object AsyncFunction]"==e||"[object Proxy]"==e}},function(t,e,n){var r=n(151),i=n(152);t.exports=function(t,e,n,o){var a=!n;n||(n={});for(var u=-1,s=e.length;++u=this._delta8){var n=(t=this.pending).length%this._delta8;this.pending=t.slice(t.length-n,t.length),0===this.pending.length&&(this.pending=null),t=r.join32(t,0,t.length-n,this.endian);for(var i=0;i>>24&255,r[i++]=t>>>16&255,r[i++]=t>>>8&255,r[i++]=255&t}else for(r[i++]=255&t,r[i++]=t>>>8&255,r[i++]=t>>>16&255,r[i++]=t>>>24&255,r[i++]=0,r[i++]=0,r[i++]=0,r[i++]=0,o=8;o ./dist/mermaid.min.js","release":"yarn build -p --config webpack.config.prod.babel.js","lint":"eslint src","e2e:depr":"yarn lint && jest e2e --config e2e/jest.config.js","cypress":"percy exec -- cypress run","e2e":"start-server-and-test dev http://localhost:9000/ cypress","e2e-upd":"yarn lint && jest e2e 
-u --config e2e/jest.config.js","dev":"webpack-dev-server --config webpack.config.e2e.js","test":"yarn lint && jest src/.*","test:watch":"jest --watch src","prepublishOnly":"yarn build && yarn release && yarn test && yarn e2e","prepush":"yarn test"},"repository":{"type":"git","url":"https://github.com/knsv/mermaid"},"author":"Knut Sveidqvist","license":"MIT","standard":{"ignore":["**/parser/*.js","dist/**/*.js","cypress/**/*.js"],"globals":["page"]},"dependencies":{"@braintree/sanitize-url":"^3.1.0","crypto-random-string":"^3.0.1","d3":"^5.7.0","dagre":"^0.8.4","dagre-d3":"^0.6.4","graphlib":"^2.1.7","he":"^1.2.0","lodash":"^4.17.11","minify":"^4.1.1","moment-mini":"^2.22.1","scope-css":"^1.2.1"},"devDependencies":{"@babel/core":"^7.2.2","@babel/preset-env":"^7.8.4","@babel/register":"^7.0.0","@percy/cypress":"*","babel-core":"7.0.0-bridge.0","babel-jest":"^24.9.0","babel-loader":"^8.0.4","coveralls":"^3.0.2","css-loader":"^2.0.1","css-to-string-loader":"^0.1.3","cypress":"4.0.1","documentation":"^12.0.1","eslint":"^6.3.0","eslint-config-prettier":"^6.3.0","eslint-plugin-prettier":"^3.1.0","husky":"^1.2.1","identity-obj-proxy":"^3.0.0","jest":"^24.9.0","jison":"^0.4.18","moment":"^2.23.0","node-sass":"^4.12.0","prettier":"^1.18.2","puppeteer":"^1.17.0","sass-loader":"^7.1.0","start-server-and-test":"^1.10.6","terser-webpack-plugin":"^2.2.2","webpack":"^4.41.2","webpack-cli":"^3.1.2","webpack-dev-server":"^3.4.1","webpack-node-externals":"^1.7.2","yarn-upgrade-all":"^0.5.0"},"files":["dist"],"yarn-upgrade-all":{"ignore":["babel-core"]}}')},function(t,e,n){"use strict";n.d(e,"b",(function(){return i})),n.d(e,"c",(function(){return o})),n.d(e,"a",(function(){return u}));var r,i,o,a=n(210);function u(t){return r=Object(a.a)(t),i=r.format,o=r.formatPrefix,r}u({decimal:".",thousands:",",grouping:[3],currency:["$",""],minus:"-"})},function(t,e,n){var r=n(147),i=n(461),o=n(462),a=n(463),u=n(464),s=n(465);function c(t){var e=this.__data__=new 
r(t);this.size=e.size}c.prototype.clear=i,c.prototype.delete=o,c.prototype.get=a,c.prototype.has=u,c.prototype.set=s,t.exports=c},function(t,e,n){var r=n(456),i=n(457),o=n(458),a=n(459),u=n(460);function s(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e-1&&t%1==0&&t-1&&t%1==0&&t>>24]^f[p>>>16&255]^l[g>>>8&255]^h[255&y]^e[b++],a=c[p>>>24]^f[g>>>16&255]^l[y>>>8&255]^h[255&d]^e[b++],u=c[g>>>24]^f[y>>>16&255]^l[d>>>8&255]^h[255&p]^e[b++],s=c[y>>>24]^f[d>>>16&255]^l[p>>>8&255]^h[255&g]^e[b++],d=o,p=a,g=u,y=s;return o=(r[d>>>24]<<24|r[p>>>16&255]<<16|r[g>>>8&255]<<8|r[255&y])^e[b++],a=(r[p>>>24]<<24|r[g>>>16&255]<<16|r[y>>>8&255]<<8|r[255&d])^e[b++],u=(r[g>>>24]<<24|r[y>>>16&255]<<16|r[d>>>8&255]<<8|r[255&p])^e[b++],s=(r[y>>>24]<<24|r[d>>>16&255]<<16|r[p>>>8&255]<<8|r[255&g])^e[b++],[o>>>=0,a>>>=0,u>>>=0,s>>>=0]}var u=[0,1,2,4,8,16,32,64,128,27,54],s=function(){for(var t=new Array(256),e=0;e<256;e++)t[e]=e<128?e<<1:e<<1^283;for(var n=[],r=[],i=[[],[],[],[]],o=[[],[],[],[]],a=0,u=0,s=0;s<256;++s){var c=u^u<<1^u<<2^u<<3^u<<4;c=c>>>8^255&c^99,n[a]=c,r[c]=a;var f=t[a],l=t[f],h=t[l],d=257*t[c]^16843008*c;i[0][a]=d<<24|d>>>8,i[1][a]=d<<16|d>>>16,i[2][a]=d<<8|d>>>24,i[3][a]=d,d=16843009*h^65537*l^257*f^16843008*a,o[0][c]=d<<24|d>>>8,o[1][c]=d<<16|d>>>16,o[2][c]=d<<8|d>>>24,o[3][c]=d,0===a?a=u=1:(a=f^t[t[t[h^f]]],u^=t[t[u]])}return{SBOX:n,INV_SBOX:r,SUB_MIX:i,INV_SUB_MIX:o}}();function c(t){this._key=i(t),this._reset()}c.blockSize=16,c.keySize=32,c.prototype.blockSize=c.blockSize,c.prototype.keySize=c.keySize,c.prototype._reset=function(){for(var t=this._key,e=t.length,n=e+6,r=4*(n+1),i=[],o=0;o>>24,a=s.SBOX[a>>>24]<<24|s.SBOX[a>>>16&255]<<16|s.SBOX[a>>>8&255]<<8|s.SBOX[255&a],a^=u[o/e|0]<<24):e>6&&o%e==4&&(a=s.SBOX[a>>>24]<<24|s.SBOX[a>>>16&255]<<16|s.SBOX[a>>>8&255]<<8|s.SBOX[255&a]),i[o]=i[o-e]^a}for(var 
c=[],f=0;f>>24]]^s.INV_SUB_MIX[1][s.SBOX[h>>>16&255]]^s.INV_SUB_MIX[2][s.SBOX[h>>>8&255]]^s.INV_SUB_MIX[3][s.SBOX[255&h]]}this._nRounds=n,this._keySchedule=i,this._invKeySchedule=c},c.prototype.encryptBlockRaw=function(t){return a(t=i(t),this._keySchedule,s.SUB_MIX,s.SBOX,this._nRounds)},c.prototype.encryptBlock=function(t){var e=this.encryptBlockRaw(t),n=r.allocUnsafe(16);return n.writeUInt32BE(e[0],0),n.writeUInt32BE(e[1],4),n.writeUInt32BE(e[2],8),n.writeUInt32BE(e[3],12),n},c.prototype.decryptBlock=function(t){var e=(t=i(t))[1];t[1]=t[3],t[3]=e;var n=a(t,this._invKeySchedule,s.INV_SUB_MIX,s.INV_SBOX,this._nRounds),o=r.allocUnsafe(16);return o.writeUInt32BE(n[0],0),o.writeUInt32BE(n[3],4),o.writeUInt32BE(n[2],8),o.writeUInt32BE(n[1],12),o},c.prototype.scrub=function(){o(this._keySchedule),o(this._invKeySchedule),o(this._key)},t.exports.AES=c},function(t,e,n){var r=n(3).Buffer,i=n(264);t.exports=function(t,e,n,o){if(r.isBuffer(t)||(t=r.from(t,"binary")),e&&(r.isBuffer(e)||(e=r.from(e,"binary")),8!==e.length))throw new RangeError("salt should be Buffer with 8 byte length");for(var a=n/8,u=r.alloc(a),s=r.alloc(o||0),c=r.alloc(0);a>0||o>0;){var f=new i;f.update(c),f.update(t),e&&f.update(e),c=f.digest();var l=0;if(a>0){var h=u.length-a;l=Math.min(a,c.length),c.copy(u,h,0,l),a-=l}if(l0){var d=s.length-o,p=Math.min(o,c.length-l);c.copy(s,d,l,l+p),o-=p}}return c.fill(0),{key:u,iv:s}}},function(t,e,n){"use strict";var r=n(12),i=n(33),o=i.getNAF,a=i.getJSF,u=i.assert;function s(t,e){this.type=t,this.p=new r(e.p,16),this.red=e.prime?r.red(e.prime):r.mont(this.p),this.zero=new r(0).toRed(this.red),this.one=new r(1).toRed(this.red),this.two=new r(2).toRed(this.red),this.n=e.n&&new r(e.n,16),this.g=e.g&&this.pointFromJSON(e.g,e.gRed),this._wnafT1=new Array(4),this._wnafT2=new Array(4),this._wnafT3=new Array(4),this._wnafT4=new Array(4),this._bitLength=this.n?this.n.bitLength():0;var 
n=this.n&&this.p.div(this.n);!n||n.cmpn(100)>0?this.redN=null:(this._maxwellTrick=!0,this.redN=this.n.toRed(this.red))}function c(t,e){this.curve=t,this.type=e,this.precomputed=null}t.exports=s,s.prototype.point=function(){throw new Error("Not implemented")},s.prototype.validate=function(){throw new Error("Not implemented")},s.prototype._fixedNafMul=function(t,e){u(t.precomputed);var n=t._getDoubles(),r=o(e,1,this._bitLength),i=(1<=s;e--)c=(c<<1)+r[e];a.push(c)}for(var f=this.jpoint(null,null,null),l=this.jpoint(null,null,null),h=i;h>0;h--){for(s=0;s=0;c--){for(e=0;c>=0&&0===a[c];c--)e++;if(c>=0&&e++,s=s.dblp(e),c<0)break;var f=a[c];u(0!==f),s="affine"===t.type?f>0?s.mixedAdd(i[f-1>>1]):s.mixedAdd(i[-f-1>>1].neg()):f>0?s.add(i[f-1>>1]):s.add(i[-f-1>>1].neg())}return"affine"===t.type?s.toP():s},s.prototype._wnafMulAdd=function(t,e,n,r,i){for(var u=this._wnafT1,s=this._wnafT2,c=this._wnafT3,f=0,l=0;l=1;l-=2){var d=l-1,p=l;if(1===u[d]&&1===u[p]){var g=[e[d],null,null,e[p]];0===e[d].y.cmp(e[p].y)?(g[1]=e[d].add(e[p]),g[2]=e[d].toJ().mixedAdd(e[p].neg())):0===e[d].y.cmp(e[p].y.redNeg())?(g[1]=e[d].toJ().mixedAdd(e[p]),g[2]=e[d].add(e[p].neg())):(g[1]=e[d].toJ().mixedAdd(e[p]),g[2]=e[d].toJ().mixedAdd(e[p].neg()));var y=[-3,-1,-5,-7,0,7,5,1,3],b=a(n[d],n[p]);f=Math.max(b[0].length,f),c[d]=new Array(f),c[p]=new Array(f);for(var v=0;v=0;l--){for(var k=0;l>=0;){var E=!0;for(v=0;v=0&&k++,w=w.dblp(k),l<0)break;for(v=0;v0?A=s[v][S-1>>1]:S<0&&(A=s[v][-S-1>>1].neg()),w="affine"===A.type?w.mixedAdd(A):w.add(A))}}for(l=0;l=Math.ceil((t.bitLength()+1)/e.step)},c.prototype._getDoubles=function(t,e){if(this.precomputed&&this.precomputed.doubles)return this.precomputed.doubles;for(var n=[this],r=this,i=0;i0?1:t<0?-1:0},k=Math.sqrt,E=Math.tan;function A(t){return t>1?0:t<-1?u:Math.acos(t)}function S(t){return t>1?s:t<-1?-s:Math.asin(t)}function M(t){return(t=w(t/2))*t}function T(){}function O(t,e){t&&C.hasOwnProperty(t.type)&&C[t.type](t,e)}var 
D={Feature:function(t,e){O(t.geometry,e)},FeatureCollection:function(t,e){for(var n=t.features,r=-1,i=n.length;++r=0?1:-1,i=r*n,o=y(e=(e*=h)/2+c),a=w(e),u=P*a,s=B*o+u*y(i),f=u*r*w(i);q.add(g(f,s)),L=t,B=o,P=a}var W=function(t){return U.reset(),F(t,z),2*U};function $(t){return[g(t[1],t[0]),S(t[2])]}function K(t){var e=t[0],n=t[1],r=y(n);return[r*y(e),r*w(e),w(n)]}function Z(t,e){return t[0]*e[0]+t[1]*e[1]+t[2]*e[2]}function X(t,e){return[t[1]*e[2]-t[2]*e[1],t[2]*e[0]-t[0]*e[2],t[0]*e[1]-t[1]*e[0]]}function J(t,e){t[0]+=e[0],t[1]+=e[1],t[2]+=e[2]}function Q(t,e){return[t[0]*e,t[1]*e,t[2]*e]}function tt(t){var e=k(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]);t[0]/=e,t[1]/=e,t[2]/=e}var et,nt,rt,it,ot,at,ut,st,ct,ft,lt=r(),ht={point:dt,lineStart:gt,lineEnd:yt,polygonStart:function(){ht.point=bt,ht.lineStart=vt,ht.lineEnd=mt,lt.reset(),z.polygonStart()},polygonEnd:function(){z.polygonEnd(),ht.point=dt,ht.lineStart=gt,ht.lineEnd=yt,q<0?(et=-(rt=180),nt=-(it=90)):lt>1e-6?it=90:lt<-1e-6&&(nt=-90),ft[0]=et,ft[1]=rt},sphere:function(){et=-(rt=180),nt=-(it=90)}};function dt(t,e){ct.push(ft=[et=t,rt=t]),eit&&(it=e)}function pt(t,e){var n=K([t*h,e*h]);if(st){var r=X(st,n),i=X([r[1],-r[0],0],r);tt(i),i=$(i);var o,a=t-ot,u=a>0?1:-1,s=i[0]*l*u,c=d(a)>180;c^(u*otit&&(it=o):c^(u*ot<(s=(s+360)%360-180)&&sit&&(it=e)),c?t_t(et,rt)&&(rt=t):_t(t,rt)>_t(et,rt)&&(et=t):rt>=et?(trt&&(rt=t)):t>ot?_t(et,t)>_t(et,rt)&&(rt=t):_t(t,rt)>_t(et,rt)&&(et=t)}else ct.push(ft=[et=t,rt=t]);eit&&(it=e),st=n,ot=t}function gt(){ht.point=pt}function yt(){ft[0]=et,ft[1]=rt,ht.point=dt,st=null}function bt(t,e){if(st){var n=t-ot;lt.add(d(n)>180?n+(n>0?360:-360):n)}else at=t,ut=e;z.point(t,e),pt(t,e)}function vt(){z.lineStart()}function mt(){bt(at,ut),z.lineEnd(),d(lt)>1e-6&&(et=-(rt=180)),ft[0]=et,ft[1]=rt,st=null}function _t(t,e){return(e-=t)<0?e+360:e}function wt(t,e){return t[0]-e[0]}function xt(t,e){return 
t[0]<=t[1]?t[0]<=e&&e<=t[1]:e_t(r[0],r[1])&&(r[1]=i[1]),_t(i[0],r[1])>_t(r[0],r[1])&&(r[0]=i[0])):o.push(r=i);for(a=-1/0,e=0,r=o[n=o.length-1];e<=n;r=i,++e)i=o[e],(u=_t(r[1],i[0]))>a&&(a=u,et=i[0],rt=r[1])}return ct=ft=null,et===1/0||nt===1/0?[[NaN,NaN],[NaN,NaN]]:[[et,nt],[rt,it]]},qt={sphere:T,point:Ut,lineStart:Yt,lineEnd:Ht,polygonStart:function(){qt.lineStart=Wt,qt.lineEnd=$t},polygonEnd:function(){qt.lineStart=Yt,qt.lineEnd=Ht}};function Ut(t,e){t*=h;var n=y(e*=h);zt(n*y(t),n*w(t),w(e))}function zt(t,e,n){++kt,At+=(t-At)/kt,St+=(e-St)/kt,Mt+=(n-Mt)/kt}function Yt(){qt.point=Vt}function Vt(t,e){t*=h;var n=y(e*=h);Lt=n*y(t),Bt=n*w(t),Pt=w(e),qt.point=Gt,zt(Lt,Bt,Pt)}function Gt(t,e){t*=h;var n=y(e*=h),r=n*y(t),i=n*w(t),o=w(e),a=g(k((a=Bt*o-Pt*i)*a+(a=Pt*r-Lt*o)*a+(a=Lt*i-Bt*r)*a),Lt*r+Bt*i+Pt*o);Et+=a,Tt+=a*(Lt+(Lt=r)),Ot+=a*(Bt+(Bt=i)),Dt+=a*(Pt+(Pt=o)),zt(Lt,Bt,Pt)}function Ht(){qt.point=Ut}function Wt(){qt.point=Kt}function $t(){Zt(Rt,jt),qt.point=Ut}function Kt(t,e){Rt=t,jt=e,t*=h,e*=h,qt.point=Zt;var n=y(e);Lt=n*y(t),Bt=n*w(t),Pt=w(e),zt(Lt,Bt,Pt)}function Zt(t,e){t*=h;var n=y(e*=h),r=n*y(t),i=n*w(t),o=w(e),a=Bt*o-Pt*i,u=Pt*r-Lt*o,s=Lt*i-Bt*r,c=k(a*a+u*u+s*s),f=S(c),l=c&&-f/c;Ct+=l*a,Nt+=l*u,It+=l*s,Et+=f,Tt+=f*(Lt+(Lt=r)),Ot+=f*(Bt+(Bt=i)),Dt+=f*(Pt+(Pt=o)),zt(Lt,Bt,Pt)}var Xt=function(t){kt=Et=At=St=Mt=Tt=Ot=Dt=Ct=Nt=It=0,F(t,qt);var e=Ct,n=Nt,r=It,i=e*e+n*n+r*r;return i<1e-12&&(e=Tt,n=Ot,r=Dt,Et<1e-6&&(e=At,n=St,r=Mt),(i=e*e+n*n+r*r)<1e-12)?[NaN,NaN]:[g(n,e)*l,S(r/k(i))*l]},Jt=function(t){return function(){return t}},Qt=function(t,e){function n(n,r){return n=t(n,r),e(n[0],n[1])}return t.invert&&e.invert&&(n.invert=function(n,r){return(n=e.invert(n,r))&&t.invert(n[0],n[1])}),n};function te(t,e){return[d(t)>u?t+Math.round(-t/f)*f:t,e]}function ee(t,e,n){return(t%=f)?e||n?Qt(re(t),ie(e,n)):re(t):e||n?ie(e,n):te}function ne(t){return function(e,n){return[(e+=t)>u?e-f:e<-u?e+f:e,n]}}function re(t){var e=ne(t);return e.invert=ne(-t),e}function ie(t,e){var 
n=y(t),r=w(t),i=y(e),o=w(e);function a(t,e){var a=y(e),u=y(t)*a,s=w(t)*a,c=w(e),f=c*n+u*r;return[g(s*i-f*o,u*n-c*r),S(f*i+s*o)]}return a.invert=function(t,e){var a=y(e),u=y(t)*a,s=w(t)*a,c=w(e),f=c*i-s*o;return[g(s*i+c*o,u*n+f*r),S(f*n-u*r)]},a}te.invert=te;var oe=function(t){function e(e){return(e=t(e[0]*h,e[1]*h))[0]*=l,e[1]*=l,e}return t=ee(t[0]*h,t[1]*h,t.length>2?t[2]*h:0),e.invert=function(e){return(e=t.invert(e[0]*h,e[1]*h))[0]*=l,e[1]*=l,e},e};function ae(t,e,n,r,i,o){if(n){var a=y(e),u=w(e),s=r*n;null==i?(i=e+r*f,o=e-s/2):(i=ue(a,i),o=ue(a,o),(r>0?io)&&(i+=r*f));for(var c,l=i;r>0?l>o:l1&&e.push(e.pop().concat(e.shift()))},result:function(){var n=e;return e=[],t=null,n}}},fe=function(t,e){return d(t[0]-e[0])<1e-6&&d(t[1]-e[1])<1e-6};function le(t,e,n,r){this.x=t,this.z=e,this.o=n,this.e=r,this.v=!1,this.n=this.p=null}var he=function(t,e,n,r,i){var o,a,u=[],s=[];if(t.forEach((function(t){if(!((e=t.length-1)<=0)){var e,n,r=t[0],a=t[e];if(fe(r,a)){for(i.lineStart(),o=0;o=0;--o)i.point((f=c[o])[0],f[1]);else r(h.x,h.p.x,-1,i);h=h.p}c=(h=h.o).z,d=!d}while(!h.v);i.lineEnd()}}};function de(t){if(e=t.length){for(var e,n,r=0,i=t[0];++r=0?1:-1,I=N*C,R=I>u,j=x*O;if(pe.add(g(j*N*w(I),k*D+j*y(I))),a+=R?C+N*f:C,R^m>=n^M>=n){var L=X(K(v),K(A));tt(L);var B=X(o,L);tt(B);var P=(R^C>=0?-1:1)*S(B[2]);(r>P||r===P&&(L[0]||L[1]))&&(l+=R^C>=0?1:-1)}}return(a<-1e-6||a<1e-6&&pe<-1e-6)^1&l},be=n(0),ve=function(t,e,n,r){return function(i){var o,a,u,s=e(i),c=ce(),f=e(c),l=!1,h={point:d,lineStart:g,lineEnd:y,polygonStart:function(){h.point=b,h.lineStart=v,h.lineEnd=m,a=[],o=[]},polygonEnd:function(){h.point=d,h.lineStart=g,h.lineEnd=y,a=Object(be.n)(a);var t=ye(o,r);a.length?(l||(i.polygonStart(),l=!0),he(a,_e,t,n,i)):t&&(l||(i.polygonStart(),l=!0),i.lineStart(),n(null,null,1,i),i.lineEnd()),l&&(i.polygonEnd(),l=!1),a=o=null},sphere:function(){i.polygonStart(),i.lineStart(),n(null,null,1,i),i.lineEnd(),i.polygonEnd()}};function d(e,n){t(e,n)&&i.point(e,n)}function 
p(t,e){s.point(t,e)}function g(){h.point=p,s.lineStart()}function y(){h.point=d,s.lineEnd()}function b(t,e){u.push([t,e]),f.point(t,e)}function v(){f.lineStart(),u=[]}function m(){b(u[0][0],u[0][1]),f.lineEnd();var t,e,n,r,s=f.clean(),h=c.result(),d=h.length;if(u.pop(),o.push(u),u=null,d)if(1&s){if((e=(n=h[0]).length-1)>0){for(l||(i.polygonStart(),l=!0),i.lineStart(),t=0;t1&&2&s&&h.push(h.pop().concat(h.shift())),a.push(h.filter(me))}return h}};function me(t){return t.length>1}function _e(t,e){return((t=t.x)[0]<0?t[1]-s-1e-6:s-t[1])-((e=e.x)[0]<0?e[1]-s-1e-6:s-e[1])}var we=ve((function(){return!0}),(function(t){var e,n=NaN,r=NaN,i=NaN;return{lineStart:function(){t.lineStart(),e=1},point:function(o,a){var c=o>0?u:-u,f=d(o-n);d(f-u)<1e-6?(t.point(n,r=(r+a)/2>0?s:-s),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(c,r),t.point(o,r),e=0):i!==c&&f>=u&&(d(n-i)<1e-6&&(n-=1e-6*i),d(o-c)<1e-6&&(o-=1e-6*c),r=function(t,e,n,r){var i,o,a=w(t-n);return d(a)>1e-6?p((w(e)*(o=y(r))*w(n)-w(r)*(i=y(e))*w(t))/(i*o*a)):(e+r)/2}(n,r,o,a),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(c,r),e=0),t.point(n=o,r=a),i=c},lineEnd:function(){t.lineEnd(),n=r=NaN},clean:function(){return 2-e}}}),(function(t,e,n,r){var i;if(null==t)i=n*s,r.point(-u,i),r.point(0,i),r.point(u,i),r.point(u,0),r.point(u,-i),r.point(0,-i),r.point(-u,-i),r.point(-u,0),r.point(-u,i);else if(d(t[0]-e[0])>1e-6){var o=t[0]0,i=d(e)>1e-6;function o(t,n){return y(t)*y(n)>e}function a(t,n,r){var i=[1,0,0],o=X(K(t),K(n)),a=Z(o,o),s=o[0],c=a-s*s;if(!c)return!r&&t;var f=e*a/c,l=-e*s/c,h=X(i,o),p=Q(i,f);J(p,Q(o,l));var g=h,y=Z(p,g),b=Z(g,g),v=y*y-b*(Z(p,p)-1);if(!(v<0)){var m=k(v),_=Q(g,(-y-m)/b);if(J(_,p),_=$(_),!r)return _;var w,x=t[0],E=n[0],A=t[1],S=n[1];E0^_[1]<(d(_[0]-x)<1e-6?A:S):A<=_[1]&&_[1]<=S:M>u^(x<=_[0]&&_[0]<=E)){var O=Q(g,(-y+m)/b);return J(O,p),[_,$(O)]}}}function s(e,n){var i=r?t:u-t,o=0;return e<-i?o|=1:e>i&&(o|=2),n<-i?o|=4:n>i&&(o|=8),o}return ve(o,(function(t){var 
e,n,c,f,l;return{lineStart:function(){f=c=!1,l=1},point:function(h,d){var p,g=[h,d],y=o(h,d),b=r?y?0:s(h,d):y?s(h+(h<0?u:-u),d):0;if(!e&&(f=c=y)&&t.lineStart(),y!==c&&(!(p=a(e,g))||fe(e,p)||fe(g,p))&&(g[0]+=1e-6,g[1]+=1e-6,y=o(g[0],g[1])),y!==c)l=0,y?(t.lineStart(),p=a(g,e),t.point(p[0],p[1])):(p=a(e,g),t.point(p[0],p[1]),t.lineEnd()),e=p;else if(i&&e&&r^y){var v;b&n||!(v=a(g,e,!0))||(l=0,r?(t.lineStart(),t.point(v[0][0],v[0][1]),t.point(v[1][0],v[1][1]),t.lineEnd()):(t.point(v[1][0],v[1][1]),t.lineEnd(),t.lineStart(),t.point(v[0][0],v[0][1])))}!y||e&&fe(e,g)||t.point(g[0],g[1]),e=g,c=y,n=b},lineEnd:function(){c&&t.lineEnd(),e=null},clean:function(){return l|(f&&c)<<1}}}),(function(e,r,i,o){ae(o,t,n,i,e,r)}),r?[0,-t]:[-u,t-u])};function ke(t,e,n,r){function i(i,o){return t<=i&&i<=n&&e<=o&&o<=r}function o(i,o,u,c){var f=0,l=0;if(null==i||(f=a(i,u))!==(l=a(o,u))||s(i,o)<0^u>0)do{c.point(0===f||3===f?t:n,f>1?r:e)}while((f=(f+u+4)%4)!==l);else c.point(o[0],o[1])}function a(r,i){return d(r[0]-t)<1e-6?i>0?0:3:d(r[0]-n)<1e-6?i>0?2:1:d(r[1]-e)<1e-6?i>0?1:0:i>0?3:2}function u(t,e){return s(t.x,e.x)}function s(t,e){var n=a(t,1),r=a(e,1);return n!==r?n-r:0===n?e[1]-t[1]:1===n?t[0]-e[0]:2===n?t[1]-e[1]:e[0]-t[0]}return function(a){var s,c,f,l,h,d,p,g,y,b,v,m=a,_=ce(),w={point:x,lineStart:function(){w.point=k,c&&c.push(f=[]);b=!0,y=!1,p=g=NaN},lineEnd:function(){s&&(k(l,h),d&&y&&_.rejoin(),s.push(_.result()));w.point=x,y&&m.lineEnd()},polygonStart:function(){m=_,s=[],c=[],v=!0},polygonEnd:function(){var e=function(){for(var e=0,n=0,i=c.length;nr&&(h-o)*(r-a)>(d-a)*(t-o)&&++e:d<=r&&(h-o)*(r-a)<(d-a)*(t-o)&&--e;return e}(),n=v&&e,i=(s=Object(be.n)(s)).length;(n||i)&&(a.polygonStart(),n&&(a.lineStart(),o(null,null,1,a),a.lineEnd()),i&&he(s,u,e,o,a),a.polygonEnd());m=a,s=c=f=null}};function x(t,e){i(t,e)&&m.point(t,e)}function k(o,a){var u=i(o,a);if(c&&f.push([o,a]),b)l=o,h=a,d=u,b=!1,u&&(m.lineStart(),m.point(o,a));else if(u&&y)m.point(o,a);else{var 
s=[p=Math.max(-1e9,Math.min(1e9,p)),g=Math.max(-1e9,Math.min(1e9,g))],_=[o=Math.max(-1e9,Math.min(1e9,o)),a=Math.max(-1e9,Math.min(1e9,a))];!function(t,e,n,r,i,o){var a,u=t[0],s=t[1],c=0,f=1,l=e[0]-u,h=e[1]-s;if(a=n-u,l||!(a>0)){if(a/=l,l<0){if(a0){if(a>f)return;a>c&&(c=a)}if(a=i-u,l||!(a<0)){if(a/=l,l<0){if(a>f)return;a>c&&(c=a)}else if(l>0){if(a0)){if(a/=h,h<0){if(a0){if(a>f)return;a>c&&(c=a)}if(a=o-s,h||!(a<0)){if(a/=h,h<0){if(a>f)return;a>c&&(c=a)}else if(h>0){if(a0&&(t[0]=u+c*l,t[1]=s+c*h),f<1&&(e[0]=u+f*l,e[1]=s+f*h),!0}}}}}(s,_,t,e,n,r)?u&&(m.lineStart(),m.point(o,a),v=!1):(y||(m.lineStart(),m.point(s[0],s[1])),m.point(_[0],_[1]),u||m.lineEnd(),v=!1)}p=o,g=a,y=u}return w}}var Ee,Ae,Se,Me=function(){var t,e,n,r=0,i=0,o=960,a=500;return n={stream:function(n){return t&&e===n?t:t=ke(r,i,o,a)(e=n)},extent:function(u){return arguments.length?(r=+u[0][0],i=+u[0][1],o=+u[1][0],a=+u[1][1],t=e=null,n):[[r,i],[o,a]]}}},Te=r(),Oe={sphere:T,point:T,lineStart:function(){Oe.point=Ce,Oe.lineEnd=De},lineEnd:T,polygonStart:T,polygonEnd:T};function De(){Oe.point=Oe.lineEnd=T}function Ce(t,e){Ee=t*=h,Ae=w(e*=h),Se=y(e),Oe.point=Ne}function Ne(t,e){t*=h;var n=w(e*=h),r=y(e),i=d(t-Ee),o=y(i),a=r*w(i),u=Se*n-Ae*r*o,s=Ae*n+Se*r*o;Te.add(g(k(a*a+u*u),s)),Ee=t,Ae=n,Se=r}var Ie=function(t){return Te.reset(),F(t,Oe),+Te},Re=[null,null],je={type:"LineString",coordinates:Re},Le=function(t,e){return Re[0]=t,Re[1]=e,Ie(je)},Be={Feature:function(t,e){return Fe(t.geometry,e)},FeatureCollection:function(t,e){for(var n=t.features,r=-1,i=n.length;++r0&&(i=Le(t[o],t[o-1]))>0&&n<=i&&r<=i&&(n+r-i)*(1-Math.pow((n-r)/i,2))<1e-12*i)return!0;n=r}return!1}function ze(t,e){return!!ye(t.map(Ye),Ve(e))}function Ye(t){return(t=t.map(Ve)).pop(),t}function Ve(t){return[t[0]*h,t[1]*h]}var Ge=function(t,e){return(t&&Be.hasOwnProperty(t.type)?Be[t.type]:Fe)(t,e)};function He(t,e,n){var r=Object(be.s)(t,e-1e-6,n).concat(e);return function(t){return r.map((function(e){return[t,e]}))}}function We(t,e,n){var 
r=Object(be.s)(t,e-1e-6,n).concat(e);return function(t){return r.map((function(e){return[e,t]}))}}function $e(){var t,e,n,r,i,o,a,u,s,c,f,l,h=10,p=h,g=90,y=360,v=2.5;function m(){return{type:"MultiLineString",coordinates:_()}}function _(){return Object(be.s)(b(r/g)*g,n,g).map(f).concat(Object(be.s)(b(u/y)*y,a,y).map(l)).concat(Object(be.s)(b(e/h)*h,t,h).filter((function(t){return d(t%g)>1e-6})).map(s)).concat(Object(be.s)(b(o/p)*p,i,p).filter((function(t){return d(t%y)>1e-6})).map(c))}return m.lines=function(){return _().map((function(t){return{type:"LineString",coordinates:t}}))},m.outline=function(){return{type:"Polygon",coordinates:[f(r).concat(l(a).slice(1),f(n).reverse().slice(1),l(u).reverse().slice(1))]}},m.extent=function(t){return arguments.length?m.extentMajor(t).extentMinor(t):m.extentMinor()},m.extentMajor=function(t){return arguments.length?(r=+t[0][0],n=+t[1][0],u=+t[0][1],a=+t[1][1],r>n&&(t=r,r=n,n=t),u>a&&(t=u,u=a,a=t),m.precision(v)):[[r,u],[n,a]]},m.extentMinor=function(n){return arguments.length?(e=+n[0][0],t=+n[1][0],o=+n[0][1],i=+n[1][1],e>t&&(n=e,e=t,t=n),o>i&&(n=o,o=i,i=n),m.precision(v)):[[e,o],[t,i]]},m.step=function(t){return arguments.length?m.stepMajor(t).stepMinor(t):m.stepMinor()},m.stepMajor=function(t){return arguments.length?(g=+t[0],y=+t[1],m):[g,y]},m.stepMinor=function(t){return arguments.length?(h=+t[0],p=+t[1],m):[h,p]},m.precision=function(h){return arguments.length?(v=+h,s=He(o,i,90),c=We(e,t,v),f=He(u,a,90),l=We(r,n,v),m):v},m.extentMajor([[-180,1e-6-90],[180,90-1e-6]]).extentMinor([[-180,-80-1e-6],[180,80+1e-6]])}function Ke(){return $e()()}var Ze,Xe,Je,Qe,tn=function(t,e){var n=t[0]*h,r=t[1]*h,i=e[0]*h,o=e[1]*h,a=y(r),u=w(r),s=y(o),c=w(o),f=a*y(n),d=a*w(n),p=s*y(i),b=s*w(i),v=2*S(k(M(o-r)+a*s*M(i-n))),m=w(v),_=v?function(t){var e=w(t*=v)/m,n=w(v-t)/m,r=n*f+e*p,i=n*d+e*b,o=n*u+e*c;return[g(i,r)*l,g(o,k(r*r+i*i))*l]}:function(){return[n*l,r*l]};return _.distance=v,_},en=function(t){return 
t},nn=r(),rn=r(),on={point:T,lineStart:T,lineEnd:T,polygonStart:function(){on.lineStart=an,on.lineEnd=cn},polygonEnd:function(){on.lineStart=on.lineEnd=on.point=T,nn.add(d(rn)),rn.reset()},result:function(){var t=nn/2;return nn.reset(),t}};function an(){on.point=un}function un(t,e){on.point=sn,Ze=Je=t,Xe=Qe=e}function sn(t,e){rn.add(Qe*t-Je*e),Je=t,Qe=e}function cn(){sn(Ze,Xe)}var fn=on,ln=1/0,hn=ln,dn=-ln,pn=dn;var gn,yn,bn,vn,mn={point:function(t,e){tdn&&(dn=t);epn&&(pn=e)},lineStart:T,lineEnd:T,polygonStart:T,polygonEnd:T,result:function(){var t=[[ln,hn],[dn,pn]];return dn=pn=-(hn=ln=1/0),t}},_n=0,wn=0,xn=0,kn=0,En=0,An=0,Sn=0,Mn=0,Tn=0,On={point:Dn,lineStart:Cn,lineEnd:Rn,polygonStart:function(){On.lineStart=jn,On.lineEnd=Ln},polygonEnd:function(){On.point=Dn,On.lineStart=Cn,On.lineEnd=Rn},result:function(){var t=Tn?[Sn/Tn,Mn/Tn]:An?[kn/An,En/An]:xn?[_n/xn,wn/xn]:[NaN,NaN];return _n=wn=xn=kn=En=An=Sn=Mn=Tn=0,t}};function Dn(t,e){_n+=t,wn+=e,++xn}function Cn(){On.point=Nn}function Nn(t,e){On.point=In,Dn(bn=t,vn=e)}function In(t,e){var n=t-bn,r=e-vn,i=k(n*n+r*r);kn+=i*(bn+t)/2,En+=i*(vn+e)/2,An+=i,Dn(bn=t,vn=e)}function Rn(){On.point=Dn}function jn(){On.point=Bn}function Ln(){Pn(gn,yn)}function Bn(t,e){On.point=Pn,Dn(gn=bn=t,yn=vn=e)}function Pn(t,e){var n=t-bn,r=e-vn,i=k(n*n+r*r);kn+=i*(bn+t)/2,En+=i*(vn+e)/2,An+=i,Sn+=(i=vn*t-bn*e)*(bn+t),Mn+=i*(vn+e),Tn+=3*i,Dn(bn=t,vn=e)}var Fn=On;function qn(t){this._context=t}qn.prototype={_radius:4.5,pointRadius:function(t){return this._radius=t,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._context.moveTo(t,e),this._point=1;break;case 1:this._context.lineTo(t,e);break;default:this._context.moveTo(t+this._radius,e),this._context.arc(t,e,this._radius,0,f)}},result:T};var 
Un,zn,Yn,Vn,Gn,Hn=r(),Wn={point:T,lineStart:function(){Wn.point=$n},lineEnd:function(){Un&&Kn(zn,Yn),Wn.point=T},polygonStart:function(){Un=!0},polygonEnd:function(){Un=null},result:function(){var t=+Hn;return Hn.reset(),t}};function $n(t,e){Wn.point=Kn,zn=Vn=t,Yn=Gn=e}function Kn(t,e){Vn-=t,Gn-=e,Hn.add(k(Vn*Vn+Gn*Gn)),Vn=t,Gn=e}var Zn=Wn;function Xn(){this._string=[]}function Jn(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}Xn.prototype={_radius:4.5,_circle:Jn(4.5),pointRadius:function(t){return(t=+t)!==this._radius&&(this._radius=t,this._circle=null),this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._string.push("Z"),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._string.push("M",t,",",e),this._point=1;break;case 1:this._string.push("L",t,",",e);break;default:null==this._circle&&(this._circle=Jn(this._radius)),this._string.push("M",t,",",e,this._circle)}},result:function(){if(this._string.length){var t=this._string.join("");return this._string=[],t}return null}};var Qn=function(t,e){var n,r,i=4.5;function o(t){return t&&("function"==typeof i&&r.pointRadius(+i.apply(this,arguments)),F(t,n(r))),r.result()}return o.area=function(t){return F(t,n(fn)),fn.result()},o.measure=function(t){return F(t,n(Zn)),Zn.result()},o.bounds=function(t){return F(t,n(mn)),mn.result()},o.centroid=function(t){return F(t,n(Fn)),Fn.result()},o.projection=function(e){return arguments.length?(n=null==e?(t=null,en):(t=e).stream,o):t},o.context=function(t){return arguments.length?(r=null==t?(e=null,new Xn):new qn(e=t),"function"!=typeof i&&r.pointRadius(i),o):e},o.pointRadius=function(t){return arguments.length?(i="function"==typeof t?t:(r.pointRadius(+t),+t),o):i},o.projection(t).context(e)},tr=function(t){return{stream:er(t)}};function er(t){return function(e){var n=new nr;for(var r in t)n[r]=t[r];return n.stream=e,n}}function 
nr(){}function rr(t,e,n){var r=t.clipExtent&&t.clipExtent();return t.scale(150).translate([0,0]),null!=r&&t.clipExtent(null),F(n,t.stream(mn)),e(mn.result()),null!=r&&t.clipExtent(r),t}function ir(t,e,n){return rr(t,(function(n){var r=e[1][0]-e[0][0],i=e[1][1]-e[0][1],o=Math.min(r/(n[1][0]-n[0][0]),i/(n[1][1]-n[0][1])),a=+e[0][0]+(r-o*(n[1][0]+n[0][0]))/2,u=+e[0][1]+(i-o*(n[1][1]+n[0][1]))/2;t.scale(150*o).translate([a,u])}),n)}function or(t,e,n){return ir(t,[[0,0],e],n)}function ar(t,e,n){return rr(t,(function(n){var r=+e,i=r/(n[1][0]-n[0][0]),o=(r-i*(n[1][0]+n[0][0]))/2,a=-i*n[0][1];t.scale(150*i).translate([o,a])}),n)}function ur(t,e,n){return rr(t,(function(n){var r=+e,i=r/(n[1][1]-n[0][1]),o=-i*n[0][0],a=(r-i*(n[1][1]+n[0][1]))/2;t.scale(150*i).translate([o,a])}),n)}nr.prototype={constructor:nr,point:function(t,e){this.stream.point(t,e)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}};var sr=y(30*h),cr=function(t,e){return+e?function(t,e){function n(r,i,o,a,u,s,c,f,l,h,p,y,b,v){var m=c-r,_=f-i,w=m*m+_*_;if(w>4*e&&b--){var x=a+h,E=u+p,A=s+y,M=k(x*x+E*E+A*A),T=S(A/=M),O=d(d(A)-1)<1e-6||d(o-l)<1e-6?(o+l)/2:g(E,x),D=t(O,T),C=D[0],N=D[1],I=C-r,R=N-i,j=_*I-m*R;(j*j/w>e||d((m*I+_*R)/w-.5)>.3||a*h+u*p+s*y2?t[2]%360*h:0,D()):[v*l,m*l,_*l]},T.angle=function(t){return arguments.length?(w=t%360*h,D()):w*l},T.precision=function(t){return arguments.length?(a=cr(u,M=t*t),C()):k(M)},T.fitExtent=function(t,e){return ir(T,t,e)},T.fitSize=function(t,e){return or(T,t,e)},T.fitWidth=function(t,e){return ar(T,t,e)},T.fitHeight=function(t,e){return ur(T,t,e)},function(){return e=t.apply(this,arguments),T.invert=e.invert&&O,D()}}function gr(t){var e=0,n=u/3,r=pr(t),i=r(e,n);return i.parallels=function(t){return arguments.length?r(e=t[0]*h,n=t[1]*h):[e*l,n*l]},i}function yr(t,e){var 
n=w(t),r=(n+w(e))/2;if(d(r)<1e-6)return function(t){var e=y(t);function n(t,n){return[t*e,w(n)/e]}return n.invert=function(t,n){return[t/e,S(n*e)]},n}(t);var i=1+n*(2*r-n),o=k(i)/r;function a(t,e){var n=k(i-2*r*w(e))/r;return[n*w(t*=r),o-n*y(t)]}return a.invert=function(t,e){var n=o-e;return[g(t,d(n))/r*x(n),S((i-(t*t+n*n)*r*r)/(2*r))]},a}var br=function(){return gr(yr).scale(155.424).center([0,33.6442])},vr=function(){return br().parallels([29.5,45.5]).scale(1070).translate([480,250]).rotate([96,0]).center([-.6,38.7])};var mr=function(){var t,e,n,r,i,o,a=vr(),u=br().rotate([154,0]).center([-2,58.5]).parallels([55,65]),s=br().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(t,e){o=[t,e]}};function f(t){var e=t[0],a=t[1];return o=null,n.point(e,a),o||(r.point(e,a),o)||(i.point(e,a),o)}function l(){return t=e=null,f}return f.invert=function(t){var e=a.scale(),n=a.translate(),r=(t[0]-n[0])/e,i=(t[1]-n[1])/e;return(i>=.12&&i<.234&&r>=-.425&&r<-.214?u:i>=.166&&i<.234&&r>=-.214&&r<-.115?s:a).invert(t)},f.stream=function(n){return t&&e===n?t:(r=[a.stream(e=n),u.stream(n),s.stream(n)],i=r.length,t={point:function(t,e){for(var n=-1;++n0?e<1e-6-s&&(e=1e-6-s):e>s-1e-6&&(e=s-1e-6);var n=i/_(Or(e),r);return[n*w(r*t),i-n*y(r*t)]}return o.invert=function(t,e){var n=i-e,o=x(r)*k(t*t+n*n);return[g(t,d(n))/r*x(n),2*p(_(i/o,1/r))-s]},o}var Cr=function(){return gr(Dr).scale(109.5).parallels([30,30])};function Nr(t,e){return[t,e]}Nr.invert=Nr;var Ir=function(){return dr(Nr).scale(152.63)};function Rr(t,e){var n=y(t),r=t===e?w(t):(n-y(e))/(e-t),i=n/r+t;if(d(r)<1e-6)return Nr;function o(t,e){var n=i-e,o=r*t;return[n*w(o),i-n*y(o)]}return o.invert=function(t,e){var n=i-e;return[g(t,d(n))/r*x(n),i-x(r)*k(t*t+n*n)]},o}var jr=function(){return gr(Rr).scale(131.154).center([0,13.9389])},Lr=1.340264,Br=-.081106,Pr=893e-6,Fr=.003796,qr=k(3)/2;function Ur(t,e){var 
n=S(qr*w(e)),r=n*n,i=r*r*r;return[t*y(n)/(qr*(Lr+3*Br*r+i*(7*Pr+9*Fr*r))),n*(Lr+Br*r+i*(Pr+Fr*r))]}Ur.invert=function(t,e){for(var n,r=e,i=r*r,o=i*i*i,a=0;a<12&&(o=(i=(r-=n=(r*(Lr+Br*i+o*(Pr+Fr*i))-e)/(Lr+3*Br*i+o*(7*Pr+9*Fr*i)))*r)*i*i,!(d(n)<1e-12));++a);return[qr*t*(Lr+3*Br*i+o*(7*Pr+9*Fr*i))/y(r),S(w(r)/qr)]};var zr=function(){return dr(Ur).scale(177.158)};function Yr(t,e){var n=y(e),r=y(t)*n;return[n*w(t)/r,w(e)/r]}Yr.invert=wr(p);var Vr=function(){return dr(Yr).scale(144.049).clipAngle(60)};function Gr(t,e,n,r){return 1===t&&1===e&&0===n&&0===r?en:er({point:function(i,o){this.stream.point(i*t+n,o*e+r)}})}var Hr=function(){var t,e,n,r,i,o,a=1,u=0,s=0,c=1,f=1,l=en,h=null,d=en;function p(){return r=i=null,o}return o={stream:function(t){return r&&i===t?r:r=l(d(i=t))},postclip:function(r){return arguments.length?(d=r,h=t=e=n=null,p()):d},clipExtent:function(r){return arguments.length?(d=null==r?(h=t=e=n=null,en):ke(h=+r[0][0],t=+r[0][1],e=+r[1][0],n=+r[1][1]),p()):null==h?null:[[h,t],[e,n]]},scale:function(t){return arguments.length?(l=Gr((a=+t)*c,a*f,u,s),p()):a},translate:function(t){return arguments.length?(l=Gr(a*c,a*f,u=+t[0],s=+t[1]),p()):[u,s]},reflectX:function(t){return arguments.length?(l=Gr(a*(c=t?-1:1),a*f,u,s),p()):c<0},reflectY:function(t){return arguments.length?(l=Gr(a*c,a*(f=t?-1:1),u,s),p()):f<0},fitExtent:function(t,e){return ir(o,t,e)},fitSize:function(t,e){return or(o,t,e)},fitWidth:function(t,e){return ar(o,t,e)},fitHeight:function(t,e){return ur(o,t,e)}}};function Wr(t,e){var n=e*e,r=n*n;return[t*(.8707-.131979*n+r*(r*(.003971*n-.001529*r)-.013791)),e*(1.007226+n*(.015085+r*(.028874*n-.044475-.005916*r)))]}Wr.invert=function(t,e){var n,r=e,i=25;do{var o=r*r,a=o*o;r-=n=(r*(1.007226+o*(.015085+a*(.028874*o-.044475-.005916*a)))-e)/(1.007226+o*(.045255+a*(.259866*o-.311325-.005916*11*a)))}while(d(n)>1e-6&&--i>0);return[t/(.8707+(o=r*r)*(o*(o*o*o*(.003971-.001529*o)-.013791)-.131979)),r]};var $r=function(){return dr(Wr).scale(175.295)};function 
Kr(t,e){return[y(e)*w(t),w(e)]}Kr.invert=wr(S);var Zr=function(){return dr(Kr).scale(249.5).clipAngle(90+1e-6)};function Xr(t,e){var n=y(e),r=1+y(t)*n;return[n*w(t)/r,w(e)/r]}Xr.invert=wr((function(t){return 2*p(t)}));var Jr=function(){return dr(Xr).scale(250).clipAngle(142)};function Qr(t,e){return[m(E((s+e)/2)),-t]}Qr.invert=function(t,e){return[-e,2*p(v(t))-s]};var ti=function(){var t=Tr(Qr),e=t.center,n=t.rotate;return t.center=function(t){return arguments.length?e([-t[1],t[0]]):[(t=e())[1],-t[0]]},t.rotate=function(t){return arguments.length?n([t[0],t[1],t.length>2?t[2]+90:90]):[(t=n())[0],t[1],t[2]-90]},n([0,0,90]).scale(159.155)};n.d(e,"c",(function(){return W})),n.d(e,"h",(function(){return Ft})),n.d(e,"i",(function(){return Xt})),n.d(e,"j",(function(){return se})),n.d(e,"k",(function(){return we})),n.d(e,"l",(function(){return xe})),n.d(e,"m",(function(){return Me})),n.d(e,"n",(function(){return ke})),n.d(e,"u",(function(){return Ge})),n.d(e,"v",(function(){return Le})),n.d(e,"C",(function(){return $e})),n.d(e,"D",(function(){return Ke})),n.d(e,"F",(function(){return tn})),n.d(e,"G",(function(){return Ie})),n.d(e,"N",(function(){return Qn})),n.d(e,"a",(function(){return vr})),n.d(e,"b",(function(){return mr})),n.d(e,"d",(function(){return kr})),n.d(e,"e",(function(){return xr})),n.d(e,"f",(function(){return Ar})),n.d(e,"g",(function(){return Er})),n.d(e,"o",(function(){return Cr})),n.d(e,"p",(function(){return Dr})),n.d(e,"q",(function(){return br})),n.d(e,"r",(function(){return yr})),n.d(e,"s",(function(){return jr})),n.d(e,"t",(function(){return Rr})),n.d(e,"w",(function(){return zr})),n.d(e,"x",(function(){return Ur})),n.d(e,"y",(function(){return Ir})),n.d(e,"z",(function(){return Nr})),n.d(e,"A",(function(){return Vr})),n.d(e,"B",(function(){return Yr})),n.d(e,"E",(function(){return Hr})),n.d(e,"O",(function(){return dr})),n.d(e,"P",(function(){return pr})),n.d(e,"H",(function(){return Mr})),n.d(e,"I",(function(){return 
Sr})),n.d(e,"J",(function(){return $r})),n.d(e,"K",(function(){return Wr})),n.d(e,"L",(function(){return Zr})),n.d(e,"M",(function(){return Kr})),n.d(e,"R",(function(){return Jr})),n.d(e,"S",(function(){return Xr})),n.d(e,"V",(function(){return ti})),n.d(e,"W",(function(){return Qr})),n.d(e,"Q",(function(){return oe})),n.d(e,"T",(function(){return F})),n.d(e,"U",(function(){return tr}))},function(t,e,n){"use strict";var r=n(286),i=function(t){return function(){return t}},o=Math.abs,a=Math.atan2,u=Math.cos,s=Math.max,c=Math.min,f=Math.sin,l=Math.sqrt,h=Math.PI,d=h/2,p=2*h;function g(t){return t>1?0:t<-1?h:Math.acos(t)}function y(t){return t>=1?d:t<=-1?-d:Math.asin(t)}function b(t){return t.innerRadius}function v(t){return t.outerRadius}function m(t){return t.startAngle}function _(t){return t.endAngle}function w(t){return t&&t.padAngle}function x(t,e,n,r,i,o,a,u){var s=n-t,c=r-e,f=a-i,l=u-o,h=l*s-f*c;if(!(h*h<1e-12))return[t+(h=(f*(e-o)-l*(t-i))/h)*s,e+h*c]}function k(t,e,n,r,i,o,a){var u=t-n,c=e-r,f=(a?o:-o)/l(u*u+c*c),h=f*c,d=-f*u,p=t+h,g=e+d,y=n+h,b=r+d,v=(p+y)/2,m=(g+b)/2,_=y-p,w=b-g,x=_*_+w*w,k=i-o,E=p*b-y*g,A=(w<0?-1:1)*l(s(0,k*k*x-E*E)),S=(E*w-_*A)/x,M=(-E*_-w*A)/x,T=(E*w+_*A)/x,O=(-E*_+w*A)/x,D=S-v,C=M-m,N=T-v,I=O-m;return D*D+C*C>N*N+I*I&&(S=T,M=O),{cx:S,cy:M,x01:-h,y01:-d,x11:S*(i/k-1),y11:M*(i/k-1)}}var E=function(){var t=b,e=v,n=i(0),s=null,E=m,A=_,S=w,M=null;function T(){var i,b,v=+t.apply(this,arguments),m=+e.apply(this,arguments),_=E.apply(this,arguments)-d,w=A.apply(this,arguments)-d,T=o(w-_),O=w>_;if(M||(M=i=Object(r.a)()),m1e-12)if(T>p-1e-12)M.moveTo(m*u(_),m*f(_)),M.arc(0,0,m,_,w,!O),v>1e-12&&(M.moveTo(v*u(w),v*f(w)),M.arc(0,0,v,w,_,O));else{var D,C,N=_,I=w,R=_,j=w,L=T,B=T,P=S.apply(this,arguments)/2,F=P>1e-12&&(s?+s.apply(this,arguments):l(v*v+m*m)),q=c(o(m-v)/2,+n.apply(this,arguments)),U=q,z=q;if(F>1e-12){var Y=y(F/v*f(P)),V=y(F/m*f(P));(L-=2*Y)>1e-12?(R+=Y*=O?1:-1,j-=Y):(L=0,R=j=(_+w)/2),(B-=2*V)>1e-12?(N+=V*=O?1:-1,I-=V):(B=0,N=I=(_+w)/2)}var 
G=m*u(N),H=m*f(N),W=v*u(j),$=v*f(j);if(q>1e-12){var K,Z=m*u(I),X=m*f(I),J=v*u(R),Q=v*f(R);if(T1e-12?z>1e-12?(D=k(J,Q,G,H,m,z,O),C=k(Z,X,W,$,m,z,O),M.moveTo(D.cx+D.x01,D.cy+D.y01),z1e-12&&L>1e-12?U>1e-12?(D=k(W,$,Z,X,v,-U,O),C=k(G,H,J,Q,v,-U,O),M.lineTo(D.cx+D.x01,D.cy+D.y01),U=l;--h)c.point(b[h],v[h]);c.lineEnd(),c.areaEnd()}y&&(b[f]=+t(d,f,i),v[f]=+n(d,f,i),c.point(e?+e(d,f,i):b[f],o?+o(d,f,i):v[f]))}if(p)return c=null,p+""||null}function l(){return O().defined(a).curve(s).context(u)}return f.x=function(n){return arguments.length?(t="function"==typeof n?n:i(+n),e=null,f):t},f.x0=function(e){return arguments.length?(t="function"==typeof e?e:i(+e),f):t},f.x1=function(t){return arguments.length?(e=null==t?null:"function"==typeof t?t:i(+t),f):e},f.y=function(t){return arguments.length?(n="function"==typeof t?t:i(+t),o=null,f):n},f.y0=function(t){return arguments.length?(n="function"==typeof t?t:i(+t),f):n},f.y1=function(t){return arguments.length?(o=null==t?null:"function"==typeof t?t:i(+t),f):o},f.lineX0=f.lineY0=function(){return l().x(t).y(n)},f.lineY1=function(){return l().x(t).y(o)},f.lineX1=function(){return l().x(e).y(n)},f.defined=function(t){return arguments.length?(a="function"==typeof t?t:i(!!t),f):a},f.curve=function(t){return arguments.length?(s=t,null!=u&&(c=s(u)),f):s},f.context=function(t){return arguments.length?(null==t?u=c=null:c=s(u=t),f):u},f},C=function(t,e){return et?1:e>=t?0:NaN},N=function(t){return t},I=function(){var t=N,e=C,n=null,r=i(0),o=i(p),a=i(0);function u(i){var u,s,c,f,l,h=i.length,d=0,g=new Array(h),y=new Array(h),b=+r.apply(this,arguments),v=Math.min(p,Math.max(-p,o.apply(this,arguments)-b)),m=Math.min(Math.abs(v)/h,a.apply(this,arguments)),_=m*(v<0?-1:1);for(u=0;u0&&(d+=l);for(null!=e?g.sort((function(t,n){return e(y[t],y[n])})):null!=n&&g.sort((function(t,e){return n(i[t],i[e])})),u=0,c=d?(v-h*_)/d:0;u0?l*c:0)+_,y[s]={data:i[s],index:u,value:l,startAngle:b,endAngle:f,padAngle:m};return y}return u.value=function(e){return 
arguments.length?(t="function"==typeof e?e:i(+e),u):t},u.sortValues=function(t){return arguments.length?(e=t,n=null,u):e},u.sort=function(t){return arguments.length?(n=t,e=null,u):n},u.startAngle=function(t){return arguments.length?(r="function"==typeof t?t:i(+t),u):r},u.endAngle=function(t){return arguments.length?(o="function"==typeof t?t:i(+t),u):o},u.padAngle=function(t){return arguments.length?(a="function"==typeof t?t:i(+t),u):a},u},R=L(S);function j(t){this._curve=t}function L(t){function e(e){return new j(t(e))}return e._curve=t,e}function B(t){var e=t.curve;return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t.curve=function(t){return arguments.length?e(L(t)):e()._curve},t}j.prototype={areaStart:function(){this._curve.areaStart()},areaEnd:function(){this._curve.areaEnd()},lineStart:function(){this._curve.lineStart()},lineEnd:function(){this._curve.lineEnd()},point:function(t,e){this._curve.point(e*Math.sin(t),e*-Math.cos(t))}};var P=function(){return B(O().curve(R))},F=function(){var t=D().curve(R),e=t.curve,n=t.lineX0,r=t.lineX1,i=t.lineY0,o=t.lineY1;return t.angle=t.x,delete t.x,t.startAngle=t.x0,delete t.x0,t.endAngle=t.x1,delete t.x1,t.radius=t.y,delete t.y,t.innerRadius=t.y0,delete t.y0,t.outerRadius=t.y1,delete t.y1,t.lineStartAngle=function(){return B(n())},delete t.lineX0,t.lineEndAngle=function(){return B(r())},delete t.lineX1,t.lineInnerRadius=function(){return B(i())},delete t.lineY0,t.lineOuterRadius=function(){return B(o())},delete t.lineY1,t.curve=function(t){return arguments.length?e(L(t)):e()._curve},t},q=function(t,e){return[(e=+e)*Math.cos(t-=Math.PI/2),e*Math.sin(t)]},U=Array.prototype.slice;function z(t){return t.source}function Y(t){return t.target}function V(t){var e=z,n=Y,o=M,a=T,u=null;function s(){var i,s=U.call(arguments),c=e.apply(this,s),f=n.apply(this,s);if(u||(u=i=Object(r.a)()),t(u,+o.apply(this,(s[0]=c,s)),+a.apply(this,s),+o.apply(this,(s[0]=f,s)),+a.apply(this,s)),i)return u=null,i+""||null}return 
s.source=function(t){return arguments.length?(e=t,s):e},s.target=function(t){return arguments.length?(n=t,s):n},s.x=function(t){return arguments.length?(o="function"==typeof t?t:i(+t),s):o},s.y=function(t){return arguments.length?(a="function"==typeof t?t:i(+t),s):a},s.context=function(t){return arguments.length?(u=null==t?null:t,s):u},s}function G(t,e,n,r,i){t.moveTo(e,n),t.bezierCurveTo(e=(e+r)/2,n,e,i,r,i)}function H(t,e,n,r,i){t.moveTo(e,n),t.bezierCurveTo(e,n=(n+i)/2,r,n,r,i)}function W(t,e,n,r,i){var o=q(e,n),a=q(e,n=(n+i)/2),u=q(r,n),s=q(r,i);t.moveTo(o[0],o[1]),t.bezierCurveTo(a[0],a[1],u[0],u[1],s[0],s[1])}function $(){return V(G)}function K(){return V(H)}function Z(){var t=V(W);return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t}var X={draw:function(t,e){var n=Math.sqrt(e/h);t.moveTo(n,0),t.arc(0,0,n,0,p)}},J={draw:function(t,e){var n=Math.sqrt(e/5)/2;t.moveTo(-3*n,-n),t.lineTo(-n,-n),t.lineTo(-n,-3*n),t.lineTo(n,-3*n),t.lineTo(n,-n),t.lineTo(3*n,-n),t.lineTo(3*n,n),t.lineTo(n,n),t.lineTo(n,3*n),t.lineTo(-n,3*n),t.lineTo(-n,n),t.lineTo(-3*n,n),t.closePath()}},Q=Math.sqrt(1/3),tt=2*Q,et={draw:function(t,e){var n=Math.sqrt(e/tt),r=n*Q;t.moveTo(0,-n),t.lineTo(r,0),t.lineTo(0,n),t.lineTo(-r,0),t.closePath()}},nt=Math.sin(h/10)/Math.sin(7*h/10),rt=Math.sin(p/10)*nt,it=-Math.cos(p/10)*nt,ot={draw:function(t,e){var n=Math.sqrt(.8908130915292852*e),r=rt*n,i=it*n;t.moveTo(0,-n),t.lineTo(r,i);for(var o=1;o<5;++o){var a=p*o/5,u=Math.cos(a),s=Math.sin(a);t.lineTo(s*n,-u*n),t.lineTo(u*r-s*i,s*r+u*i)}t.closePath()}},at={draw:function(t,e){var n=Math.sqrt(e),r=-n/2;t.rect(r,r,n,n)}},ut=Math.sqrt(3),st={draw:function(t,e){var n=-Math.sqrt(e/(3*ut));t.moveTo(0,2*n),t.lineTo(-ut*n,-n),t.lineTo(ut*n,-n),t.closePath()}},ct=Math.sqrt(3)/2,ft=1/Math.sqrt(12),lt=3*(ft/2+1),ht={draw:function(t,e){var 
n=Math.sqrt(e/lt),r=n/2,i=n*ft,o=r,a=n*ft+n,u=-o,s=a;t.moveTo(r,i),t.lineTo(o,a),t.lineTo(u,s),t.lineTo(-.5*r-ct*i,ct*r+-.5*i),t.lineTo(-.5*o-ct*a,ct*o+-.5*a),t.lineTo(-.5*u-ct*s,ct*u+-.5*s),t.lineTo(-.5*r+ct*i,-.5*i-ct*r),t.lineTo(-.5*o+ct*a,-.5*a-ct*o),t.lineTo(-.5*u+ct*s,-.5*s-ct*u),t.closePath()}},dt=[X,J,et,at,ot,st,ht],pt=function(){var t=i(X),e=i(64),n=null;function o(){var i;if(n||(n=i=Object(r.a)()),t.apply(this,arguments).draw(n,+e.apply(this,arguments)),i)return n=null,i+""||null}return o.type=function(e){return arguments.length?(t="function"==typeof e?e:i(e),o):t},o.size=function(t){return arguments.length?(e="function"==typeof t?t:i(+t),o):e},o.context=function(t){return arguments.length?(n=null==t?null:t,o):n},o},gt=function(){};function yt(t,e,n){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+e)/6,(t._y0+4*t._y1+n)/6)}function bt(t){this._context=t}bt.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){switch(this._point){case 3:yt(this,this._x1,this._y1);case 2:this._context.lineTo(this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;break;case 2:this._point=3,this._context.lineTo((5*this._x0+this._x1)/6,(5*this._y0+this._y1)/6);default:yt(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var vt=function(t){return new bt(t)};function mt(t){this._context=t}mt.prototype={areaStart:gt,areaEnd:gt,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._y0=this._y1=this._y2=this._y3=this._y4=NaN,this._point=0},lineEnd:function(){switch(this._point){case 
1:this._context.moveTo(this._x2,this._y2),this._context.closePath();break;case 2:this._context.moveTo((this._x2+2*this._x3)/3,(this._y2+2*this._y3)/3),this._context.lineTo((this._x3+2*this._x2)/3,(this._y3+2*this._y2)/3),this._context.closePath();break;case 3:this.point(this._x2,this._y2),this.point(this._x3,this._y3),this.point(this._x4,this._y4)}},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._x2=t,this._y2=e;break;case 1:this._point=2,this._x3=t,this._y3=e;break;case 2:this._point=3,this._x4=t,this._y4=e,this._context.moveTo((this._x0+4*this._x1+t)/6,(this._y0+4*this._y1+e)/6);break;default:yt(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var _t=function(t){return new mt(t)};function wt(t){this._context=t}wt.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3;var n=(this._x0+4*this._x1+t)/6,r=(this._y0+4*this._y1+e)/6;this._line?this._context.lineTo(n,r):this._context.moveTo(n,r);break;case 3:this._point=4;default:yt(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var xt=function(t){return new wt(t)};function kt(t,e){this._basis=new bt(t),this._beta=e}kt.prototype={lineStart:function(){this._x=[],this._y=[],this._basis.lineStart()},lineEnd:function(){var t=this._x,e=this._y,n=t.length-1;if(n>0)for(var r,i=t[0],o=e[0],a=t[n]-i,u=e[n]-o,s=-1;++s<=n;)r=s/n,this._basis.point(this._beta*t[s]+(1-this._beta)*(i+r*a),this._beta*e[s]+(1-this._beta)*(o+r*u));this._x=this._y=null,this._basis.lineEnd()},point:function(t,e){this._x.push(+t),this._y.push(+e)}};var Et=function t(e){function n(t){return 1===e?new bt(t):new kt(t,e)}return n.beta=function(e){return 
t(+e)},n}(.85);function At(t,e,n){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-e),t._y2+t._k*(t._y1-n),t._x2,t._y2)}function St(t,e){this._context=t,this._k=(1-e)/6}St.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:At(this,this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2,this._x1=t,this._y1=e;break;case 2:this._point=3;default:At(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var Mt=function t(e){function n(t){return new St(t,e)}return n.tension=function(e){return t(+e)},n}(0);function Tt(t,e){this._context=t,this._k=(1-e)/6}Tt.prototype={areaStart:gt,areaEnd:gt,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._x3=t,this._y3=e;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=e);break;case 2:this._point=3,this._x5=t,this._y5=e;break;default:At(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var Ot=function t(e){function n(t){return new Tt(t,e)}return 
n.tension=function(e){return t(+e)},n}(0);function Dt(t,e){this._context=t,this._k=(1-e)/6}Dt.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:At(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var Ct=function t(e){function n(t){return new Dt(t,e)}return n.tension=function(e){return t(+e)},n}(0);function Nt(t,e,n){var r=t._x1,i=t._y1,o=t._x2,a=t._y2;if(t._l01_a>1e-12){var u=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,s=3*t._l01_a*(t._l01_a+t._l12_a);r=(r*u-t._x0*t._l12_2a+t._x2*t._l01_2a)/s,i=(i*u-t._y0*t._l12_2a+t._y2*t._l01_2a)/s}if(t._l23_a>1e-12){var c=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,f=3*t._l23_a*(t._l23_a+t._l12_a);o=(o*c+t._x1*t._l23_2a-e*t._l12_2a)/f,a=(a*c+t._y1*t._l23_2a-n*t._l12_2a)/f}t._context.bezierCurveTo(r,i,o,a,t._x2,t._y2)}function It(t,e){this._context=t,this._alpha=e}It.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:this.point(this._x2,this._y2)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){if(t=+t,e=+e,this._point){var 
n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;break;case 2:this._point=3;default:Nt(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var Rt=function t(e){function n(t){return e?new It(t,e):new St(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function jt(t,e){this._context=t,this._alpha=e}jt.prototype={areaStart:gt,areaEnd:gt,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=t,this._y3=e;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=e);break;case 2:this._point=3,this._x5=t,this._y5=e;break;default:Nt(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var Lt=function t(e){function n(t){return e?new jt(t,e):new Tt(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function 
Bt(t,e){this._context=t,this._alpha=e}Bt.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:Nt(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var Pt=function t(e){function n(t){return e?new Bt(t,e):new Dt(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function Ft(t){this._context=t}Ft.prototype={areaStart:gt,areaEnd:gt,lineStart:function(){this._point=0},lineEnd:function(){this._point&&this._context.closePath()},point:function(t,e){t=+t,e=+e,this._point?this._context.lineTo(t,e):(this._point=1,this._context.moveTo(t,e))}};var qt=function(t){return new Ft(t)};function Ut(t){return t<0?-1:1}function zt(t,e,n){var r=t._x1-t._x0,i=e-t._x1,o=(t._y1-t._y0)/(r||i<0&&-0),a=(n-t._y1)/(i||r<0&&-0),u=(o*i+a*r)/(r+i);return(Ut(o)+Ut(a))*Math.min(Math.abs(o),Math.abs(a),.5*Math.abs(u))||0}function Yt(t,e){var n=t._x1-t._x0;return n?(3*(t._y1-t._y0)/n-e)/2:e}function Vt(t,e,n){var r=t._x0,i=t._y0,o=t._x1,a=t._y1,u=(o-r)/3;t._context.bezierCurveTo(r+u,i+u*e,o-u,a-u*n,o,a)}function Gt(t){this._context=t}function Ht(t){this._context=new Wt(t)}function Wt(t){this._context=t}function $t(t){return new Gt(t)}function Kt(t){return new Ht(t)}function 
Zt(t){this._context=t}function Xt(t){var e,n,r=t.length-1,i=new Array(r),o=new Array(r),a=new Array(r);for(i[0]=0,o[0]=2,a[0]=t[0]+2*t[1],e=1;e=0;--e)i[e]=(a[e]-i[e+1])/o[e];for(o[r-1]=(t[r]+i[r-1])/2,e=0;e=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,e),this._context.lineTo(t,e);else{var n=this._x*(1-this._t)+t*this._t;this._context.lineTo(n,this._y),this._context.lineTo(n,e)}}this._x=t,this._y=e}};var te=function(t){return new Qt(t,.5)};function ee(t){return new Qt(t,0)}function ne(t){return new Qt(t,1)}var re=function(t,e){if((i=t.length)>1)for(var n,r,i,o=1,a=t[e[0]],u=a.length;o=0;)n[e]=e;return n};function oe(t,e){return t[e]}var ae=function(){var t=i([]),e=ie,n=re,r=oe;function o(i){var o,a,u=t.apply(this,arguments),s=i.length,c=u.length,f=new Array(c);for(o=0;o0){for(var n,r,i,o=0,a=t[0].length;o0)for(var n,r,i,o,a,u,s=0,c=t[e[0]].length;s0?(r[0]=o,r[1]=o+=i):i<0?(r[1]=a,r[0]=a+=i):(r[0]=0,r[1]=i)},ce=function(t,e){if((n=t.length)>0){for(var n,r=0,i=t[e[0]],o=i.length;r0&&(r=(n=t[e[0]]).length)>0){for(var n,r,i,o=0,a=1;ao&&(o=e,r=n);return r}var de=function(t){var e=t.map(pe);return ie(t).sort((function(t,n){return e[t]-e[n]}))};function pe(t){for(var e,n=0,r=-1,i=t.length;++r1)&&(t-=Math.floor(t));var e=Math.abs(t-.5);return xt.h=360*t-100,xt.s=1.5-1.5*e,xt.l=.8-.9*e,xt+""},Et=n(11),At=Object(Et.g)(),St=Math.PI/3,Mt=2*Math.PI/3,Tt=function(t){var e;return t=(.5-t)*Math.PI,At.r=255*(e=Math.sin(t))*e,At.g=255*(e=Math.sin(t+St))*e,At.b=255*(e=Math.sin(t+Mt))*e,At+""},Ot=function(t){return t=Math.max(0,Math.min(1,t)),"rgb("+Math.max(0,Math.min(255,Math.round(34.61+t*(1172.33-t*(10793.56-t*(33300.12-t*(38394.49-14825.05*t)))))))+", "+Math.max(0,Math.min(255,Math.round(23.31+t*(557.33+t*(1225.33-t*(3574.96-t*(1073.77+707.56*t)))))))+", 
"+Math.max(0,Math.min(255,Math.round(27.2+t*(3211.1-t*(15327.97-t*(27814-t*(22569.18-6838.66*t)))))))+")"};function Dt(t){var e=t.length;return function(n){return t[Math.max(0,Math.min(e-1,Math.floor(n*e)))]}}var Ct=Dt(r("44015444025645045745055946075a46085c460a5d460b5e470d60470e6147106347116447136548146748166848176948186a481a6c481b6d481c6e481d6f481f70482071482173482374482475482576482677482878482979472a7a472c7a472d7b472e7c472f7d46307e46327e46337f463480453581453781453882443983443a83443b84433d84433e85423f854240864241864142874144874045884046883f47883f48893e49893e4a893e4c8a3d4d8a3d4e8a3c4f8a3c508b3b518b3b528b3a538b3a548c39558c39568c38588c38598c375a8c375b8d365c8d365d8d355e8d355f8d34608d34618d33628d33638d32648e32658e31668e31678e31688e30698e306a8e2f6b8e2f6c8e2e6d8e2e6e8e2e6f8e2d708e2d718e2c718e2c728e2c738e2b748e2b758e2a768e2a778e2a788e29798e297a8e297b8e287c8e287d8e277e8e277f8e27808e26818e26828e26828e25838e25848e25858e24868e24878e23888e23898e238a8d228b8d228c8d228d8d218e8d218f8d21908d21918c20928c20928c20938c1f948c1f958b1f968b1f978b1f988b1f998a1f9a8a1e9b8a1e9c891e9d891f9e891f9f881fa0881fa1881fa1871fa28720a38620a48621a58521a68522a78522a88423a98324aa8325ab8225ac8226ad8127ad8128ae8029af7f2ab07f2cb17e2db27d2eb37c2fb47c31b57b32b67a34b67935b77937b87838b9773aba763bbb753dbc743fbc7340bd7242be7144bf7046c06f48c16e4ac16d4cc26c4ec36b50c46a52c56954c56856c66758c7655ac8645cc8635ec96260ca6063cb5f65cb5e67cc5c69cd5b6ccd5a6ece5870cf5773d05675d05477d1537ad1517cd2507fd34e81d34d84d44b86d54989d5488bd6468ed64590d74393d74195d84098d83e9bd93c9dd93ba0da39a2da37a5db36a8db34aadc32addc30b0dd2fb2dd2db5de2bb8de29bade28bddf26c0df25c2df23c5e021c8e020cae11fcde11dd0e11cd2e21bd5e21ad8e219dae319dde318dfe318e2e418e5e419e7e419eae51aece51befe51cf1e51df4e61ef6e620f8e621fbe723fde725")),Nt=Dt(r("00000401000501010601010802010902020b02020d03030f03031204041405041606051806051a07061c08071e0907200a08220b09240c09260d0a290e0b2b100b2d110c2f120d31130d34140e36150e38160f3b180f3d19103f1a10421c10441d11471e114920114b21114e2211502412532
5125527125829115a2a115c2c115f2d11612f116331116533106734106936106b38106c390f6e3b0f703d0f713f0f72400f74420f75440f764510774710784910784a10794c117a4e117b4f127b51127c52137c54137d56147d57157e59157e5a167e5c167f5d177f5f187f601880621980641a80651a80671b80681c816a1c816b1d816d1d816e1e81701f81721f817320817521817621817822817922827b23827c23827e24828025828125818326818426818627818827818928818b29818c29818e2a81902a81912b81932b80942c80962c80982d80992d809b2e7f9c2e7f9e2f7fa02f7fa1307ea3307ea5317ea6317da8327daa337dab337cad347cae347bb0357bb2357bb3367ab5367ab73779b83779ba3878bc3978bd3977bf3a77c03a76c23b75c43c75c53c74c73d73c83e73ca3e72cc3f71cd4071cf4070d0416fd2426fd3436ed5446dd6456cd8456cd9466bdb476adc4869de4968df4a68e04c67e24d66e34e65e44f64e55064e75263e85362e95462ea5661eb5760ec5860ed5a5fee5b5eef5d5ef05f5ef1605df2625df2645cf3655cf4675cf4695cf56b5cf66c5cf66e5cf7705cf7725cf8745cf8765cf9785df9795df97b5dfa7d5efa7f5efa815ffb835ffb8560fb8761fc8961fc8a62fc8c63fc8e64fc9065fd9266fd9467fd9668fd9869fd9a6afd9b6bfe9d6cfe9f6dfea16efea36ffea571fea772fea973feaa74feac76feae77feb078feb27afeb47bfeb67cfeb77efeb97ffebb81febd82febf84fec185fec287fec488fec68afec88cfeca8dfecc8ffecd90fecf92fed194fed395fed597fed799fed89afdda9cfddc9efddea0fde0a1fde2a3fde3a5fde5a7fde7a9fde9aafdebacfcecaefceeb0fcf0b2fcf2b4fcf4b6fcf6b8fcf7b9fcf9bbfcfbbdfcfdbf")),It=Dt(r("00000401000501010601010802010a02020c02020e03021004031204031405041706041907051b08051d09061f0a07220b07240c08260d08290e092b10092d110a30120a32140b34150b37160b39180c3c190c3e1b0c411c0c431e0c451f0c48210c4a230c4c240c4f260c51280b53290b552b0b572d0b592f0a5b310a5c320a5e340a5f3609613809623909633b09643d09653e0966400a67420a68440a68450a69470b6a490b6a4a0c6b4c0c6b4d0d6c4f0d6c510e6c520e6d540f6d550f6d57106e59106e5a116e5c126e5d126e5f136e61136e62146e64156e65156e67166e69166e6a176e6c186e6d186e6f196e71196e721a6e741a6e751b6e771c6d781c6d7a1d6d7c1d6d7d1e6d7f1e6c801f6c82206c84206b85216b87216b88226a8a226a8c23698d23698f24699025689225689326679526679727669827669a28659b29649d29649f2a63a02a63a22b62a32c61a5
2c60a62d60a82e5fa92e5eab2f5ead305dae305cb0315bb1325ab3325ab43359b63458b73557b93556ba3655bc3754bd3853bf3952c03a51c13a50c33b4fc43c4ec63d4dc73e4cc83f4bca404acb4149cc4248ce4347cf4446d04545d24644d34743d44842d54a41d74b3fd84c3ed94d3dda4e3cdb503bdd513ade5238df5337e05536e15635e25734e35933e45a31e55c30e65d2fe75e2ee8602de9612bea632aeb6429eb6628ec6726ed6925ee6a24ef6c23ef6e21f06f20f1711ff1731df2741cf3761bf37819f47918f57b17f57d15f67e14f68013f78212f78410f8850ff8870ef8890cf98b0bf98c0af98e09fa9008fa9207fa9407fb9606fb9706fb9906fb9b06fb9d07fc9f07fca108fca309fca50afca60cfca80dfcaa0ffcac11fcae12fcb014fcb216fcb418fbb61afbb81dfbba1ffbbc21fbbe23fac026fac228fac42afac62df9c72ff9c932f9cb35f8cd37f8cf3af7d13df7d340f6d543f6d746f5d949f5db4cf4dd4ff4df53f4e156f3e35af3e55df2e661f2e865f2ea69f1ec6df1ed71f1ef75f1f179f2f27df2f482f3f586f3f68af4f88ef5f992f6fa96f8fb9af9fc9dfafda1fcffa4")),Rt=Dt(r("0d088710078813078916078a19068c1b068d1d068e20068f2206902406912605912805922a05932c05942e05952f059631059733059735049837049938049a3a049a3c049b3e049c3f049c41049d43039e44039e46039f48039f4903a04b03a14c02a14e02a25002a25102a35302a35502a45601a45801a45901a55b01a55c01a65e01a66001a66100a76300a76400a76600a76700a86900a86a00a86c00a86e00a86f00a87100a87201a87401a87501a87701a87801a87a02a87b02a87d03a87e03a88004a88104a78305a78405a78606a68707a68808a68a09a58b0aa58d0ba58e0ca48f0da4910ea3920fa39410a29511a19613a19814a099159f9a169f9c179e9d189d9e199da01a9ca11b9ba21d9aa31e9aa51f99a62098a72197a82296aa2395ab2494ac2694ad2793ae2892b02991b12a90b22b8fb32c8eb42e8db52f8cb6308bb7318ab83289ba3388bb3488bc3587bd3786be3885bf3984c03a83c13b82c23c81c33d80c43e7fc5407ec6417dc7427cc8437bc9447aca457acb4679cc4778cc4977cd4a76ce4b75cf4c74d04d73d14e72d24f71d35171d45270d5536fd5546ed6556dd7566cd8576bd9586ada5a6ada5b69db5c68dc5d67dd5e66de5f65de6164df6263e06363e16462e26561e26660e3685fe4695ee56a5de56b5de66c5ce76e5be76f5ae87059e97158e97257ea7457eb7556eb7655ec7754ed7953ed7a52ee7b51ef7c51ef7e50f07f4ff0804ef1814df1834cf2844bf3854bf3874af48849f48948f58b47f58c46f68d45f68f44f79
044f79143f79342f89441f89540f9973ff9983ef99a3efa9b3dfa9c3cfa9e3bfb9f3afba139fba238fca338fca537fca636fca835fca934fdab33fdac33fdae32fdaf31fdb130fdb22ffdb42ffdb52efeb72dfeb82cfeba2cfebb2bfebd2afebe2afec029fdc229fdc328fdc527fdc627fdc827fdca26fdcb26fccd25fcce25fcd025fcd225fbd324fbd524fbd724fad824fada24f9dc24f9dd25f8df25f8e125f7e225f7e425f6e626f6e826f5e926f5eb27f4ed27f3ee27f3f027f2f227f1f426f1f525f0f724f0f921"));n.d(e,"R",(function(){return i})),n.d(e,"M",(function(){return o})),n.d(e,"S",(function(){return a})),n.d(e,"Z",(function(){return u})),n.d(e,"ab",(function(){return s})),n.d(e,"bb",(function(){return c})),n.d(e,"ob",(function(){return f})),n.d(e,"pb",(function(){return l})),n.d(e,"qb",(function(){return h})),n.d(e,"sb",(function(){return d})),n.d(e,"b",(function(){return b})),n.d(e,"O",(function(){return y})),n.d(e,"o",(function(){return m})),n.d(e,"Y",(function(){return v})),n.d(e,"p",(function(){return w})),n.d(e,"cb",(function(){return _})),n.d(e,"t",(function(){return k})),n.d(e,"fb",(function(){return x})),n.d(e,"x",(function(){return A})),n.d(e,"ib",(function(){return E})),n.d(e,"y",(function(){return M})),n.d(e,"jb",(function(){return S})),n.d(e,"A",(function(){return O})),n.d(e,"lb",(function(){return T})),n.d(e,"B",(function(){return C})),n.d(e,"mb",(function(){return D})),n.d(e,"E",(function(){return I})),n.d(e,"rb",(function(){return N})),n.d(e,"c",(function(){return j})),n.d(e,"P",(function(){return R})),n.d(e,"d",(function(){return B})),n.d(e,"Q",(function(){return L})),n.d(e,"h",(function(){return F})),n.d(e,"T",(function(){return P})),n.d(e,"m",(function(){return U})),n.d(e,"W",(function(){return q})),n.d(e,"s",(function(){return Y})),n.d(e,"eb",(function(){return z})),n.d(e,"r",(function(){return G})),n.d(e,"db",(function(){return V})),n.d(e,"u",(function(){return W})),n.d(e,"gb",(function(){return H})),n.d(e,"z",(function(){return K})),n.d(e,"kb",(function(){return $})),n.d(e,"J",(function(){return X})),n.d(e,"ub",(function(){return 
Z})),n.d(e,"I",(function(){return Q})),n.d(e,"tb",(function(){return J})),n.d(e,"K",(function(){return et})),n.d(e,"vb",(function(){return tt})),n.d(e,"L",(function(){return rt})),n.d(e,"wb",(function(){return nt})),n.d(e,"a",(function(){return ot})),n.d(e,"N",(function(){return it})),n.d(e,"i",(function(){return ut})),n.d(e,"U",(function(){return at})),n.d(e,"j",(function(){return ct})),n.d(e,"V",(function(){return st})),n.d(e,"v",(function(){return lt})),n.d(e,"hb",(function(){return ft})),n.d(e,"C",(function(){return dt})),n.d(e,"nb",(function(){return ht})),n.d(e,"n",(function(){return gt})),n.d(e,"X",(function(){return pt})),n.d(e,"e",(function(){return yt})),n.d(e,"g",(function(){return mt})),n.d(e,"w",(function(){return kt})),n.d(e,"H",(function(){return _t})),n.d(e,"f",(function(){return wt})),n.d(e,"D",(function(){return Tt})),n.d(e,"F",(function(){return Ot})),n.d(e,"G",(function(){return Ct})),n.d(e,"l",(function(){return Nt})),n.d(e,"k",(function(){return It})),n.d(e,"q",(function(){return Rt}))},function(t,e,n){"use strict";function r(t,e){return t.parent===e.parent?1:2}function i(t,e){return t+e.x}function o(t,e){return Math.max(t,e.y)}var a=function(){var t=r,e=1,n=1,a=!1;function u(r){var u,s=0;r.eachAfter((function(e){var n=e.children;n?(e.x=function(t){return t.reduce(i,0)/t.length}(n),e.y=function(t){return 1+t.reduce(o,0)}(n)):(e.x=u?s+=t(e,u):0,e.y=0,u=e)}));var c=function(t){for(var e;e=t.children;)t=e[0];return t}(r),f=function(t){for(var e;e=t.children;)t=e[e.length-1];return t}(r),l=c.x-t(c,f)/2,h=f.x+t(f,c)/2;return r.eachAfter(a?function(t){t.x=(t.x-r.x)*e,t.y=(r.y-t.y)*n}:function(t){t.x=(t.x-l)/(h-l)*e,t.y=(1-(r.y?t.y/r.y:1))*n})}return u.separation=function(e){return arguments.length?(t=e,u):t},u.size=function(t){return arguments.length?(a=!1,e=+t[0],n=+t[1],u):a?null:[e,n]},u.nodeSize=function(t){return arguments.length?(a=!0,e=+t[0],n=+t[1],u):a?[e,n]:null},u};function u(t){var 
e=0,n=t.children,r=n&&n.length;if(r)for(;--r>=0;)e+=n[r].value;else e=1;t.value=e}function s(t,e){var n,r,i,o,a,u=new h(t),s=+t.value&&(u.value=t.value),f=[u];for(null==e&&(e=c);n=f.pop();)if(s&&(n.value=+n.data.value),(i=e(n.data))&&(a=i.length))for(n.children=new Array(a),o=a-1;o>=0;--o)f.push(r=n.children[o]=new h(i[o])),r.parent=n,r.depth=n.depth+1;return u.eachBefore(l)}function c(t){return t.children}function f(t){t.data=t.data.data}function l(t){var e=0;do{t.height=e}while((t=t.parent)&&t.height<++e)}function h(t){this.data=t,this.depth=this.height=0,this.parent=null}h.prototype=s.prototype={constructor:h,count:function(){return this.eachAfter(u)},each:function(t){var e,n,r,i,o=this,a=[o];do{for(e=a.reverse(),a=[];o=e.pop();)if(t(o),n=o.children)for(r=0,i=n.length;r=0;--n)i.push(e[n]);return this},sum:function(t){return this.eachAfter((function(e){for(var n=+t(e.data)||0,r=e.children,i=r&&r.length;--i>=0;)n+=r[i].value;e.value=n}))},sort:function(t){return this.eachBefore((function(e){e.children&&e.children.sort(t)}))},path:function(t){for(var e=this,n=function(t,e){if(t===e)return t;var n=t.ancestors(),r=e.ancestors(),i=null;t=n.pop(),e=r.pop();for(;t===e;)i=t,t=n.pop(),e=r.pop();return i}(e,t),r=[e];e!==n;)e=e.parent,r.push(e);for(var i=r.length;t!==n;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,e=[t];t=t.parent;)e.push(t);return e},descendants:function(){var t=[];return this.each((function(e){t.push(e)})),t},leaves:function(){var t=[];return this.eachBefore((function(e){e.children||t.push(e)})),t},links:function(){var t=this,e=[];return t.each((function(n){n!==t&&e.push({source:n.parent,target:n})})),e},copy:function(){return s(this).eachBefore(f)}};var d=Array.prototype.slice;var p=function(t){for(var e,n,r=0,i=(t=function(t){for(var e,n,r=t.length;r;)n=Math.random()*r--|0,e=t[r],t[r]=t[n],t[n]=e;return t}(d.call(t))).length,o=[];r0&&n*n>r*r+i*i}function v(t,e){for(var 
n=0;n(a*=a)?(r=(c+a-i)/(2*c),o=Math.sqrt(Math.max(0,a/c-r*r)),n.x=t.x-r*u-o*s,n.y=t.y-r*s+o*u):(r=(c+i-a)/(2*c),o=Math.sqrt(Math.max(0,i/c-r*r)),n.x=e.x+r*u-o*s,n.y=e.y+r*s+o*u)):(n.x=e.x+n.r,n.y=e.y)}function k(t,e){var n=t.r+e.r-1e-6,r=e.x-t.x,i=e.y-t.y;return n>0&&n*n>r*r+i*i}function E(t){var e=t._,n=t.next._,r=e.r+n.r,i=(e.x*n.r+n.x*e.r)/r,o=(e.y*n.r+n.y*e.r)/r;return i*i+o*o}function A(t){this._=t,this.next=null,this.previous=null}function S(t){if(!(i=t.length))return 0;var e,n,r,i,o,a,u,s,c,f,l;if((e=t[0]).x=0,e.y=0,!(i>1))return e.r;if(n=t[1],e.x=-n.r,n.x=e.r,n.y=0,!(i>2))return e.r+n.r;x(n,e,r=t[2]),e=new A(e),n=new A(n),r=new A(r),e.next=r.previous=n,n.next=e.previous=r,r.next=n.previous=e;t:for(u=3;u0)throw new Error("cycle");return o}return n.id=function(e){return arguments.length?(t=O(e),n):t},n.parentId=function(t){return arguments.length?(e=O(t),n):e},n};function G(t,e){return t.parent===e.parent?1:2}function H(t){var e=t.children;return e?e[0]:t.t}function W(t){var e=t.children;return e?e[e.length-1]:t.t}function $(t,e,n){var r=n/(e.i-t.i);e.c-=r,e.s+=n,t.c+=r,e.z+=n,e.m+=n}function K(t,e,n){return t.a.parent===e.parent?t.a:n}function Z(t,e){this._=t,this.parent=null,this.children=null,this.A=null,this.a=this,this.z=0,this.m=0,this.c=0,this.s=0,this.t=null,this.i=e}Z.prototype=Object.create(h.prototype);var X=function(){var t=G,e=1,n=1,r=null;function i(i){var s=function(t){for(var e,n,r,i,o,a=new Z(t,0),u=[a];e=u.pop();)if(r=e._.children)for(e.children=new Array(o=r.length),i=o-1;i>=0;--i)u.push(n=e.children[i]=new Z(r[i],i)),n.parent=e;return(a.parent=new Z(null,0)).children=[a],a}(i);if(s.eachAfter(o),s.parent.m=-s.z,s.eachBefore(a),r)i.eachBefore(u);else{var c=i,f=i,l=i;i.eachBefore((function(t){t.xf.x&&(f=t),t.depth>l.depth&&(l=t)}));var h=c===f?1:t(c,f)/2,d=h-c.x,p=e/(f.x+h+d),g=n/(l.depth||1);i.eachBefore((function(t){t.x=(t.x+d)*p,t.y=t.depth*g}))}return i}function o(e){var 
n=e.children,r=e.parent.children,i=e.i?r[e.i-1]:null;if(n){!function(t){for(var e,n=0,r=0,i=t.children,o=i.length;--o>=0;)(e=i[o]).z+=n,e.m+=n,n+=e.s+(r+=e.c)}(e);var o=(n[0].z+n[n.length-1].z)/2;i?(e.z=i.z+t(e._,i._),e.m=e.z-o):e.z=o}else i&&(e.z=i.z+t(e._,i._));e.parent.A=function(e,n,r){if(n){for(var i,o=e,a=e,u=n,s=o.parent.children[0],c=o.m,f=a.m,l=u.m,h=s.m;u=W(u),o=H(o),u&&o;)s=H(s),(a=W(a)).a=e,(i=u.z+l-o.z-c+t(u._,o._))>0&&($(K(u,e,r),e,i),c+=i,f+=i),l+=u.m,c+=o.m,h+=s.m,f+=a.m;u&&!W(a)&&(a.t=u,a.m+=l-f),o&&!H(s)&&(s.t=o,s.m+=c-h,r=e)}return r}(e,i,e.parent.A||r[0])}function a(t){t._.x=t.z+t.parent.m,t.m+=t.parent.m}function u(t){t.x*=e,t.y=t.depth*n}return i.separation=function(e){return arguments.length?(t=e,i):t},i.size=function(t){return arguments.length?(r=!1,e=+t[0],n=+t[1],i):r?null:[e,n]},i.nodeSize=function(t){return arguments.length?(r=!0,e=+t[0],n=+t[1],i):r?[e,n]:null},i},J=function(t,e,n,r,i){for(var o,a=t.children,u=-1,s=a.length,c=t.value&&(i-n)/t.value;++uh&&(h=u),y=f*f*g,(d=Math.max(h/y,y/l))>p){f-=u;break}p=d}b.push(a={value:f,dice:s1?e:1)},n}(Q),nt=function(){var t=et,e=!1,n=1,r=1,i=[0],o=D,a=D,u=D,s=D,c=D;function f(t){return t.x0=t.y0=0,t.x1=n,t.y1=r,t.eachBefore(l),i=[0],e&&t.eachBefore(B),t}function l(e){var n=i[e.depth],r=e.x0+n,f=e.y0+n,l=e.x1-n,h=e.y1-n;l=n-1){var f=u[e];return f.x0=i,f.y0=o,f.x1=a,void(f.y1=s)}var l=c[e],h=r/2+l,d=e+1,p=n-1;for(;d>>1;c[g]s-o){var v=(i*b+a*y)/r;t(e,d,y,i,o,v,s),t(d,n,b,v,o,a,s)}else{var m=(o*b+s*y)/r;t(e,d,y,i,o,a,m),t(d,n,b,i,m,a,s)}}(0,s,t.value,e,n,r,i)},it=function(t,e,n,r,i){(1&t.depth?J:P)(t,e,n,r,i)},ot=function t(e){function n(t,n,r,i,o){if((a=t._squarify)&&a.ratio===e)for(var a,u,s,c,f,l=-1,h=a.length,d=t.value;++l1?e:1)},n}(Q);n.d(e,"a",(function(){return a})),n.d(e,"b",(function(){return s})),n.d(e,"c",(function(){return I})),n.d(e,"e",(function(){return M})),n.d(e,"d",(function(){return p})),n.d(e,"f",(function(){return F})),n.d(e,"g",(function(){return 
V})),n.d(e,"h",(function(){return X})),n.d(e,"i",(function(){return nt})),n.d(e,"j",(function(){return rt})),n.d(e,"k",(function(){return P})),n.d(e,"m",(function(){return J})),n.d(e,"n",(function(){return it})),n.d(e,"o",(function(){return et})),n.d(e,"l",(function(){return ot}))},function(t,e,n){"use strict";var r=n(0);function i(t,e){switch(arguments.length){case 0:break;case 1:this.range(t);break;default:this.range(e).domain(t)}return this}function o(t,e){switch(arguments.length){case 0:break;case 1:this.interpolator(t);break;default:this.interpolator(e).domain(t)}return this}var a=n(39),u=Array.prototype,s=u.map,c=u.slice,f={name:"implicit"};function l(){var t=Object(a.c)(),e=[],n=[],r=f;function o(i){var o=i+"",a=t.get(o);if(!a){if(r!==f)return r;t.set(o,a=e.push(i))}return n[(a-1)%n.length]}return o.domain=function(n){if(!arguments.length)return e.slice();e=[],t=Object(a.c)();for(var r,i,u=-1,s=n.length;++ur&&(e=n,n=r,r=e),function(t){return Math.max(n,Math.min(r,t))}}function k(t,e,n){var r=t[0],i=t[1],o=e[0],a=e[1];return i2?E:k,i=o=null,d}function d(e){return isNaN(e=+e)?n:(i||(i=r(a.map(t),u,f)))(t(l(e)))}return d.invert=function(n){return l(e((o||(o=r(u,a.map(t),y.a)))(n)))},d.domain=function(t){return arguments.length?(a=s.call(t,v),l===_||(l=x(a)),h()):a.slice()},d.range=function(t){return arguments.length?(u=c.call(t),h()):u.slice()},d.rangeRound=function(t){return u=c.call(t),f=b.a,h()},d.clamp=function(t){return arguments.length?(l=t?x(a):_,d):l!==_},d.interpolate=function(t){return arguments.length?(f=t,h()):f},d.unknown=function(t){return arguments.length?(n=t,d):n},function(n,r){return t=n,e=r,h()}}function M(t,e){return S()(t,e)}var T=n(111),O=n(288),D=n(145),C=n(289),N=n(287),I=function(t,e,n,i){var o,a=Object(r.A)(t,e,n);switch((i=Object(T.b)(null==i?",f":i)).type){case"s":var u=Math.max(Math.abs(t),Math.abs(e));return 
null!=i.precision||isNaN(o=Object(O.a)(a,u))||(i.precision=o),Object(D.c)(i,u);case"":case"e":case"g":case"p":case"r":null!=i.precision||isNaN(o=Object(C.a)(a,Math.max(Math.abs(t),Math.abs(e))))||(i.precision=o-("e"===i.type));break;case"f":case"%":null!=i.precision||isNaN(o=Object(N.a)(a))||(i.precision=o-2*("%"===i.type))}return Object(D.b)(i)};function R(t){var e=t.domain;return t.ticks=function(t){var n=e();return Object(r.B)(n[0],n[n.length-1],null==t?10:t)},t.tickFormat=function(t,n){var r=e();return I(r[0],r[r.length-1],null==t?10:t,n)},t.nice=function(n){null==n&&(n=10);var i,o=e(),a=0,u=o.length-1,s=o[a],c=o[u];return c0?(s=Math.floor(s/i)*i,c=Math.ceil(c/i)*i,i=Object(r.z)(s,c,n)):i<0&&(s=Math.ceil(s*i)/i,c=Math.floor(c*i)/i,i=Object(r.z)(s,c,n)),i>0?(o[a]=Math.floor(s/i)*i,o[u]=Math.ceil(c/i)*i,e(o)):i<0&&(o[a]=Math.ceil(s*i)/i,o[u]=Math.floor(c*i)/i,e(o)),t},t}function j(){var t=M(_,_);return t.copy=function(){return A(t,j())},i.apply(t,arguments),R(t)}function L(t){var e;function n(t){return isNaN(t=+t)?e:t}return n.invert=n,n.domain=n.range=function(e){return arguments.length?(t=s.call(e,v),n):t.slice()},n.unknown=function(t){return arguments.length?(e=t,n):e},n.copy=function(){return L(t).unknown(e)},t=arguments.length?s.call(t,v):[0,1],R(n)}var B=function(t,e){var n,r=0,i=(t=t.slice()).length-1,o=t[r],a=t[i];return a0){for(;dc)break;y.push(h)}}else for(;d=1;--l)if(!((h=f*l)c)break;y.push(h)}}else y=Object(r.B)(d,p,Math.min(p-d,g)).map(n);return i?y.reverse():y},i.tickFormat=function(t,r){if(null==r&&(r=10===a?".0e":","),"function"!=typeof r&&(r=Object(D.b)(r)),t===1/0)return r;null==t&&(t=10);var o=Math.max(1,a*t/i.ticks().length);return function(t){var i=t/n(Math.round(e(t)));return i*a0?o[r-1]:e[0],r=o?[a[o-1],n]:[a[r-1],a[r]]},s.unknown=function(e){return arguments.length?(t=e,s):s},s.thresholds=function(){return a.slice()},s.copy=function(){return rt().domain([e,n]).range(u).unknown(t)},i.apply(R(s),arguments)}function it(){var 
t,e=[.5],n=[0,1],o=1;function a(i){return i<=i?n[Object(r.b)(e,i,0,o)]:t}return a.domain=function(t){return arguments.length?(e=c.call(t),o=Math.min(e.length,n.length-1),a):e.slice()},a.range=function(t){return arguments.length?(n=c.call(t),o=Math.min(e.length,n.length-1),a):n.slice()},a.invertExtent=function(t){var r=n.indexOf(t);return[e[r-1],e[r]]},a.unknown=function(e){return arguments.length?(t=e,a):t},a.copy=function(){return it().domain(e).range(n).unknown(t)},i.apply(a,arguments)}var ot=n(70),at=n(218),ut=n(9),st=n(141),ct=n(219),ft=n(220),lt=n(122),ht=n(123),dt=n(46);function pt(t){return new Date(t)}function gt(t){return t instanceof Date?+t:+new Date(+t)}function yt(t,e,n,i,o,a,u,c,f){var l=M(_,_),h=l.invert,d=l.domain,p=f(".%L"),g=f(":%S"),y=f("%I:%M"),b=f("%I %p"),v=f("%a %d"),m=f("%b %d"),w=f("%B"),x=f("%Y"),k=[[u,1,1e3],[u,5,5e3],[u,15,15e3],[u,30,3e4],[a,1,6e4],[a,5,3e5],[a,15,9e5],[a,30,18e5],[o,1,36e5],[o,3,108e5],[o,6,216e5],[o,12,432e5],[i,1,864e5],[i,2,1728e5],[n,1,6048e5],[e,1,2592e6],[e,3,7776e6],[t,1,31536e6]];function E(r){return(u(r)h+c||id+c||af.index){var l=h-u.x-u.vx,y=d-u.y-u.vy,b=l*l+y*y;bt.r&&(t.r=t[e].r)}function h(){if(e){var r,i,o=e.length;for(n=new Array(o),r=0;r1?(null==n?u.remove(t):u.set(t,y(n)),e):u.get(t)},find:function(e,n,r){var i,o,a,u,s,c=0,f=t.length;for(null==r?r=1/0:r*=r,c=0;c1?(c.on(t,n),e):c.on(t)}}},_=function(){var t,e,n,r,u=i(-30),s=1,c=1/0,f=.81;function l(r){var i,o=t.length,u=Object(a.a)(t,y,b).visitAfter(d);for(n=r,i=0;i=c)){(t.data!==e||t.next)&&(0===l&&(p+=(l=o())*l),0===h&&(p+=(h=o())*h),pr!=p>r&&n<(d-f)*(r-l)/(p-l)+f&&(i=-i)}return i}function c(t,e,n){var r,i,o,a;return function(t,e,n){return(e[0]-t[0])*(n[1]-t[1])==(n[0]-t[0])*(e[1]-t[1])}(t,e,n)&&(i=t[r=+(t[0]===e[0])],o=n[r],a=e[r],i<=o&&o<=a||a<=o&&o<=i)}var 
f=function(){},l=[[],[[[1,1.5],[.5,1]]],[[[1.5,1],[1,1.5]]],[[[1.5,1],[.5,1]]],[[[1,.5],[1.5,1]]],[[[1,1.5],[.5,1]],[[1,.5],[1.5,1]]],[[[1,.5],[1,1.5]]],[[[1,.5],[.5,1]]],[[[.5,1],[1,.5]]],[[[1,1.5],[1,.5]]],[[[.5,1],[1,.5]],[[1.5,1],[1,1.5]]],[[[1.5,1],[1,.5]]],[[[.5,1],[1.5,1]]],[[[1,1.5],[1.5,1]]],[[[.5,1],[1,1.5]]],[]],h=function(){var t=1,e=1,n=r.y,s=p;function c(t){var e=n(t);if(Array.isArray(e))e=e.slice().sort(o);else{var i=Object(r.i)(t),a=i[0],u=i[1];e=Object(r.A)(a,u,e),e=Object(r.s)(Math.floor(a/e)*e,Math.floor(u/e)*e,e)}return e.map((function(e){return h(t,e)}))}function h(n,r){var i=[],o=[];return function(n,r,i){var o,a,u,s,c,f,h=new Array,p=new Array;o=a=-1,s=n[0]>=r,l[s<<1].forEach(g);for(;++o=r,l[u|s<<1].forEach(g);l[s<<0].forEach(g);for(;++a=r,c=n[a*t]>=r,l[s<<1|c<<2].forEach(g);++o=r,f=c,c=n[a*t+o+1]>=r,l[u|s<<1|c<<2|f<<3].forEach(g);l[s|c<<3].forEach(g)}o=-1,c=n[a*t]>=r,l[c<<2].forEach(g);for(;++o=r,l[c<<2|f<<3].forEach(g);function g(t){var e,n,r=[t[0][0]+o,t[0][1]+a],u=[t[1][0]+o,t[1][1]+a],s=d(r),c=d(u);(e=p[s])?(n=h[c])?(delete p[e.end],delete h[n.start],e===n?(e.ring.push(u),i(e.ring)):h[e.start]=p[n.end]={start:e.start,end:n.end,ring:e.ring.concat(n.ring)}):(delete p[e.end],e.ring.push(u),p[e.end=c]=e):(e=h[c])?(n=p[s])?(delete h[e.start],delete p[n.end],e===n?(e.ring.push(u),i(e.ring)):h[n.start]=p[e.end]={start:n.start,end:e.end,ring:n.ring.concat(e.ring)}):(delete h[e.start],e.ring.unshift(r),h[e.start=s]=e):h[s]=p[c]={start:s,end:c,ring:[r,u]}}l[c<<3].forEach(g)}(n,r,(function(t){s(t,n,r),function(t){for(var e=0,n=t.length,r=t[n-1][1]*t[0][0]-t[n-1][0]*t[0][1];++e0?i.push([t]):o.push(t)})),o.forEach((function(t){for(var e,n=0,r=i.length;n0&&a0&&u0&&i>0))throw new Error("invalid size");return t=r,e=i,c},c.thresholds=function(t){return arguments.length?(n="function"==typeof t?t:Array.isArray(t)?a(i.call(t)):a(t),c):n},c.smooth=function(t){return arguments.length?(s=t?p:f,c):s===p},c};function d(t,e,n){for(var 
r=t.width,i=t.height,o=1+(n<<1),a=0;a=n&&(u>=o&&(s-=t.data[u-o+a*r]),e.data[u-n+a*r]=s/Math.min(u+1,r-1+o-u,o))}function p(t,e,n){for(var r=t.width,i=t.height,o=1+(n<<1),a=0;a=n&&(u>=o&&(s-=t.data[a+(u-o)*r]),e.data[a+(u-n)*r]=s/Math.min(u+1,i-1+o-u,o))}function g(t){return t[0]}function y(t){return t[1]}function b(){return 1}var v=function(){var t=g,e=y,n=b,o=960,u=500,s=20,c=2,f=3*s,l=o+2*f>>c,v=u+2*f>>c,m=a(20);function _(i){var o=new Float32Array(l*v),a=new Float32Array(l*v);i.forEach((function(r,i,a){var u=+t(r,i,a)+f>>c,s=+e(r,i,a)+f>>c,h=+n(r,i,a);u>=0&&u=0&&s>c),p({width:l,height:v,data:a},{width:l,height:v,data:o},s>>c),d({width:l,height:v,data:o},{width:l,height:v,data:a},s>>c),p({width:l,height:v,data:a},{width:l,height:v,data:o},s>>c),d({width:l,height:v,data:o},{width:l,height:v,data:a},s>>c),p({width:l,height:v,data:a},{width:l,height:v,data:o},s>>c);var u=m(o);if(!Array.isArray(u)){var g=Object(r.k)(o);u=Object(r.A)(0,g,u),(u=Object(r.s)(0,Math.floor(g/u)*u,u)).shift()}return h().thresholds(u).size([l,v])(o).map(w)}function w(t){return t.value*=Math.pow(2,-2*c),t.coordinates.forEach(x),t}function x(t){t.forEach(k)}function k(t){t.forEach(E)}function E(t){t[0]=t[0]*Math.pow(2,c)-f,t[1]=t[1]*Math.pow(2,c)-f}function A(){return l=o+2*(f=3*s)>>c,v=u+2*f>>c,_}return _.x=function(e){return arguments.length?(t="function"==typeof e?e:a(+e),_):t},_.y=function(t){return arguments.length?(e="function"==typeof t?t:a(+t),_):e},_.weight=function(t){return arguments.length?(n="function"==typeof t?t:a(+t),_):n},_.size=function(t){if(!arguments.length)return[o,u];var e=Math.ceil(t[0]),n=Math.ceil(t[1]);if(!(e>=0||e>=0))throw new Error("invalid size");return o=e,u=n,A()},_.cellSize=function(t){if(!arguments.length)return 1<=1))throw new Error("invalid cell size");return c=Math.floor(Math.log(t)/Math.LN2),A()},_.thresholds=function(t){return arguments.length?(m="function"==typeof 
t?t:Array.isArray(t)?a(i.call(t)):a(t),_):m},_.bandwidth=function(t){if(!arguments.length)return Math.sqrt(s*(s+1));if(!((t=+t)>=0))throw new Error("invalid bandwidth");return s=Math.round((Math.sqrt(4*t*t+1)-1)/2),A()},_};n.d(e,"b",(function(){return h})),n.d(e,"a",(function(){return v}))},function(t,e,n){"use strict";var r=function(t){return function(){return t}};function i(t){return t[0]}function o(t){return t[1]}function a(){this._=null}function u(t){t.U=t.C=t.L=t.R=t.P=t.N=null}function s(t,e){var n=e,r=e.R,i=n.U;i?i.L===n?i.L=r:i.R=r:t._=r,r.U=i,n.U=r,n.R=r.L,n.R&&(n.R.U=n),r.L=n}function c(t,e){var n=e,r=e.L,i=n.U;i?i.L===n?i.L=r:i.R=r:t._=r,r.U=i,n.U=r,n.L=r.R,n.L&&(n.L.U=n),r.R=n}function f(t){for(;t.L;)t=t.L;return t}a.prototype={constructor:a,insert:function(t,e){var n,r,i;if(t){if(e.P=t,e.N=t.N,t.N&&(t.N.P=e),t.N=e,t.R){for(t=t.R;t.L;)t=t.L;t.L=e}else t.R=e;n=t}else this._?(t=f(this._),e.P=null,e.N=t,t.P=t.L=e,n=t):(e.P=e.N=null,this._=e,n=null);for(e.L=e.R=null,e.U=n,e.C=!0,t=e;n&&n.C;)n===(r=n.U).L?(i=r.R)&&i.C?(n.C=i.C=!1,r.C=!0,t=r):(t===n.R&&(s(this,n),n=(t=n).U),n.C=!1,r.C=!0,c(this,r)):(i=r.L)&&i.C?(n.C=i.C=!1,r.C=!0,t=r):(t===n.L&&(c(this,n),n=(t=n).U),n.C=!1,r.C=!0,s(this,r)),n=t.U;this._.C=!1},remove:function(t){t.N&&(t.N.P=t.P),t.P&&(t.P.N=t.N),t.N=t.P=null;var e,n,r,i=t.U,o=t.L,a=t.R;if(n=o?a?f(a):o:a,i?i.L===t?i.L=n:i.R=n:this._=n,o&&a?(r=n.C,n.C=t.C,n.L=o,o.U=n,n!==a?(i=n.U,n.U=t.U,t=n.R,i.L=t,n.R=a,a.U=n):(n.U=i,i=n,t=n.R)):(r=t.C,t=n),t&&(t.U=i),!r)if(t&&t.C)t.C=!1;else{do{if(t===this._)break;if(t===i.L){if((e=i.R).C&&(e.C=!1,i.C=!0,s(this,i),e=i.R),e.L&&e.L.C||e.R&&e.R.C){e.R&&e.R.C||(e.L.C=!1,e.C=!0,c(this,e),e=i.R),e.C=i.C,i.C=e.R.C=!1,s(this,i),t=this._;break}}else if((e=i.L).C&&(e.C=!1,i.C=!0,c(this,i),e=i.L),e.L&&e.L.C||e.R&&e.R.C){e.L&&e.L.C||(e.R.C=!1,e.C=!0,s(this,e),e=i.L),e.C=i.C,i.C=e.L.C=!1,c(this,i),t=this._;break}e.C=!0,t=i,i=i.U}while(!t.C);t&&(t.C=!1)}}};var l=a;function h(t,e,n,r){var i=[null,null],o=L.push(i)-1;return 
i.left=t,i.right=e,n&&p(i,t,e,n),r&&p(i,e,t,r),R[t.index].halfedges.push(o),R[e.index].halfedges.push(o),i}function d(t,e,n){var r=[e,n];return r.left=t,r}function p(t,e,n,r){t[0]||t[1]?t.left===n?t[1]=r:t[0]=r:(t[0]=r,t.left=e,t.right=n)}function g(t,e,n,r,i){var o,a=t[0],u=t[1],s=a[0],c=a[1],f=0,l=1,h=u[0]-s,d=u[1]-c;if(o=e-s,h||!(o>0)){if(o/=h,h<0){if(o0){if(o>l)return;o>f&&(f=o)}if(o=r-s,h||!(o<0)){if(o/=h,h<0){if(o>l)return;o>f&&(f=o)}else if(h>0){if(o0)){if(o/=d,d<0){if(o0){if(o>l)return;o>f&&(f=o)}if(o=i-c,d||!(o<0)){if(o/=d,d<0){if(o>l)return;o>f&&(f=o)}else if(d>0){if(o0||l<1)||(f>0&&(t[0]=[s+f*h,c+f*d]),l<1&&(t[1]=[s+l*h,c+l*d]),!0)}}}}}function y(t,e,n,r,i){var o=t[1];if(o)return!0;var a,u,s=t[0],c=t.left,f=t.right,l=c[0],h=c[1],d=f[0],p=f[1],g=(l+d)/2,y=(h+p)/2;if(p===h){if(g=r)return;if(l>d){if(s){if(s[1]>=i)return}else s=[g,n];o=[g,i]}else{if(s){if(s[1]1)if(l>d){if(s){if(s[1]>=i)return}else s=[(n-u)/a,n];o=[(i-u)/a,i]}else{if(s){if(s[1]=r)return}else s=[e,a*e+u];o=[r,a*r+u]}else{if(s){if(s[0]=-P)){var d=s*s+c*c,p=f*f+l*l,g=(l*d-c*p)/h,y=(s*p-f*d)/h,b=w.pop()||new x;b.arc=t,b.site=i,b.x=g+a,b.y=(b.cy=y+u)+Math.sqrt(g*g+y*y),t.circle=b;for(var v=null,m=j._;m;)if(b.yB)u=u.L;else{if(!((i=o-N(u,a))>B)){r>-B?(e=u.P,n=u):i>-B?(e=u,n=u.N):e=n=u;break}if(!u.R){e=u;break}u=u.R}!function(t){R[t.index]={site:t,halfedges:[]}}(t);var s=M(t);if(I.insert(e,s),e||n){if(e===n)return E(e),n=M(e.site),I.insert(s,n),s.edge=n.edge=h(e.site,s.site),k(e),void k(n);if(n){E(e),E(n);var c=e.site,f=c[0],l=c[1],d=t[0]-f,g=t[1]-l,y=n.site,b=y[0]-f,v=y[1]-l,m=2*(d*v-g*b),_=d*d+g*g,w=b*b+v*v,x=[(v*_-g*w)/m+f,(d*w-b*_)/m+l];p(n.edge,c,y,x),s.edge=h(c,t,null,x),n.edge=h(t,y,null,x),k(e),k(n)}else s.edge=h(e.site,s.site)}}function C(t,e){var n=t.site,r=n[0],i=n[1],o=i-e;if(!o)return r;var a=t.P;if(!a)return-1/0;var u=(n=a.site)[0],s=n[1],c=s-e;if(!c)return u;var f=u-r,l=1/o-1/c,h=f/c;return l?(-h+Math.sqrt(h*h-2*l*(f*f/(-2*c)-s+c/2+i-o/2)))/l+r:(r+u)/2}function N(t,e){var 
n=t.N;if(n)return C(n,e);var r=t.site;return r[1]===e?r[0]:1/0}var I,R,j,L,B=1e-6,P=1e-12;function F(t,e){return e[1]-t[1]||e[0]-t[0]}function q(t,e){var n,r,i,o=t.sort(F).pop();for(L=[],R=new Array(t.length),I=new l,j=new l;;)if(i=_,o&&(!i||o[1]B||Math.abs(i[0][1]-i[1][1])>B)||delete L[o]}(a,u,s,c),function(t,e,n,r){var i,o,a,u,s,c,f,l,h,p,g,y,b=R.length,_=!0;for(i=0;iB||Math.abs(y-h)>B)&&(s.splice(u,0,L.push(d(a,p,Math.abs(g-t)B?[t,Math.abs(l-t)B?[Math.abs(h-r)B?[n,Math.abs(l-n)B?[Math.abs(h-e)=u)return null;var s=t-i.site[0],c=e-i.site[1],f=s*s+c*c;do{i=o.cells[r=a],a=null,i.halfedges.forEach((function(n){var r=o.edges[n],u=r.left;if(u!==i.site&&u||(u=r.right)){var s=t-u[0],c=e-u[1],l=s*s+c*c;l1);return t+n*o*Math.sqrt(-2*Math.log(i)/i)}}return n.source=t,n}(r),a=function t(e){function n(){var t=o.source(e).apply(this,arguments);return function(){return Math.exp(t())}}return n.source=t,n}(r),u=function t(e){function n(t){return function(){for(var n=0,r=0;r1&&(e=t[o[a-2]],n=t[o[a-1]],r=t[u],(n[0]-e[0])*(r[1]-e[1])-(n[1]-e[1])*(r[0]-e[0])<=0);)--a;o[a++]=u}return o.slice(0,a)}var u=function(t){if((n=t.length)<3)return null;var e,n,r=new Array(n),i=new Array(n);for(e=0;e=0;--e)l.push(t[r[u[e]][2]]);for(e=+c;eu!=c>u&&a<(s-n)*(u-r)/(c-r)+n&&(f=!f),s=n,c=r;return f},c=function(t){for(var e,n,r=-1,i=t.length,o=t[i-1],a=o[0],u=o[1],s=0;++rr?(r+i)/2:Math.min(0,r)||Math.max(0,i),a>o?(o+a)/2:Math.min(0,o)||Math.max(0,a))}var E=function(){var t,e,n=v,g=m,E=k,A=w,S=x,M=[0,1/0],T=[[-1/0,-1/0],[1/0,1/0]],O=250,D=o.a,C=Object(r.a)("start","zoom","end"),N=0;function I(t){t.property("__zoom",_).on("wheel.zoom",q).on("mousedown.zoom",U).on("dblclick.zoom",z).filter(S).on("touchstart.zoom",Y).on("touchmove.zoom",V).on("touchend.zoom touchcancel.zoom",G).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function R(t,e){return(e=Math.max(M[0],Math.min(M[1],e)))===t.k?t:new d(e,t.x,t.y)}function j(t,e,n){var r=e[0]-n[0]*t.k,i=e[1]-n[1]*t.k;return 
r===t.x&&i===t.y?t:new d(t.k,r,i)}function L(t){return[(+t[0][0]+ +t[1][0])/2,(+t[0][1]+ +t[1][1])/2]}function B(t,e,n){t.on("start.zoom",(function(){P(this,arguments).start()})).on("interrupt.zoom end.zoom",(function(){P(this,arguments).end()})).tween("zoom",(function(){var t=this,r=arguments,i=P(t,r),o=g.apply(t,r),a=null==n?L(o):"function"==typeof n?n.apply(t,r):n,u=Math.max(o[1][0]-o[0][0],o[1][1]-o[0][1]),s=t.__zoom,c="function"==typeof e?e.apply(t,r):e,f=D(s.invert(a).concat(u/s.k),c.invert(a).concat(u/c.k));return function(t){if(1===t)t=c;else{var e=f(t),n=u/e[2];t=new d(n,a[0]-e[0]*n,a[1]-e[1]*n)}i.zoom(null,t)}}))}function P(t,e,n){return!n&&t.__zooming||new F(t,e)}function F(t,e){this.that=t,this.args=e,this.active=0,this.extent=g.apply(t,e),this.taps=0}function q(){if(n.apply(this,arguments)){var t=P(this,arguments),e=this.__zoom,r=Math.max(M[0],Math.min(M[1],e.k*Math.pow(2,A.apply(this,arguments)))),i=Object(u.a)(this);if(t.wheel)t.mouse[0][0]===i[0]&&t.mouse[0][1]===i[1]||(t.mouse[1]=e.invert(t.mouse[0]=i)),clearTimeout(t.wheel);else{if(e.k===r)return;t.mouse=[i,e.invert(i)],Object(f.b)(this),t.start()}b(),t.wheel=setTimeout(o,150),t.zoom("mouse",E(j(R(e,r),t.mouse[0],t.mouse[1]),t.extent,T))}function o(){t.wheel=null,t.end()}}function U(){if(!e&&n.apply(this,arguments)){var t=P(this,arguments,!0),r=Object(s.a)(a.c.view).on("mousemove.zoom",h,!0).on("mouseup.zoom",d,!0),o=Object(u.a)(this),c=a.c.clientX,l=a.c.clientY;Object(i.a)(a.c.view),y(),t.mouse=[o,this.__zoom.invert(o)],Object(f.b)(this),t.start()}function h(){if(b(),!t.moved){var e=a.c.clientX-c,n=a.c.clientY-l;t.moved=e*e+n*n>N}t.zoom("mouse",E(j(t.that.__zoom,t.mouse[0]=Object(u.a)(t.that),t.mouse[1]),t.extent,T))}function d(){r.on("mousemove.zoom mouseup.zoom",null),Object(i.b)(a.c.view,t.moved),b(),t.end()}}function z(){if(n.apply(this,arguments)){var 
t=this.__zoom,e=Object(u.a)(this),r=t.invert(e),i=t.k*(a.c.shiftKey?.5:2),o=E(j(R(t,i),e,r),g.apply(this,arguments),T);b(),O>0?Object(s.a)(this).transition().duration(O).call(B,o,e):Object(s.a)(this).call(I.transform,o)}}function Y(){if(n.apply(this,arguments)){var e,r,i,o,u=a.c.touches,s=u.length,l=P(this,arguments,a.c.changedTouches.length===s);for(y(),r=0;rMath.abs(t[1]-et[1])?I=!0:N=!0),et=t,C=!0,p(),ut()}function ut(){var t;switch(Z=et[0]-tt[0],X=et[1]-tt[1],P){case y:case g:U&&(Z=Math.max(H-n,Math.min($-h,Z)),r=n+Z,m=h+Z),z&&(X=Math.max(W-o,Math.min(K-_,X)),l=o+X,D=_+X);break;case b:U<0?(Z=Math.max(H-n,Math.min($-n,Z)),r=n+Z,m=h):U>0&&(Z=Math.max(H-h,Math.min($-h,Z)),r=n,m=h+Z),z<0?(X=Math.max(W-o,Math.min(K-o,X)),l=o+X,D=_):z>0&&(X=Math.max(W-_,Math.min(K-_,X)),l=o,D=_+X);break;case v:U&&(r=Math.max(H,Math.min($,n-Z*U)),m=Math.max(H,Math.min($,h+Z*U))),z&&(l=Math.max(W,Math.min(K,o-X*z)),D=Math.max(W,Math.min(K,_+X*z)))}m0&&(n=r-Z),z<0?_=D-X:z>0&&(o=l-X),P=y,it.attr("cursor",A.selection),ut());break;default:return}p()}function ft(){switch(u.c.keyCode){case 16:J&&(N=I=J=!1,ut());break;case 18:P===v&&(U<0?h=m:U>0&&(n=r),z<0?_=D:z>0&&(o=l),P=b,ut());break;case 32:P===y&&(u.c.altKey?(U&&(h=m-Z*U,n=r+Z*U),z&&(_=D-X*z,o=l+X*z),P=v):(U<0?h=m:U>0&&(n=r),z<0?_=D:z>0&&(o=l),P=b),it.attr("cursor",A[B]),ut());break;default:return}p()}}function Y(){q(this,arguments).moved()}function V(){q(this,arguments).ended()}function G(){var e=this.__brush||{selection:null};return e.extent=_(n.apply(this,arguments)),e.dim=t,e}return P.move=function(e,n){e.selection?e.on("start.brush",(function(){q(this,arguments).beforestart().start()})).on("interrupt.brush end.brush",(function(){q(this,arguments).end()})).tween("brush",(function(){var e=this,r=e.__brush,i=q(e,arguments),a=r.selection,u=t.input("function"==typeof n?n.apply(this,arguments):n,r.extent),s=Object(o.a)(a,u);function c(t){r.selection=1===t&&null===u?null:s(t),F.call(e),i.brush()}return 
null!==a&&null!==u?c:c(1)})):e.each((function(){var e=this,r=arguments,i=e.__brush,o=t.input("function"==typeof n?n.apply(e,r):n,i.extent),a=q(e,r).beforestart();Object(f.b)(e),i.selection=null===o?null:o,F.call(e),a.start().brush().end()}))},P.clear=function(t){P.move(t,null)},U.prototype={beforestart:function(){return 1==++this.active&&(this.state.emitter=this,this.starting=!0),this},start:function(){return this.starting?(this.starting=!1,this.emit("start")):this.emit("brush"),this},brush:function(){return this.emit("brush"),this},end:function(){return 0==--this.active&&(delete this.state.emitter,this.emit("end")),this},emit:function(e){Object(u.a)(new h(P,e,t.output(this.state.selection)),L.apply,L,[e,this.that,this.args])}},P.extent=function(t){return arguments.length?(n="function"==typeof t?t:l(_(t)),P):n},P.filter=function(t){return arguments.length?(a="function"==typeof t?t:l(!!t),P):a},P.touchable=function(t){return arguments.length?(m="function"==typeof t?t:l(!!t),P):m},P.handleSize=function(t){return arguments.length?(B=+t,P):B},P.keyModifiers=function(t){return arguments.length?(E=!!t,P):E},P.on=function(){var t=L.on.apply(L,arguments);return t===L?P:t},P}n.d(e,"a",(function(){return F})),n.d(e,"c",(function(){return B})),n.d(e,"d",(function(){return P})),n.d(e,"b",(function(){return L}))},function(t,e,n){"use strict";var r=Array.prototype.slice,i=function(t){return t};function o(t){return"translate("+(t+.5)+",0)"}function a(t){return"translate(0,"+(t+.5)+")"}function u(t){return function(e){return+t(e)}}function s(t){var e=Math.max(0,t.bandwidth()-1)/2;return t.round()&&(e=Math.round(e)),function(n){return+t(n)+e}}function c(){return!this.__axis}function f(t,e){var n=[],f=null,l=null,h=6,d=6,p=3,g=1===t||4===t?-1:1,y=4===t||2===t?"x":"y",b=1===t||3===t?o:a;function v(r){var 
o=null==f?e.ticks?e.ticks.apply(e,n):e.domain():f,a=null==l?e.tickFormat?e.tickFormat.apply(e,n):i:l,v=Math.max(h,0)+p,m=e.range(),_=+m[0]+.5,w=+m[m.length-1]+.5,x=(e.bandwidth?s:u)(e.copy()),k=r.selection?r.selection():r,E=k.selectAll(".domain").data([null]),A=k.selectAll(".tick").data(o,e).order(),S=A.exit(),M=A.enter().append("g").attr("class","tick"),T=A.select("line"),O=A.select("text");E=E.merge(E.enter().insert("path",".tick").attr("class","domain").attr("stroke","currentColor")),A=A.merge(M),T=T.merge(M.append("line").attr("stroke","currentColor").attr(y+"2",g*h)),O=O.merge(M.append("text").attr("fill","currentColor").attr(y,g*v).attr("dy",1===t?"0em":3===t?"0.71em":"0.32em")),r!==k&&(E=E.transition(r),A=A.transition(r),T=T.transition(r),O=O.transition(r),S=S.transition(r).attr("opacity",1e-6).attr("transform",(function(t){return isFinite(t=x(t))?b(t):this.getAttribute("transform")})),M.attr("opacity",1e-6).attr("transform",(function(t){var e=this.parentNode.__axis;return b(e&&isFinite(e=e(t))?e:x(t))}))),S.remove(),E.attr("d",4===t||2==t?d?"M"+g*d+","+_+"H0.5V"+w+"H"+g*d:"M0.5,"+_+"V"+w:d?"M"+_+","+g*d+"V0.5H"+w+"V"+g*d:"M"+_+",0.5H"+w),A.attr("opacity",1).attr("transform",(function(t){return b(x(t))})),T.attr(y+"2",g*h),O.attr(y,g*v).text(a),k.filter(c).attr("fill","none").attr("font-size",10).attr("font-family","sans-serif").attr("text-anchor",2===t?"start":4===t?"end":"middle"),k.each((function(){this.__axis=x}))}return v.scale=function(t){return arguments.length?(e=t,v):e},v.ticks=function(){return n=r.call(arguments),v},v.tickArguments=function(t){return arguments.length?(n=null==t?[]:r.call(t),v):n.slice()},v.tickValues=function(t){return arguments.length?(f=null==t?null:r.call(t),v):f&&f.slice()},v.tickFormat=function(t){return arguments.length?(l=t,v):l},v.tickSize=function(t){return arguments.length?(h=d=+t,v):h},v.tickSizeInner=function(t){return arguments.length?(h=+t,v):h},v.tickSizeOuter=function(t){return 
arguments.length?(d=+t,v):d},v.tickPadding=function(t){return arguments.length?(p=+t,v):p},v}function l(t){return f(1,t)}function h(t){return f(2,t)}function d(t){return f(3,t)}function p(t){return f(4,t)}n.d(e,"d",(function(){return l})),n.d(e,"c",(function(){return h})),n.d(e,"a",(function(){return d})),n.d(e,"b",(function(){return p}))},function(t,e,n){"use strict";var r=n(104);e.a=function(t){var e=t.length;return function(n){var i=Math.floor(((n%=1)<0?++n:n)*e),o=t[(i+e-1)%e],a=t[i%e],u=t[(i+1)%e],s=t[(i+2)%e];return Object(r.a)((n-i/e)*e,o,a,u,s)}}},function(t,e,n){"use strict";var r=n(23),i=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g,o=new RegExp(i.source,"g");e.a=function(t,e){var n,a,u,s=i.lastIndex=o.lastIndex=0,c=-1,f=[],l=[];for(t+="",e+="";(n=i.exec(t))&&(a=o.exec(e));)(u=a.index)>s&&(u=e.slice(s,u),f[c]?f[c]+=u:f[++c]=u),(n=n[0])===(a=a[0])?f[c]?f[c]+=a:f[++c]=a:(f[++c]=null,l.push({i:c,x:Object(r.a)(n,a)})),s=o.lastIndex;return s1?this.each((null==e?i:"function"==typeof e?a:o)(t,e,null==n?"":n)):u(this.node(),t)}},function(t,e,n){"use strict";e.a=function(t,e){var n=new Date;return t=+t,e=+e,function(r){return n.setTime(t*(1-r)+e*r),n}}},function(t,e,n){"use strict";n.d(e,"b",(function(){return o}));var r=n(108),i=n(69);function o(t,e){var n,i=e?e.length:0,o=t?Math.min(i,t.length):0,a=new Array(o),u=new Array(i);for(n=0;n=12)]},q:function(t){return 1+~~(t.getMonth()/3)},Q:xt,s:kt,S:K,u:Z,U:X,V:J,w:Q,W:tt,x:null,X:null,y:et,Y:nt,Z:rt,"%":wt},It={a:function(t){return p[t.getUTCDay()]},A:function(t){return h[t.getUTCDay()]},b:function(t){return y[t.getUTCMonth()]},B:function(t){return g[t.getUTCMonth()]},c:null,d:it,e:it,f:ct,H:ot,I:at,j:ut,L:st,m:ft,M:lt,p:function(t){return s[+(t.getUTCHours()>=12)]},q:function(t){return 1+~~(t.getUTCMonth()/3)},Q:xt,s:kt,S:ht,u:dt,U:pt,V:gt,w:yt,W:bt,x:null,X:null,y:vt,Y:mt,Z:_t,"%":wt},Rt={a:function(t,e,n){var r=St.exec(e.slice(n));return r?(t.w=Mt[r[0].toLowerCase()],n+r[0].length):-1},A:function(t,e,n){var 
r=Et.exec(e.slice(n));return r?(t.w=At[r[0].toLowerCase()],n+r[0].length):-1},b:function(t,e,n){var r=Dt.exec(e.slice(n));return r?(t.m=Ct[r[0].toLowerCase()],n+r[0].length):-1},B:function(t,e,n){var r=Tt.exec(e.slice(n));return r?(t.m=Ot[r[0].toLowerCase()],n+r[0].length):-1},c:function(t,n,r){return Bt(t,e,n,r)},d:C,e:C,f:B,H:I,I:I,j:N,L:L,m:D,M:R,p:function(t,e,n){var r=b.exec(e.slice(n));return r?(t.p=v[r[0].toLowerCase()],n+r[0].length):-1},q:O,Q:F,s:q,S:j,u:x,U:k,V:E,w:w,W:A,x:function(t,e,r){return Bt(t,n,e,r)},X:function(t,e,n){return Bt(t,u,e,n)},y:M,Y:S,Z:T,"%":P};function jt(t,e){return function(n){var r,i,o,a=[],u=-1,s=0,c=t.length;for(n instanceof Date||(n=new Date(+n));++u53)return null;"w"in h||(h.w=1),"Z"in h?(s=(u=f(l(h.y,0,1))).getUTCDay(),u=s>4||0===s?r.c.ceil(u):Object(r.c)(u),u=i.a.offset(u,7*(h.V-1)),h.y=u.getUTCFullYear(),h.m=u.getUTCMonth(),h.d=u.getUTCDate()+(h.w+6)%7):(s=(u=c(l(h.y,0,1))).getDay(),u=s>4||0===s?o.c.ceil(u):Object(o.c)(u),u=a.b.offset(u,7*(h.V-1)),h.y=u.getFullYear(),h.m=u.getMonth(),h.d=u.getDate()+(h.w+6)%7)}else("W"in h||"U"in h)&&("w"in h||(h.w="u"in h?h.u%7:"W"in h?1:0),s="Z"in h?f(l(h.y,0,1)).getUTCDay():c(l(h.y,0,1)).getDay(),h.m=0,h.d="W"in h?(h.w+6)%7+7*h.W-(s+5)%7:h.w+7*h.U-(s+6)%7);return"Z"in h?(h.H+=h.Z/100|0,h.M+=h.Z%100,f(h)):c(h)}}function Bt(t,e,n,r){for(var i,o,a=0,u=e.length,s=n.length;a=s)return-1;if(37===(i=e.charCodeAt(a++))){if(i=e.charAt(a++),!(o=Rt[i in d?e.charAt(a++):i])||(r=o(t,n,r))<0)return-1}else if(i!=n.charCodeAt(r++))return-1}return r}return(Nt.x=jt(n,Nt),Nt.X=jt(u,Nt),Nt.c=jt(e,Nt),It.x=jt(n,It),It.X=jt(u,It),It.c=jt(e,It),{format:function(t){var e=jt(t+="",Nt);return e.toString=function(){return t},e},parse:function(t){var e=Lt(t+="",!1);return e.toString=function(){return t},e},utcFormat:function(t){var e=jt(t+="",It);return e.toString=function(){return t},e},utcParse:function(t){var e=Lt(t+="",!0);return e.toString=function(){return t},e}})}var d={"-":"",_:" 
",0:"0"},p=/^\s*\d+/,g=/^%/,y=/[\\^$*+?|[\]().{}]/g;function b(t,e,n){var r=t<0?"-":"",i=(r?-t:t)+"",o=i.length;return r+(o68?1900:2e3),n+r[0].length):-1}function T(t,e,n){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(e.slice(n,n+6));return r?(t.Z=r[1]?0:-(r[2]+(r[3]||"00")),n+r[0].length):-1}function O(t,e,n){var r=p.exec(e.slice(n,n+1));return r?(t.q=3*r[0]-3,n+r[0].length):-1}function D(t,e,n){var r=p.exec(e.slice(n,n+2));return r?(t.m=r[0]-1,n+r[0].length):-1}function C(t,e,n){var r=p.exec(e.slice(n,n+2));return r?(t.d=+r[0],n+r[0].length):-1}function N(t,e,n){var r=p.exec(e.slice(n,n+3));return r?(t.m=0,t.d=+r[0],n+r[0].length):-1}function I(t,e,n){var r=p.exec(e.slice(n,n+2));return r?(t.H=+r[0],n+r[0].length):-1}function R(t,e,n){var r=p.exec(e.slice(n,n+2));return r?(t.M=+r[0],n+r[0].length):-1}function j(t,e,n){var r=p.exec(e.slice(n,n+2));return r?(t.S=+r[0],n+r[0].length):-1}function L(t,e,n){var r=p.exec(e.slice(n,n+3));return r?(t.L=+r[0],n+r[0].length):-1}function B(t,e,n){var r=p.exec(e.slice(n,n+6));return r?(t.L=Math.floor(r[0]/1e3),n+r[0].length):-1}function P(t,e,n){var r=g.exec(e.slice(n,n+1));return r?n+r[0].length:-1}function F(t,e,n){var r=p.exec(e.slice(n));return r?(t.Q=+r[0],n+r[0].length):-1}function q(t,e,n){var r=p.exec(e.slice(n));return r?(t.s=+r[0],n+r[0].length):-1}function U(t,e){return b(t.getDate(),e,2)}function z(t,e){return b(t.getHours(),e,2)}function Y(t,e){return b(t.getHours()%12||12,e,2)}function V(t,e){return b(1+a.b.count(Object(u.a)(t),t),e,3)}function G(t,e){return b(t.getMilliseconds(),e,3)}function H(t,e){return G(t,e)+"000"}function W(t,e){return b(t.getMonth()+1,e,2)}function $(t,e){return b(t.getMinutes(),e,2)}function K(t,e){return b(t.getSeconds(),e,2)}function Z(t){var e=t.getDay();return 0===e?7:e}function X(t,e){return b(o.g.count(Object(u.a)(t)-1,t),e,2)}function J(t,e){var n=t.getDay();return t=n>=4||0===n?Object(o.i)(t):o.i.ceil(t),b(o.i.count(Object(u.a)(t),t)+(4===Object(u.a)(t).getDay()),e,2)}function 
Q(t){return t.getDay()}function tt(t,e){return b(o.c.count(Object(u.a)(t)-1,t),e,2)}function et(t,e){return b(t.getFullYear()%100,e,2)}function nt(t,e){return b(t.getFullYear()%1e4,e,4)}function rt(t){var e=t.getTimezoneOffset();return(e>0?"-":(e*=-1,"+"))+b(e/60|0,"0",2)+b(e%60,"0",2)}function it(t,e){return b(t.getUTCDate(),e,2)}function ot(t,e){return b(t.getUTCHours(),e,2)}function at(t,e){return b(t.getUTCHours()%12||12,e,2)}function ut(t,e){return b(1+i.a.count(Object(s.a)(t),t),e,3)}function st(t,e){return b(t.getUTCMilliseconds(),e,3)}function ct(t,e){return st(t,e)+"000"}function ft(t,e){return b(t.getUTCMonth()+1,e,2)}function lt(t,e){return b(t.getUTCMinutes(),e,2)}function ht(t,e){return b(t.getUTCSeconds(),e,2)}function dt(t){var e=t.getUTCDay();return 0===e?7:e}function pt(t,e){return b(r.g.count(Object(s.a)(t)-1,t),e,2)}function gt(t,e){var n=t.getUTCDay();return t=n>=4||0===n?Object(r.i)(t):r.i.ceil(t),b(r.i.count(Object(s.a)(t),t)+(4===Object(s.a)(t).getUTCDay()),e,2)}function yt(t){return t.getUTCDay()}function bt(t,e){return b(r.c.count(Object(s.a)(t)-1,t),e,2)}function vt(t,e){return b(t.getUTCFullYear()%100,e,2)}function mt(t,e){return b(t.getUTCFullYear()%1e4,e,4)}function _t(){return"+0000"}function wt(){return"%"}function xt(t){return+t}function kt(t){return Math.floor(+t/1e3)}},function(t,e,n){"use strict";var r,i=n(48),o=n(111),a=n(83),u=function(t,e){var n=Object(a.a)(t,e);if(!n)return t+"";var r=n[0],i=n[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")},s={"%":function(t,e){return(100*t).toFixed(e)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.round(t).toString(10)},e:function(t,e){return t.toExponential(e)},f:function(t,e){return t.toFixed(e)},g:function(t,e){return t.toPrecision(e)},o:function(t){return Math.round(t).toString(8)},p:function(t,e){return u(100*t,e)},r:u,s:function(t,e){var 
n=Object(a.a)(t,e);if(!n)return t+"";var i=n[0],o=n[1],u=o-(r=3*Math.max(-8,Math.min(8,Math.floor(o/3))))+1,s=i.length;return u===s?i:u>s?i+new Array(u-s+1).join("0"):u>0?i.slice(0,u)+"."+i.slice(u):"0."+new Array(1-u).join("0")+Object(a.a)(t,Math.max(0,e+u-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}},c=function(t){return t},f=Array.prototype.map,l=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"];e.a=function(t){var e,n,a=void 0===t.grouping||void 0===t.thousands?c:(e=f.call(t.grouping,Number),n=t.thousands+"",function(t,r){for(var i=t.length,o=[],a=0,u=e[0],s=0;i>0&&u>0&&(s+u+1>r&&(u=Math.max(1,r-s)),o.push(t.substring(i-=u,i+u)),!((s+=u+1)>r));)u=e[a=(a+1)%e.length];return o.reverse().join(n)}),u=void 0===t.currency?"":t.currency[0]+"",h=void 0===t.currency?"":t.currency[1]+"",d=void 0===t.decimal?".":t.decimal+"",p=void 0===t.numerals?c:function(t){return function(e){return e.replace(/[0-9]/g,(function(e){return t[+e]}))}}(f.call(t.numerals,String)),g=void 0===t.percent?"%":t.percent+"",y=void 0===t.minus?"-":t.minus+"",b=void 0===t.nan?"NaN":t.nan+"";function v(t){var e=(t=Object(o.b)(t)).fill,n=t.align,i=t.sign,c=t.symbol,f=t.zero,v=t.width,m=t.comma,_=t.precision,w=t.trim,x=t.type;"n"===x?(m=!0,x="g"):s[x]||(void 0===_&&(_=12),w=!0,x="g"),(f||"0"===e&&"="===n)&&(f=!0,e="0",n="=");var k="$"===c?u:"#"===c&&/[boxX]/.test(x)?"0"+x.toLowerCase():"",E="$"===c?h:/[%p]/.test(x)?g:"",A=s[x],S=/[defgprs%]/.test(x);function M(t){var o,u,s,c=k,h=E;if("c"===x)h=A(t)+h,t="";else{var g=(t=+t)<0;if(t=isNaN(t)?b:A(Math.abs(t),_),w&&(t=function(t){t:for(var e,n=t.length,r=1,i=-1;r0&&(i=0)}return i>0?t.slice(0,i)+t.slice(e+1):t}(t)),g&&0==+t&&(g=!1),c=(g?"("===i?i:y:"-"===i||"("===i?"":i)+c,h=("s"===x?l[8+r/3]:"")+h+(g&&"("===i?")":""),S)for(o=-1,u=t.length;++o(s=t.charCodeAt(o))||s>57){h=(46===s?d+t.slice(o+1):t.slice(o))+h,t=t.slice(0,o);break}}m&&!f&&(t=a(t,1/0));var 
M=c.length+t.length+h.length,T=M>1)+c+t+h+T.slice(M);break;default:t=T+c+t+h}return p(t)}return _=void 0===_?6:/[gprs]/.test(x)?Math.max(1,Math.min(21,_)):Math.max(0,Math.min(20,_)),M.toString=function(){return t+""},M}return{format:v,formatPrefix:function(t,e){var n=v(((t=Object(o.b)(t)).type="f",t)),r=3*Math.max(-8,Math.min(8,Math.floor(Object(i.a)(e)/3))),a=Math.pow(10,-r),u=l[8+r/3];return function(t){return n(a*t)+u}}}}},function(t,e,n){"use strict";n.d(e,"a",(function(){return g}));var r=n(24),i=n(11),o=n(116),a=-.14861,u=1.78277,s=-.29227,c=-.90649,f=1.97294,l=f*c,h=f*u,d=u*s-c*a;function p(t){if(t instanceof y)return new y(t.h,t.s,t.l,t.opacity);t instanceof i.b||(t=Object(i.h)(t));var e=t.r/255,n=t.g/255,r=t.b/255,a=(d*r+l*e-h*n)/(d+l-h),u=r-a,p=(f*(n-a)-s*u)/c,g=Math.sqrt(p*p+u*u)/(f*a*(1-a)),b=g?Math.atan2(p,u)*o.b-120:NaN;return new y(b<0?b+360:b,g,a,t.opacity)}function g(t,e,n,r){return 1===arguments.length?p(t):new y(t,e,n,null==r?1:r)}function y(t,e,n,r){this.h=+t,this.s=+e,this.l=+n,this.opacity=+r}Object(r.a)(y,g,Object(r.b)(i.a,{brighter:function(t){return t=null==t?i.c:Math.pow(i.c,t),new y(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?i.d:Math.pow(i.d,t),new y(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=isNaN(this.h)?0:(this.h+120)*o.a,e=+this.l,n=isNaN(this.s)?0:this.s*e*(1-e),r=Math.cos(t),l=Math.sin(t);return new i.b(255*(e+n*(a*r+u*l)),255*(e+n*(s*r+c*l)),255*(e+n*(f*r)),this.opacity)}}))},function(t,e,n){"use strict";var r=/^(%20|\s)*(javascript|data)/im,i=/[^\x20-\x7E]/gim,o=/^([^:]+):/gm,a=[".","/"];t.exports={sanitizeUrl:function(t){if(!t)return"about:blank";var e,n,u=t.replace(i,"").trim();return function(t){return a.indexOf(t[0])>-1}(u)?u:(n=u.match(o))?(e=n[0],r.test(e)?"about:blank":u):"about:blank"}}},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[2,3],n=[1,7],r=[7,12,15,17,19,20,21],i=[7,11,12,15,17,19,20,21],o=[2,20],a=[1,32],u={trace:function(){},yy:{},symbols_:{error:2,start:3,GG:4,":":5,document:6,EOF:7,DIR:8,options:9,body:10,OPT:11,NL:12,line:13,statement:14,COMMIT:15,commit_arg:16,BRANCH:17,ID:18,CHECKOUT:19,MERGE:20,RESET:21,reset_arg:22,STR:23,HEAD:24,reset_parents:25,CARET:26,$accept:0,$end:1},terminals_:{2:"error",4:"GG",5:":",7:"EOF",8:"DIR",11:"OPT",12:"NL",15:"COMMIT",17:"BRANCH",18:"ID",19:"CHECKOUT",20:"MERGE",21:"RESET",23:"STR",24:"HEAD",26:"CARET"},productions_:[0,[3,4],[3,5],[6,0],[6,2],[9,2],[9,1],[10,0],[10,2],[13,2],[13,1],[14,2],[14,2],[14,2],[14,2],[14,2],[16,0],[16,1],[22,2],[22,2],[25,0],[25,2]],performAction:function(t,e,n,r,i,o,a){var u=o.length-1;switch(i){case 1:return o[u-1];case 2:return r.setDirection(o[u-3]),o[u-1];case 4:r.setOptions(o[u-1]),this.$=o[u];break;case 5:o[u-1]+=o[u],this.$=o[u-1];break;case 7:this.$=[];break;case 8:o[u-1].push(o[u]),this.$=o[u-1];break;case 9:this.$=o[u-1];break;case 11:r.commit(o[u]);break;case 12:r.branch(o[u]);break;case 13:r.checkout(o[u]);break;case 14:r.merge(o[u]);break;case 15:r.reset(o[u]);break;case 16:this.$="";break;case 17:this.$=o[u];break;case 18:this.$=o[u-1]+":"+o[u];break;case 19:this.$=o[u-1]+":"+r.count,r.count=0;break;case 20:r.count=0;break;case 
21:r.count+=1}},table:[{3:1,4:[1,2]},{1:[3]},{5:[1,3],8:[1,4]},{6:5,7:e,9:6,12:n},{5:[1,8]},{7:[1,9]},t(r,[2,7],{10:10,11:[1,11]}),t(i,[2,6]),{6:12,7:e,9:6,12:n},{1:[2,1]},{7:[2,4],12:[1,15],13:13,14:14,15:[1,16],17:[1,17],19:[1,18],20:[1,19],21:[1,20]},t(i,[2,5]),{7:[1,21]},t(r,[2,8]),{12:[1,22]},t(r,[2,10]),{12:[2,16],16:23,23:[1,24]},{18:[1,25]},{18:[1,26]},{18:[1,27]},{18:[1,30],22:28,24:[1,29]},{1:[2,2]},t(r,[2,9]),{12:[2,11]},{12:[2,17]},{12:[2,12]},{12:[2,13]},{12:[2,14]},{12:[2,15]},{12:o,25:31,26:a},{12:o,25:33,26:a},{12:[2,18]},{12:o,25:34,26:a},{12:[2,19]},{12:[2,21]}],defaultActions:{9:[2,1],21:[2,2],23:[2,11],24:[2,17],25:[2,12],26:[2,13],27:[2,14],28:[2,15],31:[2,18],33:[2,19],34:[2,21]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],o=[],a=this.table,u="",s=0,c=0,f=0,l=2,h=1,d=o.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var b=p.yylloc;o.push(b);var v=p.options&&p.options.ranges;function m(){var t;return"number"!=typeof(t=r.pop()||p.lex()||h)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,w,x,k,E,A,S,M,T,O={};;){if(x=n[n.length-1],this.defaultActions[x]?k=this.defaultActions[x]:(null==_&&(_=m()),k=a[x]&&a[x][_]),void 0===k||!k.length||!k[0]){var D="";for(A in T=[],a[x])this.terminals_[A]&&A>l&&T.push("'"+this.terminals_[A]+"'");D=p.showPosition?"Parse error on line "+(s+1)+":\n"+p.showPosition()+"\nExpecting "+T.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(s+1)+": Unexpected "+(_==h?"end of 
input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(D,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:b,expected:T})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+x+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),o.push(p.yylloc),n.push(k[1]),_=null,w?(_=w,w=null):(c=p.yyleng,u=p.yytext,s=p.yylineno,b=p.yylloc,f>0&&f--);break;case 2:if(S=this.productions_[k[1]][1],O.$=i[i.length-S],O._$={first_line:o[o.length-(S||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-(S||1)].first_column,last_column:o[o.length-1].last_column},v&&(O._$.range=[o[o.length-(S||1)].range[0],o[o.length-1].range[1]]),void 0!==(E=this.performAction.apply(O,[u,c,s,g.yy,k[1],i,o].concat(d))))return E;S&&(n=n.slice(0,-1*S*2),i=i.slice(0,-1*S),o=o.slice(0,-1*S)),n.push(this.productions_[k[1]][0]),i.push(O.$),o.push(O._$),M=a[n[n.length-2]][n[n.length-1]],n.push(M);break;case 3:return!0}}return!0}},s={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var o in i)this[o]=i[o];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),o=0;oe[0].length)){if(e=n,r=o,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[o])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 12;case 1:case 2:case 3:break;case 4:return 4;case 5:return 15;case 6:return 17;case 7:return 20;case 8:return 21;case 9:return 19;case 10:case 11:return 8;case 12:return 5;case 13:return 26;case 14:this.begin("options");break;case 15:this.popState();break;case 16:return 11;case 17:this.begin("string");break;case 18:this.popState();break;case 19:return 23;case 20:return 18;case 21:return 7}},rules:[/^(?:(\r?\n)+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:gitGraph\b)/i,/^(?:commit\b)/i,/^(?:branch\b)/i,/^(?:merge\b)/i,/^(?:reset\b)/i,/^(?:checkout\b)/i,/^(?:LR\b)/i,/^(?:BT\b)/i,/^(?::)/i,/^(?:\^)/i,/^(?:options\r?\n)/i,/^(?:end\r?\n)/i,/^(?:[^\n]+\r?\n)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[a-zA-Z][a-zA-Z0-9_]+)/i,/^(?:$)/i],conditions:{options:{rules:[15,16],inclusive:!1},string:{rules:[18,19],inclusive:!1},INITIAL:{rules:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,17,20,21],inclusive:!0}}};function c(){this.yy={}}return u.lexer=s,c.prototype=u,u.Parser=c,new c}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var 
i=n(54).readFileSync(n(55).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(17),n(14)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[6,9,10],n={trace:function(){},yy:{},symbols_:{error:2,start:3,info:4,document:5,EOF:6,line:7,statement:8,NL:9,showInfo:10,$accept:0,$end:1},terminals_:{2:"error",4:"info",6:"EOF",9:"NL",10:"showInfo"},productions_:[0,[3,3],[5,0],[5,2],[7,1],[7,1],[8,1]],performAction:function(t,e,n,r,i,o,a){o.length;switch(i){case 1:return r;case 4:break;case 6:r.setInfo(!0)}},table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:6,9:[1,7],10:[1,8]},{1:[2,1]},t(e,[2,3]),t(e,[2,4]),t(e,[2,5]),t(e,[2,6])],defaultActions:{4:[2,1]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],o=[],a=this.table,u="",s=0,c=0,f=0,l=2,h=1,d=o.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var b=p.yylloc;o.push(b);var v=p.options&&p.options.ranges;function m(){var t;return"number"!=typeof(t=r.pop()||p.lex()||h)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,w,x,k,E,A,S,M,T,O={};;){if(x=n[n.length-1],this.defaultActions[x]?k=this.defaultActions[x]:(null==_&&(_=m()),k=a[x]&&a[x][_]),void 0===k||!k.length||!k[0]){var D="";for(A in T=[],a[x])this.terminals_[A]&&A>l&&T.push("'"+this.terminals_[A]+"'");D=p.showPosition?"Parse error on line "+(s+1)+":\n"+p.showPosition()+"\nExpecting "+T.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(s+1)+": Unexpected "+(_==h?"end of 
input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(D,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:b,expected:T})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+x+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),o.push(p.yylloc),n.push(k[1]),_=null,w?(_=w,w=null):(c=p.yyleng,u=p.yytext,s=p.yylineno,b=p.yylloc,f>0&&f--);break;case 2:if(S=this.productions_[k[1]][1],O.$=i[i.length-S],O._$={first_line:o[o.length-(S||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-(S||1)].first_column,last_column:o[o.length-1].last_column},v&&(O._$.range=[o[o.length-(S||1)].range[0],o[o.length-1].range[1]]),void 0!==(E=this.performAction.apply(O,[u,c,s,g.yy,k[1],i,o].concat(d))))return E;S&&(n=n.slice(0,-1*S*2),i=i.slice(0,-1*S),o=o.slice(0,-1*S)),n.push(this.productions_[k[1]][0]),i.push(O.$),o.push(O._$),M=a[n[n.length-2]][n[n.length-1]],n.push(M);break;case 3:return!0}}return!0}},r={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var o in i)this[o]=i[o];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),o=0;oe[0].length)){if(e=n,r=o,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[o])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 4;case 1:return 9;case 2:return"space";case 3:return 10;case 4:return 6;case 5:return"TXT"}},rules:[/^(?:info\b)/i,/^(?:[\s\n\r]+)/i,/^(?:[\s]+)/i,/^(?:showInfo\b)/i,/^(?:$)/i,/^(?:.)/i],conditions:{INITIAL:{rules:[0,1,2,3,4,5],inclusive:!0}}};function i(){this.yy={}}return n.lexer=r,i.prototype=n,n.Parser=i,new i}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(54).readFileSync(n(55).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(17),n(14)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[6,9,10,12],n={trace:function(){},yy:{},symbols_:{error:2,start:3,pie:4,document:5,EOF:6,line:7,statement:8,NL:9,STR:10,VALUE:11,title:12,$accept:0,$end:1},terminals_:{2:"error",4:"pie",6:"EOF",9:"NL",10:"STR",11:"VALUE",12:"title"},productions_:[0,[3,3],[5,0],[5,2],[7,1],[7,1],[8,2],[8,1]],performAction:function(t,e,n,r,i,o,a){var u=o.length-1;switch(i){case 4:break;case 6:r.addSection(o[u-1],r.cleanupValue(o[u]));break;case 
7:r.setTitle(o[u].substr(6)),this.$=o[u].substr(6)}},table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:6,9:[1,7],10:[1,8],12:[1,9]},{1:[2,1]},t(e,[2,3]),t(e,[2,4]),t(e,[2,5]),{11:[1,10]},t(e,[2,7]),t(e,[2,6])],defaultActions:{4:[2,1]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],o=[],a=this.table,u="",s=0,c=0,f=0,l=2,h=1,d=o.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var b=p.yylloc;o.push(b);var v=p.options&&p.options.ranges;function m(){var t;return"number"!=typeof(t=r.pop()||p.lex()||h)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,w,x,k,E,A,S,M,T,O={};;){if(x=n[n.length-1],this.defaultActions[x]?k=this.defaultActions[x]:(null==_&&(_=m()),k=a[x]&&a[x][_]),void 0===k||!k.length||!k[0]){var D="";for(A in T=[],a[x])this.terminals_[A]&&A>l&&T.push("'"+this.terminals_[A]+"'");D=p.showPosition?"Parse error on line "+(s+1)+":\n"+p.showPosition()+"\nExpecting "+T.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(s+1)+": Unexpected "+(_==h?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(D,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:b,expected:T})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+x+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),o.push(p.yylloc),n.push(k[1]),_=null,w?(_=w,w=null):(c=p.yyleng,u=p.yytext,s=p.yylineno,b=p.yylloc,f>0&&f--);break;case 
2:if(S=this.productions_[k[1]][1],O.$=i[i.length-S],O._$={first_line:o[o.length-(S||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-(S||1)].first_column,last_column:o[o.length-1].last_column},v&&(O._$.range=[o[o.length-(S||1)].range[0],o[o.length-1].range[1]]),void 0!==(E=this.performAction.apply(O,[u,c,s,g.yy,k[1],i,o].concat(d))))return E;S&&(n=n.slice(0,-1*S*2),i=i.slice(0,-1*S),o=o.slice(0,-1*S)),n.push(this.productions_[k[1]][0]),i.push(O.$),o.push(O._$),M=a[n[n.length-2]][n[n.length-1]],n.push(M);break;case 3:return!0}}return!0}},r={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return 
this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var o in i)this[o]=i[o];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),o=0;oe[0].length)){if(e=n,r=o,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[o])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:case 1:break;case 2:return 4;case 3:return 9;case 4:return"space";case 5:return 12;case 6:this.begin("string");break;case 7:this.popState();break;case 8:return"STR";case 9:return"VALUE";case 10:return 6}},rules:[/^(?:%%[^\n]*)/i,/^(?:\s+)/i,/^(?:pie\b)/i,/^(?:[\s\n\r]+)/i,/^(?:[\s]+)/i,/^(?:title\s[^#\n;]+)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?::[\s]*[\d]+(?:\.[\d]+)?)/i,/^(?:$)/i],conditions:{string:{rules:[7,8],inclusive:!1},INITIAL:{rules:[0,1,2,3,4,5,6,9,10],inclusive:!0}}};function i(){this.yy={}}return n.lexer=r,i.prototype=n,n.Parser=i,new i}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(54).readFileSync(n(55).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(17),n(14)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[6,12],n=[1,7],r=[1,10],i=[1,11],o=[1,12],a=[1,13],u=[12,19,20],s=[15,16,17,18],c={trace:function(){},yy:{},symbols_:{error:2,start:3,ER_DIAGRAM:4,document:5,EOF:6,statement:7,entityName:8,relSpec:9,":":10,role:11,ALPHANUM:12,cardinality:13,relType:14,ZERO_OR_ONE:15,ZERO_OR_MORE:16,ONE_OR_MORE:17,ONLY_ONE:18,NON_IDENTIFYING:19,IDENTIFYING:20,STR:21,$accept:0,$end:1},terminals_:{2:"error",4:"ER_DIAGRAM",6:"EOF",10:":",12:"ALPHANUM",15:"ZERO_OR_ONE",16:"ZERO_OR_MORE",17:"ONE_OR_MORE",18:"ONLY_ONE",19:"NON_IDENTIFYING",20:"IDENTIFYING",21:"STR"},productions_:[0,[3,3],[5,0],[5,2],[7,5],[8,1],[9,3],[13,1],[13,1],[13,1],[13,1],[14,1],[14,1],[11,1],[11,1]],performAction:function(t,e,n,r,i,o,a){var u=o.length-1;switch(i){case 1:break;case 4:r.addEntity(o[u-4]),r.addEntity(o[u-2]),r.addRelationship(o[u-4],o[u],o[u-2],o[u-3]);break;case 5:this.$=o[u];break;case 6:this.$={cardA:o[u],relType:o[u-1],cardB:o[u-2]};break;case 7:this.$=r.Cardinality.ZERO_OR_ONE;break;case 8:this.$=r.Cardinality.ZERO_OR_MORE;break;case 9:this.$=r.Cardinality.ONE_OR_MORE;break;case 10:this.$=r.Cardinality.ONLY_ONE;break;case 11:this.$=r.Identification.NON_IDENTIFYING;break;case 12:this.$=r.Identification.IDENTIFYING;break;case 13:case 14:this.$=o[u]}},table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:6,12:n},{1:[2,1]},t(e,[2,3]),{9:8,13:9,15:r,16:i,17:o,18:a},t([10,15,16,17,18],[2,5]),{8:14,12:n},{14:15,19:[1,16],20:[1,17]},t(u,[2,7]),t(u,[2,8]),t(u,[2,9]),t(u,[2,10]),{10:[1,18]},{13:19,15:r,16:i,17:o,18:a},t(s,[2,11]),t(s,[2,12]),{11:20,12:[1,22],21:[1,21]},{12:[2,6]},t(e,[2,4]),t(e,[2,13]),t(e,[2,14])],defaultActions:{4:[2,1],19:[2,6]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],o=[],a=this.table,u="",s=0,c=0,f=0,l=2,h=1,d=o.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in 
this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var b=p.yylloc;o.push(b);var v=p.options&&p.options.ranges;function m(){var t;return"number"!=typeof(t=r.pop()||p.lex()||h)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,w,x,k,E,A,S,M,T,O={};;){if(x=n[n.length-1],this.defaultActions[x]?k=this.defaultActions[x]:(null==_&&(_=m()),k=a[x]&&a[x][_]),void 0===k||!k.length||!k[0]){var D="";for(A in T=[],a[x])this.terminals_[A]&&A>l&&T.push("'"+this.terminals_[A]+"'");D=p.showPosition?"Parse error on line "+(s+1)+":\n"+p.showPosition()+"\nExpecting "+T.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(s+1)+": Unexpected "+(_==h?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(D,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:b,expected:T})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+x+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),o.push(p.yylloc),n.push(k[1]),_=null,w?(_=w,w=null):(c=p.yyleng,u=p.yytext,s=p.yylineno,b=p.yylloc,f>0&&f--);break;case 2:if(S=this.productions_[k[1]][1],O.$=i[i.length-S],O._$={first_line:o[o.length-(S||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-(S||1)].first_column,last_column:o[o.length-1].last_column},v&&(O._$.range=[o[o.length-(S||1)].range[0],o[o.length-1].range[1]]),void 0!==(E=this.performAction.apply(O,[u,c,s,g.yy,k[1],i,o].concat(d))))return E;S&&(n=n.slice(0,-1*S*2),i=i.slice(0,-1*S),o=o.slice(0,-1*S)),n.push(this.productions_[k[1]][0]),i.push(O.$),o.push(O._$),M=a[n[n.length-2]][n[n.length-1]],n.push(M);break;case 3:return!0}}return!0}},f={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new 
Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var o in i)this[o]=i[o];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),o=0;oe[0].length)){if(e=n,r=o,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[o])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:break;case 1:return"SPACE";case 2:this.begin("string");break;case 3:this.popState();break;case 4:return 21;case 5:return 4;case 6:return 15;case 7:return 16;case 8:return 17;case 9:return 18;case 10:return 15;case 11:return 16;case 12:return 17;case 13:return 19;case 14:return 20;case 15:case 16:return 19;case 17:return 12;case 18:return e.yytext[0];case 19:return 
6}},rules:[/^(?:\s+)/i,/^(?:[\s]+)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:erDiagram\b)/i,/^(?:\|o\b)/i,/^(?:\}o\b)/i,/^(?:\}\|)/i,/^(?:\|\|)/i,/^(?:o\|)/i,/^(?:o\{)/i,/^(?:\|\{)/i,/^(?:\.\.)/i,/^(?:--)/i,/^(?:\.-)/i,/^(?:-\.)/i,/^(?:[A-Za-z][A-Za-z0-9\-]*)/i,/^(?:.)/i,/^(?:$)/i],conditions:{string:{rules:[3,4],inclusive:!1},INITIAL:{rules:[0,1,2,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19],inclusive:!0}}};function l(){this.yy={}}return c.lexer=f,l.prototype=c,c.Parser=l,new l}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(54).readFileSync(n(55).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(17),n(14)(t))},function(t,e,n){"use strict";n.d(e,"a",(function(){return a}));var r=n(211),i=n(6);function o(t){return function e(n){function o(e,o){var a=t((e=Object(r.a)(e)).h,(o=Object(r.a)(o)).h),u=Object(i.a)(e.s,o.s),s=Object(i.a)(e.l,o.l),c=Object(i.a)(e.opacity,o.opacity);return function(t){return e.h=a(t),e.s=u(t),e.l=s(Math.pow(t,n)),e.opacity=c(t),e+""}}return n=+n,o.gamma=e,o}(1)}e.b=o(i.c);var a=o(i.a)},function(t,e,n){"use strict";n.d(e,"b",(function(){return o}));var r=n(4),i=Object(r.a)((function(t){t.setDate(1),t.setHours(0,0,0,0)}),(function(t,e){t.setMonth(t.getMonth()+e)}),(function(t,e){return e.getMonth()-t.getMonth()+12*(e.getFullYear()-t.getFullYear())}),(function(t){return t.getMonth()}));e.a=i;var o=i.range},function(t,e,n){"use strict";n.d(e,"b",(function(){return a}));var r=n(4),i=n(5),o=Object(r.a)((function(t){t.setTime(t-t.getMilliseconds()-t.getSeconds()*i.d-t.getMinutes()*i.c)}),(function(t,e){t.setTime(+t+e*i.b)}),(function(t,e){return(e-t)/i.b}),(function(t){return t.getHours()}));e.a=o;var a=o.range},function(t,e,n){"use strict";n.d(e,"b",(function(){return a}));var 
r=n(4),i=n(5),o=Object(r.a)((function(t){t.setTime(t-t.getMilliseconds()-t.getSeconds()*i.d)}),(function(t,e){t.setTime(+t+e*i.c)}),(function(t,e){return(e-t)/i.c}),(function(t){return t.getMinutes()}));e.a=o;var a=o.range},function(t,e,n){"use strict";n.d(e,"b",(function(){return o}));var r=n(4),i=Object(r.a)((function(t){t.setUTCDate(1),t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCMonth(t.getUTCMonth()+e)}),(function(t,e){return e.getUTCMonth()-t.getUTCMonth()+12*(e.getUTCFullYear()-t.getUTCFullYear())}),(function(t){return t.getUTCMonth()}));e.a=i;var o=i.range},function(t,e,n){"use strict";n.d(e,"b",(function(){return a}));var r=n(4),i=n(5),o=Object(r.a)((function(t){t.setUTCMinutes(0,0,0)}),(function(t,e){t.setTime(+t+e*i.b)}),(function(t,e){return(e-t)/i.b}),(function(t){return t.getUTCHours()}));e.a=o;var a=o.range},function(t,e,n){"use strict";n.d(e,"b",(function(){return a}));var r=n(4),i=n(5),o=Object(r.a)((function(t){t.setUTCSeconds(0,0)}),(function(t,e){t.setTime(+t+e*i.c)}),(function(t,e){return(e-t)/i.c}),(function(t){return t.getUTCMinutes()}));e.a=o;var a=o.range},function(t,e,n){"use strict";var r=n(27);t.exports=i;function i(t){this._isDirected=!r.has(t,"directed")||t.directed,this._isMultigraph=!!r.has(t,"multigraph")&&t.multigraph,this._isCompound=!!r.has(t,"compound")&&t.compound,this._label=void 0,this._defaultNodeLabelFn=r.constant(void 0),this._defaultEdgeLabelFn=r.constant(void 0),this._nodes={},this._isCompound&&(this._parent={},this._children={},this._children["\0"]={}),this._in={},this._preds={},this._out={},this._sucs={},this._edgeObjs={},this._edgeLabels={}}function o(t,e){t[e]?t[e]++:t[e]=1}function a(t,e){--t[e]||delete t[e]}function u(t,e,n,i){var o=""+e,a=""+n;if(!t&&o>a){var u=o;o=a,a=u}return o+""+a+""+(r.isUndefined(i)?"\0":i)}function s(t,e,n,r){var i=""+e,o=""+n;if(!t&&i>o){var a=i;i=o,o=a}var u={v:i,w:o};return r&&(u.name=r),u}function c(t,e){return 
u(t,e.v,e.w,e.name)}i.prototype._nodeCount=0,i.prototype._edgeCount=0,i.prototype.isDirected=function(){return this._isDirected},i.prototype.isMultigraph=function(){return this._isMultigraph},i.prototype.isCompound=function(){return this._isCompound},i.prototype.setGraph=function(t){return this._label=t,this},i.prototype.graph=function(){return this._label},i.prototype.setDefaultNodeLabel=function(t){return r.isFunction(t)||(t=r.constant(t)),this._defaultNodeLabelFn=t,this},i.prototype.nodeCount=function(){return this._nodeCount},i.prototype.nodes=function(){return r.keys(this._nodes)},i.prototype.sources=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._in[e])}))},i.prototype.sinks=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._out[e])}))},i.prototype.setNodes=function(t,e){var n=arguments,i=this;return r.each(t,(function(t){n.length>1?i.setNode(t,e):i.setNode(t)})),this},i.prototype.setNode=function(t,e){return r.has(this._nodes,t)?(arguments.length>1&&(this._nodes[t]=e),this):(this._nodes[t]=arguments.length>1?e:this._defaultNodeLabelFn(t),this._isCompound&&(this._parent[t]="\0",this._children[t]={},this._children["\0"][t]=!0),this._in[t]={},this._preds[t]={},this._out[t]={},this._sucs[t]={},++this._nodeCount,this)},i.prototype.node=function(t){return this._nodes[t]},i.prototype.hasNode=function(t){return r.has(this._nodes,t)},i.prototype.removeNode=function(t){var e=this;if(r.has(this._nodes,t)){var n=function(t){e.removeEdge(e._edgeObjs[t])};delete this._nodes[t],this._isCompound&&(this._removeFromParentsChildList(t),delete this._parent[t],r.each(this.children(t),(function(t){e.setParent(t)})),delete this._children[t]),r.each(r.keys(this._in[t]),n),delete this._in[t],delete this._preds[t],r.each(r.keys(this._out[t]),n),delete this._out[t],delete this._sucs[t],--this._nodeCount}return this},i.prototype.setParent=function(t,e){if(!this._isCompound)throw new Error("Cannot set parent in a 
non-compound graph");if(r.isUndefined(e))e="\0";else{for(var n=e+="";!r.isUndefined(n);n=this.parent(n))if(n===t)throw new Error("Setting "+e+" as parent of "+t+" would create a cycle");this.setNode(e)}return this.setNode(t),this._removeFromParentsChildList(t),this._parent[t]=e,this._children[e][t]=!0,this},i.prototype._removeFromParentsChildList=function(t){delete this._children[this._parent[t]][t]},i.prototype.parent=function(t){if(this._isCompound){var e=this._parent[t];if("\0"!==e)return e}},i.prototype.children=function(t){if(r.isUndefined(t)&&(t="\0"),this._isCompound){var e=this._children[t];if(e)return r.keys(e)}else{if("\0"===t)return this.nodes();if(this.hasNode(t))return[]}},i.prototype.predecessors=function(t){var e=this._preds[t];if(e)return r.keys(e)},i.prototype.successors=function(t){var e=this._sucs[t];if(e)return r.keys(e)},i.prototype.neighbors=function(t){var e=this.predecessors(t);if(e)return r.union(e,this.successors(t))},i.prototype.isLeaf=function(t){return 0===(this.isDirected()?this.successors(t):this.neighbors(t)).length},i.prototype.filterNodes=function(t){var e=new this.constructor({directed:this._isDirected,multigraph:this._isMultigraph,compound:this._isCompound});e.setGraph(this.graph());var n=this;r.each(this._nodes,(function(n,r){t(r)&&e.setNode(r,n)})),r.each(this._edgeObjs,(function(t){e.hasNode(t.v)&&e.hasNode(t.w)&&e.setEdge(t,n.edge(t))}));var i={};return this._isCompound&&r.each(e.nodes(),(function(t){e.setParent(t,function t(r){var o=n.parent(r);return void 0===o||e.hasNode(o)?(i[r]=o,o):o in i?i[o]:t(o)}(t))})),e},i.prototype.setDefaultEdgeLabel=function(t){return r.isFunction(t)||(t=r.constant(t)),this._defaultEdgeLabelFn=t,this},i.prototype.edgeCount=function(){return this._edgeCount},i.prototype.edges=function(){return r.values(this._edgeObjs)},i.prototype.setPath=function(t,e){var n=this,i=arguments;return r.reduce(t,(function(t,r){return 
i.length>1?n.setEdge(t,r,e):n.setEdge(t,r),r})),this},i.prototype.setEdge=function(){var t,e,n,i,a=!1,c=arguments[0];"object"==typeof c&&null!==c&&"v"in c?(t=c.v,e=c.w,n=c.name,2===arguments.length&&(i=arguments[1],a=!0)):(t=c,e=arguments[1],n=arguments[3],arguments.length>2&&(i=arguments[2],a=!0)),t=""+t,e=""+e,r.isUndefined(n)||(n=""+n);var f=u(this._isDirected,t,e,n);if(r.has(this._edgeLabels,f))return a&&(this._edgeLabels[f]=i),this;if(!r.isUndefined(n)&&!this._isMultigraph)throw new Error("Cannot set a named edge when isMultigraph = false");this.setNode(t),this.setNode(e),this._edgeLabels[f]=a?i:this._defaultEdgeLabelFn(t,e,n);var l=s(this._isDirected,t,e,n);return t=l.v,e=l.w,Object.freeze(l),this._edgeObjs[f]=l,o(this._preds[e],t),o(this._sucs[t],e),this._in[e][f]=l,this._out[t][f]=l,this._edgeCount++,this},i.prototype.edge=function(t,e,n){var r=1===arguments.length?c(this._isDirected,arguments[0]):u(this._isDirected,t,e,n);return this._edgeLabels[r]},i.prototype.hasEdge=function(t,e,n){var i=1===arguments.length?c(this._isDirected,arguments[0]):u(this._isDirected,t,e,n);return r.has(this._edgeLabels,i)},i.prototype.removeEdge=function(t,e,n){var r=1===arguments.length?c(this._isDirected,arguments[0]):u(this._isDirected,t,e,n),i=this._edgeObjs[r];return i&&(t=i.v,e=i.w,delete this._edgeLabels[r],delete this._edgeObjs[r],a(this._preds[e],t),a(this._sucs[t],e),delete this._in[e][r],delete this._out[t][r],this._edgeCount--),this},i.prototype.inEdges=function(t,e){var n=this._in[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return t.v===e})):i}},i.prototype.outEdges=function(t,e){var n=this._out[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return t.w===e})):i}},i.prototype.nodeEdges=function(t,e){var n=this.inEdges(t,e);if(n)return n.concat(this.outEdges(t,e))}},function(t,e,n){var r=n(74)(n(35),"Map");t.exports=r},function(t,e,n){var r=n(472),i=n(479),o=n(481),a=n(482),u=n(483);function s(t){var 
e=-1,n=null==t?0:t.length;for(this.clear();++e-1&&t%1==0&&t<=9007199254740991}},function(t,e,n){(function(t){var r=n(294),i=e&&!e.nodeType&&e,o=i&&"object"==typeof t&&t&&!t.nodeType&&t,a=o&&o.exports===i&&r.process,u=function(){try{var t=o&&o.require&&o.require("util").types;return t||a&&a.binding&&a.binding("util")}catch(t){}}();t.exports=u}).call(this,n(14)(t))},function(t,e,n){var r=n(155),i=n(489),o=Object.prototype.hasOwnProperty;t.exports=function(t){if(!r(t))return i(t);var e=[];for(var n in Object(t))o.call(t,n)&&"constructor"!=n&&e.push(n);return e}},function(t,e,n){var r=n(301),i=n(302),o=Object.prototype.propertyIsEnumerable,a=Object.getOwnPropertySymbols,u=a?function(t){return null==t?[]:(t=Object(t),r(a(t),(function(e){return o.call(t,e)})))}:i;t.exports=u},function(t,e){t.exports=function(t,e){for(var n=-1,r=e.length,i=t.length;++n0&&o(f)?n>1?t(f,n-1,o,a,u):r(u,f):a||(u[u.length]=f)}return u}},function(t,e,n){"use strict";var r=n(29);t.exports=i;function i(t){this._isDirected=!r.has(t,"directed")||t.directed,this._isMultigraph=!!r.has(t,"multigraph")&&t.multigraph,this._isCompound=!!r.has(t,"compound")&&t.compound,this._label=void 0,this._defaultNodeLabelFn=r.constant(void 0),this._defaultEdgeLabelFn=r.constant(void 0),this._nodes={},this._isCompound&&(this._parent={},this._children={},this._children["\0"]={}),this._in={},this._preds={},this._out={},this._sucs={},this._edgeObjs={},this._edgeLabels={}}function o(t,e){t[e]?t[e]++:t[e]=1}function a(t,e){--t[e]||delete t[e]}function u(t,e,n,i){var o=""+e,a=""+n;if(!t&&o>a){var u=o;o=a,a=u}return o+""+a+""+(r.isUndefined(i)?"\0":i)}function s(t,e,n,r){var i=""+e,o=""+n;if(!t&&i>o){var a=i;i=o,o=a}var u={v:i,w:o};return r&&(u.name=r),u}function c(t,e){return u(t,e.v,e.w,e.name)}i.prototype._nodeCount=0,i.prototype._edgeCount=0,i.prototype.isDirected=function(){return this._isDirected},i.prototype.isMultigraph=function(){return this._isMultigraph},i.prototype.isCompound=function(){return 
this._isCompound},i.prototype.setGraph=function(t){return this._label=t,this},i.prototype.graph=function(){return this._label},i.prototype.setDefaultNodeLabel=function(t){return r.isFunction(t)||(t=r.constant(t)),this._defaultNodeLabelFn=t,this},i.prototype.nodeCount=function(){return this._nodeCount},i.prototype.nodes=function(){return r.keys(this._nodes)},i.prototype.sources=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._in[e])}))},i.prototype.sinks=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._out[e])}))},i.prototype.setNodes=function(t,e){var n=arguments,i=this;return r.each(t,(function(t){n.length>1?i.setNode(t,e):i.setNode(t)})),this},i.prototype.setNode=function(t,e){return r.has(this._nodes,t)?(arguments.length>1&&(this._nodes[t]=e),this):(this._nodes[t]=arguments.length>1?e:this._defaultNodeLabelFn(t),this._isCompound&&(this._parent[t]="\0",this._children[t]={},this._children["\0"][t]=!0),this._in[t]={},this._preds[t]={},this._out[t]={},this._sucs[t]={},++this._nodeCount,this)},i.prototype.node=function(t){return this._nodes[t]},i.prototype.hasNode=function(t){return r.has(this._nodes,t)},i.prototype.removeNode=function(t){var e=this;if(r.has(this._nodes,t)){var n=function(t){e.removeEdge(e._edgeObjs[t])};delete this._nodes[t],this._isCompound&&(this._removeFromParentsChildList(t),delete this._parent[t],r.each(this.children(t),(function(t){e.setParent(t)})),delete this._children[t]),r.each(r.keys(this._in[t]),n),delete this._in[t],delete this._preds[t],r.each(r.keys(this._out[t]),n),delete this._out[t],delete this._sucs[t],--this._nodeCount}return this},i.prototype.setParent=function(t,e){if(!this._isCompound)throw new Error("Cannot set parent in a non-compound graph");if(r.isUndefined(e))e="\0";else{for(var n=e+="";!r.isUndefined(n);n=this.parent(n))if(n===t)throw new Error("Setting "+e+" as parent of "+t+" would create a cycle");this.setNode(e)}return 
this.setNode(t),this._removeFromParentsChildList(t),this._parent[t]=e,this._children[e][t]=!0,this},i.prototype._removeFromParentsChildList=function(t){delete this._children[this._parent[t]][t]},i.prototype.parent=function(t){if(this._isCompound){var e=this._parent[t];if("\0"!==e)return e}},i.prototype.children=function(t){if(r.isUndefined(t)&&(t="\0"),this._isCompound){var e=this._children[t];if(e)return r.keys(e)}else{if("\0"===t)return this.nodes();if(this.hasNode(t))return[]}},i.prototype.predecessors=function(t){var e=this._preds[t];if(e)return r.keys(e)},i.prototype.successors=function(t){var e=this._sucs[t];if(e)return r.keys(e)},i.prototype.neighbors=function(t){var e=this.predecessors(t);if(e)return r.union(e,this.successors(t))},i.prototype.isLeaf=function(t){return 0===(this.isDirected()?this.successors(t):this.neighbors(t)).length},i.prototype.filterNodes=function(t){var e=new this.constructor({directed:this._isDirected,multigraph:this._isMultigraph,compound:this._isCompound});e.setGraph(this.graph());var n=this;r.each(this._nodes,(function(n,r){t(r)&&e.setNode(r,n)})),r.each(this._edgeObjs,(function(t){e.hasNode(t.v)&&e.hasNode(t.w)&&e.setEdge(t,n.edge(t))}));var i={};return this._isCompound&&r.each(e.nodes(),(function(t){e.setParent(t,function t(r){var o=n.parent(r);return void 0===o||e.hasNode(o)?(i[r]=o,o):o in i?i[o]:t(o)}(t))})),e},i.prototype.setDefaultEdgeLabel=function(t){return r.isFunction(t)||(t=r.constant(t)),this._defaultEdgeLabelFn=t,this},i.prototype.edgeCount=function(){return this._edgeCount},i.prototype.edges=function(){return r.values(this._edgeObjs)},i.prototype.setPath=function(t,e){var n=this,i=arguments;return r.reduce(t,(function(t,r){return i.length>1?n.setEdge(t,r,e):n.setEdge(t,r),r})),this},i.prototype.setEdge=function(){var t,e,n,i,a=!1,c=arguments[0];"object"==typeof c&&null!==c&&"v"in 
c?(t=c.v,e=c.w,n=c.name,2===arguments.length&&(i=arguments[1],a=!0)):(t=c,e=arguments[1],n=arguments[3],arguments.length>2&&(i=arguments[2],a=!0)),t=""+t,e=""+e,r.isUndefined(n)||(n=""+n);var f=u(this._isDirected,t,e,n);if(r.has(this._edgeLabels,f))return a&&(this._edgeLabels[f]=i),this;if(!r.isUndefined(n)&&!this._isMultigraph)throw new Error("Cannot set a named edge when isMultigraph = false");this.setNode(t),this.setNode(e),this._edgeLabels[f]=a?i:this._defaultEdgeLabelFn(t,e,n);var l=s(this._isDirected,t,e,n);return t=l.v,e=l.w,Object.freeze(l),this._edgeObjs[f]=l,o(this._preds[e],t),o(this._sucs[t],e),this._in[e][f]=l,this._out[t][f]=l,this._edgeCount++,this},i.prototype.edge=function(t,e,n){var r=1===arguments.length?c(this._isDirected,arguments[0]):u(this._isDirected,t,e,n);return this._edgeLabels[r]},i.prototype.hasEdge=function(t,e,n){var i=1===arguments.length?c(this._isDirected,arguments[0]):u(this._isDirected,t,e,n);return r.has(this._edgeLabels,i)},i.prototype.removeEdge=function(t,e,n){var r=1===arguments.length?c(this._isDirected,arguments[0]):u(this._isDirected,t,e,n),i=this._edgeObjs[r];return i&&(t=i.v,e=i.w,delete this._edgeLabels[r],delete this._edgeObjs[r],a(this._preds[e],t),a(this._sucs[t],e),delete this._in[e][r],delete this._out[t][r],this._edgeCount--),this},i.prototype.inEdges=function(t,e){var n=this._in[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return t.v===e})):i}},i.prototype.outEdges=function(t,e){var n=this._out[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return t.w===e})):i}},i.prototype.nodeEdges=function(t,e){var n=this.inEdges(t,e);if(n)return n.concat(this.outEdges(t,e))}},function(t,e,n){var r=n(77)(n(36),"Map");t.exports=r},function(t,e,n){var r=n(585),i=n(592),o=n(594),a=n(595),u=n(596);function s(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e-1&&t%1==0&&t<=9007199254740991}},function(t,e,n){(function(t){var r=n(342),i=e&&!e.nodeType&&e,o=i&&"object"==typeof 
t&&t&&!t.nodeType&&t,a=o&&o.exports===i&&r.process,u=function(){try{var t=o&&o.require&&o.require("util").types;return t||a&&a.binding&&a.binding("util")}catch(t){}}();t.exports=u}).call(this,n(14)(t))},function(t,e,n){var r=n(170),i=n(602),o=Object.prototype.hasOwnProperty;t.exports=function(t){if(!r(t))return i(t);var e=[];for(var n in Object(t))o.call(t,n)&&"constructor"!=n&&e.push(n);return e}},function(t,e,n){var r=n(349),i=n(350),o=Object.prototype.propertyIsEnumerable,a=Object.getOwnPropertySymbols,u=a?function(t){return null==t?[]:(t=Object(t),r(a(t),(function(e){return o.call(t,e)})))}:i;t.exports=u},function(t,e){t.exports=function(t,e){for(var n=-1,r=e.length,i=t.length;++n0&&o(f)?n>1?t(f,n-1,o,a,u):r(u,f):a||(u[u.length]=f)}return u}},function(t,e,n){var r=n(98);t.exports=function(t,e,n){for(var i=-1,o=t.length;++i>>32-e}function c(t,e,n,r,i,o,a){return s(t+(e&n|~e&r)+i+o|0,a)+e|0}function f(t,e,n,r,i,o,a){return s(t+(e&r|n&~r)+i+o|0,a)+e|0}function l(t,e,n,r,i,o,a){return s(t+(e^n^r)+i+o|0,a)+e|0}function h(t,e,n,r,i,o,a){return s(t+(n^(e|~r))+i+o|0,a)+e|0}r(u,i),u.prototype._update=function(){for(var t=a,e=0;e<16;++e)t[e]=this._block.readInt32LE(4*e);var 
n=this._a,r=this._b,i=this._c,o=this._d;n=c(n,r,i,o,t[0],3614090360,7),o=c(o,n,r,i,t[1],3905402710,12),i=c(i,o,n,r,t[2],606105819,17),r=c(r,i,o,n,t[3],3250441966,22),n=c(n,r,i,o,t[4],4118548399,7),o=c(o,n,r,i,t[5],1200080426,12),i=c(i,o,n,r,t[6],2821735955,17),r=c(r,i,o,n,t[7],4249261313,22),n=c(n,r,i,o,t[8],1770035416,7),o=c(o,n,r,i,t[9],2336552879,12),i=c(i,o,n,r,t[10],4294925233,17),r=c(r,i,o,n,t[11],2304563134,22),n=c(n,r,i,o,t[12],1804603682,7),o=c(o,n,r,i,t[13],4254626195,12),i=c(i,o,n,r,t[14],2792965006,17),n=f(n,r=c(r,i,o,n,t[15],1236535329,22),i,o,t[1],4129170786,5),o=f(o,n,r,i,t[6],3225465664,9),i=f(i,o,n,r,t[11],643717713,14),r=f(r,i,o,n,t[0],3921069994,20),n=f(n,r,i,o,t[5],3593408605,5),o=f(o,n,r,i,t[10],38016083,9),i=f(i,o,n,r,t[15],3634488961,14),r=f(r,i,o,n,t[4],3889429448,20),n=f(n,r,i,o,t[9],568446438,5),o=f(o,n,r,i,t[14],3275163606,9),i=f(i,o,n,r,t[3],4107603335,14),r=f(r,i,o,n,t[8],1163531501,20),n=f(n,r,i,o,t[13],2850285829,5),o=f(o,n,r,i,t[2],4243563512,9),i=f(i,o,n,r,t[7],1735328473,14),n=l(n,r=f(r,i,o,n,t[12],2368359562,20),i,o,t[5],4294588738,4),o=l(o,n,r,i,t[8],2272392833,11),i=l(i,o,n,r,t[11],1839030562,16),r=l(r,i,o,n,t[14],4259657740,23),n=l(n,r,i,o,t[1],2763975236,4),o=l(o,n,r,i,t[4],1272893353,11),i=l(i,o,n,r,t[7],4139469664,16),r=l(r,i,o,n,t[10],3200236656,23),n=l(n,r,i,o,t[13],681279174,4),o=l(o,n,r,i,t[0],3936430074,11),i=l(i,o,n,r,t[3],3572445317,16),r=l(r,i,o,n,t[6],76029189,23),n=l(n,r,i,o,t[9],3654602809,4),o=l(o,n,r,i,t[12],3873151461,11),i=l(i,o,n,r,t[15],530742520,16),n=h(n,r=l(r,i,o,n,t[2],3299628645,23),i,o,t[0],4096336452,6),o=h(o,n,r,i,t[7],1126891415,10),i=h(i,o,n,r,t[14],2878612391,15),r=h(r,i,o,n,t[5],4237533241,21),n=h(n,r,i,o,t[12],1700485571,6),o=h(o,n,r,i,t[3],2399980690,10),i=h(i,o,n,r,t[10],4293915773,15),r=h(r,i,o,n,t[1],2240044497,21),n=h(n,r,i,o,t[8],1873313359,6),o=h(o,n,r,i,t[15],4264355552,10),i=h(i,o,n,r,t[6],2734768916,15),r=h(r,i,o,n,t[13],1309151649,21),n=h(n,r,i,o,t[4],4149444226,6),o=h(o,n,r,i,t[11],31
74756917,10),i=h(i,o,n,r,t[2],718787259,15),r=h(r,i,o,n,t[9],3951481745,21),this._a=this._a+n|0,this._b=this._b+r|0,this._c=this._c+i|0,this._d=this._d+o|0},u.prototype._digest=function(){this._block[this._blockOffset++]=128,this._blockOffset>56&&(this._block.fill(0,this._blockOffset,64),this._update(),this._blockOffset=0),this._block.fill(0,this._blockOffset,56),this._block.writeUInt32LE(this._length[0],56),this._block.writeUInt32LE(this._length[1],60),this._update();var t=o.allocUnsafe(16);return t.writeInt32LE(this._a,0),t.writeInt32LE(this._b,4),t.writeInt32LE(this._c,8),t.writeInt32LE(this._d,12),t},t.exports=u},function(t,e,n){t.exports=i;var r=n(266).EventEmitter;function i(){r.call(this)}n(2)(i,r),i.Readable=n(267),i.Writable=n(822),i.Duplex=n(823),i.Transform=n(824),i.PassThrough=n(825),i.Stream=i,i.prototype.pipe=function(t,e){var n=this;function i(e){t.writable&&!1===t.write(e)&&n.pause&&n.pause()}function o(){n.readable&&n.resume&&n.resume()}n.on("data",i),t.on("drain",o),t._isStdio||e&&!1===e.end||(n.on("end",u),n.on("close",s));var a=!1;function u(){a||(a=!0,t.end())}function s(){a||(a=!0,"function"==typeof t.destroy&&t.destroy())}function c(t){if(f(),0===r.listenerCount(this,"error"))throw t}function f(){n.removeListener("data",i),t.removeListener("drain",o),n.removeListener("end",u),n.removeListener("close",s),n.removeListener("error",c),t.removeListener("error",c),n.removeListener("end",f),n.removeListener("close",f),t.removeListener("close",f)}return n.on("error",c),t.on("error",c),n.on("end",f),n.on("close",f),t.on("close",f),t.emit("pipe",n),t}},function(t,e,n){"use strict";var r,i="object"==typeof Reflect?Reflect:null,o=i&&"function"==typeof i.apply?i.apply:function(t,e,n){return Function.prototype.apply.call(t,e,n)};r=i&&"function"==typeof i.ownKeys?i.ownKeys:Object.getOwnPropertySymbols?function(t){return Object.getOwnPropertyNames(t).concat(Object.getOwnPropertySymbols(t))}:function(t){return Object.getOwnPropertyNames(t)};var 
a=Number.isNaN||function(t){return t!=t};function u(){u.init.call(this)}t.exports=u,u.EventEmitter=u,u.prototype._events=void 0,u.prototype._eventsCount=0,u.prototype._maxListeners=void 0;var s=10;function c(t){if("function"!=typeof t)throw new TypeError('The "listener" argument must be of type Function. Received type '+typeof t)}function f(t){return void 0===t._maxListeners?u.defaultMaxListeners:t._maxListeners}function l(t,e,n,r){var i,o,a,u;if(c(n),void 0===(o=t._events)?(o=t._events=Object.create(null),t._eventsCount=0):(void 0!==o.newListener&&(t.emit("newListener",e,n.listener?n.listener:n),o=t._events),a=o[e]),void 0===a)a=o[e]=n,++t._eventsCount;else if("function"==typeof a?a=o[e]=r?[n,a]:[a,n]:r?a.unshift(n):a.push(n),(i=f(t))>0&&a.length>i&&!a.warned){a.warned=!0;var s=new Error("Possible EventEmitter memory leak detected. "+a.length+" "+String(e)+" listeners added. Use emitter.setMaxListeners() to increase limit");s.name="MaxListenersExceededWarning",s.emitter=t,s.type=e,s.count=a.length,u=s,console&&console.warn&&console.warn(u)}return t}function h(){if(!this.fired)return this.target.removeListener(this.type,this.wrapFn),this.fired=!0,0===arguments.length?this.listener.call(this.target):this.listener.apply(this.target,arguments)}function d(t,e,n){var r={fired:!1,wrapFn:void 0,target:t,type:e,listener:n},i=h.bind(r);return i.listener=n,r.wrapFn=i,i}function p(t,e,n){var r=t._events;if(void 0===r)return[];var i=r[e];return void 0===i?[]:"function"==typeof i?n?[i.listener||i]:[i]:n?function(t){for(var e=new Array(t.length),n=0;n0&&(a=e[0]),a instanceof Error)throw a;var u=new Error("Unhandled error."+(a?" 
("+a.message+")":""));throw u.context=a,u}var s=i[t];if(void 0===s)return!1;if("function"==typeof s)o(s,this,e);else{var c=s.length,f=y(s,c);for(n=0;n=0;o--)if(n[o]===e||n[o].listener===e){a=n[o].listener,i=o;break}if(i<0)return this;0===i?n.shift():function(t,e){for(;e+1=0;r--)this.removeListener(t,e[r]);return this},u.prototype.listeners=function(t){return p(this,t,!0)},u.prototype.rawListeners=function(t){return p(this,t,!1)},u.listenerCount=function(t,e){return"function"==typeof t.listenerCount?t.listenerCount(e):g.call(t,e)},u.prototype.listenerCount=g,u.prototype.eventNames=function(){return this._eventsCount>0?r(this._events):[]}},function(t,e,n){(e=t.exports=n(410)).Stream=e,e.Readable=e,e.Writable=n(269),e.Duplex=n(80),e.Transform=n(413),e.PassThrough=n(821)},function(t,e,n){var r=n(18),i=r.Buffer;function o(t,e){for(var n in t)e[n]=t[n]}function a(t,e,n){return i(t,e,n)}i.from&&i.alloc&&i.allocUnsafe&&i.allocUnsafeSlow?t.exports=r:(o(r,e),e.Buffer=a),o(i,a),a.from=function(t,e,n){if("number"==typeof t)throw new TypeError("Argument must not be a number");return i(t,e,n)},a.alloc=function(t,e,n){if("number"!=typeof t)throw new TypeError("Argument must be a number");var r=i(t);return void 0!==e?"string"==typeof n?r.fill(e,n):r.fill(e):r.fill(0),r},a.allocUnsafe=function(t){if("number"!=typeof t)throw new TypeError("Argument must be a number");return i(t)},a.allocUnsafeSlow=function(t){if("number"!=typeof t)throw new TypeError("Argument must be a number");return r.SlowBuffer(t)}},function(t,e,n){"use strict";(function(e,r,i){var o=n(180);function a(t){var e=this;this.next=null,this.entry=null,this.finish=function(){!function(t,e,n){var r=t.entry;t.entry=null;for(;r;){var i=r.callback;e.pendingcb--,i(n),r=r.next}e.corkedRequestsFree?e.corkedRequestsFree.next=t:e.corkedRequestsFree=t}(e,t)}}t.exports=v;var u,s=!e.browser&&["v0.10","v0.9."].indexOf(e.version.slice(0,5))>-1?r:o.nextTick;v.WritableState=b;var c=Object.create(n(134));c.inherits=n(2);var 
f={deprecate:n(820)},l=n(411),h=n(268).Buffer,d=i.Uint8Array||function(){};var p,g=n(412);function y(){}function b(t,e){u=u||n(80),t=t||{};var r=e instanceof u;this.objectMode=!!t.objectMode,r&&(this.objectMode=this.objectMode||!!t.writableObjectMode);var i=t.highWaterMark,c=t.writableHighWaterMark,f=this.objectMode?16:16384;this.highWaterMark=i||0===i?i:r&&(c||0===c)?c:f,this.highWaterMark=Math.floor(this.highWaterMark),this.finalCalled=!1,this.needDrain=!1,this.ending=!1,this.ended=!1,this.finished=!1,this.destroyed=!1;var l=!1===t.decodeStrings;this.decodeStrings=!l,this.defaultEncoding=t.defaultEncoding||"utf8",this.length=0,this.writing=!1,this.corked=0,this.sync=!0,this.bufferProcessing=!1,this.onwrite=function(t){!function(t,e){var n=t._writableState,r=n.sync,i=n.writecb;if(function(t){t.writing=!1,t.writecb=null,t.length-=t.writelen,t.writelen=0}(n),e)!function(t,e,n,r,i){--e.pendingcb,n?(o.nextTick(i,r),o.nextTick(E,t,e),t._writableState.errorEmitted=!0,t.emit("error",r)):(i(r),t._writableState.errorEmitted=!0,t.emit("error",r),E(t,e))}(t,n,r,e,i);else{var a=x(n);a||n.corked||n.bufferProcessing||!n.bufferedRequest||w(t,n),r?s(_,t,n,a,i):_(t,n,a,i)}}(e,t)},this.writecb=null,this.writelen=0,this.bufferedRequest=null,this.lastBufferedRequest=null,this.pendingcb=0,this.prefinished=!1,this.errorEmitted=!1,this.bufferedRequestCount=0,this.corkedRequestsFree=new a(this)}function v(t){if(u=u||n(80),!(p.call(v,this)||this instanceof u))return new v(t);this._writableState=new b(t,this),this.writable=!0,t&&("function"==typeof t.write&&(this._write=t.write),"function"==typeof t.writev&&(this._writev=t.writev),"function"==typeof t.destroy&&(this._destroy=t.destroy),"function"==typeof t.final&&(this._final=t.final)),l.call(this)}function m(t,e,n,r,i,o,a){e.writelen=r,e.writecb=a,e.writing=!0,e.sync=!0,n?t._writev(i,e.onwrite):t._write(i,o,e.onwrite),e.sync=!1}function 
_(t,e,n,r){n||function(t,e){0===e.length&&e.needDrain&&(e.needDrain=!1,t.emit("drain"))}(t,e),e.pendingcb--,r(),E(t,e)}function w(t,e){e.bufferProcessing=!0;var n=e.bufferedRequest;if(t._writev&&n&&n.next){var r=e.bufferedRequestCount,i=new Array(r),o=e.corkedRequestsFree;o.entry=n;for(var u=0,s=!0;n;)i[u]=n,n.isBuf||(s=!1),n=n.next,u+=1;i.allBuffers=s,m(t,e,!0,e.length,i,"",o.finish),e.pendingcb++,e.lastBufferedRequest=null,o.next?(e.corkedRequestsFree=o.next,o.next=null):e.corkedRequestsFree=new a(e),e.bufferedRequestCount=0}else{for(;n;){var c=n.chunk,f=n.encoding,l=n.callback;if(m(t,e,!1,e.objectMode?1:c.length,c,f,l),n=n.next,e.bufferedRequestCount--,e.writing)break}null===n&&(e.lastBufferedRequest=null)}e.bufferedRequest=n,e.bufferProcessing=!1}function x(t){return t.ending&&0===t.length&&null===t.bufferedRequest&&!t.finished&&!t.writing}function k(t,e){t._final((function(n){e.pendingcb--,n&&t.emit("error",n),e.prefinished=!0,t.emit("prefinish"),E(t,e)}))}function E(t,e){var n=x(e);return n&&(!function(t,e){e.prefinished||e.finalCalled||("function"==typeof t._final?(e.pendingcb++,e.finalCalled=!0,o.nextTick(k,t,e)):(e.prefinished=!0,t.emit("prefinish")))}(t,e),0===e.pendingcb&&(e.finished=!0,t.emit("finish"))),n}c.inherits(v,l),b.prototype.getBuffer=function(){for(var t=this.bufferedRequest,e=[];t;)e.push(t),t=t.next;return e},function(){try{Object.defineProperty(b.prototype,"buffer",{get:f.deprecate((function(){return this.getBuffer()}),"_writableState.buffer is deprecated. 
Use _writableState.getBuffer instead.","DEP0003")})}catch(t){}}(),"function"==typeof Symbol&&Symbol.hasInstance&&"function"==typeof Function.prototype[Symbol.hasInstance]?(p=Function.prototype[Symbol.hasInstance],Object.defineProperty(v,Symbol.hasInstance,{value:function(t){return!!p.call(this,t)||this===v&&(t&&t._writableState instanceof b)}})):p=function(t){return t instanceof this},v.prototype.pipe=function(){this.emit("error",new Error("Cannot pipe, not readable"))},v.prototype.write=function(t,e,n){var r,i=this._writableState,a=!1,u=!i.objectMode&&(r=t,h.isBuffer(r)||r instanceof d);return u&&!h.isBuffer(t)&&(t=function(t){return h.from(t)}(t)),"function"==typeof e&&(n=e,e=null),u?e="buffer":e||(e=i.defaultEncoding),"function"!=typeof n&&(n=y),i.ended?function(t,e){var n=new Error("write after end");t.emit("error",n),o.nextTick(e,n)}(this,n):(u||function(t,e,n,r){var i=!0,a=!1;return null===n?a=new TypeError("May not write null values to stream"):"string"==typeof n||void 0===n||e.objectMode||(a=new TypeError("Invalid non-string/buffer chunk")),a&&(t.emit("error",a),o.nextTick(r,a),i=!1),i}(this,i,t,n))&&(i.pendingcb++,a=function(t,e,n,r,i,o){if(!n){var a=function(t,e,n){t.objectMode||!1===t.decodeStrings||"string"!=typeof e||(e=h.from(e,n));return e}(e,r,i);r!==a&&(n=!0,i="buffer",r=a)}var u=e.objectMode?1:r.length;e.length+=u;var s=e.length-1))throw new TypeError("Unknown encoding: "+t);return this._writableState.defaultEncoding=t,this},Object.defineProperty(v.prototype,"writableHighWaterMark",{enumerable:!1,get:function(){return this._writableState.highWaterMark}}),v.prototype._write=function(t,e,n){n(new Error("_write() is not implemented"))},v.prototype._writev=null,v.prototype.end=function(t,e,n){var r=this._writableState;"function"==typeof t?(n=t,t=null,e=null):"function"==typeof 
e&&(n=e,e=null),null!=t&&this.write(t,e),r.corked&&(r.corked=1,this.uncork()),r.ending||r.finished||function(t,e,n){e.ending=!0,E(t,e),n&&(e.finished?o.nextTick(n):t.once("finish",n));e.ended=!0,t.writable=!1}(this,r,n)},Object.defineProperty(v.prototype,"destroyed",{get:function(){return void 0!==this._writableState&&this._writableState.destroyed},set:function(t){this._writableState&&(this._writableState.destroyed=t)}}),v.prototype.destroy=g.destroy,v.prototype._undestroy=g.undestroy,v.prototype._destroy=function(t,e){this.end(),e(t)}}).call(this,n(17),n(818).setImmediate,n(25))},function(t,e,n){"use strict";var r=n(3).Buffer,i=r.isEncoding||function(t){switch((t=""+t)&&t.toLowerCase()){case"hex":case"utf8":case"utf-8":case"ascii":case"binary":case"base64":case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":case"raw":return!0;default:return!1}};function o(t){var e;switch(this.encoding=function(t){var e=function(t){if(!t)return"utf8";for(var e;;)switch(t){case"utf8":case"utf-8":return"utf8";case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return"utf16le";case"latin1":case"binary":return"latin1";case"base64":case"ascii":case"hex":return t;default:if(e)return;t=(""+t).toLowerCase(),e=!0}}(t);if("string"!=typeof e&&(r.isEncoding===i||!i(t)))throw new Error("Unknown encoding: "+t);return e||t}(t),this.encoding){case"utf16le":this.text=s,this.end=c,e=4;break;case"utf8":this.fillLast=u,e=4;break;case"base64":this.text=f,this.end=l,e=3;break;default:return this.write=h,void(this.end=d)}this.lastNeed=0,this.lastTotal=0,this.lastChar=r.allocUnsafe(e)}function a(t){return t<=127?0:t>>5==6?2:t>>4==14?3:t>>3==30?4:t>>6==2?-1:-2}function u(t){var e=this.lastTotal-this.lastNeed,n=function(t,e,n){if(128!=(192&e[0]))return t.lastNeed=0,"�";if(t.lastNeed>1&&e.length>1){if(128!=(192&e[1]))return t.lastNeed=1,"�";if(t.lastNeed>2&&e.length>2&&128!=(192&e[2]))return t.lastNeed=2,"�"}}(this,t);return void 
0!==n?n:this.lastNeed<=t.length?(t.copy(this.lastChar,e,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal)):(t.copy(this.lastChar,e,0,t.length),void(this.lastNeed-=t.length))}function s(t,e){if((t.length-e)%2==0){var n=t.toString("utf16le",e);if(n){var r=n.charCodeAt(n.length-1);if(r>=55296&&r<=56319)return this.lastNeed=2,this.lastTotal=4,this.lastChar[0]=t[t.length-2],this.lastChar[1]=t[t.length-1],n.slice(0,-1)}return n}return this.lastNeed=1,this.lastTotal=2,this.lastChar[0]=t[t.length-1],t.toString("utf16le",e,t.length-1)}function c(t){var e=t&&t.length?this.write(t):"";if(this.lastNeed){var n=this.lastTotal-this.lastNeed;return e+this.lastChar.toString("utf16le",0,n)}return e}function f(t,e){var n=(t.length-e)%3;return 0===n?t.toString("base64",e):(this.lastNeed=3-n,this.lastTotal=3,1===n?this.lastChar[0]=t[t.length-1]:(this.lastChar[0]=t[t.length-2],this.lastChar[1]=t[t.length-1]),t.toString("base64",e,t.length-n))}function l(t){var e=t&&t.length?this.write(t):"";return this.lastNeed?e+this.lastChar.toString("base64",0,3-this.lastNeed):e}function h(t){return t.toString(this.encoding)}function d(t){return t&&t.length?this.write(t):""}e.StringDecoder=o,o.prototype.write=function(t){if(0===t.length)return"";var e,n;if(this.lastNeed){if(void 0===(e=this.fillLast(t)))return"";n=this.lastNeed,this.lastNeed=0}else n=0;return n=0)return i>0&&(t.lastNeed=i-1),i;if(--r=0)return i>0&&(t.lastNeed=i-2),i;if(--r=0)return i>0&&(2===i?i=0:t.lastNeed=i-3),i;return 0}(this,t,e);if(!this.lastNeed)return t.toString("utf8",e);this.lastTotal=n;var r=t.length-(n-this.lastNeed);return t.copy(this.lastChar,0,r),t.toString("utf8",e,r)},o.prototype.fillLast=function(t){if(this.lastNeed<=t.length)return t.copy(this.lastChar,this.lastTotal-this.lastNeed,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal);t.copy(this.lastChar,this.lastTotal-this.lastNeed,0,t.length),this.lastNeed-=t.length}},function(t,e,n){"use strict";var 
r=n(18).Buffer,i=n(2),o=n(409),a=new Array(16),u=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,7,4,13,1,10,6,15,3,12,0,9,5,2,14,11,8,3,10,14,4,9,15,8,1,2,7,0,6,13,11,5,12,1,9,11,10,0,8,12,4,13,3,7,15,14,5,6,2,4,0,5,9,7,12,2,10,14,1,3,8,11,6,15,13],s=[5,14,7,0,9,2,11,4,13,6,15,8,1,10,3,12,6,11,3,7,0,13,5,10,14,15,8,12,4,9,1,2,15,5,1,3,7,14,6,9,11,8,12,2,10,0,4,13,8,6,4,1,3,11,15,0,5,12,2,13,9,7,10,14,12,15,10,4,1,5,8,7,6,2,13,14,0,3,9,11],c=[11,14,15,12,5,8,7,9,11,13,14,15,6,7,9,8,7,6,8,13,11,9,7,15,7,12,15,9,11,7,13,12,11,13,6,7,14,9,13,15,14,8,13,6,5,12,7,5,11,12,14,15,14,15,9,8,9,14,5,6,8,6,5,12,9,15,5,11,6,8,13,12,5,12,13,14,11,8,5,6],f=[8,9,9,11,13,15,15,5,7,7,8,11,14,14,12,6,9,13,15,7,12,8,9,11,7,7,12,7,6,15,13,11,9,7,15,11,8,6,6,14,12,13,5,14,13,13,7,5,15,5,8,11,14,14,6,14,6,9,12,9,12,5,15,8,8,5,12,9,12,5,14,6,8,13,6,5,15,13,11,11],l=[0,1518500249,1859775393,2400959708,2840853838],h=[1352829926,1548603684,1836072691,2053994217,0];function d(){o.call(this,64),this._a=1732584193,this._b=4023233417,this._c=2562383102,this._d=271733878,this._e=3285377520}function p(t,e){return t<>>32-e}function g(t,e,n,r,i,o,a,u){return p(t+(e^n^r)+o+a|0,u)+i|0}function y(t,e,n,r,i,o,a,u){return p(t+(e&n|~e&r)+o+a|0,u)+i|0}function b(t,e,n,r,i,o,a,u){return p(t+((e|~n)^r)+o+a|0,u)+i|0}function v(t,e,n,r,i,o,a,u){return p(t+(e&r|n&~r)+o+a|0,u)+i|0}function m(t,e,n,r,i,o,a,u){return p(t+(e^(n|~r))+o+a|0,u)+i|0}i(d,o),d.prototype._update=function(){for(var t=a,e=0;e<16;++e)t[e]=this._block.readInt32LE(4*e);for(var n=0|this._a,r=0|this._b,i=0|this._c,o=0|this._d,d=0|this._e,_=0|this._a,w=0|this._b,x=0|this._c,k=0|this._d,E=0|this._e,A=0;A<80;A+=1){var 
S,M;A<16?(S=g(n,r,i,o,d,t[u[A]],l[0],c[A]),M=m(_,w,x,k,E,t[s[A]],h[0],f[A])):A<32?(S=y(n,r,i,o,d,t[u[A]],l[1],c[A]),M=v(_,w,x,k,E,t[s[A]],h[1],f[A])):A<48?(S=b(n,r,i,o,d,t[u[A]],l[2],c[A]),M=b(_,w,x,k,E,t[s[A]],h[2],f[A])):A<64?(S=v(n,r,i,o,d,t[u[A]],l[3],c[A]),M=y(_,w,x,k,E,t[s[A]],h[3],f[A])):(S=m(n,r,i,o,d,t[u[A]],l[4],c[A]),M=g(_,w,x,k,E,t[s[A]],h[4],f[A])),n=d,d=o,o=p(i,10),i=r,r=S,_=E,E=k,k=p(x,10),x=w,w=M}var T=this._b+i+k|0;this._b=this._c+o+E|0,this._c=this._d+d+_|0,this._d=this._e+n+w|0,this._e=this._a+r+x|0,this._a=T},d.prototype._digest=function(){this._block[this._blockOffset++]=128,this._blockOffset>56&&(this._block.fill(0,this._blockOffset,64),this._update(),this._blockOffset=0),this._block.fill(0,this._blockOffset,56),this._block.writeUInt32LE(this._length[0],56),this._block.writeUInt32LE(this._length[1],60),this._update();var t=r.alloc?r.alloc(20):new r(20);return t.writeInt32LE(this._a,0),t.writeInt32LE(this._b,4),t.writeInt32LE(this._c,8),t.writeInt32LE(this._d,12),t.writeInt32LE(this._e,16),t},t.exports=d},function(t,e,n){(e=t.exports=function(t){t=t.toLowerCase();var n=e[t];if(!n)throw new Error(t+" is not supported (we accept pull requests)");return new n}).sha=n(826),e.sha1=n(827),e.sha224=n(828),e.sha256=n(414),e.sha384=n(829),e.sha512=n(415)},function(t,e,n){"use strict";var r=n(32);function i(t){this.options=t,this.type=this.options.type,this.blockSize=8,this._init(),this.buffer=new Array(this.blockSize),this.bufferOff=0}t.exports=i,i.prototype._init=function(){},i.prototype.update=function(t){return 0===t.length?[]:"decrypt"===this.type?this._updateDecrypt(t):this._updateEncrypt(t)},i.prototype._buffer=function(t,e){for(var n=Math.min(this.buffer.length-this.bufferOff,t.length-e),r=0;r0;r--)e+=this._buffer(t,e),n+=this._flushBuffer(i,n);return e+=this._buffer(t,e),i},i.prototype.final=function(t){var e,n;return 
t&&(e=this.update(t)),n="encrypt"===this.type?this._finalEncrypt():this._finalDecrypt(),e?e.concat(n):n},i.prototype._pad=function(t,e){if(0===e)return!1;for(;e=0||!n.umod(t.prime1)||!n.umod(t.prime2);)n=new r(i(e));return n}t.exports=o,o.getr=a}).call(this,n(18).Buffer)},function(t,e,n){"use strict";var r=e;r.version=n(855).version,r.utils=n(33),r.rand=n(276),r.curve=n(433),r.curves=n(279),r.ec=n(866),r.eddsa=n(870)},function(t,e,n){"use strict";var r,i=e,o=n(280),a=n(433),u=n(33).assert;function s(t){"short"===t.type?this.curve=new a.short(t):"edwards"===t.type?this.curve=new a.edwards(t):this.curve=new a.mont(t),this.g=this.curve.g,this.n=this.curve.n,this.hash=t.hash,u(this.g.validate(),"Invalid curve"),u(this.g.mul(this.n).isInfinity(),"Invalid curve, G*N != O")}function c(t,e){Object.defineProperty(i,t,{configurable:!0,enumerable:!0,get:function(){var n=new s(e);return Object.defineProperty(i,t,{configurable:!0,enumerable:!0,value:n}),n}})}i.PresetCurve=s,c("p192",{type:"short",prime:"p192",p:"ffffffff ffffffff ffffffff fffffffe ffffffff ffffffff",a:"ffffffff ffffffff ffffffff fffffffe ffffffff fffffffc",b:"64210519 e59c80e7 0fa7e9ab 72243049 feb8deec c146b9b1",n:"ffffffff ffffffff ffffffff 99def836 146bc9b1 b4d22831",hash:o.sha256,gRed:!1,g:["188da80e b03090f6 7cbf20eb 43a18800 f4ff0afd 82ff1012","07192b95 ffc8da78 631011ed 6b24cdd5 73f977a1 1e794811"]}),c("p224",{type:"short",prime:"p224",p:"ffffffff ffffffff ffffffff ffffffff 00000000 00000000 00000001",a:"ffffffff ffffffff ffffffff fffffffe ffffffff ffffffff fffffffe",b:"b4050a85 0c04b3ab f5413256 5044b0b7 d7bfd8ba 270b3943 2355ffb4",n:"ffffffff ffffffff ffffffff ffff16a2 e0b8f03e 13dd2945 5c5c2a3d",hash:o.sha256,gRed:!1,g:["b70e0cbd 6bb4bf7f 321390b9 4a03c1d3 56c21122 343280d6 115c1d21","bd376388 b5f723fb 4c22dfe6 cd4375a0 5a074764 44d58199 85007e34"]}),c("p256",{type:"short",prime:null,p:"ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff ffffffff",a:"ffffffff 00000001 00000000 00000000 
00000000 ffffffff ffffffff fffffffc",b:"5ac635d8 aa3a93e7 b3ebbd55 769886bc 651d06b0 cc53b0f6 3bce3c3e 27d2604b",n:"ffffffff 00000000 ffffffff ffffffff bce6faad a7179e84 f3b9cac2 fc632551",hash:o.sha256,gRed:!1,g:["6b17d1f2 e12c4247 f8bce6e5 63a440f2 77037d81 2deb33a0 f4a13945 d898c296","4fe342e2 fe1a7f9b 8ee7eb4a 7c0f9e16 2bce3357 6b315ece cbb64068 37bf51f5"]}),c("p384",{type:"short",prime:null,p:"ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff fffffffe ffffffff 00000000 00000000 ffffffff",a:"ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff fffffffe ffffffff 00000000 00000000 fffffffc",b:"b3312fa7 e23ee7e4 988e056b e3f82d19 181d9c6e fe814112 0314088f 5013875a c656398d 8a2ed19d 2a85c8ed d3ec2aef",n:"ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff c7634d81 f4372ddf 581a0db2 48b0a77a ecec196a ccc52973",hash:o.sha384,gRed:!1,g:["aa87ca22 be8b0537 8eb1c71e f320ad74 6e1d3b62 8ba79b98 59f741e0 82542a38 5502f25d bf55296c 3a545e38 72760ab7","3617de4a 96262c6f 5d9e98bf 9292dc29 f8f41dbd 289a147c e9da3113 b5f0b8c0 0a60b1ce 1d7e819d 7a431d7c 90ea0e5f"]}),c("p521",{type:"short",prime:null,p:"000001ff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff",a:"000001ff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff fffffffc",b:"00000051 953eb961 8e1c9a1f 929a21a0 b68540ee a2da725b 99b315f3 b8b48991 8ef109e1 56193951 ec7e937b 1652c0bd 3bb1bf07 3573df88 3d2c34f1 ef451fd4 6b503f00",n:"000001ff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff fffffffa 51868783 bf2f966b 7fcc0148 f709a5d0 3bb5c9b8 899c47ae bb6fb71e 91386409",hash:o.sha512,gRed:!1,g:["000000c6 858e06b7 0404e9cd 9e3ecb66 2395b442 9c648139 053fb521 f828af60 6b4d3dba a14b5e77 efe75928 fe1dc127 a2ffa8de 3348b3c1 856a429b f97e7e31 c2e5bd66","00000118 39296a78 9a3bc004 5c8a5fb4 2c7d1bd9 98f54449 
579b4468 17afbd17 273e662c 97ee7299 5ef42640 c550b901 3fad0761 353c7086 a272c240 88be9476 9fd16650"]}),c("curve25519",{type:"mont",prime:"p25519",p:"7fffffffffffffff ffffffffffffffff ffffffffffffffff ffffffffffffffed",a:"76d06",b:"1",n:"1000000000000000 0000000000000000 14def9dea2f79cd6 5812631a5cf5d3ed",hash:o.sha256,gRed:!1,g:["9"]}),c("ed25519",{type:"edwards",prime:"p25519",p:"7fffffffffffffff ffffffffffffffff ffffffffffffffff ffffffffffffffed",a:"-1",c:"1",d:"52036cee2b6ffe73 8cc740797779e898 00700a4d4141d8ab 75eb4dca135978a3",n:"1000000000000000 0000000000000000 14def9dea2f79cd6 5812631a5cf5d3ed",hash:o.sha256,gRed:!1,g:["216936d3cd6e53fec0a4e231fdd6dc5c692cc7609525a7b2c9562d608f25d51a","6666666666666666666666666666666666666666666666666666666666666658"]});try{r=n(865)}catch(t){r=void 0}c("secp256k1",{type:"short",prime:"k256",p:"ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff fffffffe fffffc2f",a:"0",b:"7",n:"ffffffff ffffffff ffffffff fffffffe baaedce6 af48a03b bfd25e8c d0364141",h:"1",hash:o.sha256,beta:"7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee",lambda:"5363ad4cc05c30e0a5261c028812645a122e22ea20816678df02967c1b23bd72",basis:[{a:"3086d221a7d46bcde86c90e49284eb15",b:"-e4437ed6010e88286f547fa90abfe4c3"},{a:"114ca50f7a8e2f3f657c1108d9d44cfd8",b:"3086d221a7d46bcde86c90e49284eb15"}],gRed:!1,g:["79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8",r]})},function(t,e,n){var r=e;r.utils=n(45),r.common=n(136),r.sha=n(859),r.ripemd=n(863),r.hmac=n(864),r.sha1=r.sha.sha1,r.sha256=r.sha.sha256,r.sha224=r.sha.sha224,r.sha384=r.sha.sha384,r.sha512=r.sha.sha512,r.ripemd160=r.ripemd.ripemd160},function(t,e,n){"use strict";var r,i,o,a,u=n(23),s=180/Math.PI,c={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1},f=function(t,e,n,r,i,o){var 
a,u,c;return(a=Math.sqrt(t*t+e*e))&&(t/=a,e/=a),(c=t*n+e*r)&&(n-=t*c,r-=e*c),(u=Math.sqrt(n*n+r*r))&&(n/=u,r/=u,c/=u),t*r180?e+=360:e-t>180&&(t+=360),o.push({i:n.push(i(n)+"rotate(",null,r)-2,x:Object(u.a)(t,e)})):e&&n.push(i(n)+"rotate("+e+r)}(o.rotate,a.rotate,s,c),function(t,e,n,o){t!==e?o.push({i:n.push(i(n)+"skewX(",null,r)-2,x:Object(u.a)(t,e)}):e&&n.push(i(n)+"skewX("+e+r)}(o.skewX,a.skewX,s,c),function(t,e,n,r,o,a){if(t!==n||e!==r){var s=o.push(i(o)+"scale(",null,",",null,")");a.push({i:s-4,x:Object(u.a)(t,n)},{i:s-2,x:Object(u.a)(e,r)})}else 1===n&&1===r||o.push(i(o)+"scale("+n+","+r+")")}(o.scaleX,o.scaleY,a.scaleX,a.scaleY,s,c),o=a=null,function(t){for(var e,n=-1,r=c.length;++n=0&&(n=t.slice(r+1),t=t.slice(0,r)),t&&!e.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:n}}))}function u(t,e){for(var n,r=0,i=t.length;r0)for(var n,r,i=new Array(n),o=0;o1e-6)if(Math.abs(l*s-c*f)>1e-6&&o){var d=n-a,p=i-u,g=s*s+c*c,y=d*d+p*p,b=Math.sqrt(g),v=Math.sqrt(h),m=o*Math.tan((r-Math.acos((g+h-y)/(2*b*v)))/2),_=m/v,w=m/b;Math.abs(_-1)>1e-6&&(this._+="L"+(t+_*f)+","+(e+_*l)),this._+="A"+o+","+o+",0,0,"+ +(l*d>f*p)+","+(this._x1=t+w*s)+","+(this._y1=e+w*c)}else this._+="L"+(this._x1=t)+","+(this._y1=e);else;},arc:function(t,e,n,a,u,s){t=+t,e=+e,s=!!s;var c=(n=+n)*Math.cos(a),f=n*Math.sin(a),l=t+c,h=e+f,d=1^s,p=s?a-u:u-a;if(n<0)throw new Error("negative radius: "+n);null===this._x1?this._+="M"+l+","+h:(Math.abs(this._x1-l)>1e-6||Math.abs(this._y1-h)>1e-6)&&(this._+="L"+l+","+h),n&&(p<0&&(p=p%i+i),p>o?this._+="A"+n+","+n+",0,1,"+d+","+(t-c)+","+(e-f)+"A"+n+","+n+",0,1,"+d+","+(this._x1=l)+","+(this._y1=h):p>1e-6&&(this._+="A"+n+","+n+",0,"+ +(p>=r)+","+d+","+(this._x1=t+n*Math.cos(u))+","+(this._y1=e+n*Math.sin(u))))},rect:function(t,e,n,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+e)+"h"+ +n+"v"+ +r+"h"+-n+"Z"},toString:function(){return this._}},e.a=u},function(t,e,n){"use strict";var r=n(48);e.a=function(t){return 
Math.max(0,-Object(r.a)(Math.abs(t)))}},function(t,e,n){"use strict";var r=n(48);e.a=function(t,e){return Math.max(0,3*Math.max(-8,Math.min(8,Math.floor(Object(r.a)(e)/3)))-Object(r.a)(Math.abs(t)))}},function(t,e,n){"use strict";var r=n(48);e.a=function(t,e){return t=Math.abs(t),e=Math.abs(e)-t,Math.max(0,Object(r.a)(e)-Object(r.a)(t))+1}},function(t,e,n){"use strict";e.a=function(t,e){return t=+t,e=+e,function(n){return Math.round(t*(1-n)+e*n)}}},function(t,e,n){"use strict";var r=Math.SQRT2;function i(t){return((t=Math.exp(t))+1/t)/2}e.a=function(t,e){var n,o,a=t[0],u=t[1],s=t[2],c=e[0],f=e[1],l=e[2],h=c-a,d=f-u,p=h*h+d*d;if(p<1e-12)o=Math.log(l/s)/r,n=function(t){return[a+t*h,u+t*d,s*Math.exp(r*t*o)]};else{var g=Math.sqrt(p),y=(l*l-s*s+4*p)/(2*s*2*g),b=(l*l-s*s-4*p)/(2*l*2*g),v=Math.log(Math.sqrt(y*y+1)-y),m=Math.log(Math.sqrt(b*b+1)-b);o=(m-v)/r,n=function(t){var e,n=t*o,c=i(v),f=s/(2*g)*(c*(e=r*n+v,((e=Math.exp(2*e))-1)/(e+1))-function(t){return((t=Math.exp(t))-1/t)/2}(v));return[a+f*h,u+f*d,s*c/i(r*n+v)]}}return n.duration=1e3*o,n}},function(t,e){},function(t,e,n){var r=n(146),i=n(227),o=n(151),a=n(484),u=n(490),s=n(299),c=n(300),f=n(493),l=n(494),h=n(304),d=n(495),p=n(90),g=n(499),y=n(500),b=n(309),v=n(15),m=n(88),_=n(504),w=n(28),x=n(506),k=n(62),E={};E["[object Arguments]"]=E["[object Array]"]=E["[object ArrayBuffer]"]=E["[object DataView]"]=E["[object Boolean]"]=E["[object Date]"]=E["[object Float32Array]"]=E["[object Float64Array]"]=E["[object Int8Array]"]=E["[object Int16Array]"]=E["[object Int32Array]"]=E["[object Map]"]=E["[object Number]"]=E["[object Object]"]=E["[object RegExp]"]=E["[object Set]"]=E["[object String]"]=E["[object Symbol]"]=E["[object Uint8Array]"]=E["[object Uint8ClampedArray]"]=E["[object Uint16Array]"]=E["[object Uint32Array]"]=!0,E["[object Error]"]=E["[object Function]"]=E["[object WeakMap]"]=!1,t.exports=function t(e,n,A,S,M,T){var O,D=1&n,C=2&n,N=4&n;if(A&&(O=M?A(e,S,M,T):A(e)),void 0!==O)return O;if(!w(e))return e;var 
I=v(e);if(I){if(O=g(e),!D)return c(e,O)}else{var R=p(e),j="[object Function]"==R||"[object GeneratorFunction]"==R;if(m(e))return s(e,D);if("[object Object]"==R||"[object Arguments]"==R||j&&!M){if(O=C||j?{}:b(e),!D)return C?l(e,u(O,e)):f(e,a(O,e))}else{if(!E[R])return M?e:{};O=y(e,R,D)}}T||(T=new r);var L=T.get(e);if(L)return L;T.set(e,O),x(e)?e.forEach((function(r){O.add(t(r,n,A,r,e,T))})):_(e)&&e.forEach((function(r,i){O.set(i,t(r,n,A,i,e,T))}));var B=N?C?d:h:C?keysIn:k,P=I?void 0:B(e);return i(P||e,(function(r,i){P&&(r=e[i=r]),o(O,i,t(r,n,A,i,e,T))})),O}},function(t,e,n){(function(e){var n="object"==typeof e&&e&&e.Object===Object&&e;t.exports=n}).call(this,n(25))},function(t,e){var n=Function.prototype.toString;t.exports=function(t){if(null!=t){try{return n.call(t)}catch(t){}try{return t+""}catch(t){}}return""}},function(t,e,n){var r=n(74),i=function(){try{var t=r(Object,"defineProperty");return t({},"",{}),t}catch(t){}}();t.exports=i},function(t,e,n){var r=n(485),i=n(126),o=n(15),a=n(88),u=n(153),s=n(127),c=Object.prototype.hasOwnProperty;t.exports=function(t,e){var n=o(t),f=!n&&i(t),l=!n&&!f&&a(t),h=!n&&!f&&!l&&s(t),d=n||f||l||h,p=d?r(t.length,String):[],g=p.length;for(var y in t)!e&&!c.call(t,y)||d&&("length"==y||l&&("offset"==y||"parent"==y)||h&&("buffer"==y||"byteLength"==y||"byteOffset"==y)||u(y,g))||p.push(y);return p}},function(t,e){t.exports=function(t,e){return function(n){return t(e(n))}}},function(t,e,n){(function(t){var r=n(35),i=e&&!e.nodeType&&e,o=i&&"object"==typeof t&&t&&!t.nodeType&&t,a=o&&o.exports===i?r.Buffer:void 0,u=a?a.allocUnsafe:void 0;t.exports=function(t,e){if(e)return t.slice();var n=t.length,r=u?u(n):new t.constructor(n);return t.copy(r),r}}).call(this,n(14)(t))},function(t,e){t.exports=function(t,e){var n=-1,r=t.length;for(e||(e=Array(r));++nf))return!1;var h=s.get(t);if(h&&s.get(e))return h==e;var d=-1,p=!0,g=2&n?new r:void 
0;for(s.set(t,e),s.set(e,t);++d0&&(o=s.removeMin(),(a=u[o]).distance!==Number.POSITIVE_INFINITY);)r(o).forEach(c);return u}(t,String(e),n||o,r||function(e){return t.outEdges(e)})};var o=r.constant(1)},function(t,e,n){var r=n(27);function i(){this._arr=[],this._keyIndices={}}t.exports=i,i.prototype.size=function(){return this._arr.length},i.prototype.keys=function(){return this._arr.map((function(t){return t.key}))},i.prototype.has=function(t){return r.has(this._keyIndices,t)},i.prototype.priority=function(t){var e=this._keyIndices[t];if(void 0!==e)return this._arr[e].priority},i.prototype.min=function(){if(0===this.size())throw new Error("Queue underflow");return this._arr[0].key},i.prototype.add=function(t,e){var n=this._keyIndices;if(t=String(t),!r.has(n,t)){var i=this._arr,o=i.length;return n[t]=o,i.push({key:t,priority:e}),this._decrease(o),!0}return!1},i.prototype.removeMin=function(){this._swap(0,this._arr.length-1);var t=this._arr.pop();return delete this._keyIndices[t.key],this._heapify(0),t.key},i.prototype.decrease=function(t,e){var n=this._keyIndices[t];if(e>this._arr[n].priority)throw new Error("New priority is greater than current priority. 
Key: "+t+" Old: "+this._arr[n].priority+" New: "+e);this._arr[n].priority=e,this._decrease(n)},i.prototype._heapify=function(t){var e=this._arr,n=2*t,r=n+1,i=t;n>1].priorityf))return!1;var h=s.get(t);if(h&&s.get(e))return h==e;var d=-1,p=!0,g=2&n?new r:void 0;for(s.set(t,e),s.set(e,t);++d0&&(o=s.removeMin(),(a=u[o]).distance!==Number.POSITIVE_INFINITY);)r(o).forEach(c);return u}(t,String(e),n||o,r||function(e){return t.outEdges(e)})};var o=r.constant(1)},function(t,e,n){var r=n(29);function i(){this._arr=[],this._keyIndices={}}t.exports=i,i.prototype.size=function(){return this._arr.length},i.prototype.keys=function(){return this._arr.map((function(t){return t.key}))},i.prototype.has=function(t){return r.has(this._keyIndices,t)},i.prototype.priority=function(t){var e=this._keyIndices[t];if(void 0!==e)return this._arr[e].priority},i.prototype.min=function(){if(0===this.size())throw new Error("Queue underflow");return this._arr[0].key},i.prototype.add=function(t,e){var n=this._keyIndices;if(t=String(t),!r.has(n,t)){var i=this._arr,o=i.length;return n[t]=o,i.push({key:t,priority:e}),this._decrease(o),!0}return!1},i.prototype.removeMin=function(){this._swap(0,this._arr.length-1);var t=this._arr.pop();return delete this._keyIndices[t.key],this._heapify(0),t.key},i.prototype.decrease=function(t,e){var n=this._keyIndices[t];if(e>this._arr[n].priority)throw new Error("New priority is greater than current priority. 
Key: "+t+" Old: "+this._arr[n].priority+" New: "+e);this._arr[n].priority=e,this._decrease(n)},i.prototype._heapify=function(t){var e=this._arr,n=2*t,r=n+1,i=t;n>1].priority2?e[2]:void 0;for(c&&o(e[0],e[1],c)&&(r=1);++n1&&a.sort((function(t,e){var r=t.x-n.x,i=t.y-n.y,o=Math.sqrt(r*r+i*i),a=e.x-n.x,u=e.y-n.y,s=Math.sqrt(a*a+u*u);return oMath.abs(a)*c?(u<0&&(c=-c),n=0===u?0:c*a/u,r=c):(a<0&&(s=-s),n=s,r=0===a?0:s*u/a);return{x:i+n,y:o+r}}},function(t,e,n){var r=n(758);t.exports=function(t){return t?(t=r(t))===1/0||t===-1/0?17976931348623157e292*(t<0?-1:1):t==t?t:0:0===t?t:0}},function(t,e,n){var r=n(240);t.exports=function(t){return(null==t?0:t.length)?r(t,1):[]}},function(t,e,n){var r=n(152),i=n(86);t.exports=function(t,e,n){(void 0===n||i(t[e],n))&&(void 0!==n||e in t)||r(t,e,n)}},function(t,e){t.exports=function(t,e){if(("constructor"!==e||"function"!=typeof t[e])&&"__proto__"!=e)return t[e]}},function(t,e){t.exports=function(t,e){return t=this._blockSize;){for(var o=this._blockOffset;o0;++a)this._length[a]+=u,(u=this._length[a]/4294967296|0)>0&&(this._length[a]-=4294967296*u);return this},o.prototype._update=function(){throw new Error("_update is not implemented")},o.prototype.digest=function(t){if(this._finalized)throw new Error("Digest already called");this._finalized=!0;var e=this._digest();void 0!==t&&(e=e.toString(t)),this._block.fill(0),this._blockOffset=0;for(var n=0;n<4;++n)this._length[n]=0;return e},o.prototype._digest=function(){throw new Error("_digest is not implemented")},t.exports=o},function(t,e,n){"use strict";(function(e,r){var i=n(180);t.exports=m;var o,a=n(408);m.ReadableState=v;n(266).EventEmitter;var u=function(t,e){return t.listeners(e).length},s=n(411),c=n(268).Buffer,f=e.Uint8Array||function(){};var l=Object.create(n(134));l.inherits=n(2);var h=n(815),d=void 0;d=h&&h.debuglog?h.debuglog("stream"):function(){};var p,g=n(816),y=n(412);l.inherits(m,s);var b=["error","close","destroy","pause","resume"];function v(t,e){t=t||{};var r=e 
instanceof(o=o||n(80));this.objectMode=!!t.objectMode,r&&(this.objectMode=this.objectMode||!!t.readableObjectMode);var i=t.highWaterMark,a=t.readableHighWaterMark,u=this.objectMode?16:16384;this.highWaterMark=i||0===i?i:r&&(a||0===a)?a:u,this.highWaterMark=Math.floor(this.highWaterMark),this.buffer=new g,this.length=0,this.pipes=null,this.pipesCount=0,this.flowing=null,this.ended=!1,this.endEmitted=!1,this.reading=!1,this.sync=!0,this.needReadable=!1,this.emittedReadable=!1,this.readableListening=!1,this.resumeScheduled=!1,this.destroyed=!1,this.defaultEncoding=t.defaultEncoding||"utf8",this.awaitDrain=0,this.readingMore=!1,this.decoder=null,this.encoding=null,t.encoding&&(p||(p=n(270).StringDecoder),this.decoder=new p(t.encoding),this.encoding=t.encoding)}function m(t){if(o=o||n(80),!(this instanceof m))return new m(t);this._readableState=new v(t,this),this.readable=!0,t&&("function"==typeof t.read&&(this._read=t.read),"function"==typeof t.destroy&&(this._destroy=t.destroy)),s.call(this)}function _(t,e,n,r,i){var o,a=t._readableState;null===e?(a.reading=!1,function(t,e){if(e.ended)return;if(e.decoder){var n=e.decoder.end();n&&n.length&&(e.buffer.push(n),e.length+=e.objectMode?1:n.length)}e.ended=!0,k(t)}(t,a)):(i||(o=function(t,e){var n;r=e,c.isBuffer(r)||r instanceof f||"string"==typeof e||void 0===e||t.objectMode||(n=new TypeError("Invalid non-string/buffer chunk"));var r;return n}(a,e)),o?t.emit("error",o):a.objectMode||e&&e.length>0?("string"==typeof e||a.objectMode||Object.getPrototypeOf(e)===c.prototype||(e=function(t){return c.from(t)}(e)),r?a.endEmitted?t.emit("error",new Error("stream.unshift() after end event")):w(t,a,e,!0):a.ended?t.emit("error",new Error("stream.push() after EOF")):(a.reading=!1,a.decoder&&!n?(e=a.decoder.write(e),a.objectMode||0!==e.length?w(t,a,e,!1):A(t,a)):w(t,a,e,!1))):r||(a.reading=!1));return function(t){return!t.ended&&(t.needReadable||t.lengthe.highWaterMark&&(e.highWaterMark=function(t){return 
t>=8388608?t=8388608:(t--,t|=t>>>1,t|=t>>>2,t|=t>>>4,t|=t>>>8,t|=t>>>16,t++),t}(t)),t<=e.length?t:e.ended?e.length:(e.needReadable=!0,0))}function k(t){var e=t._readableState;e.needReadable=!1,e.emittedReadable||(d("emitReadable",e.flowing),e.emittedReadable=!0,e.sync?i.nextTick(E,t):E(t))}function E(t){d("emit readable"),t.emit("readable"),O(t)}function A(t,e){e.readingMore||(e.readingMore=!0,i.nextTick(S,t,e))}function S(t,e){for(var n=e.length;!e.reading&&!e.flowing&&!e.ended&&e.length=e.length?(n=e.decoder?e.buffer.join(""):1===e.buffer.length?e.buffer.head.data:e.buffer.concat(e.length),e.buffer.clear()):n=function(t,e,n){var r;to.length?o.length:t;if(a===o.length?i+=o:i+=o.slice(0,t),0===(t-=a)){a===o.length?(++r,n.next?e.head=n.next:e.head=e.tail=null):(e.head=n,n.data=o.slice(a));break}++r}return e.length-=r,i}(t,e):function(t,e){var n=c.allocUnsafe(t),r=e.head,i=1;r.data.copy(n),t-=r.data.length;for(;r=r.next;){var o=r.data,a=t>o.length?o.length:t;if(o.copy(n,n.length-t,0,a),0===(t-=a)){a===o.length?(++i,r.next?e.head=r.next:e.head=e.tail=null):(e.head=r,r.data=o.slice(a));break}++i}return e.length-=i,n}(t,e);return r}(t,e.buffer,e.decoder),n);var n}function C(t){var e=t._readableState;if(e.length>0)throw new Error('"endReadable()" called on non-empty stream');e.endEmitted||(e.ended=!0,i.nextTick(N,e,t))}function N(t,e){t.endEmitted||0!==t.length||(t.endEmitted=!0,e.readable=!1,e.emit("end"))}function I(t,e){for(var n=0,r=t.length;n=e.highWaterMark||e.ended))return d("read: emitReadable",e.length,e.ended),0===e.length&&e.ended?C(this):k(this),null;if(0===(t=x(t,e))&&e.ended)return 0===e.length&&C(this),null;var r,i=e.needReadable;return d("need readable",i),(0===e.length||e.length-t0?D(t,e):null)?(e.needReadable=!0,t=0):e.length-=t,0===e.length&&(e.ended||(e.needReadable=!0),n!==t&&e.ended&&C(this)),null!==r&&this.emit("data",r),r},m.prototype._read=function(t){this.emit("error",new Error("_read() is not implemented"))},m.prototype.pipe=function(t,e){var 
n=this,o=this._readableState;switch(o.pipesCount){case 0:o.pipes=t;break;case 1:o.pipes=[o.pipes,t];break;default:o.pipes.push(t)}o.pipesCount+=1,d("pipe count=%d opts=%j",o.pipesCount,e);var s=(!e||!1!==e.end)&&t!==r.stdout&&t!==r.stderr?f:m;function c(e,r){d("onunpipe"),e===n&&r&&!1===r.hasUnpiped&&(r.hasUnpiped=!0,d("cleanup"),t.removeListener("close",b),t.removeListener("finish",v),t.removeListener("drain",l),t.removeListener("error",y),t.removeListener("unpipe",c),n.removeListener("end",f),n.removeListener("end",m),n.removeListener("data",g),h=!0,!o.awaitDrain||t._writableState&&!t._writableState.needDrain||l())}function f(){d("onend"),t.end()}o.endEmitted?i.nextTick(s):n.once("end",s),t.on("unpipe",c);var l=function(t){return function(){var e=t._readableState;d("pipeOnDrain",e.awaitDrain),e.awaitDrain&&e.awaitDrain--,0===e.awaitDrain&&u(t,"data")&&(e.flowing=!0,O(t))}}(n);t.on("drain",l);var h=!1;var p=!1;function g(e){d("ondata"),p=!1,!1!==t.write(e)||p||((1===o.pipesCount&&o.pipes===t||o.pipesCount>1&&-1!==I(o.pipes,t))&&!h&&(d("false write response, pause",n._readableState.awaitDrain),n._readableState.awaitDrain++,p=!0),n.pause())}function y(e){d("onerror",e),m(),t.removeListener("error",y),0===u(t,"error")&&t.emit("error",e)}function b(){t.removeListener("finish",v),m()}function v(){d("onfinish"),t.removeListener("close",b),m()}function m(){d("unpipe"),n.unpipe(t)}return n.on("data",g),function(t,e,n){if("function"==typeof t.prependListener)return t.prependListener(e,n);t._events&&t._events[e]?a(t._events[e])?t._events[e].unshift(n):t._events[e]=[n,t._events[e]]:t.on(e,n)}(t,"error",y),t.once("close",b),t.once("finish",v),t.emit("pipe",n),o.flowing||(d("pipe resume"),n.resume()),t},m.prototype.unpipe=function(t){var e=this._readableState,n={hasUnpiped:!1};if(0===e.pipesCount)return this;if(1===e.pipesCount)return t&&t!==e.pipes?this:(t||(t=e.pipes),e.pipes=null,e.pipesCount=0,e.flowing=!1,t&&t.emit("unpipe",this,n),this);if(!t){var 
r=e.pipes,i=e.pipesCount;e.pipes=null,e.pipesCount=0,e.flowing=!1;for(var o=0;o>>2|t<<30)^(t>>>13|t<<19)^(t>>>22|t<<10)}function h(t){return(t>>>6|t<<26)^(t>>>11|t<<21)^(t>>>25|t<<7)}function d(t){return(t>>>7|t<<25)^(t>>>18|t<<14)^t>>>3}r(s,i),s.prototype.init=function(){return this._a=1779033703,this._b=3144134277,this._c=1013904242,this._d=2773480762,this._e=1359893119,this._f=2600822924,this._g=528734635,this._h=1541459225,this},s.prototype._update=function(t){for(var e,n=this._w,r=0|this._a,i=0|this._b,o=0|this._c,u=0|this._d,s=0|this._e,p=0|this._f,g=0|this._g,y=0|this._h,b=0;b<16;++b)n[b]=t.readInt32BE(4*b);for(;b<64;++b)n[b]=0|(((e=n[b-2])>>>17|e<<15)^(e>>>19|e<<13)^e>>>10)+n[b-7]+d(n[b-15])+n[b-16];for(var v=0;v<64;++v){var m=y+h(s)+c(s,p,g)+a[v]+n[v]|0,_=l(r)+f(r,i,o)|0;y=g,g=p,p=s,s=u+m|0,u=o,o=i,i=r,r=m+_|0}this._a=r+this._a|0,this._b=i+this._b|0,this._c=o+this._c|0,this._d=u+this._d|0,this._e=s+this._e|0,this._f=p+this._f|0,this._g=g+this._g|0,this._h=y+this._h|0},s.prototype._hash=function(){var t=o.allocUnsafe(32);return t.writeInt32BE(this._a,0),t.writeInt32BE(this._b,4),t.writeInt32BE(this._c,8),t.writeInt32BE(this._d,12),t.writeInt32BE(this._e,16),t.writeInt32BE(this._f,20),t.writeInt32BE(this._g,24),t.writeInt32BE(this._h,28),t},t.exports=s},function(t,e,n){var 
r=n(2),i=n(101),o=n(3).Buffer,a=[1116352408,3609767458,1899447441,602891725,3049323471,3964484399,3921009573,2173295548,961987163,4081628472,1508970993,3053834265,2453635748,2937671579,2870763221,3664609560,3624381080,2734883394,310598401,1164996542,607225278,1323610764,1426881987,3590304994,1925078388,4068182383,2162078206,991336113,2614888103,633803317,3248222580,3479774868,3835390401,2666613458,4022224774,944711139,264347078,2341262773,604807628,2007800933,770255983,1495990901,1249150122,1856431235,1555081692,3175218132,1996064986,2198950837,2554220882,3999719339,2821834349,766784016,2952996808,2566594879,3210313671,3203337956,3336571891,1034457026,3584528711,2466948901,113926993,3758326383,338241895,168717936,666307205,1188179964,773529912,1546045734,1294757372,1522805485,1396182291,2643833823,1695183700,2343527390,1986661051,1014477480,2177026350,1206759142,2456956037,344077627,2730485921,1290863460,2820302411,3158454273,3259730800,3505952657,3345764771,106217008,3516065817,3606008344,3600352804,1432725776,4094571909,1467031594,275423344,851169720,430227734,3100823752,506948616,1363258195,659060556,3750685593,883997877,3785050280,958139571,3318307427,1322822218,3812723403,1537002063,2003034995,1747873779,3602036899,1955562222,1575990012,2024104815,1125592928,2227730452,2716904306,2361852424,442776044,2428436474,593698344,2756734187,3733110249,3204031479,2999351573,3329325298,3815920427,3391569614,3928383900,3515267271,566280711,3940187606,3454069534,4118630271,4000239992,116418474,1914138554,174292421,2731055270,289380356,3203993006,460393269,320620315,685471733,587496836,852142971,1086792851,1017036298,365543100,1126000580,2618297676,1288033470,3409855158,1501505948,4234509866,1607167915,987167468,1816402316,1246189591],u=new Array(160);function s(){this.init(),this._w=u,i.call(this,128,112)}function c(t,e,n){return n^t&(e^n)}function f(t,e,n){return t&e|n&(t|e)}function l(t,e){return(t>>>28|e<<4)^(e>>>2|t<<30)^(e>>>7|t<<25)}function 
h(t,e){return(t>>>14|e<<18)^(t>>>18|e<<14)^(e>>>9|t<<23)}function d(t,e){return(t>>>1|e<<31)^(t>>>8|e<<24)^t>>>7}function p(t,e){return(t>>>1|e<<31)^(t>>>8|e<<24)^(t>>>7|e<<25)}function g(t,e){return(t>>>19|e<<13)^(e>>>29|t<<3)^t>>>6}function y(t,e){return(t>>>19|e<<13)^(e>>>29|t<<3)^(t>>>6|e<<26)}function b(t,e){return t>>>0>>0?1:0}r(s,i),s.prototype.init=function(){return this._ah=1779033703,this._bh=3144134277,this._ch=1013904242,this._dh=2773480762,this._eh=1359893119,this._fh=2600822924,this._gh=528734635,this._hh=1541459225,this._al=4089235720,this._bl=2227873595,this._cl=4271175723,this._dl=1595750129,this._el=2917565137,this._fl=725511199,this._gl=4215389547,this._hl=327033209,this},s.prototype._update=function(t){for(var e=this._w,n=0|this._ah,r=0|this._bh,i=0|this._ch,o=0|this._dh,u=0|this._eh,s=0|this._fh,v=0|this._gh,m=0|this._hh,_=0|this._al,w=0|this._bl,x=0|this._cl,k=0|this._dl,E=0|this._el,A=0|this._fl,S=0|this._gl,M=0|this._hl,T=0;T<32;T+=2)e[T]=t.readInt32BE(4*T),e[T+1]=t.readInt32BE(4*T+4);for(;T<160;T+=2){var O=e[T-30],D=e[T-30+1],C=d(O,D),N=p(D,O),I=g(O=e[T-4],D=e[T-4+1]),R=y(D,O),j=e[T-14],L=e[T-14+1],B=e[T-32],P=e[T-32+1],F=N+L|0,q=C+j+b(F,N)|0;q=(q=q+I+b(F=F+R|0,R)|0)+B+b(F=F+P|0,P)|0,e[T]=q,e[T+1]=F}for(var U=0;U<160;U+=2){q=e[U],F=e[U+1];var z=f(n,r,i),Y=f(_,w,x),V=l(n,_),G=l(_,n),H=h(u,E),W=h(E,u),$=a[U],K=a[U+1],Z=c(u,s,v),X=c(E,A,S),J=M+W|0,Q=m+H+b(J,M)|0;Q=(Q=(Q=Q+Z+b(J=J+X|0,X)|0)+$+b(J=J+K|0,K)|0)+q+b(J=J+F|0,F)|0;var 
tt=G+Y|0,et=V+z+b(tt,G)|0;m=v,M=S,v=s,S=A,s=u,A=E,u=o+Q+b(E=k+J|0,k)|0,o=i,k=x,i=r,x=w,r=n,w=_,n=Q+et+b(_=J+tt|0,J)|0}this._al=this._al+_|0,this._bl=this._bl+w|0,this._cl=this._cl+x|0,this._dl=this._dl+k|0,this._el=this._el+E|0,this._fl=this._fl+A|0,this._gl=this._gl+S|0,this._hl=this._hl+M|0,this._ah=this._ah+n+b(this._al,_)|0,this._bh=this._bh+r+b(this._bl,w)|0,this._ch=this._ch+i+b(this._cl,x)|0,this._dh=this._dh+o+b(this._dl,k)|0,this._eh=this._eh+u+b(this._el,E)|0,this._fh=this._fh+s+b(this._fl,A)|0,this._gh=this._gh+v+b(this._gl,S)|0,this._hh=this._hh+m+b(this._hl,M)|0},s.prototype._hash=function(){var t=o.allocUnsafe(64);function e(e,n,r){t.writeInt32BE(e,r),t.writeInt32BE(n,r+4)}return e(this._ah,this._al,0),e(this._bh,this._bl,8),e(this._ch,this._cl,16),e(this._dh,this._dl,24),e(this._eh,this._el,32),e(this._fh,this._fl,40),e(this._gh,this._gl,48),e(this._hh,this._hl,56),t},t.exports=s},function(t,e,n){"use strict";var r=n(2),i=n(830),o=n(65),a=n(3).Buffer,u=n(417),s=n(271),c=n(272),f=a.alloc(128);function l(t,e){o.call(this,"digest"),"string"==typeof e&&(e=a.from(e));var n="sha512"===t||"sha384"===t?128:64;(this._alg=t,this._key=e,e.length>n)?e=("rmd160"===t?new s:c(t)).update(e).digest():e.lengthn||o!=o)throw new TypeError("Bad key length")}}).call(this,n(18).Buffer)},function(t,e,n){(function(e){var n;e.browser?n="utf-8":n=parseInt(e.version.split(".")[0].slice(1),10)>=6?"utf-8":"binary";t.exports=n}).call(this,n(17))},function(t,e,n){var r=n(417),i=n(271),o=n(272),a=n(420),u=n(421),s=n(3).Buffer,c=s.alloc(128),f={md5:16,sha1:20,sha224:28,sha256:32,sha384:48,sha512:64,rmd160:20,ripemd160:20};function l(t,e,n){var a=function(t){function e(e){return o(t).update(e).digest()}return"rmd160"===t||"ripemd160"===t?function(t){return(new 
i).update(t).digest()}:"md5"===t?r:e}(t),u="sha512"===t||"sha384"===t?128:64;e.length>u?e=a(e):e.length>>0},e.writeUInt32BE=function(t,e,n){t[0+n]=e>>>24,t[1+n]=e>>>16&255,t[2+n]=e>>>8&255,t[3+n]=255&e},e.ip=function(t,e,n,r){for(var i=0,o=0,a=6;a>=0;a-=2){for(var u=0;u<=24;u+=8)i<<=1,i|=e>>>u+a&1;for(u=0;u<=24;u+=8)i<<=1,i|=t>>>u+a&1}for(a=6;a>=0;a-=2){for(u=1;u<=25;u+=8)o<<=1,o|=e>>>u+a&1;for(u=1;u<=25;u+=8)o<<=1,o|=t>>>u+a&1}n[r+0]=i>>>0,n[r+1]=o>>>0},e.rip=function(t,e,n,r){for(var i=0,o=0,a=0;a<4;a++)for(var u=24;u>=0;u-=8)i<<=1,i|=e>>>u+a&1,i<<=1,i|=t>>>u+a&1;for(a=4;a<8;a++)for(u=24;u>=0;u-=8)o<<=1,o|=e>>>u+a&1,o<<=1,o|=t>>>u+a&1;n[r+0]=i>>>0,n[r+1]=o>>>0},e.pc1=function(t,e,n,r){for(var i=0,o=0,a=7;a>=5;a--){for(var u=0;u<=24;u+=8)i<<=1,i|=e>>u+a&1;for(u=0;u<=24;u+=8)i<<=1,i|=t>>u+a&1}for(u=0;u<=24;u+=8)i<<=1,i|=e>>u+a&1;for(a=1;a<=3;a++){for(u=0;u<=24;u+=8)o<<=1,o|=e>>u+a&1;for(u=0;u<=24;u+=8)o<<=1,o|=t>>u+a&1}for(u=0;u<=24;u+=8)o<<=1,o|=t>>u+a&1;n[r+0]=i>>>0,n[r+1]=o>>>0},e.r28shl=function(t,e){return t<>>28-e};var r=[14,11,17,4,27,23,25,0,13,22,7,18,5,9,16,24,2,20,12,21,1,8,15,26,15,4,25,19,9,1,26,16,5,11,23,8,12,7,17,0,22,3,10,14,6,20,27,24];e.pc2=function(t,e,n,i){for(var o=0,a=0,u=r.length>>>1,s=0;s>>r[s]&1;for(s=u;s>>r[s]&1;n[i+0]=o>>>0,n[i+1]=a>>>0},e.expand=function(t,e,n){var r=0,i=0;r=(1&t)<<5|t>>>27;for(var o=23;o>=15;o-=4)r<<=6,r|=t>>>o&63;for(o=11;o>=3;o-=4)i|=t>>>o&63,i<<=6;i|=(31&t)<<1|t>>>31,e[n+0]=r>>>0,e[n+1]=i>>>0};var 
i=[14,0,4,15,13,7,1,4,2,14,15,2,11,13,8,1,3,10,10,6,6,12,12,11,5,9,9,5,0,3,7,8,4,15,1,12,14,8,8,2,13,4,6,9,2,1,11,7,15,5,12,11,9,3,7,14,3,10,10,0,5,6,0,13,15,3,1,13,8,4,14,7,6,15,11,2,3,8,4,14,9,12,7,0,2,1,13,10,12,6,0,9,5,11,10,5,0,13,14,8,7,10,11,1,10,3,4,15,13,4,1,2,5,11,8,6,12,7,6,12,9,0,3,5,2,14,15,9,10,13,0,7,9,0,14,9,6,3,3,4,15,6,5,10,1,2,13,8,12,5,7,14,11,12,4,11,2,15,8,1,13,1,6,10,4,13,9,0,8,6,15,9,3,8,0,7,11,4,1,15,2,14,12,3,5,11,10,5,14,2,7,12,7,13,13,8,14,11,3,5,0,6,6,15,9,0,10,3,1,4,2,7,8,2,5,12,11,1,12,10,4,14,15,9,10,3,6,15,9,0,0,6,12,10,11,1,7,13,13,8,15,9,1,4,3,5,14,11,5,12,2,7,8,2,4,14,2,14,12,11,4,2,1,12,7,4,10,7,11,13,6,1,8,5,5,0,3,15,15,10,13,3,0,9,14,8,9,6,4,11,2,8,1,12,11,7,10,1,13,14,7,2,8,13,15,6,9,15,12,0,5,9,6,10,3,4,0,5,14,3,12,10,1,15,10,4,15,2,9,7,2,12,6,9,8,5,0,6,13,1,3,13,4,14,14,0,7,11,5,3,11,8,9,4,14,3,15,2,5,12,2,9,8,5,12,15,3,10,7,11,0,14,4,1,10,7,1,6,13,0,11,8,6,13,4,13,11,0,2,11,14,7,15,4,0,9,8,1,13,10,3,14,12,3,9,5,7,12,5,2,10,15,6,8,1,6,1,6,4,11,11,13,13,8,12,1,3,4,7,10,14,7,10,9,15,5,6,0,8,15,0,14,5,2,9,3,2,12,13,1,2,15,8,13,4,8,6,10,15,3,11,7,1,4,10,12,9,5,3,6,14,11,5,0,0,14,12,9,7,2,7,2,11,1,4,14,1,7,9,4,12,10,14,8,2,13,0,15,6,12,10,9,13,0,15,3,3,5,5,6,8,11];e.substitute=function(t,e){for(var n=0,r=0;r<4;r++){n<<=4,n|=i[64*r+(t>>>18-6*r&63)]}for(r=0;r<4;r++){n<<=4,n|=i[256+64*r+(e>>>18-6*r&63)]}return n>>>0};var o=[16,25,12,11,3,20,4,15,31,17,9,6,27,14,1,22,30,24,8,18,0,5,29,23,13,19,2,26,10,21,28,7];e.permute=function(t){for(var e=0,n=0;n>>o[n]&1;return e>>>0},e.padSplit=function(t,e,n){for(var r=t.toString(2);r.length>>1];n=o.r28shl(n,u),i=o.r28shl(i,u),o.pc2(n,i,t.keys,a)}},s.prototype._update=function(t,e,n,r){var i=this._desState,a=o.readUInt32BE(t,e),u=o.readUInt32BE(t,e+4);o.ip(a,u,i.tmp,0),a=i.tmp[0],u=i.tmp[1],"encrypt"===this.type?this._encrypt(i,a,u,i.tmp,0):this._decrypt(i,a,u,i.tmp,0),a=i.tmp[0],u=i.tmp[1],o.writeUInt32BE(n,a,r),o.writeUInt32BE(n,u,r+4)},s.prototype._pad=function(t,e){for(var 
n=t.length-e,r=e;r>>0,a=h}o.rip(u,a,r,i)},s.prototype._decrypt=function(t,e,n,r,i){for(var a=n,u=e,s=t.keys.length-2;s>=0;s-=2){var c=t.keys[s],f=t.keys[s+1];o.expand(a,t.tmp,0),c^=t.tmp[0],f^=t.tmp[1];var l=o.substitute(c,f),h=a;a=(u^o.permute(l))>>>0,u=h}o.rip(a,u,r,i)}},function(t,e,n){var r=n(135),i=n(3).Buffer,o=n(426);function a(t){var e=t._cipher.encryptBlockRaw(t._prev);return o(t._prev),e}e.encrypt=function(t,e){var n=Math.ceil(e.length/16),o=t._cache.length;t._cache=i.concat([t._cache,i.allocUnsafe(16*n)]);for(var u=0;ut;)n.ishrn(1);if(n.isEven()&&n.iadd(u),n.testn(1)||n.iadd(s),e.cmp(s)){if(!e.cmp(c))for(;n.mod(f).cmp(l);)n.iadd(d)}else for(;n.mod(o).cmp(h);)n.iadd(d);if(y(p=n.shrn(1))&&y(n)&&b(p)&&b(n)&&a.test(p)&&a.test(n))return n}}},function(t,e,n){var r=n(12),i=n(276);function o(t){this.rand=t||new i.Rand}t.exports=o,o.create=function(t){return new o(t)},o.prototype._randbelow=function(t){var e=t.bitLength(),n=Math.ceil(e/8);do{var i=new r(this.rand.generate(n))}while(i.cmp(t)>=0);return i},o.prototype._randrange=function(t,e){var n=e.sub(t);return t.add(this._randbelow(n))},o.prototype.test=function(t,e,n){var i=t.bitLength(),o=r.mont(t),a=new r(1).toRed(o);e||(e=Math.max(1,i/48|0));for(var u=t.subn(1),s=0;!u.testn(s);s++);for(var c=t.shrn(s),f=u.toRed(o);e>0;e--){var l=this._randrange(new r(2),u);n&&n(l);var h=l.toRed(o).redPow(c);if(0!==h.cmp(a)&&0!==h.cmp(f)){for(var d=1;d0;e--){var f=this._randrange(new r(2),a),l=t.gcd(f);if(0!==l.cmpn(1))return l;var h=f.toRed(i).redPow(s);if(0!==h.cmp(o)&&0!==h.cmp(c)){for(var d=1;d>8,a=255&i;o?n.push(o,a):n.push(a)}return n},r.zero2=i,r.toHex=o,r.encode=function(t,e){return"hex"===e?o(t):t}},function(t,e,n){"use strict";var r=e;r.base=n(183),r.short=n(856),r.mont=n(857),r.edwards=n(858)},function(t,e,n){"use strict";var r=n(45).rotr32;function i(t,e,n){return t&e^~t&n}function o(t,e,n){return t&e^t&n^e&n}function a(t,e,n){return t^e^n}e.ft_1=function(t,e,n,r){return 
0===t?i(e,n,r):1===t||3===t?a(e,n,r):2===t?o(e,n,r):void 0},e.ch32=i,e.maj32=o,e.p32=a,e.s0_256=function(t){return r(t,2)^r(t,13)^r(t,22)},e.s1_256=function(t){return r(t,6)^r(t,11)^r(t,25)},e.g0_256=function(t){return r(t,7)^r(t,18)^t>>>3},e.g1_256=function(t){return r(t,17)^r(t,19)^t>>>10}},function(t,e,n){"use strict";var r=n(45),i=n(136),o=n(434),a=n(32),u=r.sum32,s=r.sum32_4,c=r.sum32_5,f=o.ch32,l=o.maj32,h=o.s0_256,d=o.s1_256,p=o.g0_256,g=o.g1_256,y=i.BlockHash,b=[1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298];function v(){if(!(this instanceof v))return new v;y.call(this),this.h=[1779033703,3144134277,1013904242,2773480762,1359893119,2600822924,528734635,1541459225],this.k=b,this.W=new Array(64)}r.inherits(v,y),t.exports=v,v.blockSize=512,v.outSize=256,v.hmacStrength=192,v.padLength=64,v.prototype._update=function(t,e){for(var n=this.W,r=0;r<16;r++)n[r]=t[e+r];for(;r>6],i=0==(32&n);if(31==(31&n)){var o=n;for(n=0;128==(128&o);){if(o=t.readUInt8(e),t.isError(o))return o;n<<=7,n|=127&o}}else n&=31;return{cls:r,primitive:i,tag:n,tagStr:u.tag[n]}}function l(t,e,n){var r=t.readUInt8(n);if(t.isError(r))return r;if(!e&&128===r)return null;if(0==(128&r))return r;var i=127&r;if(i>4)return t.error("length octect is too long");r=0;for(var o=0;o=31)return r.error("Multi-octet tag encoding unsupported");e||(i|=32);return 
i|=u.tagClassByName[n||"universal"]<<6}(t,e,n,this.reporter);if(r.length<128)return(o=new i(2))[0]=a,o[1]=r.length,this._createEncoderBuffer([o,r]);for(var s=1,c=r.length;c>=256;c>>=8)s++;(o=new i(2+s))[0]=a,o[1]=128|s;c=1+s;for(var f=r.length;f>0;c--,f>>=8)o[c]=255&f;return this._createEncoderBuffer([o,r])},c.prototype._encodeStr=function(t,e){if("bitstr"===e)return this._createEncoderBuffer([0|t.unused,t.data]);if("bmpstr"===e){for(var n=new i(2*t.length),r=0;r=40)return this.reporter.error("Second objid identifier OOB");t.splice(0,2,40*t[0]+t[1])}var o=0;for(r=0;r=128;a>>=7)o++}var u=new i(o),s=u.length-1;for(r=t.length-1;r>=0;r--){a=t[r];for(u[s--]=127&a;(a>>=7)>0;)u[s--]=128|127&a}return this._createEncoderBuffer(u)},c.prototype._encodeTime=function(t,e){var n,r=new Date(t);return"gentime"===e?n=[f(r.getFullYear()),f(r.getUTCMonth()+1),f(r.getUTCDate()),f(r.getUTCHours()),f(r.getUTCMinutes()),f(r.getUTCSeconds()),"Z"].join(""):"utctime"===e?n=[f(r.getFullYear()%100),f(r.getUTCMonth()+1),f(r.getUTCDate()),f(r.getUTCHours()),f(r.getUTCMinutes()),f(r.getUTCSeconds()),"Z"].join(""):this.reporter.error("Encoding "+e+" time is not supported yet"),this._encodeStr(n,"octstr")},c.prototype._encodeNull=function(){return this._createEncoderBuffer("")},c.prototype._encodeInt=function(t,e){if("string"==typeof t){if(!e)return this.reporter.error("String int or enum given, but no values map");if(!e.hasOwnProperty(t))return this.reporter.error("Values map doesn't contain: "+JSON.stringify(t));t=e[t]}if("number"!=typeof t&&!i.isBuffer(t)){var n=t.toArray();!t.sign&&128&n[0]&&n.unshift(0),t=new i(n)}if(i.isBuffer(t)){var r=t.length;0===t.length&&r++;var o=new i(r);return t.copy(o),0===t.length&&(o[0]=0),this._createEncoderBuffer(o)}if(t<128)return this._createEncoderBuffer(t);if(t<256)return this._createEncoderBuffer([0,t]);r=1;for(var a=t;a>=256;a>>=8)r++;for(a=(o=new Array(r)).length-1;a>=0;a--)o[a]=255&t,t>>=8;return 128&o[0]&&o.unshift(0),this._createEncoderBuffer(new 
i(o))},c.prototype._encodeBool=function(t){return this._createEncoderBuffer(t?255:0)},c.prototype._use=function(t,e){return"function"==typeof t&&(t=t(e)),t._getEncoder("der").tree},c.prototype._skipDefault=function(t,e,n){var r,i=this._baseState;if(null===i.default)return!1;var o=t.join();if(void 0===i.defaultBuffer&&(i.defaultBuffer=this._encodeValue(i.default,e,n).join()),o.length!==i.defaultBuffer.length)return!1;for(r=0;r=(o=(g+b)/2))?g=o:b=o,(f=n>=(a=(y+v)/2))?y=a:v=a,i=d,!(d=d[l=f<<1|c]))return i[l]=p,t;if(u=+t._x.call(null,d.data),s=+t._y.call(null,d.data),e===u&&n===s)return p.next=d,i?i[l]=p:t._root=p,t;do{i=i?i[l]=new Array(4):t._root=new Array(4),(c=e>=(o=(g+b)/2))?g=o:b=o,(f=n>=(a=(y+v)/2))?y=a:v=a}while((l=f<<1|c)==(h=(s>=a)<<1|u>=o));return i[h]=d,i[l]=p,t}var i=function(t,e,n,r,i){this.node=t,this.x0=e,this.y0=n,this.x1=r,this.y1=i};function o(t){return t[0]}function a(t){return t[1]}function u(t,e,n){var r=new s(null==e?o:e,null==n?a:n,NaN,NaN,NaN,NaN);return null==t?r:r.addAll(t)}function s(t,e,n,r,i,o){this._x=t,this._y=e,this._x0=n,this._y0=r,this._x1=i,this._y1=o,this._root=void 0}function c(t){for(var e={data:t.data},n=e;t=t.next;)n=n.next={data:t.data};return e}n.d(e,"a",(function(){return u}));var f=u.prototype=s.prototype;f.copy=function(){var t,e,n=new s(this._x,this._y,this._x0,this._y0,this._x1,this._y1),r=this._root;if(!r)return n;if(!r.length)return n._root=c(r),n;for(t=[{source:r,target:n._root=new Array(4)}];r=t.pop();)for(var i=0;i<4;++i)(e=r.source[i])&&(e.length?t.push({source:e,target:r.target[i]=new Array(4)}):r.target[i]=c(e));return n},f.add=function(t){var e=+this._x.call(null,t),n=+this._y.call(null,t);return r(this.cover(e,n),e,n,t)},f.addAll=function(t){var e,n,i,o,a=t.length,u=new Array(a),s=new Array(a),c=1/0,f=1/0,l=-1/0,h=-1/0;for(n=0;nl&&(l=i),oh&&(h=o));if(c>l||f>h)return 
this;for(this.cover(c,f).cover(l,h),n=0;nt||t>=i||r>e||e>=o;)switch(u=(ed||(a=c.y0)>p||(u=c.x1)=v)<<1|t>=b)&&(c=g[g.length-1],g[g.length-1]=g[g.length-1-f],g[g.length-1-f]=c)}else{var m=t-+this._x.call(null,y.data),_=e-+this._y.call(null,y.data),w=m*m+_*_;if(w=(u=(p+y)/2))?p=u:y=u,(f=a>=(s=(g+b)/2))?g=s:b=s,e=d,!(d=d[l=f<<1|c]))return this;if(!d.length)break;(e[l+1&3]||e[l+2&3]||e[l+3&3])&&(n=e,h=l)}for(;d.data!==t;)if(r=d,!(d=d.next))return this;return(i=d.next)&&delete d.next,r?(i?r.next=i:delete r.next,this):e?(i?e[l]=i:delete e[l],(d=e[0]||e[1]||e[2]||e[3])&&d===(e[3]||e[2]||e[1]||e[0])&&!d.length&&(n?n[h]=d:this._root=d),this):(this._root=i,this)},f.removeAll=function(t){for(var e=0,n=t.length;e\u20D2|\u205F\u200A|\u219D\u0338|\u2202\u0338|\u2220\u20D2|\u2229\uFE00|\u222A\uFE00|\u223C\u20D2|\u223D\u0331|\u223E\u0333|\u2242\u0338|\u224B\u0338|\u224D\u20D2|\u224E\u0338|\u224F\u0338|\u2250\u0338|\u2261\u20E5|\u2264\u20D2|\u2265\u20D2|\u2266\u0338|\u2267\u0338|\u2268\uFE00|\u2269\uFE00|\u226A\u0338|\u226A\u20D2|\u226B\u0338|\u226B\u20D2|\u227F\u0338|\u2282\u20D2|\u2283\u20D2|\u228A\uFE00|\u228B\uFE00|\u228F\u0338|\u2290\u0338|\u2293\uFE00|\u2294\uFE00|\u22B4\u20D2|\u22B5\u20D2|\u22D8\u0338|\u22D9\u0338|\u22DA\uFE00|\u22DB\uFE00|\u22F5\u0338|\u22F9\u0338|\u2933\u0338|\u29CF\u0338|\u29D0\u0338|\u2A6D\u0338|\u2A70\u0338|\u2A7D\u0338|\u2A7E\u0338|\u2AA1\u0338|\u2AA2\u0338|\u2AAC\uFE00|\u2AAD\uFE00|\u2AAF\u0338|\u2AB0\u0338|\u2AC5\u0338|\u2AC6\u0338|\u2ACB\uFE00|\u2ACC\uFE00|\u2AFD\u20E5|[\xA0-\u0113\u0116-\u0122\u0124-\u012B\u012E-\u014D\u0150-\u017E\u0192\u01B5\u01F5\u0237\u02C6\u02C7\u02D8-\u02DD\u0311\u0391-\u03A1\u03A3-\u03A9\u03B1-\u03C9\u03D1\u03D2\u03D5\u03D6\u03DC\u03DD\u03F0\u03F1\u03F5\u03F6\u0401-\u040C\u040E-\u044F\u0451-\u045C\u045E\u045F\u2002-\u2005\u2007-\u2010\u2013-\u2016\u2018-\u201A\u201C-\u201E\u2020-\u2022\u2025\u2026\u2030-\u2035\u2039\u203A\u203E\u2041\u2043\u2044\u204F\u2057\u205F-\u2063\u20AC\u20DB\u20DC\u2102\u2105\u210A-\u2113\u2115-\u211E\u
2122\u2124\u2127-\u2129\u212C\u212D\u212F-\u2131\u2133-\u2138\u2145-\u2148\u2153-\u215E\u2190-\u219B\u219D-\u21A7\u21A9-\u21AE\u21B0-\u21B3\u21B5-\u21B7\u21BA-\u21DB\u21DD\u21E4\u21E5\u21F5\u21FD-\u2205\u2207-\u2209\u220B\u220C\u220F-\u2214\u2216-\u2218\u221A\u221D-\u2238\u223A-\u2257\u2259\u225A\u225C\u225F-\u2262\u2264-\u228B\u228D-\u229B\u229D-\u22A5\u22A7-\u22B0\u22B2-\u22BB\u22BD-\u22DB\u22DE-\u22E3\u22E6-\u22F7\u22F9-\u22FE\u2305\u2306\u2308-\u2310\u2312\u2313\u2315\u2316\u231C-\u231F\u2322\u2323\u232D\u232E\u2336\u233D\u233F\u237C\u23B0\u23B1\u23B4-\u23B6\u23DC-\u23DF\u23E2\u23E7\u2423\u24C8\u2500\u2502\u250C\u2510\u2514\u2518\u251C\u2524\u252C\u2534\u253C\u2550-\u256C\u2580\u2584\u2588\u2591-\u2593\u25A1\u25AA\u25AB\u25AD\u25AE\u25B1\u25B3-\u25B5\u25B8\u25B9\u25BD-\u25BF\u25C2\u25C3\u25CA\u25CB\u25EC\u25EF\u25F8-\u25FC\u2605\u2606\u260E\u2640\u2642\u2660\u2663\u2665\u2666\u266A\u266D-\u266F\u2713\u2717\u2720\u2736\u2758\u2772\u2773\u27C8\u27C9\u27E6-\u27ED\u27F5-\u27FA\u27FC\u27FF\u2902-\u2905\u290C-\u2913\u2916\u2919-\u2920\u2923-\u292A\u2933\u2935-\u2939\u293C\u293D\u2945\u2948-\u294B\u294E-\u2976\u2978\u2979\u297B-\u297F\u2985\u2986\u298B-\u2996\u299A\u299C\u299D\u29A4-\u29B7\u29B9\u29BB\u29BC\u29BE-\u29C5\u29C9\u29CD-\u29D0\u29DC-\u29DE\u29E3-\u29E5\u29EB\u29F4\u29F6\u2A00-\u2A02\u2A04\u2A06\u2A0C\u2A0D\u2A10-\u2A17\u2A22-\u2A27\u2A29\u2A2A\u2A2D-\u2A31\u2A33-\u2A3C\u2A3F\u2A40\u2A42-\u2A4D\u2A50\u2A53-\u2A58\u2A5A-\u2A5D\u2A5F\u2A66\u2A6A\u2A6D-\u2A75\u2A77-\u2A9A\u2A9D-\u2AA2\u2AA4-\u2AB0\u2AB3-\u2AC8\u2ACB\u2ACC\u2ACF-\u2ADB\u2AE4\u2AE6-\u2AE9\u2AEB-\u2AF3\u2AFD\uFB00-\uFB04]|\uD835[\uDC9C\uDC9E\uDC9F\uDCA2\uDCA5\uDCA6\uDCA9-\uDCAC\uDCAE-\uDCB9\uDCBB\uDCBD-\uDCC3\uDCC5-\uDCCF\uDD04\uDD05\uDD07-\uDD0A\uDD0D-\uDD14\uDD16-\uDD1C\uDD1E-\uDD39\uDD3B-\uDD3E\uDD40-\uDD44\uDD46\uDD4A-\uDD50\uDD52-\uDD6B]/g,l={"­":"shy","‌":"zwnj","‍":"zwj","‎":"lrm","⁣":"ic","⁢":"it","⁡":"af","‏":"rlm","​":"ZeroWidthSpace","⁠":"NoBreak","̑":"DownBreve","⃛":"tdot","⃜":"DotDot"
,"\t":"Tab","\n":"NewLine"," ":"puncsp"," ":"MediumSpace"," ":"thinsp"," ":"hairsp"," ":"emsp13"," ":"ensp"," ":"emsp14"," ":"emsp"," ":"numsp"," ":"nbsp","  ":"ThickSpace","‾":"oline",_:"lowbar","‐":"dash","–":"ndash","—":"mdash","―":"horbar",",":"comma",";":"semi","⁏":"bsemi",":":"colon","⩴":"Colone","!":"excl","¡":"iexcl","?":"quest","¿":"iquest",".":"period","‥":"nldr","…":"mldr","·":"middot","'":"apos","‘":"lsquo","’":"rsquo","‚":"sbquo","‹":"lsaquo","›":"rsaquo",'"':"quot","“":"ldquo","”":"rdquo","„":"bdquo","«":"laquo","»":"raquo","(":"lpar",")":"rpar","[":"lsqb","]":"rsqb","{":"lcub","}":"rcub","⌈":"lceil","⌉":"rceil","⌊":"lfloor","⌋":"rfloor","⦅":"lopar","⦆":"ropar","⦋":"lbrke","⦌":"rbrke","⦍":"lbrkslu","⦎":"rbrksld","⦏":"lbrksld","⦐":"rbrkslu","⦑":"langd","⦒":"rangd","⦓":"lparlt","⦔":"rpargt","⦕":"gtlPar","⦖":"ltrPar","⟦":"lobrk","⟧":"robrk","⟨":"lang","⟩":"rang","⟪":"Lang","⟫":"Rang","⟬":"loang","⟭":"roang","❲":"lbbrk","❳":"rbbrk","‖":"Vert","§":"sect","¶":"para","@":"commat","*":"ast","/":"sol",undefined:null,"&":"amp","#":"num","%":"percnt","‰":"permil","‱":"pertenk","†":"dagger","‡":"Dagger","•":"bull","⁃":"hybull","′":"prime","″":"Prime","‴":"tprime","⁗":"qprime","‵":"bprime","⁁":"caret","`":"grave","´":"acute","˜":"tilde","^":"Hat","¯":"macr","˘":"breve","˙":"dot","¨":"die","˚":"ring","˝":"dblac","¸":"cedil","˛":"ogon","ˆ":"circ","ˇ":"caron","°":"deg","©":"copy","®":"reg","℗":"copysr","℘":"wp","℞":"rx","℧":"mho","℩":"iiota","←":"larr","↚":"nlarr","→":"rarr","↛":"nrarr","↑":"uarr","↓":"darr","↔":"harr","↮":"nharr","↕":"varr","↖":"nwarr","↗":"nearr","↘":"searr","↙":"swarr","↝":"rarrw","↝̸":"nrarrw","↞":"Larr","↟":"Uarr","↠":"Rarr","↡":"Darr","↢":"larrtl","↣":"rarrtl","↤":"mapstoleft","↥":"mapstoup","↦":"map","↧":"mapstodown","↩":"larrhk","↪":"rarrhk","↫":"larrlp","↬":"rarrlp","↭":"harrw","↰":"lsh","↱":"rsh","↲":"ldsh","↳":"rdsh","↵":"crarr","↶":"cularr","↷":"curarr","↺":"olarr","↻":"orarr","↼":"lharu","↽":"lhard","↾":"uharr","↿":"uharl","⇀":"rharu","⇁"
:"rhard","⇂":"dharr","⇃":"dharl","⇄":"rlarr","⇅":"udarr","⇆":"lrarr","⇇":"llarr","⇈":"uuarr","⇉":"rrarr","⇊":"ddarr","⇋":"lrhar","⇌":"rlhar","⇐":"lArr","⇍":"nlArr","⇑":"uArr","⇒":"rArr","⇏":"nrArr","⇓":"dArr","⇔":"iff","⇎":"nhArr","⇕":"vArr","⇖":"nwArr","⇗":"neArr","⇘":"seArr","⇙":"swArr","⇚":"lAarr","⇛":"rAarr","⇝":"zigrarr","⇤":"larrb","⇥":"rarrb","⇵":"duarr","⇽":"loarr","⇾":"roarr","⇿":"hoarr","∀":"forall","∁":"comp","∂":"part","∂̸":"npart","∃":"exist","∄":"nexist","∅":"empty","∇":"Del","∈":"in","∉":"notin","∋":"ni","∌":"notni","϶":"bepsi","∏":"prod","∐":"coprod","∑":"sum","+":"plus","±":"pm","÷":"div","×":"times","<":"lt","≮":"nlt","<⃒":"nvlt","=":"equals","≠":"ne","=⃥":"bne","⩵":"Equal",">":"gt","≯":"ngt",">⃒":"nvgt","¬":"not","|":"vert","¦":"brvbar","−":"minus","∓":"mp","∔":"plusdo","⁄":"frasl","∖":"setmn","∗":"lowast","∘":"compfn","√":"Sqrt","∝":"prop","∞":"infin","∟":"angrt","∠":"ang","∠⃒":"nang","∡":"angmsd","∢":"angsph","∣":"mid","∤":"nmid","∥":"par","∦":"npar","∧":"and","∨":"or","∩":"cap","∩︀":"caps","∪":"cup","∪︀":"cups","∫":"int","∬":"Int","∭":"tint","⨌":"qint","∮":"oint","∯":"Conint","∰":"Cconint","∱":"cwint","∲":"cwconint","∳":"awconint","∴":"there4","∵":"becaus","∶":"ratio","∷":"Colon","∸":"minusd","∺":"mDDot","∻":"homtht","∼":"sim","≁":"nsim","∼⃒":"nvsim","∽":"bsim","∽̱":"race","∾":"ac","∾̳":"acE","∿":"acd","≀":"wr","≂":"esim","≂̸":"nesim","≃":"sime","≄":"nsime","≅":"cong","≇":"ncong","≆":"simne","≈":"ap","≉":"nap","≊":"ape","≋":"apid","≋̸":"napid","≌":"bcong","≍":"CupCap","≭":"NotCupCap","≍⃒":"nvap","≎":"bump","≎̸":"nbump","≏":"bumpe","≏̸":"nbumpe","≐":"doteq","≐̸":"nedot","≑":"eDot","≒":"efDot","≓":"erDot","≔":"colone","≕":"ecolon","≖":"ecir","≗":"cire","≙":"wedgeq","≚":"veeeq","≜":"trie","≟":"equest","≡":"equiv","≢":"nequiv","≡⃥":"bnequiv","≤":"le","≰":"nle","≤⃒":"nvle","≥":"ge","≱":"nge","≥⃒":"nvge","≦":"lE","≦̸":"nlE","≧":"gE","≧̸":"ngE","≨︀":"lvnE","≨":"lnE","≩":"gnE","≩︀":"gvnE","≪":"ll","≪̸":"nLtv","≪⃒":"nLt","≫":"gg","≫̸":"nGtv","≫⃒":"nGt",
"≬":"twixt","≲":"lsim","≴":"nlsim","≳":"gsim","≵":"ngsim","≶":"lg","≸":"ntlg","≷":"gl","≹":"ntgl","≺":"pr","⊀":"npr","≻":"sc","⊁":"nsc","≼":"prcue","⋠":"nprcue","≽":"sccue","⋡":"nsccue","≾":"prsim","≿":"scsim","≿̸":"NotSucceedsTilde","⊂":"sub","⊄":"nsub","⊂⃒":"vnsub","⊃":"sup","⊅":"nsup","⊃⃒":"vnsup","⊆":"sube","⊈":"nsube","⊇":"supe","⊉":"nsupe","⊊︀":"vsubne","⊊":"subne","⊋︀":"vsupne","⊋":"supne","⊍":"cupdot","⊎":"uplus","⊏":"sqsub","⊏̸":"NotSquareSubset","⊐":"sqsup","⊐̸":"NotSquareSuperset","⊑":"sqsube","⋢":"nsqsube","⊒":"sqsupe","⋣":"nsqsupe","⊓":"sqcap","⊓︀":"sqcaps","⊔":"sqcup","⊔︀":"sqcups","⊕":"oplus","⊖":"ominus","⊗":"otimes","⊘":"osol","⊙":"odot","⊚":"ocir","⊛":"oast","⊝":"odash","⊞":"plusb","⊟":"minusb","⊠":"timesb","⊡":"sdotb","⊢":"vdash","⊬":"nvdash","⊣":"dashv","⊤":"top","⊥":"bot","⊧":"models","⊨":"vDash","⊭":"nvDash","⊩":"Vdash","⊮":"nVdash","⊪":"Vvdash","⊫":"VDash","⊯":"nVDash","⊰":"prurel","⊲":"vltri","⋪":"nltri","⊳":"vrtri","⋫":"nrtri","⊴":"ltrie","⋬":"nltrie","⊴⃒":"nvltrie","⊵":"rtrie","⋭":"nrtrie","⊵⃒":"nvrtrie","⊶":"origof","⊷":"imof","⊸":"mumap","⊹":"hercon","⊺":"intcal","⊻":"veebar","⊽":"barvee","⊾":"angrtvb","⊿":"lrtri","⋀":"Wedge","⋁":"Vee","⋂":"xcap","⋃":"xcup","⋄":"diam","⋅":"sdot","⋆":"Star","⋇":"divonx","⋈":"bowtie","⋉":"ltimes","⋊":"rtimes","⋋":"lthree","⋌":"rthree","⋍":"bsime","⋎":"cuvee","⋏":"cuwed","⋐":"Sub","⋑":"Sup","⋒":"Cap","⋓":"Cup","⋔":"fork","⋕":"epar","⋖":"ltdot","⋗":"gtdot","⋘":"Ll","⋘̸":"nLl","⋙":"Gg","⋙̸":"nGg","⋚︀":"lesg","⋚":"leg","⋛":"gel","⋛︀":"gesl","⋞":"cuepr","⋟":"cuesc","⋦":"lnsim","⋧":"gnsim","⋨":"prnsim","⋩":"scnsim","⋮":"vellip","⋯":"ctdot","⋰":"utdot","⋱":"dtdot","⋲":"disin","⋳":"isinsv","⋴":"isins","⋵":"isindot","⋵̸":"notindot","⋶":"notinvc","⋷":"notinvb","⋹":"isinE","⋹̸":"notinE","⋺":"nisd","⋻":"xnis","⋼":"nis","⋽":"notnivc","⋾":"notnivb","⌅":"barwed","⌆":"Barwed","⌌":"drcrop","⌍":"dlcrop","⌎":"urcrop","⌏":"ulcrop","⌐":"bnot","⌒":"profline","⌓":"profsurf","⌕":"telrec","⌖":"target","⌜":"ulcorn","⌝":"urcorn","⌞":
"dlcorn","⌟":"drcorn","⌢":"frown","⌣":"smile","⌭":"cylcty","⌮":"profalar","⌶":"topbot","⌽":"ovbar","⌿":"solbar","⍼":"angzarr","⎰":"lmoust","⎱":"rmoust","⎴":"tbrk","⎵":"bbrk","⎶":"bbrktbrk","⏜":"OverParenthesis","⏝":"UnderParenthesis","⏞":"OverBrace","⏟":"UnderBrace","⏢":"trpezium","⏧":"elinters","␣":"blank","─":"boxh","│":"boxv","┌":"boxdr","┐":"boxdl","└":"boxur","┘":"boxul","├":"boxvr","┤":"boxvl","┬":"boxhd","┴":"boxhu","┼":"boxvh","═":"boxH","║":"boxV","╒":"boxdR","╓":"boxDr","╔":"boxDR","╕":"boxdL","╖":"boxDl","╗":"boxDL","╘":"boxuR","╙":"boxUr","╚":"boxUR","╛":"boxuL","╜":"boxUl","╝":"boxUL","╞":"boxvR","╟":"boxVr","╠":"boxVR","╡":"boxvL","╢":"boxVl","╣":"boxVL","╤":"boxHd","╥":"boxhD","╦":"boxHD","╧":"boxHu","╨":"boxhU","╩":"boxHU","╪":"boxvH","╫":"boxVh","╬":"boxVH","▀":"uhblk","▄":"lhblk","█":"block","░":"blk14","▒":"blk12","▓":"blk34","□":"squ","▪":"squf","▫":"EmptyVerySmallSquare","▭":"rect","▮":"marker","▱":"fltns","△":"xutri","▴":"utrif","▵":"utri","▸":"rtrif","▹":"rtri","▽":"xdtri","▾":"dtrif","▿":"dtri","◂":"ltrif","◃":"ltri","◊":"loz","○":"cir","◬":"tridot","◯":"xcirc","◸":"ultri","◹":"urtri","◺":"lltri","◻":"EmptySmallSquare","◼":"FilledSmallSquare","★":"starf","☆":"star","☎":"phone","♀":"female","♂":"male","♠":"spades","♣":"clubs","♥":"hearts","♦":"diams","♪":"sung","✓":"check","✗":"cross","✠":"malt","✶":"sext","❘":"VerticalSeparator","⟈":"bsolhsub","⟉":"suphsol","⟵":"xlarr","⟶":"xrarr","⟷":"xharr","⟸":"xlArr","⟹":"xrArr","⟺":"xhArr","⟼":"xmap","⟿":"dzigrarr","⤂":"nvlArr","⤃":"nvrArr","⤄":"nvHarr","⤅":"Map","⤌":"lbarr","⤍":"rbarr","⤎":"lBarr","⤏":"rBarr","⤐":"RBarr","⤑":"DDotrahd","⤒":"UpArrowBar","⤓":"DownArrowBar","⤖":"Rarrtl","⤙":"latail","⤚":"ratail","⤛":"lAtail","⤜":"rAtail","⤝":"larrfs","⤞":"rarrfs","⤟":"larrbfs","⤠":"rarrbfs","⤣":"nwarhk","⤤":"nearhk","⤥":"searhk","⤦":"swarhk","⤧":"nwnear","⤨":"toea","⤩":"tosa","⤪":"swnwar","⤳":"rarrc","⤳̸":"nrarrc","⤵":"cudarrr","⤶":"ldca","⤷":"rdca","⤸":"cudarrl","⤹":"larrpl","⤼":"curarrm","⤽":"cularrp","⥅
":"rarrpl","⥈":"harrcir","⥉":"Uarrocir","⥊":"lurdshar","⥋":"ldrushar","⥎":"LeftRightVector","⥏":"RightUpDownVector","⥐":"DownLeftRightVector","⥑":"LeftUpDownVector","⥒":"LeftVectorBar","⥓":"RightVectorBar","⥔":"RightUpVectorBar","⥕":"RightDownVectorBar","⥖":"DownLeftVectorBar","⥗":"DownRightVectorBar","⥘":"LeftUpVectorBar","⥙":"LeftDownVectorBar","⥚":"LeftTeeVector","⥛":"RightTeeVector","⥜":"RightUpTeeVector","⥝":"RightDownTeeVector","⥞":"DownLeftTeeVector","⥟":"DownRightTeeVector","⥠":"LeftUpTeeVector","⥡":"LeftDownTeeVector","⥢":"lHar","⥣":"uHar","⥤":"rHar","⥥":"dHar","⥦":"luruhar","⥧":"ldrdhar","⥨":"ruluhar","⥩":"rdldhar","⥪":"lharul","⥫":"llhard","⥬":"rharul","⥭":"lrhard","⥮":"udhar","⥯":"duhar","⥰":"RoundImplies","⥱":"erarr","⥲":"simrarr","⥳":"larrsim","⥴":"rarrsim","⥵":"rarrap","⥶":"ltlarr","⥸":"gtrarr","⥹":"subrarr","⥻":"suplarr","⥼":"lfisht","⥽":"rfisht","⥾":"ufisht","⥿":"dfisht","⦚":"vzigzag","⦜":"vangrt","⦝":"angrtvbd","⦤":"ange","⦥":"range","⦦":"dwangle","⦧":"uwangle","⦨":"angmsdaa","⦩":"angmsdab","⦪":"angmsdac","⦫":"angmsdad","⦬":"angmsdae","⦭":"angmsdaf","⦮":"angmsdag","⦯":"angmsdah","⦰":"bemptyv","⦱":"demptyv","⦲":"cemptyv","⦳":"raemptyv","⦴":"laemptyv","⦵":"ohbar","⦶":"omid","⦷":"opar","⦹":"operp","⦻":"olcross","⦼":"odsold","⦾":"olcir","⦿":"ofcir","⧀":"olt","⧁":"ogt","⧂":"cirscir","⧃":"cirE","⧄":"solb","⧅":"bsolb","⧉":"boxbox","⧍":"trisb","⧎":"rtriltri","⧏":"LeftTriangleBar","⧏̸":"NotLeftTriangleBar","⧐":"RightTriangleBar","⧐̸":"NotRightTriangleBar","⧜":"iinfin","⧝":"infintie","⧞":"nvinfin","⧣":"eparsl","⧤":"smeparsl","⧥":"eqvparsl","⧫":"lozf","⧴":"RuleDelayed","⧶":"dsol","⨀":"xodot","⨁":"xoplus","⨂":"xotime","⨄":"xuplus","⨆":"xsqcup","⨍":"fpartint","⨐":"cirfnint","⨑":"awint","⨒":"rppolint","⨓":"scpolint","⨔":"npolint","⨕":"pointint","⨖":"quatint","⨗":"intlarhk","⨢":"pluscir","⨣":"plusacir","⨤":"simplus","⨥":"plusdu","⨦":"plussim","⨧":"plustwo","⨩":"mcomma","⨪":"minusdu","⨭":"loplus","⨮":"roplus","⨯":"Cross","⨰":"timesd","⨱":"timesbar","⨳":"smashp","⨴
":"lotimes","⨵":"rotimes","⨶":"otimesas","⨷":"Otimes","⨸":"odiv","⨹":"triplus","⨺":"triminus","⨻":"tritime","⨼":"iprod","⨿":"amalg","⩀":"capdot","⩂":"ncup","⩃":"ncap","⩄":"capand","⩅":"cupor","⩆":"cupcap","⩇":"capcup","⩈":"cupbrcap","⩉":"capbrcup","⩊":"cupcup","⩋":"capcap","⩌":"ccups","⩍":"ccaps","⩐":"ccupssm","⩓":"And","⩔":"Or","⩕":"andand","⩖":"oror","⩗":"orslope","⩘":"andslope","⩚":"andv","⩛":"orv","⩜":"andd","⩝":"ord","⩟":"wedbar","⩦":"sdote","⩪":"simdot","⩭":"congdot","⩭̸":"ncongdot","⩮":"easter","⩯":"apacir","⩰":"apE","⩰̸":"napE","⩱":"eplus","⩲":"pluse","⩳":"Esim","⩷":"eDDot","⩸":"equivDD","⩹":"ltcir","⩺":"gtcir","⩻":"ltquest","⩼":"gtquest","⩽":"les","⩽̸":"nles","⩾":"ges","⩾̸":"nges","⩿":"lesdot","⪀":"gesdot","⪁":"lesdoto","⪂":"gesdoto","⪃":"lesdotor","⪄":"gesdotol","⪅":"lap","⪆":"gap","⪇":"lne","⪈":"gne","⪉":"lnap","⪊":"gnap","⪋":"lEg","⪌":"gEl","⪍":"lsime","⪎":"gsime","⪏":"lsimg","⪐":"gsiml","⪑":"lgE","⪒":"glE","⪓":"lesges","⪔":"gesles","⪕":"els","⪖":"egs","⪗":"elsdot","⪘":"egsdot","⪙":"el","⪚":"eg","⪝":"siml","⪞":"simg","⪟":"simlE","⪠":"simgE","⪡":"LessLess","⪡̸":"NotNestedLessLess","⪢":"GreaterGreater","⪢̸":"NotNestedGreaterGreater","⪤":"glj","⪥":"gla","⪦":"ltcc","⪧":"gtcc","⪨":"lescc","⪩":"gescc","⪪":"smt","⪫":"lat","⪬":"smte","⪬︀":"smtes","⪭":"late","⪭︀":"lates","⪮":"bumpE","⪯":"pre","⪯̸":"npre","⪰":"sce","⪰̸":"nsce","⪳":"prE","⪴":"scE","⪵":"prnE","⪶":"scnE","⪷":"prap","⪸":"scap","⪹":"prnap","⪺":"scnap","⪻":"Pr","⪼":"Sc","⪽":"subdot","⪾":"supdot","⪿":"subplus","⫀":"supplus","⫁":"submult","⫂":"supmult","⫃":"subedot","⫄":"supedot","⫅":"subE","⫅̸":"nsubE","⫆":"supE","⫆̸":"nsupE","⫇":"subsim","⫈":"supsim","⫋︀":"vsubnE","⫋":"subnE","⫌︀":"vsupnE","⫌":"supnE","⫏":"csub","⫐":"csup","⫑":"csube","⫒":"csupe","⫓":"subsup","⫔":"supsub","⫕":"subsub","⫖":"supsup","⫗":"suphsub","⫘":"supdsub","⫙":"forkv","⫚":"topfork","⫛":"mlcp","⫤":"Dashv","⫦":"Vdashl","⫧":"Barv","⫨":"vBar","⫩":"vBarv","⫫":"Vbar","⫬":"Not","⫭":"bNot","⫮":"rnmid","⫯":"cirmid","⫰":"midcir","⫱":"topcir","⫲
":"nhpar","⫳":"parsim","⫽":"parsl","⫽⃥":"nparsl","♭":"flat","♮":"natur","♯":"sharp","¤":"curren","¢":"cent",$:"dollar","£":"pound","¥":"yen","€":"euro","¹":"sup1","½":"half","⅓":"frac13","¼":"frac14","⅕":"frac15","⅙":"frac16","⅛":"frac18","²":"sup2","⅔":"frac23","⅖":"frac25","³":"sup3","¾":"frac34","⅗":"frac35","⅜":"frac38","⅘":"frac45","⅚":"frac56","⅝":"frac58","⅞":"frac78","𝒶":"ascr","𝕒":"aopf","𝔞":"afr","𝔸":"Aopf","𝔄":"Afr","𝒜":"Ascr","ª":"ordf","á":"aacute","Á":"Aacute","à":"agrave","À":"Agrave","ă":"abreve","Ă":"Abreve","â":"acirc","Â":"Acirc","å":"aring","Å":"angst","ä":"auml","Ä":"Auml","ã":"atilde","Ã":"Atilde","ą":"aogon","Ą":"Aogon","ā":"amacr","Ā":"Amacr","æ":"aelig","Æ":"AElig","𝒷":"bscr","𝕓":"bopf","𝔟":"bfr","𝔹":"Bopf","ℬ":"Bscr","𝔅":"Bfr","𝔠":"cfr","𝒸":"cscr","𝕔":"copf","ℭ":"Cfr","𝒞":"Cscr","ℂ":"Copf","ć":"cacute","Ć":"Cacute","ĉ":"ccirc","Ĉ":"Ccirc","č":"ccaron","Č":"Ccaron","ċ":"cdot","Ċ":"Cdot","ç":"ccedil","Ç":"Ccedil","℅":"incare","𝔡":"dfr","ⅆ":"dd","𝕕":"dopf","𝒹":"dscr","𝒟":"Dscr","𝔇":"Dfr","ⅅ":"DD","𝔻":"Dopf","ď":"dcaron","Ď":"Dcaron","đ":"dstrok","Đ":"Dstrok","ð":"eth","Ð":"ETH","ⅇ":"ee","ℯ":"escr","𝔢":"efr","𝕖":"eopf","ℰ":"Escr","𝔈":"Efr","𝔼":"Eopf","é":"eacute","É":"Eacute","è":"egrave","È":"Egrave","ê":"ecirc","Ê":"Ecirc","ě":"ecaron","Ě":"Ecaron","ë":"euml","Ë":"Euml","ė":"edot","Ė":"Edot","ę":"eogon","Ę":"Eogon","ē":"emacr","Ē":"Emacr","𝔣":"ffr","𝕗":"fopf","𝒻":"fscr","𝔉":"Ffr","𝔽":"Fopf","ℱ":"Fscr","ff":"fflig","ffi":"ffilig","ffl":"ffllig","fi":"filig",fj:"fjlig","fl":"fllig","ƒ":"fnof","ℊ":"gscr","𝕘":"gopf","𝔤":"gfr","𝒢":"Gscr","𝔾":"Gopf","𝔊":"Gfr","ǵ":"gacute","ğ":"gbreve","Ğ":"Gbreve","ĝ":"gcirc","Ĝ":"Gcirc","ġ":"gdot","Ġ":"Gdot","Ģ":"Gcedil","𝔥":"hfr","ℎ":"planckh","𝒽":"hscr","𝕙":"hopf","ℋ":"Hscr","ℌ":"Hfr","ℍ":"Hopf","ĥ":"hcirc","Ĥ":"Hcirc","ℏ":"hbar","ħ":"hstrok","Ħ":"Hstrok","𝕚":"iopf","𝔦":"ifr","𝒾":"iscr","ⅈ":"ii","𝕀":"Iopf","ℐ":"Iscr","ℑ":"Im","í":"iacute","Í":"Iacute","ì":"igrave","Ì":"Igrave","î":"icirc","Î":"Icirc","ï":"iuml",
"Ï":"Iuml","ĩ":"itilde","Ĩ":"Itilde","İ":"Idot","į":"iogon","Į":"Iogon","ī":"imacr","Ī":"Imacr","ij":"ijlig","IJ":"IJlig","ı":"imath","𝒿":"jscr","𝕛":"jopf","𝔧":"jfr","𝒥":"Jscr","𝔍":"Jfr","𝕁":"Jopf","ĵ":"jcirc","Ĵ":"Jcirc","ȷ":"jmath","𝕜":"kopf","𝓀":"kscr","𝔨":"kfr","𝒦":"Kscr","𝕂":"Kopf","𝔎":"Kfr","ķ":"kcedil","Ķ":"Kcedil","𝔩":"lfr","𝓁":"lscr","ℓ":"ell","𝕝":"lopf","ℒ":"Lscr","𝔏":"Lfr","𝕃":"Lopf","ĺ":"lacute","Ĺ":"Lacute","ľ":"lcaron","Ľ":"Lcaron","ļ":"lcedil","Ļ":"Lcedil","ł":"lstrok","Ł":"Lstrok","ŀ":"lmidot","Ŀ":"Lmidot","𝔪":"mfr","𝕞":"mopf","𝓂":"mscr","𝔐":"Mfr","𝕄":"Mopf","ℳ":"Mscr","𝔫":"nfr","𝕟":"nopf","𝓃":"nscr","ℕ":"Nopf","𝒩":"Nscr","𝔑":"Nfr","ń":"nacute","Ń":"Nacute","ň":"ncaron","Ň":"Ncaron","ñ":"ntilde","Ñ":"Ntilde","ņ":"ncedil","Ņ":"Ncedil","№":"numero","ŋ":"eng","Ŋ":"ENG","𝕠":"oopf","𝔬":"ofr","ℴ":"oscr","𝒪":"Oscr","𝔒":"Ofr","𝕆":"Oopf","º":"ordm","ó":"oacute","Ó":"Oacute","ò":"ograve","Ò":"Ograve","ô":"ocirc","Ô":"Ocirc","ö":"ouml","Ö":"Ouml","ő":"odblac","Ő":"Odblac","õ":"otilde","Õ":"Otilde","ø":"oslash","Ø":"Oslash","ō":"omacr","Ō":"Omacr","œ":"oelig","Œ":"OElig","𝔭":"pfr","𝓅":"pscr","𝕡":"popf","ℙ":"Popf","𝔓":"Pfr","𝒫":"Pscr","𝕢":"qopf","𝔮":"qfr","𝓆":"qscr","𝒬":"Qscr","𝔔":"Qfr","ℚ":"Qopf","ĸ":"kgreen","𝔯":"rfr","𝕣":"ropf","𝓇":"rscr","ℛ":"Rscr","ℜ":"Re","ℝ":"Ropf","ŕ":"racute","Ŕ":"Racute","ř":"rcaron","Ř":"Rcaron","ŗ":"rcedil","Ŗ":"Rcedil","𝕤":"sopf","𝓈":"sscr","𝔰":"sfr","𝕊":"Sopf","𝔖":"Sfr","𝒮":"Sscr","Ⓢ":"oS","ś":"sacute","Ś":"Sacute","ŝ":"scirc","Ŝ":"Scirc","š":"scaron","Š":"Scaron","ş":"scedil","Ş":"Scedil","ß":"szlig","𝔱":"tfr","𝓉":"tscr","𝕥":"topf","𝒯":"Tscr","𝔗":"Tfr","𝕋":"Topf","ť":"tcaron","Ť":"Tcaron","ţ":"tcedil","Ţ":"Tcedil","™":"trade","ŧ":"tstrok","Ŧ":"Tstrok","𝓊":"uscr","𝕦":"uopf","𝔲":"ufr","𝕌":"Uopf","𝔘":"Ufr","𝒰":"Uscr","ú":"uacute","Ú":"Uacute","ù":"ugrave","Ù":"Ugrave","ŭ":"ubreve","Ŭ":"Ubreve","û":"ucirc","Û":"Ucirc","ů":"uring","Ů":"Uring","ü":"uuml","Ü":"Uuml","ű":"udblac","Ű":"Udblac","ũ":"utilde","Ũ":"Utilde","ų":"uogon","Ų":"Uogo
n","ū":"umacr","Ū":"Umacr","𝔳":"vfr","𝕧":"vopf","𝓋":"vscr","𝔙":"Vfr","𝕍":"Vopf","𝒱":"Vscr","𝕨":"wopf","𝓌":"wscr","𝔴":"wfr","𝒲":"Wscr","𝕎":"Wopf","𝔚":"Wfr","ŵ":"wcirc","Ŵ":"Wcirc","𝔵":"xfr","𝓍":"xscr","𝕩":"xopf","𝕏":"Xopf","𝔛":"Xfr","𝒳":"Xscr","𝔶":"yfr","𝓎":"yscr","𝕪":"yopf","𝒴":"Yscr","𝔜":"Yfr","𝕐":"Yopf","ý":"yacute","Ý":"Yacute","ŷ":"ycirc","Ŷ":"Ycirc","ÿ":"yuml","Ÿ":"Yuml","𝓏":"zscr","𝔷":"zfr","𝕫":"zopf","ℨ":"Zfr","ℤ":"Zopf","𝒵":"Zscr","ź":"zacute","Ź":"Zacute","ž":"zcaron","Ž":"Zcaron","ż":"zdot","Ż":"Zdot","Ƶ":"imped","þ":"thorn","Þ":"THORN","ʼn":"napos","α":"alpha","Α":"Alpha","β":"beta","Β":"Beta","γ":"gamma","Γ":"Gamma","δ":"delta","Δ":"Delta","ε":"epsi","ϵ":"epsiv","Ε":"Epsilon","ϝ":"gammad","Ϝ":"Gammad","ζ":"zeta","Ζ":"Zeta","η":"eta","Η":"Eta","θ":"theta","ϑ":"thetav","Θ":"Theta","ι":"iota","Ι":"Iota","κ":"kappa","ϰ":"kappav","Κ":"Kappa","λ":"lambda","Λ":"Lambda","μ":"mu","µ":"micro","Μ":"Mu","ν":"nu","Ν":"Nu","ξ":"xi","Ξ":"Xi","ο":"omicron","Ο":"Omicron","π":"pi","ϖ":"piv","Π":"Pi","ρ":"rho","ϱ":"rhov","Ρ":"Rho","σ":"sigma","Σ":"Sigma","ς":"sigmaf","τ":"tau","Τ":"Tau","υ":"upsi","Υ":"Upsilon","ϒ":"Upsi","φ":"phi","ϕ":"phiv","Φ":"Phi","χ":"chi","Χ":"Chi","ψ":"psi","Ψ":"Psi","ω":"omega","Ω":"ohm","а":"acy","А":"Acy","б":"bcy","Б":"Bcy","в":"vcy","В":"Vcy","г":"gcy","Г":"Gcy","ѓ":"gjcy","Ѓ":"GJcy","д":"dcy","Д":"Dcy","ђ":"djcy","Ђ":"DJcy","е":"iecy","Е":"IEcy","ё":"iocy","Ё":"IOcy","є":"jukcy","Є":"Jukcy","ж":"zhcy","Ж":"ZHcy","з":"zcy","З":"Zcy","ѕ":"dscy","Ѕ":"DScy","и":"icy","И":"Icy","і":"iukcy","І":"Iukcy","ї":"yicy","Ї":"YIcy","й":"jcy","Й":"Jcy","ј":"jsercy","Ј":"Jsercy","к":"kcy","К":"Kcy","ќ":"kjcy","Ќ":"KJcy","л":"lcy","Л":"Lcy","љ":"ljcy","Љ":"LJcy","м":"mcy","М":"Mcy","н":"ncy","Н":"Ncy","њ":"njcy","Њ":"NJcy","о":"ocy","О":"Ocy","п":"pcy","П":"Pcy","р":"rcy","Р":"Rcy","с":"scy","С":"Scy","т":"tcy","Т":"Tcy","ћ":"tshcy","Ћ":"TSHcy","у":"ucy","У":"Ucy","ў":"ubrcy","Ў":"Ubrcy","ф":"fcy","Ф":"Fcy","х":"khcy","Х":"KHcy","ц":"tscy","Ц":"TScy","ч":"chc
y","Ч":"CHcy","џ":"dzcy","Џ":"DZcy","ш":"shcy","Ш":"SHcy","щ":"shchcy","Щ":"SHCHcy","ъ":"hardcy","Ъ":"HARDcy","ы":"ycy","Ы":"Ycy","ь":"softcy","Ь":"SOFTcy","э":"ecy","Э":"Ecy","ю":"yucy","Ю":"YUcy","я":"yacy","Я":"YAcy","ℵ":"aleph","ℶ":"beth","ℷ":"gimel","ℸ":"daleth"},h=/["&'<>`]/g,d={'"':""","&":"&","'":"'","<":"<",">":">","`":"`"},p=/&#(?:[xX][^a-fA-F0-9]|[^0-9xX])/,g=/[\0-\x08\x0B\x0E-\x1F\x7F-\x9F\uFDD0-\uFDEF\uFFFE\uFFFF]|[\uD83F\uD87F\uD8BF\uD8FF\uD93F\uD97F\uD9BF\uD9FF\uDA3F\uDA7F\uDABF\uDAFF\uDB3F\uDB7F\uDBBF\uDBFF][\uDFFE\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]/,y=/&(CounterClockwiseContourIntegral|DoubleLongLeftRightArrow|ClockwiseContourIntegral|NotNestedGreaterGreater|NotSquareSupersetEqual|DiacriticalDoubleAcute|NotRightTriangleEqual|NotSucceedsSlantEqual|NotPrecedesSlantEqual|CloseCurlyDoubleQuote|NegativeVeryThinSpace|DoubleContourIntegral|FilledVerySmallSquare|CapitalDifferentialD|OpenCurlyDoubleQuote|EmptyVerySmallSquare|NestedGreaterGreater|DoubleLongRightArrow|NotLeftTriangleEqual|NotGreaterSlantEqual|ReverseUpEquilibrium|DoubleLeftRightArrow|NotSquareSubsetEqual|NotDoubleVerticalBar|RightArrowLeftArrow|NotGreaterFullEqual|NotRightTriangleBar|SquareSupersetEqual|DownLeftRightVector|DoubleLongLeftArrow|leftrightsquigarrow|LeftArrowRightArrow|NegativeMediumSpace|blacktriangleright|RightDownVectorBar|PrecedesSlantEqual|RightDoubleBracket|SucceedsSlantEqual|NotLeftTriangleBar|RightTriangleEqual|SquareIntersection|RightDownTeeVector|ReverseEquilibrium|NegativeThickSpace|longleftrightarrow|Longleftrightarrow|LongLeftRightArrow|DownRightTeeVector|DownRightVectorBar|GreaterSlantEqual|SquareSubsetEqual|LeftDownVectorBar|LeftDoubleBracket|VerticalSeparator|rightleftharpoons|NotGreaterGreater|NotSquareSuperset|blacktriangleleft|blacktriangledown|NegativeThinSpace|LeftDownTeeVector|NotLessSlantEqual|leftrightharpoons|DoubleUpDownArrow|DoubleVerticalBar|LeftTriangleEqual|FilledSmallSquare|twoheadrightarrow|NotNestedLessL
ess|DownLeftTeeVector|DownLeftVectorBar|RightAngleBracket|NotTildeFullEqual|NotReverseElement|RightUpDownVector|DiacriticalTilde|NotSucceedsTilde|circlearrowright|NotPrecedesEqual|rightharpoondown|DoubleRightArrow|NotSucceedsEqual|NonBreakingSpace|NotRightTriangle|LessEqualGreater|RightUpTeeVector|LeftAngleBracket|GreaterFullEqual|DownArrowUpArrow|RightUpVectorBar|twoheadleftarrow|GreaterEqualLess|downharpoonright|RightTriangleBar|ntrianglerighteq|NotSupersetEqual|LeftUpDownVector|DiacriticalAcute|rightrightarrows|vartriangleright|UpArrowDownArrow|DiacriticalGrave|UnderParenthesis|EmptySmallSquare|LeftUpVectorBar|leftrightarrows|DownRightVector|downharpoonleft|trianglerighteq|ShortRightArrow|OverParenthesis|DoubleLeftArrow|DoubleDownArrow|NotSquareSubset|bigtriangledown|ntrianglelefteq|UpperRightArrow|curvearrowright|vartriangleleft|NotLeftTriangle|nleftrightarrow|LowerRightArrow|NotHumpDownHump|NotGreaterTilde|rightthreetimes|LeftUpTeeVector|NotGreaterEqual|straightepsilon|LeftTriangleBar|rightsquigarrow|ContourIntegral|rightleftarrows|CloseCurlyQuote|RightDownVector|LeftRightVector|nLeftrightarrow|leftharpoondown|circlearrowleft|SquareSuperset|OpenCurlyQuote|hookrightarrow|HorizontalLine|DiacriticalDot|NotLessGreater|ntriangleright|DoubleRightTee|InvisibleComma|InvisibleTimes|LowerLeftArrow|DownLeftVector|NotSubsetEqual|curvearrowleft|trianglelefteq|NotVerticalBar|TildeFullEqual|downdownarrows|NotGreaterLess|RightTeeVector|ZeroWidthSpace|looparrowright|LongRightArrow|doublebarwedge|ShortLeftArrow|ShortDownArrow|RightVectorBar|GreaterGreater|ReverseElement|rightharpoonup|LessSlantEqual|leftthreetimes|upharpoonright|rightarrowtail|LeftDownVector|Longrightarrow|NestedLessLess|UpperLeftArrow|nshortparallel|leftleftarrows|leftrightarrow|Leftrightarrow|LeftRightArrow|longrightarrow|upharpoonleft|RightArrowBar|ApplyFunction|LeftTeeVector|leftarrowtail|NotEqualTilde|varsubsetneqq|varsupsetneqq|RightTeeArrow|SucceedsEqual|SucceedsTilde|LeftVectorBar|SupersetEqual|hooklefta
rrow|DifferentialD|VerticalTilde|VeryThinSpace|blacktriangle|bigtriangleup|LessFullEqual|divideontimes|leftharpoonup|UpEquilibrium|ntriangleleft|RightTriangle|measuredangle|shortparallel|longleftarrow|Longleftarrow|LongLeftArrow|DoubleLeftTee|Poincareplane|PrecedesEqual|triangleright|DoubleUpArrow|RightUpVector|fallingdotseq|looparrowleft|PrecedesTilde|NotTildeEqual|NotTildeTilde|smallsetminus|Proportional|triangleleft|triangledown|UnderBracket|NotHumpEqual|exponentiale|ExponentialE|NotLessTilde|HilbertSpace|RightCeiling|blacklozenge|varsupsetneq|HumpDownHump|GreaterEqual|VerticalLine|LeftTeeArrow|NotLessEqual|DownTeeArrow|LeftTriangle|varsubsetneq|Intersection|NotCongruent|DownArrowBar|LeftUpVector|LeftArrowBar|risingdotseq|GreaterTilde|RoundImplies|SquareSubset|ShortUpArrow|NotSuperset|quaternions|precnapprox|backepsilon|preccurlyeq|OverBracket|blacksquare|MediumSpace|VerticalBar|circledcirc|circleddash|CircleMinus|CircleTimes|LessGreater|curlyeqprec|curlyeqsucc|diamondsuit|UpDownArrow|Updownarrow|RuleDelayed|Rrightarrow|updownarrow|RightVector|nRightarrow|nrightarrow|eqslantless|LeftCeiling|Equilibrium|SmallCircle|expectation|NotSucceeds|thickapprox|GreaterLess|SquareUnion|NotPrecedes|NotLessLess|straightphi|succnapprox|succcurlyeq|SubsetEqual|sqsupseteq|Proportion|Laplacetrf|ImaginaryI|supsetneqq|NotGreater|gtreqqless|NotElement|ThickSpace|TildeEqual|TildeTilde|Fouriertrf|rmoustache|EqualTilde|eqslantgtr|UnderBrace|LeftVector|UpArrowBar|nLeftarrow|nsubseteqq|subsetneqq|nsupseteqq|nleftarrow|succapprox|lessapprox|UpTeeArrow|upuparrows|curlywedge|lesseqqgtr|varepsilon|varnothing|RightFloor|complement|CirclePlus|sqsubseteq|Lleftarrow|circledast|RightArrow|Rightarrow|rightarrow|lmoustache|Bernoullis|precapprox|mapstoleft|mapstodown|longmapsto|dotsquare|downarrow|DoubleDot|nsubseteq|supsetneq|leftarrow|nsupseteq|subsetneq|ThinSpace|ngeqslant|subseteqq|HumpEqual|NotSubset|triangleq|NotCupCap|lesseqgtr|heartsuit|TripleDot|Leftarrow|Coproduct|Congruent|varpropto|complex
es|gvertneqq|LeftArrow|LessTilde|supseteqq|MinusPlus|CircleDot|nleqslant|NotExists|gtreqless|nparallel|UnionPlus|LeftFloor|checkmark|CenterDot|centerdot|Mellintrf|gtrapprox|bigotimes|OverBrace|spadesuit|therefore|pitchfork|rationals|PlusMinus|Backslash|Therefore|DownBreve|backsimeq|backprime|DownArrow|nshortmid|Downarrow|lvertneqq|eqvparsl|imagline|imagpart|infintie|integers|Integral|intercal|LessLess|Uarrocir|intlarhk|sqsupset|angmsdaf|sqsubset|llcorner|vartheta|cupbrcap|lnapprox|Superset|SuchThat|succnsim|succneqq|angmsdag|biguplus|curlyvee|trpezium|Succeeds|NotTilde|bigwedge|angmsdah|angrtvbd|triminus|cwconint|fpartint|lrcorner|smeparsl|subseteq|urcorner|lurdshar|laemptyv|DDotrahd|approxeq|ldrushar|awconint|mapstoup|backcong|shortmid|triangle|geqslant|gesdotol|timesbar|circledR|circledS|setminus|multimap|naturals|scpolint|ncongdot|RightTee|boxminus|gnapprox|boxtimes|andslope|thicksim|angmsdaa|varsigma|cirfnint|rtriltri|angmsdab|rppolint|angmsdac|barwedge|drbkarow|clubsuit|thetasym|bsolhsub|capbrcup|dzigrarr|doteqdot|DotEqual|dotminus|UnderBar|NotEqual|realpart|otimesas|ulcorner|hksearow|hkswarow|parallel|PartialD|elinters|emptyset|plusacir|bbrktbrk|angmsdad|pointint|bigoplus|angmsdae|Precedes|bigsqcup|varkappa|notindot|supseteq|precneqq|precnsim|profalar|profline|profsurf|leqslant|lesdotor|raemptyv|subplus|notnivb|notnivc|subrarr|zigrarr|vzigzag|submult|subedot|Element|between|cirscir|larrbfs|larrsim|lotimes|lbrksld|lbrkslu|lozenge|ldrdhar|dbkarow|bigcirc|epsilon|simrarr|simplus|ltquest|Epsilon|luruhar|gtquest|maltese|npolint|eqcolon|npreceq|bigodot|ddagger|gtrless|bnequiv|harrcir|ddotseq|equivDD|backsim|demptyv|nsqsube|nsqsupe|Upsilon|nsubset|upsilon|minusdu|nsucceq|swarrow|nsupset|coloneq|searrow|boxplus|napprox|natural|asympeq|alefsym|congdot|nearrow|bigstar|diamond|supplus|tritime|LeftTee|nvinfin|triplus|NewLine|nvltrie|nvrtrie|nwarrow|nexists|Diamond|ruluhar|Implies|supmult|angzarr|suplarr|suphsub|questeq|because|digamma|Because|olcross|bemptyv|omicron|Omicr
on|rotimes|NoBreak|intprod|angrtvb|orderof|uwangle|suphsol|lesdoto|orslope|DownTee|realine|cudarrl|rdldhar|OverBar|supedot|lessdot|supdsub|topfork|succsim|rbrkslu|rbrksld|pertenk|cudarrr|isindot|planckh|lessgtr|pluscir|gesdoto|plussim|plustwo|lesssim|cularrp|rarrsim|Cayleys|notinva|notinvb|notinvc|UpArrow|Uparrow|uparrow|NotLess|dwangle|precsim|Product|curarrm|Cconint|dotplus|rarrbfs|ccupssm|Cedilla|cemptyv|notniva|quatint|frac35|frac38|frac45|frac56|frac58|frac78|tridot|xoplus|gacute|gammad|Gammad|lfisht|lfloor|bigcup|sqsupe|gbreve|Gbreve|lharul|sqsube|sqcups|Gcedil|apacir|llhard|lmidot|Lmidot|lmoust|andand|sqcaps|approx|Abreve|spades|circeq|tprime|divide|topcir|Assign|topbot|gesdot|divonx|xuplus|timesd|gesles|atilde|solbar|SOFTcy|loplus|timesb|lowast|lowbar|dlcorn|dlcrop|softcy|dollar|lparlt|thksim|lrhard|Atilde|lsaquo|smashp|bigvee|thinsp|wreath|bkarow|lsquor|lstrok|Lstrok|lthree|ltimes|ltlarr|DotDot|simdot|ltrPar|weierp|xsqcup|angmsd|sigmav|sigmaf|zeetrf|Zcaron|zcaron|mapsto|vsupne|thetav|cirmid|marker|mcomma|Zacute|vsubnE|there4|gtlPar|vsubne|bottom|gtrarr|SHCHcy|shchcy|midast|midcir|middot|minusb|minusd|gtrdot|bowtie|sfrown|mnplus|models|colone|seswar|Colone|mstpos|searhk|gtrsim|nacute|Nacute|boxbox|telrec|hairsp|Tcedil|nbumpe|scnsim|ncaron|Ncaron|ncedil|Ncedil|hamilt|Scedil|nearhk|hardcy|HARDcy|tcedil|Tcaron|commat|nequiv|nesear|tcaron|target|hearts|nexist|varrho|scedil|Scaron|scaron|hellip|Sacute|sacute|hercon|swnwar|compfn|rtimes|rthree|rsquor|rsaquo|zacute|wedgeq|homtht|barvee|barwed|Barwed|rpargt|horbar|conint|swarhk|roplus|nltrie|hslash|hstrok|Hstrok|rmoust|Conint|bprime|hybull|hyphen|iacute|Iacute|supsup|supsub|supsim|varphi|coprod|brvbar|agrave|Supset|supset|igrave|Igrave|notinE|Agrave|iiiint|iinfin|copysr|wedbar|Verbar|vangrt|becaus|incare|verbar|inodot|bullet|drcorn|intcal|drcrop|cularr|vellip|Utilde|bumpeq|cupcap|dstrok|Dstrok|CupCap|cupcup|cupdot|eacute|Eacute|supdot|iquest|easter|ecaron|Ecaron|ecolon|isinsv|utilde|itilde|Itilde|curarr|succeq|Bumpe
q|cacute|ulcrop|nparsl|Cacute|nprcue|egrave|Egrave|nrarrc|nrarrw|subsup|subsub|nrtrie|jsercy|nsccue|Jsercy|kappav|kcedil|Kcedil|subsim|ulcorn|nsimeq|egsdot|veebar|kgreen|capand|elsdot|Subset|subset|curren|aacute|lacute|Lacute|emptyv|ntilde|Ntilde|lagran|lambda|Lambda|capcap|Ugrave|langle|subdot|emsp13|numero|emsp14|nvdash|nvDash|nVdash|nVDash|ugrave|ufisht|nvHarr|larrfs|nvlArr|larrhk|larrlp|larrpl|nvrArr|Udblac|nwarhk|larrtl|nwnear|oacute|Oacute|latail|lAtail|sstarf|lbrace|odblac|Odblac|lbrack|udblac|odsold|eparsl|lcaron|Lcaron|ograve|Ograve|lcedil|Lcedil|Aacute|ssmile|ssetmn|squarf|ldquor|capcup|ominus|cylcty|rharul|eqcirc|dagger|rfloor|rfisht|Dagger|daleth|equals|origof|capdot|equest|dcaron|Dcaron|rdquor|oslash|Oslash|otilde|Otilde|otimes|Otimes|urcrop|Ubreve|ubreve|Yacute|Uacute|uacute|Rcedil|rcedil|urcorn|parsim|Rcaron|Vdashl|rcaron|Tstrok|percnt|period|permil|Exists|yacute|rbrack|rbrace|phmmat|ccaron|Ccaron|planck|ccedil|plankv|tstrok|female|plusdo|plusdu|ffilig|plusmn|ffllig|Ccedil|rAtail|dfisht|bernou|ratail|Rarrtl|rarrtl|angsph|rarrpl|rarrlp|rarrhk|xwedge|xotime|forall|ForAll|Vvdash|vsupnE|preceq|bigcap|frac12|frac13|frac14|primes|rarrfs|prnsim|frac15|Square|frac16|square|lesdot|frac18|frac23|propto|prurel|rarrap|rangle|puncsp|frac25|Racute|qprime|racute|lesges|frac34|abreve|AElig|eqsim|utdot|setmn|urtri|Equal|Uring|seArr|uring|searr|dashv|Dashv|mumap|nabla|iogon|Iogon|sdote|sdotb|scsim|napid|napos|equiv|natur|Acirc|dblac|erarr|nbump|iprod|erDot|ucirc|awint|esdot|angrt|ncong|isinE|scnap|Scirc|scirc|ndash|isins|Ubrcy|nearr|neArr|isinv|nedot|ubrcy|acute|Ycirc|iukcy|Iukcy|xutri|nesim|caret|jcirc|Jcirc|caron|twixt|ddarr|sccue|exist|jmath|sbquo|ngeqq|angst|ccaps|lceil|ngsim|UpTee|delta|Delta|rtrif|nharr|nhArr|nhpar|rtrie|jukcy|Jukcy|kappa|rsquo|Kappa|nlarr|nlArr|TSHcy|rrarr|aogon|Aogon|fflig|xrarr|tshcy|ccirc|nleqq|filig|upsih|nless|dharl|nlsim|fjlig|ropar|nltri|dharr|robrk|roarr|fllig|fltns|roang|rnmid|subnE|subne|lAarr|trisb|Ccirc|acirc|ccups|blank|VDash|forkv|
Vdash|langd|cedil|blk12|blk14|laquo|strns|diams|notin|vDash|larrb|blk34|block|disin|uplus|vdash|vBarv|aelig|starf|Wedge|check|xrArr|lates|lbarr|lBarr|notni|lbbrk|bcong|frasl|lbrke|frown|vrtri|vprop|vnsup|gamma|Gamma|wedge|xodot|bdquo|srarr|doteq|ldquo|boxdl|boxdL|gcirc|Gcirc|boxDl|boxDL|boxdr|boxdR|boxDr|TRADE|trade|rlhar|boxDR|vnsub|npart|vltri|rlarr|boxhd|boxhD|nprec|gescc|nrarr|nrArr|boxHd|boxHD|boxhu|boxhU|nrtri|boxHu|clubs|boxHU|times|colon|Colon|gimel|xlArr|Tilde|nsime|tilde|nsmid|nspar|THORN|thorn|xlarr|nsube|nsubE|thkap|xhArr|comma|nsucc|boxul|boxuL|nsupe|nsupE|gneqq|gnsim|boxUl|boxUL|grave|boxur|boxuR|boxUr|boxUR|lescc|angle|bepsi|boxvh|varpi|boxvH|numsp|Theta|gsime|gsiml|theta|boxVh|boxVH|boxvl|gtcir|gtdot|boxvL|boxVl|boxVL|crarr|cross|Cross|nvsim|boxvr|nwarr|nwArr|sqsup|dtdot|Uogon|lhard|lharu|dtrif|ocirc|Ocirc|lhblk|duarr|odash|sqsub|Hacek|sqcup|llarr|duhar|oelig|OElig|ofcir|boxvR|uogon|lltri|boxVr|csube|uuarr|ohbar|csupe|ctdot|olarr|olcir|harrw|oline|sqcap|omacr|Omacr|omega|Omega|boxVR|aleph|lneqq|lnsim|loang|loarr|rharu|lobrk|hcirc|operp|oplus|rhard|Hcirc|orarr|Union|order|ecirc|Ecirc|cuepr|szlig|cuesc|breve|reals|eDDot|Breve|hoarr|lopar|utrif|rdquo|Umacr|umacr|efDot|swArr|ultri|alpha|rceil|ovbar|swarr|Wcirc|wcirc|smtes|smile|bsemi|lrarr|aring|parsl|lrhar|bsime|uhblk|lrtri|cupor|Aring|uharr|uharl|slarr|rbrke|bsolb|lsime|rbbrk|RBarr|lsimg|phone|rBarr|rbarr|icirc|lsquo|Icirc|emacr|Emacr|ratio|simne|plusb|simlE|simgE|simeq|pluse|ltcir|ltdot|empty|xharr|xdtri|iexcl|Alpha|ltrie|rarrw|pound|ltrif|xcirc|bumpe|prcue|bumpE|asymp|amacr|cuvee|Sigma|sigma|iiint|udhar|iiota|ijlig|IJlig|supnE|imacr|Imacr|prime|Prime|image|prnap|eogon|Eogon|rarrc|mdash|mDDot|cuwed|imath|supne|imped|Amacr|udarr|prsim|micro|rarrb|cwint|raquo|infin|eplus|range|rangd|Ucirc|radic|minus|amalg|veeeq|rAarr|epsiv|ycirc|quest|sharp|quot|zwnj|Qscr|race|qscr|Qopf|qopf|qint|rang|Rang|Zscr|zscr|Zopf|zopf|rarr|rArr|Rarr|Pscr|pscr|prop|prod|prnE|prec|ZHcy|zhcy|prap|Zeta|zeta|Popf|popf|Zdot|plus|zdot
|Yuml|yuml|phiv|YUcy|yucy|Yscr|yscr|perp|Yopf|yopf|part|para|YIcy|Ouml|rcub|yicy|YAcy|rdca|ouml|osol|Oscr|rdsh|yacy|real|oscr|xvee|andd|rect|andv|Xscr|oror|ordm|ordf|xscr|ange|aopf|Aopf|rHar|Xopf|opar|Oopf|xopf|xnis|rhov|oopf|omid|xmap|oint|apid|apos|ogon|ascr|Ascr|odot|odiv|xcup|xcap|ocir|oast|nvlt|nvle|nvgt|nvge|nvap|Wscr|wscr|auml|ntlg|ntgl|nsup|nsub|nsim|Nscr|nscr|nsce|Wopf|ring|npre|wopf|npar|Auml|Barv|bbrk|Nopf|nopf|nmid|nLtv|beta|ropf|Ropf|Beta|beth|nles|rpar|nleq|bnot|bNot|nldr|NJcy|rscr|Rscr|Vscr|vscr|rsqb|njcy|bopf|nisd|Bopf|rtri|Vopf|nGtv|ngtr|vopf|boxh|boxH|boxv|nges|ngeq|boxV|bscr|scap|Bscr|bsim|Vert|vert|bsol|bull|bump|caps|cdot|ncup|scnE|ncap|nbsp|napE|Cdot|cent|sdot|Vbar|nang|vBar|chcy|Mscr|mscr|sect|semi|CHcy|Mopf|mopf|sext|circ|cire|mldr|mlcp|cirE|comp|shcy|SHcy|vArr|varr|cong|copf|Copf|copy|COPY|malt|male|macr|lvnE|cscr|ltri|sime|ltcc|simg|Cscr|siml|csub|Uuml|lsqb|lsim|uuml|csup|Lscr|lscr|utri|smid|lpar|cups|smte|lozf|darr|Lopf|Uscr|solb|lopf|sopf|Sopf|lneq|uscr|spar|dArr|lnap|Darr|dash|Sqrt|LJcy|ljcy|lHar|dHar|Upsi|upsi|diam|lesg|djcy|DJcy|leqq|dopf|Dopf|dscr|Dscr|dscy|ldsh|ldca|squf|DScy|sscr|Sscr|dsol|lcub|late|star|Star|Uopf|Larr|lArr|larr|uopf|dtri|dzcy|sube|subE|Lang|lang|Kscr|kscr|Kopf|kopf|KJcy|kjcy|KHcy|khcy|DZcy|ecir|edot|eDot|Jscr|jscr|succ|Jopf|jopf|Edot|uHar|emsp|ensp|Iuml|iuml|eopf|isin|Iscr|iscr|Eopf|epar|sung|epsi|escr|sup1|sup2|sup3|Iota|iota|supe|supE|Iopf|iopf|IOcy|iocy|Escr|esim|Esim|imof|Uarr|QUOT|uArr|uarr|euml|IEcy|iecy|Idot|Euml|euro|excl|Hscr|hscr|Hopf|hopf|TScy|tscy|Tscr|hbar|tscr|flat|tbrk|fnof|hArr|harr|half|fopf|Fopf|tdot|gvnE|fork|trie|gtcc|fscr|Fscr|gdot|gsim|Gscr|gscr|Gopf|gopf|gneq|Gdot|tosa|gnap|Topf|topf|geqq|toea|GJcy|gjcy|tint|gesl|mid|Sfr|ggg|top|ges|gla|glE|glj|geq|gne|gEl|gel|gnE|Gcy|gcy|gap|Tfr|tfr|Tcy|tcy|Hat|Tau|Ffr|tau|Tab|hfr|Hfr|ffr|Fcy|fcy|icy|Icy|iff|ETH|eth|ifr|Ifr|Eta|eta|int|Int|Sup|sup|ucy|Ucy|Sum|sum|jcy|ENG|ufr|Ufr|eng|Jcy|jfr|els|ell|egs|Efr|efr|Jfr|uml|kcy|Kcy|Ecy|ecy|kfr|Kfr|lap|Sub|sub|lat|
lcy|Lcy|leg|Dot|dot|lEg|leq|les|squ|div|die|lfr|Lfr|lgE|Dfr|dfr|Del|deg|Dcy|dcy|lne|lnE|sol|loz|smt|Cup|lrm|cup|lsh|Lsh|sim|shy|map|Map|mcy|Mcy|mfr|Mfr|mho|gfr|Gfr|sfr|cir|Chi|chi|nap|Cfr|vcy|Vcy|cfr|Scy|scy|ncy|Ncy|vee|Vee|Cap|cap|nfr|scE|sce|Nfr|nge|ngE|nGg|vfr|Vfr|ngt|bot|nGt|nis|niv|Rsh|rsh|nle|nlE|bne|Bfr|bfr|nLl|nlt|nLt|Bcy|bcy|not|Not|rlm|wfr|Wfr|npr|nsc|num|ocy|ast|Ocy|ofr|xfr|Xfr|Ofr|ogt|ohm|apE|olt|Rho|ape|rho|Rfr|rfr|ord|REG|ang|reg|orv|And|and|AMP|Rcy|amp|Afr|ycy|Ycy|yen|yfr|Yfr|rcy|par|pcy|Pcy|pfr|Pfr|phi|Phi|afr|Acy|acy|zcy|Zcy|piv|acE|acd|zfr|Zfr|pre|prE|psi|Psi|qfr|Qfr|zwj|Or|ge|Gg|gt|gg|el|oS|lt|Lt|LT|Re|lg|gl|eg|ne|Im|it|le|DD|wp|wr|nu|Nu|dd|lE|Sc|sc|pi|Pi|ee|af|ll|Ll|rx|gE|xi|pm|Xi|ic|pr|Pr|in|ni|mp|mu|ac|Mu|or|ap|Gt|GT|ii);|&(Aacute|Agrave|Atilde|Ccedil|Eacute|Egrave|Iacute|Igrave|Ntilde|Oacute|Ograve|Oslash|Otilde|Uacute|Ugrave|Yacute|aacute|agrave|atilde|brvbar|ccedil|curren|divide|eacute|egrave|frac12|frac14|frac34|iacute|igrave|iquest|middot|ntilde|oacute|ograve|oslash|otilde|plusmn|uacute|ugrave|yacute|AElig|Acirc|Aring|Ecirc|Icirc|Ocirc|THORN|Ucirc|acirc|acute|aelig|aring|cedil|ecirc|icirc|iexcl|laquo|micro|ocirc|pound|raquo|szlig|thorn|times|ucirc|Auml|COPY|Euml|Iuml|Ouml|QUOT|Uuml|auml|cent|copy|euml|iuml|macr|nbsp|ordf|ordm|ouml|para|quot|sect|sup1|sup2|sup3|uuml|yuml|AMP|ETH|REG|amp|deg|eth|not|reg|shy|uml|yen|GT|LT|gt|lt)(?!;)([=a-zA-Z0-9]?)|&#([0-9]+)(;?)|&#[xX]([a-fA-F0-9]+)(;?)|&([0-9a-zA-Z]+)/g,b={aacute:"á",Aacute:"Á",abreve:"ă",Abreve:"Ă",ac:"∾",acd:"∿",acE:"∾̳",acirc:"â",Acirc:"Â",acute:"´",acy:"а",Acy:"А",aelig:"æ",AElig:"Æ",af:"⁡",afr:"𝔞",Afr:"𝔄",agrave:"à",Agrave:"À",alefsym:"ℵ",aleph:"ℵ",alpha:"α",Alpha:"Α",amacr:"ā",Amacr:"Ā",amalg:"⨿",amp:"&",AMP:"&",and:"∧",And:"⩓",andand:"⩕",andd:"⩜",andslope:"⩘",andv:"⩚",ang:"∠",ange:"⦤",angle:"∠",angmsd:"∡",angmsdaa:"⦨",angmsdab:"⦩",angmsdac:"⦪",angmsdad:"⦫",angmsdae:"⦬",angmsdaf:"⦭",angmsdag:"⦮",angmsdah:"⦯",angrt:"∟",angrtvb:"⊾",angrtvbd:"⦝",angsph:"∢",angst:"Å",angzarr:"⍼",aogon:"ą"
,Aogon:"Ą",aopf:"𝕒",Aopf:"𝔸",ap:"≈",apacir:"⩯",ape:"≊",apE:"⩰",apid:"≋",apos:"'",ApplyFunction:"⁡",approx:"≈",approxeq:"≊",aring:"å",Aring:"Å",ascr:"𝒶",Ascr:"𝒜",Assign:"≔",ast:"*",asymp:"≈",asympeq:"≍",atilde:"ã",Atilde:"Ã",auml:"ä",Auml:"Ä",awconint:"∳",awint:"⨑",backcong:"≌",backepsilon:"϶",backprime:"‵",backsim:"∽",backsimeq:"⋍",Backslash:"∖",Barv:"⫧",barvee:"⊽",barwed:"⌅",Barwed:"⌆",barwedge:"⌅",bbrk:"⎵",bbrktbrk:"⎶",bcong:"≌",bcy:"б",Bcy:"Б",bdquo:"„",becaus:"∵",because:"∵",Because:"∵",bemptyv:"⦰",bepsi:"϶",bernou:"ℬ",Bernoullis:"ℬ",beta:"β",Beta:"Β",beth:"ℶ",between:"≬",bfr:"𝔟",Bfr:"𝔅",bigcap:"⋂",bigcirc:"◯",bigcup:"⋃",bigodot:"⨀",bigoplus:"⨁",bigotimes:"⨂",bigsqcup:"⨆",bigstar:"★",bigtriangledown:"▽",bigtriangleup:"△",biguplus:"⨄",bigvee:"⋁",bigwedge:"⋀",bkarow:"⤍",blacklozenge:"⧫",blacksquare:"▪",blacktriangle:"▴",blacktriangledown:"▾",blacktriangleleft:"◂",blacktriangleright:"▸",blank:"␣",blk12:"▒",blk14:"░",blk34:"▓",block:"█",bne:"=⃥",bnequiv:"≡⃥",bnot:"⌐",bNot:"⫭",bopf:"𝕓",Bopf:"𝔹",bot:"⊥",bottom:"⊥",bowtie:"⋈",boxbox:"⧉",boxdl:"┐",boxdL:"╕",boxDl:"╖",boxDL:"╗",boxdr:"┌",boxdR:"╒",boxDr:"╓",boxDR:"╔",boxh:"─",boxH:"═",boxhd:"┬",boxhD:"╥",boxHd:"╤",boxHD:"╦",boxhu:"┴",boxhU:"╨",boxHu:"╧",boxHU:"╩",boxminus:"⊟",boxplus:"⊞",boxtimes:"⊠",boxul:"┘",boxuL:"╛",boxUl:"╜",boxUL:"╝",boxur:"└",boxuR:"╘",boxUr:"╙",boxUR:"╚",boxv:"│",boxV:"║",boxvh:"┼",boxvH:"╪",boxVh:"╫",boxVH:"╬",boxvl:"┤",boxvL:"╡",boxVl:"╢",boxVL:"╣",boxvr:"├",boxvR:"╞",boxVr:"╟",boxVR:"╠",bprime:"‵",breve:"˘",Breve:"˘",brvbar:"¦",bscr:"𝒷",Bscr:"ℬ",bsemi:"⁏",bsim:"∽",bsime:"⋍",bsol:"\\",bsolb:"⧅",bsolhsub:"⟈",bull:"•",bullet:"•",bump:"≎",bumpe:"≏",bumpE:"⪮",bumpeq:"≏",Bumpeq:"≎",cacute:"ć",Cacute:"Ć",cap:"∩",Cap:"⋒",capand:"⩄",capbrcup:"⩉",capcap:"⩋",capcup:"⩇",capdot:"⩀",CapitalDifferentialD:"ⅅ",caps:"∩︀",caret:"⁁",caron:"ˇ",Cayleys:"ℭ",ccaps:"⩍",ccaron:"č",Ccaron:"Č",ccedil:"ç",Ccedil:"Ç",ccirc:"ĉ",Ccirc:"Ĉ",Cconint:"∰",ccups:"⩌",ccupssm:"⩐",cdot:"ċ",Cdot:"Ċ",cedil:"¸",Cedilla:"¸",cemptyv:"⦲",c
ent:"¢",centerdot:"·",CenterDot:"·",cfr:"𝔠",Cfr:"ℭ",chcy:"ч",CHcy:"Ч",check:"✓",checkmark:"✓",chi:"χ",Chi:"Χ",cir:"○",circ:"ˆ",circeq:"≗",circlearrowleft:"↺",circlearrowright:"↻",circledast:"⊛",circledcirc:"⊚",circleddash:"⊝",CircleDot:"⊙",circledR:"®",circledS:"Ⓢ",CircleMinus:"⊖",CirclePlus:"⊕",CircleTimes:"⊗",cire:"≗",cirE:"⧃",cirfnint:"⨐",cirmid:"⫯",cirscir:"⧂",ClockwiseContourIntegral:"∲",CloseCurlyDoubleQuote:"”",CloseCurlyQuote:"’",clubs:"♣",clubsuit:"♣",colon:":",Colon:"∷",colone:"≔",Colone:"⩴",coloneq:"≔",comma:",",commat:"@",comp:"∁",compfn:"∘",complement:"∁",complexes:"ℂ",cong:"≅",congdot:"⩭",Congruent:"≡",conint:"∮",Conint:"∯",ContourIntegral:"∮",copf:"𝕔",Copf:"ℂ",coprod:"∐",Coproduct:"∐",copy:"©",COPY:"©",copysr:"℗",CounterClockwiseContourIntegral:"∳",crarr:"↵",cross:"✗",Cross:"⨯",cscr:"𝒸",Cscr:"𝒞",csub:"⫏",csube:"⫑",csup:"⫐",csupe:"⫒",ctdot:"⋯",cudarrl:"⤸",cudarrr:"⤵",cuepr:"⋞",cuesc:"⋟",cularr:"↶",cularrp:"⤽",cup:"∪",Cup:"⋓",cupbrcap:"⩈",cupcap:"⩆",CupCap:"≍",cupcup:"⩊",cupdot:"⊍",cupor:"⩅",cups:"∪︀",curarr:"↷",curarrm:"⤼",curlyeqprec:"⋞",curlyeqsucc:"⋟",curlyvee:"⋎",curlywedge:"⋏",curren:"¤",curvearrowleft:"↶",curvearrowright:"↷",cuvee:"⋎",cuwed:"⋏",cwconint:"∲",cwint:"∱",cylcty:"⌭",dagger:"†",Dagger:"‡",daleth:"ℸ",darr:"↓",dArr:"⇓",Darr:"↡",dash:"‐",dashv:"⊣",Dashv:"⫤",dbkarow:"⤏",dblac:"˝",dcaron:"ď",Dcaron:"Ď",dcy:"д",Dcy:"Д",dd:"ⅆ",DD:"ⅅ",ddagger:"‡",ddarr:"⇊",DDotrahd:"⤑",ddotseq:"⩷",deg:"°",Del:"∇",delta:"δ",Delta:"Δ",demptyv:"⦱",dfisht:"⥿",dfr:"𝔡",Dfr:"𝔇",dHar:"⥥",dharl:"⇃",dharr:"⇂",DiacriticalAcute:"´",DiacriticalDot:"˙",DiacriticalDoubleAcute:"˝",DiacriticalGrave:"`",DiacriticalTilde:"˜",diam:"⋄",diamond:"⋄",Diamond:"⋄",diamondsuit:"♦",diams:"♦",die:"¨",DifferentialD:"ⅆ",digamma:"ϝ",disin:"⋲",div:"÷",divide:"÷",divideontimes:"⋇",divonx:"⋇",djcy:"ђ",DJcy:"Ђ",dlcorn:"⌞",dlcrop:"⌍",dollar:"$",dopf:"𝕕",Dopf:"𝔻",dot:"˙",Dot:"¨",DotDot:"⃜",doteq:"≐",doteqdot:"≑",DotEqual:"≐",dotminus:"∸",dotplus:"∔",dotsquare:"⊡",doublebarwedge:"⌆",DoubleContourIn
tegral:"∯",DoubleDot:"¨",DoubleDownArrow:"⇓",DoubleLeftArrow:"⇐",DoubleLeftRightArrow:"⇔",DoubleLeftTee:"⫤",DoubleLongLeftArrow:"⟸",DoubleLongLeftRightArrow:"⟺",DoubleLongRightArrow:"⟹",DoubleRightArrow:"⇒",DoubleRightTee:"⊨",DoubleUpArrow:"⇑",DoubleUpDownArrow:"⇕",DoubleVerticalBar:"∥",downarrow:"↓",Downarrow:"⇓",DownArrow:"↓",DownArrowBar:"⤓",DownArrowUpArrow:"⇵",DownBreve:"̑",downdownarrows:"⇊",downharpoonleft:"⇃",downharpoonright:"⇂",DownLeftRightVector:"⥐",DownLeftTeeVector:"⥞",DownLeftVector:"↽",DownLeftVectorBar:"⥖",DownRightTeeVector:"⥟",DownRightVector:"⇁",DownRightVectorBar:"⥗",DownTee:"⊤",DownTeeArrow:"↧",drbkarow:"⤐",drcorn:"⌟",drcrop:"⌌",dscr:"𝒹",Dscr:"𝒟",dscy:"ѕ",DScy:"Ѕ",dsol:"⧶",dstrok:"đ",Dstrok:"Đ",dtdot:"⋱",dtri:"▿",dtrif:"▾",duarr:"⇵",duhar:"⥯",dwangle:"⦦",dzcy:"џ",DZcy:"Џ",dzigrarr:"⟿",eacute:"é",Eacute:"É",easter:"⩮",ecaron:"ě",Ecaron:"Ě",ecir:"≖",ecirc:"ê",Ecirc:"Ê",ecolon:"≕",ecy:"э",Ecy:"Э",eDDot:"⩷",edot:"ė",eDot:"≑",Edot:"Ė",ee:"ⅇ",efDot:"≒",efr:"𝔢",Efr:"𝔈",eg:"⪚",egrave:"è",Egrave:"È",egs:"⪖",egsdot:"⪘",el:"⪙",Element:"∈",elinters:"⏧",ell:"ℓ",els:"⪕",elsdot:"⪗",emacr:"ē",Emacr:"Ē",empty:"∅",emptyset:"∅",EmptySmallSquare:"◻",emptyv:"∅",EmptyVerySmallSquare:"▫",emsp:" ",emsp13:" ",emsp14:" ",eng:"ŋ",ENG:"Ŋ",ensp:" 
",eogon:"ę",Eogon:"Ę",eopf:"𝕖",Eopf:"𝔼",epar:"⋕",eparsl:"⧣",eplus:"⩱",epsi:"ε",epsilon:"ε",Epsilon:"Ε",epsiv:"ϵ",eqcirc:"≖",eqcolon:"≕",eqsim:"≂",eqslantgtr:"⪖",eqslantless:"⪕",Equal:"⩵",equals:"=",EqualTilde:"≂",equest:"≟",Equilibrium:"⇌",equiv:"≡",equivDD:"⩸",eqvparsl:"⧥",erarr:"⥱",erDot:"≓",escr:"ℯ",Escr:"ℰ",esdot:"≐",esim:"≂",Esim:"⩳",eta:"η",Eta:"Η",eth:"ð",ETH:"Ð",euml:"ë",Euml:"Ë",euro:"€",excl:"!",exist:"∃",Exists:"∃",expectation:"ℰ",exponentiale:"ⅇ",ExponentialE:"ⅇ",fallingdotseq:"≒",fcy:"ф",Fcy:"Ф",female:"♀",ffilig:"ffi",fflig:"ff",ffllig:"ffl",ffr:"𝔣",Ffr:"𝔉",filig:"fi",FilledSmallSquare:"◼",FilledVerySmallSquare:"▪",fjlig:"fj",flat:"♭",fllig:"fl",fltns:"▱",fnof:"ƒ",fopf:"𝕗",Fopf:"𝔽",forall:"∀",ForAll:"∀",fork:"⋔",forkv:"⫙",Fouriertrf:"ℱ",fpartint:"⨍",frac12:"½",frac13:"⅓",frac14:"¼",frac15:"⅕",frac16:"⅙",frac18:"⅛",frac23:"⅔",frac25:"⅖",frac34:"¾",frac35:"⅗",frac38:"⅜",frac45:"⅘",frac56:"⅚",frac58:"⅝",frac78:"⅞",frasl:"⁄",frown:"⌢",fscr:"𝒻",Fscr:"ℱ",gacute:"ǵ",gamma:"γ",Gamma:"Γ",gammad:"ϝ",Gammad:"Ϝ",gap:"⪆",gbreve:"ğ",Gbreve:"Ğ",Gcedil:"Ģ",gcirc:"ĝ",Gcirc:"Ĝ",gcy:"г",Gcy:"Г",gdot:"ġ",Gdot:"Ġ",ge:"≥",gE:"≧",gel:"⋛",gEl:"⪌",geq:"≥",geqq:"≧",geqslant:"⩾",ges:"⩾",gescc:"⪩",gesdot:"⪀",gesdoto:"⪂",gesdotol:"⪄",gesl:"⋛︀",gesles:"⪔",gfr:"𝔤",Gfr:"𝔊",gg:"≫",Gg:"⋙",ggg:"⋙",gimel:"ℷ",gjcy:"ѓ",GJcy:"Ѓ",gl:"≷",gla:"⪥",glE:"⪒",glj:"⪤",gnap:"⪊",gnapprox:"⪊",gne:"⪈",gnE:"≩",gneq:"⪈",gneqq:"≩",gnsim:"⋧",gopf:"𝕘",Gopf:"𝔾",grave:"`",GreaterEqual:"≥",GreaterEqualLess:"⋛",GreaterFullEqual:"≧",GreaterGreater:"⪢",GreaterLess:"≷",GreaterSlantEqual:"⩾",GreaterTilde:"≳",gscr:"ℊ",Gscr:"𝒢",gsim:"≳",gsime:"⪎",gsiml:"⪐",gt:">",Gt:"≫",GT:">",gtcc:"⪧",gtcir:"⩺",gtdot:"⋗",gtlPar:"⦕",gtquest:"⩼",gtrapprox:"⪆",gtrarr:"⥸",gtrdot:"⋗",gtreqless:"⋛",gtreqqless:"⪌",gtrless:"≷",gtrsim:"≳",gvertneqq:"≩︀",gvnE:"≩︀",Hacek:"ˇ",hairsp:" 
",half:"½",hamilt:"ℋ",hardcy:"ъ",HARDcy:"Ъ",harr:"↔",hArr:"⇔",harrcir:"⥈",harrw:"↭",Hat:"^",hbar:"ℏ",hcirc:"ĥ",Hcirc:"Ĥ",hearts:"♥",heartsuit:"♥",hellip:"…",hercon:"⊹",hfr:"𝔥",Hfr:"ℌ",HilbertSpace:"ℋ",hksearow:"⤥",hkswarow:"⤦",hoarr:"⇿",homtht:"∻",hookleftarrow:"↩",hookrightarrow:"↪",hopf:"𝕙",Hopf:"ℍ",horbar:"―",HorizontalLine:"─",hscr:"𝒽",Hscr:"ℋ",hslash:"ℏ",hstrok:"ħ",Hstrok:"Ħ",HumpDownHump:"≎",HumpEqual:"≏",hybull:"⁃",hyphen:"‐",iacute:"í",Iacute:"Í",ic:"⁣",icirc:"î",Icirc:"Î",icy:"и",Icy:"И",Idot:"İ",iecy:"е",IEcy:"Е",iexcl:"¡",iff:"⇔",ifr:"𝔦",Ifr:"ℑ",igrave:"ì",Igrave:"Ì",ii:"ⅈ",iiiint:"⨌",iiint:"∭",iinfin:"⧜",iiota:"℩",ijlig:"ij",IJlig:"IJ",Im:"ℑ",imacr:"ī",Imacr:"Ī",image:"ℑ",ImaginaryI:"ⅈ",imagline:"ℐ",imagpart:"ℑ",imath:"ı",imof:"⊷",imped:"Ƶ",Implies:"⇒",in:"∈",incare:"℅",infin:"∞",infintie:"⧝",inodot:"ı",int:"∫",Int:"∬",intcal:"⊺",integers:"ℤ",Integral:"∫",intercal:"⊺",Intersection:"⋂",intlarhk:"⨗",intprod:"⨼",InvisibleComma:"⁣",InvisibleTimes:"⁢",iocy:"ё",IOcy:"Ё",iogon:"į",Iogon:"Į",iopf:"𝕚",Iopf:"𝕀",iota:"ι",Iota:"Ι",iprod:"⨼",iquest:"¿",iscr:"𝒾",Iscr:"ℐ",isin:"∈",isindot:"⋵",isinE:"⋹",isins:"⋴",isinsv:"⋳",isinv:"∈",it:"⁢",itilde:"ĩ",Itilde:"Ĩ",iukcy:"і",Iukcy:"І",iuml:"ï",Iuml:"Ï",jcirc:"ĵ",Jcirc:"Ĵ",jcy:"й",Jcy:"Й",jfr:"𝔧",Jfr:"𝔍",jmath:"ȷ",jopf:"𝕛",Jopf:"𝕁",jscr:"𝒿",Jscr:"𝒥",jsercy:"ј",Jsercy:"Ј",jukcy:"є",Jukcy:"Є",kappa:"κ",Kappa:"Κ",kappav:"ϰ",kcedil:"ķ",Kcedil:"Ķ",kcy:"к",Kcy:"К",kfr:"𝔨",Kfr:"𝔎",kgreen:"ĸ",khcy:"х",KHcy:"Х",kjcy:"ќ",KJcy:"Ќ",kopf:"𝕜",Kopf:"𝕂",kscr:"𝓀",Kscr:"𝒦",lAarr:"⇚",lacute:"ĺ",Lacute:"Ĺ",laemptyv:"⦴",lagran:"ℒ",lambda:"λ",Lambda:"Λ",lang:"⟨",Lang:"⟪",langd:"⦑",langle:"⟨",lap:"⪅",Laplacetrf:"ℒ",laquo:"«",larr:"←",lArr:"⇐",Larr:"↞",larrb:"⇤",larrbfs:"⤟",larrfs:"⤝",larrhk:"↩",larrlp:"↫",larrpl:"⤹",larrsim:"⥳",larrtl:"↢",lat:"⪫",latail:"⤙",lAtail:"⤛",late:"⪭",lates:"⪭︀",lbarr:"⤌",lBarr:"⤎",lbbrk:"❲",lbrace:"{",lbrack:"[",lbrke:"⦋",lbrksld:"⦏",lbrkslu:"⦍",lcaron:"ľ",Lcaron:"Ľ",lcedil:"ļ",Lcedil:"Ļ",lceil:"⌈",lcub:"{",lcy:"л",Lcy:
"Л",ldca:"⤶",ldquo:"“",ldquor:"„",ldrdhar:"⥧",ldrushar:"⥋",ldsh:"↲",le:"≤",lE:"≦",LeftAngleBracket:"⟨",leftarrow:"←",Leftarrow:"⇐",LeftArrow:"←",LeftArrowBar:"⇤",LeftArrowRightArrow:"⇆",leftarrowtail:"↢",LeftCeiling:"⌈",LeftDoubleBracket:"⟦",LeftDownTeeVector:"⥡",LeftDownVector:"⇃",LeftDownVectorBar:"⥙",LeftFloor:"⌊",leftharpoondown:"↽",leftharpoonup:"↼",leftleftarrows:"⇇",leftrightarrow:"↔",Leftrightarrow:"⇔",LeftRightArrow:"↔",leftrightarrows:"⇆",leftrightharpoons:"⇋",leftrightsquigarrow:"↭",LeftRightVector:"⥎",LeftTee:"⊣",LeftTeeArrow:"↤",LeftTeeVector:"⥚",leftthreetimes:"⋋",LeftTriangle:"⊲",LeftTriangleBar:"⧏",LeftTriangleEqual:"⊴",LeftUpDownVector:"⥑",LeftUpTeeVector:"⥠",LeftUpVector:"↿",LeftUpVectorBar:"⥘",LeftVector:"↼",LeftVectorBar:"⥒",leg:"⋚",lEg:"⪋",leq:"≤",leqq:"≦",leqslant:"⩽",les:"⩽",lescc:"⪨",lesdot:"⩿",lesdoto:"⪁",lesdotor:"⪃",lesg:"⋚︀",lesges:"⪓",lessapprox:"⪅",lessdot:"⋖",lesseqgtr:"⋚",lesseqqgtr:"⪋",LessEqualGreater:"⋚",LessFullEqual:"≦",LessGreater:"≶",lessgtr:"≶",LessLess:"⪡",lesssim:"≲",LessSlantEqual:"⩽",LessTilde:"≲",lfisht:"⥼",lfloor:"⌊",lfr:"𝔩",Lfr:"𝔏",lg:"≶",lgE:"⪑",lHar:"⥢",lhard:"↽",lharu:"↼",lharul:"⥪",lhblk:"▄",ljcy:"љ",LJcy:"Љ",ll:"≪",Ll:"⋘",llarr:"⇇",llcorner:"⌞",Lleftarrow:"⇚",llhard:"⥫",lltri:"◺",lmidot:"ŀ",Lmidot:"Ŀ",lmoust:"⎰",lmoustache:"⎰",lnap:"⪉",lnapprox:"⪉",lne:"⪇",lnE:"≨",lneq:"⪇",lneqq:"≨",lnsim:"⋦",loang:"⟬",loarr:"⇽",lobrk:"⟦",longleftarrow:"⟵",Longleftarrow:"⟸",LongLeftArrow:"⟵",longleftrightarrow:"⟷",Longleftrightarrow:"⟺",LongLeftRightArrow:"⟷",longmapsto:"⟼",longrightarrow:"⟶",Longrightarrow:"⟹",LongRightArrow:"⟶",looparrowleft:"↫",looparrowright:"↬",lopar:"⦅",lopf:"𝕝",Lopf:"𝕃",loplus:"⨭",lotimes:"⨴",lowast:"∗",lowbar:"_",LowerLeftArrow:"↙",LowerRightArrow:"↘",loz:"◊",lozenge:"◊",lozf:"⧫",lpar:"(",lparlt:"⦓",lrarr:"⇆",lrcorner:"⌟",lrhar:"⇋",lrhard:"⥭",lrm:"‎",lrtri:"⊿",lsaquo:"‹",lscr:"𝓁",Lscr:"ℒ",lsh:"↰",Lsh:"↰",lsim:"≲",lsime:"⪍",lsimg:"⪏",lsqb:"[",lsquo:"‘",lsquor:"‚",lstrok:"ł",Lstrok:"Ł",lt:"<",Lt:"≪",LT:"<",lt
cc:"⪦",ltcir:"⩹",ltdot:"⋖",lthree:"⋋",ltimes:"⋉",ltlarr:"⥶",ltquest:"⩻",ltri:"◃",ltrie:"⊴",ltrif:"◂",ltrPar:"⦖",lurdshar:"⥊",luruhar:"⥦",lvertneqq:"≨︀",lvnE:"≨︀",macr:"¯",male:"♂",malt:"✠",maltese:"✠",map:"↦",Map:"⤅",mapsto:"↦",mapstodown:"↧",mapstoleft:"↤",mapstoup:"↥",marker:"▮",mcomma:"⨩",mcy:"м",Mcy:"М",mdash:"—",mDDot:"∺",measuredangle:"∡",MediumSpace:" ",Mellintrf:"ℳ",mfr:"𝔪",Mfr:"𝔐",mho:"℧",micro:"µ",mid:"∣",midast:"*",midcir:"⫰",middot:"·",minus:"−",minusb:"⊟",minusd:"∸",minusdu:"⨪",MinusPlus:"∓",mlcp:"⫛",mldr:"…",mnplus:"∓",models:"⊧",mopf:"𝕞",Mopf:"𝕄",mp:"∓",mscr:"𝓂",Mscr:"ℳ",mstpos:"∾",mu:"μ",Mu:"Μ",multimap:"⊸",mumap:"⊸",nabla:"∇",nacute:"ń",Nacute:"Ń",nang:"∠⃒",nap:"≉",napE:"⩰̸",napid:"≋̸",napos:"ʼn",napprox:"≉",natur:"♮",natural:"♮",naturals:"ℕ",nbsp:" ",nbump:"≎̸",nbumpe:"≏̸",ncap:"⩃",ncaron:"ň",Ncaron:"Ň",ncedil:"ņ",Ncedil:"Ņ",ncong:"≇",ncongdot:"⩭̸",ncup:"⩂",ncy:"н",Ncy:"Н",ndash:"–",ne:"≠",nearhk:"⤤",nearr:"↗",neArr:"⇗",nearrow:"↗",nedot:"≐̸",NegativeMediumSpace:"​",NegativeThickSpace:"​",NegativeThinSpace:"​",NegativeVeryThinSpace:"​",nequiv:"≢",nesear:"⤨",nesim:"≂̸",NestedGreaterGreater:"≫",NestedLessLess:"≪",NewLine:"\n",nexist:"∄",nexists:"∄",nfr:"𝔫",Nfr:"𝔑",nge:"≱",ngE:"≧̸",ngeq:"≱",ngeqq:"≧̸",ngeqslant:"⩾̸",nges:"⩾̸",nGg:"⋙̸",ngsim:"≵",ngt:"≯",nGt:"≫⃒",ngtr:"≯",nGtv:"≫̸",nharr:"↮",nhArr:"⇎",nhpar:"⫲",ni:"∋",nis:"⋼",nisd:"⋺",niv:"∋",njcy:"њ",NJcy:"Њ",nlarr:"↚",nlArr:"⇍",nldr:"‥",nle:"≰",nlE:"≦̸",nleftarrow:"↚",nLeftarrow:"⇍",nleftrightarrow:"↮",nLeftrightarrow:"⇎",nleq:"≰",nleqq:"≦̸",nleqslant:"⩽̸",nles:"⩽̸",nless:"≮",nLl:"⋘̸",nlsim:"≴",nlt:"≮",nLt:"≪⃒",nltri:"⋪",nltrie:"⋬",nLtv:"≪̸",nmid:"∤",NoBreak:"⁠",NonBreakingSpace:" 
",nopf:"𝕟",Nopf:"ℕ",not:"¬",Not:"⫬",NotCongruent:"≢",NotCupCap:"≭",NotDoubleVerticalBar:"∦",NotElement:"∉",NotEqual:"≠",NotEqualTilde:"≂̸",NotExists:"∄",NotGreater:"≯",NotGreaterEqual:"≱",NotGreaterFullEqual:"≧̸",NotGreaterGreater:"≫̸",NotGreaterLess:"≹",NotGreaterSlantEqual:"⩾̸",NotGreaterTilde:"≵",NotHumpDownHump:"≎̸",NotHumpEqual:"≏̸",notin:"∉",notindot:"⋵̸",notinE:"⋹̸",notinva:"∉",notinvb:"⋷",notinvc:"⋶",NotLeftTriangle:"⋪",NotLeftTriangleBar:"⧏̸",NotLeftTriangleEqual:"⋬",NotLess:"≮",NotLessEqual:"≰",NotLessGreater:"≸",NotLessLess:"≪̸",NotLessSlantEqual:"⩽̸",NotLessTilde:"≴",NotNestedGreaterGreater:"⪢̸",NotNestedLessLess:"⪡̸",notni:"∌",notniva:"∌",notnivb:"⋾",notnivc:"⋽",NotPrecedes:"⊀",NotPrecedesEqual:"⪯̸",NotPrecedesSlantEqual:"⋠",NotReverseElement:"∌",NotRightTriangle:"⋫",NotRightTriangleBar:"⧐̸",NotRightTriangleEqual:"⋭",NotSquareSubset:"⊏̸",NotSquareSubsetEqual:"⋢",NotSquareSuperset:"⊐̸",NotSquareSupersetEqual:"⋣",NotSubset:"⊂⃒",NotSubsetEqual:"⊈",NotSucceeds:"⊁",NotSucceedsEqual:"⪰̸",NotSucceedsSlantEqual:"⋡",NotSucceedsTilde:"≿̸",NotSuperset:"⊃⃒",NotSupersetEqual:"⊉",NotTilde:"≁",NotTildeEqual:"≄",NotTildeFullEqual:"≇",NotTildeTilde:"≉",NotVerticalBar:"∤",npar:"∦",nparallel:"∦",nparsl:"⫽⃥",npart:"∂̸",npolint:"⨔",npr:"⊀",nprcue:"⋠",npre:"⪯̸",nprec:"⊀",npreceq:"⪯̸",nrarr:"↛",nrArr:"⇏",nrarrc:"⤳̸",nrarrw:"↝̸",nrightarrow:"↛",nRightarrow:"⇏",nrtri:"⋫",nrtrie:"⋭",nsc:"⊁",nsccue:"⋡",nsce:"⪰̸",nscr:"𝓃",Nscr:"𝒩",nshortmid:"∤",nshortparallel:"∦",nsim:"≁",nsime:"≄",nsimeq:"≄",nsmid:"∤",nspar:"∦",nsqsube:"⋢",nsqsupe:"⋣",nsub:"⊄",nsube:"⊈",nsubE:"⫅̸",nsubset:"⊂⃒",nsubseteq:"⊈",nsubseteqq:"⫅̸",nsucc:"⊁",nsucceq:"⪰̸",nsup:"⊅",nsupe:"⊉",nsupE:"⫆̸",nsupset:"⊃⃒",nsupseteq:"⊉",nsupseteqq:"⫆̸",ntgl:"≹",ntilde:"ñ",Ntilde:"Ñ",ntlg:"≸",ntriangleleft:"⋪",ntrianglelefteq:"⋬",ntriangleright:"⋫",ntrianglerighteq:"⋭",nu:"ν",Nu:"Ν",num:"#",numero:"№",numsp:" 
",nvap:"≍⃒",nvdash:"⊬",nvDash:"⊭",nVdash:"⊮",nVDash:"⊯",nvge:"≥⃒",nvgt:">⃒",nvHarr:"⤄",nvinfin:"⧞",nvlArr:"⤂",nvle:"≤⃒",nvlt:"<⃒",nvltrie:"⊴⃒",nvrArr:"⤃",nvrtrie:"⊵⃒",nvsim:"∼⃒",nwarhk:"⤣",nwarr:"↖",nwArr:"⇖",nwarrow:"↖",nwnear:"⤧",oacute:"ó",Oacute:"Ó",oast:"⊛",ocir:"⊚",ocirc:"ô",Ocirc:"Ô",ocy:"о",Ocy:"О",odash:"⊝",odblac:"ő",Odblac:"Ő",odiv:"⨸",odot:"⊙",odsold:"⦼",oelig:"œ",OElig:"Œ",ofcir:"⦿",ofr:"𝔬",Ofr:"𝔒",ogon:"˛",ograve:"ò",Ograve:"Ò",ogt:"⧁",ohbar:"⦵",ohm:"Ω",oint:"∮",olarr:"↺",olcir:"⦾",olcross:"⦻",oline:"‾",olt:"⧀",omacr:"ō",Omacr:"Ō",omega:"ω",Omega:"Ω",omicron:"ο",Omicron:"Ο",omid:"⦶",ominus:"⊖",oopf:"𝕠",Oopf:"𝕆",opar:"⦷",OpenCurlyDoubleQuote:"“",OpenCurlyQuote:"‘",operp:"⦹",oplus:"⊕",or:"∨",Or:"⩔",orarr:"↻",ord:"⩝",order:"ℴ",orderof:"ℴ",ordf:"ª",ordm:"º",origof:"⊶",oror:"⩖",orslope:"⩗",orv:"⩛",oS:"Ⓢ",oscr:"ℴ",Oscr:"𝒪",oslash:"ø",Oslash:"Ø",osol:"⊘",otilde:"õ",Otilde:"Õ",otimes:"⊗",Otimes:"⨷",otimesas:"⨶",ouml:"ö",Ouml:"Ö",ovbar:"⌽",OverBar:"‾",OverBrace:"⏞",OverBracket:"⎴",OverParenthesis:"⏜",par:"∥",para:"¶",parallel:"∥",parsim:"⫳",parsl:"⫽",part:"∂",PartialD:"∂",pcy:"п",Pcy:"П",percnt:"%",period:".",permil:"‰",perp:"⊥",pertenk:"‱",pfr:"𝔭",Pfr:"𝔓",phi:"φ",Phi:"Φ",phiv:"ϕ",phmmat:"ℳ",phone:"☎",pi:"π",Pi:"Π",pitchfork:"⋔",piv:"ϖ",planck:"ℏ",planckh:"ℎ",plankv:"ℏ",plus:"+",plusacir:"⨣",plusb:"⊞",pluscir:"⨢",plusdo:"∔",plusdu:"⨥",pluse:"⩲",PlusMinus:"±",plusmn:"±",plussim:"⨦",plustwo:"⨧",pm:"±",Poincareplane:"ℌ",pointint:"⨕",popf:"𝕡",Popf:"ℙ",pound:"£",pr:"≺",Pr:"⪻",prap:"⪷",prcue:"≼",pre:"⪯",prE:"⪳",prec:"≺",precapprox:"⪷",preccurlyeq:"≼",Precedes:"≺",PrecedesEqual:"⪯",PrecedesSlantEqual:"≼",PrecedesTilde:"≾",preceq:"⪯",precnapprox:"⪹",precneqq:"⪵",precnsim:"⋨",precsim:"≾",prime:"′",Prime:"″",primes:"ℙ",prnap:"⪹",prnE:"⪵",prnsim:"⋨",prod:"∏",Product:"∏",profalar:"⌮",profline:"⌒",profsurf:"⌓",prop:"∝",Proportion:"∷",Proportional:"∝",propto:"∝",prsim:"≾",prurel:"⊰",pscr:"𝓅",Pscr:"𝒫",psi:"ψ",Psi:"Ψ",puncsp:" 
",qfr:"𝔮",Qfr:"𝔔",qint:"⨌",qopf:"𝕢",Qopf:"ℚ",qprime:"⁗",qscr:"𝓆",Qscr:"𝒬",quaternions:"ℍ",quatint:"⨖",quest:"?",questeq:"≟",quot:'"',QUOT:'"',rAarr:"⇛",race:"∽̱",racute:"ŕ",Racute:"Ŕ",radic:"√",raemptyv:"⦳",rang:"⟩",Rang:"⟫",rangd:"⦒",range:"⦥",rangle:"⟩",raquo:"»",rarr:"→",rArr:"⇒",Rarr:"↠",rarrap:"⥵",rarrb:"⇥",rarrbfs:"⤠",rarrc:"⤳",rarrfs:"⤞",rarrhk:"↪",rarrlp:"↬",rarrpl:"⥅",rarrsim:"⥴",rarrtl:"↣",Rarrtl:"⤖",rarrw:"↝",ratail:"⤚",rAtail:"⤜",ratio:"∶",rationals:"ℚ",rbarr:"⤍",rBarr:"⤏",RBarr:"⤐",rbbrk:"❳",rbrace:"}",rbrack:"]",rbrke:"⦌",rbrksld:"⦎",rbrkslu:"⦐",rcaron:"ř",Rcaron:"Ř",rcedil:"ŗ",Rcedil:"Ŗ",rceil:"⌉",rcub:"}",rcy:"р",Rcy:"Р",rdca:"⤷",rdldhar:"⥩",rdquo:"”",rdquor:"”",rdsh:"↳",Re:"ℜ",real:"ℜ",realine:"ℛ",realpart:"ℜ",reals:"ℝ",rect:"▭",reg:"®",REG:"®",ReverseElement:"∋",ReverseEquilibrium:"⇋",ReverseUpEquilibrium:"⥯",rfisht:"⥽",rfloor:"⌋",rfr:"𝔯",Rfr:"ℜ",rHar:"⥤",rhard:"⇁",rharu:"⇀",rharul:"⥬",rho:"ρ",Rho:"Ρ",rhov:"ϱ",RightAngleBracket:"⟩",rightarrow:"→",Rightarrow:"⇒",RightArrow:"→",RightArrowBar:"⇥",RightArrowLeftArrow:"⇄",rightarrowtail:"↣",RightCeiling:"⌉",RightDoubleBracket:"⟧",RightDownTeeVector:"⥝",RightDownVector:"⇂",RightDownVectorBar:"⥕",RightFloor:"⌋",rightharpoondown:"⇁",rightharpoonup:"⇀",rightleftarrows:"⇄",rightleftharpoons:"⇌",rightrightarrows:"⇉",rightsquigarrow:"↝",RightTee:"⊢",RightTeeArrow:"↦",RightTeeVector:"⥛",rightthreetimes:"⋌",RightTriangle:"⊳",RightTriangleBar:"⧐",RightTriangleEqual:"⊵",RightUpDownVector:"⥏",RightUpTeeVector:"⥜",RightUpVector:"↾",RightUpVectorBar:"⥔",RightVector:"⇀",RightVectorBar:"⥓",ring:"˚",risingdotseq:"≓",rlarr:"⇄",rlhar:"⇌",rlm:"‏",rmoust:"⎱",rmoustache:"⎱",rnmid:"⫮",roang:"⟭",roarr:"⇾",robrk:"⟧",ropar:"⦆",ropf:"𝕣",Ropf:"ℝ",roplus:"⨮",rotimes:"⨵",RoundImplies:"⥰",rpar:")",rpargt:"⦔",rppolint:"⨒",rrarr:"⇉",Rrightarrow:"⇛",rsaquo:"›",rscr:"𝓇",Rscr:"ℛ",rsh:"↱",Rsh:"↱",rsqb:"]",rsquo:"’",rsquor:"’",rthree:"⋌",rtimes:"⋊",rtri:"▹",rtrie:"⊵",rtrif:"▸",rtriltri:"⧎",RuleDelayed:"⧴",ruluhar:"⥨",rx:"℞",sacute:"ś",Sacut
e:"Ś",sbquo:"‚",sc:"≻",Sc:"⪼",scap:"⪸",scaron:"š",Scaron:"Š",sccue:"≽",sce:"⪰",scE:"⪴",scedil:"ş",Scedil:"Ş",scirc:"ŝ",Scirc:"Ŝ",scnap:"⪺",scnE:"⪶",scnsim:"⋩",scpolint:"⨓",scsim:"≿",scy:"с",Scy:"С",sdot:"⋅",sdotb:"⊡",sdote:"⩦",searhk:"⤥",searr:"↘",seArr:"⇘",searrow:"↘",sect:"§",semi:";",seswar:"⤩",setminus:"∖",setmn:"∖",sext:"✶",sfr:"𝔰",Sfr:"𝔖",sfrown:"⌢",sharp:"♯",shchcy:"щ",SHCHcy:"Щ",shcy:"ш",SHcy:"Ш",ShortDownArrow:"↓",ShortLeftArrow:"←",shortmid:"∣",shortparallel:"∥",ShortRightArrow:"→",ShortUpArrow:"↑",shy:"­",sigma:"σ",Sigma:"Σ",sigmaf:"ς",sigmav:"ς",sim:"∼",simdot:"⩪",sime:"≃",simeq:"≃",simg:"⪞",simgE:"⪠",siml:"⪝",simlE:"⪟",simne:"≆",simplus:"⨤",simrarr:"⥲",slarr:"←",SmallCircle:"∘",smallsetminus:"∖",smashp:"⨳",smeparsl:"⧤",smid:"∣",smile:"⌣",smt:"⪪",smte:"⪬",smtes:"⪬︀",softcy:"ь",SOFTcy:"Ь",sol:"/",solb:"⧄",solbar:"⌿",sopf:"𝕤",Sopf:"𝕊",spades:"♠",spadesuit:"♠",spar:"∥",sqcap:"⊓",sqcaps:"⊓︀",sqcup:"⊔",sqcups:"⊔︀",Sqrt:"√",sqsub:"⊏",sqsube:"⊑",sqsubset:"⊏",sqsubseteq:"⊑",sqsup:"⊐",sqsupe:"⊒",sqsupset:"⊐",sqsupseteq:"⊒",squ:"□",square:"□",Square:"□",SquareIntersection:"⊓",SquareSubset:"⊏",SquareSubsetEqual:"⊑",SquareSuperset:"⊐",SquareSupersetEqual:"⊒",SquareUnion:"⊔",squarf:"▪",squf:"▪",srarr:"→",sscr:"𝓈",Sscr:"𝒮",ssetmn:"∖",ssmile:"⌣",sstarf:"⋆",star:"☆",Star:"⋆",starf:"★",straightepsilon:"ϵ",straightphi:"ϕ",strns:"¯",sub:"⊂",Sub:"⋐",subdot:"⪽",sube:"⊆",subE:"⫅",subedot:"⫃",submult:"⫁",subne:"⊊",subnE:"⫋",subplus:"⪿",subrarr:"⥹",subset:"⊂",Subset:"⋐",subseteq:"⊆",subseteqq:"⫅",SubsetEqual:"⊆",subsetneq:"⊊",subsetneqq:"⫋",subsim:"⫇",subsub:"⫕",subsup:"⫓",succ:"≻",succapprox:"⪸",succcurlyeq:"≽",Succeeds:"≻",SucceedsEqual:"⪰",SucceedsSlantEqual:"≽",SucceedsTilde:"≿",succeq:"⪰",succnapprox:"⪺",succneqq:"⪶",succnsim:"⋩",succsim:"≿",SuchThat:"∋",sum:"∑",Sum:"∑",sung:"♪",sup:"⊃",Sup:"⋑",sup1:"¹",sup2:"²",sup3:"³",supdot:"⪾",supdsub:"⫘",supe:"⊇",supE:"⫆",supedot:"⫄",Superset:"⊃",SupersetEqual:"⊇",suphsol:"⟉",suphsub:"⫗",suplarr:"⥻",supmult:"⫂",supne:"⊋",supnE:"⫌",su
pplus:"⫀",supset:"⊃",Supset:"⋑",supseteq:"⊇",supseteqq:"⫆",supsetneq:"⊋",supsetneqq:"⫌",supsim:"⫈",supsub:"⫔",supsup:"⫖",swarhk:"⤦",swarr:"↙",swArr:"⇙",swarrow:"↙",swnwar:"⤪",szlig:"ß",Tab:"\t",target:"⌖",tau:"τ",Tau:"Τ",tbrk:"⎴",tcaron:"ť",Tcaron:"Ť",tcedil:"ţ",Tcedil:"Ţ",tcy:"т",Tcy:"Т",tdot:"⃛",telrec:"⌕",tfr:"𝔱",Tfr:"𝔗",there4:"∴",therefore:"∴",Therefore:"∴",theta:"θ",Theta:"Θ",thetasym:"ϑ",thetav:"ϑ",thickapprox:"≈",thicksim:"∼",ThickSpace:"  ",thinsp:" ",ThinSpace:" ",thkap:"≈",thksim:"∼",thorn:"þ",THORN:"Þ",tilde:"˜",Tilde:"∼",TildeEqual:"≃",TildeFullEqual:"≅",TildeTilde:"≈",times:"×",timesb:"⊠",timesbar:"⨱",timesd:"⨰",tint:"∭",toea:"⤨",top:"⊤",topbot:"⌶",topcir:"⫱",topf:"𝕥",Topf:"𝕋",topfork:"⫚",tosa:"⤩",tprime:"‴",trade:"™",TRADE:"™",triangle:"▵",triangledown:"▿",triangleleft:"◃",trianglelefteq:"⊴",triangleq:"≜",triangleright:"▹",trianglerighteq:"⊵",tridot:"◬",trie:"≜",triminus:"⨺",TripleDot:"⃛",triplus:"⨹",trisb:"⧍",tritime:"⨻",trpezium:"⏢",tscr:"𝓉",Tscr:"𝒯",tscy:"ц",TScy:"Ц",tshcy:"ћ",TSHcy:"Ћ",tstrok:"ŧ",Tstrok:"Ŧ",twixt:"≬",twoheadleftarrow:"↞",twoheadrightarrow:"↠",uacute:"ú",Uacute:"Ú",uarr:"↑",uArr:"⇑",Uarr:"↟",Uarrocir:"⥉",ubrcy:"ў",Ubrcy:"Ў",ubreve:"ŭ",Ubreve:"Ŭ",ucirc:"û",Ucirc:"Û",ucy:"у",Ucy:"У",udarr:"⇅",udblac:"ű",Udblac:"Ű",udhar:"⥮",ufisht:"⥾",ufr:"𝔲",Ufr:"𝔘",ugrave:"ù",Ugrave:"Ù",uHar:"⥣",uharl:"↿",uharr:"↾",uhblk:"▀",ulcorn:"⌜",ulcorner:"⌜",ulcrop:"⌏",ultri:"◸",umacr:"ū",Umacr:"Ū",uml:"¨",UnderBar:"_",UnderBrace:"⏟",UnderBracket:"⎵",UnderParenthesis:"⏝",Union:"⋃",UnionPlus:"⊎",uogon:"ų",Uogon:"Ų",uopf:"𝕦",Uopf:"𝕌",uparrow:"↑",Uparrow:"⇑",UpArrow:"↑",UpArrowBar:"⤒",UpArrowDownArrow:"⇅",updownarrow:"↕",Updownarrow:"⇕",UpDownArrow:"↕",UpEquilibrium:"⥮",upharpoonleft:"↿",upharpoonright:"↾",uplus:"⊎",UpperLeftArrow:"↖",UpperRightArrow:"↗",upsi:"υ",Upsi:"ϒ",upsih:"ϒ",upsilon:"υ",Upsilon:"Υ",UpTee:"⊥",UpTeeArrow:"↥",upuparrows:"⇈",urcorn:"⌝",urcorner:"⌝",urcrop:"⌎",uring:"ů",Uring:"Ů",urtri:"◹",uscr:"𝓊",Uscr:"𝒰",utdot:"⋰",utilde:"ũ",Utilde:"Ũ",utr
i:"▵",utrif:"▴",uuarr:"⇈",uuml:"ü",Uuml:"Ü",uwangle:"⦧",vangrt:"⦜",varepsilon:"ϵ",varkappa:"ϰ",varnothing:"∅",varphi:"ϕ",varpi:"ϖ",varpropto:"∝",varr:"↕",vArr:"⇕",varrho:"ϱ",varsigma:"ς",varsubsetneq:"⊊︀",varsubsetneqq:"⫋︀",varsupsetneq:"⊋︀",varsupsetneqq:"⫌︀",vartheta:"ϑ",vartriangleleft:"⊲",vartriangleright:"⊳",vBar:"⫨",Vbar:"⫫",vBarv:"⫩",vcy:"в",Vcy:"В",vdash:"⊢",vDash:"⊨",Vdash:"⊩",VDash:"⊫",Vdashl:"⫦",vee:"∨",Vee:"⋁",veebar:"⊻",veeeq:"≚",vellip:"⋮",verbar:"|",Verbar:"‖",vert:"|",Vert:"‖",VerticalBar:"∣",VerticalLine:"|",VerticalSeparator:"❘",VerticalTilde:"≀",VeryThinSpace:" ",vfr:"𝔳",Vfr:"𝔙",vltri:"⊲",vnsub:"⊂⃒",vnsup:"⊃⃒",vopf:"𝕧",Vopf:"𝕍",vprop:"∝",vrtri:"⊳",vscr:"𝓋",Vscr:"𝒱",vsubne:"⊊︀",vsubnE:"⫋︀",vsupne:"⊋︀",vsupnE:"⫌︀",Vvdash:"⊪",vzigzag:"⦚",wcirc:"ŵ",Wcirc:"Ŵ",wedbar:"⩟",wedge:"∧",Wedge:"⋀",wedgeq:"≙",weierp:"℘",wfr:"𝔴",Wfr:"𝔚",wopf:"𝕨",Wopf:"𝕎",wp:"℘",wr:"≀",wreath:"≀",wscr:"𝓌",Wscr:"𝒲",xcap:"⋂",xcirc:"◯",xcup:"⋃",xdtri:"▽",xfr:"𝔵",Xfr:"𝔛",xharr:"⟷",xhArr:"⟺",xi:"ξ",Xi:"Ξ",xlarr:"⟵",xlArr:"⟸",xmap:"⟼",xnis:"⋻",xodot:"⨀",xopf:"𝕩",Xopf:"𝕏",xoplus:"⨁",xotime:"⨂",xrarr:"⟶",xrArr:"⟹",xscr:"𝓍",Xscr:"𝒳",xsqcup:"⨆",xuplus:"⨄",xutri:"△",xvee:"⋁",xwedge:"⋀",yacute:"ý",Yacute:"Ý",yacy:"я",YAcy:"Я",ycirc:"ŷ",Ycirc:"Ŷ",ycy:"ы",Ycy:"Ы",yen:"¥",yfr:"𝔶",Yfr:"𝔜",yicy:"ї",YIcy:"Ї",yopf:"𝕪",Yopf:"𝕐",yscr:"𝓎",Yscr:"𝒴",yucy:"ю",YUcy:"Ю",yuml:"ÿ",Yuml:"Ÿ",zacute:"ź",Zacute:"Ź",zcaron:"ž",Zcaron:"Ž",zcy:"з",Zcy:"З",zdot:"ż",Zdot:"Ż",zeetrf:"ℨ",ZeroWidthSpace:"​",zeta:"ζ",Zeta:"Ζ",zfr:"𝔷",Zfr:"ℨ",zhcy:"ж",ZHcy:"Ж",zigrarr:"⇝",zopf:"𝕫",Zopf:"ℤ",zscr:"𝓏",Zscr:"𝒵",zwj:"‍",zwnj:"‌"},v={aacute:"á",Aacute:"Á",acirc:"â",Acirc:"Â",acute:"´",aelig:"æ",AElig:"Æ",agrave:"à",Agrave:"À",amp:"&",AMP:"&",aring:"å",Aring:"Å",atilde:"ã",Atilde:"Ã",auml:"ä",Auml:"Ä",brvbar:"¦",ccedil:"ç",Ccedil:"Ç",cedil:"¸",cent:"¢",copy:"©",COPY:"©",curren:"¤",deg:"°",divide:"÷",eacute:"é",Eacute:"É",ecirc:"ê",Ecirc:"Ê",egrave:"è",Egrave:"È",eth:"ð",ETH:"Ð",euml:"ë",Euml:"Ë",frac12:"½",frac14:"¼",frac34:"¾",
gt:">",GT:">",iacute:"í",Iacute:"Í",icirc:"î",Icirc:"Î",iexcl:"¡",igrave:"ì",Igrave:"Ì",iquest:"¿",iuml:"ï",Iuml:"Ï",laquo:"«",lt:"<",LT:"<",macr:"¯",micro:"µ",middot:"·",nbsp:" ",not:"¬",ntilde:"ñ",Ntilde:"Ñ",oacute:"ó",Oacute:"Ó",ocirc:"ô",Ocirc:"Ô",ograve:"ò",Ograve:"Ò",ordf:"ª",ordm:"º",oslash:"ø",Oslash:"Ø",otilde:"õ",Otilde:"Õ",ouml:"ö",Ouml:"Ö",para:"¶",plusmn:"±",pound:"£",quot:'"',QUOT:'"',raquo:"»",reg:"®",REG:"®",sect:"§",shy:"­",sup1:"¹",sup2:"²",sup3:"³",szlig:"ß",thorn:"þ",THORN:"Þ",times:"×",uacute:"ú",Uacute:"Ú",ucirc:"û",Ucirc:"Û",ugrave:"ù",Ugrave:"Ù",uml:"¨",uuml:"ü",Uuml:"Ü",yacute:"ý",Yacute:"Ý",yen:"¥",yuml:"ÿ"},m={0:"�",128:"€",130:"‚",131:"ƒ",132:"„",133:"…",134:"†",135:"‡",136:"ˆ",137:"‰",138:"Š",139:"‹",140:"Œ",142:"Ž",145:"‘",146:"’",147:"“",148:"”",149:"•",150:"–",151:"—",152:"˜",153:"™",154:"š",155:"›",156:"œ",158:"ž",159:"Ÿ"},_=[1,2,3,4,5,6,7,8,11,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,64976,64977,64978,64979,64980,64981,64982,64983,64984,64985,64986,64987,64988,64989,64990,64991,64992,64993,64994,64995,64996,64997,64998,64999,65e3,65001,65002,65003,65004,65005,65006,65007,65534,65535,131070,131071,196606,196607,262142,262143,327678,327679,393214,393215,458750,458751,524286,524287,589822,589823,655358,655359,720894,720895,786430,786431,851966,851967,917502,917503,983038,983039,1048574,1048575,1114110,1114111],w=String.fromCharCode,x={}.hasOwnProperty,k=function(t,e){return x.call(t,e)},E=function(t,e){if(!t)return e;var n,r={};for(n in e)r[n]=k(t,n)?t[n]:e[n];return r},A=function(t,e){var n="";return t>=55296&&t<=57343||t>1114111?(e&&T("character reference outside the permissible Unicode range"),"�"):k(m,t)?(e&&T("disallowed character reference"),m[t]):(e&&function(t,e){for(var 
n=-1,r=t.length;++n65535&&(n+=w((t-=65536)>>>10&1023|55296),t=56320|1023&t),n+=w(t))},S=function(t){return"&#x"+t.toString(16).toUpperCase()+";"},M=function(t){return"&#"+t+";"},T=function(t){throw Error("Parse error: "+t)},O=function(t,e){(e=E(e,O.options)).strict&&g.test(t)&&T("forbidden code point");var n=e.encodeEverything,r=e.useNamedReferences,i=e.allowUnsafeSymbols,o=e.decimal?M:S,a=function(t){return o(t.charCodeAt(0))};return n?(t=t.replace(s,(function(t){return r&&k(l,t)?"&"+l[t]+";":a(t)})),r&&(t=t.replace(/>\u20D2/g,">⃒").replace(/<\u20D2/g,"<⃒").replace(/fj/g,"fj")),r&&(t=t.replace(f,(function(t){return"&"+l[t]+";"})))):r?(i||(t=t.replace(h,(function(t){return"&"+l[t]+";"}))),t=(t=t.replace(/>\u20D2/g,">⃒").replace(/<\u20D2/g,"<⃒")).replace(f,(function(t){return"&"+l[t]+";"}))):i||(t=t.replace(h,a)),t.replace(u,(function(t){var e=t.charCodeAt(0),n=t.charCodeAt(1);return o(1024*(e-55296)+n-56320+65536)})).replace(c,a)};O.options={allowUnsafeSymbols:!1,encodeEverything:!1,strict:!1,useNamedReferences:!1,decimal:!1};var D=function(t,e){var n=(e=E(e,D.options)).strict;return n&&p.test(t)&&T("malformed character reference"),t.replace(y,(function(t,r,i,o,a,u,s,c,f){var l,h,d,p,g,y;return r?b[g=r]:i?(g=i,(y=o)&&e.isAttributeValue?(n&&"="==y&&T("`&` did not start a character reference"),t):(n&&T("named character reference was not terminated by a semicolon"),v[g]+(y||""))):a?(d=a,h=u,n&&!h&&T("character reference was not terminated by a semicolon"),l=parseInt(d,10),A(l,n)):s?(p=s,h=c,n&&!h&&T("character reference was not terminated by a semicolon"),l=parseInt(p,16),A(l,n)):(n&&T("named character reference was not terminated by a semicolon"),t)}))};D.options={isAttributeValue:!1,strict:!1};var C={version:"1.2.0",encode:O,decode:D,escape:function(t){return t.replace(h,(function(t){return d[t]}))},unescape:D};if("function"==typeof define&&"object"==typeof define.amd&&define.amd)define((function(){return C}));else if(i&&!i.nodeType)if(o)o.exports=C;else for(var N 
in C)k(C,N)&&(i[N]=C[N]);else r.he=C}(this)}).call(this,n(14)(t),n(25))},function(t,e,n){"use strict";var r=n(449),i=n(450),o=n(451);function a(t,e,n){if(!t)return t;if(!e)return t;"string"==typeof n&&(n={keyframes:n}),n||(n={keyframes:!1}),t=u(t,e+" $1$2");var i=e.replace(/[-\/\\^$*+?.()|[\]{}]/g,"\\$&");t=(t=(t=(t=t.replace(new RegExp("("+i+")\\s*\\1(?=[\\s\\r\\n,{])","g"),"$1")).replace(new RegExp("("+i+")\\s*:host","g"),"$1")).replace(new RegExp("("+i+")\\s*@","g"),"@")).replace(new RegExp("("+i+")\\s*:root","g"),":root");for(var o,a=[],s=/@keyframes\s+([a-zA-Z0-9_-]+)\s*{/g;null!==(o=s.exec(t));)a.indexOf(o[1])<0&&a.push(o[1]);var c=r(e);return a.forEach((function(e){var r=(!0===n.keyframes?c+"-":"string"==typeof n.keyframes?n.keyframes:"")+e;t=(t=t.replace(new RegExp("(@keyframes\\s+)"+e+"(\\s*{)","g"),"$1"+r+"$2")).replace(new RegExp("(animation(?:-name)?\\s*:[^;]*\\s*)"+e+"([\\s;}])","g"),"$1"+r+"$2")})),t=t.replace(new RegExp("("+i+" )(\\s*(?:to|from|[+-]?(?:(?:\\.\\d+)|(?:\\d+(?:\\.\\d*)?))%))(?=[\\s\\r\\n,{])","g"),"$2")}function u(t,e){var n=[];return t=o(t),t=(t=i.replace(t,!0,n)).replace(/([^\r\n,{}]+)(,(?=[^}]*{)|\s*{)/g,e),t=i.paste(t,n)}t.exports=a,a.replace=u},function(t,e,n){"use strict";const r=n(812),i="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._~".split(""),o="0123456789".split(""),a=(t,e)=>{const n=e.length,i=Math.floor(65536/n)*n-1,o=2*Math.ceil(1.1*t);let a="",u=0;for(;ui||(a+=e[t%n],u++)}}return a},u=[void 0,"hex","base64","url-safe","numeric"];t.exports=({length:t,type:e,characters:n})=>{if(!(t>=0&&Number.isFinite(t)))throw new TypeError("Expected a `length` to be a non-negative finite number");if(void 0!==e&&void 0!==n)throw new TypeError("Expected either `type` or `characters`");if(void 0!==n&&"string"!=typeof n)throw new TypeError("Expected `characters` to be string");if(!u.includes(e))throw new TypeError(`Unknown type: ${e}`);if(void 0===e&&void 0===n&&(e="hex"),"hex"===e||void 0===e&&void 0===n)return 
r.randomBytes(Math.ceil(.5*t)).toString("hex").slice(0,t);if("base64"===e)return r.randomBytes(Math.ceil(.75*t)).toString("base64").slice(0,t);if("url-safe"===e)return a(t,i);if("numeric"===e)return a(t,o);if(0===n.length)throw new TypeError("Expected `characters` string length to be greater than or equal to 1");if(n.length>65536)throw new TypeError("Expected `characters` string length to be less or equal to 65536");return a(t,n.split(""))}},function(t,e,n){var r;r=function(){var t=JSON.parse('{"$":"dollar","%":"percent","&":"and","<":"less",">":"greater","|":"or","¢":"cent","£":"pound","¤":"currency","¥":"yen","©":"(c)","ª":"a","®":"(r)","º":"o","À":"A","Á":"A","Â":"A","Ã":"A","Ä":"A","Å":"A","Æ":"AE","Ç":"C","È":"E","É":"E","Ê":"E","Ë":"E","Ì":"I","Í":"I","Î":"I","Ï":"I","Ð":"D","Ñ":"N","Ò":"O","Ó":"O","Ô":"O","Õ":"O","Ö":"O","Ø":"O","Ù":"U","Ú":"U","Û":"U","Ü":"U","Ý":"Y","Þ":"TH","ß":"ss","à":"a","á":"a","â":"a","ã":"a","ä":"a","å":"a","æ":"ae","ç":"c","è":"e","é":"e","ê":"e","ë":"e","ì":"i","í":"i","î":"i","ï":"i","ð":"d","ñ":"n","ò":"o","ó":"o","ô":"o","õ":"o","ö":"o","ø":"o","ù":"u","ú":"u","û":"u","ü":"u","ý":"y","þ":"th","ÿ":"y","Ā":"A","ā":"a","Ă":"A","ă":"a","Ą":"A","ą":"a","Ć":"C","ć":"c","Č":"C","č":"c","Ď":"D","ď":"d","Đ":"DJ","đ":"dj","Ē":"E","ē":"e","Ė":"E","ė":"e","Ę":"e","ę":"e","Ě":"E","ě":"e","Ğ":"G","ğ":"g","Ģ":"G","ģ":"g","Ĩ":"I","ĩ":"i","Ī":"i","ī":"i","Į":"I","į":"i","İ":"I","ı":"i","Ķ":"k","ķ":"k","Ļ":"L","ļ":"l","Ľ":"L","ľ":"l","Ł":"L","ł":"l","Ń":"N","ń":"n","Ņ":"N","ņ":"n","Ň":"N","ň":"n","Ő":"O","ő":"o","Œ":"OE","œ":"oe","Ŕ":"R","ŕ":"r","Ř":"R","ř":"r","Ś":"S","ś":"s","Ş":"S","ş":"s","Š":"S","š":"s","Ţ":"T","ţ":"t","Ť":"T","ť":"t","Ũ":"U","ũ":"u","Ū":"u","ū":"u","Ů":"U","ů":"u","Ű":"U","ű":"u","Ų":"U","ų":"u","Ŵ":"W","ŵ":"w","Ŷ":"Y","ŷ":"y","Ÿ":"Y","Ź":"Z","ź":"z","Ż":"Z","ż":"z","Ž":"Z","ž":"z","ƒ":"f","Ơ":"O","ơ":"o","Ư":"U","ư":"u","Lj":"LJ","lj":"lj","Nj":"NJ","nj":"nj","Ș":"S","ș":"s","Ț":"T","ț":"t","˚":"o","Ά":"A","Έ":"E","Ή":"H",
"Ί":"I","Ό":"O","Ύ":"Y","Ώ":"W","ΐ":"i","Α":"A","Β":"B","Γ":"G","Δ":"D","Ε":"E","Ζ":"Z","Η":"H","Θ":"8","Ι":"I","Κ":"K","Λ":"L","Μ":"M","Ν":"N","Ξ":"3","Ο":"O","Π":"P","Ρ":"R","Σ":"S","Τ":"T","Υ":"Y","Φ":"F","Χ":"X","Ψ":"PS","Ω":"W","Ϊ":"I","Ϋ":"Y","ά":"a","έ":"e","ή":"h","ί":"i","ΰ":"y","α":"a","β":"b","γ":"g","δ":"d","ε":"e","ζ":"z","η":"h","θ":"8","ι":"i","κ":"k","λ":"l","μ":"m","ν":"n","ξ":"3","ο":"o","π":"p","ρ":"r","ς":"s","σ":"s","τ":"t","υ":"y","φ":"f","χ":"x","ψ":"ps","ω":"w","ϊ":"i","ϋ":"y","ό":"o","ύ":"y","ώ":"w","Ё":"Yo","Ђ":"DJ","Є":"Ye","І":"I","Ї":"Yi","Ј":"J","Љ":"LJ","Њ":"NJ","Ћ":"C","Џ":"DZ","А":"A","Б":"B","В":"V","Г":"G","Д":"D","Е":"E","Ж":"Zh","З":"Z","И":"I","Й":"J","К":"K","Л":"L","М":"M","Н":"N","О":"O","П":"P","Р":"R","С":"S","Т":"T","У":"U","Ф":"F","Х":"H","Ц":"C","Ч":"Ch","Ш":"Sh","Щ":"Sh","Ъ":"U","Ы":"Y","Ь":"","Э":"E","Ю":"Yu","Я":"Ya","а":"a","б":"b","в":"v","г":"g","д":"d","е":"e","ж":"zh","з":"z","и":"i","й":"j","к":"k","л":"l","м":"m","н":"n","о":"o","п":"p","р":"r","с":"s","т":"t","у":"u","ф":"f","х":"h","ц":"c","ч":"ch","ш":"sh","щ":"sh","ъ":"u","ы":"y","ь":"","э":"e","ю":"yu","я":"ya","ё":"yo","ђ":"dj","є":"ye","і":"i","ї":"yi","ј":"j","љ":"lj","њ":"nj","ћ":"c","ѝ":"u","џ":"dz","Ґ":"G","ґ":"g","Ғ":"GH","ғ":"gh","Қ":"KH","қ":"kh","Ң":"NG","ң":"ng","Ү":"UE","ү":"ue","Ұ":"U","ұ":"u","Һ":"H","һ":"h","Ә":"AE","ә":"ae","Ө":"OE","ө":"oe","฿":"baht","ა":"a","ბ":"b","გ":"g","დ":"d","ე":"e","ვ":"v","ზ":"z","თ":"t","ი":"i","კ":"k","ლ":"l","მ":"m","ნ":"n","ო":"o","პ":"p","ჟ":"zh","რ":"r","ს":"s","ტ":"t","უ":"u","ფ":"f","ქ":"k","ღ":"gh","ყ":"q","შ":"sh","ჩ":"ch","ც":"ts","ძ":"dz","წ":"ts","ჭ":"ch","ხ":"kh","ჯ":"j","ჰ":"h","Ẁ":"W","ẁ":"w","Ẃ":"W","ẃ":"w","Ẅ":"W","ẅ":"w","ẞ":"SS","Ạ":"A","ạ":"a","Ả":"A","ả":"a","Ấ":"A","ấ":"a","Ầ":"A","ầ":"a","Ẩ":"A","ẩ":"a","Ẫ":"A","ẫ":"a","Ậ":"A","ậ":"a","Ắ":"A","ắ":"a","Ằ":"A","ằ":"a","Ẳ":"A","ẳ":"a","Ẵ":"A","ẵ":"a","Ặ":"A","ặ":"a","Ẹ":"E","ẹ":"e","Ẻ":"E","ẻ":"e","Ẽ":"E","ẽ":"e","Ế":"E","ế":"e","Ề":"E","ề":"
e","Ể":"E","ể":"e","Ễ":"E","ễ":"e","Ệ":"E","ệ":"e","Ỉ":"I","ỉ":"i","Ị":"I","ị":"i","Ọ":"O","ọ":"o","Ỏ":"O","ỏ":"o","Ố":"O","ố":"o","Ồ":"O","ồ":"o","Ổ":"O","ổ":"o","Ỗ":"O","ỗ":"o","Ộ":"O","ộ":"o","Ớ":"O","ớ":"o","Ờ":"O","ờ":"o","Ở":"O","ở":"o","Ỡ":"O","ỡ":"o","Ợ":"O","ợ":"o","Ụ":"U","ụ":"u","Ủ":"U","ủ":"u","Ứ":"U","ứ":"u","Ừ":"U","ừ":"u","Ử":"U","ử":"u","Ữ":"U","ữ":"u","Ự":"U","ự":"u","Ỳ":"Y","ỳ":"y","Ỵ":"Y","ỵ":"y","Ỷ":"Y","ỷ":"y","Ỹ":"Y","ỹ":"y","‘":"\'","’":"\'","“":"\\"","”":"\\"","†":"+","•":"*","…":"...","₠":"ecu","₢":"cruzeiro","₣":"french franc","₤":"lira","₥":"mill","₦":"naira","₧":"peseta","₨":"rupee","₩":"won","₪":"new shequel","₫":"dong","€":"euro","₭":"kip","₮":"tugrik","₯":"drachma","₰":"penny","₱":"peso","₲":"guarani","₳":"austral","₴":"hryvnia","₵":"cedi","₸":"kazakhstani tenge","₹":"indian rupee","₽":"russian ruble","₿":"bitcoin","℠":"sm","™":"tm","∂":"d","∆":"delta","∑":"sum","∞":"infinity","♥":"love","元":"yuan","円":"yen","﷼":"rial"}'),e=JSON.parse('{"vi":{"Đ":"D","đ":"d"}}');function n(n,r){if("string"!=typeof n)throw new Error("slugify: string argument expected");var i=e[(r="string"==typeof r?{replacement:r}:r||{}).locale]||{},o=n.split("").reduce((function(e,n){return e+(i[n]||t[n]||n).replace(r.remove||/[^\w\s$*_+~.()'"!\-:@]/g,"")}),"").trim().replace(/[-\s]+/g,r.replacement||"-");return r.lower?o.toLowerCase():o}return n.extend=function(e){for(var n in e)t[n]=e[n]},n},t.exports=r(),t.exports.default=r()},function(t,e,n){ -/*! 
- * Escaper v2.5.3 - * https://github.com/kobezzza/Escaper - * - * Released under the MIT license - * https://github.com/kobezzza/Escaper/blob/master/LICENSE - * - * Date: Tue, 23 Jan 2018 15:58:45 GMT - */ -!function(t){"use strict";var e,n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},r=e={VERSION:[2,5,3],content:[],cache:{},snakeskinRgxp:null,symbols:null,replace:T,paste:D},i={'"':!0,"'":!0,"`":!0},o={"/":!0};for(var a in i){if(!i.hasOwnProperty(a))break;o[a]=!0}var u={"//":!0,"//*":!0,"//!":!0,"//#":!0,"//@":!0,"//$":!0},s={"/*":!0,"/**":!0,"/*!":!0,"/*#":!0,"/*@":!0,"/*$":!0},c=[],f={};for(var l in o){if(!o.hasOwnProperty(l))break;c.push(l),f[l]=!0}for(var h in u){if(!u.hasOwnProperty(h))break;c.push(h),f[h]=!0}for(var d in s){if(!s.hasOwnProperty(d))break;c.push(d),f[d]=!0}var p=[],g={g:!0,m:!0,i:!0,y:!0,u:!0};for(var y in g){if(!g.hasOwnProperty(y))break;p.push(y)}var b={"-":!0,"+":!0,"*":!0,"%":!0,"~":!0,">":!0,"<":!0,"^":!0,",":!0,";":!0,"=":!0,"|":!0,"&":!0,"!":!0,"?":!0,":":!0,"(":!0,"{":!0,"[":!0},v={return:!0,yield:!0,await:!0,typeof:!0,void:!0,instanceof:!0,delete:!0,in:!0,new:!0,of:!0};function m(t,e,n){for(var r in t){if(!t.hasOwnProperty(r))break;r in e==0&&(e[r]=n)}}var _=void 0,w=void 0,x=/[^\s/]/,k=/[a-z]/,E=/\s/,A=/[\r\n]/,S=/\${pos}/g,M={object:!0,function:!0};function T(t,r,a,l){_=_||e.symbols||"a-z",w=w||e.snakeskinRgxp||new RegExp("[!$"+_+"_]","i");var h=e.cache,d=e.content,y=Boolean(r&&M[void 0===r?"undefined":n(r)]),T=y?Object(r):{};function O(t){return T["@label"]?T["@label"].replace(S,t):"__ESCAPER_QUOT__"+t+"_"}var D=!1;"boolean"==typeof r&&(D=Boolean(r)),"@comments"in T&&(m(s,T,T["@comments"]),m(u,T,T["@comments"]),delete T["@comments"]),"@strings"in T&&(m(i,T,T["@strings"]),delete T["@strings"]),"@literals"in T&&(m(o,T,T["@literals"]),delete T["@literals"]),"@all"in 
T&&(m(f,T,T["@all"]),delete T["@all"]);for(var C="",N=-1;++N2&&s[F])&&(T[F]&&(V=t.substring(q,$+1),-1===T[F]?G="":(G=O(j.length),j.push(V)),t=t.substring(0,q)+G+t.substring($+1),$+=G.length-V.length),F=!1);else{if(!L){if("/"===K&&((u[X]||s[X])&&(F=u[J]||s[J]?J:X),F)){q=$;continue}b[K]||v[W]?(B=!0,W=""):x.test(K)&&(B=!1),k.test(K)?H+=K:(W=H,H="");var Q=!1;l&&("|"===K&&w.test(Z)?(Y=!0,B=!1,Q=!0):Y&&E.test(K)&&(Y=!1,B=!0,Q=!0)),Q||(b[K]?B=!0:x.test(K)&&(B=!1))}if("/"!==L||P||("["===K?U=!0:"]"===K&&(U=!1)),!L&&z&&("}"===K?z--:"{"===K&&z++,z||(K="`")),"`"!==L||P||"${"!==X||(K="`",$++,z++),!f[K]||"/"===K&&!B||L){if(L&&("\\"===K||P))P=!P;else if(f[K]&&L===K&&!P&&("/"!==L||!U)){if("/"===K)for(var tt=-1;++tt-1}},function(t,e,n){var r=n(148);t.exports=function(t,e){var n=this.__data__,i=r(n,t);return i<0?(++this.size,n.push([t,e])):n[i][1]=e,this}},function(t,e,n){var r=n(147);t.exports=function(){this.__data__=new r,this.size=0}},function(t,e){t.exports=function(t){var e=this.__data__,n=e.delete(t);return this.size=e.size,n}},function(t,e){t.exports=function(t){return this.__data__.get(t)}},function(t,e){t.exports=function(t){return this.__data__.has(t)}},function(t,e,n){var r=n(147),i=n(225),o=n(226);t.exports=function(t,e){var n=this.__data__;if(n instanceof r){var a=n.__data__;if(!i||a.length<199)return a.push([t,e]),this.size=++n.size,this;n=this.__data__=new o(a)}return n.set(t,e),this.size=n.size,this}},function(t,e,n){var r=n(124),i=n(469),o=n(28),a=n(295),u=/^\[object .+?Constructor\]$/,s=Function.prototype,c=Object.prototype,f=s.toString,l=c.hasOwnProperty,h=RegExp("^"+f.call(l).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");t.exports=function(t){return!(!o(t)||i(t))&&(r(t)?h:u).test(a(t))}},function(t,e,n){var r=n(87),i=Object.prototype,o=i.hasOwnProperty,a=i.toString,u=r?r.toStringTag:void 0;t.exports=function(t){var e=o.call(t,u),n=t[u];try{t[u]=void 0;var r=!0}catch(t){}var i=a.call(t);return 
r&&(e?t[u]=n:delete t[u]),i}},function(t,e){var n=Object.prototype.toString;t.exports=function(t){return n.call(t)}},function(t,e,n){var r,i=n(470),o=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";t.exports=function(t){return!!o&&o in t}},function(t,e,n){var r=n(35)["__core-js_shared__"];t.exports=r},function(t,e){t.exports=function(t,e){return null==t?void 0:t[e]}},function(t,e,n){var r=n(473),i=n(147),o=n(225);t.exports=function(){this.size=0,this.__data__={hash:new r,map:new(o||i),string:new r}}},function(t,e,n){var r=n(474),i=n(475),o=n(476),a=n(477),u=n(478);function s(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e0){if(++e>=800)return arguments[0]}else e=0;return t.apply(void 0,arguments)}}},function(t,e,n){var r=n(317),i=n(547),o=n(551),a=n(318),u=n(552),s=n(237);t.exports=function(t,e,n){var c=-1,f=i,l=t.length,h=!0,d=[],p=d;if(n)h=!1,f=o;else if(l>=200){var g=e?null:u(t);if(g)return s(g);h=!1,f=a,p=new r}else p=e?[]:d;t:for(;++c-1}},function(t,e,n){var r=n(332),i=n(549),o=n(550);t.exports=function(t,e,n){return e==e?o(t,e,n):r(t,i,n)}},function(t,e){t.exports=function(t){return t!=t}},function(t,e){t.exports=function(t,e,n){for(var r=n-1,i=t.length;++r1||1===e.length&&t.hasEdge(e[0],e[0])}))}},function(t,e,n){var r=n(27);t.exports=function(t,e,n){return function(t,e,n){var r={},i=t.nodes();return i.forEach((function(t){r[t]={},r[t][t]={distance:0},i.forEach((function(e){t!==e&&(r[t][e]={distance:Number.POSITIVE_INFINITY})})),n(t).forEach((function(n){var i=n.v===t?n.w:n.v,o=e(n);r[t][i]={distance:o,predecessor:t}}))})),i.forEach((function(t){var e=r[t];i.forEach((function(n){var o=r[n];i.forEach((function(n){var r=o[t],i=e[n],a=o[n],u=r.distance+i.distance;u0;){if(n=s.removeMin(),r.has(u,n))a.setEdge(n,u[n]);else{if(f)throw new Error("Input graph is not connected: "+t);f=!0}t.nodeEdges(n).forEach(c)}return a}},function(t,e,n){var 
r;try{r=n(340)}catch(t){}r||(r=window.graphlib),t.exports=r},function(t,e,n){t.exports={Graph:n(241),version:n(668)}},function(t,e,n){var r=n(341);t.exports=function(t){return r(t,4)}},function(t,e){t.exports=function(){this.__data__=[],this.size=0}},function(t,e,n){var r=n(163),i=Array.prototype.splice;t.exports=function(t){var e=this.__data__,n=r(e,t);return!(n<0)&&(n==e.length-1?e.pop():i.call(e,n,1),--this.size,!0)}},function(t,e,n){var r=n(163);t.exports=function(t){var e=this.__data__,n=r(e,t);return n<0?void 0:e[n][1]}},function(t,e,n){var r=n(163);t.exports=function(t){return r(this.__data__,t)>-1}},function(t,e,n){var r=n(163);t.exports=function(t,e){var n=this.__data__,i=r(n,t);return i<0?(++this.size,n.push([t,e])):n[i][1]=e,this}},function(t,e,n){var r=n(162);t.exports=function(){this.__data__=new r,this.size=0}},function(t,e){t.exports=function(t){var e=this.__data__,n=e.delete(t);return this.size=e.size,n}},function(t,e){t.exports=function(t){return this.__data__.get(t)}},function(t,e){t.exports=function(t){return this.__data__.has(t)}},function(t,e,n){var r=n(162),i=n(242),o=n(243);t.exports=function(t,e){var n=this.__data__;if(n instanceof r){var a=n.__data__;if(!i||a.length<199)return a.push([t,e]),this.size=++n.size,this;n=this.__data__=new o(a)}return n.set(t,e),this.size=n.size,this}},function(t,e,n){var r=n(93),i=n(582),o=n(30),a=n(343),u=/^\[object .+?Constructor\]$/,s=Function.prototype,c=Object.prototype,f=s.toString,l=c.hasOwnProperty,h=RegExp("^"+f.call(l).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");t.exports=function(t){return!(!o(t)||i(t))&&(r(t)?h:u).test(a(t))}},function(t,e,n){var r=n(94),i=Object.prototype,o=i.hasOwnProperty,a=i.toString,u=r?r.toStringTag:void 0;t.exports=function(t){var e=o.call(t,u),n=t[u];try{t[u]=void 0;var r=!0}catch(t){}var i=a.call(t);return r&&(e?t[u]=n:delete t[u]),i}},function(t,e){var 
n=Object.prototype.toString;t.exports=function(t){return n.call(t)}},function(t,e,n){var r,i=n(583),o=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";t.exports=function(t){return!!o&&o in t}},function(t,e,n){var r=n(36)["__core-js_shared__"];t.exports=r},function(t,e){t.exports=function(t,e){return null==t?void 0:t[e]}},function(t,e,n){var r=n(586),i=n(162),o=n(242);t.exports=function(){this.size=0,this.__data__={hash:new r,map:new(o||i),string:new r}}},function(t,e,n){var r=n(587),i=n(588),o=n(589),a=n(590),u=n(591);function s(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e0){if(++e>=800)return arguments[0]}else e=0;return t.apply(void 0,arguments)}}},function(t,e,n){var r=n(364),i=n(660),o=n(664),a=n(365),u=n(665),s=n(255);t.exports=function(t,e,n){var c=-1,f=i,l=t.length,h=!0,d=[],p=d;if(n)h=!1,f=o;else if(l>=200){var g=e?null:u(t);if(g)return s(g);h=!1,f=a,p=new r}else p=e?[]:d;t:for(;++c-1}},function(t,e,n){var r=n(378),i=n(662),o=n(663);t.exports=function(t,e,n){return e==e?o(t,e,n):r(t,i,n)}},function(t,e){t.exports=function(t){return t!=t}},function(t,e){t.exports=function(t,e,n){for(var r=n-1,i=t.length;++r1||1===e.length&&t.hasEdge(e[0],e[0])}))}},function(t,e,n){var r=n(29);t.exports=function(t,e,n){return function(t,e,n){var r={},i=t.nodes();return i.forEach((function(t){r[t]={},r[t][t]={distance:0},i.forEach((function(e){t!==e&&(r[t][e]={distance:Number.POSITIVE_INFINITY})})),n(t).forEach((function(n){var i=n.v===t?n.w:n.v,o=e(n);r[t][i]={distance:o,predecessor:t}}))})),i.forEach((function(t){var e=r[t];i.forEach((function(n){var o=r[n];i.forEach((function(n){var r=o[t],i=e[n],a=o[n],u=r.distance+i.distance;u0;){if(n=s.removeMin(),r.has(u,n))a.setEdge(n,u[n]);else{if(f)throw new Error("Input graph is not connected: "+t);f=!0}t.nodeEdges(n).forEach(c)}return a}},function(t,e,n){t.exports={graphlib:n(37),layout:n(680),debug:n(734),util:{time:n(21).time,notime:n(21).notime},version:n(735)}},function(t,e,n){"use strict";var 
r=n(7),i=n(713),o=n(716),a=n(717),u=n(21).normalizeRanks,s=n(719),c=n(21).removeEmptyRanks,f=n(720),l=n(721),h=n(722),d=n(723),p=n(732),g=n(21),y=n(37).Graph;t.exports=function(t,e){var n=e&&e.debugTiming?g.time:g.notime;n("layout",(function(){var e=n(" buildLayoutGraph",(function(){return function(t){var e=new y({multigraph:!0,compound:!0}),n=S(t.graph());return e.setGraph(r.merge({},v,A(n,b),r.pick(n,m))),r.forEach(t.nodes(),(function(n){var i=S(t.node(n));e.setNode(n,r.defaults(A(i,_),w)),e.setParent(n,t.parent(n))})),r.forEach(t.edges(),(function(n){var i=S(t.edge(n));e.setEdge(n,r.merge({},k,A(i,x),r.pick(i,E)))})),e}(t)}));n(" runLayout",(function(){!function(t,e){e(" makeSpaceForEdgeLabels",(function(){!function(t){var e=t.graph();e.ranksep/=2,r.forEach(t.edges(),(function(n){var r=t.edge(n);r.minlen*=2,"c"!==r.labelpos.toLowerCase()&&("TB"===e.rankdir||"BT"===e.rankdir?r.width+=r.labeloffset:r.height+=r.labeloffset)}))}(t)})),e(" removeSelfEdges",(function(){!function(t){r.forEach(t.edges(),(function(e){if(e.v===e.w){var n=t.node(e.v);n.selfEdges||(n.selfEdges=[]),n.selfEdges.push({e:e,label:t.edge(e)}),t.removeEdge(e)}}))}(t)})),e(" acyclic",(function(){i.run(t)})),e(" nestingGraph.run",(function(){f.run(t)})),e(" rank",(function(){a(g.asNonCompoundGraph(t))})),e(" injectEdgeLabelProxies",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);if(n.width&&n.height){var r=t.node(e.v),i={rank:(t.node(e.w).rank-r.rank)/2+r.rank,e:e};g.addDummyNode(t,"edge-proxy",i,"_ep")}}))}(t)})),e(" removeEmptyRanks",(function(){c(t)})),e(" nestingGraph.cleanup",(function(){f.cleanup(t)})),e(" normalizeRanks",(function(){u(t)})),e(" assignRankMinMax",(function(){!function(t){var e=0;r.forEach(t.nodes(),(function(n){var i=t.node(n);i.borderTop&&(i.minRank=t.node(i.borderTop).rank,i.maxRank=t.node(i.borderBottom).rank,e=r.max(e,i.maxRank))})),t.graph().maxRank=e}(t)})),e(" removeEdgeLabelProxies",(function(){!function(t){r.forEach(t.nodes(),(function(e){var 
n=t.node(e);"edge-proxy"===n.dummy&&(t.edge(n.e).labelRank=n.rank,t.removeNode(e))}))}(t)})),e(" normalize.run",(function(){o.run(t)})),e(" parentDummyChains",(function(){s(t)})),e(" addBorderSegments",(function(){l(t)})),e(" order",(function(){d(t)})),e(" insertSelfEdges",(function(){!function(t){var e=g.buildLayerMatrix(t);r.forEach(e,(function(e){var n=0;r.forEach(e,(function(e,i){var o=t.node(e);o.order=i+n,r.forEach(o.selfEdges,(function(e){g.addDummyNode(t,"selfedge",{width:e.label.width,height:e.label.height,rank:o.rank,order:i+ ++n,e:e.e,label:e.label},"_se")})),delete o.selfEdges}))}))}(t)})),e(" adjustCoordinateSystem",(function(){h.adjust(t)})),e(" position",(function(){p(t)})),e(" positionSelfEdges",(function(){!function(t){r.forEach(t.nodes(),(function(e){var n=t.node(e);if("selfedge"===n.dummy){var r=t.node(n.e.v),i=r.x+r.width/2,o=r.y,a=n.x-i,u=r.height/2;t.setEdge(n.e,n.label),t.removeNode(e),n.label.points=[{x:i+2*a/3,y:o-u},{x:i+5*a/6,y:o-u},{x:i+a,y:o},{x:i+5*a/6,y:o+u},{x:i+2*a/3,y:o+u}],n.label.x=n.x,n.label.y=n.y}}))}(t)})),e(" removeBorderNodes",(function(){!function(t){r.forEach(t.nodes(),(function(e){if(t.children(e).length){var n=t.node(e),i=t.node(n.borderTop),o=t.node(n.borderBottom),a=t.node(r.last(n.borderLeft)),u=t.node(r.last(n.borderRight));n.width=Math.abs(u.x-a.x),n.height=Math.abs(o.y-i.y),n.x=a.x+n.width/2,n.y=i.y+n.height/2}})),r.forEach(t.nodes(),(function(e){"border"===t.node(e).dummy&&t.removeNode(e)}))}(t)})),e(" normalize.undo",(function(){o.undo(t)})),e(" fixupEdgeLabelCoords",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);if(r.has(n,"x"))switch("l"!==n.labelpos&&"r"!==n.labelpos||(n.width-=n.labeloffset),n.labelpos){case"l":n.x-=n.width/2+n.labeloffset;break;case"r":n.x+=n.width/2+n.labeloffset}}))}(t)})),e(" undoCoordinateSystem",(function(){h.undo(t)})),e(" translateGraph",(function(){!function(t){var 
e=Number.POSITIVE_INFINITY,n=0,i=Number.POSITIVE_INFINITY,o=0,a=t.graph(),u=a.marginx||0,s=a.marginy||0;function c(t){var r=t.x,a=t.y,u=t.width,s=t.height;e=Math.min(e,r-u/2),n=Math.max(n,r+u/2),i=Math.min(i,a-s/2),o=Math.max(o,a+s/2)}r.forEach(t.nodes(),(function(e){c(t.node(e))})),r.forEach(t.edges(),(function(e){var n=t.edge(e);r.has(n,"x")&&c(n)})),e-=u,i-=s,r.forEach(t.nodes(),(function(n){var r=t.node(n);r.x-=e,r.y-=i})),r.forEach(t.edges(),(function(n){var o=t.edge(n);r.forEach(o.points,(function(t){t.x-=e,t.y-=i})),r.has(o,"x")&&(o.x-=e),r.has(o,"y")&&(o.y-=i)})),a.width=n-e+u,a.height=o-i+s}(t)})),e(" assignNodeIntersects",(function(){!function(t){r.forEach(t.edges(),(function(e){var n,r,i=t.edge(e),o=t.node(e.v),a=t.node(e.w);i.points?(n=i.points[0],r=i.points[i.points.length-1]):(i.points=[],n=a,r=o),i.points.unshift(g.intersectRect(o,n)),i.points.push(g.intersectRect(a,r))}))}(t)})),e(" reversePoints",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);n.reversed&&n.points.reverse()}))}(t)})),e(" acyclic.undo",(function(){i.undo(t)}))}(e,n)})),n(" updateInputGraph",(function(){!function(t,e){r.forEach(t.nodes(),(function(n){var r=t.node(n),i=e.node(n);r&&(r.x=i.x,r.y=i.y,e.children(n).length&&(r.width=i.width,r.height=i.height))})),r.forEach(t.edges(),(function(n){var i=t.edge(n),o=e.edge(n);i.points=o.points,r.has(o,"x")&&(i.x=o.x,i.y=o.y)})),t.graph().width=e.graph().width,t.graph().height=e.graph().height}(t,e)}))}))};var b=["nodesep","edgesep","ranksep","marginx","marginy"],v={ranksep:50,edgesep:20,nodesep:50,rankdir:"tb"},m=["acyclicer","ranker","rankdir","align"],_=["width","height"],w={width:0,height:0},x=["minlen","weight","width","height","labeloffset"],k={minlen:1,weight:1,width:0,height:0,labeloffset:10,labelpos:"r"},E=["labelpos"];function A(t,e){return r.mapValues(r.pick(t,e),Number)}function S(t){var e={};return r.forEach(t,(function(t,n){e[n.toLowerCase()]=t})),e}},function(t,e,n){var 
r=n(341);t.exports=function(t){return r(t,5)}},function(t,e,n){var r=n(683)(n(684));t.exports=r},function(t,e,n){var r=n(57),i=n(56),o=n(63);t.exports=function(t){return function(e,n,a){var u=Object(e);if(!i(e)){var s=r(n,3);e=o(e),n=function(t){return s(u[t],t,u)}}var c=t(e,n,a);return c>-1?u[s?e[c]:c]:void 0}}},function(t,e,n){var r=n(378),i=n(57),o=n(685),a=Math.max;t.exports=function(t,e,n){var u=null==t?0:t.length;if(!u)return-1;var s=null==n?0:o(n);return s<0&&(s=a(u+s,0)),r(t,i(e,3),s)}},function(t,e,n){var r=n(388);t.exports=function(t){var e=r(t),n=e%1;return e==e?n?e-n:e:0}},function(t,e,n){var r=n(30),i=n(98),o=/^\s+|\s+$/g,a=/^[-+]0x[0-9a-f]+$/i,u=/^0b[01]+$/i,s=/^0o[0-7]+$/i,c=parseInt;t.exports=function(t){if("number"==typeof t)return t;if(i(t))return NaN;if(r(t)){var e="function"==typeof t.valueOf?t.valueOf():t;t=r(e)?e+"":e}if("string"!=typeof t)return 0===t?t:+t;t=t.replace(o,"");var n=u.test(t);return n||s.test(t)?c(t.slice(2),n?2:8):a.test(t)?NaN:+t}},function(t,e,n){var r=n(254),i=n(360),o=n(96);t.exports=function(t,e){return null==t?t:r(t,i(e),o)}},function(t,e){t.exports=function(t){var e=null==t?0:t.length;return e?t[e-1]:void 0}},function(t,e,n){var r=n(167),i=n(253),o=n(57);t.exports=function(t,e){var n={};return e=o(e,3),i(t,(function(t,i,o){r(n,i,e(t,i,o))})),n}},function(t,e,n){var r=n(260),i=n(691),o=n(79);t.exports=function(t){return t&&t.length?r(t,o,i):void 0}},function(t,e){t.exports=function(t,e){return t>e}},function(t,e,n){var r=n(693),i=n(696)((function(t,e,n){r(t,e,n)}));t.exports=i},function(t,e,n){var r=n(161),i=n(390),o=n(254),a=n(694),u=n(30),s=n(96),c=n(392);t.exports=function t(e,n,f,l,h){e!==n&&o(n,(function(o,s){if(h||(h=new r),u(o))a(e,n,s,f,t,l,h);else{var d=l?l(c(e,s),o,s+"",e,n,h):void 0;void 0===d&&(d=o),i(e,s,d)}}),s)}},function(t,e,n){var r=n(390),i=n(347),o=n(356),a=n(348),u=n(357),s=n(130),c=n(16),f=n(379),l=n(95),h=n(93),d=n(30),p=n(391),g=n(131),y=n(392),b=n(695);t.exports=function(t,e,n,v,m,_,w){var 
x=y(t,n),k=y(e,n),E=w.get(k);if(E)r(t,n,E);else{var A=_?_(x,k,n+"",t,e,w):void 0,S=void 0===A;if(S){var M=c(k),T=!M&&l(k),O=!M&&!T&&g(k);A=k,M||T||O?c(x)?A=x:f(x)?A=a(x):T?(S=!1,A=i(k,!0)):O?(S=!1,A=o(k,!0)):A=[]:p(k)||s(k)?(A=x,s(x)?A=b(x):d(x)&&!h(x)||(A=u(k))):S=!1}S&&(w.set(k,A),m(A,k,v,_,w),w.delete(k)),r(t,n,A)}}},function(t,e,n){var r=n(129),i=n(96);t.exports=function(t){return r(t,i(t))}},function(t,e,n){var r=n(175),i=n(176);t.exports=function(t){return r((function(e,n){var r=-1,o=n.length,a=o>1?n[o-1]:void 0,u=o>2?n[2]:void 0;for(a=t.length>3&&"function"==typeof a?(o--,a):void 0,u&&i(n[0],n[1],u)&&(a=o<3?void 0:a,o=1),e=Object(e);++r1&&a(t,e[0],e[1])?e=[]:n>2&&a(e[0],e[1],e[2])&&(e=[e[0]]),i(t,r(e,1),[])}));t.exports=u},function(t,e,n){var r=n(174),i=n(57),o=n(374),a=n(708),u=n(169),s=n(709),c=n(79);t.exports=function(t,e,n){var f=-1;e=r(e.length?e:[c],u(i));var l=o(t,(function(t,n,i){return{criteria:r(e,(function(e){return e(t)})),index:++f,value:t}}));return a(l,(function(t,e){return s(t,e,n)}))}},function(t,e){t.exports=function(t,e){var n=t.length;for(t.sort(e);n--;)t[n]=t[n].value;return t}},function(t,e,n){var r=n(710);t.exports=function(t,e,n){for(var i=-1,o=t.criteria,a=e.criteria,u=o.length,s=n.length;++i=s?c:c*("desc"==n[i]?-1:1)}return t.index-e.index}},function(t,e,n){var r=n(98);t.exports=function(t,e){if(t!==e){var n=void 0!==t,i=null===t,o=t==t,a=r(t),u=void 0!==e,s=null===e,c=e==e,f=r(e);if(!s&&!f&&!a&&t>e||a&&u&&c&&!s&&!f||i&&u&&c||!n&&c||!o)return 1;if(!i&&!a&&!f&&t0;--s)if(r=e[s].dequeue()){i=i.concat(u(t,e,n,r,!0));break}}return i}(n.graph,n.buckets,n.zeroIdx);return r.flatten(r.map(c,(function(e){return t.outEdges(e.v,e.w)})),!0)};var a=r.constant(1);function u(t,e,n,i,o){var a=o?[]:void 0;return r.forEach(t.inEdges(i.v),(function(r){var i=t.edge(r),u=t.node(r.v);o&&a.push({v:r.v,w:r.w}),u.out-=i,s(e,n,u)})),r.forEach(t.outEdges(i.v),(function(r){var i=t.edge(r),o=r.w,a=t.node(o);a.in-=i,s(e,n,a)})),t.removeNode(i.v),a}function 
s(t,e,n){n.out?n.in?t[n.out-n.in+e].enqueue(n):t[t.length-1].enqueue(n):t[0].enqueue(n)}},function(t,e){function n(){var t={};t._next=t._prev=t,this._sentinel=t}function r(t){t._prev._next=t._next,t._next._prev=t._prev,delete t._next,delete t._prev}function i(t,e){if("_next"!==t&&"_prev"!==t)return e}t.exports=n,n.prototype.dequeue=function(){var t=this._sentinel,e=t._prev;if(e!==t)return r(e),e},n.prototype.enqueue=function(t){var e=this._sentinel;t._prev&&t._next&&r(t),t._next=e._next,e._next._prev=t,e._next=t,t._prev=e},n.prototype.toString=function(){for(var t=[],e=this._sentinel,n=e._prev;n!==e;)t.push(JSON.stringify(n,i)),n=n._prev;return"["+t.join(", ")+"]"}},function(t,e,n){"use strict";var r=n(7),i=n(21);t.exports={run:function(t){t.graph().dummyChains=[],r.forEach(t.edges(),(function(e){!function(t,e){var n,r,o,a=e.v,u=t.node(a).rank,s=e.w,c=t.node(s).rank,f=e.name,l=t.edge(e),h=l.labelRank;if(c===u+1)return;for(t.removeEdge(e),o=0,++u;us.lim&&(c=s,f=!0);var l=r.filter(e.edges(),(function(e){return f===v(t,t.node(e.v),c)&&f!==v(t,t.node(e.w),c)}));return r.minBy(l,(function(t){return o(e,t)}))}function b(t,e,n,i){var o=n.v,a=n.w;t.removeEdge(o,a),t.setEdge(i.v,i.w,{}),d(t),l(t,e),function(t,e){var n=r.find(t.nodes(),(function(t){return!e.node(t).parent})),i=u(t,n);i=i.slice(1),r.forEach(i,(function(n){var r=t.node(n).parent,i=e.edge(n,r),o=!1;i||(i=e.edge(r,n),o=!0),e.node(n).rank=e.node(r).rank+(o?i.minlen:-i.minlen)}))}(t,e)}function v(t,e,n){return n.low<=e.lim&&e.lim<=n.lim}t.exports=f,f.initLowLimValues=d,f.initCutValues=l,f.calcCutValue=h,f.leaveEdge=g,f.enterEdge=y,f.exchangeEdges=b},function(t,e,n){var r=n(7);t.exports=function(t){var e=function(t){var e={},n=0;function i(o){var a=n;r.forEach(t.children(o),i),e[o]={low:a,lim:n++}}return r.forEach(t.children(),i),e}(t);r.forEach(t.graph().dummyChains,(function(n){for(var r=t.node(n),i=r.edgeObj,o=function(t,e,n,r){var 
i,o,a=[],u=[],s=Math.min(e[n].low,e[r].low),c=Math.max(e[n].lim,e[r].lim);i=n;do{i=t.parent(i),a.push(i)}while(i&&(e[i].low>s||c>e[i].lim));o=i,i=r;for(;(i=t.parent(i))!==o;)u.push(i);return{path:a.concat(u.reverse()),lca:o}}(t,e,i.v,i.w),a=o.path,u=o.lca,s=0,c=a[s],f=!0;n!==i.w;){if(r=t.node(n),f){for(;(c=a[s])!==u&&t.node(c).maxRank=2),u=f.buildLayerMatrix(t);var y=o(t,u);y0;)e%2&&(n+=s[e+1]),s[e=e-1>>1]+=t.weight;c+=t.weight*n}))),c}t.exports=function(t,e){for(var n=0,r=1;r=t.barycenter)&&function(t,e){var n=0,r=0;t.weight&&(n+=t.barycenter*t.weight,r+=t.weight);e.weight&&(n+=e.barycenter*e.weight,r+=e.weight);t.vs=e.vs.concat(t.vs),t.barycenter=n/r,t.weight=r,t.i=Math.min(e.i,t.i),e.merged=!0}(t,e)}}function i(e){return function(n){n.in.push(e),0==--n.indegree&&t.push(n)}}for(;t.length;){var o=t.pop();e.push(o),r.forEach(o.in.reverse(),n(o)),r.forEach(o.out,i(o))}return r.map(r.filter(e,(function(t){return!t.merged})),(function(t){return r.pick(t,["vs","i","barycenter","weight"])}))}(r.filter(n,(function(t){return!t.indegree})))}},function(t,e,n){var r=n(7),i=n(21);function o(t,e,n){for(var i;e.length&&(i=r.last(e)).i<=n;)e.pop(),t.push(i.vs),n++;return n}t.exports=function(t,e){var n=i.partition(t,(function(t){return r.has(t,"barycenter")})),a=n.lhs,u=r.sortBy(n.rhs,(function(t){return-t.i})),s=[],c=0,f=0,l=0;a.sort((h=!!e,function(t,e){return t.barycentere.barycenter?1:h?e.i-t.i:t.i-e.i})),l=o(s,u,l),r.forEach(a,(function(t){l+=t.vs.length,s.push(t.vs),c+=t.barycenter*t.weight,f+=t.weight,l=o(s,u,l)}));var h;var d={vs:r.flatten(s,!0)};f&&(d.barycenter=c/f,d.weight=f);return d}},function(t,e,n){var r=n(7),i=n(37).Graph;t.exports=function(t,e,n){var o=function(t){var e;for(;t.hasNode(e=r.uniqueId("_root")););return e}(t),a=new i({compound:!0}).setGraph({root:o}).setDefaultNodeLabel((function(e){return t.node(e)}));return r.forEach(t.nodes(),(function(i){var 
u=t.node(i),s=t.parent(i);(u.rank===e||u.minRank<=e&&e<=u.maxRank)&&(a.setNode(i),a.setParent(i,s||o),r.forEach(t[n](i),(function(e){var n=e.v===i?e.w:e.v,o=a.edge(n,i),u=r.isUndefined(o)?0:o.weight;a.setEdge(n,i,{weight:t.edge(e).weight+u})})),r.has(u,"minRank")&&a.setNode(i,{borderLeft:u.borderLeft[e],borderRight:u.borderRight[e]}))})),a}},function(t,e,n){var r=n(7);t.exports=function(t,e,n){var i,o={};r.forEach(n,(function(n){for(var r,a,u=t.parent(n);u;){if((r=t.parent(u))?(a=o[r],o[r]=u):(a=i,i=u),a&&a!==u)return void e.setEdge(a,u);u=r}}))}},function(t,e,n){"use strict";var r=n(7),i=n(21),o=n(733).positionX;t.exports=function(t){(function(t){var e=i.buildLayerMatrix(t),n=t.graph().ranksep,o=0;r.forEach(e,(function(e){var i=r.max(r.map(e,(function(e){return t.node(e).height})));r.forEach(e,(function(e){t.node(e).y=o+i/2})),o+=i+n}))})(t=i.asNonCompoundGraph(t)),r.forEach(o(t),(function(e,n){t.node(n).x=e}))}},function(t,e,n){"use strict";var r=n(7),i=n(37).Graph,o=n(21);function a(t,e){var n={};return r.reduce(e,(function(e,i){var o=0,a=0,u=e.length,c=r.last(i);return r.forEach(i,(function(e,f){var l=function(t,e){if(t.node(e).dummy)return r.find(t.predecessors(e),(function(e){return t.node(e).dummy}))}(t,e),h=l?t.node(l).order:u;(l||e===c)&&(r.forEach(i.slice(a,f+1),(function(e){r.forEach(t.predecessors(e),(function(r){var i=t.node(r),a=i.order;!(au)&&s(n,e,c)}))}))}return r.reduce(e,(function(e,n){var o,a=-1,u=0;return r.forEach(n,(function(r,s){if("border"===t.node(r).dummy){var c=t.predecessors(r);c.length&&(o=t.node(c[0]).order,i(n,u,s,a,o),u=s,a=o)}i(n,u,n.length,o,e.length)})),n})),n}function s(t,e,n){if(e>n){var r=e;e=n,n=r}var i=t[e];i||(t[e]=i={}),i[n]=!0}function c(t,e,n){if(e>n){var i=e;e=n,n=i}return r.has(t[e],n)}function f(t,e,n,i){var o={},a={},u={};return r.forEach(e,(function(t){r.forEach(t,(function(t,e){o[t]=t,a[t]=t,u[t]=e}))})),r.forEach(e,(function(t){var e=-1;r.forEach(t,(function(t){var s=i(t);if(s.length)for(var 
f=((s=r.sortBy(s,(function(t){return u[t]}))).length-1)/2,l=Math.floor(f),h=Math.ceil(f);l<=h;++l){var d=s[l];a[t]===t&&e0}t.exports=function(t,e,r,i){var o,a,u,s,c,f,l,h,d,p,g,y,b;if(o=e.y-t.y,u=t.x-e.x,c=e.x*t.y-t.x*e.y,d=o*r.x+u*r.y+c,p=o*i.x+u*i.y+c,0!==d&&0!==p&&n(d,p))return;if(a=i.y-r.y,s=r.x-i.x,f=i.x*r.y-r.x*i.y,l=a*t.x+s*t.y+f,h=a*e.x+s*e.y+f,0!==l&&0!==h&&n(l,h))return;if(0===(g=o*s-a*u))return;return y=Math.abs(g/2),{x:(b=u*f-s*c)<0?(b-y)/g:(b+y)/g,y:(b=a*c-o*f)<0?(b-y)/g:(b+y)/g}}},function(t,e,n){var r=n(99),i=n(64),o=n(386).layout;t.exports=function(){var t=n(739),e=n(742),i=n(743),c=n(744),f=n(745),l=n(746),h=n(747),d=n(748),p=n(749),g=function(n,g){!function(t){t.nodes().forEach((function(e){var n=t.node(e);r.has(n,"label")||t.children(e).length||(n.label=e),r.has(n,"paddingX")&&r.defaults(n,{paddingLeft:n.paddingX,paddingRight:n.paddingX}),r.has(n,"paddingY")&&r.defaults(n,{paddingTop:n.paddingY,paddingBottom:n.paddingY}),r.has(n,"padding")&&r.defaults(n,{paddingLeft:n.padding,paddingRight:n.padding,paddingTop:n.padding,paddingBottom:n.padding}),r.defaults(n,a),r.each(["paddingLeft","paddingRight","paddingTop","paddingBottom"],(function(t){n[t]=Number(n[t])})),r.has(n,"width")&&(n._prevWidth=n.width),r.has(n,"height")&&(n._prevHeight=n.height)})),t.edges().forEach((function(e){var n=t.edge(e);r.has(n,"label")||(n.label=""),r.defaults(n,u)}))}(g);var y=s(n,"output"),b=s(y,"clusters"),v=s(y,"edgePaths"),m=i(s(y,"edgeLabels"),g),_=t(s(y,"nodes"),g,d);o(g),f(_,g),l(m,g),c(v,g,p);var w=e(b,g);h(w,g),function(t){r.each(t.nodes(),(function(e){var n=t.node(e);r.has(n,"_prevWidth")?n.width=n._prevWidth:delete n.width,r.has(n,"_prevHeight")?n.height=n._prevHeight:delete n.height,delete n._prevWidth,delete n._prevHeight}))}(g)};return g.createNodes=function(e){return arguments.length?(t=e,g):t},g.createClusters=function(t){return arguments.length?(e=t,g):e},g.createEdgeLabels=function(t){return arguments.length?(i=t,g):i},g.createEdgePaths=function(t){return 
arguments.length?(c=t,g):c},g.shapes=function(t){return arguments.length?(d=t,g):d},g.arrows=function(t){return arguments.length?(p=t,g):p},g};var a={paddingLeft:10,paddingRight:10,paddingTop:10,paddingBottom:10,rx:0,ry:0,shape:"rect"},u={arrowhead:"normal",curve:i.curveLinear};function s(t,e){var n=t.select("g."+e);return n.empty()&&(n=t.append("g").attr("class",e)),n}},function(t,e,n){"use strict";var r=n(99),i=n(262),o=n(31),a=n(64);t.exports=function(t,e,n){var u,s=e.nodes().filter((function(t){return!o.isSubgraph(e,t)})),c=t.selectAll("g.node").data(s,(function(t){return t})).classed("update",!0);c.exit().remove(),c.enter().append("g").attr("class","node").style("opacity",0),(c=t.selectAll("g.node")).each((function(t){var u=e.node(t),s=a.select(this);o.applyClass(s,u.class,(s.classed("update")?"update ":"")+"node"),s.select("g.label").remove();var c=s.append("g").attr("class","label"),f=i(c,u),l=n[u.shape],h=r.pick(f.node().getBBox(),"width","height");u.elem=this,u.id&&s.attr("id",u.id),u.labelId&&c.attr("id",u.labelId),r.has(u,"width")&&(h.width=u.width),r.has(u,"height")&&(h.height=u.height),h.width+=u.paddingLeft+u.paddingRight,h.height+=u.paddingTop+u.paddingBottom,c.attr("transform","translate("+(u.paddingLeft-u.paddingRight)/2+","+(u.paddingTop-u.paddingBottom)/2+")");var d=a.select(this);d.select(".label-container").remove();var p=l(d,h,u).classed("label-container",!0);o.applyStyle(p,u.style);var g=p.node().getBBox();u.width=g.width,u.height=g.height})),u=c.exit?c.exit():c.selectAll(null);return o.applyTransition(u,e).style("opacity",0).remove(),c}},function(t,e,n){var r=n(31);t.exports=function(t,e){for(var n=t.append("text"),i=function(t){for(var e,n="",r=!1,i=0;i2?e[2]:void 0;for(c&&o(e[0],e[1],c)&&(r=1);++n-1?u[s?e[c]:c]:void 0}}},function(t,e,n){var r=n(332),i=n(53),o=n(757),a=Math.max;t.exports=function(t,e,n){var u=null==t?0:t.length;if(!u)return-1;var s=null==n?0:o(n);return s<0&&(s=a(u+s,0)),r(t,i(e,3),s)}},function(t,e,n){var 
r=n(402);t.exports=function(t){var e=r(t),n=e%1;return e==e?n?e-n:e:0}},function(t,e,n){var r=n(28),i=n(91),o=/^\s+|\s+$/g,a=/^[-+]0x[0-9a-f]+$/i,u=/^0b[01]+$/i,s=/^0o[0-7]+$/i,c=parseInt;t.exports=function(t){if("number"==typeof t)return t;if(i(t))return NaN;if(r(t)){var e="function"==typeof t.valueOf?t.valueOf():t;t=r(e)?e+"":e}if("string"!=typeof t)return 0===t?t:+t;t=t.replace(o,"");var n=u.test(t);return n||s.test(t)?c(t.slice(2),n?2:8):a.test(t)?NaN:+t}},function(t,e,n){var r=n(236),i=n(313),o=n(89);t.exports=function(t,e){return null==t?t:r(t,i(e),o)}},function(t,e){t.exports=function(t){var e=null==t?0:t.length;return e?t[e-1]:void 0}},function(t,e,n){var r=n(152),i=n(235),o=n(53);t.exports=function(t,e){var n={};return e=o(e,3),i(t,(function(t,i,o){r(n,i,e(t,i,o))})),n}},function(t,e,n){var r=n(263),i=n(763),o=n(76);t.exports=function(t){return t&&t.length?r(t,o,i):void 0}},function(t,e){t.exports=function(t,e){return t>e}},function(t,e,n){var r=n(765),i=n(769)((function(t,e,n){r(t,e,n)}));t.exports=i},function(t,e,n){var r=n(146),i=n(404),o=n(236),a=n(766),u=n(28),s=n(89),c=n(405);t.exports=function t(e,n,f,l,h){e!==n&&o(n,(function(o,s){if(h||(h=new r),u(o))a(e,n,s,f,t,l,h);else{var d=l?l(c(e,s),o,s+"",e,n,h):void 0;void 0===d&&(d=o),i(e,s,d)}}),s)}},function(t,e,n){var r=n(404),i=n(299),o=n(308),a=n(300),u=n(309),s=n(126),c=n(15),f=n(333),l=n(88),h=n(124),d=n(28),p=n(767),g=n(127),y=n(405),b=n(768);t.exports=function(t,e,n,v,m,_,w){var x=y(t,n),k=y(e,n),E=w.get(k);if(E)r(t,n,E);else{var A=_?_(x,k,n+"",t,e,w):void 0,S=void 0===A;if(S){var M=c(k),T=!M&&l(k),O=!M&&!T&&g(k);A=k,M||T||O?c(x)?A=x:f(x)?A=a(x):T?(S=!1,A=i(k,!0)):O?(S=!1,A=o(k,!0)):A=[]:p(k)||s(k)?(A=x,s(x)?A=b(x):d(x)&&!h(x)||(A=u(k))):S=!1}S&&(w.set(k,A),m(A,k,v,_,w),w.delete(k)),r(t,n,A)}}},function(t,e,n){var r=n(75),i=n(156),o=n(43),a=Function.prototype,u=Object.prototype,s=a.toString,c=u.hasOwnProperty,f=s.call(Object);t.exports=function(t){if(!o(t)||"[object Object]"!=r(t))return!1;var 
e=i(t);if(null===e)return!0;var n=c.call(e,"constructor")&&e.constructor;return"function"==typeof n&&n instanceof n&&s.call(n)==f}},function(t,e,n){var r=n(125),i=n(89);t.exports=function(t){return r(t,i(t))}},function(t,e,n){var r=n(160),i=n(178);t.exports=function(t){return r((function(e,n){var r=-1,o=n.length,a=o>1?n[o-1]:void 0,u=o>2?n[2]:void 0;for(a=t.length>3&&"function"==typeof a?(o--,a):void 0,u&&i(n[0],n[1],u)&&(a=o<3?void 0:a,o=1),e=Object(e);++r1&&a(t,e[0],e[1])?e=[]:n>2&&a(e[0],e[1],e[2])&&(e=[e[0]]),i(t,r(e,1),[])}));t.exports=u},function(t,e,n){var r=n(159),i=n(53),o=n(328),a=n(783),u=n(154),s=n(784),c=n(76);t.exports=function(t,e,n){var f=-1;e=r(e.length?e:[c],u(i));var l=o(t,(function(t,n,i){return{criteria:r(e,(function(e){return e(t)})),index:++f,value:t}}));return a(l,(function(t,e){return s(t,e,n)}))}},function(t,e){t.exports=function(t,e){var n=t.length;for(t.sort(e);n--;)t[n]=t[n].value;return t}},function(t,e,n){var r=n(785);t.exports=function(t,e,n){for(var i=-1,o=t.criteria,a=e.criteria,u=o.length,s=n.length;++i=s?c:c*("desc"==n[i]?-1:1)}return t.index-e.index}},function(t,e,n){var r=n(91);t.exports=function(t,e){if(t!==e){var n=void 0!==t,i=null===t,o=t==t,a=r(t),u=void 0!==e,s=null===e,c=e==e,f=r(e);if(!s&&!f&&!a&&t>e||a&&u&&c&&!s&&!f||i&&u&&c||!n&&c||!o)return 1;if(!i&&!a&&!f&&t0;--s)if(r=e[s].dequeue()){i=i.concat(u(t,e,n,r,!0));break}}return i}(n.graph,n.buckets,n.zeroIdx);return r.flatten(r.map(c,(function(e){return t.outEdges(e.v,e.w)})),!0)};var a=r.constant(1);function u(t,e,n,i,o){var a=o?[]:void 0;return r.forEach(t.inEdges(i.v),(function(r){var i=t.edge(r),u=t.node(r.v);o&&a.push({v:r.v,w:r.w}),u.out-=i,s(e,n,u)})),r.forEach(t.outEdges(i.v),(function(r){var i=t.edge(r),o=r.w,a=t.node(o);a.in-=i,s(e,n,a)})),t.removeNode(i.v),a}function s(t,e,n){n.out?n.in?t[n.out-n.in+e].enqueue(n):t[t.length-1].enqueue(n):t[0].enqueue(n)}},function(t,e){function n(){var t={};t._next=t._prev=t,this._sentinel=t}function 
r(t){t._prev._next=t._next,t._next._prev=t._prev,delete t._next,delete t._prev}function i(t,e){if("_next"!==t&&"_prev"!==t)return e}t.exports=n,n.prototype.dequeue=function(){var t=this._sentinel,e=t._prev;if(e!==t)return r(e),e},n.prototype.enqueue=function(t){var e=this._sentinel;t._prev&&t._next&&r(t),t._next=e._next,e._next._prev=t,e._next=t,t._prev=e},n.prototype.toString=function(){for(var t=[],e=this._sentinel,n=e._prev;n!==e;)t.push(JSON.stringify(n,i)),n=n._prev;return"["+t.join(", ")+"]"}},function(t,e,n){"use strict";var r=n(8),i=n(22);t.exports={run:function(t){t.graph().dummyChains=[],r.forEach(t.edges(),(function(e){!function(t,e){var n,r,o,a=e.v,u=t.node(a).rank,s=e.w,c=t.node(s).rank,f=e.name,l=t.edge(e),h=l.labelRank;if(c===u+1)return;for(t.removeEdge(e),o=0,++u;us.lim&&(c=s,f=!0);var l=r.filter(e.edges(),(function(e){return f===v(t,t.node(e.v),c)&&f!==v(t,t.node(e.w),c)}));return r.minBy(l,(function(t){return o(e,t)}))}function b(t,e,n,i){var o=n.v,a=n.w;t.removeEdge(o,a),t.setEdge(i.v,i.w,{}),d(t),l(t,e),function(t,e){var n=r.find(t.nodes(),(function(t){return!e.node(t).parent})),i=u(t,n);i=i.slice(1),r.forEach(i,(function(n){var r=t.node(n).parent,i=e.edge(n,r),o=!1;i||(i=e.edge(r,n),o=!0),e.node(n).rank=e.node(r).rank+(o?i.minlen:-i.minlen)}))}(t,e)}function v(t,e,n){return n.low<=e.lim&&e.lim<=n.lim}t.exports=f,f.initLowLimValues=d,f.initCutValues=l,f.calcCutValue=h,f.leaveEdge=g,f.enterEdge=y,f.exchangeEdges=b},function(t,e,n){var r=n(8);t.exports=function(t){var e=function(t){var e={},n=0;function i(o){var a=n;r.forEach(t.children(o),i),e[o]={low:a,lim:n++}}return r.forEach(t.children(),i),e}(t);r.forEach(t.graph().dummyChains,(function(n){for(var r=t.node(n),i=r.edgeObj,o=function(t,e,n,r){var 
i,o,a=[],u=[],s=Math.min(e[n].low,e[r].low),c=Math.max(e[n].lim,e[r].lim);i=n;do{i=t.parent(i),a.push(i)}while(i&&(e[i].low>s||c>e[i].lim));o=i,i=r;for(;(i=t.parent(i))!==o;)u.push(i);return{path:a.concat(u.reverse()),lca:o}}(t,e,i.v,i.w),a=o.path,u=o.lca,s=0,c=a[s],f=!0;n!==i.w;){if(r=t.node(n),f){for(;(c=a[s])!==u&&t.node(c).maxRank=2),u=f.buildLayerMatrix(t);var y=o(t,u);y0;)e%2&&(n+=s[e+1]),s[e=e-1>>1]+=t.weight;c+=t.weight*n}))),c}t.exports=function(t,e){for(var n=0,r=1;r=t.barycenter)&&function(t,e){var n=0,r=0;t.weight&&(n+=t.barycenter*t.weight,r+=t.weight);e.weight&&(n+=e.barycenter*e.weight,r+=e.weight);t.vs=e.vs.concat(t.vs),t.barycenter=n/r,t.weight=r,t.i=Math.min(e.i,t.i),e.merged=!0}(t,e)}}function i(e){return function(n){n.in.push(e),0==--n.indegree&&t.push(n)}}for(;t.length;){var o=t.pop();e.push(o),r.forEach(o.in.reverse(),n(o)),r.forEach(o.out,i(o))}return r.map(r.filter(e,(function(t){return!t.merged})),(function(t){return r.pick(t,["vs","i","barycenter","weight"])}))}(r.filter(n,(function(t){return!t.indegree})))}},function(t,e,n){var r=n(8),i=n(22);function o(t,e,n){for(var i;e.length&&(i=r.last(e)).i<=n;)e.pop(),t.push(i.vs),n++;return n}t.exports=function(t,e){var n=i.partition(t,(function(t){return r.has(t,"barycenter")})),a=n.lhs,u=r.sortBy(n.rhs,(function(t){return-t.i})),s=[],c=0,f=0,l=0;a.sort((h=!!e,function(t,e){return t.barycentere.barycenter?1:h?e.i-t.i:t.i-e.i})),l=o(s,u,l),r.forEach(a,(function(t){l+=t.vs.length,s.push(t.vs),c+=t.barycenter*t.weight,f+=t.weight,l=o(s,u,l)}));var h;var d={vs:r.flatten(s,!0)};f&&(d.barycenter=c/f,d.weight=f);return d}},function(t,e,n){var r=n(8),i=n(38).Graph;t.exports=function(t,e,n){var o=function(t){var e;for(;t.hasNode(e=r.uniqueId("_root")););return e}(t),a=new i({compound:!0}).setGraph({root:o}).setDefaultNodeLabel((function(e){return t.node(e)}));return r.forEach(t.nodes(),(function(i){var 
u=t.node(i),s=t.parent(i);(u.rank===e||u.minRank<=e&&e<=u.maxRank)&&(a.setNode(i),a.setParent(i,s||o),r.forEach(t[n](i),(function(e){var n=e.v===i?e.w:e.v,o=a.edge(n,i),u=r.isUndefined(o)?0:o.weight;a.setEdge(n,i,{weight:t.edge(e).weight+u})})),r.has(u,"minRank")&&a.setNode(i,{borderLeft:u.borderLeft[e],borderRight:u.borderRight[e]}))})),a}},function(t,e,n){var r=n(8);t.exports=function(t,e,n){var i,o={};r.forEach(n,(function(n){for(var r,a,u=t.parent(n);u;){if((r=t.parent(u))?(a=o[r],o[r]=u):(a=i,i=u),a&&a!==u)return void e.setEdge(a,u);u=r}}))}},function(t,e,n){"use strict";var r=n(8),i=n(22),o=n(809).positionX;t.exports=function(t){(function(t){var e=i.buildLayerMatrix(t),n=t.graph().ranksep,o=0;r.forEach(e,(function(e){var i=r.max(r.map(e,(function(e){return t.node(e).height})));r.forEach(e,(function(e){t.node(e).y=o+i/2})),o+=i+n}))})(t=i.asNonCompoundGraph(t)),r.forEach(o(t),(function(e,n){t.node(n).x=e}))}},function(t,e,n){"use strict";var r=n(8),i=n(38).Graph,o=n(22);function a(t,e){var n={};return r.reduce(e,(function(e,i){var o=0,a=0,u=e.length,c=r.last(i);return r.forEach(i,(function(e,f){var l=function(t,e){if(t.node(e).dummy)return r.find(t.predecessors(e),(function(e){return t.node(e).dummy}))}(t,e),h=l?t.node(l).order:u;(l||e===c)&&(r.forEach(i.slice(a,f+1),(function(e){r.forEach(t.predecessors(e),(function(r){var i=t.node(r),a=i.order;!(au)&&s(n,e,c)}))}))}return r.reduce(e,(function(e,n){var o,a=-1,u=0;return r.forEach(n,(function(r,s){if("border"===t.node(r).dummy){var c=t.predecessors(r);c.length&&(o=t.node(c[0]).order,i(n,u,s,a,o),u=s,a=o)}i(n,u,n.length,o,e.length)})),n})),n}function s(t,e,n){if(e>n){var r=e;e=n,n=r}var i=t[e];i||(t[e]=i={}),i[n]=!0}function c(t,e,n){if(e>n){var i=e;e=n,n=i}return r.has(t[e],n)}function f(t,e,n,i){var o={},a={},u={};return r.forEach(e,(function(t){r.forEach(t,(function(t,e){o[t]=t,a[t]=t,u[t]=e}))})),r.forEach(e,(function(t){var e=-1;r.forEach(t,(function(t){var s=i(t);if(s.length)for(var 
f=((s=r.sortBy(s,(function(t){return u[t]}))).length-1)/2,l=Math.floor(f),h=Math.ceil(f);l<=h;++l){var d=s[l];a[t]===t&&e0?a-4:a;for(n=0;n>16&255,s[f++]=e>>8&255,s[f++]=255&e;2===u&&(e=i[t.charCodeAt(n)]<<2|i[t.charCodeAt(n+1)]>>4,s[f++]=255&e);1===u&&(e=i[t.charCodeAt(n)]<<10|i[t.charCodeAt(n+1)]<<4|i[t.charCodeAt(n+2)]>>2,s[f++]=e>>8&255,s[f++]=255&e);return s},e.fromByteArray=function(t){for(var e,n=t.length,i=n%3,o=[],a=0,u=n-i;au?u:a+16383));1===i?(e=t[n-1],o.push(r[e>>2]+r[e<<4&63]+"==")):2===i&&(e=(t[n-2]<<8)+t[n-1],o.push(r[e>>10]+r[e>>4&63]+r[e<<2&63]+"="));return o.join("")};for(var r=[],i=[],o="undefined"!=typeof Uint8Array?Uint8Array:Array,a="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",u=0,s=a.length;u0)throw new Error("Invalid string. Length must be a multiple of 4");var n=t.indexOf("=");return-1===n&&(n=e),[n,n===e?0:4-n%4]}function f(t,e,n){for(var i,o,a=[],u=e;u>18&63]+r[o>>12&63]+r[o>>6&63]+r[63&o]);return a.join("")}i["-".charCodeAt(0)]=62,i["_".charCodeAt(0)]=63},function(t,e){e.read=function(t,e,n,r,i){var o,a,u=8*i-r-1,s=(1<>1,f=-7,l=n?i-1:0,h=n?-1:1,d=t[e+l];for(l+=h,o=d&(1<<-f)-1,d>>=-f,f+=u;f>0;o=256*o+t[e+l],l+=h,f-=8);for(a=o&(1<<-f)-1,o>>=-f,f+=r;f>0;a=256*a+t[e+l],l+=h,f-=8);if(0===o)o=1-c;else{if(o===s)return a?NaN:1/0*(d?-1:1);a+=Math.pow(2,r),o-=c}return(d?-1:1)*a*Math.pow(2,o-r)},e.write=function(t,e,n,r,i,o){var a,u,s,c=8*o-i-1,f=(1<>1,h=23===i?Math.pow(2,-24)-Math.pow(2,-77):0,d=r?0:o-1,p=r?1:-1,g=e<0||0===e&&1/e<0?1:0;for(e=Math.abs(e),isNaN(e)||e===1/0?(u=isNaN(e)?1:0,a=f):(a=Math.floor(Math.log(e)/Math.LN2),e*(s=Math.pow(2,-a))<1&&(a--,s*=2),(e+=a+l>=1?h/s:h*Math.pow(2,1-l))*s>=2&&(a++,s/=2),a+l>=f?(u=0,a=f):a+l>=1?(u=(e*s-1)*Math.pow(2,i),a+=l):(u=e*Math.pow(2,l-1)*Math.pow(2,i),a=0));i>=8;t[n+d]=255&u,d+=p,u/=256,i-=8);for(a=a<0;t[n+d]=255&a,d+=p,a/=256,c-=8);t[n+d-p]|=128*g}},function(t,e){},function(t,e,n){"use strict";var r=n(268).Buffer,i=n(817);t.exports=function(){function t(){!function(t,e){if(!(t 
instanceof e))throw new TypeError("Cannot call a class as a function")}(this,t),this.head=null,this.tail=null,this.length=0}return t.prototype.push=function(t){var e={data:t,next:null};this.length>0?this.tail.next=e:this.head=e,this.tail=e,++this.length},t.prototype.unshift=function(t){var e={data:t,next:this.head};0===this.length&&(this.tail=e),this.head=e,++this.length},t.prototype.shift=function(){if(0!==this.length){var t=this.head.data;return 1===this.length?this.head=this.tail=null:this.head=this.head.next,--this.length,t}},t.prototype.clear=function(){this.head=this.tail=null,this.length=0},t.prototype.join=function(t){if(0===this.length)return"";for(var e=this.head,n=""+e.data;e=e.next;)n+=t+e.data;return n},t.prototype.concat=function(t){if(0===this.length)return r.alloc(0);if(1===this.length)return this.head.data;for(var e,n,i,o=r.allocUnsafe(t>>>0),a=this.head,u=0;a;)e=a.data,n=o,i=u,e.copy(n,i),u+=a.data.length,a=a.next;return o},t}(),i&&i.inspect&&i.inspect.custom&&(t.exports.prototype[i.inspect.custom]=function(){var t=i.inspect({length:this.length});return this.constructor.name+" "+t})},function(t,e){},function(t,e,n){(function(t){var r=void 0!==t&&t||"undefined"!=typeof self&&self||window,i=Function.prototype.apply;function o(t,e){this._id=t,this._clearFn=e}e.setTimeout=function(){return new o(i.call(setTimeout,r,arguments),clearTimeout)},e.setInterval=function(){return new o(i.call(setInterval,r,arguments),clearInterval)},e.clearTimeout=e.clearInterval=function(t){t&&t.close()},o.prototype.unref=o.prototype.ref=function(){},o.prototype.close=function(){this._clearFn.call(r,this._id)},e.enroll=function(t,e){clearTimeout(t._idleTimeoutId),t._idleTimeout=e},e.unenroll=function(t){clearTimeout(t._idleTimeoutId),t._idleTimeout=-1},e._unrefActive=e.active=function(t){clearTimeout(t._idleTimeoutId);var e=t._idleTimeout;e>=0&&(t._idleTimeoutId=setTimeout((function(){t._onTimeout&&t._onTimeout()}),e))},n(819),e.setImmediate="undefined"!=typeof 
self&&self.setImmediate||void 0!==t&&t.setImmediate||this&&this.setImmediate,e.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==t&&t.clearImmediate||this&&this.clearImmediate}).call(this,n(25))},function(t,e,n){(function(t,e){!function(t,n){"use strict";if(!t.setImmediate){var r,i,o,a,u,s=1,c={},f=!1,l=t.document,h=Object.getPrototypeOf&&Object.getPrototypeOf(t);h=h&&h.setTimeout?h:t,"[object process]"==={}.toString.call(t.process)?r=function(t){e.nextTick((function(){p(t)}))}:!function(){if(t.postMessage&&!t.importScripts){var e=!0,n=t.onmessage;return t.onmessage=function(){e=!1},t.postMessage("","*"),t.onmessage=n,e}}()?t.MessageChannel?((o=new MessageChannel).port1.onmessage=function(t){p(t.data)},r=function(t){o.port2.postMessage(t)}):l&&"onreadystatechange"in l.createElement("script")?(i=l.documentElement,r=function(t){var e=l.createElement("script");e.onreadystatechange=function(){p(t),e.onreadystatechange=null,i.removeChild(e),e=null},i.appendChild(e)}):r=function(t){setTimeout(p,0,t)}:(a="setImmediate$"+Math.random()+"$",u=function(e){e.source===t&&"string"==typeof e.data&&0===e.data.indexOf(a)&&p(+e.data.slice(a.length))},t.addEventListener?t.addEventListener("message",u,!1):t.attachEvent("onmessage",u),r=function(e){t.postMessage(a+e,"*")}),h.setImmediate=function(t){"function"!=typeof t&&(t=new Function(""+t));for(var e=new Array(arguments.length-1),n=0;n>>2}function f(t,e,n,r){return 0===t?e&n|~e&r:2===t?e&n|e&r|n&r:e^n^r}r(s,i),s.prototype.init=function(){return this._a=1732584193,this._b=4023233417,this._c=2562383102,this._d=271733878,this._e=3285377520,this},s.prototype._update=function(t){for(var e,n=this._w,r=0|this._a,i=0|this._b,o=0|this._c,u=0|this._d,s=0|this._e,l=0;l<16;++l)n[l]=t.readInt32BE(4*l);for(;l<80;++l)n[l]=n[l-3]^n[l-8]^n[l-14]^n[l-16];for(var h=0;h<80;++h){var 
d=~~(h/20),p=0|((e=r)<<5|e>>>27)+f(d,i,o,u)+s+n[h]+a[d];s=u,u=o,o=c(i),i=r,r=p}this._a=r+this._a|0,this._b=i+this._b|0,this._c=o+this._c|0,this._d=u+this._d|0,this._e=s+this._e|0},s.prototype._hash=function(){var t=o.allocUnsafe(20);return t.writeInt32BE(0|this._a,0),t.writeInt32BE(0|this._b,4),t.writeInt32BE(0|this._c,8),t.writeInt32BE(0|this._d,12),t.writeInt32BE(0|this._e,16),t},t.exports=s},function(t,e,n){var r=n(2),i=n(101),o=n(3).Buffer,a=[1518500249,1859775393,-1894007588,-899497514],u=new Array(80);function s(){this.init(),this._w=u,i.call(this,64,56)}function c(t){return t<<5|t>>>27}function f(t){return t<<30|t>>>2}function l(t,e,n,r){return 0===t?e&n|~e&r:2===t?e&n|e&r|n&r:e^n^r}r(s,i),s.prototype.init=function(){return this._a=1732584193,this._b=4023233417,this._c=2562383102,this._d=271733878,this._e=3285377520,this},s.prototype._update=function(t){for(var e,n=this._w,r=0|this._a,i=0|this._b,o=0|this._c,u=0|this._d,s=0|this._e,h=0;h<16;++h)n[h]=t.readInt32BE(4*h);for(;h<80;++h)n[h]=(e=n[h-3]^n[h-8]^n[h-14]^n[h-16])<<1|e>>>31;for(var d=0;d<80;++d){var p=~~(d/20),g=c(r)+l(p,i,o,u)+s+n[d]+a[p]|0;s=u,u=o,o=f(i),i=r,r=g}this._a=r+this._a|0,this._b=i+this._b|0,this._c=o+this._c|0,this._d=u+this._d|0,this._e=s+this._e|0},s.prototype._hash=function(){var t=o.allocUnsafe(20);return t.writeInt32BE(0|this._a,0),t.writeInt32BE(0|this._b,4),t.writeInt32BE(0|this._c,8),t.writeInt32BE(0|this._d,12),t.writeInt32BE(0|this._e,16),t},t.exports=s},function(t,e,n){var r=n(2),i=n(414),o=n(101),a=n(3).Buffer,u=new Array(64);function s(){this.init(),this._w=u,o.call(this,64,56)}r(s,i),s.prototype.init=function(){return this._a=3238371032,this._b=914150663,this._c=812702999,this._d=4144912697,this._e=4290775857,this._f=1750603025,this._g=1694076839,this._h=3204075428,this},s.prototype._hash=function(){var t=a.allocUnsafe(28);return 
t.writeInt32BE(this._a,0),t.writeInt32BE(this._b,4),t.writeInt32BE(this._c,8),t.writeInt32BE(this._d,12),t.writeInt32BE(this._e,16),t.writeInt32BE(this._f,20),t.writeInt32BE(this._g,24),t},t.exports=s},function(t,e,n){var r=n(2),i=n(415),o=n(101),a=n(3).Buffer,u=new Array(160);function s(){this.init(),this._w=u,o.call(this,128,112)}r(s,i),s.prototype.init=function(){return this._ah=3418070365,this._bh=1654270250,this._ch=2438529370,this._dh=355462360,this._eh=1731405415,this._fh=2394180231,this._gh=3675008525,this._hh=1203062813,this._al=3238371032,this._bl=914150663,this._cl=812702999,this._dl=4144912697,this._el=4290775857,this._fl=1750603025,this._gl=1694076839,this._hl=3204075428,this},s.prototype._hash=function(){var t=a.allocUnsafe(48);function e(e,n,r){t.writeInt32BE(e,r),t.writeInt32BE(n,r+4)}return e(this._ah,this._al,0),e(this._bh,this._bl,8),e(this._ch,this._cl,16),e(this._dh,this._dl,24),e(this._eh,this._el,32),e(this._fh,this._fl,40),t},t.exports=s},function(t,e,n){"use strict";var r=n(2),i=n(3).Buffer,o=n(65),a=i.alloc(128);function u(t,e){o.call(this,"digest"),"string"==typeof e&&(e=i.from(e)),this._alg=t,this._key=e,e.length>64?e=t(e):e.length<64&&(e=i.concat([e,a],64));for(var n=this._ipad=i.allocUnsafe(64),r=this._opad=i.allocUnsafe(64),u=0;u<64;u++)n[u]=54^e[u],r[u]=92^e[u];this._hash=[n]}r(u,o),u.prototype._update=function(t){this._hash.push(t)},u.prototype._final=function(){var t=this._alg(i.concat(this._hash));return this._alg(i.concat([this._opad,t]))},t.exports=u},function(t,e,n){t.exports=n(418)},function(t,e,n){(function(e,r){var i,o=n(420),a=n(421),u=n(422),s=n(3).Buffer,c=e.crypto&&e.crypto.subtle,f={sha:"SHA-1","sha-1":"SHA-1",sha1:"SHA-1",sha256:"SHA-256","sha-256":"SHA-256",sha384:"SHA-384","sha-384":"SHA-384","sha-512":"SHA-512",sha512:"SHA-512"},l=[];function h(t,e,n,r,i){return c.importKey("raw",t,{name:"PBKDF2"},!1,["deriveBits"]).then((function(t){return 
c.deriveBits({name:"PBKDF2",salt:e,iterations:n,hash:{name:i}},t,r<<3)})).then((function(t){return s.from(t)}))}t.exports=function(t,n,d,p,g,y){"function"==typeof g&&(y=g,g=void 0);var b=f[(g=g||"sha1").toLowerCase()];if(!b||"function"!=typeof e.Promise)return r.nextTick((function(){var e;try{e=u(t,n,d,p,g)}catch(t){return y(t)}y(null,e)}));if(o(t,n,d,p),"function"!=typeof y)throw new Error("No callback provided to pbkdf2");s.isBuffer(t)||(t=s.from(t,a)),s.isBuffer(n)||(n=s.from(n,a)),function(t,e){t.then((function(t){r.nextTick((function(){e(null,t)}))}),(function(t){r.nextTick((function(){e(t)}))}))}(function(t){if(e.process&&!e.process.browser)return Promise.resolve(!1);if(!c||!c.importKey||!c.deriveBits)return Promise.resolve(!1);if(void 0!==l[t])return l[t];var n=h(i=i||s.alloc(8),i,10,128,t).then((function(){return!0})).catch((function(){return!1}));return l[t]=n,n}(b).then((function(e){return e?h(t,n,d,p,b):u(t,n,d,p,g)})),y)}}).call(this,n(25),n(17))},function(t,e,n){var r=n(834),i=n(274),o=n(275),a=n(847),u=n(182);function s(t,e,n){if(t=t.toLowerCase(),o[t])return i.createCipheriv(t,e,n);if(a[t])return new r({key:e,iv:n,mode:t});throw new TypeError("invalid suite type")}function c(t,e,n){if(t=t.toLowerCase(),o[t])return i.createDecipheriv(t,e,n);if(a[t])return new r({key:e,iv:n,mode:t,decrypt:!0});throw new TypeError("invalid suite type")}e.createCipher=e.Cipher=function(t,e){var n,r;if(t=t.toLowerCase(),o[t])n=o[t].key,r=o[t].iv;else{if(!a[t])throw new TypeError("invalid suite type");n=8*a[t].key,r=a[t].iv}var i=u(e,!1,n,r);return s(t,i.key,i.iv)},e.createCipheriv=e.Cipheriv=s,e.createDecipher=e.Decipher=function(t,e){var n,r;if(t=t.toLowerCase(),o[t])n=o[t].key,r=o[t].iv;else{if(!a[t])throw new TypeError("invalid suite type");n=8*a[t].key,r=a[t].iv}var i=u(e,!1,n,r);return c(t,i.key,i.iv)},e.createDecipheriv=e.Decipheriv=c,e.listCiphers=e.getCiphers=function(){return Object.keys(a).concat(i.getCiphers())}},function(t,e,n){var 
r=n(65),i=n(835),o=n(2),a=n(3).Buffer,u={"des-ede3-cbc":i.CBC.instantiate(i.EDE),"des-ede3":i.EDE,"des-ede-cbc":i.CBC.instantiate(i.EDE),"des-ede":i.EDE,"des-cbc":i.CBC.instantiate(i.DES),"des-ecb":i.DES};function s(t){r.call(this);var e,n=t.mode.toLowerCase(),i=u[n];e=t.decrypt?"decrypt":"encrypt";var o=t.key;a.isBuffer(o)||(o=a.from(o)),"des-ede"!==n&&"des-ede-cbc"!==n||(o=a.concat([o,o.slice(0,8)]));var s=t.iv;a.isBuffer(s)||(s=a.from(s)),this._des=i.create({key:o,iv:s,type:e})}u.des=u["des-cbc"],u.des3=u["des-ede3-cbc"],t.exports=s,o(s,r),s.prototype._update=function(t){return a.from(this._des.update(t))},s.prototype._final=function(){return a.from(this._des.final())}},function(t,e,n){"use strict";e.utils=n(423),e.Cipher=n(273),e.DES=n(424),e.CBC=n(836),e.EDE=n(837)},function(t,e,n){"use strict";var r=n(32),i=n(2),o={};function a(t){r.equal(t.length,8,"Invalid IV length"),this.iv=new Array(8);for(var e=0;e15){var t=this.cache.slice(0,16);return this.cache=this.cache.slice(16),t}return null},h.prototype.flush=function(){for(var t=16-this.cache.length,e=o.allocUnsafe(t),n=-1;++n>a%8,t._prev=o(t._prev,n?r:i);return u}function o(t,e){var n=t.length,i=-1,o=r.allocUnsafe(t.length);for(t=r.concat([t,r.from([e])]);++i>7;return o}e.encrypt=function(t,e,n){for(var o=e.length,a=r.allocUnsafe(o),u=-1;++u>>0,0),e.writeUInt32BE(t[1]>>>0,4),e.writeUInt32BE(t[2]>>>0,8),e.writeUInt32BE(t[3]>>>0,12),e}function a(t){this.h=t,this.state=r.alloc(16,0),this.cache=r.allocUnsafe(0)}a.prototype.ghash=function(t){for(var e=-1;++e0;e--)r[e]=r[e]>>>1|(1&r[e-1])<<31;r[0]=r[0]>>>1,n&&(r[0]=r[0]^225<<24)}this.state=o(i)},a.prototype.update=function(t){var e;for(this.cache=r.concat([this.cache,t]);this.cache.length>=16;)e=this.cache.slice(0,16),this.cache=this.cache.slice(16),this.ghash(e)},a.prototype.final=function(t,e){return this.cache.length&&this.ghash(r.concat([this.cache,i],16)),this.ghash(o([0,t,0,e])),this.state},t.exports=a},function(t,e,n){var 
r=n(428),i=n(3).Buffer,o=n(275),a=n(429),u=n(65),s=n(181),c=n(182);function f(t,e,n){u.call(this),this._cache=new l,this._last=void 0,this._cipher=new s.AES(e),this._prev=i.from(n),this._mode=t,this._autopadding=!0}function l(){this.cache=i.allocUnsafe(0)}function h(t,e,n){var u=o[t.toLowerCase()];if(!u)throw new TypeError("invalid suite type");if("string"==typeof n&&(n=i.from(n)),"GCM"!==u.mode&&n.length!==u.iv)throw new TypeError("invalid iv length "+n.length);if("string"==typeof e&&(e=i.from(e)),e.length!==u.key/8)throw new TypeError("invalid key length "+e.length);return"stream"===u.type?new a(u.module,e,n,!0):"auth"===u.type?new r(u.module,e,n,!0):new f(u.module,e,n)}n(2)(f,u),f.prototype._update=function(t){var e,n;this._cache.add(t);for(var r=[];e=this._cache.get(this._autopadding);)n=this._mode.decrypt(this,e),r.push(n);return i.concat(r)},f.prototype._final=function(){var t=this._cache.flush();if(this._autopadding)return function(t){var e=t[15];if(e<1||e>16)throw new Error("unable to decrypt data");var n=-1;for(;++n16)return e=this.cache.slice(0,16),this.cache=this.cache.slice(16),e}else if(this.cache.length>=16)return e=this.cache.slice(0,16),this.cache=this.cache.slice(16),e;return null},l.prototype.flush=function(){if(this.cache.length)return this.cache},e.createDecipher=function(t,e){var n=o[t.toLowerCase()];if(!n)throw new TypeError("invalid suite type");var r=c(e,!1,n.key,n.iv);return h(t,r.key,r.iv)},e.createDecipheriv=h},function(t,e){e["des-ecb"]={key:8,iv:0},e["des-cbc"]=e.des={key:8,iv:8},e["des-ede3-cbc"]=e.des3={key:24,iv:8},e["des-ede3"]={key:24,iv:0},e["des-ede-cbc"]={key:16,iv:8},e["des-ede"]={key:16,iv:0}},function(t,e,n){(function(t){var r=n(430),i=n(851),o=n(852);var a={binary:!0,hex:!0,base64:!0};e.DiffieHellmanGroup=e.createDiffieHellmanGroup=e.getDiffieHellman=function(e){var n=new t(i[e].prime,"hex"),r=new t(i[e].gen,"hex");return new o(n,r)},e.createDiffieHellman=e.DiffieHellman=function e(n,i,u,s){return t.isBuffer(i)||void 
0===a[i]?e(n,"binary",i,u):(i=i||"binary",s=s||"binary",u=u||new t([2]),t.isBuffer(u)||(u=new t(u,s)),"number"==typeof n?new o(r(n,u),u,!0):(t.isBuffer(n)||(n=new t(n,i)),new o(n,u,!0)))}}).call(this,n(18).Buffer)},function(t,e){},function(t,e){},function(t){t.exports=JSON.parse('{"modp1":{"gen":"02","prime":"ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a63a3620ffffffffffffffff"},"modp2":{"gen":"02","prime":"ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece65381ffffffffffffffff"},"modp5":{"gen":"02","prime":"ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff"},"modp14":{"gen":"02","prime":"ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca18217c32905e462e36ce3be39e772c180e86039b2783a2ec07a28fb5c55df06f4c52c9de2bcbf6955817183995497cea956ae515d2261898fa051015728e5a8aacaa68ffffffffffffffff"},"modp15":{"gen":"02","prime":"ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f241
17c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca18217c32905e462e36ce3be39e772c180e86039b2783a2ec07a28fb5c55df06f4c52c9de2bcbf6955817183995497cea956ae515d2261898fa051015728e5a8aaac42dad33170d04507a33a85521abdf1cba64ecfb850458dbef0a8aea71575d060c7db3970f85a6e1e4c7abf5ae8cdb0933d71e8c94e04a25619dcee3d2261ad2ee6bf12ffa06d98a0864d87602733ec86a64521f2b18177b200cbbe117577a615d6c770988c0bad946e208e24fa074e5ab3143db5bfce0fd108e4b82d120a93ad2caffffffffffffffff"},"modp16":{"gen":"02","prime":"ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca18217c32905e462e36ce3be39e772c180e86039b2783a2ec07a28fb5c55df06f4c52c9de2bcbf6955817183995497cea956ae515d2261898fa051015728e5a8aaac42dad33170d04507a33a85521abdf1cba64ecfb850458dbef0a8aea71575d060c7db3970f85a6e1e4c7abf5ae8cdb0933d71e8c94e04a25619dcee3d2261ad2ee6bf12ffa06d98a0864d87602733ec86a64521f2b18177b200cbbe117577a615d6c770988c0bad946e208e24fa074e5ab3143db5bfce0fd108e4b82d120a92108011a723c12a787e6d788719a10bdba5b2699c327186af4e23c1a946834b6150bda2583e9ca2ad44ce8dbbbc2db04de8ef92e8efc141fbecaa6287c59474e6bc05d99b2964fa090c3a2233ba186515be7ed1f612970cee2d7afb81bdd762170481cd0069127d5b05aa993b4ea988d8fddc186ffb7dc90a6c08f4df435c934063199ffffffffffffffff"},"modp17":{"gen":"02","prime":"ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746
c08ca18217c32905e462e36ce3be39e772c180e86039b2783a2ec07a28fb5c55df06f4c52c9de2bcbf6955817183995497cea956ae515d2261898fa051015728e5a8aaac42dad33170d04507a33a85521abdf1cba64ecfb850458dbef0a8aea71575d060c7db3970f85a6e1e4c7abf5ae8cdb0933d71e8c94e04a25619dcee3d2261ad2ee6bf12ffa06d98a0864d87602733ec86a64521f2b18177b200cbbe117577a615d6c770988c0bad946e208e24fa074e5ab3143db5bfce0fd108e4b82d120a92108011a723c12a787e6d788719a10bdba5b2699c327186af4e23c1a946834b6150bda2583e9ca2ad44ce8dbbbc2db04de8ef92e8efc141fbecaa6287c59474e6bc05d99b2964fa090c3a2233ba186515be7ed1f612970cee2d7afb81bdd762170481cd0069127d5b05aa993b4ea988d8fddc186ffb7dc90a6c08f4df435c93402849236c3fab4d27c7026c1d4dcb2602646dec9751e763dba37bdf8ff9406ad9e530ee5db382f413001aeb06a53ed9027d831179727b0865a8918da3edbebcf9b14ed44ce6cbaced4bb1bdb7f1447e6cc254b332051512bd7af426fb8f401378cd2bf5983ca01c64b92ecf032ea15d1721d03f482d7ce6e74fef6d55e702f46980c82b5a84031900b1c9e59e7c97fbec7e8f323a97a7e36cc88be0f1d45b7ff585ac54bd407b22b4154aacc8f6d7ebf48e1d814cc5ed20f8037e0a79715eef29be32806a1d58bb7c5da76f550aa3d8a1fbff0eb19ccb1a313d55cda56c9ec2ef29632387fe8d76e3c0468043e8f663f4860ee12bf2d5b0b7474d6e694f91e6dcc4024ffffffffffffffff"},"modp18":{"gen":"02","prime":"ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca18217c32905e462e36ce3be39e772c180e86039b2783a2ec07a28fb5c55df06f4c52c9de2bcbf6955817183995497cea956ae515d2261898fa051015728e5a8aaac42dad33170d04507a33a85521abdf1cba64ecfb850458dbef0a8aea71575d060c7db3970f85a6e1e4c7abf5ae8cdb0933d71e8c94e04a25619dcee3d2261ad2ee6bf12ffa06d98a0864d87602733ec86a64521f2b18177b200cbbe117577a615d6c770988c0bad946e208e24fa074e5ab3143db5bfce0fd108e4b82d120a92108011a723c12a787e6d788719a10bdba5b2699c3
27186af4e23c1a946834b6150bda2583e9ca2ad44ce8dbbbc2db04de8ef92e8efc141fbecaa6287c59474e6bc05d99b2964fa090c3a2233ba186515be7ed1f612970cee2d7afb81bdd762170481cd0069127d5b05aa993b4ea988d8fddc186ffb7dc90a6c08f4df435c93402849236c3fab4d27c7026c1d4dcb2602646dec9751e763dba37bdf8ff9406ad9e530ee5db382f413001aeb06a53ed9027d831179727b0865a8918da3edbebcf9b14ed44ce6cbaced4bb1bdb7f1447e6cc254b332051512bd7af426fb8f401378cd2bf5983ca01c64b92ecf032ea15d1721d03f482d7ce6e74fef6d55e702f46980c82b5a84031900b1c9e59e7c97fbec7e8f323a97a7e36cc88be0f1d45b7ff585ac54bd407b22b4154aacc8f6d7ebf48e1d814cc5ed20f8037e0a79715eef29be32806a1d58bb7c5da76f550aa3d8a1fbff0eb19ccb1a313d55cda56c9ec2ef29632387fe8d76e3c0468043e8f663f4860ee12bf2d5b0b7474d6e694f91e6dbe115974a3926f12fee5e438777cb6a932df8cd8bec4d073b931ba3bc832b68d9dd300741fa7bf8afc47ed2576f6936ba424663aab639c5ae4f5683423b4742bf1c978238f16cbe39d652de3fdb8befc848ad922222e04a4037c0713eb57a81a23f0c73473fc646cea306b4bcbc8862f8385ddfa9d4b7fa2c087e879683303ed5bdd3a062b3cf5b3a278a66d2a13f83f44f82ddf310ee074ab6a364597e899a0255dc164f31cc50846851df9ab48195ded7ea1b1d510bd7ee74d73faf36bc31ecfa268359046f4eb879f924009438b481c6cd7889a002ed5ee382bc9190da6fc026e479558e4475677e9aa9e3050e2765694dfc81f56e880b96e7160c980dd98edd3dfffffffffffffffff"}}')},function(t,e,n){(function(e){var r=n(12),i=new(n(431)),o=new r(24),a=new r(11),u=new r(10),s=new r(3),c=new r(7),f=n(430),l=n(100);function h(t,n){return n=n||"utf8",e.isBuffer(t)||(t=new e(t,n)),this._pub=new r(t),this}function d(t,n){return n=n||"utf8",e.isBuffer(t)||(t=new e(t,n)),this._priv=new r(t),this}t.exports=g;var p={};function g(t,e,n){this.setGenerator(e),this.__prime=new r(t),this._prime=r.mont(this.__prime),this._primeLen=t.length,this._pub=void 0,this._priv=void 0,this._primeCode=void 0,n?(this.setPublicKey=h,this.setPrivateKey=d):this._primeCode=8}function y(t,n){var r=new e(t.toArray());return 
n?r.toString(n):r}Object.defineProperty(g.prototype,"verifyError",{enumerable:!0,get:function(){return"number"!=typeof this._primeCode&&(this._primeCode=function(t,e){var n=e.toString("hex"),r=[n,t.toString(16)].join("_");if(r in p)return p[r];var l,h=0;if(t.isEven()||!f.simpleSieve||!f.fermatTest(t)||!i.test(t))return h+=1,h+="02"===n||"05"===n?8:4,p[r]=h,h;switch(i.test(t.shrn(1))||(h+=2),n){case"02":t.mod(o).cmp(a)&&(h+=8);break;case"05":(l=t.mod(u)).cmp(s)&&l.cmp(c)&&(h+=8);break;default:h+=4}return p[r]=h,h}(this.__prime,this.__gen)),this._primeCode}}),g.prototype.generateKeys=function(){return this._priv||(this._priv=new r(l(this._primeLen))),this._pub=this._gen.toRed(this._prime).redPow(this._priv).fromRed(),this.getPublicKey()},g.prototype.computeSecret=function(t){var n=(t=(t=new r(t)).toRed(this._prime)).redPow(this._priv).fromRed(),i=new e(n.toArray()),o=this.getPrime();if(i.length0&&n.ishrn(r),n}function l(t,n,i){var o,a;do{for(o=new e(0);8*o.length=0&&(a=e,u=n),r.negative&&(r=r.neg(),o=o.neg()),a.negative&&(a=a.neg(),u=u.neg()),[{a:r,b:o},{a:a,b:u}]},s.prototype._endoSplit=function(t){var e=this.endo.basis,n=e[0],r=e[1],i=r.b.mul(t).divRound(this.n),o=n.b.neg().mul(t).divRound(this.n),a=i.mul(n.a),u=o.mul(r.a),s=i.mul(n.b),c=o.mul(r.b);return{k1:t.sub(a).sub(u),k2:s.add(c).neg()}},s.prototype.pointFromX=function(t,e){(t=new i(t,16)).red||(t=t.toRed(this.red));var n=t.redSqr().redMul(t).redIAdd(t.redMul(this.a)).redIAdd(this.b),r=n.redSqrt();if(0!==r.redSqr().redSub(n).cmp(this.zero))throw new Error("invalid point");var o=r.fromRed().isOdd();return(e&&!o||!e&&o)&&(r=r.redNeg()),this.point(t,r)},s.prototype.validate=function(t){if(t.inf)return!0;var e=t.x,n=t.y,r=this.a.redMul(e),i=e.redSqr().redMul(e).redIAdd(r).redIAdd(this.b);return 0===n.redSqr().redISub(i).cmpn(0)},s.prototype._endoWnafMulAdd=function(t,e,n){for(var r=this._endoWnafT1,i=this._endoWnafT2,o=0;o":""},c.prototype.isInfinity=function(){return 
this.inf},c.prototype.add=function(t){if(this.inf)return t;if(t.inf)return this;if(this.eq(t))return this.dbl();if(this.neg().eq(t))return this.curve.point(null,null);if(0===this.x.cmp(t.x))return this.curve.point(null,null);var e=this.y.redSub(t.y);0!==e.cmpn(0)&&(e=e.redMul(this.x.redSub(t.x).redInvm()));var n=e.redSqr().redISub(this.x).redISub(t.x),r=e.redMul(this.x.redSub(n)).redISub(this.y);return this.curve.point(n,r)},c.prototype.dbl=function(){if(this.inf)return this;var t=this.y.redAdd(this.y);if(0===t.cmpn(0))return this.curve.point(null,null);var e=this.curve.a,n=this.x.redSqr(),r=t.redInvm(),i=n.redAdd(n).redIAdd(n).redIAdd(e).redMul(r),o=i.redSqr().redISub(this.x.redAdd(this.x)),a=i.redMul(this.x.redSub(o)).redISub(this.y);return this.curve.point(o,a)},c.prototype.getX=function(){return this.x.fromRed()},c.prototype.getY=function(){return this.y.fromRed()},c.prototype.mul=function(t){return t=new i(t,16),this.isInfinity()?this:this._hasDoubles(t)?this.curve._fixedNafMul(this,t):this.curve.endo?this.curve._endoWnafMulAdd([this],[t]):this.curve._wnafMul(this,t)},c.prototype.mulAdd=function(t,e,n){var r=[this,e],i=[t,n];return this.curve.endo?this.curve._endoWnafMulAdd(r,i):this.curve._wnafMulAdd(1,r,i,2)},c.prototype.jmulAdd=function(t,e,n){var r=[this,e],i=[t,n];return this.curve.endo?this.curve._endoWnafMulAdd(r,i,!0):this.curve._wnafMulAdd(1,r,i,2,!0)},c.prototype.eq=function(t){return this===t||this.inf===t.inf&&(this.inf||0===this.x.cmp(t.x)&&0===this.y.cmp(t.y))},c.prototype.neg=function(t){if(this.inf)return this;var e=this.curve.point(this.x,this.y.redNeg());if(t&&this.precomputed){var n=this.precomputed,r=function(t){return t.neg()};e.precomputed={naf:n.naf&&{wnd:n.naf.wnd,points:n.naf.points.map(r)},doubles:n.doubles&&{step:n.doubles.step,points:n.doubles.points.map(r)}}}return e},c.prototype.toJ=function(){return 
this.inf?this.curve.jpoint(null,null,null):this.curve.jpoint(this.x,this.y,this.curve.one)},o(f,a.BasePoint),s.prototype.jpoint=function(t,e,n){return new f(this,t,e,n)},f.prototype.toP=function(){if(this.isInfinity())return this.curve.point(null,null);var t=this.z.redInvm(),e=t.redSqr(),n=this.x.redMul(e),r=this.y.redMul(e).redMul(t);return this.curve.point(n,r)},f.prototype.neg=function(){return this.curve.jpoint(this.x,this.y.redNeg(),this.z)},f.prototype.add=function(t){if(this.isInfinity())return t;if(t.isInfinity())return this;var e=t.z.redSqr(),n=this.z.redSqr(),r=this.x.redMul(e),i=t.x.redMul(n),o=this.y.redMul(e.redMul(t.z)),a=t.y.redMul(n.redMul(this.z)),u=r.redSub(i),s=o.redSub(a);if(0===u.cmpn(0))return 0!==s.cmpn(0)?this.curve.jpoint(null,null,null):this.dbl();var c=u.redSqr(),f=c.redMul(u),l=r.redMul(c),h=s.redSqr().redIAdd(f).redISub(l).redISub(l),d=s.redMul(l.redISub(h)).redISub(o.redMul(f)),p=this.z.redMul(t.z).redMul(u);return this.curve.jpoint(h,d,p)},f.prototype.mixedAdd=function(t){if(this.isInfinity())return t.toJ();if(t.isInfinity())return this;var e=this.z.redSqr(),n=this.x,r=t.x.redMul(e),i=this.y,o=t.y.redMul(e).redMul(this.z),a=n.redSub(r),u=i.redSub(o);if(0===a.cmpn(0))return 0!==u.cmpn(0)?this.curve.jpoint(null,null,null):this.dbl();var s=a.redSqr(),c=s.redMul(a),f=n.redMul(s),l=u.redSqr().redIAdd(c).redISub(f).redISub(f),h=u.redMul(f.redISub(l)).redISub(i.redMul(c)),d=this.z.redMul(a);return this.curve.jpoint(l,h,d)},f.prototype.dblp=function(t){if(0===t)return this;if(this.isInfinity())return this;if(!t)return this.dbl();if(this.curve.zeroA||this.curve.threeA){for(var e=this,n=0;n=0)return!1;if(n.redIAdd(i),0===this.x.cmp(n))return!0}},f.prototype.inspect=function(){return this.isInfinity()?"":""},f.prototype.isInfinity=function(){return 0===this.z.cmpn(0)}},function(t,e,n){"use strict";var r=n(12),i=n(2),o=n(183),a=n(33);function u(t){o.call(this,"mont",t),this.a=new r(t.a,16).toRed(this.red),this.b=new 
r(t.b,16).toRed(this.red),this.i4=new r(4).toRed(this.red).redInvm(),this.two=new r(2).toRed(this.red),this.a24=this.i4.redMul(this.a.redAdd(this.two))}function s(t,e,n){o.BasePoint.call(this,t,"projective"),null===e&&null===n?(this.x=this.curve.one,this.z=this.curve.zero):(this.x=new r(e,16),this.z=new r(n,16),this.x.red||(this.x=this.x.toRed(this.curve.red)),this.z.red||(this.z=this.z.toRed(this.curve.red)))}i(u,o),t.exports=u,u.prototype.validate=function(t){var e=t.normalize().x,n=e.redSqr(),r=n.redMul(e).redAdd(n.redMul(this.a)).redAdd(e);return 0===r.redSqrt().redSqr().cmp(r)},i(s,o.BasePoint),u.prototype.decodePoint=function(t,e){return this.point(a.toArray(t,e),1)},u.prototype.point=function(t,e){return new s(this,t,e)},u.prototype.pointFromJSON=function(t){return s.fromJSON(this,t)},s.prototype.precompute=function(){},s.prototype._encode=function(){return this.getX().toArray("be",this.curve.p.byteLength())},s.fromJSON=function(t,e){return new s(t,e[0],e[1]||t.one)},s.prototype.inspect=function(){return this.isInfinity()?"":""},s.prototype.isInfinity=function(){return 0===this.z.cmpn(0)},s.prototype.dbl=function(){var t=this.x.redAdd(this.z).redSqr(),e=this.x.redSub(this.z).redSqr(),n=t.redSub(e),r=t.redMul(e),i=n.redMul(e.redAdd(this.curve.a24.redMul(n)));return this.curve.point(r,i)},s.prototype.add=function(){throw new Error("Not supported on Montgomery curve")},s.prototype.diffAdd=function(t,e){var n=this.x.redAdd(this.z),r=this.x.redSub(this.z),i=t.x.redAdd(t.z),o=t.x.redSub(t.z).redMul(n),a=i.redMul(r),u=e.z.redMul(o.redAdd(a).redSqr()),s=e.x.redMul(o.redISub(a).redSqr());return this.curve.point(u,s)},s.prototype.mul=function(t){for(var e=t.clone(),n=this,r=this.curve.point(null,null),i=[];0!==e.cmpn(0);e.iushrn(1))i.push(e.andln(1));for(var o=i.length-1;o>=0;o--)0===i[o]?(n=n.diffAdd(r,this),r=r.dbl()):(r=n.diffAdd(r,this),n=n.dbl());return r},s.prototype.mulAdd=function(){throw new Error("Not supported on Montgomery 
curve")},s.prototype.jumlAdd=function(){throw new Error("Not supported on Montgomery curve")},s.prototype.eq=function(t){return 0===this.getX().cmp(t.getX())},s.prototype.normalize=function(){return this.x=this.x.redMul(this.z.redInvm()),this.z=this.curve.one,this},s.prototype.getX=function(){return this.normalize(),this.x.fromRed()}},function(t,e,n){"use strict";var r=n(33),i=n(12),o=n(2),a=n(183),u=r.assert;function s(t){this.twisted=1!=(0|t.a),this.mOneA=this.twisted&&-1==(0|t.a),this.extended=this.mOneA,a.call(this,"edwards",t),this.a=new i(t.a,16).umod(this.red.m),this.a=this.a.toRed(this.red),this.c=new i(t.c,16).toRed(this.red),this.c2=this.c.redSqr(),this.d=new i(t.d,16).toRed(this.red),this.dd=this.d.redAdd(this.d),u(!this.twisted||0===this.c.fromRed().cmpn(1)),this.oneC=1==(0|t.c)}function c(t,e,n,r,o){a.BasePoint.call(this,t,"projective"),null===e&&null===n&&null===r?(this.x=this.curve.zero,this.y=this.curve.one,this.z=this.curve.one,this.t=this.curve.zero,this.zOne=!0):(this.x=new i(e,16),this.y=new i(n,16),this.z=r?new i(r,16):this.curve.one,this.t=o&&new i(o,16),this.x.red||(this.x=this.x.toRed(this.curve.red)),this.y.red||(this.y=this.y.toRed(this.curve.red)),this.z.red||(this.z=this.z.toRed(this.curve.red)),this.t&&!this.t.red&&(this.t=this.t.toRed(this.curve.red)),this.zOne=this.z===this.curve.one,this.curve.extended&&!this.t&&(this.t=this.x.redMul(this.y),this.zOne||(this.t=this.t.redMul(this.z.redInvm()))))}o(s,a),t.exports=s,s.prototype._mulA=function(t){return this.mOneA?t.redNeg():this.a.redMul(t)},s.prototype._mulC=function(t){return this.oneC?t:this.c.redMul(t)},s.prototype.jpoint=function(t,e,n,r){return this.point(t,e,n,r)},s.prototype.pointFromX=function(t,e){(t=new i(t,16)).red||(t=t.toRed(this.red));var n=t.redSqr(),r=this.c2.redSub(this.a.redMul(n)),o=this.one.redSub(this.c2.redMul(this.d).redMul(n)),a=r.redMul(o.redInvm()),u=a.redSqrt();if(0!==u.redSqr().redSub(a).cmp(this.zero))throw new Error("invalid point");var 
s=u.fromRed().isOdd();return(e&&!s||!e&&s)&&(u=u.redNeg()),this.point(t,u)},s.prototype.pointFromY=function(t,e){(t=new i(t,16)).red||(t=t.toRed(this.red));var n=t.redSqr(),r=n.redSub(this.c2),o=n.redMul(this.d).redMul(this.c2).redSub(this.a),a=r.redMul(o.redInvm());if(0===a.cmp(this.zero)){if(e)throw new Error("invalid point");return this.point(this.zero,t)}var u=a.redSqrt();if(0!==u.redSqr().redSub(a).cmp(this.zero))throw new Error("invalid point");return u.fromRed().isOdd()!==e&&(u=u.redNeg()),this.point(u,t)},s.prototype.validate=function(t){if(t.isInfinity())return!0;t.normalize();var e=t.x.redSqr(),n=t.y.redSqr(),r=e.redMul(this.a).redAdd(n),i=this.c2.redMul(this.one.redAdd(this.d.redMul(e).redMul(n)));return 0===r.cmp(i)},o(c,a.BasePoint),s.prototype.pointFromJSON=function(t){return c.fromJSON(this,t)},s.prototype.point=function(t,e,n,r){return new c(this,t,e,n,r)},c.fromJSON=function(t,e){return new c(t,e[0],e[1],e[2])},c.prototype.inspect=function(){return this.isInfinity()?"":""},c.prototype.isInfinity=function(){return 0===this.x.cmpn(0)&&(0===this.y.cmp(this.z)||this.zOne&&0===this.y.cmp(this.curve.c))},c.prototype._extDbl=function(){var t=this.x.redSqr(),e=this.y.redSqr(),n=this.z.redSqr();n=n.redIAdd(n);var r=this.curve._mulA(t),i=this.x.redAdd(this.y).redSqr().redISub(t).redISub(e),o=r.redAdd(e),a=o.redSub(n),u=r.redSub(e),s=i.redMul(a),c=o.redMul(u),f=i.redMul(u),l=a.redMul(o);return this.curve.point(s,c,l,f)},c.prototype._projDbl=function(){var t,e,n,r=this.x.redAdd(this.y).redSqr(),i=this.x.redSqr(),o=this.y.redSqr();if(this.curve.twisted){var a=(c=this.curve._mulA(i)).redAdd(o);if(this.zOne)t=r.redSub(i).redSub(o).redMul(a.redSub(this.curve.two)),e=a.redMul(c.redSub(o)),n=a.redSqr().redSub(a).redSub(a);else{var u=this.z.redSqr(),s=a.redSub(u).redISub(u);t=r.redSub(i).redISub(o).redMul(s),e=a.redMul(c.redSub(o)),n=a.redMul(s)}}else{var 
c=i.redAdd(o);u=this.curve._mulC(this.z).redSqr(),s=c.redSub(u).redSub(u);t=this.curve._mulC(r.redISub(c)).redMul(s),e=this.curve._mulC(c).redMul(i.redISub(o)),n=c.redMul(s)}return this.curve.point(t,e,n)},c.prototype.dbl=function(){return this.isInfinity()?this:this.curve.extended?this._extDbl():this._projDbl()},c.prototype._extAdd=function(t){var e=this.y.redSub(this.x).redMul(t.y.redSub(t.x)),n=this.y.redAdd(this.x).redMul(t.y.redAdd(t.x)),r=this.t.redMul(this.curve.dd).redMul(t.t),i=this.z.redMul(t.z.redAdd(t.z)),o=n.redSub(e),a=i.redSub(r),u=i.redAdd(r),s=n.redAdd(e),c=o.redMul(a),f=u.redMul(s),l=o.redMul(s),h=a.redMul(u);return this.curve.point(c,f,h,l)},c.prototype._projAdd=function(t){var e,n,r=this.z.redMul(t.z),i=r.redSqr(),o=this.x.redMul(t.x),a=this.y.redMul(t.y),u=this.curve.d.redMul(o).redMul(a),s=i.redSub(u),c=i.redAdd(u),f=this.x.redAdd(this.y).redMul(t.x.redAdd(t.y)).redISub(o).redISub(a),l=r.redMul(s).redMul(f);return this.curve.twisted?(e=r.redMul(c).redMul(a.redSub(this.curve._mulA(o))),n=s.redMul(c)):(e=r.redMul(c).redMul(a.redSub(o)),n=this.curve._mulC(s).redMul(c)),this.curve.point(l,e,n)},c.prototype.add=function(t){return this.isInfinity()?t:t.isInfinity()?this:this.curve.extended?this._extAdd(t):this._projAdd(t)},c.prototype.mul=function(t){return this._hasDoubles(t)?this.curve._fixedNafMul(this,t):this.curve._wnafMul(this,t)},c.prototype.mulAdd=function(t,e,n){return this.curve._wnafMulAdd(1,[this,e],[t,n],2,!1)},c.prototype.jmulAdd=function(t,e,n){return this.curve._wnafMulAdd(1,[this,e],[t,n],2,!0)},c.prototype.normalize=function(){if(this.zOne)return this;var t=this.z.redInvm();return this.x=this.x.redMul(t),this.y=this.y.redMul(t),this.t&&(this.t=this.t.redMul(t)),this.z=this.curve.one,this.zOne=!0,this},c.prototype.neg=function(){return this.curve.point(this.x.redNeg(),this.y,this.z,this.t&&this.t.redNeg())},c.prototype.getX=function(){return this.normalize(),this.x.fromRed()},c.prototype.getY=function(){return 
this.normalize(),this.y.fromRed()},c.prototype.eq=function(t){return this===t||0===this.getX().cmp(t.getX())&&0===this.getY().cmp(t.getY())},c.prototype.eqXToP=function(t){var e=t.toRed(this.curve.red).redMul(this.z);if(0===this.x.cmp(e))return!0;for(var n=t.clone(),r=this.curve.redN.redMul(this.z);;){if(n.iadd(this.curve.n),n.cmp(this.curve.p)>=0)return!1;if(e.redIAdd(r),0===this.x.cmp(e))return!0}},c.prototype.toP=c.prototype.normalize,c.prototype.mixedAdd=c.prototype.add},function(t,e,n){"use strict";e.sha1=n(860),e.sha224=n(861),e.sha256=n(435),e.sha384=n(862),e.sha512=n(436)},function(t,e,n){"use strict";var r=n(45),i=n(136),o=n(434),a=r.rotl32,u=r.sum32,s=r.sum32_5,c=o.ft_1,f=i.BlockHash,l=[1518500249,1859775393,2400959708,3395469782];function h(){if(!(this instanceof h))return new h;f.call(this),this.h=[1732584193,4023233417,2562383102,271733878,3285377520],this.W=new Array(80)}r.inherits(h,f),t.exports=h,h.blockSize=512,h.outSize=160,h.hmacStrength=80,h.padLength=64,h.prototype._update=function(t,e){for(var n=this.W,r=0;r<16;r++)n[r]=t[e+r];for(;rthis.blockSize&&(t=(new this.Hash).update(t).digest()),i(t.length<=this.blockSize);for(var e=t.length;e0))return a.iaddn(1),this.keyFromPrivate(a)}},l.prototype._truncateToN=function(t,e){var n=8*t.byteLength()-this.n.bitLength();return n>0&&(t=t.ushrn(n)),!e&&t.cmp(this.n)>=0?t.sub(this.n):t},l.prototype.sign=function(t,e,n,o){"object"==typeof n&&(o=n,n=null),o||(o={}),e=this.keyFromPrivate(e,n),t=this._truncateToN(new r(t,16));for(var a=this.n.byteLength(),u=e.getPrivate().toArray("be",a),s=t.toArray("be",a),c=new i({hash:this.hash,entropy:u,nonce:s,pers:o.pers,persEnc:o.persEnc||"utf8"}),l=this.n.sub(new r(1)),h=0;;h++){var d=o.k?o.k(h):new r(c.generate(this.n.byteLength()));if(!((d=this._truncateToN(d,!0)).cmpn(1)<=0||d.cmp(l)>=0)){var p=this.g.mul(d);if(!p.isInfinity()){var g=p.getX(),y=g.umod(this.n);if(0!==y.cmpn(0)){var 
b=d.invm(this.n).mul(y.mul(e.getPrivate()).iadd(t));if(0!==(b=b.umod(this.n)).cmpn(0)){var v=(p.getY().isOdd()?1:0)|(0!==g.cmp(y)?2:0);return o.canonical&&b.cmp(this.nh)>0&&(b=this.n.sub(b),v^=1),new f({r:y,s:b,recoveryParam:v})}}}}}},l.prototype.verify=function(t,e,n,i){t=this._truncateToN(new r(t,16)),n=this.keyFromPublic(n,i);var o=(e=new f(e,"hex")).r,a=e.s;if(o.cmpn(1)<0||o.cmp(this.n)>=0)return!1;if(a.cmpn(1)<0||a.cmp(this.n)>=0)return!1;var u,s=a.invm(this.n),c=s.mul(t).umod(this.n),l=s.mul(o).umod(this.n);return this.curve._maxwellTrick?!(u=this.g.jmulAdd(c,n.getPublic(),l)).isInfinity()&&u.eqXToP(o):!(u=this.g.mulAdd(c,n.getPublic(),l)).isInfinity()&&0===u.getX().umod(this.n).cmp(o)},l.prototype.recoverPubKey=function(t,e,n,i){s((3&n)===n,"The recovery param is more than two bits"),e=new f(e,i);var o=this.n,a=new r(t),u=e.r,c=e.s,l=1&n,h=n>>1;if(u.cmp(this.curve.p.umod(this.curve.n))>=0&&h)throw new Error("Unable to find sencond key candinate");u=h?this.curve.pointFromX(u.add(this.curve.n),l):this.curve.pointFromX(u,l);var d=e.r.invm(o),p=o.sub(a).mul(d).umod(o),g=c.mul(d).umod(o);return this.g.mulAdd(p,u,g)},l.prototype.getKeyRecoveryParam=function(t,e,n,r){if(null!==(e=new f(e,r)).recoveryParam)return e.recoveryParam;for(var i=0;i<4;i++){var o;try{o=this.recoverPubKey(t,e,i)}catch(t){continue}if(o.eq(n))return i}throw new Error("Unable to find valid recovery factor")}},function(t,e,n){"use strict";var r=n(280),i=n(432),o=n(32);function a(t){if(!(this instanceof a))return new a(t);this.hash=t.hash,this.predResist=!!t.predResist,this.outLen=this.hash.outSize,this.minEntropy=t.minEntropy||this.hash.hmacStrength,this._reseed=null,this.reseedInterval=null,this.K=null,this.V=null;var e=i.toArray(t.entropy,t.entropyEnc||"hex"),n=i.toArray(t.nonce,t.nonceEnc||"hex"),r=i.toArray(t.pers,t.persEnc||"hex");o(e.length>=this.minEntropy/8,"Not enough entropy. 
Minimum is: "+this.minEntropy+" bits"),this._init(e,n,r)}t.exports=a,a.prototype._init=function(t,e,n){var r=t.concat(e).concat(n);this.K=new Array(this.outLen/8),this.V=new Array(this.outLen/8);for(var i=0;i=this.minEntropy/8,"Not enough entropy. Minimum is: "+this.minEntropy+" bits"),this._update(t.concat(n||[])),this._reseed=1},a.prototype.generate=function(t,e,n,r){if(this._reseed>this.reseedInterval)throw new Error("Reseed is required");"string"!=typeof e&&(r=n,n=e,e=null),n&&(n=i.toArray(n,r||"hex"),this._update(n));for(var o=[];o.length"}},function(t,e,n){"use strict";var r=n(12),i=n(33),o=i.assert;function a(t,e){if(t instanceof a)return t;this._importDER(t,e)||(o(t.r&&t.s,"Signature without r or s"),this.r=new r(t.r,16),this.s=new r(t.s,16),void 0===t.recoveryParam?this.recoveryParam=null:this.recoveryParam=t.recoveryParam)}function u(){this.place=0}function s(t,e){var n=t[e.place++];if(!(128&n))return n;for(var r=15&n,i=0,o=0,a=e.place;o>>3);for(t.push(128|n);--n;)t.push(e>>>(n<<3)&255);t.push(e)}}t.exports=a,a.prototype._importDER=function(t,e){t=i.toArray(t,e);var n=new u;if(48!==t[n.place++])return!1;if(s(t,n)+n.place!==t.length)return!1;if(2!==t[n.place++])return!1;var o=s(t,n),a=t.slice(n.place,o+n.place);if(n.place+=o,2!==t[n.place++])return!1;var c=s(t,n);if(t.length!==c+n.place)return!1;var f=t.slice(n.place,c+n.place);return 0===a[0]&&128&a[1]&&(a=a.slice(1)),0===f[0]&&128&f[1]&&(f=f.slice(1)),this.r=new r(a),this.s=new r(f),this.recoveryParam=null,!0},a.prototype.toDER=function(t){var e=this.r.toArray(),n=this.s.toArray();for(128&e[0]&&(e=[0].concat(e)),128&n[0]&&(n=[0].concat(n)),e=c(e),n=c(n);!(n[0]||128&n[1]);)n=n.slice(1);var r=[2];f(r,e.length),(r=r.concat(e)).push(2),f(r,n.length);var o=r.concat(n),a=[48];return f(a,o.length),a=a.concat(o),i.encode(a,t)}},function(t,e,n){"use strict";var r=n(280),i=n(279),o=n(33),a=o.assert,u=o.parseBytes,s=n(871),c=n(872);function f(t){if(a("ed25519"===t,"only tested with ed25519 so far"),!(this 
instanceof f))return new f(t);t=i[t].curve;this.curve=t,this.g=t.g,this.g.precompute(t.n.bitLength()+1),this.pointClass=t.point().constructor,this.encodingLength=Math.ceil(t.n.bitLength()/8),this.hash=r.sha512}t.exports=f,f.prototype.sign=function(t,e){t=u(t);var n=this.keyFromSecret(e),r=this.hashInt(n.messagePrefix(),t),i=this.g.mul(r),o=this.encodePoint(i),a=this.hashInt(o,n.pubBytes(),t).mul(n.priv()),s=r.add(a).umod(this.curve.n);return this.makeSignature({R:i,S:s,Rencoded:o})},f.prototype.verify=function(t,e,n){t=u(t),e=this.makeSignature(e);var r=this.keyFromPublic(n),i=this.hashInt(e.Rencoded(),r.pubBytes(),t),o=this.g.mul(e.S());return e.R().add(r.pub().mul(i)).eq(o)},f.prototype.hashInt=function(){for(var t=this.hash(),e=0;e=e)throw new Error("invalid sig")}t.exports=function(t,n,s,c,f){var l=o(s);if("ec"===l.type){if("ecdsa"!==c&&"ecdsa/rsa"!==c)throw new Error("wrong public key type");return function(t,e,n){var r=a[n.data.algorithm.curve.join(".")];if(!r)throw new Error("unknown curve "+n.data.algorithm.curve.join("."));var o=new i(r),u=n.data.subjectPrivateKey.data;return o.verify(e,t,u)}(t,n,l)}if("dsa"===l.type){if("dsa"!==c)throw new Error("wrong public key type");return function(t,e,n){var i=n.data.p,a=n.data.q,s=n.data.g,c=n.data.pub_key,f=o.signature.decode(t,"der"),l=f.s,h=f.r;u(l,a),u(h,a);var d=r.mont(i),p=l.invm(a);return 0===s.toRed(d).redPow(new r(e).mul(p).mod(a)).fromRed().mul(c.toRed(d).redPow(h.mul(p).mod(a)).fromRed()).mod(i).mod(a).cmp(h)}(t,n,l)}if("rsa"!==c&&"ecdsa/rsa"!==c)throw new Error("wrong public key type");n=e.concat([f,n]);for(var h=l.modulus.byteLength(),d=[1],p=0;n.length+d.length+2n-h-2)throw new Error("message too long");var d=l.alloc(n-r-h-2),p=n-f-1,g=i(f),y=u(l.concat([c,d,l.alloc(1,1),e],p),a(g,p)),b=u(g,a(y,f));return new s(l.concat([l.alloc(1),b,y],n))}(p,e);else if(1===h)d=function(t,e,n){var r,o=e.length,a=t.modulus.byteLength();if(o>a-11)throw new Error("message too long");r=n?l.alloc(a-o-3,255):function(t){var 
e,n=l.allocUnsafe(t),r=0,o=i(2*t),a=0;for(;r=0)throw new Error("data too long for modulus")}return n?f(d,p):c(d,p)}},function(t,e,n){var r=n(184),i=n(442),o=n(443),a=n(12),u=n(277),s=n(133),c=n(444),f=n(3).Buffer;t.exports=function(t,e,n){var l;l=t.padding?t.padding:n?1:4;var h,d=r(t),p=d.modulus.byteLength();if(e.length>p||new a(e).cmp(d.modulus)>=0)throw new Error("decryption error");h=n?c(new a(e),d):u(e,d);var g=f.alloc(p-h.length);if(h=f.concat([g,h],p),4===l)return function(t,e){var n=t.modulus.byteLength(),r=s("sha1").update(f.alloc(0)).digest(),a=r.length;if(0!==e[0])throw new Error("decryption error");var u=e.slice(1,a+1),c=e.slice(a+1),l=o(u,i(c,a)),h=o(c,i(l,n-a-1));if(function(t,e){t=f.from(t),e=f.from(e);var n=0,r=t.length;t.length!==e.length&&(n++,r=Math.min(t.length,e.length));var i=-1;for(;++i=e.length){o++;break}var a=e.slice(2,i-1);("0002"!==r.toString("hex")&&!n||"0001"!==r.toString("hex")&&n)&&o++;a.length<8&&o++;if(o)throw new Error("decryption error");return e.slice(i)}(0,h,n);if(3===l)return h;throw new Error("unknown padding")}},function(t,e,n){"use strict";(function(t,r){function i(){throw new Error("secure random number generation not supported by this browser\nuse chrome, FireFox or Internet Explorer 11")}var o=n(3),a=n(100),u=o.Buffer,s=o.kMaxLength,c=t.crypto||t.msCrypto,f=Math.pow(2,32)-1;function l(t,e){if("number"!=typeof t||t!=t)throw new TypeError("offset must be a number");if(t>f||t<0)throw new TypeError("offset must be a uint32");if(t>s||t>e)throw new RangeError("offset out of range")}function h(t,e,n){if("number"!=typeof t||t!=t)throw new TypeError("size must be a number");if(t>f||t<0)throw new TypeError("size must be a uint32");if(t+e>n||t>s)throw new RangeError("buffer too small")}function d(t,e,n,i){if(r.browser){var o=t.buffer,u=new Uint8Array(o,e,n);return c.getRandomValues(u),i?void r.nextTick((function(){i(null,t)})):t}if(!i)return a(n).copy(t,e),t;a(n,(function(n,r){if(n)return 
i(n);r.copy(t,e),i(null,t)}))}c&&c.getRandomValues||!r.browser?(e.randomFill=function(e,n,r,i){if(!(u.isBuffer(e)||e instanceof t.Uint8Array))throw new TypeError('"buf" argument must be a Buffer or Uint8Array');if("function"==typeof n)i=n,n=0,r=e.length;else if("function"==typeof r)i=r,r=e.length-n;else if("function"!=typeof i)throw new TypeError('"cb" argument must be a function');return l(n,e.length),h(r,n,e.length),d(e,n,r,i)},e.randomFillSync=function(e,n,r){void 0===n&&(n=0);if(!(u.isBuffer(e)||e instanceof t.Uint8Array))throw new TypeError('"buf" argument must be a Buffer or Uint8Array');l(n,e.length),void 0===r&&(r=e.length-n);return h(r,n,e.length),d(e,n,r)}):(e.randomFill=i,e.randomFillSync=i)}).call(this,n(25),n(17))},function(t,e,n){var r={"./dark/index.scss":893,"./default/index.scss":895,"./forest/index.scss":897,"./neutral/index.scss":899};function i(t){var e=o(t);return n(e)}function o(t){if(!n.o(r,t)){var e=new Error("Cannot find module '"+t+"'");throw e.code="MODULE_NOT_FOUND",e}return r[t]}i.keys=function(){return Object.keys(r)},i.resolve=o,t.exports=i,i.id=892},function(t,e,n){var r=n(894);t.exports="string"==typeof r?r:r.toString()},function(t,e,n){(t.exports=n(185)(!1)).push([t.i,".label{font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);color:#333}.label text{fill:#333}.node rect,.node circle,.node ellipse,.node polygon,.node path{fill:#BDD5EA;stroke:purple;stroke-width:1px}.node .label{text-align:center}.node.clickable{cursor:pointer}.arrowheadPath{fill:#d3d3d3}.edgePath .path{stroke:#d3d3d3;stroke-width:1.5px}.edgeLabel{background-color:#e8e8e8;text-align:center}.cluster rect{fill:#6D6D65;stroke:rgba(255,255,255,0.25);stroke-width:1px}.cluster text{fill:#F9FFFE}div.mermaidTooltip{position:absolute;text-align:center;max-width:200px;padding:2px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:12px;background:#6D6D65;border:1px solid 
rgba(255,255,255,0.25);border-radius:2px;pointer-events:none;z-index:100}.actor{stroke:#81B1DB;fill:#BDD5EA}text.actor{fill:#000;stroke:none}.actor-line{stroke:#d3d3d3}.messageLine0{stroke-width:1.5;stroke-dasharray:'2 2';stroke:#d3d3d3}.messageLine1{stroke-width:1.5;stroke-dasharray:'2 2';stroke:#d3d3d3}#arrowhead{fill:#d3d3d3}.sequenceNumber{fill:#fff}#sequencenumber{fill:#d3d3d3}#crosshead path{fill:#d3d3d3 !important;stroke:#d3d3d3 !important}.messageText{fill:#d3d3d3;stroke:none}.labelBox{stroke:#81B1DB;fill:#BDD5EA}.labelText{fill:#323D47;stroke:none}.loopText{fill:#d3d3d3;stroke:none}.loopLine{stroke-width:2;stroke-dasharray:'2 2';stroke:#81B1DB}.note{stroke:rgba(255,255,255,0.25);fill:#fff5ad}.noteText{fill:black;stroke:none;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:14px}.activation0{fill:#f4f4f4;stroke:#666}.activation1{fill:#f4f4f4;stroke:#666}.activation2{fill:#f4f4f4;stroke:#666}.mermaid-main-font{font-family:\"trebuchet ms\", verdana, arial;font-family:var(--mermaid-font-family)}.section{stroke:none;opacity:0.2}.section0{fill:rgba(255,255,255,0.3)}.section2{fill:#EAE8B9}.section1,.section3{fill:#fff;opacity:0.2}.sectionTitle0{fill:#F9FFFE}.sectionTitle1{fill:#F9FFFE}.sectionTitle2{fill:#F9FFFE}.sectionTitle3{fill:#F9FFFE}.sectionTitle{text-anchor:start;font-size:11px;text-height:14px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.grid .tick{stroke:#d3d3d3;opacity:0.8;shape-rendering:crispEdges}.grid .tick text{font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.grid path{stroke-width:0}.today{fill:none;stroke:#DB5757;stroke-width:2px}.task{stroke-width:2}.taskText{text-anchor:middle;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.taskText:not([font-size]){font-size:11px}.taskTextOutsideRight{fill:#323D47;text-anchor:start;font-size:11px;font-family:'trebuchet ms', verdana, 
arial;font-family:var(--mermaid-font-family)}.taskTextOutsideLeft{fill:#323D47;text-anchor:end;font-size:11px}.task.clickable{cursor:pointer}.taskText.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskTextOutsideLeft.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskTextOutsideRight.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskText0,.taskText1,.taskText2,.taskText3{fill:#323D47}.task0,.task1,.task2,.task3{fill:#BDD5EA;stroke:rgba(255,255,255,0.5)}.taskTextOutside0,.taskTextOutside2{fill:#d3d3d3}.taskTextOutside1,.taskTextOutside3{fill:#d3d3d3}.active0,.active1,.active2,.active3{fill:#81B1DB;stroke:rgba(255,255,255,0.5)}.activeText0,.activeText1,.activeText2,.activeText3{fill:#323D47 !important}.done0,.done1,.done2,.done3{stroke:grey;fill:#d3d3d3;stroke-width:2}.doneText0,.doneText1,.doneText2,.doneText3{fill:#323D47 !important}.crit0,.crit1,.crit2,.crit3{stroke:#E83737;fill:#E83737;stroke-width:2}.activeCrit0,.activeCrit1,.activeCrit2,.activeCrit3{stroke:#E83737;fill:#81B1DB;stroke-width:2}.doneCrit0,.doneCrit1,.doneCrit2,.doneCrit3{stroke:#E83737;fill:#d3d3d3;stroke-width:2;cursor:pointer;shape-rendering:crispEdges}.milestone{transform:rotate(45deg) scale(0.8, 0.8)}.milestoneText{font-style:italic}.doneCritText0,.doneCritText1,.doneCritText2,.doneCritText3{fill:#323D47 !important}.activeCritText0,.activeCritText1,.activeCritText2,.activeCritText3{fill:#323D47 !important}.titleText{text-anchor:middle;font-size:18px;fill:#323D47;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}g.classGroup text{fill:purple;stroke:none;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:10px}g.classGroup text .title{font-weight:bolder}g.clickable{cursor:pointer}g.classGroup rect{fill:#BDD5EA;stroke:purple}g.classGroup line{stroke:purple;stroke-width:1}.classLabel .box{stroke:none;stroke-width:0;fill:#BDD5EA;opacity:0.5}.classLabel 
.label{fill:purple;font-size:10px}.relation{stroke:purple;stroke-width:1;fill:none}.dashed-line{stroke-dasharray:3}#compositionStart{fill:purple;stroke:purple;stroke-width:1}#compositionEnd{fill:purple;stroke:purple;stroke-width:1}#aggregationStart{fill:#BDD5EA;stroke:purple;stroke-width:1}#aggregationEnd{fill:#BDD5EA;stroke:purple;stroke-width:1}#dependencyStart{fill:purple;stroke:purple;stroke-width:1}#dependencyEnd{fill:purple;stroke:purple;stroke-width:1}#extensionStart{fill:purple;stroke:purple;stroke-width:1}#extensionEnd{fill:purple;stroke:purple;stroke-width:1}.commit-id,.commit-msg,.branch-label{fill:lightgrey;color:lightgrey;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.pieTitleText{text-anchor:middle;font-size:25px;fill:#323D47;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.slice{font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}g.stateGroup text{fill:purple;stroke:none;font-size:10px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}g.stateGroup text{fill:purple;stroke:none;font-size:10px}g.stateGroup .state-title{font-weight:bolder;fill:#000}g.stateGroup rect{fill:#BDD5EA;stroke:purple}g.stateGroup line{stroke:purple;stroke-width:1}.transition{stroke:purple;stroke-width:1;fill:none}.stateGroup .composit{fill:white;border-bottom:1px}.stateGroup .alt-composit{fill:#e0e0e0;border-bottom:1px}.state-note{stroke:rgba(255,255,255,0.25);fill:#fff5ad}.state-note text{fill:black;stroke:none;font-size:10px}.stateLabel .box{stroke:none;stroke-width:0;fill:#BDD5EA;opacity:0.5}.stateLabel text{fill:#000;font-size:10px;font-weight:bold;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}:root{--mermaid-font-family: '\"trebuchet ms\", verdana, arial';--mermaid-font-family: \"Comic Sans MS\", \"Comic Sans\", cursive}\n",""])},function(t,e,n){var r=n(896);t.exports="string"==typeof 
r?r:r.toString()},function(t,e,n){(t.exports=n(185)(!1)).push([t.i,".label{font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);color:#333}.label text{fill:#333}.node rect,.node circle,.node ellipse,.node polygon,.node path{fill:#ECECFF;stroke:#9370db;stroke-width:1px}.node .label{text-align:center}.node.clickable{cursor:pointer}.arrowheadPath{fill:#333}.edgePath .path{stroke:#333;stroke-width:1.5px}.edgeLabel{background-color:#e8e8e8;text-align:center}.cluster rect{fill:#ffffde;stroke:#aa3;stroke-width:1px}.cluster text{fill:#333}div.mermaidTooltip{position:absolute;text-align:center;max-width:200px;padding:2px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:12px;background:#ffffde;border:1px solid #aa3;border-radius:2px;pointer-events:none;z-index:100}.actor{stroke:#ccf;fill:#ECECFF}text.actor{fill:#000;stroke:none}.actor-line{stroke:grey}.messageLine0{stroke-width:1.5;stroke-dasharray:'2 2';stroke:#333}.messageLine1{stroke-width:1.5;stroke-dasharray:'2 2';stroke:#333}#arrowhead{fill:#333}.sequenceNumber{fill:#fff}#sequencenumber{fill:#333}#crosshead path{fill:#333 !important;stroke:#333 !important}.messageText{fill:#333;stroke:none}.labelBox{stroke:#ccf;fill:#ECECFF}.labelText{fill:#000;stroke:none}.loopText{fill:#000;stroke:none}.loopLine{stroke-width:2;stroke-dasharray:'2 2';stroke:#ccf}.note{stroke:#aa3;fill:#fff5ad}.noteText{fill:black;stroke:none;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:14px}.activation0{fill:#f4f4f4;stroke:#666}.activation1{fill:#f4f4f4;stroke:#666}.activation2{fill:#f4f4f4;stroke:#666}.mermaid-main-font{font-family:\"trebuchet ms\", verdana, 
arial;font-family:var(--mermaid-font-family)}.section{stroke:none;opacity:0.2}.section0{fill:rgba(102,102,255,0.49)}.section2{fill:#fff400}.section1,.section3{fill:#fff;opacity:0.2}.sectionTitle0{fill:#333}.sectionTitle1{fill:#333}.sectionTitle2{fill:#333}.sectionTitle3{fill:#333}.sectionTitle{text-anchor:start;font-size:11px;text-height:14px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.grid .tick{stroke:#d3d3d3;opacity:0.8;shape-rendering:crispEdges}.grid .tick text{font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.grid path{stroke-width:0}.today{fill:none;stroke:red;stroke-width:2px}.task{stroke-width:2}.taskText{text-anchor:middle;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.taskText:not([font-size]){font-size:11px}.taskTextOutsideRight{fill:#000;text-anchor:start;font-size:11px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.taskTextOutsideLeft{fill:#000;text-anchor:end;font-size:11px}.task.clickable{cursor:pointer}.taskText.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskTextOutsideLeft.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskTextOutsideRight.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskText0,.taskText1,.taskText2,.taskText3{fill:#fff}.task0,.task1,.task2,.task3{fill:#8a90dd;stroke:#534fbc}.taskTextOutside0,.taskTextOutside2{fill:#000}.taskTextOutside1,.taskTextOutside3{fill:#000}.active0,.active1,.active2,.active3{fill:#bfc7ff;stroke:#534fbc}.activeText0,.activeText1,.activeText2,.activeText3{fill:#000 !important}.done0,.done1,.done2,.done3{stroke:grey;fill:#d3d3d3;stroke-width:2}.doneText0,.doneText1,.doneText2,.doneText3{fill:#000 
!important}.crit0,.crit1,.crit2,.crit3{stroke:#f88;fill:red;stroke-width:2}.activeCrit0,.activeCrit1,.activeCrit2,.activeCrit3{stroke:#f88;fill:#bfc7ff;stroke-width:2}.doneCrit0,.doneCrit1,.doneCrit2,.doneCrit3{stroke:#f88;fill:#d3d3d3;stroke-width:2;cursor:pointer;shape-rendering:crispEdges}.milestone{transform:rotate(45deg) scale(0.8, 0.8)}.milestoneText{font-style:italic}.doneCritText0,.doneCritText1,.doneCritText2,.doneCritText3{fill:#000 !important}.activeCritText0,.activeCritText1,.activeCritText2,.activeCritText3{fill:#000 !important}.titleText{text-anchor:middle;font-size:18px;fill:#000;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}g.classGroup text{fill:#9370db;stroke:none;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:10px}g.classGroup text .title{font-weight:bolder}g.clickable{cursor:pointer}g.classGroup rect{fill:#ECECFF;stroke:#9370db}g.classGroup line{stroke:#9370db;stroke-width:1}.classLabel .box{stroke:none;stroke-width:0;fill:#ECECFF;opacity:0.5}.classLabel .label{fill:#9370db;font-size:10px}.relation{stroke:#9370db;stroke-width:1;fill:none}.dashed-line{stroke-dasharray:3}#compositionStart{fill:#9370db;stroke:#9370db;stroke-width:1}#compositionEnd{fill:#9370db;stroke:#9370db;stroke-width:1}#aggregationStart{fill:#ECECFF;stroke:#9370db;stroke-width:1}#aggregationEnd{fill:#ECECFF;stroke:#9370db;stroke-width:1}#dependencyStart{fill:#9370db;stroke:#9370db;stroke-width:1}#dependencyEnd{fill:#9370db;stroke:#9370db;stroke-width:1}#extensionStart{fill:#9370db;stroke:#9370db;stroke-width:1}#extensionEnd{fill:#9370db;stroke:#9370db;stroke-width:1}.commit-id,.commit-msg,.branch-label{fill:lightgrey;color:lightgrey;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.pieTitleText{text-anchor:middle;font-size:25px;fill:#000;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.slice{font-family:'trebuchet ms', verdana, 
arial;font-family:var(--mermaid-font-family)}g.stateGroup text{fill:#9370db;stroke:none;font-size:10px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}g.stateGroup text{fill:#9370db;stroke:none;font-size:10px}g.stateGroup .state-title{font-weight:bolder;fill:#000}g.stateGroup rect{fill:#ECECFF;stroke:#9370db}g.stateGroup line{stroke:#9370db;stroke-width:1}.transition{stroke:#9370db;stroke-width:1;fill:none}.stateGroup .composit{fill:white;border-bottom:1px}.stateGroup .alt-composit{fill:#e0e0e0;border-bottom:1px}.state-note{stroke:#aa3;fill:#fff5ad}.state-note text{fill:black;stroke:none;font-size:10px}.stateLabel .box{stroke:none;stroke-width:0;fill:#ECECFF;opacity:0.5}.stateLabel text{fill:#000;font-size:10px;font-weight:bold;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}:root{--mermaid-font-family: '\"trebuchet ms\", verdana, arial';--mermaid-font-family: \"Comic Sans MS\", \"Comic Sans\", cursive}\n",""])},function(t,e,n){var r=n(898);t.exports="string"==typeof r?r:r.toString()},function(t,e,n){(t.exports=n(185)(!1)).push([t.i,".label{font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);color:#333}.label text{fill:#333}.node rect,.node circle,.node ellipse,.node polygon,.node path{fill:#cde498;stroke:#13540c;stroke-width:1px}.node .label{text-align:center}.node.clickable{cursor:pointer}.arrowheadPath{fill:green}.edgePath .path{stroke:green;stroke-width:1.5px}.edgeLabel{background-color:#e8e8e8;text-align:center}.cluster rect{fill:#cdffb2;stroke:#6eaa49;stroke-width:1px}.cluster text{fill:#333}div.mermaidTooltip{position:absolute;text-align:center;max-width:200px;padding:2px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:12px;background:#cdffb2;border:1px solid 
#6eaa49;border-radius:2px;pointer-events:none;z-index:100}.actor{stroke:#13540c;fill:#cde498}text.actor{fill:#000;stroke:none}.actor-line{stroke:grey}.messageLine0{stroke-width:1.5;stroke-dasharray:'2 2';stroke:#333}.messageLine1{stroke-width:1.5;stroke-dasharray:'2 2';stroke:#333}#arrowhead{fill:#333}.sequenceNumber{fill:#fff}#sequencenumber{fill:#333}#crosshead path{fill:#333 !important;stroke:#333 !important}.messageText{fill:#333;stroke:none}.labelBox{stroke:#326932;fill:#cde498}.labelText{fill:#000;stroke:none}.loopText{fill:#000;stroke:none}.loopLine{stroke-width:2;stroke-dasharray:'2 2';stroke:#326932}.note{stroke:#6eaa49;fill:#fff5ad}.noteText{fill:black;stroke:none;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:14px}.activation0{fill:#f4f4f4;stroke:#666}.activation1{fill:#f4f4f4;stroke:#666}.activation2{fill:#f4f4f4;stroke:#666}.mermaid-main-font{font-family:\"trebuchet ms\", verdana, arial;font-family:var(--mermaid-font-family)}.section{stroke:none;opacity:0.2}.section0{fill:#6eaa49}.section2{fill:#6eaa49}.section1,.section3{fill:#fff;opacity:0.2}.sectionTitle0{fill:#333}.sectionTitle1{fill:#333}.sectionTitle2{fill:#333}.sectionTitle3{fill:#333}.sectionTitle{text-anchor:start;font-size:11px;text-height:14px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.grid .tick{stroke:#d3d3d3;opacity:0.8;shape-rendering:crispEdges}.grid .tick text{font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.grid path{stroke-width:0}.today{fill:none;stroke:red;stroke-width:2px}.task{stroke-width:2}.taskText{text-anchor:middle;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.taskText:not([font-size]){font-size:11px}.taskTextOutsideRight{fill:#000;text-anchor:start;font-size:11px;font-family:'trebuchet ms', verdana, 
arial;font-family:var(--mermaid-font-family)}.taskTextOutsideLeft{fill:#000;text-anchor:end;font-size:11px}.task.clickable{cursor:pointer}.taskText.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskTextOutsideLeft.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskTextOutsideRight.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskText0,.taskText1,.taskText2,.taskText3{fill:#fff}.task0,.task1,.task2,.task3{fill:#487e3a;stroke:#13540c}.taskTextOutside0,.taskTextOutside2{fill:#000}.taskTextOutside1,.taskTextOutside3{fill:#000}.active0,.active1,.active2,.active3{fill:#cde498;stroke:#13540c}.activeText0,.activeText1,.activeText2,.activeText3{fill:#000 !important}.done0,.done1,.done2,.done3{stroke:grey;fill:#d3d3d3;stroke-width:2}.doneText0,.doneText1,.doneText2,.doneText3{fill:#000 !important}.crit0,.crit1,.crit2,.crit3{stroke:#f88;fill:red;stroke-width:2}.activeCrit0,.activeCrit1,.activeCrit2,.activeCrit3{stroke:#f88;fill:#cde498;stroke-width:2}.doneCrit0,.doneCrit1,.doneCrit2,.doneCrit3{stroke:#f88;fill:#d3d3d3;stroke-width:2;cursor:pointer;shape-rendering:crispEdges}.milestone{transform:rotate(45deg) scale(0.8, 0.8)}.milestoneText{font-style:italic}.doneCritText0,.doneCritText1,.doneCritText2,.doneCritText3{fill:#000 !important}.activeCritText0,.activeCritText1,.activeCritText2,.activeCritText3{fill:#000 !important}.titleText{text-anchor:middle;font-size:18px;fill:#000;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}g.classGroup text{fill:#13540c;stroke:none;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:10px}g.classGroup text .title{font-weight:bolder}g.clickable{cursor:pointer}g.classGroup rect{fill:#cde498;stroke:#13540c}g.classGroup line{stroke:#13540c;stroke-width:1}.classLabel .box{stroke:none;stroke-width:0;fill:#cde498;opacity:0.5}.classLabel 
.label{fill:#13540c;font-size:10px}.relation{stroke:#13540c;stroke-width:1;fill:none}.dashed-line{stroke-dasharray:3}#compositionStart{fill:#13540c;stroke:#13540c;stroke-width:1}#compositionEnd{fill:#13540c;stroke:#13540c;stroke-width:1}#aggregationStart{fill:#cde498;stroke:#13540c;stroke-width:1}#aggregationEnd{fill:#cde498;stroke:#13540c;stroke-width:1}#dependencyStart{fill:#13540c;stroke:#13540c;stroke-width:1}#dependencyEnd{fill:#13540c;stroke:#13540c;stroke-width:1}#extensionStart{fill:#13540c;stroke:#13540c;stroke-width:1}#extensionEnd{fill:#13540c;stroke:#13540c;stroke-width:1}.commit-id,.commit-msg,.branch-label{fill:lightgrey;color:lightgrey;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.pieTitleText{text-anchor:middle;font-size:25px;fill:#000;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.slice{font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}g.stateGroup text{fill:#13540c;stroke:none;font-size:10px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}g.stateGroup text{fill:#13540c;stroke:none;font-size:10px}g.stateGroup .state-title{font-weight:bolder;fill:#000}g.stateGroup rect{fill:#cde498;stroke:#13540c}g.stateGroup line{stroke:#13540c;stroke-width:1}.transition{stroke:#13540c;stroke-width:1;fill:none}.stateGroup .composit{fill:white;border-bottom:1px}.stateGroup .alt-composit{fill:#e0e0e0;border-bottom:1px}.state-note{stroke:#6eaa49;fill:#fff5ad}.state-note text{fill:black;stroke:none;font-size:10px}.stateLabel .box{stroke:none;stroke-width:0;fill:#cde498;opacity:0.5}.stateLabel text{fill:#000;font-size:10px;font-weight:bold;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}:root{--mermaid-font-family: '\"trebuchet ms\", verdana, arial';--mermaid-font-family: \"Comic Sans MS\", \"Comic Sans\", cursive}\n",""])},function(t,e,n){var r=n(900);t.exports="string"==typeof 
r?r:r.toString()},function(t,e,n){(t.exports=n(185)(!1)).push([t.i,".label{font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);color:#333}.label text{fill:#333}.node rect,.node circle,.node ellipse,.node polygon,.node path{fill:#eee;stroke:#999;stroke-width:1px}.node .label{text-align:center}.node.clickable{cursor:pointer}.arrowheadPath{fill:#333}.edgePath .path{stroke:#666;stroke-width:1.5px}.edgeLabel{background-color:#fff;text-align:center}.cluster rect{fill:#eaf2fb;stroke:#26a;stroke-width:1px}.cluster text{fill:#333}div.mermaidTooltip{position:absolute;text-align:center;max-width:200px;padding:2px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:12px;background:#eaf2fb;border:1px solid #26a;border-radius:2px;pointer-events:none;z-index:100}.actor{stroke:#999;fill:#eee}text.actor{fill:#333;stroke:none}.actor-line{stroke:#666}.messageLine0{stroke-width:1.5;stroke-dasharray:'2 2';stroke:#333}.messageLine1{stroke-width:1.5;stroke-dasharray:'2 2';stroke:#333}#arrowhead{fill:#333}.sequenceNumber{fill:#fff}#sequencenumber{fill:#333}#crosshead path{fill:#333 !important;stroke:#333 !important}.messageText{fill:#333;stroke:none}.labelBox{stroke:#999;fill:#eee}.labelText{fill:#333;stroke:none}.loopText{fill:#333;stroke:none}.loopLine{stroke-width:2;stroke-dasharray:'2 2';stroke:#999}.note{stroke:#770;fill:#ffa}.noteText{fill:black;stroke:none;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:14px}.activation0{fill:#f4f4f4;stroke:#666}.activation1{fill:#f4f4f4;stroke:#666}.activation2{fill:#f4f4f4;stroke:#666}.mermaid-main-font{font-family:\"trebuchet ms\", verdana, 
arial;font-family:var(--mermaid-font-family)}.section{stroke:none;opacity:0.2}.section0{fill:#80b3e6}.section2{fill:#80b3e6}.section1,.section3{fill:#fff;opacity:0.2}.sectionTitle0{fill:#333}.sectionTitle1{fill:#333}.sectionTitle2{fill:#333}.sectionTitle3{fill:#333}.sectionTitle{text-anchor:start;font-size:11px;text-height:14px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.grid .tick{stroke:#e6e6e6;opacity:0.8;shape-rendering:crispEdges}.grid .tick text{font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.grid path{stroke-width:0}.today{fill:none;stroke:#d42;stroke-width:2px}.task{stroke-width:2}.taskText{text-anchor:middle;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.taskText:not([font-size]){font-size:11px}.taskTextOutsideRight{fill:#333;text-anchor:start;font-size:11px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.taskTextOutsideLeft{fill:#333;text-anchor:end;font-size:11px}.task.clickable{cursor:pointer}.taskText.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskTextOutsideLeft.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskTextOutsideRight.clickable{cursor:pointer;fill:#003163 !important;font-weight:bold}.taskText0,.taskText1,.taskText2,.taskText3{fill:#fff}.task0,.task1,.task2,.task3{fill:#26a;stroke:#1a4d80}.taskTextOutside0,.taskTextOutside2{fill:#333}.taskTextOutside1,.taskTextOutside3{fill:#333}.active0,.active1,.active2,.active3{fill:#eee;stroke:#1a4d80}.activeText0,.activeText1,.activeText2,.activeText3{fill:#333 !important}.done0,.done1,.done2,.done3{stroke:#666;fill:#bbb;stroke-width:2}.doneText0,.doneText1,.doneText2,.doneText3{fill:#333 
!important}.crit0,.crit1,.crit2,.crit3{stroke:#b1361b;fill:#d42;stroke-width:2}.activeCrit0,.activeCrit1,.activeCrit2,.activeCrit3{stroke:#b1361b;fill:#eee;stroke-width:2}.doneCrit0,.doneCrit1,.doneCrit2,.doneCrit3{stroke:#b1361b;fill:#bbb;stroke-width:2;cursor:pointer;shape-rendering:crispEdges}.milestone{transform:rotate(45deg) scale(0.8, 0.8)}.milestoneText{font-style:italic}.doneCritText0,.doneCritText1,.doneCritText2,.doneCritText3{fill:#333 !important}.activeCritText0,.activeCritText1,.activeCritText2,.activeCritText3{fill:#333 !important}.titleText{text-anchor:middle;font-size:18px;fill:#333;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}g.classGroup text{fill:#999;stroke:none;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family);font-size:10px}g.classGroup text .title{font-weight:bolder}g.clickable{cursor:pointer}g.classGroup rect{fill:#eee;stroke:#999}g.classGroup line{stroke:#999;stroke-width:1}.classLabel .box{stroke:none;stroke-width:0;fill:#eee;opacity:0.5}.classLabel .label{fill:#999;font-size:10px}.relation{stroke:#999;stroke-width:1;fill:none}.dashed-line{stroke-dasharray:3}#compositionStart{fill:#999;stroke:#999;stroke-width:1}#compositionEnd{fill:#999;stroke:#999;stroke-width:1}#aggregationStart{fill:#eee;stroke:#999;stroke-width:1}#aggregationEnd{fill:#eee;stroke:#999;stroke-width:1}#dependencyStart{fill:#999;stroke:#999;stroke-width:1}#dependencyEnd{fill:#999;stroke:#999;stroke-width:1}#extensionStart{fill:#999;stroke:#999;stroke-width:1}#extensionEnd{fill:#999;stroke:#999;stroke-width:1}.commit-id,.commit-msg,.branch-label{fill:lightgrey;color:lightgrey;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.pieTitleText{text-anchor:middle;font-size:25px;fill:#333;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}.slice{font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}g.stateGroup 
text{fill:#999;stroke:none;font-size:10px;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}g.stateGroup text{fill:#999;stroke:none;font-size:10px}g.stateGroup .state-title{font-weight:bolder;fill:#000}g.stateGroup rect{fill:#eee;stroke:#999}g.stateGroup line{stroke:#999;stroke-width:1}.transition{stroke:#999;stroke-width:1;fill:none}.stateGroup .composit{fill:white;border-bottom:1px}.stateGroup .alt-composit{fill:#e0e0e0;border-bottom:1px}.state-note{stroke:#770;fill:#ffa}.state-note text{fill:black;stroke:none;font-size:10px}.stateLabel .box{stroke:none;stroke-width:0;fill:#eee;opacity:0.5}.stateLabel text{fill:#000;font-size:10px;font-weight:bold;font-family:'trebuchet ms', verdana, arial;font-family:var(--mermaid-font-family)}:root{--mermaid-font-family: '\"trebuchet ms\", verdana, arial';--mermaid-font-family: \"Comic Sans MS\", \"Comic Sans\", cursive}\n",""])},function(t,e,n){"use strict";n.r(e);var r={};n.r(r),n.d(r,"version",(function(){return a})),n.d(r,"bisect",(function(){return u.b})),n.d(r,"bisectRight",(function(){return u.d})),n.d(r,"bisectLeft",(function(){return u.c})),n.d(r,"ascending",(function(){return u.a})),n.d(r,"bisector",(function(){return u.e})),n.d(r,"cross",(function(){return u.f})),n.d(r,"descending",(function(){return u.g})),n.d(r,"deviation",(function(){return u.h})),n.d(r,"extent",(function(){return u.i})),n.d(r,"histogram",(function(){return u.j})),n.d(r,"thresholdFreedmanDiaconis",(function(){return u.w})),n.d(r,"thresholdScott",(function(){return u.x})),n.d(r,"thresholdSturges",(function(){return u.y})),n.d(r,"max",(function(){return u.k})),n.d(r,"mean",(function(){return u.l})),n.d(r,"median",(function(){return u.m})),n.d(r,"merge",(function(){return u.n})),n.d(r,"min",(function(){return u.o})),n.d(r,"pairs",(function(){return u.p})),n.d(r,"permute",(function(){return u.q})),n.d(r,"quantile",(function(){return u.r})),n.d(r,"range",(function(){return u.s})),n.d(r,"scan",(function(){return 
u.t})),n.d(r,"shuffle",(function(){return u.u})),n.d(r,"sum",(function(){return u.v})),n.d(r,"ticks",(function(){return u.B})),n.d(r,"tickIncrement",(function(){return u.z})),n.d(r,"tickStep",(function(){return u.A})),n.d(r,"transpose",(function(){return u.C})),n.d(r,"variance",(function(){return u.D})),n.d(r,"zip",(function(){return u.E})),n.d(r,"axisTop",(function(){return s.d})),n.d(r,"axisRight",(function(){return s.c})),n.d(r,"axisBottom",(function(){return s.a})),n.d(r,"axisLeft",(function(){return s.b})),n.d(r,"brush",(function(){return c.a})),n.d(r,"brushX",(function(){return c.c})),n.d(r,"brushY",(function(){return c.d})),n.d(r,"brushSelection",(function(){return c.b})),n.d(r,"chord",(function(){return f.a})),n.d(r,"ribbon",(function(){return f.b})),n.d(r,"nest",(function(){return l.d})),n.d(r,"set",(function(){return l.e})),n.d(r,"map",(function(){return l.c})),n.d(r,"keys",(function(){return l.b})),n.d(r,"values",(function(){return l.f})),n.d(r,"entries",(function(){return l.a})),n.d(r,"color",(function(){return h.a})),n.d(r,"rgb",(function(){return h.h})),n.d(r,"hsl",(function(){return h.e})),n.d(r,"lab",(function(){return h.f})),n.d(r,"hcl",(function(){return h.d})),n.d(r,"lch",(function(){return h.g})),n.d(r,"gray",(function(){return h.c})),n.d(r,"cubehelix",(function(){return h.b})),n.d(r,"contours",(function(){return d.b})),n.d(r,"contourDensity",(function(){return d.a})),n.d(r,"dispatch",(function(){return p.a})),n.d(r,"drag",(function(){return g.a})),n.d(r,"dragDisable",(function(){return g.b})),n.d(r,"dragEnable",(function(){return g.c})),n.d(r,"dsvFormat",(function(){return y.i})),n.d(r,"csvParse",(function(){return y.g})),n.d(r,"csvParseRows",(function(){return y.h})),n.d(r,"csvFormat",(function(){return y.b})),n.d(r,"csvFormatBody",(function(){return y.c})),n.d(r,"csvFormatRows",(function(){return y.e})),n.d(r,"csvFormatRow",(function(){return y.d})),n.d(r,"csvFormatValue",(function(){return y.f})),n.d(r,"tsvParse",(function(){return 
y.o})),n.d(r,"tsvParseRows",(function(){return y.p})),n.d(r,"tsvFormat",(function(){return y.j})),n.d(r,"tsvFormatBody",(function(){return y.k})),n.d(r,"tsvFormatRows",(function(){return y.m})),n.d(r,"tsvFormatRow",(function(){return y.l})),n.d(r,"tsvFormatValue",(function(){return y.n})),n.d(r,"autoType",(function(){return y.a})),n.d(r,"easeLinear",(function(){return b.y})),n.d(r,"easeQuad",(function(){return b.D})),n.d(r,"easeQuadIn",(function(){return b.E})),n.d(r,"easeQuadOut",(function(){return b.G})),n.d(r,"easeQuadInOut",(function(){return b.F})),n.d(r,"easeCubic",(function(){return b.m})),n.d(r,"easeCubicIn",(function(){return b.n})),n.d(r,"easeCubicOut",(function(){return b.p})),n.d(r,"easeCubicInOut",(function(){return b.o})),n.d(r,"easePoly",(function(){return b.z})),n.d(r,"easePolyIn",(function(){return b.A})),n.d(r,"easePolyOut",(function(){return b.C})),n.d(r,"easePolyInOut",(function(){return b.B})),n.d(r,"easeSin",(function(){return b.H})),n.d(r,"easeSinIn",(function(){return b.I})),n.d(r,"easeSinOut",(function(){return b.K})),n.d(r,"easeSinInOut",(function(){return b.J})),n.d(r,"easeExp",(function(){return b.u})),n.d(r,"easeExpIn",(function(){return b.v})),n.d(r,"easeExpOut",(function(){return b.x})),n.d(r,"easeExpInOut",(function(){return b.w})),n.d(r,"easeCircle",(function(){return b.i})),n.d(r,"easeCircleIn",(function(){return b.j})),n.d(r,"easeCircleOut",(function(){return b.l})),n.d(r,"easeCircleInOut",(function(){return b.k})),n.d(r,"easeBounce",(function(){return b.e})),n.d(r,"easeBounceIn",(function(){return b.f})),n.d(r,"easeBounceOut",(function(){return b.h})),n.d(r,"easeBounceInOut",(function(){return b.g})),n.d(r,"easeBack",(function(){return b.a})),n.d(r,"easeBackIn",(function(){return b.b})),n.d(r,"easeBackOut",(function(){return b.d})),n.d(r,"easeBackInOut",(function(){return b.c})),n.d(r,"easeElastic",(function(){return b.q})),n.d(r,"easeElasticIn",(function(){return b.r})),n.d(r,"easeElasticOut",(function(){return 
b.t})),n.d(r,"easeElasticInOut",(function(){return b.s})),n.d(r,"blob",(function(){return v.a})),n.d(r,"buffer",(function(){return v.b})),n.d(r,"dsv",(function(){return v.d})),n.d(r,"csv",(function(){return v.c})),n.d(r,"tsv",(function(){return v.j})),n.d(r,"image",(function(){return v.f})),n.d(r,"json",(function(){return v.g})),n.d(r,"text",(function(){return v.i})),n.d(r,"xml",(function(){return v.k})),n.d(r,"html",(function(){return v.e})),n.d(r,"svg",(function(){return v.h})),n.d(r,"forceCenter",(function(){return m.a})),n.d(r,"forceCollide",(function(){return m.b})),n.d(r,"forceLink",(function(){return m.c})),n.d(r,"forceManyBody",(function(){return m.d})),n.d(r,"forceRadial",(function(){return m.e})),n.d(r,"forceSimulation",(function(){return m.f})),n.d(r,"forceX",(function(){return m.g})),n.d(r,"forceY",(function(){return m.h})),n.d(r,"formatDefaultLocale",(function(){return _.c})),n.d(r,"format",(function(){return _.b})),n.d(r,"formatPrefix",(function(){return _.e})),n.d(r,"formatLocale",(function(){return _.d})),n.d(r,"formatSpecifier",(function(){return _.f})),n.d(r,"FormatSpecifier",(function(){return _.a})),n.d(r,"precisionFixed",(function(){return _.g})),n.d(r,"precisionPrefix",(function(){return _.h})),n.d(r,"precisionRound",(function(){return _.i})),n.d(r,"geoArea",(function(){return w.c})),n.d(r,"geoBounds",(function(){return w.h})),n.d(r,"geoCentroid",(function(){return w.i})),n.d(r,"geoCircle",(function(){return w.j})),n.d(r,"geoClipAntimeridian",(function(){return w.k})),n.d(r,"geoClipCircle",(function(){return w.l})),n.d(r,"geoClipExtent",(function(){return w.m})),n.d(r,"geoClipRectangle",(function(){return w.n})),n.d(r,"geoContains",(function(){return w.u})),n.d(r,"geoDistance",(function(){return w.v})),n.d(r,"geoGraticule",(function(){return w.C})),n.d(r,"geoGraticule10",(function(){return w.D})),n.d(r,"geoInterpolate",(function(){return w.F})),n.d(r,"geoLength",(function(){return w.G})),n.d(r,"geoPath",(function(){return 
w.N})),n.d(r,"geoAlbers",(function(){return w.a})),n.d(r,"geoAlbersUsa",(function(){return w.b})),n.d(r,"geoAzimuthalEqualArea",(function(){return w.d})),n.d(r,"geoAzimuthalEqualAreaRaw",(function(){return w.e})),n.d(r,"geoAzimuthalEquidistant",(function(){return w.f})),n.d(r,"geoAzimuthalEquidistantRaw",(function(){return w.g})),n.d(r,"geoConicConformal",(function(){return w.o})),n.d(r,"geoConicConformalRaw",(function(){return w.p})),n.d(r,"geoConicEqualArea",(function(){return w.q})),n.d(r,"geoConicEqualAreaRaw",(function(){return w.r})),n.d(r,"geoConicEquidistant",(function(){return w.s})),n.d(r,"geoConicEquidistantRaw",(function(){return w.t})),n.d(r,"geoEqualEarth",(function(){return w.w})),n.d(r,"geoEqualEarthRaw",(function(){return w.x})),n.d(r,"geoEquirectangular",(function(){return w.y})),n.d(r,"geoEquirectangularRaw",(function(){return w.z})),n.d(r,"geoGnomonic",(function(){return w.A})),n.d(r,"geoGnomonicRaw",(function(){return w.B})),n.d(r,"geoIdentity",(function(){return w.E})),n.d(r,"geoProjection",(function(){return w.O})),n.d(r,"geoProjectionMutator",(function(){return w.P})),n.d(r,"geoMercator",(function(){return w.H})),n.d(r,"geoMercatorRaw",(function(){return w.I})),n.d(r,"geoNaturalEarth1",(function(){return w.J})),n.d(r,"geoNaturalEarth1Raw",(function(){return w.K})),n.d(r,"geoOrthographic",(function(){return w.L})),n.d(r,"geoOrthographicRaw",(function(){return w.M})),n.d(r,"geoStereographic",(function(){return w.R})),n.d(r,"geoStereographicRaw",(function(){return w.S})),n.d(r,"geoTransverseMercator",(function(){return w.V})),n.d(r,"geoTransverseMercatorRaw",(function(){return w.W})),n.d(r,"geoRotation",(function(){return w.Q})),n.d(r,"geoStream",(function(){return w.T})),n.d(r,"geoTransform",(function(){return w.U})),n.d(r,"cluster",(function(){return x.a})),n.d(r,"hierarchy",(function(){return x.b})),n.d(r,"pack",(function(){return x.c})),n.d(r,"packSiblings",(function(){return x.e})),n.d(r,"packEnclose",(function(){return 
x.d})),n.d(r,"partition",(function(){return x.f})),n.d(r,"stratify",(function(){return x.g})),n.d(r,"tree",(function(){return x.h})),n.d(r,"treemap",(function(){return x.i})),n.d(r,"treemapBinary",(function(){return x.j})),n.d(r,"treemapDice",(function(){return x.k})),n.d(r,"treemapSlice",(function(){return x.m})),n.d(r,"treemapSliceDice",(function(){return x.n})),n.d(r,"treemapSquarify",(function(){return x.o})),n.d(r,"treemapResquarify",(function(){return x.l})),n.d(r,"interpolate",(function(){return k.a})),n.d(r,"interpolateArray",(function(){return k.b})),n.d(r,"interpolateBasis",(function(){return k.c})),n.d(r,"interpolateBasisClosed",(function(){return k.d})),n.d(r,"interpolateDate",(function(){return k.g})),n.d(r,"interpolateDiscrete",(function(){return k.h})),n.d(r,"interpolateHue",(function(){return k.m})),n.d(r,"interpolateNumber",(function(){return k.o})),n.d(r,"interpolateNumberArray",(function(){return k.p})),n.d(r,"interpolateObject",(function(){return k.q})),n.d(r,"interpolateRound",(function(){return k.u})),n.d(r,"interpolateString",(function(){return k.v})),n.d(r,"interpolateTransformCss",(function(){return k.w})),n.d(r,"interpolateTransformSvg",(function(){return k.x})),n.d(r,"interpolateZoom",(function(){return k.y})),n.d(r,"interpolateRgb",(function(){return k.r})),n.d(r,"interpolateRgbBasis",(function(){return k.s})),n.d(r,"interpolateRgbBasisClosed",(function(){return k.t})),n.d(r,"interpolateHsl",(function(){return k.k})),n.d(r,"interpolateHslLong",(function(){return k.l})),n.d(r,"interpolateLab",(function(){return k.n})),n.d(r,"interpolateHcl",(function(){return k.i})),n.d(r,"interpolateHclLong",(function(){return k.j})),n.d(r,"interpolateCubehelix",(function(){return k.e})),n.d(r,"interpolateCubehelixLong",(function(){return k.f})),n.d(r,"piecewise",(function(){return k.z})),n.d(r,"quantize",(function(){return k.A})),n.d(r,"path",(function(){return E.a})),n.d(r,"polygonArea",(function(){return 
A.a})),n.d(r,"polygonCentroid",(function(){return A.b})),n.d(r,"polygonHull",(function(){return A.d})),n.d(r,"polygonContains",(function(){return A.c})),n.d(r,"polygonLength",(function(){return A.e})),n.d(r,"quadtree",(function(){return S.a})),n.d(r,"randomUniform",(function(){return M.f})),n.d(r,"randomNormal",(function(){return M.e})),n.d(r,"randomLogNormal",(function(){return M.d})),n.d(r,"randomBates",(function(){return M.a})),n.d(r,"randomIrwinHall",(function(){return M.c})),n.d(r,"randomExponential",(function(){return M.b})),n.d(r,"scaleBand",(function(){return T.a})),n.d(r,"scalePoint",(function(){return T.l})),n.d(r,"scaleIdentity",(function(){return T.g})),n.d(r,"scaleLinear",(function(){return T.i})),n.d(r,"scaleLog",(function(){return T.j})),n.d(r,"scaleSymlog",(function(){return T.w})),n.d(r,"scaleOrdinal",(function(){return T.k})),n.d(r,"scaleImplicit",(function(){return T.h})),n.d(r,"scalePow",(function(){return T.m})),n.d(r,"scaleSqrt",(function(){return T.v})),n.d(r,"scaleQuantile",(function(){return T.n})),n.d(r,"scaleQuantize",(function(){return T.o})),n.d(r,"scaleThreshold",(function(){return T.x})),n.d(r,"scaleTime",(function(){return T.y})),n.d(r,"scaleUtc",(function(){return T.z})),n.d(r,"scaleSequential",(function(){return T.p})),n.d(r,"scaleSequentialLog",(function(){return T.q})),n.d(r,"scaleSequentialPow",(function(){return T.r})),n.d(r,"scaleSequentialSqrt",(function(){return T.t})),n.d(r,"scaleSequentialSymlog",(function(){return T.u})),n.d(r,"scaleSequentialQuantile",(function(){return T.s})),n.d(r,"scaleDiverging",(function(){return T.b})),n.d(r,"scaleDivergingLog",(function(){return T.c})),n.d(r,"scaleDivergingPow",(function(){return T.d})),n.d(r,"scaleDivergingSqrt",(function(){return T.e})),n.d(r,"scaleDivergingSymlog",(function(){return T.f})),n.d(r,"tickFormat",(function(){return T.A})),n.d(r,"schemeCategory10",(function(){return O.R})),n.d(r,"schemeAccent",(function(){return O.M})),n.d(r,"schemeDark2",(function(){return 
O.S})),n.d(r,"schemePaired",(function(){return O.Z})),n.d(r,"schemePastel1",(function(){return O.ab})),n.d(r,"schemePastel2",(function(){return O.bb})),n.d(r,"schemeSet1",(function(){return O.ob})),n.d(r,"schemeSet2",(function(){return O.pb})),n.d(r,"schemeSet3",(function(){return O.qb})),n.d(r,"schemeTableau10",(function(){return O.sb})),n.d(r,"interpolateBrBG",(function(){return O.b})),n.d(r,"schemeBrBG",(function(){return O.O})),n.d(r,"interpolatePRGn",(function(){return O.o})),n.d(r,"schemePRGn",(function(){return O.Y})),n.d(r,"interpolatePiYG",(function(){return O.p})),n.d(r,"schemePiYG",(function(){return O.cb})),n.d(r,"interpolatePuOr",(function(){return O.t})),n.d(r,"schemePuOr",(function(){return O.fb})),n.d(r,"interpolateRdBu",(function(){return O.x})),n.d(r,"schemeRdBu",(function(){return O.ib})),n.d(r,"interpolateRdGy",(function(){return O.y})),n.d(r,"schemeRdGy",(function(){return O.jb})),n.d(r,"interpolateRdYlBu",(function(){return O.A})),n.d(r,"schemeRdYlBu",(function(){return O.lb})),n.d(r,"interpolateRdYlGn",(function(){return O.B})),n.d(r,"schemeRdYlGn",(function(){return O.mb})),n.d(r,"interpolateSpectral",(function(){return O.E})),n.d(r,"schemeSpectral",(function(){return O.rb})),n.d(r,"interpolateBuGn",(function(){return O.c})),n.d(r,"schemeBuGn",(function(){return O.P})),n.d(r,"interpolateBuPu",(function(){return O.d})),n.d(r,"schemeBuPu",(function(){return O.Q})),n.d(r,"interpolateGnBu",(function(){return O.h})),n.d(r,"schemeGnBu",(function(){return O.T})),n.d(r,"interpolateOrRd",(function(){return O.m})),n.d(r,"schemeOrRd",(function(){return O.W})),n.d(r,"interpolatePuBuGn",(function(){return O.s})),n.d(r,"schemePuBuGn",(function(){return O.eb})),n.d(r,"interpolatePuBu",(function(){return O.r})),n.d(r,"schemePuBu",(function(){return O.db})),n.d(r,"interpolatePuRd",(function(){return O.u})),n.d(r,"schemePuRd",(function(){return O.gb})),n.d(r,"interpolateRdPu",(function(){return O.z})),n.d(r,"schemeRdPu",(function(){return 
O.kb})),n.d(r,"interpolateYlGnBu",(function(){return O.J})),n.d(r,"schemeYlGnBu",(function(){return O.ub})),n.d(r,"interpolateYlGn",(function(){return O.I})),n.d(r,"schemeYlGn",(function(){return O.tb})),n.d(r,"interpolateYlOrBr",(function(){return O.K})),n.d(r,"schemeYlOrBr",(function(){return O.vb})),n.d(r,"interpolateYlOrRd",(function(){return O.L})),n.d(r,"schemeYlOrRd",(function(){return O.wb})),n.d(r,"interpolateBlues",(function(){return O.a})),n.d(r,"schemeBlues",(function(){return O.N})),n.d(r,"interpolateGreens",(function(){return O.i})),n.d(r,"schemeGreens",(function(){return O.U})),n.d(r,"interpolateGreys",(function(){return O.j})),n.d(r,"schemeGreys",(function(){return O.V})),n.d(r,"interpolatePurples",(function(){return O.v})),n.d(r,"schemePurples",(function(){return O.hb})),n.d(r,"interpolateReds",(function(){return O.C})),n.d(r,"schemeReds",(function(){return O.nb})),n.d(r,"interpolateOranges",(function(){return O.n})),n.d(r,"schemeOranges",(function(){return O.X})),n.d(r,"interpolateCividis",(function(){return O.e})),n.d(r,"interpolateCubehelixDefault",(function(){return O.g})),n.d(r,"interpolateRainbow",(function(){return O.w})),n.d(r,"interpolateWarm",(function(){return O.H})),n.d(r,"interpolateCool",(function(){return O.f})),n.d(r,"interpolateSinebow",(function(){return O.D})),n.d(r,"interpolateTurbo",(function(){return O.F})),n.d(r,"interpolateViridis",(function(){return O.G})),n.d(r,"interpolateMagma",(function(){return O.l})),n.d(r,"interpolateInferno",(function(){return O.k})),n.d(r,"interpolatePlasma",(function(){return O.q})),n.d(r,"create",(function(){return D.b})),n.d(r,"creator",(function(){return D.c})),n.d(r,"local",(function(){return D.f})),n.d(r,"matcher",(function(){return D.g})),n.d(r,"mouse",(function(){return D.h})),n.d(r,"namespace",(function(){return D.i})),n.d(r,"namespaces",(function(){return D.j})),n.d(r,"clientPoint",(function(){return D.a})),n.d(r,"select",(function(){return D.k})),n.d(r,"selectAll",(function(){return 
D.l})),n.d(r,"selection",(function(){return D.m})),n.d(r,"selector",(function(){return D.n})),n.d(r,"selectorAll",(function(){return D.o})),n.d(r,"style",(function(){return D.p})),n.d(r,"touch",(function(){return D.q})),n.d(r,"touches",(function(){return D.r})),n.d(r,"window",(function(){return D.s})),n.d(r,"event",(function(){return D.e})),n.d(r,"customEvent",(function(){return D.d})),n.d(r,"arc",(function(){return C.a})),n.d(r,"area",(function(){return C.b})),n.d(r,"line",(function(){return C.v})),n.d(r,"pie",(function(){return C.A})),n.d(r,"areaRadial",(function(){return C.c})),n.d(r,"radialArea",(function(){return C.C})),n.d(r,"lineRadial",(function(){return C.w})),n.d(r,"radialLine",(function(){return C.D})),n.d(r,"pointRadial",(function(){return C.B})),n.d(r,"linkHorizontal",(function(){return C.x})),n.d(r,"linkVertical",(function(){return C.z})),n.d(r,"linkRadial",(function(){return C.y})),n.d(r,"symbol",(function(){return C.Q})),n.d(r,"symbols",(function(){return C.Y})),n.d(r,"symbolCircle",(function(){return C.R})),n.d(r,"symbolCross",(function(){return C.S})),n.d(r,"symbolDiamond",(function(){return C.T})),n.d(r,"symbolSquare",(function(){return C.U})),n.d(r,"symbolStar",(function(){return C.V})),n.d(r,"symbolTriangle",(function(){return C.W})),n.d(r,"symbolWye",(function(){return C.X})),n.d(r,"curveBasisClosed",(function(){return C.e})),n.d(r,"curveBasisOpen",(function(){return C.f})),n.d(r,"curveBasis",(function(){return C.d})),n.d(r,"curveBundle",(function(){return C.g})),n.d(r,"curveCardinalClosed",(function(){return C.i})),n.d(r,"curveCardinalOpen",(function(){return C.j})),n.d(r,"curveCardinal",(function(){return C.h})),n.d(r,"curveCatmullRomClosed",(function(){return C.l})),n.d(r,"curveCatmullRomOpen",(function(){return C.m})),n.d(r,"curveCatmullRom",(function(){return C.k})),n.d(r,"curveLinearClosed",(function(){return C.o})),n.d(r,"curveLinear",(function(){return C.n})),n.d(r,"curveMonotoneX",(function(){return 
C.p})),n.d(r,"curveMonotoneY",(function(){return C.q})),n.d(r,"curveNatural",(function(){return C.r})),n.d(r,"curveStep",(function(){return C.s})),n.d(r,"curveStepAfter",(function(){return C.t})),n.d(r,"curveStepBefore",(function(){return C.u})),n.d(r,"stack",(function(){return C.E})),n.d(r,"stackOffsetExpand",(function(){return C.G})),n.d(r,"stackOffsetDiverging",(function(){return C.F})),n.d(r,"stackOffsetNone",(function(){return C.H})),n.d(r,"stackOffsetSilhouette",(function(){return C.I})),n.d(r,"stackOffsetWiggle",(function(){return C.J})),n.d(r,"stackOrderAppearance",(function(){return C.K})),n.d(r,"stackOrderAscending",(function(){return C.L})),n.d(r,"stackOrderDescending",(function(){return C.M})),n.d(r,"stackOrderInsideOut",(function(){return C.N})),n.d(r,"stackOrderNone",(function(){return C.O})),n.d(r,"stackOrderReverse",(function(){return C.P})),n.d(r,"timeInterval",(function(){return N.g})),n.d(r,"timeMillisecond",(function(){return N.h})),n.d(r,"timeMilliseconds",(function(){return N.i})),n.d(r,"utcMillisecond",(function(){return N.L})),n.d(r,"utcMilliseconds",(function(){return N.M})),n.d(r,"timeSecond",(function(){return N.r})),n.d(r,"timeSeconds",(function(){return N.s})),n.d(r,"utcSecond",(function(){return N.V})),n.d(r,"utcSeconds",(function(){return N.W})),n.d(r,"timeMinute",(function(){return N.j})),n.d(r,"timeMinutes",(function(){return N.k})),n.d(r,"timeHour",(function(){return N.e})),n.d(r,"timeHours",(function(){return N.f})),n.d(r,"timeDay",(function(){return N.a})),n.d(r,"timeDays",(function(){return N.b})),n.d(r,"timeWeek",(function(){return N.B})),n.d(r,"timeWeeks",(function(){return N.C})),n.d(r,"timeSunday",(function(){return N.t})),n.d(r,"timeSundays",(function(){return N.u})),n.d(r,"timeMonday",(function(){return N.l})),n.d(r,"timeMondays",(function(){return N.m})),n.d(r,"timeTuesday",(function(){return N.x})),n.d(r,"timeTuesdays",(function(){return N.y})),n.d(r,"timeWednesday",(function(){return 
N.z})),n.d(r,"timeWednesdays",(function(){return N.A})),n.d(r,"timeThursday",(function(){return N.v})),n.d(r,"timeThursdays",(function(){return N.w})),n.d(r,"timeFriday",(function(){return N.c})),n.d(r,"timeFridays",(function(){return N.d})),n.d(r,"timeSaturday",(function(){return N.p})),n.d(r,"timeSaturdays",(function(){return N.q})),n.d(r,"timeMonth",(function(){return N.n})),n.d(r,"timeMonths",(function(){return N.o})),n.d(r,"timeYear",(function(){return N.D})),n.d(r,"timeYears",(function(){return N.E})),n.d(r,"utcMinute",(function(){return N.N})),n.d(r,"utcMinutes",(function(){return N.O})),n.d(r,"utcHour",(function(){return N.J})),n.d(r,"utcHours",(function(){return N.K})),n.d(r,"utcDay",(function(){return N.F})),n.d(r,"utcDays",(function(){return N.G})),n.d(r,"utcWeek",(function(){return N.fb})),n.d(r,"utcWeeks",(function(){return N.gb})),n.d(r,"utcSunday",(function(){return N.X})),n.d(r,"utcSundays",(function(){return N.Y})),n.d(r,"utcMonday",(function(){return N.P})),n.d(r,"utcMondays",(function(){return N.Q})),n.d(r,"utcTuesday",(function(){return N.bb})),n.d(r,"utcTuesdays",(function(){return N.cb})),n.d(r,"utcWednesday",(function(){return N.db})),n.d(r,"utcWednesdays",(function(){return N.eb})),n.d(r,"utcThursday",(function(){return N.Z})),n.d(r,"utcThursdays",(function(){return N.ab})),n.d(r,"utcFriday",(function(){return N.H})),n.d(r,"utcFridays",(function(){return N.I})),n.d(r,"utcSaturday",(function(){return N.T})),n.d(r,"utcSaturdays",(function(){return N.U})),n.d(r,"utcMonth",(function(){return N.R})),n.d(r,"utcMonths",(function(){return N.S})),n.d(r,"utcYear",(function(){return N.hb})),n.d(r,"utcYears",(function(){return N.ib})),n.d(r,"timeFormatDefaultLocale",(function(){return I.d})),n.d(r,"timeFormat",(function(){return I.c})),n.d(r,"timeParse",(function(){return I.f})),n.d(r,"utcFormat",(function(){return I.g})),n.d(r,"utcParse",(function(){return I.h})),n.d(r,"timeFormatLocale",(function(){return I.e})),n.d(r,"isoFormat",(function(){return 
I.a})),n.d(r,"isoParse",(function(){return I.b})),n.d(r,"now",(function(){return R.b})),n.d(r,"timer",(function(){return R.d})),n.d(r,"timerFlush",(function(){return R.e})),n.d(r,"timeout",(function(){return R.c})),n.d(r,"interval",(function(){return R.a})),n.d(r,"transition",(function(){return j.c})),n.d(r,"active",(function(){return j.a})),n.d(r,"interrupt",(function(){return j.b})),n.d(r,"voronoi",(function(){return L.a})),n.d(r,"zoom",(function(){return B.a})),n.d(r,"zoomTransform",(function(){return B.c})),n.d(r,"zoomIdentity",(function(){return B.b}));var i=n(446),o=n.n(i),a="5.15.0",u=n(0),s=n(200),c=n(199),f=n(197),l=n(39),h=n(20),d=n(192),p=n(51),g=n(102),y=n(140),b=n(139),v=n(194),m=n(191),_=n(84),w=n(186),x=n(189),k=n(26),E=n(42),A=n(196),S=n(85),M=n(195),T=n(190),O=n(188),D=n(13),C=n(187),N=n(73),I=n(103),R=n(81),j=n(40),L=n(193),B=n(198),P=n(447),F=n.n(P),q=n(144);function U(t){return(U="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}var z={},Y=function(t){!function(t){for(var e=Object.keys(t),n=0;n=1&&(r={x:t.x,y:t.y}),o>0&&o<1&&(r={x:(1-o)*e.x+o*t.x,y:(1-o)*e.y+o*t.y})}}e=t})),r}(t)},st=function(t,e,n){var r;e[0]!==n&&(e=e.reverse()),e.forEach((function(t){rt(t,r),r=t}));var i,o=25;r=void 0,e.forEach((function(t){if(r&&!i){var e=rt(t,r);if(e=1&&(i={x:t.x,y:t.y}),n>0&&n<1&&(i={x:(1-n)*r.x+n*t.x,y:(1-n)*r.y+n*t.y})}}r=t}));var a=t?10:5,u=Math.atan2(e[0].y-i.y,e[0].x-i.x),s={x:0,y:0};return s.x=Math.sin(u)*a+(e[0].x+i.x)/2,s.y=-Math.cos(u)*a+(e[0].y+i.y)/2,s},ct=function(t,e){var n=t.trim();if(n)return"loose"!==e.securityLevel?Object(et.sanitizeUrl)(n):n},ft=n(34),lt=n.n(ft),ht=function(t){return t.replace(//gi,"#br#")},dt=function(t){return t.replace(/#br#/g,"
    ")},pt=function(t){if(!t)return 1;var e=ht(t);return(e=e.replace(/\\n/g,"#br#")).split("#br#")},gt=function(t,e){var n=t,r=!0;return!e.flowchart||!1!==e.flowchart.htmlLabels&&"false"!==e.flowchart.htmlLabels||(r=!1),"loose"!==e.securityLevel&&r&&(n=(n=(n=ht(n)).replace(//g,">")).replace(/=/g,"="),n=dt(n)),n};function yt(t){return(yt="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}var bt,vt=V(),mt={},_t=[],wt=[],xt=[],kt={},Et={},At=0,St=!0,Mt=[],Tt=function(t,e,n,r){var i=t,o=e;i[0].match(/\d/)&&(i=""+i),o[0].match(/\d/)&&(o=""+o),J.info("Got edge...",i,o);var a={start:i,end:o,type:void 0,text:""};void 0!==(r=n.text)&&(a.text=gt(r.trim(),vt),'"'===a.text[0]&&'"'===a.text[a.text.length-1]&&(a.text=a.text.substring(1,a.text.length-1))),void 0!==n&&(a.type=n.type,a.stroke=n.stroke),_t.push(a)},Ot=function(t,e){t.split(",").forEach((function(t){var n=t;t[0].match(/\d/)&&(n=""+n),void 0!==mt[n]&&mt[n].classes.push(e),void 0!==kt[n]&&kt[n].classes.push(e)}))},Dt=function(t,e){t.split(",").forEach((function(t){void 0!==e&&(Et[t]=gt(e,vt))}))},Ct=function(t){var e=D.k(".mermaidTooltip");null===(e._groups||e)[0][0]&&(e=D.k("body").append("div").attr("class","mermaidTooltip").style("opacity",0)),D.k(t).select("svg").selectAll("g.node").on("mouseover",(function(){var t=D.k(this);if(null!==t.attr("title")){var n=this.getBoundingClientRect();e.transition().duration(200).style("opacity",".9"),e.html(t.attr("title")).style("left",n.left+(n.right-n.left)/2+"px").style("top",n.top-14+document.body.scrollTop+"px"),t.classed("hover",!0)}})).on("mouseout",(function(){e.transition().duration(500).style("opacity",0),D.k(this).classed("hover",!1)}))};Mt.push(Ct);var Nt=function(t){for(var e=0;e/)&&(bt="LR"),bt.match(/.*v/)&&(bt="TB")},setClass:Ot,getTooltip:function(t){return 
Et[t]},setClickEvent:function(t,e,n){t.split(",").forEach((function(t){!function(t,e){var n=t;t[0].match(/\d/)&&(n=""+n),"loose"===vt.securityLevel&&void 0!==e&&void 0!==mt[n]&&Mt.push((function(){var t=document.querySelector('[id="'.concat(n,'"]'));null!==t&&t.addEventListener("click",(function(){window[e](n)}),!1)}))}(t,e)})),Dt(t,n),Ot(t,"clickable")},setLink:function(t,e,n){t.split(",").forEach((function(t){var n=t;t[0].match(/\d/)&&(n=""+n),void 0!==mt[n]&&(mt[n].link=ct(e,vt))})),Dt(t,n),Ot(t,"clickable")},bindFunctions:function(t){Mt.forEach((function(e){e(t)}))},getDirection:function(){return bt.trim()},getVertices:function(){return mt},getEdges:function(){return _t},getClasses:function(){return wt},clear:function(){mt={},wt={},_t=[],(Mt=[]).push(Ct),xt=[],kt={},At=0,Et=[],St=!0},defaultStyle:function(){return"fill:#ffa;stroke: #f66; stroke-width: 3px; stroke-dasharray: 5, 5;fill:#ffa;stroke: #666;"},addSubGraph:function(t,e,n){var r=t.trim(),i=n;t===n&&n.match(/\s/)&&(r=void 0);var o,a,u,s=[];o=s.concat.apply(s,e),a={boolean:{},number:{},string:{}},u=[],s=o.filter((function(t){var e=yt(t);return""!==t.trim()&&(e in a?!a[e].hasOwnProperty(t)&&(a[e][t]=!0):!(u.indexOf(t)>=0)&&u.push(t))}));for(var c=0;c0&&function t(e,n){var r=xt[n].nodes;if(!((It+=1)>2e3)){if(Rt[It]=n,xt[n].id===e)return{result:!0,count:0};for(var i=0,o=1;i=0){var u=t(e,a);if(u.result)return{result:!0,count:o+u.count};o+=u.count}i+=1}return{result:!1,count:o}}}("none",xt.length-1)},getSubGraphs:function(){return xt},destructLink:function(t,e){var 
n,r=function(t){switch(t.trim()){case"--x":return{type:"arrow_cross",stroke:"normal"};case"--\x3e":return{type:"arrow",stroke:"normal"};case"<--\x3e":return{type:"double_arrow_point",stroke:"normal"};case"x--x":return{type:"double_arrow_cross",stroke:"normal"};case"o--o":return{type:"double_arrow_circle",stroke:"normal"};case"o.-o":return{type:"double_arrow_circle",stroke:"dotted"};case"<==>":return{type:"double_arrow_point",stroke:"thick"};case"o==o":return{type:"double_arrow_circle",stroke:"thick"};case"x==x":return{type:"double_arrow_cross",stroke:"thick"};case"x.-x":case"x-.-x":return{type:"double_arrow_cross",stroke:"dotted"};case"<.->":case"<-.->":return{type:"double_arrow_point",stroke:"dotted"};case"o-.-o":return{type:"double_arrow_circle",stroke:"dotted"};case"--o":return{type:"arrow_circle",stroke:"normal"};case"---":return{type:"arrow_open",stroke:"normal"};case"-.-x":return{type:"arrow_cross",stroke:"dotted"};case"-.->":return{type:"arrow",stroke:"dotted"};case"-.-o":return{type:"arrow_circle",stroke:"dotted"};case"-.-":return{type:"arrow_open",stroke:"dotted"};case".-x":return{type:"arrow_cross",stroke:"dotted"};case".->":return{type:"arrow",stroke:"dotted"};case".-o":return{type:"arrow_circle",stroke:"dotted"};case".-":return{type:"arrow_open",stroke:"dotted"};case"==x":return{type:"arrow_cross",stroke:"thick"};case"==>":return{type:"arrow",stroke:"thick"};case"==o":return{type:"arrow_circle",stroke:"thick"};case"===":return{type:"arrow_open",stroke:"thick"}}}(t);if(e){if((n=function(t){switch(t.trim()){case"<--":return{type:"arrow",stroke:"normal"};case"x--":return{type:"arrow_cross",stroke:"normal"};case"o--":return{type:"arrow_circle",stroke:"normal"};case"<-.":return{type:"arrow",stroke:"dotted"};case"x-.":return{type:"arrow_cross",stroke:"dotted"};case"o-.":return{type:"arrow_circle",stroke:"dotted"};case"<==":return{type:"arrow",stroke:"thick"};case"x==":return{type:"arrow_cross",stroke:"thick"};case"o==":return{type:"arrow_circle",stroke:"thick"
};case"--":return{type:"arrow_open",stroke:"normal"};case"==":return{type:"arrow_open",stroke:"thick"};case"-.":return{type:"arrow_open",stroke:"dotted"}}}(e)).stroke!==r.stroke)return{type:"INVALID",stroke:"INVALID"};if("arrow_open"===n.type)n.type=r.type;else{if(n.type!==r.type)return{type:"INVALID",stroke:"INVALID"};n.type="double_"+n.type}return"double_arrow"===n.type&&(n.type="double_arrow_point"),n}return r},lex:{firstGraph:function(){return!!St&&(St=!1,!0)}}},Lt=n(72),Bt=n.n(Lt),Pt=n(19),Ft=n.n(Pt),qt=n(143),Ut=n.n(qt);function zt(t,e,n){var r=.9*(e.width+e.height),i=[{x:r/2,y:0},{x:r,y:-r/2},{x:r/2,y:-r},{x:0,y:-r/2}],o=Jt(t,r,r,i);return n.intersect=function(t){return Ft.a.intersect.polygon(n,i,t)},o}function Yt(t,e,n){var r=e.height,i=r/4,o=e.width+2*i,a=[{x:i,y:0},{x:o-i,y:0},{x:o,y:-r/2},{x:o-i,y:-r},{x:i,y:-r},{x:0,y:-r/2}],u=Jt(t,o,r,a);return n.intersect=function(t){return Ft.a.intersect.polygon(n,a,t)},u}function Vt(t,e,n){var r=e.width,i=e.height,o=[{x:-i/2,y:0},{x:r,y:0},{x:r,y:-i},{x:-i/2,y:-i},{x:0,y:-i/2}],a=Jt(t,r,i,o);return n.intersect=function(t){return Ft.a.intersect.polygon(n,o,t)},a}function Gt(t,e,n){var r=e.width,i=e.height,o=[{x:-2*i/6,y:0},{x:r-i/6,y:0},{x:r+2*i/6,y:-i},{x:i/6,y:-i}],a=Jt(t,r,i,o);return n.intersect=function(t){return Ft.a.intersect.polygon(n,o,t)},a}function Ht(t,e,n){var r=e.width,i=e.height,o=[{x:2*i/6,y:0},{x:r+i/6,y:0},{x:r-2*i/6,y:-i},{x:-i/6,y:-i}],a=Jt(t,r,i,o);return n.intersect=function(t){return Ft.a.intersect.polygon(n,o,t)},a}function Wt(t,e,n){var r=e.width,i=e.height,o=[{x:-2*i/6,y:0},{x:r+2*i/6,y:0},{x:r-i/6,y:-i},{x:i/6,y:-i}],a=Jt(t,r,i,o);return n.intersect=function(t){return Ft.a.intersect.polygon(n,o,t)},a}function $t(t,e,n){var r=e.width,i=e.height,o=[{x:i/6,y:0},{x:r-i/6,y:0},{x:r+2*i/6,y:-i},{x:-2*i/6,y:-i}],a=Jt(t,r,i,o);return n.intersect=function(t){return Ft.a.intersect.polygon(n,o,t)},a}function Kt(t,e,n){var 
r=e.width,i=e.height,o=[{x:0,y:0},{x:r+i/2,y:0},{x:r,y:-i/2},{x:r+i/2,y:-i},{x:0,y:-i}],a=Jt(t,r,i,o);return n.intersect=function(t){return Ft.a.intersect.polygon(n,o,t)},a}function Zt(t,e,n){var r=e.height,i=e.width+r/4,o=t.insert("rect",":first-child").attr("rx",r/2).attr("ry",r/2).attr("x",-i/2).attr("y",-r/2).attr("width",i).attr("height",r);return n.intersect=function(t){return Ft.a.intersect.rect(n,t)},o}function Xt(t,e,n){var r=e.width,i=r/2,o=i/(2.5+r/50),a=e.height+o,u="M 0,"+o+" a "+i+","+o+" 0,0,0 "+r+" 0 a "+i+","+o+" 0,0,0 "+-r+" 0 l 0,"+a+" a "+i+","+o+" 0,0,0 "+r+" 0 l 0,"+-a,s=t.attr("label-offset-y",o).insert("path",":first-child").attr("d",u).attr("transform","translate("+-r/2+","+-(a/2+o)+")");return n.intersect=function(t){var e=Ft.a.intersect.rect(n,t),r=e.x-n.x;if(0!=i&&(Math.abs(r)n.height/2-o)){var a=o*o*(1-r*r/(i*i));0!=a&&(a=Math.sqrt(a)),a=o-a,t.y-n.y>0&&(a=-a),e.y+=a}return e},s}function Jt(t,e,n,r){return t.insert("polygon",":first-child").attr("points",r.map((function(t){return t.x+","+t.y})).join(" ")).attr("transform","translate("+-e/2+","+n/2+")")}var Qt={addToRender:function(t){t.shapes().question=zt,t.shapes().hexagon=Yt,t.shapes().stadium=Zt,t.shapes().cylinder=Xt,t.shapes().rect_left_inv_arrow=Vt,t.shapes().lean_right=Gt,t.shapes().lean_left=Ht,t.shapes().trapezoid=Wt,t.shapes().inv_trapezoid=$t,t.shapes().rect_right_inv_arrow=Kt}},te={},ee=function(t,e,n){var r=D.k('[id="'.concat(n,'"]'));Object.keys(t).forEach((function(n){var i=t[n],o="default";i.classes.length>0&&(o=i.classes.join(" "));var a,u=it(i.styles),s=void 0!==i.text?i.text:i.id;if(V().flowchart.htmlLabels){var c={label:s.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")}))};(a=Ut()(r,c).node()).parentNode.removeChild(a)}else{var f=document.createElementNS("http://www.w3.org/2000/svg","text");f.setAttribute("style",u.labelStyle.replace("color:","fill:"));for(var l=s.split(//gi),h=0;h"):(a.labelType="text",a.label=o.text.replace(//gi,"\n"),void 
0===o.style&&(a.style=a.style||"stroke: #333; stroke-width: 1.5px;fill:none"),a.labelStyle=a.labelStyle.replace("color:","fill:"))),e.setEdge(o.start,o.end,a,i)}))},re=function(t){for(var e=Object.keys(t),n=0;n=0;f--)i=c[f],jt.addVertex(i.id,i.title,"group",void 0,i.classes);var l=jt.getVertices(),h=jt.getEdges(),d=0;for(d=c.length-1;d>=0;d--){i=c[d],D.l("cluster").append("text");for(var p=0;p0&&(o=i.classes.join(" "));var a,u=it(i.styles),s=void 0!==i.text?i.text:i.id;if(V().flowchart.htmlLabels){var c={label:s.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")}))};(a=Ut()(r,c).node()).parentNode.removeChild(a)}else{var f=document.createElementNS("http://www.w3.org/2000/svg","text");f.setAttribute("style",u.labelStyle.replace("color:","fill:"));for(var l=s.split(//gi),h=0;h"):(a.labelType="text",a.label=o.text.replace(//gi,"\n"),void 0===o.style&&(a.style=a.style||"stroke: #333; stroke-width: 1.5px;fill:none"),a.labelStyle=a.labelStyle.replace("color:","fill:"))),e.setEdge(o.start,o.end,a,i)}))},ce={setConf:function(t){for(var e=Object.keys(t),n=0;n=0;f--)i=c[f],jt.addVertex(i.id,i.title,"group",void 0,i.classes);var l=jt.getVertices(),h=jt.getEdges(),d=0;for(d=c.length-1;d>=0;d--){i=c[d],D.l("cluster").append("text");for(var p=0;p/gi," "),r=t.append("text");r.attr("x",e.x),r.attr("y",e.y),r.style("text-anchor",e.anchor),r.attr("fill",e.fill),void 0!==e.class&&r.attr("class",e.class);var i=r.append("tspan");return i.attr("x",e.x+2*e.textMargin),i.attr("fill",e.fill),i.text(n),r},he=function(t,e){var n,r,i,o,a,u=t.append("polygon");u.attr("points",(n=e.x,r=e.y,n+","+r+" "+(n+(i=50))+","+r+" "+(n+i)+","+(r+(o=20)-(a=7))+" "+(n+i-1.2*a)+","+(r+o)+" "+n+","+(r+o))),u.attr("class","labelBox"),e.y=e.y+e.labelMargin,e.x=e.x+.5*e.labelMargin,le(t,e)},de=-1,pe=function(){return{x:0,y:0,fill:void 
0,"text-anchor":"start",style:"#666",width:100,height:100,textMargin:0,rx:0,ry:0}},ge=function(){return{x:0,y:0,fill:"#EDF2AE",stroke:"#666",width:100,anchor:"start",height:100,rx:0,ry:0}},ye=function(){function t(t,e,n,i,o,a,u){r(e.append("text").attr("x",n+o/2).attr("y",i+a/2+5).style("text-anchor","middle").text(t),u)}function e(t,e,n,i,o,a,u,s){for(var c=s.actorFontSize,f=s.actorFontFamily,l=t.split(//gi),h=0;h>-",token:"->>-",line:"1",loc:{first_line:1,last_line:1,first_column:1,last_column:1},expected:["'ACTIVE_PARTICIPANT'"]},o}}return we.push({from:t,to:e,message:n,type:r}),!0},Me={SOLID:0,DOTTED:1,NOTE:2,SOLID_CROSS:3,DOTTED_CROSS:4,SOLID_OPEN:5,DOTTED_OPEN:6,LOOP_START:10,LOOP_END:11,ALT_START:12,ALT_ELSE:13,ALT_END:14,OPT_START:15,OPT_END:16,ACTIVE_START:17,ACTIVE_END:18,PAR_START:19,PAR_AND:20,PAR_END:21,RECT_START:22,RECT_END:23},Te=function(t,e,n){var r={actor:t,placement:e,message:n},i=[].concat(t,t);xe.push(r),we.push({from:i[0],to:i[1],message:n,type:Me.NOTE,placement:e})},Oe=function(t){ke=t},De={addActor:Ae,addMessage:function(t,e,n,r){we.push({from:t,to:e,message:n,answer:r})},addSignal:Se,enableSequenceNumbers:function(){Ee=!0},showSequenceNumbers:function(){return Ee},getMessages:function(){return we},getActors:function(){return _e},getActor:function(t){return _e[t]},getActorKeys:function(){return Object.keys(_e)},getTitle:function(){return ke},clear:function(){_e={},we=[]},LINETYPE:Me,ARROWTYPE:{FILLED:0,OPEN:1},PLACEMENT:{LEFTOF:0,RIGHTOF:1,OVER:2},addNote:Te,setTitle:Oe,apply:function t(e){if(e instanceof Array)e.forEach((function(e){t(e)}));else switch(e.type){case"addActor":Ae(e.actor,e.actor,e.description);break;case"activeStart":case"activeEnd":Se(e.actor,void 0,void 0,e.signalType);break;case"addNote":Te(e.actor,e.placement,e.text);break;case"addMessage":Se(e.from,e.to,e.msg,e.signalType);break;case"loopStart":Se(void 0,void 0,e.loopText,e.signalType);break;case"loopEnd":Se(void 0,void 0,void 
0,e.signalType);break;case"rectStart":Se(void 0,void 0,e.color,e.signalType);break;case"rectEnd":Se(void 0,void 0,void 0,e.signalType);break;case"optStart":Se(void 0,void 0,e.optText,e.signalType);break;case"optEnd":Se(void 0,void 0,void 0,e.signalType);break;case"altStart":case"else":Se(void 0,void 0,e.altText,e.signalType);break;case"altEnd":Se(void 0,void 0,void 0,e.signalType);break;case"setTitle":Oe(e.text);break;case"parStart":case"and":Se(void 0,void 0,e.parText,e.signalType);break;case"parEnd":Se(void 0,void 0,void 0,e.signalType)}}};function Ce(t){return function(t){if(Array.isArray(t)){for(var e=0,n=new Array(t.length);e/gi),u=!0,s=!1,c=void 0;try{for(var f,l=a[Symbol.iterator]();!(u=(f=l.next()).done);u=!0){var h=f.value,d=be.getTextObj();d.x=e,d.y=n+o,d.textMargin=Ne.noteMargin,d.dy="1em",d.text=h,d.class="noteText";var p=be.drawText(r,d,i);o+=(p._groups||p)[0][0].getBBox().height}}catch(t){s=!0,c=t}finally{try{u||null==l.return||l.return()}finally{if(s)throw c}}return o}(r.message,e-4,n+24,a,o.width-Ne.noteMargin);Ie.insert(e,n,e+o.width,n+2*Ne.noteMargin+s),u.attr("height",s+2*Ne.noteMargin),Ie.bumpVerticalPos(s+2*Ne.noteMargin)},je=function(t,e,n,r){for(var i=0;ie&&(n.starty=e-6,e+=12),be.drawActivation(o,n,e,Ne,Le(t.from.actor).length),Ie.insert(n.startx,e-10,n.stopx,e)}(t,Ie.getVerticalPos());break;case ve.parser.yy.LINETYPE.LOOP_START:Ie.bumpVerticalPos(Ne.boxMargin),Ie.newLoop(t.message),Ie.bumpVerticalPos(Ne.boxMargin+Ne.boxTextMargin);break;case ve.parser.yy.LINETYPE.LOOP_END:e=Ie.endLoop(),be.drawLoop(o,e,"loop",Ne),Ie.bumpVerticalPos(Ne.boxMargin);break;case ve.parser.yy.LINETYPE.RECT_START:Ie.bumpVerticalPos(Ne.boxMargin),Ie.newLoop(void 0,t.message),Ie.bumpVerticalPos(Ne.boxMargin);break;case ve.parser.yy.LINETYPE.RECT_END:var u=Ie.endLoop();be.drawBackgroundRect(o,u),Ie.bumpVerticalPos(Ne.boxMargin);break;case 
ve.parser.yy.LINETYPE.OPT_START:Ie.bumpVerticalPos(Ne.boxMargin),Ie.newLoop(t.message),Ie.bumpVerticalPos(Ne.boxMargin+Ne.boxTextMargin);break;case ve.parser.yy.LINETYPE.OPT_END:e=Ie.endLoop(),be.drawLoop(o,e,"opt",Ne),Ie.bumpVerticalPos(Ne.boxMargin);break;case ve.parser.yy.LINETYPE.ALT_START:Ie.bumpVerticalPos(Ne.boxMargin),Ie.newLoop(t.message),Ie.bumpVerticalPos(Ne.boxMargin+Ne.boxTextMargin);break;case ve.parser.yy.LINETYPE.ALT_ELSE:Ie.bumpVerticalPos(Ne.boxMargin),e=Ie.addSectionToLoop(t.message),Ie.bumpVerticalPos(Ne.boxMargin);break;case ve.parser.yy.LINETYPE.ALT_END:e=Ie.endLoop(),be.drawLoop(o,e,"alt",Ne),Ie.bumpVerticalPos(Ne.boxMargin);break;case ve.parser.yy.LINETYPE.PAR_START:Ie.bumpVerticalPos(Ne.boxMargin),Ie.newLoop(t.message),Ie.bumpVerticalPos(Ne.boxMargin+Ne.boxTextMargin);break;case ve.parser.yy.LINETYPE.PAR_AND:Ie.bumpVerticalPos(Ne.boxMargin),e=Ie.addSectionToLoop(t.message),Ie.bumpVerticalPos(Ne.boxMargin);break;case ve.parser.yy.LINETYPE.PAR_END:e=Ie.endLoop(),be.drawLoop(o,e,"par",Ne),Ie.bumpVerticalPos(Ne.boxMargin);break;default:try{Ie.bumpVerticalPos(Ne.messageMargin);var s=Be(t.from),c=Be(t.to),l=s[0]<=c[0]?1:0,h=s[0]/gi),l=!0,h=!1,d=void 0;try{for(var p,g=f[Symbol.iterator]();!(l=(p=g.next()).done);l=!0){var y=p.value;s.push(a.append("text").attr("x",u).attr("y",r-7+17*c).style("text-anchor","middle").attr("class","messageText").text(y.trim())),c++}}catch(t){h=!0,d=t}finally{try{l||null==g.return||g.return()}finally{if(h)throw d}}for(var b,v=17*(c-1),m=s.map((function(t){return(t._groups||t)[0][0].getBBox().width})),_=Math.max.apply(Math,Ce(m)),w=0,x=s;w=6&&n.indexOf("weekends")>=0||(n.indexOf(t.format("dddd").toLowerCase())>=0||n.indexOf(t.format(e.trim()))>=0)},rn=function(t,e,n){if(n.length&&!t.manualEndTime){var r=H()(t.startTime,e,!0);r.add(1,"d");var i=H()(t.endTime,e,!0),o=on(r,i,e,n);t.endTime=i.toDate(),t.renderEndTime=o}},on=function(t,e,n,r){for(var 
i=!1,o=null;t<=e;)i||(o=e.toDate()),(i=nn(t,n,r))&&e.add(1,"d"),t.add(1,"d");return o},an=function(t,e,n){n=n.trim();var r=/^after\s+([\d\w- ]+)/.exec(n.trim());if(null!==r){var i=null;if(r[1].split(" ").forEach((function(t){var e=dn(t);void 0!==e&&(i?e.endTime>i.endTime&&(i=e):i=e)})),i)return i.endTime;var o=new Date;return o.setHours(0,0,0,0),o}var a=H()(n,e.trim(),!0);return a.isValid()?a.toDate():(J.debug("Invalid date:"+n),J.debug("With date format:"+e.trim()),new Date)},un=function(t,e){if(null!==t)switch(t[2]){case"s":e.add(t[1],"seconds");break;case"m":e.add(t[1],"minutes");break;case"h":e.add(t[1],"hours");break;case"d":e.add(t[1],"days");break;case"w":e.add(t[1],"weeks")}return e.toDate()},sn=function(t,e,n,r){r=r||!1,n=n.trim();var i=H()(n,e.trim(),!0);return i.isValid()?(r&&i.add(1,"d"),i.toDate()):un(/^([\d]+)([wdhms])/.exec(n.trim()),H()(t))},cn=0,fn=function(t){return void 0===t?"task"+(cn+=1):t},ln=[],hn={},dn=function(t){var e=hn[t];return ln[e]},pn=function(){for(var t=function(t){var e=ln[t],n="";switch(ln[t].raw.startTime.type){case"prevTaskEnd":var r=dn(e.prevTaskId);e.startTime=r.endTime;break;case"getStartDate":(n=an(0,He,ln[t].raw.startTime.startData))&&(ln[t].startTime=n)}return ln[t].startTime&&(ln[t].endTime=sn(ln[t].startTime,He,ln[t].raw.endTime.data,en),ln[t].endTime&&(ln[t].processed=!0,ln[t].manualEndTime=H()(ln[t].raw.endTime.data,"YYYY-MM-DD",!0).isValid(),rn(ln[t],He,$e))),ln[t].processed},e=!0,n=0;n0&&(e=t.classes.join(" "));for(var n=0,r=0;rn-e?n+o+1.5*_n.leftPadding>s?e+r-5:n+r+5:(n-e)/2+e+r})).attr("y",(function(t,r){return r*e+_n.barHeight/2+(_n.fontSize/2-2)+n})).attr("text-height",i).attr("class",(function(t){var e=a(t.startTime),n=a(t.endTime);t.milestone&&(n=e+i);var r=this.getBBox().width,o="";t.classes.length>0&&(o=t.classes.join(" "));for(var u=0,f=0;fn-e?n+r+1.5*_n.leftPadding>s?o+" taskTextOutsideLeft taskTextOutside"+u+" "+l:o+" taskTextOutsideRight taskTextOutside"+u+" "+l+" width-"+r:o+" taskText taskText"+u+" 
"+l+" width-"+r}))}(t,i,u,f,r,0,e),function(t,e){for(var n=[],r=0,i=0;i/gi),n=-(e.length-1)/2,r=document.createElementNS("http://www.w3.org/2000/svg","text");r.setAttribute("dy",n+"em");for(var i=0;i0&&o.setAttribute("dy","1em"),o.textContent=e[i],r.appendChild(o)}return r})).attr("x",10).attr("y",(function(i,o){if(!(o>0))return i[1]*t/2+e;for(var a=0;a0){var r=t.split("~");n=r[0],e=r[1]}return{className:n,type:e}},Cn=function(t){var e=Dn(t);void 0===Mn[e.className]&&(Mn[e.className]={id:e.className,type:e.type,cssClasses:[],methods:[],members:[],annotations:[],domId:"classid-"+e.className+"-"+Tn},Tn++)},Nn=function(t){for(var e=Object.keys(Mn),n=0;n>")?r.annotations.push(i.substring(2,i.length-2)):i.indexOf(")")>0?r.methods.push(i):i&&r.members.push(i)}},Rn=function(t,e){t.split(",").forEach((function(t){var n=t;t[0].match(/\d/)&&(n="classid-"+n),void 0!==Mn[n]&&Mn[n].cssClasses.push(e)}))},jn=function(t,e,n){var r=t,i=Nn(r);"loose"===An.securityLevel&&void 0!==e&&void 0!==Mn[r]&&(n&&(Mn[r].tooltip=gt(n,An)),On.push((function(){var t=document.querySelector('[id="'.concat(i,'"]'));null!==t&&t.addEventListener("click",(function(){window[e](i)}),!1)})))},Ln=function(t){var e=D.k(".mermaidTooltip");null===(e._groups||e)[0][0]&&(e=D.k("body").append("div").attr("class","mermaidTooltip").style("opacity",0)),D.k(t).select("svg").selectAll("g.node").on("mouseover",(function(){var t=D.k(this);if(null!==t.attr("title")){var n=this.getBoundingClientRect();e.transition().duration(200).style("opacity",".9"),e.html(t.attr("title")).style("left",n.left+(n.right-n.left)/2+"px").style("top",n.top-14+document.body.scrollTop+"px"),t.classed("hover",!0)}})).on("mouseout",(function(){e.transition().duration(500).style("opacity",0),D.k(this).classed("hover",!1)}))};On.push(Ln);var Bn={addClass:Cn,bindFunctions:function(t){On.forEach((function(e){e(t)}))},clear:function(){Sn=[],Mn={},(On=[]).push(Ln)},getClass:function(t){return Mn[t]},getClasses:function(){return 
Mn},addAnnotation:function(t,e){var n=Dn(t).className;Mn[n].annotations.push(e)},getRelations:function(){return Sn},addRelation:function(t){J.debug("Adding relation: "+JSON.stringify(t)),Cn(t.id1),Cn(t.id2),t.id1=Dn(t.id1).className,t.id2=Dn(t.id2).className,Sn.push(t)},addMember:In,addMembers:function(t,e){Array.isArray(e)&&(e.reverse(),e.forEach((function(e){return In(t,e)})))},cleanupLabel:function(t){return":"===t.substring(0,1)?t.substr(1).trim():t.trim()},lineType:{LINE:0,DOTTED_LINE:1},relationType:{AGGREGATION:0,EXTENSION:1,COMPOSITION:2,DEPENDENCY:3},setClickEvent:function(t,e,n){t.split(",").forEach((function(t){jn(t,e,n)})),Rn(t,"clickable")},setCssClass:Rn,setLink:function(t,e,n){t.split(",").forEach((function(t){var r=t;t[0].match(/\d/)&&(r="classid-"+r),void 0!==Mn[r]&&(Mn[r].link=ct(e,An),n&&(Mn[r].tooltip=gt(n,An)))})),Rn(t,"clickable")},lookUpDomId:Nn},Pn=n(117),Fn=n.n(Pn),qn=0,Un=function(t){var e=t.match(/^(\+|-|~|#)?(\w+)(~\w+~|\[\])?\s+(\w+)$/),n=t.match(/^(\+|-|~|#)?(\w+)\s?\(\s*(\w+(~\w+~|\[\])?\s*(\w+)?)?\s*\)\s?([*|$])?\s?(\w+(~\w+~|\[\])?)?\s*$/);return e?zn(e):n?Yn(n):Vn(t)},zn=function(t){return{displayText:(t[1]?t[1].trim():"")+(t[2]?t[2].trim():"")+(t[3]?Hn(t[3]):"")+" "+(t[4]?t[4].trim():""),cssStyle:""}},Yn=function(t){var e=t[1]?t[1].trim():"",n=t[2]?t[2].trim():"",r=t[3]?Hn(t[3]):"",i=t[6]?t[6].trim():"";return{displayText:e+n+"("+r+")"+(t[7]?" 
: "+Hn(t[7]).trim():""),cssStyle:Wn(i)}},Vn=function(t){var e="",n="",r="",i=t.indexOf("("),o=t.indexOf(")");if(i>1&&o>i&&o<=t.length){var a=t.match(/(\+|-|~|#)?(\w+)/),u=a[1]?a[1].trim():"",s=a[2],c=t.substring(i+1,o),f=t.substring(o,o+1);n=Wn(f),o<(e=u+s+"("+Hn(c.trim())+")").length&&""!==(r=t.substring(o+2).trim())&&(r=" : "+Hn(r))}else e=Hn(t);return{displayText:e+r,cssStyle:n}},Gn=function(t,e,n,r){var i=Un(e),o=t.append("tspan").attr("x",r.padding).text(i.displayText);""!==i.cssStyle&&o.attr("style",i.cssStyle),n||o.attr("dy",r.textHeight)},Hn=function t(e){var n=e;return-1!=e.indexOf("~")?t(n=(n=n.replace("~","<")).replace("~",">")):n},Wn=function(t){switch(t){case"*":return"font-style:italic;";case"$":return"text-decoration:underline;";default:return""}},$n=function(t,e,n){J.info("Rendering class "+e);var r="classGroup ";e.cssClasses.length>0&&(r+=e.cssClasses.join(" "));var i,o=e.id,a={id:o,label:e.id,width:0,height:0},u=t.append("g").attr("id",Nn(o)).attr("class",r);i=e.link?u.append("svg:a").attr("xlink:href",e.link).attr("target","_blank").append("text").attr("y",n.textHeight+n.padding).attr("x",0):u.append("text").attr("y",n.textHeight+n.padding).attr("x",0);var s=!0;e.annotations.forEach((function(t){var e=i.append("tspan").text("«"+t+"»");s||e.attr("dy",n.textHeight),s=!1}));var c=e.id;void 0!==e.type&&""!==e.type&&(c+="<"+e.type+">");var f=i.append("tspan").text(c).attr("class","title");s||f.attr("dy",n.textHeight);var l=i.node().getBBox().height,h=u.append("line").attr("x1",0).attr("y1",n.padding+l+n.dividerMargin/2).attr("y2",n.padding+l+n.dividerMargin/2),d=u.append("text").attr("x",n.padding).attr("y",l+n.dividerMargin+n.textHeight).attr("fill","white").attr("class","classText");s=!0,e.members.forEach((function(t){Gn(d,t,s,n),s=!1}));var 
p=d.node().getBBox(),g=u.append("line").attr("x1",0).attr("y1",n.padding+l+n.dividerMargin+p.height).attr("y2",n.padding+l+n.dividerMargin+p.height),y=u.append("text").attr("x",n.padding).attr("y",l+2*n.dividerMargin+p.height+n.textHeight).attr("fill","white").attr("class","classText");s=!0,e.methods.forEach((function(t){Gn(y,t,s,n),s=!1}));var b=u.node().getBBox(),v=u.insert("rect",":first-child").attr("x",0).attr("y",0).attr("width",b.width+2*n.padding).attr("height",b.height+n.padding+.5*n.dividerMargin).node().getBBox().width;return i.node().childNodes.forEach((function(t){t.setAttribute("x",(v-t.getBBox().width)/2)})),e.tooltip&&i.insert("title").text(e.tooltip),h.attr("x2",v),g.attr("x2",v),a.width=v,a.height=b.height+n.padding+.5*n.dividerMargin,a},Kn=function(t,e,n,r){var i=function(t){switch(t){case Bn.relationType.AGGREGATION:return"aggregation";case Bn.relationType.EXTENSION:return"extension";case Bn.relationType.COMPOSITION:return"composition";case Bn.relationType.DEPENDENCY:return"dependency"}};e.points=e.points.filter((function(t){return!Number.isNaN(t.y)}));var o,a,u=e.points,s=C.v().x((function(t){return t.x})).y((function(t){return t.y})).curve(C.d),c=t.append("path").attr("d",s(u)).attr("id","edge"+qn).attr("class","relation"),f="";r.arrowMarkerAbsolute&&(f=(f=(f=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),1==n.relation.lineType&&c.attr("class","relation dashed-line"),"none"!==n.relation.type1&&c.attr("marker-start","url("+f+"#"+i(n.relation.type1)+"Start)"),"none"!==n.relation.type2&&c.attr("marker-end","url("+f+"#"+i(n.relation.type2)+"End)");var l,h,d,p,g=e.points.length,y=ut(e.points);if(o=y.x,a=y.y,g%2!=0&&g>1){var b=st("none"!==n.relation.type1,e.points,e.points[0]),v=st("none"!==n.relation.type2,e.points,e.points[g-1]);J.debug("cardinality_1_point "+JSON.stringify(b)),J.debug("cardinality_2_point 
"+JSON.stringify(v)),l=b.x,h=b.y,d=v.x,p=v.y}if(void 0!==n.title){var m=t.append("g").attr("class","classLabel"),_=m.append("text").attr("class","label").attr("x",o).attr("y",a).attr("fill","red").attr("text-anchor","middle").text(n.title);window.label=_;var w=_.node().getBBox();m.insert("rect",":first-child").attr("class","box").attr("x",w.x-r.padding/2).attr("y",w.y-r.padding/2).attr("width",w.width+r.padding).attr("height",w.height+r.padding)}(J.info("Rendering relation "+JSON.stringify(n)),void 0!==n.relationTitle1&&"none"!==n.relationTitle1)&&t.append("g").attr("class","cardinality").append("text").attr("class","type1").attr("x",l).attr("y",h).attr("fill","black").attr("font-size","6").text(n.relationTitle1);void 0!==n.relationTitle2&&"none"!==n.relationTitle2&&t.append("g").attr("class","cardinality").append("text").attr("class","type2").attr("x",d).attr("y",p).attr("fill","black").attr("font-size","6").text(n.relationTitle2);qn++};Pn.parser.yy=Bn;var Zn={},Xn={dividerMargin:10,padding:5,textHeight:10},Jn=function(t){for(var e=Object.keys(Zn),n=0;n "+t.w+": "+JSON.stringify(i.edge(t))),Kn(r,i.edge(t),i.edge(t).relation,Xn))})),r.attr("height",i.graph().height+40),r.attr("width",1.5*i.graph().width+20),r.attr("viewBox","-10 -10 "+(i.graph().width+20)+" "+(i.graph().height+20))};function er(t){return(er="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}var nr,rr=[],ir={root:{relations:[],states:{},documents:{}}},or=ir.root,ar=0,ur=function(t,e,n,r,i){void 0===or.states[t]?or.states[t]={id:t,descriptions:[],type:e,doc:n,note:i}:(or.states[t].doc||(or.states[t].doc=n),or.states[t].type||(or.states[t].type=e)),r&&("string"==typeof r&&fr(t,r.trim()),"object"===er(r)&&r.forEach((function(e){return 
fr(t,e.trim())}))),i&&(or.states[t].note=i)},sr=function(){or=(ir={root:{relations:[],states:{},documents:{}}}).root},cr=function(t,e,n){var r=t,i=e,o="default",a="default";"[*]"===t&&(r="start"+ ++ar,o="start"),"[*]"===e&&(i="end"+ar,a="end"),ur(r,o),ur(i,a),or.relations.push({id1:r,id2:i,title:n})},fr=function(t,e){var n=or.states[t],r=e;":"===r[0]&&(r=r.substr(1).trim()),n.descriptions.push(r)},lr=0,hr={addState:ur,clear:sr,getState:function(t){return or.states[t]},getStates:function(){return or.states},getRelations:function(){return or.relations},addRelation:cr,getDividerId:function(){return"divider-id-"+ ++lr},cleanupLabel:function(t){return":"===t.substring(0,1)?t.substr(2).trim():t.trim()},lineType:{LINE:0,DOTTED_LINE:1},relationType:{AGGREGATION:0,EXTENSION:1,COMPOSITION:2,DEPENDENCY:3},logDocuments:function(){J.info("Documents = ",ir)},getRootDoc:function(){return rr},setRootDoc:function(t){J.info("Setting root doc",t),rr=t},extract:function(t){sr(),t.forEach((function(t){"state"===t.stmt&&ur(t.id,t.type,t.doc,t.description,t.note),"relation"===t.stmt&&cr(t.state1.id,t.state2.id,t.description)}))}},dr=n(118),pr=n.n(dr),gr={},yr=function(t,e){gr[t]=e},br=function(t,e){var n=t.append("text").attr("x",2*V().state.padding).attr("y",V().state.textHeight+1.3*V().state.padding).attr("font-size",V().state.fontSize).attr("class","state-title").text(e.descriptions[0]).node().getBBox(),r=n.height,i=t.append("text").attr("x",V().state.padding).attr("y",r+.4*V().state.padding+V().state.dividerMargin+V().state.textHeight).attr("class","state-description"),o=!0,a=!0;e.descriptions.forEach((function(t){o||(!function(t,e,n){var r=t.append("tspan").attr("x",2*V().state.padding).text(e);n||r.attr("dy",V().state.textHeight)}(i,t,a),a=!1),o=!1}));var 
u=t.append("line").attr("x1",V().state.padding).attr("y1",V().state.padding+r+V().state.dividerMargin/2).attr("y2",V().state.padding+r+V().state.dividerMargin/2).attr("class","descr-divider"),s=i.node().getBBox(),c=Math.max(s.width,n.width);return u.attr("x2",c+3*V().state.padding),t.insert("rect",":first-child").attr("x",V().state.padding).attr("y",V().state.padding).attr("width",c+2*V().state.padding).attr("height",s.height+r+2*V().state.padding).attr("rx",V().state.radius),t},vr=function(t,e,n){var r,i=V().state.padding,o=2*V().state.padding,a=t.node().getBBox(),u=a.width,s=a.x,c=t.append("text").attr("x",0).attr("y",V().state.titleShift).attr("font-size",V().state.fontSize).attr("class","state-title").text(e.id),f=c.node().getBBox().width+o,l=Math.max(f,u);l===u&&(l+=o);var h=t.node().getBBox();e.doc,r=s-i,f>u&&(r=(u-l)/2+i),Math.abs(s-h.x)u&&(r=s-(f-u)/2);var d=1-V().state.textHeight;return t.insert("rect",":first-child").attr("x",r).attr("y",d).attr("class",n?"alt-composit":"composit").attr("width",l).attr("height",h.height+V().state.textHeight+V().state.titleShift+1).attr("rx","0"),c.attr("x",r+i),f<=u&&c.attr("x",s+(l-o)/2-f/2+i),t.insert("rect",":first-child").attr("x",r).attr("y",V().state.titleShift-V().state.textHeight-V().state.padding).attr("width",l).attr("height",3*V().state.textHeight).attr("rx",V().state.radius),t.insert("rect",":first-child").attr("x",r).attr("y",V().state.titleShift-V().state.textHeight-V().state.padding).attr("width",l).attr("height",h.height+3+2*V().state.textHeight).attr("rx",V().state.radius),t},mr=function(t,e){e.attr("class","state-note");var n=e.append("rect").attr("x",0).attr("y",V().state.padding),r=function(t,e,n,r){var i=0,o=r.append("text");o.style("text-anchor","start"),o.attr("class","noteText");var a=t.replace(/\r\n/g,"
    "),u=(a=a.replace(/\n/g,"
    ")).split(//gi),s=1.25*V().state.noteMargin,c=!0,f=!1,l=void 0;try{for(var h,d=u[Symbol.iterator]();!(c=(h=d.next()).done);c=!0){var p=h.value.trim();if(p.length>0){var g=o.append("tspan");if(g.text(p),0===s)s+=g.node().getBBox().height;i+=s,g.attr("x",e+V().state.noteMargin),g.attr("y",n+i+1.25*V().state.noteMargin)}}}catch(t){f=!0,l=t}finally{try{c||null==d.return||d.return()}finally{if(f)throw l}}return{textWidth:o.node().getBBox().width,textHeight:i}}(t,0,0,e.append("g")),i=r.textWidth,o=r.textHeight;return n.attr("height",o+2*V().state.noteMargin),n.attr("width",i+2*V().state.noteMargin),n},_r=function(t,e){var n=e.id,r={id:n,label:e.id,width:0,height:0},i=t.append("g").attr("id",n).attr("class","stateGroup");"start"===e.type&&function(t){t.append("circle").style("stroke","black").style("fill","black").attr("r",V().state.sizeUnit).attr("cx",V().state.padding+V().state.sizeUnit).attr("cy",V().state.padding+V().state.sizeUnit)}(i),"end"===e.type&&function(t){t.append("circle").style("stroke","black").style("fill","white").attr("r",V().state.sizeUnit+V().state.miniPadding).attr("cx",V().state.padding+V().state.sizeUnit+V().state.miniPadding).attr("cy",V().state.padding+V().state.sizeUnit+V().state.miniPadding),t.append("circle").style("stroke","black").style("fill","black").attr("r",V().state.sizeUnit).attr("cx",V().state.padding+V().state.sizeUnit+2).attr("cy",V().state.padding+V().state.sizeUnit+2)}(i),"fork"!==e.type&&"join"!==e.type||function(t,e){var n=V().state.forkWidth,r=V().state.forkHeight;if(e.parentId){var 
i=n;n=r,r=i}t.append("rect").style("stroke","black").style("fill","black").attr("width",n).attr("height",r).attr("x",V().state.padding).attr("y",V().state.padding)}(i,e),"note"===e.type&&mr(e.note.text,i),"divider"===e.type&&function(t){t.append("line").style("stroke","grey").style("stroke-dasharray","3").attr("x1",V().state.textHeight).attr("class","divider").attr("x2",2*V().state.textHeight).attr("y1",0).attr("y2",0)}(i),"default"===e.type&&0===e.descriptions.length&&function(t,e){var n=t.append("text").attr("x",2*V().state.padding).attr("y",V().state.textHeight+2*V().state.padding).attr("font-size",V().state.fontSize).attr("class","state-title").text(e.id),r=n.node().getBBox();t.insert("rect",":first-child").attr("x",V().state.padding).attr("y",V().state.padding).attr("width",r.width+2*V().state.padding).attr("height",r.height+2*V().state.padding).attr("rx",V().state.radius)}(i,e),"default"===e.type&&e.descriptions.length>0&&br(i,e);var o=i.node().getBBox();return r.width=o.width+2*V().state.padding,r.height=o.height+2*V().state.padding,yr(n,r),r},wr=0;dr.parser.yy=hr;var xr={},kr=function t(e,n,r,i){var o,a=new lt.a.Graph({compound:!0,multigraph:!0}),u=!0;for(o=0;o "+t.w+": "+JSON.stringify(a.edge(t))),function(t,e,n){e.points=e.points.filter((function(t){return!Number.isNaN(t.y)}));var r=e.points,i=C.v().x((function(t){return t.x})).y((function(t){return t.y})).curve(C.d),o=t.append("path").attr("d",i(r)).attr("id","edge"+wr).attr("class","transition"),a="";if(V().state.arrowMarkerAbsolute&&(a=(a=(a=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),o.attr("marker-end","url("+a+"#"+function(t){switch(t){case hr.relationType.AGGREGATION:return"aggregation";case hr.relationType.EXTENSION:return"extension";case hr.relationType.COMPOSITION:return"composition";case hr.relationType.DEPENDENCY:return"dependency"}}(hr.relationType.DEPENDENCY)+"End)"),void 0!==n.title){for(var 
u=t.append("g").attr("class","stateLabel"),s=ut(e.points),c=s.x,f=s.y,l=pt(n.title),h=0,d=[],p=0,g=0,y=0;y<=l.length;y++){var b=u.append("text").attr("text-anchor","middle").text(l[y]).attr("x",c).attr("y",f+h),v=b.node().getBBox();if(p=Math.max(p,v.width),g=Math.min(g,v.x),J.info(v.x,c,f+h),0===h){var m=b.node().getBBox();h=m.height,J.info("Title height",h,f)}d.push(b)}var _=h*l.length;if(l.length>1){var w=(l.length-1)*h*.5;d.forEach((function(t,e){return t.attr("y",f+e*h-w)})),_=h*l.length}var x=u.node().getBBox();u.insert("rect",":first-child").attr("class","box").attr("x",c-p/2-V().state.padding/2).attr("y",f-_/2-V().state.padding/2-3.5).attr("width",p+V().state.padding).attr("height",_+V().state.padding),J.info(x)}wr++}(n,a.edge(t),a.edge(t).relation))})),_=m.getBBox();var w={id:r||"root",label:r||"root",width:0,height:0};return w.width=_.width+2*nr.padding,w.height=_.height+2*nr.padding,J.debug("Doc rendered",w,a),w},Er=function(){},Ar=function(t,e){nr=V().state,dr.parser.yy.clear(),dr.parser.parse(t),J.debug("Rendering diagram "+t);var n=D.k("[id='".concat(e,"']"));n.append("defs").append("marker").attr("id","dependencyEnd").attr("refX",19).attr("refY",7).attr("markerWidth",20).attr("markerHeight",28).attr("orient","auto").append("path").attr("d","M 19,7 L9,13 L14,7 L9,1 Z"),new lt.a.Graph({multigraph:!0,compound:!0,rankdir:"RL"}).setDefaultEdgeLabel((function(){return{}}));var r=hr.getRootDoc();kr(r,n,void 0,!1);var i=nr.padding,o=n.node().getBBox(),a=o.width+2*i,u=o.height+2*i;n.attr("width",1.75*a),n.attr("viewBox","".concat(o.x-nr.padding," ").concat(o.y-nr.padding," ")+a+" "+u)},Sr=n(119),Mr=n.n(Sr),Tr=n(448),Or=n.n(Tr),Dr={},Cr=null,Nr={master:Cr},Ir="master",Rr="LR",jr=0;function Lr(){return Or()({length:7,characters:"0123456789abcdef"})}function Br(t,e){for(J.debug("Entering isfastforwardable:",t.id,e.id);t.seq<=e.seq&&t!==e&&null!=e.parent;){if(Array.isArray(e.parent))return J.debug("In merge 
commit:",e.parent),Br(t,Dr[e.parent[0]])||Br(t,Dr[e.parent[1]]);e=Dr[e.parent]}return J.debug(t.id,e.id),t.id===e.id}var Pr={};function Fr(t,e,n){var r=t.indexOf(e);-1===r?t.push(n):t.splice(r,1,n)}var qr,Ur=function(){var t=Object.keys(Dr).map((function(t){return Dr[t]}));return t.forEach((function(t){J.debug(t.id)})),Mr.a.orderBy(t,["seq"],["desc"])},zr={setDirection:function(t){Rr=t},setOptions:function(t){J.debug("options str",t),t=(t=t&&t.trim())||"{}";try{Pr=JSON.parse(t)}catch(t){J.error("error while parsing gitGraph options",t.message)}},getOptions:function(){return Pr},commit:function(t){var e={id:Lr(),message:t,seq:jr++,parent:null==Cr?null:Cr.id};Cr=e,Dr[e.id]=e,Nr[Ir]=e.id,J.debug("in pushCommit "+e.id)},branch:function(t){Nr[t]=null!=Cr?Cr.id:null,J.debug("in createBranch")},merge:function(t){var e=Dr[Nr[Ir]],n=Dr[Nr[t]];if(function(t,e){return t.seq>e.seq&&Br(e,t)}(e,n))J.debug("Already merged");else{if(Br(e,n))Nr[Ir]=Nr[t],Cr=Dr[Nr[Ir]];else{var r={id:Lr(),message:"merged branch "+t+" into "+Ir,seq:jr++,parent:[null==Cr?null:Cr.id,Nr[t]]};Cr=r,Dr[r.id]=r,Nr[Ir]=r.id}J.debug(Nr),J.debug("in mergeBranch")}},checkout:function(t){J.debug("in checkout");var e=Nr[Ir=t];Cr=Dr[e]},reset:function(t){J.debug("in reset",t);var e=t.split(":")[0],n=parseInt(t.split(":")[1]),r="HEAD"===e?Cr:Dr[Nr[e]];for(J.debug(r,n);n>0;)if(n--,!(r=Dr[r.parent])){var i="Critical error - unique parent commit not found during reset";throw J.error(i),i}Cr=r,Nr[Ir]=r.id},prettyPrint:function(){J.debug(Dr),function t(e){var n=Mr.a.maxBy(e,"seq"),r="";e.forEach((function(t){r+=t===n?"\t*":"\t|"}));var i=[r,n.id,n.seq];for(var o in Nr)Nr[o]===n.id&&i.push(o);if(J.debug(i.join(" ")),Array.isArray(n.parent)){var a=Dr[n.parent[0]];Fr(e,n,a),e.push(Dr[n.parent[1]])}else{if(null==n.parent)return;var u=Dr[n.parent];Fr(e,n,u)}t(e=Mr.a.uniqBy(e,"id"))}([Ur()[0]])},clear:function(){Dr={},Nr={master:Cr=null},Ir="master",jr=0},getBranchesAsObjArray:function(){var t=[];for(var e in 
Nr)t.push({name:e,commit:Dr[Nr[e]]});return t},getBranches:function(){return Nr},getCommits:function(){return Dr},getCommitsArray:Ur,getCurrentBranch:function(){return Ir},getDirection:function(){return Rr},getHead:function(){return Cr}},Yr=n(213),Vr=n.n(Yr),Gr={},Hr={nodeSpacing:150,nodeFillColor:"yellow",nodeStrokeWidth:2,nodeStrokeColor:"grey",lineStrokeWidth:4,branchOffset:50,lineColor:"grey",leftMargin:50,branchColors:["#442f74","#983351","#609732","#AA9A39"],nodeRadius:10,nodeLabel:{width:75,height:100,x:-25,y:0}},Wr={};function $r(t,e,n,r){var i=nt(r,C.d),o=Hr.branchColors[n%Hr.branchColors.length],a=C.v().x((function(t){return Math.round(t.x)})).y((function(t){return Math.round(t.y)})).curve(i);t.append("svg:path").attr("d",a(e)).style("stroke",o).style("stroke-width",Hr.lineStrokeWidth).style("fill","none")}function Kr(t,e){e=e||t.node().getBBox();var n=t.node().getCTM();return{left:n.e+e.x*n.a,top:n.f+e.y*n.d,width:e.width,height:e.height}}function Zr(t,e,n,r,i){J.debug("svgDrawLineForCommits: ",e,n);var o=Kr(t.select("#node-"+e+" circle")),a=Kr(t.select("#node-"+n+" circle"));switch(r){case"LR":if(o.left-a.left>Hr.nodeSpacing){var u={x:o.left-Hr.nodeSpacing,y:a.top+a.height/2};$r(t,[u,{x:a.left+a.width,y:a.top+a.height/2}],i,"linear"),$r(t,[{x:o.left,y:o.top+o.height/2},{x:o.left-Hr.nodeSpacing/2,y:o.top+o.height/2},{x:o.left-Hr.nodeSpacing/2,y:u.y},u],i)}else $r(t,[{x:o.left,y:o.top+o.height/2},{x:o.left-Hr.nodeSpacing/2,y:o.top+o.height/2},{x:o.left-Hr.nodeSpacing/2,y:a.top+a.height/2},{x:a.left+a.width,y:a.top+a.height/2}],i);break;case"BT":if(a.top-o.top>Hr.nodeSpacing){var s={x:a.left+a.width/2,y:o.top+o.height+Hr.nodeSpacing};$r(t,[s,{x:a.left+a.width/2,y:a.top}],i,"linear"),$r(t,[{x:o.left+o.width/2,y:o.top+o.height},{x:o.left+o.width/2,y:o.top+o.height+Hr.nodeSpacing/2},{x:a.left+a.width/2,y:s.y-Hr.nodeSpacing/2},s],i)}else 
$r(t,[{x:o.left+o.width/2,y:o.top+o.height},{x:o.left+o.width/2,y:o.top+Hr.nodeSpacing/2},{x:a.left+a.width/2,y:a.top-Hr.nodeSpacing/2},{x:a.left+a.width/2,y:a.top}],i)}}function Xr(t,e){return t.select(e).node().cloneNode(!0)}function Jr(t,e,n,r){var i,o=Object.keys(Gr).length;if("string"==typeof e)do{if(i=Gr[e],J.debug("in renderCommitHistory",i.id,i.seq),t.select("#node-"+e).size()>0)return;t.append((function(){return Xr(t,"#def-commit")})).attr("class","commit").attr("id",(function(){return"node-"+i.id})).attr("transform",(function(){switch(r){case"LR":return"translate("+(i.seq*Hr.nodeSpacing+Hr.leftMargin)+", "+qr*Hr.branchOffset+")";case"BT":return"translate("+(qr*Hr.branchOffset+Hr.leftMargin)+", "+(o-i.seq)*Hr.nodeSpacing+")"}})).attr("fill",Hr.nodeFillColor).attr("stroke",Hr.nodeStrokeColor).attr("stroke-width",Hr.nodeStrokeWidth);var a=void 0;for(var u in n)if(n[u].commit===i){a=n[u];break}a&&(J.debug("found branch ",a.name),t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","branch-label").text(a.name+", ")),t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","commit-id").text(i.id),""!==i.message&&"BT"===r&&t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","commit-msg").text(", "+i.message),e=i.parent}while(e&&Gr[e]);Array.isArray(e)&&(J.debug("found merge commmit",e),Jr(t,e[0],n,r),qr++,Jr(t,e[1],n,r),qr--)}function Qr(t,e,n,r){for(r=r||0;e.seq>0&&!e.lineDrawn;)"string"==typeof e.parent?(Zr(t,e.id,e.parent,n,r),e.lineDrawn=!0,e=Gr[e.parent]):Array.isArray(e.parent)&&(Zr(t,e.id,e.parent[0],n,r),Zr(t,e.id,e.parent[1],n,r+1),Qr(t,Gr[e.parent[1]],n,r+1),e.lineDrawn=!0,e=Gr[e.parent[0]])}var ti,ei=function(t){Wr=t},ni=function(t,e,n){try{var r=Vr.a.parser;r.yy=zr,r.yy.clear(),J.debug("in gitgraph renderer",t+"\n","id:",e,n),r.parse(t+"\n"),Hr=Mr.a.assign(Hr,Wr,zr.getOptions()),J.debug("effective options",Hr);var i=zr.getDirection();Gr=zr.getCommits();var 
o=zr.getBranchesAsObjArray();"BT"===i&&(Hr.nodeLabel.x=o.length*Hr.branchOffset,Hr.nodeLabel.width="100%",Hr.nodeLabel.y=-2*Hr.nodeRadius);var a=D.k('[id="'.concat(e,'"]'));for(var u in function(t){t.append("defs").append("g").attr("id","def-commit").append("circle").attr("r",Hr.nodeRadius).attr("cx",0).attr("cy",0),t.select("#def-commit").append("foreignObject").attr("width",Hr.nodeLabel.width).attr("height",Hr.nodeLabel.height).attr("x",Hr.nodeLabel.x).attr("y",Hr.nodeLabel.y).attr("class","node-label").attr("requiredFeatures","http://www.w3.org/TR/SVG11/feature#Extensibility").append("p").html("")}(a),qr=1,o){var s=o[u];Jr(a,s.commit.id,o,i),Qr(a,s.commit,i),qr++}a.attr("height",(function(){return"BT"===i?Object.keys(Gr).length*Hr.nodeSpacing:(o.length+1)*Hr.branchOffset}))}catch(t){J.error("Error while rendering gitgraph"),J.error(t.message)}},ri="",ii=!1,oi={setMessage:function(t){J.debug("Setting message to: "+t),ri=t},getMessage:function(){return ri},setInfo:function(t){ii=t},getInfo:function(){return ii}},ai=n(214),ui=n.n(ai),si={},ci=function(t){Object.keys(t).forEach((function(e){si[e]=t[e]}))},fi=function(t,e,n){try{var r=ui.a.parser;r.yy=oi,J.debug("Renering info diagram\n"+t),r.parse(t),J.debug("Parsed info diagram");var i=D.k("#"+e);i.append("g").append("text").attr("x",100).attr("y",40).attr("class","version").attr("font-size","32px").style("text-anchor","middle").text("v "+n),i.attr("height",100),i.attr("width",400)}catch(t){J.error("Error while rendering info diagram"),J.error(t.message)}},li={},hi="",di={addSection:function(t,e){void 0===li[t]&&(li[t]=e,J.debug("Added new section :",t))},getSections:function(){return li},cleanupValue:function(t){return":"===t.substring(0,1)?(t=t.substring(1).trim(),Number(t.trim())):Number(t.trim())},clear:function(){li={},hi=""},setTitle:function(t){hi=t},getTitle:function(){return hi}},pi=n(215),gi=n.n(pi),yi={},bi=function(t){Object.keys(t).forEach((function(e){yi[e]=t[e]}))},vi=function(t,e){try{var 
n=gi.a.parser;n.yy=di,J.debug("Rendering info diagram\n"+t),n.yy.clear(),n.parse(t),J.debug("Parsed info diagram");var r=document.getElementById(e);void 0===(ti=r.parentElement.offsetWidth)&&(ti=1200),void 0!==yi.useWidth&&(ti=yi.useWidth);r.setAttribute("height","100%"),r.setAttribute("viewBox","0 0 "+ti+" 450");var i=ti,o=Math.min(i,450)/2-40,a=D.k("#"+e).append("svg").attr("width",i).attr("height",450).append("g").attr("transform","translate("+i/2+",225)"),u=di.getSections(),s=0;Object.keys(u).forEach((function(t){s+=u[t]})),J.info(u);var c=T.k().domain(u).range(O.pb),f=C.A().value((function(t){return t.value}))(l.a(u)),h=C.a().innerRadius(0).outerRadius(o);a.selectAll("mySlices").data(f).enter().append("path").attr("d",h).attr("fill",(function(t){return c(t.data.key)})).attr("stroke","black").style("stroke-width","2px").style("opacity",.7),a.selectAll("mySlices").data(f).enter().append("text").text((function(t){return(t.data.value/s*100).toFixed(0)+"%"})).attr("transform",(function(t){return"translate("+h.centroid(t)+")"})).style("text-anchor","middle").attr("class","slice").style("font-size",17),a.append("text").text(n.yy.getTitle()).attr("x",0).attr("y",-200).attr("class","pieTitleText");var d=a.selectAll(".legend").data(c.domain()).enter().append("g").attr("class","legend").attr("transform",(function(t,e){return"translate(216,"+(22*e-22*c.domain().length/2)+")"}));d.append("rect").attr("width",18).attr("height",18).style("fill",c).style("stroke",c),d.append("text").attr("x",22).attr("y",14).text((function(t){return t}))}catch(t){J.error("Error while rendering info diagram"),J.error(t.message)}},mi={},_i=[],wi="",xi={Cardinality:{ZERO_OR_ONE:"ZERO_OR_ONE",ZERO_OR_MORE:"ZERO_OR_MORE",ONE_OR_MORE:"ONE_OR_MORE",ONLY_ONE:"ONLY_ONE"},Identification:{NON_IDENTIFYING:"NON_IDENTIFYING",IDENTIFYING:"IDENTIFYING"},addEntity:function(t){void 0===mi[t]&&(mi[t]=t,J.debug("Added new entity :",t))},getEntities:function(){return mi},addRelationship:function(t,e,n,r){var 
i={entityA:t,roleA:e,entityB:n,relSpec:r};_i.push(i),J.debug("Added new relationship :",i)},getRelationships:function(){return _i},clear:function(){mi={},_i=[],wi=""},setTitle:function(t){wi=t},getTitle:function(){return wi}},ki=n(216),Ei=n.n(ki),Ai={ONLY_ONE_START:"ONLY_ONE_START",ONLY_ONE_END:"ONLY_ONE_END",ZERO_OR_ONE_START:"ZERO_OR_ONE_START",ZERO_OR_ONE_END:"ZERO_OR_ONE_END",ONE_OR_MORE_START:"ONE_OR_MORE_START",ONE_OR_MORE_END:"ONE_OR_MORE_END",ZERO_OR_MORE_START:"ZERO_OR_MORE_START",ZERO_OR_MORE_END:"ZERO_OR_MORE_END"},Si=Ai,Mi=function(t,e){var n;t.append("defs").append("marker").attr("id",Ai.ONLY_ONE_START).attr("refX",0).attr("refY",9).attr("markerWidth",18).attr("markerHeight",18).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M9,0 L9,18 M15,0 L15,18"),t.append("defs").append("marker").attr("id",Ai.ONLY_ONE_END).attr("refX",18).attr("refY",9).attr("markerWidth",18).attr("markerHeight",18).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M3,0 L3,18 M9,0 L9,18"),(n=t.append("defs").append("marker").attr("id",Ai.ZERO_OR_ONE_START).attr("refX",0).attr("refY",9).attr("markerWidth",30).attr("markerHeight",18).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",21).attr("cy",9).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M9,0 L9,18"),(n=t.append("defs").append("marker").attr("id",Ai.ZERO_OR_ONE_END).attr("refX",30).attr("refY",9).attr("markerWidth",30).attr("markerHeight",18).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",9).attr("cy",9).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M21,0 
L21,18"),t.append("defs").append("marker").attr("id",Ai.ONE_OR_MORE_START).attr("refX",18).attr("refY",18).attr("markerWidth",45).attr("markerHeight",36).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M0,18 Q 18,0 36,18 Q 18,36 0,18 M42,9 L42,27"),t.append("defs").append("marker").attr("id",Ai.ONE_OR_MORE_END).attr("refX",27).attr("refY",18).attr("markerWidth",45).attr("markerHeight",36).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M3,9 L3,27 M9,18 Q27,0 45,18 Q27,36 9,18"),(n=t.append("defs").append("marker").attr("id",Ai.ZERO_OR_MORE_START).attr("refX",18).attr("refY",18).attr("markerWidth",57).attr("markerHeight",36).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",48).attr("cy",18).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M0,18 Q18,0 36,18 Q18,36 0,18"),(n=t.append("defs").append("marker").attr("id",Ai.ZERO_OR_MORE_END).attr("refX",39).attr("refY",18).attr("markerWidth",57).attr("markerHeight",36).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",9).attr("cy",18).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M21,18 Q39,0 57,18 Q39,36 21,18")},Ti={},Oi=function(t){return(t.entityA+t.roleA+t.entityB).replace(/\s/g,"")},Di=0,Ci=function(t){for(var e=Object.keys(t),n=0;nPi.maxTextSize&&(i="graph TB;a[Maximum text size in diagram exceeded];style a fill:#faa"),void 0!==r)r.innerHTML="",D.k(r).append("div").attr("id","d"+t).attr("style","font-family: "+Pi.fontFamily).append("svg").attr("id",t).attr("width","100%").attr("xmlns","http://www.w3.org/2000/svg").append("g");else{var o=document.getElementById(t);o&&o.remove();var 
a=document.querySelector("#d"+t);a&&(a.innerHTML=""),D.k("body").append("div").attr("id","d"+t).append("svg").attr("id",t).attr("width","100%").attr("xmlns","http://www.w3.org/2000/svg").append("g")}window.txt=i,i=function(t){var e=t;return e=(e=(e=e.replace(/style.*:\S*#.*;/g,(function(t){return t.substring(0,t.length-1)}))).replace(/classDef.*:\S*#.*;/g,(function(t){return t.substring(0,t.length-1)}))).replace(/#\w+;/g,(function(t){var e=t.substring(1,t.length-1);return/^\+?\d+$/.test(e)?"fl°°"+e+"¶ß":"fl°"+e+"¶ß"}))}(i);var u=D.k("#d"+t).node(),s=ot(i),c=u.firstChild,f=c.firstChild,l=Ri[Pi.theme];if(void 0===l&&(l=""),void 0!==Pi.themeCSS&&(l+="\n".concat(Pi.themeCSS)),void 0!==Pi.fontFamily&&(l+="\n:root { --mermaid-font-family: ".concat(Pi.fontFamily,"}")),void 0!==Pi.altFontFamily&&(l+="\n:root { --mermaid-alt-font-family: ".concat(Pi.altFontFamily,"}")),"flowchart"===s){var h=ie(i);for(var d in h)l+="\n.".concat(d," > * { ").concat(h[d].styles.join(" !important; ")," !important; }"),h[d].textStyles&&(l+="\n.".concat(d," tspan { ").concat(h[d].textStyles.join(" !important; ")," !important; }"))}var p=document.createElement("style");p.innerHTML=F()(l,"#".concat(t)),c.insertBefore(p,f);var g=document.createElement("style"),y=window.getComputedStyle(c);switch(g.innerHTML="#".concat(t," {\n color: ").concat(y.color,";\n font: ").concat(y.font,";\n }"),c.insertBefore(g,f),s){case"git":Pi.flowchart.arrowMarkerAbsolute=Pi.arrowMarkerAbsolute,ei(Pi.git),ni(i,t,!1);break;case"flowchart":Pi.flowchart.arrowMarkerAbsolute=Pi.arrowMarkerAbsolute,re(Pi.flowchart),oe(i,t,!1);break;case"flowchart-v2":Pi.flowchart.arrowMarkerAbsolute=Pi.arrowMarkerAbsolute,ce.setConf(Pi.flowchart),ce.draw(i,t,!1);break;case"sequence":Pi.sequence.arrowMarkerAbsolute=Pi.arrowMarkerAbsolute,Pi.sequenceDiagram?(Pe(Object.assign(Pi.sequence,Pi.sequenceDiagram)),console.error("`mermaid config.sequenceDiagram` has been renamed to `config.sequence`. 
Please update your mermaid config.")):Pe(Pi.sequence),Fe(i,t);break;case"gantt":Pi.gantt.arrowMarkerAbsolute=Pi.arrowMarkerAbsolute,wn(Pi.gantt),xn(i,t);break;case"class":Pi.class.arrowMarkerAbsolute=Pi.arrowMarkerAbsolute,Qn(Pi.class),tr(i,t);break;case"state":Er(Pi.state),Ar(i,t);break;case"info":Pi.class.arrowMarkerAbsolute=Pi.arrowMarkerAbsolute,ci(Pi.class),fi(i,t,q.version);break;case"pie":Pi.class.arrowMarkerAbsolute=Pi.arrowMarkerAbsolute,bi(Pi.class),vi(i,t,q.version);break;case"er":Ci(Pi.er),Ni(i,t,q.version)}D.k('[id="'.concat(t,'"]')).selectAll("foreignobject > *").attr("xmlns","http://www.w3.org/1999/xhtml");var b=D.k("#d"+t).node().innerHTML;if(Pi.arrowMarkerAbsolute&&"false"!==Pi.arrowMarkerAbsolute||(b=b.replace(/marker-end="url\(.*?#/g,'marker-end="url(#',"g")),b=function(t){var e=t;return e=(e=(e=e.replace(/fl°°/g,(function(){return"&#"}))).replace(/fl°/g,(function(){return"&"}))).replace(/¶ß/g,(function(){return";"}))}(b),void 0!==n)switch(s){case"flowchart":n(b,jt.bindFunctions);break;case"gantt":n(b,bn.bindFunctions);break;case"class":n(b,Bn.bindFunctions);break;default:n(b)}else J.debug("CB = undefined!");var v=D.k("#d"+t).node();return null!==v&&"function"==typeof v.remove&&D.k("#d"+t).node().remove(),b},parse:function(t){var e,n=ot(t);switch(J.debug("Type "+n),n){case"git":(e=Vr.a).parser.yy=zr;break;case"flowchart":jt.clear(),(e=Bt.a).parser.yy=jt;break;case"flowchart-v2":jt.clear(),(e=ce).parser.yy=jt;break;case"sequence":(e=me.a).parser.yy=De;break;case"gantt":(e=Ue.a).parser.yy=bn;break;case"class":(e=Fn.a).parser.yy=Bn;break;case"state":(e=pr.a).parser.yy=hr;break;case"info":J.debug("info info info"),(e=ui.a).parser.yy=oi;break;case"pie":J.debug("pie"),(e=gi.a).parser.yy=di;break;case"er":J.debug("er"),(e=Ei.a).parser.yy=xi}e.parser.yy.parseError=function(t,e){throw{str:t,hash:e}},e.parse(t)},initialize:function(t){J.debug("Initializing mermaidAPI ",q.version),"object"===Ii(t)&&function(t){for(var 
e=Object.keys(t),n=0;nMath.abs(o)*u?(s<0&&(u=-u),n=u*o/s,r=u):(o<0&&(c=-c),n=c,r=c*s/o);return{x:i+n,y:a+r}},buildLayerMatrix:function(t){var e=r.map(r.range(o(t)+1),(function(){return[]}));return r.forEach(t.nodes(),(function(n){var i=t.node(n),a=i.rank;r.isUndefined(a)||(e[a][i.order]=n)})),e},normalizeRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank})));r.forEach(t.nodes(),(function(n){var i=t.node(n);r.has(i,"rank")&&(i.rank-=e)}))},removeEmptyRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank}))),n=[];r.forEach(t.nodes(),(function(r){var i=t.node(r).rank-e;n[i]||(n[i]=[]),n[i].push(r)}));var i=0,a=t.graph().nodeRankFactor;r.forEach(n,(function(e,n){r.isUndefined(e)&&n%a!=0?--i:i&&r.forEach(e,(function(e){t.node(e).rank+=i}))}))},addBorderNode:function(t,e,n,r){var i={width:0,height:0};arguments.length>=4&&(i.rank=n,i.order=r);return a(t,"border",i,e)},maxRank:o,partition:function(t,e){var n={lhs:[],rhs:[]};return r.forEach(t,(function(t){e(t)?n.lhs.push(t):n.rhs.push(t)})),n},time:function(t,e){var n=r.now();try{return e()}finally{console.log(t+" time: "+(r.now()-n)+"ms")}},notime:function(t,e){return e()}}},function(t,e,n){t.exports={graphlib:n(20),layout:n(317),debug:n(373),util:{time:n(9).time,notime:n(9).notime},version:n(374)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(172),i=n(173),a=n(174),o={channel:r.default,lang:i.default,unit:a.default};e.default=o},function(t,e){var n,r,i=t.exports={};function a(){throw new Error("setTimeout has not been defined")}function o(){throw new Error("clearTimeout has not been defined")}function s(t){if(n===setTimeout)return setTimeout(t,0);if((n===a||!n)&&setTimeout)return n=setTimeout,setTimeout(t,0);try{return n(t,0)}catch(e){try{return n.call(null,t,0)}catch(e){return n.call(this,t,0)}}}!function(){try{n="function"==typeof setTimeout?setTimeout:a}catch(t){n=a}try{r="function"==typeof 
clearTimeout?clearTimeout:o}catch(t){r=o}}();var c,u=[],l=!1,h=-1;function f(){l&&c&&(l=!1,c.length?u=c.concat(u):h=-1,u.length&&d())}function d(){if(!l){var t=s(f);l=!0;for(var e=u.length;e;){for(c=u,u=[];++h1)for(var n=1;nh&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:m,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,y.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},B={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return 
this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),13;case 1:return this.begin("type_directive"),14;case 2:return this.popState(),this.begin("arg_directive"),11;case 3:return this.popState(),this.popState(),16;case 4:return 15;case 5:case 6:break;case 7:return 10;case 8:break;case 9:case 10:return 17;case 11:return this.begin("struct"),34;case 12:return"EOF_IN_STRUCT";case 13:return"OPEN_IN_STRUCT";case 14:return this.popState(),36;case 15:break;case 16:return"MEMBER";case 17:return 32;case 18:return 58;case 19:return 51;case 20:return 52;case 21:return 54;case 22:return 37;case 23:return 38;case 24:this.begin("generic");break;case 25:this.popState();break;case 26:return"GENERICTYPE";case 27:this.begin("string");break;case 28:this.popState();break;case 29:return"STR";case 
30:this.begin("bqstring");break;case 31:this.popState();break;case 32:return"BQUOTE_STR";case 33:this.begin("href");break;case 34:this.popState();break;case 35:return 57;case 36:this.begin("callback_name");break;case 37:this.popState();break;case 38:this.popState(),this.begin("callback_args");break;case 39:return 55;case 40:this.popState();break;case 41:return 56;case 42:case 43:case 44:case 45:return 53;case 46:case 47:return 46;case 48:case 49:return 48;case 50:return 47;case 51:return 45;case 52:return 49;case 53:return 50;case 54:return 26;case 55:return 33;case 56:return 70;case 57:return"DOT";case 58:return"PLUS";case 59:return 67;case 60:case 61:return"EQUALS";case 62:return 74;case 63:return"PUNCTUATION";case 64:return 73;case 65:return 72;case 66:return 69;case 67:return 19}},rules:[/^(?:%%\{)/,/^(?:((?:(?!\}%%)[^:.])*))/,/^(?::)/,/^(?:\}%%)/,/^(?:((?:(?!\}%%).|\n)*))/,/^(?:%%(?!\{)*[^\n]*(\r?\n?)+)/,/^(?:%%[^\n]*(\r?\n)*)/,/^(?:(\r?\n)+)/,/^(?:\s+)/,/^(?:classDiagram-v2\b)/,/^(?:classDiagram\b)/,/^(?:[{])/,/^(?:$)/,/^(?:[{])/,/^(?:[}])/,/^(?:[\n])/,/^(?:[^{}\n]*)/,/^(?:class\b)/,/^(?:cssClass\b)/,/^(?:callback\b)/,/^(?:link\b)/,/^(?:click\b)/,/^(?:<<)/,/^(?:>>)/,/^(?:[~])/,/^(?:[~])/,/^(?:[^~]*)/,/^(?:["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:[`])/,/^(?:[`])/,/^(?:[^`]+)/,/^(?:href[\s]+["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:call[\s]+)/,/^(?:\([\s]*\))/,/^(?:\()/,/^(?:[^(]*)/,/^(?:\))/,/^(?:[^)]*)/,/^(?:_self\b)/,/^(?:_blank\b)/,/^(?:_parent\b)/,/^(?:_top\b)/,/^(?:\s*<\|)/,/^(?:\s*\|>)/,/^(?:\s*>)/,/^(?:\s*<)/,/^(?:\s*\*)/,/^(?:\s*o\b)/,/^(?:--)/,/^(?:\.\.)/,/^(?::{1}[^:\n;]+)/,/^(?::{3})/,/^(?:-)/,/^(?:\.)/,/^(?:\+)/,/^(?:%)/,/^(?:=)/,/^(?:=)/,/^(?:\w+)/,/^(?:[!"#$%&'*+,-.`?\\/])/,/^(?:[0-9]+)/,/^(?:[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6]|[\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377]|[\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5]|[\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA]|[\u05
F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE]|[\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA]|[\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0]|[\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977]|[\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2]|[\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A]|[\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39]|[\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8]|[\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C]|[\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C]|[\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99]|[\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0]|[\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D]|[\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3]|[\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10]|[\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1]|[\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81]|[\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3]|[\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6]|[\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A]|[\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081]|[\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D]|[\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0]|[\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310]|[\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C]|[\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711]|[\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\
u1770\u1780-\u17B3\u17D7]|[\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C]|[\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16]|[\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF]|[\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC]|[\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D]|[\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D]|[\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3]|[\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F]|[\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128]|[\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184]|[\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3]|[\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6]|[\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE]|[\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C]|[\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D]|[\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC]|[\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B]|[\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788]|[\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805]|[\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB]|[\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28]|[\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5]|[\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4]|[\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E]|[\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D]|[\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36]|[\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D]|[\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC]|[\u
FF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF]|[\uFFD2-\uFFD7\uFFDA-\uFFDC])/,/^(?:\s)/,/^(?:$)/],conditions:{arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},callback_args:{rules:[40,41],inclusive:!1},callback_name:{rules:[37,38,39],inclusive:!1},href:{rules:[34,35],inclusive:!1},struct:{rules:[12,13,14,15,16],inclusive:!1},generic:{rules:[25,26],inclusive:!1},bqstring:{rules:[31,32],inclusive:!1},string:{rules:[28,29],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,17,18,19,20,21,22,23,24,27,30,33,36,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67],inclusive:!0}}};function D(){this.yy={}}return N.lexer=B,D.prototype=N,N.Parser=D,new D}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(17).readFileSync(n(18).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(12),n(7)(t))},function(t,e){},function(t,e,n){(function(t){function n(t,e){for(var n=0,r=t.length-1;r>=0;r--){var i=t[r];"."===i?t.splice(r,1):".."===i?(t.splice(r,1),n++):n&&(t.splice(r,1),n--)}if(e)for(;n--;n)t.unshift("..");return t}function r(t,e){if(t.filter)return t.filter(e);for(var n=[],r=0;r=-1&&!i;a--){var o=a>=0?arguments[a]:t.cwd();if("string"!=typeof o)throw new TypeError("Arguments to path.resolve must be strings");o&&(e=o+"/"+e,i="/"===o.charAt(0))}return(i?"/":"")+(e=n(r(e.split("/"),(function(t){return!!t})),!i).join("/"))||"."},e.normalize=function(t){var a=e.isAbsolute(t),o="/"===i(t,-1);return(t=n(r(t.split("/"),(function(t){return!!t})),!a).join("/"))||a||(t="."),t&&o&&(t+="/"),(a?"/":"")+t},e.isAbsolute=function(t){return"/"===t.charAt(0)},e.join=function(){var t=Array.prototype.slice.call(arguments,0);return e.normalize(r(t,(function(t,e){if("string"!=typeof t)throw new TypeError("Arguments to path.join 
must be strings");return t})).join("/"))},e.relative=function(t,n){function r(t){for(var e=0;e=0&&""===t[n];n--);return e>n?[]:t.slice(e,n-e+1)}t=e.resolve(t).substr(1),n=e.resolve(n).substr(1);for(var i=r(t.split("/")),a=r(n.split("/")),o=Math.min(i.length,a.length),s=o,c=0;c=1;--a)if(47===(e=t.charCodeAt(a))){if(!i){r=a;break}}else i=!1;return-1===r?n?"/":".":n&&1===r?"/":t.slice(0,r)},e.basename=function(t,e){var n=function(t){"string"!=typeof t&&(t+="");var e,n=0,r=-1,i=!0;for(e=t.length-1;e>=0;--e)if(47===t.charCodeAt(e)){if(!i){n=e+1;break}}else-1===r&&(i=!1,r=e+1);return-1===r?"":t.slice(n,r)}(t);return e&&n.substr(-1*e.length)===e&&(n=n.substr(0,n.length-e.length)),n},e.extname=function(t){"string"!=typeof t&&(t+="");for(var e=-1,n=0,r=-1,i=!0,a=0,o=t.length-1;o>=0;--o){var s=t.charCodeAt(o);if(47!==s)-1===r&&(i=!1,r=o+1),46===s?-1===e?e=o:1!==a&&(a=1):-1!==e&&(a=-1);else if(!i){n=o+1;break}}return-1===e||-1===r||0===a||1===a&&e===r-1&&e===n+1?"":t.slice(e,r)};var i="b"==="ab".substr(-1)?function(t,e,n){return t.substr(e,n)}:function(t,e,n){return e<0&&(e=t.length+e),t.substr(e,n)}}).call(this,n(12))},function(t,e,n){var r=n(109),i="object"==typeof self&&self&&self.Object===Object&&self,a=r||i||Function("return this")();t.exports=a},function(t,e,n){var r;try{r=n(3)}catch(t){}r||(r=window.graphlib),t.exports=r},function(t,e){t.exports=function(t){return null!=t&&"object"==typeof t}},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,2],n=[1,3],r=[1,5],i=[1,7],a=[2,5],o=[1,15],s=[1,17],c=[1,19],u=[1,20],l=[1,21],h=[1,22],f=[1,30],d=[1,23],p=[1,24],y=[1,25],g=[1,26],m=[1,27],v=[1,32],b=[1,33],x=[1,34],_=[1,35],k=[1,31],w=[1,38],E=[1,4,5,14,15,17,19,20,22,23,24,25,26,27,36,37,38,39,42,45],T=[1,4,5,12,13,14,15,17,19,20,22,23,24,25,26,27,36,37,38,39,42,45],C=[1,4,5,7,14,15,17,19,20,22,23,24,25,26,27,36,37,38,39,42,45],S=[4,5,14,15,17,19,20,22,23,24,25,26,27,36,37,38,39,42,45],A={trace:function(){},yy:{},symbols_:{error:2,start:3,SPACE:4,NL:5,directive:6,SD:7,document:8,line:9,statement:10,idStatement:11,DESCR:12,"--\x3e":13,HIDE_EMPTY:14,scale:15,WIDTH:16,COMPOSIT_STATE:17,STRUCT_START:18,STRUCT_STOP:19,STATE_DESCR:20,AS:21,ID:22,FORK:23,JOIN:24,CHOICE:25,CONCURRENT:26,note:27,notePosition:28,NOTE_TEXT:29,direction:30,openDirective:31,typeDirective:32,closeDirective:33,":":34,argDirective:35,direction_tb:36,direction_bt:37,direction_rl:38,direction_lr:39,eol:40,";":41,EDGE_STATE:42,left_of:43,right_of:44,open_directive:45,type_directive:46,arg_directive:47,close_directive:48,$accept:0,$end:1},terminals_:{2:"error",4:"SPACE",5:"NL",7:"SD",12:"DESCR",13:"--\x3e",14:"HIDE_EMPTY",15:"scale",16:"WIDTH",17:"COMPOSIT_STATE",18:"STRUCT_START",19:"STRUCT_STOP",20:"STATE_DESCR",21:"AS",22:"ID",23:"FORK",24:"JOIN",25:"CHOICE",26:"CONCURRENT",27:"note",29:"NOTE_TEXT",34:":",36:"direction_tb",37:"direction_bt",38:"direction_rl",39:"direction_lr",41:";",42:"EDGE_STATE",43:"left_of",44:"right_of",45:"open_directive",46:"type_directive",47:"arg_directive",48:"close_directive"},productions_:[0,[3,2],[3,2],[3,2],[3,2],[8,0],[8,2],[9,2],[9,1],[9,1],[10,1],[10,2],[10,3],[10,4],[10,1],[10,2],[10,1],[10,4],[10,3],[10,6],[10,1],[10,1],[10,1],[10,1],[10,4],[10,4],[10,1],[10,1],[6,3],[6,5],[30,1],[30,1],[30,1],[30,1],[40,1],[40,1],[11,1],[11,1],[28,1],[28,1],[31,1],[32,1],[35,1],[33,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 4:return r.setRootDoc(a[s]),a[s];case 5:this.$=[];break;case 
6:"nl"!=a[s]&&(a[s-1].push(a[s]),this.$=a[s-1]);break;case 7:case 8:this.$=a[s];break;case 9:this.$="nl";break;case 10:this.$={stmt:"state",id:a[s],type:"default",description:""};break;case 11:this.$={stmt:"state",id:a[s-1],type:"default",description:r.trimColon(a[s])};break;case 12:this.$={stmt:"relation",state1:{stmt:"state",id:a[s-2],type:"default",description:""},state2:{stmt:"state",id:a[s],type:"default",description:""}};break;case 13:this.$={stmt:"relation",state1:{stmt:"state",id:a[s-3],type:"default",description:""},state2:{stmt:"state",id:a[s-1],type:"default",description:""},description:a[s].substr(1).trim()};break;case 17:this.$={stmt:"state",id:a[s-3],type:"default",description:"",doc:a[s-1]};break;case 18:var c=a[s],u=a[s-2].trim();if(a[s].match(":")){var l=a[s].split(":");c=l[0],u=[u,l[1]]}this.$={stmt:"state",id:c,type:"default",description:u};break;case 19:this.$={stmt:"state",id:a[s-3],type:"default",description:a[s-5],doc:a[s-1]};break;case 20:this.$={stmt:"state",id:a[s],type:"fork"};break;case 21:this.$={stmt:"state",id:a[s],type:"join"};break;case 22:this.$={stmt:"state",id:a[s],type:"choice"};break;case 23:this.$={stmt:"state",id:r.getDividerId(),type:"divider"};break;case 24:this.$={stmt:"state",id:a[s-1].trim(),note:{position:a[s-2].trim(),text:a[s].trim()}};break;case 30:r.setDirection("TB"),this.$={stmt:"dir",value:"TB"};break;case 31:r.setDirection("BT"),this.$={stmt:"dir",value:"BT"};break;case 32:r.setDirection("RL"),this.$={stmt:"dir",value:"RL"};break;case 33:r.setDirection("LR"),this.$={stmt:"dir",value:"LR"};break;case 36:case 37:this.$=a[s];break;case 40:r.parseDirective("%%{","open_directive");break;case 41:r.parseDirective(a[s],"type_directive");break;case 42:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 
43:r.parseDirective("}%%","close_directive","state")}},table:[{3:1,4:e,5:n,6:4,7:r,31:6,45:i},{1:[3]},{3:8,4:e,5:n,6:4,7:r,31:6,45:i},{3:9,4:e,5:n,6:4,7:r,31:6,45:i},{3:10,4:e,5:n,6:4,7:r,31:6,45:i},t([1,4,5,14,15,17,20,22,23,24,25,26,27,36,37,38,39,42,45],a,{8:11}),{32:12,46:[1,13]},{46:[2,40]},{1:[2,1]},{1:[2,2]},{1:[2,3]},{1:[2,4],4:o,5:s,6:28,9:14,10:16,11:18,14:c,15:u,17:l,20:h,22:f,23:d,24:p,25:y,26:g,27:m,30:29,31:6,36:v,37:b,38:x,39:_,42:k,45:i},{33:36,34:[1,37],48:w},t([34,48],[2,41]),t(E,[2,6]),{6:28,10:39,11:18,14:c,15:u,17:l,20:h,22:f,23:d,24:p,25:y,26:g,27:m,30:29,31:6,36:v,37:b,38:x,39:_,42:k,45:i},t(E,[2,8]),t(E,[2,9]),t(E,[2,10],{12:[1,40],13:[1,41]}),t(E,[2,14]),{16:[1,42]},t(E,[2,16],{18:[1,43]}),{21:[1,44]},t(E,[2,20]),t(E,[2,21]),t(E,[2,22]),t(E,[2,23]),{28:45,29:[1,46],43:[1,47],44:[1,48]},t(E,[2,26]),t(E,[2,27]),t(T,[2,36]),t(T,[2,37]),t(E,[2,30]),t(E,[2,31]),t(E,[2,32]),t(E,[2,33]),t(C,[2,28]),{35:49,47:[1,50]},t(C,[2,43]),t(E,[2,7]),t(E,[2,11]),{11:51,22:f,42:k},t(E,[2,15]),t(S,a,{8:52}),{22:[1,53]},{22:[1,54]},{21:[1,55]},{22:[2,38]},{22:[2,39]},{33:56,48:w},{48:[2,42]},t(E,[2,12],{12:[1,57]}),{4:o,5:s,6:28,9:14,10:16,11:18,14:c,15:u,17:l,19:[1,58],20:h,22:f,23:d,24:p,25:y,26:g,27:m,30:29,31:6,36:v,37:b,38:x,39:_,42:k,45:i},t(E,[2,18],{18:[1,59]}),{29:[1,60]},{22:[1,61]},t(C,[2,29]),t(E,[2,13]),t(E,[2,17]),t(S,a,{8:62}),t(E,[2,24]),t(E,[2,25]),{4:o,5:s,6:28,9:14,10:16,11:18,14:c,15:u,17:l,19:[1,63],20:h,22:f,23:d,24:p,25:y,26:g,27:m,30:29,31:6,36:v,37:b,38:x,39:_,42:k,45:i},t(E,[2,19])],defaultActions:{7:[2,40],8:[2,1],9:[2,2],10:[2,3],47:[2,38],48:[2,39],50:[2,42]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in 
this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:m,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,y.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},M={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new 
Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 36;case 1:return 37;case 2:return 38;case 3:return 39;case 4:return this.begin("open_directive"),45;case 5:return this.begin("type_directive"),46;case 6:return this.popState(),this.begin("arg_directive"),34;case 7:return this.popState(),this.popState(),48;case 8:return 47;case 9:case 10:break;case 11:return 5;case 12:case 13:case 14:case 15:break;case 16:return this.pushState("SCALE"),15;case 17:return 16;case 18:this.popState();break;case 19:this.pushState("STATE");break;case 20:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),23;case 21:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),24;case 22:return this.popState(),e.yytext=e.yytext.slice(0,-10).trim(),25;case 23:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),23;case 
24:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),24;case 25:return this.popState(),e.yytext=e.yytext.slice(0,-10).trim(),25;case 26:return 36;case 27:return 37;case 28:return 38;case 29:return 39;case 30:this.begin("STATE_STRING");break;case 31:return this.popState(),this.pushState("STATE_ID"),"AS";case 32:return this.popState(),"ID";case 33:this.popState();break;case 34:return"STATE_DESCR";case 35:return 17;case 36:this.popState();break;case 37:return this.popState(),this.pushState("struct"),18;case 38:return this.popState(),19;case 39:break;case 40:return this.begin("NOTE"),27;case 41:return this.popState(),this.pushState("NOTE_ID"),43;case 42:return this.popState(),this.pushState("NOTE_ID"),44;case 43:this.popState(),this.pushState("FLOATING_NOTE");break;case 44:return this.popState(),this.pushState("FLOATING_NOTE_ID"),"AS";case 45:break;case 46:return"NOTE_TEXT";case 47:return this.popState(),"ID";case 48:return this.popState(),this.pushState("NOTE_TEXT"),22;case 49:return this.popState(),e.yytext=e.yytext.substr(2).trim(),29;case 50:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),29;case 51:case 52:return 7;case 53:return 14;case 54:return 42;case 55:return 22;case 56:return e.yytext=e.yytext.trim(),12;case 57:return 13;case 58:return 26;case 59:return 5;case 
60:return"INVALID"}},rules:[/^(?:.*direction\s+TB[^\n]*)/i,/^(?:.*direction\s+BT[^\n]*)/i,/^(?:.*direction\s+RL[^\n]*)/i,/^(?:.*direction\s+LR[^\n]*)/i,/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:[\s]+)/i,/^(?:((?!\n)\s)+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:scale\s+)/i,/^(?:\d+)/i,/^(?:\s+width\b)/i,/^(?:state\s+)/i,/^(?:.*<>)/i,/^(?:.*<>)/i,/^(?:.*<>)/i,/^(?:.*\[\[fork\]\])/i,/^(?:.*\[\[join\]\])/i,/^(?:.*\[\[choice\]\])/i,/^(?:.*direction\s+TB[^\n]*)/i,/^(?:.*direction\s+BT[^\n]*)/i,/^(?:.*direction\s+RL[^\n]*)/i,/^(?:.*direction\s+LR[^\n]*)/i,/^(?:["])/i,/^(?:\s*as\s+)/i,/^(?:[^\n\{]*)/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[^\n\s\{]+)/i,/^(?:\n)/i,/^(?:\{)/i,/^(?:\})/i,/^(?:[\n])/i,/^(?:note\s+)/i,/^(?:left of\b)/i,/^(?:right of\b)/i,/^(?:")/i,/^(?:\s*as\s*)/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[^\n]*)/i,/^(?:\s*[^:\n\s\-]+)/i,/^(?:\s*:[^:\n;]+)/i,/^(?:[\s\S]*?end note\b)/i,/^(?:stateDiagram\s+)/i,/^(?:stateDiagram-v2\s+)/i,/^(?:hide empty 
description\b)/i,/^(?:\[\*\])/i,/^(?:[^:\n\s\-\{]+)/i,/^(?:\s*:[^:\n;]+)/i,/^(?:-->)/i,/^(?:--)/i,/^(?:$)/i,/^(?:.)/i],conditions:{LINE:{rules:[13,14],inclusive:!1},close_directive:{rules:[13,14],inclusive:!1},arg_directive:{rules:[7,8,13,14],inclusive:!1},type_directive:{rules:[6,7,13,14],inclusive:!1},open_directive:{rules:[5,13,14],inclusive:!1},struct:{rules:[13,14,19,26,27,28,29,38,39,40,54,55,56,57,58],inclusive:!1},FLOATING_NOTE_ID:{rules:[47],inclusive:!1},FLOATING_NOTE:{rules:[44,45,46],inclusive:!1},NOTE_TEXT:{rules:[49,50],inclusive:!1},NOTE_ID:{rules:[48],inclusive:!1},NOTE:{rules:[41,42,43],inclusive:!1},SCALE:{rules:[17,18],inclusive:!1},ALIAS:{rules:[],inclusive:!1},STATE_ID:{rules:[32],inclusive:!1},STATE_STRING:{rules:[33,34],inclusive:!1},FORK_STATE:{rules:[],inclusive:!1},STATE:{rules:[13,14,20,21,22,23,24,25,30,31,35,36,37],inclusive:!1},ID:{rules:[13,14],inclusive:!1},INITIAL:{rules:[0,1,2,3,4,9,10,11,12,14,15,16,19,37,40,51,52,53,54,55,56,57,59,60],inclusive:!0}}};function O(){this.yy={}}return A.lexer=M,O.prototype=A,A.Parser=O,new O}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(17).readFileSync(n(18).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(12),n(7)(t))},function(t,e,n){(function(t){t.exports=function(){"use strict";var e,r;function i(){return e.apply(null,arguments)}function a(t){return t instanceof Array||"[object Array]"===Object.prototype.toString.call(t)}function o(t){return null!=t&&"[object Object]"===Object.prototype.toString.call(t)}function s(t){return void 0===t}function c(t){return"number"==typeof t||"[object Number]"===Object.prototype.toString.call(t)}function u(t){return t instanceof Date||"[object Date]"===Object.prototype.toString.call(t)}function l(t,e){var n,r=[];for(n=0;n>>0,r=0;rgt(t)?(a=t+1,s-gt(t)):(a=t,s),{year:a,dayOfYear:o}}function 
It(t,e,n){var r,i,a=Dt(t.year(),e,n),o=Math.floor((t.dayOfYear()-a-1)/7)+1;return o<1?r=o+Rt(i=t.year()-1,e,n):o>Rt(t.year(),e,n)?(r=o-Rt(t.year(),e,n),i=t.year()+1):(i=t.year(),r=o),{week:r,year:i}}function Rt(t,e,n){var r=Dt(t,e,n),i=Dt(t+1,e,n);return(gt(t)-r+i)/7}function Ft(t,e){return t.slice(e,7).concat(t.slice(0,e))}q("w",["ww",2],"wo","week"),q("W",["WW",2],"Wo","isoWeek"),L("week","w"),L("isoWeek","W"),P("week",5),P("isoWeek",5),lt("w",Q),lt("ww",Q,G),lt("W",Q),lt("WW",Q,G),yt(["w","ww","W","WW"],(function(t,e,n,r){e[r.substr(0,1)]=w(t)})),q("d",0,"do","day"),q("dd",0,0,(function(t){return this.localeData().weekdaysMin(this,t)})),q("ddd",0,0,(function(t){return this.localeData().weekdaysShort(this,t)})),q("dddd",0,0,(function(t){return this.localeData().weekdays(this,t)})),q("e",0,0,"weekday"),q("E",0,0,"isoWeekday"),L("day","d"),L("weekday","e"),L("isoWeekday","E"),P("day",11),P("weekday",11),P("isoWeekday",11),lt("d",Q),lt("e",Q),lt("E",Q),lt("dd",(function(t,e){return e.weekdaysMinRegex(t)})),lt("ddd",(function(t,e){return e.weekdaysShortRegex(t)})),lt("dddd",(function(t,e){return e.weekdaysRegex(t)})),yt(["dd","ddd","dddd"],(function(t,e,n,r){var i=n._locale.weekdaysParse(t,r,n._strict);null!=i?e.d=i:p(n).invalidWeekday=t})),yt(["d","e","E"],(function(t,e,n,r){e[r]=w(t)}));var Pt="Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),jt="Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),Yt="Su_Mo_Tu_We_Th_Fr_Sa".split("_"),zt=ct,Ut=ct,$t=ct;function qt(){function t(t,e){return e.length-t.length}var e,n,r,i,a,o=[],s=[],c=[],u=[];for(e=0;e<7;e++)n=d([2e3,1]).day(e),r=this.weekdaysMin(n,""),i=this.weekdaysShort(n,""),a=this.weekdays(n,""),o.push(r),s.push(i),c.push(a),u.push(r),u.push(i),u.push(a);for(o.sort(t),s.sort(t),c.sort(t),u.sort(t),e=0;e<7;e++)s[e]=ft(s[e]),c[e]=ft(c[e]),u[e]=ft(u[e]);this._weekdaysRegex=new 
RegExp("^("+u.join("|")+")","i"),this._weekdaysShortRegex=this._weekdaysRegex,this._weekdaysMinRegex=this._weekdaysRegex,this._weekdaysStrictRegex=new RegExp("^("+c.join("|")+")","i"),this._weekdaysShortStrictRegex=new RegExp("^("+s.join("|")+")","i"),this._weekdaysMinStrictRegex=new RegExp("^("+o.join("|")+")","i")}function Wt(){return this.hours()%12||12}function Ht(t,e){q(t,0,0,(function(){return this.localeData().meridiem(this.hours(),this.minutes(),e)}))}function Vt(t,e){return e._meridiemParse}q("H",["HH",2],0,"hour"),q("h",["hh",2],0,Wt),q("k",["kk",2],0,(function(){return this.hours()||24})),q("hmm",0,0,(function(){return""+Wt.apply(this)+j(this.minutes(),2)})),q("hmmss",0,0,(function(){return""+Wt.apply(this)+j(this.minutes(),2)+j(this.seconds(),2)})),q("Hmm",0,0,(function(){return""+this.hours()+j(this.minutes(),2)})),q("Hmmss",0,0,(function(){return""+this.hours()+j(this.minutes(),2)+j(this.seconds(),2)})),Ht("a",!0),Ht("A",!1),L("hour","h"),P("hour",13),lt("a",Vt),lt("A",Vt),lt("H",Q),lt("h",Q),lt("k",Q),lt("HH",Q,G),lt("hh",Q,G),lt("kk",Q,G),lt("hmm",J),lt("hmmss",tt),lt("Hmm",J),lt("Hmmss",tt),pt(["H","HH"],3),pt(["k","kk"],(function(t,e,n){var r=w(t);e[3]=24===r?0:r})),pt(["a","A"],(function(t,e,n){n._isPm=n._locale.isPM(t),n._meridiem=t})),pt(["h","hh"],(function(t,e,n){e[3]=w(t),p(n).bigHour=!0})),pt("hmm",(function(t,e,n){var r=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r)),p(n).bigHour=!0})),pt("hmmss",(function(t,e,n){var r=t.length-4,i=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r,2)),e[5]=w(t.substr(i)),p(n).bigHour=!0})),pt("Hmm",(function(t,e,n){var r=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r))})),pt("Hmmss",(function(t,e,n){var r=t.length-4,i=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r,2)),e[5]=w(t.substr(i))}));var Gt,Xt=xt("Hours",!0),Zt={calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] 
LT",sameElse:"L"},longDateFormat:{LTS:"h:mm:ss A",LT:"h:mm A",L:"MM/DD/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY h:mm A",LLLL:"dddd, MMMM D, YYYY h:mm A"},invalidDate:"Invalid date",ordinal:"%d",dayOfMonthOrdinalParse:/\d{1,2}/,relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},months:Tt,monthsShort:Ct,week:{dow:0,doy:6},weekdays:Pt,weekdaysMin:Yt,weekdaysShort:jt,meridiemParse:/[ap]\.?m?\.?/i},Kt={},Qt={};function Jt(t){return t?t.toLowerCase().replace("_","-"):t}function te(e){var r=null;if(!Kt[e]&&void 0!==t&&t&&t.exports)try{r=Gt._abbr,n(202)("./"+e),ee(r)}catch(e){}return Kt[e]}function ee(t,e){var n;return t&&((n=s(e)?re(t):ne(t,e))?Gt=n:"undefined"!=typeof console&&console.warn&&console.warn("Locale "+t+" not found. Did you forget to load it?")),Gt._abbr}function ne(t,e){if(null===e)return delete Kt[t],null;var n,r=Zt;if(e.abbr=t,null!=Kt[t])M("defineLocaleOverride","use moment.updateLocale(localeName, config) to change an existing locale. 
moment.defineLocale(localeName, config) should only be used for creating a new locale See http://momentjs.com/guides/#/warnings/define-locale/ for more info."),r=Kt[t]._config;else if(null!=e.parentLocale)if(null!=Kt[e.parentLocale])r=Kt[e.parentLocale]._config;else{if(null==(n=te(e.parentLocale)))return Qt[e.parentLocale]||(Qt[e.parentLocale]=[]),Qt[e.parentLocale].push({name:t,config:e}),null;r=n._config}return Kt[t]=new B(N(r,e)),Qt[t]&&Qt[t].forEach((function(t){ne(t.name,t.config)})),ee(t),Kt[t]}function re(t){var e;if(t&&t._locale&&t._locale._abbr&&(t=t._locale._abbr),!t)return Gt;if(!a(t)){if(e=te(t))return e;t=[t]}return function(t){for(var e,n,r,i,a=0;a=e&&E(i,n,!0)>=e-1)break;e--}a++}return Gt}(t)}function ie(t){var e,n=t._a;return n&&-2===p(t).overflow&&(e=n[1]<0||11wt(n[0],n[1])?2:n[3]<0||24Rt(n,a,o)?p(t)._overflowWeeks=!0:null!=c?p(t)._overflowWeekday=!0:(s=Lt(n,r,i,a,o),t._a[0]=s.year,t._dayOfYear=s.dayOfYear)}(t),null!=t._dayOfYear&&(o=ae(t._a[0],r[0]),(t._dayOfYear>gt(o)||0===t._dayOfYear)&&(p(t)._overflowDayOfYear=!0),n=Bt(o,0,t._dayOfYear),t._a[1]=n.getUTCMonth(),t._a[2]=n.getUTCDate()),e=0;e<3&&null==t._a[e];++e)t._a[e]=s[e]=r[e];for(;e<7;e++)t._a[e]=s[e]=null==t._a[e]?2===e?1:0:t._a[e];24===t._a[3]&&0===t._a[4]&&0===t._a[5]&&0===t._a[6]&&(t._nextDay=!0,t._a[3]=0),t._d=(t._useUTC?Bt:function(t,e,n,r,i,a,o){var s;return t<100&&0<=t?(s=new Date(t+400,e,n,r,i,a,o),isFinite(s.getFullYear())&&s.setFullYear(t)):s=new Date(t,e,n,r,i,a,o),s}).apply(null,s),a=t._useUTC?t._d.getUTCDay():t._d.getDay(),null!=t._tzm&&t._d.setUTCMinutes(t._d.getUTCMinutes()-t._tzm),t._nextDay&&(t._a[3]=24),t._w&&void 0!==t._w.d&&t._w.d!==a&&(p(t).weekdayMismatch=!0)}}var se=/^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,ce=/^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d))(?:(T| 
)(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,ue=/Z|[+-]\d\d(?::?\d\d)?/,le=[["YYYYYY-MM-DD",/[+-]\d{6}-\d\d-\d\d/],["YYYY-MM-DD",/\d{4}-\d\d-\d\d/],["GGGG-[W]WW-E",/\d{4}-W\d\d-\d/],["GGGG-[W]WW",/\d{4}-W\d\d/,!1],["YYYY-DDD",/\d{4}-\d{3}/],["YYYY-MM",/\d{4}-\d\d/,!1],["YYYYYYMMDD",/[+-]\d{10}/],["YYYYMMDD",/\d{8}/],["GGGG[W]WWE",/\d{4}W\d{3}/],["GGGG[W]WW",/\d{4}W\d{2}/,!1],["YYYYDDD",/\d{7}/]],he=[["HH:mm:ss.SSSS",/\d\d:\d\d:\d\d\.\d+/],["HH:mm:ss,SSSS",/\d\d:\d\d:\d\d,\d+/],["HH:mm:ss",/\d\d:\d\d:\d\d/],["HH:mm",/\d\d:\d\d/],["HHmmss.SSSS",/\d\d\d\d\d\d\.\d+/],["HHmmss,SSSS",/\d\d\d\d\d\d,\d+/],["HHmmss",/\d\d\d\d\d\d/],["HHmm",/\d\d\d\d/],["HH",/\d\d/]],fe=/^\/?Date\((\-?\d+)/i;function de(t){var e,n,r,i,a,o,s=t._i,c=se.exec(s)||ce.exec(s);if(c){for(p(t).iso=!0,e=0,n=le.length;en.valueOf():n.valueOf()this.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()},on.isLocal=function(){return!!this.isValid()&&!this._isUTC},on.isUtcOffset=function(){return!!this.isValid()&&this._isUTC},on.isUtc=De,on.isUTC=De,on.zoneAbbr=function(){return this._isUTC?"UTC":""},on.zoneName=function(){return this._isUTC?"Coordinated Universal Time":""},on.dates=C("dates accessor is deprecated. Use date instead.",Je),on.months=C("months accessor is deprecated. Use month instead",At),on.years=C("years accessor is deprecated. Use year instead",bt),on.zone=C("moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/",(function(t,e){return null!=t?("string"!=typeof t&&(t=-t),this.utcOffset(t,e),this):-this.utcOffset()})),on.isDSTShifted=C("isDSTShifted is deprecated. 
See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information",(function(){if(!s(this._isDSTShifted))return this._isDSTShifted;var t={};if(v(t,this),(t=ve(t))._a){var e=t._isUTC?d(t._a):xe(t._a);this._isDSTShifted=this.isValid()&&0h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:m,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,y.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},m={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return 
this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),33;case 1:return this.begin("type_directive"),34;case 2:return this.popState(),this.begin("arg_directive"),26;case 3:return this.popState(),this.popState(),36;case 4:return 35;case 5:case 6:case 7:break;case 8:return 11;case 9:case 10:case 11:break;case 12:this.begin("href");break;case 13:this.popState();break;case 14:return 31;case 15:this.begin("callbackname");break;case 16:this.popState();break;case 17:this.popState(),this.begin("callbackargs");break;case 18:return 29;case 19:this.popState();break;case 20:return 30;case 21:this.begin("click");break;case 22:this.popState();break;case 23:return 28;case 24:return 5;case 25:return 12;case 26:return 13;case 27:return 14;case 28:return 15;case 29:return 16;case 30:return 17;case 
31:return"date";case 32:return 18;case 33:return 19;case 34:return 21;case 35:return 22;case 36:return 26;case 37:return 7;case 38:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%%(?!\{)*[^\n]*)/i,/^(?:[^\}]%%*[^\n]*)/i,/^(?:%%*[^\n]*[\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:href[\s]+["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:call[\s]+)/i,/^(?:\([\s]*\))/i,/^(?:\()/i,/^(?:[^(]*)/i,/^(?:\))/i,/^(?:[^)]*)/i,/^(?:click[\s]+)/i,/^(?:[\s\n])/i,/^(?:[^\s\n]*)/i,/^(?:gantt\b)/i,/^(?:dateFormat\s[^#\n;]+)/i,/^(?:inclusiveEndDates\b)/i,/^(?:topAxis\b)/i,/^(?:axisFormat\s[^#\n;]+)/i,/^(?:excludes\s[^#\n;]+)/i,/^(?:todayMarker\s[^\n;]+)/i,/^(?:\d\d\d\d-\d\d-\d\d\b)/i,/^(?:title\s[^#\n;]+)/i,/^(?:section\s[^#:\n;]+)/i,/^(?:[^#:\n;]+)/i,/^(?::[^#\n;]+)/i,/^(?::)/i,/^(?:$)/i,/^(?:.)/i],conditions:{close_directive:{rules:[],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},callbackargs:{rules:[19,20],inclusive:!1},callbackname:{rules:[16,17,18],inclusive:!1},href:{rules:[13,14],inclusive:!1},click:{rules:[22,23],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,15,21,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38],inclusive:!0}}};function v(){this.yy={}}return g.lexer=m,v.prototype=g,g.Parser=v,new v}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(17).readFileSync(n(18).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(12),n(7)(t))},function(t,e,n){var r=n(39),i=n(82);t.exports=function(t){return null!=t&&i(t.length)&&!r(t)}},function(t,e,n){var r=n(261),i=n(271),a=n(36),o=n(5),s=n(278);t.exports=function(t){return"function"==typeof t?t:null==t?a:"object"==typeof t?o(t)?i(t[0],t[1]):r(t):s(t)}},function(t,e,n){(function(t,r){var 
i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,9],n=[1,7],r=[1,6],i=[1,8],a=[1,20,21,22,23,38,47,76,77,78,79,80,81,95,96,99,100,101,103,104,110,111,112,113,114,115,116,117,118,119],o=[2,10],s=[1,20],c=[1,21],u=[1,22],l=[1,23],h=[1,30],f=[1,59],d=[1,33],p=[1,34],y=[1,35],g=[1,36],m=[1,37],v=[1,53],b=[1,48],x=[1,50],_=[1,45],k=[1,49],w=[1,52],E=[1,56],T=[1,57],C=[1,38],S=[1,39],A=[1,40],M=[1,41],O=[1,58],N=[1,47],B=[1,51],D=[1,54],L=[1,55],I=[1,46],R=[1,62],F=[1,67],P=[1,20,21,22,23,38,42,47,76,77,78,79,80,81,95,96,99,100,101,103,104,110,111,112,113,114,115,116,117,118,119],j=[1,71],Y=[1,70],z=[1,72],U=[20,21,23,70,71],$=[1,93],q=[1,98],W=[1,95],H=[1,100],V=[1,103],G=[1,101],X=[1,102],Z=[1,96],K=[1,108],Q=[1,107],J=[1,97],tt=[1,99],et=[1,104],nt=[1,105],rt=[1,106],it=[1,109],at=[20,21,22,23,70,71],ot=[20,21,22,23,48,70,71],st=[20,21,22,23,40,47,48,50,52,54,56,58,60,62,63,65,70,71,81,95,96,99,100,101,103,104,114,115,116,117,118,119],ct=[20,21,23],ut=[20,21,23,47,70,71,81,95,96,99,100,101,103,104,114,115,116,117,118,119],lt=[1,12,20,21,22,23,24,38,42,47,76,77,78,79,80,81,95,96,99,100,101,103,104,110,111,112,113,114,115,116,117,118,119],ht=[47,81,95,96,99,100,101,103,104,114,115,116,117,118,119],ft=[1,141],dt=[1,149],pt=[1,150],yt=[1,151],gt=[1,152],mt=[1,136],vt=[1,137],bt=[1,133],xt=[1,144],_t=[1,145],kt=[1,146],wt=[1,147],Et=[1,148],Tt=[1,153],Ct=[1,154],St=[1,139],At=[1,142],Mt=[1,138],Ot=[1,135],Nt=[20,21,22,23,38,42,47,76,77,78,79,80,81,95,96,99,100,101,103,104,110,111,112,113,114,115,116,117,118,119],Bt=[1,157],Dt=[20,21,22,23,26,47,81,95,96,99,100,101,103,104,114,115,116,117,118,119],Lt=[20,21,22,23,24,26,38,40,41,42,47,51,53,55,57,59,61,62,64,66,70,71,72,76,77,78,79,80,81,82,85,95,96,99,100,101,103,104,105,106,114,115,116,117,118,119],It=[12,21,22,24],Rt=[22,96],Ft=[1,238],Pt=[1,242],jt=[1,239],Yt=[1,236],zt=[1,233],Ut=[1,234],$t=[1,235],qt=[1,237],Wt=[1,240],Ht=[1,241],Vt=[1,243],Gt=[1,260],Xt=[20,21,23,96],Zt=[20,21,2
2,23,76,92,95,96,99,100,101,102,103,104,105],Kt={trace:function(){},yy:{},symbols_:{error:2,start:3,mermaidDoc:4,directive:5,openDirective:6,typeDirective:7,closeDirective:8,separator:9,":":10,argDirective:11,open_directive:12,type_directive:13,arg_directive:14,close_directive:15,graphConfig:16,document:17,line:18,statement:19,SEMI:20,NEWLINE:21,SPACE:22,EOF:23,GRAPH:24,NODIR:25,DIR:26,FirstStmtSeperator:27,ending:28,endToken:29,spaceList:30,spaceListNewline:31,verticeStatement:32,styleStatement:33,linkStyleStatement:34,classDefStatement:35,classStatement:36,clickStatement:37,subgraph:38,text:39,SQS:40,SQE:41,end:42,direction:43,link:44,node:45,vertex:46,AMP:47,STYLE_SEPARATOR:48,idString:49,PS:50,PE:51,"(-":52,"-)":53,STADIUMSTART:54,STADIUMEND:55,SUBROUTINESTART:56,SUBROUTINEEND:57,CYLINDERSTART:58,CYLINDEREND:59,DIAMOND_START:60,DIAMOND_STOP:61,TAGEND:62,TRAPSTART:63,TRAPEND:64,INVTRAPSTART:65,INVTRAPEND:66,linkStatement:67,arrowText:68,TESTSTR:69,START_LINK:70,LINK:71,PIPE:72,textToken:73,STR:74,keywords:75,STYLE:76,LINKSTYLE:77,CLASSDEF:78,CLASS:79,CLICK:80,DOWN:81,UP:82,textNoTags:83,textNoTagsToken:84,DEFAULT:85,stylesOpt:86,alphaNum:87,CALLBACKNAME:88,CALLBACKARGS:89,HREF:90,LINK_TARGET:91,HEX:92,numList:93,INTERPOLATE:94,NUM:95,COMMA:96,style:97,styleComponent:98,ALPHA:99,COLON:100,MINUS:101,UNIT:102,BRKT:103,DOT:104,PCT:105,TAGSTART:106,alphaNumToken:107,idStringToken:108,alphaNumStatement:109,direction_tb:110,direction_bt:111,direction_rl:112,direction_lr:113,PUNCTUATION:114,UNICODE_TEXT:115,PLUS:116,EQUALS:117,MULT:118,UNDERSCORE:119,graphCodeTokens:120,ARROW_CROSS:121,ARROW_POINT:122,ARROW_CIRCLE:123,ARROW_OPEN:124,QUOTE:125,$accept:0,$end:1},terminals_:{2:"error",10:":",12:"open_directive",13:"type_directive",14:"arg_directive",15:"close_directive",20:"SEMI",21:"NEWLINE",22:"SPACE",23:"EOF",24:"GRAPH",25:"NODIR",26:"DIR",38:"subgraph",40:"SQS",41:"SQE",42:"end",47:"AMP",48:"STYLE_SEPARATOR",50:"PS",51:"PE",52:"(-",53:"-)",54:"STADIUMSTART",55:"STADIUME
ND",56:"SUBROUTINESTART",57:"SUBROUTINEEND",58:"CYLINDERSTART",59:"CYLINDEREND",60:"DIAMOND_START",61:"DIAMOND_STOP",62:"TAGEND",63:"TRAPSTART",64:"TRAPEND",65:"INVTRAPSTART",66:"INVTRAPEND",69:"TESTSTR",70:"START_LINK",71:"LINK",72:"PIPE",74:"STR",76:"STYLE",77:"LINKSTYLE",78:"CLASSDEF",79:"CLASS",80:"CLICK",81:"DOWN",82:"UP",85:"DEFAULT",88:"CALLBACKNAME",89:"CALLBACKARGS",90:"HREF",91:"LINK_TARGET",92:"HEX",94:"INTERPOLATE",95:"NUM",96:"COMMA",99:"ALPHA",100:"COLON",101:"MINUS",102:"UNIT",103:"BRKT",104:"DOT",105:"PCT",106:"TAGSTART",110:"direction_tb",111:"direction_bt",112:"direction_rl",113:"direction_lr",114:"PUNCTUATION",115:"UNICODE_TEXT",116:"PLUS",117:"EQUALS",118:"MULT",119:"UNDERSCORE",121:"ARROW_CROSS",122:"ARROW_POINT",123:"ARROW_CIRCLE",124:"ARROW_OPEN",125:"QUOTE"},productions_:[0,[3,1],[3,2],[5,4],[5,6],[6,1],[7,1],[11,1],[8,1],[4,2],[17,0],[17,2],[18,1],[18,1],[18,1],[18,1],[18,1],[16,2],[16,2],[16,2],[16,3],[28,2],[28,1],[29,1],[29,1],[29,1],[27,1],[27,1],[27,2],[31,2],[31,2],[31,1],[31,1],[30,2],[30,1],[19,2],[19,2],[19,2],[19,2],[19,2],[19,2],[19,9],[19,6],[19,4],[19,1],[9,1],[9,1],[9,1],[32,3],[32,4],[32,2],[32,1],[45,1],[45,5],[45,3],[46,4],[46,6],[46,4],[46,4],[46,4],[46,4],[46,4],[46,4],[46,6],[46,4],[46,4],[46,4],[46,4],[46,4],[46,1],[44,2],[44,3],[44,3],[44,1],[44,3],[67,1],[68,3],[39,1],[39,2],[39,1],[75,1],[75,1],[75,1],[75,1],[75,1],[75,1],[75,1],[75,1],[75,1],[75,1],[75,1],[83,1],[83,2],[35,5],[35,5],[36,5],[37,2],[37,4],[37,3],[37,5],[37,2],[37,4],[37,4],[37,6],[37,2],[37,4],[37,2],[37,4],[37,4],[37,6],[33,5],[33,5],[34,5],[34,5],[34,9],[34,9],[34,7],[34,7],[93,1],[93,3],[86,1],[86,3],[97,1],[97,2],[98,1],[98,1],[98,1],[98,1],[98,1],[98,1],[98,1],[98,1],[98,1],[98,1],[98,1],[73,1],[73,1],[73,1],[73,1],[73,1],[73,1],[84,1],[84,1],[84,1],[84,1],[49,1],[49,2],[87,1],[87,2],[109,1],[109,1],[109,1],[109,1],[43,1],[43,1],[43,1],[43,1],[107,1],[107,1],[107,1],[107,1],[107,1],[107,1],[107,1],[107,1],[107,1],[107,1],[107,1],[107,1],[107,1],[1
08,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1],[120,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 5:r.parseDirective("%%{","open_directive");break;case 6:r.parseDirective(a[s],"type_directive");break;case 7:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 8:r.parseDirective("}%%","close_directive","flowchart");break;case 10:this.$=[];break;case 11:a[s]!==[]&&a[s-1].push(a[s]),this.$=a[s-1];break;case 12:case 77:case 79:case 91:case 147:case 149:case 150:this.$=a[s];break;case 19:r.setDirection("TB"),this.$="TB";break;case 20:r.setDirection(a[s-1]),this.$=a[s-1];break;case 35:this.$=a[s-1].nodes;break;case 36:case 37:case 38:case 39:case 40:this.$=[];break;case 41:this.$=r.addSubGraph(a[s-6],a[s-1],a[s-4]);break;case 42:this.$=r.addSubGraph(a[s-3],a[s-1],a[s-3]);break;case 43:this.$=r.addSubGraph(void 0,a[s-1],void 0);break;case 48:r.addLink(a[s-2].stmt,a[s],a[s-1]),this.$={stmt:a[s],nodes:a[s].concat(a[s-2].nodes)};break;case 49:r.addLink(a[s-3].stmt,a[s-1],a[s-2]),this.$={stmt:a[s-1],nodes:a[s-1].concat(a[s-3].nodes)};break;case 50:this.$={stmt:a[s-1],nodes:a[s-1]};break;case 51:this.$={stmt:a[s],nodes:a[s]};break;case 52:this.$=[a[s]];break;case 53:this.$=a[s-4].concat(a[s]);break;case 54:this.$=[a[s-2]],r.setClass(a[s-2],a[s]);break;case 55:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"square");break;case 56:this.$=a[s-5],r.addVertex(a[s-5],a[s-2],"circle");break;case 57:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"ellipse");break;case 58:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"stadium");break;case 59:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"subroutine");break;case 60:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"cylinder");break;case 
61:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"round");break;case 62:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"diamond");break;case 63:this.$=a[s-5],r.addVertex(a[s-5],a[s-2],"hexagon");break;case 64:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"odd");break;case 65:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"trapezoid");break;case 66:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"inv_trapezoid");break;case 67:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"lean_right");break;case 68:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"lean_left");break;case 69:this.$=a[s],r.addVertex(a[s]);break;case 70:a[s-1].text=a[s],this.$=a[s-1];break;case 71:case 72:a[s-2].text=a[s-1],this.$=a[s-2];break;case 73:this.$=a[s];break;case 74:var c=r.destructLink(a[s],a[s-2]);this.$={type:c.type,stroke:c.stroke,length:c.length,text:a[s-1]};break;case 75:c=r.destructLink(a[s]);this.$={type:c.type,stroke:c.stroke,length:c.length};break;case 76:this.$=a[s-1];break;case 78:case 92:case 148:this.$=a[s-1]+""+a[s];break;case 93:case 94:this.$=a[s-4],r.addClass(a[s-2],a[s]);break;case 95:this.$=a[s-4],r.setClass(a[s-2],a[s]);break;case 96:case 104:this.$=a[s-1],r.setClickEvent(a[s-1],a[s]);break;case 97:case 105:this.$=a[s-3],r.setClickEvent(a[s-3],a[s-2]),r.setTooltip(a[s-3],a[s]);break;case 98:this.$=a[s-2],r.setClickEvent(a[s-2],a[s-1],a[s]);break;case 99:this.$=a[s-4],r.setClickEvent(a[s-4],a[s-3],a[s-2]),r.setTooltip(a[s-4],a[s]);break;case 100:case 106:this.$=a[s-1],r.setLink(a[s-1],a[s]);break;case 101:case 107:this.$=a[s-3],r.setLink(a[s-3],a[s-2]),r.setTooltip(a[s-3],a[s]);break;case 102:case 108:this.$=a[s-3],r.setLink(a[s-3],a[s-2],a[s]);break;case 103:case 109:this.$=a[s-5],r.setLink(a[s-5],a[s-4],a[s]),r.setTooltip(a[s-5],a[s-2]);break;case 110:this.$=a[s-4],r.addVertex(a[s-2],void 0,void 0,a[s]);break;case 111:case 113:this.$=a[s-4],r.updateLink(a[s-2],a[s]);break;case 112:this.$=a[s-4],r.updateLink([a[s-2]],a[s]);break;case 
114:this.$=a[s-8],r.updateLinkInterpolate([a[s-6]],a[s-2]),r.updateLink([a[s-6]],a[s]);break;case 115:this.$=a[s-8],r.updateLinkInterpolate(a[s-6],a[s-2]),r.updateLink(a[s-6],a[s]);break;case 116:this.$=a[s-6],r.updateLinkInterpolate([a[s-4]],a[s]);break;case 117:this.$=a[s-6],r.updateLinkInterpolate(a[s-4],a[s]);break;case 118:case 120:this.$=[a[s]];break;case 119:case 121:a[s-2].push(a[s]),this.$=a[s-2];break;case 123:this.$=a[s-1]+a[s];break;case 145:this.$=a[s];break;case 146:this.$=a[s-1]+""+a[s];break;case 151:this.$="v";break;case 152:this.$="-";break;case 153:this.$={stmt:"dir",value:"TB"};break;case 154:this.$={stmt:"dir",value:"BT"};break;case 155:this.$={stmt:"dir",value:"RL"};break;case 156:this.$={stmt:"dir",value:"LR"}}},table:[{3:1,4:2,5:3,6:5,12:e,16:4,21:n,22:r,24:i},{1:[3]},{1:[2,1]},{3:10,4:2,5:3,6:5,12:e,16:4,21:n,22:r,24:i},t(a,o,{17:11}),{7:12,13:[1,13]},{16:14,21:n,22:r,24:i},{16:15,21:n,22:r,24:i},{25:[1,16],26:[1,17]},{13:[2,5]},{1:[2,2]},{1:[2,9],18:18,19:19,20:s,21:c,22:u,23:l,32:24,33:25,34:26,35:27,36:28,37:29,38:h,43:31,45:32,46:42,47:f,49:43,76:d,77:p,78:y,79:g,80:m,81:v,95:b,96:x,99:_,100:k,101:w,103:E,104:T,108:44,110:C,111:S,112:A,113:M,114:O,115:N,116:B,117:D,118:L,119:I},{8:60,10:[1,61],15:R},t([10,15],[2,6]),t(a,[2,17]),t(a,[2,18]),t(a,[2,19]),{20:[1,64],21:[1,65],22:F,27:63,30:66},t(P,[2,11]),t(P,[2,12]),t(P,[2,13]),t(P,[2,14]),t(P,[2,15]),t(P,[2,16]),{9:68,20:j,21:Y,23:z,44:69,67:73,70:[1,74],71:[1,75]},{9:76,20:j,21:Y,23:z},{9:77,20:j,21:Y,23:z},{9:78,20:j,21:Y,23:z},{9:79,20:j,21:Y,23:z},{9:80,20:j,21:Y,23:z},{9:82,20:j,21:Y,22:[1,81],23:z},t(P,[2,44]),t(U,[2,51],{30:83,22:F}),{22:[1,84]},{22:[1,85]},{22:[1,86]},{22:[1,87]},{26:$,47:q,74:[1,91],81:W,87:90,88:[1,88],90:[1,89],95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,107:94,109:92,114:J,115:tt,116:et,117:nt,118:rt,119:it},t(P,[2,153]),t(P,[2,154]),t(P,[2,155]),t(P,[2,156]),t(at,[2,52],{48:[1,110]}),t(ot,[2,69],{108:121,40:[1,111],47:f,50:[1,112],52:[1,113],54:[1,114],56:[1,115],5
8:[1,116],60:[1,117],62:[1,118],63:[1,119],65:[1,120],81:v,95:b,96:x,99:_,100:k,101:w,103:E,104:T,114:O,115:N,116:B,117:D,118:L,119:I}),t(st,[2,145]),t(st,[2,170]),t(st,[2,171]),t(st,[2,172]),t(st,[2,173]),t(st,[2,174]),t(st,[2,175]),t(st,[2,176]),t(st,[2,177]),t(st,[2,178]),t(st,[2,179]),t(st,[2,180]),t(st,[2,181]),t(st,[2,182]),t(st,[2,183]),t(st,[2,184]),{9:122,20:j,21:Y,23:z},{11:123,14:[1,124]},t(ct,[2,8]),t(a,[2,20]),t(a,[2,26]),t(a,[2,27]),{21:[1,125]},t(ut,[2,34],{30:126,22:F}),t(P,[2,35]),{45:127,46:42,47:f,49:43,81:v,95:b,96:x,99:_,100:k,101:w,103:E,104:T,108:44,114:O,115:N,116:B,117:D,118:L,119:I},t(lt,[2,45]),t(lt,[2,46]),t(lt,[2,47]),t(ht,[2,73],{68:128,69:[1,129],72:[1,130]}),{22:ft,24:dt,26:pt,38:yt,39:131,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},t([47,69,72,81,95,96,99,100,101,103,104,114,115,116,117,118,119],[2,75]),t(P,[2,36]),t(P,[2,37]),t(P,[2,38]),t(P,[2,39]),t(P,[2,40]),{22:ft,24:dt,26:pt,38:yt,39:155,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},t(Nt,o,{17:156}),t(U,[2,50],{47:Bt}),{26:$,47:q,81:W,87:158,92:[1,159],95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,107:94,109:92,114:J,115:tt,116:et,117:nt,118:rt,119:it},{85:[1,160],93:161,95:[1,162]},{26:$,47:q,81:W,85:[1,163],87:164,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,107:94,109:92,114:J,115:tt,116:et,117:nt,118:rt,119:it},{26:$,47:q,81:W,87:165,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,107:94,109:92,114:J,115:tt,116:et,117:nt,118:rt,119:it},t(ct,[2,96],{22:[1,166],89:[1,167]}),t(ct,[2,100],{22:[1,168]}),t(ct,[2,104],{107:94,109:170,22:[1,169],26:$,47:q,81:W,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,114:J,115:tt,116:et,117:nt,118:rt,119:it}),t(ct,[2,106],{22:[1,171]}),t(Dt,[2,147]),t(Dt,[2,149]),
t(Dt,[2,150]),t(Dt,[2,151]),t(Dt,[2,152]),t(Lt,[2,157]),t(Lt,[2,158]),t(Lt,[2,159]),t(Lt,[2,160]),t(Lt,[2,161]),t(Lt,[2,162]),t(Lt,[2,163]),t(Lt,[2,164]),t(Lt,[2,165]),t(Lt,[2,166]),t(Lt,[2,167]),t(Lt,[2,168]),t(Lt,[2,169]),{47:f,49:172,81:v,95:b,96:x,99:_,100:k,101:w,103:E,104:T,108:44,114:O,115:N,116:B,117:D,118:L,119:I},{22:ft,24:dt,26:pt,38:yt,39:173,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,39:175,42:gt,47:q,50:[1,174],62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,39:176,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,39:177,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,39:178,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,39:179,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,39:180,42:gt,47:q,60:[1,181],62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26
:pt,38:yt,39:182,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,39:183,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,39:184,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},t(st,[2,146]),t(It,[2,3]),{8:185,15:R},{15:[2,7]},t(a,[2,28]),t(ut,[2,33]),t(U,[2,48],{30:186,22:F}),t(ht,[2,70],{22:[1,187]}),{22:[1,188]},{22:ft,24:dt,26:pt,38:yt,39:189,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,62:mt,70:vt,71:[1,190],73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},t(Lt,[2,77]),t(Lt,[2,79]),t(Lt,[2,135]),t(Lt,[2,136]),t(Lt,[2,137]),t(Lt,[2,138]),t(Lt,[2,139]),t(Lt,[2,140]),t(Lt,[2,141]),t(Lt,[2,142]),t(Lt,[2,143]),t(Lt,[2,144]),t(Lt,[2,80]),t(Lt,[2,81]),t(Lt,[2,82]),t(Lt,[2,83]),t(Lt,[2,84]),t(Lt,[2,85]),t(Lt,[2,86]),t(Lt,[2,87]),t(Lt,[2,88]),t(Lt,[2,89]),t(Lt,[2,90]),{9:193,20:j,21:Y,22:ft,23:z,24:dt,26:pt,38:yt,40:[1,192],42:gt,47:q,62:mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{18:18,19:19,20:s,21:c,22:u,23:l,32:24,33:25,34:26,35:27,36:28,37:29,38:h,42:[1,194],43:31,45:32,46:42,47:f,49:43,76:d,77:p,78:y,79:g,80:m,81:v,9
5:b,96:x,99:_,100:k,101:w,103:E,104:T,108:44,110:C,111:S,112:A,113:M,114:O,115:N,116:B,117:D,118:L,119:I},{22:F,30:195},{22:[1,196],26:$,47:q,81:W,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,107:94,109:170,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:[1,197]},{22:[1,198]},{22:[1,199],96:[1,200]},t(Rt,[2,118]),{22:[1,201]},{22:[1,202],26:$,47:q,81:W,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,107:94,109:170,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:[1,203],26:$,47:q,81:W,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,107:94,109:170,114:J,115:tt,116:et,117:nt,118:rt,119:it},{74:[1,204]},t(ct,[2,98],{22:[1,205]}),{74:[1,206],91:[1,207]},{74:[1,208]},t(Dt,[2,148]),{74:[1,209],91:[1,210]},t(at,[2,54],{108:121,47:f,81:v,95:b,96:x,99:_,100:k,101:w,103:E,104:T,114:O,115:N,116:B,117:D,118:L,119:I}),{22:ft,24:dt,26:pt,38:yt,41:[1,211],42:gt,47:q,62:mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,39:212,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,51:[1,213],62:mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,53:[1,214],62:mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,55:[1,215],62:mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,57:[1,216],62:
mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,59:[1,217],62:mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,61:[1,218],62:mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,39:219,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,41:[1,220],42:gt,47:q,62:mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,62:mt,64:[1,221],66:[1,222],70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,62:mt,64:[1,224],66:[1,223],70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{9:225,20:j,21:Y,23:z},t(U,[2,49],{47:Bt}),t(ht,[2,72]),t(ht,[2,71]),{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,62:mt,70:vt,72:[1,226],73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},t(ht,[2,74]),t(Lt,[2,78]),{22:ft,24:dt,26:pt,38:yt,39:227,42:gt,47:q,62:mt,70:vt,73:132,74:bt,75:143,
76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},t(Nt,o,{17:228}),t(P,[2,43]),{46:229,47:f,49:43,81:v,95:b,96:x,99:_,100:k,101:w,103:E,104:T,108:44,114:O,115:N,116:B,117:D,118:L,119:I},{22:Ft,76:Pt,86:230,92:jt,95:Yt,97:231,98:232,99:zt,100:Ut,101:$t,102:qt,103:Wt,104:Ht,105:Vt},{22:Ft,76:Pt,86:244,92:jt,95:Yt,97:231,98:232,99:zt,100:Ut,101:$t,102:qt,103:Wt,104:Ht,105:Vt},{22:Ft,76:Pt,86:245,92:jt,94:[1,246],95:Yt,97:231,98:232,99:zt,100:Ut,101:$t,102:qt,103:Wt,104:Ht,105:Vt},{22:Ft,76:Pt,86:247,92:jt,94:[1,248],95:Yt,97:231,98:232,99:zt,100:Ut,101:$t,102:qt,103:Wt,104:Ht,105:Vt},{95:[1,249]},{22:Ft,76:Pt,86:250,92:jt,95:Yt,97:231,98:232,99:zt,100:Ut,101:$t,102:qt,103:Wt,104:Ht,105:Vt},{22:Ft,76:Pt,86:251,92:jt,95:Yt,97:231,98:232,99:zt,100:Ut,101:$t,102:qt,103:Wt,104:Ht,105:Vt},{26:$,47:q,81:W,87:252,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,107:94,109:92,114:J,115:tt,116:et,117:nt,118:rt,119:it},t(ct,[2,97]),{74:[1,253]},t(ct,[2,101],{22:[1,254]}),t(ct,[2,102]),t(ct,[2,105]),t(ct,[2,107],{22:[1,255]}),t(ct,[2,108]),t(ot,[2,55]),{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,51:[1,256],62:mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},t(ot,[2,61]),t(ot,[2,57]),t(ot,[2,58]),t(ot,[2,59]),t(ot,[2,60]),t(ot,[2,62]),{22:ft,24:dt,26:pt,38:yt,42:gt,47:q,61:[1,257],62:mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,103:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},t(ot,[2,64]),t(ot,[2,65]),t(ot,[2,67]),t(ot,[2,66]),t(ot,[2,68]),t(It,[2,4]),t([22,47,81,95,96,99,100,101,103,104,114,115,116,117,118,119],[2,76]),{22:ft,24:dt,26:pt,38:yt,41:[1,258],42:gt,47:q,62:mt,70:vt,73:191,75:143,76:xt,77:_t,78:kt,79:wt,80:Et,81:Tt,82:Ct,84:134,85:St,95:H,96:V,99:G,100:X,101:At,10
3:K,104:Q,105:Mt,106:Ot,107:140,114:J,115:tt,116:et,117:nt,118:rt,119:it},{18:18,19:19,20:s,21:c,22:u,23:l,32:24,33:25,34:26,35:27,36:28,37:29,38:h,42:[1,259],43:31,45:32,46:42,47:f,49:43,76:d,77:p,78:y,79:g,80:m,81:v,95:b,96:x,99:_,100:k,101:w,103:E,104:T,108:44,110:C,111:S,112:A,113:M,114:O,115:N,116:B,117:D,118:L,119:I},t(at,[2,53]),t(ct,[2,110],{96:Gt}),t(Xt,[2,120],{98:261,22:Ft,76:Pt,92:jt,95:Yt,99:zt,100:Ut,101:$t,102:qt,103:Wt,104:Ht,105:Vt}),t(Zt,[2,122]),t(Zt,[2,124]),t(Zt,[2,125]),t(Zt,[2,126]),t(Zt,[2,127]),t(Zt,[2,128]),t(Zt,[2,129]),t(Zt,[2,130]),t(Zt,[2,131]),t(Zt,[2,132]),t(Zt,[2,133]),t(Zt,[2,134]),t(ct,[2,111],{96:Gt}),t(ct,[2,112],{96:Gt}),{22:[1,262]},t(ct,[2,113],{96:Gt}),{22:[1,263]},t(Rt,[2,119]),t(ct,[2,93],{96:Gt}),t(ct,[2,94],{96:Gt}),t(ct,[2,95],{107:94,109:170,26:$,47:q,81:W,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,114:J,115:tt,116:et,117:nt,118:rt,119:it}),t(ct,[2,99]),{91:[1,264]},{91:[1,265]},{51:[1,266]},{61:[1,267]},{9:268,20:j,21:Y,23:z},t(P,[2,42]),{22:Ft,76:Pt,92:jt,95:Yt,97:269,98:232,99:zt,100:Ut,101:$t,102:qt,103:Wt,104:Ht,105:Vt},t(Zt,[2,123]),{26:$,47:q,81:W,87:270,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,107:94,109:92,114:J,115:tt,116:et,117:nt,118:rt,119:it},{26:$,47:q,81:W,87:271,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,107:94,109:92,114:J,115:tt,116:et,117:nt,118:rt,119:it},t(ct,[2,103]),t(ct,[2,109]),t(ot,[2,56]),t(ot,[2,63]),t(Nt,o,{17:272}),t(Xt,[2,121],{98:261,22:Ft,76:Pt,92:jt,95:Yt,99:zt,100:Ut,101:$t,102:qt,103:Wt,104:Ht,105:Vt}),t(ct,[2,116],{107:94,109:170,22:[1,273],26:$,47:q,81:W,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,114:J,115:tt,116:et,117:nt,118:rt,119:it}),t(ct,[2,117],{107:94,109:170,22:[1,274],26:$,47:q,81:W,95:H,96:V,99:G,100:X,101:Z,103:K,104:Q,114:J,115:tt,116:et,117:nt,118:rt,119:it}),{18:18,19:19,20:s,21:c,22:u,23:l,32:24,33:25,34:26,35:27,36:28,37:29,38:h,42:[1,275],43:31,45:32,46:42,47:f,49:43,76:d,77:p,78:y,79:g,80:m,81:v,95:b,96:x,99:_,100:k,101:w,103:E,104:T,108:44,110:C,111:S,112:A,113:M,114:O,115:N,
116:B,117:D,118:L,119:I},{22:Ft,76:Pt,86:276,92:jt,95:Yt,97:231,98:232,99:zt,100:Ut,101:$t,102:qt,103:Wt,104:Ht,105:Vt},{22:Ft,76:Pt,86:277,92:jt,95:Yt,97:231,98:232,99:zt,100:Ut,101:$t,102:qt,103:Wt,104:Ht,105:Vt},t(P,[2,41]),t(ct,[2,114],{96:Gt}),t(ct,[2,115],{96:Gt})],defaultActions:{2:[2,1],9:[2,5],10:[2,2],124:[2,7]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:m,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 
2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,y.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},Qt={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return 
this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),12;case 1:return this.begin("type_directive"),13;case 2:return this.popState(),this.begin("arg_directive"),10;case 3:return this.popState(),this.popState(),15;case 4:return 14;case 5:case 6:break;case 7:this.begin("string");break;case 8:this.popState();break;case 9:return"STR";case 10:return 76;case 11:return 85;case 12:return 77;case 13:return 94;case 14:return 78;case 15:return 79;case 16:this.begin("href");break;case 17:this.popState();break;case 18:return 90;case 19:this.begin("callbackname");break;case 20:this.popState();break;case 21:this.popState(),this.begin("callbackargs");break;case 22:return 88;case 23:this.popState();break;case 24:return 89;case 25:this.begin("click");break;case 26:this.popState();break;case 27:return 80;case 28:case 29:return t.lex.firstGraph()&&this.begin("dir"),24;case 30:return 38;case 31:return 42;case 32:case 33:case 34:case 35:return 91;case 36:return this.popState(),25;case 37:case 38:case 39:case 40:case 41:case 42:case 43:case 44:case 45:case 46:return this.popState(),26;case 47:return 110;case 48:return 111;case 49:return 112;case 50:return 113;case 51:return 95;case 52:return 103;case 53:return 48;case 54:return 100;case 
55:return 47;case 56:return 20;case 57:return 96;case 58:return 118;case 59:case 60:case 61:return 71;case 62:case 63:case 64:return 70;case 65:return 52;case 66:return 53;case 67:return 54;case 68:return 55;case 69:return 56;case 70:return 57;case 71:return 58;case 72:return 59;case 73:return 101;case 74:return 104;case 75:return 119;case 76:return 116;case 77:return 105;case 78:case 79:return 117;case 80:return 106;case 81:return 62;case 82:return 82;case 83:return"SEP";case 84:return 81;case 85:return 99;case 86:return 64;case 87:return 63;case 88:return 66;case 89:return 65;case 90:return 114;case 91:return 115;case 92:return 72;case 93:return 50;case 94:return 51;case 95:return 40;case 96:return 41;case 97:return 60;case 98:return 61;case 99:return 125;case 100:return 21;case 101:return 22;case 102:return 23}},rules:[/^(?:%%\{)/,/^(?:((?:(?!\}%%)[^:.])*))/,/^(?::)/,/^(?:\}%%)/,/^(?:((?:(?!\}%%).|\n)*))/,/^(?:%%(?!\{)[^\n]*)/,/^(?:[^\}]%%[^\n]*)/,/^(?:["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:style\b)/,/^(?:default\b)/,/^(?:linkStyle\b)/,/^(?:interpolate\b)/,/^(?:classDef\b)/,/^(?:class\b)/,/^(?:href[\s]+["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:call[\s]+)/,/^(?:\([\s]*\))/,/^(?:\()/,/^(?:[^(]*)/,/^(?:\))/,/^(?:[^)]*)/,/^(?:click[\s]+)/,/^(?:[\s\n])/,/^(?:[^\s\n]*)/,/^(?:graph\b)/,/^(?:flowchart\b)/,/^(?:subgraph\b)/,/^(?:end\b\s*)/,/^(?:_self\b)/,/^(?:_blank\b)/,/^(?:_parent\b)/,/^(?:_top\b)/,/^(?:(\r?\n)*\s*\n)/,/^(?:\s*LR\b)/,/^(?:\s*RL\b)/,/^(?:\s*TB\b)/,/^(?:\s*BT\b)/,/^(?:\s*TD\b)/,/^(?:\s*BR\b)/,/^(?:\s*<)/,/^(?:\s*>)/,/^(?:\s*\^)/,/^(?:\s*v\b)/,/^(?:.*direction\s+TB[^\n]*)/,/^(?:.*direction\s+BT[^\n]*)/,/^(?:.*direction\s+RL[^\n]*)/,/^(?:.*direction\s+LR[^\n]*)/,/^(?:[0-9]+)/,/^(?:#)/,/^(?::::)/,/^(?::)/,/^(?:&)/,/^(?:;)/,/^(?:,)/,/^(?:\*)/,/^(?:\s*[xo<]?--+[-xo>]\s*)/,/^(?:\s*[xo<]?==+[=xo>]\s*)/,/^(?:\s*[xo<]?-?\.+-[xo>]?\s*)/,/^(?:\s*[xo<]?--\s*)/,/^(?:\s*[xo<]?==\s*)/,/^(?:\s*[xo<]?-\.\s*)/,/^(?:\(-)/,/^(?:-\))/,/^(?:\(\[)/,/^(?:\]\))/,/^(?:\[\[)/,/^(?:\]\])/,/^(?
:\[\()/,/^(?:\)\])/,/^(?:-)/,/^(?:\.)/,/^(?:[\_])/,/^(?:\+)/,/^(?:%)/,/^(?:=)/,/^(?:=)/,/^(?:<)/,/^(?:>)/,/^(?:\^)/,/^(?:\\\|)/,/^(?:v\b)/,/^(?:[A-Za-z]+)/,/^(?:\\\])/,/^(?:\[\/)/,/^(?:\/\])/,/^(?:\[\\)/,/^(?:[!"#$%&'*+,-.`?\\_/])/,/^(?:[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6]|[\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377]|[\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5]|[\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA]|[\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE]|[\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA]|[\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0]|[\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977]|[\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2]|[\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A]|[\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39]|[\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8]|[\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C]|[\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C]|[\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99]|[\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0]|[\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D]|[\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3]|[\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10]|[\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1]|[\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81]|[\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3]|[\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6]|[\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0
F88-\u0F8C\u1000-\u102A]|[\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081]|[\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D]|[\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0]|[\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310]|[\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C]|[\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711]|[\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7]|[\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C]|[\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16]|[\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF]|[\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC]|[\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D]|[\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D]|[\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3]|[\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F]|[\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128]|[\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184]|[\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3]|[\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6]|[\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE]|[\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C]|[\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D]|[\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC]|[\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B]|[\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788]|[\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805]|[\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB]|[\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA0
0-\uAA28]|[\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5]|[\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4]|[\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E]|[\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D]|[\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36]|[\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D]|[\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC]|[\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF]|[\uFFD2-\uFFD7\uFFDA-\uFFDC])/,/^(?:\|)/,/^(?:\()/,/^(?:\))/,/^(?:\[)/,/^(?:\])/,/^(?:\{)/,/^(?:\})/,/^(?:")/,/^(?:(\r?\n)+)/,/^(?:\s)/,/^(?:$)/],conditions:{close_directive:{rules:[],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},callbackargs:{rules:[23,24],inclusive:!1},callbackname:{rules:[20,21,22],inclusive:!1},href:{rules:[17,18],inclusive:!1},click:{rules:[26,27],inclusive:!1},vertex:{rules:[],inclusive:!1},dir:{rules:[36,37,38,39,40,41,42,43,44,45,46],inclusive:!1},string:{rules:[8,9],inclusive:!1},INITIAL:{rules:[0,5,6,7,10,11,12,13,14,15,16,19,25,28,29,30,31,32,33,34,35,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102],inclusive:!0}}};function Jt(){this.yy={}}return Kt.lexer=Qt,Jt.prototype=Kt,Kt.Parser=Jt,new Jt}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(17).readFileSync(n(18).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(12),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,2],n=[1,5],r=[6,9,11,17,18,19,21],i=[1,15],a=[1,16],o=[1,17],s=[1,21],c=[4,6,9,11,17,18,19,21],u={trace:function(){},yy:{},symbols_:{error:2,start:3,journey:4,document:5,EOF:6,directive:7,line:8,SPACE:9,statement:10,NEWLINE:11,openDirective:12,typeDirective:13,closeDirective:14,":":15,argDirective:16,title:17,section:18,taskName:19,taskData:20,open_directive:21,type_directive:22,arg_directive:23,close_directive:24,$accept:0,$end:1},terminals_:{2:"error",4:"journey",6:"EOF",9:"SPACE",11:"NEWLINE",15:":",17:"title",18:"section",19:"taskName",20:"taskData",21:"open_directive",22:"type_directive",23:"arg_directive",24:"close_directive"},productions_:[0,[3,3],[3,2],[5,0],[5,2],[8,2],[8,1],[8,1],[8,1],[7,4],[7,6],[10,1],[10,1],[10,2],[10,1],[12,1],[13,1],[16,1],[14,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 1:return a[s-1];case 3:this.$=[];break;case 4:a[s-1].push(a[s]),this.$=a[s-1];break;case 5:case 6:this.$=a[s];break;case 7:case 8:this.$=[];break;case 11:r.setTitle(a[s].substr(6)),this.$=a[s].substr(6);break;case 12:r.addSection(a[s].substr(8)),this.$=a[s].substr(8);break;case 13:r.addTask(a[s-1],a[s]),this.$="task";break;case 15:r.parseDirective("%%{","open_directive");break;case 16:r.parseDirective(a[s],"type_directive");break;case 17:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 
18:r.parseDirective("}%%","close_directive","journey")}},table:[{3:1,4:e,7:3,12:4,21:n},{1:[3]},t(r,[2,3],{5:6}),{3:7,4:e,7:3,12:4,21:n},{13:8,22:[1,9]},{22:[2,15]},{6:[1,10],7:18,8:11,9:[1,12],10:13,11:[1,14],12:4,17:i,18:a,19:o,21:n},{1:[2,2]},{14:19,15:[1,20],24:s},t([15,24],[2,16]),t(r,[2,8],{1:[2,1]}),t(r,[2,4]),{7:18,10:22,12:4,17:i,18:a,19:o,21:n},t(r,[2,6]),t(r,[2,7]),t(r,[2,11]),t(r,[2,12]),{20:[1,23]},t(r,[2,14]),{11:[1,24]},{16:25,23:[1,26]},{11:[2,18]},t(r,[2,5]),t(r,[2,13]),t(c,[2,9]),{14:27,24:s},{24:[2,17]},{11:[1,28]},t(c,[2,10])],defaultActions:{5:[2,15],7:[2,2],21:[2,18],26:[2,17]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:m,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple 
actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,y.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},l={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return 
this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),21;case 1:return this.begin("type_directive"),22;case 2:return this.popState(),this.begin("arg_directive"),15;case 3:return this.popState(),this.popState(),24;case 4:return 23;case 5:case 6:break;case 7:return 11;case 8:case 9:break;case 10:return 4;case 11:return 17;case 12:return 18;case 13:return 19;case 14:return 20;case 15:return 15;case 16:return 6;case 17:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:journey\b)/i,/^(?:title\s[^#\n;]+)/i,/^(?:section\s[^#:\n;]+)/i,/^(?:[^#:\n;]+)/i,/^(?::[^#\n;]+)/i,/^(?::)/i,/^(?:$)/i,/^(?:.)/i],conditions:{open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,13,14,15,16,17],inclusive:!0}}};function h(){this.yy={}}return u.lexer=l,h.prototype=u,u.Parser=h,new h}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var 
i=n(17).readFileSync(n(18).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(12),n(7)(t))},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(11),i=n(6);e.default=function(t,e){return r.default.lang.round(i.default.parse(t)[e])}},function(t,e,n){var r=n(112),i=n(84),a=n(25);t.exports=function(t){return a(t)?r(t):i(t)}},function(t,e,n){var r;if(!r)try{r=n(0)}catch(t){}r||(r=window.d3),t.exports=r},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(11),i=n(6);e.default=function(t,e,n){var a=i.default.parse(t),o=a[e],s=r.default.channel.clamp[e](o+n);return o!==s&&(a[e]=s),i.default.stringify(a)}},function(t,e,n){var r=n(215),i=n(221);t.exports=function(t,e){var n=i(t,e);return r(n)?n:void 0}},function(t,e,n){var r=n(40),i=n(217),a=n(218),o=r?r.toStringTag:void 0;t.exports=function(t){return null==t?void 0===t?"[object Undefined]":"[object Null]":o&&o in Object(t)?i(t):a(t)}},function(t,e,n){var r=n(112),i=n(241),a=n(25);t.exports=function(t){return a(t)?r(t,!0):i(t)}},function(t,e){t.exports=function(t){return t}},function(t){t.exports=JSON.parse('{"name":"mermaid","version":"8.11.2","description":"Markdownish syntax for generating flowcharts, sequence diagrams, class diagrams, gantt charts and git graphs.","main":"dist/mermaid.core.js","keywords":["diagram","markdown","flowchart","sequence diagram","gantt","class diagram","git graph"],"scripts":{"build:development":"webpack --progress --colors","build:production":"yarn build:development -p --config webpack.config.prod.babel.js","build":"yarn build:development && yarn build:production","postbuild":"documentation build src/mermaidAPI.js src/config.js src/defaultConfig.js --shallow -f md --markdown-toc false > docs/Setup.md","build:watch":"yarn build --watch","release":"yarn build","lint":"eslint src","e2e:depr":"yarn lint && jest e2e --config e2e/jest.config.js","cypress":"percy exec -- cypress 
run","e2e":"start-server-and-test dev http://localhost:9000/ cypress","e2e-upd":"yarn lint && jest e2e -u --config e2e/jest.config.js","dev":"webpack-dev-server --config webpack.config.e2e.js","test":"yarn lint && jest src/.*","test:watch":"jest --watch src","prepublishOnly":"yarn build && yarn test","prepare":"yarn build"},"repository":{"type":"git","url":"https://github.com/knsv/mermaid"},"author":"Knut Sveidqvist","license":"MIT","standard":{"ignore":["**/parser/*.js","dist/**/*.js","cypress/**/*.js"],"globals":["page"]},"dependencies":{"@braintree/sanitize-url":"^3.1.0","@percy/migrate":"^0.10.0","d3":"^5.7.0","dagre":"^0.8.5","dagre-d3":"^0.6.4","dompurify":"2.3.0","graphlib":"^2.1.8","khroma":"^1.4.1","moment-mini":"^2.24.0","stylis":"^4.0.10"},"devDependencies":{"@babel/core":"^7.14.6","@babel/preset-env":"^7.14.7","@babel/register":"^7.14.5","@percy/cli":"^1.0.0-beta.58","@percy/cypress":"^3.1.0","babel-core":"7.0.0-bridge.0","babel-eslint":"^10.1.0","babel-jest":"^27.0.6","babel-loader":"^8.2.2","coveralls":"^3.0.2","css-to-string-loader":"^0.1.3","cypress":"7.6.0","documentation":"13.2.0","eslint":"^7.30.0","eslint-config-prettier":"^8.3.0","eslint-plugin-prettier":"^3.4.0","husky":"^7.0.1","identity-obj-proxy":"^3.0.0","jest":"^27.0.6","jison":"^0.4.18","js-base64":"3.6.1","moment":"^2.23.0","prettier":"^2.3.2","start-server-and-test":"^1.12.6","terser-webpack-plugin":"^2.2.2","webpack":"^4.41.2","webpack-bundle-analyzer":"^4.4.2","webpack-cli":"^3.1.2","webpack-dev-server":"^3.4.1","webpack-node-externals":"^1.7.2"},"files":["dist"],"yarn-upgrade-all":{"ignore":["babel-core"]},"sideEffects":["**/*.css","**/*.scss"],"husky":{"hooks":{"pre-push":"yarn test"}}}')},function(t,e){t.exports=function(t,e){return t===e||t!=t&&e!=e}},function(t,e,n){var r=n(34),i=n(14);t.exports=function(t){if(!i(t))return!1;var e=r(t);return"[object Function]"==e||"[object GeneratorFunction]"==e||"[object AsyncFunction]"==e||"[object Proxy]"==e}},function(t,e,n){var 
r=n(19).Symbol;t.exports=r},function(t,e,n){(function(t){var r=n(19),i=n(237),a=e&&!e.nodeType&&e,o=a&&"object"==typeof t&&t&&!t.nodeType&&t,s=o&&o.exports===a?r.Buffer:void 0,c=(s?s.isBuffer:void 0)||i;t.exports=c}).call(this,n(7)(t))},function(t,e,n){var r=n(246),i=n(79),a=n(247),o=n(121),s=n(248),c=n(34),u=n(110),l=u(r),h=u(i),f=u(a),d=u(o),p=u(s),y=c;(r&&"[object DataView]"!=y(new r(new ArrayBuffer(1)))||i&&"[object Map]"!=y(new i)||a&&"[object Promise]"!=y(a.resolve())||o&&"[object Set]"!=y(new o)||s&&"[object WeakMap]"!=y(new s))&&(y=function(t){var e=c(t),n="[object Object]"==e?t.constructor:void 0,r=n?u(n):"";if(r)switch(r){case l:return"[object DataView]";case h:return"[object Map]";case f:return"[object Promise]";case d:return"[object Set]";case p:return"[object WeakMap]"}return e}),t.exports=y},function(t,e,n){var r=n(34),i=n(21);t.exports=function(t){return"symbol"==typeof t||i(t)&&"[object Symbol]"==r(t)}},function(t,e,n){var r;try{r={defaults:n(154),each:n(89),isFunction:n(39),isPlainObject:n(158),pick:n(161),has:n(94),range:n(162),uniqueId:n(163)}}catch(t){}r||(r=window._),t.exports=r},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,3],n=[1,5],r=[1,17],i=[2,10],a=[1,21],o=[1,22],s=[1,23],c=[1,24],u=[1,25],l=[1,26],h=[1,19],f=[1,27],d=[1,28],p=[1,31],y=[66,67],g=[5,8,14,35,36,37,38,39,40,48,55,57,66,67],m=[5,6,8,14,35,36,37,38,39,40,48,66,67],v=[1,51],b=[1,52],x=[1,53],_=[1,54],k=[1,55],w=[1,56],E=[1,57],T=[57,58],C=[1,69],S=[1,65],A=[1,66],M=[1,67],O=[1,68],N=[1,70],B=[1,74],D=[1,75],L=[1,72],I=[1,73],R=[5,8,14,35,36,37,38,39,40,48,66,67],F={trace:function(){},yy:{},symbols_:{error:2,start:3,directive:4,NEWLINE:5,RD:6,diagram:7,EOF:8,openDirective:9,typeDirective:10,closeDirective:11,":":12,argDirective:13,open_directive:14,type_directive:15,arg_directive:16,close_directive:17,requirementDef:18,elementDef:19,relationshipDef:20,requirementType:21,requirementName:22,STRUCT_START:23,requirementBody:24,ID:25,COLONSEP:26,id:27,TEXT:28,text:29,RISK:30,riskLevel:31,VERIFYMTHD:32,verifyType:33,STRUCT_STOP:34,REQUIREMENT:35,FUNCTIONAL_REQUIREMENT:36,INTERFACE_REQUIREMENT:37,PERFORMANCE_REQUIREMENT:38,PHYSICAL_REQUIREMENT:39,DESIGN_CONSTRAINT:40,LOW_RISK:41,MED_RISK:42,HIGH_RISK:43,VERIFY_ANALYSIS:44,VERIFY_DEMONSTRATION:45,VERIFY_INSPECTION:46,VERIFY_TEST:47,ELEMENT:48,elementName:49,elementBody:50,TYPE:51,type:52,DOCREF:53,ref:54,END_ARROW_L:55,relationship:56,LINE:57,END_ARROW_R:58,CONTAINS:59,COPIES:60,DERIVES:61,SATISFIES:62,VERIFIES:63,REFINES:64,TRACES:65,unqString:66,qString:67,$accept:0,$end:1},terminals_:{2:"error",5:"NEWLINE",6:"RD",8:"EOF",12:":",14:"open_directive",15:"type_directive",16:"arg_directive",17:"close_directive",23:"STRUCT_START",25:"ID",26:"COLONSEP",28:"TEXT",30:"RISK",32:"VERIFYMTHD",34:"STRUCT_STOP",35:"REQUIREMENT",36:"FUNCTIONAL_REQUIREMENT",37:"INTERFACE_REQUIREMENT",38:"PERFORMANCE_REQUIREMENT",39:"PHYSICAL_REQUIREMENT",40:"DESIGN_CONSTRAINT",41:"LOW_RISK",42:"MED_RISK",43:"HIGH_RISK",44:"VERIFY_ANALYSIS",45:"VERIFY_DEMONSTRATION",46:"VERIFY_INSPECTION",47:"VERIFY_TEST",48:"ELEMENT",51:"TYPE",53:"DOCREF",55:"END_ARROW_L",57:"LINE",58:"END_ARROW_R",59:"CONTAINS",60
:"COPIES",61:"DERIVES",62:"SATISFIES",63:"VERIFIES",64:"REFINES",65:"TRACES",66:"unqString",67:"qString"},productions_:[0,[3,3],[3,2],[3,4],[4,3],[4,5],[9,1],[10,1],[13,1],[11,1],[7,0],[7,2],[7,2],[7,2],[7,2],[7,2],[18,5],[24,5],[24,5],[24,5],[24,5],[24,2],[24,1],[21,1],[21,1],[21,1],[21,1],[21,1],[21,1],[31,1],[31,1],[31,1],[33,1],[33,1],[33,1],[33,1],[19,5],[50,5],[50,5],[50,2],[50,1],[20,5],[20,5],[56,1],[56,1],[56,1],[56,1],[56,1],[56,1],[56,1],[22,1],[22,1],[27,1],[27,1],[29,1],[29,1],[49,1],[49,1],[52,1],[52,1],[54,1],[54,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 6:r.parseDirective("%%{","open_directive");break;case 7:r.parseDirective(a[s],"type_directive");break;case 8:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 9:r.parseDirective("}%%","close_directive","pie");break;case 10:this.$=[];break;case 16:r.addRequirement(a[s-3],a[s-4]);break;case 17:r.setNewReqId(a[s-2]);break;case 18:r.setNewReqText(a[s-2]);break;case 19:r.setNewReqRisk(a[s-2]);break;case 20:r.setNewReqVerifyMethod(a[s-2]);break;case 23:this.$=r.RequirementType.REQUIREMENT;break;case 24:this.$=r.RequirementType.FUNCTIONAL_REQUIREMENT;break;case 25:this.$=r.RequirementType.INTERFACE_REQUIREMENT;break;case 26:this.$=r.RequirementType.PERFORMANCE_REQUIREMENT;break;case 27:this.$=r.RequirementType.PHYSICAL_REQUIREMENT;break;case 28:this.$=r.RequirementType.DESIGN_CONSTRAINT;break;case 29:this.$=r.RiskLevel.LOW_RISK;break;case 30:this.$=r.RiskLevel.MED_RISK;break;case 31:this.$=r.RiskLevel.HIGH_RISK;break;case 32:this.$=r.VerifyType.VERIFY_ANALYSIS;break;case 33:this.$=r.VerifyType.VERIFY_DEMONSTRATION;break;case 34:this.$=r.VerifyType.VERIFY_INSPECTION;break;case 35:this.$=r.VerifyType.VERIFY_TEST;break;case 36:r.addElement(a[s-3]);break;case 37:r.setNewElementType(a[s-2]);break;case 38:r.setNewElementDocRef(a[s-2]);break;case 41:r.addRelationship(a[s-2],a[s],a[s-4]);break;case 42:r.addRelationship(a[s-2],a[s-4],a[s]);break;case 
43:this.$=r.Relationships.CONTAINS;break;case 44:this.$=r.Relationships.COPIES;break;case 45:this.$=r.Relationships.DERIVES;break;case 46:this.$=r.Relationships.SATISFIES;break;case 47:this.$=r.Relationships.VERIFIES;break;case 48:this.$=r.Relationships.REFINES;break;case 49:this.$=r.Relationships.TRACES}},table:[{3:1,4:2,6:e,9:4,14:n},{1:[3]},{3:7,4:2,5:[1,6],6:e,9:4,14:n},{5:[1,8]},{10:9,15:[1,10]},{15:[2,6]},{3:11,4:2,6:e,9:4,14:n},{1:[2,2]},{4:16,5:r,7:12,8:i,9:4,14:n,18:13,19:14,20:15,21:18,27:20,35:a,36:o,37:s,38:c,39:u,40:l,48:h,66:f,67:d},{11:29,12:[1,30],17:p},t([12,17],[2,7]),{1:[2,1]},{8:[1,32]},{4:16,5:r,7:33,8:i,9:4,14:n,18:13,19:14,20:15,21:18,27:20,35:a,36:o,37:s,38:c,39:u,40:l,48:h,66:f,67:d},{4:16,5:r,7:34,8:i,9:4,14:n,18:13,19:14,20:15,21:18,27:20,35:a,36:o,37:s,38:c,39:u,40:l,48:h,66:f,67:d},{4:16,5:r,7:35,8:i,9:4,14:n,18:13,19:14,20:15,21:18,27:20,35:a,36:o,37:s,38:c,39:u,40:l,48:h,66:f,67:d},{4:16,5:r,7:36,8:i,9:4,14:n,18:13,19:14,20:15,21:18,27:20,35:a,36:o,37:s,38:c,39:u,40:l,48:h,66:f,67:d},{4:16,5:r,7:37,8:i,9:4,14:n,18:13,19:14,20:15,21:18,27:20,35:a,36:o,37:s,38:c,39:u,40:l,48:h,66:f,67:d},{22:38,66:[1,39],67:[1,40]},{49:41,66:[1,42],67:[1,43]},{55:[1,44],57:[1,45]},t(y,[2,23]),t(y,[2,24]),t(y,[2,25]),t(y,[2,26]),t(y,[2,27]),t(y,[2,28]),t(g,[2,52]),t(g,[2,53]),t(m,[2,4]),{13:46,16:[1,47]},t(m,[2,9]),{1:[2,3]},{8:[2,11]},{8:[2,12]},{8:[2,13]},{8:[2,14]},{8:[2,15]},{23:[1,48]},{23:[2,50]},{23:[2,51]},{23:[1,49]},{23:[2,56]},{23:[2,57]},{56:50,59:v,60:b,61:x,62:_,63:k,64:w,65:E},{56:58,59:v,60:b,61:x,62:_,63:k,64:w,65:E},{11:59,17:p},{17:[2,8]},{5:[1,60]},{5:[1,61]},{57:[1,62]},t(T,[2,43]),t(T,[2,44]),t(T,[2,45]),t(T,[2,46]),t(T,[2,47]),t(T,[2,48]),t(T,[2,49]),{58:[1,63]},t(m,[2,5]),{5:C,24:64,25:S,28:A,30:M,32:O,34:N},{5:B,34:D,50:71,51:L,53:I},{27:76,66:f,67:d},{27:77,66:f,67:d},t(R,[2,16]),{26:[1,78]},{26:[1,79]},{26:[1,80]},{26:[1,81]},{5:C,24:82,25:S,28:A,30:M,32:O,34:N},t(R,[2,22]),t(R,[2,36]),{26:[1,83]},{26:[1,84]},{5:B,34:D,50:85,51:
L,53:I},t(R,[2,40]),t(R,[2,41]),t(R,[2,42]),{27:86,66:f,67:d},{29:87,66:[1,88],67:[1,89]},{31:90,41:[1,91],42:[1,92],43:[1,93]},{33:94,44:[1,95],45:[1,96],46:[1,97],47:[1,98]},t(R,[2,21]),{52:99,66:[1,100],67:[1,101]},{54:102,66:[1,103],67:[1,104]},t(R,[2,39]),{5:[1,105]},{5:[1,106]},{5:[2,54]},{5:[2,55]},{5:[1,107]},{5:[2,29]},{5:[2,30]},{5:[2,31]},{5:[1,108]},{5:[2,32]},{5:[2,33]},{5:[2,34]},{5:[2,35]},{5:[1,109]},{5:[2,58]},{5:[2,59]},{5:[1,110]},{5:[2,60]},{5:[2,61]},{5:C,24:111,25:S,28:A,30:M,32:O,34:N},{5:C,24:112,25:S,28:A,30:M,32:O,34:N},{5:C,24:113,25:S,28:A,30:M,32:O,34:N},{5:C,24:114,25:S,28:A,30:M,32:O,34:N},{5:B,34:D,50:115,51:L,53:I},{5:B,34:D,50:116,51:L,53:I},t(R,[2,17]),t(R,[2,18]),t(R,[2,19]),t(R,[2,20]),t(R,[2,37]),t(R,[2,38])],defaultActions:{5:[2,6],7:[2,2],11:[2,1],32:[2,3],33:[2,11],34:[2,12],35:[2,13],36:[2,14],37:[2,15],39:[2,50],40:[2,51],42:[2,56],43:[2,57],47:[2,8],88:[2,54],89:[2,55],91:[2,29],92:[2,30],93:[2,31],95:[2,32],96:[2,33],97:[2,34],98:[2,35],100:[2,58],101:[2,59],103:[2,60],104:[2,61]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in 
A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:m,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,y.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},P={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return 
this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),14;case 1:return this.begin("type_directive"),15;case 2:return this.popState(),this.begin("arg_directive"),12;case 3:return this.popState(),this.popState(),17;case 4:return 16;case 5:return 5;case 6:case 7:case 8:break;case 9:return 8;case 10:return 6;case 11:return 23;case 12:return 34;case 13:return 26;case 14:return 25;case 15:return 28;case 16:return 30;case 17:return 32;case 18:return 35;case 19:return 36;case 20:return 37;case 21:return 38;case 22:return 39;case 23:return 40;case 24:return 41;case 25:return 42;case 26:return 43;case 27:return 44;case 28:return 45;case 29:return 46;case 30:return 47;case 31:return 48;case 32:return 59;case 33:return 60;case 34:return 61;case 35:return 62;case 36:return 63;case 37:return 64;case 
38:return 65;case 39:return 51;case 40:return 53;case 41:return 55;case 42:return 58;case 43:return 57;case 44:this.begin("string");break;case 45:this.popState();break;case 46:return"qString";case 47:return e.yytext=e.yytext.trim(),66}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:(\r?\n)+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:$)/i,/^(?:requirementDiagram\b)/i,/^(?:\{)/i,/^(?:\})/i,/^(?::)/i,/^(?:id\b)/i,/^(?:text\b)/i,/^(?:risk\b)/i,/^(?:verifyMethod\b)/i,/^(?:requirement\b)/i,/^(?:functionalRequirement\b)/i,/^(?:interfaceRequirement\b)/i,/^(?:performanceRequirement\b)/i,/^(?:physicalRequirement\b)/i,/^(?:designConstraint\b)/i,/^(?:low\b)/i,/^(?:medium\b)/i,/^(?:high\b)/i,/^(?:analysis\b)/i,/^(?:demonstration\b)/i,/^(?:inspection\b)/i,/^(?:test\b)/i,/^(?:element\b)/i,/^(?:contains\b)/i,/^(?:copies\b)/i,/^(?:derives\b)/i,/^(?:satisfies\b)/i,/^(?:verifies\b)/i,/^(?:refines\b)/i,/^(?:traces\b)/i,/^(?:type\b)/i,/^(?:docref\b)/i,/^(?:<-)/i,/^(?:->)/i,/^(?:-)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[\w][^\r\n\{\<\>\-\=]*)/i],conditions:{close_directive:{rules:[],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},unqString:{rules:[],inclusive:!1},token:{rules:[],inclusive:!1},string:{rules:[45,46],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,47],inclusive:!0}}};function j(){this.yy={}}return F.lexer=P,j.prototype=F,F.Parser=j,new j}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(17).readFileSync(n(18).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(12),n(7)(t))},function(t,e,n){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0});var r=new(n(175).default)({r:0,g:0,b:0,a:0},"transparent");e.default=r},function(t,e,n){var r=n(59),i=n(60);t.exports=function(t,e,n,a){var o=!n;n||(n={});for(var s=-1,c=e.length;++s-1&&t%1==0&&t-1}(s)?s:(n=s.match(a))?(e=n[0],r.test(e)?"about:blank":s):"about:blank"}}},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[2,3],n=[1,7],r=[7,12,15,17,19,20,21],i=[7,11,12,15,17,19,20,21],a=[2,20],o=[1,32],s={trace:function(){},yy:{},symbols_:{error:2,start:3,GG:4,":":5,document:6,EOF:7,DIR:8,options:9,body:10,OPT:11,NL:12,line:13,statement:14,COMMIT:15,commit_arg:16,BRANCH:17,ID:18,CHECKOUT:19,MERGE:20,RESET:21,reset_arg:22,STR:23,HEAD:24,reset_parents:25,CARET:26,$accept:0,$end:1},terminals_:{2:"error",4:"GG",5:":",7:"EOF",8:"DIR",11:"OPT",12:"NL",15:"COMMIT",17:"BRANCH",18:"ID",19:"CHECKOUT",20:"MERGE",21:"RESET",23:"STR",24:"HEAD",26:"CARET"},productions_:[0,[3,4],[3,5],[6,0],[6,2],[9,2],[9,1],[10,0],[10,2],[13,2],[13,1],[14,2],[14,2],[14,2],[14,2],[14,2],[16,0],[16,1],[22,2],[22,2],[25,0],[25,2]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 1:return a[s-1];case 2:return r.setDirection(a[s-3]),a[s-1];case 4:r.setOptions(a[s-1]),this.$=a[s];break;case 5:a[s-1]+=a[s],this.$=a[s-1];break;case 7:this.$=[];break;case 8:a[s-1].push(a[s]),this.$=a[s-1];break;case 9:this.$=a[s-1];break;case 11:r.commit(a[s]);break;case 12:r.branch(a[s]);break;case 13:r.checkout(a[s]);break;case 14:r.merge(a[s]);break;case 15:r.reset(a[s]);break;case 16:this.$="";break;case 17:this.$=a[s];break;case 18:this.$=a[s-1]+":"+a[s];break;case 19:this.$=a[s-1]+":"+r.count,r.count=0;break;case 20:r.count=0;break;case 
21:r.count+=1}},table:[{3:1,4:[1,2]},{1:[3]},{5:[1,3],8:[1,4]},{6:5,7:e,9:6,12:n},{5:[1,8]},{7:[1,9]},t(r,[2,7],{10:10,11:[1,11]}),t(i,[2,6]),{6:12,7:e,9:6,12:n},{1:[2,1]},{7:[2,4],12:[1,15],13:13,14:14,15:[1,16],17:[1,17],19:[1,18],20:[1,19],21:[1,20]},t(i,[2,5]),{7:[1,21]},t(r,[2,8]),{12:[1,22]},t(r,[2,10]),{12:[2,16],16:23,23:[1,24]},{18:[1,25]},{18:[1,26]},{18:[1,27]},{18:[1,30],22:28,24:[1,29]},{1:[2,2]},t(r,[2,9]),{12:[2,11]},{12:[2,17]},{12:[2,12]},{12:[2,13]},{12:[2,14]},{12:[2,15]},{12:a,25:31,26:o},{12:a,25:33,26:o},{12:[2,18]},{12:a,25:34,26:o},{12:[2,19]},{12:[2,21]}],defaultActions:{9:[2,1],21:[2,2],23:[2,11],24:[2,17],25:[2,12],26:[2,13],27:[2,14],28:[2,15],31:[2,18],33:[2,19],34:[2,21]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of 
input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:m,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,y.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},c={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 12;case 1:case 2:case 3:break;case 4:return 4;case 5:return 15;case 6:return 17;case 7:return 20;case 8:return 21;case 9:return 19;case 10:case 11:return 8;case 12:return 5;case 13:return 26;case 14:this.begin("options");break;case 15:this.popState();break;case 16:return 11;case 17:this.begin("string");break;case 18:this.popState();break;case 19:return 23;case 20:return 18;case 21:return 7}},rules:[/^(?:(\r?\n)+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:gitGraph\b)/i,/^(?:commit\b)/i,/^(?:branch\b)/i,/^(?:merge\b)/i,/^(?:reset\b)/i,/^(?:checkout\b)/i,/^(?:LR\b)/i,/^(?:BT\b)/i,/^(?::)/i,/^(?:\^)/i,/^(?:options\r?\n)/i,/^(?:end\r?\n)/i,/^(?:[^\n]+\r?\n)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[a-zA-Z][-_\.a-zA-Z0-9]*[-_a-zA-Z0-9])/i,/^(?:$)/i],conditions:{options:{rules:[15,16],inclusive:!1},string:{rules:[18,19],inclusive:!1},INITIAL:{rules:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,17,20,21],inclusive:!0}}};function u(){this.yy={}}return s.lexer=c,u.prototype=s,s.Parser=u,new u}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var 
i=n(17).readFileSync(n(18).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(12),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[6,9,10],n={trace:function(){},yy:{},symbols_:{error:2,start:3,info:4,document:5,EOF:6,line:7,statement:8,NL:9,showInfo:10,$accept:0,$end:1},terminals_:{2:"error",4:"info",6:"EOF",9:"NL",10:"showInfo"},productions_:[0,[3,3],[5,0],[5,2],[7,1],[7,1],[8,1]],performAction:function(t,e,n,r,i,a,o){a.length;switch(i){case 1:return r;case 4:break;case 6:r.setInfo(!0)}},table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:6,9:[1,7],10:[1,8]},{1:[2,1]},t(e,[2,3]),t(e,[2,4]),t(e,[2,5]),t(e,[2,6])],defaultActions:{4:[2,1]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of 
input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:m,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,y.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},r={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 4;case 1:return 9;case 2:return"space";case 3:return 10;case 4:return 6;case 5:return"TXT"}},rules:[/^(?:info\b)/i,/^(?:[\s\n\r]+)/i,/^(?:[\s]+)/i,/^(?:showInfo\b)/i,/^(?:$)/i,/^(?:.)/i],conditions:{INITIAL:{rules:[0,1,2,3,4,5],inclusive:!0}}};function i(){this.yy={}}return n.lexer=r,i.prototype=n,n.Parser=i,new i}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(17).readFileSync(n(18).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(12),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,4],n=[1,5],r=[1,6],i=[1,7],a=[1,9],o=[1,11,13,20,21,22,23],s=[2,5],c=[1,6,11,13,20,21,22,23],u=[20,21,22],l=[2,8],h=[1,18],f=[1,19],d=[1,24],p=[6,20,21,22,23],y={trace:function(){},yy:{},symbols_:{error:2,start:3,eol:4,directive:5,PIE:6,document:7,showData:8,line:9,statement:10,txt:11,value:12,title:13,title_value:14,openDirective:15,typeDirective:16,closeDirective:17,":":18,argDirective:19,NEWLINE:20,";":21,EOF:22,open_directive:23,type_directive:24,arg_directive:25,close_directive:26,$accept:0,$end:1},terminals_:{2:"error",6:"PIE",8:"showData",11:"txt",12:"value",13:"title",14:"title_value",18:":",20:"NEWLINE",21:";",22:"EOF",23:"open_directive",24:"type_directive",25:"arg_directive",26:"close_directive"},productions_:[0,[3,2],[3,2],[3,2],[3,3],[7,0],[7,2],[9,2],[10,0],[10,2],[10,2],[10,1],[5,3],[5,5],[4,1],[4,1],[4,1],[15,1],[16,1],[19,1],[17,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 4:r.setShowData(!0);break;case 7:this.$=a[s-1];break;case 9:r.addSection(a[s-1],r.cleanupValue(a[s]));break;case 10:this.$=a[s].trim(),r.setTitle(this.$);break;case 17:r.parseDirective("%%{","open_directive");break;case 18:r.parseDirective(a[s],"type_directive");break;case 19:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 
20:r.parseDirective("}%%","close_directive","pie")}},table:[{3:1,4:2,5:3,6:e,15:8,20:n,21:r,22:i,23:a},{1:[3]},{3:10,4:2,5:3,6:e,15:8,20:n,21:r,22:i,23:a},{3:11,4:2,5:3,6:e,15:8,20:n,21:r,22:i,23:a},t(o,s,{7:12,8:[1,13]}),t(c,[2,14]),t(c,[2,15]),t(c,[2,16]),{16:14,24:[1,15]},{24:[2,17]},{1:[2,1]},{1:[2,2]},t(u,l,{15:8,9:16,10:17,5:20,1:[2,3],11:h,13:f,23:a}),t(o,s,{7:21}),{17:22,18:[1,23],26:d},t([18,26],[2,18]),t(o,[2,6]),{4:25,20:n,21:r,22:i},{12:[1,26]},{14:[1,27]},t(u,[2,11]),t(u,l,{15:8,9:16,10:17,5:20,1:[2,4],11:h,13:f,23:a}),t(p,[2,12]),{19:28,25:[1,29]},t(p,[2,20]),t(o,[2,7]),t(u,[2,9]),t(u,[2,10]),{17:30,26:d},{26:[2,19]},t(p,[2,13])],defaultActions:{9:[2,17],10:[2,1],11:[2,2],29:[2,19]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of 
input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:m,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,y.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},g={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),23;case 1:return this.begin("type_directive"),24;case 2:return this.popState(),this.begin("arg_directive"),18;case 3:return this.popState(),this.popState(),26;case 4:return 25;case 5:case 6:break;case 7:return 20;case 8:case 9:break;case 10:return this.begin("title"),13;case 11:return this.popState(),"title_value";case 12:this.begin("string");break;case 13:this.popState();break;case 14:return"txt";case 15:return 6;case 16:return 8;case 17:return"value";case 18:return 22}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n\r]+)/i,/^(?:%%[^\n]*)/i,/^(?:[\s]+)/i,/^(?:title\b)/i,/^(?:(?!\n||)*[^\n]*)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:pie\b)/i,/^(?:showData\b)/i,/^(?::[\s]*[\d]+(?:\.[\d]+)?)/i,/^(?:$)/i],conditions:{close_directive:{rules:[],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},title:{rules:[11],inclusive:!1},string:{rules:[13,14],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,12,15,16,17,18],inclusive:!0}}};function m(){this.yy={}}return 
y.lexer=g,m.prototype=y,y.Parser=m,new m}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(17).readFileSync(n(18).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(12),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,2],n=[1,5],r=[6,9,11,23,37],i=[1,17],a=[1,20],o=[1,25],s=[1,26],c=[1,27],u=[1,28],l=[1,37],h=[23,34,35],f=[4,6,9,11,23,37],d=[30,31,32,33],p=[22,27],y={trace:function(){},yy:{},symbols_:{error:2,start:3,ER_DIAGRAM:4,document:5,EOF:6,directive:7,line:8,SPACE:9,statement:10,NEWLINE:11,openDirective:12,typeDirective:13,closeDirective:14,":":15,argDirective:16,entityName:17,relSpec:18,role:19,BLOCK_START:20,attributes:21,BLOCK_STOP:22,ALPHANUM:23,attribute:24,attributeType:25,attributeName:26,ATTRIBUTE_WORD:27,cardinality:28,relType:29,ZERO_OR_ONE:30,ZERO_OR_MORE:31,ONE_OR_MORE:32,ONLY_ONE:33,NON_IDENTIFYING:34,IDENTIFYING:35,WORD:36,open_directive:37,type_directive:38,arg_directive:39,close_directive:40,$accept:0,$end:1},terminals_:{2:"error",4:"ER_DIAGRAM",6:"EOF",9:"SPACE",11:"NEWLINE",15:":",20:"BLOCK_START",22:"BLOCK_STOP",23:"ALPHANUM",27:"ATTRIBUTE_WORD",30:"ZERO_OR_ONE",31:"ZERO_OR_MORE",32:"ONE_OR_MORE",33:"ONLY_ONE",34:"NON_IDENTIFYING",35:"IDENTIFYING",36:"WORD",37:"open_directive",38:"type_directive",39:"arg_directive",40:"close_directive"},productions_:[0,[3,3],[3,2],[5,0],[5,2],[8,2],[8,1],[8,1],[8,1],[7,4],[7,6],[10,1],[10,5],[10,4],[10,3],[10,1],[17,1],[21,1],[21,2],[24,2],[25,1],[26,1],[18,3],[28,1],[28,1],[28,1],[28,1],[29,1],[29,1],[19,1],[19,1],[12,1],[13,1],[16,1],[14,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 1:break;case 3:this.$=[];break;case 4:a[s-1].push(a[s]),this.$=a[s-1];break;case 5:case 6:this.$=a[s];break;case 7:case 8:this.$=[];break;case 
12:r.addEntity(a[s-4]),r.addEntity(a[s-2]),r.addRelationship(a[s-4],a[s],a[s-2],a[s-3]);break;case 13:r.addEntity(a[s-3]),r.addAttributes(a[s-3],a[s-1]);break;case 14:r.addEntity(a[s-2]);break;case 15:r.addEntity(a[s]);break;case 16:this.$=a[s];break;case 17:this.$=[a[s]];break;case 18:a[s].push(a[s-1]),this.$=a[s];break;case 19:this.$={attributeType:a[s-1],attributeName:a[s]};break;case 20:case 21:this.$=a[s];break;case 22:this.$={cardA:a[s],relType:a[s-1],cardB:a[s-2]};break;case 23:this.$=r.Cardinality.ZERO_OR_ONE;break;case 24:this.$=r.Cardinality.ZERO_OR_MORE;break;case 25:this.$=r.Cardinality.ONE_OR_MORE;break;case 26:this.$=r.Cardinality.ONLY_ONE;break;case 27:this.$=r.Identification.NON_IDENTIFYING;break;case 28:this.$=r.Identification.IDENTIFYING;break;case 29:this.$=a[s].replace(/"/g,"");break;case 30:this.$=a[s];break;case 31:r.parseDirective("%%{","open_directive");break;case 32:r.parseDirective(a[s],"type_directive");break;case 33:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 
34:r.parseDirective("}%%","close_directive","er")}},table:[{3:1,4:e,7:3,12:4,37:n},{1:[3]},t(r,[2,3],{5:6}),{3:7,4:e,7:3,12:4,37:n},{13:8,38:[1,9]},{38:[2,31]},{6:[1,10],7:15,8:11,9:[1,12],10:13,11:[1,14],12:4,17:16,23:i,37:n},{1:[2,2]},{14:18,15:[1,19],40:a},t([15,40],[2,32]),t(r,[2,8],{1:[2,1]}),t(r,[2,4]),{7:15,10:21,12:4,17:16,23:i,37:n},t(r,[2,6]),t(r,[2,7]),t(r,[2,11]),t(r,[2,15],{18:22,28:24,20:[1,23],30:o,31:s,32:c,33:u}),t([6,9,11,15,20,23,30,31,32,33,37],[2,16]),{11:[1,29]},{16:30,39:[1,31]},{11:[2,34]},t(r,[2,5]),{17:32,23:i},{21:33,22:[1,34],24:35,25:36,27:l},{29:38,34:[1,39],35:[1,40]},t(h,[2,23]),t(h,[2,24]),t(h,[2,25]),t(h,[2,26]),t(f,[2,9]),{14:41,40:a},{40:[2,33]},{15:[1,42]},{22:[1,43]},t(r,[2,14]),{21:44,22:[2,17],24:35,25:36,27:l},{26:45,27:[1,46]},{27:[2,20]},{28:47,30:o,31:s,32:c,33:u},t(d,[2,27]),t(d,[2,28]),{11:[1,48]},{19:49,23:[1,51],36:[1,50]},t(r,[2,13]),{22:[2,18]},t(p,[2,19]),t(p,[2,21]),{23:[2,22]},t(f,[2,10]),t(r,[2,12]),t(r,[2,29]),t(r,[2,30])],defaultActions:{5:[2,31],7:[2,2],20:[2,34],31:[2,33],37:[2,20],44:[2,18],47:[2,22]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in 
A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:m,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,y.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},g={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return 
this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),37;case 1:return this.begin("type_directive"),38;case 2:return this.popState(),this.begin("arg_directive"),15;case 3:return this.popState(),this.popState(),40;case 4:return 39;case 5:case 6:break;case 7:return 11;case 8:break;case 9:return 9;case 10:return 36;case 11:return 4;case 12:return this.begin("block"),20;case 13:break;case 14:return 27;case 15:break;case 16:return this.popState(),22;case 17:return e.yytext[0];case 18:return 30;case 19:return 31;case 20:return 32;case 21:return 33;case 22:return 30;case 23:return 31;case 24:return 32;case 25:return 34;case 26:return 35;case 27:case 28:return 34;case 29:return 23;case 30:return e.yytext[0];case 31:return 
6}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:[\s]+)/i,/^(?:"[^"]*")/i,/^(?:erDiagram\b)/i,/^(?:\{)/i,/^(?:\s+)/i,/^(?:[A-Za-z][A-Za-z0-9\-_]*)/i,/^(?:[\n]+)/i,/^(?:\})/i,/^(?:.)/i,/^(?:\|o\b)/i,/^(?:\}o\b)/i,/^(?:\}\|)/i,/^(?:\|\|)/i,/^(?:o\|)/i,/^(?:o\{)/i,/^(?:\|\{)/i,/^(?:\.\.)/i,/^(?:--)/i,/^(?:\.-)/i,/^(?:-\.)/i,/^(?:[A-Za-z][A-Za-z0-9\-_]*)/i,/^(?:.)/i,/^(?:$)/i],conditions:{open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},block:{rules:[13,14,15,16,17],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,18,19,20,21,22,23,24,25,26,27,28,29,30,31],inclusive:!0}}};function m(){this.yy={}}return y.lexer=g,m.prototype=y,y.Parser=m,new m}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(17).readFileSync(n(18).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(12),n(7)(t))},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(11),i=n(6);e.default=function(t){var e=i.default.parse(t),n=e.r,a=e.g,o=e.b,s=.2126*r.default.channel.toLinear(n)+.7152*r.default.channel.toLinear(a)+.0722*r.default.channel.toLinear(o);return r.default.lang.round(s)}},function(t,e,n){"use strict";var r=n(13);t.exports=i;function i(t){this._isDirected=!r.has(t,"directed")||t.directed,this._isMultigraph=!!r.has(t,"multigraph")&&t.multigraph,this._isCompound=!!r.has(t,"compound")&&t.compound,this._label=void 0,this._defaultNodeLabelFn=r.constant(void 0),this._defaultEdgeLabelFn=r.constant(void 0),this._nodes={},this._isCompound&&(this._parent={},this._children={},this._children["\0"]={}),this._in={},this._preds={},this._out={},this._sucs={},this._edgeObjs={},this._edgeLabels={}}function 
a(t,e){t[e]?t[e]++:t[e]=1}function o(t,e){--t[e]||delete t[e]}function s(t,e,n,i){var a=""+e,o=""+n;if(!t&&a>o){var s=a;a=o,o=s}return a+""+o+""+(r.isUndefined(i)?"\0":i)}function c(t,e,n,r){var i=""+e,a=""+n;if(!t&&i>a){var o=i;i=a,a=o}var s={v:i,w:a};return r&&(s.name=r),s}function u(t,e){return s(t,e.v,e.w,e.name)}i.prototype._nodeCount=0,i.prototype._edgeCount=0,i.prototype.isDirected=function(){return this._isDirected},i.prototype.isMultigraph=function(){return this._isMultigraph},i.prototype.isCompound=function(){return this._isCompound},i.prototype.setGraph=function(t){return this._label=t,this},i.prototype.graph=function(){return this._label},i.prototype.setDefaultNodeLabel=function(t){return r.isFunction(t)||(t=r.constant(t)),this._defaultNodeLabelFn=t,this},i.prototype.nodeCount=function(){return this._nodeCount},i.prototype.nodes=function(){return r.keys(this._nodes)},i.prototype.sources=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._in[e])}))},i.prototype.sinks=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._out[e])}))},i.prototype.setNodes=function(t,e){var n=arguments,i=this;return r.each(t,(function(t){n.length>1?i.setNode(t,e):i.setNode(t)})),this},i.prototype.setNode=function(t,e){return r.has(this._nodes,t)?(arguments.length>1&&(this._nodes[t]=e),this):(this._nodes[t]=arguments.length>1?e:this._defaultNodeLabelFn(t),this._isCompound&&(this._parent[t]="\0",this._children[t]={},this._children["\0"][t]=!0),this._in[t]={},this._preds[t]={},this._out[t]={},this._sucs[t]={},++this._nodeCount,this)},i.prototype.node=function(t){return this._nodes[t]},i.prototype.hasNode=function(t){return r.has(this._nodes,t)},i.prototype.removeNode=function(t){var e=this;if(r.has(this._nodes,t)){var n=function(t){e.removeEdge(e._edgeObjs[t])};delete this._nodes[t],this._isCompound&&(this._removeFromParentsChildList(t),delete 
this._parent[t],r.each(this.children(t),(function(t){e.setParent(t)})),delete this._children[t]),r.each(r.keys(this._in[t]),n),delete this._in[t],delete this._preds[t],r.each(r.keys(this._out[t]),n),delete this._out[t],delete this._sucs[t],--this._nodeCount}return this},i.prototype.setParent=function(t,e){if(!this._isCompound)throw new Error("Cannot set parent in a non-compound graph");if(r.isUndefined(e))e="\0";else{for(var n=e+="";!r.isUndefined(n);n=this.parent(n))if(n===t)throw new Error("Setting "+e+" as parent of "+t+" would create a cycle");this.setNode(e)}return this.setNode(t),this._removeFromParentsChildList(t),this._parent[t]=e,this._children[e][t]=!0,this},i.prototype._removeFromParentsChildList=function(t){delete this._children[this._parent[t]][t]},i.prototype.parent=function(t){if(this._isCompound){var e=this._parent[t];if("\0"!==e)return e}},i.prototype.children=function(t){if(r.isUndefined(t)&&(t="\0"),this._isCompound){var e=this._children[t];if(e)return r.keys(e)}else{if("\0"===t)return this.nodes();if(this.hasNode(t))return[]}},i.prototype.predecessors=function(t){var e=this._preds[t];if(e)return r.keys(e)},i.prototype.successors=function(t){var e=this._sucs[t];if(e)return r.keys(e)},i.prototype.neighbors=function(t){var e=this.predecessors(t);if(e)return r.union(e,this.successors(t))},i.prototype.isLeaf=function(t){return 0===(this.isDirected()?this.successors(t):this.neighbors(t)).length},i.prototype.filterNodes=function(t){var e=new this.constructor({directed:this._isDirected,multigraph:this._isMultigraph,compound:this._isCompound});e.setGraph(this.graph());var n=this;r.each(this._nodes,(function(n,r){t(r)&&e.setNode(r,n)})),r.each(this._edgeObjs,(function(t){e.hasNode(t.v)&&e.hasNode(t.w)&&e.setEdge(t,n.edge(t))}));var i={};return this._isCompound&&r.each(e.nodes(),(function(t){e.setParent(t,function t(r){var a=n.parent(r);return void 0===a||e.hasNode(a)?(i[r]=a,a):a in i?i[a]:t(a)}(t))})),e},i.prototype.setDefaultEdgeLabel=function(t){return 
r.isFunction(t)||(t=r.constant(t)),this._defaultEdgeLabelFn=t,this},i.prototype.edgeCount=function(){return this._edgeCount},i.prototype.edges=function(){return r.values(this._edgeObjs)},i.prototype.setPath=function(t,e){var n=this,i=arguments;return r.reduce(t,(function(t,r){return i.length>1?n.setEdge(t,r,e):n.setEdge(t,r),r})),this},i.prototype.setEdge=function(){var t,e,n,i,o=!1,u=arguments[0];"object"==typeof u&&null!==u&&"v"in u?(t=u.v,e=u.w,n=u.name,2===arguments.length&&(i=arguments[1],o=!0)):(t=u,e=arguments[1],n=arguments[3],arguments.length>2&&(i=arguments[2],o=!0)),t=""+t,e=""+e,r.isUndefined(n)||(n=""+n);var l=s(this._isDirected,t,e,n);if(r.has(this._edgeLabels,l))return o&&(this._edgeLabels[l]=i),this;if(!r.isUndefined(n)&&!this._isMultigraph)throw new Error("Cannot set a named edge when isMultigraph = false");this.setNode(t),this.setNode(e),this._edgeLabels[l]=o?i:this._defaultEdgeLabelFn(t,e,n);var h=c(this._isDirected,t,e,n);return t=h.v,e=h.w,Object.freeze(h),this._edgeObjs[l]=h,a(this._preds[e],t),a(this._sucs[t],e),this._in[e][l]=h,this._out[t][l]=h,this._edgeCount++,this},i.prototype.edge=function(t,e,n){var r=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n);return this._edgeLabels[r]},i.prototype.hasEdge=function(t,e,n){var i=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n);return r.has(this._edgeLabels,i)},i.prototype.removeEdge=function(t,e,n){var r=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n),i=this._edgeObjs[r];return i&&(t=i.v,e=i.w,delete this._edgeLabels[r],delete this._edgeObjs[r],o(this._preds[e],t),o(this._sucs[t],e),delete this._in[e][r],delete this._out[t][r],this._edgeCount--),this},i.prototype.inEdges=function(t,e){var n=this._in[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return t.v===e})):i}},i.prototype.outEdges=function(t,e){var n=this._out[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return 
t.w===e})):i}},i.prototype.nodeEdges=function(t,e){var n=this.inEdges(t,e);if(n)return n.concat(this.outEdges(t,e))}},function(t,e,n){var r=n(33)(n(19),"Map");t.exports=r},function(t,e,n){var r=n(222),i=n(229),a=n(231),o=n(232),s=n(233);function c(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e-1&&t%1==0&&t<=9007199254740991}},function(t,e,n){(function(t){var r=n(109),i=e&&!e.nodeType&&e,a=i&&"object"==typeof t&&t&&!t.nodeType&&t,o=a&&a.exports===i&&r.process,s=function(){try{var t=a&&a.require&&a.require("util").types;return t||o&&o.binding&&o.binding("util")}catch(t){}}();t.exports=s}).call(this,n(7)(t))},function(t,e,n){var r=n(63),i=n(239),a=Object.prototype.hasOwnProperty;t.exports=function(t){if(!r(t))return i(t);var e=[];for(var n in Object(t))a.call(t,n)&&"constructor"!=n&&e.push(n);return e}},function(t,e,n){var r=n(116),i=n(117),a=Object.prototype.propertyIsEnumerable,o=Object.getOwnPropertySymbols,s=o?function(t){return null==t?[]:(t=Object(t),r(o(t),(function(e){return a.call(t,e)})))}:i;t.exports=s},function(t,e){t.exports=function(t,e){for(var n=-1,r=e.length,i=t.length;++n0&&a(l)?n>1?t(l,n-1,a,o,s):r(s,l):o||(s[s.length]=l)}return s}},function(t,e,n){var r=n(43);t.exports=function(t,e,n){for(var i=-1,a=t.length;++i4,u=c?1:17,l=c?8:4,h=s?0:-1,f=c?255:15;return r.default.set({r:(i>>l*(h+3)&f)*u,g:(i>>l*(h+2)&f)*u,b:(i>>l*(h+1)&f)*u,a:s?(i&f)*u/255:1},t)}}},stringify:function(t){var e=t.r,n=t.g,r=t.b,a=t.a;return a<1?"#"+i.DEC2HEX[Math.round(e)]+i.DEC2HEX[Math.round(n)]+i.DEC2HEX[Math.round(r)]+i.DEC2HEX[Math.round(255*a)]:"#"+i.DEC2HEX[Math.round(e)]+i.DEC2HEX[Math.round(n)]+i.DEC2HEX[Math.round(r)]}};e.default=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(11),i=n(46),a=n(6);e.default=function(t,e,n,o){void 0===o&&(o=1);var s=i.default.set({h:r.default.channel.clamp.h(t),s:r.default.channel.clamp.s(e),l:r.default.channel.clamp.l(n),a:r.default.channel.clamp.a(o)});return 
a.default.stringify(s)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"a")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(77);e.default=function(t){return r.default(t)>=.5}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"a",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"a",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(6),i=n(53);e.default=function(t,e){var n=r.default.parse(t),a={};for(var o in e)e[o]&&(a[o]=n[o]+e[o]);return i.default(t,a)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(6),i=n(52);e.default=function(t,e,n){void 0===n&&(n=50);var a=r.default.parse(t),o=a.r,s=a.g,c=a.b,u=a.a,l=r.default.parse(e),h=l.r,f=l.g,d=l.b,p=l.a,y=n/100,g=2*y-1,m=u-p,v=((g*m==-1?g:(g+m)/(1+g*m))+1)/2,b=1-v,x=o*v+h*b,_=s*v+f*b,k=c*v+d*b,w=u*y+p*(1-y);return i.default(x,_,k,w)}},function(t,e){},function(t,e,n){var r=n(54),i=n(81),a=n(59),o=n(234),s=n(240),c=n(114),u=n(115),l=n(243),h=n(244),f=n(119),d=n(245),p=n(42),y=n(249),g=n(250),m=n(124),v=n(5),b=n(41),x=n(254),_=n(14),k=n(256),w=n(30),E=n(35),T={};T["[object Arguments]"]=T["[object Array]"]=T["[object ArrayBuffer]"]=T["[object DataView]"]=T["[object Boolean]"]=T["[object Date]"]=T["[object Float32Array]"]=T["[object Float64Array]"]=T["[object Int8Array]"]=T["[object Int16Array]"]=T["[object Int32Array]"]=T["[object Map]"]=T["[object Number]"]=T["[object Object]"]=T["[object RegExp]"]=T["[object Set]"]=T["[object String]"]=T["[object Symbol]"]=T["[object Uint8Array]"]=T["[object Uint8ClampedArray]"]=T["[object Uint16Array]"]=T["[object Uint32Array]"]=!0,T["[object Error]"]=T["[object Function]"]=T["[object WeakMap]"]=!1,t.exports=function 
t(e,n,C,S,A,M){var O,N=1&n,B=2&n,D=4&n;if(C&&(O=A?C(e,S,A,M):C(e)),void 0!==O)return O;if(!_(e))return e;var L=v(e);if(L){if(O=y(e),!N)return u(e,O)}else{var I=p(e),R="[object Function]"==I||"[object GeneratorFunction]"==I;if(b(e))return c(e,N);if("[object Object]"==I||"[object Arguments]"==I||R&&!A){if(O=B||R?{}:m(e),!N)return B?h(e,s(O,e)):l(e,o(O,e))}else{if(!T[I])return A?e:{};O=g(e,I,N)}}M||(M=new r);var F=M.get(e);if(F)return F;M.set(e,O),k(e)?e.forEach((function(r){O.add(t(r,n,C,r,e,M))})):x(e)&&e.forEach((function(r,i){O.set(i,t(r,n,C,i,e,M))}));var P=L?void 0:(D?B?d:f:B?E:w)(e);return i(P||e,(function(r,i){P&&(r=e[i=r]),a(O,i,t(r,n,C,i,e,M))})),O}},function(t,e,n){(function(e){var n="object"==typeof e&&e&&e.Object===Object&&e;t.exports=n}).call(this,n(216))},function(t,e){var n=Function.prototype.toString;t.exports=function(t){if(null!=t){try{return n.call(t)}catch(t){}try{return t+""}catch(t){}}return""}},function(t,e,n){var r=n(33),i=function(){try{var t=r(Object,"defineProperty");return t({},"",{}),t}catch(t){}}();t.exports=i},function(t,e,n){var r=n(235),i=n(48),a=n(5),o=n(41),s=n(61),c=n(49),u=Object.prototype.hasOwnProperty;t.exports=function(t,e){var n=a(t),l=!n&&i(t),h=!n&&!l&&o(t),f=!n&&!l&&!h&&c(t),d=n||l||h||f,p=d?r(t.length,String):[],y=p.length;for(var g in t)!e&&!u.call(t,g)||d&&("length"==g||h&&("offset"==g||"parent"==g)||f&&("buffer"==g||"byteLength"==g||"byteOffset"==g)||s(g,y))||p.push(g);return p}},function(t,e){t.exports=function(t,e){return function(n){return t(e(n))}}},function(t,e,n){(function(t){var r=n(19),i=e&&!e.nodeType&&e,a=i&&"object"==typeof t&&t&&!t.nodeType&&t,o=a&&a.exports===i?r.Buffer:void 0,s=o?o.allocUnsafe:void 0;t.exports=function(t,e){if(e)return t.slice();var n=t.length,r=s?s(n):new t.constructor(n);return t.copy(r),r}}).call(this,n(7)(t))},function(t,e){t.exports=function(t,e){var n=-1,r=t.length;for(e||(e=Array(r));++nl))return!1;var f=c.get(t),d=c.get(e);if(f&&d)return f==e&&d==t;var p=-1,y=!0,g=2&n?new r:void 
0;for(c.set(t,e),c.set(e,t);++p0&&(a=c.removeMin(),(o=s[a]).distance!==Number.POSITIVE_INFINITY);)r(a).forEach(u);return s}(t,String(e),n||a,r||function(e){return t.outEdges(e)})};var a=r.constant(1)},function(t,e,n){var r=n(13);function i(){this._arr=[],this._keyIndices={}}t.exports=i,i.prototype.size=function(){return this._arr.length},i.prototype.keys=function(){return this._arr.map((function(t){return t.key}))},i.prototype.has=function(t){return r.has(this._keyIndices,t)},i.prototype.priority=function(t){var e=this._keyIndices[t];if(void 0!==e)return this._arr[e].priority},i.prototype.min=function(){if(0===this.size())throw new Error("Queue underflow");return this._arr[0].key},i.prototype.add=function(t,e){var n=this._keyIndices;if(t=String(t),!r.has(n,t)){var i=this._arr,a=i.length;return n[t]=a,i.push({key:t,priority:e}),this._decrease(a),!0}return!1},i.prototype.removeMin=function(){this._swap(0,this._arr.length-1);var t=this._arr.pop();return delete this._keyIndices[t.key],this._heapify(0),t.key},i.prototype.decrease=function(t,e){var n=this._keyIndices[t];if(e>this._arr[n].priority)throw new Error("New priority is greater than current priority. Key: "+t+" Old: "+this._arr[n].priority+" New: "+e);this._arr[n].priority=e,this._decrease(n)},i.prototype._heapify=function(t){var e=this._arr,n=2*t,r=n+1,i=t;n>1].priority2?e[2]:void 0;for(u&&a(e[0],e[1],u)&&(r=1);++n1&&o.sort((function(t,e){var r=t.x-n.x,i=t.y-n.y,a=Math.sqrt(r*r+i*i),o=e.x-n.x,s=e.y-n.y,c=Math.sqrt(o*o+s*s);return aMath.abs(o)*u?(s<0&&(u=-u),n=0===s?0:u*o/s,r=u):(o<0&&(c=-c),n=c,r=0===o?0:c*s/o);return{x:i+n,y:a+r}}},function(t,e,n){ +/*! 
@license DOMPurify 2.3.0 | (c) Cure53 and other contributors | Released under the Apache license 2.0 and Mozilla Public License 2.0 | github.com/cure53/DOMPurify/blob/2.3.0/LICENSE */ +t.exports=function(){"use strict";var t=Object.hasOwnProperty,e=Object.setPrototypeOf,n=Object.isFrozen,r=Object.getPrototypeOf,i=Object.getOwnPropertyDescriptor,a=Object.freeze,o=Object.seal,s=Object.create,c="undefined"!=typeof Reflect&&Reflect,u=c.apply,l=c.construct;u||(u=function(t,e,n){return t.apply(e,n)}),a||(a=function(t){return t}),o||(o=function(t){return t}),l||(l=function(t,e){return new(Function.prototype.bind.apply(t,[null].concat(function(t){if(Array.isArray(t)){for(var e=0,n=Array(t.length);e1?n-1:0),i=1;i/gm),j=o(/^data-[\-\w.\u00B7-\uFFFF]/),Y=o(/^aria-[\-\w]+$/),z=o(/^(?:(?:(?:f|ht)tps?|mailto|tel|callto|cid|xmpp):|[^a-z]|[a-z+.\-]+(?:[^a-z+.\-:]|$))/i),U=o(/^(?:\w+script|data):/i),$=o(/[\u0000-\u0020\u00A0\u1680\u180E\u2000-\u2029\u205F\u3000]/g),q="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t};function W(t){if(Array.isArray(t)){for(var e=0,n=Array(t.length);e0&&void 0!==arguments[0]?arguments[0]:H(),n=function(e){return t(e)};if(n.version="2.3.0",n.removed=[],!e||!e.document||9!==e.document.nodeType)return n.isSupported=!1,n;var r=e.document,i=e.document,o=e.DocumentFragment,s=e.HTMLTemplateElement,c=e.Node,u=e.Element,l=e.NodeFilter,h=e.NamedNodeMap,k=void 0===h?e.NamedNodeMap||e.MozNamedAttrMap:h,G=e.Text,X=e.Comment,Z=e.DOMParser,K=e.trustedTypes,Q=u.prototype,J=T(Q,"cloneNode"),tt=T(Q,"nextSibling"),et=T(Q,"childNodes"),nt=T(Q,"parentNode");if("function"==typeof s){var rt=i.createElement("template");rt.content&&rt.content.ownerDocument&&(i=rt.content.ownerDocument)}var 
it=V(K,r),at=it&&Ft?it.createHTML(""):"",ot=i,st=ot.implementation,ct=ot.createNodeIterator,ut=ot.createDocumentFragment,lt=ot.getElementsByTagName,ht=r.importNode,ft={};try{ft=E(i).documentMode?i.documentMode:{}}catch(t){}var dt={};n.isSupported="function"==typeof nt&&st&&void 0!==st.createHTMLDocument&&9!==ft;var pt=F,yt=P,gt=j,mt=Y,vt=U,bt=$,xt=z,_t=null,kt=w({},[].concat(W(C),W(S),W(A),W(O),W(B))),wt=null,Et=w({},[].concat(W(D),W(L),W(I),W(R))),Tt=null,Ct=null,St=!0,At=!0,Mt=!1,Ot=!1,Nt=!1,Bt=!1,Dt=!1,Lt=!1,It=!1,Rt=!0,Ft=!1,Pt=!0,jt=!0,Yt=!1,zt={},Ut=w({},["annotation-xml","audio","colgroup","desc","foreignobject","head","iframe","math","mi","mn","mo","ms","mtext","noembed","noframes","noscript","plaintext","script","style","svg","template","thead","title","video","xmp"]),$t=null,qt=w({},["audio","video","img","source","image","track"]),Wt=null,Ht=w({},["alt","class","for","id","label","name","pattern","placeholder","summary","title","value","style","xmlns"]),Vt="http://www.w3.org/1998/Math/MathML",Gt="http://www.w3.org/2000/svg",Xt="http://www.w3.org/1999/xhtml",Zt=Xt,Kt=!1,Qt=null,Jt=i.createElement("form"),te=function(t){Qt&&Qt===t||(t&&"object"===(void 0===t?"undefined":q(t))||(t={}),t=E(t),_t="ALLOWED_TAGS"in t?w({},t.ALLOWED_TAGS):kt,wt="ALLOWED_ATTR"in t?w({},t.ALLOWED_ATTR):Et,Wt="ADD_URI_SAFE_ATTR"in t?w(E(Ht),t.ADD_URI_SAFE_ATTR):Ht,$t="ADD_DATA_URI_TAGS"in t?w(E(qt),t.ADD_DATA_URI_TAGS):qt,Tt="FORBID_TAGS"in t?w({},t.FORBID_TAGS):{},Ct="FORBID_ATTR"in t?w({},t.FORBID_ATTR):{},zt="USE_PROFILES"in 
t&&t.USE_PROFILES,St=!1!==t.ALLOW_ARIA_ATTR,At=!1!==t.ALLOW_DATA_ATTR,Mt=t.ALLOW_UNKNOWN_PROTOCOLS||!1,Ot=t.SAFE_FOR_TEMPLATES||!1,Nt=t.WHOLE_DOCUMENT||!1,Lt=t.RETURN_DOM||!1,It=t.RETURN_DOM_FRAGMENT||!1,Rt=!1!==t.RETURN_DOM_IMPORT,Ft=t.RETURN_TRUSTED_TYPE||!1,Dt=t.FORCE_BODY||!1,Pt=!1!==t.SANITIZE_DOM,jt=!1!==t.KEEP_CONTENT,Yt=t.IN_PLACE||!1,xt=t.ALLOWED_URI_REGEXP||xt,Zt=t.NAMESPACE||Xt,Ot&&(At=!1),It&&(Lt=!0),zt&&(_t=w({},[].concat(W(B))),wt=[],!0===zt.html&&(w(_t,C),w(wt,D)),!0===zt.svg&&(w(_t,S),w(wt,L),w(wt,R)),!0===zt.svgFilters&&(w(_t,A),w(wt,L),w(wt,R)),!0===zt.mathMl&&(w(_t,O),w(wt,I),w(wt,R))),t.ADD_TAGS&&(_t===kt&&(_t=E(_t)),w(_t,t.ADD_TAGS)),t.ADD_ATTR&&(wt===Et&&(wt=E(wt)),w(wt,t.ADD_ATTR)),t.ADD_URI_SAFE_ATTR&&w(Wt,t.ADD_URI_SAFE_ATTR),jt&&(_t["#text"]=!0),Nt&&w(_t,["html","head","body"]),_t.table&&(w(_t,["tbody"]),delete Tt.tbody),a&&a(t),Qt=t)},ee=w({},["mi","mo","mn","ms","mtext"]),ne=w({},["foreignobject","desc","title","annotation-xml"]),re=w({},S);w(re,A),w(re,M);var ie=w({},O);w(ie,N);var ae=function(t){var e=nt(t);e&&e.tagName||(e={namespaceURI:Xt,tagName:"template"});var n=y(t.tagName),r=y(e.tagName);if(t.namespaceURI===Gt)return e.namespaceURI===Xt?"svg"===n:e.namespaceURI===Vt?"svg"===n&&("annotation-xml"===r||ee[r]):Boolean(re[n]);if(t.namespaceURI===Vt)return e.namespaceURI===Xt?"math"===n:e.namespaceURI===Gt?"math"===n&&ne[r]:Boolean(ie[n]);if(t.namespaceURI===Xt){if(e.namespaceURI===Gt&&!ne[r])return!1;if(e.namespaceURI===Vt&&!ee[r])return!1;var i=w({},["title","style","font","a","script"]);return!ie[n]&&(i[n]||!re[n])}return!1},oe=function(t){p(n.removed,{element:t});try{t.parentNode.removeChild(t)}catch(e){try{t.outerHTML=at}catch(e){t.remove()}}},se=function(t,e){try{p(n.removed,{attribute:e.getAttributeNode(t),from:e})}catch(t){p(n.removed,{attribute:null,from:e})}if(e.removeAttribute(t),"is"===t&&!wt[t])if(Lt||It)try{oe(e)}catch(t){}else try{e.setAttribute(t,"")}catch(t){}},ce=function(t){var e=void 0,n=void 
0;if(Dt)t=""+t;else{var r=g(t,/^[\r\n\t ]+/);n=r&&r[0]}var a=it?it.createHTML(t):t;if(Zt===Xt)try{e=(new Z).parseFromString(a,"text/html")}catch(t){}if(!e||!e.documentElement){e=st.createDocument(Zt,"template",null);try{e.documentElement.innerHTML=Kt?"":a}catch(t){}}var o=e.body||e.documentElement;return t&&n&&o.insertBefore(i.createTextNode(n),o.childNodes[0]||null),Zt===Xt?lt.call(e,Nt?"html":"body")[0]:Nt?e.documentElement:o},ue=function(t){return ct.call(t.ownerDocument||t,t,l.SHOW_ELEMENT|l.SHOW_COMMENT|l.SHOW_TEXT,null,!1)},le=function(t){return!(t instanceof G||t instanceof X||"string"==typeof t.nodeName&&"string"==typeof t.textContent&&"function"==typeof t.removeChild&&t.attributes instanceof k&&"function"==typeof t.removeAttribute&&"function"==typeof t.setAttribute&&"string"==typeof t.namespaceURI&&"function"==typeof t.insertBefore)},he=function(t){return"object"===(void 0===c?"undefined":q(c))?t instanceof c:t&&"object"===(void 0===t?"undefined":q(t))&&"number"==typeof t.nodeType&&"string"==typeof t.nodeName},fe=function(t,e,r){dt[t]&&f(dt[t],(function(t){t.call(n,e,r,Qt)}))},de=function(t){var e=void 0;if(fe("beforeSanitizeElements",t,null),le(t))return oe(t),!0;if(g(t.nodeName,/[\u0080-\uFFFF]/))return oe(t),!0;var r=y(t.nodeName);if(fe("uponSanitizeElement",t,{tagName:r,allowedTags:_t}),!he(t.firstElementChild)&&(!he(t.content)||!he(t.content.firstElementChild))&&x(/<[/\w]/g,t.innerHTML)&&x(/<[/\w]/g,t.textContent))return oe(t),!0;if(!_t[r]||Tt[r]){if(jt&&!Ut[r]){var i=nt(t)||t.parentNode,a=et(t)||t.childNodes;if(a&&i)for(var o=a.length-1;o>=0;--o)i.insertBefore(J(a[o],!0),tt(t))}return oe(t),!0}return t instanceof u&&!ae(t)?(oe(t),!0):"noscript"!==r&&"noembed"!==r||!x(/<\/no(script|embed)/i,t.innerHTML)?(Ot&&3===t.nodeType&&(e=t.textContent,e=m(e,pt," "),e=m(e,yt," "),t.textContent!==e&&(p(n.removed,{element:t.cloneNode()}),t.textContent=e)),fe("afterSanitizeElements",t,null),!1):(oe(t),!0)},pe=function(t,e,n){if(Pt&&("id"===e||"name"===e)&&(n in i||n 
in Jt))return!1;if(At&&!Ct[e]&&x(gt,e));else if(St&&x(mt,e));else{if(!wt[e]||Ct[e])return!1;if(Wt[e]);else if(x(xt,m(n,bt,"")));else if("src"!==e&&"xlink:href"!==e&&"href"!==e||"script"===t||0!==v(n,"data:")||!$t[t])if(Mt&&!x(vt,m(n,bt,"")));else if(n)return!1}return!0},ye=function(t){var e=void 0,r=void 0,i=void 0,a=void 0;fe("beforeSanitizeAttributes",t,null);var o=t.attributes;if(o){var s={attrName:"",attrValue:"",keepAttr:!0,allowedAttributes:wt};for(a=o.length;a--;){var c=e=o[a],u=c.name,l=c.namespaceURI;if(r=b(e.value),i=y(u),s.attrName=i,s.attrValue=r,s.keepAttr=!0,s.forceKeepAttr=void 0,fe("uponSanitizeAttribute",t,s),r=s.attrValue,!s.forceKeepAttr&&(se(u,t),s.keepAttr))if(x(/\/>/i,r))se(u,t);else{Ot&&(r=m(r,pt," "),r=m(r,yt," "));var h=t.nodeName.toLowerCase();if(pe(h,i,r))try{l?t.setAttributeNS(l,u,r):t.setAttribute(u,r),d(n.removed)}catch(t){}}}fe("afterSanitizeAttributes",t,null)}},ge=function t(e){var n=void 0,r=ue(e);for(fe("beforeSanitizeShadowDOM",e,null);n=r.nextNode();)fe("uponSanitizeShadowNode",n,null),de(n)||(n.content instanceof o&&t(n.content),ye(n));fe("afterSanitizeShadowDOM",e,null)};return n.sanitize=function(t,i){var a=void 0,s=void 0,u=void 0,l=void 0,h=void 0;if((Kt=!t)&&(t="\x3c!--\x3e"),"string"!=typeof t&&!he(t)){if("function"!=typeof t.toString)throw _("toString is not a function");if("string"!=typeof(t=t.toString()))throw _("dirty is not a string, aborting")}if(!n.isSupported){if("object"===q(e.toStaticHTML)||"function"==typeof e.toStaticHTML){if("string"==typeof t)return e.toStaticHTML(t);if(he(t))return e.toStaticHTML(t.outerHTML)}return t}if(Bt||te(i),n.removed=[],"string"==typeof t&&(Yt=!1),Yt);else if(t instanceof c)1===(s=(a=ce("\x3c!----\x3e")).ownerDocument.importNode(t,!0)).nodeType&&"BODY"===s.nodeName||"HTML"===s.nodeName?a=s:a.appendChild(s);else{if(!Lt&&!Ot&&!Nt&&-1===t.indexOf("<"))return it&&Ft?it.createHTML(t):t;if(!(a=ce(t)))return Lt?null:at}a&&Dt&&oe(a.firstChild);for(var 
f=ue(Yt?t:a);u=f.nextNode();)3===u.nodeType&&u===l||de(u)||(u.content instanceof o&&ge(u.content),ye(u),l=u);if(l=null,Yt)return t;if(Lt){if(It)for(h=ut.call(a.ownerDocument);a.firstChild;)h.appendChild(a.firstChild);else h=a;return Rt&&(h=ht.call(r,h,!0)),h}var d=Nt?a.outerHTML:a.innerHTML;return Ot&&(d=m(d,pt," "),d=m(d,yt," ")),it&&Ft?it.createHTML(d):d},n.setConfig=function(t){te(t),Bt=!0},n.clearConfig=function(){Qt=null,Bt=!1},n.isValidAttribute=function(t,e,n){Qt||te({});var r=y(t),i=y(e);return pe(r,i,n)},n.addHook=function(t,e){"function"==typeof e&&(dt[t]=dt[t]||[],p(dt[t],e))},n.removeHook=function(t){dt[t]&&d(dt[t])},n.removeHooks=function(t){dt[t]&&(dt[t]=[])},n.removeAllHooks=function(){dt={}},n}()}()},function(t,e){t.exports=function(t,e){return t.intersect(e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(52);e.hex=r.default;var i=n(52);e.rgb=i.default;var a=n(52);e.rgba=a.default;var o=n(100);e.hsl=o.default;var s=n(100);e.hsla=s.default;var c=n(181);e.toKeyword=c.default;var u=n(182);e.toHex=u.default;var l=n(183);e.toRgba=l.default;var h=n(184);e.toHsla=h.default;var f=n(29);e.channel=f.default;var d=n(185);e.red=d.default;var p=n(186);e.green=p.default;var y=n(187);e.blue=y.default;var g=n(188);e.hue=g.default;var m=n(189);e.saturation=m.default;var v=n(190);e.lightness=v.default;var b=n(101);e.alpha=b.default;var x=n(101);e.opacity=x.default;var _=n(191);e.contrast=_.default;var k=n(77);e.luminance=k.default;var w=n(192);e.isDark=w.default;var E=n(102);e.isLight=E.default;var T=n(193);e.isValid=T.default;var C=n(194);e.saturate=C.default;var S=n(195);e.desaturate=S.default;var A=n(196);e.lighten=A.default;var M=n(197);e.darken=M.default;var O=n(103);e.opacify=O.default;var N=n(103);e.fadeIn=N.default;var B=n(104);e.transparentize=B.default;var D=n(104);e.fadeOut=D.default;var L=n(198);e.complement=L.default;var I=n(199);e.grayscale=I.default;var R=n(105);e.adjust=R.default;var 
F=n(53);e.change=F.default;var P=n(200);e.invert=P.default;var j=n(106);e.mix=j.default;var Y=n(201);e.scale=Y.default},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r={min:{r:0,g:0,b:0,s:0,l:0,a:0},max:{r:255,g:255,b:255,h:360,s:100,l:100,a:1},clamp:{r:function(t){return t>=255?255:t<0?0:t},g:function(t){return t>=255?255:t<0?0:t},b:function(t){return t>=255?255:t<0?0:t},h:function(t){return t%360},s:function(t){return t>=100?100:t<0?0:t},l:function(t){return t>=100?100:t<0?0:t},a:function(t){return t>=1?1:t<0?0:t}},toLinear:function(t){var e=t/255;return t>.03928?Math.pow((e+.055)/1.055,2.4):e/12.92},hue2rgb:function(t,e,n){return n<0&&(n+=1),n>1&&(n-=1),n<1/6?t+6*(e-t)*n:n<.5?e:n<2/3?t+(e-t)*(2/3-n)*6:t},hsl2rgb:function(t,e){var n=t.h,i=t.s,a=t.l;if(!i)return 2.55*a;n/=360,i/=100;var o=(a/=100)<.5?a*(1+i):a+i-a*i,s=2*a-o;switch(e){case"r":return 255*r.hue2rgb(s,o,n+1/3);case"g":return 255*r.hue2rgb(s,o,n);case"b":return 255*r.hue2rgb(s,o,n-1/3)}},rgb2hsl:function(t,e){var n=t.r,r=t.g,i=t.b;n/=255,r/=255,i/=255;var a=Math.max(n,r,i),o=Math.min(n,r,i),s=(a+o)/2;if("l"===e)return 100*s;if(a===o)return 0;var c=a-o;if("s"===e)return 100*(s>.5?c/(2-a-o):c/(a+o));switch(a){case n:return 60*((r-i)/c+(rn?Math.min(e,Math.max(n,t)):Math.min(n,Math.max(e,t))},round:function(t){return Math.round(1e10*t)/1e10}};e.default=r},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r={dec2hex:function(t){var e=Math.round(t).toString(16);return e.length>1?e:"0"+e}};e.default=r},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(11),i=n(176),a=function(){function t(t,e){this.color=e,this.changed=!1,this.data=t,this.type=new i.default}return t.prototype.set=function(t,e){return this.color=e,this.changed=!1,this.data=t,this.type.type=0,this},t.prototype._ensureHSL=function(){var t=this.data,e=t.h,n=t.s,i=t.l;void 0===e&&(t.h=r.default.channel.rgb2hsl(t,"h")),void 
0===n&&(t.s=r.default.channel.rgb2hsl(t,"s")),void 0===i&&(t.l=r.default.channel.rgb2hsl(t,"l"))},t.prototype._ensureRGB=function(){var t=this.data,e=t.r,n=t.g,i=t.b;void 0===e&&(t.r=r.default.channel.hsl2rgb(t,"r")),void 0===n&&(t.g=r.default.channel.hsl2rgb(t,"g")),void 0===i&&(t.b=r.default.channel.hsl2rgb(t,"b"))},Object.defineProperty(t.prototype,"r",{get:function(){var t=this.data,e=t.r;return this.type.is(2)||void 0===e?(this._ensureHSL(),r.default.channel.hsl2rgb(t,"r")):e},set:function(t){this.type.set(1),this.changed=!0,this.data.r=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"g",{get:function(){var t=this.data,e=t.g;return this.type.is(2)||void 0===e?(this._ensureHSL(),r.default.channel.hsl2rgb(t,"g")):e},set:function(t){this.type.set(1),this.changed=!0,this.data.g=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"b",{get:function(){var t=this.data,e=t.b;return this.type.is(2)||void 0===e?(this._ensureHSL(),r.default.channel.hsl2rgb(t,"b")):e},set:function(t){this.type.set(1),this.changed=!0,this.data.b=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"h",{get:function(){var t=this.data,e=t.h;return this.type.is(1)||void 0===e?(this._ensureRGB(),r.default.channel.rgb2hsl(t,"h")):e},set:function(t){this.type.set(2),this.changed=!0,this.data.h=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"s",{get:function(){var t=this.data,e=t.s;return this.type.is(1)||void 0===e?(this._ensureRGB(),r.default.channel.rgb2hsl(t,"s")):e},set:function(t){this.type.set(2),this.changed=!0,this.data.s=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"l",{get:function(){var t=this.data,e=t.l;return this.type.is(1)||void 0===e?(this._ensureRGB(),r.default.channel.rgb2hsl(t,"l")):e},set:function(t){this.type.set(2),this.changed=!0,this.data.l=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"a",{get:function(){return 
this.data.a},set:function(t){this.changed=!0,this.data.a=t},enumerable:!0,configurable:!0}),t}();e.default=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=function(){function t(){this.type=0}return t.prototype.get=function(){return this.type},t.prototype.set=function(t){if(this.type&&this.type!==t)throw new Error("Cannot change both RGB and HSL channels at the same time");this.type=t},t.prototype.reset=function(){this.type=0},t.prototype.is=function(t){return this.type===t},t}();e.default=r},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(11),i={};e.DEC2HEX=i;for(var a=0;a<=255;a++)i[a]=r.default.unit.dec2hex(a)},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(99),i={colors:{aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyanaqua:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b00
82",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",rebeccapurple:"#663399",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",transparent:"#00000000",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"},parse:function(t){t=t.toLowerCase();var e=i.colors[t];if(e)return r.default.parse(e)},stringify:function(t){var e=r.default.stringify(t);for(var n in i.colors)if(i.colors[n]===e)return n}};e.default=i},function(t,e,n){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(11),i=n(46),a={re:/^rgba?\(\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))(?:\s*?(?:,|\/)\s*?\+?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?)))?\s*?\)$/i,parse:function(t){var e=t.charCodeAt(0);if(114===e||82===e){var n=t.match(a.re);if(n){var o=n[1],s=n[2],c=n[3],u=n[4],l=n[5],h=n[6],f=n[7],d=n[8];return i.default.set({r:r.default.channel.clamp.r(s?2.55*parseFloat(o):parseFloat(o)),g:r.default.channel.clamp.g(u?2.55*parseFloat(c):parseFloat(c)),b:r.default.channel.clamp.b(h?2.55*parseFloat(l):parseFloat(l)),a:f?r.default.channel.clamp.a(d?parseFloat(f)/100:parseFloat(f)):1},t)}}},stringify:function(t){var e=t.r,n=t.g,i=t.b,a=t.a;return a<1?"rgba("+r.default.lang.round(e)+", "+r.default.lang.round(n)+", "+r.default.lang.round(i)+", "+r.default.lang.round(a)+")":"rgb("+r.default.lang.round(e)+", "+r.default.lang.round(n)+", "+r.default.lang.round(i)+")"}};e.default=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(11),i=n(46),a={re:/^hsla?\(\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?(?:deg|grad|rad|turn)?)\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?%)\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?%)(?:\s*?(?:,|\/)\s*?\+?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?(%)?))?\s*?\)$/i,hueRe:/^(.+?)(deg|grad|rad|turn)$/i,_hue2deg:function(t){var e=t.match(a.hueRe);if(e){var n=e[1];switch(e[2]){case"grad":return r.default.channel.clamp.h(.9*parseFloat(n));case"rad":return r.default.channel.clamp.h(180*parseFloat(n)/Math.PI);case"turn":return r.default.channel.clamp.h(360*parseFloat(n))}}return r.default.channel.clamp.h(parseFloat(t))},parse:function(t){var e=t.charCodeAt(0);if(104===e||72===e){var n=t.match(a.re);if(n){var o=n[1],s=n[2],c=n[3],u=n[4],l=n[5];return 
i.default.set({h:a._hue2deg(o),s:r.default.channel.clamp.s(parseFloat(s)),l:r.default.channel.clamp.l(parseFloat(c)),a:u?r.default.channel.clamp.a(l?parseFloat(u)/100:parseFloat(u)):1},t)}}},stringify:function(t){var e=t.h,n=t.s,i=t.l,a=t.a;return a<1?"hsla("+r.default.lang.round(e)+", "+r.default.lang.round(n)+"%, "+r.default.lang.round(i)+"%, "+a+")":"hsl("+r.default.lang.round(e)+", "+r.default.lang.round(n)+"%, "+r.default.lang.round(i)+"%)"}};e.default=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(6);e.default=function(t){return r.default.format.keyword.stringify(r.default.parse(t))}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(6);e.default=function(t){return r.default.format.hex.stringify(r.default.parse(t))}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(6);e.default=function(t){return r.default.format.rgba.stringify(r.default.parse(t))}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(6);e.default=function(t){return r.default.format.hsla.stringify(r.default.parse(t))}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"r")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"g")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"b")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"h")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"s")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return 
r.default(t,"l")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(11),i=n(77);e.default=function(t,e){var n=i.default(t),a=i.default(e),o=Math.max(n,a),s=Math.min(n,a),c=(o+Number.EPSILON)/(s+Number.EPSILON);return r.default.lang.round(r.default.lang.clamp(c,1,10))}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(102);e.default=function(t){return!r.default(t)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(6);e.default=function(t){try{return r.default.parse(t),!0}catch(t){return!1}}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"s",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"s",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"l",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"l",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t){return r.default(t,"h",180)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(53);e.default=function(t){return r.default(t,{s:0})}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(6),i=n(106);e.default=function(t,e){void 0===e&&(e=100);var n=r.default.parse(t);return n.r=255-n.r,n.g=255-n.g,n.b=255-n.b,i.default(n,t,e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(11),i=n(6),a=n(105);e.default=function(t,e){var n,o,s,c=i.default.parse(t),u={};for(var l in e)u[l]=(n=c[l],o=e[l],s=r.default.channel.max[l],o>0?(s-n)*o/100:n*o/100);return a.default(t,u)}},function(t,e,n){var 
r={"./locale":107,"./locale.js":107};function i(t){var e=a(t);return n(e)}function a(t){if(!n.o(r,t)){var e=new Error("Cannot find module '"+t+"'");throw e.code="MODULE_NOT_FOUND",e}return r[t]}i.keys=function(){return Object.keys(r)},i.resolve=a,t.exports=i,i.id=202},function(t,e,n){t.exports={Graph:n(78),version:n(305)}},function(t,e,n){var r=n(108);t.exports=function(t){return r(t,4)}},function(t,e){t.exports=function(){this.__data__=[],this.size=0}},function(t,e,n){var r=n(56),i=Array.prototype.splice;t.exports=function(t){var e=this.__data__,n=r(e,t);return!(n<0)&&(n==e.length-1?e.pop():i.call(e,n,1),--this.size,!0)}},function(t,e,n){var r=n(56);t.exports=function(t){var e=this.__data__,n=r(e,t);return n<0?void 0:e[n][1]}},function(t,e,n){var r=n(56);t.exports=function(t){return r(this.__data__,t)>-1}},function(t,e,n){var r=n(56);t.exports=function(t,e){var n=this.__data__,i=r(n,t);return i<0?(++this.size,n.push([t,e])):n[i][1]=e,this}},function(t,e,n){var r=n(55);t.exports=function(){this.__data__=new r,this.size=0}},function(t,e){t.exports=function(t){var e=this.__data__,n=e.delete(t);return this.size=e.size,n}},function(t,e){t.exports=function(t){return this.__data__.get(t)}},function(t,e){t.exports=function(t){return this.__data__.has(t)}},function(t,e,n){var r=n(55),i=n(79),a=n(80);t.exports=function(t,e){var n=this.__data__;if(n instanceof r){var o=n.__data__;if(!i||o.length<199)return o.push([t,e]),this.size=++n.size,this;n=this.__data__=new a(o)}return n.set(t,e),this.size=n.size,this}},function(t,e,n){var r=n(39),i=n(219),a=n(14),o=n(110),s=/^\[object .+?Constructor\]$/,c=Function.prototype,u=Object.prototype,l=c.toString,h=u.hasOwnProperty,f=RegExp("^"+l.call(h).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");t.exports=function(t){return!(!a(t)||i(t))&&(r(t)?f:s).test(o(t))}},function(t,e){var n;n=function(){return this}();try{n=n||new Function("return 
this")()}catch(t){"object"==typeof window&&(n=window)}t.exports=n},function(t,e,n){var r=n(40),i=Object.prototype,a=i.hasOwnProperty,o=i.toString,s=r?r.toStringTag:void 0;t.exports=function(t){var e=a.call(t,s),n=t[s];try{t[s]=void 0;var r=!0}catch(t){}var i=o.call(t);return r&&(e?t[s]=n:delete t[s]),i}},function(t,e){var n=Object.prototype.toString;t.exports=function(t){return n.call(t)}},function(t,e,n){var r,i=n(220),a=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";t.exports=function(t){return!!a&&a in t}},function(t,e,n){var r=n(19)["__core-js_shared__"];t.exports=r},function(t,e){t.exports=function(t,e){return null==t?void 0:t[e]}},function(t,e,n){var r=n(223),i=n(55),a=n(79);t.exports=function(){this.size=0,this.__data__={hash:new r,map:new(a||i),string:new r}}},function(t,e,n){var r=n(224),i=n(225),a=n(226),o=n(227),s=n(228);function c(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e0){if(++e>=800)return arguments[0]}else e=0;return t.apply(void 0,arguments)}}},function(t,e,n){var r=n(131),i=n(297),a=n(301),o=n(132),s=n(302),c=n(92);t.exports=function(t,e,n){var u=-1,l=i,h=t.length,f=!0,d=[],p=d;if(n)f=!1,l=a;else if(h>=200){var y=e?null:s(t);if(y)return c(y);f=!1,l=o,p=new r}else p=e?[]:d;t:for(;++u-1}},function(t,e,n){var r=n(145),i=n(299),a=n(300);t.exports=function(t,e,n){return e==e?a(t,e,n):r(t,i,n)}},function(t,e){t.exports=function(t){return t!=t}},function(t,e){t.exports=function(t,e,n){for(var r=n-1,i=t.length;++r1||1===e.length&&t.hasEdge(e[0],e[0])}))}},function(t,e,n){var r=n(13);t.exports=function(t,e,n){return function(t,e,n){var r={},i=t.nodes();return i.forEach((function(t){r[t]={},r[t][t]={distance:0},i.forEach((function(e){t!==e&&(r[t][e]={distance:Number.POSITIVE_INFINITY})})),n(t).forEach((function(n){var i=n.v===t?n.w:n.v,a=e(n);r[t][i]={distance:a,predecessor:t}}))})),i.forEach((function(t){var e=r[t];i.forEach((function(n){var a=r[n];i.forEach((function(n){var 
r=a[t],i=e[n],o=a[n],s=r.distance+i.distance;s0;){if(n=c.removeMin(),r.has(s,n))o.setEdge(n,s[n]);else{if(l)throw new Error("Input graph is not connected: "+t);l=!0}t.nodeEdges(n).forEach(u)}return o}},function(t,e,n){var r;try{r=n(3)}catch(t){}r||(r=window.graphlib),t.exports=r},function(t,e,n){"use strict";var r=n(4),i=n(352),a=n(355),o=n(356),s=n(9).normalizeRanks,c=n(358),u=n(9).removeEmptyRanks,l=n(359),h=n(360),f=n(361),d=n(362),p=n(371),y=n(9),g=n(20).Graph;t.exports=function(t,e){var n=e&&e.debugTiming?y.time:y.notime;n("layout",(function(){var e=n(" buildLayoutGraph",(function(){return function(t){var e=new g({multigraph:!0,compound:!0}),n=C(t.graph());return e.setGraph(r.merge({},v,T(n,m),r.pick(n,b))),r.forEach(t.nodes(),(function(n){var i=C(t.node(n));e.setNode(n,r.defaults(T(i,x),_)),e.setParent(n,t.parent(n))})),r.forEach(t.edges(),(function(n){var i=C(t.edge(n));e.setEdge(n,r.merge({},w,T(i,k),r.pick(i,E)))})),e}(t)}));n(" runLayout",(function(){!function(t,e){e(" makeSpaceForEdgeLabels",(function(){!function(t){var e=t.graph();e.ranksep/=2,r.forEach(t.edges(),(function(n){var r=t.edge(n);r.minlen*=2,"c"!==r.labelpos.toLowerCase()&&("TB"===e.rankdir||"BT"===e.rankdir?r.width+=r.labeloffset:r.height+=r.labeloffset)}))}(t)})),e(" removeSelfEdges",(function(){!function(t){r.forEach(t.edges(),(function(e){if(e.v===e.w){var n=t.node(e.v);n.selfEdges||(n.selfEdges=[]),n.selfEdges.push({e:e,label:t.edge(e)}),t.removeEdge(e)}}))}(t)})),e(" acyclic",(function(){i.run(t)})),e(" nestingGraph.run",(function(){l.run(t)})),e(" rank",(function(){o(y.asNonCompoundGraph(t))})),e(" injectEdgeLabelProxies",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);if(n.width&&n.height){var r=t.node(e.v),i={rank:(t.node(e.w).rank-r.rank)/2+r.rank,e:e};y.addDummyNode(t,"edge-proxy",i,"_ep")}}))}(t)})),e(" removeEmptyRanks",(function(){u(t)})),e(" nestingGraph.cleanup",(function(){l.cleanup(t)})),e(" normalizeRanks",(function(){s(t)})),e(" 
assignRankMinMax",(function(){!function(t){var e=0;r.forEach(t.nodes(),(function(n){var i=t.node(n);i.borderTop&&(i.minRank=t.node(i.borderTop).rank,i.maxRank=t.node(i.borderBottom).rank,e=r.max(e,i.maxRank))})),t.graph().maxRank=e}(t)})),e(" removeEdgeLabelProxies",(function(){!function(t){r.forEach(t.nodes(),(function(e){var n=t.node(e);"edge-proxy"===n.dummy&&(t.edge(n.e).labelRank=n.rank,t.removeNode(e))}))}(t)})),e(" normalize.run",(function(){a.run(t)})),e(" parentDummyChains",(function(){c(t)})),e(" addBorderSegments",(function(){h(t)})),e(" order",(function(){d(t)})),e(" insertSelfEdges",(function(){!function(t){var e=y.buildLayerMatrix(t);r.forEach(e,(function(e){var n=0;r.forEach(e,(function(e,i){var a=t.node(e);a.order=i+n,r.forEach(a.selfEdges,(function(e){y.addDummyNode(t,"selfedge",{width:e.label.width,height:e.label.height,rank:a.rank,order:i+ ++n,e:e.e,label:e.label},"_se")})),delete a.selfEdges}))}))}(t)})),e(" adjustCoordinateSystem",(function(){f.adjust(t)})),e(" position",(function(){p(t)})),e(" positionSelfEdges",(function(){!function(t){r.forEach(t.nodes(),(function(e){var n=t.node(e);if("selfedge"===n.dummy){var r=t.node(n.e.v),i=r.x+r.width/2,a=r.y,o=n.x-i,s=r.height/2;t.setEdge(n.e,n.label),t.removeNode(e),n.label.points=[{x:i+2*o/3,y:a-s},{x:i+5*o/6,y:a-s},{x:i+o,y:a},{x:i+5*o/6,y:a+s},{x:i+2*o/3,y:a+s}],n.label.x=n.x,n.label.y=n.y}}))}(t)})),e(" removeBorderNodes",(function(){!function(t){r.forEach(t.nodes(),(function(e){if(t.children(e).length){var n=t.node(e),i=t.node(n.borderTop),a=t.node(n.borderBottom),o=t.node(r.last(n.borderLeft)),s=t.node(r.last(n.borderRight));n.width=Math.abs(s.x-o.x),n.height=Math.abs(a.y-i.y),n.x=o.x+n.width/2,n.y=i.y+n.height/2}})),r.forEach(t.nodes(),(function(e){"border"===t.node(e).dummy&&t.removeNode(e)}))}(t)})),e(" normalize.undo",(function(){a.undo(t)})),e(" fixupEdgeLabelCoords",(function(){!function(t){r.forEach(t.edges(),(function(e){var 
n=t.edge(e);if(r.has(n,"x"))switch("l"!==n.labelpos&&"r"!==n.labelpos||(n.width-=n.labeloffset),n.labelpos){case"l":n.x-=n.width/2+n.labeloffset;break;case"r":n.x+=n.width/2+n.labeloffset}}))}(t)})),e(" undoCoordinateSystem",(function(){f.undo(t)})),e(" translateGraph",(function(){!function(t){var e=Number.POSITIVE_INFINITY,n=0,i=Number.POSITIVE_INFINITY,a=0,o=t.graph(),s=o.marginx||0,c=o.marginy||0;function u(t){var r=t.x,o=t.y,s=t.width,c=t.height;e=Math.min(e,r-s/2),n=Math.max(n,r+s/2),i=Math.min(i,o-c/2),a=Math.max(a,o+c/2)}r.forEach(t.nodes(),(function(e){u(t.node(e))})),r.forEach(t.edges(),(function(e){var n=t.edge(e);r.has(n,"x")&&u(n)})),e-=s,i-=c,r.forEach(t.nodes(),(function(n){var r=t.node(n);r.x-=e,r.y-=i})),r.forEach(t.edges(),(function(n){var a=t.edge(n);r.forEach(a.points,(function(t){t.x-=e,t.y-=i})),r.has(a,"x")&&(a.x-=e),r.has(a,"y")&&(a.y-=i)})),o.width=n-e+s,o.height=a-i+c}(t)})),e(" assignNodeIntersects",(function(){!function(t){r.forEach(t.edges(),(function(e){var n,r,i=t.edge(e),a=t.node(e.v),o=t.node(e.w);i.points?(n=i.points[0],r=i.points[i.points.length-1]):(i.points=[],n=o,r=a),i.points.unshift(y.intersectRect(a,n)),i.points.push(y.intersectRect(o,r))}))}(t)})),e(" reversePoints",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);n.reversed&&n.points.reverse()}))}(t)})),e(" acyclic.undo",(function(){i.undo(t)}))}(e,n)})),n(" updateInputGraph",(function(){!function(t,e){r.forEach(t.nodes(),(function(n){var r=t.node(n),i=e.node(n);r&&(r.x=i.x,r.y=i.y,e.children(n).length&&(r.width=i.width,r.height=i.height))})),r.forEach(t.edges(),(function(n){var i=t.edge(n),a=e.edge(n);i.points=a.points,r.has(a,"x")&&(i.x=a.x,i.y=a.y)})),t.graph().width=e.graph().width,t.graph().height=e.graph().height}(t,e)}))}))};var 
m=["nodesep","edgesep","ranksep","marginx","marginy"],v={ranksep:50,edgesep:20,nodesep:50,rankdir:"tb"},b=["acyclicer","ranker","rankdir","align"],x=["width","height"],_={width:0,height:0},k=["minlen","weight","width","height","labeloffset"],w={minlen:1,weight:1,width:0,height:0,labeloffset:10,labelpos:"r"},E=["labelpos"];function T(t,e){return r.mapValues(r.pick(t,e),Number)}function C(t){var e={};return r.forEach(t,(function(t,n){e[n.toLowerCase()]=t})),e}},function(t,e,n){var r=n(108);t.exports=function(t){return r(t,5)}},function(t,e,n){var r=n(320)(n(321));t.exports=r},function(t,e,n){var r=n(26),i=n(25),a=n(30);t.exports=function(t){return function(e,n,o){var s=Object(e);if(!i(e)){var c=r(n,3);e=a(e),n=function(t){return c(s[t],t,s)}}var u=t(e,n,o);return u>-1?s[c?e[u]:u]:void 0}}},function(t,e,n){var r=n(145),i=n(26),a=n(322),o=Math.max;t.exports=function(t,e,n){var s=null==t?0:t.length;if(!s)return-1;var c=null==n?0:a(n);return c<0&&(c=o(s+c,0)),r(t,i(e,3),c)}},function(t,e,n){var r=n(155);t.exports=function(t){var e=r(t),n=e%1;return e==e?n?e-n:e:0}},function(t,e,n){var r=n(324),i=n(14),a=n(43),o=/^[-+]0x[0-9a-f]+$/i,s=/^0b[01]+$/i,c=/^0o[0-7]+$/i,u=parseInt;t.exports=function(t){if("number"==typeof t)return t;if(a(t))return NaN;if(i(t)){var e="function"==typeof t.valueOf?t.valueOf():t;t=i(e)?e+"":e}if("string"!=typeof t)return 0===t?t:+t;t=r(t);var n=s.test(t);return n||c.test(t)?u(t.slice(2),n?2:8):o.test(t)?NaN:+t}},function(t,e,n){var r=n(325),i=/^\s+/;t.exports=function(t){return t?t.slice(0,r(t)+1).replace(i,""):t}},function(t,e){var n=/\s/;t.exports=function(t){for(var e=t.length;e--&&n.test(t.charAt(e)););return e}},function(t,e,n){var r=n(91),i=n(127),a=n(35);t.exports=function(t,e){return null==t?t:r(t,i(e),a)}},function(t,e){t.exports=function(t){var e=null==t?0:t.length;return e?t[e-1]:void 0}},function(t,e,n){var r=n(60),i=n(90),a=n(26);t.exports=function(t,e){var n={};return 
e=a(e,3),i(t,(function(t,i,a){r(n,i,e(t,i,a))})),n}},function(t,e,n){var r=n(96),i=n(330),a=n(36);t.exports=function(t){return t&&t.length?r(t,a,i):void 0}},function(t,e){t.exports=function(t,e){return t>e}},function(t,e,n){var r=n(332),i=n(335)((function(t,e,n){r(t,e,n)}));t.exports=i},function(t,e,n){var r=n(54),i=n(157),a=n(91),o=n(333),s=n(14),c=n(35),u=n(159);t.exports=function t(e,n,l,h,f){e!==n&&a(n,(function(a,c){if(f||(f=new r),s(a))o(e,n,c,l,t,h,f);else{var d=h?h(u(e,c),a,c+"",e,n,f):void 0;void 0===d&&(d=a),i(e,c,d)}}),c)}},function(t,e,n){var r=n(157),i=n(114),a=n(123),o=n(115),s=n(124),c=n(48),u=n(5),l=n(146),h=n(41),f=n(39),d=n(14),p=n(158),y=n(49),g=n(159),m=n(334);t.exports=function(t,e,n,v,b,x,_){var k=g(t,n),w=g(e,n),E=_.get(w);if(E)r(t,n,E);else{var T=x?x(k,w,n+"",t,e,_):void 0,C=void 0===T;if(C){var S=u(w),A=!S&&h(w),M=!S&&!A&&y(w);T=w,S||A||M?u(k)?T=k:l(k)?T=o(k):A?(C=!1,T=i(w,!0)):M?(C=!1,T=a(w,!0)):T=[]:p(w)||c(w)?(T=k,c(k)?T=m(k):d(k)&&!f(k)||(T=s(w))):C=!1}C&&(_.set(w,T),b(T,w,v,x,_),_.delete(w)),r(t,n,T)}}},function(t,e,n){var r=n(47),i=n(35);t.exports=function(t){return r(t,i(t))}},function(t,e,n){var r=n(69),i=n(70);t.exports=function(t){return r((function(e,n){var r=-1,a=n.length,o=a>1?n[a-1]:void 0,s=a>2?n[2]:void 0;for(o=t.length>3&&"function"==typeof o?(a--,o):void 0,s&&i(n[0],n[1],s)&&(o=a<3?void 0:o,a=1),e=Object(e);++r1&&o(t,e[0],e[1])?e=[]:n>2&&o(e[0],e[1],e[2])&&(e=[e[0]]),i(t,r(e,1),[])}));t.exports=s},function(t,e,n){var r=n(68),i=n(66),a=n(26),o=n(141),s=n(347),c=n(62),u=n(348),l=n(36),h=n(5);t.exports=function(t,e,n){e=e.length?r(e,(function(t){return h(t)?function(e){return i(e,1===t.length?t[0]:t)}:t})):[l];var f=-1;e=r(e,c(a));var d=o(t,(function(t,n,i){return{criteria:r(e,(function(e){return e(t)})),index:++f,value:t}}));return s(d,(function(t,e){return u(t,e,n)}))}},function(t,e){t.exports=function(t,e){var n=t.length;for(t.sort(e);n--;)t[n]=t[n].value;return t}},function(t,e,n){var 
r=n(349);t.exports=function(t,e,n){for(var i=-1,a=t.criteria,o=e.criteria,s=a.length,c=n.length;++i=c?u:u*("desc"==n[i]?-1:1)}return t.index-e.index}},function(t,e,n){var r=n(43);t.exports=function(t,e){if(t!==e){var n=void 0!==t,i=null===t,a=t==t,o=r(t),s=void 0!==e,c=null===e,u=e==e,l=r(e);if(!c&&!l&&!o&&t>e||o&&s&&u&&!c&&!l||i&&s&&u||!n&&u||!a)return 1;if(!i&&!o&&!l&&t0;--c)if(r=e[c].dequeue()){i=i.concat(s(t,e,n,r,!0));break}}return i}(n.graph,n.buckets,n.zeroIdx);return r.flatten(r.map(u,(function(e){return t.outEdges(e.v,e.w)})),!0)};var o=r.constant(1);function s(t,e,n,i,a){var o=a?[]:void 0;return r.forEach(t.inEdges(i.v),(function(r){var i=t.edge(r),s=t.node(r.v);a&&o.push({v:r.v,w:r.w}),s.out-=i,c(e,n,s)})),r.forEach(t.outEdges(i.v),(function(r){var i=t.edge(r),a=r.w,o=t.node(a);o.in-=i,c(e,n,o)})),t.removeNode(i.v),o}function c(t,e,n){n.out?n.in?t[n.out-n.in+e].enqueue(n):t[t.length-1].enqueue(n):t[0].enqueue(n)}},function(t,e){function n(){var t={};t._next=t._prev=t,this._sentinel=t}function r(t){t._prev._next=t._next,t._next._prev=t._prev,delete t._next,delete t._prev}function i(t,e){if("_next"!==t&&"_prev"!==t)return e}t.exports=n,n.prototype.dequeue=function(){var t=this._sentinel,e=t._prev;if(e!==t)return r(e),e},n.prototype.enqueue=function(t){var e=this._sentinel;t._prev&&t._next&&r(t),t._next=e._next,e._next._prev=t,e._next=t,t._prev=e},n.prototype.toString=function(){for(var t=[],e=this._sentinel,n=e._prev;n!==e;)t.push(JSON.stringify(n,i)),n=n._prev;return"["+t.join(", ")+"]"}},function(t,e,n){"use strict";var r=n(4),i=n(9);t.exports={run:function(t){t.graph().dummyChains=[],r.forEach(t.edges(),(function(e){!function(t,e){var n,r,a,o=e.v,s=t.node(o).rank,c=e.w,u=t.node(c).rank,l=e.name,h=t.edge(e),f=h.labelRank;if(u===s+1)return;for(t.removeEdge(e),a=0,++s;sc.lim&&(u=c,l=!0);var h=r.filter(e.edges(),(function(e){return l===v(t,t.node(e.v),u)&&l!==v(t,t.node(e.w),u)}));return r.minBy(h,(function(t){return a(e,t)}))}function m(t,e,n,i){var 
a=n.v,o=n.w;t.removeEdge(a,o),t.setEdge(i.v,i.w,{}),d(t),h(t,e),function(t,e){var n=r.find(t.nodes(),(function(t){return!e.node(t).parent})),i=s(t,n);i=i.slice(1),r.forEach(i,(function(n){var r=t.node(n).parent,i=e.edge(n,r),a=!1;i||(i=e.edge(r,n),a=!0),e.node(n).rank=e.node(r).rank+(a?i.minlen:-i.minlen)}))}(t,e)}function v(t,e,n){return n.low<=e.lim&&e.lim<=n.lim}t.exports=l,l.initLowLimValues=d,l.initCutValues=h,l.calcCutValue=f,l.leaveEdge=y,l.enterEdge=g,l.exchangeEdges=m},function(t,e,n){var r=n(4);t.exports=function(t){var e=function(t){var e={},n=0;function i(a){var o=n;r.forEach(t.children(a),i),e[a]={low:o,lim:n++}}return r.forEach(t.children(),i),e}(t);r.forEach(t.graph().dummyChains,(function(n){for(var r=t.node(n),i=r.edgeObj,a=function(t,e,n,r){var i,a,o=[],s=[],c=Math.min(e[n].low,e[r].low),u=Math.max(e[n].lim,e[r].lim);i=n;do{i=t.parent(i),o.push(i)}while(i&&(e[i].low>c||u>e[i].lim));a=i,i=r;for(;(i=t.parent(i))!==a;)s.push(i);return{path:o.concat(s.reverse()),lca:a}}(t,e,i.v,i.w),o=a.path,s=a.lca,c=0,u=o[c],l=!0;n!==i.w;){if(r=t.node(n),l){for(;(u=o[c])!==s&&t.node(u).maxRank=2),s=l.buildLayerMatrix(t);var g=a(t,s);g0;)e%2&&(n+=c[e+1]),c[e=e-1>>1]+=t.weight;u+=t.weight*n}))),u}t.exports=function(t,e){for(var n=0,r=1;r=t.barycenter)&&function(t,e){var n=0,r=0;t.weight&&(n+=t.barycenter*t.weight,r+=t.weight);e.weight&&(n+=e.barycenter*e.weight,r+=e.weight);t.vs=e.vs.concat(t.vs),t.barycenter=n/r,t.weight=r,t.i=Math.min(e.i,t.i),e.merged=!0}(t,e)}}function i(e){return function(n){n.in.push(e),0==--n.indegree&&t.push(n)}}for(;t.length;){var a=t.pop();e.push(a),r.forEach(a.in.reverse(),n(a)),r.forEach(a.out,i(a))}return r.map(r.filter(e,(function(t){return!t.merged})),(function(t){return r.pick(t,["vs","i","barycenter","weight"])}))}(r.filter(n,(function(t){return!t.indegree})))}},function(t,e,n){var r=n(4),i=n(9);function a(t,e,n){for(var i;e.length&&(i=r.last(e)).i<=n;)e.pop(),t.push(i.vs),n++;return n}t.exports=function(t,e){var 
n=i.partition(t,(function(t){return r.has(t,"barycenter")})),o=n.lhs,s=r.sortBy(n.rhs,(function(t){return-t.i})),c=[],u=0,l=0,h=0;o.sort((f=!!e,function(t,e){return t.barycentere.barycenter?1:f?e.i-t.i:t.i-e.i})),h=a(c,s,h),r.forEach(o,(function(t){h+=t.vs.length,c.push(t.vs),u+=t.barycenter*t.weight,l+=t.weight,h=a(c,s,h)}));var f;var d={vs:r.flatten(c,!0)};l&&(d.barycenter=u/l,d.weight=l);return d}},function(t,e,n){var r=n(4),i=n(20).Graph;t.exports=function(t,e,n){var a=function(t){var e;for(;t.hasNode(e=r.uniqueId("_root")););return e}(t),o=new i({compound:!0}).setGraph({root:a}).setDefaultNodeLabel((function(e){return t.node(e)}));return r.forEach(t.nodes(),(function(i){var s=t.node(i),c=t.parent(i);(s.rank===e||s.minRank<=e&&e<=s.maxRank)&&(o.setNode(i),o.setParent(i,c||a),r.forEach(t[n](i),(function(e){var n=e.v===i?e.w:e.v,a=o.edge(n,i),s=r.isUndefined(a)?0:a.weight;o.setEdge(n,i,{weight:t.edge(e).weight+s})})),r.has(s,"minRank")&&o.setNode(i,{borderLeft:s.borderLeft[e],borderRight:s.borderRight[e]}))})),o}},function(t,e,n){var r=n(4);t.exports=function(t,e,n){var i,a={};r.forEach(n,(function(n){for(var r,o,s=t.parent(n);s;){if((r=t.parent(s))?(o=a[r],a[r]=s):(o=i,i=s),o&&o!==s)return void e.setEdge(o,s);s=r}}))}},function(t,e,n){"use strict";var r=n(4),i=n(9),a=n(372).positionX;t.exports=function(t){(function(t){var e=i.buildLayerMatrix(t),n=t.graph().ranksep,a=0;r.forEach(e,(function(e){var i=r.max(r.map(e,(function(e){return t.node(e).height})));r.forEach(e,(function(e){t.node(e).y=a+i/2})),a+=i+n}))})(t=i.asNonCompoundGraph(t)),r.forEach(a(t),(function(e,n){t.node(n).x=e}))}},function(t,e,n){"use strict";var r=n(4),i=n(20).Graph,a=n(9);function o(t,e){var n={};return r.reduce(e,(function(e,i){var a=0,o=0,s=e.length,u=r.last(i);return r.forEach(i,(function(e,l){var h=function(t,e){if(t.node(e).dummy)return r.find(t.predecessors(e),(function(e){return 
t.node(e).dummy}))}(t,e),f=h?t.node(h).order:s;(h||e===u)&&(r.forEach(i.slice(o,l+1),(function(e){r.forEach(t.predecessors(e),(function(r){var i=t.node(r),o=i.order;!(os)&&c(n,e,u)}))}))}return r.reduce(e,(function(e,n){var a,o=-1,s=0;return r.forEach(n,(function(r,c){if("border"===t.node(r).dummy){var u=t.predecessors(r);u.length&&(a=t.node(u[0]).order,i(n,s,c,o,a),s=c,o=a)}i(n,s,n.length,a,e.length)})),n})),n}function c(t,e,n){if(e>n){var r=e;e=n,n=r}var i=t[e];i||(t[e]=i={}),i[n]=!0}function u(t,e,n){if(e>n){var i=e;e=n,n=i}return r.has(t[e],n)}function l(t,e,n,i){var a={},o={},s={};return r.forEach(e,(function(t){r.forEach(t,(function(t,e){a[t]=t,o[t]=t,s[t]=e}))})),r.forEach(e,(function(t){var e=-1;r.forEach(t,(function(t){var c=i(t);if(c.length)for(var l=((c=r.sortBy(c,(function(t){return s[t]}))).length-1)/2,h=Math.floor(l),f=Math.ceil(l);h<=f;++h){var d=c[h];o[t]===t&&e0}t.exports=function(t,e,r,i){var a,o,s,c,u,l,h,f,d,p,y,g,m;if(a=e.y-t.y,s=t.x-e.x,u=e.x*t.y-t.x*e.y,d=a*r.x+s*r.y+u,p=a*i.x+s*i.y+u,0!==d&&0!==p&&n(d,p))return;if(o=i.y-r.y,c=r.x-i.x,l=i.x*r.y-r.x*i.y,h=o*t.x+c*t.y+l,f=o*e.x+c*e.y+l,0!==h&&0!==f&&n(h,f))return;if(0===(y=a*c-o*s))return;return g=Math.abs(y/2),{x:(m=s*l-c*u)<0?(m-g)/y:(m+g)/y,y:(m=o*u-a*l)<0?(m-g)/y:(m+g)/y}}},function(t,e,n){var r=n(44),i=n(31),a=n(153).layout;t.exports=function(){var t=n(378),e=n(381),i=n(382),u=n(383),l=n(384),h=n(385),f=n(386),d=n(387),p=n(388),y=function(n,y){!function(t){t.nodes().forEach((function(e){var 
n=t.node(e);r.has(n,"label")||t.children(e).length||(n.label=e),r.has(n,"paddingX")&&r.defaults(n,{paddingLeft:n.paddingX,paddingRight:n.paddingX}),r.has(n,"paddingY")&&r.defaults(n,{paddingTop:n.paddingY,paddingBottom:n.paddingY}),r.has(n,"padding")&&r.defaults(n,{paddingLeft:n.padding,paddingRight:n.padding,paddingTop:n.padding,paddingBottom:n.padding}),r.defaults(n,o),r.each(["paddingLeft","paddingRight","paddingTop","paddingBottom"],(function(t){n[t]=Number(n[t])})),r.has(n,"width")&&(n._prevWidth=n.width),r.has(n,"height")&&(n._prevHeight=n.height)})),t.edges().forEach((function(e){var n=t.edge(e);r.has(n,"label")||(n.label=""),r.defaults(n,s)}))}(y);var g=c(n,"output"),m=c(g,"clusters"),v=c(g,"edgePaths"),b=i(c(g,"edgeLabels"),y),x=t(c(g,"nodes"),y,d);a(y),l(x,y),h(b,y),u(v,y,p);var _=e(m,y);f(_,y),function(t){r.each(t.nodes(),(function(e){var n=t.node(e);r.has(n,"_prevWidth")?n.width=n._prevWidth:delete n.width,r.has(n,"_prevHeight")?n.height=n._prevHeight:delete n.height,delete n._prevWidth,delete n._prevHeight}))}(y)};return y.createNodes=function(e){return arguments.length?(t=e,y):t},y.createClusters=function(t){return arguments.length?(e=t,y):e},y.createEdgeLabels=function(t){return arguments.length?(i=t,y):i},y.createEdgePaths=function(t){return arguments.length?(u=t,y):u},y.shapes=function(t){return arguments.length?(d=t,y):d},y.arrows=function(t){return arguments.length?(p=t,y):p},y};var o={paddingLeft:10,paddingRight:10,paddingTop:10,paddingBottom:10,rx:0,ry:0,shape:"rect"},s={arrowhead:"normal",curve:i.curveLinear};function c(t,e){var n=t.select("g."+e);return n.empty()&&(n=t.append("g").attr("class",e)),n}},function(t,e,n){"use strict";var r=n(44),i=n(98),a=n(15),o=n(31);t.exports=function(t,e,n){var s,c=e.nodes().filter((function(t){return!a.isSubgraph(e,t)})),u=t.selectAll("g.node").data(c,(function(t){return 
t})).classed("update",!0);u.exit().remove(),u.enter().append("g").attr("class","node").style("opacity",0),(u=t.selectAll("g.node")).each((function(t){var s=e.node(t),c=o.select(this);a.applyClass(c,s.class,(c.classed("update")?"update ":"")+"node"),c.select("g.label").remove();var u=c.append("g").attr("class","label"),l=i(u,s),h=n[s.shape],f=r.pick(l.node().getBBox(),"width","height");s.elem=this,s.id&&c.attr("id",s.id),s.labelId&&u.attr("id",s.labelId),r.has(s,"width")&&(f.width=s.width),r.has(s,"height")&&(f.height=s.height),f.width+=s.paddingLeft+s.paddingRight,f.height+=s.paddingTop+s.paddingBottom,u.attr("transform","translate("+(s.paddingLeft-s.paddingRight)/2+","+(s.paddingTop-s.paddingBottom)/2+")");var d=o.select(this);d.select(".label-container").remove();var p=h(d,f,s).classed("label-container",!0);a.applyStyle(p,s.style);var y=p.node().getBBox();s.width=y.width,s.height=y.height})),s=u.exit?u.exit():u.selectAll(null);return a.applyTransition(s,e).style("opacity",0).remove(),u}},function(t,e,n){var r=n(15);t.exports=function(t,e){for(var n=t.append("text"),i=function(t){for(var e,n="",r=!1,i=0;i0&&void 0!==arguments[0]?arguments[0]:"fatal";isNaN(t)&&(t=t.toLowerCase(),void 0!==a[t]&&(t=a[t])),o.trace=function(){},o.debug=function(){},o.info=function(){},o.warn=function(){},o.error=function(){},o.fatal=function(){},t<=a.fatal&&(o.fatal=console.error?console.error.bind(console,c("FATAL"),"color: orange"):console.log.bind(console,"",c("FATAL"))),t<=a.error&&(o.error=console.error?console.error.bind(console,c("ERROR"),"color: orange"):console.log.bind(console,"",c("ERROR"))),t<=a.warn&&(o.warn=console.warn?console.warn.bind(console,c("WARN"),"color: orange"):console.log.bind(console,"",c("WARN"))),t<=a.info&&(o.info=console.info?console.info.bind(console,c("INFO"),"color: lightblue"):console.log.bind(console,"",c("INFO"))),t<=a.debug&&(o.debug=console.debug?console.debug.bind(console,c("DEBUG"),"color: 
lightgreen"):console.log.bind(console,"",c("DEBUG")))},c=function(t){var e=i()().format("ss.SSS");return"%c".concat(e," : ").concat(t," : ")},u=n(0),l="comm",h="decl",f=Math.abs,d=String.fromCharCode;function p(t){return t.trim()}function y(t,e,n){return t.replace(e,n)}function g(t,e){return 0|t.charCodeAt(e)}function m(t,e,n){return t.slice(e,n)}function v(t){return t.length}function b(t){return t.length}function x(t,e){return e.push(t),t}var _=1,k=1,w=0,E=0,T=0,C="";function S(t,e,n,r,i,a,o){return{value:t,root:e,parent:n,type:r,props:i,children:a,line:_,column:k,length:o,return:""}}function A(){return T=E>0?g(C,--E):0,k--,10===T&&(k=1,_--),T}function M(){return T=E2||D(T)>3?"":" "}function P(t,e){for(;--e&&M()&&!(T<48||T>102||T>57&&T<65||T>70&&T<97););return B(t,N()+(e<6&&32==O()&&32==M()))}function j(t,e){for(;M()&&t+T!==57&&(t+T!==84||47!==O()););return"/*"+B(e,E-1)+"*"+d(47===t?t:M())}function Y(t){for(;!D(O());)M();return B(t,E)}function z(t){return I(function t(e,n,r,i,a,o,s,c,u){var l=0,h=0,f=s,p=0,g=0,m=0,b=1,_=1,k=1,w=0,E="",T=a,C=o,S=i,B=E;for(;_;)switch(m=w,w=M()){case 34:case 39:case 91:case 40:B+=R(w);break;case 9:case 10:case 13:case 32:B+=F(m);break;case 92:B+=P(N()-1,7);continue;case 47:switch(O()){case 42:case 47:x($(j(M(),N()),n,r),u);break;default:B+="/"}break;case 123*b:c[l++]=v(B)*k;case 125*b:case 59:case 0:switch(w){case 0:case 125:_=0;case 59+h:g>0&&v(B)-f&&x(g>32?q(B+";",i,r,f-1):q(y(B," ","")+";",i,r,f-2),u);break;case 59:B+=";";default:if(x(S=U(B,n,r,l,h,a,c,E,T=[],C=[],f),o),123===w)if(0===h)t(B,n,S,S,T,o,f,c,C);else switch(p){case 100:case 109:case 115:t(e,S,S,i&&x(U(e,S,S,0,0,a,c,E,a,T=[],f),C),a,C,f,c,i?T:C);break;default:t(B,S,S,S,[""],C,f,c,C)}}l=h=g=0,b=k=1,E=B="",f=s;break;case 58:f=1+v(B),g=m;default:if(b<1)if(123==w)--b;else if(125==w&&0==b++&&125==A())continue;switch(B+=d(w),w*b){case 38:k=h>0?1:(B+="\f",-1);break;case 44:c[l++]=(v(B)-1)*k,k=1;break;case 64:45===O()&&(B+=R(M())),p=O(),h=v(E=B+=Y(N())),w++;break;case 
45:45===m&&2==v(B)&&(b=0)}}return o}("",null,null,null,[""],t=L(t),0,[0],t))}function U(t,e,n,r,i,a,o,s,c,u,l){for(var h=i-1,d=0===i?a:[""],g=b(d),v=0,x=0,_=0;v0?d[k]+" "+w:y(w,/&\f/g,d[k])))&&(c[_++]=E);return S(t,e,n,0===i?"rule":s,c,u,l)}function $(t,e,n){return S(t,e,n,l,d(T),m(t,2,-2),0)}function q(t,e,n,r){return S(t,e,n,h,m(t,0,r),m(t,r+1,-1),r)}function W(t,e){for(var n="",r=b(t),i=0;i=0;){if(!((n=t.indexOf("=0)){e+=t,n=-1;break}e+=t.substr(0,n),(n=(t=t.substr(n+1)).indexOf("<\/script>"))>=0&&(n+=9,t=t.substr(n))}return e=(e=(e=e.replace(/javascript:/g,"#")).replace(/onerror=/g,"onerror:")).replace(/