diff --git a/Makefile b/Makefile index c7756208cc..576b25eebf 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -DOCKER = docker +DOCKER ?= docker HUGO_VERSION = $(shell grep ^HUGO_VERSION netlify.toml | tail -n 1 | cut -d '=' -f 2 | tr -d " \"\n") DOCKER_IMAGE = kubernetes-hugo DOCKER_RUN = $(DOCKER) run --rm --interactive --tty --volume $(CURDIR):/src diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 0de965afdc..34c36cee81 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -124,6 +124,7 @@ aliases: - girikuncoro - irvifa - wahyuoi + - phanama sig-docs-it-owners: # Admins for Italian content - fabriziopandini - mattiaperi diff --git a/config.toml b/config.toml index 4acaa66e95..772c160e1f 100644 --- a/config.toml +++ b/config.toml @@ -23,6 +23,14 @@ disableLanguages = ["hi", "no"] [markup] [markup.goldmark] + [markup.goldmark.extensions] + definitionList = true + table = true + typographer = false + [markup.goldmark.parser] + attribute = true + autoHeadingID = true + autoHeadingIDType = "blackfriday" [markup.goldmark.renderer] unsafe = true [markup.highlight] @@ -35,6 +43,10 @@ disableLanguages = ["hi", "no"] noClasses = true style = "emacs" tabWidth = 4 + [markup.tableOfContents] + endLevel = 2 + ordered = false + startLevel = 2 [frontmatter] date = ["date", ":filename", "publishDate", "lastmod"] @@ -82,11 +94,10 @@ nextUrl = "https://kubernetes-io-vnext-staging.netlify.com/" githubWebsiteRepo = "github.com/kubernetes/website" githubWebsiteRaw = "raw.githubusercontent.com/kubernetes/website" -# param for displaying an announcement block on every page; see PR #16210 -announcement = false -# announcement_message is only displayed when announcement = true; update with your specific message -announcement_message = "The Kubernetes Documentation team would like your feedback! Please take a short survey so we can improve the Kubernetes online documentation." - +# param for displaying an announcement block on every page. +# See /i18n/en.toml for message text and title. +announcement = true +announcement_bg = "#000000" #choose a dark color – text is white [params.pushAssets] css = [ @@ -313,4 +324,4 @@ contentDir = "content/uk" [languages.uk.params] time_format_blog = "02.01.2006" # A list of language codes to look for untranslated content, ordered from left to right. -language_alternatives = ["en"] \ No newline at end of file +language_alternatives = ["en"] diff --git a/content/de/docs/concepts/_index.md b/content/de/docs/concepts/_index.md index 82b5b0e5b0..43d273b432 100644 --- a/content/de/docs/concepts/_index.md +++ b/content/de/docs/concepts/_index.md @@ -1,17 +1,17 @@ --- title: Konzepte main_menu: true -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Im Abschnitt Konzepte erfahren Sie mehr über die Bestandteile des Kubernetes-Systems und die Abstraktionen, die Kubernetes zur Verwaltung Ihres Clusters zur Verfügung stellt. Sie erhalten zudem ein tieferes Verständnis der Funktionsweise von Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Überblick @@ -65,11 +65,12 @@ Die Nodes in einem Cluster sind die Maschinen (VMs, physische Server usw.), auf * [Anmerkungen](/docs/concepts/overview/working-with-objects/annotations/) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Wenn Sie eine Konzeptseite schreiben möchten, lesen Sie [Seitenvorlagen verwenden](/docs/home/contribute/page-templates/) für Informationen zum Konzeptseitentyp und zur Dokumentations Vorlage. 
-{{% /capture %}} + diff --git a/content/de/docs/concepts/architecture/cloud-controller.md b/content/de/docs/concepts/architecture/cloud-controller.md index 7e044119b8..21da96e612 100644 --- a/content/de/docs/concepts/architecture/cloud-controller.md +++ b/content/de/docs/concepts/architecture/cloud-controller.md @@ -1,10 +1,10 @@ --- title: Zugrunde liegende Konzepte des Cloud Controller Manager -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Das Konzept des Cloud Controller Managers (CCM) (nicht zu verwechseln mit der Binärdatei) wurde ursprünglich entwickelt, um Cloud-spezifischen Anbieter Code und den Kubernetes Kern unabhängig voneinander entwickeln zu können. Der Cloud Controller Manager läuft zusammen mit anderen Master Komponenten wie dem Kubernetes Controller Manager, dem API-Server und dem Scheduler auf dem Host. Es kann auch als Kubernetes Addon gestartet werden, in diesem Fall läuft er auf Kubernetes. Das Design des Cloud Controller Managers basiert auf einem Plugin Mechanismus, der es neuen Cloud Anbietern ermöglicht, sich mit Kubernetes einfach über Plugins zu integrieren. Es gibt Pläne für die Einbindung neuer Cloud Anbieter auf Kubernetes und für die Migration von Cloud Anbietern vom alten Modell auf das neue CCM-Modell. @@ -15,10 +15,10 @@ Die Architektur eines Kubernetes Clusters ohne den Cloud Controller Manager sieh ![Pre CCM Kube Arch](/images/docs/pre-ccm-arch.png) -{{% /capture %}} -{{% capture body %}} + + ## Design @@ -235,4 +235,4 @@ Die folgenden Cloud Anbieter haben CCMs implementiert: Eine vollständige Anleitung zur Konfiguration und zum Betrieb des CCM findest du [hier](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager). -{{% /capture %}} + diff --git a/content/de/docs/concepts/architecture/master-node-communication.md b/content/de/docs/concepts/architecture/master-node-communication.md index e7903f2606..6874cdb687 100644 --- a/content/de/docs/concepts/architecture/master-node-communication.md +++ b/content/de/docs/concepts/architecture/master-node-communication.md @@ -1,18 +1,18 @@ --- title: Master-Node Kommunikation -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Dieses Dokument katalogisiert die Kommunikationspfade zwischen dem Master (eigentlich dem Apiserver) und des Kubernetes-Clusters. Die Absicht besteht darin, Benutzern die Möglichkeit zu geben, ihre Installation so anzupassen, dass die Netzwerkkonfiguration so abgesichert wird, dass der Cluster in einem nicht vertrauenswürdigen Netzwerk (oder mit vollständig öffentlichen IP-Adressen eines Cloud-Providers) ausgeführt werden kann. -{{% /capture %}} -{{% capture body %}} + + ## Cluster zum Master @@ -69,4 +69,4 @@ Dieser Tunnel stellt sicher, dass der Datenverkehr nicht außerhalb des Netzwerk SSH-Tunnel werden zur Zeit nicht unterstützt. Sie sollten also nicht verwendet werden, sei denn, man weiß, was man tut. Ein Ersatz für diesen Kommunikationskanal wird entwickelt. 
-{{% /capture %}} + diff --git a/content/de/docs/concepts/architecture/nodes.md b/content/de/docs/concepts/architecture/nodes.md index 0f3d396968..8a2b8b7fde 100644 --- a/content/de/docs/concepts/architecture/nodes.md +++ b/content/de/docs/concepts/architecture/nodes.md @@ -1,10 +1,10 @@ --- title: Nodes -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Ein Knoten (Node in Englisch) ist eine Arbeitsmaschine in Kubernetes, früher als `minion` bekannt. Ein Node kann je nach Cluster eine VM oder eine physische Maschine sein. Jeder Node enthält @@ -13,10 +13,10 @@ und wird von den Master-Komponenten verwaltet. Die Dienste auf einem Node umfassen die [Container Runtime](/docs/concepts/overview/components/#node-components), das Kubelet und den Kube-Proxy. Weitere Informationen finden Sie im Abschnitt Kubernetes Node in der Architekturdesign-Dokumentation. -{{% /capture %}} -{{% capture body %}} + + ## Node Status @@ -244,4 +244,4 @@ Wenn Sie Ressourcen explizit für Nicht-Pod-Prozesse reservieren möchten, folge Node ist eine Top-Level-Ressource in der Kubernetes-REST-API. Weitere Details zum API-Objekt finden Sie unter: [Node API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#node-v1-core). -{{% /capture %}} + diff --git a/content/de/docs/concepts/cluster-administration/addons.md b/content/de/docs/concepts/cluster-administration/addons.md index 4d26b57da8..f5eedeb59b 100644 --- a/content/de/docs/concepts/cluster-administration/addons.md +++ b/content/de/docs/concepts/cluster-administration/addons.md @@ -1,9 +1,9 @@ --- title: Addons Installieren -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Add-Ons erweitern die Funktionalität von Kubernetes. @@ -12,10 +12,10 @@ Diese Seite gibt eine Übersicht über einige verfügbare Add-Ons und verweist a Die Add-Ons in den einzelnen Kategorien sind alphabetisch sortiert - Die Reihenfolge impliziert keine bevorzugung einzelner Projekte. -{{% /capture %}} -{{% capture body %}} + + ## Networking und Network Policy @@ -53,4 +53,4 @@ Es gibt einige weitere Add-Ons die in dem abgekündigten [cluster/addons](https: Add-Ons die ordentlich gewartet werden dürfen gerne hier aufgezählt werden. Wir freuen uns auf PRs! -{{% /capture %}} + diff --git a/content/de/docs/concepts/cluster-administration/controller-metrics.md b/content/de/docs/concepts/cluster-administration/controller-metrics.md index 4fd9b0d538..86cd0a1548 100644 --- a/content/de/docs/concepts/cluster-administration/controller-metrics.md +++ b/content/de/docs/concepts/cluster-administration/controller-metrics.md @@ -1,15 +1,15 @@ --- title: Controller Manager Metriken -content_template: templates/concept +content_type: concept weight: 100 --- -{{% capture overview %}} + Controller Manager Metriken liefern wichtige Erkenntnisse über die Leistung und den Zustand von den Controller Managern. -{{% /capture %}} -{{% capture body %}} + + ## Was sind Controller Manager Metriken Die Kennzahlen des Controller Managers liefert wichtige Erkenntnisse über die Leistung und den Zustand des Controller Managers. @@ -38,4 +38,3 @@ Die Metriken werden im [Prometheus Format](https://prometheus.io/docs/instrument In einer Produktionsumgebung können Sie Prometheus oder einen anderen Metrik Scraper konfigurieren, um diese Metriken regelmäßig zu sammeln und in einer Art Zeitreihen Datenbank verfügbar zu machen. 
-{{% /capture %}} \ No newline at end of file diff --git a/content/de/docs/concepts/cluster-administration/proxies.md b/content/de/docs/concepts/cluster-administration/proxies.md index 16d36ad518..a872dbad8d 100644 --- a/content/de/docs/concepts/cluster-administration/proxies.md +++ b/content/de/docs/concepts/cluster-administration/proxies.md @@ -1,14 +1,14 @@ --- title: Proxies in Kubernetes -content_template: templates/concept +content_type: concept weight: 90 --- -{{% capture overview %}} + Auf dieser Seite werden die im Kubernetes verwendeten Proxies erläutert. -{{% /capture %}} -{{% capture body %}} + + ## Proxies @@ -61,4 +61,3 @@ Kubernetes Benutzer müssen sich in der Regel um nichts anderes als die ersten b Proxies haben die Möglichkeit der Umleitung (redirect) ersetzt. Umleitungen sind veraltet. -{{% /capture %}} \ No newline at end of file diff --git a/content/de/docs/concepts/containers/images.md b/content/de/docs/concepts/containers/images.md index 03ec9b4a0e..d142405f06 100644 --- a/content/de/docs/concepts/containers/images.md +++ b/content/de/docs/concepts/containers/images.md @@ -1,18 +1,18 @@ --- title: Images -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Sie erstellen ihr Docker Image und laden es in eine Registry hoch, bevor es in einem Kubernetes Pod referenziert werden kann. Die `image` Eigenschaft eines Containers unterstüzt die gleiche Syntax wie die des `docker` Kommandos, inklusive privater Registries und Tags. -{{% /capture %}} -{{% capture body %}} + + ## Aktualisieren von Images @@ -334,7 +334,7 @@ Es gibt eine Anzahl an Lösungen um eigene Registries zu konfigurieren, hier sin - Generieren die Registry - Zugriffsdaten für jeden Mandanten, abgelegt in einem Secret das in jedem Mandanten - Namespace vorhanden ist. - Der Mandant fügt dieses Sercret zu den imagePullSecrets in jedem seiner Namespace hinzu. -{{% /capture %}} + Falls die Zugriff auf mehrere Registries benötigen, können sie ein Secret für jede Registry erstellen, Kubelet wird jedwede `imagePullSecrets` in einer einzelnen `.docker/config.json` zusammenfassen. diff --git a/content/de/docs/concepts/example-concept-template.md b/content/de/docs/concepts/example-concept-template.md index 9f3a2bcfac..a7694cc54b 100644 --- a/content/de/docs/concepts/example-concept-template.md +++ b/content/de/docs/concepts/example-concept-template.md @@ -1,10 +1,10 @@ --- title: Konzept Dokumentations-Vorlage -content_template: templates/concept +content_type: concept toc_hide: true --- -{{% capture overview %}} + {{< note >}} Stellen Sie auch sicher [einen Eintrag im Inhaltsverzeichnis](/docs/home/contribute/write-new-topic/#creating-an-entry-in-the-table-of-contents) für Ihr neues Dokument zu erstellen. @@ -12,9 +12,9 @@ Stellen Sie auch sicher [einen Eintrag im Inhaltsverzeichnis](/docs/home/contrib Diese Seite erklärt ... -{{% /capture %}} -{{% capture body %}} + + ## Verstehen ... @@ -24,15 +24,16 @@ Kubernetes bietet ... Benutzen Sie ... -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + **[Optionaler Bereich]** * Lernen Sie mehr über [ein neues Thema schreiben](/docs/home/contribute/write-new-topic/). * Besuchen Sie [Seitenvorlagen verwenden - Konzeptvorlage](/docs/home/contribute/page-templates/#concept_template) wie Sie diese Vorlage verwenden. 
-{{% /capture %}} + diff --git a/content/de/docs/concepts/overview/components.md b/content/de/docs/concepts/overview/components.md index 47560e0a68..af371e3b87 100644 --- a/content/de/docs/concepts/overview/components.md +++ b/content/de/docs/concepts/overview/components.md @@ -1,17 +1,17 @@ --- title: Kubernetes Komponenten -content_template: templates/concept +content_type: concept weight: 20 card: name: concepts weight: 20 --- -{{% capture overview %}} + In diesem Dokument werden die verschiedenen binären Komponenten beschrieben, die zur Bereitstellung eines funktionsfähigen Kubernetes-Clusters erforderlich sind. -{{% /capture %}} -{{% capture body %}} + + ## Master-Komponenten Master-Komponenten stellen die Steuerungsebene des Clusters bereit. Master-Komponenten treffen globale Entscheidungen über den Cluster (z. B. Zeitplanung) und das Erkennen und Reagieren auf Clusterereignisse (Starten eines neuen Pods, wenn das `replicas`-Feld eines Replikationscontrollers nicht zufriedenstellend ist). @@ -107,6 +107,6 @@ Von Kubernetes gestartete Container schließen diesen DNS-Server automatisch in Ein [Cluster-level logging](/docs/concepts/cluster-administration/logging/) Mechanismus ist für das Speichern von Containerprotokollen in einem zentralen Protokollspeicher mit Such- / Browsing-Schnittstelle verantwortlich. -{{% /capture %}} + diff --git a/content/de/docs/concepts/overview/what-is-kubernetes.md b/content/de/docs/concepts/overview/what-is-kubernetes.md index af7cfc614a..66b79d6928 100644 --- a/content/de/docs/concepts/overview/what-is-kubernetes.md +++ b/content/de/docs/concepts/overview/what-is-kubernetes.md @@ -1,17 +1,17 @@ --- title: Was ist Kubernetes? -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 10 --- -{{% capture overview %}} + Diese Seite ist eine Übersicht über Kubernetes. -{{% /capture %}} -{{% capture body %}} + + Kubernetes ist eine portable, erweiterbare Open-Source-Plattform zur Verwaltung von containerisierten Arbeitslasten und Services, die sowohl die deklarative Konfiguration als auch die Automatisierung erleichtert. @@ -160,11 +160,12 @@ Der Name **Kubernetes** stammt aus dem Griechischen, bedeutet *Steuermann* oder [cybernetic](http://www.etymonline.com/index.php?term=cybernetics). *K8s* ist eine Abkürzung, die durch Ersetzen der 8 Buchstaben "ubernete" mit "8" abgeleitet wird. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Bereit loszulegen](/docs/setup/)? * Weitere Einzelheiten finden Sie in der [Kubernetes Dokumentation](/docs/home/). -{{% /capture %}} + diff --git a/content/de/docs/contribute/_index.md b/content/de/docs/contribute/_index.md index 9853ef4e54..db7c6687ab 100644 --- a/content/de/docs/contribute/_index.md +++ b/content/de/docs/contribute/_index.md @@ -1,12 +1,12 @@ --- -content_template: templates/concept +content_type: concept title: Zur Kubernets-Dokumentation beitragen linktitle: Mitmachen main_menu: true weight: 80 --- -{{% capture overview %}} + Wenn Sie an der Dokumentation oder der Website von Kubernetes mitwirken möchten, freuen wir uns über Ihre Hilfe! Jeder kann seinen Beitrag leisten, unabhängig davon ob Sie neu im Projekt sind oder schon lange dabei sind, und ob Sie sich als @@ -15,7 +15,7 @@ Entwickler, Endbenutzer oder einfach jemanden, der es einfach nicht aushält, Ti Weitere Möglichkeiten, sich in der Kubernetes-Community zu engagieren oder mehr über uns zu erfahren, finden Sie auf der [Kubernetes-Community-Seite](/community/). 
Informationen zum Handbuch zur Dokumentation von Kubernetes finden Sie im [Gestaltungshandbuch](/docs/contribute/style/style-guide/). -{{% capture body %}} + ## Arten von Mitwirkenden @@ -59,4 +59,4 @@ Dies ist keine vollständige Liste von Möglichkeiten, wie Sie zur Kubernetes-Do - Verbesserungsvorschläge für Dokumentprüfungen vorschlagen - Vorschläge für Verbesserungen der Kubernetes-Website oder anderer Tools -{{% /capture %}} + diff --git a/content/de/docs/contribute/localization.md b/content/de/docs/contribute/localization.md index 79084cc0f2..031eeb8755 100644 --- a/content/de/docs/contribute/localization.md +++ b/content/de/docs/contribute/localization.md @@ -1,6 +1,6 @@ --- title: Lokalisierung der Kubernetes Dokumentation -content_template: templates/concept +content_type: concept weight: 50 card: name: mitarbeiten @@ -8,13 +8,13 @@ card: title: Übersetzen der Dokumentation --- -{{% capture overview %}} + Diese Seite zeigt dir wie die Dokumentation für verschiedene Sprachen [lokalisiert](https://blog.mozilla.org/l10n/2011/12/14/i18n-vs-l10n-whats-the-diff/) wird. -{{% /capture %}} -{{% capture body %}} + + ## Erste Schritte @@ -277,13 +277,14 @@ SIG Docs begrüßt Upstream Beiträge, also auf das englische Original, und Korr Du kannst auch dazu beitragen, Inhalte zu einer bestehenden Lokalisierung hinzuzufügen oder zu verbessern. Trete dem [Slack-Kanal](https://kubernetes.slack.com/messages/C1J0BPD2M/) für die Lokalisierung bei und beginne mit der Eröffnung von PRs, um zu helfen. Bitte beschränke deine Pull-Anfragen auf eine einzige Lokalisierung, da Pull-Anfragen, die Inhalte in mehreren Lokalisierungen ändern, schwer zu überprüfen sein könnten. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Sobald eine Lokalisierung die Anforderungen an den Arbeitsablauf und die Mindestausgabe erfüllt, wird SIG docs: - Die Sprachauswahl auf der Website aktivieren - Die Verfügbarkeit der Lokalisierung über die Kanäle der [Cloud Native Computing Foundation](https://www.cncf.io/about/) (CNCF), einschließlich des [Kubernetes Blogs](https://kubernetes.io/blog/) veröffentlichen. -{{% /capture %}} + diff --git a/content/de/docs/home/supported-doc-versions.md b/content/de/docs/home/supported-doc-versions.md index 8463d1bcd9..c1064b9730 100644 --- a/content/de/docs/home/supported-doc-versions.md +++ b/content/de/docs/home/supported-doc-versions.md @@ -1,20 +1,20 @@ --- title: Unterstützte Versionen der Kubernetes-Dokumentation -content_template: templates/concept +content_type: concept card: name: about weight: 10 title: Unterstützte Versionen der Dokumentation --- -{{% capture overview %}} + Diese Website enthält Dokumentation für die aktuelle Version von Kubernetes und die vier vorherigen Versionen von Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Aktuelle Version @@ -25,6 +25,6 @@ Die aktuelle Version ist {{< versions-other >}} -{{% /capture %}} + diff --git a/content/de/docs/reference/_index.md b/content/de/docs/reference/_index.md index b57b2740cd..7ffa9c04c9 100644 --- a/content/de/docs/reference/_index.md +++ b/content/de/docs/reference/_index.md @@ -5,16 +5,16 @@ approvers: linkTitle: "Referenzen" main_menu: true weight: 70 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Dieser Abschnitt der Kubernetes-Dokumentation enthält Referenzinformationen. 
-{{% /capture %}} -{{% capture body %}} + + ## API-Referenz @@ -58,4 +58,4 @@ Offiziell unterstützte Clientbibliotheken: Ein Archiv der Designdokumente für Kubernetes-Funktionalität. Gute Ansatzpunkte sind [Kubernetes Architektur](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md) und [Kubernetes Design Übersicht](https://git.k8s.io/community/contributors/design-proposals). -{{% /capture %}} + diff --git a/content/de/docs/reference/kubectl/cheatsheet.md b/content/de/docs/reference/kubectl/cheatsheet.md index c68fc183b5..15b8d0bda3 100644 --- a/content/de/docs/reference/kubectl/cheatsheet.md +++ b/content/de/docs/reference/kubectl/cheatsheet.md @@ -1,20 +1,20 @@ --- title: kubectl Spickzettel -content_template: templates/concept +content_type: concept card: name: reference weight: 30 --- -{{% capture overview %}} + Siehe auch: [Kubectl Überblick](/docs/reference/kubectl/overview/) und [JsonPath Dokumentation](/docs/reference/kubectl/jsonpath). Diese Seite ist eine Übersicht über den Befehl `kubectl`. -{{% /capture %}} -{{% capture body %}} + + # kubectl - Spickzettel @@ -335,9 +335,10 @@ Ausführlichkeit | Beschreibung `--v=8` | HTTP-Anforderungsinhalt anzeigen `--v=9` | HTTP-Anforderungsinhalt anzeigen, ohne den Inhalt zu kürzen. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Lernen Sie mehr im [Überblick auf kubectl](/docs/reference/kubectl/overview/). @@ -347,4 +348,4 @@ Ausführlichkeit | Beschreibung * Entdecken Sie mehr Community [kubectl Spickzettel](https://github.com/dennyzhang/cheatsheet-kubernetes-A4). -{{% /capture %}} + diff --git a/content/de/docs/reference/tools.md b/content/de/docs/reference/tools.md index 3daeda6efe..42bba64616 100644 --- a/content/de/docs/reference/tools.md +++ b/content/de/docs/reference/tools.md @@ -1,13 +1,13 @@ --- title: Tools -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Kubernetes enthält mehrere integrierte Tools, die Ihnen bei der Arbeit mit dem Kubernetes System helfen. -{{% /capture %}} -{{% capture body %}} + + ## Kubectl [`kubectl`](/docs/tasks/tools/install-kubectl/) ist ein Kommandozeilenprogramm für Kubernetes. Es steuert den Kubernetes Clustermanager. @@ -49,4 +49,4 @@ Verwenden Sie Kompose um: * Ein Docker Compose Datei in Kubernetes Objekte zu übersetzen * Von Ihrer lokalen Docker Entwicklung auf eine Kubernetes verwaltete Entwicklung zu wechseln * v1 oder v2 Docker Compose `yaml` Dateien oder [Distributed Application Bundles](https://docs.docker.com/compose/bundles/) zu konvertieren -{{% /capture %}} + diff --git a/content/de/docs/setup/_index.md b/content/de/docs/setup/_index.md index 3ee12cbb7b..d7f074efb3 100644 --- a/content/de/docs/setup/_index.md +++ b/content/de/docs/setup/_index.md @@ -2,10 +2,10 @@ title: Setup main_menu: true weight: 30 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Diese Sektion umfasst verschiedene Optionen zum Einrichten und Betrieb von Kubernetes. @@ -15,9 +15,9 @@ Sie können einen Kubernetes-Cluster auf einer lokalen Maschine, Cloud, On-Prem Noch einfacher können Sie einen Kubernetes-Cluster in einer Lern- und Produktionsumgebung erstellen. 
-{{% /capture %}} -{{% capture body %}} + + ## Lernumgebung @@ -99,4 +99,4 @@ Die folgende Tabelle für Produktionsumgebungs-Lösungen listet Anbieter und der | [VMware](https://cloud.vmware.com/) | [VMware Cloud PKS](https://cloud.vmware.com/vmware-cloud-pks) |[VMware Enterprise PKS](https://cloud.vmware.com/vmware-enterprise-pks) | [VMware Enterprise PKS](https://cloud.vmware.com/vmware-enterprise-pks) | [VMware Essential PKS](https://cloud.vmware.com/vmware-essential-pks) | |[VMware Essential PKS](https://cloud.vmware.com/vmware-essential-pks) | [Z.A.R.V.I.S.](https://zarvis.ai/) | ✔ | | | | | | -{{% /capture %}} + diff --git a/content/de/docs/setup/minikube.md b/content/de/docs/setup/minikube.md index 06734bd28f..d4c0b9462e 100644 --- a/content/de/docs/setup/minikube.md +++ b/content/de/docs/setup/minikube.md @@ -1,15 +1,15 @@ --- title: Kubernetes lokal über Minikube betreiben -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Minikube ist ein Tool, mit dem Kubernetes lokal einfach ausgeführt werden kann. Minikube führt einen Kubernetes-Cluster mit einem einzigen Node in einer VM auf Ihrem Laptop aus, damit Anwender Kubernetes ausprobieren oder täglich damit entwickeln können. -{{% /capture %}} -{{% capture body %}} + + ## Minikube-Funktionen @@ -439,4 +439,4 @@ Weitere Informationen zu Minikube finden Sie im [Vorschlag](https://git.k8s.io/c Beiträge, Fragen und Kommentare werden begrüßt und ermutigt! Minikube-Entwickler finden Sie in [Slack](https://kubernetes.slack.com) im #minikube Kanal (Erhalten Sie [hier](http://slack.kubernetes.io/) eine Einladung). Wir haben ausserdem die [kubernetes-dev Google Groups-Mailingliste](https://groups.google.com/forum/#!forum/kubernetes-dev). Wenn Sie in der Liste posten, fügen Sie Ihrem Betreff bitte "minikube:" voran. -{{% /capture %}} + diff --git a/content/de/docs/setup/release/building-from-source.md b/content/de/docs/setup/release/building-from-source.md index 55d324574f..76879df995 100644 --- a/content/de/docs/setup/release/building-from-source.md +++ b/content/de/docs/setup/release/building-from-source.md @@ -1,19 +1,19 @@ --- title: Release erstellen -content_template: templates/concept +content_type: concept card: name: download weight: 20 title: Release erstellen --- -{{% capture overview %}} + Sie können entweder eine Version aus dem Quellcode erstellen oder eine bereits kompilierte Version herunterladen. Wenn Sie nicht vorhaben, Kubernetes selbst zu entwickeln, empfehlen wir die Verwendung eines vorkompilierten Builds der aktuellen Version, die Sie in den [Versionshinweisen](/docs/setup/release/notes/) finden. Der Kubernetes-Quellcode kann aus dem [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) repo der heruntergeladen werden. -{{% /capture %}} -{{% capture body %}} + + ## Aus dem Quellcode kompilieren @@ -29,4 +29,4 @@ make release Mehr Informationen zum Release-Prozess finden Sie im kubernetes/kubernetes [`build`](http://releases.k8s.io/{{< param "githubbranch" >}}/build/) Verzeichnis. -{{% /capture %}} + diff --git a/content/de/docs/tasks/_index.md b/content/de/docs/tasks/_index.md index 1589a013e0..bfa2a73dba 100644 --- a/content/de/docs/tasks/_index.md +++ b/content/de/docs/tasks/_index.md @@ -2,19 +2,19 @@ title: Aufgaben main_menu: true weight: 50 -content_template: templates/concept +content_type: concept --- {{< toc >}} -{{% capture overview %}} + Dieser Abschnitt der Kubernetes-Dokumentation enthält Seiten, die zeigen, wie man einzelne Aufgaben erledigt. 
Eine Aufgabenseite zeigt, wie man eine einzelne Aufgabe ausführt, typischerweise durch eine kurze Abfolge von Schritten. -{{% /capture %}} -{{% capture body %}} + + ## Webbenutzeroberfläche (Dashboard) @@ -76,10 +76,11 @@ Konfigurieren und planen Sie NVIDIA-GPUs für die Verwendung durch Nodes in eine Konfigurieren und verwalten Sie `HugePages` als planbare Ressource in einem Cluster. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Wenn Sie eine Aufgabenseite schreiben möchten, finden Sie weitere Informationen unter [Erstellen einer Pull-Anfrage für Dokumentation](/docs/home/contribute/create-pull-request/). -{{% /capture %}} + diff --git a/content/de/docs/tasks/run-application/horizontal-pod-autoscale.md b/content/de/docs/tasks/run-application/horizontal-pod-autoscale.md index 03cbb787bb..cd120285f1 100644 --- a/content/de/docs/tasks/run-application/horizontal-pod-autoscale.md +++ b/content/de/docs/tasks/run-application/horizontal-pod-autoscale.md @@ -5,11 +5,11 @@ feature: description: > Skaliere deine Anwendung mit einem einfachen Befehl, über die Benutzeroberfläche oder automatisch, basierend auf der CPU-Auslastung. -content_template: templates/concept +content_type: concept weight: 90 --- -{{% capture overview %}} + Der Horizontal Pod Autoscaler skaliert automatisch die Anzahl der Pods eines Replication Controller, Deployment oder Replikat Set basierend auf der beobachteten CPU-Auslastung (oder, mit Unterstützung von [benutzerdefinierter Metriken](https://git.k8s.io/community/contributors/design-proposals/instrumentation/custom-metrics-api.md), von der Anwendung bereitgestellten Metriken). Beachte, dass die horizontale Pod Autoskalierung nicht für Objekte gilt, die nicht skaliert werden können, z. B. DaemonSets. @@ -17,9 +17,9 @@ Der Horizontal Pod Autoscaler ist als Kubernetes API-Ressource und einem Control Die Ressource bestimmt das Verhalten des Controllers. Der Controller passt die Anzahl der Replikate eines Replication Controller oder Deployments regelmäßig an, um die beobachtete durchschnittliche CPU-Auslastung an das vom Benutzer angegebene Ziel anzupassen. -{{% /capture %}} -{{% capture body %}} + + ## Wie funktioniert der Horizontal Pod Autoscaler? @@ -161,12 +161,13 @@ Standardmäßig ruft der HorizontalPodAutoscaler Controller Metriken aus einer R * Das Flag `--horizontal-pod-autoscaler-use-rest-clients` ist auf `true` oder ungesetzt. Wird dies auf `false` gesetzt wird die Heapster basierte Autoskalierung aktiviert, welche veraltet ist. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Design Dokument [Horizontal Pod Autoscaling](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md). * kubectl autoscale Befehl: [kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands/#autoscale). * Verwenden des [Horizontal Pod Autoscaler](/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). 
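Als kleine Skizze, die nicht Teil der ursprünglichen Seite ist (der Deployment-Name `php-apache` ist ein frei gewählter Platzhalter), zeigt der folgende Aufruf des oben genannten `kubectl autoscale` Befehls, wie ein Deployment bei einer CPU-Zielauslastung von 50 % zwischen 1 und 10 Replikaten skaliert wird:

```shell
kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
```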
-{{% /capture %}} + diff --git a/content/de/docs/tasks/tools/install-kubectl.md b/content/de/docs/tasks/tools/install-kubectl.md index dd9c68c2ca..d7fb7aa759 100644 --- a/content/de/docs/tasks/tools/install-kubectl.md +++ b/content/de/docs/tasks/tools/install-kubectl.md @@ -1,6 +1,6 @@ --- title: Installieren und konfigurieren von kubectl -content_template: templates/task +content_type: task weight: 10 card: name: tasks @@ -8,17 +8,18 @@ card: title: Kubectl installieren --- -{{% capture overview %}} + Verwenden Sie das Kubernetes Befehlszeilenprogramm, [kubectl](/docs/user-guide/kubectl/), um Anwendungen auf Kubernetes bereitzustellen und zu verwalten. Mit kubectl können Sie Clusterressourcen überprüfen, Komponenten erstellen, löschen und aktualisieren; Ihren neuen Cluster betrachten; und Beispielanwendungen aufrufen. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Sie müssen eine kubectl-Version verwenden, die innerhalb eines geringfügigen Versionsunterschieds zur Version Ihres Clusters liegt. Ein v1.2-Client sollte beispielsweise mit einem v1.1, v1.2 und v1.3-Master arbeiten. Die Verwendung der neuesten Version von kubectl verhindert unvorhergesehene Probleme. -{{% /capture %}} -{{% capture steps %}} + + ## Kubectl installieren @@ -421,9 +422,10 @@ compinit {{% /tab %}} {{< /tabs >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Erfahren Sie, wie Sie Ihre Anwendung starten und verfügbar machen.](/docs/tasks/access-application-cluster/service-access-application-cluster/) -{{% /capture %}} + diff --git a/content/de/docs/tasks/tools/install-minikube.md b/content/de/docs/tasks/tools/install-minikube.md index c3d08bac30..318fcf25aa 100644 --- a/content/de/docs/tasks/tools/install-minikube.md +++ b/content/de/docs/tasks/tools/install-minikube.md @@ -1,28 +1,29 @@ --- title: Installation von Minikube -content_template: templates/task +content_type: task weight: 20 card: name: tasks weight: 10 --- -{{% capture overview %}} + Diese Seite zeigt Ihnen, wie Sie [Minikube](/docs/tutorials/hello-minikube) installieren, ein Programm, das einen Kubernetes-Cluster mit einem einzigen Node in einer virtuellen Maschine auf Ihrem Laptop ausführt. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Die VT-x- oder AMD-v-Virtualisierung muss im BIOS Ihres Computers aktiviert sein. Um dies unter Linux zu überprüfen, führen Sie Folgendes aus und vergewissern Sie sich, dass die Ausgabe nicht leer ist: ```shell egrep --color 'vmx|svm' /proc/cpuinfo ``` -{{% /capture %}} -{{% capture steps %}} + + ## Einen Hypervisor installieren @@ -106,13 +107,6 @@ Schließen Sie nach der Installation von Minikube die aktuelle CLI-Sitzung und s So installieren Sie Minikube manuell unter Windows mit [Windows Installer](https://docs.microsoft.com/en-us/windows/desktop/msi/windows-installer-portal), laden Sie die Datei [`minikube-installer.exe`](https://github.com/kubernetes/minikube/releases/latest) und führen Sie den Installer aus. 
-{{% /capture %}} - -{{% capture whatsnext %}} - -* [Kubernetes lokal über Minikube ausführen](/docs/setup/minikube/) - -{{% /capture %}} ## Eine bestehende Installation bereinigen @@ -130,3 +124,8 @@ Müssen Sie die Konfigurationsdateien löschen: ```shell rm -rf ~/.minikube ``` + +## {{% heading "whatsnext" %}} + + +* [Kubernetes lokal über Minikube ausführen](/docs/setup/minikube/) diff --git a/content/de/docs/tutorials/_index.md b/content/de/docs/tutorials/_index.md index 4cb042c124..1dcbbd8a63 100644 --- a/content/de/docs/tutorials/_index.md +++ b/content/de/docs/tutorials/_index.md @@ -2,19 +2,19 @@ title: Tutorials main_menu: true weight: 60 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Dieser Abschnitt der Kubernetes-Dokumentation enthält Tutorials. Ein Tutorial zeigt, wie Sie ein Ziel erreichen, das größer ist als eine einzelne [Aufgabe](/docs/tasks/). Ein Tutorial besteht normalerweise aus mehreren Abschnitten, die jeweils eine Abfolge von Schritten haben. Bevor Sie die einzelnen Lernprogramme durchgehen, möchten Sie möglicherweise ein Lesezeichen zur Seite mit dem [Standardisierten Glossar](/docs/reference/glossary/) setzen um später Informationen nachzuschlagen. -{{% /capture %}} -{{% capture body %}} + + ## Grundlagen @@ -64,12 +64,13 @@ Bevor Sie die einzelnen Lernprogramme durchgehen, möchten Sie möglicherweise e * [Source IP verwenden](/docs/tutorials/services/source-ip/) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Wenn Sie ein Tutorial schreiben möchten, lesen Sie [Seitenvorlagen verwenden](/docs/home/contribute/page-templates/) für weitere Informationen zum Typ der Tutorial-Seite und zur Tutorial-Vorlage. -{{% /capture %}} + diff --git a/content/de/docs/tutorials/hello-minikube.md b/content/de/docs/tutorials/hello-minikube.md index 4d0bc7f0f6..a1bf6dd493 100644 --- a/content/de/docs/tutorials/hello-minikube.md +++ b/content/de/docs/tutorials/hello-minikube.md @@ -1,6 +1,6 @@ --- title: Hallo Minikube -content_template: templates/tutorial +content_type: tutorial weight: 5 menu: main: @@ -13,7 +13,7 @@ card: weight: 10 --- -{{% capture overview %}} + Dieses Tutorial zeigt Ihnen, wie Sie eine einfache "Hallo Welt" Node.js-Anwendung auf Kubernetes mit [Minikube](/docs/getting-started-guides/minikube) und Katacoda ausführen. Katacoda bietet eine kostenlose Kubernetes-Umgebung im Browser. @@ -22,17 +22,19 @@ Katacoda bietet eine kostenlose Kubernetes-Umgebung im Browser. Sie können dieses Tutorial auch verwenden, wenn Sie [Minikube lokal](/docs/tasks/tools/install-minikube/) installiert haben. {{< /note >}} -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Stellen Sie eine Hallo-Welt-Anwendung für Minikube bereit. * Führen Sie die App aus. * Betrachten Sie die Log Dateien. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Dieses Lernprogramm enthält ein aus den folgenden Dateien erstelltes Container-Image: @@ -42,9 +44,9 @@ Dieses Lernprogramm enthält ein aus den folgenden Dateien erstelltes Container- Weitere Informationen zum `docker build` Befehl, lesen Sie die [Docker Dokumentation](https://docs.docker.com/engine/reference/commandline/build/). 
-{{% /capture %}} -{{% capture lessoncontent %}} + + ## Erstellen Sie einen Minikube-Cluster @@ -260,12 +262,13 @@ Löschen Sie optional die Minikube-VM: minikube delete ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Lernen Sie mehr über [Bereitstellungsobjekte](/docs/concepts/workloads/controllers/deployment/). * Lernen Sie mehr über [Anwendungen bereitstellen](/docs/user-guide/deploying-applications/). * Lernen Sie mehr über [Serviceobjekte](/docs/concepts/services-networking/service/). -{{% /capture %}} + diff --git a/content/en/_index.html b/content/en/_index.html index f54851f05d..97e02aa259 100644 --- a/content/en/_index.html +++ b/content/en/_index.html @@ -3,9 +3,6 @@ title: "Production-Grade Container Orchestration" abstract: "Automated container deployment, scaling, and management" cid: home --- -{{< announcement >}} - -{{< deprecationwarning >}} {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} @@ -60,4 +57,4 @@ Kubernetes is open source giving you the freedom to take advantage of on-premise {{< blocks/kubernetes-features >}} -{{< blocks/case-studies >}} +{{< blocks/case-studies >}} \ No newline at end of file diff --git a/content/en/blog/_posts/2017-01-00-Running-Mongodb-On-Kubernetes-With-Statefulsets.md b/content/en/blog/_posts/2017-01-00-Running-Mongodb-On-Kubernetes-With-Statefulsets.md index f07860212a..6682d54df1 100644 --- a/content/en/blog/_posts/2017-01-00-Running-Mongodb-On-Kubernetes-With-Statefulsets.md +++ b/content/en/blog/_posts/2017-01-00-Running-Mongodb-On-Kubernetes-With-Statefulsets.md @@ -4,70 +4,49 @@ date: 2017-01-30 slug: running-mongodb-on-kubernetes-with-statefulsets url: /blog/2017/01/Running-Mongodb-On-Kubernetes-With-Statefulsets --- -_Editor's note: Today’s post is by Sandeep Dinesh, Developer Advocate, Google Cloud Platform, showing how to run a database in a container._ +_Editor's note: Today’s post is by Sandeep Dinesh, Developer Advocate, Google Cloud Platform, showing how to run a database in a container._ +{{% warning %}} +This post is several years old. The code examples need changes to work on a current Kubernetes cluster. +{{% /warning %}} -Conventional wisdom says you can’t run a database in a container. “Containers are stateless!” they say, and “databases are pointless without state!” +Conventional wisdom says you can’t run a database in a container. “Containers are stateless!” they say, and “databases are pointless without state!” Of course, this is not true at all. At Google, everything runs in a container, including databases. You just need the right tools. [Kubernetes 1.5](https://kubernetes.io/blog/2016/12/kubernetes-1-5-supporting-production-workloads/) includes the new [StatefulSet](/docs/concepts/abstractions/controllers/statefulsets/) API object (in previous versions, StatefulSet was known as PetSet). With StatefulSets, Kubernetes makes it much easier to run stateful workloads such as databases. -If you’ve followed my previous posts, you know how to create a [MEAN Stack app with Docker](http://blog.sandeepdinesh.com/2015/07/running-mean-web-application-in-docker.html), then [migrate it to Kubernetes](https://medium.com/google-cloud/running-a-mean-stack-on-google-cloud-platform-with-kubernetes-149ca81c2b5d) to provide easier management and reliability, and [create a MongoDB replica set](https://medium.com/google-cloud/mongodb-replica-sets-with-kubernetes-d96606bd9474) to provide redundancy and high availability. 
+If you’ve followed my previous posts, you know how to create a [MEAN Stack app with Docker](http://blog.sandeepdinesh.com/2015/07/running-mean-web-application-in-docker.html), then [migrate it to Kubernetes](https://medium.com/google-cloud/running-a-mean-stack-on-google-cloud-platform-with-kubernetes-149ca81c2b5d) to provide easier management and reliability, and [create a MongoDB replica set](https://medium.com/google-cloud/mongodb-replica-sets-with-kubernetes-d96606bd9474) to provide redundancy and high availability.

-While the replica set in my previous blog post worked, there were some annoying steps that you needed to follow. You had to manually create a disk, a ReplicationController, and a service for each replica. Scaling the set up and down meant managing all of these resources manually, which is an opportunity for error, and would put your stateful application at risk In the previous example, we created a Makefile to ease the management of these resources, but it would have been great if Kubernetes could just take care of all of this for us.
-With StatefulSets, these headaches finally go away. You can create and manage your MongoDB replica set natively in Kubernetes, without the need for scripts and Makefiles. Let’s take a look how.
-_Note: StatefulSets are currently a beta resource. The [sidecar container](https://github.com/cvallance/mongo-k8s-sidecar) used for auto-configuration is also unsupported._
+While the replica set in my previous blog post worked, there were some annoying steps that you needed to follow. You had to manually create a disk, a ReplicationController, and a service for each replica. Scaling the set up and down meant managing all of these resources manually, which is an opportunity for error, and would put your stateful application at risk. In the previous example, we created a Makefile to ease the management of these resources, but it would have been great if Kubernetes could just take care of all of this for us.

+With StatefulSets, these headaches finally go away. You can create and manage your MongoDB replica set natively in Kubernetes, without the need for scripts and Makefiles. Let’s take a look at how.

+_Note: StatefulSets are currently a beta resource. The [sidecar container](https://github.com/cvallance/mongo-k8s-sidecar) used for auto-configuration is also unsupported._

**Prerequisites and Setup**

Before we get started, you’ll need a Kubernetes 1.5+ cluster and the [Kubernetes command line tool](/docs/user-guide/prereqs/). If you want to follow along with this tutorial and use Google Cloud Platform, you also need the [Google Cloud SDK](http://cloud.google.com/sdk).

Once you have a [Google Cloud project created](https://console.cloud.google.com/projectcreate) and have your Google Cloud SDK set up (hint: `gcloud init`), we can create our cluster.

-To create a Kubernetes 1.5 cluster, run the following command:
+To create a Kubernetes 1.5 cluster, run the following command:

```
gcloud container clusters create "test-cluster"
```

-This will make a three node Kubernetes cluster. Feel free to [customize the command](https://cloud.google.com/sdk/gcloud/reference/container/clusters/create) as you see fit.
+This will make a three-node Kubernetes cluster. Feel free to [customize the command](https://cloud.google.com/sdk/gcloud/reference/container/clusters/create) as you see fit.
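For instance (a hedged sketch that is not part of the original walkthrough: the node count, machine type, and zone below are placeholder values you would pick for your own project):

```
gcloud container clusters create "test-cluster" \
    --num-nodes 3 \
    --machine-type n1-standard-2 \
    --zone us-central1-a
```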
Then, authenticate into the cluster:

```
gcloud container clusters get-credentials test-cluster
```

**Setting up the MongoDB replica set**

To set up the MongoDB replica set, you need three things: a [StorageClass](/docs/user-guide/persistent-volumes/#storageclasses), a [Headless Service](/docs/user-guide/services/#headless-services), and a [StatefulSet](/docs/concepts/abstractions/controllers/statefulsets/).

-I’ve created the configuration files for these already, and you can clone the example from GitHub:
+I’ve created the configuration files for these already, and you can clone the example from GitHub:

```
git clone https://github.com/thesandlord/mongo-k8s-sidecar.git
cd mongo-k8s-sidecar/example/StatefulSet/
```

-To create the MongoDB replica set, run these two commands:
+To create the MongoDB replica set, run these two commands:

```
kubectl apply -f googlecloud_ssd.yaml
kubectl apply -f mongo-statefulset.yaml
```

That's it! With these two commands, you have launched all the components required to run a highly available and redundant MongoDB replica set.

At a high level, it looks something like this:

![](https://lh4.googleusercontent.com/ohALxLD4Ugj5FCwWqgqZ4xP9al4lTgrPDc9HsgPWYRZRz_buuYK6LKSC7A5n98DdOO-Po3Zq77Yt43-QhTWdIaXqltHI7PX0zMXAXbpiilYgdowGZapG0lJ9lgubwBj1CwNHHtXA)

Let’s examine each piece in more detail.

**StorageClass**

The storage class tells Kubernetes what kind of storage to use for the database nodes. You can set up many different types of StorageClasses in a ton of different environments. For example, if you run Kubernetes in your own datacenter, you can use [GlusterFS](https://www.gluster.org/). On GCP, your [storage choices](https://cloud.google.com/compute/docs/disks/) are SSDs and hard disks. There are currently drivers for [AWS](/docs/user-guide/persistent-volumes/#aws), [Azure](/docs/user-guide/persistent-volumes/#azure-disk), [Google Cloud](/docs/user-guide/persistent-volumes/#gce), [GlusterFS](/docs/user-guide/persistent-volumes/#glusterfs), [OpenStack Cinder](/docs/user-guide/persistent-volumes/#openstack-cinder), [vSphere](/docs/user-guide/persistent-volumes/#vsphere), [Ceph RBD](/docs/user-guide/persistent-volumes/#ceph-rbd), and [Quobyte](/docs/user-guide/persistent-volumes/#quobyte).

+The configuration for the StorageClass looks like this:
-The configuration for the StorageClass looks like this:
-
-```
-kind: StorageClass
-apiVersion: storage.k8s.io/v1beta1
-metadata:
- name: fast
-provisioner: kubernetes.io/gce-pd
-parameters:
+```yaml
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+  name: fast
+provisioner: kubernetes.io/gce-pd
+parameters:
  type: pd-ssd
```

This configuration creates a new StorageClass called “fast” that is backed by SSD volumes. The StatefulSet can now request a volume, and the StorageClass will automatically create it!

-Deploy this StorageClass:
+Deploy this StorageClass:

```
kubectl apply -f googlecloud_ssd.yaml
```

**Headless Service**

Now that you have created the StorageClass, you need to make a Headless Service. These are just like normal Kubernetes Services, except they don’t do any load balancing for you. When combined with StatefulSets, they can give you unique DNS addresses that let you directly access the pods!
This is perfect for creating MongoDB replica sets, because our app needs to connect to all of the MongoDB nodes individually. +The configuration for the Headless Service looks like this: - -The configuration for the Headless Service looks like this: - - -``` +```yaml apiVersion: v1 - kind: Service - metadata: - - name: mongo - - labels: - - name: mongo - + name: mongo + labels: + name: mongo spec: - - ports: - - - port: 27017 - - targetPort: 27017 - - clusterIP: None - - selector: - - role: mongo + ports: + - port: 27017 + targetPort: 27017 + clusterIP: None + selector: + role: mongo ``` - - You can tell this is a Headless Service because the clusterIP is set to “None.” Other than that, it looks exactly the same as any normal Kubernetes Service. - - **StatefulSet** - - The pièce de résistance. The StatefulSet actually runs MongoDB and orchestrates everything together. StatefulSets differ from Kubernetes [ReplicaSets](/docs/user-guide/replicasets/) (not to be confused with MongoDB replica sets!) in certain ways that makes them more suited for stateful applications. Unlike Kubernetes ReplicaSets, pods created under a StatefulSet have a few unique attributes. The name of the pod is not random, instead each pod gets an ordinal name. Combined with the Headless Service, this allows pods to have stable identification. In addition, pods are created one at a time instead of all at once, which can help when bootstrapping a stateful system. You can read more about StatefulSets in the [documentation](/docs/concepts/abstractions/controllers/statefulsets/). - - Just like before, [this “sidecar” container](https://github.com/cvallance/mongo-k8s-sidecar) will configure the MongoDB replica set automatically. A “sidecar” is a helper container which helps the main container do its work. 
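As a quick sanity check (an aside that is not in the original post; it assumes the `role=mongo,environment=test` labels used throughout this example), you can list exactly the pods the sidecar watches by using the same label selector:

```
kubectl get pods -l role=mongo,environment=test
```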
+The configuration for the StatefulSet looks like this:
-The configuration for the StatefulSet looks like this:
-
-```
+```yaml
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
- name: mongo
+  name: mongo
spec:
- serviceName: "mongo"
- replicas: 3
- template:
- metadata:
- labels:
- role: mongo
- environment: test
- spec:
- terminationGracePeriodSeconds: 10
- containers:
-  - name: mongo
- image: mongo
- command:
-  - mongod
-  - "--replSet"
-  - rs0
-  - "--smallfiles"
-  - "--noprealloc"
- ports:
-  - containerPort: 27017
- volumeMounts:
-  - name: mongo-persistent-storage
- mountPath: /data/db
-  - name: mongo-sidecar
- image: cvallance/mongo-k8s-sidecar
- env:
-  - name: MONGO\_SIDECAR\_POD\_LABELS
- value: "role=mongo,environment=test"
- volumeClaimTemplates:
-  - metadata:
- name: mongo-persistent-storage
- annotations:
- volume.beta.kubernetes.io/storage-class: "fast"
- spec:
- accessModes: ["ReadWriteOnce"]
- resources:
- requests:
- storage: 100Gi
+  selector:
+    matchLabels:
+      role: mongo
+      environment: test
+  serviceName: "mongo"
+  replicas: 3
+  template:
+    metadata:
+      labels:
+        role: mongo
+        environment: test
+    spec:
+      terminationGracePeriodSeconds: 10
+      containers:
+        - name: mongo
+          image: mongo
+          command:
+            - mongod
+            - "--replSet"
+            - rs0
+            - "--smallfiles"
+            - "--noprealloc"
+          ports:
+            - containerPort: 27017
+          volumeMounts:
+            - name: mongo-persistent-storage
+              mountPath: /data/db
+        - name: mongo-sidecar
+          image: cvallance/mongo-k8s-sidecar
+          env:
+            - name: MONGO_SIDECAR_POD_LABELS
+              value: "role=mongo,environment=test"
+  volumeClaimTemplates:
+    - metadata:
+        name: mongo-persistent-storage
+      spec:
+        storageClassName: "fast"
+        accessModes: ["ReadWriteOnce"]
+        resources:
+          requests:
+            storage: 100Gi
```

It’s a little long, but fairly straightforward.

The first section describes the StatefulSet object. Then, we move into the Metadata section, where you can specify labels and the number of replicas.

+Next comes the pod spec. The terminationGracePeriodSeconds is used to gracefully shut down the pod when you scale down the number of replicas, which is important for databases! Then the configurations for the two containers are shown. The first one runs MongoDB with command line flags that configure the replica set name. It also mounts the persistent storage volume to /data/db, the location where MongoDB saves its data. The second container runs the sidecar.
-Next comes the pod spec. The terminationGracePeriodSeconds is used to gracefully shutdown the pod when you scale down the number of replicas, which is important for databases! Then the configurations for the two containers is shown. The first one runs MongoDB with command line flags that configure the replica set name. It also mounts the persistent storage volume to /data/db, the location where MongoDB saves its data. The second container runs the sidecar.
-
-Finally, there is the volumeClaimTemplates. This is what talks to the StorageClass we created before to provision the volume. It will provision a 100 GB disk for each MongoDB replica.
+Finally, there is the volumeClaimTemplates. This is what talks to the StorageClass we created before to provision the volume. It will provision a 100 GB disk for each MongoDB replica.

**Using the MongoDB replica set**

At this point, you should have three pods created in your cluster. These correspond to the three nodes in your MongoDB replica set.
You can see them with this command:

```
kubectl get pods

NAME      READY     STATUS    RESTARTS   AGE
mongo-0   2/2       Running   0          3m
mongo-1   2/2       Running   0          3m
mongo-2   2/2       Running   0          3m
```

+Each pod in a StatefulSet backed by a Headless Service will have a stable DNS name. The template follows this format: \<pod-name\>.\<service-name\>
-Each pod in a StatefulSet backed by a Headless Service will have a stable DNS name. The template follows this format: \<pod-name\>.\<service-name\>
-
-This means the DNS names for the MongoDB replica set are:
+This means the DNS names for the MongoDB replica set are:

```
mongo-0.mongo
mongo-1.mongo
mongo-2.mongo
```

+You can use these names directly in the [connection string URI](http://docs.mongodb.com/manual/reference/connection-string) of your app.
-You can use these names directly in the [connection string URI](http://docs.mongodb.com/manual/reference/connection-string) of your app.
-
-In this case, the connection string URI would be:
+In this case, the connection string URI would be:

```
-“mongodb://mongo-0.mongo,mongo-1.mongo,mongo-2.mongo:27017/dbname\_?”
+mongodb://mongo-0.mongo,mongo-1.mongo,mongo-2.mongo:27017/dbname_?
```

+That’s it!
-That’s it!
-
-**Scaling the MongoDB replica set**
-
-A huge advantage of StatefulSets is that you can scale them just like Kubernetes ReplicaSets. If you want 5 MongoDB Nodes instead of 3, just run the scale command:
+**Scaling the MongoDB replica set**
+A huge advantage of StatefulSets is that you can scale them just like Kubernetes ReplicaSets. If you want 5 MongoDB nodes instead of 3, just run the scale command:

```
kubectl scale --replicas=5 statefulset mongo
```

+The sidecar container will automatically configure the new MongoDB nodes to join the replica set.
-The sidecar container will automatically configure the new MongoDB nodes to join the replica set.

+Include the two new nodes (mongo-3.mongo & mongo-4.mongo) in your connection string URI and you are good to go. Too easy!
-Include the two new nodes (mongo-3.mongo & mongo-4.mongo) in your connection string URI and you are good to go. Too easy!

+**Cleaning Up**
-**Cleaning Up**
-
-To clean up the deployed resources, delete the StatefulSet, Headless Service, and the provisioned volumes.
-
-Delete the StatefulSet:
+To clean up the deployed resources, delete the StatefulSet, Headless Service, and the provisioned volumes.

+Delete the StatefulSet:

```
kubectl delete statefulset mongo
```

-Delete the Service:
+Delete the Service:

```
kubectl delete svc mongo
```

-Delete the Volumes:
+Delete the Volumes:

```
kubectl delete pvc -l role=mongo
```

-Finally, you can delete the test cluster:
+Finally, you can delete the test cluster:

```
gcloud container clusters delete "test-cluster"
```

Happy Hacking!

For more cool Kubernetes and Container blog posts, follow me on [Twitter](https://twitter.com/sandeepdinesh) and [Medium](https://medium.com/@SandeepDinesh).
_--Sandeep Dinesh, Developer Advocate, Google Cloud Platform._
diff --git a/content/en/blog/_posts/2020-06-05-Supporting-the-Evolving-Ingress-Specification-in-Kubernetes-1.18.md b/content/en/blog/_posts/2020-06-05-Supporting-the-Evolving-Ingress-Specification-in-Kubernetes-1.18.md new file mode 100644 index 0000000000..10a3e55f4d --- /dev/null +++ b/content/en/blog/_posts/2020-06-05-Supporting-the-Evolving-Ingress-Specification-in-Kubernetes-1.18.md @@ -0,0 +1,56 @@
+---
+layout: blog
+title: Supporting the Evolving Ingress Specification in Kubernetes 1.18
+date: 2020-06-05
+slug: Supporting-the-Evolving-Ingress-Specification-in-Kubernetes-1.18
+---
+
+**Authors:** Alex Gervais (Datawire.io)
+
+Earlier this year, the Kubernetes team released [Kubernetes 1.18](https://kubernetes.io/blog/2020/03/25/kubernetes-1-18-release-announcement/), which extended Ingress. In this blog post, we’ll walk through what’s new in the Ingress specification, what it means for your applications, and how to upgrade to an ingress controller that supports this new specification.
+
+### What is Kubernetes Ingress
+When deploying your applications in Kubernetes, one of the first challenges many people encounter is how to get traffic into their cluster. [Kubernetes ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) is a collection of routing rules that govern how external users access services running in a Kubernetes cluster. There are [three general approaches](https://blog.getambassador.io/kubernetes-ingress-nodeport-load-balancers-and-ingress-controllers-6e29f1c44f2d) for exposing your application:
+
+* Using a `NodePort` to expose your application on a port across each of your nodes
+* Using a `LoadBalancer` service to create an external load balancer that points to a Kubernetes service in your cluster
+* Using a Kubernetes Ingress resource
+
+### What’s new in Kubernetes 1.18 Ingress
+There are three significant additions to the Ingress API in Kubernetes 1.18:
+
+* A new `pathType` field
+* A new `IngressClass` resource
+* Support for wildcards in hostnames
+
+The new `pathType` field allows you to specify how Ingress paths should match.
+The field supports three types: `ImplementationSpecific` (default), `Exact`, and `Prefix`. Explicitly defining the expected behavior of path matching will allow every ingress-controller to support a user’s needs and will increase portability between ingress-controller implementation solutions.
+
+The `IngressClass` resource specifies how Ingresses should be implemented by controllers. This was added to formalize the commonly used but never standardized `kubernetes.io/ingress.class` annotation and to allow for implementation-specific extensions and configuration.
+
+You can read about these changes, as well as the support for wildcards in hostnames, in more detail in [a previous blog post](https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/).
+
+## Supporting Kubernetes ingress
+[Ambassador](https://www.getambassador.io) is an open-source Envoy-based ingress controller. We believe strongly in supporting common standards such as Kubernetes ingress, which we adopted and [announced our initial support for back in 2019](https://blog.getambassador.io/ambassador-ingress-controller-better-config-reporting-updated-envoy-proxy-99dc9139e28f).
+
+Every Ambassador release goes through rigorous testing.
+Every Ambassador release goes through rigorous testing. Therefore, we also contributed an [open conformance test suite](https://github.com/kubernetes-sigs/ingress-controller-conformance) that supports Kubernetes ingress. We wrote the initial bits of test code and will keep iterating over the newly added features and different versions of the Ingress specification as it evolves to a stable v1 GA release. Documentation and usage samples are among our top priorities. We understand how complex usage can be, especially when transitioning from a previous version of an API.
+
+Following a test-driven development approach, the first step we took in supporting Ingress improvements in Ambassador was to translate the revised specification -- both in terms of API and behavior -- into a comprehensible test suite. The test suite, although still under heavy development and going through multiple iterations, was rapidly added to the Ambassador CI infrastructure and acceptance criteria. This means every change to the Ambassador codebase going forward will be compliant with the Ingress API and be tested end-to-end in a lightweight [KIND cluster](https://kind.sigs.k8s.io/). Using KIND allowed us to make rapid improvements while limiting our cloud provider infrastructure bill and testing out unreleased Kubernetes features with pre-release builds.
+
+### Adopting a new specification
+With a solid understanding of the additions to Ingress introduced in Kubernetes 1.18 and a test suite on hand, we tackled the task of adapting the Ambassador code so that it would support translating the high-level Ingress API resources into Envoy configurations and constructs. Luckily, Ambassador already supported previous versions of ingress functionality, so the development effort was incremental.
+
+We settled on a controller name of `getambassador.io/ingress-controller`. This value, consistent with Ambassador's domain and CRD versions, must be used to tie in an IngressClass `spec.controller` with an Ambassador deployment. The new IngressClass resource allows for extensibility by setting a `spec.parameters` field. At the moment Ambassador makes no use of this field and its usage is reserved for future development.
+
+Paths can now define different matching behaviors using the `pathType` field. The field will default to a value of `ImplementationSpecific`, which uses the same matching rules as the [Ambassador Mappings](https://www.getambassador.io/docs/latest/topics/using/mappings/) prefix field and previous Ingress specification for backward compatibility reasons.
+
+### Kubernetes Ingress Controllers
+A comprehensive [list of Kubernetes ingress controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) is available in the Kubernetes documentation. Currently, Ambassador is the only ingress controller that supports these new additions to the ingress specification. Powered by the [Envoy Proxy](https://www.envoyproxy.io), Ambassador is the fastest way for you to try out the new ingress specification today.
+
+Check out the following resources:
+
+* Ambassador on [GitHub](https://www.github.com/datawire/ambassador)
+* The Ambassador [documentation](https://www.getambassador.io/docs)
+* [Improvements to the Ingress API](https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/)
+
+Or join the community on [Slack](http://d6e.co/slack)!
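
As a sketch of how the two additions described in this post fit together, assuming the `networking.k8s.io/v1beta1` API shipped with Kubernetes 1.18; the names `ambassador`, `demo`, and `demo-svc` and the hostname are placeholders, while the controller string is the one documented above:

```yaml
# Sketch only: an IngressClass tied to the Ambassador controller name,
# plus an Ingress using the new pathType field and a wildcard hostname.
apiVersion: networking.k8s.io/v1beta1
kind: IngressClass
metadata:
  name: ambassador                        # placeholder class name
spec:
  controller: getambassador.io/ingress-controller
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: demo                              # placeholder
spec:
  ingressClassName: ambassador            # replaces the kubernetes.io/ingress.class annotation
  rules:
  - host: "*.example.com"                 # wildcard hostnames are new in 1.18
    http:
      paths:
      - path: /app
        pathType: Prefix                  # Exact | Prefix | ImplementationSpecific
        backend:
          serviceName: demo-svc           # placeholder Service
          servicePort: 80
```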
diff --git a/content/en/docs/concepts/_index.md b/content/en/docs/concepts/_index.md
index 0cb970fd66..c0ea1a2c8d 100644
--- a/content/en/docs/concepts/_index.md
+++ b/content/en/docs/concepts/_index.md
@@ -1,17 +1,17 @@
 ---
 title: Concepts
 main_menu: true
-content_template: templates/concept
+content_type: concept
 weight: 40
 ---

-{{% capture overview %}}
+
 The Concepts section helps you learn about the parts of the Kubernetes system and the abstractions Kubernetes uses to represent your {{< glossary_tooltip text="cluster" term_id="cluster" length="all" >}}, and helps you obtain a deeper understanding of how Kubernetes works.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Overview

@@ -60,12 +60,13 @@ The Kubernetes master is responsible for maintaining the desired state for your
 The nodes in a cluster are the machines (VMs, physical servers, etc) that run your applications and cloud workflows. The Kubernetes master controls each node; you'll rarely interact with nodes directly.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 If you would like to write a concept page, see
-[Using Page Templates](/docs/home/contribute/page-templates/)
-for information about the concept page type and the concept template.
+[Page Content Types](/docs/home/contribute/style/page-content-types/#concept)
+for information about the concept page types.
+
-{{% /capture %}}
diff --git a/content/en/docs/concepts/architecture/cloud-controller.md b/content/en/docs/concepts/architecture/cloud-controller.md
index 31c0ad9d54..9a731b684a 100644
--- a/content/en/docs/concepts/architecture/cloud-controller.md
+++ b/content/en/docs/concepts/architecture/cloud-controller.md
@@ -1,10 +1,10 @@
 ---
 title: Cloud Controller Manager
-content_template: templates/concept
+content_type: concept
 weight: 40
 ---

-{{% capture overview %}}
+
 {{< feature-state state="beta" for_k8s_version="v1.11" >}}

@@ -17,9 +17,9 @@ components.
 The cloud-controller-manager is structured using a plugin
 mechanism that allows different cloud providers to integrate their platforms with Kubernetes.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Design

@@ -200,8 +200,9 @@ rules:
   - update
 ```

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 [Cloud Controller Manager Administration](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager)
 has instructions on running and managing the cloud controller manager.

@@ -212,4 +213,3 @@ The cloud controller manager uses Go interfaces to allow implementations from an
 The implementation of the shared controllers highlighted in this document (Node, Route, and Service), and some scaffolding along with the shared cloudprovider interface, is part of the Kubernetes core. Implementations specific to cloud providers are outside the core of Kubernetes and implement the `CloudProvider` interface.

 For more information about developing plugins, see [Developing Cloud Controller Manager](/docs/tasks/administer-cluster/developing-cloud-controller-manager/).
-{{% /capture %}}
\ No newline at end of file
diff --git a/content/en/docs/concepts/architecture/control-plane-node-communication.md b/content/en/docs/concepts/architecture/control-plane-node-communication.md
index 5e85302c38..925f14d17a 100644
--- a/content/en/docs/concepts/architecture/control-plane-node-communication.md
+++ b/content/en/docs/concepts/architecture/control-plane-node-communication.md
@@ -3,22 +3,22 @@ reviewers:
 - dchen1107
 - liggitt
 title: Control Plane-Node Communication
-content_template: templates/concept
+content_type: concept
 weight: 20
 aliases:
 - master-node-communication
 ---

-{{% capture overview %}}
+
 This document catalogs the communication paths between the control plane (really the apiserver) and the Kubernetes cluster. The intent is to allow users to customize their installation to harden the network configuration such that the cluster can be run on an untrusted network (or on fully public IPs on a cloud provider).

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Node to Control Plane

-All communication paths from the nodes to the control plane terminate at the apiserver (none of the other master components are designed to expose remote services). In a typical deployment, the apiserver is configured to listen for remote connections on a secure HTTPS port (443) with one or more forms of client [authentication](/docs/reference/access-authn-authz/authentication/) enabled.
+Kubernetes has a "hub-and-spoke" API pattern. All API usage from nodes (or the pods they run) terminates at the apiserver (none of the other control plane components are designed to expose remote services). The apiserver is configured to listen for remote connections on a secure HTTPS port (typically 443) with one or more forms of client [authentication](/docs/reference/access-authn-authz/authentication/) enabled.
 One or more forms of [authorization](/docs/reference/access-authn-authz/authorization/) should be enabled, especially if [anonymous requests](/docs/reference/access-authn-authz/authentication/#anonymous-requests) or [service account tokens](/docs/reference/access-authn-authz/authentication/#service-account-tokens) are allowed.

 Nodes should be provisioned with the public root certificate for the cluster such that they can connect securely to the apiserver along with valid client credentials. For example, on a default GKE deployment, the client credentials provided to the kubelet are in the form of a client certificate. See [kubelet TLS bootstrapping](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/) for automated provisioning of kubelet client certificates.
@@ -31,9 +31,11 @@ The control plane components also communicate with the cluster apiserver over th
 As a result, the default operating mode for connections from the nodes and pods running on the nodes to the control plane is secured by default and can run over untrusted and/or public networks.

 ## Control Plane to node
+
 There are two primary communication paths from the control plane (apiserver) to the nodes. The first is from the apiserver to the kubelet process which runs on each node in the cluster. The second is from the apiserver to any node, pod, or service through the apiserver's proxy functionality.

 ### apiserver to kubelet
+
 The connections from the apiserver to the kubelet are used for:

 * Fetching logs for pods.
@@ -61,9 +63,10 @@ This tunnel ensures that the traffic is not exposed outside of the network in wh
 SSH tunnels are currently deprecated so you shouldn't opt to use them unless you know what you are doing. The Konnectivity service is a replacement for this communication channel.

 ### Konnectivity service
+
 {{< feature-state for_k8s_version="v1.18" state="beta" >}}

-As a replacement to the SSH tunnels, the Konnectivity service provides TCP level proxy for the control plane to Cluster communication. The Konnectivity consists of two parts, the Konnectivity server and the Konnectivity agents, running in the control plane network and the nodes network respectively. The Konnectivity agents initiate connections to the Konnectivity server and maintain the connections.
-All control plane to nodes traffic then goes through these connections.
+As a replacement to the SSH tunnels, the Konnectivity service provides a TCP-level proxy for control plane to cluster communication. The Konnectivity service consists of two parts: the Konnectivity server and the Konnectivity agents, running in the control plane network and the nodes network respectively. The Konnectivity agents initiate connections to the Konnectivity server and maintain the network connections.
+After enabling the Konnectivity service, all control plane to nodes traffic goes through these connections.

-See [Konnectivity Service Setup](/docs/tasks/setup-konnectivity/) on how to set it up in your cluster.
+Follow the [Konnectivity service task](/docs/tasks/extend-kubernetes/setup-konnectivity/) to set up the Konnectivity service in your cluster.
diff --git a/content/en/docs/concepts/architecture/controller.md b/content/en/docs/concepts/architecture/controller.md
index 2872959bac..547a624a94 100644
--- a/content/en/docs/concepts/architecture/controller.md
+++ b/content/en/docs/concepts/architecture/controller.md
@@ -1,10 +1,10 @@
 ---
 title: Controllers
-content_template: templates/concept
+content_type: concept
 weight: 30
 ---

-{{% capture overview %}}
+
 In robotics and automation, a _control loop_ is a non-terminating loop that regulates the state of a system.

@@ -18,10 +18,10 @@ closer to the desired state, by turning equipment on or off.

 {{< glossary_definition term_id="controller" length="short">}}

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Controller pattern

@@ -150,11 +150,12 @@ You can run your own controller as a set of Pods,
 or externally to Kubernetes. What fits best will depend on what that particular
 controller does.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 * Read about the [Kubernetes control plane](/docs/concepts/#kubernetes-control-plane)
 * Discover some of the basic [Kubernetes objects](/docs/concepts/#kubernetes-objects)
 * Learn more about the [Kubernetes API](/docs/concepts/overview/kubernetes-api/)
 * If you want to write your own controller, see [Extension Patterns](/docs/concepts/extend-kubernetes/extend-cluster/#extension-patterns) in Extending Kubernetes.
-{{% /capture %}}
+
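
The control-loop idea in the controllers.md patch above is easiest to see in a desired-state spec. A minimal sketch (all names and the image are placeholders): the Deployment controller continuously compares observed Pods against this spec and recreates Pods whenever the count drifts below three.

```yaml
# Sketch: a desired state that the Deployment controller reconciles.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web                 # placeholder
spec:
  replicas: 3               # desired state; the control loop restores this count
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: web
        image: nginx        # placeholder image
```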
diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md
index 32274f5a3b..516e4eb6d9 100644
--- a/content/en/docs/concepts/architecture/nodes.md
+++ b/content/en/docs/concepts/architecture/nodes.md
@@ -3,11 +3,11 @@ reviewers:
 - caesarxuchao
 - dchen1107
 title: Nodes
-content_template: templates/concept
+content_type: concept
 weight: 10
 ---

-{{% capture overview %}}
+
 Kubernetes runs your workload by placing containers into Pods to run on _Nodes_.
 A node may be a virtual or physical machine, depending on the cluster. Each node
@@ -23,9 +23,9 @@ The [components](/docs/concepts/overview/components/#node-components) on a node
 {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}, and the
 {{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}}.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Management

@@ -332,12 +332,13 @@ the kubelet can use topology hints when making resource assignment decisions.
 See [Control Topology Management Policies on a Node](/docs/tasks/administer-cluster/topology-manager/)
 for more information.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 * Learn about the [components](/docs/concepts/overview/components/#node-components) that make up a node.
 * Read the [API definition for Node](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#node-v1-core).
 * Read the [Node](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) section of the architecture design document.
 * Read about [taints and tolerations](/docs/concepts/configuration/taint-and-toleration/).
 * Read about [cluster autoscaling](/docs/tasks/administer-cluster/cluster-management/#cluster-autoscaling).
-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/cluster-administration/addons.md b/content/en/docs/concepts/cluster-administration/addons.md
index 0347327f13..5b5110ec92 100644
--- a/content/en/docs/concepts/cluster-administration/addons.md
+++ b/content/en/docs/concepts/cluster-administration/addons.md
@@ -1,9 +1,9 @@
 ---
 title: Installing Addons
-content_template: templates/concept
+content_type: concept
 ---

-{{% capture overview %}}
+
 Add-ons extend the functionality of Kubernetes.

@@ -12,10 +12,10 @@ This page lists some of the available add-ons and links to their respective inst
 Add-ons in each section are sorted alphabetically - the ordering does not imply any preferential status.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Networking and Network Policy

@@ -55,4 +55,4 @@ There are several other add-ons documented in the deprecated [cluster/addons](ht
 Well-maintained ones should be linked to here. PRs welcome!

-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/cluster-administration/certificates.md b/content/en/docs/concepts/cluster-administration/certificates.md
index 052e7b9aa5..8cc45252ec 100644
--- a/content/en/docs/concepts/cluster-administration/certificates.md
+++ b/content/en/docs/concepts/cluster-administration/certificates.md
@@ -1,19 +1,19 @@
 ---
 title: Certificates
-content_template: templates/concept
+content_type: concept
 weight: 20
 ---

-{{% capture overview %}}
+

 When using client certificate authentication, you can generate certificates
 manually through `easyrsa`, `openssl` or `cfssl`.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ### easyrsa

@@ -249,4 +249,4 @@ You can use the `certificates.k8s.io` API to provision
 x509 certificates to use for authentication as documented
 [here](/docs/tasks/tls/managing-tls-in-a-cluster).
-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/cluster-administration/cloud-providers.md b/content/en/docs/concepts/cluster-administration/cloud-providers.md
index 7d2f2a0b66..4f49e7bc42 100644
--- a/content/en/docs/concepts/cluster-administration/cloud-providers.md
+++ b/content/en/docs/concepts/cluster-administration/cloud-providers.md
@@ -1,16 +1,16 @@
 ---
 title: Cloud Providers
-content_template: templates/concept
+content_type: concept
 weight: 30
 ---

-{{% capture overview %}}
+
 This page explains how to manage Kubernetes running on a specific
 cloud provider.
-{{% /capture %}}
-{{% capture body %}}
+
+
 ### kubeadm
 [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) is a popular option for creating kubernetes clusters.
 kubeadm has configuration options to specify configuration information for cloud providers. For example a typical
@@ -363,7 +363,7 @@ Kubernetes network plugin and should appear in the `[Route]` section of the
 [kubenet]: /docs/concepts/cluster-administration/network-plugins/#kubenet

-{{% /capture %}}
+

 ## OVirt
diff --git a/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md
index 5ba0bb30d8..fc2f55fbcd 100644
--- a/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md
+++ b/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md
@@ -3,16 +3,16 @@ reviewers:
 - davidopp
 - lavalamp
 title: Cluster Administration Overview
-content_template: templates/concept
+content_type: concept
 weight: 10
 ---

-{{% capture overview %}}
+
 The cluster administration overview is for anyone creating or administering a Kubernetes cluster.
 It assumes some familiarity with core Kubernetes [concepts](/docs/concepts/).
-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Planning a cluster

 See the guides in [Setup](/docs/setup/) for examples of how to plan, set up, and configure Kubernetes clusters. The solutions listed in this article are called *distros*.

@@ -68,6 +68,6 @@ Note: Not all distros are actively maintained. Choose distros which have been te
 * [Logging and Monitoring Cluster Activity](/docs/concepts/cluster-administration/logging/) explains how logging in Kubernetes works and how to implement it.

-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/cluster-administration/flow-control.md b/content/en/docs/concepts/cluster-administration/flow-control.md
index f9ccdca8e9..26fc1194df 100644
--- a/content/en/docs/concepts/cluster-administration/flow-control.md
+++ b/content/en/docs/concepts/cluster-administration/flow-control.md
@@ -1,10 +1,10 @@
 ---
 title: API Priority and Fairness
-content_template: templates/concept
+content_type: concept
 min-kubernetes-server-version: v1.18
 ---

-{{% capture overview %}}
+
 {{< feature-state state="alpha" for_k8s_version="v1.18" >}}

@@ -33,9 +33,9 @@ the `--max-requests-inflight` flag without the API Priority and
 Fairness feature enabled.
 {{< /caution >}}

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Enabling API Priority and Fairness

@@ -136,7 +136,7 @@ classes:
   controllers.

 * The `workload-low` priority level is for requests from any other service
-  account, which will typically include all requests from controllers runing in
+  account, which will typically include all requests from controllers running in
   Pods.

 * The `global-default` priority level handles all other traffic, e.g.

@@ -366,13 +366,13 @@ poorly-behaved workloads that may be harming system health.
 request and the PriorityLevel to which it was assigned.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 For background information on design details for API priority and fairness, see
 the [enhancement proposal](https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190228-priority-and-fairness.md).
 You can make suggestions and feature requests via [SIG API
 Machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery).
-{{% /capture %}}
\ No newline at end of file
diff --git a/content/en/docs/concepts/cluster-administration/kubelet-garbage-collection.md b/content/en/docs/concepts/cluster-administration/kubelet-garbage-collection.md
index eb41a01cfe..1590561cc9 100644
--- a/content/en/docs/concepts/cluster-administration/kubelet-garbage-collection.md
+++ b/content/en/docs/concepts/cluster-administration/kubelet-garbage-collection.md
@@ -1,20 +1,20 @@
 ---
 reviewers:
 title: Configuring kubelet Garbage Collection
-content_template: templates/concept
+content_type: concept
 weight: 70
 ---

-{{% capture overview %}}
+
 Garbage collection is a helpful function of kubelet that will clean up unused images and unused containers. Kubelet will perform garbage collection for containers every minute and garbage collection for images every five minutes.

 External garbage collection tools are not recommended as these tools can potentially break the behavior of kubelet by removing containers expected to exist.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Image Collection

@@ -77,10 +77,11 @@ Including:
 | `--low-diskspace-threshold-mb` | `--eviction-hard` or `eviction-soft` | eviction generalizes disk thresholds to other resources |
 | `--outofdisk-transition-frequency` | `--eviction-pressure-transition-period` | eviction generalizes disk pressure transition to other resources |

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 See [Configuring Out Of Resource Handling](/docs/tasks/administer-cluster/out-of-resource/) for more details.

-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/cluster-administration/logging.md b/content/en/docs/concepts/cluster-administration/logging.md
index e464a2869e..399f8f16cc 100644
--- a/content/en/docs/concepts/cluster-administration/logging.md
+++ b/content/en/docs/concepts/cluster-administration/logging.md
@@ -3,20 +3,20 @@ reviewers:
 - piosz
 - x13n
 title: Logging Architecture
-content_template: templates/concept
+content_type: concept
 weight: 60
 ---

-{{% capture overview %}}
+
 Application and systems logs can help you understand what is happening inside your cluster. The logs are particularly useful for debugging problems and monitoring cluster activity. Most modern applications have some kind of logging mechanism; as such, most container engines are likewise designed to support some kind of logging. The easiest and most embraced logging method for containerized applications is to write to the standard output and standard error streams.

 However, the native functionality provided by a container engine or runtime is usually not enough for a complete logging solution. For example, if a container crashes, a pod is evicted, or a node dies, you'll usually still want to access your application's logs. As such, logs should have a separate storage and lifecycle independent of nodes, pods, or containers. This concept is called _cluster-level-logging_. Cluster-level logging requires a separate backend to store, analyze, and query logs.
 Kubernetes provides no native storage solution for log data, but you can integrate many existing logging solutions into your Kubernetes cluster.

-{{% /capture %}}
-{{% capture body %}}
+
+
 Cluster-level logging architectures are described in assumption that
 a logging backend is present inside or outside of your cluster. If you're
@@ -267,4 +267,4 @@ You can implement cluster-level logging by exposing or pushing logs directly fro
 every application; however, the implementation for such a logging mechanism
 is outside the scope of Kubernetes.

-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/cluster-administration/manage-deployment.md b/content/en/docs/concepts/cluster-administration/manage-deployment.md
index a6dccdbf93..b052dd3a15 100644
--- a/content/en/docs/concepts/cluster-administration/manage-deployment.md
+++ b/content/en/docs/concepts/cluster-administration/manage-deployment.md
@@ -2,18 +2,18 @@ reviewers:
 - janetkuo
 title: Managing Resources
-content_template: templates/concept
+content_type: concept
 weight: 40
 ---

-{{% capture overview %}}
+
 You've deployed your application and exposed it via a service. Now what? Kubernetes provides a number of tools to help you manage your application deployment, including scaling and updating. Among the features that we will discuss in more depth are [configuration files](/docs/concepts/configuration/overview/) and [labels](/docs/concepts/overview/working-with-objects/labels/).

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Organizing resource configurations

@@ -402,7 +402,7 @@ For more information, please see [kubectl edit](/docs/reference/generated/kubect
 You can use `kubectl patch` to update API objects in place. This command supports JSON patch,
 JSON merge patch, and strategic merge patch. See
-[Update API Objects in Place Using kubectl patch](/docs/tasks/run-application/update-api-object-kubectl-patch/)
+[Update API Objects in Place Using kubectl patch](/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/)
 and
 [kubectl patch](/docs/reference/generated/kubectl/kubectl-commands/#patch).

@@ -449,11 +449,12 @@ kubectl edit deployment/my-nginx
 That's it! The Deployment will declaratively update the deployed nginx application progressively behind the scene. It ensures that only a certain number of old replicas may be down while they are being updated, and only a certain number of new replicas may be created above the desired number of pods. To learn more details about it, visit [Deployment page](/docs/concepts/workloads/controllers/deployment/).

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 - Learn about [how to use `kubectl` for application introspection and debugging](/docs/tasks/debug-application-cluster/debug-application-introspection/).
 - See [Configuration Best Practices and Tips](/docs/concepts/configuration/overview/).

-{{% /capture %}}
+
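
As a sketch of the `kubectl patch` usage referenced in the hunk above: a strategic-merge patch body in YAML, where the deployment name `my-nginx` matches the page's running example and the target image tag is a placeholder. It could be applied with something like `kubectl patch deployment my-nginx --type strategic -p "$(cat patch.yaml)"`.

```yaml
# patch.yaml (sketch): a strategic merge patch matches list entries by their
# merge key (here, the container name), so only that container's image changes.
spec:
  template:
    spec:
      containers:
      - name: nginx
        image: nginx:1.16.1   # placeholder target image
```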
diff --git a/content/en/docs/concepts/cluster-administration/monitoring.md b/content/en/docs/concepts/cluster-administration/monitoring.md
index e02ac8231c..fbea5e69c1 100644
--- a/content/en/docs/concepts/cluster-administration/monitoring.md
+++ b/content/en/docs/concepts/cluster-administration/monitoring.md
@@ -4,21 +4,21 @@ reviewers:
 - brancz
 - logicalhan
 - RainbowMango
-content_template: templates/concept
+content_type: concept
 weight: 60
 aliases:
 - controller-metrics.md
 ---

-{{% capture overview %}}
+
 System component metrics can give a better look into what is happening inside them. Metrics are particularly useful for building dashboards and alerts.

 Metrics in Kubernetes control plane are emitted in [prometheus format](https://prometheus.io/docs/instrumenting/exposition_formats/) and are human readable.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Metrics in Kubernetes

@@ -124,10 +124,11 @@ cloudprovider_gce_api_request_duration_seconds { request = "detach_disk"}
 cloudprovider_gce_api_request_duration_seconds { request = "list_disk"}
 ```

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 * Read about the [Prometheus text format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) for metrics
 * See the list of [stable Kubernetes metrics](https://github.com/kubernetes/kubernetes/blob/master/test/instrumentation/testdata/stable-metrics-list.yaml)
 * Read about the [Kubernetes deprecation policy](https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior )
-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/cluster-administration/networking.md b/content/en/docs/concepts/cluster-administration/networking.md
index c260963d87..29044be250 100644
--- a/content/en/docs/concepts/cluster-administration/networking.md
+++ b/content/en/docs/concepts/cluster-administration/networking.md
@@ -2,11 +2,11 @@ reviewers:
 - thockin
 title: Cluster Networking
-content_template: templates/concept
+content_type: concept
 weight: 50
 ---

-{{% capture overview %}}
+
 Networking is a central part of Kubernetes, but it can be challenging to
 understand exactly how it is expected to work. There are 4 distinct networking
 problems to address:
@@ -17,10 +17,10 @@ problems to address:
 3. Pod-to-Service communications: this is covered by [services](/docs/concepts/services-networking/service/).
 4. External-to-Service communications: this is covered by [services](/docs/concepts/services-networking/service/).

-{{% /capture %}}
-{{% capture body %}}
+
+
 Kubernetes is all about sharing machines between applications. Typically,
 sharing machines requires ensuring that two applications do not try to use the
@@ -312,12 +312,13 @@ Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-pl
 or stand-alone. In either version, it doesn't require any configuration or extra code
 to run, and in both cases, the network provides one IP address per pod - as is standard for Kubernetes.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 The early design of the networking model and its rationale, and some future
 plans are described in more detail in the
 [networking design document](https://git.k8s.io/community/contributors/design-proposals/network/networking.md).

-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/cluster-administration/proxies.md b/content/en/docs/concepts/cluster-administration/proxies.md
index 8e03334d12..9bf204bd9f 100644
--- a/content/en/docs/concepts/cluster-administration/proxies.md
+++ b/content/en/docs/concepts/cluster-administration/proxies.md
@@ -1,14 +1,14 @@
 ---
 title: Proxies in Kubernetes
-content_template: templates/concept
+content_type: concept
 weight: 90
 ---

-{{% capture overview %}}
+
 This page explains proxies used with Kubernetes.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Proxies

@@ -62,6 +62,6 @@ will typically ensure that the latter types are setup correctly.

 Proxies have replaced redirect capabilities. Redirects have been deprecated.
-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/configuration/configmap.md b/content/en/docs/concepts/configuration/configmap.md
index 355386f3e7..3e9ddf718f 100644
--- a/content/en/docs/concepts/configuration/configmap.md
+++ b/content/en/docs/concepts/configuration/configmap.md
@@ -1,10 +1,10 @@
 ---
 title: ConfigMaps
-content_template: templates/concept
+content_type: concept
 weight: 20
 ---

-{{% capture overview %}}
+
 {{< glossary_definition term_id="configmap" prepend="A ConfigMap is" length="all" >}}

@@ -15,9 +15,9 @@ If the data you want to store are confidential, use a
 or use additional (third party) tools to keep your data private.
 {{< /caution >}}

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Motivation

 Use a ConfigMap for setting configuration data separately from application code.

@@ -157,13 +157,99 @@ or
 {{< glossary_tooltip text="operators" term_id="operator-pattern" >}} that
 adjust their behavior based on a ConfigMap.
 {{< /note >}}

+## Using ConfigMaps
+
+ConfigMaps can be mounted as data volumes. ConfigMaps can also be used by other
+parts of the system, without being directly exposed to the Pod. For example,
+ConfigMaps can hold data that other parts of the system should use for configuration.
+
+### Using ConfigMaps as files from a Pod
+
+To consume a ConfigMap in a volume in a Pod:
+
+1. Create a config map or use an existing one. Multiple Pods can reference the same config map.
+1. Modify your Pod definition to add a volume under `.spec.volumes[]`. Name the volume anything, and have a `.spec.volumes[].configmap.localObjectReference` field set to reference your ConfigMap object.
+1. Add a `.spec.containers[].volumeMounts[]` to each container that needs the config map. Specify `.spec.containers[].volumeMounts[].readOnly = true` and `.spec.containers[].volumeMounts[].mountPath` to an unused directory name where you would like the config map to appear.
+1. Modify your image or command line so that the program looks for files in that directory. Each key in the config map `data` map becomes the filename under `mountPath`.
+
+This is an example of a Pod that mounts a ConfigMap in a volume:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: mypod
+spec:
+  containers:
+  - name: mypod
+    image: redis
+    volumeMounts:
+    - name: foo
+      mountPath: "/etc/foo"
+      readOnly: true
+  volumes:
+  - name: foo
+    configMap:
+      name: myconfigmap
+```
+
+Each ConfigMap you want to use needs to be referred to in `.spec.volumes`.
+
+If there are multiple containers in the Pod, then each container needs its
+own `volumeMounts` block, but only one `.spec.volumes` is needed per ConfigMap.
+
+#### Mounted ConfigMaps are updated automatically
+
+When a config map currently consumed in a volume is updated, projected keys are eventually updated as well.
+The kubelet checks whether the mounted config map is fresh on every periodic sync.
+However, the kubelet uses its local cache for getting the current value of the ConfigMap.
+The type of the cache is configurable using the `ConfigMapAndSecretChangeDetectionStrategy` field in
+the [KubeletConfiguration struct](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go).
+A ConfigMap can be either propagated by watch (default), ttl-based, or simply redirecting
+all requests directly to the API server.
+As a result, the total delay from the moment when the ConfigMap is updated to the moment
+when new keys are projected to the Pod can be as long as the kubelet sync period + cache
+propagation delay, where the cache propagation delay depends on the chosen cache type
+(it equals the watch propagation delay, the TTL of the cache, or zero, respectively).
+
+{{< feature-state for_k8s_version="v1.18" state="alpha" >}}
+
+The Kubernetes alpha feature _Immutable Secrets and ConfigMaps_ provides an option to set
+individual Secrets and ConfigMaps as immutable. For clusters that extensively use ConfigMaps
+(at least tens of thousands of unique ConfigMap to Pod mounts), preventing changes to their
+data has the following advantages:
+
+- protects you from accidental (or unwanted) updates that could cause application outages
+- improves performance of your cluster by significantly reducing load on kube-apiserver, by
+closing watches for config maps marked as immutable.
+
+To use this feature, enable the `ImmutableEphemeralVolumes`
+[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) and set
+your Secret or ConfigMap `immutable` field to `true`. For example:
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  ...
+data:
+  ...
+immutable: true
+```
+
+{{< note >}}
+Once a ConfigMap or Secret is marked as immutable, it is _not_ possible to revert this change
+nor to mutate the contents of the `data` field. You can only delete and recreate the ConfigMap.
+Existing Pods maintain a mount point to the deleted ConfigMap - it is recommended to recreate
+these pods.
+{{< /note >}}
+
+
+## {{% heading "whatsnext" %}}

-{{% /capture %}}
-{{% capture whatsnext %}}
 * Read about [Secrets](/docs/concepts/configuration/secret/).
 * Read [Configure a Pod to Use a ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/).
 * Read [The Twelve-Factor App](https://12factor.net/) to understand the motivation for
   separating code from configuration.

-{{% /capture %}}
+
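
Filling in the elided fields of the example above, a complete immutable ConfigMap might look like the following sketch; the name, key, and value are placeholders, and it assumes the `ImmutableEphemeralVolumes` feature gate is enabled as described in the patch.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config        # placeholder name
data:
  log_level: "info"       # placeholder key/value
immutable: true           # cannot be unset later; delete and recreate instead
```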
diff --git a/content/en/docs/concepts/configuration/manage-resources-containers.md b/content/en/docs/concepts/configuration/manage-resources-containers.md
index 69ea4a255d..f8989c4a5d 100644
--- a/content/en/docs/concepts/configuration/manage-resources-containers.md
+++ b/content/en/docs/concepts/configuration/manage-resources-containers.md
@@ -1,6 +1,6 @@
 ---
 title: Managing Resources for Containers
-content_template: templates/concept
+content_type: concept
 weight: 40
 feature:
   title: Automatic bin packing
@@ -8,7 +8,7 @@ feature:
     Automatically places containers based on their resource requirements and other constraints, while not sacrificing availability. Mix critical and best-effort workloads in order to drive up utilization and save even more resources.
 ---

-{{% capture overview %}}
+
 When you specify a {{< glossary_tooltip term_id="pod" >}}, you can optionally specify how
 much of each resource a {{< glossary_tooltip text="Container" term_id="container" >}} needs.
@@ -21,10 +21,10 @@ allowed to use more of that resource than the limit you set. The kubelet also re
 at least the _request_ amount of that system resource specifically for that container
 to use.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Requests and limits

@@ -740,10 +740,11 @@ You can see that the Container was terminated because of `reason:OOM Killed`, wh

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 * Get hands-on experience [assigning Memory resources to Containers and Pods](/docs/tasks/configure-pod-container/assign-memory-resource/).

@@ -758,4 +759,4 @@ You can see that the Container was terminated because of `reason:OOM Killed`, wh
 * Read about [project quotas](http://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html) in XFS

-{{% /capture %}}
+
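
To make the requests/limits wording in the overview above concrete, a minimal sketch where the Pod name, image, and amounts are all placeholders:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: resource-demo     # placeholder
spec:
  containers:
  - name: app
    image: nginx          # placeholder
    resources:
      requests:           # the kubelet reserves at least this much for the container
        memory: "64Mi"
        cpu: "250m"
      limits:             # exceeding the memory limit gets the container OOM killed;
        memory: "128Mi"   # exceeding the CPU limit gets it throttled
        cpu: "500m"
```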
diff --git a/content/en/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/en/docs/concepts/configuration/organize-cluster-access-kubeconfig.md
index 480b708018..df767bbc3e 100644
--- a/content/en/docs/concepts/configuration/organize-cluster-access-kubeconfig.md
+++ b/content/en/docs/concepts/configuration/organize-cluster-access-kubeconfig.md
@@ -1,10 +1,10 @@
 ---
 title: Organizing Cluster Access Using kubeconfig Files
-content_template: templates/concept
+content_type: concept
 weight: 60
 ---

-{{% capture overview %}}
+
 Use kubeconfig files to organize information about clusters, users, namespaces, and
 authentication mechanisms. The `kubectl` command-line tool uses kubeconfig files to
@@ -25,10 +25,10 @@ variable or by setting the
 For step-by-step instructions on creating and specifying kubeconfig files, see
 [Configure Access to Multiple Clusters](/docs/tasks/access-application-cluster/configure-access-multiple-clusters).

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Supporting multiple clusters, users, and authentication mechanisms

@@ -143,14 +143,15 @@ File references on the command line are relative to the current working director
 In `$HOME/.kube/config`, relative paths are stored relatively, and absolute paths
 are stored absolutely.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 * [Configure Access to Multiple Clusters](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)
 * [`kubectl config`](/docs/reference/generated/kubectl/kubectl-commands#config)

-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/configuration/overview.md b/content/en/docs/concepts/configuration/overview.md
index b7b7b829db..fe8cd3002d 100644
--- a/content/en/docs/concepts/configuration/overview.md
+++ b/content/en/docs/concepts/configuration/overview.md
@@ -2,17 +2,17 @@ reviewers:
 - mikedanese
 title: Configuration Best Practices
-content_template: templates/concept
+content_type: concept
 weight: 10
 ---

-{{% capture overview %}}
+
 This document highlights and consolidates configuration best practices that are introduced throughout the user guide, Getting Started documentation, and examples.

 This is a living document. If you think of something that is not on this list but might be useful to others, please don't hesitate to file an issue or submit a PR.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## General Configuration Tips

 - When defining configurations, specify the latest stable API version.

@@ -105,5 +105,5 @@ The caching semantics of the underlying image provider make even `imagePullPolic
 - Use `kubectl run` and `kubectl expose` to quickly create single-container Deployments and Services. See [Use a Service to Access an Application in a Cluster](/docs/tasks/access-application-cluster/service-access-application-cluster/) for an example.

-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/configuration/pod-overhead.md b/content/en/docs/concepts/configuration/pod-overhead.md
index 9661264820..7057383dac 100644
--- a/content/en/docs/concepts/configuration/pod-overhead.md
+++ b/content/en/docs/concepts/configuration/pod-overhead.md
@@ -4,11 +4,11 @@ reviewers:
 - egernst
 - tallclair
 title: Pod Overhead
-content_template: templates/concept
+content_type: concept
 weight: 50
 ---

-{{% capture overview %}}
+
 {{< feature-state for_k8s_version="v1.18" state="beta" >}}

@@ -19,10 +19,10 @@ _Pod Overhead_ is a feature for accounting for the resources consumed by the Pod
 on top of the container requests & limits.

-{{% /capture %}}
-{{% capture body %}}
+
+
 In Kubernetes, the Pod's overhead is set at
 [admission](/docs/reference/access-authn-authz/extensible-admission-controllers/#what-are-admission-webhooks)
@@ -188,11 +188,12 @@ running with a defined Overhead. This functionality is not available in the 1.9
 kube-state-metrics, but is expected in a following release. Users will need to build kube-state-metrics
 from source in the meantime.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 * [RuntimeClass](/docs/concepts/containers/runtime-class/)
 * [PodOverhead Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md)

-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/configuration/pod-priority-preemption.md b/content/en/docs/concepts/configuration/pod-priority-preemption.md
index c9bddd7e3e..9bfc514257 100644
--- a/content/en/docs/concepts/configuration/pod-priority-preemption.md
+++ b/content/en/docs/concepts/configuration/pod-priority-preemption.md
@@ -3,11 +3,11 @@ reviewers:
 - davidopp
 - wojtek-t
 title: Pod Priority and Preemption
-content_template: templates/concept
+content_type: concept
 weight: 70
 ---

-{{% capture overview %}}
+
 {{< feature-state for_k8s_version="v1.14" state="stable" >}}

@@ -16,9 +16,9 @@ importance of a Pod relative to other Pods. If a Pod cannot be scheduled, the
 scheduler tries to preempt (evict) lower priority Pods to make scheduling of the
 pending Pod possible.

-{{% /capture %}}
-{{% capture body %}}
+
+
 {{< warning >}}

@@ -407,7 +407,8 @@ usage does not exceed their requests. If a Pod with lower priority is not
 exceeding its requests, it won't be evicted. Another Pod with higher priority
 that exceeds its requests may be evicted.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 * Read about using ResourceQuotas in connection with PriorityClasses: [limit Priority Class consumption by default](/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default)

-{{% /capture %}}
+
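
A minimal sketch of the priority mechanism covered in the pod-priority-preemption patch above; the class name, value, Pod name, and image are placeholders, and the `scheduling.k8s.io/v1` API matches the page's stable-since-v1.14 feature state:

```yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: high-priority              # placeholder
value: 1000000                     # larger values schedule first and may preempt lower ones
globalDefault: false
description: "Placeholder class for critical pods."
---
apiVersion: v1
kind: Pod
metadata:
  name: important-app              # placeholder
spec:
  priorityClassName: high-priority # resolves to the class's value at admission
  containers:
  - name: app
    image: nginx                   # placeholder
```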
diff --git a/content/en/docs/concepts/configuration/resource-bin-packing.md b/content/en/docs/concepts/configuration/resource-bin-packing.md
index 0d475791ce..5d030d94e5 100644
--- a/content/en/docs/concepts/configuration/resource-bin-packing.md
+++ b/content/en/docs/concepts/configuration/resource-bin-packing.md
@@ -4,19 +4,19 @@ reviewers:
 - k82cn
 - ahg-g
 title: Resource Bin Packing for Extended Resources
-content_template: templates/concept
+content_type: concept
 weight: 50
 ---

-{{% capture overview %}}
+
 {{< feature-state for_k8s_version="v1.16" state="alpha" >}}

 The kube-scheduler can be configured to enable bin packing of resources along with extended resources using `RequestedToCapacityRatioResourceAllocation` priority function. Priority functions can be used to fine-tune the kube-scheduler as per custom needs.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Enabling Bin Packing using RequestedToCapacityRatioResourceAllocation

@@ -194,4 +194,4 @@ NodeScore = (5 * 5) + (7 * 1) + (10 * 3) / (5 + 1 + 3)
 ```

-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/configuration/secret.md b/content/en/docs/concepts/configuration/secret.md
index c7b123cacc..5c553b5cc6 100644
--- a/content/en/docs/concepts/configuration/secret.md
+++ b/content/en/docs/concepts/configuration/secret.md
@@ -2,7 +2,7 @@ reviewers:
 - mikedanese
 title: Secrets
-content_template: templates/concept
+content_type: concept
 feature:
   title: Secret and configuration management
   description: >
@@ -10,16 +10,16 @@ feature:
 weight: 30
 ---

-{{% capture overview %}}
+
 Kubernetes Secrets let you store and manage sensitive information, such
 as passwords, OAuth tokens, and ssh keys. Storing confidential information in a Secret
 is safer and more flexible than putting it verbatim in a
 {{< glossary_tooltip term_id="pod" >}} definition or in a
 {{< glossary_tooltip text="container image" term_id="image" >}}. See
 [Secrets design document](https://git.k8s.io/community/contributors/design-proposals/auth/secrets.md) for more information.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Overview of Secrets

@@ -29,12 +29,13 @@ Pod specification or in an image. Users can create secrets and the system
 also creates some secrets. To use a secret, a Pod needs to reference the secret.

-A secret can be used with a Pod in two ways:
+A secret can be used with a Pod in three ways:

-- As files in a
+- As [files](#using-secrets-as-files-from-a-pod) in a
   {{< glossary_tooltip text="volume" term_id="volume" >}} mounted on one or more of its
   containers.
-- By the kubelet when pulling images for the Pod.
+- As a [container environment variable](#using-secrets-as-environment-variables).
+- By the [kubelet when pulling images](#using-imagepullsecrets) for the Pod.

 ### Built-in Secrets

@@ -725,7 +726,7 @@ data has the following advantages:

 - improves performance of your cluster by significantly reducing load on kube-apiserver, by
 closing watches for secrets marked as immutable.

-To use this feature, enable the `ImmutableEmphemeralVolumes`
+To use this feature, enable the `ImmutableEphemeralVolumes`
 [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) and set
 your Secret or ConfigMap `immutable` field to `true`. For example:
 ```yaml
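
The "three ways" list in the secret.md hunk above, condensed into one sketch; the Pod and Secret names are placeholders, and `regcred` stands in for a pre-created docker-registry Secret:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: secret-demo              # placeholder
spec:
  containers:
  - name: app
    image: redis                 # placeholder
    env:
    - name: PASSWORD             # (2) as a container environment variable
      valueFrom:
        secretKeyRef:
          name: mysecret         # placeholder Secret
          key: password
    volumeMounts:
    - name: creds                # (1) as files in a mounted volume
      mountPath: "/etc/creds"
      readOnly: true
  volumes:
  - name: creds
    secret:
      secretName: mysecret
  imagePullSecrets:              # (3) used by the kubelet when pulling the image
  - name: regcred                # placeholder registry-credentials Secret
```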
* Get hands-on experience [attaching handlers to Container lifecycle events](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). -{{% /capture %}} + diff --git a/content/en/docs/concepts/containers/container-lifecycle-hooks.md b/content/en/docs/concepts/containers/container-lifecycle-hooks.md index fe810d23c5..386e4d00bb 100644 --- a/content/en/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/en/docs/concepts/containers/container-lifecycle-hooks.md @@ -3,19 +3,19 @@ reviewers: - mikedanese - thockin title: Container Lifecycle Hooks -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + This page describes how kubelet managed Containers can use the Container lifecycle hook framework to run code triggered by events during their management lifecycle. -{{% /capture %}} -{{% capture body %}} + + ## Overview @@ -112,12 +112,13 @@ Events: 1m 22s 2 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Warning FailedPostStartHook ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about the [Container environment](/docs/concepts/containers/container-environment/). * Get hands-on experience [attaching handlers to Container lifecycle events](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). -{{% /capture %}} + diff --git a/content/en/docs/concepts/containers/images.md b/content/en/docs/concepts/containers/images.md index 3d27355e3a..fd83a5be28 100644 --- a/content/en/docs/concepts/containers/images.md +++ b/content/en/docs/concepts/containers/images.md @@ -3,20 +3,20 @@ reviewers: - erictune - thockin title: Images -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + You create your Docker image and push it to a registry before referring to it in a Kubernetes pod. The `image` property of a container supports the same syntax as the `docker` command does, including private registries and tags. -{{% /capture %}} -{{% capture body %}} + + ## Updating Images @@ -66,6 +66,7 @@ Credentials can be provided in several ways: - Using Oracle Cloud Infrastructure Registry (OCIR) - use IAM roles and policies to control access to OCIR repositories - Using Azure Container Registry (ACR) + - use IAM roles and policies to control access to ACR repositories - Using IBM Cloud Container Registry - use IAM roles and policies to grant access to IBM Cloud Container Registry - Configuring Nodes to Authenticate to a Private Registry @@ -130,9 +131,13 @@ Troubleshooting: - `aws_credentials.go:116] Got ECR credentials from ECR API for .dkr.ecr..amazonaws.com` ### Using Azure Container Registry (ACR) -When using [Azure Container Registry](https://azure.microsoft.com/en-us/services/container-registry/) -you can authenticate using either an admin user or a service principal. -In either case, authentication is done via standard Docker authentication. These instructions assume the +Kubernetes has native support for the [Azure Container +Registry (ACR)](https://azure.microsoft.com/en-us/services/container-registry/), when running on Azure Kubernetes Service (AKS). + +The AKS cluster service principal must have `AcrPull` permission in the ACR instance. See [Authenticate with Azure Container Registry from Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/cluster-container-registry-integration) for configuration instructions. Then, simply use the full ACR image name (e.g. 
diff --git a/content/en/docs/concepts/containers/images.md b/content/en/docs/concepts/containers/images.md
index 3d27355e3a..fd83a5be28 100644
--- a/content/en/docs/concepts/containers/images.md
+++ b/content/en/docs/concepts/containers/images.md
@@ -3,20 +3,20 @@ reviewers:
 - erictune
 - thockin
 title: Images
-content_template: templates/concept
+content_type: concept
 weight: 10
 ---

-{{% capture overview %}}
+
 You create your Docker image and push it to a registry before referring to it in a Kubernetes pod.

 The `image` property of a container supports the same syntax as the `docker` command does, including private registries and tags.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Updating Images

@@ -66,6 +66,7 @@ Credentials can be provided in several ways:
   - Using Oracle Cloud Infrastructure Registry (OCIR)
     - use IAM roles and policies to control access to OCIR repositories
   - Using Azure Container Registry (ACR)
+    - use IAM roles and policies to control access to ACR repositories
   - Using IBM Cloud Container Registry
     - use IAM roles and policies to grant access to IBM Cloud Container Registry
   - Configuring Nodes to Authenticate to a Private Registry
@@ -130,9 +131,13 @@ Troubleshooting:
   - `aws_credentials.go:116] Got ECR credentials from ECR API for .dkr.ecr..amazonaws.com`

 ### Using Azure Container Registry (ACR)
-When using [Azure Container Registry](https://azure.microsoft.com/en-us/services/container-registry/)
-you can authenticate using either an admin user or a service principal.
-In either case, authentication is done via standard Docker authentication. These instructions assume the
+Kubernetes has native support for the [Azure Container
+Registry (ACR)](https://azure.microsoft.com/en-us/services/container-registry/), when running on Azure Kubernetes Service (AKS).
+
+The AKS cluster service principal must have `AcrPull` permission in the ACR instance. See [Authenticate with Azure Container Registry from Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/cluster-container-registry-integration) for configuration instructions. Then, use the full ACR image name (e.g. `my_registry.azurecr.io/image:tag`).
+
+You may also authenticate using either an ACR admin user or a service principal.
+In this case, authentication is done via standard Docker authentication. The following instructions assume the
 [azure-cli](https://github.com/azure/azure-cli) command line tool.

 You first need to create a registry and generate credentials, complete documentation for this can be found in
@@ -370,4 +375,4 @@ common use cases and suggested solutions.
 If you need access to multiple registries, you can create one secret for each registry.
 Kubelet will merge any `imagePullSecrets` into a single virtual `.docker/config.json`

-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/containers/overview.md b/content/en/docs/concepts/containers/overview.md
index 49162710d7..1d996b8b93 100644
--- a/content/en/docs/concepts/containers/overview.md
+++ b/content/en/docs/concepts/containers/overview.md
@@ -3,11 +3,11 @@ reviewers:
 - erictune
 - thockin
 title: Containers overview
-content_template: templates/concept
+content_type: concept
 weight: 1
 ---

-{{% capture overview %}}
+
 Containers are a technology for packaging the (compiled) code for an
 application along with the dependencies it needs at run time. Each
@@ -18,10 +18,10 @@ run it.
 Containers decouple applications from underlying host infrastructure.
 This makes deployment easier in different cloud or OS environments.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Container images

 A [container image](/docs/concepts/containers/images/) is a ready-to-run
@@ -38,8 +38,9 @@ the change, then recreate the container to start from the updated image.

 {{< glossary_definition term_id="container-runtime" length="all" >}}

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 * Read about [container images](/docs/concepts/containers/images/)
 * Read about [Pods](/docs/concepts/workloads/pods/)

-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/containers/runtime-class.md b/content/en/docs/concepts/containers/runtime-class.md
index dca6f2d0a8..d1857f3807 100644
--- a/content/en/docs/concepts/containers/runtime-class.md
+++ b/content/en/docs/concepts/containers/runtime-class.md
@@ -3,11 +3,11 @@ reviewers:
 - tallclair
 - dchen1107
 title: Runtime Class
-content_template: templates/concept
+content_type: concept
 weight: 20
 ---

-{{% capture overview %}}
+
 {{< feature-state for_k8s_version="v1.14" state="beta" >}}

@@ -16,10 +16,10 @@ This page describes the RuntimeClass resource and runtime selection mechanism.

 RuntimeClass is a feature for selecting the container runtime configuration. The container runtime
 configuration is used to run a Pod's containers.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Motivation

@@ -180,12 +180,13 @@ Pod overhead is defined in RuntimeClass through the `overhead` fields. Through t
 you can specify the overhead of running pods utilizing this RuntimeClass and ensure these overheads
 are accounted for in Kubernetes.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 - [RuntimeClass Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/runtime-class.md)
 - [RuntimeClass Scheduling Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/runtime-class-scheduling.md)
 - Read about the [Pod Overhead](/docs/concepts/configuration/pod-overhead/) concept
 - [PodOverhead Feature Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md)

-{{% /capture %}}
+
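
A sketch of the runtime selection mechanism from the runtime-class.md patch above, assuming a node whose CRI runtime is configured with a `runsc` (gVisor) handler; all names are placeholders, and the `node.k8s.io/v1beta1` API matches the page's beta feature state:

```yaml
apiVersion: node.k8s.io/v1beta1
kind: RuntimeClass
metadata:
  name: gvisor                    # placeholder class name
handler: runsc                    # must match a handler configured in the CRI runtime
---
apiVersion: v1
kind: Pod
metadata:
  name: sandboxed-pod             # placeholder
spec:
  runtimeClassName: gvisor        # this Pod's containers run with the runsc handler
  containers:
  - name: app
    image: nginx                  # placeholder
```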
diff --git a/content/en/docs/concepts/example-concept-template.md b/content/en/docs/concepts/example-concept-template.md
index 26ce263ef4..adf3741f90 100644
--- a/content/en/docs/concepts/example-concept-template.md
+++ b/content/en/docs/concepts/example-concept-template.md
@@ -2,11 +2,11 @@
 title: Example Concept Template
 reviewers:
 - chenopis
-content_template: templates/concept
+content_type: concept
 toc_hide: true
 ---

-{{% capture overview %}}
+
 {{< note >}}
 Be sure to also [create an entry in the table of contents](/docs/home/contribute/write-new-topic/#creating-an-entry-in-the-table-of-contents) for your new document.
 {{< /note >}}

 This page explains ...

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Understanding ...

@@ -26,15 +26,16 @@ Kubernetes provides ...

 To use ...

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 **[Optional Section]**

-* Learn more about [Writing a New Topic](/docs/home/contribute/write-new-topic/).
-* See [Using Page Templates - Concept template](/docs/home/contribute/page-templates/#concept_template) for how to use this template.
-
-{{% /capture %}}
+* Learn more about [Writing a New Topic](/docs/home/contribute/style/write-new-topic/).
+* See [Page Content Types - Concept](/docs/home/contribute/style/page-content-types/#concept).
+
+
diff --git a/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md b/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md
index 8bc6e22861..1f47323301 100644
--- a/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md
+++ b/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md
@@ -4,20 +4,20 @@ reviewers:
 - lavalamp
 - cheftako
 - chenopis
-content_template: templates/concept
+content_type: concept
 weight: 20
 ---

-{{% capture overview %}}
+
 The aggregation layer allows Kubernetes to be extended with additional APIs, beyond what is offered by the core Kubernetes APIs.
 The additional APIs can either be ready-made solutions such as [service-catalog](/docs/concepts/extend-kubernetes/service-catalog/), or APIs that you develop yourself.

 The aggregation layer is different from [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/), which are a way to make the {{< glossary_tooltip term_id="kube-apiserver" text="kube-apiserver" >}} recognise new kinds of object.

-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Aggregation layer

@@ -34,13 +34,14 @@ If your extension API server cannot achieve that latency requirement, consider m
 `EnableAggregatedDiscoveryTimeout=false` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) on the kube-apiserver
 to disable the timeout restriction. This deprecated feature gate will be removed in a future release.

-{{% /capture %}}
-{{% capture whatsnext %}}
-* To get the aggregator working in your environment, [configure the aggregation layer](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/).
-* Then, [setup an extension api-server](/docs/tasks/access-kubernetes-api/setup-extension-api-server/) to work with the aggregation layer.
-* Also, learn how to [extend the Kubernetes API using Custom Resource Definitions](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/).
+
+## {{% heading "whatsnext" %}}
+
+
+* To get the aggregator working in your environment, [configure the aggregation layer](/docs/tasks/extend-kubernetes/configure-aggregation-layer/).
+* Then, [setup an extension api-server](/docs/tasks/extend-kubernetes/setup-extension-api-server/) to work with the aggregation layer.
+* Also, learn how to [extend the Kubernetes API using Custom Resource Definitions](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/).
 * Read the specification for [APIService](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#apiservice-v1-apiregistration-k8s-io)

-{{% /capture %}}
+
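
A sketch of the APIService registration that the aggregation layer described above consumes; the group, namespace, and service names are placeholders, and the priority values are illustrative:

```yaml
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1alpha1.wardle.example.com   # convention: <version>.<group>; placeholder group
spec:
  group: wardle.example.com
  version: v1alpha1
  groupPriorityMinimum: 1000          # ordering relative to other API groups
  versionPriority: 15                 # ordering within the group
  service:                            # the extension apiserver the aggregator proxies to
    name: api                         # placeholder Service
    namespace: wardle                 # placeholder namespace
  caBundle: "<base64-encoded-CA>"     # placeholder trust bundle for TLS to the service
```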
-{{% /capture %}} -{{% capture whatsnext %}} -* To get the aggregator working in your environment, [configure the aggregation layer](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/). -* Then, [setup an extension api-server](/docs/tasks/access-kubernetes-api/setup-extension-api-server/) to work with the aggregation layer. -* Also, learn how to [extend the Kubernetes API using Custom Resource Definitions](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/). +## {{% heading "whatsnext" %}} + + +* To get the aggregator working in your environment, [configure the aggregation layer](/docs/tasks/extend-kubernetes/configure-aggregation-layer/). +* Then, [setup an extension api-server](/docs/tasks/extend-kubernetes/setup-extension-api-server/) to work with the aggregation layer. +* Also, learn how to [extend the Kubernetes API using Custom Resource Definitions](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/). * Read the specification for [APIService](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#apiservice-v1-apiregistration-k8s-io) -{{% /capture %}} + diff --git a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index 976449d0cb..f2ca2e2435 100644 --- a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -3,19 +3,19 @@ title: Custom Resources reviewers: - enisoc - deads2k -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + *Custom resources* are extensions of the Kubernetes API. This page discusses when to add a custom resource to your Kubernetes cluster and when to use a standalone service. It describes the two methods for adding custom resources and how to choose between them. -{{% /capture %}} -{{% capture body %}} + + ## Custom resources A *resource* is an endpoint in the [Kubernetes API](/docs/reference/using-api/api-overview/) that stores a collection of @@ -128,7 +128,7 @@ Regardless of how they are installed, the new resources are referred to as Custo ## CustomResourceDefinitions -The [CustomResourceDefinition](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/) +The [CustomResourceDefinition](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) API resource allows you to define custom resources. Defining a CRD object creates a new custom resource with a name and schema that you specify. The Kubernetes API serves and handles the storage of your custom resource. @@ -178,17 +178,17 @@ Aggregated APIs offer more advanced API features and customization of other feat | Feature | Description | CRDs | Aggregated API | | ------- | ----------- | ---- | -------------- | -| Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Yes. Most validation can be specified in the CRD using [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation). Any other validations supported by addition of a [Validating Webhook](/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook-alpha-in-1-8-beta-in-1-9). 
| Yes, arbitrary validation checks |
-| Defaulting | See above | Yes, either via [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#defaulting) `default` keyword (GA in 1.17), or via a [Mutating Webhook](/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook) (though this will not be run when reading from etcd for old objects). | Yes |
-| Multi-versioning | Allows serving the same object through two API versions. Can help ease API changes like renaming fields. Less important if you control your client versions. | [Yes](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning) | Yes |
+| Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Yes. Most validation can be specified in the CRD using [OpenAPI v3.0 validation](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). Any other validations supported by addition of a [Validating Webhook](/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook-alpha-in-1-8-beta-in-1-9). | Yes, arbitrary validation checks |
+| Defaulting | See above | Yes, either via [OpenAPI v3.0 validation](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#defaulting) `default` keyword (GA in 1.17), or via a [Mutating Webhook](/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook) (though this will not be run when reading from etcd for old objects). | Yes |
+| Multi-versioning | Allows serving the same object through two API versions. Can help ease API changes like renaming fields. Less important if you control your client versions. | [Yes](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning) | Yes |
 | Custom Storage | If you need storage with a different performance mode (for example, a time-series database instead of key-value store) or isolation for security (for example, encryption of sensitive information, etc.) | No | Yes |
 | Custom Business Logic | Perform arbitrary checks or actions when creating, reading, updating or deleting an object | Yes, using [Webhooks](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks). | Yes |
-| Scale Subresource | Allows systems like HorizontalPodAutoscaler and PodDisruptionBudget interact with your new resource | [Yes](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#scale-subresource) | Yes |
-| Status Subresource | Allows fine-grained access control where user writes the spec section and the controller writes the status section. Allows incrementing object Generation on custom resource data mutation (requires separate spec and status sections in the resource) | [Yes](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#status-subresource) | Yes |
+| Scale Subresource | Allows systems like HorizontalPodAutoscaler and PodDisruptionBudget to interact with your new resource | [Yes](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#scale-subresource) | Yes |
+| Status Subresource | Allows fine-grained access control where user writes the spec section and the controller writes the status section.
Allows incrementing object Generation on custom resource data mutation (requires separate spec and status sections in the resource) | [Yes](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#status-subresource) | Yes | | Other Subresources | Add operations other than CRUD, such as "logs" or "exec". | No | Yes | -| strategic-merge-patch | The new endpoints support PATCH with `Content-Type: application/strategic-merge-patch+json`. Useful for updating objects that may be modified both locally, and by the server. For more information, see ["Update API Objects in Place Using kubectl patch"](/docs/tasks/run-application/update-api-object-kubectl-patch/) | No | Yes | +| strategic-merge-patch | The new endpoints support PATCH with `Content-Type: application/strategic-merge-patch+json`. Useful for updating objects that may be modified both locally, and by the server. For more information, see ["Update API Objects in Place Using kubectl patch"](/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/) | No | Yes | | Protocol Buffers | The new resource supports clients that want to use Protocol Buffers | No | Yes | -| OpenAPI Schema | Is there an OpenAPI (swagger) schema for the types that can be dynamically fetched from the server? Is the user protected from misspelling field names by ensuring only allowed fields are set? Are types enforced (in other words, don't put an `int` in a `string` field?) | Yes, based on the [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) schema (GA in 1.16). | Yes | +| OpenAPI Schema | Is there an OpenAPI (swagger) schema for the types that can be dynamically fetched from the server? Is the user protected from misspelling field names by ensuring only allowed fields are set? Are types enforced (in other words, don't put an `int` in a `string` field?) | Yes, based on the [OpenAPI v3.0 validation](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation) schema (GA in 1.16). | Yes | ### Common Features @@ -246,12 +246,13 @@ When you add a custom resource, you can access it using: - A REST client that you write. - A client generated using [Kubernetes client generation tools](https://github.com/kubernetes/code-generator) (generating one is an advanced undertaking, but some projects may provide a client along with the CRD or AA). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn how to [Extend the Kubernetes API with the aggregation layer](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/). -* Learn how to [Extend the Kubernetes API with CustomResourceDefinition](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/). +* Learn how to [Extend the Kubernetes API with CustomResourceDefinition](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/). 
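To ground the CRD column of the comparison above, a minimal sketch of a CustomResourceDefinition with an OpenAPI v3.0 validation schema; the `CronTab` group, kind, and fields are illustrative:

```yaml
# Minimal CRD sketch (apiextensions.k8s.io/v1, GA in 1.16); names are illustrative.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: crontabs.stable.example.com   # must be <plural>.<group>
spec:
  group: stable.example.com
  scope: Namespaced
  names:
    plural: crontabs
    singular: crontab
    kind: CronTab
  versions:
    - name: v1
      served: true                    # this version is enabled
      storage: true                   # exactly one version is the storage version
      schema:
        openAPIV3Schema:              # structural schema used for validation
          type: object
          properties:
            spec:
              type: object
              properties:
                cronSpec:
                  type: string
                replicas:
                  type: integer
```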
+
-{{% /capture %}}
diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md
index 23f64628b5..d27dddd384 100644
--- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md
+++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md
@@ -2,11 +2,11 @@ reviewers:
 title: Device Plugins
 description: Use the Kubernetes device plugin framework to implement plugins for GPUs, NICs, FPGAs, InfiniBand, and similar resources that require vendor-specific setup.
-content_template: templates/concept
+content_type: concept
 weight: 20
 ---
 
-{{% capture overview %}}
+
 {{< feature-state for_k8s_version="v1.10" state="beta" >}}
 
 Kubernetes provides a [device plugin framework](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/resource-management/device-plugin.md)
@@ -19,9 +19,9 @@ The targeted devices include GPUs, high-performance NICs, FPGAs, InfiniBand adap
 and other similar computing resources that may require vendor specific initialization
 and setup.
 
-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Device plugin registration
 
@@ -225,12 +225,13 @@ Here are some examples of device plugin implementations:
 * The [SR-IOV Network device plugin](https://github.com/intel/sriov-network-device-plugin)
 * The [Xilinx FPGA device plugins](https://github.com/Xilinx/FPGA_as_a_Service/tree/master/k8s-fpga-device-plugin/trunk) for Xilinx FPGA devices
 
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
 * Learn about [scheduling GPU resources](/docs/tasks/manage-gpus/scheduling-gpus/) using device plugins
 * Learn about [advertising extended resources](/docs/tasks/administer-cluster/extended-resource-node/) on a node
 * Read about using [hardware acceleration for TLS ingress](https://kubernetes.io/blog/2019/04/24/hardware-accelerated-ssl/tls-termination-in-ingress-controllers-using-kubernetes-device-plugins-and-runtimeclass/) with Kubernetes
 * Learn about the [Topology Manager](/docs/tasks/administer-cluster/topology-manager/)
 
-{{% /capture %}}
+
diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md
index 2ff4ae2377..b32bce83dd 100644
--- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md
+++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md
@@ -4,12 +4,12 @@ reviewers:
 - freehan
 - thockin
 title: Network Plugins
-content_template: templates/concept
+content_type: concept
 weight: 10
 ---
 
-{{% capture overview %}}
+
 {{< feature-state state="alpha" >}}
 {{< caution >}}Alpha features can change rapidly. {{< /caution >}}
 
@@ -19,9 +19,9 @@ Network plugins in Kubernetes come in a few flavors:
 
 * CNI plugins: adhere to the appc/CNI specification, designed for interoperability.
 * Kubenet plugin: implements basic `cbr0` using the `bridge` and `host-local` CNI plugins
 
-{{% /capture %}}
-{{% capture body %}}
+
+
 ## Installation
 
@@ -166,8 +166,9 @@ This option is provided to the network-plugin; currently **only kubenet supports
 * `--network-plugin=kubenet` specifies that we use the `kubenet` network plugin with CNI `bridge` and `host-local` plugins placed in `/opt/cni/bin` or `cni-bin-dir`.
 * `--network-plugin-mtu=9001` specifies the MTU to use, currently only used by the `kubenet` network plugin.
-{{% /capture %}} -{{% capture whatsnext %}} -{{% /capture %}} +## {{% heading "whatsnext" %}} + + + diff --git a/content/en/docs/concepts/extend-kubernetes/extend-cluster.md b/content/en/docs/concepts/extend-kubernetes/extend-cluster.md index 2b5aa1b676..7914b1cab5 100644 --- a/content/en/docs/concepts/extend-kubernetes/extend-cluster.md +++ b/content/en/docs/concepts/extend-kubernetes/extend-cluster.md @@ -5,11 +5,11 @@ reviewers: - lavalamp - cheftako - chenopis -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Kubernetes is highly configurable and extensible. As a result, there is rarely a need to fork or submit patches to the Kubernetes @@ -22,10 +22,10 @@ their work environment. Developers who are prospective {{< glossary_tooltip text useful as an introduction to what extension points and patterns exist, and their trade-offs and limitations. -{{% /capture %}} -{{% capture body %}} + + ## Overview @@ -194,10 +194,11 @@ The scheduler also supports a that permits a webhook backend (scheduler extension) to filter and prioritize the nodes chosen for a pod. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Custom Resources](/docs/concepts/api-extension/custom-resources/) * Learn about [Dynamic admission control](/docs/reference/access-authn-authz/extensible-admission-controllers/) @@ -207,4 +208,4 @@ the nodes chosen for a pod. * Learn about [kubectl plugins](/docs/tasks/extend-kubectl/kubectl-plugins/) * Learn about the [Operator pattern](/docs/concepts/extend-kubernetes/operator/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/extend-kubernetes/operator.md b/content/en/docs/concepts/extend-kubernetes/operator.md index eb56d5475a..dda8f0020b 100644 --- a/content/en/docs/concepts/extend-kubernetes/operator.md +++ b/content/en/docs/concepts/extend-kubernetes/operator.md @@ -1,20 +1,20 @@ --- title: Operator pattern -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Operators are software extensions to Kubernetes that make use of [custom resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) to manage applications and their components. Operators follow Kubernetes principles, notably the [control loop](/docs/concepts/#kubernetes-control-plane). -{{% /capture %}} -{{% capture body %}} + + ## Motivation @@ -113,9 +113,10 @@ Operator. You also implement an Operator (that is, a Controller) using any language / runtime that can act as a [client for the Kubernetes API](/docs/reference/using-api/client-libraries/). 
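As a sketch of what this pattern looks like from the API side, here is a hypothetical custom resource that an Operator's controller might reconcile. The `SampleDB` group, kind, and fields are invented purely for illustration:

```yaml
# Hypothetical custom resource; an Operator's controller would watch objects
# of this kind and drive the cluster toward the declared state.
apiVersion: example.com/v1alpha1
kind: SampleDB
metadata:
  name: my-database
spec:
  replicas: 3                   # the controller works to make this true
  backupSchedule: "0 3 * * *"   # the controller might schedule backups from this
```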
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) * Find ready-made operators on [OperatorHub.io](https://operatorhub.io/) to suit your use case @@ -129,4 +130,3 @@ that can act as a [client for the Kubernetes API](/docs/reference/using-api/clie * Read [CoreOS' original article](https://coreos.com/blog/introducing-operators.html) that introduced the Operator pattern * Read an [article](https://cloud.google.com/blog/products/containers-kubernetes/best-practices-for-building-kubernetes-operators-and-stateful-apps) from Google Cloud about best practices for building Operators -{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md b/content/en/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md index 4c5ab12c03..7f81439c41 100644 --- a/content/en/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md +++ b/content/en/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md @@ -1,18 +1,18 @@ --- title: Poseidon-Firmament Scheduler -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.6" state="alpha" >}} The Poseidon-Firmament scheduler is an alternate scheduler that can be deployed alongside the default Kubernetes scheduler. -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -102,10 +102,11 @@ Pod-by-pod schedulers, such as the Kubernetes default scheduler, process Pods in These downsides of pod-by-pod schedulers are addressed by batching or bulk scheduling in Poseidon-Firmament scheduler. Processing several pods in a batch allows the scheduler to jointly consider their placement, and thus to find the best trade-off for the whole batch instead of one pod. At the same time it amortizes work across pods resulting in much higher throughput. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * See [Poseidon-Firmament](https://github.com/kubernetes-sigs/poseidon#readme) on GitHub for more information. * See the [design document](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/design/README.md) for Poseidon. * Read [Firmament: Fast, Centralized Cluster Scheduling at Scale](https://www.usenix.org/system/files/conference/osdi16/osdi16-gog.pdf), the academic paper on the Firmament scheduling design. * If you'd like to contribute to Poseidon-Firmament, refer to the [developer setup instructions](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/devel/README.md). 
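For reference, a Pod opts into an alternate scheduler through `spec.schedulerName`. This sketch assumes Poseidon was deployed under the scheduler name `poseidon`:

```yaml
# Sketch: the default scheduler ignores this Pod; the named scheduler picks it up.
# The name "poseidon" is an assumption about how the deployment was configured.
apiVersion: v1
kind: Pod
metadata:
  name: nginx-poseidon
spec:
  schedulerName: poseidon
  containers:
  - name: nginx
    image: nginx
```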
-{{% /capture %}} + diff --git a/content/en/docs/concepts/extend-kubernetes/service-catalog.md b/content/en/docs/concepts/extend-kubernetes/service-catalog.md index 35d181d998..b40ca7ee14 100644 --- a/content/en/docs/concepts/extend-kubernetes/service-catalog.md +++ b/content/en/docs/concepts/extend-kubernetes/service-catalog.md @@ -2,11 +2,11 @@ title: Service Catalog reviewers: - chenopis -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + {{< glossary_definition term_id="service-catalog" length="all" prepend="Service Catalog is" >}} A service broker, as defined by the [Open service broker API spec](https://github.com/openservicebrokerapi/servicebroker/blob/v2.13/spec.md), is an endpoint for a set of managed services offered and maintained by a third-party, which could be a cloud provider such as AWS, GCP, or Azure. @@ -14,10 +14,10 @@ Some examples of managed services are Microsoft Azure Cloud Queue, Amazon Simple Using Service Catalog, a {{< glossary_tooltip text="cluster operator" term_id="cluster-operator" >}} can browse the list of managed services offered by a service broker, provision an instance of a managed service, and bind with it to make it available to an application in the Kubernetes cluster. -{{% /capture %}} -{{% capture body %}} + + ## Example use case An {{< glossary_tooltip text="application developer" term_id="application-developer" >}} wants to use message queuing as part of their application running in a Kubernetes cluster. @@ -222,16 +222,17 @@ The following example describes how to map secret values into application enviro key: topic ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * If you are familiar with {{< glossary_tooltip text="Helm Charts" term_id="helm-chart" >}}, [install Service Catalog using Helm](/docs/tasks/service-catalog/install-service-catalog-using-helm/) into your Kubernetes cluster. Alternatively, you can [install Service Catalog using the SC tool](/docs/tasks/service-catalog/install-service-catalog-using-sc/). * View [sample service brokers](https://github.com/openservicebrokerapi/servicebroker/blob/master/gettingStarted.md#sample-service-brokers). * Explore the [kubernetes-incubator/service-catalog](https://github.com/kubernetes-incubator/service-catalog) project. * View [svc-cat.io](https://svc-cat.io/docs/). -{{% /capture %}} + diff --git a/content/en/docs/concepts/overview/components.md b/content/en/docs/concepts/overview/components.md index 04c4bbe805..f83f00683e 100644 --- a/content/en/docs/concepts/overview/components.md +++ b/content/en/docs/concepts/overview/components.md @@ -2,14 +2,14 @@ reviewers: - lavalamp title: Kubernetes Components -content_template: templates/concept +content_type: concept weight: 20 card: name: concepts weight: 20 --- -{{% capture overview %}} + When you deploy Kubernetes, you get a cluster. {{< glossary_definition term_id="cluster" length="all" prepend="A Kubernetes cluster consists of">}} @@ -20,9 +20,9 @@ Here's the diagram of a Kubernetes cluster with all the components tied together ![Components of Kubernetes](/images/docs/components-of-kubernetes.png) -{{% /capture %}} -{{% capture body %}} + + ## Control Plane Components The control plane's components make global decisions about the cluster (for example, scheduling), as well as detecting and responding to cluster events (for example, starting up a new {{< glossary_tooltip text="pod" term_id="pod">}} when a deployment's `replicas` field is unsatisfied). 
@@ -122,10 +122,11 @@ about containers in a central database, and provides a UI for browsing that data A [cluster-level logging](/docs/concepts/cluster-administration/logging/) mechanism is responsible for saving container logs to a central log store with search/browsing interface. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn about [Nodes](/docs/concepts/architecture/nodes/) * Learn about [Controllers](/docs/concepts/architecture/controller/) * Learn about [kube-scheduler](/docs/concepts/scheduling-eviction/kube-scheduler/) * Read etcd's official [documentation](https://etcd.io/docs/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/overview/kubernetes-api.md b/content/en/docs/concepts/overview/kubernetes-api.md index bbdef84958..0721dd9ecd 100644 --- a/content/en/docs/concepts/overview/kubernetes-api.md +++ b/content/en/docs/concepts/overview/kubernetes-api.md @@ -2,14 +2,14 @@ reviewers: - chenopis title: The Kubernetes API -content_template: templates/concept +content_type: concept weight: 30 card: name: concepts weight: 30 --- -{{% capture overview %}} + The core of Kubernetes' {{< glossary_tooltip text="control plane" term_id="control-plane" >}} is the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}}. The API server @@ -21,9 +21,10 @@ The Kubernetes API lets you query and manipulate the state of objects in the Kub API endpoints, resource types and samples are described in the [API Reference](/docs/reference/kubernetes-api/). -{{% /capture %}} -{{% capture body %}} + + + ## API changes @@ -135,10 +136,10 @@ There are several API groups in a cluster: There are two paths to extending the API with [custom resources](/docs/concepts/api-extension/custom-resources/): -1. [CustomResourceDefinition](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) +1. [CustomResourceDefinition](/docs/tasks/extend-kubernetes/custom-resource-definitions/) lets you declaratively define how the API server should provide your chosen resource API. -1. You can also [implement your own extension API server](/docs/tasks/access-kubernetes-api/setup-extension-api-server/) - and use the [aggregator](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/) +1. You can also [implement your own extension API server](/docs/tasks/extend-kubernetes/setup-extension-api-server/) + and use the [aggregator](/docs/tasks/extend-kubernetes/configure-aggregation-layer/) to make it seamless for clients. ## Enabling or disabling API groups @@ -166,8 +167,9 @@ For example: to enable deployments and daemonsets, set Kubernetes stores its serialized state in terms of the API resources by writing them into {{< glossary_tooltip term_id="etcd" >}}. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Controlling API Access](/docs/reference/access-authn-authz/controlling-access/) describes how the cluster manages authentication and authorization for API access. @@ -176,5 +178,3 @@ Overall API conventions are described in the document. API endpoints, resource types and samples are described in the [API Reference](/docs/reference/kubernetes-api/). - -{{% /capture %}} diff --git a/content/en/docs/concepts/overview/what-is-kubernetes.md b/content/en/docs/concepts/overview/what-is-kubernetes.md index fbe74e4337..5b30c8e66e 100644 --- a/content/en/docs/concepts/overview/what-is-kubernetes.md +++ b/content/en/docs/concepts/overview/what-is-kubernetes.md @@ -5,18 +5,18 @@ reviewers: title: What is Kubernetes? 
description: > Kubernetes is a portable, extensible, open-source platform for managing containerized workloads and services, that facilitates both declarative configuration and automation. It has a large, rapidly growing ecosystem. Kubernetes services, support, and tools are widely available. -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 10 --- -{{% capture overview %}} + This page is an overview of Kubernetes. -{{% /capture %}} -{{% capture body %}} + + Kubernetes is a portable, extensible, open-source platform for managing containerized workloads and services, that facilitates both declarative configuration and automation. It has a large, rapidly growing ecosystem. Kubernetes services, support, and tools are widely available. The name Kubernetes originates from Greek, meaning helmsman or pilot. Google open-sourced the Kubernetes project in 2014. Kubernetes combines [over 15 years of Google's experience](/blog/2015/04/borg-predecessor-to-kubernetes/) running production workloads at scale with best-of-breed ideas and practices from the community. @@ -86,9 +86,10 @@ Kubernetes: * Does not provide nor adopt any comprehensive machine configuration, maintenance, management, or self-healing systems. * Additionally, Kubernetes is not a mere orchestration system. In fact, it eliminates the need for orchestration. The technical definition of orchestration is execution of a defined workflow: first do A, then B, then C. In contrast, Kubernetes comprises a set of independent, composable control processes that continuously drive the current state towards the provided desired state. It shouldn’t matter how you get from A to C. Centralized control is also not required. This results in a system that is easier to use and more powerful, robust, resilient, and extensible. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Take a look at the [Kubernetes Components](/docs/concepts/overview/components/) * Ready to [Get Started](/docs/setup/)? -{{% /capture %}} + diff --git a/content/en/docs/concepts/overview/working-with-objects/annotations.md b/content/en/docs/concepts/overview/working-with-objects/annotations.md index f88c6a0003..d440d2965e 100644 --- a/content/en/docs/concepts/overview/working-with-objects/annotations.md +++ b/content/en/docs/concepts/overview/working-with-objects/annotations.md @@ -1,15 +1,15 @@ --- title: Annotations -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + You can use Kubernetes annotations to attach arbitrary non-identifying metadata to objects. Clients such as tools and libraries can retrieve this metadata. -{{% /capture %}} -{{% capture body %}} + + ## Attaching metadata to objects You can use either labels or annotations to attach metadata to Kubernetes @@ -88,10 +88,11 @@ spec: ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Learn more about [Labels and Selectors](/docs/concepts/overview/working-with-objects/labels/). 
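A short sketch of the annotation syntax discussed above; the keys and values are illustrative:

```yaml
# Annotations attach arbitrary non-identifying metadata; keys/values here are examples.
apiVersion: v1
kind: Pod
metadata:
  name: annotations-demo
  annotations:
    imageregistry: "https://hub.docker.com/"   # free-form metadata for tools and libraries
    example.com/build: "some-ci-build-id"      # prefixed keys avoid collisions (hypothetical)
spec:
  containers:
  - name: nginx
    image: nginx:1.14.2
```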
-{{% /capture %}} + diff --git a/content/en/docs/concepts/overview/working-with-objects/common-labels.md b/content/en/docs/concepts/overview/working-with-objects/common-labels.md index d360d7d284..11e8944c8a 100644 --- a/content/en/docs/concepts/overview/working-with-objects/common-labels.md +++ b/content/en/docs/concepts/overview/working-with-objects/common-labels.md @@ -1,18 +1,18 @@ --- title: Recommended Labels -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + You can visualize and manage Kubernetes objects with more tools than kubectl and the dashboard. A common set of labels allows tools to work interoperably, describing objects in a common manner that all tools can understand. In addition to supporting tooling, the recommended labels describe applications in a way that can be queried. -{{% /capture %}} -{{% capture body %}} + + The metadata is organized around the concept of an _application_. Kubernetes is not a platform as a service (PaaS) and doesn't have or enforce a formal notion of an application. Instead, applications are informal and described with metadata. The definition of @@ -170,4 +170,4 @@ metadata: With the MySQL `StatefulSet` and `Service` you'll notice information about both MySQL and Wordpress, the broader application, are included. -{{% /capture %}} + diff --git a/content/en/docs/concepts/overview/working-with-objects/field-selectors.md b/content/en/docs/concepts/overview/working-with-objects/field-selectors.md index 44cd54e72f..45a81e9035 100644 --- a/content/en/docs/concepts/overview/working-with-objects/field-selectors.md +++ b/content/en/docs/concepts/overview/working-with-objects/field-selectors.md @@ -16,12 +16,7 @@ kubectl get pods --field-selector status.phase=Running ``` {{< note >}} -Field selectors are essentially resource *filters*. By default, no selectors/filters are applied, meaning that all resources of the specified type are selected. This makes the following `kubectl` queries equivalent: - -```shell -kubectl get pods -kubectl get pods --field-selector "" -``` +Field selectors are essentially resource *filters*. By default, no selectors/filters are applied, meaning that all resources of the specified type are selected. This makes the `kubectl` queries `kubectl get pods` and `kubectl get pods --field-selector ""` equivalent. {{< /note >}} ## Supported fields diff --git a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md index b9df009db7..1f4f4e7509 100644 --- a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -1,17 +1,17 @@ --- title: Understanding Kubernetes Objects -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 40 --- -{{% capture overview %}} + This page explains how Kubernetes objects are represented in the Kubernetes API, and how you can express them in `.yaml` format. -{{% /capture %}} -{{% capture body %}} + + ## Understanding Kubernetes objects {#kubernetes-objects} *Kubernetes objects* are persistent entities in the Kubernetes system. Kubernetes uses these entities to represent the state of your cluster. 
Specifically, they can describe: @@ -87,12 +87,13 @@ For example, the `spec` format for a Pod can be found in and the `spec` format for a Deployment can be found in [DeploymentSpec v1 apps](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#deploymentspec-v1-apps). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Kubernetes API overview](/docs/reference/using-api/api-overview/) explains some more API concepts * Learn about the most important basic Kubernetes objects, such as [Pod](/docs/concepts/workloads/pods/pod-overview/). * Learn about [controllers](/docs/concepts/architecture/controller/) in Kubernetes -{{% /capture %}} + diff --git a/content/en/docs/concepts/overview/working-with-objects/labels.md b/content/en/docs/concepts/overview/working-with-objects/labels.md index f08daf323b..e995db10a5 100644 --- a/content/en/docs/concepts/overview/working-with-objects/labels.md +++ b/content/en/docs/concepts/overview/working-with-objects/labels.md @@ -2,11 +2,11 @@ reviewers: - mikedanese title: Labels and Selectors -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + _Labels_ are key/value pairs that are attached to objects, such as pods. Labels are intended to be used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system. @@ -24,10 +24,10 @@ Each object can have a set of key/value labels defined. Each Key must be unique Labels allow for efficient queries and watches and are ideal for use in UIs and CLIs. Non-identifying information should be recorded using [annotations](/docs/concepts/overview/working-with-objects/annotations/). -{{% /capture %}} -{{% capture body %}} + + ## Motivation @@ -228,4 +228,4 @@ selector: One use case for selecting over labels is to constrain the set of nodes onto which a pod can schedule. See the documentation on [node selection](/docs/concepts/scheduling-eviction/assign-pod-node/) for more information. -{{% /capture %}} + diff --git a/content/en/docs/concepts/overview/working-with-objects/names.md b/content/en/docs/concepts/overview/working-with-objects/names.md index 01bb53b56d..9831f7335c 100644 --- a/content/en/docs/concepts/overview/working-with-objects/names.md +++ b/content/en/docs/concepts/overview/working-with-objects/names.md @@ -3,11 +3,11 @@ reviewers: - mikedanese - thockin title: Object Names and IDs -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Each object in your cluster has a [_Name_](#names) that is unique for that type of resource. Every Kubernetes object also has a [_UID_](#uids) that is unique across your whole cluster. @@ -16,9 +16,9 @@ For example, you can only have one Pod named `myapp-1234` within the same [names For non-unique user-provided attributes, Kubernetes provides [labels](/docs/concepts/overview/working-with-objects/labels/) and [annotations](/docs/concepts/overview/working-with-objects/annotations/). -{{% /capture %}} -{{% capture body %}} + + ## Names @@ -81,8 +81,9 @@ Some resource types have additional restrictions on their names. Kubernetes UIDs are universally unique identifiers (also known as UUIDs). UUIDs are standardized as ISO/IEC 9834-8 and as ITU-T X.667. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Read about [labels](/docs/concepts/overview/working-with-objects/labels/) in Kubernetes. 
* See the [Identifiers and Names in Kubernetes](https://git.k8s.io/community/contributors/design-proposals/architecture/identifiers.md) design document. -{{% /capture %}} + diff --git a/content/en/docs/concepts/overview/working-with-objects/namespaces.md b/content/en/docs/concepts/overview/working-with-objects/namespaces.md index 8d6e907afd..30285e6fbf 100644 --- a/content/en/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/en/docs/concepts/overview/working-with-objects/namespaces.md @@ -4,19 +4,19 @@ reviewers: - mikedanese - thockin title: Namespaces -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called namespaces. -{{% /capture %}} -{{% capture body %}} + + ## When to Use Multiple Namespaces @@ -112,11 +112,12 @@ kubectl api-resources --namespaced=true kubectl api-resources --namespaced=false ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [creating a new namespace](/docs/tasks/administer-cluster/namespaces/#creating-a-new-namespace). * Learn more about [deleting a namespace](/docs/tasks/administer-cluster/namespaces/#deleting-a-namespace). -{{% /capture %}} + diff --git a/content/en/docs/concepts/overview/working-with-objects/object-management.md b/content/en/docs/concepts/overview/working-with-objects/object-management.md index 288be6a684..97f57ff275 100644 --- a/content/en/docs/concepts/overview/working-with-objects/object-management.md +++ b/content/en/docs/concepts/overview/working-with-objects/object-management.md @@ -1,17 +1,17 @@ --- title: Kubernetes Object Management -content_template: templates/concept +content_type: concept weight: 15 --- -{{% capture overview %}} + The `kubectl` command-line tool supports several different ways to create and manage Kubernetes objects. This document provides an overview of the different approaches. Read the [Kubectl book](https://kubectl.docs.kubernetes.io) for details of managing objects by Kubectl. -{{% /capture %}} -{{% capture body %}} + + ## Management techniques @@ -173,9 +173,10 @@ Disadvantages compared to imperative object configuration: - Declarative object configuration is harder to debug and understand results when they are unexpected. - Partial updates using diffs create complex merge and patch operations. 
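For reference, all three management techniques operate on configuration files like the following minimal Deployment; with the declarative technique you would keep a file like this in a directory processed by `kubectl apply`:

```yaml
# Minimal sketch of an object configuration file; names and image are illustrative.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx          # must match the Pod template labels below
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
```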
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [Managing Kubernetes Objects Using Imperative Commands](/docs/tasks/manage-kubernetes-objects/imperative-command/) - [Managing Kubernetes Objects Using Object Configuration (Imperative)](/docs/tasks/manage-kubernetes-objects/imperative-config/) @@ -185,4 +186,4 @@ Disadvantages compared to imperative object configuration: - [Kubectl Book](https://kubectl.docs.kubernetes.io) - [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/policy/limit-range.md b/content/en/docs/concepts/policy/limit-range.md index 8bea6c88e7..5b670d38a0 100644 --- a/content/en/docs/concepts/policy/limit-range.md +++ b/content/en/docs/concepts/policy/limit-range.md @@ -2,20 +2,20 @@ reviewers: - nelvadas title: Limit Ranges -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + By default, containers run with unbounded [compute resources](/docs/user-guide/compute-resources) on a Kubernetes cluster. With resource quotas, cluster administrators can restrict resource consumption and creation on a {{< glossary_tooltip text="namespace" term_id="namespace" >}} basis. Within a namespace, a Pod or Container can consume as much CPU and memory as defined by the namespace's resource quota. There is a concern that one Pod or Container could monopolize all available resources. A LimitRange is a policy to constrain resource allocations (to Pods or Containers) in a namespace. -{{% /capture %}} -{{% capture body %}} + + A _LimitRange_ provides constraints that can: @@ -26,9 +26,7 @@ A _LimitRange_ provides constraints that can: ## Enabling LimitRange -LimitRange support is enabled by default for many Kubernetes distributions. It is -enabled when the apiserver `--enable-admission-plugins=` flag has `LimitRanger` admission controller as -one of its arguments. +LimitRange support has been enabled by default since Kubernetes 1.10. A LimitRange is enforced in a particular namespace when there is a LimitRange object in that namespace. @@ -56,9 +54,10 @@ there may be contention for resources. In this case, the Containers or Pods will Neither contention nor changes to a LimitRange will affect already created resources. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Refer to the [LimitRanger design document](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_limit_range.md) for more information. @@ -72,4 +71,4 @@ For examples on using limits, see: - a [detailed example on configuring quota per namespace](/docs/tasks/administer-cluster/quota-memory-cpu-namespace/). -{{% /capture %}} + diff --git a/content/en/docs/concepts/policy/pod-security-policy.md b/content/en/docs/concepts/policy/pod-security-policy.md index 52aa593e6f..5a5241c42e 100644 --- a/content/en/docs/concepts/policy/pod-security-policy.md +++ b/content/en/docs/concepts/policy/pod-security-policy.md @@ -3,21 +3,21 @@ reviewers: - pweil- - tallclair title: Pod Security Policies -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state state="beta" >}} Pod Security Policies enable fine-grained authorization of pod creation and updates. -{{% /capture %}} -{{% capture body %}} + + ## What is a Pod Security Policy? 
@@ -34,7 +34,7 @@ administrator to control the following: | Usage of host networking and ports | [`hostNetwork`, `hostPorts`](#host-namespaces) | | Usage of volume types | [`volumes`](#volumes-and-file-systems) | | Usage of the host filesystem | [`allowedHostPaths`](#volumes-and-file-systems) | -| White list of FlexVolume drivers | [`allowedFlexVolumes`](#flexvolume-drivers) | +| Allow specific FlexVolume drivers | [`allowedFlexVolumes`](#flexvolume-drivers) | | Allocating an FSGroup that owns the pod's volumes | [`fsGroup`](#volumes-and-file-systems) | | Requiring the use of a read only root file system | [`readOnlyRootFilesystem`](#volumes-and-file-systems) | | The user and group IDs of the container | [`runAsUser`, `runAsGroup`, `supplementalGroups`](#users-and-groups) | @@ -401,13 +401,13 @@ namespace. Doing so gives the pod access to the loopback device, services listening on localhost, and could be used to snoop on network activity of other pods on the same node. -**HostPorts** - Provides a whitelist of ranges of allowable ports in the host +**HostPorts** - Provides a list of ranges of allowable ports in the host network namespace. Defined as a list of `HostPortRange`, with `min`(inclusive) and `max`(inclusive). Defaults to no allowed host ports. ### Volumes and file systems -**Volumes** - Provides a whitelist of allowed volume types. The allowable values +**Volumes** - Provides a list of allowed volume types. The allowable values correspond to the volume sources that are defined when creating a volume. For the complete list of volume types, see [Types of Volumes](/docs/concepts/storage/volumes/#types-of-volumes). Additionally, `*` @@ -438,7 +438,7 @@ minimum value of the first range as the default. Validates against all ranges. all ranges if `FSGroups` is set. - *RunAsAny* - No default provided. Allows any `fsGroup` ID to be specified. -**AllowedHostPaths** - This specifies a whitelist of host paths that are allowed +**AllowedHostPaths** - This specifies a list of host paths that are allowed to be used by hostPath volumes. An empty list means there is no restriction on host paths used. This is defined as a list of objects with a single `pathPrefix` field, which allows hostPath volumes to mount a path that begins with an @@ -469,7 +469,7 @@ root filesystem (i.e. no writable layer). ### FlexVolume drivers -This specifies a whitelist of FlexVolume drivers that are allowed to be used +This specifies a list of FlexVolume drivers that are allowed to be used by flexvolume. An empty list or nil means there is no restriction on the drivers. Please make sure [`volumes`](#volumes-and-file-systems) field contains the `flexVolume` volume type; no FlexVolume driver is allowed otherwise. @@ -555,7 +555,7 @@ the PodSecurityPolicy. For more details on Linux capabilities, see The following fields take a list of capabilities, specified as the capability name in ALL_CAPS without the `CAP_` prefix. -**AllowedCapabilities** - Provides a whitelist of capabilities that may be added +**AllowedCapabilities** - Provides a list of capabilities that are allowed to be added to a container. The default set of capabilities are implicitly allowed. The empty set means that no additional capabilities may be added beyond the default set. `*` can be used to allow all capabilities. @@ -579,7 +579,7 @@ specified. ### AllowedProcMountTypes -`allowedProcMountTypes` is a whitelist of allowed ProcMountTypes. +`allowedProcMountTypes` is a list of allowed ProcMountTypes. 
Empty or nil indicates that only the `DefaultProcMountType` may be used. `DefaultProcMount` uses the container runtime defaults for readonly and masked @@ -631,12 +631,13 @@ By default, all safe sysctls are allowed. Refer to the [Sysctl documentation]( /docs/concepts/cluster-administration/sysctl-cluster/#podsecuritypolicy). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + See [Pod Security Standards](/docs/concepts/security/pod-security-standards/) for policy recommendations. Refer to [Pod Security Policy Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritypolicy-v1beta1-policy) for the api details. -{{% /capture %}} + diff --git a/content/en/docs/concepts/policy/resource-quotas.md b/content/en/docs/concepts/policy/resource-quotas.md index 39f51bf2d7..4fb3f17a38 100644 --- a/content/en/docs/concepts/policy/resource-quotas.md +++ b/content/en/docs/concepts/policy/resource-quotas.md @@ -2,21 +2,21 @@ reviewers: - derekwaynecarr title: Resource Quotas -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + When several users or teams share a cluster with a fixed number of nodes, there is a concern that one team could use more than its fair share of resources. Resource quotas are a tool for administrators to address this concern. -{{% /capture %}} -{{% capture body %}} + + A resource quota, defined by a `ResourceQuota` object, provides constraints that limit aggregate resource consumption per namespace. It can limit the quantity of objects that can @@ -596,10 +596,11 @@ See [LimitedResources](https://github.com/kubernetes/kubernetes/pull/36765) and See a [detailed example for how to use resource quota](/docs/tasks/administer-cluster/quota-api-object/). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + See [ResourceQuota design doc](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_resource_quota.md) for more information. -{{% /capture %}} + diff --git a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md index 79a9487c60..a30efc6ef6 100644 --- a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -4,12 +4,12 @@ reviewers: - kevin-wangzefeng - bsalamat title: Assigning Pods to Nodes -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + You can constrain a {{< glossary_tooltip text="Pod" term_id="pod" >}} to only be able to run on particular {{< glossary_tooltip text="Node(s)" term_id="node" >}}, or to prefer to run on particular nodes. @@ -21,9 +21,9 @@ but there are some circumstances where you may want more control on a node where that a pod ends up on a machine with an SSD attached to it, or to co-locate pods from two different services that communicate a lot into the same availability zone. -{{% /capture %}} -{{% capture body %}} + + ## nodeSelector @@ -213,10 +213,8 @@ as at least one already-running pod that has a label with key "security" and val on node N if node N has a label with key `failure-domain.beta.kubernetes.io/zone` and some value V such that there is at least one node in the cluster with key `failure-domain.beta.kubernetes.io/zone` and value V that is running a pod that has a label with key "security" and value "S1".) 
The pod anti-affinity -rule says that the pod prefers not to be scheduled onto a node if that node is already running a pod with label -having key "security" and value "S2". (If the `topologyKey` were `failure-domain.beta.kubernetes.io/zone` then -it would mean that the pod cannot be scheduled onto a node if that node is in the same zone as a pod with -label having key "security" and value "S2".) See the +rule says that the pod cannot be scheduled onto a node if that node is in the same zone as a pod with +label having key "security" and value "S2". See the [design doc](https://git.k8s.io/community/contributors/design-proposals/scheduling/podaffinity.md) for many more examples of pod affinity and anti-affinity, both the `requiredDuringSchedulingIgnoredDuringExecution` flavor and the `preferredDuringSchedulingIgnoredDuringExecution` flavor. @@ -388,9 +386,10 @@ spec: The above pod will run on the node kube-01. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Taints](/docs/concepts/scheduling-eviction/taint-and-toleration/) allow a Node to *repel* a set of Pods. @@ -402,4 +401,4 @@ Once a Pod is assigned to a Node, the kubelet runs the Pod and allocates node-lo The [topology manager](/docs/tasks/administer-cluster/topology-manager/) can take part in node-level resource allocation decisions. -{{% /capture %}} + diff --git a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md index 2fea98bfb4..406c3f974b 100644 --- a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -1,18 +1,18 @@ --- title: Kubernetes Scheduler -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + In Kubernetes, _scheduling_ refers to making sure that {{< glossary_tooltip text="Pods" term_id="pod" >}} are matched to {{< glossary_tooltip text="Nodes" term_id="node" >}} so that {{< glossary_tooltip term_id="kubelet" >}} can run them. -{{% /capture %}} -{{% capture body %}} + + ## Scheduling overview {#scheduling} @@ -86,12 +86,13 @@ of the scheduler: `QueueSort`, `Filter`, `Score`, `Bind`, `Reserve`, `Permit`, and others. You can also configure the kube-scheduler to run different profiles. 
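As a sketch of the profiles mentioned above (assuming the `v1alpha2` component config available in Kubernetes v1.18), a single kube-scheduler can serve one vanilla and one customized profile:

```yaml
# Sketch of a two-profile scheduler configuration; version and profile names are assumptions.
apiVersion: kubescheduler.config.k8s.io/v1alpha2
kind: KubeSchedulerConfiguration
profiles:
  - schedulerName: default-scheduler
  - schedulerName: no-scoring-scheduler   # Pods opt in via spec.schedulerName
    plugins:
      preScore:
        disabled:
        - name: '*'                       # disable all PreScore plugins in this profile
      score:
        disabled:
        - name: '*'                       # disable all Score plugins in this profile
```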
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Read about [scheduler performance tuning](/docs/concepts/scheduling-eviction/scheduler-perf-tuning/) * Read about [Pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/) * Read the [reference documentation](/docs/reference/command-line-tools-reference/kube-scheduler/) for kube-scheduler * Learn about [configuring multiple schedulers](/docs/tasks/administer-cluster/configure-multiple-schedulers/) * Learn about [topology management policies](/docs/tasks/administer-cluster/topology-manager/) * Learn about [Pod Overhead](/docs/concepts/configuration/pod-overhead/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md b/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md index e3d4b16861..06f535a574 100644 --- a/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md +++ b/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md @@ -2,11 +2,11 @@ reviewers: - bsalamat title: Scheduler Performance Tuning -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.14" state="beta" >}} @@ -24,9 +24,9 @@ in a process called _Binding_. This page explains performance tuning optimizations that are relevant for large Kubernetes clusters. -{{% /capture %}} -{{% capture body %}} + + In large clusters, you can tune the scheduler's behaviour balancing scheduling outcomes between latency (new Pods are placed quickly) and @@ -164,4 +164,4 @@ Node 1, Node 5, Node 2, Node 6, Node 3, Node 4 After going over all the Nodes, it goes back to Node 1. -{{% /capture %}} + diff --git a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md index d1123b72e1..5798b0579f 100644 --- a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md +++ b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md @@ -2,11 +2,11 @@ reviewers: - ahg-g title: Scheduling Framework -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.15" state="alpha" >}} @@ -20,9 +20,9 @@ framework. [kep]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-scheduling/20180409-scheduling-framework.md -{{% /capture %}} -{{% capture body %}} + + # Framework workflow @@ -239,4 +239,3 @@ If you are using Kubernetes v1.18 or later, you can configure a set of plugins a a scheduler profile and then define multiple profiles to fit various kinds of workload. Learn more at [multiple profiles](/docs/reference/scheduling/profiles/#multiple-profiles). 
-{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md index c803676d3a..89a7eca7b1 100644 --- a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md +++ b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md @@ -4,12 +4,12 @@ reviewers: - kevin-wangzefeng - bsalamat title: Taints and Tolerations -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + [_Node affinity_](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity), is a property of {{< glossary_tooltip text="Pods" term_id="pod" >}} that *attracts* them to a set of {{< glossary_tooltip text="nodes" term_id="node" >}} (either as a preference or a @@ -22,9 +22,9 @@ Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. One or more taints are applied to a node; this marks that the node should not accept any pods that do not tolerate the taints. -{{% /capture %}} -{{% capture body %}} + + ## Concepts @@ -282,9 +282,10 @@ tolerations to all daemons, to prevent DaemonSets from breaking. Adding these tolerations ensures backward compatibility. You can also add arbitrary tolerations to DaemonSets. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Read about [out of resource handling](/docs/tasks/administer-cluster/out-of-resource/) and how you can configure it * Read about [pod priority](/docs/concepts/configuration/pod-priority-preemption/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/security/overview.md b/content/en/docs/concepts/security/overview.md index 20ba255039..ed3ba48eb4 100644 --- a/content/en/docs/concepts/security/overview.md +++ b/content/en/docs/concepts/security/overview.md @@ -2,13 +2,13 @@ reviewers: - zparnold title: Overview of Cloud Native Security -content_template: templates/concept +content_type: concept weight: 1 --- {{< toc >}} -{{% capture overview %}} + Kubernetes Security (and security in general) is an immense topic that has many highly interrelated parts. In today's era where open source software is integrated into many of the systems that help web applications run, @@ -17,9 +17,9 @@ think about security holistically. This guide will define a mental model for some general concepts surrounding Cloud Native Security. The mental model is completely arbitrary and you should only use it if it helps you think about where to secure your software stack. -{{% /capture %}} -{{% capture body %}} + + ## The 4C's of Cloud Native Security Let's start with a diagram that may help you understand how you can think about security in layers. @@ -153,12 +153,13 @@ Most of the above mentioned suggestions can actually be automated in your code delivery pipeline as part of a series of checks in security. To learn about a more "Continuous Hacking" approach to software delivery, [this article](https://thenewstack.io/beyond-ci-cd-how-continuous-hacking-of-docker-containers-and-pipeline-driven-security-keeps-ygrene-secure/) provides more detail. 
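As one concrete control at the cluster layer of this model, a default-deny ingress NetworkPolicy is a common starting point. This is an illustrative sketch and requires a network plugin that enforces NetworkPolicy:

```yaml
# Deny all ingress traffic to every Pod in the namespace unless another policy allows it.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
  namespace: default
spec:
  podSelector: {}        # empty selector: applies to every Pod in the namespace
  policyTypes:
  - Ingress              # no ingress rules listed, so all ingress is denied
```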
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Read about [network policies for Pods](/docs/concepts/services-networking/network-policies/) * Read about [securing your cluster](/docs/tasks/administer-cluster/securing-a-cluster/) * Read about [API access control](/docs/reference/access-authn-authz/controlling-access/) * Read about [data encryption in transit](/docs/tasks/tls/managing-tls-in-a-cluster/) for the control plane * Read about [data encryption at rest](/docs/tasks/administer-cluster/encrypt-data/) * Read about [Secrets in Kubernetes](/docs/concepts/configuration/secret/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/security/pod-security-standards.md b/content/en/docs/concepts/security/pod-security-standards.md index 1adf042c91..2afd6c7335 100644 --- a/content/en/docs/concepts/security/pod-security-standards.md +++ b/content/en/docs/concepts/security/pod-security-standards.md @@ -2,11 +2,11 @@ reviewers: - tallclair title: Pod Security Standards -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Security settings for Pods are typically applied by using [security contexts](/docs/tasks/configure-pod-container/security-context/). Security Contexts allow for the @@ -21,9 +21,9 @@ However, numerous means of policy enforcement have arisen that augment or replac PodSecurityPolicy. The intent of this page is to detail recommended Pod security profiles, decoupled from any specific instantiation. -{{% /capture %}} -{{% capture body %}} + + ## Policy Types @@ -43,9 +43,9 @@ should range from highly restricted to highly flexible: The Privileged policy is purposely-open, and entirely unrestricted. This type of policy is typically aimed at system- and infrastructure-level workloads managed by privileged, trusted users. -The privileged policy is defined by an absence of restrictions. For blacklist-oriented enforcement +The privileged policy is defined by an absence of restrictions. For allow-by-default enforcement mechanisms (such as gatekeeper), the privileged profile may be an absence of applied constraints -rather than an instantiated policy. In contrast, for a whitelist oriented mechanism (such as Pod +rather than an instantiated policy. In contrast, for a deny-by-default mechanism (such as Pod Security Policy) the privileged policy should enable all controls (disable all restrictions). ### Baseline/Default @@ -56,8 +56,8 @@ developers of non-critical applications. The following listed controls should be enforced/disallowed: - - + + @@ -90,7 +90,7 @@ enforced/disallowed:
Restricted Fields:
spec.containers[*].securityContext.capabilities.add
spec.initContainers[*].securityContext.capabilities.add
- Allowed Values: empty (optionally whitelisted defaults)
+ Allowed Values: empty (or restricted to a known list)
@@ -105,20 +105,20 @@ enforced/disallowed:
Allowed Values: undefined/nil
Baseline policy specification
Control
Policy
Host Ports
- HostPorts should be disallowed, or at minimum restricted to a whitelist.
+ HostPorts should be disallowed, or at minimum restricted to a known list.

Restricted Fields:
spec.containers[*].ports[*].hostPort
spec.initContainers[*].ports[*].hostPort
- Allowed Values: 0, undefined, (whitelisted)
+ Allowed Values: 0, undefined (or restricted to a known list)
AppArmor (optional)
- On supported hosts, the `runtime/default` AppArmor profile is applied by default. The default policy should prevent overriding or disabling the policy, or restrict overrides to a whitelisted set of profiles.
+ On supported hosts, the 'runtime/default' AppArmor profile is applied by default. The default policy should prevent overriding or disabling the policy, or restrict overrides to an allowed set of profiles.

Restricted Fields:
metadata.annotations['container.apparmor.security.beta.kubernetes.io/*']
- Allowed Values: runtime/default, undefined
+ Allowed Values: 'runtime/default', undefined
@@ -132,6 +132,31 @@ enforced/disallowed:
+ /proc Mount Type
+ The default /proc masks are set up to reduce attack surface, and should be required.
+
Restricted Fields:
+ spec.containers[*].securityContext.procMount
+ spec.initContainers[*].securityContext.procMount
+
Allowed Values: undefined/nil, 'Default'
+
+ Sysctls
+ Sysctls can disable security mechanisms or affect all containers on a host, and should be disallowed except for an allowed "safe" subset.
+ A sysctl is considered safe if it is namespaced in the container or the Pod, and it is isolated from other Pods or processes on the same Node.
+
Restricted Fields:
+ spec.securityContext.sysctls
+
Allowed Values:
+ kernel.shm_rmid_forced
+ net.ipv4.ip_local_port_range
+ net.ipv4.tcp_syncookies
+ net.ipv4.ping_group_range
+ undefined/empty
+
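To make the Sysctls row above concrete, a minimal sketch of a Pod that stays inside the allowed "safe" subset (the Pod and container names are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sysctl-example        # illustrative name
spec:
  securityContext:
    sysctls:
    # Only namespaced, "safe" sysctls from the baseline list above.
    - name: kernel.shm_rmid_forced
      value: "0"
    - name: net.ipv4.tcp_syncookies
      value: "1"
  containers:
  - name: app
    image: nginx
```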
@@ -143,7 +168,7 @@ well as lower-trust users.The following listed controls should be enforced/disal - + @@ -184,7 +209,7 @@ well as lower-trust users.The following listed controls should be enforced/disal @@ -297,4 +322,4 @@ kernel. This allows for workloads requiring heightened permissions to still be i Additionally, the protection of sandboxed workloads is highly dependent on the method of sandboxing. As such, no single ‘recommended’ policy is recommended for all sandboxed workloads. -{{% /capture %}} + diff --git a/content/en/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md b/content/en/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md index 6f931a8531..05a6a8bc85 100644 --- a/content/en/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md +++ b/content/en/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md @@ -3,19 +3,19 @@ reviewers: - rickypai - thockin title: Adding entries to Pod /etc/hosts with HostAliases -content_template: templates/concept +content_type: concept weight: 60 --- {{< toc >}} -{{% capture overview %}} + Adding entries to a Pod's /etc/hosts file provides Pod-level override of hostname resolution when DNS and other options are not applicable. In 1.7, users can add these custom entries with the HostAliases field in PodSpec. Modification not using HostAliases is not suggested because the file is managed by Kubelet and can be overwritten on during Pod creation/restart. -{{% /capture %}} -{{% capture body %}} + + ## Default Hosts File Content @@ -125,5 +125,5 @@ overwritten whenever the `hosts` file is remounted by Kubelet in the event of a container restart or a Pod reschedule. Thus, it is not suggested to modify the contents of the file. -{{% /capture %}} + diff --git a/content/en/docs/concepts/services-networking/connect-applications-service.md b/content/en/docs/concepts/services-networking/connect-applications-service.md index 50c012ffc6..79c6053301 100644 --- a/content/en/docs/concepts/services-networking/connect-applications-service.md +++ b/content/en/docs/concepts/services-networking/connect-applications-service.md @@ -4,12 +4,12 @@ reviewers: - lavalamp - thockin title: Connecting Applications with Services -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + ## The Kubernetes model for connecting containers @@ -21,9 +21,9 @@ Coordinating port allocations across multiple developers or teams that provide c This guide uses a simple nginx server to demonstrate proof of concept. -{{% /capture %}} -{{% capture body %}} + + ## Exposing pods to the cluster @@ -394,8 +394,8 @@ kubectl edit svc my-nginx kubectl get svc my-nginx ``` ``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -my-nginx ClusterIP 10.0.162.149 162.222.184.144 80/TCP,81/TCP,82/TCP 21s +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +my-nginx LoadBalancer 10.0.162.149 xx.xxx.xxx.xxx 8080:30163/TCP 21s ``` ``` curl https:// -k @@ -418,12 +418,13 @@ LoadBalancer Ingress: a320587ffd19711e5a37606cf4a74574-1142138393.us-east-1.el ... 
``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Using a Service to Access an Application in a Cluster](/docs/tasks/access-application-cluster/service-access-application-cluster/) * Learn more about [Connecting a Front End to a Back End Using a Service](/docs/tasks/access-application-cluster/connecting-frontend-backend/) * Learn more about [Creating an External Load Balancer](/docs/tasks/access-application-cluster/create-external-load-balancer/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/services-networking/dns-pod-service.md b/content/en/docs/concepts/services-networking/dns-pod-service.md index 9cba184168..9d88019e3c 100644 --- a/content/en/docs/concepts/services-networking/dns-pod-service.md +++ b/content/en/docs/concepts/services-networking/dns-pod-service.md @@ -3,14 +3,14 @@ reviewers: - davidopp - thockin title: DNS for Services and Pods -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + This page provides an overview of DNS support by Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -66,6 +66,13 @@ of the form `auto-generated-name.my-svc.my-namespace.svc.cluster-domain.example` ## Pods +### A/AAAA records + +Any pods created by a Deployment or DaemonSet have the following +DNS resolution available: + +`pod-ip-address.deployment-name.my-namespace.svc.cluster-domain.example.` + ### Pod's hostname and subdomain fields Currently when a pod is created, its hostname is the Pod's `metadata.name` value. @@ -262,11 +269,11 @@ The availability of Pod DNS Config and DNS Policy "`None`" is shown as below. | 1.10 | Beta (on by default)| | 1.9 | Alpha | -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + For guidance on administering DNS configurations, check [Configure DNS Service](/docs/tasks/administer-cluster/dns-custom-nameservers/) -{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/concepts/services-networking/dual-stack.md b/content/en/docs/concepts/services-networking/dual-stack.md index c753c17cc1..aa249566b9 100644 --- a/content/en/docs/concepts/services-networking/dual-stack.md +++ b/content/en/docs/concepts/services-networking/dual-stack.md @@ -9,11 +9,11 @@ feature: description: > Allocation of IPv4 and IPv6 addresses to Pods and Services -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.16" state="alpha" >}} @@ -21,9 +21,9 @@ weight: 70 If you enable IPv4/IPv6 dual-stack networking for your Kubernetes cluster, the cluster will support the simultaneous assignment of both IPv4 and IPv6 addresses. 
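As a sketch of what the dual-stack.md change above enables, a Service can request an address family explicitly; `ipFamily` is an alpha field in this release and may change (the Service name and selector are illustrative):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service            # illustrative name
spec:
  ipFamily: IPv6              # alpha field; requires the IPv6DualStack feature gate
  selector:
    app: MyApp
  ports:
  - protocol: TCP
    port: 80
```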
-{{% /capture %}} -{{% capture body %}} + + ## Supported Features @@ -103,10 +103,11 @@ The use of publicly routable and non-publicly routable IPv6 address blocks is ac * Kubenet forces IPv4,IPv6 positional reporting of IPs (--cluster-cidr) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Validate IPv4/IPv6 dual-stack](/docs/tasks/network/validate-dual-stack) networking -{{% /capture %}} + diff --git a/content/en/docs/concepts/services-networking/endpoint-slices.md b/content/en/docs/concepts/services-networking/endpoint-slices.md index 940374ae52..7c66ce0072 100644 --- a/content/en/docs/concepts/services-networking/endpoint-slices.md +++ b/content/en/docs/concepts/services-networking/endpoint-slices.md @@ -2,12 +2,12 @@ reviewers: - freehan title: EndpointSlices -content_template: templates/concept +content_type: concept weight: 15 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.17" state="beta" >}} @@ -15,9 +15,9 @@ _EndpointSlices_ provide a simple way to track network endpoints within a Kubernetes cluster. They offer a more scalable and extensible alternative to Endpoints. -{{% /capture %}} -{{% capture body %}} + + ## Motivation @@ -175,11 +175,12 @@ necessary soon anyway. Rolling updates of Deployments also provide a natural repacking of EndpointSlices with all pods and their corresponding endpoints getting replaced. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Enabling EndpointSlices](/docs/tasks/administer-cluster/enabling-endpointslices) * Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/services-networking/ingress-controllers.md b/content/en/docs/concepts/services-networking/ingress-controllers.md index efeb327049..2c363ce7dc 100644 --- a/content/en/docs/concepts/services-networking/ingress-controllers.md +++ b/content/en/docs/concepts/services-networking/ingress-controllers.md @@ -1,11 +1,11 @@ --- title: Ingress Controllers reviewers: -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + In order for the Ingress resource to work, the cluster must have an ingress controller running. @@ -16,9 +16,9 @@ that best fits your cluster. Kubernetes as a project currently supports and maintains [GCE](https://git.k8s.io/ingress-gce/README.md) and [nginx](https://git.k8s.io/ingress-nginx/README.md) controllers. -{{% /capture %}} -{{% capture body %}} + + ## Additional controllers @@ -64,11 +64,12 @@ controllers operate slightly differently. Make sure you review your ingress controller's documentation to understand the caveats of choosing it. {{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Ingress](/docs/concepts/services-networking/ingress/). * [Set up Ingress on Minikube with the NGINX Controller](/docs/tasks/access-application-cluster/ingress-minikube). 
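For the endpoint-slices.md hunk above, a minimal sketch of an EndpointSlice object in the beta API (names, address, and topology values are illustrative):

```yaml
apiVersion: discovery.k8s.io/v1beta1
kind: EndpointSlice
metadata:
  name: example-abc           # illustrative name
  labels:
    kubernetes.io/service-name: example
addressType: IPv4
ports:
- name: http
  protocol: TCP
  port: 80
endpoints:
- addresses:
  - "10.1.2.3"
  conditions:
    ready: true
  topology:
    kubernetes.io/hostname: node-1
```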
-{{% /capture %}} + diff --git a/content/en/docs/concepts/services-networking/ingress.md b/content/en/docs/concepts/services-networking/ingress.md index 062dc14f66..430ee3c72d 100644 --- a/content/en/docs/concepts/services-networking/ingress.md +++ b/content/en/docs/concepts/services-networking/ingress.md @@ -2,16 +2,16 @@ reviewers: - bprashanth title: Ingress -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.1" state="beta" >}} {{< glossary_definition term_id="ingress" length="all" >}} -{{% /capture %}} -{{% capture body %}} + + ## Terminology @@ -542,10 +542,11 @@ You can expose a Service in multiple ways that don't directly involve the Ingres * Use [Service.Type=LoadBalancer](/docs/concepts/services-networking/service/#loadbalancer) * Use [Service.Type=NodePort](/docs/concepts/services-networking/service/#nodeport) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn about the [Ingress API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#ingress-v1beta1-networking-k8s-io) * Learn about [Ingress Controllers](/docs/concepts/services-networking/ingress-controllers/) * [Set up Ingress on Minikube with the NGINX Controller](/docs/tasks/access-application-cluster/ingress-minikube) -{{% /capture %}} + diff --git a/content/en/docs/concepts/services-networking/network-policies.md b/content/en/docs/concepts/services-networking/network-policies.md index 795969757d..4a01707ab2 100644 --- a/content/en/docs/concepts/services-networking/network-policies.md +++ b/content/en/docs/concepts/services-networking/network-policies.md @@ -4,20 +4,20 @@ reviewers: - caseydavenport - danwinship title: Network Policies -content_template: templates/concept +content_type: concept weight: 50 --- {{< toc >}} -{{% capture overview %}} + A network policy is a specification of how groups of {{< glossary_tooltip text="pods" term_id="pod">}} are allowed to communicate with each other and other network endpoints. NetworkPolicy resources use {{< glossary_tooltip text="labels" term_id="label">}} to select pods and define rules which specify what traffic is allowed to the selected pods. -{{% /capture %}} -{{% capture body %}} + + ## Prerequisites Network policies are implemented by the [network plugin](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/). To use network policies, you must be using a networking solution which supports NetworkPolicy. Creating a NetworkPolicy resource without a controller that implements it will have no effect. @@ -89,9 +89,9 @@ __podSelector__: Each NetworkPolicy includes a `podSelector` which selects the g __policyTypes__: Each NetworkPolicy includes a `policyTypes` list which may include either `Ingress`, `Egress`, or both. The `policyTypes` field indicates whether or not the given policy applies to ingress traffic to selected pod, egress traffic from selected pods, or both. If no `policyTypes` are specified on a NetworkPolicy then by default `Ingress` will always be set and `Egress` will be set if the NetworkPolicy has any egress rules. -__ingress__: Each NetworkPolicy may include a list of whitelist `ingress` rules. Each rule allows traffic which matches both the `from` and `ports` sections. The example policy contains a single rule, which matches traffic on a single port, from one of three sources, the first specified via an `ipBlock`, the second via a `namespaceSelector` and the third via a `podSelector`. 
+__ingress__: Each NetworkPolicy may include a list of allowed `ingress` rules. Each rule allows traffic which matches both the `from` and `ports` sections. The example policy contains a single rule, which matches traffic on a single port, from one of three sources, the first specified via an `ipBlock`, the second via a `namespaceSelector` and the third via a `podSelector`. -__egress__: Each NetworkPolicy may include a list of whitelist `egress` rules. Each rule allows traffic which matches both the `to` and `ports` sections. The example policy contains a single rule, which matches traffic on a single port to any destination in `10.0.0.0/24`. +__egress__: Each NetworkPolicy may include a list of allowed `egress` rules. Each rule allows traffic which matches both the `to` and `ports` sections. The example policy contains a single rule, which matches traffic on a single port to any destination in `10.0.0.0/24`. So, the example NetworkPolicy: @@ -215,12 +215,13 @@ You must be using a {{< glossary_tooltip text="CNI" term_id="cni" >}} plugin tha {{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - See the [Declare Network Policy](/docs/tasks/administer-cluster/declare-network-policy/) walkthrough for further examples. - See more [recipes](https://github.com/ahmetb/kubernetes-network-policy-recipes) for common scenarios enabled by the NetworkPolicy resource. -{{% /capture %}} + diff --git a/content/en/docs/concepts/services-networking/service-topology.md b/content/en/docs/concepts/services-networking/service-topology.md index 7b3c58a84a..d36b76f55f 100644 --- a/content/en/docs/concepts/services-networking/service-topology.md +++ b/content/en/docs/concepts/services-networking/service-topology.md @@ -8,12 +8,12 @@ feature: description: > Routing of service traffic based upon cluster topology. -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.17" state="alpha" >}} @@ -22,9 +22,9 @@ topology of the cluster. For example, a service can specify that traffic be preferentially routed to endpoints that are on the same Node as the client, or in the same availability zone. -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -192,11 +192,12 @@ spec: ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Read about [enabling Service Topology](/docs/tasks/administer-cluster/enabling-service-topology) * Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md index e97d80db21..2ae49ac270 100644 --- a/content/en/docs/concepts/services-networking/service.md +++ b/content/en/docs/concepts/services-networking/service.md @@ -7,12 +7,12 @@ feature: description: > No need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives Pods their own IP addresses and a single DNS name for a set of Pods, and can load-balance across them. -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< glossary_definition term_id="service" length="short" >}} @@ -20,9 +20,9 @@ With Kubernetes you don't need to modify your application to use an unfamiliar s Kubernetes gives Pods their own IP addresses and a single DNS name for a set of Pods, and can load-balance across them. 
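To ground the service-topology.md hunk above, a sketch of a Service that prefers node-local endpoints, then zone-local, then any endpoint; `topologyKeys` is alpha and gated by the ServiceTopology feature gate (the name and selector are illustrative):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service            # illustrative name
spec:
  selector:
    app: my-app
  ports:
  - protocol: TCP
    port: 80
  topologyKeys:
  - "kubernetes.io/hostname"
  - "topology.kubernetes.io/zone"
  - "*"                       # fall back to any endpoint
```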
-{{% /capture %}} -{{% capture body %}} + + ## Motivation @@ -1227,12 +1227,13 @@ SCTP is not supported on Windows based nodes. The kube-proxy does not support the management of SCTP associations when it is in userspace mode. {{< /warning >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) * Read about [Ingress](/docs/concepts/services-networking/ingress/) * Read about [EndpointSlices](/docs/concepts/services-networking/endpoint-slices/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/storage/dynamic-provisioning.md b/content/en/docs/concepts/storage/dynamic-provisioning.md index 77885981f7..dc82e5c2c8 100644 --- a/content/en/docs/concepts/storage/dynamic-provisioning.md +++ b/content/en/docs/concepts/storage/dynamic-provisioning.md @@ -5,11 +5,11 @@ reviewers: - thockin - msau42 title: Dynamic Volume Provisioning -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Dynamic volume provisioning allows storage volumes to be created on-demand. Without dynamic provisioning, cluster administrators have to manually make @@ -19,10 +19,10 @@ to represent them in Kubernetes. The dynamic provisioning feature eliminates the need for cluster administrators to pre-provision storage. Instead, it automatically provisions storage when it is requested by users. -{{% /capture %}} -{{% capture body %}} + + ## Background @@ -133,4 +133,4 @@ Zones in a Region. Single-Zone storage backends should be provisioned in the Zon Pods are scheduled. This can be accomplished by setting the [Volume Binding Mode](/docs/concepts/storage/storage-classes/#volume-binding-mode). -{{% /capture %}} + diff --git a/content/en/docs/concepts/storage/persistent-volumes.md b/content/en/docs/concepts/storage/persistent-volumes.md index c365e02171..2c3140de83 100644 --- a/content/en/docs/concepts/storage/persistent-volumes.md +++ b/content/en/docs/concepts/storage/persistent-volumes.md @@ -11,18 +11,18 @@ feature: description: > Automatically mount the storage system of your choice, whether from local storage, a public cloud provider such as GCP or AWS, or a network storage system such as NFS, iSCSI, Gluster, Ceph, Cinder, or Flocker. -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + This document describes the current state of _persistent volumes_ in Kubernetes. Familiarity with [volumes](/docs/concepts/storage/volumes/) is suggested. -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -746,8 +746,9 @@ and need persistent storage, it is recommended that you use the following patter dynamic storage support (in which case the user should create a matching PV) or the cluster has no storage system (in which case the user cannot deploy config requiring PVCs). -{{% /capture %}} - {{% capture whatsnext %}} + + ## {{% heading "whatsnext" %}} + * Learn more about [Creating a PersistentVolume](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolume). * Learn more about [Creating a PersistentVolumeClaim](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim). 
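Since the persistent-volumes.md hunk above points at creating a PersistentVolumeClaim, a minimal sketch of one (the claim name, size, and storage class are illustrative and must match what your cluster offers):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myclaim               # illustrative name
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
  storageClassName: slow      # must name an existing StorageClass
```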
@@ -759,4 +760,3 @@ and need persistent storage, it is recommended that you use the following patter * [PersistentVolumeSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumespec-v1-core) * [PersistentVolumeClaim](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core) * [PersistentVolumeClaimSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaimspec-v1-core) -{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/concepts/storage/storage-classes.md b/content/en/docs/concepts/storage/storage-classes.md index 1ea7c236d9..d6b3a9e332 100644 --- a/content/en/docs/concepts/storage/storage-classes.md +++ b/content/en/docs/concepts/storage/storage-classes.md @@ -5,19 +5,19 @@ reviewers: - thockin - msau42 title: Storage Classes -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + This document describes the concept of a StorageClass in Kubernetes. Familiarity with [volumes](/docs/concepts/storage/volumes/) and [persistent volumes](/docs/concepts/storage/persistent-volumes) is suggested. -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -821,4 +821,4 @@ Delaying volume binding allows the scheduler to consider all of a Pod's scheduling constraints when choosing an appropriate PersistentVolume for a PersistentVolumeClaim. -{{% /capture %}} + diff --git a/content/en/docs/concepts/storage/storage-limits.md b/content/en/docs/concepts/storage/storage-limits.md index 295ed467a2..fb6cffed9c 100644 --- a/content/en/docs/concepts/storage/storage-limits.md +++ b/content/en/docs/concepts/storage/storage-limits.md @@ -5,10 +5,10 @@ reviewers: - thockin - msau42 title: Node-specific Volume Limits -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + This page describes the maximum number of volumes that can be attached to a Node for various cloud providers. @@ -18,9 +18,9 @@ how many volumes can be attached to a Node. It is important for Kubernetes to respect those limits. Otherwise, Pods scheduled on a Node could get stuck waiting for volumes to attach. -{{% /capture %}} -{{% capture body %}} + + ## Kubernetes default limits @@ -78,4 +78,4 @@ Refer to the [CSI specifications](https://github.com/container-storage-interface * For volumes managed by in-tree plugins that have been migrated to a CSI driver, the maximum number of volumes will be the one reported by the CSI driver. -{{% /capture %}} + diff --git a/content/en/docs/concepts/storage/volume-pvc-datasource.md b/content/en/docs/concepts/storage/volume-pvc-datasource.md index 2f29fb9bb9..ac8d16041d 100644 --- a/content/en/docs/concepts/storage/volume-pvc-datasource.md +++ b/content/en/docs/concepts/storage/volume-pvc-datasource.md @@ -5,18 +5,18 @@ reviewers: - thockin - msau42 title: CSI Volume Cloning -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + This document describes the concept of cloning existing CSI Volumes in Kubernetes. Familiarity with [Volumes](/docs/concepts/storage/volumes) is suggested. -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -70,4 +70,4 @@ The result is a new PVC with the name `clone-of-pvc-1` that has the exact same c Upon availability of the new PVC, the cloned PVC is consumed the same as other PVC. It's also expected at this point that the newly created PVC is an independent object. 
It can be consumed, cloned, snapshotted, or deleted independently and without consideration for it's original dataSource PVC. This also implies that the source is not linked in any way to the newly created clone, it may also be modified or deleted without affecting the newly created clone. -{{% /capture %}} + diff --git a/content/en/docs/concepts/storage/volume-snapshot-classes.md b/content/en/docs/concepts/storage/volume-snapshot-classes.md index dcb9516519..f50db19520 100644 --- a/content/en/docs/concepts/storage/volume-snapshot-classes.md +++ b/content/en/docs/concepts/storage/volume-snapshot-classes.md @@ -7,20 +7,20 @@ reviewers: - xing-yang - yuxiangqian title: Volume Snapshot Classes -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + This document describes the concept of `VolumeSnapshotClass` in Kubernetes. Familiarity with [volume snapshots](/docs/concepts/storage/volume-snapshots/) and [storage classes](/docs/concepts/storage/storage-classes) is suggested. -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -69,4 +69,4 @@ Volume snapshot classes have parameters that describe volume snapshots belonging the volume snapshot class. Different parameters may be accepted depending on the `driver`. -{{% /capture %}} + diff --git a/content/en/docs/concepts/storage/volume-snapshots.md b/content/en/docs/concepts/storage/volume-snapshots.md index 0ad66e75ae..a6cc122086 100644 --- a/content/en/docs/concepts/storage/volume-snapshots.md +++ b/content/en/docs/concepts/storage/volume-snapshots.md @@ -7,19 +7,19 @@ reviewers: - xing-yang - yuxiangqian title: Volume Snapshots -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.17" state="beta" >}} In Kubernetes, a _VolumeSnapshot_ represents a snapshot of a volume on a storage system. This document assumes that you are already familiar with Kubernetes [persistent volumes](/docs/concepts/storage/persistent-volumes/). -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -154,4 +154,4 @@ the *dataSource* field in the `PersistentVolumeClaim` object. For more details, see [Volume Snapshot and Restore Volume from Snapshot](/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support). -{{% /capture %}} + diff --git a/content/en/docs/concepts/storage/volumes.md b/content/en/docs/concepts/storage/volumes.md index 7930bf0fe6..fe71c2e86e 100644 --- a/content/en/docs/concepts/storage/volumes.md +++ b/content/en/docs/concepts/storage/volumes.md @@ -5,11 +5,11 @@ reviewers: - thockin - msau42 title: Volumes -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + On-disk files in a Container are ephemeral, which presents some problems for non-trivial applications when running in Containers. First, when a Container @@ -20,10 +20,10 @@ Kubernetes `Volume` abstraction solves both of these problems. Familiarity with [Pods](/docs/user-guide/pods) is suggested. -{{% /capture %}} -{{% capture body %}} + + ## Background @@ -1481,6 +1481,7 @@ sudo systemctl restart docker -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + * Follow an example of [deploying WordPress and MySQL with Persistent Volumes](/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/). 
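The volume-pvc-datasource.md hunk above describes cloning by pointing a new claim's `dataSource` at an existing PVC in the same namespace. A minimal sketch (names, size, and storage class are illustrative):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: clone-of-pvc-1
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: cloning   # illustrative; must support CSI cloning
  resources:
    requests:
      storage: 5Gi            # must be at least the size of the source
  dataSource:
    kind: PersistentVolumeClaim
    name: pvc-1               # the existing claim to clone
```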
-{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/controllers/cron-jobs.md b/content/en/docs/concepts/workloads/controllers/cron-jobs.md index 233e0ca661..aca2996147 100644 --- a/content/en/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/en/docs/concepts/workloads/controllers/cron-jobs.md @@ -4,11 +4,11 @@ reviewers: - soltysh - janetkuo title: CronJob -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.8" state="beta" >}} @@ -33,8 +33,8 @@ append 11 characters to the job name provided and there is a constraint that the maximum length of a Job name is no more than 63 characters. -{{% /capture %}} -{{% capture body %}} + + ## CronJob @@ -82,12 +82,13 @@ be down for the same period as the previous example (`08:29:00` to `10:21:00`,) The CronJob is only responsible for creating Jobs that match its schedule, and the Job in turn is responsible for the management of the Pods it represents. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Cron expression format](https://pkg.go.dev/github.com/robfig/cron?tab=doc#hdr-CRON_Expression_Format) documents the format of CronJob `schedule` fields. For instructions on creating and working with cron jobs, and for an example of CronJob manifest, see [Running automated tasks with cron jobs](/docs/tasks/job/automated-tasks-with-cron-jobs). -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/controllers/daemonset.md b/content/en/docs/concepts/workloads/controllers/daemonset.md index a5d0df82ce..7f1b5c4630 100644 --- a/content/en/docs/concepts/workloads/controllers/daemonset.md +++ b/content/en/docs/concepts/workloads/controllers/daemonset.md @@ -6,11 +6,11 @@ reviewers: - janetkuo - kow3ns title: DaemonSet -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + A _DaemonSet_ ensures that all (or some) Nodes run a copy of a Pod. As nodes are added to the cluster, Pods are added to them. As nodes are removed from the cluster, those Pods are garbage @@ -18,18 +18,18 @@ collected. Deleting a DaemonSet will clean up the Pods it created. Some typical uses of a DaemonSet are: -- running a cluster storage daemon, such as `glusterd`, `ceph`, on each node. -- running a logs collection daemon on every node, such as `fluentd` or `filebeat`. -- running a node monitoring daemon on every node, such as [Prometheus Node Exporter](https://github.com/prometheus/node_exporter), [Flowmill](https://github.com/Flowmill/flowmill-k8s/), [Sysdig Agent](https://docs.sysdig.com), `collectd`, [Dynatrace OneAgent](https://www.dynatrace.com/technologies/kubernetes-monitoring/), [AppDynamics Agent](https://docs.appdynamics.com/display/CLOUD/Container+Visibility+with+Kubernetes), [Datadog agent](https://docs.datadoghq.com/agent/kubernetes/daemonset_setup/), [New Relic agent](https://docs.newrelic.com/docs/integrations/kubernetes-integration/installation/kubernetes-installation-configuration), Ganglia `gmond`, [Instana Agent](https://www.instana.com/supported-integrations/kubernetes-monitoring/) or [Elastic Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/running-on-kubernetes.html). +- running a cluster storage daemon on every node +- running a logs collection daemon on every node +- running a node monitoring daemon on every node In a simple case, one DaemonSet, covering all nodes, would be used for each type of daemon. 
A more complex setup might use multiple DaemonSets for a single type of daemon, but with different flags and/or different memory and cpu requests for different hardware types. -{{% /capture %}} -{{% capture body %}} + + ## Writing a DaemonSet Spec @@ -95,7 +95,7 @@ another DaemonSet, or via another workload resource such as ReplicaSet. Otherwi Kubernetes will not stop you from doing this. One case where you might want to do this is manually create a Pod with a different value on a node for testing. -### Running Pods on Only Some Nodes +### Running Pods on select Nodes If you specify a `.spec.template.spec.nodeSelector`, then the DaemonSet controller will create Pods on nodes which match that [node @@ -103,7 +103,7 @@ selector](/docs/concepts/scheduling-eviction/assign-pod-node/). Likewise if you then DaemonSet controller will create Pods on nodes which match that [node affinity](/docs/concepts/scheduling-eviction/assign-pod-node/). If you do not specify either, then the DaemonSet controller will create Pods on all nodes. -## How Daemon Pods are Scheduled +## How Daemon Pods are scheduled ### Scheduled by default scheduler @@ -144,7 +144,6 @@ In addition, `node.kubernetes.io/unschedulable:NoSchedule` toleration is added automatically to DaemonSet Pods. The default scheduler ignores `unschedulable` Nodes when scheduling DaemonSet Pods. - ### Taints and Tolerations Although Daemon Pods respect @@ -152,17 +151,14 @@ Although Daemon Pods respect the following tolerations are added to DaemonSet Pods automatically according to the related features. -| Toleration Key | Effect | Version | Description | -| ---------------------------------------- | ---------- | ------- | ------------------------------------------------------------ | -| `node.kubernetes.io/not-ready` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. | -| `node.kubernetes.io/unreachable` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. | -| `node.kubernetes.io/disk-pressure` | NoSchedule | 1.8+ | | -| `node.kubernetes.io/memory-pressure` | NoSchedule | 1.8+ | | -| `node.kubernetes.io/unschedulable` | NoSchedule | 1.12+ | DaemonSet pods tolerate unschedulable attributes by default scheduler. | -| `node.kubernetes.io/network-unavailable` | NoSchedule | 1.12+ | DaemonSet pods, who uses host network, tolerate network-unavailable attributes by default scheduler. | - - - +| Toleration Key | Effect | Version | Description | +| ---------------------------------------- | ---------- | ------- | ----------- | +| `node.kubernetes.io/not-ready` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. | +| `node.kubernetes.io/unreachable` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. | +| `node.kubernetes.io/disk-pressure` | NoSchedule | 1.8+ | | +| `node.kubernetes.io/memory-pressure` | NoSchedule | 1.8+ | | +| `node.kubernetes.io/unschedulable` | NoSchedule | 1.12+ | DaemonSet pods tolerate unschedulable attributes by default scheduler. | +| `node.kubernetes.io/network-unavailable` | NoSchedule | 1.12+ | DaemonSet pods, who uses host network, tolerate network-unavailable attributes by default scheduler. 
| ## Communicating with Daemon Pods @@ -195,7 +191,7 @@ You can [perform a rolling update](/docs/tasks/manage-daemon/update-daemon-set/) ## Alternatives to DaemonSet -### Init Scripts +### Init scripts It is certainly possible to run daemon processes by directly starting them on a node (e.g. using `init`, `upstartd`, or `systemd`). This is perfectly fine. However, there are several advantages to @@ -233,4 +229,4 @@ number of replicas and rolling out updates are more important than controlling e the Pod runs on. Use a DaemonSet when it is important that a copy of a Pod always run on all or certain hosts, and when it needs to start before other Pods. -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/controllers/deployment.md b/content/en/docs/concepts/workloads/controllers/deployment.md index 2610380641..6287c0d98e 100644 --- a/content/en/docs/concepts/workloads/controllers/deployment.md +++ b/content/en/docs/concepts/workloads/controllers/deployment.md @@ -7,11 +7,11 @@ feature: description: > Kubernetes progressively rolls out changes to your application or its configuration, while monitoring application health to ensure it doesn't kill all your instances at the same time. If something goes wrong, Kubernetes will rollback the change for you. Take advantage of a growing ecosystem of deployment solutions. -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + A _Deployment_ provides declarative updates for [Pods](/docs/concepts/workloads/pods/pod/) and [ReplicaSets](/docs/concepts/workloads/controllers/replicaset/). @@ -22,10 +22,10 @@ You describe a _desired state_ in a Deployment, and the Deployment {{< glossary_ Do not manage ReplicaSets owned by a Deployment. Consider opening an issue in the main Kubernetes repository if your use case is not covered below. {{< /note >}} -{{% /capture %}} -{{% capture body %}} + + ## Use Case @@ -1166,4 +1166,4 @@ a paused Deployment and one that is not paused, is that any changes into the Pod Deployment will not trigger new rollouts as long as it is paused. A Deployment is not paused by default when it is created. -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/controllers/garbage-collection.md b/content/en/docs/concepts/workloads/controllers/garbage-collection.md index 45303b66e8..c11386bc1c 100644 --- a/content/en/docs/concepts/workloads/controllers/garbage-collection.md +++ b/content/en/docs/concepts/workloads/controllers/garbage-collection.md @@ -1,18 +1,18 @@ --- title: Garbage Collection -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + The role of the Kubernetes garbage collector is to delete certain objects that once had an owner, but no longer have an owner. 
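For the garbage-collection.md hunk above: ownership is recorded in `metadata.ownerReferences`, which the garbage collector consults when deciding whether a dependent still has an owner. A sketch of that metadata on a ReplicaSet-managed Pod (the uid is a placeholder; real values are filled in by the API server):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-repset-abc123      # illustrative name
  ownerReferences:
  - apiVersion: apps/v1
    kind: ReplicaSet
    name: my-repset
    uid: d9607e19-f88f-11e6-a518-42010a800195   # placeholder uid
    controller: true
    blockOwnerDeletion: true
spec:
  containers:
  - name: app
    image: nginx
```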
-{{% /capture %}} -{{% capture body %}} + + ## Owners and dependents @@ -168,16 +168,17 @@ See [kubeadm/#149](https://github.com/kubernetes/kubeadm/issues/149#issuecomment Tracked at [#26120](https://github.com/kubernetes/kubernetes/issues/26120) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Design Doc 1](https://git.k8s.io/community/contributors/design-proposals/api-machinery/garbage-collection.md) [Design Doc 2](https://git.k8s.io/community/contributors/design-proposals/api-machinery/synchronous-garbage-collection.md) -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md index 8c03d14268..11751dd166 100644 --- a/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md +++ b/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md @@ -3,7 +3,7 @@ reviewers: - erictune - soltysh title: Jobs - Run to Completion -content_template: templates/concept +content_type: concept feature: title: Batch execution description: > @@ -11,7 +11,7 @@ feature: weight: 70 --- -{{% capture overview %}} + A Job creates one or more Pods and ensures that a specified number of them successfully terminate. As pods successfully complete, the Job tracks the successful completions. When a specified number @@ -24,10 +24,10 @@ due to a node hardware failure or a node reboot). You can also use a Job to run multiple Pods in parallel. -{{% /capture %}} -{{% capture body %}} + + ## Running an example Job @@ -140,19 +140,19 @@ See section [specifying your own pod selector](#specifying-your-own-pod-selector There are three main types of task suitable to run as a Job: 1. Non-parallel Jobs - - normally, only one Pod is started, unless the Pod fails. - - the Job is complete as soon as its Pod terminates successfully. + - normally, only one Pod is started, unless the Pod fails. + - the Job is complete as soon as its Pod terminates successfully. 1. Parallel Jobs with a *fixed completion count*: - - specify a non-zero positive value for `.spec.completions`. - - the Job represents the overall task, and is complete when there is one successful Pod for each value in the range 1 to `.spec.completions`. - - **not implemented yet:** Each Pod is passed a different index in the range 1 to `.spec.completions`. + - specify a non-zero positive value for `.spec.completions`. + - the Job represents the overall task, and is complete when there is one successful Pod for each value in the range 1 to `.spec.completions`. + - **not implemented yet:** Each Pod is passed a different index in the range 1 to `.spec.completions`. 1. Parallel Jobs with a *work queue*: - - do not specify `.spec.completions`, default to `.spec.parallelism`. - - the Pods must coordinate amongst themselves or an external service to determine what each should work on. For example, a Pod might fetch a batch of up to N items from the work queue. - - each Pod is independently capable of determining whether or not all its peers are done, and thus that the entire Job is done. - - when _any_ Pod from the Job terminates with success, no new Pods are created. - - once at least one Pod has terminated with success and all Pods are terminated, then the Job is completed with success. - - once any Pod has exited with success, no other Pod should still be doing any work for this task or writing any output. They should all be in the process of exiting. 
+ - do not specify `.spec.completions`, default to `.spec.parallelism`. + - the Pods must coordinate amongst themselves or an external service to determine what each should work on. For example, a Pod might fetch a batch of up to N items from the work queue. + - each Pod is independently capable of determining whether or not all its peers are done, and thus that the entire Job is done. + - when _any_ Pod from the Job terminates with success, no new Pods are created. + - once at least one Pod has terminated with success and all Pods are terminated, then the Job is completed with success. + - once any Pod has exited with success, no other Pod should still be doing any work for this task or writing any output. They should all be in the process of exiting. For a _non-parallel_ Job, you can leave both `.spec.completions` and `.spec.parallelism` unset. When both are unset, both are defaulted to 1. @@ -478,4 +478,4 @@ object, but maintains complete control over what Pods are created and how work i You can use a [`CronJob`](/docs/concepts/workloads/controllers/cron-jobs/) to create a Job that will run at specified times/dates, similar to the Unix tool `cron`. -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/controllers/replicaset.md b/content/en/docs/concepts/workloads/controllers/replicaset.md index 92cbe60a33..ef2a069ca1 100644 --- a/content/en/docs/concepts/workloads/controllers/replicaset.md +++ b/content/en/docs/concepts/workloads/controllers/replicaset.md @@ -4,19 +4,19 @@ reviewers: - bprashanth - madhusudancs title: ReplicaSet -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + A ReplicaSet's purpose is to maintain a stable set of replica Pods running at any given time. As such, it is often used to guarantee the availability of a specified number of identical Pods. -{{% /capture %}} -{{% capture body %}} + + ## How a ReplicaSet works @@ -366,4 +366,4 @@ The two serve the same purpose, and behave similarly, except that a ReplicationC selector requirements as described in the [labels user guide](/docs/concepts/overview/working-with-objects/labels/#label-selectors). As such, ReplicaSets are preferred over ReplicationControllers -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md index fe20980ce6..2cc8284940 100644 --- a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md @@ -9,11 +9,11 @@ feature: description: > Restarts containers that fail, replaces and reschedules containers when nodes die, kills containers that don't respond to your user-defined health check, and doesn't advertise them to clients until they are ready to serve. -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< note >}} A [`Deployment`](/docs/concepts/workloads/controllers/deployment/) that configures a [`ReplicaSet`](/docs/concepts/workloads/controllers/replicaset/) is now the recommended way to set up replication. @@ -23,10 +23,10 @@ A _ReplicationController_ ensures that a specified number of pod replicas are ru time. In other words, a ReplicationController makes sure that a pod or a homogeneous set of pods is always up and available. 
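To ground the "fixed completion count" pattern from the jobs-run-to-completion.md hunk above, a minimal sketch: the Job succeeds once 8 Pods terminate successfully, running at most 2 at a time (names and the command are illustrative):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: process-batch         # illustrative name
spec:
  completions: 8              # total successful Pods required
  parallelism: 2              # upper bound on concurrently running Pods
  template:
    spec:
      containers:
      - name: worker
        image: busybox
        command: ["sh", "-c", "echo processing one work item"]
      restartPolicy: Never
```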
-{{% /capture %}} -{{% capture body %}} + + ## How a ReplicationController Works @@ -285,4 +285,4 @@ safe to terminate when the machine is otherwise ready to be rebooted/shutdown. Read [Run Stateless AP Replication Controller](/docs/tutorials/stateless-application/run-stateless-ap-replication-controller/). -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/controllers/statefulset.md b/content/en/docs/concepts/workloads/controllers/statefulset.md index 661955cb48..4f8429d668 100644 --- a/content/en/docs/concepts/workloads/controllers/statefulset.md +++ b/content/en/docs/concepts/workloads/controllers/statefulset.md @@ -7,18 +7,18 @@ reviewers: - kow3ns - smarterclayton title: StatefulSets -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + StatefulSet is the workload API object used to manage stateful applications. {{< glossary_definition term_id="statefulset" length="all" >}} -{{% /capture %}} -{{% capture body %}} + + ## Using StatefulSets @@ -270,12 +270,13 @@ After reverting the template, you must also delete any Pods that StatefulSet had already attempted to run with the bad configuration. StatefulSet will then begin to recreate the Pods using the reverted template. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Follow an example of [deploying a stateful application](/docs/tutorials/stateful-application/basic-stateful-set/). * Follow an example of [deploying Cassandra with Stateful Sets](/docs/tutorials/stateful-application/cassandra/). * Follow an example of [running a replicated stateful application](/docs/tasks/run-application/run-replicated-stateful-application/). -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md index c5b88198f4..0d2657d8ce 100644 --- a/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/en/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -2,11 +2,11 @@ reviewers: - janetkuo title: TTL Controller for Finished Resources -content_template: templates/concept +content_type: concept weight: 65 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.12" state="alpha" >}} @@ -21,12 +21,12 @@ Alpha Disclaimer: this feature is currently alpha, and can be enabled with both `TTLAfterFinished`. -{{% /capture %}} -{{% capture body %}} + + ## TTL Controller @@ -78,12 +78,13 @@ In Kubernetes, it's required to run NTP on all nodes to avoid time skew. Clocks aren't always correct, but the difference should be very small. Please be aware of this risk when setting a non-zero TTL. 
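For the ttlafterfinished.md hunk above, a minimal sketch of a Job that the TTL controller cleans up 100 seconds after it finishes; the feature is alpha and requires the TTLAfterFinished feature gate:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi-with-ttl
spec:
  ttlSecondsAfterFinished: 100   # Job and its Pods become deletable 100s after finishing
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
```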
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Clean up Jobs automatically](/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically) [Design doc](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/0026-ttl-after-finish.md) -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/pods/disruptions.md b/content/en/docs/concepts/workloads/pods/disruptions.md index 9983a67fc8..589bde5668 100644 --- a/content/en/docs/concepts/workloads/pods/disruptions.md +++ b/content/en/docs/concepts/workloads/pods/disruptions.md @@ -4,11 +4,11 @@ reviewers: - foxish - davidopp title: Disruptions -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + This guide is for application owners who want to build highly available applications, and thus need to understand what types of Disruptions can happen to Pods. @@ -16,10 +16,10 @@ what types of Disruptions can happen to Pods. It is also for Cluster Administrators who want to perform automated cluster actions, like upgrading and autoscaling clusters. -{{% /capture %}} -{{% capture body %}} + + ## Voluntary and Involuntary Disruptions @@ -262,13 +262,14 @@ the nodes in your cluster, such as a node or system software upgrade, here are s disruptions largely overlaps with work to support autoscaling and tolerating involuntary disruptions. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Follow steps to protect your application by [configuring a Pod Disruption Budget](/docs/tasks/run-application/configure-pdb/). * Learn more about [draining nodes](/docs/tasks/administer-cluster/safely-drain-node/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/pods/ephemeral-containers.md b/content/en/docs/concepts/workloads/pods/ephemeral-containers.md index c6506df69c..c1852df707 100644 --- a/content/en/docs/concepts/workloads/pods/ephemeral-containers.md +++ b/content/en/docs/concepts/workloads/pods/ephemeral-containers.md @@ -3,11 +3,11 @@ reviewers: - verb - yujuhong title: Ephemeral Containers -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + {{< feature-state state="alpha" for_k8s_version="v1.16" >}} @@ -23,9 +23,9 @@ clusters. In accordance with the [Kubernetes Deprecation Policy]( significantly in the future or be removed entirely. {{< /warning >}} -{{% /capture %}} -{{% capture body %}} + + ## Understanding ephemeral containers @@ -192,4 +192,4 @@ example: kubectl attach -it example-pod -c debugger ``` -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/pods/init-containers.md b/content/en/docs/concepts/workloads/pods/init-containers.md index 2cf2bf85b5..6e67a9e0ca 100644 --- a/content/en/docs/concepts/workloads/pods/init-containers.md +++ b/content/en/docs/concepts/workloads/pods/init-containers.md @@ -2,20 +2,20 @@ reviewers: - erictune title: Init Containers -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + This page provides an overview of init containers: specialized containers that run before app containers in a {{< glossary_tooltip text="Pod" term_id="pod" >}}. Init containers can contain utilities or setup scripts not present in an app image. You can specify init containers in the Pod specification alongside the `containers` array (which describes app containers). 
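As a sketch of the init-containers.md overview above, assuming a Service named `mydb` that the app depends on (all names are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
spec:
  initContainers:
  # Runs to completion before the app container starts.
  - name: wait-for-db
    image: busybox:1.28
    command: ["sh", "-c", "until nslookup mydb; do echo waiting for mydb; sleep 2; done"]
  containers:
  - name: myapp
    image: busybox:1.28
    command: ["sh", "-c", "echo app is running && sleep 3600"]
```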
-{{% /capture %}} -{{% capture body %}} + + ## Understanding init containers @@ -317,12 +317,13 @@ reasons: forcing a restart, and the init container completion record has been lost due to garbage collection. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Read about [creating a Pod that has an init container](/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container) * Learn how to [debug init containers](/docs/tasks/debug-application-cluster/debug-init-containers/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md index 74031a3722..60973c46a8 100644 --- a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md @@ -1,20 +1,20 @@ --- title: Pod Lifecycle -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + {{< comment >}}Updated: 4/14/2015{{< /comment >}} {{< comment >}}Edited and moved to Concepts section: 2/2/17{{< /comment >}} This page describes the lifecycle of a Pod. -{{% /capture %}} -{{% capture body %}} + + ## Pod phase @@ -216,7 +216,7 @@ a list of additional conditions that the kubelet evaluates for Pod readiness. Readiness gates are determined by the current state of `status.condition` fields for the Pod. If Kubernetes cannot find such a condition in the `status.conditions` field of a Pod, the status of the condition -is defaulted to "`False`". Below is an example: +is defaulted to "`False`". Here is an example: @@ -390,10 +390,11 @@ spec: * Node controller sets Pod `phase` to Failed. * If running under a controller, Pod is recreated elsewhere. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Get hands-on experience [attaching handlers to Container lifecycle events](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). @@ -403,7 +404,7 @@ spec: * Learn more about [Container lifecycle hooks](/docs/concepts/containers/container-lifecycle-hooks/). -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/pods/pod-overview.md b/content/en/docs/concepts/workloads/pods/pod-overview.md index 2bc2951259..e963b7ace6 100644 --- a/content/en/docs/concepts/workloads/pods/pod-overview.md +++ b/content/en/docs/concepts/workloads/pods/pod-overview.md @@ -2,19 +2,19 @@ reviewers: - erictune title: Pod Overview -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 60 --- -{{% capture overview %}} + This page provides an overview of `Pod`, the smallest deployable object in the Kubernetes object model. -{{% /capture %}} -{{% capture body %}} + + ## Understanding Pods A *Pod* is the basic execution unit of a Kubernetes application--the smallest and simplest unit in the Kubernetes object model that you create or deploy. A Pod represents processes running on your {{< glossary_tooltip term_id="cluster" text="cluster" >}}. @@ -111,12 +111,13 @@ For example, a Deployment controller ensures that the running Pods match the cur On Nodes, the {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} does not directly observe or manage any of the details around pod templates and updates; those details are abstracted away. That abstraction and separation of concerns simplifies system semantics, and makes it feasible to extend the cluster's behavior without changing existing code. 
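To illustrate the pod template abstraction described in the pod-overview.md hunk above, a minimal sketch of a workload resource that stamps Pods out of `.spec.template` (names and the command are illustrative):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: hello
spec:
  template:
    # The pod template: the Job controller creates Pods from this blueprint.
    spec:
      containers:
      - name: hello
        image: busybox
        command: ["sh", "-c", "echo Hello, Kubernetes! && sleep 10"]
      restartPolicy: OnFailure
```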
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Pods](/docs/concepts/workloads/pods/pod/) * [The Distributed System Toolkit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns) explains common layouts for Pods with more than one container * Learn more about Pod behavior: * [Pod Termination](/docs/concepts/workloads/pods/pod/#termination-of-pods) * [Pod Lifecycle](/docs/concepts/workloads/pods/pod-lifecycle/) -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md index 6e6f878449..2b16894e6b 100644 --- a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md +++ b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md @@ -1,18 +1,18 @@ --- title: Pod Topology Spread Constraints -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.18" state="beta" >}} You can use _topology spread constraints_ to control how {{< glossary_tooltip text="Pods" term_id="Pod" >}} are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. This can help to achieve high availability as well as efficient resource utilization. -{{% /capture %}} -{{% capture body %}} + + ## Prerequisites @@ -236,8 +236,7 @@ scheduled - more packed or more scattered. The "EvenPodsSpread" feature provides flexible options to distribute Pods evenly across different topology domains - to achieve high availability or cost-saving. This can also help on rolling update -workloads and scaling out replicas smoothly. -See [Motivation](https://github.com/kubernetes/enhancements/blob/master/keps/sig-scheduling/20190221-pod-topology-spread.md#motivation) for more details. +workloads and scaling out replicas smoothly. See [Motivation](https://github.com/kubernetes/enhancements/tree/master/keps/sig-scheduling/895-pod-topology-spread#motivation) for more details. ## Known Limitations @@ -246,4 +245,4 @@ As of 1.18, at which this feature is Beta, there are some known limitations: - Scaling down a Deployment may result in imbalanced Pods distribution. - Pods matched on tainted nodes are respected. See [Issue 80921](https://github.com/kubernetes/kubernetes/issues/80921) -{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/pods/pod.md b/content/en/docs/concepts/workloads/pods/pod.md index d64227be48..d87dc92cb2 100644 --- a/content/en/docs/concepts/workloads/pods/pod.md +++ b/content/en/docs/concepts/workloads/pods/pod.md @@ -1,19 +1,19 @@ --- reviewers: title: Pods -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + _Pods_ are the smallest deployable units of computing that can be created and managed in Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## What is a Pod? @@ -206,4 +206,4 @@ describes the object in detail. When creating the manifest for a Pod object, make sure the name specified is a valid [DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). 
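For example, a minimal manifest with a valid DNS subdomain name (lowercase alphanumeric characters, `-` or `.`, at most 253 characters); the name and image here are illustrative placeholders:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: web-frontend-01   # valid: lowercase alphanumerics and '-'
spec:
  containers:
  - name: web
    image: nginx:1.17   # placeholder image
```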
-{{% /capture %}} + diff --git a/content/en/docs/concepts/workloads/pods/podpreset.md b/content/en/docs/concepts/workloads/pods/podpreset.md index a1906c8b99..f77e34a3f9 100644 --- a/content/en/docs/concepts/workloads/pods/podpreset.md +++ b/content/en/docs/concepts/workloads/pods/podpreset.md @@ -2,20 +2,20 @@ reviewers: - jessfraz title: Pod Preset -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.6" state="alpha" >}} This page provides an overview of PodPresets, which are objects for injecting certain information into pods at creation time. The information can include secrets, volumes, volume mounts, and environment variables. -{{% /capture %}} -{{% capture body %}} + + ## Understanding Pod presets A PodPreset is an API resource for injecting additional runtime requirements @@ -82,12 +82,13 @@ There may be instances where you wish for a Pod to not be altered by any Pod Preset mutations. In these cases, you can add an annotation in the Pod Spec of the form: `podpreset.admission.kubernetes.io/exclude: "true"`. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + See [Injecting data into a Pod using PodPreset](/docs/tasks/inject-data-application/podpreset/) For more information about the background, see the [design proposal for PodPreset](https://git.k8s.io/community/contributors/design-proposals/service-catalog/pod-preset.md). -{{% /capture %}} + diff --git a/content/en/docs/contribute/_index.md b/content/en/docs/contribute/_index.md index c6aa348125..2f93af4a35 100644 --- a/content/en/docs/contribute/_index.md +++ b/content/en/docs/contribute/_index.md @@ -1,5 +1,5 @@ --- -content_template: templates/concept +content_type: concept title: Contribute to Kubernetes docs linktitle: Contribute main_menu: true @@ -10,7 +10,7 @@ card: title: Start contributing --- -{{% capture overview %}} + This website is maintained by [Kubernetes SIG Docs](/docs/contribute/#get-involved-with-sig-docs). @@ -23,9 +23,9 @@ Kubernetes documentation contributors: Kubernetes documentation welcomes improvements from all contributors, new and experienced! -{{% /capture %}} -{{% capture body %}} + + ## Getting started @@ -48,7 +48,7 @@ roles and permissions. - [Open a pull request using GitHub](/docs/contribute/new-content/new-content/#changes-using-github) to existing documentation and learn more about filing issues in GitHub. - [Review pull requests](/docs/contribute/review/reviewing-prs/) from other Kubernetes community members for accuracy and language. - Read the Kubernetes [content](/docs/contribute/style/content-guide/) and [style guides](/docs/contribute/style/style-guide/) so you can leave informed comments. -- Learn how to [use page templates](/docs/contribute/style/page-templates/) and [Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/) to make bigger changes. +- Learn about [page content types](/docs/contribute/style/page-content-types/) and [Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/). ## Next steps @@ -75,4 +75,4 @@ SIG Docs communicates with different methods: - Read the [contributor cheatsheet](https://github.com/kubernetes/community/tree/master/contributors/guide/contributor-cheatsheet) to get involved with Kubernetes feature development. - Submit a [blog post or case study](/docs/contribute/new-content/blogs-case-studies/). 
-{{% /capture %}} + diff --git a/content/en/docs/contribute/advanced.md b/content/en/docs/contribute/advanced.md index 2ed3a4afd6..9cf6a65883 100644 --- a/content/en/docs/contribute/advanced.md +++ b/content/en/docs/contribute/advanced.md @@ -1,11 +1,11 @@ --- title: Advanced contributing slug: advanced -content_template: templates/concept +content_type: concept weight: 98 --- -{{% capture overview %}} + This page assumes that you understand how to [contribute to new content](/docs/contribute/new-content/overview) and @@ -13,9 +13,9 @@ This page assumes that you understand how to to learn about more ways to contribute. You need to use the Git command line client and other tools for some of these tasks. -{{% /capture %}} -{{% capture body %}} + + ## Be the PR Wrangler for a week @@ -245,4 +245,4 @@ When you’re ready to stop recording, click Stop. The video uploads automatically to YouTube. -{{% /capture %}} + diff --git a/content/en/docs/contribute/generate-ref-docs/contribute-upstream.md b/content/en/docs/contribute/generate-ref-docs/contribute-upstream.md index 6c4d93cd40..5f4edbcc77 100644 --- a/content/en/docs/contribute/generate-ref-docs/contribute-upstream.md +++ b/content/en/docs/contribute/generate-ref-docs/contribute-upstream.md @@ -1,10 +1,10 @@ --- title: Contributing to the Upstream Kubernetes Code -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + This page shows how to contribute to the upstream `kubernetes/kubernetes` project. You can fix bugs found in the Kubernetes API documentation or the content of @@ -16,9 +16,10 @@ API or the `kube-*` components from the upstream code, see the following instruc - [Generating Reference Documentation for the Kubernetes API](/docs/contribute/generate-ref-docs/kubernetes-api/) - [Generating Reference Documentation for the Kubernetes Components and Tools](/docs/contribute/generate-ref-docs/kubernetes-components/) -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + - You need to have these tools installed: @@ -35,9 +36,9 @@ API or the `kube-*` components from the upstream code, see the following instruc For more information, see [Creating a Pull Request](https://help.github.com/articles/creating-a-pull-request/) and [GitHub Standard Fork & Pull Request Workflow](https://gist.github.com/Chaser324/ce0505fbed06b947d962). -{{% /capture %}} -{{% capture steps %}} + + ## The big picture @@ -230,12 +231,13 @@ the API reference documentation. You are now ready to follow the [Generating Reference Documentation for the Kubernetes API](/docs/contribute/generate-ref-docs/kubernetes-api/) guide to generate the [published Kubernetes API reference documentation](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/). 
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Generating Reference Documentation for the Kubernetes API](/docs/contribute/generate-ref-docs/kubernetes-api/) * [Generating Reference Docs for Kubernetes Components and Tools](/docs/contribute/generate-ref-docs/kubernetes-components/) * [Generating Reference Documentation for kubectl Commands](/docs/contribute/generate-ref-docs/kubectl/) -{{% /capture %}} + diff --git a/content/en/docs/contribute/generate-ref-docs/kubectl.md b/content/en/docs/contribute/generate-ref-docs/kubectl.md index 5930a1f452..f057ce6800 100644 --- a/content/en/docs/contribute/generate-ref-docs/kubectl.md +++ b/content/en/docs/contribute/generate-ref-docs/kubectl.md @@ -1,10 +1,10 @@ --- title: Generating Reference Documentation for kubectl Commands -content_template: templates/task +content_type: task weight: 90 --- -{{% capture overview %}} + This page shows how to generate the `kubectl` command reference. @@ -21,15 +21,16 @@ reference page, see [Generating Reference Pages for Kubernetes Components and Tools](/docs/home/contribute/generated-reference/kubernetes-components/). {{< /note >}} -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "prerequisites-ref-docs.md" >}} -{{% /capture %}} -{{% capture steps %}} + + ## Setting up the local repositories @@ -253,12 +254,13 @@ A few minutes after your pull request is merged, your updated reference topics will be visible in the [published documentation](/docs/home). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Generating Reference Documentation Quickstart](/docs/contribute/generate-ref-docs/quickstart/) * [Generating Reference Documentation for Kubernetes Components and Tools](/docs/contribute/generate-ref-docs/kubernetes-components/) * [Generating Reference Documentation for the Kubernetes API](/docs/contribute/generate-ref-docs/kubernetes-api/) -{{% /capture %}} + diff --git a/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md b/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md index 5060d3b6e0..10482eda97 100644 --- a/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md +++ b/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md @@ -1,10 +1,10 @@ --- title: Generating Reference Documentation for the Kubernetes API -content_template: templates/task +content_type: task weight: 50 --- -{{% capture overview %}} + This page shows how to update the Kubernetes API reference documentation. @@ -18,15 +18,16 @@ If you find bugs in the generated documentation, you need to If you need only to regenerate the reference documentation from the [OpenAPI](https://github.com/OAI/OpenAPI-Specification) spec, continue reading this page. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "prerequisites-ref-docs.md" >}} -{{% /capture %}} -{{% capture steps %}} + + ## Setting up the local repositories @@ -194,12 +195,13 @@ Submit your changes as a Monitor your pull request, and respond to reviewer comments as needed. Continue to monitor your pull request until it has been merged. 
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Generating Reference Documentation Quickstart](/docs/contribute/generate-ref-docs/quickstart/) * [Generating Reference Docs for Kubernetes Components and Tools](/docs/contribute/generate-ref-docs/kubernetes-components/) * [Generating Reference Documentation for kubectl Commands](/docs/contribute/generate-ref-docs/kubectl/) -{{% /capture %}} + diff --git a/content/en/docs/contribute/generate-ref-docs/kubernetes-components.md b/content/en/docs/contribute/generate-ref-docs/kubernetes-components.md index f71db7afb1..be84beeb08 100644 --- a/content/en/docs/contribute/generate-ref-docs/kubernetes-components.md +++ b/content/en/docs/contribute/generate-ref-docs/kubernetes-components.md @@ -1,34 +1,36 @@ --- title: Generating Reference Pages for Kubernetes Components and Tools -content_template: templates/task +content_type: task weight: 120 --- -{{% capture overview %}} + This page shows how to build the Kubernetes component and tool reference pages. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Start with the [Prerequisites section](/docs/contribute/generate-ref-docs/quickstart/#before-you-begin) in the Reference Documentation Quickstart guide. -{{% /capture %}} -{{% capture steps %}} + + Follow the [Reference Documentation Quickstart](/docs/contribute/generate-ref-docs/quickstart/) to generate the Kubernetes component and tool reference pages. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Generating Reference Documentation Quickstart](/docs/contribute/generate-ref-docs/quickstart/) * [Generating Reference Documentation for kubectl Commands](/docs/contribute/generate-ref-docs/kubectl/) * [Generating Reference Documentation for the Kubernetes API](/docs/contribute/generate-ref-docs/kubernetes-api/) * [Contributing to the Upstream Kubernetes Project for Documentation](/docs/contribute/generate-ref-docs/contribute-upstream/) -{{% /capture %}} + diff --git a/content/en/docs/contribute/generate-ref-docs/quickstart.md b/content/en/docs/contribute/generate-ref-docs/quickstart.md index 9645c64170..df5cdbb95f 100644 --- a/content/en/docs/contribute/generate-ref-docs/quickstart.md +++ b/content/en/docs/contribute/generate-ref-docs/quickstart.md @@ -1,24 +1,25 @@ --- title: Quickstart -content_template: templates/task +content_type: task weight: 40 --- -{{% capture overview %}} + This page shows how to use the `update-imported-docs` script to generate the Kubernetes reference documentation. The script automates the build setup and generates the reference documentation for a release. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "prerequisites-ref-docs.md" >}} -{{% /capture %}} -{{% capture steps %}} + + ## Getting the docs repository @@ -246,9 +247,10 @@ A few minutes after your pull request is merged, your updated reference topics will be visible in the [published documentation](/docs/home/). 
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + To generate the individual reference documentation by manually setting up the required build repositories and running the build targets, see the following guides: @@ -257,4 +259,4 @@ running the build targets, see the following guides: * [Generating Reference Documentation for kubectl Commands](/docs/contribute/generate-ref-docs/kubectl/) * [Generating Reference Documentation for the Kubernetes API](/docs/contribute/generate-ref-docs/kubernetes-api/) -{{% /capture %}} + diff --git a/content/en/docs/contribute/localization.md b/content/en/docs/contribute/localization.md index cb3cf03187..0c698305b9 100644 --- a/content/en/docs/contribute/localization.md +++ b/content/en/docs/contribute/localization.md @@ -1,6 +1,6 @@ --- title: Localizing Kubernetes documentation -content_template: templates/concept +content_type: concept approvers: - remyleone - rlenferink @@ -12,13 +12,13 @@ card: title: Translating the docs --- -{{% capture overview %}} + This page shows you how to [localize](https://blog.mozilla.org/l10n/2011/12/14/i18n-vs-l10n-whats-the-diff/) the docs for a different language. -{{% /capture %}} -{{% capture body %}} + + ## Getting started @@ -279,13 +279,14 @@ SIG Docs welcomes upstream contributions and corrections to the English source. You can also help add or improve content to an existing localization. Join the [Slack channel](https://kubernetes.slack.com/messages/C1J0BPD2M/) for the localization, and start opening PRs to help. Please limit pull requests to a single localization since pull requests that change content in multiple localizations could be difficult to review. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Once a localization meets requirements for workflow and minimum output, SIG docs will: - Enable language selection on the website - Publicize the localization's availability through [Cloud Native Computing Foundation](https://www.cncf.io/about/) (CNCF) channels, including the [Kubernetes blog](https://kubernetes.io/blog/). -{{% /capture %}} + diff --git a/content/en/docs/contribute/new-content/blogs-case-studies.md b/content/en/docs/contribute/new-content/blogs-case-studies.md index 90c50ae6e1..76acbd2d41 100644 --- a/content/en/docs/contribute/new-content/blogs-case-studies.md +++ b/content/en/docs/contribute/new-content/blogs-case-studies.md @@ -2,19 +2,19 @@ title: Submitting blog posts and case studies linktitle: Blogs and case studies slug: blogs-case-studies -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Anyone can write a blog post and submit it for review. Case studies require extensive review before they're approved. -{{% /capture %}} -{{% capture body %}} + + ## Write a blog post @@ -52,8 +52,9 @@ Have a look at the source for the Refer to the [case study guidelines](https://github.com/cncf/foundation/blob/master/case-study-guidelines.md) and submit your request as outlined in the guidelines. 
-{{% /capture %}} -{{% capture whatsnext %}} -{{% /capture %}} +## {{% heading "whatsnext" %}} + + + diff --git a/content/en/docs/contribute/new-content/new-features.md b/content/en/docs/contribute/new-content/new-features.md index 68087a2a79..54db84da8f 100644 --- a/content/en/docs/contribute/new-content/new-features.md +++ b/content/en/docs/contribute/new-content/new-features.md @@ -1,7 +1,7 @@ --- title: Documenting a feature for a release linktitle: Documenting for a release -content_template: templates/concept +content_type: concept main_menu: true weight: 20 card: @@ -9,7 +9,7 @@ card: weight: 45 title: Documenting a feature for a release --- -{{% capture overview %}} + Each major Kubernetes release introduces new features that require documentation. New releases also bring updates to existing features and documentation (such as upgrading a feature from alpha to beta). @@ -19,9 +19,9 @@ feature as a pull request to the appropriate development branch of the editorial feedback or edits the draft directly. This section covers the branching conventions and process used during a release by both groups. -{{% /capture %}} -{{% capture body %}} + + ## For documentation contributors @@ -131,4 +131,3 @@ add it to [Alpha/Beta Feature gates](/docs/reference/command-line-tools-referenc as part of your pull request. If your feature is moving out of Alpha, make sure to remove it from that table. -{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/contribute/new-content/open-a-pr.md b/content/en/docs/contribute/new-content/open-a-pr.md index 4407568aff..05a576a74b 100644 --- a/content/en/docs/contribute/new-content/open-a-pr.md +++ b/content/en/docs/contribute/new-content/open-a-pr.md @@ -1,14 +1,14 @@ --- title: Opening a pull request slug: new-content -content_template: templates/concept +content_type: concept weight: 10 card: name: contribute weight: 40 --- -{{% capture overview %}} + {{< note >}} **Code developers**: If you are documenting a new feature for an @@ -22,9 +22,9 @@ If your change is small, or you're unfamiliar with git, read [Changes using GitH If your changes are large, read [Work from a local fork](#fork-the-repo) to learn how to make changes locally on your computer. -{{% /capture %}} -{{% capture body %}} + + ## Changes using GitHub @@ -475,10 +475,11 @@ Most repositories use issue and PR templates. Have a look through some open issues and PRs to get a feel for that team's processes. Make sure to fill out the templates with as much detail as possible when you file issues or PRs. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - Read [Reviewing](/docs/contribute/review/reviewing-prs) to learn more about the review process. -{{% /capture %}} + diff --git a/content/en/docs/contribute/new-content/overview.md b/content/en/docs/contribute/new-content/overview.md index 11f4c067d7..e9ef332430 100644 --- a/content/en/docs/contribute/new-content/overview.md +++ b/content/en/docs/contribute/new-content/overview.md @@ -1,25 +1,25 @@ --- title: Contributing new content overview linktitle: Overview -content_template: templates/concept +content_type: concept main_menu: true weight: 5 --- -{{% capture overview %}} + This section contains information you should know before contributing new content. -{{% /capture %}} -{{% capture body %}} + + ## Contributing basics - Write Kubernetes documentation in Markdown and build the Kubernetes site using [Hugo](https://gohugo.io/). - The source is in [GitHub](https://github.com/kubernetes/website).
You can find Kubernetes documentation at `/content/en/docs/`. Some of the reference documentation is automatically generated from scripts in the `update-imported-docs/` directory. -- [Page templates](/docs/contribute/style/page-templates/) control the presentation of documentation content in Hugo. +- [Page content types](/docs/contribute/style/page-content-types/) describe the presentation of documentation content in Hugo. - In addition to the standard Hugo shortcodes, we use a number of [custom Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/) in our documentation to control the presentation of content. - Documentation source is available in multiple languages in `/content/`. Each language has its own folder with a two-letter code determined by the [ISO 639-1 standard](https://www.loc.gov/standards/iso639-2/php/code_list.php). For example, English documentation source is stored in `/content/en/docs/`. - For more information about contributing to documentation in multiple languages or starting a new translation, see [localization](/docs/contribute/localization). @@ -58,4 +58,4 @@ Limit pull requests to one language per PR. If you need to make an identical cha The [doc contributors tools](https://github.com/kubernetes/website/tree/master/content/en/docs/doc-contributor-tools) directory in the `kubernetes/website` repository contains tools to help your contribution journey go more smoothly. -{{% /capture %}} + diff --git a/content/en/docs/contribute/participating.md b/content/en/docs/contribute/participating.md index 3f491dc856..681c53f994 100644 --- a/content/en/docs/contribute/participating.md +++ b/content/en/docs/contribute/participating.md @@ -1,13 +1,13 @@ --- title: Participating in SIG Docs -content_template: templates/concept +content_type: concept weight: 60 card: name: contribute weight: 60 --- -{{% capture overview %}} + SIG Docs is one of the [special interest groups](https://github.com/kubernetes/community/blob/master/sig-list.md) @@ -30,9 +30,9 @@ The rest of this document outlines some unique ways these roles function within SIG Docs, which is responsible for maintaining one of the most public-facing aspects of Kubernetes -- the Kubernetes website and documentation. -{{% /capture %}} -{{% capture body %}} + + ## Roles and responsibilities @@ -302,9 +302,10 @@ SIG Docs approvers. Here's how it works. specific roles, such as [PR Wrangler](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week) or [SIG Docs chairperson](#sig-docs-chairperson). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + For more information about contributing to the Kubernetes documentation, see: @@ -312,4 +313,4 @@ For more information about contributing to the Kubernetes documentation, see: - [Reviewing content](/docs/contribute/review/reviewing-prs) - [Documentation style guide](/docs/contribute/style/) -{{% /capture %}} + diff --git a/content/en/docs/contribute/review/_index.md b/content/en/docs/contribute/review/_index.md index bc70e3c6f1..d2a1a5c906 100644 --- a/content/en/docs/contribute/review/_index.md +++ b/content/en/docs/contribute/review/_index.md @@ -3,12 +3,12 @@ title: Reviewing changes weight: 30 --- -{{% capture overview %}} + This section describes how to review content. 
-{{% /capture %}} -{{% capture body %}} -{{% /capture %}} + + + diff --git a/content/en/docs/contribute/review/for-approvers.md b/content/en/docs/contribute/review/for-approvers.md index dccc6cfe38..0cddbcba6a 100644 --- a/content/en/docs/contribute/review/for-approvers.md +++ b/content/en/docs/contribute/review/for-approvers.md @@ -2,11 +2,11 @@ title: Reviewing for approvers and reviewers linktitle: For approvers and reviewers slug: for-approvers -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + SIG Docs [Reviewers](/docs/contribute/participating/#reviewers) and [Approvers](/docs/contribute/participating/#approvers) do a few extra things when reviewing a change. @@ -19,10 +19,10 @@ requests (PRs) that are not already under active review. In addition to the rotation, a bot assigns reviewers and approvers for the PR based on the owners for the affected files. -{{% /capture %}} -{{% capture body %}} + + ## Reviewing a PR @@ -224,4 +224,3 @@ If this is a documentation issue, please re-open this issue. ``` -{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/contribute/review/reviewing-prs.md b/content/en/docs/contribute/review/reviewing-prs.md index cb432a97ba..3c271aa44f 100644 --- a/content/en/docs/contribute/review/reviewing-prs.md +++ b/content/en/docs/contribute/review/reviewing-prs.md @@ -1,11 +1,11 @@ --- title: Reviewing pull requests -content_template: templates/concept +content_type: concept main_menu: true weight: 10 --- -{{% capture overview %}} + Anyone can review a documentation pull request. Visit the [pull requests](https://github.com/kubernetes/website/pulls) section in the Kubernetes website repository to see open pull requests. @@ -19,9 +19,9 @@ Before reviewing, it's a good idea to: [style guide](/docs/contribute/style/style-guide/) so you can leave informed comments. - Understand the different [roles and responsibilities](/docs/contribute/participating/#roles-and-responsibilities) in the Kubernetes documentation community. -{{% /capture %}} -{{% capture body %}} + + ## Before you begin @@ -86,7 +86,7 @@ When reviewing, use the following as a starting point. - Did this PR change or remove a page title, slug/alias or anchor link? If so, are there broken links as a result of this PR? Is there another option, like changing the page title without changing the slug? - Does the PR introduce a new page? If so: - - Is the page using the right [page template](/docs/contribute/style/page-templates/) and associated Hugo shortcodes? + - Is the page using the right [page content type](/docs/contribute/style/page-content-types/) and associated Hugo shortcodes? - Does the page appear correctly in the section's side navigation (or at all)? - Should the page appear on the [Docs Home](/docs/home/) listing? - Do the changes show up in the Netlify preview? Be particularly vigilant about lists, code blocks, tables, notes and images. @@ -95,4 +95,3 @@ When reviewing, use the following as a starting point. For small issues with a PR, like typos or whitespace, prefix your comments with `nit:`. This lets the author know the issue is non-critical. 
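For instance, a hypothetical non-critical review comment might read: `nit: change "Kuberentes" to "Kubernetes"`.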
-{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/contribute/style/content-guide.md b/content/en/docs/contribute/style/content-guide.md index b5d8ed5d02..2f367c9a81 100644 --- a/content/en/docs/contribute/style/content-guide.md +++ b/content/en/docs/contribute/style/content-guide.md @@ -1,11 +1,11 @@ --- title: Documentation Content Guide linktitle: Content guide -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + This page contains guidelines for Kubernetes documentation. @@ -17,9 +17,9 @@ You can register for Kubernetes Slack at http://slack.k8s.io/. For information on creating new content for the Kubernetes docs, follow the [style guide](/docs/contribute/style/style-guide). -{{% /capture %}} -{{% capture body %}} + + ## Overview @@ -69,10 +69,11 @@ ask for help in [#sig-docs on Kubernetes Slack](https://kubernetes.slack.com/mes If you have questions about allowed content, join the [Kubernetes Slack](http://slack.k8s.io/) #sig-docs channel and ask! -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Read the [Style guide](/docs/contribute/style/style-guide). -{{% /capture %}} + diff --git a/content/en/docs/contribute/style/content-organization.md b/content/en/docs/contribute/style/content-organization.md index e93cf8126e..249bebf0fb 100644 --- a/content/en/docs/contribute/style/content-organization.md +++ b/content/en/docs/contribute/style/content-organization.md @@ -1,17 +1,17 @@ --- title: Content organization -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + This site uses Hugo. In Hugo, [content organization](https://gohugo.io/content-management/organization/) is a core concept. -{{% /capture %}} -{{% capture body %}} + + {{% note %}} **Hugo Tip:** Start Hugo with `hugo server --navigateToChanged` for content edit-sessions. @@ -126,12 +126,13 @@ Some important notes to the files in the bundles: The [SASS](https://sass-lang.com/) source of the stylesheets for this site is stored in `assets/sass` and is automatically built by Hugo. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn about [custom Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/) * Learn about the [Style guide](/docs/contribute/style/style-guide) * Learn about the [Content guide](/docs/contribute/style/content-guide) -{{% /capture %}} + diff --git a/content/en/docs/contribute/style/hugo-shortcodes/index.md b/content/en/docs/contribute/style/hugo-shortcodes/index.md index 60479c7fec..12d00ae01a 100644 --- a/content/en/docs/contribute/style/hugo-shortcodes/index.md +++ b/content/en/docs/contribute/style/hugo-shortcodes/index.md @@ -2,16 +2,16 @@ approvers: - chenopis title: Custom Hugo Shortcodes -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + This page explains the custom Hugo shortcodes that can be used in Kubernetes markdown documentation. Read more about shortcodes in the [Hugo documentation](https://gohugo.io/content-management/shortcodes). -{{% /capture %}} -{{% capture body %}} + + ## Feature state @@ -235,12 +235,13 @@ Renders to: {{< tab name="JSON File" include="podtemplate" />}} {{< /tabs >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn about [Hugo](https://gohugo.io/). -* Learn about [writing a new topic](/docs/home/contribute/write-new-topic/). -* Learn about [using page templates](/docs/home/contribute/page-templates/). 
+* Learn about [writing a new topic](/docs/contribute/style/write-new-topic/). +* Learn about [page content types](/docs/contribute/style/page-content-types/). * Learn about [staging your changes](/docs/home/contribute/stage-documentation-changes/) * Learn about [creating a pull request](/docs/home/contribute/create-pull-request/). -{{% /capture %}} + diff --git a/content/en/docs/contribute/style/page-content-types.md b/content/en/docs/contribute/style/page-content-types.md new file mode 100644 index 0000000000..a14c74cebf --- /dev/null +++ b/content/en/docs/contribute/style/page-content-types.md @@ -0,0 +1,213 @@ +--- +title: Page content types +content_type: concept +weight: 30 +card: + name: contribute + weight: 30 +--- + + + +The Kubernetes documentation uses several page content types: + +- Concept +- Task +- Tutorial +- Reference + + + +## Content sections + +Each page content type contains a number of sections declared as +Markdown comments and HTML headings. HTML section headings render using the +`heading` shortcode. This page structure helps to maintain the different content types. + +Examples of Markdown comments defining page content sections: + +```markdown +<!-- overview --> +``` + +```markdown +<!-- body --> +``` + +To create common headings in your content pages, use the `heading` shortcode with +a heading string. + +Examples of heading strings: + +- whatsnext +- prerequisites +- objectives +- cleanup +- synopsis +- seealso +- options + +To create a `whatsnext` heading, add the heading shortcode +to your page as follows: + +```none +## {{%/* heading "whatsnext" */%}} +``` + +The `whatsnext` heading displays as: + +## {{% heading "whatsnext" %}} + +You can declare a `prerequisites` heading as: + +```none +## {{%/* heading "prerequisites" */%}} +``` + +The `prerequisites` heading displays as: + +## {{% heading "prerequisites" %}} + +The `heading` shortcode takes one string parameter. The string matches the prefix +of a variable in the `i18n/.toml` files. + +`i18n/en.toml`: + +```toml +[whatsnext_heading] +other = "What's next" +``` + +`i18n/ko.toml`: + +```toml +[whatsnext_heading] +other = "다음 내용" +``` + +## Concept + +A concept page explains some aspect of Kubernetes. For example, a concept +page might describe the Kubernetes Deployment object and explain the role it +plays as an application once it is deployed, scaled, and updated. Typically, concept +pages don't include sequences of steps, but instead provide links to tasks or +tutorials. + +To write a new concept page, create a Markdown file in a subdirectory of the +`/content/en/docs/concepts` directory. + +Concept pages are divided into three sections: + +| Page section | +|---------------| +| overview | +| body | +| whatsnext | + +Fill each section with content. Follow these guidelines: + +- Organize content with H2 and H3 headings. +- For `overview`, set the topic's context with a single paragraph. +- For `body`, explain the concept. +- For `whatsnext`, provide a bulleted list of topics (5 maximum) to learn more about the concept. + +[Annotations](/docs/concepts/overview/working-with-objects/annotations/) is a published example of a concept page. + +## Task + +A task page shows how to do a single thing, typically by giving a short +sequence of steps. Task pages have minimal explanation, but often provide links +to conceptual topics that provide related background and knowledge.
+ +To write a new task page, create a Markdown file in a subdirectory of the +`/content/en/docs/tasks` directory, with the following characteristics: + +| Page section | +|---------------| +| overview | +| prerequisites | +| steps | +| discussion | +| whatsnext | + +Within each section, write your content. Use the following guidelines: + +- Use a minimum of H2 headings (with two leading `#` characters). The sections + themselves are titled automatically by the template. +- For `overview`, use a paragraph to set context for the entire topic. +- For `prerequisites`, use bullet lists when possible. Start adding additional + prerequisites below the `include`. The default prerequisites include a running Kubernetes cluster. +- For `steps`, use numbered lists. +- For discussion, use normal content to expand upon the information covered + in `steps`. +- For `whatsnext`, give a bullet list of up to 5 topics the reader might be + interested in reading next. + +An example of a published task topic is [Using an HTTP proxy to access the Kubernetes API](/docs/tasks/extend-kubernetes/http-proxy-access-api/). + +## Tutorial + +A tutorial page shows how to accomplish a goal that is larger than a single +task. Typically a tutorial page has several sections, each of which has a +sequence of steps. For example, a tutorial might provide a walkthrough of a +code sample that illustrates a certain feature of Kubernetes. Tutorials can +include surface-level explanations, but should link to related concept topics +for deep explanations. + +To write a new tutorial page, create a Markdown file in a subdirectory of the +`/content/en/docs/tutorials` directory, with the following characteristics: + +| Page section | +|---------------| +| overview | +| prerequisites | +| objectives | +| lessoncontent | +| cleanup | +| whatsnext | + +Within each section, write your content. Use the following guidelines: + +- Use a minimum of H2 headings (with two leading `#` characters). The sections + themselves are titled automatically by the template. +- For `overview`, use a paragraph to set context for the entire topic. +- For `prerequisites`, use bullet lists when possible. Add additional + prerequisites below the ones included by default. +- For `objectives`, use bullet lists. +- For `lessoncontent`, use a mix of numbered lists and narrative content as + appropriate. +- For `cleanup`, use numbered lists to describe the steps to clean up the + state of the cluster after finishing the task. +- For `whatsnext`, give a bullet list of up to 5 topics the reader might be + interested in reading next. + +An example of a published tutorial topic is +[Running a Stateless Application Using a Deployment](/docs/tutorials/stateless-application/run-stateless-application-deployment/). + +## Reference + +A component tool reference page shows the description and flag options output for +a Kubernetes component tool. Each page output depends upon the component tool's source +code in `kubernetes/kubernetes`. 
+ +A tool reference page has several possible sections: + +| Page section | +|------------------------------| +| synopsis | +| options | +| options from parent commands | +| examples | +| seealso | + +Examples of published tool reference pages are: + +- [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) +- [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) +- [kubectl](/docs/reference/kubectl/kubectl/) + +## {{% heading "whatsnext" %}} + +- Learn about the [Style guide](/docs/contribute/style/style-guide/) +- Learn about the [Content guide](/docs/contribute/style/content-guide/) +- Learn about [content organization](/docs/contribute/style/content-organization/) diff --git a/content/en/docs/contribute/style/page-templates.md b/content/en/docs/contribute/style/page-templates.md deleted file mode 100644 index 7521ee3ecb..0000000000 --- a/content/en/docs/contribute/style/page-templates.md +++ /dev/null @@ -1,222 +0,0 @@ ---- -title: Using Page Templates -content_template: templates/concept -weight: 30 -card: - name: contribute - weight: 30 ---- - -{{% capture overview %}} - -When contributing new topics, apply one of the following templates to them. -This standardizes the user experience of a given page. - -The page templates are in the -[`layouts/partials/templates`](https://git.k8s.io/website/layouts/partials/templates) -directory of the [`kubernetes/website`](https://github.com/kubernetes/website) -repository. - -{{< note >}} -Every new topic needs to use a template. If you are unsure which -template to use for a new topic, start with the -[concept template](#concept-template). -{{< /note >}} - - -{{% /capture %}} - - -{{% capture body %}} - -## Concept template - -A concept page explains some aspect of Kubernetes. For example, a concept -page might describe the Kubernetes Deployment object and explain the role it -plays as an application once it is deployed, scaled, and updated. Typically, concept -pages don't include sequences of steps, but instead provide links to tasks or -tutorials. - - -To write a new concept page, create a Markdown file in a subdirectory of the -`/content/en/docs/concepts` directory, with the following characteristics: - -- In the page's YAML front-matter, set `content_template: templates/concept`. -- In the page's body, set the required `capture` variables and any optional - ones you want to include: - - | Variable | Required? | - |---------------|-----------| - | overview | yes | - | body | yes | - | whatsnext | no | - - The page's body will look like this (remove any optional captures you don't - need): - - ``` - {{%/* capture overview */%}} - - {{%/* /capture */%}} - - {{%/* capture body */%}} - - {{%/* /capture */%}} - - {{%/* capture whatsnext */%}} - - {{%/* /capture */%}} - ``` - -- Fill each section with content. Follow these guidelines: - - Organize content with H2 and H3 headings. - - For `overview`, set the topic's context with a single paragraph. - - For `body`, explain the concept. - - For `whatsnext`, provide a bulleted list of topics (5 maximum) to learn more about the concept. - -[Annotations](/docs/concepts/overview/working-with-objects/annotations/) is a published example of the concept template. This page also uses the concept template. - -## Task template - -A task page shows how to do a single thing, typically by giving a short -sequence of steps. Task pages have minimal explanation, but often provide links -to conceptual topics that provide related background and knowledge. 
- -To write a new task page, create a Markdown file in a subdirectory of the -`/content/en/docs/tasks` directory, with the following characteristics: - -- In the page's YAML front-matter, set `content_template: templates/task`. -- In the page's body, set the required `capture` variables and any optional - ones you want to include: - - | Variable | Required? | - |---------------|-----------| - | overview | yes | - | prerequisites | yes | - | steps | no | - | discussion | no | - | whatsnext | no | - - The page's body will look like this (remove any optional captures you don't - need): - - ``` - {{%/* capture overview */%}} - - {{%/* /capture */%}} - - {{%/* capture prerequisites */%}} - - {{}} {{}} - - {{%/* /capture */%}} - - {{%/* capture steps */%}} - - {{%/* /capture */%}} - - {{%/* capture discussion */%}} - - {{%/* /capture */%}} - - {{%/* capture whatsnext */%}} - - {{%/* /capture */%}} - ``` - -- Within each section, write your content. Use the following guidelines: - - Use a minimum of H2 headings (with two leading `#` characters). The sections - themselves are titled automatically by the template. - - For `overview`, use a paragraph to set context for the entire topic. - - For `prerequisites`, use bullet lists when possible. Start adding additional - prerequisites below the `include`. The default prerequisites include a running Kubernetes cluster. - - For `steps`, use numbered lists. - - For discussion, use normal content to expand upon the information covered - in `steps`. - - For `whatsnext`, give a bullet list of up to 5 topics the reader might be - interested in reading next. - -An example of a published topic that uses the task template is [Using an HTTP proxy to access the Kubernetes API](/docs/tasks/access-kubernetes-api/http-proxy-access-api). - -## Tutorial template - -A tutorial page shows how to accomplish a goal that is larger than a single -task. Typically a tutorial page has several sections, each of which has a -sequence of steps. For example, a tutorial might provide a walkthrough of a -code sample that illustrates a certain feature of Kubernetes. Tutorials can -include surface-level explanations, but should link to related concept topics -for deep explanations. - -To write a new tutorial page, create a Markdown file in a subdirectory of the -`/content/en/docs/tutorials` directory, with the following characteristics: - -- In the page's YAML front-matter, set `content_template: templates/tutorial`. -- In the page's body, set the required `capture` variables and any optional - ones you want to include: - - | Variable | Required? | - |---------------|-----------| - | overview | yes | - | prerequisites | yes | - | objectives | yes | - | lessoncontent | yes | - | cleanup | no | - | whatsnext | no | - - The page's body will look like this (remove any optional captures you don't - need): - - ``` - {{%/* capture overview */%}} - - {{%/* /capture */%}} - - {{%/* capture prerequisites */%}} - - {{}} {{}} - - {{%/* /capture */%}} - - {{%/* capture objectives */%}} - - {{%/* /capture */%}} - - {{%/* capture lessoncontent */%}} - - {{%/* /capture */%}} - - {{%/* capture cleanup */%}} - - {{%/* /capture */%}} - - {{%/* capture whatsnext */%}} - - {{%/* /capture */%}} - ``` - -- Within each section, write your content. Use the following guidelines: - - Use a minimum of H2 headings (with two leading `#` characters). The sections - themselves are titled automatically by the template. - - For `overview`, use a paragraph to set context for the entire topic. 
- - For `prerequisites`, use bullet lists when possible. Add additional - prerequisites below the ones included by default. - - For `objectives`, use bullet lists. - - For `lessoncontent`, use a mix of numbered lists and narrative content as - appropriate. - - For `cleanup`, use numbered lists to describe the steps to clean up the - state of the cluster after finishing the task. - - For `whatsnext`, give a bullet list of up to 5 topics the reader might be - interested in reading next. - -An example of a published topic that uses the tutorial template is -[Running a Stateless Application Using a Deployment](/docs/tutorials/stateless-application/run-stateless-application-deployment/). - -{{% /capture %}} - -{{% capture whatsnext %}} - -- Learn about the [Style guide](/docs/contribute/style/style-guide/) -- Learn about the [Content guide](/docs/contribute/style/content-guide/) -- Learn about [content organization](/docs/contribute/style/content-organization/) - -{{% /capture %}} diff --git a/content/en/docs/contribute/style/style-guide.md b/content/en/docs/contribute/style/style-guide.md index 34dce6adac..78ddd4a787 100644 --- a/content/en/docs/contribute/style/style-guide.md +++ b/content/en/docs/contribute/style/style-guide.md @@ -1,26 +1,25 @@ --- title: Documentation Style Guide linktitle: Style guide -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + This page gives writing style guidelines for the Kubernetes documentation. These are guidelines, not rules. Use your best judgment, and feel free to propose changes to this document in a pull request. For additional information on creating new content for the Kubernetes -documentation, read the [Documentation Content Guide](/docs/contribute/style/content-guide/) and follow the instructions on -[using page templates](/docs/contribute/style/page-templates/) and [creating a documentation pull request](/docs/contribute/new-content/open-a-pr). +documentation, read the [Documentation Content Guide](/docs/contribute/style/content-guide/). Changes to the style guide are made by SIG Docs as a group. To propose a change or addition, [add it to the agenda](https://docs.google.com/document/d/1ddHwLK3kUMX1wVFIwlksjTk0MsqitBnWPe1LRa1Rx5A/edit) for an upcoming SIG Docs meeting, and attend the meeting to participate in the discussion. -{{% /capture %}} -{{% capture body %}} + + {{< note >}} Kubernetes documentation uses [Blackfriday Markdown Renderer](https://github.com/russross/blackfriday) along with a few [Hugo Shortcodes](/docs/home/contribute/includes/) to support glossary entries, tabs, @@ -212,7 +211,7 @@ The output is similar to this: Code examples and configuration examples that include version information should be consistent with the accompanying text. -If the information is version specific, the Kubernetes version needs to be defined in the `prerequisites` section of the [Task template](/docs/contribute/style/page-templates/#task-template) or the [Tutorial template](/docs/contribute/style/page-templates/#tutorial-template). Once the page is saved, the `prerequisites` section is shown as **Before you begin**. +If the information is version specific, the Kubernetes version needs to be defined in the `prerequisites` section of the [Task template](/docs/contribute/style/page-content-types/#task) or the [Tutorial template](/docs/contribute/style/page-content-types/#tutorial). Once the page is saved, the `prerequisites` section is shown as **Before you begin**. 
To specify the Kubernetes version for a task or tutorial page, include `min-kubernetes-server-version` in the front matter of the page. @@ -585,13 +584,12 @@ The Federation feature provides ... | The new Federation feature provides ... {{< /table >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn about [writing a new topic](/docs/contribute/style/write-new-topic/). -* Learn about [using page templates](/docs/contribute/style/page-templates/). +* Learn about [using page content types](/docs/contribute/style/page-content-types/). * Learn about [staging your changes](/docs/contribute/stage-documentation-changes/) -* Learn about [creating a pull request](/docs/contribute/start/#submit-a-pull-request/). - -{{% /capture %}} +* Learn about [creating a pull request](/docs/contribute/new-content/open-a-pr/). diff --git a/content/en/docs/contribute/style/write-new-topic.md b/content/en/docs/contribute/style/write-new-topic.md index 65dca22f1a..8bd4b8fbe2 100644 --- a/content/en/docs/contribute/style/write-new-topic.md +++ b/content/en/docs/contribute/style/write-new-topic.md @@ -1,19 +1,20 @@ --- title: Writing a new topic -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + This page shows how to create a new topic for the Kubernetes docs. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Create a fork of the Kubernetes documentation repository as described in [Open a PR](/docs/contribute/new-content/open-a-pr/). -{{% /capture %}} -{{% capture steps %}} + + ## Choosing a page type @@ -27,9 +28,8 @@ Task | A task page shows how to do a single thing. The idea is to give readers a Tutorial | A tutorial page shows how to accomplish a goal that ties together several Kubernetes features. A tutorial might provide several sequences of steps that readers can actually do as they read the page. Or it might provide explanations of related pieces of code. For example, a tutorial could provide a walkthrough of a code sample. A tutorial can include brief explanations of the Kubernetes features that are being tied together, but should link to related concept topics for deep explanations of individual features. {{< /table >}} -Use a template for each new page. Each page type has a -[template](/docs/contribute/style/page-templates/) -that you can use as you write your topic. Using templates helps ensure +Use a [content type](/docs/contribute/style/page-content-types/) for each new page +that you write. Using a content type helps ensure consistency among topics of a given type. ## Choosing a title and filename @@ -37,12 +37,12 @@ consistency among topics of a given type. Choose a title that has the keywords you want search engines to find. Create a filename that uses the words in your title separated by hyphens. For example, the topic with title -[Using an HTTP Proxy to Access the Kubernetes API](/docs/tasks/access-kubernetes-api/http-proxy-access-api/) +[Using an HTTP Proxy to Access the Kubernetes API](/docs/tasks/extend-kubernetes/http-proxy-access-api/) has filename `http-proxy-access-api.md`. You don't need to put "kubernetes" in the filename, because "kubernetes" is already in the URL for the topic, for example: - /docs/tasks/access-kubernetes-api/http-proxy-access-api/ + /docs/tasks/extend-kubernetes/http-proxy-access-api/ ## Adding the topic title to the front matter @@ -159,9 +159,10 @@ For an example of a topic that uses this technique, see Put image files in the `/images` directory.
The preferred image format is SVG. -{{% /capture %}} -{{% capture whatsnext %}} -* Learn about [using page templates](/docs/contribute/page-templates/). + +## {{% heading "whatsnext" %}} + +* Learn about [using page content types](/docs/contribute/style/page-content-types/). * Learn about [creating a pull request](/docs/contribute/new-content/open-a-pr/). -{{% /capture %}} + diff --git a/content/en/docs/contribute/suggesting-improvements.md b/content/en/docs/contribute/suggesting-improvements.md index 19133f379b..e48c2915b9 100644 --- a/content/en/docs/contribute/suggesting-improvements.md +++ b/content/en/docs/contribute/suggesting-improvements.md @@ -1,14 +1,14 @@ --- title: Suggesting content improvements slug: suggest-improvements -content_template: templates/concept +content_type: concept weight: 10 card: name: contribute weight: 20 --- -{{% capture overview %}} + If you notice an issue with Kubernetes documentation, or have an idea for new content, then open an issue. All you need is a [GitHub account](https://github.com/join) and a web browser. @@ -16,9 +16,9 @@ In most cases, new work on Kubernetes documentation begins with an issue in GitH then review, categorize and tag issues as needed. Next, you or another member of the Kubernetes community open a pull request with changes to resolve the issue. -{{% /capture %}} -{{% capture body %}} + + ## Opening an issue @@ -62,4 +62,4 @@ Keep the following in mind when filing an issue: fellow contributors. For example, "The docs are terrible" is not helpful or polite feedback. -{{% /capture %}} + diff --git a/content/en/docs/home/_index.md b/content/en/docs/home/_index.md index 6b6e77c39a..b4f9a2ee66 100644 --- a/content/en/docs/home/_index.md +++ b/content/en/docs/home/_index.md @@ -56,8 +56,8 @@ cards: description: Anyone can contribute, whether you’re new to the project or you’ve been around a long time. button: Contribute to the docs button_path: /docs/contribute -- name: download - title: Download Kubernetes +- name: release-notes + title: Release Notes description: If you are installing Kubernetes or upgrading to the newest version, refer to the current release notes. - name: about title: About the documentation diff --git a/content/en/docs/home/supported-doc-versions.md b/content/en/docs/home/supported-doc-versions.md index 45a6012eaa..bd368b2b54 100644 --- a/content/en/docs/home/supported-doc-versions.md +++ b/content/en/docs/home/supported-doc-versions.md @@ -1,20 +1,20 @@ --- title: Supported Versions of the Kubernetes Documentation -content_template: templates/concept +content_type: concept card: name: about weight: 10 title: Supported Versions of the Documentation --- -{{% capture overview %}} + This website contains documentation for the current version of Kubernetes and the four previous versions of Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Current version @@ -25,6 +25,6 @@ The current version is {{< versions-other >}} -{{% /capture %}} + diff --git a/content/en/docs/reference/_index.md b/content/en/docs/reference/_index.md index 8b0faf5e91..619430875e 100644 --- a/content/en/docs/reference/_index.md +++ b/content/en/docs/reference/_index.md @@ -5,16 +5,16 @@ approvers: linkTitle: "Reference" main_menu: true weight: 70 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + This section of the Kubernetes documentation contains references. 
-{{% /capture %}} -{{% capture body %}} + + ## API Reference @@ -52,4 +52,4 @@ client libraries: An archive of the design docs for Kubernetes functionality. Good starting points are [Kubernetes Architecture](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md) and [Kubernetes Design Overview](https://git.k8s.io/community/contributors/design-proposals). -{{% /capture %}} + diff --git a/content/en/docs/reference/access-authn-authz/abac.md b/content/en/docs/reference/access-authn-authz/abac.md index 40c56a985c..3810942660 100644 --- a/content/en/docs/reference/access-authn-authz/abac.md +++ b/content/en/docs/reference/access-authn-authz/abac.md @@ -5,15 +5,15 @@ reviewers: - deads2k - liggitt title: Using ABAC Authorization -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + Attribute-based access control (ABAC) defines an access control paradigm whereby access rights are granted to users through the use of policies which combine attributes together. -{{% /capture %}} -{{% capture body %}} + + ## Policy File Format To enable `ABAC` mode, specify `--authorization-policy-file=SOME_FILENAME` and `--authorization-mode=ABAC` on startup. @@ -152,5 +152,5 @@ privilege to the API using ABAC, you would add this line to your policy file: The apiserver will need to be restarted to pickup the new policy lines. -{{% /capture %}} + diff --git a/content/en/docs/reference/access-authn-authz/admission-controllers.md b/content/en/docs/reference/access-authn-authz/admission-controllers.md index a254e43a84..e0f5ea0f43 100644 --- a/content/en/docs/reference/access-authn-authz/admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/admission-controllers.md @@ -7,15 +7,15 @@ reviewers: - janetkuo - thockin title: Using Admission Controllers -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + This page provides an overview of Admission Controllers. -{{% /capture %}} -{{% capture body %}} + + ## What are they? An admission controller is a piece of code that intercepts requests to the @@ -32,6 +32,8 @@ which are configured in the API. Admission controllers may be "validating", "mutating", or both. Mutating controllers may modify the objects they admit; validating controllers may not. +Admission controllers limit requests that create, delete, or modify objects, or that connect to a proxy. They do not act on requests that only read objects. + The admission control process proceeds in two phases. In the first phase, mutating admission controllers are run. In the second phase, validating admission controllers are run. Note again that some of the controllers are @@ -610,7 +612,7 @@ node selector. 2. If the namespace lacks such an annotation, use the `clusterDefaultNodeSelector` defined in the `PodNodeSelector` plugin configuration file as the node selector. 3. Evaluate the pod's node selector against the namespace node selector for conflicts. Conflicts result in rejection. -4. Evaluate the pod's node selector against the namespace-specific whitelist defined the plugin configuration file. +4. Evaluate the pod's node selector against the namespace-specific allowed selector defined in the plugin configuration file. Conflicts result in rejection. {{< note >}} @@ -672,15 +674,15 @@ for more information. The PodTolerationRestriction admission controller verifies any conflict between tolerations of a pod and the tolerations of its namespace. It rejects the pod request if there is a conflict.
It then merges the tolerations annotated on the namespace into the tolerations of the pod.
-The resulting tolerations are checked against a whitelist of tolerations annotated to the namespace.
+The resulting tolerations are checked against a list of allowed tolerations annotated to the namespace.
If the check succeeds, the pod request is admitted; otherwise it is rejected.
-If the namespace of the pod does not have any associated default tolerations or a whitelist of
-tolerations annotated, the cluster-level default tolerations or cluster-level whitelist of tolerations are used
+If the namespace of the pod does not have any associated default tolerations or allowed
+tolerations annotated, the cluster-level default tolerations or cluster-level list of allowed tolerations are used
instead if they are specified.

Tolerations to a namespace are assigned via the `scheduler.alpha.kubernetes.io/defaultTolerations` annotation key.
-The whitelist can be added via the `scheduler.alpha.kubernetes.io/tolerationsWhitelist` annotation key.
+The list of allowed tolerations can be added via the `scheduler.alpha.kubernetes.io/tolerationsWhitelist` annotation key.

Example for namespace annotations:

@@ -773,4 +775,4 @@ in the mutating phase.
For earlier versions, there was no concept of validating versus mutating and
the admission controllers ran in the exact order specified.

-{{% /capture %}}
+
diff --git a/content/en/docs/reference/access-authn-authz/authentication.md b/content/en/docs/reference/access-authn-authz/authentication.md
index b240fb4e22..8cb8013c76 100644
--- a/content/en/docs/reference/access-authn-authz/authentication.md
+++ b/content/en/docs/reference/access-authn-authz/authentication.md
@@ -6,15 +6,15 @@ reviewers:
- deads2k
- liggitt
title: Authenticating
-content_template: templates/concept
+content_type: concept
weight: 10
---
-{{% capture overview %}}
+
This page provides an overview of authenticating.

-{{% /capture %}}
-{{% capture body %}}
+
+
## Users in Kubernetes

All Kubernetes clusters have two categories of users: service accounts managed
@@ -860,4 +860,4 @@ RFC3339 timestamp. Presence or absence of an expiry has the following impact:
  }
}
```
-{{% /capture %}}
+
diff --git a/content/en/docs/reference/access-authn-authz/authorization.md b/content/en/docs/reference/access-authn-authz/authorization.md
index 3a942266fc..74c433b8ee 100644
--- a/content/en/docs/reference/access-authn-authz/authorization.md
+++ b/content/en/docs/reference/access-authn-authz/authorization.md
@@ -5,16 +5,16 @@ reviewers:
- deads2k
- liggitt
title: Authorization Overview
-content_template: templates/concept
+content_type: concept
weight: 60
---
-{{% capture overview %}}
+
Learn more about Kubernetes authorization, including details about creating
policies using the supported authorization modules.

-{{% /capture %}}
-{{% capture body %}}
+
+
In Kubernetes, you must be authenticated (logged in) before your request can be
authorized (granted permission to access).
For information about authentication, see
[Controlling Access to the Kubernetes API](/docs/reference/access-authn-authz/controlling-access/).

@@ -197,9 +197,10 @@ namespace can: read all secrets in the namespace; read all config maps in the
namespace; and impersonate any service account in the namespace and take any
action the account could take. This applies regardless of authorization mode.
{{< /caution >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * To learn more about Authentication, see **Authentication** in [Controlling Access to the Kubernetes API](/docs/reference/access-authn-authz/controlling-access/). * To learn more about Admission Control, see [Using Admission Controllers](/docs/reference/access-authn-authz/admission-controllers/). -{{% /capture %}} + diff --git a/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md b/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md index c8c55c08d6..542b5267be 100644 --- a/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md +++ b/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md @@ -2,11 +2,11 @@ reviewers: - jbeda title: Authenticating with Bootstrap Tokens -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.18" state="stable" >}} @@ -16,9 +16,9 @@ to support [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/), but can be u for users that wish to start clusters without `kubeadm`. It is also built to work, via RBAC policy, with the [Kubelet TLS Bootstrapping](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/) system. -{{% /capture %}} -{{% capture body %}} + + ## Bootstrap Tokens Overview Bootstrap Tokens are defined with a specific type @@ -188,4 +188,4 @@ client relying on the signature to bootstrap TLS trust. Consult the [kubeadm implementation details](/docs/reference/setup-tools/kubeadm/implementation-details/) section for more information. -{{% /capture %}} + diff --git a/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md b/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md index 3e81215dd8..fea62e545e 100644 --- a/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md +++ b/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md @@ -4,11 +4,11 @@ reviewers: - mikedanese - munnerz title: Certificate Signing Requests -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.18" state="beta" >}} @@ -21,9 +21,9 @@ A CertificateSigningRequest (CSR) resource is used to request that a certificate by a denoted signer, after which the request may be approved or denied before finally being signed. -{{% /capture %}} -{{% capture body %}} + + ## Request signing process The _CertificateSigningRequest_ resource type allows a client to ask for an X.509 certificate @@ -317,9 +317,10 @@ subresource of the CSR to be signed. As part of this request, the `status.certificate` field should be set to contain the signed certificate. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Read [Manage TLS Certificates in a Cluster](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/) * View the source code for the kube-controller-manager built in [signer](https://github.com/kubernetes/kubernetes/blob/32ec6c212ec9415f604ffc1f4c1f29b782968ff1/pkg/controller/certificates/signer/cfssl_signer.go) @@ -327,4 +328,4 @@ signed certificate. 
* For details of X.509 itself, refer to [RFC 5280](https://tools.ietf.org/html/rfc5280#section-3.1) section 3.1 * For information on the syntax of PKCS#10 certificate signing requests, refer to [RFC 2986](https://tools.ietf.org/html/rfc2986) -{{% /capture %}} + diff --git a/content/en/docs/reference/access-authn-authz/controlling-access.md b/content/en/docs/reference/access-authn-authz/controlling-access.md index 21c08447ff..e0273177aa 100644 --- a/content/en/docs/reference/access-authn-authz/controlling-access.md +++ b/content/en/docs/reference/access-authn-authz/controlling-access.md @@ -3,15 +3,15 @@ reviewers: - erictune - lavalamp title: Controlling Access to the Kubernetes API -content_template: templates/concept +content_type: concept weight: 5 --- -{{% capture overview %}} + This page provides an overview of controlling access to the Kubernetes API. -{{% /capture %}} -{{% capture body %}} + + Users [access the API](/docs/tasks/access-application-cluster/access-cluster/) using `kubectl`, client libraries, or by making REST requests. Both human users and [Kubernetes service accounts](/docs/tasks/configure-pod-container/configure-service-account/) can be @@ -23,7 +23,7 @@ following diagram: ## Transport Security -In a typical Kubernetes cluster, the API serves on port 6443. +In a typical Kubernetes cluster, the API serves on port 443. The API server presents a certificate. This certificate is often self-signed, so `$USER/.kube/config` on the user's machine typically contains the root certificate for the API server's certificate, which when specified @@ -63,9 +63,9 @@ users in its object store. ## Authorization -After the request is authenticated as coming from a specific user, the request must be authorized. This is shown as step **2** in the diagram. +After the request is authenticated as coming from a specific user, the request must be authorized. This is shown as step **2** in the diagram. -A request must include the username of the requester, the requested action, and the object affected by the action. The request is authorized if an existing policy declares that the user has permissions to complete the requested action. +A request must include the username of the requester, the requested action, and the object affected by the action. The request is authorized if an existing policy declares that the user has permissions to complete the requested action. For example, if Bob has the policy below, then he can read pods only in the namespace `projectCaribou`: @@ -97,7 +97,7 @@ If Bob makes the following request, the request is authorized because he is allo } } ``` -If Bob makes a request to write (`create` or `update`) to the objects in the `projectCaribou` namespace, his authorization is denied. If Bob makes a request to read (`get`) objects in a different namespace such as `projectFish`, then his authorization is denied. +If Bob makes a request to write (`create` or `update`) to the objects in the `projectCaribou` namespace, his authorization is denied. If Bob makes a request to read (`get`) objects in a different namespace such as `projectFish`, then his authorization is denied. Kubernetes authorization requires that you use common REST attributes to interact with existing organization-wide or cloud-provider-wide access control systems. It is important to use REST formatting because these control systems might interact with other APIs besides the Kubernetes API. 
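An editor's aside on the authorization flow just described: you can probe these decisions from the command line with `kubectl auth can-i`. A minimal sketch, assuming a cluster where the `projectCaribou` namespace and a user named `bob` exist (both taken from the example above) and where your own credentials are allowed to impersonate users:

```shell
# Ask the API server whether "bob" may perform a given verb on a resource;
# the answer comes from the same authorization modules described above.
kubectl auth can-i list pods --namespace projectCaribou --as bob    # expect "yes" with the policy above
kubectl auth can-i create pods --namespace projectCaribou --as bob  # expect "no": the policy grants read only
```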
@@ -110,10 +110,11 @@ To learn more about Kubernetes authorization, including details about creating p Admission Control Modules are software modules that can modify or reject requests. In addition to the attributes available to Authorization Modules, Admission -Control Modules can access the contents of the object that is being created or updated. -They act on objects being created, deleted, updated or connected (proxy), but not reads. +Control Modules can access the contents of the object that is being created or modified. -Multiple admission controllers can be configured. Each is called in order. +Admission controllers act on requests that create, modify, delete, or connect to (proxy) an object. +Admission controllers do not act on requests that merely read objects. +When multiple admission controllers are configured, they are called in order. This is shown as step **3** in the diagram. @@ -161,4 +162,4 @@ When the cluster is created by `kube-up.sh`, on Google Compute Engine (GCE), and on several other cloud providers, the API server serves on port 443. On GCE, a firewall rule is configured on the project to allow external HTTPS access to the API. Other cluster setup methods vary. -{{% /capture %}} + diff --git a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md index 40d00cddf4..718c9d1147 100644 --- a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md @@ -7,18 +7,17 @@ reviewers: - liggitt - jpbetz title: Dynamic Admission Control -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + In addition to [compiled-in admission plugins](/docs/reference/access-authn-authz/admission-controllers/), admission plugins can be developed as extensions and run as webhooks configured at runtime. This page describes how to build, configure, use, and monitor admission webhooks. -{{% /capture %}} -{{% capture body %}} + ## What are admission webhooks? Admission webhooks are HTTP callbacks that receive admission requests and do @@ -1589,4 +1588,4 @@ If your admission webhooks don't intend to modify the behavior of the Kubernetes plane, exclude the `kube-system` namespace from being intercepted using a [`namespaceSelector`](#matching-requests-namespaceselector). -{{% /capture %}} + diff --git a/content/en/docs/reference/access-authn-authz/node.md b/content/en/docs/reference/access-authn-authz/node.md index 6c0e2f3e99..439d97ff84 100644 --- a/content/en/docs/reference/access-authn-authz/node.md +++ b/content/en/docs/reference/access-authn-authz/node.md @@ -5,15 +5,15 @@ reviewers: - liggitt - ericchiang title: Using Node Authorization -content_template: templates/concept +content_type: concept weight: 90 --- -{{% capture overview %}} + Node authorization is a special-purpose authorization mode that specifically authorizes API requests made by kubelets. -{{% /capture %}} -{{% capture body %}} + + ## Overview The Node authorizer allows a kubelet to perform API operations. This includes: @@ -96,4 +96,4 @@ In 1.8, the binding will not be created at all. When using RBAC, the `system:node` cluster role will continue to be created, for compatibility with deployment methods that bind other users or groups to that role. 
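To connect the Node authorization overview above to configuration: the mode is selected with API server flags. A sketch only, assuming you control the kube-apiserver command line; every other required flag is omitted for brevity:

```shell
# Enable the Node authorizer (usually paired with RBAC) and the
# NodeRestriction admission plugin that commonly accompanies it.
kube-apiserver \
  --authorization-mode=Node,RBAC \
  --enable-admission-plugins=NodeRestriction
```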
-{{% /capture %}} + diff --git a/content/en/docs/reference/access-authn-authz/rbac.md b/content/en/docs/reference/access-authn-authz/rbac.md index 7034f9a18e..20b1224e59 100644 --- a/content/en/docs/reference/access-authn-authz/rbac.md +++ b/content/en/docs/reference/access-authn-authz/rbac.md @@ -4,17 +4,17 @@ reviewers: - deads2k - liggitt title: Using RBAC Authorization -content_template: templates/concept -aliases: [../../../rbac/] +content_type: concept +aliases: [/rbac/] weight: 70 --- -{{% capture overview %}} + Role-based access control (RBAC) is a method of regulating access to computer or network resources based on the roles of individual users within your organization. -{{% /capture %}} -{{% capture body %}} + + RBAC authorization uses the `rbac.authorization.k8s.io` {{< glossary_tooltip text="API group" term_id="api-group" >}} to drive authorization decisions, allowing you to dynamically configure policies through the Kubernetes API. @@ -1209,5 +1209,3 @@ kubectl create clusterrolebinding permissive-binding \ After you have transitioned to use RBAC, you should adjust the access controls for your cluster to ensure that these meet your information security needs. - -{{% /capture %}} diff --git a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md b/content/en/docs/reference/access-authn-authz/service-accounts-admin.md index 5c2dd3ddc5..6d2cf76573 100644 --- a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md +++ b/content/en/docs/reference/access-authn-authz/service-accounts-admin.md @@ -5,19 +5,19 @@ reviewers: - lavalamp - liggitt title: Managing Service Accounts -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + This is a Cluster Administrator guide to service accounts. It assumes knowledge of the [User Guide to Service Accounts](/docs/user-guide/service-accounts). Support for authorization and user accounts is planned but incomplete. Sometimes incomplete features are referred to in order to better describe service accounts. -{{% /capture %}} -{{% capture body %}} + + ## User accounts versus service accounts Kubernetes distinguishes between the concept of a user account and a service account @@ -115,4 +115,4 @@ kubectl delete secret mysecretname Service Account Controller manages ServiceAccount inside namespaces, and ensures a ServiceAccount named "default" exists in every active namespace. -{{% /capture %}} + diff --git a/content/en/docs/reference/access-authn-authz/webhook.md b/content/en/docs/reference/access-authn-authz/webhook.md index 3f667fa5ef..cf5944d9a1 100644 --- a/content/en/docs/reference/access-authn-authz/webhook.md +++ b/content/en/docs/reference/access-authn-authz/webhook.md @@ -5,15 +5,15 @@ reviewers: - deads2k - liggitt title: Webhook Mode -content_template: templates/concept +content_type: concept weight: 95 --- -{{% capture overview %}} + A WebHook is an HTTP callback: an HTTP POST that occurs when something happens; a simple event-notification via HTTP POST. A web application implementing WebHooks will POST a message to a URL when certain things happen. -{{% /capture %}} -{{% capture body %}} + + When specified, mode `Webhook` causes Kubernetes to query an outside REST service when determining user privileges. @@ -174,6 +174,6 @@ to the REST api. 
For further documentation refer to the authorization.v1beta1 API objects and [webhook.go](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go). -{{% /capture %}} + diff --git a/content/en/docs/reference/command-line-tools-reference/cloud-controller-manager.md b/content/en/docs/reference/command-line-tools-reference/cloud-controller-manager.md index ee8d7f1ed1..982eb0993e 100644 --- a/content/en/docs/reference/command-line-tools-reference/cloud-controller-manager.md +++ b/content/en/docs/reference/command-line-tools-reference/cloud-controller-manager.md @@ -1,10 +1,11 @@ --- title: cloud-controller-manager -content_template: templates/tool-reference +content_type: tool-reference weight: 30 --- -{{% capture synopsis %}} +## {{% heading "synopsis" %}} + The Cloud controller manager is a daemon that embeds @@ -14,9 +15,10 @@ the cloud specific control loops shipped with Kubernetes. cloud-controller-manager [flags] ``` -{{% /capture %}} -{{% capture options %}} + +## {{% heading "options" %}} +
Restricted policy specification
Control
Privilege Escalation - Privilege escalation to root should not be allowed.
+ Privilege escalation to root should not be allowed.

Restricted Fields:
spec.containers[*].securityContext.privileged
spec.initContainers[*].securityContext.privileged
@@ -194,7 +219,7 @@ well as lower-trust users.The following listed controls should be enforced/disal
Running as Non-root - Containers must be required to run as non-root users.
+ Containers must be required to run as non-root users.

Restricted Fields:
spec.securityContext.runAsNonRoot
spec.containers[*].securityContext.runAsNonRoot
@@ -205,7 +230,7 @@ well as lower-trust users.The following listed controls should be enforced/disal
Non-root groups (optional) - Containers should be forbidden from running with a root primary or supplementary GID.
+ Containers should be forbidden from running with a root primary or supplementary GID.

Restricted Fields:
spec.securityContext.runAsGroup
spec.securityContext.supplementalGroups[*]
@@ -224,12 +249,12 @@ well as lower-trust users.The following listed controls should be enforced/disal
Seccomp - The runtime/default seccomp profile must be required, or allow additional whitelisted values.
+ The 'runtime/default' seccomp profile must be required, or allow specific additional profiles.

Restricted Fields:
metadata.annotations['seccomp.security.alpha.kubernetes.io/pod']
metadata.annotations['container.seccomp.security.alpha.kubernetes.io/*']

Allowed Values:
- runtime/default
+ 'runtime/default'
undefined (container annotation)
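To make the restricted controls in the table above concrete, here is a hedged sketch of a pod that satisfies them; the pod name and image are placeholders, and the seccomp annotation is the alpha-era mechanism this table documents:

```shell
# Apply a pod that meets the listed restricted controls.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: restricted-example   # hypothetical name
  annotations:
    seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
  securityContext:
    runAsNonRoot: true       # "Running as Non-root" control
  containers:
  - name: app
    image: nginx             # placeholder image
    securityContext:
      privileged: false
      allowPrivilegeEscalation: false   # "Privilege Escalation" control
EOF
```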
@@ -534,5 +536,5 @@ cloud-controller-manager [flags] -{{% /capture %}} + diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md index 7716c7be7d..78784eace7 100644 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md @@ -1,17 +1,17 @@ --- weight: 10 title: Feature Gates -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + This page contains an overview of the various feature gates an administrator can specify on different Kubernetes components. See [feature stages](#feature-stages) for an explanation of the stages for a feature. -{{% /capture %}} -{{% capture body %}} + + ## Overview Feature gates are a set of key=value pairs that describe Kubernetes features. @@ -511,8 +511,9 @@ Each feature gate is designed for enabling/disabling a specific feature: - `WinDSR`: Allows kube-proxy to create DSR loadbalancers for Windows. - `WinOverlay`: Allows kube-proxy to run in overlay mode for Windows. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * The [deprecation policy](/docs/reference/using-api/deprecation-policy/) for Kubernetes explains the project's approach to removing features and components. -{{% /capture %}} + diff --git a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md index 6e9454dc49..01cf6a87b8 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md @@ -1,10 +1,11 @@ --- title: kube-apiserver -content_template: templates/tool-reference +content_type: tool-reference weight: 30 --- -{{% capture synopsis %}} +## {{% heading "synopsis" %}} + The Kubernetes API server validates and configures data @@ -16,9 +17,10 @@ cluster's shared state through which all other components interact. kube-apiserver [flags] ``` -{{% /capture %}} -{{% capture options %}} + +## {{% heading "options" %}} +
@@ -1082,5 +1084,5 @@ kube-apiserver [flags] -{{% /capture %}} + diff --git a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md index 69be42b17e..75fed787d0 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md @@ -1,10 +1,11 @@ --- title: kube-controller-manager -content_template: templates/tool-reference +content_type: tool-reference weight: 30 --- -{{% capture synopsis %}} +## {{% heading "synopsis" %}} + The Kubernetes controller manager is a daemon that embeds @@ -20,9 +21,10 @@ controller, and serviceaccounts controller. kube-controller-manager [flags] ``` -{{% /capture %}} -{{% capture options %}} + +## {{% heading "options" %}} +
@@ -897,5 +899,5 @@ kube-controller-manager [flags] -{{% /capture %}} + diff --git a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md index 1ad3f6ee15..535bd81aa6 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md @@ -1,10 +1,11 @@ --- title: kube-proxy -content_template: templates/tool-reference +content_type: tool-reference weight: 30 --- -{{% capture synopsis %}} +## {{% heading "synopsis" %}} + The Kubernetes network proxy runs on each node. This @@ -19,9 +20,10 @@ with the apiserver API to configure the proxy. kube-proxy [flags] ``` -{{% /capture %}} -{{% capture options %}} + +## {{% heading "options" %}} +
@@ -336,5 +338,5 @@ kube-proxy [flags] -{{% /capture %}} + diff --git a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md index f807c5d024..d510610140 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md @@ -1,10 +1,11 @@ --- title: kube-scheduler -content_template: templates/tool-reference +content_type: tool-reference weight: 30 --- -{{% capture synopsis %}} +## {{% heading "synopsis" %}} + The Kubernetes scheduler is a policy-rich, topology-aware, @@ -20,9 +21,10 @@ for more information about scheduling and the kube-scheduler component. kube-scheduler [flags] ``` -{{% /capture %}} -{{% capture options %}} + +## {{% heading "options" %}} +
@@ -512,5 +514,5 @@ kube-scheduler [flags] -{{% /capture %}} + diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md b/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md index 6269a3ec5a..562ec5b867 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md @@ -5,10 +5,10 @@ reviewers: - smarterclayton - awly title: TLS bootstrapping -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + In a Kubernetes cluster, the components on the worker nodes - kubelet and kube-proxy - need to communicate with Kubernetes master components, specifically kube-apiserver. In order to ensure that communication is kept private, not interfered with, and ensure that each component of the cluster is talking to another trusted component, we strongly @@ -24,9 +24,9 @@ found [here](https://github.com/kubernetes/kubernetes/pull/20439). This document describes the process of node initialization, how to set up TLS client certificate bootstrapping for kubelets, and how it works. -{{% /capture %}} -{{% capture body %}} + + ## Initialization Process When a worker node starts up, the kubelet does the following: @@ -454,4 +454,4 @@ An issue is open referencing this [here](https://github.com/kubernetes/kubernete -{{% /capture %}} + diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet.md b/content/en/docs/reference/command-line-tools-reference/kubelet.md index 595ef138fc..54dc1a84d9 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet.md @@ -1,10 +1,11 @@ --- title: kubelet -content_template: templates/tool-reference +content_type: tool-reference weight: 28 --- -{{% capture synopsis %}} +## {{% heading "synopsis" %}} + The kubelet is the primary "node agent" that runs on each node. It can register the node with the apiserver using one of: the hostname; a flag to override the hostname; or specific logic for a cloud provider. @@ -24,10 +25,11 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API (u kubelet [flags] ``` -{{% /capture %}} -{{% capture options %}} + +## {{% heading "options" %}} +
@@ -1265,4 +1267,4 @@ kubelet [flags]
-{{% /capture %}} + diff --git a/content/en/docs/reference/glossary/aggregation-layer.md b/content/en/docs/reference/glossary/aggregation-layer.md index e5bafd9c06..620460429c 100644 --- a/content/en/docs/reference/glossary/aggregation-layer.md +++ b/content/en/docs/reference/glossary/aggregation-layer.md @@ -14,6 +14,6 @@ tags: --- The aggregation layer lets you install additional Kubernetes-style APIs in your cluster. - + -When you've configured the {{< glossary_tooltip text="Kubernetes API Server" term_id="kube-apiserver" >}} to [support additional APIs](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/), you can add `APIService` objects to "claim" a URL path in the Kubernetes API. +When you've configured the {{< glossary_tooltip text="Kubernetes API Server" term_id="kube-apiserver" >}} to [support additional APIs](/docs/tasks/extend-kubernetes/configure-aggregation-layer/), you can add `APIService` objects to "claim" a URL path in the Kubernetes API. diff --git a/content/en/docs/reference/glossary/container-env-variables.md b/content/en/docs/reference/glossary/container-env-variables.md index 5e19a1dfa2..1453193b01 100755 --- a/content/en/docs/reference/glossary/container-env-variables.md +++ b/content/en/docs/reference/glossary/container-env-variables.md @@ -2,7 +2,7 @@ title: Container Environment Variables id: container-env-variables date: 2018-04-12 -full_link: /docs/concepts/containers/container-environment-variables/ +full_link: /docs/concepts/containers/container-environment/ short_description: > Container environment variables are name=value pairs that provide useful information into containers running in a Pod. diff --git a/content/en/docs/reference/glossary/customresourcedefinition.md b/content/en/docs/reference/glossary/customresourcedefinition.md index 16f4a69411..9e6ec9c7c5 100755 --- a/content/en/docs/reference/glossary/customresourcedefinition.md +++ b/content/en/docs/reference/glossary/customresourcedefinition.md @@ -2,7 +2,7 @@ title: CustomResourceDefinition id: CustomResourceDefinition date: 2018-04-12 -full_link: /docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ +full_link: /docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/ short_description: > Custom code that defines a resource to add to your Kubernetes API server without building a complete custom server. @@ -14,7 +14,6 @@ tags: --- Custom code that defines a resource to add to your Kubernetes API server without building a complete custom server. - - -Custom Resource Definitions let you extend the Kubernetes API for your environment if the publicly supported API resources can't meet your needs. + +Custom Resource Definitions let you extend the Kubernetes API for your environment if the publicly supported API resources can't meet your needs. diff --git a/content/en/docs/reference/issues-security/security.md b/content/en/docs/reference/issues-security/security.md index 709f26ffe1..b9b1ce7c37 100644 --- a/content/en/docs/reference/issues-security/security.md +++ b/content/en/docs/reference/issues-security/security.md @@ -6,15 +6,15 @@ reviewers: - erictune - philips - jessfraz -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + This page describes Kubernetes security and disclosure information. 
-{{% /capture %}} -{{% capture body %}} + + ## Security Announcements Join the [kubernetes-security-announce](https://groups.google.com/forum/#!forum/kubernetes-security-announce) group for emails about security and major API announcements. @@ -29,7 +29,7 @@ To make a report, submit your vulnerability to the [Kubernetes bug bounty progra You can also email the private [security@kubernetes.io](mailto:security@kubernetes.io) list with the security details and the details expected for [all Kubernetes bug reports](https://git.k8s.io/kubernetes/.github/ISSUE_TEMPLATE/bug-report.md). -You may encrypt your email to this list using the GPG keys of the [Product Security Committee members](https://git.k8s.io/security/security-release-process.md#product-security-committee-psc). Encryption using GPG is NOT required to make a disclosure. +You may encrypt your email to this list using the GPG keys of the [Product Security Committee members](https://git.k8s.io/security/README.md#product-security-committee-psc). Encryption using GPG is NOT required to make a disclosure. ### When Should I Report a Vulnerability? @@ -56,4 +56,4 @@ As the security issue moves from triage, to identified fix, to release planning ## Public Disclosure Timing A public disclosure date is negotiated by the Kubernetes Product Security Committee and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to a few weeks. For a vulnerability with a straightforward mitigation, we expect report date to disclosure date to be on the order of 7 days. The Kubernetes Product Security Committee holds the final say when setting a disclosure date. -{{% /capture %}} + diff --git a/content/en/docs/reference/kubectl/cheatsheet.md b/content/en/docs/reference/kubectl/cheatsheet.md index dbf0e2dc63..23d074456c 100644 --- a/content/en/docs/reference/kubectl/cheatsheet.md +++ b/content/en/docs/reference/kubectl/cheatsheet.md @@ -4,21 +4,21 @@ reviewers: - erictune - krousey - clove -content_template: templates/concept +content_type: concept card: name: reference weight: 30 --- -{{% capture overview %}} + See also: [Kubectl Overview](/docs/reference/kubectl/overview/) and [JsonPath Guide](/docs/reference/kubectl/jsonpath). This page is an overview of the `kubectl` command. -{{% /capture %}} -{{% capture body %}} + + # kubectl - Cheat Sheet @@ -382,9 +382,10 @@ Verbosity | Description `--v=8` | Display HTTP request contents. `--v=9` | Display HTTP request contents without truncation of contents. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Overview of kubectl](/docs/reference/kubectl/overview/). @@ -394,4 +395,4 @@ Verbosity | Description * See more community [kubectl cheatsheets](https://github.com/dennyzhang/cheatsheet-kubernetes-A4). 
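One worked example for the verbosity table above (not part of the original cheat sheet): raising `--v` on a single call is often the quickest way to see what kubectl actually sends to the API server.

```shell
# --v=8 logs each HTTP request and response body kubectl exchanges
# with the API server; use --v=9 to disable truncation of the output.
kubectl get pods --v=8
```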
-{{% /capture %}}
+
diff --git a/content/en/docs/reference/kubectl/conventions.md b/content/en/docs/reference/kubectl/conventions.md
index c4bdd59ec5..062847c485 100644
--- a/content/en/docs/reference/kubectl/conventions.md
+++ b/content/en/docs/reference/kubectl/conventions.md
@@ -2,14 +2,14 @@ title: kubectl Usage Conventions
reviewers:
- janetkuo
-content_template: templates/concept
+content_type: concept
---
-{{% capture overview %}}
+
Recommended usage conventions for `kubectl`.

-{{% /capture %}}
-{{% capture body %}}
+
+
## Using `kubectl` in Reusable Scripts

@@ -59,4 +59,4 @@ You can generate the following resources with a kubectl command, `kubectl create
* You can use `kubectl apply` to create or update resources. For more information about using kubectl apply to update resources, see [Kubectl Book](https://kubectl.docs.kubernetes.io).

-{{% /capture %}}
+
diff --git a/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md b/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md
index ac4776fd64..a04334ae17 100644
--- a/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md
+++ b/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md
@@ -1,16 +1,16 @@
---
title: kubectl for Docker Users
-content_template: templates/concept
+content_type: concept
reviewers:
- brendandburns
- thockin
---
-{{% capture overview %}}
+
You can use the Kubernetes command line tool kubectl to interact with the API Server. Using kubectl is straightforward if you are familiar with the Docker command line tool. However, there are a few differences between the docker commands and the kubectl commands. The following sections show a docker sub-command and describe the equivalent kubectl command.

-{{% /capture %}}
-{{% capture body %}}
+
+
## docker run

To run an nginx Pod and expose the Pod, see [kubectl run](/docs/reference/generated/kubectl/kubectl-commands/#run).
@@ -363,4 +363,4 @@ Grafana is running at https://203.0.113.141/api/v1/namespaces/kube-system/servic
Heapster is running at https://203.0.113.141/api/v1/namespaces/kube-system/services/monitoring-heapster/proxy
InfluxDB is running at https://203.0.113.141/api/v1/namespaces/kube-system/services/monitoring-influxdb/proxy
```
-{{% /capture %}}
+
diff --git a/content/en/docs/reference/kubectl/jsonpath.md b/content/en/docs/reference/kubectl/jsonpath.md
index 731af0004e..50c051c9f4 100644
--- a/content/en/docs/reference/kubectl/jsonpath.md
+++ b/content/en/docs/reference/kubectl/jsonpath.md
@@ -1,14 +1,14 @@
---
title: JSONPath Support
-content_template: templates/concept
+content_type: concept
weight: 25
---
-{{% capture overview %}}
+
Kubectl supports JSONPath templates.

-{{% /capture %}}
-{{% capture body %}}
+
+
A JSONPath template is composed of JSONPath expressions enclosed by curly braces {}.
Kubectl uses JSONPath expressions to filter on specific fields in the JSON object and format the output.
@@ -98,4 +98,4 @@ kubectl get pods -o=jsonpath="{range .items[*]}{.metadata.name}{\"\t\"}{.status.
```
{{< /note >}}
-{{% /capture %}}
+
diff --git a/content/en/docs/reference/kubectl/kubectl.md b/content/en/docs/reference/kubectl/kubectl.md
index 6342de0008..f734d32f99 100644
--- a/content/en/docs/reference/kubectl/kubectl.md
+++ b/content/en/docs/reference/kubectl/kubectl.md
@@ -1,10 +1,11 @@
---
title: kubectl
-content_template: templates/tool-reference
+content_type: tool-reference
weight: 30
---
-{{% capture synopsis %}}
+## {{% heading "synopsis" %}}
+
kubectl controls the Kubernetes cluster manager.
@@ -15,9 +16,10 @@ kubectl controls the Kubernetes cluster manager. kubectl [flags] ``` -{{% /capture %}} -{{% capture options %}} + +## {{% heading "options" %}} + @@ -521,9 +523,10 @@ kubectl [flags] -{{% /capture %}} -{{% capture seealso %}} + +## {{% heading "seealso" %}} + * [kubectl alpha](/docs/reference/generated/kubectl/kubectl-commands#alpha) - Commands for features in alpha * [kubectl annotate](/docs/reference/generated/kubectl/kubectl-commands#annotate) - Update the annotations on a resource @@ -569,5 +572,5 @@ kubectl [flags] * [kubectl version](/docs/reference/generated/kubectl/kubectl-commands#version) - Print the client and server version information * [kubectl wait](/docs/reference/generated/kubectl/kubectl-commands#wait) - Experimental: Wait for a specific condition on one or many resources. -{{% /capture %}} + diff --git a/content/en/docs/reference/kubectl/overview.md b/content/en/docs/reference/kubectl/overview.md index fa8633fb5f..84e2272dca 100644 --- a/content/en/docs/reference/kubectl/overview.md +++ b/content/en/docs/reference/kubectl/overview.md @@ -2,21 +2,21 @@ reviewers: - hw-qiaolei title: Overview of kubectl -content_template: templates/concept +content_type: concept weight: 20 card: name: reference weight: 20 --- -{{% capture overview %}} + Kubectl is a command line tool for controlling Kubernetes clusters. `kubectl` looks for a file named config in the $HOME/.kube directory. You can specify other [kubeconfig](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) files by setting the KUBECONFIG environment variable or by setting the [`--kubeconfig`](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) flag. This overview covers `kubectl` syntax, describes the command operations, and provides common examples. For details about each command, including all the supported flags and subcommands, see the [kubectl](/docs/reference/generated/kubectl/kubectl-commands/) reference documentation. For installation instructions see [installing kubectl](/docs/tasks/kubectl/install/). -{{% /capture %}} -{{% capture body %}} + + ## Syntax @@ -32,11 +32,11 @@ where `command`, `TYPE`, `NAME`, and `flags` are: * `TYPE`: Specifies the [resource type](#resource-types). Resource types are case-insensitive and you can specify the singular, plural, or abbreviated forms. For example, the following commands produce the same output: - ```shell - kubectl get pod pod1 - kubectl get pods pod1 - kubectl get po pod1 - ``` + ```shell + kubectl get pod pod1 + kubectl get pods pod1 + kubectl get po pod1 + ``` * `NAME`: Specifies the name of the resource. Names are case-sensitive. If the name is omitted, details for all resources are displayed, for example `kubectl get pods`. @@ -424,7 +424,7 @@ kubectl hello hello world ``` -``` +```shell # we can "uninstall" a plugin, by simply removing it from our PATH sudo rm /usr/local/bin/kubectl-hello ``` @@ -442,7 +442,7 @@ The following kubectl-compatible plugins are available: /usr/local/bin/kubectl-foo /usr/local/bin/kubectl-bar ``` -``` +```shell # this command can also warn us about plugins that are # not executable, or that are overshadowed by other # plugins, for example @@ -488,10 +488,11 @@ Current user: plugins-user To find out more about plugins, take a look at the [example cli plugin](https://github.com/kubernetes/sample-cli-plugin). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Start using the [kubectl](/docs/reference/generated/kubectl/kubectl-commands/) commands. 
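Rounding out the plugin fragments shown above, a minimal end-to-end sketch; it assumes `/usr/local/bin` is on your `PATH`, and the plugin name matches the `kubectl-hello` example used by the page:

```shell
# A kubectl plugin is just an executable on PATH named kubectl-<name>.
cat <<'EOF' | sudo tee /usr/local/bin/kubectl-hello >/dev/null
#!/bin/bash
echo "hello world"
EOF
sudo chmod +x /usr/local/bin/kubectl-hello
kubectl hello    # prints: hello world
```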
-{{% /capture %}}
+
diff --git a/content/en/docs/reference/kubernetes-api/labels-annotations-taints.md b/content/en/docs/reference/kubernetes-api/labels-annotations-taints.md
index e1f1e9a801..d1faa51a88 100644
--- a/content/en/docs/reference/kubernetes-api/labels-annotations-taints.md
+++ b/content/en/docs/reference/kubernetes-api/labels-annotations-taints.md
@@ -1,18 +1,18 @@
---
title: Well-Known Labels, Annotations and Taints
-content_template: templates/concept
+content_type: concept
weight: 60
---
-{{% capture overview %}}
+
Kubernetes reserves all labels and annotations in the kubernetes.io namespace.

This document serves both as a reference to the values and as a coordination point for assigning values.

-{{% /capture %}}
-{{% capture body %}}
+
+
## kubernetes.io/arch

@@ -130,4 +130,4 @@ If `PersistentVolumeLabel` does not support automatic labeling of your Persisten
adding the labels manually (or adding support for `PersistentVolumeLabel`). With `PersistentVolumeLabel`, the scheduler prevents Pods from mounting volumes in a different zone. If your infrastructure doesn't have this constraint, you don't need to add the zone labels to the volumes at all.

-{{% /capture %}}
+
diff --git a/content/en/docs/reference/scheduling/policies.md b/content/en/docs/reference/scheduling/policies.md
index 0bf6e030b0..67d34e59f7 100644
--- a/content/en/docs/reference/scheduling/policies.md
+++ b/content/en/docs/reference/scheduling/policies.md
@@ -1,10 +1,10 @@
---
title: Scheduling Policies
-content_template: templates/concept
+content_type: concept
weight: 10
---
-{{% capture overview %}}
+
A scheduling Policy can be used to specify the *predicates* and *priorities*
that the {{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}}
@@ -16,9 +16,9 @@ You can set a scheduling policy by running
`kube-scheduler --policy-configmap <ConfigMap>`
and using the [Policy type](https://pkg.go.dev/k8s.io/kube-scheduler@v0.18.0/config/v1?tab=doc#Policy).

-{{% /capture %}}
-{{% capture body %}}
+
+
## Predicates

@@ -117,9 +117,10 @@ The following *priorities* implement scoring:
- `EvenPodsSpreadPriority`: Implements preferred
[pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/).

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
* Learn about [scheduling](/docs/concepts/scheduling-eviction/kube-scheduler/)
* Learn about [kube-scheduler profiles](/docs/reference/scheduling/profiles/)
-{{% /capture %}}
+
diff --git a/content/en/docs/reference/scheduling/profiles.md b/content/en/docs/reference/scheduling/profiles.md
index 48fa961b2e..fe28d10bd1 100644
--- a/content/en/docs/reference/scheduling/profiles.md
+++ b/content/en/docs/reference/scheduling/profiles.md
@@ -1,10 +1,10 @@
---
title: Scheduling Profiles
-content_template: templates/concept
+content_type: concept
weight: 20
---
-{{% capture overview %}}
+
{{< feature-state for_k8s_version="v1.18" state="alpha" >}}

@@ -20,9 +20,9 @@ or [`v1alpha2`](https://pkg.go.dev/k8s.io/kube-scheduler@{{< param "fullversion"
The `v1alpha2` API allows you to configure kube-scheduler to run [multiple profiles](#multiple-profiles).

-{{% /capture %}}
-{{% capture body %}}
+
+
## Extension points

@@ -174,8 +174,9 @@ the same configuration parameters (if applicable). This is because the scheduler
only has one pending pods queue.
{{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn about [scheduling](/docs/concepts/scheduling-eviction/kube-scheduler/) -{{% /capture %}} + diff --git a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md index a0aa304217..cb42a34df9 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md +++ b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md @@ -3,10 +3,10 @@ reviewers: - luxas - jbeda title: Implementation details -content_template: templates/concept +content_type: concept weight: 100 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.10" state="stable" >}} @@ -14,9 +14,9 @@ weight: 100 However, it might not be obvious _how_ kubeadm does that. This document provides additional details on what happen under the hood, with the aim of sharing knowledge on Kubernetes cluster best practices. -{{% /capture %}} -{{% capture body %}} + + ## Core design principles The cluster that `kubeadm init` and `kubeadm join` set up should be: @@ -531,4 +531,4 @@ Please note that: 1. To make dynamic kubelet configuration work, flag `--dynamic-config-dir=/var/lib/kubelet/config/dynamic` should be specified in `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` -{{% /capture %}} + diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md index c918cd5580..a4b0e501d8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md @@ -3,10 +3,10 @@ reviewers: - luxas - jbeda title: kubeadm config -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + During `kubeadm init`, kubeadm uploads the `ClusterConfiguration` object to your cluster in a ConfigMap called `kubeadm-config` in the `kube-system` namespace. This configuration is then read during `kubeadm join`, `kubeadm reset` and `kubeadm upgrade`. To view this ConfigMap call `kubeadm config view`. @@ -19,9 +19,9 @@ In Kubernetes v1.13.0 and later to list/pull kube-dns images instead of the Core the `--config` method described [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon) has to be used. -{{% /capture %}} -{{% capture body %}} + + ## kubeadm config view {#cmd-config-view} {{< include "generated/kubeadm_config_view.md" >}} @@ -40,8 +40,9 @@ has to be used. ## kubeadm config images pull {#cmd-config-images-pull} {{< include "generated/kubeadm_config_images_pull.md" >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade/) to upgrade a Kubernetes cluster to a newer version -{{% /capture %}} + diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md index 7103b39d42..54729065c6 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md @@ -3,14 +3,14 @@ reviewers: - luxas - jbeda title: kubeadm init -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + This command initializes a Kubernetes control-plane node. 
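For orientation, a representative invocation (not part of the page itself); the CIDR value is an assumption and must match the pod network add-on you intend to install afterwards:

```shell
# Initialize a control-plane node; --pod-network-cidr is only required
# when the chosen network add-on needs a specific pod CIDR.
kubeadm init --pod-network-cidr=10.244.0.0/16
```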
-{{% /capture %}} -{{% capture body %}} + + {{< include "generated/kubeadm_init.md" >}} @@ -255,12 +255,13 @@ it does not allow the root CA hash to be validated with `--discovery-token-ca-cert-hash` (since it's not generated when the nodes are provisioned). For details, see the [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [kubeadm init phase](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/) to understand more about `kubeadm init` phases * [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) to bootstrap a Kubernetes worker node and join it to the cluster * [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade/) to upgrade a Kubernetes cluster to a newer version * [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) to revert any changes made to this host by `kubeadm init` or `kubeadm join` -{{% /capture %}} + diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md index 1e99d1682b..abceaf5f70 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md @@ -3,14 +3,14 @@ reviewers: - luxas - jbeda title: kubeadm join -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + This command initializes a Kubernetes worker node and joins it to the cluster. -{{% /capture %}} -{{% capture body %}} + + {{< include "generated/kubeadm_join.md" >}} ### The join workflow {#join-workflow} @@ -276,10 +276,11 @@ kubeadm config print join-defaults For details on individual fields in `JoinConfiguration` see [the godoc](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#JoinConfiguration). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) to bootstrap a Kubernetes control-plane node * [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token/) to manage tokens for `kubeadm join` * [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) to revert any changes made to this host by `kubeadm init` or `kubeadm join` -{{% /capture %}} + diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md index 7185a51475..2664283daa 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md @@ -3,14 +3,14 @@ reviewers: - luxas - jbeda title: kubeadm reset -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + Performs a best effort revert of changes made by `kubeadm init` or `kubeadm join`. -{{% /capture %}} -{{% capture body %}} + + {{< include "generated/kubeadm_reset.md" >}} ### Reset workflow {#reset-workflow} @@ -35,9 +35,10 @@ etcdctl del "" --prefix ``` See the [etcd documentation](https://github.com/coreos/etcd/tree/master/etcdctl) for more information. 
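For the external etcd cleanup referenced just above, a slightly fuller sketch of the `etcdctl` call; the endpoint and certificate paths are illustrative assumptions that will differ per installation:

```shell
# Wipe all Kubernetes keys from an external etcd after `kubeadm reset`.
# Endpoint and certificate paths below are examples only.
ETCDCTL_API=3 etcdctl \
  --endpoints https://10.0.0.5:2379 \
  --cacert /etc/kubernetes/pki/etcd/ca.crt \
  --cert /etc/kubernetes/pki/etcd/peer.crt \
  --key /etc/kubernetes/pki/etcd/peer.key \
  del "" --prefix
```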
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) to bootstrap a Kubernetes control-plane node * [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) to bootstrap a Kubernetes worker node and join it to the cluster -{{% /capture %}} + diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-token.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-token.md index a8e9c7cd99..92a187bb92 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-token.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-token.md @@ -3,10 +3,10 @@ reviewers: - luxas - jbeda title: kubeadm token -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + Bootstrap tokens are used for establishing bidirectional trust between a node joining the cluster and a control-plane node, as described in [authenticating with bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/). @@ -14,9 +14,9 @@ the cluster and a control-plane node, as described in [authenticating with boots `kubeadm init` creates an initial token with a 24-hour TTL. The following commands allow you to manage such a token and also to create and manage new ones. -{{% /capture %}} -{{% capture body %}} + + ## kubeadm token create {#cmd-token-create} {{< include "generated/kubeadm_token_create.md" >}} @@ -28,8 +28,9 @@ such a token and also to create and manage new ones. ## kubeadm token list {#cmd-token-list} {{< include "generated/kubeadm_token_list.md" >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) to bootstrap a Kubernetes worker node and join it to the cluster -{{% /capture %}} + diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md index 31c2f11d9c..71483aa1d6 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md @@ -3,15 +3,15 @@ reviewers: - luxas - jbeda title: kubeadm upgrade -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + `kubeadm upgrade` is a user-friendly command that wraps complex upgrading logic behind one command, with support for both planning an upgrade and actually performing it. -{{% /capture %}} -{{% capture body %}} + + ## kubeadm upgrade guidance @@ -46,8 +46,9 @@ reports of unexpected results. 
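As a sketch of the planning-then-applying flow that `kubeadm upgrade` wraps (the target version below is illustrative):

```shell
# First see which versions this cluster can be upgraded to,
# then apply one of the offered versions on the control-plane node.
kubeadm upgrade plan
kubeadm upgrade apply v1.18.2   # illustrative target version
```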
## kubeadm upgrade node {#cmd-upgrade-node} {{< include "generated/kubeadm_upgrade_node.md" >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config/) if you initialized your cluster using kubeadm v1.7.x or lower, to configure your cluster for `kubeadm upgrade` -{{% /capture %}} + diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-version.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-version.md index 5da4209f3e..a4b57e796c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-version.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-version.md @@ -3,13 +3,13 @@ reviewers: - luxas - jbeda title: kubeadm version -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + This command prints the version of kubeadm. -{{% /capture %}} -{{% capture body %}} + + {{< include "generated/kubeadm_version.md" >}} -{{% /capture %}} + diff --git a/content/en/docs/reference/tools.md b/content/en/docs/reference/tools.md index 349ce58f2c..ef210f2b07 100644 --- a/content/en/docs/reference/tools.md +++ b/content/en/docs/reference/tools.md @@ -2,14 +2,14 @@ reviewers: - janetkuo title: Tools -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Kubernetes contains several built-in tools to help you work with the Kubernetes system. -{{% /capture %}} -{{% capture body %}} + + ## Kubectl [`kubectl`](/docs/tasks/tools/install-kubectl/) is the command line tool for Kubernetes. It controls the Kubernetes cluster manager. @@ -51,4 +51,4 @@ Use Kompose to: * Translate a Docker Compose file into Kubernetes objects * Go from local Docker development to managing your application via Kubernetes * Convert v1 or v2 Docker Compose `yaml` files or [Distributed Application Bundles](https://docs.docker.com/compose/bundles/) -{{% /capture %}} + diff --git a/content/en/docs/reference/using-api/api-concepts.md b/content/en/docs/reference/using-api/api-concepts.md index 0c3f1b2341..f83c43c00f 100644 --- a/content/en/docs/reference/using-api/api-concepts.md +++ b/content/en/docs/reference/using-api/api-concepts.md @@ -4,15 +4,15 @@ reviewers: - smarterclayton - lavalamp - liggitt -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + This page describes common concepts in the Kubernetes API. -{{% /capture %}} -{{% capture body %}} + + The Kubernetes API is a resource-based (RESTful) programmatic interface provided via HTTP. It supports retrieving, creating, updating, and deleting primary resources via the standard HTTP verbs (POST, PUT, PATCH, DELETE, GET), includes additional subresources for many objects that allow fine grained authorization (such as binding a pod to a node), and can accept and serve those resources in different representations for convenience or efficiency. It also supports efficient change notifications on resources via "watches" and consistent lists to allow other components to effectively cache and synchronize the state of resources. 
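A small sketch of the change-notification ("watch") mechanism described above; it assumes `kubectl proxy` is used to serve the API locally, and the port is an arbitrary choice:

```shell
# Open an authenticated local proxy to the API server, then stream
# change notifications for pods in the default namespace.
kubectl proxy --port=8001 &
curl "http://localhost:8001/api/v1/namespaces/default/pods?watch=1"
```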
diff --git a/content/en/docs/reference/using-api/api-overview.md b/content/en/docs/reference/using-api/api-overview.md index 3820085e6b..25b7d46af9 100644 --- a/content/en/docs/reference/using-api/api-overview.md +++ b/content/en/docs/reference/using-api/api-overview.md @@ -4,7 +4,7 @@ reviewers: - erictune - lavalamp - jbeda -content_template: templates/concept +content_type: concept weight: 10 card: name: reference @@ -12,11 +12,11 @@ card: title: Overview of API --- -{{% capture overview %}} + This page provides an overview of the Kubernetes API. -{{% /capture %}} -{{% capture body %}} + + The REST API is the fundamental fabric of Kubernetes. All operations and communications between components, and external user commands are REST API calls that the API Server handles. Consequently, everything in the Kubernetes platform is treated as an API object and has a corresponding entry in the [API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/). @@ -86,7 +86,7 @@ Currently, there are several API groups in use: The two paths that support extending the API with [custom resources](/docs/concepts/api-extension/custom-resources/) are: - - [CustomResourceDefinition](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) + - [CustomResourceDefinition](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) for basic CRUD needs. - [aggregator](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/aggregated-api-servers.md) for a full set of Kubernetes API semantics to implement their own apiserver. diff --git a/content/en/docs/reference/using-api/client-libraries.md b/content/en/docs/reference/using-api/client-libraries.md index 0d8af9394d..1531b2c5df 100644 --- a/content/en/docs/reference/using-api/client-libraries.md +++ b/content/en/docs/reference/using-api/client-libraries.md @@ -2,16 +2,16 @@ title: Client Libraries reviewers: - ahmetb -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + This page contains an overview of the client libraries for using the Kubernetes API from various programming languages. -{{% /capture %}} -{{% capture body %}} + + To write applications using the [Kubernetes REST API](/docs/reference/using-api/api-overview/), you do not need to implement the API calls and request/response types yourself. You can use a client library for the programming language you are using. @@ -75,6 +75,6 @@ their authors, not the Kubernetes team. | DotNet (RestSharp) | [github.com/masroorhasan/Kubernetes.DotNet](https://github.com/masroorhasan/Kubernetes.DotNet) | | Elixir | [github.com/obmarg/kazan](https://github.com/obmarg/kazan/) | | Elixir | [github.com/coryodaniel/k8s](https://github.com/coryodaniel/k8s) | -{{% /capture %}} + diff --git a/content/en/docs/reference/using-api/deprecation-policy.md b/content/en/docs/reference/using-api/deprecation-policy.md index f55438cd18..a21d0887ba 100644 --- a/content/en/docs/reference/using-api/deprecation-policy.md +++ b/content/en/docs/reference/using-api/deprecation-policy.md @@ -4,15 +4,15 @@ reviewers: - lavalamp - thockin title: Kubernetes Deprecation Policy -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + This document details the deprecation policy for various facets of the system. -{{% /capture %}} -{{% capture body %}} + + Kubernetes is a large system with many components and many contributors. 
As with any such software, the feature set naturally evolves over time, and sometimes a feature may need to be removed. This could include an API, a flag, @@ -425,4 +425,4 @@ leaders to find the best solutions for those specific cases, always bearing in mind that Kubernetes is committed to being a stable system that, as much as possible, never breaks users. Exceptions will always be announced in all relevant release notes. -{{% /capture %}} + diff --git a/content/en/docs/setup/_index.md b/content/en/docs/setup/_index.md index 16702b40f5..91b734953c 100644 --- a/content/en/docs/setup/_index.md +++ b/content/en/docs/setup/_index.md @@ -7,7 +7,7 @@ no_issue: true title: Getting started main_menu: true weight: 20 -content_template: templates/concept +content_type: concept card: name: setup weight: 20 @@ -18,7 +18,7 @@ card: title: Production environment --- -{{% capture overview %}} + This section covers different options to set up and run Kubernetes. @@ -28,9 +28,9 @@ You can deploy a Kubernetes cluster on a local machine, cloud, on-prem datacente More simply, you can create a Kubernetes cluster in learning and production environments. -{{% /capture %}} -{{% capture body %}} + + ## Learning environment @@ -51,4 +51,4 @@ When evaluating a solution for a production environment, consider which aspects [Kubernetes Partners](https://kubernetes.io/partners/#conformance) includes a list of [Certified Kubernetes](https://github.com/cncf/k8s-conformance/#certified-kubernetes) providers. -{{% /capture %}} + diff --git a/content/en/docs/setup/best-practices/certificates.md b/content/en/docs/setup/best-practices/certificates.md index 6169b3f872..a85d44e0f4 100644 --- a/content/en/docs/setup/best-practices/certificates.md +++ b/content/en/docs/setup/best-practices/certificates.md @@ -2,20 +2,20 @@ title: PKI certificates and requirements reviewers: - sig-cluster-lifecycle -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Kubernetes requires PKI certificates for authentication over TLS. If you install Kubernetes with [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/), the certificates that your cluster requires are automatically generated. You can also generate your own certificates -- for example, to keep your private keys more secure by not storing them on the API server. This page explains the certificates that your cluster requires. -{{% /capture %}} -{{% capture body %}} + + ## How certificates are used by your cluster @@ -31,7 +31,7 @@ Kubernetes requires PKI for the following operations: * Client and server certificates for the [front-proxy][proxy] {{< note >}} -`front-proxy` certificates are required only if you run kube-proxy to support [an extension API server](/docs/tasks/access-kubernetes-api/setup-extension-api-server/). +`front-proxy` certificates are required only if you run kube-proxy to support [an extension API server](/docs/tasks/extend-kubernetes/setup-extension-api-server/). {{< /note >}} etcd also implements mutual TLS to authenticate clients and peers. 
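To see how these certificates are wired together on a live control-plane node, here is a minimal sketch using `openssl`, assuming the default kubeadm certificate directory `/etc/kubernetes/pki`:

```shell
# Show who issued the API server's serving certificate and when it expires
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -subject -issuer -dates

# etcd's serving certificate is signed by a separate etcd CA
openssl x509 -in /etc/kubernetes/pki/etcd/server.crt -noout -subject -issuer
```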
@@ -162,6 +162,6 @@ These files are used as follows: [usage]: https://godoc.org/k8s.io/api/certificates/v1beta1#KeyUsage [kubeadm]: /docs/reference/setup-tools/kubeadm/kubeadm/ -[proxy]: /docs/tasks/access-kubernetes-api/configure-aggregation-layer/ +[proxy]: /docs/tasks/extend-kubernetes/configure-aggregation-layer/ + -{{% /capture %}} diff --git a/content/en/docs/setup/best-practices/multiple-zones.md b/content/en/docs/setup/best-practices/multiple-zones.md index ba58df028f..ab61c839a9 100644 --- a/content/en/docs/setup/best-practices/multiple-zones.md +++ b/content/en/docs/setup/best-practices/multiple-zones.md @@ -5,16 +5,16 @@ reviewers: - quinton-hoole title: Running in multiple zones weight: 10 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + This page describes how to run a cluster in multiple zones. -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -401,4 +401,4 @@ KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2b k KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a kubernetes/cluster/kube-down.sh ``` -{{% /capture %}} + diff --git a/content/en/docs/setup/learning-environment/kind.md b/content/en/docs/setup/learning-environment/kind.md index e476d220d0..ac355bd157 100644 --- a/content/en/docs/setup/learning-environment/kind.md +++ b/content/en/docs/setup/learning-environment/kind.md @@ -1,22 +1,22 @@ --- title: Installing Kubernetes with Kind weight: 40 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Kind is a tool for running local Kubernetes clusters using Docker container "nodes". -{{% /capture %}} -{{% capture body %}} + + ## Installation See [Installing Kind](https://kind.sigs.k8s.io/docs/user/quick-start/). -{{% /capture %}} + diff --git a/content/en/docs/setup/learning-environment/minikube.md b/content/en/docs/setup/learning-environment/minikube.md index e314d56608..a794141f2d 100644 --- a/content/en/docs/setup/learning-environment/minikube.md +++ b/content/en/docs/setup/learning-environment/minikube.md @@ -5,16 +5,16 @@ reviewers: - aaron-prindle title: Installing Kubernetes with Minikube weight: 30 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Minikube is a tool that makes it easy to run Kubernetes locally. Minikube runs a single-node Kubernetes cluster inside a Virtual Machine (VM) on your laptop for users looking to try out Kubernetes or develop with it day-to-day. -{{% /capture %}} -{{% capture body %}} + + ## Minikube Features @@ -369,7 +369,12 @@ The `minikube delete` command can be used to delete your cluster. This command shuts down and deletes the Minikube Virtual Machine. No data or state is preserved. ### Upgrading Minikube -If you are using macOS, see [Upgrading Minikube](https://minikube.sigs.k8s.io/docs/start/macos/#upgrading-minikube) to upgrade your existing minikube installation. +If you are using macOS and the [Brew Package Manager](https://brew.sh/) is installed, run: + +```shell +brew update +brew upgrade minikube +``` ## Interacting with Your Cluster @@ -380,9 +385,11 @@ This context contains the configuration to communicate with your Minikube cluste Minikube sets this context to default automatically, but if you need to switch back to it in the future, run: -`kubectl config use-context minikube`, +`kubectl config use-context minikube` -Or pass the context on each command like this: `kubectl get pods --context=minikube`.
+Or pass the context on each command like this: + +`kubectl get pods --context=minikube` ### Dashboard @@ -509,4 +516,4 @@ For more information about Minikube, see the [proposal](https://git.k8s.io/commu Contributions, questions, and comments are all welcomed and encouraged! Minikube developers hang out on [Slack](https://kubernetes.slack.com) in the #minikube channel (get an invitation [here](http://slack.kubernetes.io/)). We also have the [kubernetes-dev Google Groups mailing list](https://groups.google.com/forum/#!forum/kubernetes-dev). If you are posting to the list please prefix your subject with "minikube: ". -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/container-runtimes.md b/content/en/docs/setup/production-environment/container-runtimes.md index 7db25e022b..96525eab77 100644 --- a/content/en/docs/setup/production-environment/container-runtimes.md +++ b/content/en/docs/setup/production-environment/container-runtimes.md @@ -3,17 +3,17 @@ reviewers: - vincepri - bart0sh title: Container runtimes -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.6" state="stable" >}} To run containers in Pods, Kubernetes uses a container runtime. Here are the installation instructions for various runtimes. -{{% /capture %}} -{{% capture body %}} + + {{< caution >}} @@ -70,7 +70,7 @@ Keep track of the latest verified Docker version in the Kubernetes release notes Use the following commands to install Docker on your system: {{< tabs name="tab-cri-docker-installation" >}} -{{< tab name="Ubuntu 16.04+" >}} +{{% tab name="Ubuntu 16.04+" %}} ```shell # (Install Docker CE) @@ -124,8 +124,8 @@ mkdir -p /etc/systemd/system/docker.service.d systemctl daemon-reload systemctl restart docker ``` -{{< /tab >}} -{{< tab name="CentOS/RHEL 7.4+" >}} +{{% /tab %}} +{{% tab name="CentOS/RHEL 7.4+" %}} ```shell # (Install Docker CE) @@ -179,7 +179,7 @@ mkdir -p /etc/systemd/system/docker.service.d systemctl daemon-reload systemctl restart docker ``` -{{< /tab >}} +{{% /tab %}} {{< /tabs >}} Refer to the [official Docker installation guides](https://docs.docker.com/engine/installation/) @@ -213,7 +213,7 @@ sysctl --system ``` {{< tabs name="tab-cri-cri-o-installation" >}} -{{< tab name="Debian" >}} +{{% tab name="Debian" %}} ```shell # Debian Unstable/Sid @@ -243,9 +243,9 @@ and then install CRI-O: ```shell sudo apt-get install cri-o-1.17 ``` -{{< /tab >}} +{{% /tab %}} -{{< tab name="Ubuntu 18.04, 19.04 and 19.10" >}} +{{% tab name="Ubuntu 18.04, 19.04 and 19.10" %}} ```shell # Configure package repository @@ -259,9 +259,9 @@ sudo apt-get update # Install CRI-O sudo apt-get install cri-o-1.17 ``` -{{< /tab >}} +{{% /tab %}} -{{< tab name="CentOS/RHEL 7.4+" >}} +{{% tab name="CentOS/RHEL 7.4+" %}} ```shell # Install prerequisites @@ -273,14 +273,14 @@ curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:{{< skew late # Install CRI-O yum install -y cri-o ``` -{{< /tab >}} +{{% /tab %}} -{{< tab name="openSUSE Tumbleweed" >}} +{{% tab name="openSUSE Tumbleweed" %}} ```shell sudo zypper install cri-o ``` -{{< /tab >}} +{{% /tab %}} {{< /tabs >}} ### Start CRI-O @@ -323,7 +323,7 @@ sysctl --system ### Install containerd {{< tabs name="tab-cri-containerd-installation" >}} -{{< tab name="Ubuntu 16.04" >}} +{{% tab name="Ubuntu 16.04" %}} ```shell # (Install containerd) @@ -360,8 +360,8 @@ containerd config default > /etc/containerd/config.toml # Restart containerd systemctl restart containerd 
``` -{{< /tab >}} -{{< tab name="CentOS/RHEL 7.4+" >}} +{{% /tab %}} +{{% tab name="CentOS/RHEL 7.4+" %}} ```shell # (Install containerd) @@ -389,7 +389,7 @@ containerd config default > /etc/containerd/config.toml # Restart containerd systemctl restart containerd ``` -{{< /tab >}} +{{% /tab %}} {{< /tabs >}} ### systemd @@ -402,4 +402,4 @@ When using kubeadm, manually configure the Refer to the [Frakti QuickStart guide](https://github.com/kubernetes/frakti#quickstart) for more information. -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/on-premises-vm/cloudstack.md b/content/en/docs/setup/production-environment/on-premises-vm/cloudstack.md index e85953dd86..1f7d1fd81f 100644 --- a/content/en/docs/setup/production-environment/on-premises-vm/cloudstack.md +++ b/content/en/docs/setup/production-environment/on-premises-vm/cloudstack.md @@ -2,10 +2,10 @@ reviewers: - thockin title: Cloudstack -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + [CloudStack](https://cloudstack.apache.org/) is a software to build public and private clouds based on hardware virtualization principles (traditional IaaS). To deploy Kubernetes on CloudStack there are several possibilities depending on the Cloud being used and what images are made available. CloudStack also has a vagrant plugin available, hence Vagrant could be used to deploy Kubernetes either using the existing shell provisioner or using new Salt based recipes. @@ -13,9 +13,9 @@ content_template: templates/concept This guide uses a single [Ansible playbook](https://github.com/apachecloudstack/k8s), which is completely automated and can deploy Kubernetes on a CloudStack based Cloud using CoreOS images. The playbook, creates an ssh key pair, creates a security group and associated rules and finally starts coreOS instances configured via cloud-init. -{{% /capture %}} -{{% capture body %}} + + ## Prerequisites @@ -118,4 +118,4 @@ IaaS Provider | Config. Mgmt | OS | Networking | Docs CloudStack | Ansible | CoreOS | flannel | [docs](/docs/setup/production-environment/on-premises-vm/cloudstack/) | | Community ([@Guiques](https://github.com/ltupin/)) -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/on-premises-vm/dcos.md b/content/en/docs/setup/production-environment/on-premises-vm/dcos.md index 12e47948e2..e4b310902c 100644 --- a/content/en/docs/setup/production-environment/on-premises-vm/dcos.md +++ b/content/en/docs/setup/production-environment/on-premises-vm/dcos.md @@ -2,10 +2,10 @@ reviewers: - smugcloud title: Kubernetes on DC/OS -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Mesosphere provides an easy option to provision Kubernetes onto [DC/OS](https://mesosphere.com/product/), offering: @@ -14,12 +14,12 @@ Mesosphere provides an easy option to provision Kubernetes onto [DC/OS](https:// * Highly available and secure by default * Kubernetes running alongside fast-data platforms (e.g. Akka, Cassandra, Kafka, Spark) -{{% /capture %}} -{{% capture body %}} + + ## Official Mesosphere Guide The canonical source of getting started on DC/OS is located in the [quickstart repo](https://github.com/mesosphere/dcos-kubernetes-quickstart). 
-{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/on-premises-vm/ovirt.md b/content/en/docs/setup/production-environment/on-premises-vm/ovirt.md index be6f3b8e77..1d57b6f7eb 100644 --- a/content/en/docs/setup/production-environment/on-premises-vm/ovirt.md +++ b/content/en/docs/setup/production-environment/on-premises-vm/ovirt.md @@ -3,16 +3,16 @@ reviewers: - caesarxuchao - erictune title: oVirt -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + oVirt is a virtual datacenter manager that delivers powerful management of multiple virtual machines on multiple hosts. Using KVM and libvirt, oVirt can be installed on Fedora, CentOS, or Red Hat Enterprise Linux hosts to set up and manage your virtual data center. -{{% /capture %}} -{{% capture body %}} + + ## oVirt Cloud Provider Deployment @@ -69,4 +69,4 @@ IaaS Provider | Config. Mgmt | OS | Networking | Docs oVirt | | | | [docs](/docs/setup/production-environment/on-premises-vm/ovirt/) | | Community ([@simon3z](https://github.com/simon3z)) -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/tools/kops.md b/content/en/docs/setup/production-environment/tools/kops.md index 10ae6dfa65..338dbee0e5 100644 --- a/content/en/docs/setup/production-environment/tools/kops.md +++ b/content/en/docs/setup/production-environment/tools/kops.md @@ -1,10 +1,10 @@ --- title: Installing Kubernetes with kops -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + This quickstart shows you how to easily install a Kubernetes cluster on AWS. It uses a tool called [`kops`](https://github.com/kubernetes/kops). @@ -18,9 +18,10 @@ kops is an automated provisioning system: * High-Availability support - see the [high_availability.md](https://github.com/kubernetes/kops/blob/master/docs/operations/high_availability.md) * Can directly provision, or generate terraform manifests - see the [terraform.md](https://github.com/kubernetes/kops/blob/master/docs/terraform.md) -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * You must have [kubectl](/docs/tasks/tools/install-kubectl/) installed. @@ -28,9 +29,9 @@ kops is an automated provisioning system: * You must have an [AWS account](https://docs.aws.amazon.com/polly/latest/dg/setting-up.html), generate [IAM keys](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys) and [configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration) them. -{{% /capture %}} -{{% capture steps %}} + + ## Creating a cluster @@ -225,13 +226,14 @@ See the [list of add-ons](/docs/concepts/cluster-administration/addons/) to expl * To delete your cluster: `kops delete cluster useast1.dev.example.com --yes` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/user-guide/kubectl-overview/). * Learn more about `kops` [advanced usage](https://kops.sigs.k8s.io/) for tutorials, best practices and advanced configuration options. 
* Follow `kops` community discussions on Slack: [community discussions](https://github.com/kubernetes/kops#other-ways-to-communicate-with-the-contributors) * Contribute to `kops` by addressing or raising an issue [GitHub Issues](https://github.com/kubernetes/kops/issues) -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md b/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md index e2ae7267bc..1bcdad0092 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md @@ -2,11 +2,11 @@ reviewers: - sig-cluster-lifecycle title: Customizing control plane configuration with kubeadm -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.12" state="stable" >}} @@ -30,9 +30,9 @@ For more details on each field in the configuration you can navigate to our You can generate a `ClusterConfiguration` object with default values by running `kubeadm config print init-defaults` and saving the output to a file of your choice. {{< /note >}} -{{% /capture %}} -{{% capture body %}} + + ## APIServer flags @@ -83,4 +83,4 @@ scheduler: kubeconfig: /home/johndoe/kubeconfig.yaml ``` -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index 2d38666386..ace94edad2 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -2,11 +2,11 @@ reviewers: - sig-cluster-lifecycle title: Creating a single control-plane cluster with kubeadm -content_template: templates/task +content_type: task weight: 30 --- -{{% capture overview %}} + The `kubeadm` tool helps you bootstrap a minimum viable Kubernetes cluster that conforms to best practices. In fact, you can use `kubeadm` to set up a cluster that will pass the [Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification). `kubeadm` also supports other cluster @@ -24,9 +24,10 @@ of cloud servers, a Raspberry Pi, and more. Whether you're deploying into the cloud or on-premises, you can integrate `kubeadm` into provisioning systems such as Ansible or Terraform. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + To follow this guide, you need: @@ -53,9 +54,9 @@ slightly as the tool evolves, but the overall implementation should be pretty st Any commands under `kubeadm alpha` are, by definition, supported on an alpha level. {{< /note >}} -{{% /capture %}} -{{% capture steps %}} + + ## Objectives @@ -312,12 +313,11 @@ kubectl apply -f https://docs.projectcalico.org/v3.14/manifests/calico.yaml {{% /tab %}} {{% tab name="Cilium" %}} -For Cilium to work correctly, you must pass `--pod-network-cidr=10.217.0.0/16` to `kubeadm init`. To deploy Cilium you just need to run: ```shell -kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.6/install/kubernetes/quick-install.yaml +kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/install/kubernetes/quick-install.yaml ``` Once all Cilium Pods are marked as `READY`, you start using your cluster. @@ -495,10 +495,10 @@ and `scp` using that other user instead. 
The `admin.conf` file gives the user _superuser_ privileges over the cluster. This file should be used sparingly. For normal users, it's recommended to -generate an unique credential to which you whitelist privileges. You can do +generate a unique credential to which you grant privileges. You can do this with the `kubeadm alpha kubeconfig user --client-name ` command. That command will print out a KubeConfig file to STDOUT which you -should save to a file and distribute to your user. After that, whitelist +should save to a file and distribute to your user. After that, grant privileges by using `kubectl create (cluster)rolebinding`. {{< /note >}} @@ -564,9 +564,9 @@ See the [`kubeadm reset`](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) reference documentation for more information about this subcommand and its options. -{{% /capture %}} -{{% capture discussion %}} + + ## What's next {#whats-next} @@ -641,4 +641,4 @@ supports your chosen platform. If you are running into difficulties with kubeadm, please consult our [troubleshooting docs](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/). -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/ha-topology.md b/content/en/docs/setup/production-environment/tools/kubeadm/ha-topology.md index ec05ee12db..53b1f38024 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/ha-topology.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/ha-topology.md @@ -2,11 +2,11 @@ reviewers: - sig-cluster-lifecycle title: Options for Highly Available topology -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + This page explains the two options for configuring the topology of your highly available (HA) Kubernetes clusters. @@ -22,9 +22,9 @@ kubeadm bootstraps the etcd cluster statically. Read the etcd [Clustering Guide] for more details. {{< /note >}} -{{% /capture %}} -{{% capture body %}} + + ## Stacked etcd topology @@ -67,10 +67,11 @@ A minimum of three hosts for control plane nodes and three hosts for etcd nodes ![External etcd topology](/images/kubeadm/kubeadm-ha-topology-external-etcd.svg) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [Set up a highly available cluster with kubeadm](/docs/setup/production-environment/tools/kubeadm/high-availability/) -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md index 162e60e175..436f4e3573 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md @@ -2,11 +2,11 @@ reviewers: - sig-cluster-lifecycle title: Creating Highly Available clusters with kubeadm -content_template: templates/task +content_type: task weight: 60 --- -{{% capture overview %}} + This page explains two different approaches to setting up a highly available Kubernetes cluster using kubeadm: @@ -30,9 +30,9 @@ environment, neither approach documented here works with Service objects of type LoadBalancer, or with dynamic PersistentVolumes.
{{< /caution >}} -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + For both methods you need this infrastructure: @@ -50,9 +51,9 @@ For the external etcd cluster only, you also need: - Three additional machines for etcd members -{{% /capture %}} -{{% capture steps %}} + + ## First steps for both methods @@ -373,4 +374,4 @@ SSH is required if you want to control all nodes from a single machine. # Quote this line if you are using external etcd mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key ``` -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index 9438e86140..e06918d7b8 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -1,6 +1,6 @@ --- title: Installing kubeadm -content_template: templates/task +content_type: task weight: 10 card: name: setup @@ -8,14 +8,15 @@ card: title: Install the kubeadm setup tool --- -{{% capture overview %}} + This page shows how to install the `kubeadm` toolbox. For information how to create a cluster with kubeadm once you have performed this installation process, see the [Using kubeadm to Create a Cluster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) page. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * One or more machines running one of: - Ubuntu 16.04+ @@ -32,9 +33,9 @@ For information how to create a cluster with kubeadm once you have performed thi * Certain ports are open on your machines. See [here](#check-required-ports) for more details. * Swap disabled. You **MUST** disable swap in order for the kubelet to work properly. -{{% /capture %}} -{{% capture steps %}} + + ## Verify the MAC address and product_uuid are unique for every node {#verify-mac-address} @@ -301,8 +302,8 @@ like CRI-O and containerd is work in progress. If you are running into difficulties with kubeadm, please consult our [troubleshooting docs](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/). -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + * [Using kubeadm to Create a Cluster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) -{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md b/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md index 070dbd7274..8dfcb250ce 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md @@ -2,11 +2,11 @@ reviewers: - sig-cluster-lifecycle title: Configuring each kubelet in your cluster using kubeadm -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.11" state="stable" >}} @@ -26,9 +26,9 @@ characteristics of a given machine (such as OS, storage, and networking). You ca of your kubelets manually, but kubeadm now provides a `KubeletConfiguration` API type for [managing your kubelet configurations centrally](#configure-kubelets-using-kubeadm). 
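To get a feel for the `KubeletConfiguration` type mentioned above, a quick sketch, assuming a host with kubeadm installed (the exact fields printed vary by version):

```shell
# Print the default KubeletConfiguration that kubeadm would apply cluster-wide
kubeadm config print init-defaults --component-configs KubeletConfiguration

# On a node bootstrapped by kubeadm, the instance-specific copy lives here
cat /var/lib/kubelet/config.yaml
```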
-{{% /capture %}} -{{% capture body %}} + + ## Kubelet configuration patterns @@ -203,4 +203,4 @@ The DEB and RPM packages shipped with the Kubernetes releases are: | `kubernetes-cni` | Installs the official CNI binaries into the `/opt/cni/bin` directory. | | `cri-tools` | Installs the `/usr/bin/crictl` binary from the [cri-tools git repository](https://github.com/kubernetes-incubator/cri-tools). | -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md b/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md index 84c98ebe9c..334e2266f2 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md @@ -2,11 +2,11 @@ reviewers: - sig-cluster-lifecycle title: Configuring your kubernetes cluster to self-host the control plane -content_template: templates/concept +content_type: concept weight: 100 --- -{{% capture overview %}} + ### Self-hosting the Kubernetes control plane {#self-hosting} @@ -19,9 +19,9 @@ configured in the kubelet via static files. To create a self-hosted cluster see the [kubeadm alpha selfhosting pivot](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-selfhosting) command. -{{% /capture %}} -{{% capture body %}} + + #### Caveats @@ -67,4 +67,4 @@ In summary, `kubeadm alpha selfhosting` works as follows: 1. When the original static control plane stops, the new self-hosted control plane is able to bind to listening ports and become active. -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md index 708e10569f..739b405d14 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md @@ -2,11 +2,11 @@ reviewers: - sig-cluster-lifecycle title: Set up a High Availability etcd cluster with kubeadm -content_template: templates/task +content_type: task weight: 70 --- -{{% capture overview %}} + {{< note >}} While kubeadm is being used as the management tool for external etcd nodes @@ -23,9 +23,10 @@ becoming unavailable. This task walks through the process of creating a high availability etcd cluster of three members that can be used as an external etcd when using kubeadm to set up a kubernetes cluster. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Three hosts that can talk to each other over ports 2379 and 2380. This document assumes these default ports. However, they are configurable through @@ -36,9 +37,9 @@ when using kubeadm to set up a kubernetes cluster. [toolbox]: /docs/setup/production-environment/tools/kubeadm/install-kubeadm/ -{{% /capture %}} -{{% capture steps %}} + + ## Setting up the cluster @@ -264,12 +265,13 @@ this example. - Set `${ETCD_TAG}` to the version tag of your etcd image. For example `3.4.3-0`. To see the etcd image and tag that kubeadm uses execute `kubeadm config images list --kubernetes-version ${K8S_VERSION}`, where `${K8S_VERSION}` is for example `v1.17.0` - Set `${HOST0}`to the IP address of the host you are testing. 
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Once you have a working 3 member etcd cluster, you can continue setting up a highly available control plane using the [external etcd method with kubeadm](/docs/setup/production-environment/tools/kubeadm/high-availability/). -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md index 054f4b28fb..0294284c9a 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md @@ -1,10 +1,10 @@ --- title: Troubleshooting kubeadm -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + As with any program, you might run into an error installing or running kubeadm. This page lists some common failure scenarios and have provided steps that can help you understand and fix the problem. @@ -18,9 +18,9 @@ If your problem is not listed below, please follow the following steps: - If you are unsure about how kubeadm works, you can ask on [Slack](http://slack.k8s.io/) in #kubeadm, or open a question on [StackOverflow](https://stackoverflow.com/questions/tagged/kubernetes). Please include relevant tags like `#kubernetes` and `#kubeadm` so folks can help you. -{{% /capture %}} -{{% capture body %}} + + ## Not possible to join a v1.18 Node to a v1.17 cluster due to missing RBAC @@ -404,4 +404,4 @@ nodeRegistration: Alternatively, you can modify `/etc/fstab` to make the `/usr` mount writeable, but please be advised that this is modifying a design principle of the Linux distribution. -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/tools/kubespray.md b/content/en/docs/setup/production-environment/tools/kubespray.md index ae323d38cf..07c0b3c574 100644 --- a/content/en/docs/setup/production-environment/tools/kubespray.md +++ b/content/en/docs/setup/production-environment/tools/kubespray.md @@ -1,10 +1,10 @@ --- title: Installing Kubernetes with Kubespray -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + This quickstart helps to install a Kubernetes cluster hosted on GCE, Azure, OpenStack, AWS, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental) or Baremetal with [Kubespray](https://github.com/kubernetes-sigs/kubespray). @@ -23,9 +23,9 @@ Kubespray is a composition of [Ansible](http://docs.ansible.com/) playbooks, [in To choose a tool which best fits your use case, read [this comparison](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/comparisons.md) to [kubeadm](/docs/admin/kubeadm/) and [kops](/docs/setup/production-environment/tools/kops/). -{{% /capture %}} -{{% capture body %}} + + ## Creating a cluster @@ -113,10 +113,10 @@ When running the reset playbook, be sure not to accidentally target your product * Slack Channel: [#kubespray](https://kubernetes.slack.com/messages/kubespray/) (You can get your invite [here](http://slack.k8s.io/)) * [GitHub Issues](https://github.com/kubernetes-sigs/kubespray/issues) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Check out planned work on Kubespray's [roadmap](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/roadmap.md). 
-{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/setup/production-environment/turnkey/aws.md b/content/en/docs/setup/production-environment/turnkey/aws.md index 922f4a3eb9..92dd18075c 100644 --- a/content/en/docs/setup/production-environment/turnkey/aws.md +++ b/content/en/docs/setup/production-environment/turnkey/aws.md @@ -3,16 +3,17 @@ reviewers: - justinsb - clove title: Running Kubernetes on AWS EC2 -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page describes how to install a Kubernetes cluster on AWS. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + To create a Kubernetes cluster on AWS, you will need an Access Key ID and a Secret Access Key from AWS. @@ -28,9 +29,9 @@ To create a Kubernetes cluster on AWS, you will need an Access Key ID and a Secr * [KubeOne](https://github.com/kubermatic/kubeone) is an open source cluster lifecycle management tool that creates, upgrades and manages Kubernetes Highly-Available clusters. -{{% /capture %}} -{{% capture steps %}} + + ## Getting started with your cluster @@ -90,4 +91,4 @@ AWS | KubeOne | Ubuntu, CoreOS, CentOS | canal, weave Please see the [Kubernetes docs](/docs/) for more details on administering and using a Kubernetes cluster. -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/turnkey/gce.md b/content/en/docs/setup/production-environment/turnkey/gce.md index 7ec902d10b..60c4e690d9 100644 --- a/content/en/docs/setup/production-environment/turnkey/gce.md +++ b/content/en/docs/setup/production-environment/turnkey/gce.md @@ -5,16 +5,17 @@ reviewers: - mikedanese - thockin title: Running Kubernetes on Google Compute Engine -content_template: templates/task +content_type: task --- -{{% capture overview %}} + The example below creates a Kubernetes cluster with 3 worker node Virtual Machines and a master Virtual Machine (i.e. 4 VMs in your cluster). This cluster is set up and controlled from your workstation (or wherever you find convenient). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + If you want a simplified getting started experience and GUI for managing clusters, please consider trying [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) for hosted cluster installation and management. @@ -36,9 +37,9 @@ If you want to use custom binaries or pure open source Kubernetes, please contin 1. Make sure you can start up a GCE VM from the command line. At least make sure you can do the [Create an instance](https://cloud.google.com/compute/docs/instances/#startinstancegcloud) part of the GCE Quickstart. 1. Make sure you can SSH into the VM without interactive prompts. See the [Log in to the instance](https://cloud.google.com/compute/docs/instances/#sshing) part of the GCE Quickstart. -{{% /capture %}} -{{% capture steps %}} + + ## Starting a cluster @@ -225,4 +226,4 @@ GCE | Saltstack | Debian | GCE | [docs](/docs/setup/ Please see the [Kubernetes docs](/docs/) for more details on administering and using a Kubernetes cluster. 
-{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/turnkey/icp.md b/content/en/docs/setup/production-environment/turnkey/icp.md index 81375ad370..1ebb7a9267 100644 --- a/content/en/docs/setup/production-environment/turnkey/icp.md +++ b/content/en/docs/setup/production-environment/turnkey/icp.md @@ -27,13 +27,9 @@ The following modules are available where you can deploy IBM Cloud Private by us ## IBM Cloud Private on AWS -You can deploy an IBM Cloud Private cluster on Amazon Web Services (AWS) by using either AWS CloudFormation or Terraform. +You can deploy an IBM Cloud Private cluster on Amazon Web Services (AWS) using Terraform. -IBM Cloud Private has a Quick Start that automatically deploys IBM Cloud Private into a new virtual private cloud (VPC) on the AWS Cloud. A regular deployment takes about 60 minutes, and a high availability (HA) deployment takes about 75 minutes to complete. The Quick Start includes AWS CloudFormation templates and a deployment guide. - -This Quick Start is for users who want to explore application modernization and want to accelerate meeting their digital transformation goals, by using IBM Cloud Private and IBM tooling. The Quick Start helps users rapidly deploy a high availability (HA), production-grade, IBM Cloud Private reference architecture on AWS. For all of the details and the deployment guide, see the [IBM Cloud Private on AWS Quick Start](https://aws.amazon.com/quickstart/architecture/ibm-cloud-private/). - -IBM Cloud Private can also run on the AWS cloud platform by using Terraform. To deploy IBM Cloud Private in an AWS EC2 environment, see [Installing IBM Cloud Private on AWS](https://github.com/ibm-cloud-architecture/refarch-privatecloud/blob/master/Installing_ICp_on_aws.md). +To deploy IBM Cloud Private in an AWS EC2 environment, see [Installing IBM Cloud Private on AWS](https://github.com/ibm-cloud-architecture/terraform-icp-aws). ## IBM Cloud Private on Azure diff --git a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index 78e61d4588..09a74d1450 100644 --- a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -3,17 +3,17 @@ reviewers: - michmike - patricklang title: Intro to Windows support in Kubernetes -content_template: templates/concept +content_type: concept weight: 65 --- -{{% capture overview %}} + Windows applications constitute a large portion of the services and applications that run in many organizations. [Windows containers](https://aka.ms/windowscontainers) provide a modern way to encapsulate processes and package dependencies, making it easier to use DevOps practices and follow cloud native patterns for Windows applications. Kubernetes has become the defacto standard container orchestrator, and the release of Kubernetes 1.14 includes production support for scheduling Windows containers on Windows nodes in a Kubernetes cluster, enabling a vast ecosystem of Windows applications to leverage the power of Kubernetes. Organizations with investments in Windows-based applications and Linux-based applications don't have to look for separate orchestrators to manage their workloads, leading to increased operational efficiencies across their deployments, regardless of operating system.
-{{% /capture %}} -{{% capture body %}} + + ## Windows containers in Kubernetes @@ -584,9 +584,10 @@ If filing a bug, please include detailed information about how to reproduce the * [Relevant logs](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs) * Tag the issue sig/windows by commenting on the issue with `/sig windows` to bring it to a SIG-Windows member's attention -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + We have a lot of features in our roadmap. An abbreviated high level list is included below, but we encourage you to view our [roadmap project](https://github.com/orgs/kubernetes/projects/8) and help us make Windows support better by [contributing](https://github.com/kubernetes/community/blob/master/sig-windows/). @@ -638,4 +639,4 @@ properly provisioned. * More CNIs * More Storage Plugins -{{% /capture %}} + diff --git a/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md b/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md index aa1c1f3783..e28afeb9f2 100644 --- a/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md +++ b/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md @@ -3,17 +3,17 @@ reviewers: - michmike - patricklang title: Guide for scheduling Windows containers in Kubernetes -content_template: templates/concept +content_type: concept weight: 75 --- -{{% capture overview %}} + Windows applications constitute a large portion of the services and applications that run in many organizations. This guide walks you through the steps to configure and deploy a Windows container in Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Objectives @@ -245,6 +245,6 @@ spec: ``` -{{% /capture %}} + [RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/ diff --git a/content/en/docs/setup/release/notes.md b/content/en/docs/setup/release/notes.md index a344a11fc0..d80d6c0ffd 100644 --- a/content/en/docs/setup/release/notes.md +++ b/content/en/docs/setup/release/notes.md @@ -2,7 +2,7 @@ title: v1.18 Release Notes weight: 10 card: - name: download + name: release-notes weight: 20 anchors: - anchor: "#" diff --git a/content/en/docs/setup/release/version-skew-policy.md b/content/en/docs/setup/release/version-skew-policy.md index dc411807c5..cc506352d3 100644 --- a/content/en/docs/setup/release/version-skew-policy.md +++ b/content/en/docs/setup/release/version-skew-policy.md @@ -7,16 +7,16 @@ reviewers: - sig-node - sig-release title: Kubernetes version and version skew support policy -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + This document describes the maximum version skew supported between various Kubernetes components. Specific cluster deployment tools may place additional restrictions on version skew. -{{% /capture %}} -{{% capture body %}} + + ## Supported versions @@ -27,11 +27,11 @@ For more information, see [Kubernetes Release Versioning](https://github.com/kub The Kubernetes project maintains release branches for the most recent three minor releases ({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}}). Applicable fixes, including security fixes, may be backported to those three release branches, depending on severity and feasibility. -Patch releases are cut from those branches at a regular cadence, or as needed. 
-This decision is owned by the [patch release team](https://github.com/kubernetes/sig-release/blob/master/release-engineering/role-handbooks/patch-release-team.md#release-timing). -The patch release team is part of [release managers](https://github.com/kubernetes/sig-release/blob/master/release-managers.md). For more information, see [Kubernetes Patch releases](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md). +Patch releases are cut from those branches at a [regular cadence](https://git.k8s.io/sig-release/releases/patch-releases.md#cadence), plus additional urgent releases, when required. -Minor releases occur approximately every 3 months, so each minor release branch is maintained for approximately 9 months. +The [Release Managers](https://git.k8s.io/sig-release/release-managers.md) group owns this decision. + +For more information, see the Kubernetes [patch releases](https://git.k8s.io/sig-release/releases/patch-releases.md) page. ## Supported version skew diff --git a/content/en/docs/tasks/_index.md b/content/en/docs/tasks/_index.md index 1dee1f38f1..504ec1dd89 100644 --- a/content/en/docs/tasks/_index.md +++ b/content/en/docs/tasks/_index.md @@ -2,20 +2,20 @@ title: Tasks main_menu: true weight: 50 -content_template: templates/concept +content_type: concept --- {{< toc >}} -{{% capture overview %}} + This section of the Kubernetes documentation contains pages that show how to do individual tasks. A task page shows how to do a single thing, typically by giving a short sequence of steps. -{{% /capture %}} -{{% capture body %}} + + ## Web UI (Dashboard) @@ -73,11 +73,12 @@ Configure and schedule NVIDIA GPUs for use as a resource by nodes in a cluster. Configure and schedule huge pages as a schedulable resource in a cluster. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + If you would like to write a task page, see [Creating a Documentation Pull Request](/docs/home/contribute/create-pull-request/). -{{% /capture %}} + diff --git a/content/en/docs/tasks/access-application-cluster/access-cluster.md b/content/en/docs/tasks/access-application-cluster/access-cluster.md index 05835f2b08..39ad8b4b7e 100644 --- a/content/en/docs/tasks/access-application-cluster/access-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/access-cluster.md @@ -1,17 +1,17 @@ --- title: Accessing Clusters weight: 20 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + This topic discusses multiple ways to interact with clusters. -{{% /capture %}} -{{% capture body %}} + + ## Accessing for the first time with kubectl @@ -376,4 +376,3 @@ There are several different proxies you may encounter when using Kubernetes: Kubernetes users will typically not need to worry about anything other than the first two types. The cluster admin will typically ensure that the latter types are setup correctly. 
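For the first-time access this page describes, a minimal sketch that reads the API server address from your kubeconfig and then queries the API through `kubectl proxy`, assuming a working context:

```shell
# Check which API server kubectl is pointed at
kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'

# Proxy the API locally (the proxy handles authentication for you)
kubectl proxy --port=8080 &

# Query a simple endpoint through the proxy
curl http://127.0.0.1:8080/version
```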
-{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md b/content/en/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md index 33547cdca6..1d00516d28 100644 --- a/content/en/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md +++ b/content/en/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md @@ -1,25 +1,26 @@ --- title: Communicate Between Containers in the Same Pod Using a Shared Volume -content_template: templates/task +content_type: task weight: 110 --- -{{% capture overview %}} + This page shows how to use a Volume to communicate between two Containers running in the same Pod. See also how to allow processes to communicate by [sharing process namespace](/docs/tasks/configure-pod-container/share-process-namespace/) between containers. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Creating a Pod that runs two Containers @@ -108,10 +109,10 @@ The output shows that nginx serves a web page written by the debian container: Hello from the debian container -{{% /capture %}} -{{% capture discussion %}} + + ## Discussion @@ -127,10 +128,11 @@ The Volume in this exercise provides a way for Containers to communicate during the life of the Pod. If the Pod is deleted and recreated, any data stored in the shared Volume is lost. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [patterns for composite containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns). @@ -147,7 +149,7 @@ the shared Volume is lost. * See [Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core). -{{% /capture %}} + diff --git a/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md b/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md index acd023548a..3f3100395c 100644 --- a/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md +++ b/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md @@ -1,6 +1,6 @@ --- title: Configure Access to Multiple Clusters -content_template: templates/task +content_type: task weight: 30 card: name: tasks @@ -8,7 +8,7 @@ card: --- -{{% capture overview %}} + This page shows how to configure access to multiple clusters by using configuration files. After your clusters, users, and contexts are defined in @@ -21,15 +21,21 @@ a *kubeconfig file*. This is a generic way of referring to configuration files. It does not mean that there is a file named `kubeconfig`. {{< /note >}} -{{% /capture %}} -{{% capture prerequisites %}} -{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} +## {{% heading "prerequisites" %}} -{{% /capture %}} -{{% capture steps %}} +{{< include "task-tutorial-prereqs.md" >}} + +To check that {{< glossary_tooltip text="kubectl" term_id="kubectl" >}} is installed, +run `kubectl version --client`. The kubectl version should be +[within one minor version](/docs/setup/release/version-skew-policy/#kubectl) of your +cluster's API server. 
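A quick sketch of that version check; the client half needs no cluster connection:

```shell
# Client version only; works even without a reachable cluster
kubectl version --client

# With a reachable cluster, compare the client and server minor versions
kubectl version
```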
+ + + + ## Define clusters, users, and contexts @@ -369,14 +375,15 @@ export KUBECONFIG=$KUBECONFIG_SAVED $Env:KUBECONFIG=$ENV:KUBECONFIG_SAVED ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Organizing Cluster Access Using kubeconfig Files](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) * [kubectl config](/docs/reference/generated/kubectl/kubectl-commands#config) -{{% /capture %}} + diff --git a/content/en/docs/tasks/access-application-cluster/configure-cloud-provider-firewall.md b/content/en/docs/tasks/access-application-cluster/configure-cloud-provider-firewall.md deleted file mode 100644 index 0ab9428a36..0000000000 --- a/content/en/docs/tasks/access-application-cluster/configure-cloud-provider-firewall.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -reviewers: -- bprashanth -- davidopp -title: Configure Your Cloud Provider's Firewalls -content_template: templates/task -weight: 90 ---- - -{{% capture overview %}} - -Many cloud providers (e.g. Google Compute Engine) define firewalls that help prevent inadvertent -exposure to the internet. When exposing a service to the external world, you may need to open up -one or more ports in these firewalls to serve traffic. This document describes this process, as -well as any provider specific details that may be necessary. - -{{% /capture %}} - - -{{% capture prerequisites %}} - -{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - -{{% /capture %}} - -{{% capture steps %}} - -## Restrict Access For LoadBalancer Service - - When using a Service with `spec.type: LoadBalancer`, you can specify the IP ranges that are allowed to access the load balancer - by using `spec.loadBalancerSourceRanges`. This field takes a list of IP CIDR ranges, which Kubernetes will use to configure firewall exceptions. - This feature is currently supported on Google Compute Engine, Google Kubernetes Engine, AWS Elastic Kubernetes Service, Azure Kubernetes Service, and IBM Cloud Kubernetes Service. This field will be ignored if the cloud provider does not support the feature. - - Assuming 10.0.0.0/8 is the internal subnet. In the following example, a load balancer will be created that is only accessible to cluster internal IPs. - This will not allow clients from outside of your Kubernetes cluster to access the load balancer. - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: myapp -spec: - ports: - - port: 8765 - targetPort: 9376 - selector: - app: example - type: LoadBalancer - loadBalancerSourceRanges: - - 10.0.0.0/8 -``` - - In the following example, a load balancer will be created that is only accessible to clients with IP addresses from 130.211.204.1 and 130.211.204.2. - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: myapp -spec: - ports: - - port: 8765 - targetPort: 9376 - selector: - app: example - type: LoadBalancer - loadBalancerSourceRanges: - - 130.211.204.1/32 - - 130.211.204.2/32 -``` - -## Google Compute Engine - -When using a Service with `spec.type: LoadBalancer`, the firewall will be -opened automatically. When using `spec.type: NodePort`, however, the firewall -is *not* opened by default. - -Google Compute Engine firewalls are documented [elsewhere](https://cloud.google.com/compute/docs/networking#firewalls_1). - -You can add a firewall with the `gcloud` command line tool: - -```shell -gcloud compute firewall-rules create my-rule --allow=tcp: -``` - -{{< note >}} -GCE firewalls are defined per-vm, rather than per-ip address. 
This means that -when you open a firewall for a service's ports, anything that serves on that -port on that VM's host IP address may potentially serve traffic. Note that this -is not a problem for other Kubernetes services, as they listen on IP addresses -that are different than the host node's external IP address. - -Consider: - - * You create a Service with an external load balancer (IP Address 1.2.3.4) - and port 80 - * You open the firewall for port 80 for all nodes in your cluster, so that - the external Service actually can deliver packets to your Service - * You start an nginx server, running on port 80 on the host virtual machine - (IP Address 2.3.4.5). This nginx is also exposed to the internet on - the VM's external IP address. - -Consequently, please be careful when opening firewalls in Google Compute Engine -or Google Kubernetes Engine. You may accidentally be exposing other services to -the wilds of the internet. - -{{< /note >}} - -{{% /capture %}} diff --git a/content/en/docs/tasks/access-application-cluster/configure-dns-cluster.md b/content/en/docs/tasks/access-application-cluster/configure-dns-cluster.md index 4c17d3128d..3535fdb8bc 100644 --- a/content/en/docs/tasks/access-application-cluster/configure-dns-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/configure-dns-cluster.md @@ -1,13 +1,13 @@ --- title: Configure DNS for a Cluster weight: 120 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Kubernetes offers a DNS cluster addon, which most of the supported environments enable by default. In Kubernetes version 1.11 and later, CoreDNS is recommended and is installed by default with kubeadm. -{{% /capture %}} -{{% capture body %}} + + For more information on how to configure CoreDNS for a Kubernetes cluster, see the [Customizing DNS Service](/docs/tasks/administer-cluster/dns-custom-nameservers/). An example demonstrating how to use Kubernetes DNS with kube-dns, see the [Kubernetes DNS sample plugin](https://github.com/kubernetes/examples/tree/master/staging/cluster-dns). -{{% /capture %}} + diff --git a/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md b/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md index 264d930d5f..0ce827185c 100644 --- a/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md +++ b/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md @@ -1,30 +1,32 @@ --- title: Connect a Front End to a Back End Using a Service -content_template: templates/tutorial +content_type: tutorial weight: 70 --- -{{% capture overview %}} + This task shows how to create a frontend and a backend microservice. The backend microservice is a hello greeter. The frontend and backend are connected using a Kubernetes {{< glossary_tooltip term_id="service" >}} object. -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Create and run a microservice using a {{< glossary_tooltip term_id="deployment" >}} object. * Route traffic to the backend using a frontend. * Use a Service object to connect the frontend application to the backend application. 
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -34,10 +36,10 @@ frontend and backend are connected using a Kubernetes support this, you can use a Service of type [NodePort](/docs/concepts/services-networking/service/#nodeport) instead. -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Creating the backend using a Deployment @@ -201,9 +203,10 @@ The output shows the message generated by the backend: {"message":"Hello"} ``` -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + To delete the Services, enter this command: @@ -213,13 +216,14 @@ To delete the Deployments, the ReplicaSets and the Pods that are running the bac kubectl delete deployment frontend hello -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Services](/docs/concepts/services-networking/service/) * Learn more about [ConfigMaps](/docs/tasks/configure-pod-container/configure-pod-configmap/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md b/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md index 720203d60d..7dcc613232 100644 --- a/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md +++ b/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md @@ -1,11 +1,11 @@ --- title: Create an External Load Balancer -content_template: templates/task +content_type: task weight: 80 --- -{{% capture overview %}} + This page shows how to create an External Load Balancer. @@ -24,15 +24,16 @@ services externally-reachable URLs, load balance the traffic, terminate SSL etc. please check the [Ingress](/docs/concepts/services-networking/ingress/) documentation. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Configuration file @@ -199,4 +200,4 @@ Once the external load balancers provide weights, this functionality can be adde Internal pod to pod traffic should behave similar to ClusterIP services, with equal probability across all pods. -{{% /capture %}} + diff --git a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md index 0a16c71064..9288ec3064 100644 --- a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md +++ b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md @@ -1,25 +1,26 @@ --- title: Set up Ingress on Minikube with the NGINX Ingress Controller -content_template: templates/task +content_type: task weight: 100 --- -{{% capture overview %}} + An [Ingress](/docs/concepts/services-networking/ingress/) is an API object that defines rules which allow external access to services in a cluster. An [Ingress controller](/docs/concepts/services-networking/ingress-controllers/) fulfills the rules set in the Ingress. This page shows you how to set up a simple Ingress which routes requests to Service web or web2 depending on the HTTP URI. 
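Before creating the Ingress resource itself, the NGINX controller has to be running. A minimal sketch on Minikube, assuming the built-in `ingress` addon (on Minikube of this era the controller Pod runs in `kube-system`):

```shell
# Enable the NGINX Ingress controller shipped as a Minikube addon
minikube addons enable ingress

# Verify the controller Pod is running
kubectl get pods -n kube-system | grep nginx-ingress-controller
```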
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Create a Minikube cluster @@ -275,13 +276,14 @@ The following file is an Ingress resource that sends traffic to your Service via {{< note >}}If you are running Minikube locally, you can visit hello-world.info and hello-world.info/v2 from your browser.{{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Read more about [Ingress](/docs/concepts/services-networking/ingress/) * Read more about [Ingress Controllers](/docs/concepts/services-networking/ingress-controllers/) * Read more about [Services](/docs/concepts/services-networking/service/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md b/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md index b3fb886d11..d1e1ba1568 100644 --- a/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md +++ b/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md @@ -1,23 +1,24 @@ --- title: List All Container Images Running in a Cluster -content_template: templates/task +content_type: task weight: 100 --- -{{% capture overview %}} + This page shows how to use kubectl to list all of the Container images for Pods running in a cluster. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + In this exercise you will use kubectl to fetch all of the Pods running in a cluster, and format the output to pull out the list @@ -108,19 +109,20 @@ kubectl get pods --all-namespaces -o go-template --template="{{range .items}}{{r -{{% /capture %}} -{{% capture discussion %}} -{{% /capture %}} + + + + +## {{% heading "whatsnext" %}} -{{% capture whatsnext %}} ### Reference * [Jsonpath](/docs/user-guide/jsonpath/) reference guide * [Go template](https://golang.org/pkg/text/template/) reference guide -{{% /capture %}} + diff --git a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md index fc24022d0c..a6c2e217a5 100644 --- a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md @@ -1,29 +1,30 @@ --- title: Use Port Forwarding to Access Applications in a Cluster -content_template: templates/task +content_type: task weight: 40 min-kubernetes-server-version: v1.10 --- -{{% capture overview %}} + This page shows how to use `kubectl port-forward` to connect to a Redis server running in a Kubernetes cluster. This type of connection can be useful for database debugging. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * Install [redis-cli](http://redis.io/topics/rediscli). -{{% /capture %}} -{{% capture steps %}} + + ## Creating Redis deployment and service @@ -179,10 +180,10 @@ for database debugging. 
PONG ``` -{{% /capture %}} -{{% capture discussion %}} + + ## Discussion @@ -196,9 +197,10 @@ The support for UDP protocol is tracked in [issue 47862](https://github.com/kubernetes/kubernetes/issues/47862). {{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Learn more about [kubectl port-forward](/docs/reference/generated/kubectl/kubectl-commands/#port-forward). -{{% /capture %}} + diff --git a/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md b/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md index af5eb2db86..fe90981432 100644 --- a/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md @@ -1,35 +1,37 @@ --- title: Use a Service to Access an Application in a Cluster -content_template: templates/tutorial +content_type: tutorial weight: 60 --- -{{% capture overview %}} + This page shows how to create a Kubernetes Service object that external clients can use to access an application running in a cluster. The Service provides load balancing for an application that has two running instances. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Run two instances of a Hello World application. * Create a Service object that exposes a node port. * Use the Service object to access the running application. -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Creating a service for an application running in two pods @@ -130,10 +132,11 @@ As an alternative to using `kubectl expose`, you can use a [service configuration file](/docs/concepts/services-networking/service/) to create a Service. -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + To delete the Service, enter this command: @@ -144,11 +147,12 @@ the Hello World application, enter this command: kubectl delete deployment hello-world -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Learn more about [connecting applications with services](/docs/concepts/services-networking/connect-applications-service/). -{{% /capture %}} + diff --git a/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md b/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md index 88132f5218..7a37fdc20b 100644 --- a/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md +++ b/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md @@ -4,7 +4,7 @@ reviewers: - mikedanese - rf232 title: Web UI (Dashboard) -content_template: templates/concept +content_type: concept weight: 10 card: name: tasks @@ -12,7 +12,7 @@ card: title: Use the Web UI Dashboard --- -{{% capture overview %}} + Dashboard is a web-based Kubernetes user interface. You can use Dashboard to deploy containerized applications to a Kubernetes cluster, troubleshoot your containerized application, and manage the cluster resources. You can use Dashboard to get an overview of applications running on your cluster, as well as for creating or modifying individual Kubernetes resources (such as Deployments, Jobs, DaemonSets, etc). For example, you can scale a Deployment, initiate a rolling update, restart a pod or deploy new applications using a deploy wizard. 
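Deploying the UI is a single `kubectl apply` of the recommended manifest; a sketch, assuming the v2.0.0 Dashboard release:

```shell
# Deploy the Dashboard (pin the release that matches your cluster).
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
# Then reach it through the API server proxy:
kubectl proxy
# http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
```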
@@ -20,10 +20,10 @@ Dashboard also provides information on the state of Kubernetes resources in your ![Kubernetes Dashboard UI](/images/docs/ui-dashboard.png) -{{% /capture %}} -{{% capture body %}} + + ## Deploying the Dashboard UI @@ -83,7 +83,11 @@ The deploy wizard expects that you provide the following information: A [Deployment](/docs/concepts/workloads/controllers/deployment/) will be created to maintain the desired number of Pods across your cluster. -- **Service** (optional): For some parts of your application (e.g. frontends) you may want to expose a [Service](/docs/concepts/services-networking/service/) onto an external, maybe public IP address outside of your cluster (external Service). For external Services, you may need to open up one or more ports to do so. Find more details [here](/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/). +- **Service** (optional): For some parts of your application (e.g. frontends) you may want to expose a [Service](/docs/concepts/services-networking/service/) onto an external, maybe public IP address outside of your cluster (external Service). + + {{< note >}} + For external Services, you may need to open up one or more ports to do so. + {{< /note >}} Other Services that are only visible from inside the cluster are called internal Services. @@ -162,11 +166,12 @@ Pod lists and detail pages link to a logs viewer that is built into Dashboard. T ![Logs viewer](/images/docs/ui-dashboard-logs-view.png) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + For more information, see the [Kubernetes Dashboard project page](https://github.com/kubernetes/dashboard). -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/access-cluster-api.md b/content/en/docs/tasks/administer-cluster/access-cluster-api.md index 5aa79d4406..659c8d777c 100644 --- a/content/en/docs/tasks/administer-cluster/access-cluster-api.md +++ b/content/en/docs/tasks/administer-cluster/access-cluster-api.md @@ -1,18 +1,19 @@ --- title: Access Clusters Using the Kubernetes API -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to access clusters using the Kubernetes API. 
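Before reaching for a client library, you can poke the API directly; a minimal sketch using kubectl's built-in proxy, which handles authentication and TLS for you:

```shell
# Terminal 1: proxy the API server to localhost.
kubectl proxy --port=8080 &
# Terminal 2: query the API root.
curl http://localhost:8080/api/
```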
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Accessing the Kubernetes API @@ -269,8 +270,8 @@ public class KubeConfigFileClientExample { CoreV1Api api = new CoreV1Api(); // invokes the CoreV1Api client - V1PodList list = api.listPodForAllNamespaces(null, null, null, null, null, null, null, null); - System.out.Println("Listing all pods: "); + V1PodList list = api.listPodForAllNamespaces(null, null, null, null, null, null, null, null, null); + System.out.println("Listing all pods: "); for (V1Pod item : list.getItems()) { System.out.println(item.getMetadata().getName()); } @@ -449,5 +450,5 @@ The output will be similar to this: } ``` -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/access-cluster-services.md b/content/en/docs/tasks/administer-cluster/access-cluster-services.md index 57cdc835de..979a75a162 100644 --- a/content/en/docs/tasks/administer-cluster/access-cluster-services.md +++ b/content/en/docs/tasks/administer-cluster/access-cluster-services.md @@ -1,18 +1,19 @@ --- title: Access Services Running on Clusters -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to connect to services running on the Kubernetes cluster. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Accessing services running on the cluster @@ -132,6 +133,6 @@ You may be able to put an apiserver proxy URL into the address bar of a browser. - Some web apps may not work, particularly those with client side javascript that construct URLs in a way that is unaware of the proxy path prefix. -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/change-default-storage-class.md b/content/en/docs/tasks/administer-cluster/change-default-storage-class.md index a2070bcfe3..453cfef221 100644 --- a/content/en/docs/tasks/administer-cluster/change-default-storage-class.md +++ b/content/en/docs/tasks/administer-cluster/change-default-storage-class.md @@ -1,21 +1,22 @@ --- title: Change the default StorageClass -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to change the default Storage Class that is used to provision volumes for PersistentVolumeClaims that have no special requirements. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Why change the default storage class? @@ -93,10 +94,11 @@ for details about addon manager and how to disable individual addons. gold (default) kubernetes.io/gce-pd 1d ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [PersistentVolumes](/docs/concepts/storage/persistent-volumes/). 
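Changing the default comes down to flipping the `is-default-class` annotation on two StorageClasses; a minimal sketch (the class names `standard` and `gold` are illustrative):

```shell
# Remove the default flag from the current default class...
kubectl patch storageclass standard -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
# ...and set it on the class you want as the new default.
kubectl patch storageclass gold -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
```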
-{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md b/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md index a7ac4d80c9..729c7bde4f 100644 --- a/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md +++ b/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md @@ -1,20 +1,21 @@ --- title: Change the Reclaim Policy of a PersistentVolume -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to change the reclaim policy of a Kubernetes PersistentVolume. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Why change reclaim policy of a PersistentVolume @@ -80,9 +81,10 @@ kubectl patch pv -p "{\"spec\":{\"persistentVolumeReclaimPolicy\" `default/claim3` has reclaim policy `Retain`. It will not be automatically deleted when a user deletes claim `default/claim3`. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [PersistentVolumes](/docs/concepts/storage/persistent-volumes/). * Learn more about [PersistentVolumeClaims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims). @@ -91,6 +93,6 @@ kubectl patch pv -p "{\"spec\":{\"persistentVolumeReclaimPolicy\" * [PersistentVolume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolume-v1-core) * [PersistentVolumeClaim](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core) * See the `persistentVolumeReclaimPolicy` field of [PersistentVolumeSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core). -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/cluster-management.md b/content/en/docs/tasks/administer-cluster/cluster-management.md index 65728ec4ee..7cbab3aa2c 100644 --- a/content/en/docs/tasks/administer-cluster/cluster-management.md +++ b/content/en/docs/tasks/administer-cluster/cluster-management.md @@ -3,20 +3,20 @@ reviewers: - lavalamp - thockin title: Cluster Management -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + This document describes several topics related to the lifecycle of a cluster: creating a new cluster, upgrading your cluster's master and worker nodes, performing node maintenance (e.g. kernel upgrades), and upgrading the Kubernetes API version of a running cluster. -{{% /capture %}} -{{% capture body %}} + + ## Creating and configuring a Cluster @@ -224,4 +224,4 @@ kubectl convert -f pod.yaml --output-version v1 For more options, please refer to the usage of [kubectl convert](/docs/reference/generated/kubectl/kubectl-commands#convert) command. -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/configure-multiple-schedulers.md b/content/en/docs/tasks/administer-cluster/configure-multiple-schedulers.md index 436584ad14..e4b58b70e3 100644 --- a/content/en/docs/tasks/administer-cluster/configure-multiple-schedulers.md +++ b/content/en/docs/tasks/administer-cluster/configure-multiple-schedulers.md @@ -3,10 +3,10 @@ reviewers: - davidopp - madhusudancs title: Configure Multiple Schedulers -content_template: templates/task +content_type: task --- -{{% capture overview %}} + Kubernetes ships with a default scheduler that is described [here](/docs/admin/kube-scheduler/). 
If the default scheduler does not suit your needs you can implement your own scheduler. @@ -19,16 +19,17 @@ document. Please refer to the kube-scheduler implementation in [pkg/scheduler](https://github.com/kubernetes/kubernetes/tree/{{< param "githubbranch" >}}/pkg/scheduler) in the Kubernetes source directory for a canonical example. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Package the scheduler @@ -219,9 +220,9 @@ kubectl create -f pod3.yaml kubectl get pods ``` -{{% /capture %}} -{{% capture discussion %}} + + ### Verifying that the pods were scheduled using the desired schedulers @@ -241,4 +242,4 @@ verify that the pods were scheduled by the desired schedulers. kubectl get events ``` -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md b/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md index 73cecd999b..91661d235f 100644 --- a/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md +++ b/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md @@ -3,23 +3,24 @@ reviewers: - mml - wojtek-t title: Operating etcd clusters for Kubernetes -content_template: templates/task +content_type: task --- -{{% capture overview %}} + {{< glossary_definition term_id="etcd" length="all" prepend="etcd is a ">}} -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Prerequisites @@ -238,4 +239,4 @@ To urgently fix this bug for Kubernetes 1.15 or earlier, build a custom kube-api See ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available"](https://github.com/kubernetes/kubernetes/issues/72102). -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/coredns.md b/content/en/docs/tasks/administer-cluster/coredns.md index 2e50d54f06..32d4f7d7ec 100644 --- a/content/en/docs/tasks/administer-cluster/coredns.md +++ b/content/en/docs/tasks/administer-cluster/coredns.md @@ -3,18 +3,19 @@ reviewers: - johnbelamaric title: Using CoreDNS for Service Discovery min-kubernetes-server-version: v1.9 -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page describes the CoreDNS upgrade process and how to install CoreDNS instead of kube-dns. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## About CoreDNS @@ -89,14 +90,15 @@ There is a helpful [guideline and walkthrough](https://github.com/coredns/deploy When resource utilisation is a concern, it may be useful to tune the configuration of CoreDNS. For more details, check out the [documentation on scaling CoreDNS](https://github.com/coredns/deployment/blob/master/kubernetes/Scaling_CoreDNS.md). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + You can configure [CoreDNS](https://coredns.io) to support many more use cases than kube-dns by modifying the `Corefile`. For more information, see the [CoreDNS site](https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/). 
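For the multiple-schedulers task above, pods select a scheduler through `spec.schedulerName`; a minimal sketch (the scheduler name is illustrative and must match the name your custom scheduler registers):

```shell
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-second-scheduler
spec:
  schedulerName: my-scheduler   # omit this field to fall back to the default scheduler
  containers:
  - name: pause
    image: k8s.gcr.io/pause:3.2
EOF
```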
-{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/cpu-management-policies.md b/content/en/docs/tasks/administer-cluster/cpu-management-policies.md index 9568843e87..1b29abf17c 100644 --- a/content/en/docs/tasks/administer-cluster/cpu-management-policies.md +++ b/content/en/docs/tasks/administer-cluster/cpu-management-policies.md @@ -4,10 +4,10 @@ reviewers: - sjenning - ConnorDoyle - balajismaniam -content_template: templates/task +content_type: task --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.12" state="beta" >}} @@ -18,16 +18,17 @@ acceptably. The kubelet provides methods to enable more complex workload placement policies while keeping the abstraction free from explicit placement directives. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## CPU Management Policies @@ -211,4 +212,4 @@ and `requests` are set equal to `limits` when not explicitly specified. And the container's resource limit for the CPU resource is an integer greater than or equal to one. The `nginx` container is granted 2 exclusive CPUs. -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/declare-network-policy.md b/content/en/docs/tasks/administer-cluster/declare-network-policy.md index 1b6a706934..61add5312a 100644 --- a/content/en/docs/tasks/administer-cluster/declare-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/declare-network-policy.md @@ -4,13 +4,14 @@ reviewers: - danwinship title: Declare Network Policy min-kubernetes-server-version: v1.8 -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This document helps you get started using the Kubernetes [NetworkPolicy API](/docs/concepts/services-networking/network-policies/) to declare network policies that govern how pods communicate with each other. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -25,9 +26,9 @@ Make sure you've configured a network provider with network policy support. Ther {{< note >}} The above list is sorted alphabetically by product name, not by recommendation or preference. This example is valid for a Kubernetes cluster using any of these providers. {{< /note >}} -{{% /capture %}} -{{% capture steps %}} + + ## Create an `nginx` deployment and expose it via a service @@ -146,4 +147,4 @@ Connecting to nginx (10.100.0.16:80) remote file exists ``` -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/developing-cloud-controller-manager.md b/content/en/docs/tasks/administer-cluster/developing-cloud-controller-manager.md index 0e80a018c4..0f6579d915 100644 --- a/content/en/docs/tasks/administer-cluster/developing-cloud-controller-manager.md +++ b/content/en/docs/tasks/administer-cluster/developing-cloud-controller-manager.md @@ -4,18 +4,18 @@ reviewers: - thockin - wlan0 title: Developing Cloud Controller Manager -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.11" state="beta" >}} {{< glossary_definition term_id="cloud-controller-manager" length="all" prepend="The cloud-controller-manager is">}} -{{% /capture %}} -{{% capture body %}} + + ## Background @@ -41,4 +41,4 @@ controller manager as your starting point. 
For in-tree cloud providers, you can run the in-tree cloud controller manager as a {{< glossary_tooltip term_id="daemonset" >}} in your cluster. See [Cloud Controller Manager Administration](/docs/tasks/administer-cluster/running-cloud-controller/) for more details. -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md b/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md index f3101bf6c9..f5e1e93239 100644 --- a/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md +++ b/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md @@ -3,24 +3,25 @@ reviewers: - bowei - zihongz title: Customizing DNS Service -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page explains how to configure your DNS Pod and customize the DNS resolution process. In Kubernetes version 1.11 and later, CoreDNS is at GA and is installed by default with kubeadm. See [CoreDNS ConfigMap options](#coredns-configmap-options) and [Using CoreDNS for Service Discovery](/docs/tasks/administer-cluster/coredns/). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * Kubernetes version 1.6 or later. To work with CoreDNS, version 1.9 or later. * The appropriate add-on: kube-dns or CoreDNS. To install with kubeadm, see [the kubeadm reference documentation](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-phase-addon). -{{% /capture %}} -{{% capture steps %}} + + ## Introduction @@ -213,9 +214,9 @@ their destination DNS servers: See [ConfigMap options](#configmap-options) for details about the configuration option format. -{{% /capture %}} -{{% capture discussion %}} + + #### Effects on Pods @@ -302,7 +303,7 @@ data: ["172.16.0.1"] ``` -{{% /capture %}} + ## CoreDNS configuration equivalent to kube-dns diff --git a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md index 3a69bd84ec..26aa968855 100644 --- a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md +++ b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md @@ -3,20 +3,21 @@ reviewers: - bowei - zihongz title: Debugging DNS Resolution -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page provides hints on diagnosing DNS problems. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * Kubernetes version 1.6 and above. * The cluster must be configured to use the `coredns` (or `kube-dns`) addons. -{{% /capture %}} -{{% capture steps %}} + + ### Create a simple Pod to use as a test environment @@ -273,5 +274,5 @@ for more information. ## What's next - [Autoscaling the DNS Service in a Cluster](/docs/tasks/administer-cluster/dns-horizontal-autoscaling/). 
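A first debugging step is a lookup from inside a disposable Pod; a minimal sketch (`busybox:1.28` is chosen deliberately, as later busybox images ship a broken `nslookup`):

```shell
# Resolve the API server's Service name from inside the cluster.
kubectl run dnsutils --rm -it --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default
```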
-{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md b/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md index 5d5dc98ade..6fd887bd8f 100644 --- a/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md +++ b/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md @@ -1,14 +1,15 @@ --- title: Autoscale the DNS Service in a Cluster -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to enable and configure autoscaling of the DNS service in your Kubernetes cluster. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -16,9 +17,9 @@ your Kubernetes cluster. * Make sure [Kubernetes DNS](/docs/concepts/services-networking/dns-pod-service/) is enabled. -{{% /capture %}} -{{% capture steps %}} + + ## Determine whether DNS horizontal autoscaling is already enabled {#determining-whether-dns-horizontal-autoscaling-is-already-enabled} @@ -201,9 +202,9 @@ The common path for this dns-autoscaler is: After the manifest file is deleted, the Addon Manager will delete the dns-autoscaler Deployment. -{{% /capture %}} -{{% capture discussion %}} + + ## Understanding how DNS horizontal autoscaling works @@ -226,10 +227,11 @@ the autoscaler Pod. * The autoscaler provides a controller interface to support two control patterns: *linear* and *ladder*. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Read about [Guaranteed Scheduling For Critical Add-On Pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/). * Learn more about the [implementation of cluster-proportional-autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler). -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/enabling-endpointslices.md b/content/en/docs/tasks/administer-cluster/enabling-endpointslices.md index b8e4cf900d..b9e389ead7 100644 --- a/content/en/docs/tasks/administer-cluster/enabling-endpointslices.md +++ b/content/en/docs/tasks/administer-cluster/enabling-endpointslices.md @@ -3,19 +3,20 @@ reviewers: - bowei - freehan title: Enabling EndpointSlices -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page provides an overview of enabling EndpointSlices in Kubernetes. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Introduction @@ -55,9 +56,10 @@ existing Endpoints functionality, EndpointSlices include new bits of information such as topology. They will allow for greater scalability and extensibility of network endpoints in your cluster. 
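For the DNS autoscaling task above, the control pattern and its parameters live in the autoscaler's ConfigMap; a minimal sketch of *linear* mode (all numbers are illustrative):

```shell
# Edit the parameters; in linear mode the controller computes
# replicas = max(ceil(cores/coresPerReplica), ceil(nodes/nodesPerReplica)).
kubectl edit configmap dns-autoscaler --namespace=kube-system
# e.g. set:  linear: '{"coresPerReplica":256,"nodesPerReplica":16,"min":1}'
```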
-{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + * Read about [EndpointSlices](/docs/concepts/services-networking/endpoint-slices/) * Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/enabling-service-topology.md b/content/en/docs/tasks/administer-cluster/enabling-service-topology.md index c39b9b366d..998bb8b2e5 100644 --- a/content/en/docs/tasks/administer-cluster/enabling-service-topology.md +++ b/content/en/docs/tasks/administer-cluster/enabling-service-topology.md @@ -4,19 +4,20 @@ reviewers: - johnbelamaric - imroc title: Enabling Service Topology -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page provides an overview of enabling Service Topology in Kubernetes. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Introduction @@ -45,10 +46,11 @@ To enable service topology, enable the `ServiceTopology` and `EndpointSlice` fea ``` -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + * Read about the [Service Topology](/docs/concepts/services-networking/service-topology) concept * Read about [Endpoint Slices](/docs/concepts/services-networking/endpoint-slices) * Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/encrypt-data.md b/content/en/docs/tasks/administer-cluster/encrypt-data.md index b96f034963..8499855bb0 100644 --- a/content/en/docs/tasks/administer-cluster/encrypt-data.md +++ b/content/en/docs/tasks/administer-cluster/encrypt-data.md @@ -2,23 +2,24 @@ reviewers: - smarterclayton title: Encrypting Secret Data at Rest -content_template: templates/task +content_type: task min-kubernetes-server-version: 1.13 --- -{{% capture overview %}} + This page shows how to enable and configure encryption of secret data at rest. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * etcd v3.0 or later is required -{{% /capture %}} -{{% capture steps %}} + + ## Configuration and determining whether encryption at rest is already enabled @@ -215,4 +216,4 @@ kubectl get secrets --all-namespaces -o json | kubectl replace -f - ``` to force all secrets to be decrypted. -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/extended-resource-node.md b/content/en/docs/tasks/administer-cluster/extended-resource-node.md index 49e491d251..07d8fea616 100644 --- a/content/en/docs/tasks/administer-cluster/extended-resource-node.md +++ b/content/en/docs/tasks/administer-cluster/extended-resource-node.md @@ -1,26 +1,27 @@ --- title: Advertise Extended Resources for a Node -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to specify extended resources for a Node. Extended resources allow cluster administrators to advertise node-level resources that would otherwise be unknown to Kubernetes. 
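Advertising an extended resource is an HTTP PATCH against the Node's status; a minimal sketch (the resource name, count, and node name are illustrative):

```shell
# Terminal 1: open a proxy to the API server.
kubectl proxy
# Terminal 2: advertise four "dongle" resources on the node
# ("~1" is the JSON-Patch escape for the "/" in example.com/dongle).
curl --header "Content-Type: application/json-patch+json" \
  --request PATCH \
  --data '[{"op": "add", "path": "/status/capacity/example.com~1dongle", "value": "4"}]' \
  http://localhost:8001/api/v1/nodes/<your-node-name>/status
```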
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Get the names of your Nodes @@ -189,10 +190,11 @@ kubectl describe node | grep dongle (you should not see any output) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For application developers @@ -204,4 +206,4 @@ kubectl describe node | grep dongle * [Configure Minimum and Maximum CPU Constraints for a Namespace](/docs/tasks/administer-cluster/cpu-constraint-namespace/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md b/content/en/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md index 0b00eed125..0d5b6d4ebe 100644 --- a/content/en/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md +++ b/content/en/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md @@ -4,10 +4,10 @@ reviewers: - filipg - piosz title: Guaranteed Scheduling For Critical Add-On Pods -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + In addition to Kubernetes core components like api-server, scheduler, controller-manager running on a master machine there are a number of add-ons which, for various reasons, must run on a regular cluster node (rather than the Kubernetes master). @@ -19,14 +19,14 @@ vacated by the evicted critical add-on pod or the amount of resources available Note that marking a pod as critical is not meant to prevent evictions entirely; it only prevents the pod from becoming permanently unavailable. For static pods, this means it can't be evicted, but for non-static pods, it just means they will always be rescheduled. -{{% /capture %}} -{{% capture body %}} + + ### Marking pod as critical To mark a Pod as critical, set priorityClassName for that Pod to `system-cluster-critical` or `system-node-critical`. `system-node-critical` is the highest available priority, even higher than `system-cluster-critical`. -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/highly-available-master.md b/content/en/docs/tasks/administer-cluster/highly-available-master.md index e5529da7c7..e2a582f8b2 100644 --- a/content/en/docs/tasks/administer-cluster/highly-available-master.md +++ b/content/en/docs/tasks/administer-cluster/highly-available-master.md @@ -2,26 +2,27 @@ reviewers: - jszczepkowski title: Set up High-Availability Kubernetes Masters -content_template: templates/task +content_type: task --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.5" state="alpha" >}} You can replicate Kubernetes masters in `kube-up` or `kube-down` scripts for Google Compute Engine. This document describes how to use kube-up/down scripts to manage highly available (HA) masters and how HA masters are implemented for use with GCE. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Starting an HA-compatible cluster @@ -118,9 +119,9 @@ If the cluster is large, it may take a long time to duplicate its state. This operation may be sped up by migrating etcd data directory, as described [here](https://coreos.com/etcd/docs/latest/admin_guide.html#member-migration) (we are considering adding support for etcd data dir migration in future). 
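For marking a Pod as critical (described above), a minimal sketch of the relevant field; the Pod itself is illustrative, and these priority classes are intended for add-ons in `kube-system`:

```shell
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: critical-addon
  namespace: kube-system
spec:
  priorityClassName: system-cluster-critical  # or system-node-critical, the highest priority
  containers:
  - name: pause
    image: k8s.gcr.io/pause:3.2
EOF
```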
-{{% /capture %}} -{{% capture discussion %}} + + ## Implementation notes @@ -173,4 +174,4 @@ To make such deployment secure, communication between etcd instances is authoriz [Automated HA master deployment - design doc](https://git.k8s.io/community/contributors/design-proposals/cluster-lifecycle/ha_master.md) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/ip-masq-agent.md b/content/en/docs/tasks/administer-cluster/ip-masq-agent.md index bdc871ddd9..9c2e1d3d5d 100644 --- a/content/en/docs/tasks/administer-cluster/ip-masq-agent.md +++ b/content/en/docs/tasks/administer-cluster/ip-masq-agent.md @@ -1,19 +1,20 @@ --- title: IP Masquerade Agent User Guide -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to configure and enable the ip-masq-agent. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture discussion %}} + + ## IP Masquerade Agent User Guide The ip-masq-agent configures iptables rules to hide a pod's IP address behind the cluster node's IP address. This is typically done when sending traffic to destinations outside the cluster's pod [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) range. @@ -53,9 +54,9 @@ MASQUERADE all -- anywhere anywhere /* ip-masq-agent: By default, in GCE/Google Kubernetes Engine starting with Kubernetes version 1.7.0, if network policy is enabled or you are using a cluster CIDR not in the 10.0.0.0/8 range, the ip-masq-agent will run in your cluster. If you are running in another environment, you can add the ip-masq-agent [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) to your cluster: -{{% /capture %}} -{{% capture steps %}} + + ## Create an ip-masq-agent To create an ip-masq-agent, run the following kubectl command: @@ -110,4 +111,4 @@ nonMasqueradeCIDRs: resyncInterval: 60s masqLinkLocal: true ``` -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/kms-provider.md b/content/en/docs/tasks/administer-cluster/kms-provider.md index d90ca853cf..34cc1d6b66 100644 --- a/content/en/docs/tasks/administer-cluster/kms-provider.md +++ b/content/en/docs/tasks/administer-cluster/kms-provider.md @@ -2,13 +2,14 @@ reviewers: - smarterclayton title: Using a KMS provider for data encryption -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to configure a Key Management Service (KMS) provider and plugin to enable secret data encryption. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -18,9 +19,9 @@ This page shows how to configure a Key Management Service (KMS) provider and plu {{< feature-state for_k8s_version="v1.12" state="beta" >}} -{{% /capture %}} -{{% capture steps %}} + + The KMS encryption provider uses an envelope encryption scheme to encrypt data in etcd. The data is encrypted using a data encryption key (DEK); a new DEK is generated for each encryption. The DEKs are encrypted with a key encryption key (KEK) that is stored and managed in a remote KMS. The KMS provider uses gRPC to communicate with a specific KMS plugin. The KMS plugin, which is implemented as a gRPC server and deployed on the same host(s) as the Kubernetes master(s), is responsible for all communication with the remote KMS. 
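Wiring the API server to a KMS plugin happens in the encryption configuration file passed via `--encryption-provider-config`; a minimal sketch (the plugin name, socket path, and tuning values are illustrative):

```shell
# Reference this file from kube-apiserver via --encryption-provider-config.
cat <<EOF > /etc/kubernetes/enc/encryption-config.yaml
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - kms:
          name: myKmsPlugin                     # must match the plugin you deploy
          endpoint: unix:///tmp/socketfile.sock # gRPC socket served by the plugin
          cachesize: 100                        # number of DEKs cached in the clear
          timeout: 3s
      - identity: {}                            # fallback for reading unencrypted data
EOF
```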
@@ -183,4 +184,4 @@ To disable encryption at rest: ``` kubectl get secrets --all-namespaces -o json | kubectl replace -f - ``` -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md b/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md index 28df69c13a..e82c53f3a6 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md @@ -4,20 +4,21 @@ reviewers: - patricklang title: Adding Windows nodes min-kubernetes-server-version: 1.17 -content_template: templates/tutorial +content_type: tutorial weight: 30 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.18" state="beta" >}} You can use Kubernetes to run a mixture of Linux and Windows nodes, so you can mix Pods that run on Linux on with Pods that run on Windows. This page shows how to register Windows nodes to your cluster. -{{% /capture %}} -{{% capture prerequisites %}} {{< version-check >}} + +## {{% heading "prerequisites" %}} + {{< version-check >}} * Obtain a [Windows Server 2019 license](https://www.microsoft.com/en-us/cloud-platform/windows-server-pricing) (or higher) in order to configure the Windows node that hosts Windows containers. @@ -25,18 +26,19 @@ If you are using VXLAN/Overlay networking you must have also have [KB4489899](ht * A Linux-based Kubernetes kubeadm cluster in which you have access to the control plane (see [Creating a single control-plane cluster with kubeadm](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/)). -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Register a Windows node to the cluster * Configure networking so Pods and Services on Linux and Windows can communicate with each other -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Getting Started: Adding a Windows Node to Your Cluster @@ -176,10 +178,11 @@ kubectl -n kube-system get pods -l app=flannel Once the flannel Pod is running, your node should enter the `Ready` state and then be available to handle workloads. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [Upgrading Windows kubeadm nodes](/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md index 6329c4a395..461e45bda6 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md @@ -2,25 +2,26 @@ reviewers: - sig-cluster-lifecycle title: Certificate Management with kubeadm -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.15" state="stable" >}} Client certificates generated by [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) expire after 1 year. This page explains how to manage certificate renewals with kubeadm. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + You should be familiar with [PKI certificates and requirements in Kubernetes](/docs/setup/best-practices/certificates/). 
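Checking expiry and renewing are single commands; a minimal sketch for kubeadm of this era, where the subcommands still live under `alpha`:

```shell
# Show expiration dates for all kubeadm-managed certificates...
kubeadm alpha certs check-expiration
# ...and renew them in one go (individual certificates can be renewed by name).
kubeadm alpha certs renew all
```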
-{{% /capture %}} -{{% capture steps %}} + + ## Using custom certificates {#custom-certificates} @@ -242,4 +243,8 @@ After a certificate is signed using your preferred method, the certificate and t [cert-cas]: /docs/setup/best-practices/certificates/#single-root-ca [cert-table]: /docs/setup/best-practices/certificates/#all-certificates -{{% /capture %}} +## Certificate authority (CA) rotation {#certificate-authority-rotation} + +Kubeadm does not support rotation or replacement of CA certificates out of the box. + +For more information about manual rotation or replacement of CA, see [manual rotation of CA certificates](/docs/tasks/tls/manual-rotation-of-ca-certificates/). diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md index f0368ecaf9..2c4c3d135e 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md @@ -2,12 +2,12 @@ reviewers: - sig-cluster-lifecycle title: Upgrading kubeadm clusters -content_template: templates/task +content_type: task weight: 20 min-kubernetes-server-version: 1.18 --- -{{% capture overview %}} + This page explains how to upgrade a Kubernetes cluster created with kubeadm from version 1.17.x to version 1.18.x, and from version 1.18.x to 1.18.y (where `y > x`). @@ -26,9 +26,10 @@ The upgrade workflow at high level is the following: 1. Upgrade additional control plane nodes. 1. Upgrade worker nodes. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + - You need to have a kubeadm Kubernetes cluster running version 1.17.0 or later. - [Swap must be disabled](https://serverfault.com/questions/684771/best-way-to-disable-swap-in-linux). @@ -44,9 +45,9 @@ The upgrade workflow at high level is the following: or between PATCH versions of the same MINOR. That is, you cannot skip MINOR versions when you upgrade. For example, you can upgrade from 1.y to 1.y+1, but not from 1.y to 1.y+2. -{{% /capture %}} -{{% capture steps %}} + + ## Determine which version to upgrade to @@ -148,7 +149,7 @@ Find the latest stable 1.18 version: {{< note >}} `kubeadm upgrade` also automatically renews the certificates that it manages on this node. To opt-out of certificate renewal the flag `--certificate-renewal=false` can be used. -For more information see the [certificate management guide](/docs/tasks/administer-cluster/kubeadmkubeadm-certs). +For more information see the [certificate management guide](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs). {{< /note >}} - Choose a version to upgrade to, and run the appropriate command. For example: @@ -295,6 +296,7 @@ Upgrade the kubelet and kubectl on all control plane nodes: Restart the kubelet ```shell +sudo systemctl daemon-reload sudo systemctl restart kubelet ``` @@ -373,6 +375,7 @@ without compromising the minimum required capacity for running your workloads. - Restart the kubelet ```shell + sudo systemctl daemon-reload sudo systemctl restart kubelet ``` @@ -395,7 +398,7 @@ kubectl get nodes The `STATUS` column should show `Ready` for all your nodes, and the version number should be updated. -{{% /capture %}} + ## Recovering from a failure state @@ -441,4 +444,4 @@ and post-upgrade manifest file for a certain component, a backup file for it wil `kubeadm upgrade node` does the following on worker nodes: - Fetches the kubeadm `ClusterConfiguration` from the cluster.
\ No newline at end of file +- Upgrades the kubelet configuration for this node. diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md b/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md index a6c626a627..35857d09a0 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md @@ -1,29 +1,30 @@ --- title: Upgrading Windows nodes min-kubernetes-server-version: 1.17 -content_template: templates/task +content_type: task weight: 40 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.18" state="beta" >}} This page explains how to upgrade a Windows node [created with kubeadm](/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * Familiarize yourself with [the process for upgrading the rest of your kubeadm cluster](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade). You will want to upgrade the control plane nodes before upgrading your Windows nodes. -{{% /capture %}} -{{% capture steps %}} + + ## Upgrading worker nodes @@ -90,4 +91,4 @@ again replacing {{< param "fullversion" >}} with your desired version: ``` -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/kubelet-config-file.md b/content/en/docs/tasks/administer-cluster/kubelet-config-file.md index 6ffe290a19..54cd837370 100644 --- a/content/en/docs/tasks/administer-cluster/kubelet-config-file.md +++ b/content/en/docs/tasks/administer-cluster/kubelet-config-file.md @@ -3,10 +3,10 @@ reviewers: - mtaufen - dawnchen title: Set Kubelet parameters via a config file -content_template: templates/task +content_type: task --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.10" state="beta" >}} A subset of the Kubelet's configuration parameters may be @@ -16,15 +16,16 @@ This functionality is considered beta in v1.10. Providing parameters via a config file is the recommended approach because it simplifies node deployment and configuration management. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + - A v1.10 or higher Kubelet binary must be installed for beta functionality. -{{% /capture %}} -{{% capture steps %}} + + ## Create the config file @@ -67,9 +68,9 @@ If `--config` is provided and the values are not specified via the command line, defaults for the `KubeletConfiguration` version apply. In the above example, this version is `kubelet.config.k8s.io/v1beta1`. -{{% /capture %}} -{{% capture discussion %}} + + ## Relationship to Dynamic Kubelet Config @@ -77,6 +78,6 @@ If you are using the [Dynamic Kubelet Configuration](/docs/tasks/administer-clus feature, the combination of configuration provided via `--config` and any flags which override these values is considered the default "last known good" configuration by the automatic rollback mechanism. 
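A minimal sketch of such a file and of pointing the kubelet at it (the eviction threshold is illustrative):

```shell
# Write a KubeletConfiguration and hand it to the kubelet via --config.
cat <<EOF > /var/lib/kubelet/config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
evictionHard:
  memory.available: "200Mi"
EOF
kubelet --config=/var/lib/kubelet/config.yaml  # plus your usual kubelet flags
```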
-{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md b/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md index 83ec069915..13dec384ea 100644 --- a/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md +++ b/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md @@ -1,9 +1,9 @@ --- title: Limit Storage Consumption -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This example demonstrates an easy way to limit the amount of storage consumed in a namespace. @@ -11,15 +11,16 @@ The following resources are used in the demonstration: [ResourceQuota](/docs/con [LimitRange](/docs/tasks/administer-cluster/memory-default-namespace/), and [PersistentVolumeClaim](/docs/concepts/storage/persistent-volumes/). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Scenario: Limiting Storage Consumption The cluster-admin is operating a cluster on behalf of a user population and the admin wants to control @@ -77,9 +78,9 @@ spec: requests.storage: "5Gi" ``` -{{% /capture %}} -{{% capture discussion %}} + + ## Summary @@ -87,6 +88,6 @@ A limit range can put a ceiling on how much storage is requested while a resourc consumed by a namespace through claim counts and cumulative storage capacity. The allows a cluster-admin to plan their cluster's storage budget without risk of any one project going over their allotment. -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md index a1d4c786c6..d3d1541d27 100644 --- a/content/en/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md +++ b/content/en/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md @@ -1,11 +1,11 @@ --- title: Configure Minimum and Maximum CPU Constraints for a Namespace -content_template: templates/task +content_type: task weight: 40 --- -{{% capture overview %}} + This page shows how to set minimum and maximum values for the CPU resources used by Containers and Pods in a namespace. You specify minimum and maximum CPU values in a @@ -13,19 +13,20 @@ and Pods in a namespace. You specify minimum and maximum CPU values in a object. If a Pod does not meet the constraints imposed by the LimitRange, it cannot be created in the namespace. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} Your cluster must have at least 1 CPU available for use to run the task examples. 
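The constraints themselves are one small LimitRange; a minimal sketch, assuming a namespace `constraints-cpu-example` already exists (the values are illustrative):

```shell
cat <<EOF | kubectl apply -f - --namespace=constraints-cpu-example
apiVersion: v1
kind: LimitRange
metadata:
  name: cpu-min-max-demo-lr
spec:
  limits:
  - max:
      cpu: "800m"   # no container in the namespace may request or limit more
    min:
      cpu: "200m"   # ...or less
    type: Container
EOF
```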
-{{% /capture %}} -{{% capture steps %}} + + ## Create a namespace @@ -239,9 +240,10 @@ Delete your namespace: kubectl delete namespace constraints-cpu-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For cluster administrators @@ -266,7 +268,7 @@ kubectl delete namespace constraints-cpu-example * [Configure Quality of Service for Pods](/docs/tasks/configure-pod-container/quality-service-pod/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md index 65a91a3538..d2e15c91da 100644 --- a/content/en/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md +++ b/content/en/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md @@ -1,10 +1,10 @@ --- title: Configure Default CPU Requests and Limits for a Namespace -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + This page shows how to configure default CPU requests and limits for a namespace. A Kubernetes cluster can be divided into namespaces. If a Container is created in a namespace @@ -12,14 +12,15 @@ that has a default CPU limit, and the Container does not specify its own CPU lim the Container is assigned the default CPU limit. Kubernetes assigns a default CPU request under certain conditions that are explained later in this topic. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Create a namespace @@ -163,9 +164,10 @@ Delete your namespace: kubectl delete namespace default-cpu-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For cluster administrators @@ -189,6 +191,6 @@ kubectl delete namespace default-cpu-example * [Configure Quality of Service for Pods](/docs/tasks/configure-pod-container/quality-service-pod/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md index e6a6e1c2b0..a5ad383e78 100644 --- a/content/en/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md +++ b/content/en/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md @@ -1,11 +1,11 @@ --- title: Configure Minimum and Maximum Memory Constraints for a Namespace -content_template: templates/task +content_type: task weight: 30 --- -{{% capture overview %}} + This page shows how to set minimum and maximum values for memory used by Containers running in a namespace. You specify minimum and maximum memory values in a @@ -13,19 +13,20 @@ running in a namespace. You specify minimum and maximum memory values in a object. If a Pod does not meet the constraints imposed by the LimitRange, it cannot be created in the namespace. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} Each node in your cluster must have at least 1 GiB of memory. 
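For the default CPU request/limit task above, the defaults are also expressed as a LimitRange; a minimal sketch (the namespace and values are illustrative):

```shell
cat <<EOF | kubectl apply -f - --namespace=default-cpu-example
apiVersion: v1
kind: LimitRange
metadata:
  name: cpu-limit-range
spec:
  limits:
  - default:
      cpu: "1"      # limit assigned to containers that declare none
    defaultRequest:
      cpu: "0.5"    # request assigned to containers that declare none
    type: Container
EOF
```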
-{{% /capture %}} -{{% capture steps %}} + + ## Create a namespace @@ -239,9 +240,10 @@ Delete your namespace: kubectl delete namespace constraints-mem-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For cluster administrators @@ -265,7 +267,7 @@ kubectl delete namespace constraints-mem-example * [Configure Quality of Service for Pods](/docs/tasks/configure-pod-container/quality-service-pod/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md index bb5070bc98..df7fce39f2 100644 --- a/content/en/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md +++ b/content/en/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md @@ -1,27 +1,28 @@ --- title: Configure Default Memory Requests and Limits for a Namespace -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + This page shows how to configure default memory requests and limits for a namespace. If a Container is created in a namespace that has a default memory limit, and the Container does not specify its own memory limit, then the Container is assigned the default memory limit. Kubernetes assigns a default memory request under certain conditions that are explained later in this topic. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} Each node in your cluster must have at least 2 GiB of memory. -{{% /capture %}} -{{% capture steps %}} + + ## Create a namespace @@ -170,9 +171,10 @@ Delete your namespace: kubectl delete namespace default-mem-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For cluster administrators @@ -196,6 +198,6 @@ kubectl delete namespace default-mem-example * [Configure Quality of Service for Pods](/docs/tasks/configure-pod-container/quality-service-pod/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md index 9558766410..d69e3d29d6 100644 --- a/content/en/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md +++ b/content/en/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md @@ -1,30 +1,31 @@ --- title: Configure Memory and CPU Quotas for a Namespace -content_template: templates/task +content_type: task weight: 50 --- -{{% capture overview %}} + This page shows how to set quotas for the total amount memory and CPU that can be used by all Containers running in a namespace. You specify quotas in a [ResourceQuota](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcequota-v1-core) object. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} Each node in your cluster must have at least 1 GiB of memory. 
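The quota is a single ResourceQuota object; a minimal sketch, assuming a namespace `quota-mem-cpu-example` (the hard limits are illustrative):

```shell
cat <<EOF | kubectl apply -f - --namespace=quota-mem-cpu-example
apiVersion: v1
kind: ResourceQuota
metadata:
  name: mem-cpu-demo
spec:
  hard:
    requests.cpu: "1"     # sum of CPU requests across all Pods in the namespace
    requests.memory: 1Gi  # sum of memory requests
    limits.cpu: "2"       # sum of CPU limits
    limits.memory: 2Gi    # sum of memory limits
EOF
```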
-{{% /capture %}} -{{% capture steps %}} + + ## Create a namespace @@ -146,9 +147,10 @@ Delete your namespace: kubectl delete namespace quota-mem-cpu-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For cluster administrators @@ -172,7 +174,7 @@ kubectl delete namespace quota-mem-cpu-example * [Configure Quality of Service for Pods](/docs/tasks/configure-pod-container/quality-service-pod/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md index 31cac82cf1..c44a07681f 100644 --- a/content/en/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md +++ b/content/en/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md @@ -1,28 +1,29 @@ --- title: Configure a Pod Quota for a Namespace -content_template: templates/task +content_type: task weight: 60 --- -{{% capture overview %}} + This page shows how to set a quota for the total number of Pods that can run in a namespace. You specify quotas in a [ResourceQuota](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcequota-v1-core) object. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Create a namespace @@ -107,9 +108,10 @@ Delete your namespace: kubectl delete namespace quota-pod-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For cluster administrators @@ -133,7 +135,7 @@ kubectl delete namespace quota-pod-example * [Configure Quality of Service for Pods](/docs/tasks/configure-pod-container/quality-service-pod/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md b/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md index 9e3f4d6371..2bf0de8231 100644 --- a/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md +++ b/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md @@ -3,10 +3,10 @@ reviewers: - derekwaynecarr - janetkuo title: Namespaces Walkthrough -content_template: templates/task +content_type: task --- -{{% capture overview %}} + Kubernetes {{< glossary_tooltip text="namespaces" term_id="namespace" >}} help different projects, teams, or customers to share a Kubernetes cluster. @@ -19,16 +19,17 @@ Use of multiple namespaces is optional. This example demonstrates how to use Kubernetes namespaces to subdivide your cluster. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Prerequisites @@ -242,7 +243,7 @@ snowflake 2/2 2 2 2m ``` ```shell -kubectl get pods -l run=snowflake +kubectl get pods -l app=snowflake ``` ``` NAME READY STATUS RESTARTS AGE @@ -279,7 +280,7 @@ cattle 5/5 5 5 10s ``` ```shell -kubectl get pods -l run=cattle +kubectl get pods -l app=cattle ``` ``` NAME READY STATUS RESTARTS AGE @@ -295,4 +296,4 @@ At this point, it should be clear that the resources users create in one namespa As the policy support in Kubernetes evolves, we will extend this scenario to show how you can provide different authorization rules for each namespace. 
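Both the walkthrough above and the page that follows create namespaces imperatively; the equivalent declarative object is minimal (the label is optional):

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: development
  labels:
    name: development   # optional label, handy for selectors
```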
-{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/namespaces.md b/content/en/docs/tasks/administer-cluster/namespaces.md index 1d67d7a73f..be7906e40f 100644 --- a/content/en/docs/tasks/administer-cluster/namespaces.md +++ b/content/en/docs/tasks/administer-cluster/namespaces.md @@ -3,19 +3,20 @@ reviewers: - derekwaynecarr - janetkuo title: Share a Cluster with Namespaces -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to view, work in, and delete {{< glossary_tooltip text="namespaces" term_id="namespace" >}}. The page also shows how to use Kubernetes namespaces to subdivide your cluster. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Have an [existing Kubernetes cluster](/docs/setup/). * Have a basic understanding of Kubernetes _[Pods](/docs/concepts/workloads/pods/pod/)_, _[Services](/docs/concepts/services-networking/service/)_, and _[Deployments](/docs/concepts/workloads/controllers/deployment/)_. -{{% /capture %}} -{{% capture steps %}} + + ## Viewing namespaces @@ -189,12 +190,10 @@ This delete is asynchronous, so for a time you will see the namespace in the `Te To demonstrate this, let's spin up a simple Deployment and Pods in the `development` namespace. ```shell - kubectl create deployment snowflake --image=k8s.gcr.io/serve_hostname -n=development + kubectl create deployment snowflake --image=k8s.gcr.io/serve_hostname -n=development kubectl scale deployment snowflake --replicas=2 -n=development ``` We have just created a deployment with two replicas, running a pod called `snowflake` with a basic container that serves the hostname. - Note that `kubectl run` creates deployments only on Kubernetes cluster >= v1.2. If you are running older versions, it creates replication controllers instead. - If you want to obtain the old behavior, use `--generator=run/v1` to create replication controllers. See [`kubectl run`](/docs/reference/generated/kubectl/kubectl-commands/#run) for more details. ```shell kubectl get deployment -n=development @@ -204,7 +203,7 @@ This delete is asynchronous, so for a time you will see the namespace in the `Te snowflake 2/2 2 2 2m ``` ```shell - kubectl get pods -l run=snowflake -n=development + kubectl get pods -l app=snowflake -n=development ``` ``` NAME READY STATUS RESTARTS AGE @@ -226,21 +225,26 @@ This delete is asynchronous, so for a time you will see the namespace in the `Te Production likes to run cattle, so let's create some cattle pods. ```shell - kubectl run cattle --image=k8s.gcr.io/serve_hostname -n=production + kubectl create deployment cattle --image=k8s.gcr.io/serve_hostname -n=production + kubectl scale deployment cattle --replicas=5 -n=production - kubectl get pods -n=production + kubectl get deployment -n=production ``` ``` - NAME READY STATUS RESTARTS AGE - cattle 1/1 Running 0 3s + NAME READY UP-TO-DATE AVAILABLE AGE + cattle 5/5 5 5 10s ``` ```shell - kubectl get pods -l run=cattle -n=production + kubectl get pods -l app=cattle -n=production ``` ``` NAME READY STATUS RESTARTS AGE - cattle 1/1 Running 0 34s + cattle-2263376956-41xy6 1/1 Running 0 34s + cattle-2263376956-kw466 1/1 Running 0 34s + cattle-2263376956-n4v97 1/1 Running 0 34s + cattle-2263376956-p5p3i 1/1 Running 0 34s + cattle-2263376956-sxpth 1/1 Running 0 34s ``` At this point, it should be clear that the resources users create in one namespace are hidden from the other namespace.
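The `kubectl create deployment` plus `kubectl scale` pair used above is roughly equivalent to applying a manifest like this sketch (label keys follow the `app=` convention the page now uses):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: snowflake
  namespace: development
  labels:
    app: snowflake
spec:
  replicas: 2                  # matches the kubectl scale step
  selector:
    matchLabels:
      app: snowflake
  template:
    metadata:
      labels:
        app: snowflake         # the label that kubectl get pods -l app=snowflake matches
    spec:
      containers:
      - name: serve-hostname
        image: k8s.gcr.io/serve_hostname
```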
@@ -248,9 +252,9 @@ At this point, it should be clear that the resources users create in one namespa As the policy support in Kubernetes evolves, we will extend this scenario to show how you can provide different authorization rules for each namespace. -{{% /capture %}} -{{% capture discussion %}} + + ## Understanding the motivation for using namespaces @@ -300,12 +304,13 @@ is local to a namespace. This is useful for using the same configuration across multiple namespaces such as Development, Staging and Production. If you want to reach across namespaces, you need to use the fully qualified domain name (FQDN). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [setting the namespace preference](/docs/concepts/overview/working-with-objects/namespaces/#setting-the-namespace-preference). * Learn more about [setting the namespace for a request](/docs/concepts/overview/working-with-objects/namespaces/#setting-the-namespace-for-a-request) * See [namespaces design](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/architecture/namespaces.md). -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md b/content/en/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md index 7046752a5f..9efdccfb6e 100644 --- a/content/en/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md @@ -2,19 +2,20 @@ reviewers: - caseydavenport title: Use Calico for NetworkPolicy -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + This page shows a couple of quick ways to create a Calico cluster on Kubernetes. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Decide whether you want to deploy a [cloud](#creating-a-calico-cluster-with-google-kubernetes-engine-gke) or [local](#creating-a-local-calico-cluster-with-kubeadm) cluster. -{{% /capture %}} -{{% capture steps %}} + + ## Creating a Calico cluster with Google Kubernetes Engine (GKE) **Prerequisite**: [gcloud](https://cloud.google.com/sdk/docs/quickstarts). @@ -44,10 +45,11 @@ Decide whether you want to deploy a [cloud](#creating-a-calico-cluster-with-goog To get a local single-host Calico cluster in fifteen minutes using kubeadm, refer to the [Calico Quickstart](https://docs.projectcalico.org/latest/getting-started/kubernetes/). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Once your cluster is running, you can follow the [Declare Network Policy](/docs/tasks/administer-cluster/declare-network-policy/) to try out Kubernetes NetworkPolicy. -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md b/content/en/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md index cca685d395..95912f4f88 100644 --- a/content/en/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md @@ -3,23 +3,24 @@ reviewers: - danwent - aanm title: Use Cilium for NetworkPolicy -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + This page shows how to use Cilium for NetworkPolicy. 
For background on Cilium, read the [Introduction to Cilium](https://docs.cilium.io/en/stable/intro). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Deploying Cilium on Minikube for Basic Testing To get familiar with Cilium easily you can follow the @@ -75,9 +76,9 @@ For detailed instructions around deploying Cilium for production, see: This documentation includes detailed requirements, instructions and example production DaemonSet files. -{{% /capture %}} -{{% capture discussion %}} + + ## Understanding Cilium components Deploying a cluster with Cilium adds Pods to the `kube-system` namespace. To see @@ -98,14 +99,15 @@ cilium-6rxbd 1/1 Running 0 1m A `cilium` Pod runs on each node in your cluster and enforces network policy on the traffic to/from Pods on that node using Linux BPF. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Once your cluster is running, you can follow the [Declare Network Policy](/docs/tasks/administer-cluster/declare-network-policy/) to try out Kubernetes NetworkPolicy with Cilium. Have fun, and if you have questions, contact us using the [Cilium Slack Channel](https://cilium.herokuapp.com/). -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/network-policy-provider/kube-router-network-policy.md b/content/en/docs/tasks/administer-cluster/network-policy-provider/kube-router-network-policy.md index 0111f6c21f..673118e312 100644 --- a/content/en/docs/tasks/administer-cluster/network-policy-provider/kube-router-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/network-policy-provider/kube-router-network-policy.md @@ -2,25 +2,27 @@ reviewers: - murali-reddy title: Use Kube-router for NetworkPolicy -content_template: templates/task +content_type: task weight: 30 --- -{{% capture overview %}} + This page shows how to use [Kube-router](https://github.com/cloudnativelabs/kube-router) for NetworkPolicy. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + You need to have a Kubernetes cluster running. If you do not already have a cluster, you can create one by using any of the cluster installers, such as Kops, Bootkube, or kubeadm. -{{% /capture %}} -{{% capture steps %}} + + ## Installing Kube-router addon The Kube-router Addon comes with a Network Policy Controller that watches the Kubernetes API server for NetworkPolicy and Pod updates, and configures iptables rules and ipsets to allow or block traffic as directed by the policies. Please follow the [trying Kube-router with cluster installers](https://www.kube-router.io/docs/user-guide/#try-kube-router-with-cluster-installers) guide to install the Kube-router addon. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Once you have installed the Kube-router addon, you can follow the [Declare Network Policy](/docs/tasks/administer-cluster/declare-network-policy/) to try out Kubernetes NetworkPolicy.
-{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md b/content/en/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md index 42577dae85..df6adcd39f 100644 --- a/content/en/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md @@ -2,23 +2,24 @@ reviewers: - chrismarino title: Romana for NetworkPolicy -content_template: templates/task +content_type: task weight: 40 --- -{{% capture overview %}} + This page shows how to use Romana for NetworkPolicy. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Complete steps 1, 2, and 3 of the [kubeadm getting started guide](/docs/getting-started-guides/kubeadm/). -{{% /capture %}} -{{% capture steps %}} + + ## Installing Romana with kubeadm @@ -32,12 +33,13 @@ To apply network policies use one of the following: * [Example of Romana network policy](https://github.com/romana/core/blob/master/doc/policy.md). * The NetworkPolicy API. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Once you have installed Romana, you can follow the [Declare Network Policy](/docs/tasks/administer-cluster/declare-network-policy/) to try out Kubernetes NetworkPolicy. -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md b/content/en/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md index 0fcb4ea107..a9d15f40a6 100644 --- a/content/en/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md @@ -2,23 +2,24 @@ reviewers: - bboreham title: Weave Net for NetworkPolicy -content_template: templates/task +content_type: task weight: 50 --- -{{% capture overview %}} + This page shows how to use Weave Net for NetworkPolicy. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + You need to have a Kubernetes cluster. Follow the [kubeadm getting started guide](/docs/getting-started-guides/kubeadm/) to bootstrap one. -{{% /capture %}} -{{% capture steps %}} + + ## Install the Weave Net addon @@ -48,12 +49,13 @@ weave-net-pmw8w 2/2 Running 0 9d Each Node has a weave Pod, and all Pods are `Running` and `2/2 READY`. (`2/2` means that each Pod has `weave` and `weave-npc`.) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Once you have installed the Weave Net addon, you can follow the [Declare Network Policy](/docs/tasks/administer-cluster/declare-network-policy/) to try out Kubernetes NetworkPolicy. If you have any questions, contact us at [#weave-community on Slack or Weave User Group](https://github.com/weaveworks/weave#getting-help).
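Each of these provider pages hands off to the Declare Network Policy task; as a reminder of what such a policy looks like, here is a minimal sketch (all names and labels are illustrative):

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: access-nginx          # illustrative name
spec:
  podSelector:
    matchLabels:
      app: nginx              # the Pods this policy protects
  ingress:
  - from:
    - podSelector:
        matchLabels:
          access: "true"      # only Pods carrying this label may connect
```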
-{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/nodelocaldns.md b/content/en/docs/tasks/administer-cluster/nodelocaldns.md index cb033f2925..8aa6b9249b 100644 --- a/content/en/docs/tasks/administer-cluster/nodelocaldns.md +++ b/content/en/docs/tasks/administer-cluster/nodelocaldns.md @@ -4,21 +4,22 @@ reviewers: - zihongz - sftim title: Using NodeLocal DNSCache in Kubernetes clusters -content_template: templates/task +content_type: task --- - -{{% capture overview %}} + + {{< feature-state for_k8s_version="v1.18" state="stable" >}} This page provides an overview of the NodeLocal DNSCache feature in Kubernetes. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} - {{% capture steps %}} + + ## Introduction @@ -88,4 +89,4 @@ This feature can be enabled using the following steps: Once enabled, node-local-dns Pods will run in the kube-system namespace on each of the cluster nodes. This Pod runs [CoreDNS](https://github.com/coredns/coredns) in cache mode, so all CoreDNS metrics exposed by the different plugins will be available on a per-node basis. You can disable this feature by removing the DaemonSet, using `kubectl delete -f ` . You should also revert any changes you made to the kubelet configuration. - {{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/out-of-resource.md b/content/en/docs/tasks/administer-cluster/out-of-resource.md index c52415f4c6..a9d2ee3702 100644 --- a/content/en/docs/tasks/administer-cluster/out-of-resource.md +++ b/content/en/docs/tasks/administer-cluster/out-of-resource.md @@ -4,10 +4,10 @@ reviewers: - vishh - timstclair title: Configure Out of Resource Handling -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + This page explains how to configure out of resource handling with `kubelet`. @@ -16,10 +16,10 @@ are low. This is especially important when dealing with incompressible compute resources, such as memory or disk space. If such resources are exhausted, nodes become unstable. -{{% /capture %}} -{{% capture body %}} + + ## Eviction Policy @@ -372,4 +372,4 @@ to prevent system OOMs, and promote eviction of workloads so cluster state can r The Pod eviction may evict more Pods than needed due to stats collection timing gap. This can be mitigated by adding the ability to get root container stats on an on-demand basis [(https://github.com/google/cadvisor/issues/1247)](https://github.com/google/cadvisor/issues/1247) in the future. -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/quota-api-object.md b/content/en/docs/tasks/administer-cluster/quota-api-object.md index faf7210384..1fb48c7a2b 100644 --- a/content/en/docs/tasks/administer-cluster/quota-api-object.md +++ b/content/en/docs/tasks/administer-cluster/quota-api-object.md @@ -1,10 +1,10 @@ --- title: Configure Quotas for API Objects -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to configure quotas for API objects, including PersistentVolumeClaims and Services. A quota restricts the number of @@ -13,17 +13,18 @@ You specify quotas in a [ResourceQuota](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcequota-v1-core) object.
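A ResourceQuota that counts API objects, of the kind this page goes on to configure, might look like the following sketch (the name and counts are illustrative):

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: object-quota-demo           # illustrative name
spec:
  hard:
    persistentvolumeclaims: "1"   # at most one PVC in the namespace
    services.loadbalancers: "2"   # at most two LoadBalancer Services
    services.nodeports: "0"       # NodePort Services are forbidden
```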
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Create a namespace @@ -140,9 +141,10 @@ Delete your namespace: kubectl delete namespace quota-object-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For cluster administrators @@ -167,7 +169,7 @@ kubectl delete namespace quota-object-example * [Configure Quality of Service for Pods](/docs/tasks/configure-pod-container/quality-service-pod/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md b/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md index af5696d622..6218e8ce81 100644 --- a/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md +++ b/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md @@ -3,11 +3,11 @@ reviewers: - mtaufen - dawnchen title: Reconfigure a Node's Kubelet in a Live Cluster -content_template: templates/task +content_type: task min-kubernetes-server-version: v1.11 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.11" state="beta" >}} [Dynamic Kubelet Configuration](https://github.com/kubernetes/enhancements/issues/281) @@ -25,9 +25,10 @@ of nodes before rolling them out cluster-wide. Advice on configuring specific fields is available in the inline `KubeletConfiguration` [type documentation](https://github.com/kubernetes/kubernetes/blob/release-1.11/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go). {{< /warning >}} -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + You need to have a Kubernetes cluster. You also need kubectl v1.11 or higher, configured to communicate with your cluster. {{< version-check >}} @@ -43,9 +44,9 @@ because there are manual alternatives. For each node that you're reconfiguring, you must set the kubelet `--dynamic-config-dir` flag to a writable directory. -{{% /capture %}} -{{% capture steps %}} + + ## Reconfiguring the kubelet on a running node in your cluster @@ -71,7 +72,7 @@ will receive default values appropriate to the configuration version (e.g. `kubelet.config.k8s.io/v1beta1`), unless overridden by flags. The status of the Node's kubelet configuration is reported via -`Node.Spec.Status.Config`. Once you have updated a Node to use the new +`Node.Status.Config`. Once you have updated a Node to use the new ConfigMap, you can observe this status to confirm that the Node is using the intended configuration. @@ -311,9 +312,9 @@ empty, since all config sources have been reset to `nil`, which indicates that the local default config is `assigned`, `active`, and `lastKnownGood`, and no error is reported. -{{% /capture %}} -{{% capture discussion %}} + + ## `kubectl patch` example You can change a Node's configSource using several different mechanisms. @@ -374,9 +375,9 @@ internal failure, see Kubelet log for details | The kubelet encountered some int {{< /table >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - For more information on configuring the kubelet via a configuration file, see [Set kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file). 
- See the reference documentation for [`NodeConfigSource`](https://kubernetes.io/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#nodeconfigsource-v1-core) -{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md index c78c9edb42..4f00675c37 100644 --- a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md +++ b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md @@ -4,11 +4,11 @@ reviewers: - derekwaynecarr - dashpole title: Reserve Compute Resources for System Daemons -content_template: templates/task +content_type: task min-kubernetes-server-version: 1.8 --- -{{% capture overview %}} + Kubernetes nodes can be scheduled to `Capacity`. Pods can consume all the available capacity on a node by default. This is an issue because nodes @@ -22,19 +22,20 @@ compute resources for system daemons. Kubernetes recommends that cluster administrators configure `Node Allocatable` based on the workload density on each node. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} Your Kubernetes server must be at or later than version 1.17 to use the kubelet command line option `--reserved-cpus` to set an [explicitly reserved CPU list](#explicitly-reserved-cpu-list). -{{% /capture %}} -{{% capture steps %}} + + ## Node Allocatable @@ -226,9 +227,9 @@ more features are added. Over time, the Kubernetes project will attempt to bring down the utilization of node system daemons, but that is not a priority as of now. So expect a drop in `Allocatable` capacity in future releases. -{{% /capture %}} -{{% capture discussion %}} + + ## Example Scenario @@ -251,4 +252,3 @@ If `kube-reserved` and/or `system-reserved` is not enforced and system daemons exceed their reservation, `kubelet` evicts pods whenever the overall node memory usage is higher than `31.5Gi` or `storage` is greater than `90Gi` -{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/tasks/administer-cluster/running-cloud-controller.md b/content/en/docs/tasks/administer-cluster/running-cloud-controller.md index 71cc28ff40..aa01c902e4 100644 --- a/content/en/docs/tasks/administer-cluster/running-cloud-controller.md +++ b/content/en/docs/tasks/administer-cluster/running-cloud-controller.md @@ -4,10 +4,10 @@ reviewers: - thockin - wlan0 title: Cloud Controller Manager Administration -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + {{< feature-state state="beta" for_k8s_version="v1.11" >}} @@ -15,10 +15,10 @@ Since cloud providers develop and release at a different pace compared to the Ku The `cloud-controller-manager` can be linked to any cloud provider that satisfies [cloudprovider.Interface](https://github.com/kubernetes/cloud-provider/blob/master/cloud.go). For backwards compatibility, the [cloud-controller-manager](https://github.com/kubernetes/kubernetes/tree/master/cmd/cloud-controller-manager) provided in the core Kubernetes project uses the same cloud libraries as `kube-controller-manager`. Cloud providers already supported in Kubernetes core are expected to use the in-tree cloud-controller-manager to transition out of Kubernetes core. -{{% /capture %}} -{{% capture body %}} + + ## Administration @@ -82,9 +82,10 @@ A good example of this is the TLS bootstrapping feature in the Kubelet.
TLS boot As this initiative evolves, changes will be made to address these issues in upcoming releases. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + To build and develop your own cloud controller manager, read [Developing Cloud Controller Manager](/docs/tasks/administer-cluster/developing-cloud-controller-manager/). -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/safely-drain-node.md b/content/en/docs/tasks/administer-cluster/safely-drain-node.md index 29006ff754..e18b2ed87d 100644 --- a/content/en/docs/tasks/administer-cluster/safely-drain-node.md +++ b/content/en/docs/tasks/administer-cluster/safely-drain-node.md @@ -5,14 +5,15 @@ reviewers: - foxish - kow3ns title: Safely Drain a Node while Respecting the PodDisruptionBudget -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to safely drain a node, respecting the PodDisruptionBudget you have defined. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + This task assumes that you have met the following prerequisites: @@ -24,9 +25,9 @@ This task assumes that you have met the following prerequisites: and [Configured PodDisruptionBudgets](/docs/tasks/run-application/configure-pdb/) for applications that need them. -{{% /capture %}} -{{% capture steps %}} + + ## Use `kubectl drain` to remove a node from service @@ -151,13 +152,14 @@ In this case, there are two potential solutions: Kubernetes does not specify what the behavior should be in this case; it is up to the application owners and cluster owners to establish an agreement on behavior in these cases. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Follow steps to protect your application by [configuring a Pod Disruption Budget](/docs/tasks/run-application/configure-pdb/). * Learn more about [maintenance on a node](/docs/tasks/administer-cluster/cluster-management/#maintenance-on-a-node). -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/securing-a-cluster.md b/content/en/docs/tasks/administer-cluster/securing-a-cluster.md index d2d58ae702..7e558fb48f 100644 --- a/content/en/docs/tasks/administer-cluster/securing-a-cluster.md +++ b/content/en/docs/tasks/administer-cluster/securing-a-cluster.md @@ -5,23 +5,24 @@ reviewers: - ericchiang - destijl title: Securing a Cluster -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This document covers topics related to protecting a cluster from accidental or malicious access and provides recommendations on overall security. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Controlling access to the Kubernetes API @@ -254,6 +255,6 @@ Join the [kubernetes-announce](https://groups.google.com/forum/#!forum/kubernete group for emails about security announcements. See the [security reporting](/security/) page for more on how to report vulnerabilities. 
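The drain task above assumes PodDisruptionBudgets are in place for the workloads that matter; a minimal sketch of one, assuming an `app: zookeeper` label on the protected Pods:

```yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb                # illustrative name
spec:
  minAvailable: 2             # kubectl drain will not evict Pods below this count
  selector:
    matchLabels:
      app: zookeeper
```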
-{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/sysctl-cluster.md b/content/en/docs/tasks/administer-cluster/sysctl-cluster.md index f96c066dd5..d6aea240c1 100644 --- a/content/en/docs/tasks/administer-cluster/sysctl-cluster.md +++ b/content/en/docs/tasks/administer-cluster/sysctl-cluster.md @@ -2,25 +2,26 @@ title: Using sysctls in a Kubernetes Cluster reviewers: - sttts -content_template: templates/task +content_type: task --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.12" state="beta" >}} This document describes how to configure and use kernel parameters within a Kubernetes cluster using the {{< glossary_tooltip term_id="sysctl" >}} interface. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Listing all Sysctl Parameters @@ -140,9 +141,9 @@ spec: value: "65536" ... ``` -{{% /capture %}} -{{% capture discussion %}} + + {{< warning >}} Due to their nature of being _unsafe_, the use of _unsafe_ sysctls @@ -188,9 +189,9 @@ Do not configure these two fields such that there is overlap, meaning that a given sysctl is both allowed and forbidden. {{< warning >}} -If you whitelist unsafe sysctls via the `allowedUnsafeSysctls` field +If you allow unsafe sysctls via the `allowedUnsafeSysctls` field in a PodSecurityPolicy, any pod using such a sysctl will fail to start -if the sysctl is not whitelisted via the `--allowed-unsafe-sysctls` kubelet +if the sysctl is not allowed via the `--allowed-unsafe-sysctls` kubelet flag as well on that node. {{< /warning >}} @@ -210,4 +211,4 @@ spec: ... ``` -{{% /capture %}} + diff --git a/content/en/docs/tasks/administer-cluster/topology-manager.md b/content/en/docs/tasks/administer-cluster/topology-manager.md index 156146e1c2..8455bb0d8d 100644 --- a/content/en/docs/tasks/administer-cluster/topology-manager.md +++ b/content/en/docs/tasks/administer-cluster/topology-manager.md @@ -8,11 +8,11 @@ reviewers: - nolancon - bg-chun -content_template: templates/task +content_type: task min-kubernetes-server-version: v1.18 --- -{{% capture overview %}} + {{< feature-state state="beta" for_k8s_version="v1.18" >}} @@ -22,15 +22,16 @@ In order to extract the best performance, optimizations related to CPU isolation _Topology Manager_ is a Kubelet component that aims to co-ordinate the set of components that are responsible for these optimizations. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## How Topology Manager Works @@ -216,4 +217,4 @@ Using this information the Topology Manager calculates the optimal hint for the 3. The Device Manager and the CPU Manager are the only components to adopt the Topology Manager's HintProvider interface. This means that NUMA alignment can only be achieved for resources managed by the CPU Manager and the Device Manager. Memory or Hugepages are not considered by the Topology Manager for NUMA alignment. 
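Topology Manager is configured through the kubelet; a sketch of the relevant KubeletConfiguration fields (the policy choice is illustrative):

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cpuManagerPolicy: static                  # Topology Manager hints are most useful with the static CPU manager
topologyManagerPolicy: single-numa-node   # one of: none, best-effort, restricted, single-numa-node
```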
-{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md b/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md index 181b92b8e1..5e79704cc4 100644 --- a/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md +++ b/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md @@ -1,20 +1,21 @@ --- title: Assign CPU Resources to Containers and Pods -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + This page shows how to assign a CPU *request* and a CPU *limit* to a container. Containers cannot use more CPU than the configured limit. Provided the system has CPU time free, a container is guaranteed to be allocated as much CPU as it requests. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -48,10 +49,10 @@ NAME v1beta1.metrics.k8s.io ``` -{{% /capture %}} -{{% capture steps %}} + + ## Create a namespace @@ -239,9 +240,10 @@ Delete your namespace: kubectl delete namespace cpu-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For app developers @@ -266,4 +268,4 @@ kubectl delete namespace cpu-example * [Configure Quotas for API Objects](/docs/tasks/administer-cluster/quota-api-object/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/assign-memory-resource.md b/content/en/docs/tasks/configure-pod-container/assign-memory-resource.md index e8f7d8073a..394f435d12 100644 --- a/content/en/docs/tasks/configure-pod-container/assign-memory-resource.md +++ b/content/en/docs/tasks/configure-pod-container/assign-memory-resource.md @@ -1,19 +1,20 @@ --- title: Assign Memory Resources to Containers and Pods -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + This page shows how to assign a memory *request* and a memory *limit* to a Container. A Container is guaranteed to have as much memory as it requests, but is not allowed to use more memory than its limit. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -46,9 +47,9 @@ NAME v1beta1.metrics.k8s.io ``` -{{% /capture %}} -{{% capture steps %}} + + ## Create a namespace @@ -330,9 +331,10 @@ Delete your namespace. This deletes all the Pods that you created for this task: kubectl delete namespace mem-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For app developers @@ -356,7 +358,7 @@ kubectl delete namespace mem-example * [Configure Quotas for API Objects](/docs/tasks/administer-cluster/quota-api-object/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md b/content/en/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md index 16773cd215..8306724c1d 100644 --- a/content/en/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md +++ b/content/en/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md @@ -1,22 +1,23 @@ --- title: Assign Pods to Nodes using Node Affinity min-kubernetes-server-version: v1.10 -content_template: templates/task +content_type: task weight: 120 --- -{{% capture overview %}} + This page shows how to assign a Kubernetes Pod to a particular node using Node Affinity in a Kubernetes cluster. 
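A sketch of the preferred node affinity this page builds up to (the `disktype=ssd` label matches the label added in the first step):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: disktype
            operator: In
            values:
            - ssd          # prefer, but do not require, nodes labeled disktype=ssd
  containers:
  - name: nginx
    image: nginx
```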
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Add a label to a node @@ -112,9 +113,10 @@ This means that the pod will prefer a node that has a `disktype=ssd` label. nginx 1/1 Running 0 13s 10.200.0.4 worker0 ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Learn more about [Node Affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity). -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md b/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md index b5f6876e6b..f1e6e6e9ef 100644 --- a/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md +++ b/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md @@ -1,21 +1,22 @@ --- title: Assign Pods to Nodes -content_template: templates/task +content_type: task weight: 120 --- -{{% capture overview %}} + This page shows how to assign a Kubernetes Pod to a particular node in a Kubernetes cluster. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Add a label to a node @@ -94,10 +95,11 @@ You can also schedule a pod to one specific node via setting `nodeName`. Use the configuration file to create a pod that will get scheduled on `foo-node` only. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [labels and selectors](/docs/concepts/overview/working-with-objects/labels/). * Learn more about [nodes](/docs/concepts/architecture/nodes/). -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md b/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md index 57b5fad6c6..f5116e7691 100644 --- a/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md +++ b/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md @@ -1,27 +1,28 @@ --- title: Attach Handlers to Container Lifecycle Events -content_template: templates/task +content_type: task weight: 140 --- -{{% capture overview %}} + This page shows how to attach handlers to Container lifecycle events. Kubernetes supports the postStart and preStop events. Kubernetes sends the postStart event immediately after a Container is started, and it sends the preStop event immediately before the Container is terminated. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Define postStart and preStop handlers @@ -56,11 +57,11 @@ The output shows the text written by the postStart handler: Hello from the postStart handler -{{% /capture %}} -{{% capture discussion %}} + + ## Discussion @@ -82,10 +83,11 @@ This means that the preStop hook is not invoked when the Pod is *completed*. This limitation is tracked in [issue #55087](https://github.com/kubernetes/kubernetes/issues/55807). {{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Container lifecycle hooks](/docs/concepts/containers/container-lifecycle-hooks/). * Learn more about the [lifecycle of a Pod](/docs/concepts/workloads/pods/pod-lifecycle/). 
@@ -97,6 +99,6 @@ This limitation is tracked in [issue #55087](https://github.com/kubernetes/kuber * [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) * See `terminationGracePeriodSeconds` in [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core) -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/configure-gmsa.md b/content/en/docs/tasks/configure-pod-container/configure-gmsa.md index 8045ae9a02..a2b3f24628 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-gmsa.md +++ b/content/en/docs/tasks/configure-pod-container/configure-gmsa.md @@ -1,10 +1,10 @@ --- title: Configure GMSA for Windows Pods and containers -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.18" state="stable" >}} @@ -12,14 +12,15 @@ This page shows how to configure [Group Managed Service Accounts](https://docs.m In Kubernetes, GMSA credential specs are configured at a Kubernetes cluster-wide scope as Custom Resources. Windows Pods, as well as individual containers within a Pod, can be configured to use a GMSA for domain based functions (e.g. Kerberos authentication) when interacting with other Windows services. As of v1.16, the Docker runtime supports GMSA for Windows workloads. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + You need to have a Kubernetes cluster and the `kubectl` command-line tool must be configured to communicate with your cluster. The cluster is expected to have Windows worker nodes. This section covers a set of initial steps required once for each cluster: ### Install the GMSACredentialSpec CRD -A [CustomResourceDefinition](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/)(CRD) for GMSA credential spec resources needs to be configured on the cluster to define the custom resource type `GMSACredentialSpec`. Download the GMSA CRD [YAML](https://github.com/kubernetes-sigs/windows-gmsa/blob/master/admission-webhook/deploy/gmsa-crd.yml) and save it as gmsa-crd.yaml. +A [CustomResourceDefinition](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/)(CRD) for GMSA credential spec resources needs to be configured on the cluster to define the custom resource type `GMSACredentialSpec`. Download the GMSA CRD [YAML](https://github.com/kubernetes-sigs/windows-gmsa/blob/master/admission-webhook/deploy/gmsa-crd.yml) and save it as gmsa-crd.yaml. Next, install the CRD with `kubectl apply -f gmsa-crd.yaml` ### Install webhooks to validate GMSA users @@ -43,9 +44,9 @@ A [script](https://github.com/kubernetes-sigs/windows-gmsa/blob/master/admission The [YAML template](https://github.com/kubernetes-sigs/windows-gmsa/blob/master/admission-webhook/deploy/gmsa-webhook.yml.tpl) used by the script may also be used to deploy the webhooks and associated objects manually (with appropriate substitutions for the parameters) -{{% /capture %}} -{{% capture steps %}} + + ## Configure GMSAs and Windows nodes in Active Directory Before Pods in Kubernetes can be configured to use GMSAs, the desired GMSAs need to be provisioned in Active Directory as described in the [Windows GMSA documentation](https://docs.microsoft.com/en-us/windows-server/security/group-managed-service-accounts/getting-started-with-group-managed-service-accounts#BKMK_Step1). 
Windows worker nodes (that are part of the Kubernetes cluster) need to be configured in Active Directory to access the secret credentials associated with the desired GMSA as described in the [Windows GMSA documentation](https://docs.microsoft.com/en-us/windows-server/security/group-managed-service-accounts/getting-started-with-group-managed-service-accounts#to-add-member-hosts-using-the-set-adserviceaccount-cmdlet) @@ -252,4 +253,4 @@ If the above command corrects the error, you can automate the step by adding the If you add the `lifecycle` section shown above to your Pod spec, the Pod will execute the commands listed to restart the `netlogon` service until the `nltest.exe /query` command exits without error. -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md index 19b077ab35..ed5aa24044 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md +++ b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md @@ -1,10 +1,10 @@ --- title: Configure Liveness, Readiness and Startup Probes -content_template: templates/task +content_type: task weight: 110 --- -{{% capture overview %}} + This page shows how to configure liveness, readiness and startup probes for containers. @@ -25,15 +25,16 @@ it succeeds, making sure those probes don't interfere with the application start This can be used to adopt liveness checks on slow starting containers, avoiding them getting killed by the kubelet before they are up and running. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Define a liveness command @@ -360,9 +361,10 @@ For a TCP probe, the kubelet makes the probe connection at the node, not in the means that you can not use a service name in the `host` parameter since the kubelet is unable to resolve it. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Container Probes](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). @@ -373,6 +375,6 @@ You can also read the API references for: * [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) * [Probe](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#probe-v1-core) -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md b/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md index 024e6929c7..6ff6c21530 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md +++ b/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md @@ -1,10 +1,10 @@ --- title: Configure a Pod to Use a PersistentVolume for Storage -content_template: templates/task +content_type: task weight: 60 --- -{{% capture overview %}} + This page shows you how to configure a Pod to use a {{< glossary_tooltip text="PersistentVolumeClaim" term_id="persistent-volume-claim" >}} @@ -20,9 +20,10 @@ PersistentVolume. 1. You create a Pod that uses the above PersistentVolumeClaim for storage.
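A sketch of the first two steps in that process, assuming a single-node test cluster (a hostPath PV is suitable only for such clusters; all names are illustrative):

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: task-pv-volume          # created by the cluster administrator
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /mnt/data             # the directory created on the Node
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: task-pv-claim           # created by the user; binds to a matching PV
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
```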
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * You need to have a Kubernetes cluster that has only one Node, and the {{< glossary_tooltip text="kubectl" term_id="kubectl" >}} @@ -33,9 +34,9 @@ do not already have a single-node cluster, you can create one by using * Familiarize yourself with the material in [Persistent Volumes](/docs/concepts/storage/persistent-volumes/). -{{% /capture %}} -{{% capture steps %}} + + ## Create an index.html file on your Node @@ -237,10 +238,10 @@ sudo rmdir /mnt/data You can now close the shell to your Node. -{{% /capture %}} -{{% capture discussion %}} + + ## Access control @@ -270,10 +271,11 @@ When a Pod consumes a PersistentVolume, the GIDs associated with the PersistentVolume are not present on the Pod resource itself. {{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [PersistentVolumes](/docs/concepts/storage/persistent-volumes/). * Read the [Persistent Storage design document](https://git.k8s.io/community/contributors/design-proposals/storage/persistent-storage.md). @@ -285,6 +287,6 @@ PersistentVolume are not present on the Pod resource itself. * [PersistentVolumeClaim](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core) * [PersistentVolumeClaimSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaimspec-v1-core) -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md b/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md index c7f80b0fad..42eff59db0 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md +++ b/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md @@ -1,24 +1,25 @@ --- title: Configure a Pod to Use a ConfigMap -content_template: templates/task +content_type: task weight: 150 card: name: tasks weight: 50 --- -{{% capture overview %}} + ConfigMaps allow you to decouple configuration artifacts from image content to keep containerized applications portable. This page provides a series of usage examples demonstrating how to create ConfigMaps and configure Pods using data stored in ConfigMaps. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Create a ConfigMap @@ -628,9 +629,9 @@ When a ConfigMap already being consumed in a volume is updated, projected keys a A container using a ConfigMap as a [subPath](/docs/concepts/storage/volumes/#using-subpath) volume will not receive ConfigMap updates. {{< /note >}} -{{% /capture %}} -{{% capture discussion %}} + + ## Understanding ConfigMaps and Pods @@ -680,9 +681,10 @@ data: - You can't use ConfigMaps for {{< glossary_tooltip text="static pods" term_id="static-pod" >}}, because the Kubelet does not support this. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Follow a real world example of [Configuring Redis using a ConfigMap](/docs/tutorials/configuration/configure-redis-using-configmap/). 
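One common pattern from the ConfigMap page above, sketched end to end (names and keys are illustrative):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: special-config
data:
  SPECIAL_LEVEL: very
---
apiVersion: v1
kind: Pod
metadata:
  name: configmap-env-demo
spec:
  restartPolicy: Never
  containers:
  - name: app
    image: k8s.gcr.io/busybox
    command: ["/bin/sh", "-c", "env"]   # prints SPECIAL_LEVEL=very among the environment
    envFrom:
    - configMapRef:
        name: special-config            # every key becomes an environment variable
```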
-{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/configure-pod-initialization.md b/content/en/docs/tasks/configure-pod-container/configure-pod-initialization.md index a418a8d7c0..9a8a33f655 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-pod-initialization.md +++ b/content/en/docs/tasks/configure-pod-container/configure-pod-initialization.md @@ -1,22 +1,23 @@ --- title: Configure Pod Initialization -content_template: templates/task +content_type: task weight: 130 --- -{{% capture overview %}} + This page shows how to use an Init Container to initialize a Pod before an application Container runs. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Create a Pod that has an Init Container @@ -78,9 +79,10 @@ The output shows that nginx is serving the web page that was written by the init

Kubernetes is open source giving you the freedom to take advantage ...

... -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [communicating between Containers running in the same Pod](/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume/). @@ -88,6 +90,6 @@ The output shows that nginx is serving the web page that was written by the init * Learn more about [Volumes](/docs/concepts/storage/volumes/). * Learn more about [Debugging Init Containers](/docs/tasks/debug-application-cluster/debug-init-containers/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md b/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md index ec6f2d9528..ad99a05c27 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md +++ b/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md @@ -3,11 +3,11 @@ reviewers: - jpeeler - pmorie title: Configure a Pod to Use a Projected Volume for Storage -content_template: templates/task +content_type: task weight: 70 --- -{{% capture overview %}} + This page shows how to use a [`projected`](/docs/concepts/storage/volumes/#projected) Volume to mount several existing volume sources into the same directory. Currently, `secret`, `configMap`, `downwardAPI`, and `serviceAccountToken` volumes can be projected. {{< note >}} `serviceAccountToken` is not a volume type. {{< /note >}} -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Configure a projected volume for a pod In this exercise, you create username and password {{< glossary_tooltip text="Secrets" term_id="secret" >}} from local files. You then create a Pod that runs one container, using a [`projected`](/docs/concepts/storage/volumes/#projected) Volume to mount the Secrets into the same shared directory. @@ -77,9 +78,10 @@ kubectl delete pod test-projected-volume kubectl delete secret user pass ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [`projected`](/docs/concepts/storage/volumes/#projected) volumes. * Read the [all-in-one volume](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/node/all-in-one-volume.md) design document. -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/configure-runasusername.md b/content/en/docs/tasks/configure-pod-container/configure-runasusername.md index ac912327f3..12c10a9ddf 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-runasusername.md +++ b/content/en/docs/tasks/configure-pod-container/configure-runasusername.md @@ -1,24 +1,25 @@ --- title: Configure RunAsUserName for Windows pods and containers -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.18" state="stable" >}} This page shows how to use the `runAsUserName` setting for Pods and containers that will run on Windows nodes. This is roughly equivalent to the Linux-specific `runAsUser` setting, allowing you to run applications in a container as a different username than the default.
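A sketch of `runAsUserName` at both Pod and container level (the image and commands are illustrative; built-in accounts such as ContainerUser and ContainerAdministrator exist in Windows base images):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: run-as-username-demo
spec:
  securityContext:
    windowsOptions:
      runAsUserName: "ContainerUser"            # Pod-level default
  containers:
  - name: main
    image: mcr.microsoft.com/windows/servercore:ltsc2019
    command: ["ping", "-t", "localhost"]
    securityContext:
      windowsOptions:
        runAsUserName: "ContainerAdministrator" # overrides the Pod-level value
  nodeSelector:
    kubernetes.io/os: windows
```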
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + You need to have a Kubernetes cluster and the kubectl command-line tool must be configured to communicate with your cluster. The cluster is expected to have Windows worker nodes where pods with containers running Windows workloads will get scheduled. -{{% /capture %}} -{{% capture steps %}} + + ## Set the Username for a Pod @@ -114,12 +115,12 @@ Examples of acceptable values for the `runAsUserName` field: `ContainerAdministr For more information about these limtations, check [here](https://support.microsoft.com/en-us/help/909264/naming-conventions-in-active-directory-for-computers-domains-sites-and) and [here](https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.localaccounts/new-localuser?view=powershell-5.1). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Guide for scheduling Windows containers in Kubernetes](/docs/setup/production-environment/windows/user-guide-windows-containers/) * [Managing Workload Identity with Group Managed Service Accounts (GMSA)](/docs/setup/production-environment/windows/user-guide-windows-containers/#managing-workload-identity-with-group-managed-service-accounts) * [Configure GMSA for Windows pods and containers](/docs/tasks/configure-pod-container/configure-gmsa/) -{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/tasks/configure-pod-container/configure-service-account.md b/content/en/docs/tasks/configure-pod-container/configure-service-account.md index 021a8feb22..eaaabb9e94 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/en/docs/tasks/configure-pod-container/configure-service-account.md @@ -4,11 +4,11 @@ reviewers: - liggitt - thockin title: Configure Service Accounts for Pods -content_template: templates/task +content_type: task weight: 90 --- -{{% capture overview %}} + A service account provides an identity for processes that run in a Pod. {{< note >}} @@ -23,16 +23,17 @@ authenticated by the apiserver as a particular User Account (currently this is usually `admin`, unless your cluster administrator has customized your cluster). Processes in containers inside pods can also contact the apiserver. When they do, they are authenticated as a particular Service Account (for example, `default`). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Use the Default Service Account to access the API server. @@ -370,9 +371,10 @@ override the `jwks_uri` in the OpenID Provider Configuration so that it points to the public endpoint, rather than the API server's address, by passing the `--service-account-jwks-uri` flag to the API server. Like the issuer URL, the JWKS URI is required to use the `https` scheme. 
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + See also: @@ -380,4 +382,4 @@ See also: - [Service Account Signing Key Retrieval KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/20190730-oidc-discovery.md) - [OIDC Discovery Spec](https://openid.net/specs/openid-connect-discovery-1_0.html) -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/configure-volume-storage.md b/content/en/docs/tasks/configure-pod-container/configure-volume-storage.md index bec97a2975..69e665b42e 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-volume-storage.md +++ b/content/en/docs/tasks/configure-pod-container/configure-volume-storage.md @@ -1,10 +1,10 @@ --- title: Configure a Pod to Use a Volume for Storage -content_template: templates/task +content_type: task weight: 50 --- -{{% capture overview %}} + This page shows how to configure a Pod to use a Volume for storage. @@ -14,15 +14,16 @@ consistent storage that is independent of the Container, you can use a [Volume](/docs/concepts/storage/volumes/). This is especially important for stateful applications, such as key-value stores (such as Redis) and databases. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Configure a volume for a Pod @@ -126,9 +127,10 @@ of `Always`. kubectl delete pod redis ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * See [Volume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volume-v1-core). @@ -140,6 +142,6 @@ GCE and EBS on EC2, which are preferred for critical data and will handle details such as mounting and unmounting the devices on the nodes. See [Volumes](/docs/concepts/storage/volumes/) for more details. -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/extended-resource.md b/content/en/docs/tasks/configure-pod-container/extended-resource.md index 36d957ca01..25fa11b0d9 100644 --- a/content/en/docs/tasks/configure-pod-container/extended-resource.md +++ b/content/en/docs/tasks/configure-pod-container/extended-resource.md @@ -1,19 +1,20 @@ --- title: Assign Extended Resources to a Container -content_template: templates/task +content_type: task weight: 40 --- -{{% capture overview %}} + {{< feature-state state="stable" >}} This page shows how to assign extended resources to a Container. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -21,10 +22,10 @@ Before you do this exercise, do the exercise in [Advertise Extended Resources for a Node](/docs/tasks/administer-cluster/extended-resource-node/). That will configure one of your Nodes to advertise a dongle resource. 
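If you want to double-check that prerequisite before continuing, something like the following should show the advertised resource; the node name is a placeholder, and in the companion task the dongle resource typically appears as `example.com/dongle`:

```shell
# Expect to see "example.com/dongle" in the capacity map if the
# prerequisite task was completed on this node.
kubectl get node my-node -o jsonpath='{.status.capacity}'
```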
-{{% /capture %}} -{{% capture steps %}} + + ## Assign an extended resource to a Pod @@ -127,9 +128,10 @@ kubectl delete pod extended-resource-demo kubectl delete pod extended-resource-demo-2 ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For application developers @@ -140,4 +142,4 @@ kubectl delete pod extended-resource-demo-2 * [Advertise Extended Resources for a Node](/docs/tasks/administer-cluster/extended-resource-node/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md b/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md index 9184883003..ce0b5b3656 100644 --- a/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md +++ b/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md @@ -1,26 +1,27 @@ --- title: Pull an Image from a Private Registry -content_template: templates/task +content_type: task weight: 100 --- -{{% capture overview %}} + This page shows how to create a Pod that uses a Secret to pull an image from a private Docker registry or repository. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * To do this exercise, you need a [Docker ID](https://docs.docker.com/docker-id/) and password. -{{% /capture %}} -{{% capture steps %}} + + ## Log in to Docker @@ -200,9 +201,10 @@ kubectl apply -f my-private-reg-pod.yaml kubectl get pod private-reg ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Secrets](/docs/concepts/configuration/secret/). * Learn more about [using a private registry](/docs/concepts/containers/images/#using-a-private-registry). @@ -211,5 +213,5 @@ kubectl get pod private-reg * See [Secret](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core). * See the `imagePullSecrets` field of [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core). -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/quality-service-pod.md b/content/en/docs/tasks/configure-pod-container/quality-service-pod.md index cd9edd0410..dec9e8db91 100644 --- a/content/en/docs/tasks/configure-pod-container/quality-service-pod.md +++ b/content/en/docs/tasks/configure-pod-container/quality-service-pod.md @@ -1,27 +1,28 @@ --- title: Configure Quality of Service for Pods -content_template: templates/task +content_type: task weight: 30 --- -{{% capture overview %}} + This page shows how to configure Pods so that they will be assigned particular Quality of Service (QoS) classes. Kubernetes uses QoS classes to make decisions about scheduling and evicting Pods. 
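As a preview of the classes covered below, a Pod whose containers set limits equal to requests for both CPU and memory is typically assigned the Guaranteed class; the names and values in this sketch are illustrative:

```shell
# Create a pod with requests == limits, then read back its assigned QoS class.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: qos-sketch
spec:
  containers:
  - name: app
    image: nginx
    resources:
      requests:
        cpu: "700m"
        memory: "200Mi"
      limits:
        cpu: "700m"
        memory: "200Mi"
EOF
kubectl get pod qos-sketch -o jsonpath='{.status.qosClass}'   # expected: Guaranteed
```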
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## QoS classes @@ -235,9 +236,10 @@ Delete your namespace: kubectl delete namespace qos-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### For app developers @@ -263,7 +265,7 @@ kubectl delete namespace qos-example * [Configure Quotas for API Objects](/docs/tasks/administer-cluster/quota-api-object/) * [Control Topology Management policies on a node](/docs/tasks/administer-cluster/topology-manager/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/security-context.md b/content/en/docs/tasks/configure-pod-container/security-context.md index 0c2bb05d0c..38662760b7 100644 --- a/content/en/docs/tasks/configure-pod-container/security-context.md +++ b/content/en/docs/tasks/configure-pod-container/security-context.md @@ -4,11 +4,11 @@ reviewers: - mikedanese - thockin title: Configure a Security Context for a Pod or Container -content_template: templates/task +content_type: task weight: 80 --- -{{% capture overview %}} + A security context defines privilege and access control settings for a Pod or Container. Security context settings include, but are not limited to: @@ -37,15 +37,16 @@ for a comprehensive list. For more information about security mechanisms in Linux, see [Overview of Linux Kernel Security Features](https://www.linux.com/learn/overview-linux-kernel-security-features) -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Set the security context for a Pod @@ -409,9 +410,10 @@ kubectl delete pod security-context-demo-3 kubectl delete pod security-context-demo-4 ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [PodSecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritycontext-v1-core) * [SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core) @@ -423,4 +425,4 @@ kubectl delete pod security-context-demo-4 document](https://git.k8s.io/community/contributors/design-proposals/auth/no-new-privs.md) -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/share-process-namespace.md b/content/en/docs/tasks/configure-pod-container/share-process-namespace.md index ee227d3f9b..dfb8e40906 100644 --- a/content/en/docs/tasks/configure-pod-container/share-process-namespace.md +++ b/content/en/docs/tasks/configure-pod-container/share-process-namespace.md @@ -5,11 +5,11 @@ reviewers: - verb - yujuhong - dchen1107 -content_template: templates/task +content_type: task weight: 160 --- -{{% capture overview %}} + {{< feature-state state="stable" for_k8s_version="v1.17" >}} @@ -21,15 +21,16 @@ You can use this feature to configure cooperating containers, such as a log handler sidecar container, or to troubleshoot container images that don't include debugging utilities like a shell. 
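The configuration step below builds exactly this kind of Pod; as a minimal sketch, process namespace sharing is a single boolean in the Pod spec (container names and images here are illustrative):

```shell
# Two containers in one PID namespace: the busybox shell can see, and
# signal, processes started by the nginx container.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: shared-pid-sketch
spec:
  shareProcessNamespace: true
  containers:
  - name: nginx
    image: nginx
  - name: shell
    image: busybox
    stdin: true
    tty: true
EOF
```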
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Configure a Pod @@ -93,9 +94,9 @@ events { worker_connections 1024; ``` -{{% /capture %}} -{{% capture discussion %}} + + ## Understanding Process Namespace Sharing @@ -117,6 +118,6 @@ containers, though, so it's important to understand these differences: `/proc/$pid/root` link.** This makes debugging easier, but it also means that filesystem secrets are protected only by filesystem permissions. -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/static-pod.md b/content/en/docs/tasks/configure-pod-container/static-pod.md index fc31526348..5189fdb882 100644 --- a/content/en/docs/tasks/configure-pod-container/static-pod.md +++ b/content/en/docs/tasks/configure-pod-container/static-pod.md @@ -3,10 +3,10 @@ reviewers: - jsafrane title: Create static Pods weight: 170 -content_template: templates/task +content_type: task --- -{{% capture overview %}} + *Static Pods* are managed directly by the kubelet daemon on a specific node, @@ -30,9 +30,10 @@ Pods to run a Pod on every node, you should probably be using a instead. {{< /note >}} -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -41,10 +42,10 @@ and that your nodes are running the Fedora operating system. Instructions for other distributions or Kubernetes installations may vary. -{{% /capture %}} -{{% capture steps %}} + + ## Create a static pod {#static-pod-creation} @@ -236,4 +237,4 @@ CONTAINER ID IMAGE COMMAND CREATED ... e7a62e3427f1 nginx:latest "nginx -g 'daemon of 27 seconds ago ``` -{{% /capture %}} + diff --git a/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md b/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md index 847d76f25c..4fadbb3f42 100644 --- a/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md +++ b/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md @@ -2,26 +2,27 @@ reviewers: - cdrage title: Translate a Docker Compose File to Kubernetes Resources -content_template: templates/task +content_type: task weight: 200 --- -{{% capture overview %}} + What's Kompose? It's a conversion tool for all things compose (namely Docker Compose) to container orchestrators (Kubernetes or OpenShift). More information can be found on the Kompose website at [http://kompose.io](http://kompose.io). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Install Kompose @@ -200,9 +201,9 @@ you need is an existing `docker-compose.yml` file. $ curl http://192.0.2.89 ``` -{{% /capture %}} -{{% capture discussion %}} + + ## User Guide @@ -606,4 +607,4 @@ Kompose supports Docker Compose versions: 1, 2 and 3. We have limited support on A full list on compatibility between all three versions is listed in our [conversion document](https://github.com/kubernetes/kompose/blob/master/docs/conversion.md) including a list of all incompatible Docker Compose keys. 
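In practice the whole workflow usually reduces to a convert-then-apply loop; a sketch, assuming a `docker-compose.yml` in the current directory (the generated file names depend on the services defined in your compose file):

```shell
kompose convert -f docker-compose.yml                      # writes *-deployment.yaml / *-service.yaml files
kubectl apply -f web-deployment.yaml -f web-service.yaml   # hypothetical generated names
```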
-{{% /capture %}} +
diff --git a/content/en/docs/tasks/debug-application-cluster/audit.md b/content/en/docs/tasks/debug-application-cluster/audit.md
index 5a57779b6c..600af51d00 100644
--- a/content/en/docs/tasks/debug-application-cluster/audit.md
+++ b/content/en/docs/tasks/debug-application-cluster/audit.md
@@ -3,11 +3,11 @@ reviewers: - soltysh - sttts - ericchiang
-content_template: templates/concept
+content_type: concept
title: Auditing ---
-{{% capture overview %}} +
Kubernetes auditing provides a security-relevant chronological set of records documenting the sequence of activities that have affected the system by individual users, administrators
@@ -22,10 +22,10 @@ answer the following questions: - from where was it initiated? - to where was it going?
-{{% /capture %}} -{{% capture body %}} + +
[Kube-apiserver][kube-apiserver] performs auditing. Each request on each stage of its execution generates an event, which is then pre-processed according to
@@ -245,7 +245,7 @@ Existing static backends that you configure with runtime flags are not affected
The AuditSink policy differs from the legacy audit runtime policy. This is because the API object serves different use cases. The policy will continue to evolve to serve more use cases.
-The `level` field applies the given audit level to all requests. The `stages` field is now a whitelist of stages to record.
+The `level` field applies the given audit level to all requests. The `stages` field is now a list of allowed stages to record.
#### Contacting the webhook
@@ -503,12 +503,13 @@ plugin which supports full-text search and analytics.
[logstash_install_doc]: https://www.elastic.co/guide/en/logstash/current/installing-logstash.html [kube-aggregator]: /docs/concepts/api-extension/apiserver-aggregation
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} +
Visit [Auditing with Falco](/docs/tasks/debug-application-cluster/falco).
Learn about [Mutating webhook auditing annotations](/docs/reference/access-authn-authz/extensible-admission-controllers/#mutating-webhook-auditing-annotations).
-{{% /capture %}} +
diff --git a/content/en/docs/tasks/debug-application-cluster/crictl.md b/content/en/docs/tasks/debug-application-cluster/crictl.md
index f7bfec87ff..a047f194e9 100644
--- a/content/en/docs/tasks/debug-application-cluster/crictl.md
+++ b/content/en/docs/tasks/debug-application-cluster/crictl.md
@@ -4,11 +4,11 @@ reviewers: - feiskyer - mrunalp title: Debugging Kubernetes nodes with crictl
-content_template: templates/task
+content_type: task
---
-{{% capture overview %}} +
{{< feature-state for_k8s_version="v1.11" state="stable" >}}
You can use it to inspect and debug container runtimes and applications on a Kubernetes node. `crictl` and its source are hosted in the [cri-tools](https://github.com/kubernetes-incubator/cri-tools) repository.
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} +
`crictl` requires a Linux operating system with a CRI runtime.
-{{% /capture %}} -{{% capture steps %}} + +
## Installing crictl
@@ -347,12 +348,12 @@ CONTAINER ID IMAGE CREATED STATE 3e025dd50a72d busybox About a minute ago Running busybox 0 ```
-{{% /capture %}} -{{% capture discussion %}} + +
See [kubernetes-incubator/cri-tools](https://github.com/kubernetes-incubator/cri-tools) for more information.
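As a taste of the commands the steps above cover, a typical read-only session on a node might look like this; the runtime endpoint is an assumption that depends on your CRI runtime, and the container ID is the one from the sample output above:

```shell
# The containerd socket path below is an assumption; adjust for your runtime.
crictl --runtime-endpoint unix:///run/containerd/containerd.sock pods   # list pod sandboxes
crictl ps -a                    # all containers, including exited ones
crictl logs 3e025dd50a72d       # logs for a container ID shown by 'crictl ps'
```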
-{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md b/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md index 2f5d6e7eda..730b9fb00c 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md @@ -2,20 +2,20 @@ reviewers: - janetkuo - thockin -content_template: templates/concept +content_type: concept title: Application Introspection and Debugging --- -{{% capture overview %}} + Once your application is running, you'll inevitably need to debug problems with it. Earlier we described how you can use `kubectl get pods` to retrieve simple status information about your pods. But there are a number of ways to get even more information about your application. -{{% /capture %}} -{{% capture body %}} + + ## Using `kubectl describe pod` to fetch details about pods @@ -387,17 +387,18 @@ status: systemUUID: ABE5F6B4-D44B-108B-C46A-24CCE16C8B6E ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Learn about additional debugging tools, including: * [Logging](/docs/concepts/cluster-administration/logging/) * [Monitoring](/docs/tasks/debug-application-cluster/resource-usage-monitoring/) * [Getting into containers via `exec`](/docs/tasks/debug-application-cluster/get-shell-running-container/) -* [Connecting to containers via proxies](/docs/tasks/access-kubernetes-api/http-proxy-access-api/) +* [Connecting to containers via proxies](/docs/tasks/extend-kubernetes/http-proxy-access-api/) * [Connecting to containers via port forwarding](/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) * [Inspect Kubernetes node with crictl](/docs/tasks/debug-application-cluster/crictl/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/debug-application.md b/content/en/docs/tasks/debug-application-cluster/debug-application.md index 08f0fad008..a5c37541c3 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-application.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-application.md @@ -3,19 +3,19 @@ reviewers: - mikedanese - thockin title: Troubleshoot Applications -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + This guide is to help users debug applications that are deployed into Kubernetes and not behaving correctly. This is *not* a guide for people who want to debug their cluster. For that you should check out [this guide](/docs/admin/cluster-troubleshooting). -{{% /capture %}} -{{% capture body %}} + + ## Diagnosing the problem @@ -161,12 +161,13 @@ check: * Can you connect to your pods directly? Get the IP address for the Pod, and try to connect directly to that IP. * Is your application serving on the port that you configured? Kubernetes doesn't do port remapping, so if your application serves on 8080, the `containerPort` field needs to be 8080. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + If none of the above solves your problem, follow the instructions in [Debugging Service document](/docs/user-guide/debugging-services) to make sure that your `Service` is running, has `Endpoints`, and your `Pods` are actually serving; you have DNS working, iptables rules installed, and kube-proxy does not seem to be misbehaving. You may also visit [troubleshooting document](/docs/troubleshooting/) for more information. 
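Two quick commands cover most of the Service checks just mentioned; `my-service` is a placeholder name:

```shell
kubectl get service my-service     # does the Service exist, and with which ports?
kubectl get endpoints my-service   # an empty ENDPOINTS column usually means a label/selector mismatch
```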
-{{% /capture %}} +
diff --git a/content/en/docs/tasks/debug-application-cluster/debug-cluster.md b/content/en/docs/tasks/debug-application-cluster/debug-cluster.md
index 473f364361..0a66bed195 100644
--- a/content/en/docs/tasks/debug-application-cluster/debug-cluster.md
+++ b/content/en/docs/tasks/debug-application-cluster/debug-cluster.md
@@ -2,20 +2,20 @@ reviewers: - davidopp title: Troubleshoot Clusters
-content_template: templates/concept
+content_type: concept
---
-{{% capture overview %}} +
This doc is about cluster troubleshooting; we assume you have already ruled out your application as the root cause of the problem you are experiencing. See the [application troubleshooting guide](/docs/tasks/debug-application-cluster/debug-application) for tips on application debugging. You may also visit [troubleshooting document](/docs/troubleshooting/) for more information.
-{{% /capture %}} -{{% capture body %}} + +
## Listing your cluster
@@ -124,4 +124,4 @@ This is an incomplete list of things that could go wrong, and how to adjust your
- Mitigates: Node shutdown - Mitigates: Kubelet software fault
-{{% /capture %}} +
diff --git a/content/en/docs/tasks/debug-application-cluster/debug-init-containers.md b/content/en/docs/tasks/debug-application-cluster/debug-init-containers.md
index 296f0a0648..a6a3a44d98 100644
--- a/content/en/docs/tasks/debug-application-cluster/debug-init-containers.md
+++ b/content/en/docs/tasks/debug-application-cluster/debug-init-containers.md
@@ -8,19 +8,20 @@ reviewers: - kow3ns - smarterclayton title: Debug Init Containers
-content_template: templates/task
+content_type: task
---
-{{% capture overview %}} +
This page shows how to investigate problems related to the execution of Init Containers. The example command lines below refer to the Pod as `<pod-name>` and the Init Containers as `<init-container-1>` and `<init-container-2>`.
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} +
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
@@ -28,9 +29,9 @@ Init Containers. The example command lines below refer to the Pod as
[Init Containers](/docs/concepts/abstractions/init-containers/).
* You should have [Configured an Init Container](/docs/tasks/configure-pod-container/configure-pod-initialization/#creating-a-pod-that-has-an-init-container/).
-{{% /capture %}} -{{% capture steps %}} + +
## Checking the status of Init Containers
@@ -113,9 +114,9 @@ Init Containers that run a shell script print commands as they're executed. For example, you can do this in Bash by running `set -x` at the beginning of the script.
-{{% /capture %}} -{{% capture discussion %}} + +
## Understanding Pod status
@@ -131,7 +132,7 @@ Status | Meaning
`Pending` | The Pod has not yet begun executing Init Containers.
`PodInitializing` or `Running` | The Pod has already finished executing Init Containers.
-{{% /capture %}} +
diff --git a/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md b/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md
index 28c9885e57..9793b472e0 100644
--- a/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md
+++ b/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md
@@ -2,25 +2,26 @@ reviewers: - bprashanth title: Debug Pods and ReplicationControllers
-content_template: templates/task
+content_type: task
---
-{{% capture overview %}} +
This page shows how to debug Pods and ReplicationControllers.
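The Init Container checks described above, and the Pod debugging that follows, come down to a handful of `kubectl` invocations; a sketch using the placeholder names from the Init Container page:

```shell
kubectl get pod <pod-name>                      # overall status, e.g. Init:1/2 or Init:Error
kubectl describe pod <pod-name>                 # per-Init-Container state and recent events
kubectl logs <pod-name> -c <init-container-1>   # logs of one specific Init Container
```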
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * You should be familiar with the basics of [Pods](/docs/concepts/workloads/pods/pod/) and [Pod Lifecycle](/docs/concepts/workloads/pods/pod-lifecycle/). -{{% /capture %}} -{{% capture steps %}} + + ## Debugging Pods @@ -106,4 +107,4 @@ or they can't. If they can't create pods, then please refer to the You can also use `kubectl describe rc ${CONTROLLER_NAME}` to inspect events related to the replication controller. -{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md b/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md index a812640555..5e67585705 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md @@ -3,16 +3,17 @@ reviewers: - verb - soltysh title: Debug Running Pods -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page explains how to debug Pods running (or crashing) on a Node. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Your {{< glossary_tooltip text="Pod" term_id="pod" >}} should already be scheduled and running. If your Pod is not yet running, start with [Troubleshoot @@ -21,9 +22,9 @@ This page explains how to debug Pods running (or crashing) on a Node. Pod is running and have shell access to run commands on that Node. You don't need that access to run the standard debug steps that use `kubectl`. -{{% /capture %}} -{{% capture steps %}} + + ## Examining pod logs {#examine-pod-logs} @@ -187,4 +188,4 @@ given tools in the Kubernetes API. Therefore, if you find yourself needing to ssh into a machine, please file a feature request on GitHub describing your use case and why these tools are insufficient. -{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/debug-service.md b/content/en/docs/tasks/debug-application-cluster/debug-service.md index 8656f3ae7e..c4e12042fb 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-service.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-service.md @@ -2,21 +2,21 @@ reviewers: - thockin - bowei -content_template: templates/concept +content_type: concept title: Debug Services --- -{{% capture overview %}} + An issue that comes up rather frequently for new installations of Kubernetes is that a Service is not working properly. You've run your Pods through a Deployment (or other workload controller) and created a Service, but you get no response when you try to access it. This document will hopefully help you to figure out what's going wrong. -{{% /capture %}} -{{% capture body %}} + + ## Running commands in a Pod @@ -728,10 +728,11 @@ Contact us on [Forum](https://discuss.kubernetes.io) or [GitHub](https://github.com/kubernetes/kubernetes). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Visit [troubleshooting document](/docs/troubleshooting/) for more information. 
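One concrete way to run commands in a Pod, as discussed above, is a throwaway shell for DNS and connectivity checks; `my-service` is a placeholder name:

```shell
kubectl run -it --rm debug-shell --image=busybox --restart=Never -- sh
# then, inside the pod:
#   nslookup my-service
#   wget -qO- http://my-service:80
```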
-{{% /capture %}} +
diff --git a/content/en/docs/tasks/debug-application-cluster/debug-stateful-set.md b/content/en/docs/tasks/debug-application-cluster/debug-stateful-set.md
index 8bf56bb10c..755c9b725e 100644
--- a/content/en/docs/tasks/debug-application-cluster/debug-stateful-set.md
+++ b/content/en/docs/tasks/debug-application-cluster/debug-stateful-set.md
@@ -8,23 +8,24 @@ reviewers: - kow3ns - smarterclayton title: Debug a StatefulSet
-content_template: templates/task
+content_type: task
---
-{{% capture overview %}} +
This task shows you how to debug a StatefulSet.
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} +
* You need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster.
* You should have a StatefulSet running that you want to investigate.
-{{% /capture %}} -{{% capture steps %}} + +
## Debugging a StatefulSet
@@ -41,12 +42,13 @@ instructions on how to deal with them.
You can debug individual Pods in a StatefulSet using the [Debugging Pods](/docs/tasks/debug-application-cluster/debug-pod-replication-controller/) guide.
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} +
Learn more about [debugging an init-container](/docs/tasks/debug-application-cluster/debug-init-containers/).
-{{% /capture %}} +
diff --git a/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md b/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md
index 44b4225fdc..44dcf0e909 100644
--- a/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md
+++ b/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md
@@ -1,9 +1,9 @@ --- title: Determine the Reason for Pod Failure
-content_template: templates/task
+content_type: task
---
-{{% capture overview %}} +
This page shows how to write and read a Container termination message.
@@ -16,17 +16,18 @@ put in a termination message should also be written to the general [Kubernetes logs](/docs/concepts/cluster-administration/logging/).
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} +
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
-{{% /capture %}} -{{% capture steps %}} + +
## Writing and reading a termination message
@@ -82,6 +83,11 @@ value of `/dev/termination-log`. By customizing this field, you can tell Kuberne
to use a different file. Kubernetes uses the contents from the specified file to populate the Container's status message on both success and failure.
+The termination message is intended to be a brief final status, such as an assertion failure message.
+The kubelet truncates messages that are longer than 4096 bytes. The total message length across all
+containers will be limited to 12KiB. The default termination message path is `/dev/termination-log`.
+You cannot set the termination message path after a Pod is launched.
+
In the following example, the container writes termination messages to `/tmp/my-log` for Kubernetes to retrieve:
@@ -105,16 +111,17 @@ to use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} +
* See the `terminationMessagePath` field in [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core).
* Learn about [retrieving logs](/docs/concepts/cluster-administration/logging/). * Learn about [Go templates](https://golang.org/pkg/text/template/). -{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/events-stackdriver.md b/content/en/docs/tasks/debug-application-cluster/events-stackdriver.md index d852ed3cf9..859c163307 100644 --- a/content/en/docs/tasks/debug-application-cluster/events-stackdriver.md +++ b/content/en/docs/tasks/debug-application-cluster/events-stackdriver.md @@ -2,11 +2,11 @@ reviewers: - piosz - x13n -content_template: templates/concept +content_type: concept title: Events in Stackdriver --- -{{% capture overview %}} + Kubernetes events are objects that provide insight into what is happening inside a cluster, such as what decisions were made by scheduler or why some @@ -34,10 +34,10 @@ of the potential inaccuracy. {{< /note >}} -{{% /capture %}} -{{% capture body %}} + + ## Deployment @@ -91,4 +91,4 @@ jsonPayload.involvedObject.name:"nginx-deployment" {{< figure src="/images/docs/stackdriver-event-exporter-filter.png" alt="Filtered events in the Stackdriver Logging interface" width="500" >}} -{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/falco.md b/content/en/docs/tasks/debug-application-cluster/falco.md index 003b287602..f5f67406a9 100644 --- a/content/en/docs/tasks/debug-application-cluster/falco.md +++ b/content/en/docs/tasks/debug-application-cluster/falco.md @@ -3,19 +3,19 @@ reviewers: - soltysh - sttts - ericchiang -content_template: templates/concept +content_type: concept title: Auditing with Falco --- -{{% capture overview %}} + ### Use Falco to collect audit events [Falco](https://falco.org/) is an open source project for intrusion and abnormality detection for Cloud Native platforms. This section describes how to set up Falco, how to send audit events to the Kubernetes Audit endpoint exposed by Falco, and how Falco applies a set of rules to automatically detect suspicious behavior. -{{% /capture %}} -{{% capture body %}} + + #### Install Falco @@ -118,4 +118,4 @@ For further details, see [Kubernetes Audit Events][falco_ka_docs] in the Falco d [falco_installation]: https://falco.org/docs/installation [falco_helm_chart]: https://github.com/helm/charts/tree/master/stable/falco -{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/get-shell-running-container.md b/content/en/docs/tasks/debug-application-cluster/get-shell-running-container.md index f3ff92c196..12502ef102 100644 --- a/content/en/docs/tasks/debug-application-cluster/get-shell-running-container.md +++ b/content/en/docs/tasks/debug-application-cluster/get-shell-running-container.md @@ -3,25 +3,26 @@ reviewers: - caesarxuchao - mikedanese title: Get a Shell to a Running Container -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This page shows how to use `kubectl exec` to get a shell to a running Container. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Getting a shell to a Container @@ -122,9 +123,9 @@ kubectl exec shell-demo ls / kubectl exec shell-demo cat /proc/1/mounts ``` -{{% /capture %}} -{{% capture discussion %}} + + ## Opening a shell when a Pod has more than one Container @@ -138,14 +139,15 @@ shell to the main-app Container. 
kubectl exec -it my-pod --container main-app -- /bin/bash ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec) -{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/local-debugging.md b/content/en/docs/tasks/debug-application-cluster/local-debugging.md index 9cfc216ee4..d00c1398eb 100644 --- a/content/en/docs/tasks/debug-application-cluster/local-debugging.md +++ b/content/en/docs/tasks/debug-application-cluster/local-debugging.md @@ -1,9 +1,9 @@ --- title: Developing and debugging services locally -content_template: templates/task +content_type: task --- -{{% capture overview %}} + Kubernetes applications usually consist of multiple, separate services, each running in its own container. Developing and debugging these services on a remote Kubernetes cluster can be cumbersome, requiring you to [get a shell on a running container](/docs/tasks/debug-application-cluster/get-shell-running-container/) and running your tools inside the remote shell. @@ -12,17 +12,18 @@ Kubernetes applications usually consist of multiple, separate services, each run This document describes using `telepresence` to develop and debug services running on a remote cluster locally. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Kubernetes cluster is installed * `kubectl` is configured to communicate with the cluster * [Telepresence](https://www.telepresence.io/reference/install) is installed -{{% /capture %}} -{{% capture steps %}} + + ## Getting a shell on a remote cluster @@ -46,9 +47,10 @@ where $DEPLOYMENT_NAME is the name of your existing deployment. Running this command spawns a shell. In the shell, start your service. You can then make edits to the source code locally, save, and see the changes take effect immediately. You can also run your service in a debugger, or any other local development tool. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + If you're interested in a hands-on tutorial, check out [this tutorial](https://cloud.google.com/community/tutorials/developing-services-with-k8s) that walks through locally developing the Guestbook application on Google Kubernetes Engine. @@ -56,4 +58,4 @@ Telepresence has [numerous proxying options](https://www.telepresence.io/referen For further reading, visit the [Telepresence website](https://www.telepresence.io). -{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md b/content/en/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md index 327bfdf925..c47b117391 100644 --- a/content/en/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md +++ b/content/en/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md @@ -2,11 +2,11 @@ reviewers: - piosz - x13n -content_template: templates/concept +content_type: concept title: Logging Using Elasticsearch and Kibana --- -{{% capture overview %}} + On the Google Compute Engine (GCE) platform, the default logging support targets [Stackdriver Logging](https://cloud.google.com/logging/), which is described in detail @@ -21,9 +21,9 @@ Stackdriver Logging when running on GCE. You cannot automatically deploy Elasticsearch and Kibana in the Kubernetes cluster hosted on Google Kubernetes Engine. You have to deploy them manually. 
{{< /note >}}
-{{% /capture %}} -{{% capture body %}} + +
To use Elasticsearch and Kibana for cluster logging, you should set the following environment variable as shown below when creating your cluster with
@@ -114,11 +114,12 @@ Here is a typical view of ingested logs from the Kibana viewer:
![Kibana logs](/images/docs/kibana-logs.png)
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} +
Kibana opens up all sorts of powerful options for exploring your logs! For some ideas on how to dig into it, check out [Kibana's documentation](https://www.elastic.co/guide/en/kibana/current/discover.html).
-{{% /capture %}} +
diff --git a/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md b/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md
index a60ceeedfb..be80133d34 100644
--- a/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md
+++ b/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md
@@ -3,10 +3,10 @@ reviewers: - piosz - x13n title: Logging Using Stackdriver
-content_template: templates/concept
+content_type: concept
---
-{{% capture overview %}} +
Before reading this page, it's highly recommended to familiarize yourself with the [overview of logging in Kubernetes](/docs/concepts/cluster-administration/logging).
{{< note >}} see the [sidecar approach](/docs/concepts/cluster-administration/logging#sidecar in the Kubernetes logging overview. {{< /note >}}
-{{% /capture %}} -{{% capture body %}} + +
## Deploying
@@ -368,4 +368,4 @@ with minor changes:
Then run `make build push` from this directory. After updating `DaemonSet` to pick up the new image, you can use the plugin you installed in the fluentd configuration.
-{{% /capture %}} +
diff --git a/content/en/docs/tasks/debug-application-cluster/monitor-node-health.md b/content/en/docs/tasks/debug-application-cluster/monitor-node-health.md
index f434adb17e..9ebeeeddad 100644
--- a/content/en/docs/tasks/debug-application-cluster/monitor-node-health.md
+++ b/content/en/docs/tasks/debug-application-cluster/monitor-node-health.md
@@ -2,11 +2,11 @@ reviewers: - Random-Liu - dchen1107
-content_template: templates/task
+content_type: task
title: Monitor Node Health ---
-{{% capture overview %}} +
*Node problem detector* is a [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) monitoring the node health. It collects node problems from various daemons and reports them
@@ -23,15 +23,16 @@ introduced to deal with node problems.
See more information [here](https://github.com/kubernetes/node-problem-detector).
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} +
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
-{{% /capture %}} -{{% capture steps %}} + +
## Limitations
@@ -162,9 +163,9 @@ Kernel monitor uses [`Translator`](https://github.com/kubernetes/node-problem-de
plugin to translate the kernel log into the internal data structure. It is easy to implement a new translator for a new log format.
-{{% /capture %}} -{{% capture discussion %}} + +
## Caveats
@@ -177,4 +178,4 @@ resource overhead on each node. Usually this is fine, because:
* Even under high load, the resource usage is acceptable.
(see [benchmark result](https://github.com/kubernetes/node-problem-detector/issues/2#issuecomment-220255629)) -{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md b/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md index 547790e5b0..dbd4aa6cf4 100644 --- a/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md +++ b/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md @@ -3,20 +3,20 @@ reviewers: - fgrzadkowski - piosz title: Resource metrics pipeline -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Resource usage metrics, such as container CPU and memory usage, are available in Kubernetes through the Metrics API. These metrics can be either accessed directly by user, for example by using `kubectl top` command, or used by a controller in the cluster, e.g. Horizontal Pod Autoscaler, to make decisions. -{{% /capture %}} -{{% capture body %}} + + ## The Metrics API @@ -61,4 +61,4 @@ Metrics Server is registered with the main API server through Learn more about the metrics server in [the design doc](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/metrics-server.md). -{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/resource-usage-monitoring.md b/content/en/docs/tasks/debug-application-cluster/resource-usage-monitoring.md index de8c538118..6cb716da9c 100644 --- a/content/en/docs/tasks/debug-application-cluster/resource-usage-monitoring.md +++ b/content/en/docs/tasks/debug-application-cluster/resource-usage-monitoring.md @@ -1,11 +1,11 @@ --- reviewers: - mikedanese -content_template: templates/concept +content_type: concept title: Tools for Monitoring Resources --- -{{% capture overview %}} + To scale an application and provide a reliable service, you need to understand how the application behaves when it is deployed. You can examine @@ -16,9 +16,9 @@ information about an application's resource usage at each of these levels. This information allows you to evaluate your application's performance and where bottlenecks can be removed to improve overall performance. -{{% /capture %}} -{{% capture body %}} + + In Kubernetes, application monitoring does not depend on a single monitoring solution. On new clusters, you can use [resource metrics](#resource-metrics-pipeline) or [full metrics](#full-metrics-pipeline) pipelines to collect monitoring statistics. @@ -55,4 +55,4 @@ then exposes them to Kubernetes via an adapter by implementing either the [Prometheus](https://prometheus.io), a CNCF project, can natively monitor Kubernetes, nodes, and Prometheus itself. Full metrics pipeline projects that are not part of the CNCF are outside the scope of Kubernetes documentation. -{{% /capture %}} + diff --git a/content/en/docs/tasks/debug-application-cluster/troubleshooting.md b/content/en/docs/tasks/debug-application-cluster/troubleshooting.md index 1ed0f5aa5b..82301275ce 100644 --- a/content/en/docs/tasks/debug-application-cluster/troubleshooting.md +++ b/content/en/docs/tasks/debug-application-cluster/troubleshooting.md @@ -2,11 +2,11 @@ reviewers: - brendandburns - davidopp -content_template: templates/concept +content_type: concept title: Troubleshooting --- -{{% capture overview %}} + Sometimes things go wrong. This guide is aimed at making them right. 
It has two sections: @@ -17,10 +17,10 @@ two sections: You should also check the known issues for the [release](https://github.com/kubernetes/kubernetes/releases) you're using. -{{% /capture %}} -{{% capture body %}} + + ## Getting help @@ -104,4 +104,4 @@ problem, such as: * Cloud provider, OS distro, network configuration, and Docker version * Steps to reproduce the problem -{{% /capture %}} + diff --git a/content/en/docs/tasks/example-task-template.md b/content/en/docs/tasks/example-task-template.md index c723460fc0..90d14e98da 100644 --- a/content/en/docs/tasks/example-task-template.md +++ b/content/en/docs/tasks/example-task-template.md @@ -2,11 +2,11 @@ title: Example Task Template reviewers: - chenopis -content_template: templates/task +content_type: task toc_hide: true --- -{{% capture overview %}} + {{< note >}} Be sure to also [create an entry in the table of contents](/docs/contribute/style/write-new-topic/#placing-your-topic-in-the-table-of-contents) for your new document. @@ -14,39 +14,39 @@ Be sure to also [create an entry in the table of contents](/docs/contribute/styl This page shows how to ... -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * Do this. * Do this too. -{{% /capture %}} -{{% capture steps %}} + + ## Doing ... 1. Do this. 1. Do this next. Possibly read this [related explanation](#). -{{% /capture %}} -{{% capture discussion %}} + + ## Understanding ... **[Optional Section]** Here's an interesting thing to know about the steps you just did. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + **[Optional Section]** * Learn more about [Writing a New Topic](/docs/home/contribute/write-new-topic/). -* See [Using Page Templates - Task template](/docs/home/contribute/page-templates/#task_template) for how to use this template. - -{{% /capture %}} \ No newline at end of file +* Learn about [Page Content Types - Task](/docs/home/contribute/style/page-content-types/#task). diff --git a/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md b/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md index dcc315871b..7d14b86f24 100644 --- a/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md +++ b/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md @@ -4,23 +4,24 @@ reviewers: - juanvallejo - soltysh description: With kubectl plugins, you can extend the functionality of the kubectl command by adding new subcommands. -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This guide demonstrates how to install and write extensions for [kubectl](/docs/reference/kubectl/kubectl/). By thinking of core `kubectl` commands as essential building blocks for interacting with a Kubernetes cluster, a cluster administrator can think of plugins as a means of utilizing these building blocks to create more complex behavior. Plugins extend `kubectl` with new sub-commands, allowing for new and custom features not included in the main distribution of `kubectl`. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + You need to have a working `kubectl` binary installed. -{{% /capture %}} -{{% capture steps %}} + + ## Installing kubectl plugins @@ -375,9 +376,10 @@ set up a build environment (if it needs compiling), and deploy the plugin. If you also make compiled packages available, or use Krew, that will make installs easier. 
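To make the plugin mechanism concrete, here is a minimal end-to-end sketch; the plugin name and install directory are illustrative:

```shell
# Any executable named kubectl-* on your PATH becomes a kubectl subcommand.
cat >kubectl-hello <<'EOF'
#!/bin/sh
echo "hello from a kubectl plugin"
EOF
chmod +x kubectl-hello
sudo mv kubectl-hello /usr/local/bin/
kubectl hello          # invokes the plugin
kubectl plugin list    # should now list /usr/local/bin/kubectl-hello
```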
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Check the Sample CLI Plugin repository for a [detailed example](https://github.com/kubernetes/sample-cli-plugin) of a @@ -386,4 +388,4 @@ installs easier. [SIG CLI team](https://github.com/kubernetes/community/tree/master/sig-cli). * Read about [Krew](https://krew.dev/), a package manager for kubectl plugins. -{{% /capture %}} + diff --git a/content/en/docs/tasks/access-kubernetes-api/_index.md b/content/en/docs/tasks/extend-kubernetes/_index.md old mode 100755 new mode 100644 similarity index 100% rename from content/en/docs/tasks/access-kubernetes-api/_index.md rename to content/en/docs/tasks/extend-kubernetes/_index.md diff --git a/content/en/docs/tasks/access-kubernetes-api/configure-aggregation-layer.md b/content/en/docs/tasks/extend-kubernetes/configure-aggregation-layer.md similarity index 95% rename from content/en/docs/tasks/access-kubernetes-api/configure-aggregation-layer.md rename to content/en/docs/tasks/extend-kubernetes/configure-aggregation-layer.md index 9a77378d5c..739a69d45a 100644 --- a/content/en/docs/tasks/access-kubernetes-api/configure-aggregation-layer.md +++ b/content/en/docs/tasks/extend-kubernetes/configure-aggregation-layer.md @@ -4,31 +4,27 @@ reviewers: - lavalamp - cheftako - chenopis -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + -Configuring the [aggregation layer](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) allows the Kubernetes apiserver to be extended with additional APIs, which are not part of the core Kubernetes APIs. +Configuring the [aggregation layer](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) allows the Kubernetes apiserver to be extended with additional APIs, which are not part of the core Kubernetes APIs. -{{% /capture %}} - -{{% capture prerequisites %}} +## {{% heading "prerequisites" %}} {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} {{< note >}} -There are a few setup requirements for getting the aggregation layer working in your environment to support mutual TLS auth between the proxy and extension apiservers. Kubernetes and the kube-apiserver have multiple CAs, so make sure that the proxy is signed by the aggregation layer CA and not by something else, like the master CA. +There are a few setup requirements for getting the aggregation layer working in your environment to support mutual TLS auth between the proxy and extension apiservers. Kubernetes and the kube-apiserver have multiple CAs, so make sure that the proxy is signed by the aggregation layer CA and not by something else, like the master CA. +{{< /note >}} {{< caution >}} Reusing the same CA for different client types can negatively impact the cluster's ability to function. For more information, see [CA Reusage and Conflicts](#ca-reusage-and-conflicts). {{< /caution >}} -{{< /note >}} -{{% /capture %}} - -{{% capture steps %}} + ## Authentication Flow @@ -137,7 +133,10 @@ The Kubernetes apiserver connects to the extension apiserver over TLS, authentic The Kubernetes apiserver will use the files indicated by `--proxy-client-*-file` to authenticate to the extension apiserver. In order for the request to be considered valid by a compliant extension apiserver, the following conditions must be met: 1. The connection must be made using a client certificate that is signed by the CA whose certificate is in `--requestheader-client-ca-file`. -2. 
The connection must be made using a client certificate whose CN is one of those listed in `--requestheader-allowed-names`. **Note:** You can set this option to blank as `--requestheader-allowed-names=""`. This will indicate to an extension apiserver that _any_ CN is acceptable. +2. The connection must be made using a client certificate whose CN is one of those listed in `--requestheader-allowed-names`. + +{{< note >}}You can set this option to blank as `--requestheader-allowed-names=""`. This will indicate to an extension apiserver that _any_ CN is acceptable. +{{< /note >}} When started with these options, the Kubernetes apiserver will: @@ -222,7 +221,7 @@ If you are not running kube-proxy on a host running the API server, then you mus --enable-aggregator-routing=true -{{% /capture %}} + ### Register APIService objects @@ -275,11 +274,8 @@ spec: ... ``` -{{% capture whatsnext %}} - -* [Setup an extension api-server](/docs/tasks/access-kubernetes-api/setup-extension-api-server/) to work with the aggregation layer. -* For a high level overview, see [Extending the Kubernetes API with the aggregation layer](/docs/concepts/api-extension/apiserver-aggregation/). -* Learn how to [Extend the Kubernetes API Using Custom Resource Definitions](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/). - -{{% /capture %}} +## {{% heading "whatsnext" %}} +* [Setup an extension api-server](/docs/tasks/extend-kubernetes/setup-extension-api-server/) to work with the aggregation layer. +* For a high level overview, see [Extending the Kubernetes API with the aggregation layer](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/). +* Learn how to [Extend the Kubernetes API Using Custom Resource Definitions](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/). diff --git a/content/en/docs/tasks/access-kubernetes-api/custom-resources/_index.md b/content/en/docs/tasks/extend-kubernetes/custom-resources/_index.md similarity index 100% rename from content/en/docs/tasks/access-kubernetes-api/custom-resources/_index.md rename to content/en/docs/tasks/extend-kubernetes/custom-resources/_index.md diff --git a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning.md b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md similarity index 99% rename from content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning.md rename to content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md index ec35dd88e8..6eaf0cdd3a 100644 --- a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning.md +++ b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md @@ -3,19 +3,20 @@ title: Versions in CustomResourceDefinitions reviewers: - sttts - liggitt -content_template: templates/task +content_type: task weight: 30 min-kubernetes-server-version: v1.16 --- -{{% capture overview %}} + This page explains how to add versioning information to [CustomResourceDefinitions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#customresourcedefinition-v1beta1-apiextensions), to indicate the stability level of your CustomResourceDefinitions or advance your API to a new version with conversion between API representations. It also describes how to upgrade an object from one version to another. 
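To set the scene for the steps below, here is a minimal sketch of how multiple versions appear in a CustomResourceDefinition; it uses the deprecated `apiextensions.k8s.io/v1beta1` API for brevity, and the group and kind names are illustrative:

```shell
cat <<EOF | kubectl apply -f -
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: crontabs.example.com
spec:
  group: example.com
  names:
    plural: crontabs
    singular: crontab
    kind: CronTab
  scope: Namespaced
  versions:
  - name: v1beta1
    served: true     # still readable and writable through the API
    storage: false
  - name: v1
    served: true
    storage: true    # exactly one version is persisted to etcd
EOF
```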
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} +
{{< include "task-tutorial-prereqs.md" >}}
You should have an initial understanding of [custom resources](/docs/concepts/api-extension/custom-resources/).
{{< version-check >}}
-{{% /capture %}} -{{% capture steps %}} + +
## Overview
@@ -961,4 +962,4 @@ The following is an example procedure to upgrade from `v1beta1` to `v1`.
storage version, which is `v1`. 2. Remove `v1beta1` from the CustomResourceDefinition `status.storedVersions` field.
-{{% /capture %}} +
diff --git a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions.md b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md
similarity index 95%
rename from content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions.md
rename to content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md
index 4fcd389ba2..78b55b58dc 100644
--- a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions.md
+++ b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md
@@ -6,18 +6,19 @@ reviewers: - liggitt - roycaihw - sttts
-content_template: templates/task
+content_type: task
weight: 20 ---
-{{% capture overview %}} +
This page shows how to install a [custom resource](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) into the Kubernetes API by creating a [CustomResourceDefinition](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#customresourcedefinition-v1beta1-apiextensions).
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} +
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
@@ -25,9 +26,9 @@ into the Kubernetes API by creating a
* Read about [custom resources](/docs/concepts/api-extension/custom-resources/).
-{{% /capture %}} -{{% capture steps %}} + +
## Create a CustomResourceDefinition
@@ -250,11 +251,11 @@ If you later recreate the same CustomResourceDefinition, it will start out empty
{{< feature-state state="stable" for_k8s_version="v1.16" >}}
-CustomResources traditionally store arbitrary JSON (next to `apiVersion`, `kind` and `metadata`, which is validated by the API server implicitly). With [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) a schema can be specified, which is validated during creation and updates, compare below for details and limits of such a schema.
+CustomResources traditionally store arbitrary JSON (next to `apiVersion`, `kind` and `metadata`, which is validated by the API server implicitly). With [OpenAPI v3.0 validation](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation) a schema can be specified, which is validated during creation and updates, compare below for details and limits of such a schema.
With `apiextensions.k8s.io/v1` the definition of a structural schema is mandatory for CustomResourceDefinitions, while in `v1beta1` this is still optional.
A structural schema is an [OpenAPI v3.0 validation schema](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation) which: 1.
specifies a non-empty type (via `type` in OpenAPI) for the root, for each specified field of an object node (via `properties` or `additionalProperties` in OpenAPI) and for each item in an array node (via `items` in OpenAPI), with the exception of: * a node with `x-kubernetes-int-or-string: true` @@ -363,15 +364,15 @@ Violations of the structural schema rules are reported in the `NonStructural` co Structural schemas are a requirement for `apiextensions.k8s.io/v1`, and disables the following features for `apiextensions.k8s.io/v1beta1`: -* [Validation Schema Publishing](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#publish-validation-schema-in-openapi-v2) -* [Webhook Conversion](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning/#webhook-conversion) +* [Validation Schema Publishing](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#publish-validation-schema-in-openapi-v2) +* [Webhook Conversion](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion) * [Pruning](#preserving-unknown-fields) ### Pruning versus preserving unknown fields {#preserving-unknown-fields} {{< feature-state state="stable" for_k8s_version="v1.16" >}} -CustomResourceDefinitions traditionally store any (possibly validated) JSON as is in etcd. This means that unspecified fields (if there is a [OpenAPI v3.0 validation schema](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) at all) are persisted. This is in contrast to native Kubernetes resources such as a pod where unknown fields are dropped before being persisted to etcd. We call this "pruning" of unknown fields. +CustomResourceDefinitions traditionally store any (possibly validated) JSON as is in etcd. This means that unspecified fields (if there is a [OpenAPI v3.0 validation schema](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation) at all) are persisted. This is in contrast to native Kubernetes resources such as a pod where unknown fields are dropped before being persisted to etcd. We call this "pruning" of unknown fields. {{< tabs name="CustomResourceDefinition_pruning" >}} {{% tab name="apiextensions.k8s.io/v1" %}} @@ -426,7 +427,7 @@ spec: The field `someRandomField` has been pruned. -Note that the `kubectl create` call uses `--validate=false` to skip client-side validation. Because the [OpenAPI validation schemas are also published](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#publish-validation-schema-in-openapi-v2) to kubectl, it will also check for unknown fields and reject those objects long before they are sent to the API server. +Note that the `kubectl create` call uses `--validate=false` to skip client-side validation. Because the [OpenAPI validation schemas are also published](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#publish-validation-schema-in-openapi-v2) to kubectl, it will also check for unknown fields and reject those objects long before they are sent to the API server. ### Controlling pruning @@ -532,7 +533,7 @@ allOf: With one of those specification, both an integer and a string validate. -In [Validation Schema Publishing](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#publish-validation-schema-in-openapi-v2), `x-kubernetes-int-or-string: true` is unfolded to one of the two patterns shown above. 
+In [Validation Schema Publishing](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#publish-validation-schema-in-openapi-v2), `x-kubernetes-int-or-string: true` is unfolded to one of the two patterns shown above.

### RawExtension

@@ -564,13 +565,13 @@ With `x-kubernetes-embedded-resource: true`, the `apiVersion`, `kind` and `metad

## Serving multiple versions of a CRD

-See [Custom resource definition versioning](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning/)
+See [Custom resource definition versioning](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/)
for more information about serving multiple versions of your
CustomResourceDefinition and migrating your objects from one version to
another.

-{{% /capture %}}

-{{% capture discussion %}}
+
+
## Advanced topics

### Finalizers

@@ -633,7 +634,7 @@ Additionally, the following restrictions are applied to the schema:

These fields can only be set with specific features enabled:

-- `default`: can be set for `apiextensions.k8s.io/v1` CustomResourceDefinitions. Defaulting is in GA since 1.17 (beta since 1.16 with the `CustomResourceDefaulting` feature gate to be enabled, which is the case automatically for many clusters for beta features). Compare [Validation Schema Defaulting](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#defaulting).
+- `default`: can be set for `apiextensions.k8s.io/v1` CustomResourceDefinitions. Defaulting is in GA since 1.17 (beta since 1.16 with the `CustomResourceDefaulting` feature gate to be enabled, which is the case automatically for many clusters for beta features). Compare [Validation Schema Defaulting](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#defaulting).

{{< note >}}
Compare with [structural schemas](#specifying-a-structural-schema) for further restriction required for certain CustomResourceDefinition features.
@@ -1448,13 +1449,13 @@ NAME AGE
crontabs/my-new-cron-object 3s
```

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* See [CustomResourceDefinition](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#customresourcedefinition-v1-apiextensions-k8s-io).
-* Serve [multiple versions](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning/) of a
+* Serve [multiple versions](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/) of a
CustomResourceDefinition.

-{{% /capture %}}
\ No newline at end of file

diff --git a/content/en/docs/tasks/access-kubernetes-api/http-proxy-access-api.md b/content/en/docs/tasks/extend-kubernetes/http-proxy-access-api.md
similarity index 90%
rename from content/en/docs/tasks/access-kubernetes-api/http-proxy-access-api.md
rename to content/en/docs/tasks/extend-kubernetes/http-proxy-access-api.md
index be282a29c1..dd80c8c349 100644
--- a/content/en/docs/tasks/access-kubernetes-api/http-proxy-access-api.md
+++ b/content/en/docs/tasks/extend-kubernetes/http-proxy-access-api.md
@@ -1,14 +1,15 @@
---
title: Use an HTTP Proxy to Access the Kubernetes API
-content_template: templates/task
+content_type: task
weight: 40
---

-{{% capture overview %}}
+

This page shows how to use an HTTP proxy
to access the Kubernetes API.
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -19,9 +20,7 @@ a Hello world application by entering this command: kubectl run node-hello --image=gcr.io/google-samples/node-hello:1.0 --port=8080 ``` -{{% /capture %}} - -{{% capture steps %}} + ## Using kubectl to start a proxy server @@ -81,10 +80,6 @@ The output should look similar to this: ... } -{{% /capture %}} +## {{% heading "whatsnext" %}} -{{% capture whatsnext %}} Learn more about [kubectl proxy](/docs/reference/generated/kubectl/kubectl-commands#proxy). -{{% /capture %}} - - diff --git a/content/en/docs/tasks/access-kubernetes-api/setup-extension-api-server.md b/content/en/docs/tasks/extend-kubernetes/setup-extension-api-server.md similarity index 70% rename from content/en/docs/tasks/access-kubernetes-api/setup-extension-api-server.md rename to content/en/docs/tasks/extend-kubernetes/setup-extension-api-server.md index 71c6059eec..626ddcab5c 100644 --- a/content/en/docs/tasks/access-kubernetes-api/setup-extension-api-server.md +++ b/content/en/docs/tasks/extend-kubernetes/setup-extension-api-server.md @@ -1,34 +1,35 @@ --- -title: Setup an Extension API Server +title: Set up an Extension API Server reviewers: - lavalamp - cheftako - chenopis -content_template: templates/task +content_type: task weight: 15 --- -{{% capture overview %}} + -Setting up an extension API server to work the aggregation layer allows the Kubernetes apiserver to be extended with additional APIs, which are not part of the core Kubernetes APIs. +Setting up an extension API server to work with the aggregation layer allows the Kubernetes apiserver to be extended with additional APIs, which are not part of the core Kubernetes APIs. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -* You must [configure the aggregation layer](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/) and enable the apiserver flags. +* You must [configure the aggregation layer](/docs/tasks/extend-kubernetes/configure-aggregation-layer/) and enable the apiserver flags. -{{% /capture %}} -{{% capture steps %}} + + ## Setup an extension api-server to work with the aggregation layer The following steps describe how to set up an extension-apiserver *at a high level*. These steps apply regardless if you're using YAML configs or using APIs. An attempt is made to specifically identify any differences between the two. For a concrete example of how they can be implemented using YAML configs, you can look at the [sample-apiserver](https://github.com/kubernetes/sample-apiserver/blob/master/README.md) in the Kubernetes repo. -Alternatively, you can use an existing 3rd party solution, such as [apiserver-builder](https://github.com/Kubernetes-incubator/apiserver-builder/blob/master/README.md), which should generate a skeleton and automate all of the following steps for you. +Alternatively, you can use an existing 3rd party solution, such as [apiserver-builder](https://github.com/kubernetes-sigs/apiserver-builder-alpha/blob/master/README.md), which should generate a skeleton and automate all of the following steps for you. 1. Make sure the APIService API is enabled (check `--runtime-config`). It should be on by default, unless it's been deliberately turned off in your cluster. 1. 
You may need to make an RBAC rule allowing you to add APIService objects, or get your cluster administrator to make one. (Since API extensions affect the entire cluster, it is not recommended to do testing/development/debug of an API extension in a live cluster.) @@ -44,17 +45,13 @@ Alternatively, you can use an existing 3rd party solution, such as [apiserver-bu 1. Create a Kubernetes cluster role binding from the service account in your namespace to the `system:auth-delegator` cluster role to delegate auth decisions to the Kubernetes core API server. 1. Create a Kubernetes role binding from the service account in your namespace to the `extension-apiserver-authentication-reader` role. This allows your extension api-server to access the `extension-apiserver-authentication` configmap. 1. Create a Kubernetes apiservice. The CA cert above should be base64 encoded, stripped of new lines and used as the spec.caBundle in the apiservice. This should not be namespaced. If using the [kube-aggregator API](https://github.com/kubernetes/kube-aggregator/), only pass in the PEM encoded CA bundle because the base 64 encoding is done for you. -1. Use kubectl to get your resource. It should return "No resources found." Which means that everything worked but you currently have no objects of that resource type created yet. - -{{% /capture %}} - -{{% capture whatsnext %}} - -* If you haven't already, [configure the aggregation layer](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/) and enable the apiserver flags. -* For a high level overview, see [Extending the Kubernetes API with the aggregation layer](/docs/concepts/api-extension/apiserver-aggregation). -* Learn how to [Extend the Kubernetes API Using Custom Resource Definitions](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/). - -{{% /capture %}} +1. Use kubectl to get your resource. When run, kubectl should return "No resources found.". This message +indicates that everything worked but you currently have no objects of that resource type created. +## {{% heading "whatsnext" %}} + +* Walk through the steps to [configure the API aggregation layer](/docs/tasks/extend-kubernetes/configure-aggregation-layer/) and enable the apiserver flags. +* For a high level overview, see [Extending the Kubernetes API with the aggregation layer](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/). +* Learn how to [Extend the Kubernetes API using Custom Resource Definitions](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/). diff --git a/content/en/docs/tasks/setup-konnectivity/setup-konnectivity.md b/content/en/docs/tasks/extend-kubernetes/setup-konnectivity.md similarity index 61% rename from content/en/docs/tasks/setup-konnectivity/setup-konnectivity.md rename to content/en/docs/tasks/extend-kubernetes/setup-konnectivity.md index b5dbd05215..da9dabb135 100644 --- a/content/en/docs/tasks/setup-konnectivity/setup-konnectivity.md +++ b/content/en/docs/tasks/extend-kubernetes/setup-konnectivity.md @@ -1,40 +1,39 @@ --- title: Set up Konnectivity service -content_template: templates/task +content_type: task weight: 70 --- -{{% capture overview %}} + -The Konnectivity service provides TCP level proxy for the Master → Cluster +The Konnectivity service provides a TCP level proxy for the control plane to cluster communication. 
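Before the prerequisites, it may help to see the shape of the egress configuration that the steps below reference. Here is a sketch, assuming the Konnectivity server listens on a local Unix domain socket; the apiVersion and socket path are illustrative and should be checked against the `egress-selector-configuration.yaml` file shipped with your cluster:

```yaml
apiVersion: apiserver.k8s.io/v1beta1
kind: EgressSelectorConfiguration
egressSelections:
# "cluster" selects API server to node traffic, such as logs and exec
- name: cluster
  connection:
    proxyProtocol: GRPC
    transport:
      uds:
        # the API server dials the Konnectivity server over this socket
        udsName: /etc/kubernetes/konnectivity-server/konnectivity-server.socket
```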
-{{% /capture %}} - -{{% capture prerequisites %}} +## {{% heading "prerequisites" %}} {{< include "task-tutorial-prereqs.md" >}} -{{% /capture %}} - -{{% capture steps %}} + ## Configure the Konnectivity service -First, you need to configure the API Server to use the Konnectivity service -to direct its network traffic to cluster nodes: - -1. Set the `--egress-selector-config-file` flag of the API Server, it is the -path to the API Server egress configuration file. -1. At the path, create a configuration file. For example, +The following steps require an egress configuration, for example: {{< codenew file="admin/konnectivity/egress-selector-configuration.yaml" >}} +You need to configure the API Server to use the Konnectivity service +and direct the network traffic to the cluster nodes: + +1. Create an egress configuration file such as `admin/konnectivity/egress-selector-configuration.yaml`. +1. Set the `--egress-selector-config-file` flag of the API Server to the path of +your API Server egress configuration file. + Next, you need to deploy the Konnectivity server and agents. [kubernetes-sigs/apiserver-network-proxy](https://github.com/kubernetes-sigs/apiserver-network-proxy) is a reference implementation. -Deploy the Konnectivity server on your master node. The provided yaml assumes +Deploy the Konnectivity server on your control plane node. The provided +`konnectivity-server.yaml` manifest assumes that the Kubernetes components are deployed as a {{< glossary_tooltip text="static Pod" term_id="static-pod" >}} in your cluster. If not, you can deploy the Konnectivity server as a DaemonSet. @@ -48,5 +47,3 @@ Then deploy the Konnectivity agents in your cluster: Last, if RBAC is enabled in your cluster, create the relevant RBAC rules: {{< codenew file="admin/konnectivity/konnectivity-rbac.yaml" >}} - -{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/tasks/inject-data-application/define-command-argument-container.md b/content/en/docs/tasks/inject-data-application/define-command-argument-container.md index 66ebd69c13..faaffc52a2 100644 --- a/content/en/docs/tasks/inject-data-application/define-command-argument-container.md +++ b/content/en/docs/tasks/inject-data-application/define-command-argument-container.md @@ -1,25 +1,26 @@ --- title: Define a Command and Arguments for a Container -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + This page shows how to define commands and arguments when you run a container in a {{< glossary_tooltip term_id="pod" >}}. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Define a command and arguments when you create a Pod @@ -145,14 +146,15 @@ Here are some examples: | `[/ep-1]` | `[foo bar]` | `[/ep-2]` | `[zoo boo]` | `[ep-2 zoo boo]` | -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [configuring pods and containers](/docs/tasks/). * Learn more about [running commands in a container](/docs/tasks/debug-application-cluster/get-shell-running-container/). * See [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core). 
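To recap the command and argument rules above in one place, here is a minimal Pod sketch following the pattern this page uses: `command` overrides the image's entrypoint, and `args` overrides its default arguments:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: command-demo
spec:
  containers:
  - name: command-demo-container
    image: debian
    command: ["printenv"]                  # replaces the image ENTRYPOINT
    args: ["HOSTNAME", "KUBERNETES_PORT"]  # replaces the image CMD
  restartPolicy: OnFailure
```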
-{{% /capture %}}
+

diff --git a/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md b/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md
index d10bbd323f..d75d930c56 100644
--- a/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md
+++ b/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md
@@ -1,25 +1,26 @@
---
title: Define Environment Variables for a Container
-content_template: templates/task
+content_type: task
weight: 20
---

-{{% capture overview %}}
+

This page shows how to define environment variables for a container
in a Kubernetes Pod.

-{{% /capture %}}

-{{% capture prerequisites %}}
-{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
-
-{{% /capture %}}
+## {{% heading "prerequisites" %}}

-{{% capture steps %}}
+{{< include "task-tutorial-prereqs.md" >}}
+
+
+
+

## Define an environment variable for a container

@@ -29,12 +30,12 @@ that run in the Pod. To set environment variables, include the `env` or

In this exercise, you create a Pod that runs one container. The configuration
file for the Pod defines an environment variable with name `DEMO_GREETING` and
-value `"Hello from the environment"`. Here is the configuration file for the
+value `"Hello from the environment"`. Here is the configuration manifest for the
Pod:

{{< codenew file="pods/inject/envars.yaml" >}}

-1. Create a Pod based on the YAML configuration file:
+1. Create a Pod based on that manifest:

```shell
kubectl apply -f https://k8s.io/examples/pods/inject/envars.yaml
@@ -46,7 +47,7 @@ Pod:
kubectl get pods -l purpose=demonstrate-envars
```

- The output is similar to this:
+ The output is similar to:

```
NAME READY STATUS RESTARTS AGE
@@ -62,7 +63,8 @@ Pod:
1. In your shell, run the `printenv` command to list the environment variables.

```shell
- root@envar-demo:/# printenv
+ # Run this in the shell inside the container
+ printenv
```

The output is similar to this:
@@ -80,12 +82,24 @@ Pod:
{{< note >}}
The environment variables set using the `env` or `envFrom` field
-will override any environment variables specified in the container image.
+override any environment variables specified in the container image.
+{{< /note >}}
+
+{{< note >}}
+Environment variables may reference each other; ordering matters and circular
+references are possible, so define each variable before any variable that uses it.
+{{< /note >}}

## Using environment variables inside of your config

-Environment variables that you define in a Pod's configuration can be used elsewhere in the configuration, for example in commands and arguments that you set for the Pod's containers. In the example configuration below, the `GREETING`, `HONORIFIC`, and `NAME` environment variables are set to `Warm greetings to`, `The Most Honorable`, and `Kubernetes`, respectively. Those environment variables are then used in the CLI arguments passed to the `env-print-demo` container.
+Environment variables that you define in a Pod's configuration can be used
+elsewhere in the configuration, for example in commands and arguments that
+you set for the Pod's containers.
+In the example configuration below, the `GREETING`, `HONORIFIC`, and
+`NAME` environment variables are set to `Warm greetings to`, `The Most
+Honorable`, and `Kubernetes`, respectively. Those environment variables
+are then used in the CLI arguments passed to the `env-print-demo`
+container.
```yaml apiVersion: v1 @@ -109,12 +123,13 @@ spec: Upon creation, the command `echo Warm greetings to The Most Honorable Kubernetes` is run on the container. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [environment variables](/docs/tasks/inject-data-application/environment-variable-expose-pod-information/). * Learn about [using secrets as environment variables](/docs/user-guide/secrets/#using-secrets-as-environment-variables). * See [EnvVarSource](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#envvarsource-v1-core). -{{% /capture %}} + diff --git a/content/en/docs/tasks/inject-data-application/distribute-credentials-secure.md b/content/en/docs/tasks/inject-data-application/distribute-credentials-secure.md index 2fb15aa3b2..de4d32d7b9 100644 --- a/content/en/docs/tasks/inject-data-application/distribute-credentials-secure.md +++ b/content/en/docs/tasks/inject-data-application/distribute-credentials-secure.md @@ -1,22 +1,23 @@ --- title: Distribute Credentials Securely Using Secrets -content_template: templates/task +content_type: task weight: 50 min-kubernetes-server-version: v1.6 --- -{{% capture overview %}} + This page shows how to securely inject sensitive data, such as passwords and encryption keys, into Pods. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} -{{% /capture %}} -{{% capture steps %}} + + ## Convert your secret data to a base-64 representation @@ -243,9 +244,10 @@ This functionality is available in Kubernetes v1.6 and later. password: 39528$vdg7Jb ```` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Secrets](/docs/concepts/configuration/secret/). * Learn about [Volumes](/docs/concepts/storage/volumes/). @@ -256,5 +258,5 @@ This functionality is available in Kubernetes v1.6 and later. * [Volume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volume-v1-core) * [Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) -{{% /capture %}} + diff --git a/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md b/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md index a24aba65b6..4ab41f2a23 100644 --- a/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md +++ b/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md @@ -1,25 +1,26 @@ --- title: Expose Pod Information to Containers Through Files -content_template: templates/task +content_type: task weight: 40 --- -{{% capture overview %}} + This page shows how a Pod can use a DownwardAPIVolumeFile to expose information about itself to Containers running in the Pod. A DownwardAPIVolumeFile can expose Pod fields and Container fields. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## The Downward API @@ -189,9 +190,9 @@ In your shell, view the `cpu_limit` file: You can use similar commands to view the `cpu_request`, `mem_limit` and `mem_request` files. -{{% /capture %}} -{{% capture discussion %}} + + ## Capabilities of the Downward API @@ -249,10 +250,11 @@ application, but that is tedious and error prone, and it violates the goal of lo coupling. 
A better option would be to use the Pod's name as an identifier, and inject the Pod's name into the well-known environment variable. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core) * [Volume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volume-v1-core) @@ -260,7 +262,7 @@ inject the Pod's name into the well-known environment variable. * [DownwardAPIVolumeFile](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumefile-v1-core) * [ResourceFieldSelector](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcefieldselector-v1-core) -{{% /capture %}} + diff --git a/content/en/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md b/content/en/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md index c23b3ba75a..2b59921c6e 100644 --- a/content/en/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md +++ b/content/en/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md @@ -1,26 +1,27 @@ --- title: Expose Pod Information to Containers Through Environment Variables -content_template: templates/task +content_type: task weight: 30 --- -{{% capture overview %}} + This page shows how a Pod can use environment variables to expose information about itself to Containers running in the Pod. Environment variables can expose Pod fields and Container fields. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## The Downward API @@ -154,9 +155,10 @@ The output shows the values of selected environment variables: 67108864 ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Defining Environment Variables for a Container](/docs/tasks/inject-data-application/define-environment-variable-container/) * [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core) @@ -166,5 +168,5 @@ The output shows the values of selected environment variables: * [ObjectFieldSelector](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#objectfieldselector-v1-core) * [ResourceFieldSelector](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcefieldselector-v1-core) -{{% /capture %}} + diff --git a/content/en/docs/tasks/inject-data-application/podpreset.md b/content/en/docs/tasks/inject-data-application/podpreset.md index dcf159acf5..6533629ce4 100644 --- a/content/en/docs/tasks/inject-data-application/podpreset.md +++ b/content/en/docs/tasks/inject-data-application/podpreset.md @@ -3,26 +3,27 @@ reviewers: - jessfraz title: Inject Information into Pods Using a PodPreset min-kubernetes-server-version: v1.6 -content_template: templates/task +content_type: task weight: 60 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.6" state="alpha" >}} This page shows how to use PodPreset objects to inject information like {{< glossary_tooltip text="Secrets" term_id="secret" >}}, volume mounts, and {{< glossary_tooltip text="environment variables" term_id="container-env-variables" >}} into Pods at creation time. 
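As a preview of the object this task revolves around, here is a sketch of a PodPreset; the `allow-database` preset used later in this page follows this shape, though the selector and injected values below are illustrative:

```yaml
apiVersion: settings.k8s.io/v1alpha1
kind: PodPreset
metadata:
  name: allow-database
spec:
  # injected into every new Pod whose labels match this selector
  selector:
    matchLabels:
      role: frontend
  env:
  - name: DB_PORT
    value: "6379"
  volumeMounts:
  - mountPath: /cache
    name: cache-volume
  volumes:
  - name: cache-volume
    emptyDir: {}
```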
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + You need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one using [Minikube](/docs/setup/learning-environment/minikube/). Make sure that you have [enabled PodPreset](/docs/concepts/workloads/pods/podpreset/#enable-pod-preset) in your cluster. -{{% /capture %}} -{{% capture steps %}} + + ## Use Pod presets to inject environment variables and volumes @@ -321,4 +322,4 @@ The output shows that the PodPreset was deleted: podpreset "allow-database" deleted ``` -{{% /capture %}} + diff --git a/content/en/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/en/docs/tasks/job/automated-tasks-with-cron-jobs.md index ae5b6633ad..602ad8482d 100644 --- a/content/en/docs/tasks/job/automated-tasks-with-cron-jobs.md +++ b/content/en/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -3,11 +3,11 @@ title: Running Automated Tasks with a CronJob min-kubernetes-server-version: v1.8 reviewers: - chenopis -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + You can use a {{< glossary_tooltip text="CronJob" term_id="cronjob" >}} to run {{< glossary_tooltip text="Jobs" term_id="job" >}} on a time-based schedule. These automated jobs run like [Cron](https://en.wikipedia.org/wiki/Cron) tasks on a Linux or UNIX system. @@ -21,15 +21,16 @@ Therefore, jobs should be idempotent. For more limitations, see [CronJobs](/docs/concepts/workloads/controllers/cron-jobs). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} -{{% /capture %}} -{{% capture steps %}} + + ## Creating a Cron Job @@ -207,4 +208,4 @@ The `.spec.successfulJobsHistoryLimit` and `.spec.failedJobsHistoryLimit` fields These fields specify how many completed and failed jobs should be kept. By default, they are set to 3 and 1 respectively. Setting a limit to `0` corresponds to keeping none of the corresponding kind of jobs after they finish. -{{% /capture %}} + diff --git a/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md b/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md index 707c5b9850..346fbdda8d 100644 --- a/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md +++ b/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md @@ -1,12 +1,12 @@ --- title: Coarse Parallel Processing Using a Work Queue min-kubernetes-server-version: v1.8 -content_template: templates/task +content_type: task weight: 30 --- -{{% capture overview %}} + In this example, we will run a Kubernetes Job with multiple parallel worker processes. @@ -23,19 +23,20 @@ Here is an overview of the steps in this example: 1. **Start a Job that works on tasks from the queue**. The Job starts several pods. Each pod takes one task from the message queue, processes it, and repeats until the end of the queue is reached. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Be familiar with the basic, non-parallel, use of [Job](/docs/concepts/jobs/run-to-completion-finite-workloads/). {{< include "task-tutorial-prereqs.md" >}} -{{% /capture %}} -{{% capture steps %}} + + ## Starting a message queue service @@ -292,9 +293,9 @@ Events: All our pods succeeded. Yay. 
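The Job built later in this example expresses that pattern through the `completions` and `parallelism` fields. A minimal sketch of that shape, assuming a hypothetical worker image and one successful completion per task in the queue:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: job-wq-1
spec:
  completions: 8    # total tasks expected from the queue
  parallelism: 2    # worker Pods running at any one time
  template:
    spec:
      containers:
      - name: worker
        image: example/job-wq-1   # hypothetical image that consumes tasks from the queue
      restartPolicy: OnFailure
```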
-{{% /capture %}} -{{% capture discussion %}} + + ## Alternatives @@ -331,4 +332,4 @@ exits with success, or if the node crashes before the kubelet is able to post th back to the api-server, then the Job will not appear to be complete, even though all items in the queue have been processed. -{{% /capture %}} + diff --git a/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md b/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md index 26fbbacaa7..f502113c8f 100644 --- a/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md +++ b/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md @@ -1,11 +1,11 @@ --- title: Fine Parallel Processing Using a Work Queue -content_template: templates/task +content_type: task min-kubernetes-server-version: v1.8 weight: 40 --- -{{% capture overview %}} + In this example, we will run a Kubernetes Job with multiple parallel worker processes in a given pod. @@ -25,23 +25,24 @@ Here is an overview of the steps in this example: 1. **Start a Job that works on tasks from the queue**. The Job starts several pods. Each pod takes one task from the message queue, processes it, and repeats until the end of the queue is reached. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} -{{% /capture %}} -{{% capture steps %}} + + Be familiar with the basic, non-parallel, use of [Job](/docs/concepts/jobs/run-to-completion-finite-workloads/). -{{% /capture %}} -{{% capture steps %}} + + ## Starting Redis @@ -226,9 +227,9 @@ Working on lemon As you can see, one of our pods worked on several work units. -{{% /capture %}} -{{% capture discussion %}} + + ## Alternatives @@ -240,4 +241,4 @@ consider running your background workers with a `ReplicaSet` instead, and consider running a background processing library such as [https://github.com/resque/resque](https://github.com/resque/resque). -{{% /capture %}} + diff --git a/content/en/docs/tasks/job/parallel-processing-expansion.md b/content/en/docs/tasks/job/parallel-processing-expansion.md index e2d0975a70..3477be2650 100644 --- a/content/en/docs/tasks/job/parallel-processing-expansion.md +++ b/content/en/docs/tasks/job/parallel-processing-expansion.md @@ -1,11 +1,11 @@ --- title: Parallel Processing using Expansions -content_template: templates/task +content_type: task min-kubernetes-server-version: v1.8 weight: 20 --- -{{% capture overview %}} + This task demonstrates running multiple {{< glossary_tooltip text="Jobs" term_id="job" >}} based on a common template. You can use this approach to process batches of work in @@ -16,9 +16,10 @@ The sample Jobs process each item simply by printing a string then pausing. See [using Jobs in real workloads](#using-jobs-in-real-workloads) to learn about how this pattern fits more realistic use cases. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + You should be familiar with the basic, non-parallel, use of [Job](/docs/concepts/jobs/run-to-completion-finite-workloads/). @@ -35,10 +36,10 @@ Once you have Python set up, you can install Jinja2 by running: ```shell pip install --user jinja2 ``` -{{% /capture %}} -{{% capture steps %}} + + ## Create Jobs based on a template @@ -252,8 +253,8 @@ Kubernetes accepts and runs the Jobs you created. kubectl delete job -l jobgroup=jobexample ``` -{{% /capture %}} -{{% capture discussion %}} + + ## Using Jobs in real workloads @@ -310,4 +311,4 @@ objects. 
You could also consider writing your own [controller](/docs/concepts/architecture/controller/) to manage Job objects automatically. -{{% /capture %}} + diff --git a/content/en/docs/tasks/manage-daemon/rollback-daemon-set.md b/content/en/docs/tasks/manage-daemon/rollback-daemon-set.md index 4b1d424066..2d6dc9d0d6 100644 --- a/content/en/docs/tasks/manage-daemon/rollback-daemon-set.md +++ b/content/en/docs/tasks/manage-daemon/rollback-daemon-set.md @@ -2,28 +2,29 @@ reviewers: - janetkuo title: Perform a Rollback on a DaemonSet -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + This page shows how to perform a rollback on a DaemonSet. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * The DaemonSet rollout history and DaemonSet rollback features are only supported in `kubectl` in Kubernetes version 1.7 or later. * Make sure you know how to [perform a rolling update on a DaemonSet](/docs/tasks/manage-daemon/update-daemon-set/). -{{% /capture %}} -{{% capture steps %}} + + ## Performing a Rollback on a DaemonSet @@ -104,10 +105,10 @@ When the rollback is complete, the output is similar to this: daemonset "" successfully rolled out ``` -{{% /capture %}} -{{% capture discussion %}} + + ## Understanding DaemonSet Revisions @@ -154,6 +155,6 @@ have revision 1 and 2 in the system, and roll back from revision 2 to revision * See [troubleshooting DaemonSet rolling update](/docs/tasks/manage-daemon/update-daemon-set/#troubleshooting). -{{% /capture %}} + diff --git a/content/en/docs/tasks/manage-daemon/update-daemon-set.md b/content/en/docs/tasks/manage-daemon/update-daemon-set.md index 8e32763e01..b9168ed098 100644 --- a/content/en/docs/tasks/manage-daemon/update-daemon-set.md +++ b/content/en/docs/tasks/manage-daemon/update-daemon-set.md @@ -2,25 +2,26 @@ reviewers: - janetkuo title: Perform a Rolling Update on a DaemonSet -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + This page shows how to perform a rolling update on a DaemonSet. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * The DaemonSet rolling update feature is only supported in Kubernetes version 1.6 or later. -{{% /capture %}} -{{% capture steps %}} + + ## DaemonSet Update Strategy @@ -190,13 +191,14 @@ Delete DaemonSet from a namespace : kubectl delete ds fluentd-elasticsearch -n kube-system ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * See [Task: Performing a rollback on a DaemonSet](/docs/tasks/manage-daemon/rollback-daemon-set/) * See [Concepts: Creating a DaemonSet to adopt existing DaemonSet pods](/docs/concepts/workloads/controllers/daemonset/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/manage-gpus/scheduling-gpus.md b/content/en/docs/tasks/manage-gpus/scheduling-gpus.md index 4c0b9f9bc3..dce3db810b 100644 --- a/content/en/docs/tasks/manage-gpus/scheduling-gpus.md +++ b/content/en/docs/tasks/manage-gpus/scheduling-gpus.md @@ -1,11 +1,11 @@ --- reviewers: - vishh -content_template: templates/concept +content_type: concept title: Schedule GPUs --- -{{% capture overview %}} + {{< feature-state state="beta" for_k8s_version="v1.10" >}} @@ -15,10 +15,10 @@ Kubernetes includes **experimental** support for managing AMD and NVIDIA GPUs This page describes how users can consume GPUs across different Kubernetes versions and the current limitations. 
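Concretely, the rolling update behavior hangs off the DaemonSet's `updateStrategy` field. Here is a sketch using the `fluentd-elasticsearch` DaemonSet that this page works with; the image tag is illustrative:

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  namespace: kube-system
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  updateStrategy:
    type: RollingUpdate    # replace Pods automatically when the template changes
    rollingUpdate:
      maxUnavailable: 1    # update at most one node's Pod at a time
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      containers:
      - name: fluentd-elasticsearch
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2  # illustrative tag
```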
-{{% /capture %}} -{{% capture body %}} + + ## Using device plugins @@ -98,7 +98,7 @@ has the following requirements: - Kubelet must use Docker as its container runtime - `nvidia-container-runtime` must be configured as the [default runtime](https://github.com/NVIDIA/k8s-device-plugin#preparing-your-gpu-nodes) for Docker, instead of runc. -- The version of the NVIDIA drivers must match the constraint ~= 361.93 +- The version of the NVIDIA drivers must match the constraint ~= 384.81. To deploy the NVIDIA device plugin once your cluster is running and the above requirements are satisfied: @@ -216,4 +216,4 @@ spec: This will ensure that the Pod will be scheduled to a node that has the GPU type you specified. -{{% /capture %}} + diff --git a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md index ad6b969c87..b01ae06df2 100644 --- a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md +++ b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md @@ -2,19 +2,20 @@ reviewers: - derekwaynecarr title: Manage HugePages -content_template: templates/task +content_type: task --- -{{% capture overview %}} + {{< feature-state state="stable" >}} Kubernetes supports the allocation and consumption of pre-allocated huge pages by applications in a Pod as a **GA** feature. This page describes how users can consume huge pages and the current limitations. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + 1. Kubernetes nodes must pre-allocate huge pages in order for the node to report its huge page capacity. A node can pre-allocate huge pages for multiple @@ -23,9 +24,9 @@ can consume huge pages and the current limitations. The nodes will automatically discover and report all huge page resources as schedulable resources. -{{% /capture %}} -{{% capture steps %}} + + ## API @@ -125,5 +126,5 @@ term_id="kube-apiserver" >}} (`--feature-gates=HugePageStorageMediumSize=true`). - NUMA locality guarantees as a feature of quality of service. - LimitRange support. -{{% /capture %}} + diff --git a/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md b/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md index f82e54d364..308a4cf9b8 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md @@ -1,27 +1,28 @@ --- title: Declarative Management of Kubernetes Objects Using Configuration Files -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + Kubernetes objects can be created, updated, and deleted by storing multiple object configuration files in a directory and using `kubectl apply` to recursively create and update those objects as needed. This method retains writes made to live objects without merging the changes back into the object configuration files. `kubectl diff` also gives you a preview of what changes `apply` will make. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Install [`kubectl`](/docs/tasks/tools/install-kubectl/). 
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Trade-offs @@ -999,11 +1000,12 @@ template: controller-selector: "apps/v1/deployment/nginx" ``` -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + * [Managing Kubernetes Objects Using Imperative Commands](/docs/tasks/manage-kubernetes-objects/imperative-command/) * [Imperative Management of Kubernetes Objects Using Configuration Files](/docs/tasks/manage-kubernetes-objects/imperative-config/) * [Kubectl Command Reference](/docs/reference/generated/kubectl/kubectl/) * [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md b/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md index 6b1357a133..dd8b6b0f53 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md @@ -1,23 +1,24 @@ --- title: Managing Kubernetes Objects Using Imperative Commands -content_template: templates/task +content_type: task weight: 30 --- -{{% capture overview %}} + Kubernetes objects can quickly be created, updated, and deleted directly using imperative commands built into the `kubectl` command-line tool. This document explains how those commands are organized and how to use them to manage live objects. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Install [`kubectl`](/docs/tasks/tools/install-kubectl/). {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Trade-offs @@ -159,13 +160,14 @@ kubectl create --edit -f /tmp/srv.yaml 1. The `kubectl create service` command creates the configuration for the Service and saves it to `/tmp/srv.yaml`. 1. The `kubectl create --edit` command opens the configuration file for editing before it creates the object. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Managing Kubernetes Objects Using Object Configuration (Imperative)](/docs/tasks/manage-kubernetes-objects/imperative-config/) * [Managing Kubernetes Objects Using Object Configuration (Declarative)](/docs/tasks/manage-kubernetes-objects/declarative-config/) * [Kubectl Command Reference](/docs/reference/generated/kubectl/kubectl/) * [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/manage-kubernetes-objects/imperative-config.md b/content/en/docs/tasks/manage-kubernetes-objects/imperative-config.md index ec6057cd68..97b62e6f0f 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/imperative-config.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/imperative-config.md @@ -1,24 +1,25 @@ --- title: Imperative Management of Kubernetes Objects Using Configuration Files -content_template: templates/task +content_type: task weight: 40 --- -{{% capture overview %}} + Kubernetes objects can be created, updated, and deleted by using the `kubectl` command-line tool along with an object configuration file written in YAML or JSON. This document explains how to define and manage objects using configuration files. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Install [`kubectl`](/docs/tasks/tools/install-kubectl/). 
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Trade-offs @@ -142,13 +143,14 @@ template: controller-selector: "apps/v1/deployment/nginx" ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Managing Kubernetes Objects Using Imperative Commands](/docs/tasks/manage-kubernetes-objects/imperative-command/) * [Managing Kubernetes Objects Using Object Configuration (Declarative)](/docs/tasks/manage-kubernetes-objects/declarative-config/) * [Kubectl Command Reference](/docs/reference/generated/kubectl/kubectl/) * [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md index c74374a0dc..a7d887da3b 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -1,10 +1,10 @@ --- title: Declarative Management of Kubernetes Objects Using Kustomize -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + [Kustomize](https://github.com/kubernetes-sigs/kustomize) is a standalone tool to customize Kubernetes objects @@ -24,17 +24,18 @@ To apply those Resources, run `kubectl apply` with `--kustomize` or `-k` flag: kubectl apply -k ``` -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Install [`kubectl`](/docs/tasks/tools/install-kubectl/). {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Overview of Kustomize @@ -824,13 +825,14 @@ deployment.apps "dev-my-nginx" deleted | configurations | []string | Each entry in this list should resolve to a file containing [Kustomize transformer configurations](https://github.com/kubernetes-sigs/kustomize/tree/master/examples/transformerconfigs) | | crds | []string | Each entry in this list should resolve to an OpenAPI definition file for Kubernetes types | -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Kustomize](https://github.com/kubernetes-sigs/kustomize) * [Kubectl Book](https://kubectl.docs.kubernetes.io) * [Kubectl Command Reference](/docs/reference/generated/kubectl/kubectl/) * [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/run-application/update-api-object-kubectl-patch.md b/content/en/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch.md similarity index 97% rename from content/en/docs/tasks/run-application/update-api-object-kubectl-patch.md rename to content/en/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch.md index dcfc250df1..60d6ae8099 100644 --- a/content/en/docs/tasks/run-application/update-api-object-kubectl-patch.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch.md @@ -1,25 +1,26 @@ --- title: Update API Objects in Place Using kubectl patch description: Use kubectl patch to update Kubernetes API objects in place. Do a strategic merge patch or a JSON merge patch. -content_template: templates/task -weight: 40 +content_type: task +weight: 50 --- -{{% capture overview %}} + This task shows how to use `kubectl patch` to update an API object in place. 
The exercises in this task demonstrate a strategic merge patch and a JSON merge patch. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Use a strategic merge patch to update a Deployment @@ -330,14 +331,15 @@ create the Deployment object. Other commands for updating API objects include and [kubectl apply](/docs/reference/generated/kubectl/kubectl-commands/#apply). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Kubernetes Object Management](/docs/concepts/overview/working-with-objects/object-management/) * [Managing Kubernetes Objects Using Imperative Commands](/docs/tasks/manage-kubernetes-objects/imperative-command/) * [Imperative Management of Kubernetes Objects Using Configuration Files](/docs/tasks/manage-kubernetes-objects/imperative-config/) * [Declarative Management of Kubernetes Objects Using Configuration Files](/docs/tasks/manage-kubernetes-objects/declarative-config/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/network/validate-dual-stack.md b/content/en/docs/tasks/network/validate-dual-stack.md index 0e6d586bea..1e21af226d 100644 --- a/content/en/docs/tasks/network/validate-dual-stack.md +++ b/content/en/docs/tasks/network/validate-dual-stack.md @@ -4,14 +4,15 @@ reviewers: - khenidak min-kubernetes-server-version: v1.16 title: Validate IPv4/IPv6 dual-stack -content_template: templates/task +content_type: task --- -{{% capture overview %}} + This document shares how to validate IPv4/IPv6 dual-stack enabled Kubernetes clusters. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Provider support for dual-stack networking (Cloud provider or otherwise must be able to provide Kubernetes nodes with routable IPv4/IPv6 network interfaces) * A [network plugin](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) that supports dual-stack (such as Kubenet or Calico) @@ -20,9 +21,9 @@ This document shares how to validate IPv4/IPv6 dual-stack enabled Kubernetes clu {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Validate addressing @@ -158,4 +159,4 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S my-service ClusterIP fe80:20d::d06b 2001:db8:f100:4002::9d37:c0d7 80:31868/TCP 30s ``` -{{% /capture %}} + diff --git a/content/en/docs/tasks/run-application/configure-pdb.md b/content/en/docs/tasks/run-application/configure-pdb.md index d98538c262..8113e07128 100644 --- a/content/en/docs/tasks/run-application/configure-pdb.md +++ b/content/en/docs/tasks/run-application/configure-pdb.md @@ -1,10 +1,10 @@ --- title: Specifying a Disruption Budget for your Application -content_template: templates/task +content_type: task weight: 110 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.5" state="beta" >}} @@ -13,9 +13,10 @@ that your application experiences, allowing for higher availability while permitting the cluster administrator to manage the clusters nodes. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * You are the owner of an application running on a Kubernetes cluster that requires high availability. * You should know how to deploy [Replicated Stateless Applications](/docs/tasks/run-application/run-stateless-application-deployment/) @@ -23,9 +24,9 @@ nodes. * You should have read about [Pod Disruptions](/docs/concepts/workloads/pods/disruptions/). 
* You should confirm with your cluster owner or service provider that they respect Pod Disruption Budgets. -{{% /capture %}} -{{% capture steps %}} + + ## Protecting an Application with a PodDisruptionBudget @@ -34,9 +35,9 @@ nodes. 1. Create a PDB definition as a YAML file. 1. Create the PDB object from the YAML file. -{{% /capture %}} -{{% capture discussion %}} + + ## Identify an Application to Protect @@ -51,7 +52,7 @@ specified by one of the built-in Kubernetes controllers: In this case, make a note of the controller's `.spec.selector`; the same selector goes into the PDBs `.spec.selector`. -From version 1.15 PDBs support custom controllers where the [scale subresource](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#scale-subresource) is enabled. +From version 1.15 PDBs support custom controllers where the [scale subresource](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#scale-subresource) is enabled. You can also use PDBs with pods which are not controlled by one of the above controllers, or arbitrary groups of pods, but there are some restrictions, @@ -238,6 +239,6 @@ You can use a selector which selects a subset or superset of the pods belonging controller. However, when there are multiple PDBs in a namespace, you must be careful not to create PDBs whose selectors overlap. -{{% /capture %}} + diff --git a/content/en/docs/tasks/run-application/delete-stateful-set.md b/content/en/docs/tasks/run-application/delete-stateful-set.md index d37e3ba7a0..7a4a94fab4 100644 --- a/content/en/docs/tasks/run-application/delete-stateful-set.md +++ b/content/en/docs/tasks/run-application/delete-stateful-set.md @@ -6,23 +6,24 @@ reviewers: - janetkuo - smarterclayton title: Delete a StatefulSet -content_template: templates/task +content_type: task weight: 60 --- -{{% capture overview %}} + This task shows you how to delete a {{< glossary_tooltip term_id="StatefulSet" >}}. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * This task assumes you have an application running on your cluster represented by a StatefulSet. -{{% /capture %}} -{{% capture steps %}} + + ## Deleting a StatefulSet @@ -81,12 +82,13 @@ In the example above, the Pods have the label `app=myapp`; substitute your own l If you find that some pods in your StatefulSet are stuck in the 'Terminating' or 'Unknown' states for an extended period of time, you may need to manually intervene to forcefully delete the pods from the apiserver. This is a potentially dangerous task. Refer to [Force Delete StatefulSet Pods](/docs/tasks/run-application/force-delete-stateful-set-pod/) for details. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Learn more about [force deleting StatefulSet Pods](/docs/tasks/run-application/force-delete-stateful-set-pod/). 
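A PodDisruptionBudget is also how you protect a StatefulSet like the ones discussed here from voluntary disruptions. Tying that back to the PDB guidance further above, a minimal sketch for a hypothetical application labeled `app: zookeeper`:

```yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  minAvailable: 2      # voluntary evictions must leave at least 2 Pods running
  selector:
    matchLabels:
      app: zookeeper   # keep this in sync with the controller's .spec.selector
```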
-{{% /capture %}} + diff --git a/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md b/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md index b2b364f5f9..48a61a260d 100644 --- a/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md +++ b/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md @@ -5,22 +5,23 @@ reviewers: - foxish - smarterclayton title: Force Delete StatefulSet Pods -content_template: templates/task +content_type: task weight: 70 --- -{{% capture overview %}} + This page shows how to delete Pods which are part of a {{< glossary_tooltip text="stateful set" term_id="StatefulSet" >}}, and explains the considerations to keep in mind when doing so. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * This is a fairly advanced task and has the potential to violate some of the properties inherent to StatefulSet. * Before proceeding, make yourself familiar with the considerations enumerated below. -{{% /capture %}} -{{% capture steps %}} + + ## StatefulSet considerations @@ -74,10 +75,11 @@ kubectl patch pod -p '{"metadata":{"finalizers":null}}' Always perform force deletion of StatefulSet Pods carefully and with complete knowledge of the risks involved. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Learn more about [debugging a StatefulSet](/docs/tasks/debug-application-cluster/debug-stateful-set/). -{{% /capture %}} + diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index cab3e0af7f..7f3b046b68 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -5,11 +5,11 @@ reviewers: - justinsb - directxman12 title: Horizontal Pod Autoscaler Walkthrough -content_template: templates/task +content_type: task weight: 100 --- -{{% capture overview %}} + Horizontal Pod Autoscaler automatically scales the number of pods in a replication controller, deployment, replica set or stateful set based on observed CPU utilization @@ -17,11 +17,12 @@ in a replication controller, deployment, replica set or stateful set based on ob This document walks you through an example of enabling Horizontal Pod Autoscaler for the php-apache server. For more information on how Horizontal Pod Autoscaler behaves, see the [Horizontal Pod Autoscaler user guide](/docs/tasks/run-application/horizontal-pod-autoscale/). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + This example requires a running Kubernetes cluster and kubectl, version 1.2 or later. [metrics-server](https://github.com/kubernetes-incubator/metrics-server/) monitoring needs to be deployed in the cluster @@ -35,9 +36,9 @@ not related to any Kubernetes object you must have a Kubernetes cluster at versi you must be able to communicate with the API server that provides the external metrics API. See the [Horizontal Pod Autoscaler user guide](/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics) for more details. -{{% /capture %}} -{{% capture steps %}} + + ## Run & expose php-apache server @@ -181,9 +182,9 @@ Here CPU utilization dropped to 0, and so HPA autoscaled the number of replicas Autoscaling the replicas may take a few minutes. 
{{< /note >}}
-{{% /capture %}}
-{{% capture discussion %}}
+
+
## Autoscaling on multiple metrics and custom metrics
@@ -483,4 +484,4 @@ kubectl create -f https://k8s.io/examples/application/hpa/php-apache.yaml
horizontalpodautoscaler.autoscaling/php-apache created
```
-{{% /capture %}}
+
diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md
index f6852845ec..6dc61f8d4f 100644
--- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md
+++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md
@@ -9,11 +9,11 @@ feature:
description: >
Scale your application up and down with a simple command, with a UI, or automatically based on CPU usage.
-content_template: templates/concept
+content_type: concept
weight: 90
---
-{{% capture overview %}}
+
The Horizontal Pod Autoscaler automatically scales the number of pods
in a replication controller, deployment, replica set or stateful set based on observed CPU utilization (or, with
@@ -26,10 +26,10 @@ The resource determines the behavior of the controller.
The controller periodically adjusts the number of replicas in a replication controller or deployment
to match the observed average CPU utilization to the target specified by user.
-{{% /capture %}}
-{{% capture body %}}
+
+
## How does the Horizontal Pod Autoscaler work?
@@ -260,7 +260,7 @@ See [Support for metrics APIs](#support-for-metrics-apis) for the requirements.
By default, the HorizontalPodAutoscaler controller retrieves metrics from a series of APIs. In order for it to access these
APIs, cluster administrators must ensure that:
-* The [API aggregation layer](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/) is enabled.
+* The [API aggregation layer](/docs/tasks/extend-kubernetes/configure-aggregation-layer/) is enabled.
* The corresponding APIs are registered:
@@ -376,7 +376,7 @@ For scaling down the stabilization window is _300_ seconds(or the value of the
for scaling down which allows 100% of the currently running replicas to be removed which
means the scaling target can be scaled down to the minimum allowed replicas.
For scaling up there is no stabilization window. When the metrics indicate that the target should be
-scaled up the target is scaled up immediately. There are 2 policies which. 4 pods or a 100% of the currently
+scaled up the target is scaled up immediately. There are 2 policies where 4 pods or 100% of the currently
running replicas will be added every 15 seconds until the HPA reaches its steady state.
### Example: change downscale stabilization window
@@ -431,12 +431,13 @@ behavior:
selectPolicy: Disabled
```
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
* Design documentation: [Horizontal Pod Autoscaling](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md).
* kubectl autoscale command: [kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands/#autoscale).
* Usage example of [Horizontal Pod Autoscaler](/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/).
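The default scale-up behavior described above can also be written out explicitly. Here is a minimal sketch of the equivalent `behavior` stanza, assuming the `autoscaling/v2beta2` HorizontalPodAutoscaler API used in the examples on this page:

```yaml
behavior:
  scaleUp:
    # No stabilization window: scale up as soon as the metrics call for it.
    stabilizationWindowSeconds: 0
    # Pick whichever policy allows the larger change.
    selectPolicy: Max
    policies:
    # Add up to 4 pods every 15 seconds...
    - type: Pods
      value: 4
      periodSeconds: 15
    # ...or up to 100% of the currently running replicas every 15 seconds.
    - type: Percent
      value: 100
      periodSeconds: 15
```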
-{{% /capture %}} + diff --git a/content/en/docs/tasks/run-application/run-replicated-stateful-application.md b/content/en/docs/tasks/run-application/run-replicated-stateful-application.md index 7a85a74014..2a7d255c2b 100644 --- a/content/en/docs/tasks/run-application/run-replicated-stateful-application.md +++ b/content/en/docs/tasks/run-application/run-replicated-stateful-application.md @@ -7,11 +7,11 @@ reviewers: - kow3ns - smarterclayton title: Run a Replicated Stateful Application -content_template: templates/tutorial +content_type: tutorial weight: 30 --- -{{% capture overview %}} + This page shows how to run a replicated stateful application using a [StatefulSet](/docs/concepts/workloads/controllers/statefulset/) controller. @@ -23,9 +23,10 @@ asynchronous replication. on general patterns for running stateful applications in Kubernetes. {{< /note >}} -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * {{< include "default-storage-class-prereqs.md" >}} @@ -38,18 +39,19 @@ on general patterns for running stateful applications in Kubernetes. * Some familiarity with MySQL helps, but this tutorial aims to present general patterns that should be useful for other systems. -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Deploy a replicated MySQL topology with a StatefulSet controller. * Send MySQL client traffic. * Observe resistance to downtime. * Scale the StatefulSet up and down. -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Deploy MySQL @@ -479,9 +481,10 @@ kubectl delete pvc data-mysql-3 kubectl delete pvc data-mysql-4 ``` -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + 1. Cancel the `SELECT @@server_id` loop by pressing **Ctrl+C** in its terminal, or running the following from another terminal: @@ -522,9 +525,10 @@ kubectl delete pvc data-mysql-4 Some dynamic provisioners (such as those for EBS and PD) also release the underlying resources upon deleting the PersistentVolumes. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [scaling a StatefulSet](/docs/tasks/run-application/scale-stateful-set/). * Learn more about [debugging a StatefulSet](/docs/tasks/debug-application-cluster/debug-stateful-set/). * Learn more about [deleting a StatefulSet](/docs/tasks/run-application/delete-stateful-set/). @@ -532,7 +536,7 @@ kubectl delete pvc data-mysql-4 * Look in the [Helm Charts repository](https://github.com/kubernetes/charts) for other stateful application examples. -{{% /capture %}} + diff --git a/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md b/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md index 777265c68b..4c43948a21 100644 --- a/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md +++ b/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md @@ -1,37 +1,39 @@ --- title: Run a Single-Instance Stateful Application -content_template: templates/tutorial +content_type: tutorial weight: 20 --- -{{% capture overview %}} + This page shows you how to run a single-instance stateful application in Kubernetes using a PersistentVolume and a Deployment. The application is MySQL. -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Create a PersistentVolume referencing a disk in your environment. 
* Create a MySQL Deployment. * Expose MySQL to other pods in the cluster at a known DNS name. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * {{< include "default-storage-class-prereqs.md" >}} -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Deploy MySQL @@ -180,10 +182,11 @@ PersistentVolume when it sees that you deleted the PersistentVolumeClaim. Some dynamic provisioners (such as those for EBS and PD) also release the underlying resource upon deleting the PersistentVolume. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Deployment objects](/docs/concepts/workloads/controllers/deployment/). @@ -193,6 +196,6 @@ underlying resource upon deleting the PersistentVolume. * [Volumes](/docs/concepts/storage/volumes/) and [Persistent Volumes](/docs/concepts/storage/persistent-volumes/) -{{% /capture %}} + diff --git a/content/en/docs/tasks/run-application/run-stateless-application-deployment.md b/content/en/docs/tasks/run-application/run-stateless-application-deployment.md index 68d41b5a83..9e6ed4a25e 100644 --- a/content/en/docs/tasks/run-application/run-stateless-application-deployment.md +++ b/content/en/docs/tasks/run-application/run-stateless-application-deployment.md @@ -1,34 +1,36 @@ --- title: Run a Stateless Application Using a Deployment min-kubernetes-server-version: v1.9 -content_template: templates/tutorial +content_type: tutorial weight: 10 --- -{{% capture overview %}} + This page shows how to run an application using a Kubernetes Deployment object. -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Create an nginx deployment. * Use kubectl to list information about the deployment. * Update the deployment. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Creating and exploring an nginx deployment @@ -146,13 +148,14 @@ which in turn uses a ReplicaSet. Before the Deployment and ReplicaSet were added to Kubernetes, replicated applications were configured using a [ReplicationController](/docs/concepts/workloads/controllers/replicationcontroller/). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Deployment objects](/docs/concepts/workloads/controllers/deployment/). -{{% /capture %}} + diff --git a/content/en/docs/tasks/run-application/scale-stateful-set.md b/content/en/docs/tasks/run-application/scale-stateful-set.md index 462025836d..6e34babf9d 100644 --- a/content/en/docs/tasks/run-application/scale-stateful-set.md +++ b/content/en/docs/tasks/run-application/scale-stateful-set.md @@ -8,15 +8,16 @@ reviewers: - kow3ns - smarterclayton title: Scale a StatefulSet -content_template: templates/task +content_type: task weight: 50 --- -{{% capture overview %}} + This task shows how to scale a StatefulSet. Scaling a StatefulSet refers to increasing or decreasing the number of replicas. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * StatefulSets are only available in Kubernetes version 1.5 or later. To check your version of Kubernetes, run `kubectl version`. @@ -26,9 +27,9 @@ This task shows how to scale a StatefulSet. 
Scaling a StatefulSet refers to incr * You should perform scaling only when you are confident that your stateful application cluster is completely healthy. -{{% /capture %}} -{{% capture steps %}} + + ## Scaling StatefulSets @@ -90,10 +91,11 @@ to reason about scaling operations at the application level in these cases, and perform scaling only when you are sure that your stateful application cluster is completely healthy. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [deleting a StatefulSet](/docs/tasks/run-application/delete-stateful-set/). -{{% /capture %}} + diff --git a/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md b/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md index 73268ff714..499bc1fa90 100644 --- a/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md +++ b/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md @@ -2,18 +2,19 @@ title: Install Service Catalog using Helm reviewers: - chenopis -content_template: templates/task +content_type: task --- -{{% capture overview %}} + {{< glossary_definition term_id="service-catalog" length="all" prepend="Service Catalog is" >}} Use [Helm](https://helm.sh/) to install Service Catalog on your Kubernetes cluster. Up to date information on this process can be found at the [kubernetes-sigs/service-catalog](https://github.com/kubernetes-sigs/service-catalog/blob/master/docs/install.md) repo. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Understand the key concepts of [Service Catalog](/docs/concepts/service-catalog/). * Service Catalog requires a Kubernetes cluster running version 1.7 or higher. * You must have a Kubernetes cluster with cluster DNS enabled. @@ -24,10 +25,10 @@ Use [Helm](https://helm.sh/) to install Service Catalog on your Kubernetes clust * Follow the [Helm install instructions](https://helm.sh/docs/intro/install/). * If you already have an appropriate version of Helm installed, execute `helm init` to install Tiller, the server-side component of Helm. -{{% /capture %}} -{{% capture steps %}} + + ## Add the service-catalog Helm repository Once Helm is installed, add the *service-catalog* Helm repository to your local machine by executing the following command: @@ -105,11 +106,12 @@ helm install svc-cat/catalog --name catalog --namespace catalog ``` {{% /tab %}} {{< /tabs >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * View [sample service brokers](https://github.com/openservicebrokerapi/servicebroker/blob/master/gettingStarted.md#sample-service-brokers). * Explore the [kubernetes-sigs/service-catalog](https://github.com/kubernetes-sigs/service-catalog) project. 
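For reference, adding the repository described above looks roughly like this. The `svc-cat` alias and chart URL follow the upstream Service Catalog instructions; adjust them if your environment mirrors charts elsewhere. The search syntax shown is for Helm 2, matching the `helm init` instructions above; on Helm 3 it would be `helm search repo`.

```shell
# Add the service-catalog chart repository under the alias "svc-cat".
helm repo add svc-cat https://svc-catalog-charts.storage.googleapis.com

# Confirm the catalog chart is visible (Helm 2 syntax).
helm search service-catalog
```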
-{{% /capture %}}
+
diff --git a/content/en/docs/tasks/service-catalog/install-service-catalog-using-sc.md b/content/en/docs/tasks/service-catalog/install-service-catalog-using-sc.md
index 2a50ca2ff8..a45474e297 100644
--- a/content/en/docs/tasks/service-catalog/install-service-catalog-using-sc.md
+++ b/content/en/docs/tasks/service-catalog/install-service-catalog-using-sc.md
@@ -2,10 +2,10 @@ title: Install Service Catalog using SC
reviewers:
- chenopis
-content_template: templates/task
+content_type: task
---
-{{% capture overview %}}
+
{{< glossary_definition term_id="service-catalog" length="all" prepend="Service Catalog is" >}}
You can use the GCP [Service Catalog Installer](https://github.com/GoogleCloudPlatform/k8s-service-catalog#installation)
@@ -14,10 +14,11 @@ Google Cloud projects.
Service Catalog itself can work with any kind of managed service, not just Google Cloud.
-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+
* Understand the key concepts of [Service Catalog](/docs/concepts/service-catalog/).
* Install [Go 1.6+](https://golang.org/dl/) and set the `GOPATH`.
* Install the [cfssl](https://github.com/cloudflare/cfssl) tool needed for generating SSL artifacts.
@@ -27,10 +28,10 @@ Service Catalog itself can work with any kind of managed service, not just Googl
kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=<user-name>
-{{% /capture %}}
-{{% capture steps %}}
+
+
## Install `sc` in your local environment
The installer runs on your local computer as a CLI tool named `sc`.
@@ -71,11 +72,12 @@ If you would like to uninstall Service Catalog from your Kubernetes cluster usin
sc uninstall
```
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
* View [sample service brokers](https://github.com/openservicebrokerapi/servicebroker/blob/master/gettingStarted.md#sample-service-brokers).
* Explore the [kubernetes-incubator/service-catalog](https://github.com/kubernetes-incubator/service-catalog) project.
-{{% /capture %}}
+
diff --git a/content/en/docs/tasks/setup-konnectivity/_index.md b/content/en/docs/tasks/setup-konnectivity/_index.md
deleted file mode 100755
index 09f254eba0..0000000000
--- a/content/en/docs/tasks/setup-konnectivity/_index.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: "Setup Konnectivity Service"
-weight: 20
----
-
diff --git a/content/en/docs/tasks/tls/certificate-rotation.md b/content/en/docs/tasks/tls/certificate-rotation.md
index 3cf55db335..890621c19f 100644
--- a/content/en/docs/tasks/tls/certificate-rotation.md
+++ b/content/en/docs/tasks/tls/certificate-rotation.md
@@ -2,23 +2,24 @@ reviewers:
- jcbsmpsn
- mikedanese
-title: Certificate Rotation
-content_template: templates/task
+title: Configure Certificate Rotation for the Kubelet
+content_type: task
---
-{{% capture overview %}}
+
This page shows how to enable and configure certificate rotation for the kubelet.
-{{% /capture %}}
+
{{< feature-state for_k8s_version="v1.8" state="beta" >}}
-{{% capture prerequisites %}}
+## {{% heading "prerequisites" %}}
+
* Kubernetes version 1.8.0 or later is required
-{{% /capture %}}
-{{% capture steps %}}
+
+
## Overview
@@ -77,6 +78,6 @@ kubelet will retrieve the new signed certificate from the Kubernetes API and
write that to disk. Then it will update the connections it has to the
Kubernetes API to reconnect using the new certificate.
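As a sketch of what enabling kubelet certificate rotation looks like in practice, the kubelet can be started with the `--rotate-certificates` flag alongside its bootstrap kubeconfig. The file paths below are assumptions for illustration; match them to your installation.

```shell
kubelet --rotate-certificates \
  --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \
  --kubeconfig=/etc/kubernetes/kubelet.conf \
  --cert-dir=/var/lib/kubelet/pki
```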
-{{% /capture %}}
+
diff --git a/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md b/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md
index 7cd4cc8be5..5098d353d8 100644
--- a/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md
+++ b/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md
@@ -1,13 +1,13 @@
---
title: Manage TLS Certificates in a Cluster
-content_template: templates/task
+content_type: task
reviewers:
- mikedanese
- beacham
- liggit
---
-{{% capture overview %}}
+
Kubernetes provides a `certificates.k8s.io` API, which lets you provision TLS
certificates signed by a Certificate Authority (CA) that you control. These CA
@@ -23,16 +23,17 @@ CA for this purpose, but you should never rely on this. Do not assume that
these certificates will validate against the cluster root CA.
{{< /note >}}
-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
-{{% /capture %}}
-{{% capture steps %}}
+
+
## Trusting TLS in a Cluster
@@ -222,4 +223,4 @@ enable it, pass the `--cluster-signing-cert-file` and
`--cluster-signing-key-file` parameters to the controller manager with paths
to your Certificate Authority's keypair.
-{{% /capture %}}
+
diff --git a/content/en/docs/tasks/tls/manual-rotation-of-ca-certificates.md b/content/en/docs/tasks/tls/manual-rotation-of-ca-certificates.md
new file mode 100644
index 0000000000..4146608760
--- /dev/null
+++ b/content/en/docs/tasks/tls/manual-rotation-of-ca-certificates.md
@@ -0,0 +1,144 @@
+---
+title: Manual Rotation of CA Certificates
+min-kubernetes-server-version: v1.13
+content_type: task
+---
+
+
+
+This page shows how to manually rotate the certificate authority (CA) certificates.
+
+## {{% heading "prerequisites" %}}
+
+{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+
+
+- For more information about authentication in Kubernetes, see [Authenticating](/docs/reference/access-authn-authz/authentication).
+- For more information about best practices for CA certificates, see [Single root CA](/docs/setup/best-practices/certificates/#single-root-ca).
+
+
+
+## Rotate the CA certificates manually
+
+{{< caution >}}
+
+Make sure to back up your certificate directory along with configuration files and any other necessary files.
+
+This approach assumes operation of the Kubernetes control plane in an HA configuration with multiple API servers. Graceful termination of the API server is also assumed, so clients can cleanly disconnect from one API server and reconnect to another.
+
+Configurations with a single API server will experience unavailability while the API server is being restarted.
+
+{{< /caution >}}
+
+1. Distribute the new CA certificates and private keys (for example: `ca.crt`, `ca.key`, `front-proxy-ca.crt`, and `front-proxy-ca.key`) to all your control plane nodes in the Kubernetes certificates directory.
+
+1. Update the *Kubernetes controller manager's* `--root-ca-file` to include both the old and new CA, then restart the controller manager.
+
+   Any service account created after this point will get secrets that include both old and new CAs.
+
+   {{< note >}}
+
+   Remove the flag `--client-ca-file` from the *Kubernetes controller manager* configuration. You can also replace the existing client CA file or change this configuration item to reference a new, updated CA. [Issue 1350](https://github.com/kubernetes/kubeadm/issues/1350) tracks an issue with the *Kubernetes controller manager* being unable to accept a CA bundle.
+
+   {{< /note >}}
+
+1. Update all service account tokens to include both old and new CA certificates.
+
+   If any pods are started before the new CA is used by API servers, they will get this update and trust both old and new CAs.
+
+   ```shell
+   base64_encoded_ca="$(base64 <path-to-file-with-both-old-and-new-CAs>)"
+
+   for namespace in $(kubectl get ns --no-headers | awk '{print $1}'); do
+       for token in $(kubectl get secrets --namespace "$namespace" --field-selector type=kubernetes.io/service-account-token -o name); do
+           kubectl get $token --namespace "$namespace" -o yaml | \
+             /bin/sed "s/\(ca.crt:\).*/\1 ${base64_encoded_ca}/" | \
+             kubectl apply -f -
+       done
+   done
+   ```
+
+1. Restart all pods using in-cluster configs (for example: kube-proxy, coredns) so they can use the updated certificate authority data from *ServiceAccount* secrets.
+
+   * Make sure coredns, kube-proxy, and other pods using in-cluster configs are working as expected.
+
+1. Append both the old and new CA to the file referenced by the `--client-ca-file` and `--kubelet-certificate-authority` flags in the `kube-apiserver` configuration.
+
+1. Append both the old and new CA to the file referenced by the `--client-ca-file` flag in the `kube-scheduler` configuration.
+
+1. Update certificates for user accounts by replacing the content of `client-certificate-data` and `client-key-data` respectively.
+
+   For information about creating certificates for individual user accounts, see [Configure certificates for user accounts](/docs/setup/best-practices/certificates/#configure-certificates-for-user-accounts).
+
+   Additionally, update the `certificate-authority-data` section in the kubeconfig files with the Base64-encoded old and new certificate authority data.
+
+1. Follow the steps below in a rolling fashion.
+
+   1. Restart any other *[aggregated API servers](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)* or *webhook handlers* to trust the new CA certificates.
+
+   1. Restart the kubelet by updating the file referenced by `clientCAFile` in the kubelet configuration and `certificate-authority-data` in `kubelet.conf` to use both the old and new CA on all nodes.
+
+      If your kubelet is not using client certificate rotation, update `client-certificate-data` and `client-key-data` in `kubelet.conf` on all nodes, along with the kubelet client certificate file usually found in `/var/lib/kubelet/pki`.
+
+
+   1. Restart API servers with the certificates (`apiserver.crt`, `apiserver-kubelet-client.crt` and `front-proxy-client.crt`) signed by the new CA. You can use the existing private keys or new private keys. If you changed the private keys, update these in the Kubernetes certificates directory as well.
+
+      Since the pod trusts both old and new CAs, there will be a momentary disconnection after which the pod's kube client will reconnect to the new API server that uses the certificate signed by the new CA.
+
+      * Restart the scheduler to use the new CAs.
+
+      * Make sure the control plane components log no TLS errors.
+
+      {{< note >}}
+
+      To generate certificates and private keys for your cluster using the `openssl` command line tool, see [Certificates (`openssl`)](/docs/concepts/cluster-administration/certificates/#openssl).
+      You can also use [`cfssl`](/docs/concepts/cluster-administration/certificates/#cfssl).
+
+      {{< /note >}}
+
+   1. Annotate any DaemonSets and Deployments to trigger pod replacement in a safer rolling fashion.
+
+      Example:
+
+      ```shell
+      for namespace in $(kubectl get namespace -o jsonpath='{.items[*].metadata.name}'); do
+          for name in $(kubectl get deployments -n $namespace -o jsonpath='{.items[*].metadata.name}'); do
+              kubectl patch deployment -n ${namespace} ${name} -p '{"spec":{"template":{"metadata":{"annotations":{"ca-rotation": "1"}}}}}';
+          done
+          for name in $(kubectl get daemonset -n $namespace -o jsonpath='{.items[*].metadata.name}'); do
+              kubectl patch daemonset -n ${namespace} ${name} -p '{"spec":{"template":{"metadata":{"annotations":{"ca-rotation": "1"}}}}}';
+          done
+      done
+      ```
+
+      {{< note >}}
+
+      To limit the number of concurrent disruptions that your application experiences, see [configure pod disruption budget](/docs/tasks/run-application/configure-pdb/).
+
+      {{< /note >}}
+
+1. If your cluster is using bootstrap tokens to join nodes, update the ConfigMap `cluster-info` in the `kube-public` namespace with the new CA.
+
+   ```shell
+   base64_encoded_ca="$(base64 /etc/kubernetes/pki/ca.crt)"
+
+   kubectl get cm/cluster-info --namespace kube-public -o yaml | \
+     /bin/sed "s/\(certificate-authority-data:\).*/\1 ${base64_encoded_ca}/" | \
+     kubectl apply -f -
+   ```
+
+1. Verify the cluster functionality.
+
+   1. Validate that the logs from the control plane components, the kubelet, and kube-proxy show no TLS errors; see [looking at the logs](/docs/tasks/debug-application-cluster/debug-cluster/#looking-at-logs).
+
+   1. Validate logs from any aggregated API servers and pods using an in-cluster config.
+
+1. Once the cluster functionality is successfully verified:
+
+   1. Update all service account tokens to include only the new CA certificate.
+
+      * All pods using an in-cluster kubeconfig will eventually need to be restarted to pick up the new SA secret, so that the old CA is completely untrusted.
+
+   1. Restart the control plane components by removing the old CA from the kubeconfig files and from the files referenced by the `--client-ca-file` and `--root-ca-file` flags, respectively.
+
+   1. Restart the kubelet by removing the old CA from the file referenced by the `clientCAFile` flag and from the kubelet kubeconfig file.
diff --git a/content/en/docs/tasks/tools/install-kubectl.md b/content/en/docs/tasks/tools/install-kubectl.md
index 131362109a..6dcad6b39c 100644
--- a/content/en/docs/tasks/tools/install-kubectl.md
+++ b/content/en/docs/tasks/tools/install-kubectl.md
@@ -2,7 +2,7 @@ reviewers:
- mikedanese
title: Install and Set Up kubectl
-content_template: templates/task
+content_type: task
weight: 10
card:
name: tasks
@@ -10,15 +10,16 @@ card:
title: Install kubectl
---
-{{% capture overview %}}
+
The Kubernetes command-line tool, [kubectl](/docs/user-guide/kubectl/), allows
you to run commands against Kubernetes clusters.
You can use kubectl to deploy applications, inspect and manage cluster resources,
and view logs. For a complete list of kubectl operations, see
[Overview of kubectl](/docs/reference/kubectl/overview/).
-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+
You must use a kubectl version that is within one minor version difference of your cluster.
For example, a v1.2 client should work with v1.1, v1.2, and v1.3 master.
Using the latest version of kubectl helps avoid unforeseen issues.
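To check that your client falls within this skew policy, compare the client and server versions directly. The version numbers in the sample output are illustrative only:

```shell
kubectl version --short
# Client Version: v1.18.0
# Server Version: v1.17.3
```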
-{{% /capture %}}
-{{% capture steps %}}
+
+
## Install kubectl on Linux
@@ -508,12 +509,13 @@ compinit
{{< /tabs >}}
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
* [Install Minikube](/docs/tasks/tools/install-minikube/)
* See the [getting started guides](/docs/setup/) for more about creating clusters.
* [Learn how to launch and expose your application.](/docs/tasks/access-application-cluster/service-access-application-cluster/)
* If you need access to a cluster you didn't create, see the [Sharing Cluster Access document](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
* Read the [kubectl reference docs](/docs/reference/kubectl/kubectl/)
-{{% /capture %}}
+
diff --git a/content/en/docs/tasks/tools/install-minikube.md b/content/en/docs/tasks/tools/install-minikube.md
index 50e4436dec..f1f3788141 100644
--- a/content/en/docs/tasks/tools/install-minikube.md
+++ b/content/en/docs/tasks/tools/install-minikube.md
@@ -1,19 +1,20 @@
---
title: Install Minikube
-content_template: templates/task
+content_type: task
weight: 20
card:
name: tasks
weight: 10
---
-{{% capture overview %}}
+
This page shows you how to install [Minikube](/docs/tutorials/hello-minikube), a tool that runs a single-node Kubernetes cluster in a virtual machine on your personal computer.
-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+
{{< tabs name="minikube_before_you_begin" >}}
{{% tab name="Linux" %}}
@@ -53,11 +54,11 @@ Hyper-V Requirements: A hypervisor has been detected. Features required for
{{% /tab %}}
{{< /tabs >}}
-{{% /capture %}}
-{{% capture steps %}}
-# Installing minikube
+
+
+## Installing minikube
{{< tabs name="tab_with_md" >}}
{{% tab name="Linux" %}}
@@ -199,15 +200,6 @@ To install Minikube manually on Windows, download [`minikube-windows-amd64`](htt
{{% /tab %}}
{{< /tabs >}}
-
-{{% /capture %}}
-
-{{% capture whatsnext %}}
-
-* [Running Kubernetes Locally via Minikube](/docs/setup/learning-environment/minikube/)
-
-{{% /capture %}}
-
## Confirm Installation
To confirm successful installation of both a hypervisor and Minikube, you can run the following command to start up a local Kubernetes cluster:
@@ -218,6 +210,10 @@ For setting the `--driver` with `minikube start`, enter the name of the hypervis
{{< /note >}}
+{{< caution >}}
+When using KVM, note that libvirt's default QEMU URI under Debian and some other systems is `qemu:///session` whereas Minikube's default QEMU URI is `qemu:///system`. If this is the case for your system, you will need to pass `--kvm-qemu-uri qemu:///session` to `minikube start` (see the usage sketch below).
+{{< /caution >}}
+
```shell
minikube start --driver=<driver_name>
```
@@ -259,3 +255,8 @@ then you need to clear minikube's local state:
```shell
minikube delete
```
+
+## {{% heading "whatsnext" %}}
+
+
+* [Running Kubernetes Locally via Minikube](/docs/setup/learning-environment/minikube/)
diff --git a/content/en/docs/tutorials/_index.md b/content/en/docs/tutorials/_index.md
index 9f8de2129e..5551e5a8ea 100644
--- a/content/en/docs/tutorials/_index.md
+++ b/content/en/docs/tutorials/_index.md
@@ -2,10 +2,10 @@ title: Tutorials
main_menu: true
weight: 60
-content_template: templates/concept
+content_type: concept
---
-{{% capture overview %}}
+
This section of the Kubernetes documentation contains tutorials.
A tutorial shows how to accomplish a goal that is larger than a single
@@ -14,9 +14,9 @@ each of which has a sequence of steps.
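Following the KVM caution above, a session-scoped start would look like the sketch below. The `kvm2` driver name is the usual choice on Linux, but verify which drivers your Minikube build supports:

```shell
minikube start --driver=kvm2 --kvm-qemu-uri qemu:///session
```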
Before walking through each tutorial, you may want to bookmark the
[Standardized Glossary](/docs/reference/glossary/) page for later reference.
-{{% /capture %}}
-{{% capture body %}}
+
+
## Basics
@@ -64,12 +64,13 @@ Before walking through each tutorial, you may want to bookmark the
* [Using Source IP](/docs/tutorials/services/source-ip/)
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
If you would like to write a tutorial, see
-[Using Page Templates](/docs/home/contribute/page-templates/)
-for information about the tutorial page type and the tutorial template.
+[Content Page Types](/docs/home/contribute/style/page-content-types/)
+for information about the tutorial page type.
+
-{{% /capture %}}
diff --git a/content/en/docs/tutorials/clusters/apparmor.md b/content/en/docs/tutorials/clusters/apparmor.md
index ae1de98ab2..ddad3d54c2 100644
--- a/content/en/docs/tutorials/clusters/apparmor.md
+++ b/content/en/docs/tutorials/clusters/apparmor.md
@@ -2,10 +2,10 @@ reviewers:
- stclair
title: AppArmor
-content_template: templates/tutorial
+content_type: tutorial
---
-{{% capture overview %}}
+
{{< feature-state for_k8s_version="v1.4" state="beta" >}}
@@ -13,7 +13,7 @@ content_template: templates/tutorial
AppArmor is a Linux kernel security module that supplements the standard Linux user and group based
permissions to confine programs to a limited set of resources. AppArmor can be configured for any
application to reduce its potential attack surface and provide greater defense in depth. It is
-configured through profiles tuned to whitelist the access needed by a specific program or container,
+configured through profiles tuned to allow the access needed by a specific program or container,
such as Linux capabilities, network access, file permissions, etc. Each profile can be run in either
*enforcing* mode, which blocks access to disallowed resources, or *complain* mode, which only reports
violations.
@@ -24,9 +24,10 @@ that AppArmor is not a silver bullet and can only do so much to protect against
application code. It is important to provide good, restrictive profiles, and harden your
applications and cluster from other angles as well.
-{{% /capture %}}
-{{% capture objectives %}}
+
+## {{% heading "objectives" %}}
+
* See an example of how to load a profile on a node
* Learn how to enforce the profile on a Pod
* See what happens when a profile is violated
* See what happens when a profile cannot be loaded
-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+
Make sure:
@@ -111,9 +113,9 @@ gke-test-default-pool-239f5d02-x1kf: kubelet is posting ready status. AppArmor e
gke-test-default-pool-239f5d02-xwux: kubelet is posting ready status. AppArmor enabled
```
-{{% /capture %}}
-{{% capture lessoncontent %}}
+
+
## Securing a Pod
@@ -458,13 +460,14 @@ Specifying the list of profiles Pod containers is allowed to specify:
- Although an escaped comma is a legal character in a profile name, it cannot be explicitly allowed here.
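To tie the AppArmor options above together, profiles are applied per container with an annotation of the form `container.apparmor.security.beta.kubernetes.io/<container_name>: localhost/<profile_name>`. A minimal sketch, where the profile name `k8s-apparmor-example-deny-write` is assumed to match a profile already loaded on the node as in this tutorial:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hello-apparmor
  annotations:
    # Confine the "hello" container with the loaded deny-write profile.
    container.apparmor.security.beta.kubernetes.io/hello: localhost/k8s-apparmor-example-deny-write
spec:
  containers:
  - name: hello
    image: busybox
    command: ["sh", "-c", "echo 'Hello AppArmor!' && sleep 1h"]
```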
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
Additional resources:
* [Quick guide to the AppArmor profile language](https://gitlab.com/apparmor/apparmor/wikis/QuickProfileLanguage)
* [AppArmor core policy reference](https://gitlab.com/apparmor/apparmor/wikis/Policy_Layout)
-{{% /capture %}}
+
diff --git a/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md b/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md
index 7ae7fb087b..37f6f9e014 100644
--- a/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md
+++ b/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md
@@ -3,16 +3,17 @@ reviewers:
- eparis
- pmorie
title: Configuring Redis using a ConfigMap
-content_template: templates/tutorial
+content_type: tutorial
---
-{{% capture overview %}}
+
This page provides a real-world example of how to configure Redis using a ConfigMap and builds upon the [Configure Containers Using a ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/) task.
-{{% /capture %}}
-{{% capture objectives %}}
+
+## {{% heading "objectives" %}}
+
* Create a `kustomization.yaml` file containing:
* a ConfigMap generator
@@ -20,18 +21,19 @@ This page provides a real world example of how to configure Redis using a Config
* Apply the directory by running `kubectl apply -k ./`
* Verify that the configuration was correctly applied.
-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
* The example shown on this page works with `kubectl` 1.14 and above.
* Understand [Configure Containers Using a ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/).
-{{% /capture %}}
-{{% capture lessoncontent %}}
+
+
## Real World Example: Configuring Redis using a ConfigMap
@@ -105,12 +107,13 @@ Delete the created pod:
kubectl delete pod redis
```
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
* Learn more about [ConfigMaps](/docs/tasks/configure-pod-container/configure-pod-configmap/).
-{{% /capture %}}
+
diff --git a/content/en/docs/tutorials/hello-minikube.md b/content/en/docs/tutorials/hello-minikube.md
index de6875b582..9ba2de1abf 100644
--- a/content/en/docs/tutorials/hello-minikube.md
+++ b/content/en/docs/tutorials/hello-minikube.md
@@ -1,6 +1,6 @@
---
title: Hello Minikube
-content_template: templates/tutorial
+content_type: tutorial
weight: 5
menu:
main:
@@ -13,7 +13,7 @@ card:
weight: 10
---
-{{% capture overview %}}
+
This tutorial shows you how to run a sample app
on Kubernetes using [Minikube](/docs/setup/learning-environment/minikube) and Katacoda.
@@ -23,23 +23,25 @@ Katacoda provides a free, in-browser Kubernetes environment.
You can also follow this tutorial if you've installed [Minikube locally](/docs/tasks/tools/install-minikube/).
{{< /note >}}
-{{% /capture %}}
-{{% capture objectives %}}
+
+## {{% heading "objectives" %}}
+
* Deploy a sample application to Minikube.
* Run the app.
* View application logs.
-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+
This tutorial provides a container image that uses NGINX to echo back all the requests.
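Returning to the Redis ConfigMap tutorial above, the `kustomization.yaml` named in its objectives is small. A sketch, assuming a local `redis-config` file and a `redis-pod.yaml` manifest as in that tutorial:

```yaml
# kustomization.yaml
configMapGenerator:
- name: example-redis-config
  files:
  - redis-config
resources:
- redis-pod.yaml
```

Applying the directory with `kubectl apply -k ./` generates the ConfigMap (with a content-hash suffix appended to its name) and rewrites references to it in the bundled resources.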
-{{% /capture %}}
-{{% capture lessoncontent %}}
+
+
## Create a Minikube cluster
@@ -272,12 +274,13 @@ Optionally, delete the Minikube VM:
minikube delete
```
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
* Learn more about [Deployment objects](/docs/concepts/workloads/controllers/deployment/).
* Learn more about [Deploying applications](/docs/tasks/run-application/run-stateless-application-deployment/).
* Learn more about [Service objects](/docs/concepts/services-networking/service/).
-{{% /capture %}}
+
diff --git a/content/en/docs/tutorials/services/source-ip.md b/content/en/docs/tutorials/services/source-ip.md
index ca3a2bb409..03a9bb097c 100644
--- a/content/en/docs/tutorials/services/source-ip.md
+++ b/content/en/docs/tutorials/services/source-ip.md
@@ -1,19 +1,20 @@
---
title: Using Source IP
-content_template: templates/tutorial
+content_type: tutorial
min-kubernetes-server-version: v1.5
---
-{{% capture overview %}}
+
Applications running in a Kubernetes cluster find and communicate with each
other, and the outside world, through the Service abstraction. This document
explains what happens to the source IP of packets sent to different types
of Services, and how you can toggle this behavior according to your needs.
-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+
### Terminology
@@ -54,18 +55,19 @@ The output is:
deployment.apps/source-ip-app created
-{{% /capture %}}
-{{% capture objectives %}}
+
+## {{% heading "objectives" %}}
+
* Expose a simple application through various types of Services
* Understand how each Service type handles source IP NAT
* Understand the tradeoffs involved in preserving source IP
-{{% /capture %}}
-{{% capture lessoncontent %}}
+
+
## Source IP for Services with `Type=ClusterIP`
@@ -423,9 +425,10 @@ Load balancers in the second category can leverage the feature described above
by creating an HTTP health check pointing at the port stored in
the `service.spec.healthCheckNodePort` field on the Service.
-{{% /capture %}}
-{{% capture cleanup %}}
+
+## {{% heading "cleanup" %}}
+
Delete the Services:
@@ -439,10 +442,11 @@ Delete the Deployment, ReplicaSet and Pod:
kubectl delete deployment source-ip-app
```
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
* Learn more about [connecting applications via services](/docs/concepts/services-networking/connect-applications-service/)
* Read how to [Create an External Load Balancer](/docs/tasks/access-application-cluster/create-external-load-balancer/)
-{{% /capture %}}
+
diff --git a/content/en/docs/tutorials/stateful-application/basic-stateful-set.md b/content/en/docs/tutorials/stateful-application/basic-stateful-set.md
index e8f3156694..235de6cfaa 100644
--- a/content/en/docs/tutorials/stateful-application/basic-stateful-set.md
+++ b/content/en/docs/tutorials/stateful-application/basic-stateful-set.md
@@ -7,17 +7,18 @@ reviewers:
- kow3ns
- smarterclayton
title: StatefulSet Basics
-content_template: templates/tutorial
+content_type: tutorial
weight: 10
---
-{{% capture overview %}}
+
This tutorial provides an introduction to managing applications with
[StatefulSets](/docs/concepts/workloads/controllers/statefulset/).
It demonstrates how to create, delete, scale, and update the Pods of StatefulSets.
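One recurring step in the Source IP tutorial above is switching a Service to node-local traffic handling so that the client source IP is preserved. A sketch, assuming a Service named `nodeport` as in that tutorial:

```shell
kubectl patch svc nodeport -p '{"spec":{"externalTrafficPolicy":"Local"}}'
```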
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Before you begin this tutorial, you should familiarize yourself with the following Kubernetes concepts. @@ -33,9 +34,10 @@ This tutorial assumes that your cluster is configured to dynamically provision PersistentVolumes. If your cluster is not configured to do so, you will have to manually provision two 1 GiB volumes prior to starting this tutorial. -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + StatefulSets are intended to be used with stateful applications and distributed systems. However, the administration of stateful applications and distributed systems on Kubernetes is a broad, complex topic. In order to @@ -49,9 +51,9 @@ After this tutorial, you will be familiar with the following. * How to delete a StatefulSet * How to scale a StatefulSet * How to update a StatefulSet's Pods -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Creating a StatefulSet Begin by creating a StatefulSet using the example below. It is similar to the @@ -1035,13 +1037,14 @@ Service. ```shell kubectl delete svc nginx ``` -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + You will need to delete the persistent storage media for the PersistentVolumes used in this tutorial. Follow the necessary steps, based on your environment, storage configuration, and provisioning method, to ensure that all storage is reclaimed. -{{% /capture %}} + diff --git a/content/en/docs/tutorials/stateful-application/cassandra.md b/content/en/docs/tutorials/stateful-application/cassandra.md index f55a852abb..3fa56b26ea 100644 --- a/content/en/docs/tutorials/stateful-application/cassandra.md +++ b/content/en/docs/tutorials/stateful-application/cassandra.md @@ -2,11 +2,11 @@ title: "Example: Deploying Cassandra with a StatefulSet" reviewers: - ahmetb -content_template: templates/tutorial +content_type: tutorial weight: 30 --- -{{% capture overview %}} + This tutorial shows you how to run [Apache Cassandra](http://cassandra.apache.org/) on Kubernetes. Cassandra, a database, needs persistent storage to provide data durability (application _state_). In this example, a custom Cassandra seed provider lets the database discover new Cassandra instances as they join the Cassandra cluster. *StatefulSets* make it easier to deploy stateful applications into your Kubernetes cluster. For more information on the features used in this tutorial, see [StatefulSet](/docs/concepts/workloads/controllers/statefulset/). @@ -23,17 +23,19 @@ nodes in the ring. This tutorial deploys a custom Cassandra seed provider that lets the database discover new Cassandra Pods as they appear inside your Kubernetes cluster. {{< /note >}} -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Create and validate a Cassandra headless {{< glossary_tooltip text="Service" term_id="service" >}}. * Use a {{< glossary_tooltip term_id="StatefulSet" >}} to create a Cassandra ring. * Validate the StatefulSet. * Modify the StatefulSet. * Delete the StatefulSet and its {{< glossary_tooltip text="Pods" term_id="pod" >}}. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} To complete this tutorial, you should already have a basic familiarity with {{< glossary_tooltip text="Pods" term_id="pod" >}}, {{< glossary_tooltip text="Services" term_id="service" >}}, and {{< glossary_tooltip text="StatefulSets" term_id="StatefulSet" >}}. 
@@ -48,9 +50,9 @@ minikube start --memory 5120 --cpus=4 ``` {{< /caution >}} -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Creating a headless Service for Cassandra {#creating-a-cassandra-headless-service} In Kubernetes, a {{< glossary_tooltip text="Service" term_id="service" >}} describes a set of {{< glossary_tooltip text="Pods" term_id="pod" >}} that perform the same task. @@ -219,9 +221,10 @@ Use `kubectl edit` to modify the size of a Cassandra StatefulSet. cassandra 4 4 36m ``` -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + Deleting or scaling a StatefulSet down does not delete the volumes associated with the StatefulSet. This setting is for your safety because your data is more valuable than automatically purging all related StatefulSet resources. {{< warning >}} @@ -261,12 +264,13 @@ By using environment variables you can change values that are inserted into `cas | `CASSANDRA_RPC_ADDRESS` | `0.0.0.0` | -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn how to [Scale a StatefulSet](/docs/tasks/run-application/scale-stateful-set/). * Learn more about the [*KubernetesSeedProvider*](https://github.com/kubernetes/examples/blob/master/cassandra/java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java) * See more custom [Seed Provider Configurations](https://git.k8s.io/examples/cassandra/java/README.md) -{{% /capture %}} + diff --git a/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md b/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md index 0f97c2160b..eb389abf36 100644 --- a/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md +++ b/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md @@ -2,7 +2,7 @@ title: "Example: Deploying WordPress and MySQL with Persistent Volumes" reviewers: - ahmetb -content_template: templates/tutorial +content_type: tutorial weight: 20 card: name: tutorials @@ -10,7 +10,7 @@ card: title: "Stateful Example: Wordpress with Persistent Volumes" --- -{{% capture overview %}} + This tutorial shows you how to deploy a WordPress site and a MySQL database using Minikube. Both applications use PersistentVolumes and PersistentVolumeClaims to store data. A [PersistentVolume](/docs/concepts/storage/persistent-volumes/) (PV) is a piece of storage in the cluster that has been manually provisioned by an administrator, or dynamically provisioned by Kubernetes using a [StorageClass](/docs/concepts/storage/storage-classes). A [PersistentVolumeClaim](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) (PVC) is a request for storage by a user that can be fulfilled by a PV. PersistentVolumes and PersistentVolumeClaims are independent from Pod lifecycles and preserve data through restarting, rescheduling, and even deleting Pods. @@ -23,9 +23,10 @@ This deployment is not suitable for production use cases, as it uses single inst The files provided in this tutorial are using GA Deployment APIs and are specific to kubernetes version 1.9 and later. If you wish to use this tutorial with an earlier version of Kubernetes, please update the API version appropriately, or reference earlier versions of this tutorial. 
{{< /note >}} -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Create PersistentVolumeClaims and PersistentVolumes * Create a `kustomization.yaml` with * a Secret generator @@ -34,9 +35,10 @@ The files provided in this tutorial are using GA Deployment APIs and are specifi * Apply the kustomization directory by `kubectl apply -k ./` * Clean up -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} The example shown on this page works with `kubectl` 1.14 and above. @@ -47,9 +49,9 @@ Download the following configuration files: 1. [wordpress-deployment.yaml](/examples/application/wordpress/wordpress-deployment.yaml) -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Create PersistentVolumeClaims and PersistentVolumes @@ -218,9 +220,10 @@ Now you can verify that all objects exist. Do not leave your WordPress installation on this page. If another user finds it, they can set up a website on your instance and use it to serve malicious content.

Either install WordPress by creating a username and password or delete your instance. {{< /warning >}} -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + 1. Run the following command to delete your Secret, Deployments, Services and PersistentVolumeClaims: @@ -228,14 +231,15 @@ Do not leave your WordPress installation on this page. If another user finds it, kubectl delete -k ./ ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about [Introspection and Debugging](/docs/tasks/debug-application-cluster/debug-application-introspection/) * Learn more about [Jobs](/docs/concepts/workloads/controllers/jobs-run-to-completion/) * Learn more about [Port Forwarding](/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) * Learn how to [Get a Shell to a Container](/docs/tasks/debug-application-cluster/get-shell-running-container/) -{{% /capture %}} + diff --git a/content/en/docs/tutorials/stateful-application/zookeeper.md b/content/en/docs/tutorials/stateful-application/zookeeper.md index ee58827f83..3bed3e059c 100644 --- a/content/en/docs/tutorials/stateful-application/zookeeper.md +++ b/content/en/docs/tutorials/stateful-application/zookeeper.md @@ -8,18 +8,19 @@ reviewers: - kow3ns - smarterclayton title: Running ZooKeeper, A Distributed System Coordinator -content_template: templates/tutorial +content_type: tutorial weight: 40 --- -{{% capture overview %}} + This tutorial demonstrates running [Apache Zookeeper](https://zookeeper.apache.org) on Kubernetes using [StatefulSets](/docs/concepts/workloads/controllers/statefulset/), [PodDisruptionBudgets](/docs/concepts/workloads/pods/disruptions/#specifying-a-poddisruptionbudget), and [PodAntiAffinity](/docs/user-guide/node-selection/#inter-pod-affinity-and-anti-affinity-beta-feature). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Before starting this tutorial, you should be familiar with the following Kubernetes concepts. @@ -40,18 +41,19 @@ This tutorial assumes that you have configured your cluster to dynamically provi PersistentVolumes. If your cluster is not configured to do so, you will have to manually provision three 20 GiB volumes before starting this tutorial. -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + After this tutorial, you will know the following. - How to deploy a ZooKeeper ensemble using StatefulSet. - How to consistently configure the ensemble using ConfigMaps. - How to spread the deployment of ZooKeeper servers in the ensemble. - How to use PodDisruptionBudgets to ensure service availability during planned maintenance. - {{% /capture %}} + -{{% capture lessoncontent %}} + ### ZooKeeper Basics @@ -1090,9 +1092,10 @@ node "kubernetes-node-ixsl" uncordoned You can use `kubectl drain` in conjunction with `PodDisruptionBudgets` to ensure that your services remain available during maintenance. If drain is used to cordon nodes and evict pods prior to taking the node offline for maintenance, services that express a disruption budget will have that budget respected. You should always allocate additional capacity for critical services so that their Pods can be immediately rescheduled. -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + - Use `kubectl uncordon` to uncordon all the nodes in your cluster. 
- You will need to delete the persistent storage media for the PersistentVolumes @@ -1100,5 +1103,5 @@ You can use `kubectl drain` in conjunction with `PodDisruptionBudgets` to ensure storage configuration, and provisioning method, to ensure that all storage is reclaimed. -{{% /capture %}} + diff --git a/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md b/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md index 4f4dbda986..2974c77c94 100644 --- a/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md +++ b/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md @@ -1,18 +1,19 @@ --- title: Exposing an External IP Address to Access an Application in a Cluster -content_template: templates/tutorial +content_type: tutorial weight: 10 --- -{{% capture overview %}} + This page shows how to create a Kubernetes Service object that exposes an external IP address. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Install [kubectl](/docs/tasks/tools/install-kubectl/). @@ -24,19 +25,20 @@ external IP address. * Configure `kubectl` to communicate with your Kubernetes API server. For instructions, see the documentation for your cloud provider. -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Run five instances of a Hello World application. * Create a Service object that exposes an external IP address. * Use the Service object to access the running application. -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Creating a service for an application running in five pods @@ -148,10 +150,11 @@ The preceding command creates a Hello Kubernetes! -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + To delete the Service, enter this command: @@ -162,11 +165,12 @@ the Hello World application, enter this command: kubectl delete deployment hello-world -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Learn more about [connecting applications with services](/docs/concepts/services-networking/connect-applications-service/). -{{% /capture %}} + diff --git a/content/en/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md b/content/en/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md index bc991098d5..0c4964a17f 100644 --- a/content/en/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md +++ b/content/en/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md @@ -2,7 +2,7 @@ title: "Example: Add logging and metrics to the PHP / Redis Guestbook example" reviewers: - sftim -content_template: templates/tutorial +content_type: tutorial weight: 21 card: name: tutorials @@ -10,7 +10,7 @@ card: title: "Example: Add logging and metrics to the PHP / Redis Guestbook example" --- -{{% capture overview %}} + This tutorial builds upon the [PHP Guestbook with Redis](/docs/tutorials/stateless-application/guestbook) tutorial. Lightweight log, metric, and network data open source shippers, or *Beats*, from Elastic are deployed in the same Kubernetes cluster as the guestbook. The Beats collect, parse, and index the data into Elasticsearch so that you can view and analyze the resulting operational information in Kibana. 
This example consists of the following components:
* A running instance of the [PHP Guestbook with Redis tutorial](/docs/tutorials/stateless-application/guestbook)
@@ -19,17 +19,19 @@ This tutorial builds upon the [PHP Guestbook with Redis](/docs/tutorials/statele
* Metricbeat
* Packetbeat
-{{% /capture %}}
+
+## {{% heading "objectives" %}}
+
* Start up the PHP Guestbook with Redis.
* Install kube-state-metrics.
* Create a Kubernetes secret.
* Deploy the Beats.
* View dashboards of your logs and metrics.
-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+
{{< include "task-tutorial-prereqs.md" >}}
{{< version-check >}}
@@ -40,9 +42,9 @@ Additionally you need:
* A running Elasticsearch and Kibana deployment. You can use [Elasticsearch Service in Elastic Cloud](https://cloud.elastic.co), run the [download files](https://www.elastic.co/guide/en/elastic-stack-get-started/current/get-started-elastic-stack.html) on your workstation or servers, or the [Elastic Helm Charts](https://github.com/elastic/helm-charts).
-{{% /capture %}}
-{{% capture lessoncontent %}}
+
+
## Start up the PHP Guestbook with Redis
This tutorial builds on the [PHP Guestbook with Redis](/docs/tutorials/stateless-application/guestbook) tutorial. If you have the guestbook application running, then you can monitor that. If you do not have it running, then follow the instructions to deploy the guestbook and do not perform the **Cleanup** steps. Come back to this page when you have the guestbook running.
@@ -366,9 +368,10 @@ kubectl scale --replicas=3 deployment/frontend
See the screenshot, add the indicated filters and then add the columns to the view. You can see the marked ScalingReplicaSet entry; following the events from there to the top of the list shows the image being pulled, the volumes being mounted, the pod starting, and so on.
![Kibana Discover](https://raw.githubusercontent.com/elastic/examples/master/beats-k8s-send-anywhere/scaling-up.png)
-{{% /capture %}}
-{{% capture cleanup %}}
+
+## {{% heading "cleanup" %}}
+
Deleting the Deployments and Services also deletes any running Pods. Use labels to delete multiple resources with one command.
1. Run the following commands to delete all Pods, Deployments, and Services.
@@ -396,11 +399,11 @@ Deleting the Deployments and Services also deletes any running Pods. Use labels
No resources found.
``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn about [tools for monitoring resources](/docs/tasks/debug-application-cluster/resource-usage-monitoring/) * Read more about [logging architecture](/docs/concepts/cluster-administration/logging/) * Read more about [application introspection and debugging](/docs/tasks/debug-application-cluster/) * Read more about [troubleshoot applications](/docs/tasks/debug-application-cluster/resource-usage-monitoring/) -{{% /capture %}} \ No newline at end of file diff --git a/content/en/docs/tutorials/stateless-application/guestbook.md b/content/en/docs/tutorials/stateless-application/guestbook.md index e8c71bc613..f321d5391a 100644 --- a/content/en/docs/tutorials/stateless-application/guestbook.md +++ b/content/en/docs/tutorials/stateless-application/guestbook.md @@ -2,7 +2,7 @@ title: "Example: Deploying PHP Guestbook application with Redis" reviewers: - ahmetb -content_template: templates/tutorial +content_type: tutorial weight: 20 card: name: tutorials @@ -10,32 +10,34 @@ card: title: "Stateless Example: PHP Guestbook with Redis" --- -{{% capture overview %}} + This tutorial shows you how to build and deploy a simple, multi-tier web application using Kubernetes and [Docker](https://www.docker.com/). This example consists of the following components: * A single-instance [Redis](https://redis.io/) master to store guestbook entries * Multiple [replicated Redis](https://redis.io/topics/replication) instances to serve reads * Multiple web frontend instances -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Start up a Redis master. * Start up Redis slaves. * Start up the guestbook frontend. * Expose and view the Frontend Service. * Clean up. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Start up the Redis Master @@ -321,9 +323,10 @@ Scaling up or down is easy because your servers are defined as a Service that us redis-slave-2005841000-phfv9 1/1 Running 0 1h ``` -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + Deleting the Deployments and Services also deletes any running Pods. Use labels to delete multiple resources with one command. 1. Run the following commands to delete all Pods, Deployments, and Services. @@ -358,12 +361,13 @@ Deleting the Deployments and Services also deletes any running Pods. Use labels No resources found. 
``` -{{% /capture %}} - -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Add [ELK logging and monitoring](../guestbook-logs-metrics-with-elk/) to your Guestbook application * Complete the [Kubernetes Basics](/docs/tutorials/kubernetes-basics/) Interactive Tutorials * Use Kubernetes to create a blog using [Persistent Volumes for MySQL and Wordpress](/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/#visit-your-new-wordpress-blog) * Read more about [connecting applications](/docs/concepts/services-networking/connect-applications-service/) * Read more about [Managing Resources](/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively) -{{% /capture %}} + diff --git a/content/es/docs/_index.md b/content/es/docs/_index.md index e036f2e97e..a5cbf56e30 100644 --- a/content/es/docs/_index.md +++ b/content/es/docs/_index.md @@ -3,18 +3,18 @@ reviewers: - raelga title: Documentación weight: 10 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + **¡Bienvenido a la documentación de Kubernetes en Castellano!** Como podrá comprobar, la mayor parte de la documentación aún está disponible solo en inglés, pero no se preocupe, hay un equipo trabajando en la traducción al castellano. -{{% /capture %}} -{{% capture body %}} + + Si quiere participar, puede entrar al canal de Slack [#kubernetes-docs-es](http://slack.kubernetes.io/) y formar parte del equipo detrás de la localización. diff --git a/content/es/docs/concepts/_index.md b/content/es/docs/concepts/_index.md index cd95ebfb9c..fddd126047 100644 --- a/content/es/docs/concepts/_index.md +++ b/content/es/docs/concepts/_index.md @@ -1,17 +1,17 @@ --- title: Conceptos main_menu: true -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + La sección de conceptos te ayudará a conocer los componentes de Kubernetes así como las abstracciones que utiliza para representar tu clúster. Además, te ayudará a obtener un conocimiento más profundo sobre cómo funciona Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Introducción @@ -66,12 +66,13 @@ En un clúster de Kubernetes, los nodos son las máquinas (máquinas virtuales, * [Annotations](/docs/concepts/overview/working-with-objects/annotations/) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Si estás interesado en escribir una página sobre conceptos, revisa [Usando Templates de Páginas](/docs/home/contribute/page-templates/) para obtener información sobre el tipo de página conceptos y la plantilla conceptos. -{{% /capture %}} + diff --git a/content/es/docs/concepts/architecture/cloud-controller.md b/content/es/docs/concepts/architecture/cloud-controller.md index ead5481fd5..4de5b4418a 100644 --- a/content/es/docs/concepts/architecture/cloud-controller.md +++ b/content/es/docs/concepts/architecture/cloud-controller.md @@ -1,10 +1,10 @@ --- title: Conceptos subyacentes del Cloud Controller Manager -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + El concepto del Cloud Controller Manager (CCM) (no confundir con el ejecutable) fue creado originalmente para permitir que Kubernetes y el código específico de proveedores de servicios en la nube evolucionasen de forma independiente. El Cloud Controller Manager se ejecuta a la par con otros componentes maestros como el Kubernetes Controller Manager, el API Server y el planificador. 
También puede ejecutarse como un extra, en cuyo caso se ejecuta por encima de Kubernetes. @@ -16,10 +16,10 @@ En la siguiente imagen, se puede ver la arquitectura de un cluster de Kubernetes ![Arquitectura previa a CCM](/images/docs/pre-ccm-arch.png) -{{% /capture %}} -{{% capture body %}} + + ## Diseño @@ -235,4 +235,4 @@ Los siguientes proveedores de servicios en la nube han implementado CCMs: Instrucciones para configurar y ejecutar el CCM pueden encontrarse [aquí](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager). -{{% /capture %}} + diff --git a/content/es/docs/concepts/architecture/master-node-communication.md b/content/es/docs/concepts/architecture/master-node-communication.md index 379f23589b..5b441b6bcd 100644 --- a/content/es/docs/concepts/architecture/master-node-communication.md +++ b/content/es/docs/concepts/architecture/master-node-communication.md @@ -2,17 +2,17 @@ reviewers: - glo-pena title: Comunicación Nodo-Maestro -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Este documento cataloga las diferentes vías de comunicación entre el nodo máster (en realidad el apiserver) y el clúster de Kubernetes. La intención es permitir a los usuarios personalizar sus instalaciones para proteger sus configuraciones de red de forma que el clúster pueda ejecutarse en una red insegura (o en un proveedor de servicios en la nube con direcciones IP públicas). -{{% /capture %}} -{{% capture body %}} + + ### Clúster a Máster @@ -56,4 +56,3 @@ Kubernetes ofrece soporte para túneles SSH que protegen la comunicación Mást Los túneles SSH se consideran obsoletos, y no deberían utilizarse a menos que se sepa lo que se está haciendo. Se está diseñando un reemplazo para este canal de comunicación. -{{% /capture %}} \ No newline at end of file diff --git a/content/es/docs/concepts/architecture/nodes.md b/content/es/docs/concepts/architecture/nodes.md index 349e9f4a6c..34b0303082 100644 --- a/content/es/docs/concepts/architecture/nodes.md +++ b/content/es/docs/concepts/architecture/nodes.md @@ -2,18 +2,18 @@ reviewers: - glo-pena title: Nodos -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Un nodo es una máquina de trabajo en Kubernetes, previamente conocida como `minion`. Un nodo puede ser una máquina virtual o física, dependiendo del tipo de clúster. Cada nodo está gestionado por el componente máster y contiene los servicios necesarios para ejecutar [pods](/docs/concepts/workloads/pods/pod). Los servicios en un nodo incluyen el [container runtime](/docs/concepts/overview/components/#node-components), kubelet y el kube-proxy. Accede a la sección [The Kubernetes Node](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) en el documento de diseño de arquitectura para más detalle. -{{% /capture %}} -{{% capture body %}} + + ## Estado del Nodo @@ -180,4 +180,4 @@ Para reservar explícitamente recursos en la máquina huésped para procesos no Un nodo es un recurso principal dentro de la REST API de Kubernetes. Más detalles sobre el objeto en la API se pueden encontrar en: [Object Node API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#node-v1-core). 
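Como ilustración de lo anterior (esquema orientativo; el nombre del nodo es hipotético), el recurso Node se puede consultar directamente con `kubectl`:

```shell
# Listar los nodos registrados en el clúster
kubectl get nodes

# Ver el estado y los detalles de un nodo concreto (nombre hipotético)
kubectl describe node mi-nodo-1
```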
-{{% /capture %}} + diff --git a/content/es/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/es/docs/concepts/configuration/organize-cluster-access-kubeconfig.md index dc9f9e14a5..1fa5f7fc58 100644 --- a/content/es/docs/concepts/configuration/organize-cluster-access-kubeconfig.md +++ b/content/es/docs/concepts/configuration/organize-cluster-access-kubeconfig.md @@ -1,10 +1,10 @@ --- title: Organizar el acceso a los clústeres utilizando archivos kubeconfig -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + Utilice los archivos kubeconfig para organizar la información acerca de los clústeres, los usuarios, los Namespaces y los mecanismos de autenticación. La herramienta de @@ -26,9 +26,9 @@ Para obtener instrucciones paso a paso acerca de cómo crear y especificar los a consulte el recurso [Configurar el acceso a múltiples clústeres](/docs/tasks/access-application-cluster/configure-access-multiple-clusters). -{{% /capture %}} -{{% capture body %}} + + ## Compatibilidad con múltiples clústeres, usuarios y mecanismos de autenticación @@ -143,11 +143,12 @@ Las referencias de un archivo en la línea de comandos son relativas al director Dentro de `$HOME/.kube/config`, las rutas relativas se almacenan de manera relativa a la ubicación del archivo kubeconfig, al igual que las rutas absolutas se almacenan absolutamente. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Configurar el acceso a múltiples clústeres](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) * [`kubectl config`](/docs/reference/generated/kubectl/kubectl-commands#config) -{{% /capture %}} + diff --git a/content/es/docs/concepts/containers/container-environment-variables.md b/content/es/docs/concepts/containers/container-environment-variables.md index eb0f9a8d9c..7f35309329 100644 --- a/content/es/docs/concepts/containers/container-environment-variables.md +++ b/content/es/docs/concepts/containers/container-environment-variables.md @@ -3,18 +3,18 @@ reviewers: - astuky - raelga title: Variables de entorno de un Container -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Esta página explica los recursos disponibles para Containers dentro del entorno de un Container. -{{% /capture %}} -{{% capture body %}} + + ## Entorno del Container @@ -50,11 +50,12 @@ FOO_SERVICE_PORT= Los servicios tienen direcciones IP dedicadas y están disponibles para el Container a través de DNS, si el [complemento para DNS](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/) está habilitado. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Más información sobre cómo ejecutar código en respuesta a los cambios de etapa durante el ciclo de vida de un contenedor la puedes encontrar en [Container lifecycle hooks](/docs/concepts/containers/container-lifecycle-hooks/). * Practica [añadiendo handlers a los lifecycle events de un Container](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). 
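Como complemento a la sección anterior sobre el entorno del Container (esbozo orientativo; el nombre del Pod es hipotético), las variables de entorno que Kubernetes inyecta, incluidas las de los servicios, se pueden inspeccionar así:

```shell
# Listar las variables de entorno de servicio visibles dentro de un contenedor
kubectl exec mi-pod -- printenv | grep SERVICE
```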
-{{% /capture %}} + diff --git a/content/es/docs/concepts/containers/container-lifecycle-hooks.md b/content/es/docs/concepts/containers/container-lifecycle-hooks.md index 74fdc721cc..18cee92897 100644 --- a/content/es/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/es/docs/concepts/containers/container-lifecycle-hooks.md @@ -1,18 +1,18 @@ --- title: Container Lifecycle Hooks -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Esta página describe cómo los contenedores gestionados por kubelet pueden utilizar el framework _Container lifecycle hook_ (hook del ciclo de vida del contenedor) para ejecutar código disparado por eventos durante la gestión de su ciclo de vida (lifecycle). -{{% /capture %}} -{{% capture body %}} + + ## Introducción @@ -109,12 +109,13 @@ Events: 1m 22s 2 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Warning FailedPostStartHook ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Aprende más sobre [variables de entorno de contenedores](/docs/concepts/containers/container-environment-variables/). * Practica [adjuntando controladores a los eventos de lifecycle de los contenedores](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). -{{% /capture %}} + diff --git a/content/es/docs/concepts/overview/what-is-kubernetes.md b/content/es/docs/concepts/overview/what-is-kubernetes.md index 5f3660009b..0c53e120c3 100644 --- a/content/es/docs/concepts/overview/what-is-kubernetes.md +++ b/content/es/docs/concepts/overview/what-is-kubernetes.md @@ -2,18 +2,18 @@ reviewers: - raelga title: ¿Qué es Kubernetes? -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 10 --- -{{% capture overview %}} + Esta página ofrece una visión general sobre Kubernetes. -{{% /capture %}} -{{% capture body %}} + + Kubernetes es una plataforma portable y extensible de código abierto para administrar cargas de trabajo y servicios. Kubernetes facilita la automatización y la configuración declarativa. Tiene un ecosistema grande y en rápido crecimiento. @@ -154,11 +154,12 @@ En resumen, los beneficios de usar contenedores incluyen: El nombre **Kubernetes** proviene del griego y significa *timonel* o *piloto*. Es la raíz de *gobernador* y de [cibernética](http://www.etymonline.com/index.php?term=cybernetics). *K8s* es una abreviación que se obtiene al reemplazar las ocho letras "ubernete" con el número 8. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * ¿Estás listo para [empezar](/docs/setup/)? * Para saber más, visita el resto de la [documentación de Kubernetes](/docs/home/). -{{% /capture %}} + diff --git a/content/es/docs/concepts/overview/working-with-objects/annotations.md b/content/es/docs/concepts/overview/working-with-objects/annotations.md index 5cfa070d1b..d4d75cd680 100644 --- a/content/es/docs/concepts/overview/working-with-objects/annotations.md +++ b/content/es/docs/concepts/overview/working-with-objects/annotations.md @@ -1,14 +1,14 @@ --- title: Anotaciones -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + Puedes usar las anotaciones de Kubernetes para adjuntar metadatos arbitrarios a los objetos, de tal forma que clientes como herramientas y librerías puedan obtener fácilmente dichos metadatos. 
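Como ilustración rápida (nombres, claves y valores hipotéticos), una anotación puede añadirse y consultarse con `kubectl`:

```shell
# Añadir una anotación a un Pod existente
kubectl annotate pod mi-pod ejemplo.com/descripcion="metadatos arbitrarios"

# Consultar las anotaciones del objeto
kubectl get pod mi-pod -o jsonpath='{.metadata.annotations}'
```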
-{{% /capture %}} -{{% capture body %}} + + ## Adjuntar metadatos a los objetos Puedes usar las etiquetas o anotaciones para adjuntar metadatos a los objetos de Kubernetes. @@ -68,10 +68,11 @@ Si se omite el prefijo, la clave de la anotación se entiende que es privada par Los prefijos `kubernetes.io/` y `k8s.io/` se reservan para el uso exclusivo de los componentes principales de Kubernetes. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Aprende más acerca de las [Etiquetas y Selectores](/docs/concepts/overview/working-with-objects/labels/). -{{% /capture %}} + diff --git a/content/es/docs/concepts/overview/working-with-objects/common-labels.md b/content/es/docs/concepts/overview/working-with-objects/common-labels.md index 32d543652f..8ef8794d34 100644 --- a/content/es/docs/concepts/overview/working-with-objects/common-labels.md +++ b/content/es/docs/concepts/overview/working-with-objects/common-labels.md @@ -1,9 +1,9 @@ --- title: Etiquetas recomendadas -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Puedes visualizar y gestionar los objetos de Kubernetes con herramientas adicionales a kubectl y el propio tablero de control. Un conjunto común de etiquetas permite a dichas herramientas trabajar de forma interoperable, describiendo los objetos de una forma común que todas las @@ -11,9 +11,9 @@ herramientas puedan entender. Además del soporte a herramientas, las etiquetas recomendadas describen las aplicaciones de forma que puedan ser consultadas. -{{% /capture %}} -{{% capture body %}} + + Los metadatos se organizan en torno al concepto de una _aplicación_. Kubernetes no es una plataforma como servicio (PaaS) y no tiene ni restringe la definición formal de una aplicación. Al contrario, las aplicaciones son informales y se describen mediante el uso de los metadatos. @@ -171,4 +171,3 @@ metadata: Con los objetos `StatefulSet` y `Service` de MySQL te darás cuenta que se incluye la información acerca de MySQL y Wordpress, la aplicación global. -{{% /capture %}} \ No newline at end of file diff --git a/content/es/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/es/docs/concepts/overview/working-with-objects/kubernetes-objects.md index b4d55ba10e..be14b38de6 100644 --- a/content/es/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/es/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -1,17 +1,17 @@ --- title: Entender los Objetos de Kubernetes -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 40 --- -{{% capture overview %}} + Esta página explica cómo se representan los objetos de Kubernetes en la API de Kubernetes, y cómo puedes definirlos en formato `.yaml`. -{{% /capture %}} -{{% capture body %}} + + ## Entender los Objetos de Kubernetes Los *Objetos de Kubernetes* son entidades persistentes dentro del sistema de Kubernetes. Kubernetes utiliza estas entidades para representar el estado de tu clúster. Específicamente, pueden describir: @@ -69,10 +69,11 @@ Por ejemplo, el formato de la `spec` para un objeto de tipo `Pod` lo puedes enco y el formato de la `spec` para un objeto de tipo `Deployment` lo puedes encontrar [aquí](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#deploymentspec-v1-apps). 
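Un esbozo mínimo de cómo se define y aplica un objeto en formato `.yaml` (valores de ejemplo; no forman parte de la página original):

```shell
kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
EOF

# Comprobar que el objeto existe en el clúster
kubectl get deployments
```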
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Aprender más acerca de los objetos básicos más importantes de Kubernetes, como el [Pod](/docs/concepts/workloads/pods/pod-overview/). -{{% /capture %}} + diff --git a/content/es/docs/concepts/overview/working-with-objects/labels.md b/content/es/docs/concepts/overview/working-with-objects/labels.md index ae42584883..18815c01a4 100644 --- a/content/es/docs/concepts/overview/working-with-objects/labels.md +++ b/content/es/docs/concepts/overview/working-with-objects/labels.md @@ -1,10 +1,10 @@ --- title: Etiquetas y Selectores -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Las _etiquetas_ son pares de clave/valor que se asocian a los objetos, como los pods. El propósito de las etiquetas es permitir identificar atributos de los objetos que son relevantes y significativos para los usuarios, pero que no tienen significado para el sistema principal. @@ -22,10 +22,10 @@ Cada objeto puede tener un conjunto de etiquetas clave/valor definidas, donde ca Las etiquetas permiten consultar y monitorizar los objetos de forma más eficiente y son ideales para su uso en UIs y CLIs. El resto de información no identificativa debe ser registrada usando [anotaciones](/docs/concepts/overview/working-with-objects/annotations/). -{{% /capture %}} -{{% capture body %}} + + ## Motivación @@ -201,4 +201,4 @@ selector: Un caso de uso de selección basada en etiquetas es la posibilidad de limitar los nodos en los que un pod puede desplegarse. Ver la documentación sobre [selección de nodo](/docs/concepts/configuration/assign-pod-node/) para más información. -{{% /capture %}} + diff --git a/content/es/docs/concepts/overview/working-with-objects/names.md b/content/es/docs/concepts/overview/working-with-objects/names.md index 75e64b963e..ef241f6aff 100644 --- a/content/es/docs/concepts/overview/working-with-objects/names.md +++ b/content/es/docs/concepts/overview/working-with-objects/names.md @@ -1,10 +1,10 @@ --- title: Nombres -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Todos los objetos de la API REST de Kubernetes se identifican de forma inequívoca mediante un Nombre y un UID. @@ -12,10 +12,10 @@ Para aquellos atributos provistos por el usuario que no son únicos, Kubernetes Echa un vistazo al [documento de diseño de identificadores](https://git.k8s.io/community/contributors/design-proposals/architecture/identifiers.md) para información precisa acerca de las reglas sintácticas de los Nombres y UIDs. -{{% /capture %}} -{{% capture body %}} + + ## Nombres @@ -27,4 +27,4 @@ Por regla general, los nombres de los recursos de Kubernetes no deben exceder la {{< glossary_definition term_id="uid" length="all" >}} -{{% /capture %}} + diff --git a/content/es/docs/concepts/overview/working-with-objects/namespaces.md b/content/es/docs/concepts/overview/working-with-objects/namespaces.md index 5f963c87cc..b3c3c73e14 100644 --- a/content/es/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/es/docs/concepts/overview/working-with-objects/namespaces.md @@ -1,18 +1,18 @@ --- title: Espacios de nombres -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Kubernetes soporta múltiples clústeres virtuales respaldados por el mismo clúster físico. Estos clústeres virtuales se denominan espacios de nombres (namespaces). 
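Para hacerse una idea rápida (esquema orientativo; el nombre del namespace es hipotético), los espacios de nombres se gestionan con `kubectl`:

```shell
# Listar los espacios de nombres existentes
kubectl get namespaces

# Crear un espacio de nombres nuevo
kubectl create namespace mi-namespace

# Consultar recursos dentro de un espacio de nombres concreto
kubectl get pods --namespace=mi-namespace
```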
-{{% /capture %}} -{{% capture body %}} + + ## Cuándo Usar Múltiples Espacios de Nombres @@ -112,4 +112,4 @@ kubectl api-resources --namespaced=true kubectl api-resources --namespaced=false ``` -{{% /capture %}} + diff --git a/content/es/docs/concepts/workloads/controllers/cron-jobs.md b/content/es/docs/concepts/workloads/controllers/cron-jobs.md index 906f5f8630..7be1e4befc 100644 --- a/content/es/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/es/docs/concepts/workloads/controllers/cron-jobs.md @@ -1,10 +1,10 @@ --- title: CronJob -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + Un _Cron Job_ ejecuta tareas, [Jobs](/docs/concepts/workloads/controllers/jobs-run-to-completion/), a intervalos regulares. @@ -19,10 +19,10 @@ Para instrucciones sobre cómo crear y trabajar con trabajos programados, incluyendo definiciones de ejemplo, puedes consultar [Ejecutar tareas automatizadas con trabajos programados](/docs/tasks/job/automated-tasks-with-cron-jobs). -{{% /capture %}} -{{% capture body %}} + + ## Limitaciones de las tareas programadas @@ -58,4 +58,4 @@ Esto ocurre porque el controlador en este caso comprueba cuántas programaciones El CronJob es únicamente responsable de crear los Jobs que coinciden con su programación, y el Job por otro lado es el responsable de gestionar los Pods que representa. -{{% /capture %}} + diff --git a/content/es/docs/concepts/workloads/controllers/daemonset.md b/content/es/docs/concepts/workloads/controllers/daemonset.md index ada033d84c..d52a5d7010 100644 --- a/content/es/docs/concepts/workloads/controllers/daemonset.md +++ b/content/es/docs/concepts/workloads/controllers/daemonset.md @@ -1,10 +1,10 @@ --- title: DaemonSet -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + Un _DaemonSet_ garantiza que todos (o algunos) de los nodos ejecuten una copia de un Pod. Conforme se añaden más nodos al clúster, nuevos Pods son añadidos a los mismos. Conforme se eliminan nodos del clúster, dichos Pods se destruyen. @@ -26,10 +26,10 @@ De forma básica, se debería usar un DaemonSet, cubriendo todos los nodos, por En configuraciones más complejas se podrían usar múltiples DaemonSets para un único tipo de proceso, pero con diferentes parámetros y/o diferentes peticiones de CPU y memoria según el tipo de hardware. -{{% /capture %}} -{{% capture body %}} + + ## Escribir una especificación de DaemonSet @@ -235,4 +235,4 @@ del número de réplicas y las actualizaciones continuas son mucho más importan Utiliza un DaemonSet cuando es importante que una copia de un Pod siempre se ejecute en cada uno de los nodos, y cuando se necesite que arranque antes que el resto de Pods. -{{% /capture %}} + diff --git a/content/es/docs/concepts/workloads/controllers/replicationcontroller.md b/content/es/docs/concepts/workloads/controllers/replicationcontroller.md index 657523a650..970eb4e8ec 100644 --- a/content/es/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/es/docs/concepts/workloads/controllers/replicationcontroller.md @@ -8,11 +8,11 @@ feature: mata los contenedores que no responden a tus pruebas de salud definidas, y no los expone a los clientes hasta que no estén listos para servirse. 
-content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< note >}} Hoy en día la forma recomendada de configurar la replicación es con un [`Deployment`](/docs/concepts/workloads/controllers/deployment/) que configura un [`ReplicaSet`](/docs/concepts/workloads/controllers/replicaset/). @@ -22,10 +22,10 @@ Un _ReplicationController_ garantiza que un número determinado de réplicas se en todo momento. En otras palabras, un ReplicationController se asegura que un pod o un conjunto homogéneo de pods siempre esté activo y disponible. -{{% /capture %}} -{{% capture body %}} + + ## Cómo Funciona un ReplicationController @@ -327,6 +327,6 @@ terminarlo cuando el servidor está listo para reiniciarse/apagarse. Lee [Ejecutar Aplicaciones sin Estado con un ReplicationController](/docs/tutorials/stateless-application/run-stateless-ap-replication-controller/). -{{% /capture %}} + diff --git a/content/es/docs/concepts/workloads/controllers/statefulset.md b/content/es/docs/concepts/workloads/controllers/statefulset.md index 390fdc21fe..1211160545 100644 --- a/content/es/docs/concepts/workloads/controllers/statefulset.md +++ b/content/es/docs/concepts/workloads/controllers/statefulset.md @@ -1,10 +1,10 @@ --- title: StatefulSets -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Un StatefulSet es el objeto de la API workload que se usa para gestionar aplicaciones con estado. @@ -13,9 +13,9 @@ Los StatefulSets son estables (GA) en la versión 1.9. {{< /note >}} {{< glossary_definition term_id="statefulset" length="all" >}} -{{% /capture %}} -{{% capture body %}} + + ## Usar StatefulSets @@ -257,11 +257,12 @@ Antes de revertir la plantilla, debes también eliminar cualquier Pod que el Sta intentando ejecutar con la configuración incorrecta. El StatefulSet comenzará entonces a recrear los Pods usando la plantilla revertida. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Sigue el ejemplo de cómo [desplegar una aplicación con estado](/docs/tutorials/stateful-application/basic-stateful-set/). * Sigue el ejemplo de cómo [desplegar Cassandra con StatefulSets](/docs/tutorials/stateful-application/cassandra/). -{{% /capture %}} + diff --git a/content/es/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/es/docs/concepts/workloads/controllers/ttlafterfinished.md index cd0cbda2e3..101004f3ec 100644 --- a/content/es/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/es/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -1,10 +1,10 @@ --- title: Controlador TTL para Recursos Finalizados -content_template: templates/concept +content_type: concept weight: 65 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.12" state="alpha" >}} @@ -19,12 +19,12 @@ Descargo de responsabilidad Alpha: esta característica está actualmente en versión alfa y se habilita mediante la feature gate `TTLAfterFinished`. -{{% /capture %}} -{{% capture body %}} + + ## Controlador TTL @@ -74,12 +74,13 @@ En Kubernetes, se necesita ejecutar NTP en todos los nodos para evitar este problema. Los relojes no siempre son correctos, pero la diferencia debería ser muy pequeña. Ten presente este riesgo cuando pongas un valor distinto de cero para el TTL. 
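Un esbozo mínimo del mecanismo descrito, asumiendo que la feature gate `TTLAfterFinished` está habilitada en el clúster (el Job es el ejemplo habitual de la documentación; los nombres son ilustrativos):

```shell
kubectl apply -f - <<EOF
apiVersion: batch/v1
kind: Job
metadata:
  name: pi-con-ttl
spec:
  # El Job se limpia automáticamente 100 segundos después de finalizar
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
EOF
```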
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Limpiar Jobs automáticamente](/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically) [Documento de diseño](https://github.com/kubernetes/community/blob/master/keps/sig-apps/0026-ttl-after-finish.md) -{{% /capture %}} + diff --git a/content/es/docs/concepts/workloads/pods/ephemeral-containers.md b/content/es/docs/concepts/workloads/pods/ephemeral-containers.md index dbf353db43..1b939c969e 100644 --- a/content/es/docs/concepts/workloads/pods/ephemeral-containers.md +++ b/content/es/docs/concepts/workloads/pods/ephemeral-containers.md @@ -3,11 +3,11 @@ reviewers: - astuky - raelga title: Containers Efímeros -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + {{< feature-state state="alpha" >}} @@ -24,9 +24,9 @@ Deprecación de Kubernetes](/docs/reference/using-api/deprecation-policy/), esta alfa puede variar significativamente en el futuro o ser eliminada por completo. {{< /warning >}} -{{% /capture %}} -{{% capture body %}} + + ## Entendiendo los Containers efímeros @@ -211,4 +211,4 @@ PID USER TIME COMMAND 29 root 0:00 ps auxww ``` -{{% /capture %}} + diff --git a/content/es/docs/concepts/workloads/pods/pod.md b/content/es/docs/concepts/workloads/pods/pod.md new file mode 100644 index 0000000000..4c6b5c7498 --- /dev/null +++ b/content/es/docs/concepts/workloads/pods/pod.md @@ -0,0 +1,154 @@ +--- +reviewers: +title: Pods +content_template: templates/concept +weight: 20 +--- + +{{% capture overview %}} + +Los _Pods_ son las unidades de computación desplegables más pequeñas que se pueden crear y gestionar en Kubernetes. + +{{% /capture %}} + + +{{% capture body %}} + +## ¿Qué és un Pod? + +Un _Pod_ (como en una vaina de ballenas o vaina de guisantes) es un grupo de uno o más contenedores (como contenedores Docker), con almacenamiento/red compartidos, y unas especificaciones de cómo ejecutar los contenedores. Los contenidos de un Pod son siempre coubicados, coprogramados y ejecutados en un contexto compartido. Un Pod modela un "host lógico" específico de la aplicación: contiene uno o más contenedores de aplicaciones relativamente entrelazados. Antes de la llegada de los contenedores, ejecutarse en la misma máquina física o virtual significaba ser ejecutado en el mismo host lógico. + +Mientras que Kubernetes soporta más {{}} a parte de Docker, este último es el más conocido y ayuda a describir Pods en términos de Docker. + +El contexto compartido de un Pod es un conjunto de namespaces de Linux, cgroups y, potencialmente, otras facetas de aislamiento, las mismas cosas que aíslan un contenedor Docker. Dentro del contexto de un Pod, las aplicaciones individuales pueden tener más subaislamientos aplicados. + +Los contenedores dentro de un Pod comparten dirección IP y puerto, y pueden encontrarse a través de `localhost`. También pueden comunicarse entre sí mediante comunicaciones estándar entre procesos, como semáforos de SystemV o la memoria compartida POSIX. Los contenedores en diferentes Pods tienen direcciones IP distintas y no pueden comunicarse por IPC sin [configuración especial](/es/docs /concepts/policy/pod-security-policy/). +Estos contenedores normalmente se comunican entre sí a través de las direcciones IP del Pod. + +Las aplicaciones dentro de un Pod también tienen acceso a {{}} compartidos, que se definen como parte de un Pod y están disponibles para ser montados en el sistema de archivos de cada aplicación. 
+ +En términos de [Docker](https://www.docker.com/), un Pod se modela como un grupo de contenedores de Docker con namespaces y volúmenes de sistemas de archivos compartidos. + +Al igual que los contenedores de aplicaciones individuales, los Pods se consideran entidades relativamente efímeras (en lugar de duraderas). Como se explica en [ciclo de vida del pod](/es/docs/concepts/workloads/pods/pod-lifecycle/), los Pods se crean, se les asigna un identificador único (UID) y se planifican en nodos donde permanecen hasta su finalización (según la política de reinicio) o supresión. Si un {{< glossary_tooltip text="nodo" term_id="node" >}} muere, los Pods programados para ese nodo se programan para su eliminación después de un período de tiempo de espera. Un Pod dado (definido por su UID) no se "replanifica" a un nuevo nodo; en su lugar, puede reemplazarse por un Pod idéntico, con incluso el mismo nombre si se desea, pero con un nuevo UID (consulte [controlador de replicación](/es/docs/concepts/workloads/controllers/replicationcontroller/) para obtener más detalles). + +Cuando se dice que algo tiene la misma vida útil que un Pod, como un volumen, significa que existe mientras exista ese Pod (con ese UID). Si ese Pod se elimina por cualquier motivo, incluso si se crea un reemplazo idéntico, el recurso relacionado (por ejemplo, el volumen) también se destruye y se crea de nuevo. +{{< figure src="/images/docs/pod.svg" title="diagrama de Pod" width="50%" >}} + +*Un Pod de múltiples contenedores que contiene un extractor de archivos y un servidor web que utiliza un volumen persistente para el almacenamiento compartido entre los contenedores.* + +## Motivación para los Pods + +### Gestión + +Los Pods son un modelo del patrón de múltiples procesos de cooperación que forman una unidad de servicio cohesiva. Simplifican la implementación y la administración de las aplicaciones proporcionando una abstracción de mayor nivel que el conjunto de las aplicaciones que lo constituyen. Los Pods sirven como unidad de despliegue, escalado horizontal y replicación. La colocación (coprogramación), el destino compartido (por ejemplo, la finalización), la replicación coordinada, el uso compartido de recursos y la gestión de dependencias se controlan automáticamente para los contenedores en un Pod. + +### Recursos compartidos y comunicación + +Los Pods permiten el intercambio de datos y la comunicación entre los contenedores que lo constituyen. + +Todas las aplicaciones en un Pod utilizan el mismo namespace de red (la misma IP y puerto) y, por lo tanto, pueden "encontrarse" entre sí y comunicarse utilizando `localhost`. +Debido a esto, las aplicaciones en un Pod deben coordinar su uso de puertos. Cada Pod tiene una dirección IP en un espacio de red compartido que tiene comunicación completa con otros servidores físicos y Pods a través de la red. + +Los contenedores dentro del Pod ven que el hostname del sistema es el mismo que el `nombre` configurado para el Pod. Hay más información sobre esto en la sección [networking](/es/docs/concepts/cluster-administration/networking/). + +Además de definir los contenedores de aplicaciones que se ejecutan en el Pod, el Pod especifica un conjunto de volúmenes de almacenamiento compartido. Los volúmenes permiten que los datos sobrevivan a reinicios de contenedores y se compartan entre las aplicaciones dentro del Pod. 
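Un esbozo mínimo de lo anterior (nombres hipotéticos): dos contenedores del mismo Pod comparten un volumen `emptyDir` y se comunican a través del sistema de archivos:

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-volumen-compartido
spec:
  volumes:
  - name: datos-compartidos
    emptyDir: {}
  containers:
  - name: escritor
    image: busybox
    command: ["sh", "-c", "echo hola > /datos/saludo.txt && sleep 3600"]
    volumeMounts:
    - name: datos-compartidos
      mountPath: /datos
  - name: lector
    image: busybox
    command: ["sh", "-c", "sleep 5; cat /datos/saludo.txt; sleep 3600"]
    volumeMounts:
    - name: datos-compartidos
      mountPath: /datos
EOF
```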
+ +## Usos de Pods + +Los Pods pueden ser usados para alojar pilas de aplicaciones integradas (por ejemplo, LAMP), pero su objetivo principal es apoyar los programas de ayuda coubicados y coadministrados, como: + +* sistemas de gestión de contenido, loaders de datos y archivos, gestores de caché locales, etc. +* copia de seguridad de registro y punto de control, compresión, rotación, captura de imágenes, etc. +* observadores de cambio de datos, adaptadores de registro y monitoreo, publicadores de eventos, etc. +* proxies, bridges y adaptadores. +* controladores, configuradores y actualizadores. + +Los Pods individuales no están diseñados para ejecutar varias instancias de la misma aplicación, en general. + +Para una explicación más detallada, ver [El sistema distribuido ToolKit: Patrones para Contenedores multiaplicación](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns). + +## Alternativas + +_¿Por qué simplemente no ejecutar múltiples programas en un solo contenedor de Docker?_ + +1. Transparencia. Hacer visibles los contenedores dentro del Pod + a la infraestructura permite que esta brinde servicios, como gestión de procesos + y monitoreo de recursos, a los contenedores, facilitando una + serie de comodidades a los usuarios. +1. Desacople de dependencias de software. Los contenedores individuales pueden ser + versionados, reconstruidos y redistribuidos independientemente. Kubernetes podría incluso soportar + actualizaciones en vivo de contenedores individuales en un futuro. +1. Facilidad de uso. Los usuarios no necesitan ejecutar sus propios administradores de procesos, + para propagación de señales, códigos de salida, etc. +1. Eficiencia. Debido a que la infraestructura asume más responsabilidad, + los contenedores pueden ser más livianos. + +_¿Por qué no admitir la planificación conjunta de contenedores por afinidad?_ + +Ese enfoque proporcionaría la ubicación conjunta, pero no la mayor parte de +los beneficios de los Pods, como compartir recursos, IPC, compartir el destino garantizado y +gestión simplificada. + +## Durabilidad de pods (o su ausencia) + +Los Pods no están destinados a ser tratados como entidades duraderas. No sobrevivirán a errores de planificación, caídas de nodo u otros desalojos, ya sea por falta de recursos o en el caso de mantenimiento de nodos. + +En general, los usuarios no deberían necesitar crear Pods directamente, deberían +usar siempre controladores incluso para Pods individuales, como por ejemplo, los +[Deployments](/es/docs/concepts/workloads/controllers/deployment/). +Los controladores proporcionan autorrecuperación con un alcance de clúster, así como replicación +y gestión de despliegue. +Otros controladores como los [StatefulSet](/es/docs/concepts/workloads/controllers/statefulset/) +pueden también proporcionar soporte para Pods que necesiten persistir el estado. + +El uso de API colectivas como la principal primitiva de cara al usuario es relativamente común entre los sistemas de planificación de clúster, incluyendo [Borg](https://research.google.com/pubs/pub43438.html), [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html), [Aurora](http://aurora.apache.org/documentation/latest/reference/configuration/#job-schema), y [Tupperware](http://www.slideshare.net/Docker/aravindnarayanan-facebook140613153626phpapp02-37588997). 
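Siguiendo la recomendación anterior de no crear Pods sueltos (comandos estándar de `kubectl`; los nombres son ilustrativos):

```shell
# Crear un Deployment en lugar de un Pod individual
kubectl create deployment hola-mundo --image=nginx

# El controlador gestiona la replicación y el reemplazo de los Pods
kubectl scale deployment hola-mundo --replicas=3
```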
+ +El Pod se expone como primitiva para facilitar: + +* planificación y capacidad de conexión del controlador +* soporte para operaciones a nivel de Pod sin la necesidad de "proxy" a través de las API del controlador +* desacople de la vida útil del Pod de la vida útil del controlador, como para el arranque +* desacople de controladores y servicios, el endpoint del controlador solo mira Pods +* composición limpia de funcionalidad a nivel de Kubelet con funcionalidad a nivel de clúster, Kubelet es efectivamente el "controlador de Pod" +* aplicaciones en alta disponibilidad, que esperan que los Pods sean reemplazados antes de su finalización y ciertamente antes de su eliminación, como en el caso de desalojos planificados o descarga previa de imágenes. + +## Finalización de Pods + +Debido a que los Pods representan procesos en ejecución en los nodos del clúster, es importante permitir que esos procesos finalicen de forma correcta cuando ya no se necesiten (en lugar de ser detenidos bruscamente con una señal de KILL). Los usuarios deben poder solicitar la eliminación y saber cuándo finalizan los procesos, pero también deben poder asegurarse de que las eliminaciones finalmente se completen. Cuando un usuario solicita la eliminación de un Pod, el sistema registra el período de gracia previsto antes de que el Pod pueda ser eliminado de forma forzada, y se envía una señal TERM al proceso principal en cada contenedor. Una vez que el período de gracia ha expirado, la señal KILL se envía a esos procesos y el Pod se elimina del servidor API. Si se reinicia Kubelet o el administrador de contenedores mientras se espera que finalicen los procesos, la terminación se volverá a intentar con el período de gracia completo. + +Un ejemplo del ciclo de terminación de un Pod: + +1. El usuario envía un comando para eliminar el Pod, con un período de gracia predeterminado (30s) +1. El Pod en el servidor API se actualiza con el tiempo a partir del cual el Pod se considera "muerto" junto con el período de gracia. +1. El Pod aparece como "Terminando" cuando se consulta con los comandos del cliente +1. (simultáneo con 3) Cuando el Kubelet ve que un Pod se ha marcado como terminado porque se ha configurado el tiempo en 2, comienza el proceso de apagado del Pod. + 1. Si uno de los contenedores del Pod ha definido un [preStop hook](/es/docs/concepts/containers/container-lifecycle-hooks/#hook-details), se invoca dentro del contenedor. Si el hook `preStop` todavía se está ejecutando después de que expire el período de gracia, el paso 2 se invoca con un pequeño período de gracia extendido (2s). + 1. El contenedor recibe la señal TERM. Tenga en cuenta que no todos los contenedores en el Pod recibirán la señal TERM al mismo tiempo y cada uno puede requerir un hook `preStop` si el orden en el que se cierra es importante. +1. (simultáneo con 3) Pod se elimina de la lista de endpoints del servicio, y ya no se considera parte del conjunto de Pods en ejecución para controladores de replicación. Los Pods que se apagan lentamente no pueden continuar sirviendo el tráfico ya que los balanceadores de carga (como el proxy de servicio) los eliminan de sus rotaciones. +1. Cuando expira el período de gracia, todos los procesos que todavía se ejecutan en el Pod se eliminan con SIGKILL. +1. El Kubelet terminará de eliminar el Pod en el servidor API configurando el período de gracia 0 (eliminación inmediata). El Pod desaparece de la API y ya no es visible desde el cliente. 
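Como ilustración del flujo anterior (el nombre del Pod es hipotético), una eliminación con período de gracia personalizado:

```shell
# Solicitar la eliminación con un período de gracia de 60 segundos
kubectl delete pod mi-pod --grace-period=60

# Mientras tanto, el Pod aparece como "Terminating"
kubectl get pod mi-pod
```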
+ +Por defecto, todas las eliminaciones se realizan correctamente en 30 segundos. El comando `kubectl delete` admite la opción `--grace-period=<segundos>` que permite al usuario anular el valor predeterminado y especificar su propio valor. El valor `0` [fuerza la eliminación](/es/docs/concepts/workloads/pods/pod/#force-deletion-of-pods) del Pod. +Debe especificar un indicador adicional `--force` junto con `--grace-period=0` para realizar eliminaciones forzadas. + +### Forzar destrucción de Pods + +La eliminación forzada de un Pod se define como la eliminación de un Pod del estado del clúster y etcd inmediatamente. Cuando se realiza una eliminación forzada, el apiserver no espera la confirmación del kubelet de que el Pod ha finalizado en el nodo en el que se estaba ejecutando. Elimina el Pod en la API inmediatamente para que se pueda crear un nuevo Pod con el mismo nombre. En el nodo, los Pods que están configurados para terminar de inmediato recibirán un pequeño período de gracia antes de ser eliminados de forma forzada. + +Estas eliminaciones pueden ser potencialmente peligrosas para algunos Pods y deben realizarse con precaución. En el caso de Pods de StatefulSets, consulte la documentación de la tarea para [eliminando Pods de un StatefulSet](/es/docs/tasks/run-application/force-delete-stateful-set-pod/). + +## Modo privilegiado para Pods + +Cualquier contenedor en un Pod puede habilitar el modo privilegiado, utilizando el indicador `privileged` en el [contexto de seguridad](/docs/tasks/configure-pod-container/security-context/) de la especificación del contenedor. Esto es útil para contenedores que desean usar capacidades de Linux como manipular la pila de red y acceder a dispositivos. Los procesos dentro del contenedor obtienen casi los mismos privilegios que están disponibles para los procesos fuera de un contenedor. Con el modo privilegiado, debería ser más fácil escribir complementos de red y volumen como Pods separados que no necesitan compilarse en el kubelet. + +{{< note >}} +El {{< glossary_tooltip text="runtime de contenedores" term_id="container-runtime" >}} debe admitir el concepto de un contenedor privilegiado para que esta configuración sea relevante. +{{< /note >}} + +## API + +Pod es un recurso de nivel superior en la API REST de Kubernetes. +La definición de [objeto de API Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) +describe el objeto en detalle. + + + diff --git a/content/es/docs/concepts/workloads/pods/podpreset.md b/content/es/docs/concepts/workloads/pods/podpreset.md index 5e38b534b6..87cd6bb83f 100644 --- a/content/es/docs/concepts/workloads/pods/podpreset.md +++ b/content/es/docs/concepts/workloads/pods/podpreset.md @@ -2,19 +2,19 @@ reviewers: - raelga title: Pod Preset -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + Esta página provee una descripción general de los Pod Presets, los cuales son los objetos que se utilizan para inyectar cierta información en los Pods en el momento de la creación. Esta información puede incluir secretos, volúmenes, montajes de volúmenes y variables de entorno. -{{% /capture %}} -{{% capture body %}} + + ## Entendiendo los Pod Presets Un `Pod Preset` es un recurso de la API utilizado para poder inyectar requerimientos @@ -86,10 +86,11 @@ Con el fin de utilizar los Pod Presets en un clúster debe asegurarse de lo sigu 3. Que se han definido los Pod Presets mediante la creación de objetos `PodPreset` en el namespace que se utilizará. 
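Un esbozo mínimo de un objeto `PodPreset` (API alfa `settings.k8s.io/v1alpha1`; asume que el admission controller correspondiente está habilitado y los nombres y valores son ilustrativos):

```shell
kubectl apply -f - <<EOF
apiVersion: settings.k8s.io/v1alpha1
kind: PodPreset
metadata:
  name: ejemplo-preset
spec:
  # Se aplica a los Pods cuya etiqueta coincida con el selector
  selector:
    matchLabels:
      role: frontend
  env:
  - name: DB_PORT
    value: "6379"
EOF
```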
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Inyectando datos en un Pod usando PodPreset](/docs/tasks/inject-data-application/podpreset/) -{{% /capture %}} + diff --git a/content/es/docs/contribute/_index.md b/content/es/docs/contribute/_index.md index dc9de6efaa..46157bab39 100644 --- a/content/es/docs/contribute/_index.md +++ b/content/es/docs/contribute/_index.md @@ -1,12 +1,12 @@ --- -content_template: templates/concept +content_type: concept title: Contribuir a la documentación de Kubernetes linktitle: Contribuir main_menu: true weight: 80 --- -{{% capture overview %}} + Kubernetes es posible gracias a la participación de la comunidad y la documentación es vital para facilitar el acceso al proyecto. @@ -22,7 +22,7 @@ aprender sobre nosotros, visite la sección [comunidad de Kubernetes](/community Para obtener información sobre cómo escribir documentación de Kubernetes, consulte la [guía de estilo](/docs/contribute/style/style-guide/). -{{% capture body %}} + ## Tipos de contribuidores @@ -82,4 +82,4 @@ para proporcionar un punto de partida. - Proponer mejoras a los tests de la documentación - Proponer mejoras al sitio web de Kubernetes y otras herramientas -{{% /capture %}} + diff --git a/content/es/docs/reference/_index.md b/content/es/docs/reference/_index.md index ae49836e5f..070cb93765 100644 --- a/content/es/docs/reference/_index.md +++ b/content/es/docs/reference/_index.md @@ -5,16 +5,16 @@ approvers: linkTitle: "Referencia" main_menu: true weight: 70 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Esta sección de la documentación de Kubernetes contiene información de referencia. -{{% /capture %}} -{{% capture body %}} + + ## Información de referencia sobre la API @@ -61,4 +61,4 @@ Un archivo de los documentos de diseño para la funcionalidad de Kubernetes. Puedes empezar por [Arquitectura de Kubernetes](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md) y [Vista general del diseño de Kubernetes](https://git.k8s.io/community/contributors/design-proposals). -{{% /capture %}} + diff --git a/content/es/docs/setup/_index.md b/content/es/docs/setup/_index.md index 0febe853f6..1aed608b20 100644 --- a/content/es/docs/setup/_index.md +++ b/content/es/docs/setup/_index.md @@ -3,10 +3,10 @@ no_issue: true title: Setup main_menu: true weight: 30 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + En esta sección encontrarás toda la información necesaria para poder identificar **la solución que mejor se adapta a tus necesidades**. @@ -23,9 +23,9 @@ desplegar una solución parcialmente automatizada que te ofrezca un poco más de control o directamente crear tu propio clúster de forma completamente manual personalizando y controlando cada componente. -{{% /capture %}} -{{% capture body %}} + + ## Soluciones para la máquina en local @@ -82,4 +82,4 @@ Deberías elegir una solución de este tipo si: Una solución personalizada proporciona total libertad sobre los clústeres pero requiere más conocimiento y experiencia. 
-{{% /capture %}} + diff --git a/content/es/docs/setup/release/building-from-source.md b/content/es/docs/setup/release/building-from-source.md index ec0f10a4af..42db05df4c 100644 --- a/content/es/docs/setup/release/building-from-source.md +++ b/content/es/docs/setup/release/building-from-source.md @@ -2,7 +2,7 @@ reviewers: - seomago title: Compilando desde código fuente -content_template: templates/concept +content_type: concept card: name: download weight: 20 --- -{{% capture overview %}} + Se puede o bien crear una release desde el código fuente o bien descargar una versión pre-built. Si no se pretende hacer un desarrollo de Kubernetes en sí mismo, se sugiere usar una versión pre-built de la release actual, que se puede encontrar en [Release Notes](/docs/setup/release/notes/). El código fuente de Kubernetes se puede descargar desde el repositorio [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes). -{{% /capture %}} -{{% capture body %}} + + ## Compilar desde código fuente @@ -34,4 +34,4 @@ Para más detalles sobre el proceso de compilación de una release, visita la ca -{{% /capture %}} + diff --git a/content/es/docs/tasks/_index.md b/content/es/docs/tasks/_index.md index d46fd8efda..12d741e263 100644 --- a/content/es/docs/tasks/_index.md +++ b/content/es/docs/tasks/_index.md @@ -2,20 +2,20 @@ title: Tareas main_menu: true weight: 50 -content_template: templates/concept +content_type: concept --- {{< toc >}} -{{% capture overview %}} + Esta sección de la documentación de Kubernetes contiene páginas que muestran cómo acometer tareas individuales. Cada página de tarea muestra cómo realizar una única cosa, típicamente proporcionando una pequeña secuencia de comandos. -{{% /capture %}} -{{% capture body %}} + + ## Interfaz Web de Usuario (Tablero de Control) @@ -77,11 +77,12 @@ Configura y planifica GPUs de NVIDIA para hacerlas disponibles como recursos a l Configura y planifica HugePages como un recurso planificado en un clúster. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Si quisieras escribir una página de Tareas, echa un vistazo a [Crear una Petición de Subida de Documentación](/docs/home/contribute/create-pull-request/). -{{% /capture %}} + diff --git a/content/es/docs/tasks/configure-pod-container/configure-volume-storage.md b/content/es/docs/tasks/configure-pod-container/configure-volume-storage.md index 1ddcdaa7e7..c4f08f2969 100644 --- a/content/es/docs/tasks/configure-pod-container/configure-volume-storage.md +++ b/content/es/docs/tasks/configure-pod-container/configure-volume-storage.md @@ -1,24 +1,25 @@ --- title: Configura un Pod para Usar un Volume como Almacenamiento -content_template: templates/task +content_type: task weight: 50 --- -{{% capture overview %}} + En esta página se muestra cómo configurar un Pod para usar un Volume (volumen) como almacenamiento. El sistema de ficheros de un Contenedor existe mientras el Contenedor exista. Por tanto, cuando un Contenedor es destruido o reiniciado, los cambios realizados en el sistema de ficheros se pierden. Para un almacenamiento más consistente que sea independiente del ciclo de vida del Contenedor, puedes usar un [Volume](/docs/concepts/storage/volumes/). Esta característica es especialmente importante para aplicaciones que deben mantener un estado, como motores de almacenamiento clave-valor (por ejemplo Redis) y bases de datos. 
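Como anticipo de lo que desarrolla esta página (esbozo orientativo basado en el ejemplo de Redis; los nombres son ilustrativos), un Pod con un volumen `emptyDir` montado en el contenedor:

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: redis
spec:
  containers:
  - name: redis
    image: redis
    volumeMounts:
    - name: redis-storage
      mountPath: /data/redis
  volumes:
  - name: redis-storage
    # El volumen sobrevive a reinicios del contenedor, no del Pod
    emptyDir: {}
EOF
```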
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Configura un Volume para un Pod @@ -116,9 +117,10 @@ de `Always` (siempre). kubectl delete pod redis ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Revisa [Volume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volume-v1-core). @@ -126,6 +128,6 @@ de `Always` (siempre). * Además del almacenamiento local proporcionado por `emptyDir`, Kubernetes soporta diferentes tipos de soluciones de almacenamiento por red, incluyendo los discos gestionados de los diferentes proveedores cloud, como por ejemplo los *Persistent Disks* en Google Cloud Platform o el *Elastic Block Storage* de Amazon Web Services. Este tipo de soluciones para volúmenes son las preferidas para el almacenamiento de datos críticos. Kubernetes se encarga de todos los detalles, tal como montar y desmontar los dispositivos en los nodos del clúster. Revisa [Volumes](/docs/concepts/storage/volumes/) para obtener más información. -{{% /capture %}} + diff --git a/content/es/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md b/content/es/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md index c8fe825df2..b1398e2faa 100644 --- a/content/es/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md +++ b/content/es/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md @@ -2,20 +2,20 @@ reviewers: - raelga title: Pipeline de métricas de recursos -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Desde Kubernetes 1.8, las métricas de uso de recursos, tales como el uso de CPU y memoria del contenedor, están disponibles en Kubernetes a través de la API de métricas. Estas métricas son accedidas directamente por el usuario, por ejemplo usando el comando `kubectl top`, o usadas por un controlador en el cluster, por ejemplo el Horizontal Pod Autoscaler, para la toma de decisiones. -{{% /capture %}} -{{% capture body %}} + + ## La API de Métricas @@ -54,4 +54,4 @@ El servidor de métricas se añadió a la API de Kubernetes utilizando el Puedes aprender más acerca del servidor de métricas en el [documento de diseño](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/metrics-server.md). -{{% /capture %}} + diff --git a/content/es/docs/tasks/run-application/configure-pdb.md b/content/es/docs/tasks/run-application/configure-pdb.md index b4d44ed296..c863eda497 100644 --- a/content/es/docs/tasks/run-application/configure-pdb.md +++ b/content/es/docs/tasks/run-application/configure-pdb.md @@ -1,24 +1,25 @@ --- title: Especificando un presupuesto de disrupción para tu aplicación -content_template: templates/task +content_type: task weight: 110 --- -{{% capture overview %}} + Ésta pagina enseña como limitar el numero de disrupciones concurrentes que afectan a tu aplicación definiendo presupuestos de disrupción de pods, Pod Disruption Budgets (PDB) en inglés. Estos presupuestos definen el mínimo número de pods que deben estar ejecutándose en todo momento para asegurar la disponibilidad de la aplicación durante operaciones de mantenimiento efectuadas sobre los nodos por los administradores del cluster. 
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Tener permisos de administrador sobre la aplicación que esta corriendo en Kubernetes y requiere alta disponibilidad * Deberías saber como desplegar [Múltiples réplicas de aplicaciones stateless](/docs/tasks/run-application/run-stateless-application-deployment/) y/o [Múltiples réplicas de aplicaciones stateful](/docs/tasks/run-application/run-replicated-stateful-application/). * Deberías haber leido acerca de [Disrupciones de un Pod](/docs/concepts/workloads/pods/disruptions/). * Deberías confirmar con el propietario del cluster o proveedor de servicio que respetan Presupuestos de Disrupción para Pods. -{{% /capture %}} -{{% capture steps %}} + + ## Protegiendo una aplicación con un PodDisruptionBudget @@ -27,9 +28,9 @@ weight: 110 3. Crea un PDB usando un archivo YAML. 4. Crea el objecto PDB desde el archivo YAML. -{{% /capture %}} -{{% capture discussion %}} + + ## Identifica la applicación que quieres proteger @@ -225,6 +226,6 @@ Puedes utilizar un PDB con pods controlados por otro tipo de controlador, por un Puedes usar un selector que selecciona un subconjunto o superconjunto de los pods que pertenecen a un controlador incorporado. Sin embargo, cuando hay varios PDB en un namespace, debes tener cuidado de no crear PDBs cuyos selectores se superponen. -{{% /capture %}} + diff --git a/content/es/docs/tasks/run-application/run-stateless-application-deployment.md b/content/es/docs/tasks/run-application/run-stateless-application-deployment.md index 6696a0186b..4bbe221adf 100644 --- a/content/es/docs/tasks/run-application/run-stateless-application-deployment.md +++ b/content/es/docs/tasks/run-application/run-stateless-application-deployment.md @@ -1,34 +1,36 @@ --- title: Corre una aplicación stateless usando un Deployment min-kubernetes-server-version: v1.9 -content_template: templates/tutorial +content_type: tutorial weight: 10 --- -{{% capture overview %}} + Ésta página enseña como correr una aplicación stateless usando un `deployment` de Kubernetes. -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Crear un `deployment` de nginx. * Usar kubectl para obtener información acerca del `deployment`. * Actualizar el `deployment`. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Creando y explorando un nginx deployment @@ -141,11 +143,12 @@ Elimina el `deployment` por el nombre: La manera preferida de crear una aplicación con múltiples instancias es usando un Deployment, el cual usa un ReplicaSet. Antes de que Deployment y ReplicaSet fueran introducidos en Kubernetes, aplicaciones con múltiples instancias eran configuradas usando un [ReplicationController](/docs/concepts/workloads/controllers/replicationcontroller/). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Aprende más acerca de [Deployments](/docs/concepts/workloads/controllers/deployment/). 
-{{% /capture %}} + diff --git a/content/es/docs/tasks/tools/install-kubectl.md b/content/es/docs/tasks/tools/install-kubectl.md index 384116cc3a..8a0791e262 100644 --- a/content/es/docs/tasks/tools/install-kubectl.md +++ b/content/es/docs/tasks/tools/install-kubectl.md @@ -2,7 +2,7 @@ reviewers: - mikedanese title: Instalar y Configurar kubectl -content_template: templates/task +content_type: task weight: 10 card: name: tasks @@ -10,16 +10,17 @@ card: title: Instalar kubectl --- -{{% capture overview %}} + Usa la herramienta de línea de comandos de Kubernetes, [kubectl](/docs/user-guide/kubectl/), para desplegar y gestionar aplicaciones en Kubernetes. Usando kubectl, puedes inspeccionar recursos del clúster; crear, eliminar, y actualizar componentes; explorar tu nuevo clúster; y arrancar aplicaciones de ejemplo. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Debes usar una versión de kubectl que esté a menos de una versión menor de diferencia con tu clúster. Por ejemplo, un cliente v1.2 debería funcionar con un máster v1.1, v1.2, y v1.3. Usar la última versión de kubectl ayuda a evitar problemas inesperados. -{{% /capture %}} -{{% capture steps %}} + + ## Instalar kubectl @@ -421,9 +422,10 @@ compinit {{% /tab %}} {{< /tabs >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Aprender cómo lanzar y exponer tu aplicación.](/docs/tasks/access-application-cluster/service-access-application-cluster/) -{{% /capture %}} + diff --git a/content/es/docs/tasks/tools/install-minikube.md b/content/es/docs/tasks/tools/install-minikube.md index 7538afa704..e19912e636 100644 --- a/content/es/docs/tasks/tools/install-minikube.md +++ b/content/es/docs/tasks/tools/install-minikube.md @@ -1,28 +1,29 @@ --- title: Instalar Minikube -content_template: templates/task +content_type: task weight: 20 card: name: tasks weight: 10 --- -{{% capture overview %}} + Esta página muestra cómo instalar [Minikube](/docs/tutorials/hello-minikube), una herramienta que despliega un clúster de Kubernetes con un único nodo en una máquina virtual. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + La virtualización VT-x o AMD-v debe estar habilitada en la BIOS de tu ordenador. En Linux, puedes comprobar si la tienes habilitada buscando 'vmx' o 'svm' en el fichero `/proc/cpuinfo`: ```shell egrep --color 'vmx|svm' /proc/cpuinfo ``` -{{% /capture %}} -{{% capture steps %}} + + ## Instalar un Hipervisor @@ -106,13 +107,6 @@ Para instalar Minikube manualmente en Windows, descarga [`minikube-windows-amd64 Para instalar Minikube manualmente en Windows usando [Windows Installer](https://docs.microsoft.com/en-us/windows/desktop/msi/windows-installer-portal), descarga [`minikube-installer.exe`](https://github.com/kubernetes/minikube/releases/latest) y ejecuta el instalador. 
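Tras instalar Minikube por cualquiera de estos métodos, una comprobación rápida y orientativa de la instalación sería:

```shell
# Comprobación orientativa: la salida exacta depende de tu versión de Minikube.
minikube version
# Informa del estado del clúster; puede indicar que todavía no existe ninguno.
minikube status
```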
-{{% /capture %}} - -{{% capture whatsnext %}} - -* [Ejecutar Kubernetes Localmente via Minikube](/docs/setup/minikube/) - -{{% /capture %}} ## Limpiar todo para comenzar de cero @@ -130,3 +124,8 @@ Necesitas eliminar permanentemente los siguientes archivos de configuración: ```shell rm -rf ~/.minikube ``` + +## {{% heading "whatsnext" %}} + + +* [Ejecutar Kubernetes Localmente via Minikube](/docs/setup/minikube/) \ No newline at end of file diff --git a/content/es/docs/tutorials/_index.md b/content/es/docs/tutorials/_index.md index 7fed31f2f6..ebf5de461c 100644 --- a/content/es/docs/tutorials/_index.md +++ b/content/es/docs/tutorials/_index.md @@ -2,10 +2,10 @@ title: Tutoriales main_menu: true weight: 60 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Esta sección de la documentación de Kubernetes contiene tutoriales. @@ -15,9 +15,9 @@ una de ellas contiene un procedimiento. Antes de recorrer cada tutorial, recomendamos añadir un marcador a [Glosario de términos](/docs/reference/glossary/) para poder consultarlo fácilmente. -{{% /capture %}} -{{% capture body %}} + + ## Esenciales @@ -67,10 +67,11 @@ Antes de recorrer cada tutorial, recomendamos añadir un marcador a * [Using Source IP](/docs/tutorials/services/source-ip/) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Si quieres escribir un tutorial, revisa [utilizando templates](/docs/home/contribute/page-templates/) para obtener información sobre el tipo de página y la plantilla de los tutoriales. -{{% /capture %}} + diff --git a/content/es/docs/tutorials/hello-minikube.md b/content/es/docs/tutorials/hello-minikube.md index 144256637b..67d7bf7afa 100644 --- a/content/es/docs/tutorials/hello-minikube.md +++ b/content/es/docs/tutorials/hello-minikube.md @@ -1,6 +1,6 @@ --- title: Hello Minikube -content_template: templates/tutorial +content_type: tutorial weight: 5 menu: main: @@ -13,7 +13,7 @@ card: weight: 10 --- -{{% capture overview %}} + Este tutorial muestra cómo ejecutar una aplicación Node.js Hola Mundo en Kubernetes utilizando [Minikube](/docs/setup/learning-environment/minikube) y Katacoda. @@ -23,17 +23,19 @@ Katacoda provee un ambiente de Kubernetes desde el navegador. También se puede seguir este tutorial si se ha instalado [Minikube localmente](/docs/tasks/tools/install-minikube/). {{< /note >}}
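Si sigues el tutorial con un Minikube local, el arranque típico sería parecido a esto (el driver concreto depende del hipervisor instalado en tu máquina):

```shell
# Arranque orientativo de un clúster local de un solo nodo.
minikube start
# Comprueba que el nodo aparece en estado Ready.
kubectl get nodes
```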
-{{% /capture %}} + diff --git a/content/fr/docs/concepts/_index.md b/content/fr/docs/concepts/_index.md index 60edaf66cf..8819065e27 100644 --- a/content/fr/docs/concepts/_index.md +++ b/content/fr/docs/concepts/_index.md @@ -2,18 +2,18 @@ title: Concepts main_menu: true description: Concepts Kubernetes -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + La section Concepts vous aide à mieux comprendre les composants du système Kubernetes et les abstractions que Kubernetes utilise pour représenter votre cluster. Elle vous aide également à mieux comprendre le fonctionnement de Kubernetes en général. -{{% /capture %}} -{{% capture body %}} + + ## Vue d'ensemble @@ -81,12 +81,13 @@ Le master node Kubernetes contrôle chaque noeud; vous interagirez rarement dire * [Annotations](/docs/concepts/overview/working-with-objects/annotations/) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Si vous souhaitez écrire une page de concept, consultez [Utilisation de modèles de page](/docs/home/contribute/page-templates/) pour plus d'informations sur le type de page pour la documentation d'un concept. -{{% /capture %}} + diff --git a/content/fr/docs/concepts/architecture/cloud-controller.md b/content/fr/docs/concepts/architecture/cloud-controller.md index ca0542a2c3..7fb9f8e588 100644 --- a/content/fr/docs/concepts/architecture/cloud-controller.md +++ b/content/fr/docs/concepts/architecture/cloud-controller.md @@ -1,10 +1,10 @@ --- title: Concepts sous-jacents au Cloud Controller Manager -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Le concept de cloud controller manager (CCM) (ne pas confondre avec le binaire) a été créé à l'origine pour permettre au code de fournisseur spécifique de cloud et au noyau Kubernetes d'évoluer indépendamment les uns des autres. Le gestionnaire de contrôleur de cloud fonctionne aux côtés d'autres composants principaux, tels que le gestionnaire de contrôleur Kubernetes, le serveur d'API et le planificateur. @@ -19,9 +19,9 @@ Voici l'architecture d'un cluster Kubernetes sans le cloud controller manager: ![Pre CCM Kube Arch](/images/docs/pre-ccm-arch.png) -{{% /capture %}} -{{% capture body %}} + + ## Conception @@ -259,4 +259,4 @@ Les fournisseurs de cloud suivants ont implémenté leur CCM: Des instructions complètes pour la configuration et l'exécution du CCM sont fournies [ici](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager). -{{% /capture %}} + diff --git a/content/fr/docs/concepts/architecture/master-node-communication.md b/content/fr/docs/concepts/architecture/master-node-communication.md index 09790d3e21..23dd1a4b58 100644 --- a/content/fr/docs/concepts/architecture/master-node-communication.md +++ b/content/fr/docs/concepts/architecture/master-node-communication.md @@ -1,18 +1,18 @@ --- title: Communication Master-Node -content_template: templates/concept +content_type: concept description: Communication Master-Node Kubernetes weight: 20 --- -{{% capture overview %}} + Ce document répertorie les canaux de communication entre l'API du noeud maître (apiserver of master node en anglais) et le reste du cluster Kubernetes. L'objectif est de permettre aux utilisateurs de personnaliser leur installation afin de sécuriser la configuration réseau, de sorte que le cluster puisse être exécuté sur un réseau non approuvé (ou sur des adresses IP entièrement publiques d'un fournisseur de cloud). 
-{{% /capture %}} -{{% capture body %}} + + ## Communication du Cluster vers le Master @@ -72,4 +72,4 @@ Ce tunnel garantit que le trafic n'est pas exposé en dehors du réseau dans leq Les tunnels SSH étant actuellement obsolètes, vous ne devriez pas choisir de les utiliser à moins de savoir ce que vous faites. Un remplacement pour ce canal de communication est en cours de conception. -{{% /capture %}} + diff --git a/content/fr/docs/concepts/architecture/nodes.md b/content/fr/docs/concepts/architecture/nodes.md index 17d5c807d3..fd211a1a35 100644 --- a/content/fr/docs/concepts/architecture/nodes.md +++ b/content/fr/docs/concepts/architecture/nodes.md @@ -3,11 +3,11 @@ reviewers: - sieben title: Noeuds description: Concept Noeud Kubernetes -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Un nœud est une machine de travail dans Kubernetes, connue auparavant sous le nom de `minion`. Un nœud peut être une machine virtuelle ou une machine physique, selon le cluster. @@ -15,9 +15,9 @@ Chaque nœud contient les services nécessaires à l'exécution de [pods](/docs/ Les services sur un nœud incluent le [container runtime](/docs/concepts/overview/components/#node-components), kubelet et kube-proxy. Consultez la section [Le Nœud Kubernetes](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) dans le document de conception de l'architecture pour plus de détails. -{{% /capture %}} -{{% capture body %}} + + ## Statut du nœud @@ -229,4 +229,4 @@ Si vous souhaitez réserver explicitement des ressources pour des processus autr L'objet Node est une ressource de niveau supérieur dans l'API REST de Kubernetes. Plus de détails sur l'objet API peuvent être trouvés à l'adresse suivante: [Node API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#node-v1-core). -{{% /capture %}} + diff --git a/content/fr/docs/concepts/cluster-administration/certificates.md b/content/fr/docs/concepts/cluster-administration/certificates.md index 6de85718f2..aea92f4967 100644 --- a/content/fr/docs/concepts/cluster-administration/certificates.md +++ b/content/fr/docs/concepts/cluster-administration/certificates.md @@ -1,19 +1,19 @@ --- title: Certificats -content_template: templates/concept +content_type: concept description: Certifications cluster Kubernetes weight: 20 --- -{{% capture overview %}} + Lorsque vous utilisez l'authentification par certificats client, vous pouvez générer des certificats manuellement grâce à `easyrsa`, `openssl` ou `cfssl`. -{{% /capture %}} -{{% capture body %}} + + ### easyrsa @@ -245,4 +245,4 @@ Vous pouvez utiliser l'API `certificates.k8s.io` pour faire créer des Certificats x509 à utiliser pour l'authentification, comme documenté [ici](/docs/tasks/tls/managing-tls-in-a-cluster).
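En complément, voici une esquisse minimale de génération manuelle d'une autorité de certification avec `openssl` (la valeur de `MASTER_IP` est une hypothèse à adapter à votre cluster) :

```shell
# Esquisse hypothétique : clé privée et certificat de CA auto-signé.
MASTER_IP=192.168.0.1
openssl genrsa -out ca.key 2048
openssl req -x509 -new -nodes -key ca.key -subj "/CN=${MASTER_IP}" -days 365 -out ca.crt
```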
-{{% /capture %}} + diff --git a/content/fr/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/fr/docs/concepts/cluster-administration/cluster-administration-overview.md index f0ce6315e8..134a6fb3a0 100644 --- a/content/fr/docs/concepts/cluster-administration/cluster-administration-overview.md +++ b/content/fr/docs/concepts/cluster-administration/cluster-administration-overview.md @@ -1,16 +1,16 @@ --- title: Vue d'ensemble de l'administration d'un cluster -content_template: templates/concept +content_type: concept description: Administration cluster Kubernetes weight: 10 --- -{{% capture overview %}} + La vue d'ensemble de l'administration d'un cluster est destinée à toute personne créant ou administrant un cluster Kubernetes. Elle suppose une certaine familiarité avec les [concepts](/docs/concepts/) de Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Planifier le déploiement d'un cluster Voir le guide: [choisir la bonne solution](/fr/docs/setup/pick-right-solution/) pour des exemples de planification, de mise en place et de configuration de clusters Kubernetes. Les solutions répertoriées dans cet article s'appellent des *distributions*. @@ -64,4 +64,4 @@ A noter: Toutes les distributions ne sont pas activement maintenues. Choisissez * [Integration DNS](/docs/concepts/services-networking/dns-pod-service/) décrit comment résoudre un nom DNS directement vers un service Kubernetes. * [Journalisation des évènements et surveillance de l'activité du cluster](/docs/concepts/cluster-administration/logging/) explique le fonctionnement de la journalisation des évènements dans Kubernetes et son implémentation. -{{% /capture %}} + diff --git a/content/fr/docs/concepts/cluster-administration/logging.md b/content/fr/docs/concepts/cluster-administration/logging.md index 18e80dc650..b6384efe47 100644 --- a/content/fr/docs/concepts/cluster-administration/logging.md +++ b/content/fr/docs/concepts/cluster-administration/logging.md @@ -3,11 +3,11 @@ reviewers: - piosz - x13n title: Architecture de Journalisation d'évènements (logging) -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + La journalisation des évènements systèmes et d'applications peut aider à comprendre ce qui se passe dans un cluster. Les journaux sont particulièrement @@ -32,10 +32,10 @@ solution de stockage pour les journaux mais il est possible d'intégrer de nombreuses solutions de journalisation d'évènements dans un cluster Kubernetes. -{{% /capture %}} -{{% capture body %}} + + L'architecture de journalisation des évènements au niveau du cluster est décrite en considérant qu'un backend de journalisation est présent à l'intérieur ou à @@ -355,4 +355,4 @@ Toutefois l'implémentation de ce mécanisme de journalisation est hors du cadre de Kubernetes. -{{% /capture %}} + diff --git a/content/fr/docs/concepts/configuration/secret.md b/content/fr/docs/concepts/configuration/secret.md index 5c7ac8d1b2..79b9e2e533 100644 --- a/content/fr/docs/concepts/configuration/secret.md +++ b/content/fr/docs/concepts/configuration/secret.md @@ -1,6 +1,6 @@ --- title: Secrets -content_template: templates/concept +content_type: concept feature: title: Gestion du secret et de la configuration description: > @@ -9,15 +9,15 @@ weight: 50 --- -{{% capture overview %}} + Les objets `secret` de Kubernetes vous permettent de stocker et de gérer des informations sensibles, telles que les mots de passe, les jetons OAuth et les clés ssh.
Mettre ces informations dans un `secret` est plus sûr et plus flexible que de les mettre en dur dans la définition d'un {{< glossary_tooltip term_id="pod" >}} ou dans une {{< glossary_tooltip text="container image" term_id="image" >}}. Voir [Document de conception des secrets](https://git.k8s.io/community/contributors/design-proposals/auth/secrets.md) pour plus d'informations. -{{% /capture %}} -{{% capture body %}} + + ## Présentation des secrets @@ -976,6 +976,7 @@ Vous pouvez activer le [chiffrement au repos](/docs/tasks/administer-cluster/enc * Actuellement, toute personne disposant des droits root sur n'importe quel nœud peut lire _n'importe quel_ secret depuis l'apiserver, en usurpant l'identité du kubelet. Il est prévu de n'envoyer des secrets qu'aux nœuds qui en ont réellement besoin, pour limiter l'impact d'un exploit root sur un seul nœud. -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + + -{{% /capture %}} diff --git a/content/fr/docs/concepts/containers/container-environment-variables.md b/content/fr/docs/concepts/containers/container-environment-variables.md index 30767d63c0..547809ffbf 100644 --- a/content/fr/docs/concepts/containers/container-environment-variables.md +++ b/content/fr/docs/concepts/containers/container-environment-variables.md @@ -1,18 +1,18 @@ --- title: Les variables d’environnement du conteneur description: Variables d'environnement pour conteneur Kubernetes -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Cette page décrit les ressources disponibles pour les conteneurs dans l'environnement de conteneur. -{{% /capture %}} -{{% capture body %}} + + ## L'environnement du conteneur @@ -51,12 +51,13 @@ FOO_SERVICE_PORT= Les services ont des adresses IP dédiées et sont disponibles pour le conteneur avec le DNS, si le [module DNS](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/) est activé. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * En savoir plus sur [les hooks du cycle de vie d'un conteneur](/docs/concepts/containers/container-lifecycle-hooks/). * Acquérir une expérience pratique [en attachant les handlers aux événements du cycle de vie du conteneur](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). -{{% /capture %}} + diff --git a/content/fr/docs/concepts/containers/container-lifecycle-hooks.md b/content/fr/docs/concepts/containers/container-lifecycle-hooks.md index 65aed32b62..82c1db2ec5 100644 --- a/content/fr/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/fr/docs/concepts/containers/container-lifecycle-hooks.md @@ -1,20 +1,20 @@ --- reviewers: title: Hooks de cycle de vie de conteneurs -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Cette page décrit comment un conteneur pris en charge par kubelet peut utiliser le framework de Hooks de cycle de vie de conteneurs pour exécuter du code déclenché par des événements durant son cycle de vie. -{{% /capture %}} -{{% capture body %}} + + ## Aperçu @@ -113,12 +113,13 @@ Events: 1m 22s 2 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Warning FailedPostStartHook ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * En savoir plus sur l'[Environnement d'un conteneur](/fr/docs/concepts/containers/container-environment/).
* Entraînez-vous à [attacher des handlers de conteneurs à des événements de cycle de vie](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). -{{% /capture %}} + diff --git a/content/fr/docs/concepts/containers/images.md b/content/fr/docs/concepts/containers/images.md index 0e0160dd4b..1e7bbe3e98 100644 --- a/content/fr/docs/concepts/containers/images.md +++ b/content/fr/docs/concepts/containers/images.md @@ -1,20 +1,20 @@ --- title: Images description: Images conteneur Kubernetes -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Vous créez une image Docker et la poussez dans un registre avant de la référencer depuis un pod Kubernetes. La propriété `image` d'un conteneur utilise la même syntaxe que la commande `docker`, y compris pour les registres privés et les tags. -{{% /capture %}} -{{% capture body %}} + + ## Mettre à jour des images @@ -356,4 +356,4 @@ pod - Le *tenant* ajoute ce secret dans les imagePullSecrets de chaque pod. Si vous devez accéder à plusieurs registres, vous pouvez créer un secret pour chaque registre. Kubelet va fusionner tous les `imagePullSecrets` dans un unique `.docker/config.json` virtuel. -{{% /capture %}} + diff --git a/content/fr/docs/concepts/containers/runtime-class.md b/content/fr/docs/concepts/containers/runtime-class.md index 0106abc107..c8429d8507 100644 --- a/content/fr/docs/concepts/containers/runtime-class.md +++ b/content/fr/docs/concepts/containers/runtime-class.md @@ -1,20 +1,20 @@ --- title: Classe d'exécution (Runtime Class) description: Classe d'exécution conteneur pour Kubernetes -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.12" state="alpha" >}} Cette page décrit la ressource RuntimeClass et le mécanisme de sélection d'exécution (runtime). -{{% /capture %}} -{{% capture body %}} + + ## Runtime Class @@ -112,4 +112,4 @@ message d'erreur. Si aucun `runtimeClassName` n'est spécifié, le RuntimeHandler par défaut sera utilisé, qui équivaut au comportement lorsque la fonctionnalité RuntimeClass est désactivée. -{{% /capture %}} + diff --git a/content/fr/docs/concepts/overview/components.md b/content/fr/docs/concepts/overview/components.md index 7617e94573..8adc32f78e 100644 --- a/content/fr/docs/concepts/overview/components.md +++ b/content/fr/docs/concepts/overview/components.md @@ -1,18 +1,18 @@ --- title: Composants de Kubernetes -content_template: templates/concept +content_type: concept weight: 20 card: name: concepts weight: 20 --- -{{% capture overview %}} + Ce document résume les divers composants binaires requis pour livrer un cluster Kubernetes fonctionnel. -{{% /capture %}} -{{% capture body %}} + + ## Composants Master Les composants Master fournissent le plan de contrôle (control plane) du cluster. @@ -120,9 +120,10 @@ fournit une interface utilisateur pour parcourir ces données. Un mécanisme de [logging au niveau cluster](/docs/concepts/cluster-administration/logging/) est chargé de sauvegarder les logs des conteneurs dans un magasin de logs central avec une interface de recherche/navigation.
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * En savoir plus sur les [Nœuds](/fr/docs/concepts/architecture/nodes/) * En savoir plus sur [kube-scheduler](/docs/concepts/scheduling/kube-scheduler/) * Lire la [documentation officielle d'etcd](https://etcd.io/docs/) -{{% /capture %}} + diff --git a/content/fr/docs/concepts/overview/what-is-kubernetes.md b/content/fr/docs/concepts/overview/what-is-kubernetes.md index 1a6fce4f82..e71283aadf 100644 --- a/content/fr/docs/concepts/overview/what-is-kubernetes.md +++ b/content/fr/docs/concepts/overview/what-is-kubernetes.md @@ -1,18 +1,18 @@ --- title: Qu'est-ce que Kubernetes ? description: Description de Kubernetes -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 10 --- -{{% capture overview %}} + Cette page est une vue d'ensemble de Kubernetes. -{{% /capture %}} -{{% capture body %}} + + Kubernetes est une plate-forme open-source extensible et portable pour la gestion de charges de travail (workloads) et des services conteneurisés. Elle favorise à la fois l'écriture de configuration déclarative (declarative configuration) et l'automatisation. C'est un large écosystème en rapide expansion. @@ -125,9 +125,10 @@ Résumé des bénéfices des conteneurs : Le nom **Kubernetes** tire son origine du grec ancien, signifiant _capitaine_ ou _pilote_ et est la racine de _gouverneur_ et [cybernetic](http://www.etymonline.com/index.php?term=cybernetics). _K8s_ est l'abréviation dérivée par le remplacement des 8 lettres "ubernete" par "8". -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Prêt à [commencer](/docs/setup/) ? * Pour plus de détails, voir la [documentation Kubernetes](/docs/home/). -{{% /capture %}} + diff --git a/content/fr/docs/concepts/services-networking/dns-pod-service.md b/content/fr/docs/concepts/services-networking/dns-pod-service.md index 79d2c69ac9..67ee10ea0a 100644 --- a/content/fr/docs/concepts/services-networking/dns-pod-service.md +++ b/content/fr/docs/concepts/services-networking/dns-pod-service.md @@ -1,14 +1,14 @@ --- title: DNS pour les services et les pods description: DNS services pods Kubernetes -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Cette page fournit une vue d'ensemble du support DNS par Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -221,11 +221,11 @@ search default.svc.cluster.local svc.cluster.local cluster.local options ndots:5 ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Pour obtenir des recommandations sur l’administration des configurations DNS, consultez [Configurer le service DNS](/docs/tasks/administer-cluster/dns-custom-nameservers/) -{{% /capture %}} \ No newline at end of file diff --git a/content/fr/docs/concepts/services-networking/endpoint-slices.md b/content/fr/docs/concepts/services-networking/endpoint-slices.md index b06117cc00..f019b1f0fe 100644 --- a/content/fr/docs/concepts/services-networking/endpoint-slices.md +++ b/content/fr/docs/concepts/services-networking/endpoint-slices.md @@ -6,20 +6,20 @@ feature: description: > Suivi évolutif des Endpoints réseau dans un cluster Kubernetes.
-content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.17" state="beta" >}} _EndpointSlices_ offrent une méthode simple pour suivre les Endpoints d'un réseau au sein d'un cluster Kubernetes. Ils offrent une alternative plus évolutive et extensible aux Endpoints. -{{% /capture %}} -{{% capture body %}} + + ## Ressource pour EndpointSlice {#endpointslice-resource} @@ -112,11 +112,11 @@ Puisque tous les Endpoints d'un réseau pour un Service ont été stockés dans Cela a affecté les performances des composants Kubernetes (notamment le plan de contrôle) et a causé une grande quantité de trafic réseau et de traitements lorsque les Endpoints changent. Les EndpointSlices aident à atténuer ces problèmes ainsi qu'à fournir une plate-forme extensible pour des fonctionnalités supplémentaires telles que le routage topologique. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Activer EndpointSlices](/docs/tasks/administer-cluster/enabling-endpointslices) * Lire [Connecter des applications aux Services](/docs/concepts/services-networking/connect-applications-service/) -{{% /capture %}} \ No newline at end of file diff --git a/content/fr/docs/concepts/services-networking/ingress.md b/content/fr/docs/concepts/services-networking/ingress.md index 250cd6f468..8be9ea32bf 100644 --- a/content/fr/docs/concepts/services-networking/ingress.md +++ b/content/fr/docs/concepts/services-networking/ingress.md @@ -5,17 +5,17 @@ reviewers: - rekcah78 - rbenzair title: Ingress -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Un Ingress est un objet Kubernetes qui gère l'accès externe aux services dans un cluster, généralement du trafic HTTP. Un Ingress peut fournir un équilibrage de charge, une terminaison TLS et un hébergement virtuel basé sur un nom. -{{% /capture %}} -{{% capture body %}} + + ## Terminologie @@ -431,8 +431,9 @@ Vous pouvez exposer un service de plusieurs manières sans impliquer directement * Utilisez [Service.Type=NodePort](/docs/concepts/services-networking/service/#nodeport) * Utilisez un [Proxy du port](https://git.k8s.io/contrib/for-demos/proxy-to-service) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Configurer Ingress sur Minikube avec le contrôleur NGINX](/docs/tasks/access-application-cluster/ingress-minikube) -{{% /capture %}} + diff --git a/content/fr/docs/concepts/services-networking/service.md b/content/fr/docs/concepts/services-networking/service.md index 12b6453a6f..3360c48428 100644 --- a/content/fr/docs/concepts/services-networking/service.md +++ b/content/fr/docs/concepts/services-networking/service.md @@ -6,21 +6,21 @@ feature: Pas besoin de modifier votre application pour utiliser un mécanisme de découverte de services inconnu. Kubernetes donne aux pods leurs propres adresses IP et un nom DNS unique pour un ensemble de pods, et peut équilibrer la charge entre eux. -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< glossary_definition term_id="service" length="short" >}} Avec Kubernetes, vous n'avez pas besoin de modifier votre application pour utiliser un mécanisme de découverte de services inconnu. Kubernetes donne aux pods leurs propres adresses IP et un nom DNS unique pour un ensemble de pods, et peut équilibrer la charge entre eux.
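Pour fixer les idées, une esquisse minimale et hypothétique (nom, label et ports illustratifs) d'un Service qui route le trafic vers les pods portant le label `app=MyApp` :

```shell
# Esquisse hypothétique : un Service ClusterIP exposant le port 80
# et redirigeant vers le port 9376 des pods sélectionnés.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  selector:
    app: MyApp
  ports:
  - protocol: TCP
    port: 80
    targetPort: 9376
EOF
```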
-{{% /capture %}} -{{% capture body %}} + + ## Motivation @@ -995,12 +995,13 @@ Le projet Kubernetes vise à améliorer la prise en charge des services L7 (HTTP Le projet Kubernetes prévoit d'avoir des modes d'entrée plus flexibles pour les services, qui englobent les modes ClusterIP, NodePort et LoadBalancer actuels et plus encore. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Voir [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/) * Voir [Ingress](/docs/concepts/services-networking/ingress/) * Voir [Endpoint Slices](/docs/concepts/services-networking/endpoint-slices/) -{{% /capture %}} + diff --git a/content/fr/docs/concepts/storage/persistent-volumes.md b/content/fr/docs/concepts/storage/persistent-volumes.md index f8644b82a1..e1a1701fcb 100644 --- a/content/fr/docs/concepts/storage/persistent-volumes.md +++ b/content/fr/docs/concepts/storage/persistent-volumes.md @@ -5,18 +5,18 @@ feature: description: > Montez automatiquement le système de stockage de votre choix, que ce soit à partir du stockage local, d'un fournisseur de cloud public tel que GCP ou AWS, ou d'un système de stockage réseau tel que NFS, iSCSI, Gluster, Ceph, Cinder ou Flocker. -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Ce document décrit l'état actuel de `PersistentVolumes` dans Kubernetes. Une connaissance des [volumes](/fr/docs/concepts/storage/volumes/) est suggérée. -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -758,4 +758,4 @@ Si vous écrivez des templates de configuration ou des exemples qui s'exécutent De nombreux environnements de cluster ont une `StorageClass` par défaut installée, où les administrateurs peuvent créer leur propre `StorageClass` par défaut. * Dans votre outillage, surveillez les PVCs qui ne sont pas liés après un certain temps et signalez-le à l'utilisateur, car cela peut indiquer que le cluster n'a pas de support de stockage dynamique (auquel cas l'utilisateur doit créer un PV correspondant) ou que le cluster n'a aucun système de stockage (auquel cas l'utilisateur ne peut pas déployer de configuration nécessitant des PVCs). -{{% /capture %}} + diff --git a/content/fr/docs/concepts/storage/volumes.md b/content/fr/docs/concepts/storage/volumes.md index 51f1c99dfd..1038e77ced 100644 --- a/content/fr/docs/concepts/storage/volumes.md +++ b/content/fr/docs/concepts/storage/volumes.md @@ -1,10 +1,10 @@ --- title: Volumes -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Les fichiers sur disque dans un conteneur sont éphémères, ce qui présente des problèmes pour des applications non-triviales lorsqu'elles s'exécutent dans des conteneurs. Premièrement, lorsqu'un @@ -15,9 +15,9 @@ il est souvent nécessaire de partager des fichiers entre ces conteneurs. L'abstraction de volume Kubernetes résout ces problèmes. Une connaissance des [Pods](/fr/docs/concepts/workloads/pods/pod) est suggérée. -{{% /capture %}} -{{% capture body %}} + + ## Contexte @@ -1245,6 +1245,7 @@ sudo systemctl restart docker -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + + * Suivez un exemple de [déploiement de WordPress et MySQL avec des volumes persistants](/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/).
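Pour mémoire, une esquisse minimale et hypothétique (noms et image illustratifs) d'un Pod montant un volume `emptyDir` :

```shell
# Esquisse hypothétique : un volume emptyDir monté sous /cache.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - name: test-container
    image: nginx
    volumeMounts:
    - mountPath: /cache
      name: cache-volume
  volumes:
  - name: cache-volume
    emptyDir: {}
EOF
```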
-{{% /capture %}} + diff --git a/content/fr/docs/concepts/workloads/controllers/deployment.md b/content/fr/docs/concepts/workloads/controllers/deployment.md index 4e6fb3bda5..e8034cc9ad 100644 --- a/content/fr/docs/concepts/workloads/controllers/deployment.md +++ b/content/fr/docs/concepts/workloads/controllers/deployment.md @@ -7,11 +7,11 @@ feature: En cas de problème, Kubernetes annulera le changement pour vous. Profitez d'un écosystème croissant de solutions de déploiement. -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Un _Deployment_ (déploiement en français) fournit des mises à jour déclaratives pour [Pods](/fr/docs/concepts/workloads/pods/pod/) et [ReplicaSets](/fr/docs/concepts/workloads/controllers/replicaset/). @@ -23,9 +23,9 @@ Ne gérez pas les ReplicaSets appartenant à un Deployment. Pensez à ouvrir un ticket dans le dépôt Kubernetes principal si votre cas d'utilisation n'est pas traité ci-dessous. {{< /note >}} -{{% /capture %}} -{{% capture body %}} + + ## Cas d'utilisation @@ -1222,4 +1222,4 @@ Un déploiement n'est pas suspendu par défaut lors de sa création. [`kubectl rolling-update`](/docs/reference/generated/kubectl/kubectl-commands#rolling-update) met à jour les pods et les ReplicationControllers de la même manière. Mais les déploiements sont recommandés, car ils sont déclaratifs, côté serveur et ont des fonctionnalités supplémentaires, telles que la restauration de toute révision précédente même après la mise à jour progressive. -{{% /capture %}} + diff --git a/content/fr/docs/concepts/workloads/controllers/replicaset.md b/content/fr/docs/concepts/workloads/controllers/replicaset.md index 24c7676017..81ccb6e7b3 100644 --- a/content/fr/docs/concepts/workloads/controllers/replicaset.md +++ b/content/fr/docs/concepts/workloads/controllers/replicaset.md @@ -1,17 +1,17 @@ --- title: ReplicaSet -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Un ReplicaSet (ensemble de réplicas en français) a pour but de maintenir un ensemble stable de Pods à un moment donné. Cet objet est souvent utilisé pour garantir la disponibilité d'un certain nombre identique de Pods. -{{% /capture %}} -{{% capture body %}} + + ## Comment un ReplicaSet fonctionne @@ -342,4 +342,4 @@ Les deux servent le même objectif et se comportent de la même manière, à la les exigences de sélecteur décrites dans le [labels user guide](/docs/concepts/overview/working-with-objects/labels/#label-selectors). En tant que tels, les ReplicaSets sont préférés aux ReplicationControllers. -{{% /capture %}} + diff --git a/content/fr/docs/concepts/workloads/pods/init-containers.md b/content/fr/docs/concepts/workloads/pods/init-containers.md index c2ac521df4..fb4b6f3270 100644 --- a/content/fr/docs/concepts/workloads/pods/init-containers.md +++ b/content/fr/docs/concepts/workloads/pods/init-containers.md @@ -1,17 +1,17 @@ --- title: Init Containers -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Cette page fournit une vue d'ensemble des _conteneurs d'initialisation_ (init containers) : des conteneurs spécialisés qui s'exécutent avant les conteneurs d'application dans un {{< glossary_tooltip text="Pod" term_id="pod" >}}. Les init containers peuvent contenir des utilitaires ou des scripts d'installation qui ne sont pas présents dans une image d'application.
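À titre d'illustration, une esquisse minimale et hypothétique (images et commandes arbitraires) d'un Pod déclarant un init container qui attend qu'un Service nommé `myservice` soit résolvable :

```shell
# Esquisse hypothétique : l'init container doit se terminer avec succès
# avant que le conteneur d'application ne démarre.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
spec:
  initContainers:
  - name: wait-for-service
    image: busybox:1.28
    command: ['sh', '-c', 'until nslookup myservice; do sleep 2; done']
  containers:
  - name: myapp-container
    image: busybox:1.28
    command: ['sh', '-c', 'echo app demarree && sleep 3600']
EOF
```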
Vous pouvez spécifier des init containers dans la spécification du Pod à côté du tableau `containers` (qui décrit les conteneurs d'application). -{{% /capture %}} -{{% capture body %}} + + ## Comprendre les init containers @@ -318,12 +318,13 @@ redémarrage du conteneur d'application. * Le conteneur d'infrastructure Pod est redémarré. Ceci est peu commun et serait effectué par une personne ayant un accès root aux nœuds. * Tous les conteneurs dans un Pod sont terminés tandis que `restartPolicy` est configurée à "Always", ce qui force le redémarrage, et l'enregistrement de complétion de l'init container a été perdu à cause d'une opération de garbage collection (récupération de mémoire). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Lire à propos de la [création d'un Pod ayant un init container](/docs/tasks/configure-pod-container/configure-pod-initialization/#creating-a-pod-that-has-an-init-container) * Apprendre à [debugger les init containers](/docs/tasks/debug-application-cluster/debug-init-containers/) -{{% /capture %}} + diff --git a/content/fr/docs/concepts/workloads/pods/pod-lifecycle.md b/content/fr/docs/concepts/workloads/pods/pod-lifecycle.md index d570b13bba..9a6f96d36a 100644 --- a/content/fr/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/fr/docs/concepts/workloads/pods/pod-lifecycle.md @@ -1,17 +1,17 @@ --- title: Cycle de vie d'un Pod -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Cette page décrit le cycle de vie d'un Pod. -{{% /capture %}} -{{% capture body %}} + + ## Phase du Pod @@ -381,10 +381,11 @@ spec: * Le contrôleur de Nœud passe la `phase` du Pod à Failed. * Si le Pod s'exécute sous un contrôleur, le Pod est recréé ailleurs. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Apprenez par la pratique à [attacher des handlers à des événements de cycle de vie d'un conteneur](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). @@ -394,7 +395,7 @@ spec: * En apprendre plus sur les [hooks de cycle de vie d'un Conteneur](/docs/concepts/containers/container-lifecycle-hooks/). -{{% /capture %}} + diff --git a/content/fr/docs/concepts/workloads/pods/pod-overview.md b/content/fr/docs/concepts/workloads/pods/pod-overview.md index 385ce5ab86..b1803ba5e0 100644 --- a/content/fr/docs/concepts/workloads/pods/pod-overview.md +++ b/content/fr/docs/concepts/workloads/pods/pod-overview.md @@ -1,18 +1,18 @@ --- title: Aperçu du Pod description: Pod Concept Kubernetes -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 60 --- -{{% capture overview %}} + Cette page fournit un aperçu du `Pod`, l'objet déployable le plus petit dans le modèle d'objets Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Comprendre les Pods @@ -98,11 +98,12 @@ spec: ``` Plutôt que de spécifier tous les états désirés courants de tous les réplicas, les templates de pod sont comme des emporte-pièces. Une fois qu'une pièce a été coupée, la pièce n'a plus de relation avec l'outil. Il n'y a pas de lien qui persiste dans le temps entre le template et le pod. Un changement à venir dans le template ou même le changement pour un nouveau template n'a pas d'effet direct sur les pods déjà créés. De manière similaire, les pods créés par un replication controller peuvent par la suite être modifiés directement.
C'est en contraste délibéré avec les pods, qui spécifient l'état désiré courant de tous les conteneurs appartenant au pod. Cette approche simplifie radicalement la sémantique système et augmente la flexibilité de la primitive. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * En savoir plus sur les [Pods](/docs/concepts/workloads/pods/pod/) * En savoir plus sur le comportement des Pods : * [Terminaison d'un Pod](/docs/concepts/workloads/pods/pod/#termination-of-pods) * [Cycle de vie d'un Pod](/docs/concepts/workloads/pods/pod-lifecycle/) -{{% /capture %}} + diff --git a/content/fr/docs/concepts/workloads/pods/pod.md b/content/fr/docs/concepts/workloads/pods/pod.md index b4a6f66bc7..4d685cca80 100644 --- a/content/fr/docs/concepts/workloads/pods/pod.md +++ b/content/fr/docs/concepts/workloads/pods/pod.md @@ -1,19 +1,19 @@ --- reviewers: title: Pods -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Les _Pods_ sont les plus petites unités informatiques déployables qui peuvent être créées et gérées dans Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Qu'est-ce qu'un pod ? @@ -196,4 +196,4 @@ spec.containers[0].securityContext.privileged: forbidden '<*>(0xc20b222db0)true' Le Pod est une ressource au plus haut niveau dans l'API REST Kubernetes. Plus de détails sur l'objet de l'API peuvent être trouvés à : [Objet de l'API Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core). -{{% /capture %}} + diff --git a/content/fr/docs/contribute/_index.md b/content/fr/docs/contribute/_index.md index 60170f8396..821136d52d 100644 --- a/content/fr/docs/contribute/_index.md +++ b/content/fr/docs/contribute/_index.md @@ -1,5 +1,5 @@ --- -content_template: templates/concept +content_type: concept title: Contribuer à la documentation Kubernetes description: Contribution documentation Kubernetes linktitle: Contribuer main_menu: true weight: 80 --- -{{% capture overview %}} + Si vous souhaitez contribuer à la documentation ou au site Web de Kubernetes, nous serons ravis de vous aider! Tout le monde peut contribuer, que vous soyez nouveau dans le projet ou que vous y travailliez depuis longtemps, et que vous vous identifiez vous-même en tant que développeur, utilisateur final ou quelqu'un qui ne supporte tout simplement pas les fautes de frappe. @@ -15,7 +15,7 @@ Tout le monde peut contribuer, que vous soyez nouveau dans le projet ou que vous Pour vous impliquer de plusieurs façons dans la communauté Kubernetes ou pour en savoir plus sur nous, visitez le [Site de la communauté Kubernetes](/community/). Pour plus d'informations sur le guide de style de la documentation Kubernetes, reportez-vous à la section [style guide](/docs/contribute/style/style-guide/).
-{{% capture body %}} + ## Types de contributeurs @@ -59,4 +59,4 @@ Il ne s'agit pas d'une liste exhaustive des manières dont vous pouvez contribue - Proposer des améliorations aux tests de documentation - Proposer des améliorations au site Web de Kubernetes ou à d'autres outils -{{% /capture %}} + diff --git a/content/fr/docs/contribute/advanced.md b/content/fr/docs/contribute/advanced.md index 7cf7ad7cba..1634ffd2bb 100644 --- a/content/fr/docs/contribute/advanced.md +++ b/content/fr/docs/contribute/advanced.md @@ -1,18 +1,18 @@ --- title: Contributions avancées slug: advanced -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Cette page suppose que vous avez lu et maîtrisé les sujets suivants : [Commencez à contribuer](/docs/contribute/start/) et [Contribution Intermédiaire](/docs/contribute/intermediate/) et êtes prêts à apprendre plus de façons de contribuer. Vous devez utiliser Git et d'autres outils pour certaines de ces tâches. -{{% /capture %}} -{{% capture body %}} + + ## Soyez le trieur de PR pendant une semaine @@ -91,4 +91,4 @@ Les nouveaux contributeurs docs peuvent demander des sponsors dans le canal #sig Si vous vous sentez confiant dans le travail des candidats, vous pouvez vous porter volontaire pour les parrainer. Lorsqu’ils soumettent leur demande d’adhésion, répondez-y avec un "+1" et indiquez les raisons pour lesquelles vous estimez que les demandeurs sont des candidat(e)s valables pour devenir membre de l’organisation Kubernetes. -{{% /capture %}} + diff --git a/content/fr/docs/contribute/generate-ref-docs/federation-api.md b/content/fr/docs/contribute/generate-ref-docs/federation-api.md index c0a792bfda..aa25853d64 100644 --- a/content/fr/docs/contribute/generate-ref-docs/federation-api.md +++ b/content/fr/docs/contribute/generate-ref-docs/federation-api.md @@ -1,16 +1,17 @@ --- title: Génération de la documentation de référence pour l'API de fédération Kubernetes description: Federation Référence API Kubernetes Documentation -content_template: templates/task +content_type: task --- -{{% capture overview %}} + Cette page montre comment générer automatiquement des pages de référence pour l'API de fédération Kubernetes. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Vous devez avoir [Git](https://git-scm.com/book/fr/v2/D%C3%A9marrage-rapide-Installation-de-Git) installé. @@ -22,9 +23,9 @@ Cette page montre comment générer automatiquement des pages de référence pou Généralement, cela implique la création d'un fork du dépôt. Pour plus d'informations, voir [Création d'une pull request de documentation](/docs/home/contribute/create-pull-request/).
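En pratique, la création d'un fork suit généralement ce schéma (le nom `<votre-utilisateur>` est un exemple hypothétique à remplacer par votre compte GitHub) :

```shell
# Schéma indicatif : cloner son fork et déclarer le dépôt d'origine
# comme remote "upstream".
git clone https://github.com/<votre-utilisateur>/federation.git
cd federation
git remote add upstream https://github.com/kubernetes/federation.git
```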
-{{% /capture %}} -{{% capture steps %}} + + ## Exécution du script update-federation-api-docs.sh @@ -64,12 +65,13 @@ Ces fichiers sont publiés à [kubernetes.io/docs/reference](/docs/reference/): * [Federation API extensions/v1beta1 Operations](/docs/reference/federation/extensions/v1beta1/operations/) * [Federation API extensions/v1beta1 Definitions](/docs/reference/federation/extensions/v1beta1/definitions/) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Génération de documentation de référence pour l'API Kubernetes](/docs/home/contribute/generated-reference/kubernetes-api/) * [Génération de documentation de référence pour les commandes kubectl](/docs/home/contribute/generated-reference/kubectl/) * [Génération de pages de référence pour les composants et les outils Kubernetes](/docs/home/contribute/generated-reference/kubernetes-components/) -{{% /capture %}} + diff --git a/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md b/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md index edf56ff826..9e00fb57b0 100644 --- a/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md +++ b/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md @@ -1,16 +1,17 @@ --- title: Génération de documentation de référence pour l'API Kubernetes description: Génération documentation référence API Kubernetes -content_template: templates/task +content_type: task --- -{{% capture overview %}} + Cette page montre comment mettre à jour les documents de référence générés automatiquement pour l'API Kubernetes. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Vous devez avoir ces outils installés: @@ -25,9 +26,9 @@ Vous devez savoir comment créer une pull request dans un dépôt GitHub. Généralement, cela implique la création d'un fork du dépôt. Pour plus d'informations, voir [Créer une Pull Request de documentation](/docs/home/contribute/create-pull-request/) et [GitHub Standard Fork & Pull Request Workflow](https://gist.github.com/Chaser324/ce0505fbed06b947d962). -{{% /capture %}} -{{% capture steps %}} + + ## Généralités @@ -327,12 +328,13 @@ Continuez à surveiller votre pull request jusqu'à ce qu'elle ait été mergée Quelques minutes après que votre pull request aura été fusionnée, vos modifications seront visibles dans la [documentation de référence publiée](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/).
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Génération de documents de référence pour les composants et les outils Kubernetes](/docs/home/contribute/generated-reference/kubernetes-components/) * [Génération de documentation de référence pour les commandes kubectl](/docs/home/contribute/generated-reference/kubectl/) * [Génération de documentation de référence pour l'API de fédération Kubernetes](/docs/home/contribute/generated-reference/federation-api/) -{{% /capture %}} + diff --git a/content/fr/docs/contribute/generate-ref-docs/kubernetes-components.md b/content/fr/docs/contribute/generate-ref-docs/kubernetes-components.md index 7bdf6fadd4..e473789e46 100644 --- a/content/fr/docs/contribute/generate-ref-docs/kubernetes-components.md +++ b/content/fr/docs/contribute/generate-ref-docs/kubernetes-components.md @@ -1,15 +1,16 @@ --- title: Génération de pages de référence pour les composants et les outils Kubernetes -content_template: templates/task +content_type: task --- -{{% capture overview %}} + Cette page montre comment utiliser l'outil `update-importer-docs` pour générer une documentation de référence pour les outils et les composants des dépôts [Kubernetes](https://github.com/kubernetes/kubernetes) et [Federation](https://github.com/kubernetes/federation). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Vous avez besoin d'une machine qui exécute Linux ou macOS. @@ -29,9 +30,9 @@ Cette page montre comment utiliser l'outil `update-importer-docs` pour générer Cela implique généralement la création d’un fork d'un dépôt. Pour plus d'informations, consultez [Créer une Pull Request de documentation](/docs/home/contribute/create-pull-request/). -{{% /capture %}} -{{% capture steps %}} + + ## Obtenir deux dépôts @@ -193,12 +194,13 @@ Consultez votre pull request et répondez aux corrections suggérées par les relecteurs. Quelques minutes après le merge de votre pull request, vos références mises à jour seront visibles dans la [documentation publiée](/docs/home/). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Génération de documentation de référence pour les commandes kubectl](/docs/home/contribute/generated-reference/kubectl/) * [Génération de documentation de référence pour l'API Kubernetes](/fr/docs/contribute/generate-ref-docs/kubernetes-api/) * [Génération de documentation de référence pour l'API de fédération Kubernetes](/docs/home/contribute/generated-reference/federation-api/)
-{{% /capture %}} + diff --git a/content/fr/docs/contribute/localization.md b/content/fr/docs/contribute/localization.md index f07082672f..91667afdd2 100644 --- a/content/fr/docs/contribute/localization.md +++ b/content/fr/docs/contribute/localization.md @@ -1,20 +1,20 @@ --- title: Traduction de la documentation Kubernetes -content_template: templates/concept +content_type: concept card: name: contribute weight: 30 title: Translating the docs --- -{{% capture overview %}} + La documentation de Kubernetes est disponible dans plusieurs langues. Nous vous encourageons à ajouter de nouvelles [traductions](https://blog.mozilla.org/l10n/2011/12/14/i18n-vs-l10n-whats-the-diff/)! -{{% /capture %}} -{{% capture body %}} + + ## Commencer @@ -221,13 +221,14 @@ Pour plus d'informations sur le travail à partir de forks ou directement à par SIG Docs souhaite la bienvenue aux [contributions et corrections upstream](/docs/contribute/intermediate#localize-content) à la source anglaise. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Une fois qu'une traduction répond aux exigences de logistique et à une couverture admissible, le SIG docs se chargera des tâches suivantes: - Activer la sélection de la langue sur le site Web - Publier la disponibilité de la traduction via les canaux de la [Cloud Native Computing Foundation](https://www.cncf.io/), y compris sur le blog de [Kubernetes](https://kubernetes.io/blog/). -{{% /capture %}} + diff --git a/content/fr/docs/contribute/participating.md b/content/fr/docs/contribute/participating.md index 199f2d5f33..34015b19b4 100644 --- a/content/fr/docs/contribute/participating.md +++ b/content/fr/docs/contribute/participating.md @@ -1,12 +1,12 @@ --- title: Participez au SIG Docs -content_template: templates/concept +content_type: concept card: name: contribute weight: 40 --- -{{% capture overview %}} + SIG Docs est l'un des [groupes d'intérêts spéciaux](https://github.com/kubernetes/community/blob/master/sig-list.md) au sein du projet Kubernetes, axé sur la rédaction, la mise à jour et la maintenance de la documentation de Kubernetes dans son ensemble. Pour plus d'informations sur le SIG, consultez [le dépôt GitHub de la communauté](https://github.com/kubernetes/community/tree/master/sig-docs). @@ -19,9 +19,9 @@ Ces rôles nécessitent un plus grand accès et impliquent certaines responsabil Voir [appartenance à la communauté](https://github.com/kubernetes/community/blob/master/community-membership.md) pour plus d'informations sur le fonctionnement de l'adhésion au sein de la communauté Kubernetes. Le reste de ce document décrit certaines fonctions uniques de ces rôles au sein du SIG Docs, responsable de la gestion de l’un des aspects les plus accessibles du public de Kubernetes: le site Web et la documentation de Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Rôles et responsabilités @@ -194,13 +194,14 @@ En outre, un fichier Markdown individuel peut répertorier les relecteurs et les La combinaison des fichiers `OWNERS` et des entêtes dans les fichiers Markdown détermine les suggestions automatiques de relecteurs dans la PullRequest. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Pour plus d'informations sur la contribution à la documentation Kubernetes, voir: - [Commencez à contribuer](/docs/contribute/start/) - [Documentation style](/docs/contribute/style/) -{{% /capture %}} + diff --git a/content/fr/docs/contribute/start.md b/content/fr/docs/contribute/start.md index 39eee2a39d..7c13bbf506 100644 --- a/content/fr/docs/contribute/start.md +++ b/content/fr/docs/contribute/start.md @@ -2,14 +2,14 @@ title: Commencez à contribuer description: Démarrage contribution Kubernetes slug: start -content_template: templates/concept +content_type: concept weight: 10 card: name: contribute weight: 10 --- -{{% capture overview %}} + Si vous souhaitez commencer à contribuer à la documentation de Kubernetes, cette page et les rubriques associées peuvent vous aider à démarrer. Vous n'avez pas besoin d'être un développeur ou un rédacteur technique pour avoir un impact important sur la documentation et l'expérience utilisateur de Kubernetes !
@@ -17,9 +17,9 @@ Tout ce dont vous avez besoin pour les sujets de cette page est un compte [GitHu Si vous recherchez des informations sur la façon de commencer à contribuer aux référentiels de code Kubernetes, reportez-vous à la section sur [les directives de la communauté Kubernetes](https://github.com/kubernetes/community/blob/master/governance.md). -{{% /capture %}} -{{% capture body %}} + + ## Les bases de notre documentation @@ -282,10 +282,11 @@ Elles sont écrites en collaboration avec l'équipe marketing de Kubernetes, qui Regardez la source des [études de cas existantes](https://github.com/kubernetes/website/tree/master/content/en/case-studies). Utilisez le [Formulaire de soumission d'étude de cas Kubernetes](https://www.cncf.io/people/end-user-community/) pour soumettre votre proposition. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Si vous êtes à l'aise avec toutes les tâches décrites dans cette rubrique et que vous souhaitez vous engager plus profondément dans l'équipe de documentation de Kubernetes, lisez le [guide de contribution de la documentation intermédiaire](/docs/contribute/intermediate/). -{{% /capture %}} + diff --git a/content/fr/docs/contribute/style/content-organization.md b/content/fr/docs/contribute/style/content-organization.md index fd0efdad07..057108ac00 100644 --- a/content/fr/docs/contribute/style/content-organization.md +++ b/content/fr/docs/contribute/style/content-organization.md @@ -1,18 +1,18 @@ --- title: Organisation du contenu -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Ce site utilise Hugo. Dans Hugo, l'[organisation du contenu](https://gohugo.io/content-management/organization/) est un concept de base. -{{% /capture %}} -{{% capture body %}} + + {{% note %}} **Astuce Hugo:** Démarrez Hugo avec `hugo server --navigateToChanged` pour les sessions d'édition de contenu. @@ -134,11 +134,12 @@ Quelques notes importantes sur les fichiers dans les paquets : La source `SASS` des feuilles de style pour ce site est stockée sous `src/sass` et peut être construite avec `make sass` (notez que Hugo aura bientôt le support `SASS`, voir . -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Hugo shortcodes personnalisés](/docs/contribute/style/hugo-shortcodes/) * [Style guide](/docs/contribute/style/style-guide) -{{% /capture %}} + diff --git a/content/fr/docs/contribute/style/hugo-shortcodes/index.md b/content/fr/docs/contribute/style/hugo-shortcodes/index.md index 359066ba8a..0de8705e69 100644 --- a/content/fr/docs/contribute/style/hugo-shortcodes/index.md +++ b/content/fr/docs/contribute/style/hugo-shortcodes/index.md @@ -1,16 +1,16 @@ --- title: Hugo Shortcodes personnalisés -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Cette page explique les shortcodes Hugo personnalisés pouvant être utilisés dans la documentation Markdown de Kubernetes. En savoir plus sur les shortcodes dans la [documentation Hugo](https://gohugo.io/content-management/shortcodes). -{{% /capture %}} -{{% capture body %}} + + ## Etat de la fonctionnalité @@ -208,9 +208,10 @@ Rend à: {{< tab name="JSON File" include="podtemplate" />}} {{< /tabs >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * En savoir plus sur [Hugo](https://gohugo.io/). * En savoir plus sur [écrire un nouveau sujet](/docs/home/contribute/write-new-topic/).
@@ -218,4 +219,4 @@ Rend à: * En savoir plus sur [staging your changes](/docs/home/contribute/stage-documentation-changes/) * En savoir plus sur [créer une pull request](/docs/home/contribute/create-pull-request/). -{{% /capture %}} + diff --git a/content/fr/docs/contribute/style/page-templates.md b/content/fr/docs/contribute/style/page-templates.md index 17cfdec6d1..23625c7fdc 100644 --- a/content/fr/docs/contribute/style/page-templates.md +++ b/content/fr/docs/contribute/style/page-templates.md @@ -1,13 +1,13 @@ --- title: Utilisation des modèles de page -content_template: templates/concept +content_type: concept weight: 30 card: name: contribute weight: 30 --- -{{% capture overview %}} + Lorsque vous ajoutez de nouveaux sujets, appliquez-leur l'un des templates suivants. Ceci standardise l'expérience utilisateur d'une page donnée. @@ -19,9 +19,9 @@ Chaque nouveau sujet doit utiliser un modèle. Si vous n'êtes pas sûr du modèle à utiliser pour un nouveau sujet, commencez par un [template concept](#concept-template). {{< /note >}} -{{% /capture %}} -{{% capture body %}} + + ## Concept template @@ -31,7 +31,7 @@ Généralement, les pages conceptuelles n'incluent pas de séquences d'étapes, Pour écrire une nouvelle page concept, créez un fichier Markdown dans un sous-répertoire du répertoire `/content/fr/docs/concepts`, avec les caractéristiques suivantes : -- Dans l'entête YAML de la page, définissez `content_template: templates/concept`. +- Dans l'entête YAML de la page, définissez `content_type: concept`. - Dans le corps de la page, définissez les variables `capture` requises et les variables optionnelles que vous voulez inclure : | Variable | Required? | @@ -72,7 +72,7 @@ Les pages de tâches ont une explication minimale, mais fournissent souvent des Pour écrire une nouvelle page de tâches, créez un fichier Markdown dans un sous-répertoire du répertoire `/content/fr/docs/tasks`, avec les caractéristiques suivantes : -- Dans l'entête YAML de la page, définissez `content_template: templates/task`. +- Dans l'entête YAML de la page, définissez `content_type: task`. - Dans le corps de la page, définissez les variables `capture` requises et les variables optionnelles que vous voulez inclure : | Variable | Required? | @@ -132,7 +132,7 @@ Les didacticiels peuvent inclure des explications au niveau de la surface, mais Pour écrire une nouvelle page de tutoriel, créez un fichier Markdown dans un sous-répertoire du répertoire `/content/fr/docs/tutorials`, avec les caractéristiques suivantes : -- Dans l'entête YAML de la page, définissez `content_template: templates/tutorial`. +- Dans l'entête YAML de la page, définissez `content_type: tutorial`. - Dans le corps de la page, définissez les variables `capture` requises et les variables optionnelles que vous voulez inclure : | Variable | Required? | @@ -187,11 +187,12 @@ Pour écrire une nouvelle page de tutoriel, créez un fichier Markdown dans un s Voici un exemple de sujet publié qui utilise le modèle de tutoriel [Running a Stateless Application Using a Deployment](/docs/tutorials/stateless-application/run-stateless-application-deployment/). 
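À titre d'illustration, l'entête d'une nouvelle page déclarant son type via `content_type` peut être créé ainsi (esquisse : le chemin, le titre et le poids sont des exemples hypothétiques) :

```shell
# Crée le squelette d'un nouveau tutoriel avec l'entête YAML attendu.
cat <<'EOF' > content/fr/docs/tutorials/mon-tutoriel.md
---
title: Mon tutoriel
content_type: tutorial
weight: 10
---
EOF
```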
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

- En savoir plus sur le [style guide](/docs/contribute/style/style-guide/)
- En savoir plus sur l'[organisation des contenus](/docs/contribute/style/content-organization/)

-{{% /capture %}}
+
diff --git a/content/fr/docs/contribute/style/style-guide.md b/content/fr/docs/contribute/style/style-guide.md
index 27b530d0b6..6e282615d0 100644
--- a/content/fr/docs/contribute/style/style-guide.md
+++ b/content/fr/docs/contribute/style/style-guide.md
@@ -1,7 +1,7 @@
---
title: Documentation Style Guide
linktitle: Style guide
-content_template: templates/concept
+content_type: concept
weight: 10
card:
  name: contribute
@@ -9,15 +9,15 @@ card:
  title: Documentation Style Guide
---

-{{% capture overview %}}
+
Cette page donne des directives de style d'écriture pour la documentation de Kubernetes.
Ce sont des lignes directrices, pas des règles.
Faites preuve de discernement et n'hésitez pas à proposer des modifications à ce document dans le cadre d'une pull request.

Pour plus d'informations sur la création de nouveau contenu pour les documents Kubernetes, suivez les instructions sur [l'utilisation des templates](/fr/docs/contribute/style/page-templates/) et [création d'une pull request de documentation](/fr/docs/contribute/start/#improve-existing-content).

-{{% /capture %}}
-{{% capture body %}}
+
+

{{< note >}}
La documentation de Kubernetes utilise [Blackfriday Markdown Renderer](https://github.com/russross/blackfriday) ainsi que quelques [Hugo Shortcodes](/docs/home/contribute/includes/) pour prendre en charge les entrées de glossaire, les onglets et la représentation de l'état des fonctionnalités.
@@ -403,13 +403,14 @@ Une caractéristique qui est nouvelle aujourd'hui pourrait ne pas être considé
| Dans la version 1.4, ... | Dans la version actuelle, ... |
| La fonction de fédération offre ... | La nouvelle fonctionnalité de la Fédération offre ... |

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* En savoir plus sur [writing a new topic](/docs/home/contribute/write-new-topic/).
* En savoir plus sur [using page templates](/docs/home/contribute/page-templates/).
* En savoir plus sur [staging your changes](/docs/home/contribute/stage-documentation-changes/)
* En savoir plus sur [creating a pull request](/docs/home/contribute/create-pull-request/).

-{{% /capture %}}
+
diff --git a/content/fr/docs/contribute/style/write-new-topic.md b/content/fr/docs/contribute/style/write-new-topic.md
index d5f1575b1c..1027da53b6 100644
--- a/content/fr/docs/contribute/style/write-new-topic.md
+++ b/content/fr/docs/contribute/style/write-new-topic.md
@@ -1,18 +1,19 @@
---
title: Rédiger un nouveau sujet
-content_template: templates/task
+content_type: task
weight: 20
---

-{{% capture overview %}}
+
Cette page montre comment créer un nouveau sujet pour la documentation Kubernetes.

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

Créez un fork du dépôt de la documentation de Kubernetes comme décrit dans [Commencez à contribuer](/fr/docs/contribute/start/).

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Choisir un type de page

@@ -143,12 +144,13 @@ Pour un exemple d'un sujet qui utilise cette technique, voir [Running a Single-I

Placez les fichiers images dans le répertoire `/images`. Le format d'image préféré est SVG.
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* En savoir plus sur [l'utilisation des templates de pages](/docs/home/contribute/page-templates/).
* En savoir plus sur [le staging de vos changements](/docs/home/contribute/stage-documentation-changes/).
* En savoir plus sur [la création d'une pull request](/docs/home/contribute/create-pull-request/).

-{{% /capture %}}
+
diff --git a/content/fr/docs/home/supported-doc-versions.md b/content/fr/docs/home/supported-doc-versions.md
index 7f0e2f2a97..afd204d041 100644
--- a/content/fr/docs/home/supported-doc-versions.md
+++ b/content/fr/docs/home/supported-doc-versions.md
@@ -1,20 +1,20 @@
---
title: Versions supportées de la documentation Kubernetes
description: Documentation de Kubernetes
-content_template: templates/concept
+content_type: concept
card:
  name: about
  weight: 10
  title: Versions supportées de la documentation
---

-{{% capture overview %}}
+

Ce site contient la documentation de la version actuelle de Kubernetes et des quatre versions précédentes.

-{{% /capture %}}
-{{% capture body %}}
+
+

## Version courante

@@ -24,4 +24,4 @@ La version actuelle est [{{< param "version" >}}](/).

{{< versions-other >}}

-{{% /capture %}}
+
diff --git a/content/fr/docs/reference/_index.md b/content/fr/docs/reference/_index.md
index 514767f5ed..fac9ff8e49 100644
--- a/content/fr/docs/reference/_index.md
+++ b/content/fr/docs/reference/_index.md
@@ -3,16 +3,16 @@ title: Documents de Référence
linkTitle: "Référence"
main_menu: true
weight: 70
-content_template: templates/concept
+content_type: concept
---

-{{% capture overview %}}
+

Cette section de la documentation de Kubernetes contient les informations de référence.

-{{% /capture %}}
-{{% capture body %}}
+
+

## Documents de Référence de l'API

@@ -55,4 +55,4 @@ Pour appeler l'API de Kubernetes depuis un langage de programmation on peut util
* [Architecture de Kubernetes](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md)
* [Vue d'ensemble des documents de conception de Kubernetes](https://git.k8s.io/community/contributors/design-proposals).

-{{% /capture %}}
+
diff --git a/content/fr/docs/reference/kubectl/cheatsheet.md b/content/fr/docs/reference/kubectl/cheatsheet.md
index aa39822757..a50eb8f320 100644
--- a/content/fr/docs/reference/kubectl/cheatsheet.md
+++ b/content/fr/docs/reference/kubectl/cheatsheet.md
@@ -5,21 +5,21 @@ reviewers:
- rbenzair
- feloy
- remyleone
-content_template: templates/concept
+content_type: concept
card:
  name: reference
  weight: 30
---

-{{% capture overview %}}
+

Voir aussi : [Aperçu Kubectl](/docs/reference/kubectl/overview/) et [Guide JsonPath](/docs/reference/kubectl/jsonpath).

Cette page donne un aperçu de la commande `kubectl`.

-{{% /capture %}}
-{{% capture body %}}
+
+

# Aide-mémoire kubectl

@@ -384,9 +384,10 @@ Verbosité | Description
`--v=8` | Affiche les contenus des requêtes HTTP.
`--v=9` | Affiche les contenus des requêtes HTTP sans les tronquer.
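Par exemple, pour diagnostiquer un appel à l'API, on peut relancer la même commande avec un niveau de verbosité plus élevé (esquisse ; le namespace est un exemple hypothétique) :

```shell
# --v=6 journalise les URL des requêtes envoyées à l'API server ;
# monter à --v=8 pour voir aussi les contenus HTTP (cf. tableau ci-dessus).
kubectl get pods --namespace=default --v=6
```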
-{{% /capture %}} + diff --git a/content/fr/docs/reference/kubectl/conventions.md b/content/fr/docs/reference/kubectl/conventions.md index 8b458871f6..03d16758a7 100644 --- a/content/fr/docs/reference/kubectl/conventions.md +++ b/content/fr/docs/reference/kubectl/conventions.md @@ -1,14 +1,14 @@ --- title: Conventions d'utilisation de kubectl description: kubectl conventions -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Conventions d'utilisation recommandées pour `kubectl`. -{{% /capture %}} -{{% capture body %}} + + ## Utiliser `kubectl` dans des scripts réutilisables @@ -58,4 +58,4 @@ Vous pouvez générer les ressources suivantes avec une commande kubectl, `kubec * Vous pouvez utiliser `kubectl apply` pour créer ou mettre à jour des ressources. Pour plus d'informations sur l'utilisation de `kubectl apply` pour la mise à jour de ressources, voir le [livre Kubectl](https://kubectl.docs.kubernetes.io). -{{% /capture %}} + diff --git a/content/fr/docs/reference/kubectl/jsonpath.md b/content/fr/docs/reference/kubectl/jsonpath.md index 427ae93516..9df389897b 100644 --- a/content/fr/docs/reference/kubectl/jsonpath.md +++ b/content/fr/docs/reference/kubectl/jsonpath.md @@ -1,15 +1,15 @@ --- title: Support de JSONPath description: JSONPath kubectl Kubernetes -content_template: templates/concept +content_type: concept weight: 25 --- -{{% capture overview %}} + Kubectl prend en charge les modèles JSONPath. -{{% /capture %}} -{{% capture body %}} + + Un modèle JSONPath est composé d'expressions JSONPath entourées par des accolades {}. Kubectl utilise les expressions JSONPath pour filtrer sur des champs spécifiques de l'objet JSON et formater la sortie. @@ -101,4 +101,4 @@ kubectl get pods -o=jsonpath="{range .items[*]}{.metadata.name}{\"\t\"}{.status. {{< /note >}} -{{% /capture %}} + diff --git a/content/fr/docs/reference/kubectl/kubectl.md b/content/fr/docs/reference/kubectl/kubectl.md index ceaa94b6c5..64a3c89ce1 100755 --- a/content/fr/docs/reference/kubectl/kubectl.md +++ b/content/fr/docs/reference/kubectl/kubectl.md @@ -4,7 +4,8 @@ content_template: templates/tool-reference description: Référence kubectl notitle: true --- -{{% capture synopsis %}} +## {{% heading "synopsis" %}} + kubectl contrôle le manager d'un cluster Kubernetes @@ -14,9 +15,10 @@ Vous trouverez plus d'informations ici : https://kubernetes.io/fr/docs/reference kubectl [flags] ``` -{{% /capture %}} -{{% capture options %}} + +## {{% heading "options" %}} +
@@ -506,9 +508,10 @@ kubectl [flags]

-{{% /capture %}}
-{{% capture seealso %}}
+
+## {{% heading "seealso" %}}
+

* [kubectl alpha](/docs/reference/generated/kubectl/kubectl-commands#alpha) - Commandes pour fonctionnalités alpha
* [kubectl annotate](/docs/reference/generated/kubectl/kubectl-commands#annotate) - Met à jour les annotations d'une ressource
@@ -554,4 +557,4 @@ kubectl [flags]
* [kubectl version](/docs/reference/generated/kubectl/kubectl-commands#version) - Affiche les informations de version du client et du serveur
* [kubectl wait](/docs/reference/generated/kubectl/kubectl-commands#wait) - Expérimental : Attend une condition particulière sur une ou plusieurs ressources

-{{% /capture %}}
+
diff --git a/content/fr/docs/reference/kubectl/overview.md b/content/fr/docs/reference/kubectl/overview.md
index 01d36d469e..1f69adc999 100644
--- a/content/fr/docs/reference/kubectl/overview.md
+++ b/content/fr/docs/reference/kubectl/overview.md
@@ -1,22 +1,22 @@
---
title: Aperçu de kubectl
description: kubectl référence
-content_template: templates/concept
+content_type: concept
weight: 20
card:
  name: reference
  weight: 20
---

-{{% capture overview %}}
+
Kubectl est un outil en ligne de commande pour contrôler des clusters Kubernetes. `kubectl` recherche un fichier appelé config dans le répertoire $HOME/.kube. Vous pouvez spécifier d'autres fichiers [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) en définissant la variable d'environnement KUBECONFIG ou en utilisant le paramètre [`--kubeconfig`](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/).

Cet aperçu couvre la syntaxe `kubectl`, décrit les opérations et fournit des exemples classiques. Pour des détails sur chaque commande, incluant toutes les options et sous-commandes autorisées, voir la documentation de référence de [kubectl](/docs/reference/generated/kubectl/kubectl-commands/). Pour des instructions d'installation, voir [installer kubectl](/docs/tasks/kubectl/install/).

-{{% /capture %}}
-{{% capture body %}}
+
+

## Syntaxe

@@ -473,10 +473,11 @@ Current user: plugins-user
Pour en savoir plus sur les plugins, examinez [l'exemple de plugin CLI](https://github.com/kubernetes/sample-cli-plugin).

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

Commencez à utiliser les commandes [kubectl](/docs/reference/generated/kubectl/kubectl-commands/).

-{{% /capture %}}
+
diff --git a/content/fr/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/fr/docs/reference/setup-tools/kubeadm/kubeadm-init.md
index 1b8fd99ed2..dcd43b3634 100644
--- a/content/fr/docs/reference/setup-tools/kubeadm/kubeadm-init.md
+++ b/content/fr/docs/reference/setup-tools/kubeadm/kubeadm-init.md
@@ -1,13 +1,13 @@
---
title: kubeadm init
-content_template: templates/concept
+content_type: concept
weight: 20
---
-{{% capture overview %}}
+
Cette commande initialise un noeud Kubernetes control-plane.

-{{% /capture %}}
-{{% capture body %}}
+
+

{{< include "generated/kubeadm_init.md" >}}

@@ -293,11 +293,12 @@ et les utiliser pour communiquer avec le cluster.

Vous remarquerez que ce type d'installation présente un niveau de sécurité inférieur puisqu'il ne permet pas la validation du hash du certificat racine avec `--discovery-token-ca-cert-hash` (puisqu'il n'est pas généré quand les noeuds sont provisionnés). Pour plus d'information, se référer à [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/).
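À titre d'esquisse, une jonction sans épinglage du certificat CA ressemble à ceci (adresse et token purement hypothétiques) ; à réserver aux environnements où ce compromis de sécurité est acceptable :

```shell
# Sans --discovery-token-ca-cert-hash, le client fait confiance
# au premier API server qui répond : sécurité moindre, comme indiqué ci-dessus.
kubeadm join 192.168.0.10:6443 --token abcdef.0123456789abcdef \
    --discovery-token-unsafe-skip-ca-verification
```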
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* [kubeadm init phase](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/) pour mieux comprendre les phases `kubeadm init`
* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) pour amorcer un nœud worker Kubernetes et le joindre au cluster
* [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade/) pour mettre à jour un cluster Kubernetes vers une version plus récente
* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) pour annuler les changements appliqués avec `kubeadm init` ou `kubeadm join` à un noeud

-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/_index.md b/content/fr/docs/setup/_index.md
index 23983eae9f..37161dbc0c 100644
--- a/content/fr/docs/setup/_index.md
+++ b/content/fr/docs/setup/_index.md
@@ -10,9 +10,9 @@ title: Installation
description: Panorama de solutions Kubernetes
main_menu: true
weight: 30
-content_template: templates/concept
+content_type: concept
---
-{{% capture overview %}}
+

Utilisez cette page pour trouver le type de solution qui correspond le mieux à vos besoins.

@@ -20,9 +20,9 @@ Le choix de distribution Kubernetes dépend des ressources dont vous disposez et

Vous pouvez exécuter Kubernetes presque partout, de votre ordinateur portable aux machines virtuelles d'un fournisseur de cloud jusqu'à un rack de serveurs en bare metal. Vous pouvez également mettre en place un cluster entièrement géré en exécutant une seule commande ou bien créer votre propre cluster personnalisé sur vos serveurs bare-metal.

-{{% /capture %}}
-{{% capture body %}}
+
+

## Solutions locales

@@ -86,8 +86,9 @@ différents systèmes d'exploitation.

Choisissez une [solution personnalisée](/fr/docs/setup/pick-right-solution/#solutions-personnalisées).

-{{% /capture %}}
-{{% capture whatsnext %}}
+## {{% heading "whatsnext" %}}
+

Allez à [Choisir la bonne solution](/fr/docs/setup/pick-right-solution/) pour une liste complète de solutions.

-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/custom-cloud/coreos.md b/content/fr/docs/setup/custom-cloud/coreos.md
index 4b18c56c8a..4b2f2f56a8 100644
--- a/content/fr/docs/setup/custom-cloud/coreos.md
+++ b/content/fr/docs/setup/custom-cloud/coreos.md
@@ -1,16 +1,16 @@
---
title: CoreOS sur AWS ou GCE
description: Installation Kubernetes CoreOS sur AWS GCE
-content_template: templates/concept
+content_type: concept
---

-{{% capture overview %}}
+

Il existe plusieurs guides permettant d'utiliser Kubernetes avec [CoreOS](https://coreos.com/kubernetes/docs/latest/).

-{{% /capture %}}
-{{% capture body %}}
+
+

## Guides officiels CoreOS

@@ -87,4 +87,4 @@ Ces guides sont maintenus par des membres de la communauté et couvrent des beso

Pour le niveau de support de toutes les solutions se référer au [Tableau des solutions](/docs/getting-started-guides/#table-of-solutions).

-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/custom-cloud/kops.md b/content/fr/docs/setup/custom-cloud/kops.md
index 81ebe89ab2..297ce01b73 100644
--- a/content/fr/docs/setup/custom-cloud/kops.md
+++ b/content/fr/docs/setup/custom-cloud/kops.md
@@ -1,10 +1,10 @@
---
title: Installer Kubernetes sur AWS avec kops
description: Installation Kubernetes avec kops sur AWS
-content_template: templates/concept
+content_type: concept
---

-{{% capture overview %}}
+

Cette documentation pour un démarrage rapide montre comment installer facilement un cluster Kubernetes sur AWS.
L'outil utilisé est [`kops`](https://github.com/kubernetes/kops).

@@ -21,9 +21,9 @@ kops est un système de provisionnement dont les principes sont:

Si ces principes ne vous conviennent pas, vous préférerez probablement construire votre propre cluster selon votre convenance grâce à [kubeadm](/docs/admin/kubeadm/).

-{{% /capture %}}
-{{% capture body %}}
+
+

## Créer un cluster

@@ -211,12 +211,13 @@ Reportez-vous à la [liste des add-ons] (/docs/concepts/cluster-administration/a
* Channel Slack: [#kops-users](https://kubernetes.slack.com/messages/kops-users/)
* [Problèmes GitHub](https://github.com/kubernetes/kops/issues)

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* En apprendre davantage sur les [concepts](/docs/concepts/) Kubernetes et [`kubectl`](/docs/user-guide/kubectl-overview/).
* En savoir plus sur les [utilisations avancées](https://github.com/kubernetes/kops) de `kops`.
* Pour les bonnes pratiques et les options de configuration avancées de `kops` se référer à la [documentation](https://github.com/kubernetes/kops)

-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/custom-cloud/kubespray.md b/content/fr/docs/setup/custom-cloud/kubespray.md
index 926295b7ac..2e10c21f46 100644
--- a/content/fr/docs/setup/custom-cloud/kubespray.md
+++ b/content/fr/docs/setup/custom-cloud/kubespray.md
@@ -1,10 +1,10 @@
---
title: Installer Kubernetes avec Kubespray (on-premises et fournisseurs de cloud)
description: Installation de Kubernetes avec Kubespray
-content_template: templates/concept
+content_type: concept
---

-{{% capture overview %}}
+

Cette documentation permet d'installer rapidement un cluster Kubernetes hébergé sur GCE, Azure, Openstack, AWS, vSphere, Oracle Cloud Infrastructure (expérimental) ou sur des serveurs physiques (bare metal) grâce à [Kubespray](https://github.com/kubernetes-incubator/kubespray).

@@ -23,9 +23,9 @@ Kubespray se base sur des outils de provisioning, des [paramètres](https://gith

Afin de choisir l'outil le mieux adapté à votre besoin, veuillez lire [cette comparaison](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/comparisons.md) avec [kubeadm](/docs/admin/kubeadm/) et [kops](../kops).

-{{% /capture %}}
-{{% capture body %}}
+
+

## Créer un cluster

@@ -116,10 +116,11 @@ Quand vous utilisez le playbook `reset`, assurez-vous de ne pas cibler accidente
* Channel Slack: [#kubespray](https://kubernetes.slack.com/messages/kubespray/)
* [Issues GitHub](https://github.com/kubernetes-incubator/kubespray/issues)

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

Jetez un oeil aux travaux prévus sur Kubespray: [roadmap](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/roadmap.md).
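Pour fixer les idées, l'invocation Ansible type de Kubespray ressemble à l'esquisse ci-dessous (chemins d'inventaire supposés, à adapter à votre clone du dépôt) :

```shell
# Depuis un clone de kubespray : copie de l'inventaire d'exemple, puis déploiement.
cp -r inventory/sample inventory/moncluster
ansible-playbook -i inventory/moncluster/hosts.ini cluster.yml -b -v
```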
-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/independent/control-plane-flags.md b/content/fr/docs/setup/independent/control-plane-flags.md
index faea105867..746602d8b4 100644
--- a/content/fr/docs/setup/independent/control-plane-flags.md
+++ b/content/fr/docs/setup/independent/control-plane-flags.md
@@ -1,11 +1,11 @@
---
title: Personnalisation de la configuration du control plane avec kubeadm
description: Personnalisation de la configuration du control plane avec kubeadm
-content_template: templates/concept
+content_type: concept
weight: 40
---

-{{% capture overview %}}
+

{{< feature-state for_k8s_version="1.12" state="stable" >}}

@@ -27,9 +27,9 @@ pour un composant du control plane:

Pour plus de détails sur chaque champ de la configuration, vous pouvez accéder aux [pages de référence de l'API](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#ClusterConfiguration).

-{{% /capture %}}
-{{% capture body %}}
+
+

## Paramètres pour l'API Server

@@ -86,4 +86,4 @@ scheduler:
  kubeconfig: /home/johndoe/kubeconfig.yaml
```

-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/independent/create-cluster-kubeadm.md b/content/fr/docs/setup/independent/create-cluster-kubeadm.md
index 8f93ee7f50..a2ac112b3a 100644
--- a/content/fr/docs/setup/independent/create-cluster-kubeadm.md
+++ b/content/fr/docs/setup/independent/create-cluster-kubeadm.md
@@ -1,11 +1,11 @@
---
title: Création d'un Cluster à master unique avec kubeadm
description: Création d'un Cluster à master unique avec kubeadm
-content_template: templates/task
+content_type: task
weight: 30
---

-{{% capture overview %}}
+**kubeadm** vous aide à démarrer un cluster Kubernetes minimum, viable et conforme aux meilleures pratiques. Avec kubeadm, votre cluster
@@ -78,18 +78,19 @@ problème de sécurité est trouvé. Voici les dernières versions de Kubernetes
| v1.12.x | Septembre 2018 | Juin 2019 |
| v1.13.x | Décembre 2018 | Septembre 2019 |

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

- Une ou plusieurs machines exécutant un système d'exploitation compatible deb/rpm, par exemple Ubuntu ou CentOS
- 2 Go ou plus de RAM par machine. Si vous en avez moins, cela laissera trop peu de place pour vos applications.
- 2 processeurs ou plus sur le master
- Connectivité réseau entre toutes les machines du cluster, qu'il soit public ou privé.

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Objectifs
diff --git a/content/fr/docs/setup/independent/ha-topology.md b/content/fr/docs/setup/independent/ha-topology.md
index 1253183c50..cd0b6aec36 100644
--- a/content/fr/docs/setup/independent/ha-topology.md
+++ b/content/fr/docs/setup/independent/ha-topology.md
@@ -1,11 +1,11 @@
---
title: Options pour la topologie en haute disponibilité
description: Topologie haute-disponibilité Kubernetes
-content_template: templates/concept
+content_type: concept
weight: 50
---

-{{% capture overview %}}
+

Cette page explique les deux options de configuration de topologie de vos clusters Kubernetes pour la haute disponibilité.

@@ -17,9 +17,9 @@ Vous pouvez configurer un cluster en haute disponibilité:

Vous devez examiner attentivement les avantages et les inconvénients de chaque topologie avant de configurer un cluster en haute disponibilité.
-{{% /capture %}}
-{{% capture body %}}
+
+

## Topologie etcd empilée

@@ -73,10 +73,10 @@ Un minimum de trois machines pour les nœuds du control plane et de trois machin

Schéma de la [Topologie externe etcd](/images/kubeadm/kubeadm-ha-topology-external-etcd.svg)

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

- [Configurer un cluster hautement disponible avec kubeadm](/docs/setup/independent/high-availability/)

-{{% /capture %}}
\ No newline at end of file
diff --git a/content/fr/docs/setup/independent/high-availability.md b/content/fr/docs/setup/independent/high-availability.md
index d8a95b5c89..210ba7e30c 100644
--- a/content/fr/docs/setup/independent/high-availability.md
+++ b/content/fr/docs/setup/independent/high-availability.md
@@ -1,11 +1,11 @@
---
title: Création de clusters hautement disponibles avec kubeadm
description: Cluster Kubernetes haute-disponibilité kubeadm
-content_template: templates/task
+content_type: task
weight: 60
---

-{{% capture overview %}}
+

Cette page explique deux approches différentes pour configurer un cluster Kubernetes à haute disponibilité avec kubeadm :

@@ -35,9 +35,10 @@ environnement Cloud, les approches documentées ici ne fonctionne ni avec des ob
load balancer, ni avec des volumes persistants dynamiques.
{{< /caution >}}

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

Pour les deux méthodes, vous avez besoin de cette infrastructure:

@@ -57,9 +58,9 @@ Les exemples suivants utilisent Calico en tant que fournisseur de réseau de Pod
CNI, pensez à remplacer les valeurs par défaut si nécessaire.
{{< /note >}}

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Premières étapes pour les deux méthodes

@@ -344,4 +345,4 @@ Chaque nœud worker peut maintenant être joint au cluster avec la commande renv
de n'importe quelle commande `kubeadm init`. L'option `--experimental-control-plane` ne doit pas être ajoutée aux nœuds workers.

-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/independent/install-kubeadm.md b/content/fr/docs/setup/independent/install-kubeadm.md
index 6366c6fdce..a32225872f 100644
--- a/content/fr/docs/setup/independent/install-kubeadm.md
+++ b/content/fr/docs/setup/independent/install-kubeadm.md
@@ -1,20 +1,21 @@
---
title: Installer kubeadm
description: kubeadm installation Kubernetes
-content_template: templates/task
+content_type: task
weight: 20
---

-{{% capture overview %}}
+Cette page vous apprend comment installer la boîte à outils `kubeadm`.
Pour plus d'informations sur la création d'un cluster avec kubeadm, une fois que vous avez
effectué ce processus d'installation, voir la page: [Utiliser kubeadm pour créer un cluster](/docs/setup/independent/create-cluster-kubeadm/).

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

* Une ou plusieurs machines exécutant:
  - Ubuntu 16.04+
@@ -31,9 +32,9 @@ effectué ce processus d'installation, voir la page: [Utiliser kubeadm pour cré
* Certains ports doivent être ouverts sur vos machines. Voir [ici](#check-required-ports) pour plus de détails.
* Swap désactivé. Vous devez impérativement désactiver le swap pour que la kubelet fonctionne correctement (voir l'esquisse ci-dessous).
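Par exemple, la désactivation du swap se fait typiquement ainsi (esquisse ; pensez aussi à commenter l'entrée swap de `/etc/fstab` pour que le réglage survive au redémarrage) :

```shell
# Désactive le swap immédiatement ; sans modification de /etc/fstab,
# il sera réactivé au prochain redémarrage de la machine.
sudo swapoff -a
```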
-{{% /capture %}}
-{{% capture steps %}}
+
+

## Vérifiez que les adresses MAC et product_uuid sont uniques pour chaque nœud {#verify-the-mac-address-and-product-uuid-are-unique-for-every-node}

@@ -253,8 +254,9 @@ systemctl restart kubelet

Si vous rencontrez des difficultés avec kubeadm, veuillez consulter notre [documentation de dépannage](/docs/setup/independent/troubleshooting-kubeadm/).

-{{% capture whatsnext %}}
+## {{% heading "whatsnext" %}}
+

* [Utiliser kubeadm pour créer un cluster](/docs/setup/independent/create-cluster-kubeadm/)

-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/independent/kubelet-integration.md b/content/fr/docs/setup/independent/kubelet-integration.md
index 786dfa18fb..18ea57310b 100644
--- a/content/fr/docs/setup/independent/kubelet-integration.md
+++ b/content/fr/docs/setup/independent/kubelet-integration.md
@@ -1,11 +1,11 @@
---
title: Configuration des kubelet de votre cluster avec kubeadm
description: Configuration kubelet Kubernetes cluster kubeadm
-content_template: templates/concept
+content_type: concept
weight: 80
---

-{{% capture overview %}}
+

{{< feature-state for_k8s_version="1.11" state="stable" >}}

@@ -26,9 +26,9 @@ d'une machine donnée, telles que le système d'exploitation, le stockage et
la mise en réseau. Vous pouvez gérer manuellement la configuration de vos kubelets, mais [kubeadm fournit maintenant un type d'API `KubeletConfiguration` pour la gestion centralisée de vos configurations de kubelets](#configure-kubelets-using-kubeadm).

-{{% /capture %}}
-{{% capture body %}}
+
+

## Patterns de configuration des Kubelets

@@ -206,4 +206,4 @@ Les packages DEB et RPM fournis avec les versions de Kubernetes sont les suivant
| `kubernetes-cni` | Installe les binaires officiels du CNI dans le répertoire `/opt/cni/bin`. |
| `cri-tools` | Installe `/usr/bin/crictl` à partir de [https://github.com/kubernetes-incubator/cri-tools](https://github.com/kubernetes-incubator/cri-tools). |

-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/independent/setup-ha-etcd-with-kubeadm.md b/content/fr/docs/setup/independent/setup-ha-etcd-with-kubeadm.md
index 678268a771..446548d0f1 100644
--- a/content/fr/docs/setup/independent/setup-ha-etcd-with-kubeadm.md
+++ b/content/fr/docs/setup/independent/setup-ha-etcd-with-kubeadm.md
@@ -1,20 +1,21 @@
---
title: Configurer un cluster etcd en haute disponibilité avec kubeadm
description: Configuration d'un cluster etcd en haute disponibilité avec kubeadm
-content_template: templates/task
+content_type: task
weight: 70
---

-{{% capture overview %}}
+

Par défaut, Kubeadm exécute un cluster etcd mono nœud dans un pod statique géré
par la kubelet sur le nœud du plan de contrôle (control plane). Ce n'est pas une configuration haute disponibilité puisque le cluster etcd ne contient qu'un seul membre et ne tolère donc l'indisponibilité d'aucun membre. Cette page vous accompagne dans le processus de création d'un cluster etcd à trois membres en haute disponibilité, pouvant être utilisé en tant que cluster externe lors de l'utilisation de kubeadm pour configurer un cluster Kubernetes.

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

* Trois machines pouvant communiquer entre elles via les ports 2379 et 2380. Cette méthode utilise ces ports par défaut. Cependant, ils sont configurables via
@@ -24,9 +25,9 @@ le fichier de configuration kubeadm.
[toolbox]: /docs/setup/independent/install-kubeadm/

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Mise en place du cluster

@@ -249,14 +250,15 @@ kubeadm contient tout ce qui est nécessaire pour générer les certificats déc
  - Configurez `${ETCD_TAG}` avec la version de votre image etcd. Par exemple `v3.2.24`.
  - Configurez `${HOST0}` avec l'adresse IP de l'hôte que vous testez.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

Une fois que vous avez un cluster de 3 membres etcd qui fonctionne, vous pouvez continuer à configurer un control plane hautement disponible en utilisant la [méthode etcd externe avec kubeadm](/docs/setup/independent/high-availability/).

-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/independent/troubleshooting-kubeadm.md b/content/fr/docs/setup/independent/troubleshooting-kubeadm.md
index ba00d254c7..497c7ae7d7 100644
--- a/content/fr/docs/setup/independent/troubleshooting-kubeadm.md
+++ b/content/fr/docs/setup/independent/troubleshooting-kubeadm.md
@@ -1,11 +1,11 @@
---
title: Dépanner kubeadm
description: Diagnostic pannes kubeadm debug
-content_template: templates/concept
+content_type: concept
weight: 90
---

-{{% capture overview %}}
+

Comme avec n'importe quel programme, vous pourriez rencontrer une erreur lors de l'installation ou de l'exécution de kubeadm.

@@ -25,9 +25,9 @@ dans le canal #kubeadm, ou posez une question sur
[StackOverflow](https://stackoverflow.com/questions/tagged/kubernetes). Merci d'ajouter les tags pertinents comme `#kubernetes` et `#kubeadm`, ainsi on pourra vous aider.

-{{% /capture %}}
-{{% capture body %}}
+
+

## `ebtables` ou un exécutable similaire introuvable lors de l'installation

@@ -283,4 +283,4 @@ sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/dock
yum install docker-ce-18.06.1.ce-3.el7.x86_64
```

-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/learning-environment/minikube.md b/content/fr/docs/setup/learning-environment/minikube.md
index d1a521c69d..77ddde7f4d 100644
--- a/content/fr/docs/setup/learning-environment/minikube.md
+++ b/content/fr/docs/setup/learning-environment/minikube.md
@@ -1,16 +1,16 @@
---
title: Installer Kubernetes avec Minikube
-content_template: templates/concept
+content_type: concept
---

-{{% capture overview %}}
+

Minikube est un outil facilitant l'exécution locale de Kubernetes.
Minikube exécute un cluster Kubernetes à nœud unique dans une machine virtuelle (VM) de votre ordinateur portable pour les utilisateurs qui souhaitent essayer Kubernetes ou le développer au quotidien.

-{{% /capture %}}
-{{% capture body %}}
+
+

## Fonctionnalités de Minikube

@@ -530,4 +530,4 @@ Les développeurs de minikube sont dans le canal #minikube du [Slack](https://ku

Nous avons également la liste de diffusion [kubernetes-dev Google Groupes](https://groups.google.com/forum/#!forum/kubernetes-dev). Si vous publiez sur la liste, veuillez préfixer votre sujet avec "minikube:".
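Pour un premier essai, l'utilisation de base de Minikube se résume typiquement à ceci (esquisse, en supposant `minikube` et `kubectl` déjà installés) :

```shell
# Démarre la VM et le cluster à nœud unique, puis vérifie que le nœud est prêt.
minikube start
kubectl get nodes
# Arrête le cluster une fois l'essai terminé.
minikube stop
```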
-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/pick-right-solution.md b/content/fr/docs/setup/pick-right-solution.md
index 2571698270..929e7ffbfb 100644
--- a/content/fr/docs/setup/pick-right-solution.md
+++ b/content/fr/docs/setup/pick-right-solution.md
@@ -4,10 +4,10 @@ reviewers:
title: Choisir la bonne solution
description: Panorama de solutions Kubernetes
weight: 10
-content_template: templates/concept
+content_type: concept
---

-{{% capture overview %}}
+

Kubernetes peut fonctionner sur des plateformes variées : sur votre PC portable, sur des VMs d'un fournisseur de cloud, ou sur un rack de
serveurs bare-metal. L'effort demandé pour configurer un cluster varie de l'exécution d'une simple commande à la création
@@ -28,9 +28,9 @@ déployer un cluster grâce à une seule ligne de commande par machine.

cluster Kubernetes en partant du début.

-{{% /capture %}}
-{{% capture body %}}
+
+

## Solutions locales

@@ -300,4 +300,4 @@ Le tableau ci-dessus est ordonné par versions testées et utilisées dans les n

[3]: https://gist.github.com/erictune/2f39b22f72565365e59b

-{{% /capture %}}
+
diff --git a/content/fr/docs/setup/release/building-from-source.md b/content/fr/docs/setup/release/building-from-source.md
index e3e0891f08..b8a8e46bca 100644
--- a/content/fr/docs/setup/release/building-from-source.md
+++ b/content/fr/docs/setup/release/building-from-source.md
@@ -1,22 +1,22 @@
---
title: Construire une release
-content_template: templates/concept
+content_type: concept
description: Construire une release de la documentation Kubernetes
card:
  name: download
  weight: 20
  title: Construire une release
---

-{{% capture overview %}}
+

Vous pouvez soit compiler une version à partir des sources, soit télécharger une version pré-compilée. Si vous ne prévoyez pas de développer Kubernetes, nous vous suggérons d'utiliser une version pré-compilée de la version actuelle, que l'on peut trouver dans les [Release Notes](/docs/setup/release/notes/).

Le code source de Kubernetes peut être téléchargé sur le repo [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes).

-{{% /capture %}}
-{{% capture body %}}
+
+

## Installer à partir des sources

Si vous installez simplement une version à partir des sources, il n'est pas nécessaire de mettre en place un environnement golang complet car tous les builds se font dans un conteneur Docker.

@@ -31,4 +31,4 @@ make release

Pour plus de détails sur le processus de release, voir le répertoire [`build`](http://releases.k8s.io/{{< param "githubbranch" >}}/build/) dans kubernetes/kubernetes.

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/_index.md b/content/fr/docs/tasks/_index.md
index 63dc8f8e0f..357430288a 100644
--- a/content/fr/docs/tasks/_index.md
+++ b/content/fr/docs/tasks/_index.md
@@ -2,19 +2,19 @@
title: Tâches
main_menu: true
weight: 50
-content_template: templates/concept
+content_type: concept
---

{{< toc >}}

-{{% capture overview %}}
+

Cette section de la documentation de Kubernetes contient des pages qui montrent comment effectuer des tâches individuelles. Une page montre comment effectuer une seule chose, généralement en donnant une courte séquence d'étapes.

-{{% /capture %}}
-{{% capture body %}}
+
+

## Interface web (Dashboard) {#dashboard}

@@ -76,11 +76,12 @@ Configurer des GPUs NVIDIA pour les utiliser dans des noeuds dans un cluster.

Configuration des huge pages comme une ressource planifiable dans un cluster.
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

Si vous souhaitez écrire une page, consultez [Création d'une Pull Request de documentation](/docs/home/contribute/create-pull-request/).

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/access-application-cluster/web-ui-dashboard.md b/content/fr/docs/tasks/access-application-cluster/web-ui-dashboard.md
index f40e6c4fa3..ba5d296adb 100644
--- a/content/fr/docs/tasks/access-application-cluster/web-ui-dashboard.md
+++ b/content/fr/docs/tasks/access-application-cluster/web-ui-dashboard.md
@@ -1,6 +1,6 @@
---
title: Tableau de bord (Dashboard)
-content_template: templates/concept
+content_type: concept
weight: 10
card:
  name: tasks
@@ -8,7 +8,7 @@ card:
  title: Utiliser le tableau de bord (Dashboard)
---

-{{% capture overview %}}
+

Le tableau de bord (Dashboard) est une interface web pour Kubernetes.
Vous pouvez utiliser ce tableau de bord pour déployer des applications conteneurisées dans un cluster Kubernetes, dépanner votre application conteneurisée et gérer les ressources du cluster.

@@ -19,9 +19,9 @@ Le tableau de bord fournit également des informations sur l'état des ressource

![Tableau de bord Kubernetes](/images/docs/ui-dashboard.png)

-{{% /capture %}}
-{{% capture body %}}
+
+

## Déploiement du tableau de bord

@@ -212,10 +212,11 @@ Le visualiseur permet d'exploiter les logs des conteneurs appartenant à un se

![Visualisation de journaux](/images/docs/ui-dashboard-logs-view.png)

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

Pour plus d'informations, voir la page du projet [Kubernetes Dashboard](https://github.com/kubernetes/dashboard).

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/administer-cluster/developing-cloud-controller-manager.md b/content/fr/docs/tasks/administer-cluster/developing-cloud-controller-manager.md
index 4e76fe2e26..2f2beaba83 100644
--- a/content/fr/docs/tasks/administer-cluster/developing-cloud-controller-manager.md
+++ b/content/fr/docs/tasks/administer-cluster/developing-cloud-controller-manager.md
@@ -1,9 +1,9 @@
---
title: Développer un Cloud Controller Manager
-content_template: templates/concept
+content_type: concept
---

-{{% capture overview %}}
+
{{< feature-state for_k8s_version="v1.11" state="beta" >}}

Dans les prochaines versions, Cloud Controller Manager sera le moyen privilégié d'intégrer Kubernetes à n'importe quel cloud.

@@ -17,9 +17,9 @@ La plupart des implémentations de contrôleurs génériques seront au cœur du

Pour approfondir un peu les détails de la mise en œuvre, tous les gestionnaires de contrôleurs de nuage vont importer des packages à partir de Kubernetes core, la seule différence étant que chaque projet enregistre son propre fournisseur de nuage en appelant [cloudprovider.RegisterCloudProvider](https://github.com/kubernetes/cloud-provider/blob/master/plugins.go#L56-L66) où une variable globale des fournisseurs de cloud disponibles est mise à jour.

-{{% /capture %}}
-{{% capture body %}}
+
+

## Développement

@@ -39,4 +39,4 @@ Vous pouvez trouver la liste [ici](/docs/tasks/administer-cluster/running-cloud-

Pour les cloud in-tree, vous pouvez exécuter le in-tree cloud controller manager comme un [Daemonset](/examples/admin/cloud/ccm-example.yaml) dans votre cluster. Voir la [documentation sur l'exécution d'un cloud controller manager](/docs/tasks/administer-cluster/running-cloud-controller.md) pour plus de détails.
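Concrètement, cela revient à appliquer le manifeste d'exemple cité ci-dessus (esquisse ; l'URL `https://k8s.io/examples/...` est l'alias habituel vers les exemples du dépôt de documentation) :

```shell
# Déploie le cloud-controller-manager in-tree sous forme de DaemonSet
# à partir du manifeste d'exemple référencé ci-dessus.
kubectl apply -f https://k8s.io/examples/admin/cloud/ccm-example.yaml
```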
-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/administer-cluster/running-cloud-controller.md b/content/fr/docs/tasks/administer-cluster/running-cloud-controller.md
index 631eda2baf..d2e9a02420 100644
--- a/content/fr/docs/tasks/administer-cluster/running-cloud-controller.md
+++ b/content/fr/docs/tasks/administer-cluster/running-cloud-controller.md
@@ -1,9 +1,9 @@
---
title: Kubernetes cloud-controller-manager
-content_template: templates/concept
+content_type: concept
---

-{{% capture overview %}}
+

{{< feature-state state="beta" >}}

@@ -17,9 +17,9 @@ Pour des raisons de retro-compatibilité, le [cloud-controller-manager](https://

Les fournisseurs de cloud déjà pris en charge nativement par Kubernetes devraient utiliser le cloud-controller-manager disponible dans le code de Kubernetes pour effectuer une transition visant à faire sortir cette prise en charge du code de Kubernetes. Dans les futures versions de Kubernetes, tous les cloud-controller-manager seront développés en dehors du projet de base de Kubernetes géré par des sig leads ou des fournisseurs de cloud.

-{{% /capture %}}
-{{% capture body %}}
+
+

## Administration

@@ -108,4 +108,4 @@ Actuellement, l'amorçage TLS suppose que Kubelet ait la possibilité de deman

Pour créer et développer votre propre cloud-controller-manager, lisez la documentation [Développer un cloud-controller-manager](/docs/tasks/administer-cluster/developing-cloud-controller-manager.md).

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/configure-pod-container/assign-cpu-resource.md b/content/fr/docs/tasks/configure-pod-container/assign-cpu-resource.md
index 0e65c98765..0845cf2806 100644
--- a/content/fr/docs/tasks/configure-pod-container/assign-cpu-resource.md
+++ b/content/fr/docs/tasks/configure-pod-container/assign-cpu-resource.md
@@ -1,19 +1,20 @@
---
title: Allouer des ressources CPU aux conteneurs et aux pods
-content_template: templates/task
+content_type: task
weight: 20
---

-{{% capture overview %}}
+

Cette page montre comment assigner une *demande* (request en anglais) de CPU et une *limite* de CPU à un conteneur.
Un conteneur est garanti d'avoir autant de CPU qu'il le demande, mais n'est pas autorisé à utiliser plus de CPU que sa limite.

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}

@@ -43,10 +44,10 @@ NAME
v1beta1.metrics.k8s.io
```

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Créer un namespace

@@ -222,9 +223,10 @@ Supprimez votre namespace :
kubectl delete namespace cpu-example
```

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

### Pour les développeurs d'applications

@@ -249,7 +251,7 @@ kubectl delete namespace cpu-example

* [Configuration des quotas pour les objets API](/docs/tasks/administer-cluster/quota-api-object/)

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/configure-pod-container/assign-memory-resource.md b/content/fr/docs/tasks/configure-pod-container/assign-memory-resource.md
index 93d4fd63c9..754e91972b 100644
--- a/content/fr/docs/tasks/configure-pod-container/assign-memory-resource.md
+++ b/content/fr/docs/tasks/configure-pod-container/assign-memory-resource.md
@@ -1,16 +1,17 @@
---
title: Allouer des ressources mémoire aux conteneurs et aux pods
-content_template: templates/task
+content_type: task
weight: 10
---

-{{% capture overview %}}
+

Cette page montre comment assigner une mémoire *request* et une mémoire *limit* à un conteneur.
Un conteneur est garanti d'avoir autant de mémoire qu'il le demande, mais n'est pas autorisé à consommer plus de mémoire que sa limite.

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}

@@ -39,9 +40,9 @@ NAME
v1beta1.metrics.k8s.io
```

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Créer un namespace

@@ -303,9 +304,10 @@ Supprimez votre namespace. Ceci va supprimer tous les Pods que vous avez créés
kubectl delete namespace mem-example
```

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

### Pour les développeurs d'applications

@@ -329,7 +331,7 @@ kubectl delete namespace mem-example

* [Configuration des quotas pour les objets API](/docs/tasks/administer-cluster/quota-api-object/)

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/configure-pod-container/assign-pods-nodes.md b/content/fr/docs/tasks/configure-pod-container/assign-pods-nodes.md
index c3c1a2a5e9..3b102eee1f 100644
--- a/content/fr/docs/tasks/configure-pod-container/assign-pods-nodes.md
+++ b/content/fr/docs/tasks/configure-pod-container/assign-pods-nodes.md
@@ -1,20 +1,21 @@
---
title: Assigner des pods aux nœuds
-content_template: templates/task
+content_type: task
weight: 120
---

-{{% capture overview %}}
+
Cette page montre comment assigner un Pod à un nœud particulier dans un cluster Kubernetes.

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Ajouter un label à un nœud

@@ -89,10 +90,11 @@ Vous pouvez également ordonnancer un pod sur un nœud spécifique via le param

Utilisez le fichier de configuration pour créer un pod qui sera ordonnancé sur `foo-node` uniquement.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

En savoir plus sur les [labels et selectors](/docs/concepts/overview/working-with-objects/labels/).

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/configure-pod-container/configure-pod-initialization.md b/content/fr/docs/tasks/configure-pod-container/configure-pod-initialization.md
index e31b0f10e6..6d1ca96b31 100644
--- a/content/fr/docs/tasks/configure-pod-container/configure-pod-initialization.md
+++ b/content/fr/docs/tasks/configure-pod-container/configure-pod-initialization.md
@@ -1,21 +1,22 @@
---
title: Configurer l'initialisation du pod
-content_template: templates/task
+content_type: task
weight: 130
---

-{{% capture overview %}}
+
Cette page montre comment utiliser un Init Container pour initialiser un Pod avant de lancer un conteneur d'application.

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Créer un Pod qui a un Init Container

@@ -71,9 +72,10 @@ La sortie montre que nginx sert la page web qui a été écrite par le conteneur

Kubernetes is open source giving you the freedom to take advantage ...

...

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* Pour en savoir plus sur [communiquer entre conteneurs fonctionnant dans le même Pod](/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume/).

@@ -81,6 +83,6 @@ La sortie montre que nginx sert la page web qui a été écrite par le conteneur
* Pour en savoir plus sur [Volumes](/docs/concepts/storage/volumes/).
* Pour en savoir plus sur [Débogage des Init Conteneurs](/docs/tasks/debug-application-cluster/debug-init-containers/)

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/configure-pod-container/configure-volume-storage.md b/content/fr/docs/tasks/configure-pod-container/configure-volume-storage.md
index fe35b1cb8b..eed01fc4ed 100644
--- a/content/fr/docs/tasks/configure-pod-container/configure-volume-storage.md
+++ b/content/fr/docs/tasks/configure-pod-container/configure-volume-storage.md
@@ -1,10 +1,10 @@
---
title: Configurer un pod en utilisant un volume pour le stockage
-content_template: templates/task
+content_type: task
weight: 50
---

-{{% capture overview %}}
+

Cette page montre comment configurer un Pod pour utiliser un Volume pour le stockage.

@@ -12,15 +12,16 @@ Le système de fichiers d'un conteneur ne vit que tant que le conteneur vit. Ain
[Volume](/fr/docs/concepts/storage/volumes/). C'est particulièrement important pour les applications Stateful, telles que les key-value stores (comme par exemple Redis) et les bases de données.

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Configurer un volume pour un Pod

@@ -120,9 +121,10 @@ fixé à `Always`.
kubectl delete pod redis
```

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* Voir [Volume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volume-v1-core).

@@ -130,6 +132,6 @@ fixé à `Always`.

* En plus du stockage sur disque local fourni par `emptyDir`, Kubernetes supporte de nombreuses solutions de stockage connectées au réseau, y compris PD sur GCE et EBS sur EC2, qui sont préférées pour les données critiques et qui s'occuperont des autres détails tels que le montage et le démontage sur les nœuds. Voir [Volumes](/fr/docs/concepts/storage/volumes/) pour plus de détails.

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/configure-pod-container/extended-resource.md b/content/fr/docs/tasks/configure-pod-container/extended-resource.md
index a19980c5b8..439714f6fd 100644
--- a/content/fr/docs/tasks/configure-pod-container/extended-resource.md
+++ b/content/fr/docs/tasks/configure-pod-container/extended-resource.md
@@ -1,19 +1,20 @@
---
title: Affecter des ressources supplémentaires à un conteneur
-content_template: templates/task
+content_type: task
weight: 40
---

-{{% capture overview %}}
+

Cette page montre comment affecter des ressources supplémentaires à un conteneur.

{{< feature-state state="stable" >}}

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}

@@ -21,10 +22,10 @@ Avant de commencer cet exercice, procédez à l'exercice en
[Annoncer des ressources supplémentaires pour un nœud](/docs/tasks/administer-cluster/extended-resource-node/).
Cela configurera l'un de vos nœuds pour qu'il annonce une ressource dongle.
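Pour donner une idée de la suite, la demande de cette ressource dans un Pod ressemble à ceci (esquisse ; le nom `example.com/dongle` reprend celui de l'exercice cité, le reste est hypothétique) :

```shell
# Le Pod ne sera ordonnancé que sur un nœud annonçant au moins un dongle disponible.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: dongle-demo
spec:
  containers:
  - name: app
    image: nginx
    resources:
      requests:
        example.com/dongle: 1
      limits:
        example.com/dongle: 1
EOF
```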
-{{% /capture %}}
-{{% capture steps %}}
+
+

## Affecter une ressource supplémentaire à un Pod

@@ -124,9 +125,10 @@ kubectl delete pod extended-resource-demo
kubectl delete pod extended-resource-demo-2
```

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

### Pour les développeurs d'applications

@@ -137,4 +139,3 @@ kubectl delete pod extended-resource-demo-2

* [Annoncer des ressources supplémentaires pour un nœud](/docs/tasks/administer-cluster/extended-resource-node/)

-{{% /capture %}}
\ No newline at end of file
diff --git a/content/fr/docs/tasks/configure-pod-container/pull-image-private-registry.md b/content/fr/docs/tasks/configure-pod-container/pull-image-private-registry.md
index 471448889e..efe9ee74fa 100644
--- a/content/fr/docs/tasks/configure-pod-container/pull-image-private-registry.md
+++ b/content/fr/docs/tasks/configure-pod-container/pull-image-private-registry.md
@@ -1,25 +1,26 @@
---
title: Récupération d'une image d'un registre privé
-content_template: templates/task
+content_type: task
weight: 100
---

-{{% capture overview %}}
+

Cette page montre comment créer un Pod qui utilise un Secret pour récupérer une image d'un registre privé.

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

* {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}

* Pour faire cet exercice, vous avez besoin d'un [Docker ID](https://docs.docker.com/docker-id/) et d'un mot de passe.

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Connectez-vous à Docker

@@ -193,9 +194,10 @@ kubectl apply -f my-private-reg-pod.yaml
kubectl get pod private-reg
```

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* Pour en savoir plus sur les [Secrets](/docs/concepts/configuration/secret/).
* Pour en savoir plus sur l'[utilisation d'un registre privé](/docs/concepts/containers/images/#using-a-private-registry).
@@ -204,5 +206,5 @@ kubectl get pod private-reg
* Voir [Secret](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core).
* Voir le champ `imagePullSecrets` de [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core).

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/configure-pod-container/quality-service-pod.md b/content/fr/docs/tasks/configure-pod-container/quality-service-pod.md
index c866951ddb..8b666ccd98 100644
--- a/content/fr/docs/tasks/configure-pod-container/quality-service-pod.md
+++ b/content/fr/docs/tasks/configure-pod-container/quality-service-pod.md
@@ -1,25 +1,26 @@
---
title: Configurer la qualité de service pour les pods
-content_template: templates/task
+content_type: task
weight: 30
---

-{{% capture overview %}}
+

Cette page montre comment configurer les Pods pour qu'ils soient affectés à des classes particulières de qualité de service (QoS). Kubernetes utilise des classes de QoS pour prendre des décisions concernant l'ordonnancement et les évictions des pods.

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Les Classes de QoS

@@ -224,9 +225,10 @@ Supprimez votre namespace.
kubectl delete namespace qos-example
```

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

### Pour les développeurs d'applications

@@ -251,7 +253,7 @@ kubectl delete namespace qos-example
* [Configuration du quota de pods pour un Namespace](/docs/tasks/administer-cluster/quota-pod-namespace/)
* [Configuration des quotas pour les objets API](/docs/tasks/administer-cluster/quota-api-object/)

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/configure-pod-container/translate-compose-kubernetes.md b/content/fr/docs/tasks/configure-pod-container/translate-compose-kubernetes.md
index 444da1ff0a..f856847e85 100644
--- a/content/fr/docs/tasks/configure-pod-container/translate-compose-kubernetes.md
+++ b/content/fr/docs/tasks/configure-pod-container/translate-compose-kubernetes.md
@@ -1,23 +1,24 @@
---
title: Convertir un fichier Docker Compose en ressources Kubernetes
-content_template: templates/task
+content_type: task
weight: 200
---

-{{% capture overview %}}
+

C'est quoi Kompose ? C'est un outil de conversion de tout ce qui compose (notamment Docker Compose) en orchestrateurs de conteneurs (Kubernetes ou OpenShift).

Vous trouverez plus d'informations sur le site web de Kompose à [http://kompose.io](http://kompose.io).

-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}

-{{% /capture %}}
-{{% capture steps %}}
+
+

## Installer Kompose

@@ -192,9 +193,9 @@ En quelques étapes, nous vous emmenons de Docker Compose à Kubernetes. Tous do
$ curl http://192.0.2.89
```

-{{% /capture %}}
-{{% capture discussion %}}
+
+

## Guide de l'utilisateur

@@ -600,4 +601,4 @@ Kompose supporte les versions Docker Compose : 1, 2 et 3. Nous avons un support

Une liste complète sur la compatibilité entre les trois versions est donnée dans notre [document de conversion](https://github.com/kubernetes/kompose/blob/master/docs/conversion.md) incluant une liste de toutes les clés Docker Compose incompatibles.

-{{% /capture %}}
+
diff --git a/content/fr/docs/tasks/debug-application-cluster/get-shell-running-container.md b/content/fr/docs/tasks/debug-application-cluster/get-shell-running-container.md
index c699134a4e..b5fb0012a1 100644
--- a/content/fr/docs/tasks/debug-application-cluster/get-shell-running-container.md
+++ b/content/fr/docs/tasks/debug-application-cluster/get-shell-running-container.md
@@ -1,21 +1,22 @@
---
title: Obtenez un shell dans un conteneur en cours d'exécution
-content_template: templates/task
+content_type: task
---

-{{% capture overview %}}
+

Cette page montre comment utiliser `kubectl exec` pour obtenir un shell dans un conteneur en cours d'exécution.
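La forme de base est par exemple la suivante (esquisse ; le Pod `shell-demo` est celui utilisé plus bas dans la page) :

```shell
# Ouvre un shell interactif dans le conteneur du Pod shell-demo.
kubectl exec -it shell-demo -- /bin/bash
```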
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Obtenir un shell dans un conteneur @@ -116,9 +117,9 @@ kubectl exec shell-demo ls / kubectl exec shell-demo cat /proc/1/mounts ``` -{{% /capture %}} -{{% capture discussion %}} + + ## Ouverture d'un shell lorsqu'un pod possède plusieurs conteneurs @@ -130,10 +131,11 @@ La commande suivante ouvrirait un shell sur le conteneur de l'application princi kubectl exec -it my-pod --container main-app -- /bin/bash ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec) -{{% /capture %}} + diff --git a/content/fr/docs/tasks/tools/install-kubectl.md b/content/fr/docs/tasks/tools/install-kubectl.md index 2d1f50fccd..8d60357aea 100644 --- a/content/fr/docs/tasks/tools/install-kubectl.md +++ b/content/fr/docs/tasks/tools/install-kubectl.md @@ -4,7 +4,7 @@ reviewers: - rbenzair title: Installer et configurer kubectl description: Installation et configuration de kubectl -content_template: templates/task +content_type: task weight: 10 card: name: tasks @@ -12,15 +12,16 @@ card: title: Installer kubectl --- -{{% capture overview %}} + L'outil en ligne de commande de kubernetes, [kubectl](/docs/user-guide/kubectl/), vous permet d'exécuter des commandes dans les clusters Kubernetes. Vous pouvez utiliser kubectl pour déployer des applications, inspecter et gérer les ressources du cluster et consulter les logs. Pour une liste complète des opérations kubectl, voir [Aperçu de kubectl](/fr/docs/reference/kubectl/overview/). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Vous devez utiliser une version de kubectl qui différe seulement d'une version mineure de la version de votre cluster. Par exemple, un client v1.2 doit fonctionner avec un master v1.1, v1.2 et v1.3. L'utilisation de la dernière version de kubectl permet d'éviter des problèmes imprévus. -{{% /capture %}} -{{% capture steps %}} + + ## Installer kubectl sur Linux @@ -470,12 +471,13 @@ compinit {{% /tab %}} {{< /tabs >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Installer Minikube](/docs/tasks/tools/install-minikube/) * Voir les [guides de démarrage](/fr/docs/setup/) pour plus d'informations sur la création de clusters. * [Apprenez comment lancer et exposer votre application](/docs/tasks/access-application-cluster/service-access-application-cluster/) * Si vous avez besoin d'accéder à un cluster que vous n'avez pas créé, consultez [Partager l'accès du Cluster](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). * Consulter les [documents de référence de kubectl](/fr/docs/reference/kubectl/kubectl/) -{{% /capture %}} + diff --git a/content/fr/docs/tasks/tools/install-minikube.md b/content/fr/docs/tasks/tools/install-minikube.md index e704e1bfa1..c3fcb56f34 100644 --- a/content/fr/docs/tasks/tools/install-minikube.md +++ b/content/fr/docs/tasks/tools/install-minikube.md @@ -1,19 +1,20 @@ --- title: Installer Minikube -content_template: templates/task +content_type: task weight: 20 card: name: tasks weight: 10 --- -{{% capture overview %}} + Cette page vous montre comment installer [Minikube](/fr/docs/tutorials/hello-minikube/), qui est un outil qui fait tourner un cluster Kubernetes à un noeud unique dans une machine virtuelle sur votre machine. 
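Once installed, a quick sanity-check sketch (assuming the `minikube` binary is on your PATH and a supported hypervisor is available):

```shell
minikube version   # confirm the binary is installed
minikube start     # boot the single-node cluster in a VM
kubectl get nodes  # the cluster should report one node in Ready state
```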
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< tabs name="minikube_before_you_begin" >}} {{% tab name="Linux" %}} @@ -53,11 +54,11 @@ Configuration requise pour Hyper-V: un hyperviseur a été détecté. Les foncti {{% /tab %}} {{< /tabs >}} -{{% /capture %}} -{{% capture steps %}} -# Installer Minikube + + +## Installer Minikube {{< tabs name="tab_with_md" >}} {{% tab name="Linux" %}} @@ -200,13 +201,11 @@ Pour installer Minikube manuellement sur Windows, téléchargez [`minikube-windo {{< /tabs >}} -{{% /capture %}} -{{% capture whatsnext %}} -* [Exécutez Kubernetes localement via Minikube](/fr/docs/setup/learning-environment/minikube/) -{{% /capture %}} + + ## Confirmer l'installation @@ -259,3 +258,8 @@ Vous devez supprimer les fichiers de configuration : ```shell rm -rf ~/.minikube ``` + +## {{% heading "whatsnext" %}} + + +* [Exécutez Kubernetes localement via Minikube](/fr/docs/setup/learning-environment/minikube/) diff --git a/content/fr/docs/tutorials/_index.md b/content/fr/docs/tutorials/_index.md index 22d44440f3..10e1124620 100644 --- a/content/fr/docs/tutorials/_index.md +++ b/content/fr/docs/tutorials/_index.md @@ -2,10 +2,10 @@ title: Tutoriels main_menu: true weight: 60 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Cette section de la documentation de Kubernetes contient des tutoriels. @@ -13,9 +13,9 @@ Un tutoriel montre comment atteindre un objectif qui est plus grand qu'une simpl Avant d'explorer chacun des tutoriels, il peut-être utile de garder un signet pour le [Glossaire standardisé](/docs/reference/glossary/) pour pouvoir le consulter plus facilement par la suite. -{{% /capture %}} -{{% capture body %}} + + ## Elémentaires @@ -66,10 +66,11 @@ Avant d'explorer chacun des tutoriels, il peut-être utile de garder un signet p * [Utiliser Source IP (EN)](/docs/tutorials/services/source-ip/) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Si vous voulez écrire un tutoriel, regardez la section des modèles de page de tutoriel dans l'[Utilisation des modèles de pages ](/docs/home/contribute/page-templates/). -{{% /capture %}} + diff --git a/content/fr/docs/tutorials/hello-minikube.md b/content/fr/docs/tutorials/hello-minikube.md index 71f9d0f1ee..724919d0e6 100644 --- a/content/fr/docs/tutorials/hello-minikube.md +++ b/content/fr/docs/tutorials/hello-minikube.md @@ -1,6 +1,6 @@ --- title: Hello Minikube -content_template: templates/tutorial +content_type: tutorial weight: 5 description: Tutoriel Minikube menu: @@ -14,7 +14,7 @@ card: weight: 10 --- -{{% capture overview %}} + Ce tutoriel vous montre comment exécuter une simple application Hello World Node.js sur Kubernetes en utilisant [Minikube](/docs/getting-started-guides/minikube/) et Katacoda. Katacoda fournit un environnement Kubernetes gratuit dans le navigateur. @@ -23,17 +23,19 @@ Katacoda fournit un environnement Kubernetes gratuit dans le navigateur. Vous pouvez également suivre ce tutoriel si vous avez installé [Minikube localement](/docs/tasks/tools/install-minikube/). {{< /note >}} -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Déployez une application Hello World sur Minikube. * Lancez l'application. * Afficher les journaux des applications. 
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Ce tutoriel fournit une image de conteneur construite à partir des fichiers suivants : @@ -43,9 +45,9 @@ Ce tutoriel fournit une image de conteneur construite à partir des fichiers sui Pour plus d'informations sur la commande `docker build`, lisez la documentation de [Docker](https://docs.docker.com/engine/reference/commandline/build/). -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Créer un cluster Minikube @@ -261,12 +263,13 @@ Si nécessaire, effacez la VM Minikube : minikube delete ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * En savoir plus sur les [déploiement](/docs/concepts/workloads/controllers/deployment/). * En savoir plus sur le [Déploiement d'applications](/docs/user-guide/deploying-applications/). * En savoir plus sur les [Services](/docs/concepts/services-networking/service/). -{{% /capture %}} + diff --git a/content/id/docs/concepts/_index.md b/content/id/docs/concepts/_index.md index cd135ca14d..ebc205d84a 100644 --- a/content/id/docs/concepts/_index.md +++ b/content/id/docs/concepts/_index.md @@ -1,19 +1,19 @@ --- title: Konsep main_menu: true -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Bagian konsep ini membantu kamu belajar tentang bagian-bagian sistem serta abstraksi yang digunakan Kubernetes untuk merepresentasikan klaster kamu, serta membantu kamu belajar lebih dalam bagaimana cara kerja Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Ikhtisar @@ -97,12 +97,12 @@ dengan *node* secara langsung. * [Anotasi](/docs/concepts/overview/working-with-objects/annotations/) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Jika kamu ingin menulis halaman konsep, perhatikan [cara penggunaan template pada laman](/docs/home/contribute/page-templates/) untuk informasi mengenai konsep tipe halaman dan *template* konsep. -{{% /capture %}} \ No newline at end of file diff --git a/content/id/docs/concepts/architecture/cloud-controller.md b/content/id/docs/concepts/architecture/cloud-controller.md index 03bc1c1f6a..2bde547273 100644 --- a/content/id/docs/concepts/architecture/cloud-controller.md +++ b/content/id/docs/concepts/architecture/cloud-controller.md @@ -1,10 +1,10 @@ --- title: Konsep-konsep di balik Controller Manager -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Konsep _Cloud Controller Manager_/CCM (jangan tertukar dengan program biner kube-controller-manager) awalnya dibuat untuk memungkinkan kode vendor _cloud_ spesifik dan kode inti Kubernetes untuk berkembang secara independen satu sama lainnya. CCM berjalan bersama dengan komponen Master lainnya seperti Kubernetes Controller Manager, API Server, dan Scheduler. CCM juga dapat dijalankan sebagai Kubernetes Addon (tambahan fungsi terhadap Kubernetes), yang akan berjalan di atas klaster Kubernetes. Desain CCM didasarkan pada mekanisme _plugin_ yang memungkinkan penyedia layanan _cloud_ untuk berintegrasi dengan Kubernetes dengan mudah dengan menggunakan _plugin_. Sudah ada rencana untuk pengenalan penyedia layanan _cloud_ baru pada Kubernetes, dan memindahkan penyedia layanan _cloud_ yang sudah ada dari model yang lama ke model CCM. 
@@ -15,10 +15,10 @@ Berikut adalah arsitektur sebuah klaster Kubernetes tanpa CCM: ![Pre CCM Kube Arch](/images/docs/pre-ccm-arch.png) -{{% /capture %}} -{{% capture body %}} + + ## Desain @@ -234,4 +234,4 @@ Penyedia layanan cloud berikut telah mengimplementasikan CCM: Petunjuk lengkap untuk mengkonfigurasi dan menjalankan CCM disediakan [di sini](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager). -{{% /capture %}} + diff --git a/content/id/docs/concepts/architecture/controller.md b/content/id/docs/concepts/architecture/controller.md index 4ce6974b34..a0ff6b9256 100644 --- a/content/id/docs/concepts/architecture/controller.md +++ b/content/id/docs/concepts/architecture/controller.md @@ -1,10 +1,10 @@ --- title: Controller -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Dalam bidang robotika dan otomatisasi, _control loop_ atau kontrol tertutup adalah lingkaran tertutup yang mengatur keadaan suatu sistem. @@ -24,10 +24,10 @@ klaster saat ini mendekati keadaan yang diinginkan. {{< glossary_definition term_id="controller" length="short">}} -{{% /capture %}} -{{% capture body %}} + + ## Pola _controller_ @@ -168,11 +168,12 @@ satu kumpulan dari beberapa Pod, atau bisa juga sebagai bagian eksternal dari Kubernetes. Manakah yang paling sesuai akan tergantung pada apa yang _controller_ khusus itu lakukan. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Silahkan baca tentang [_control plane_ Kubernetes](/docs/concepts/#kubernetes-control-plane) * Temukan beberapa dasar tentang [objek-objek Kubernetes](/docs/concepts/#kubernetes-objects) * Pelajari lebih lanjut tentang [Kubernetes API](/docs/concepts/overview/kubernetes-api/) * Apabila kamu ingin membuat _controller_ sendiri, silakan lihat [pola perluasan](/docs/concepts/extend-kubernetes/extend-cluster/#extension-patterns) dalam memperluas Kubernetes. -{{% /capture %}} + diff --git a/content/id/docs/concepts/architecture/master-node-communication.md b/content/id/docs/concepts/architecture/master-node-communication.md index fcc9f66eed..80644983a4 100644 --- a/content/id/docs/concepts/architecture/master-node-communication.md +++ b/content/id/docs/concepts/architecture/master-node-communication.md @@ -1,19 +1,19 @@ --- title: Komunikasi Master-Node -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Dokumen ini menjelaskan tentang jalur-jalur komunikasi di antara klaster Kubernetes dan master yang sebenarnya hanya berhubungan dengan apiserver saja. Kenapa ada dokumen ini? Supaya kamu, para pengguna Kubernetes, punya gambaran bagaimana mengatur instalasi untuk memperketat konfigurasi jaringan di dalam klaster. Hal ini cukup penting, karena klaster bisa saja berjalan pada jaringan tak terpercaya (untrusted network), ataupun melalui alamat-alamat IP publik pada penyedia cloud. -{{% /capture %}} -{{% capture body %}} + + ## Klaster menuju Master @@ -74,4 +74,4 @@ Dengan ini, apiserver menginisiasi sebuah tunnel SSH untuk setiap node di Tunnel SSH saat ini sudah usang (deprecated), jadi sebaiknya jangan digunakan, kecuali kamu tahu pasti apa yang kamu lakukan. Sebuah desain baru untuk mengganti kanal komunikasi ini sedang disiapkan. 
-{{% /capture %}} + diff --git a/content/id/docs/concepts/architecture/nodes.md b/content/id/docs/concepts/architecture/nodes.md index 75875425c0..8913c9df65 100644 --- a/content/id/docs/concepts/architecture/nodes.md +++ b/content/id/docs/concepts/architecture/nodes.md @@ -1,10 +1,10 @@ --- title: Node -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Node merupakan sebuah mesin worker di dalam Kubernetes, yang sebelumnya dinamakan `minion`. Sebuah node bisa berupa VM ataupun mesin fisik, tergantung dari klaster-nya. @@ -12,10 +12,10 @@ Masing-masing node berisi beberapa servis yang berguna untuk menjalankan banyak Servis-servis di dalam sebuah node terdiri dari [runtime kontainer](/docs/concepts/overview/components/#node-components), kubelet dan kube-proxy. Untuk lebih detail, lihat dokumentasi desain arsitektur pada [Node Kubernetes](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node). -{{% /capture %}} -{{% capture body %}} + + ## Status Node @@ -228,4 +228,4 @@ Kalau kamu ingin secara eksplisit menyimpan resource cadangan untuk menja Node adalah tingkatan tertinggi dari resource di dalam Kubernetes REST API. Penjelasan lebih detail tentang obyek API dapat dilihat pada: [Obyek Node API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#node-v1-core). -{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/addons.md b/content/id/docs/concepts/cluster-administration/addons.md index 0121182e3f..b404465d8f 100644 --- a/content/id/docs/concepts/cluster-administration/addons.md +++ b/content/id/docs/concepts/cluster-administration/addons.md @@ -1,9 +1,9 @@ --- title: Instalasi Add-ons -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + *Add-ons* berfungsi untuk menambah serta memperluas fungsionalitas dari Kubernetes. @@ -12,10 +12,10 @@ Laman ini akan menjabarkan beberapa *add-ons* yang tersedia serta tautan instruk *Add-ons* pada setiap bagian akan diurutkan secara alfabet - pengurutan ini tidak dilakukan berdasarkan status preferensi atau keunggulan. -{{% /capture %}} -{{% capture body %}} + + ## Jaringan dan *Policy* Jaringan @@ -50,4 +50,4 @@ Ada beberapa *add-on* lain yang didokumentasikan pada direktori deprekasi [*clus *Add-on* lain yang dipelihara dan dikelola dengan baik dapat ditulis di sini. Ditunggu PR-nya! -{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/certificates.md b/content/id/docs/concepts/cluster-administration/certificates.md index dca5daec7a..a605a78547 100644 --- a/content/id/docs/concepts/cluster-administration/certificates.md +++ b/content/id/docs/concepts/cluster-administration/certificates.md @@ -1,18 +1,18 @@ --- title: Sertifikat -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Saat menggunakan autentikasi sertifikat klien, kamu dapat membuat sertifikat secara manual melalui `easyrsa`, `openssl` atau `cfssl`. -{{% /capture %}} -{{% capture body %}} + + ### easyrsa @@ -247,4 +247,4 @@ Kamu dapat menggunakan API `Certificate.k8s.io` untuk menyediakan sertifikat x509 yang digunakan untuk autentikasi seperti yang didokumentasikan [di sini](/docs/tasks/tls/managing-tls-in-a-cluster). 
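A minimal sketch of an object for that API, assuming a PKCS#10 CSR has already been generated and base64-encoded; the name and usages here are illustrative, and the API group was at `v1beta1` in the releases this patch targets:

```yaml
apiVersion: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
metadata:
  name: my-svc.my-namespace        # hypothetical name
spec:
  request: <base64-encoded-CSR>    # e.g. cat server.csr | base64 | tr -d '\n'
  usages:
  - digital signature
  - key encipherment
  - server auth
```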
-{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/cloud-providers.md b/content/id/docs/concepts/cluster-administration/cloud-providers.md index a130610fad..45820e3660 100644 --- a/content/id/docs/concepts/cluster-administration/cloud-providers.md +++ b/content/id/docs/concepts/cluster-administration/cloud-providers.md @@ -1,15 +1,15 @@ --- title: Penyedia Layanan Cloud -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Laman ini akan menjelaskan bagaimana cara mengelola Kubernetes yang berjalan pada penyedia layanan cloud tertentu. -{{% /capture %}} -{{% capture body %}} + + ### Kubeadm [Kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) merupakan salah satu cara yang banyak digunakan untuk membuat klaster Kubernetes. Kubeadm memiliki beragam opsi untuk mengatur konfigurasi spesifik untuk penyedia layanan cloud. Salah satu contoh yang biasa digunakan pada penyedia cloud *in-tree* yang dapat diatur dengan kubeadm adalah sebagai berikut: @@ -303,7 +303,7 @@ dan harus berada pada bagian `[Router]` dari *file* `cloud.conf`: [kubenet]: /docs/concepts/cluster-administration/network-plugins/#kubenet -{{% /capture %}} + ## OVirt diff --git a/content/id/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/id/docs/concepts/cluster-administration/cluster-administration-overview.md index 67a6c36588..b485b5e142 100644 --- a/content/id/docs/concepts/cluster-administration/cluster-administration-overview.md +++ b/content/id/docs/concepts/cluster-administration/cluster-administration-overview.md @@ -3,16 +3,16 @@ reviewers: - davidopp - lavalamp title: Ikhtisar Administrasi Klaster -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Ikhtisar administrasi klaster ini ditujukan untuk siapapun yang akan membuat atau mengelola klaster Kubernetes. Diharapkan untuk memahami beberapa [konsep](/docs/concepts/) dasar Kubernetes sebelumnya. -{{% /capture %}} -{{% capture body %}} + + ## Perencanaan Klaster Lihat panduan di [Persiapan](/docs/setup) untuk mempelajari beberapa contoh tentang bagaimana merencanakan, mengatur dan mengonfigurasi klaster Kubernetes. Solusi yang akan dipaparkan di bawah ini disebut *distro*. @@ -67,6 +67,6 @@ Catatan: Tidak semua distro aktif dikelola. Pilihlah distro yang telah diuji den * [*Logging* dan *Monitoring* Aktivitas Klaster](/docs/concepts/cluster-administration/logging/) akan menjelaskan bagaimana cara *logging* bekerja di Kubernetes serta bagaimana cara mengimplementasikannya. -{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/controller-metrics.md b/content/id/docs/concepts/cluster-administration/controller-metrics.md index 3afade9907..c5df8c73e3 100644 --- a/content/id/docs/concepts/cluster-administration/controller-metrics.md +++ b/content/id/docs/concepts/cluster-administration/controller-metrics.md @@ -1,15 +1,15 @@ --- title: Metrik controller manager -content_template: templates/concept +content_type: concept weight: 100 --- -{{% capture overview %}} + Metrik _controller manager_ memberikan informasi penting tentang kinerja dan kesehatan dari _controller manager_. -{{% /capture %}} -{{% capture body %}} + + ## Tentang metrik _controller manager_ Metrik _controller manager_ ini berfungsi untuk memberikan informasi penting tentang kinerja dan kesehatan dari _controller manager_. 
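As a quick way to see those metrics, a sketch assuming shell access to the control-plane host and the insecure localhost port (10252) that kube-controller-manager exposed by default in this era:

```shell
# Fetch the first few Prometheus-format metrics from the controller manager.
curl -s http://localhost:10252/metrics | head
```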
@@ -39,4 +39,4 @@ Metrik ini dikeluarkan dalam bentuk [format prometheus](https://prometheus.io/do Pada _environment_ produksi, kamu mungkin juga ingin mengonfigurasi prometheus atau pengumpul metrik lainnya untuk mengumpulkan metrik-metrik ini secara berkala dalam bentuk basis data _time series_. -{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/federation.md b/content/id/docs/concepts/cluster-administration/federation.md index b669501d72..7690a75a82 100644 --- a/content/id/docs/concepts/cluster-administration/federation.md +++ b/content/id/docs/concepts/cluster-administration/federation.md @@ -1,10 +1,10 @@ --- title: Federation -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + {{< deprecationfilewarning >}} {{< include "federation-deprecation-warning-note.md" >}} @@ -12,9 +12,9 @@ weight: 80 Laman ini menjelaskan alasan dan cara penggunaan _federation_ untuk melakukan manajemen klaster Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Kenapa _Federation_ ? _Federation_ membuat proses manajemen klaster multipel menjadi lebih mudah. @@ -181,9 +181,10 @@ Terakhir, jika klaster yang kamu miliki membutuhkan jumlah _node_ yang melebihi maka kamu membutuhkan lebih banyak klaster. Kubernetes v1.3 mampu menangani hingga 1000 node untuk setiap klaster. Kubernetes v1.8 mampu menangani hingga 5000 node untuk tiap klaster. Baca [Membangun Klaster Besar](/docs/setup/cluster-large/) untuk petunjuk lebih lanjut. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Pelajari lebih lanjut tentang [proposal _Federation_](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/multicluster/federation.md). * Baca [petunjuk pengaktifan](/docs/tutorials/federation/set-up-cluster-federation-kubefed/) klaster _federation_. @@ -192,4 +193,4 @@ mampu menangani hingga 5000 node untuk tiap klaster. Baca [Membangun Klaster Bes * Lihat [_update_ _sig-multicluster_ pada Kubecon2018 Eropa](https://www.youtube.com/watch?v=vGZo5DaThQU) * Lihat [presentasi prototipe _Federation-v2_ pada Kubecon2018 Eropa](https://youtu.be/q27rbaX5Jis?t=7m20s) * Lihat [petunjuk penggunaan _Federation-v2_](https://github.com/kubernetes-sigs/federation-v2/blob/master/docs/userguide.md) -{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/flow-control.md b/content/id/docs/concepts/cluster-administration/flow-control.md index e95bb84d82..b8d8f9acf7 100644 --- a/content/id/docs/concepts/cluster-administration/flow-control.md +++ b/content/id/docs/concepts/cluster-administration/flow-control.md @@ -1,10 +1,10 @@ --- title: Prioritas dan Kesetaraan API (API Priority and Fairness) -content_template: templates/concept +content_type: concept min-kubernetes-server-version: v1.18 --- -{{% capture overview %}} + {{< feature-state state="alpha" for_k8s_version="v1.18" >}} @@ -32,9 +32,9 @@ opsi `--max-request-inflight` tanpa mengaktifkan APF. {{< /caution >}} -{{% /capture %}} -{{% capture body %}} + + ## Mengaktifkan prioritas dan kesetaraan API @@ -362,13 +362,14 @@ beban kerja yang berperilaku buruk yang dapat membahayakan kesehatan dari sistem berdasarkan FlowSchema yang cocok dengan permintaan dan tingkat prioritas yang ditetapkan pada permintaan tersebut. 
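The paragraph above describes requests being classified by a matching FlowSchema and its assigned priority level. A minimal sketch under the `v1alpha1` API this page targets, modeled on the upstream `health-for-strangers` example (treat exact field shapes as illustrative):

```yaml
apiVersion: flowcontrol.apiserver.k8s.io/v1alpha1
kind: FlowSchema
metadata:
  name: health-for-strangers
spec:
  matchingPrecedence: 1000
  priorityLevelConfiguration:
    name: exempt          # health checks should never be throttled
  rules:
  - nonResourceRules:
    - nonResourceURLs: ["/healthz", "/livez", "/readyz"]
      verbs: ["*"]
    subjects:
    - kind: Group
      group:
        name: system:unauthenticated
```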
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Untuk latar belakang informasi mengenai detail desain dari prioritas dan kesetaraan API, silahkan lihat [proposal pembaharuan](https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190228-priority-and-fairness.md). Kamu juga dapat membuat saran dan permintaan akan fitur melalui [SIG API Machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery). -{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/kubelet-garbage-collection.md b/content/id/docs/concepts/cluster-administration/kubelet-garbage-collection.md index fd92c4896b..6990887cc1 100644 --- a/content/id/docs/concepts/cluster-administration/kubelet-garbage-collection.md +++ b/content/id/docs/concepts/cluster-administration/kubelet-garbage-collection.md @@ -1,10 +1,10 @@ --- title: Konfigurasi Garbage Collection pada kubelet -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + *Garbage collection* merupakan fitur kubelet yang sangat bermanfaat, yang akan membersihkan *image-image* dan juga kontainer-kontainer yang tidak lagi digunakan. Kubelet akan melakukan *garbage collection* untuk kontainer setiap satu menit dan *garbage collection* untuk @@ -13,10 +13,10 @@ yang tidak lagi digunakan. Kubelet akan melakukan *garbage collection* untuk kon Perangkat *garbage collection* eksternal tidak direkomendasikan karena perangkat tersebut berpotensi merusak perilaku kubelet dengan menghilangkan kontainer-kontainer yang sebenarnya masih diperlukan. -{{% /capture %}} -{{% capture body %}} + + ## *Garbage Collection* untuk *Image* @@ -87,10 +87,11 @@ Beberapa fitur *Garbage Collection* pada kubelet di laman ini akan digantikan ol | `--low-diskspace-threshold-mb` | `--eviction-hard` atau `eviction-soft` | *eviction* memberi generalisasi *threshold* disk untuk *resource-resource* lainnya | | `--outofdisk-transition-frequency` | `--eviction-pressure-transition-period` | *eviction* memberi generalisasi transisi tekanan *disk* (*disk pressure*)untuk *resource-resource* lainnya | -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Lihat [Konfigurasi untuk Menangani Kehabisan *Resource*](/docs/tasks/administer-cluster/out-of-resource/) untuk penjelasan lebih lanjut. -{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/logging.md b/content/id/docs/concepts/cluster-administration/logging.md index 6ad7a86957..53203777f2 100644 --- a/content/id/docs/concepts/cluster-administration/logging.md +++ b/content/id/docs/concepts/cluster-administration/logging.md @@ -1,19 +1,19 @@ --- title: Arsitektur Logging -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + Log aplikasi dan sistem dapat membantu kamu untuk memahami apa yang terjadi di dalam klaster kamu. Log berguna untuk mengidentifikasi dan menyelesaikan masalah serta memonitor aktivitas klaster. Hampir semua aplikasi modern mempunyai sejenis mekanisme log sehingga hampir semua mesin kontainer didesain untuk mendukung suatu mekanisme _logging_. Metode _logging_ yang paling mudah untuk aplikasi dalam bentuk kontainer adalah menggunakan _standard output_ dan _standard error_. Namun, fungsionalitas bawaan dari mesin kontainer atau _runtime_ biasanya tidak cukup memadai sebagai solusi log. 
Contohnya, jika sebuah kontainer gagal, sebuah pod dihapus, atau suatu _node_ mati, kamu biasanya tetap menginginkan untuk mengakses log dari aplikasimu. Oleh sebab itu, log sebaiknya berada pada penyimpanan dan _lifecyle_ yang terpisah dari node, pod, atau kontainer. Konsep ini dinamakan sebagai _logging_ pada level klaster. _Logging_ pada level klaster ini membutuhkan _backend_ yang terpisah untuk menyimpan, menganalisis, dan mengkueri log. Kubernetes tidak menyediakan solusi bawaan untuk penyimpanan data log, namun kamu dapat mengintegrasikan beragam solusi _logging_ yang telah ada ke dalam klaster Kubernetes kamu. -{{% /capture %}} -{{% capture body %}} + + Arsitektur _logging_ pada level klaster yang akan dijelaskan berikut mengasumsikan bahwa sebuah _logging backend_ telah tersedia baik di dalam maupun di luar klastermu. Meskipun kamu tidak tertarik menggunakan _logging_ pada level klaster, penjelasan tentang bagaimana log disimpan dan ditangani pada node di bawah ini mungkin dapat berguna untukmu. @@ -195,4 +195,4 @@ Ingat, ini hanya contoh saja dan kamu dapat mengganti fluentd dengan agen _loggi Kamu dapat mengimplementasikan klaster-level _logging_ dengan mengekspos atau mengeluarkan log langsung dari tiap aplikasi; namun cara implementasi mekanisme _logging_ tersebut diluar cakupan dari Kubernetes. -{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/manage-deployment.md b/content/id/docs/concepts/cluster-administration/manage-deployment.md index 5a94792af2..81c0ba4d08 100644 --- a/content/id/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/id/docs/concepts/cluster-administration/manage-deployment.md @@ -1,17 +1,17 @@ --- title: Mengelola Resource -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Kamu telah melakukan _deploy_ pada aplikasimu dan mengeksposnya melalui sebuah _service_. Lalu? Kubernetes menyediakan berbagai peralatan untuk membantu mengatur mekanisme _deploy_ aplikasi, termasuk pengaturan kapasitas dan pembaruan. Diantara fitur yang akan didiskusikan lebih mendalam yaitu [berkas konfigurasi](/docs/concepts/configuration/overview/) dan [label](/docs/concepts/overview/working-with-objects/labels/). -{{% /capture %}} -{{% capture body %}} + + ## Mengelola konfigurasi _resource_ @@ -434,11 +434,12 @@ kubectl edit deployment/my-nginx Selesai! Deployment akan memperbarui aplikasi nginx yang terdeploy secara berangsur di belakang. Dia akan menjamin hanya ada sekian replika lama yang akan down selagi pembaruan berjalan dan hanya ada sekian replika baru akan dibuat melebihi jumlah pod. Untuk mempelajari lebih lanjut, kunjungi [laman Deployment](/docs/concepts/workloads/controllers/deployment/). 
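To follow or revert such a rollout from the command line, a sketch using the same `my-nginx` Deployment the page edits:

```shell
kubectl rollout status deployment/my-nginx   # watch the rolling update progress
kubectl rollout undo deployment/my-nginx     # roll back if the new revision misbehaves
```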
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [Pelajari tentang bagaimana memakai `kubectl` untuk memeriksa dan _debug_ aplikasi.](/docs/tasks/debug-application-cluster/debug-application-introspection/) - [Praktik Terbaik dan Tips Konfigurasi](/docs/concepts/configuration/overview/) -{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/monitoring.md b/content/id/docs/concepts/cluster-administration/monitoring.md index 501719a757..f4917496a9 100644 --- a/content/id/docs/concepts/cluster-administration/monitoring.md +++ b/content/id/docs/concepts/cluster-administration/monitoring.md @@ -1,12 +1,12 @@ --- title: Metrik-Metrik untuk Control Plane Kubernetes -content_template: templates/concept +content_type: concept weight: 60 aliases: - controller-metrics.md --- -{{% capture overview %}} + Metrik dari komponen sistem dapat memberikan pandangan yang lebih baik tentang apa yang sedang terjadi di dalam sistem. Metrik sangat berguna untuk membuat dasbor (_dashboard_) @@ -15,9 +15,9 @@ dan peringatan (_alert_). Metrik di dalam _control plane_ Kubernetes disajikan dalam [format prometheus](https://prometheus.io/docs/instrumenting/exposition_formats/) dan dapat terbaca oleh manusia. -{{% /capture %}} -{{% capture body %}} + + ## Metrik-Metrik pada Kubernetes @@ -158,10 +158,11 @@ cloudprovider_gce_api_request_duration_seconds { request = "detach_disk"} cloudprovider_gce_api_request_duration_seconds { request = "list_disk"} ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Baca tentang [format teks Prometheus](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) untuk berbagai metrik * Lihat daftar [metrik Kubernetes yang _stable_](https://github.com/kubernetes/kubernetes/blob/master/test/instrumentation/testdata/stable-metrics-list.yaml) * Baca tentang [kebijakan _deprecation_ Kubernetes](https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior ) -{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/networking.md b/content/id/docs/concepts/cluster-administration/networking.md index 23fd828fa7..038465bcb8 100644 --- a/content/id/docs/concepts/cluster-administration/networking.md +++ b/content/id/docs/concepts/cluster-administration/networking.md @@ -1,10 +1,10 @@ --- title: Jaringan Kluster -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + Jaringan adalah bagian utama dari Kubernetes, tetapi bisa menjadi sulit untuk memahami persis bagaimana mengharapkannya bisa bekerja. Ada 4 masalah yang berbeda untuk diatasi: @@ -15,10 +15,10 @@ Ada 4 masalah yang berbeda untuk diatasi: 3. Komunikasi Pod dengan Service: ini terdapat di [Service](/docs/concepts/services-networking/service/). 4. Komunikasi eksternal dengan Service: ini terdapat di [Service](/docs/concepts/services-networking/service/). -{{% /capture %}} -{{% capture body %}} + + Kubernetes adalah tentang berbagi mesin antar aplikasi. Pada dasarnya, saat berbagi mesin harus memastikan bahwa dua aplikasi tidak mencoba menggunakan @@ -219,10 +219,11 @@ Calico juga dapat dijalankan dalam mode penegakan kebijakan bersama dengan solus [Weave Net](https://www.weave.works/products/weave-net/) adalah jaringan yang tangguh dan mudah digunakan untuk Kubernetes dan aplikasi yang dihostingnya. 
Weave Net berjalan sebagai [plug-in CNI](https://www.weave.works/docs/net/latest/cni-plugin/) atau berdiri sendiri. Di kedua versi, itu tidak memerlukan konfigurasi atau kode tambahan untuk dijalankan, dan dalam kedua kasus, jaringan menyediakan satu alamat IP per Pod - seperti standar untuk Kubernetes. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Desain awal model jaringan dan alasannya, dan beberapa rencana masa depan dijelaskan secara lebih rinci dalam [dokumen desain jaringan](https://git.k8s.io/community/contributors/design-proposals/network/networking.md). -{{% /capture %}} + diff --git a/content/id/docs/concepts/cluster-administration/proxies.md b/content/id/docs/concepts/cluster-administration/proxies.md index 50fa737c51..5595414aa9 100644 --- a/content/id/docs/concepts/cluster-administration/proxies.md +++ b/content/id/docs/concepts/cluster-administration/proxies.md @@ -1,14 +1,14 @@ --- title: Berbagai Proxy di Kubernetes -content_template: templates/concept +content_type: concept weight: 90 --- -{{% capture overview %}} + Laman ini menjelaskan berbagai proxy yang ada di dalam Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Berbagai Jenis Proxy @@ -62,4 +62,4 @@ Untuk proxy-proxy lain di luar ini, admin klaster biasanya akan memastika Proxy telah menggantikan fungsi redirect. Redirect telah terdeprekasi. -{{% /capture %}} + diff --git a/content/id/docs/concepts/configuration/assign-pod-node.md b/content/id/docs/concepts/configuration/assign-pod-node.md index 12cf9433d6..8af1abba28 100644 --- a/content/id/docs/concepts/configuration/assign-pod-node.md +++ b/content/id/docs/concepts/configuration/assign-pod-node.md @@ -1,19 +1,19 @@ --- title: Menetapkan Pod ke Node -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Kamu dapat memaksa sebuah [pod](/docs/concepts/workloads/pods/pod/) untuk hanya dapat berjalan pada [node](/docs/concepts/architecture/nodes/) tertentu atau mengajukannya agar berjalan pada node tertentu. Ada beberapa cara untuk melakukan hal tersebut. Semua cara yang direkomendasikan adalah dengan menggunakan [_selector_ label](/docs/concepts/overview/working-with-objects/labels/) untuk menetapkan pilihan yang kamu inginkan. Pada umumnya, pembatasan ini tidak dibutuhkan, sebagaimana _scheduler_ akan melakukan penempatan yang proporsional dengan otomatis (seperti contohnya menyebar pod di node-node, tidak menempatkan pod pada node dengan sumber daya yang tidak memadai, dst.) tetapi ada keadaan-keadaan tertentu yang membuat kamu memiliki kendali lebih terhadap node yang menjadi tempat pod dijalankan, contohnya untuk memastikan pod dijalankan pada mesin yang telah terpasang SSD, atau untuk menempatkan pod-pod dari dua servis yang berbeda yang sering berkomunikasi bersamaan ke dalam zona ketersediaan yang sama. Kamu dapat menemukan semua berkas untuk contoh-contoh berikut pada [dokumentasi yang kami sediakan di sini](https://github.com/kubernetes/website/tree/{{< param "docsbranch" >}}/content/en/docs/concepts/configuration/) -{{% /capture %}} -{{% capture body %}} + + ## nodeSelector @@ -317,8 +317,9 @@ spec: ``` Pod di atas akan berjalan pada node kube-01. 
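For contrast with the `nodeName` example above, a minimal `nodeSelector` sketch; `disktype: ssd` is a hypothetical label you would first attach to a node:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx
  nodeSelector:
    disktype: ssd   # attach first with: kubectl label nodes <node-name> disktype=ssd
```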
-{{% /capture %}} -{{% capture whatsnext %}} -{{% /capture %}} +## {{% heading "whatsnext" %}} + + + diff --git a/content/id/docs/concepts/configuration/manage-compute-resources-container.md b/content/id/docs/concepts/configuration/manage-compute-resources-container.md index 61212e571d..3450bab459 100644 --- a/content/id/docs/concepts/configuration/manage-compute-resources-container.md +++ b/content/id/docs/concepts/configuration/manage-compute-resources-container.md @@ -1,6 +1,6 @@ --- title: Mengatur Sumber Daya Komputasi untuk Container -content_template: templates/concept +content_type: concept weight: 20 feature: title: Bin Packing Otomatis @@ -8,7 +8,7 @@ feature: Menaruh kontainer-kontainer secara otomatis berdasarkan kebutuhan sumber daya mereka dan batasan-batasan lainnya, tanpa mengorbankan ketersediaan. Membaurkan beban-beban kerja kritis dan _best-effort_ untuk meningkatkan penggunaan sumber daya dan menghemat lebih banyak sumber daya. --- -{{% capture overview %}} + Saat kamu membuat spesifikasi sebuah [Pod](/docs/concepts/workloads/pods/pod/), kamu dapat secara opsional menentukan seberapa banyak CPU dan memori (RAM) yang dibutuhkan @@ -18,9 +18,9 @@ untuk menaruh Pod-Pod. Dan saat limit (batas) sumber daya Container-Container te maka kemungkinan rebutan sumber daya pada sebuah Node dapat dihindari. Untuk informasi lebih lanjut mengenai perbedaan `request` dan `limit`, lihat [QoS Sumber Daya](https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md). -{{% /capture %}} -{{% capture body %}} + + ## Jenis-jenis sumber daya @@ -615,10 +615,11 @@ spec: -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Dapatkan pengalaman langsung [menentukan sumber daya memori untuk Container dan Pod](/docs/tasks/configure-pod-container/assign-memory-resource/). @@ -628,4 +629,4 @@ spec: * [ResourceRequirements](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcerequirements-v1-core) -{{% /capture %}} + diff --git a/content/id/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/id/docs/concepts/configuration/organize-cluster-access-kubeconfig.md index f9eb8aa3cd..929c895821 100644 --- a/content/id/docs/concepts/configuration/organize-cluster-access-kubeconfig.md +++ b/content/id/docs/concepts/configuration/organize-cluster-access-kubeconfig.md @@ -1,10 +1,10 @@ --- title: Mengatur Akses Klaster Menggunakan Berkas kubeconfig -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + Gunakan berkas kubeconfig untuk mengatur informasi mengenai klaster, pengguna, _namespace_, dan mekanisme autentikasi. Perintah `kubectl` menggunakan berkas @@ -26,10 +26,10 @@ Instruksi langkah demi langkah untuk membuat dan menentukan berkas kubeconfig, bisa mengacu pada [Mengatur Akses Pada Beberapa Klaster] (/docs/tasks/access-application-cluster/configure-access-multiple-clusters). -{{% /capture %}} -{{% capture body %}} + + ## Mendukung beberapa klaster, pengguna, dan mekanisme autentikasi @@ -152,14 +152,15 @@ Referensi _file_ pada perintah adalah relatif terhadap direktori kerja saat ini. Dalam `$HOME/.kube/config`, _relative path_ akan disimpan secara relatif, dan _absolute path_ akan disimpan secara mutlak. 
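A short sketch of that merging behavior, assuming a second, hypothetical kubeconfig file at `$HOME/work/dev-cluster.yaml`:

```shell
# All files listed in KUBECONFIG are merged under the documented rules.
export KUBECONFIG="$HOME/.kube/config:$HOME/work/dev-cluster.yaml"
kubectl config view           # shows the merged configuration
kubectl config get-contexts   # contexts from both files appear in the merged view
```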
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Mengatur Akses Pada Beberapa Klaster](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) * [`kubectl config`](/docs/reference/generated/kubectl/kubectl-commands#config) -{{% /capture %}} + diff --git a/content/id/docs/concepts/configuration/overview.md b/content/id/docs/concepts/configuration/overview.md index 7dfcc8f503..76d68658ec 100644 --- a/content/id/docs/concepts/configuration/overview.md +++ b/content/id/docs/concepts/configuration/overview.md @@ -1,16 +1,16 @@ --- title: Konfigurasi dan Penerapan Konsep -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Dokumen ini menyoroti dan memperkuat pemahaman konsep konfigurasi yang dikenalkan di seluruh panduan pengguna, dokumentasi Memulai, dan contoh-contoh. Dokumentasi ini terbuka. Jika Anda menemukan sesuatu yang tidak ada dalam daftar ini tetapi mungkin bermanfaat bagi orang lain, jangan ragu untuk mengajukan issue atau mengirimkan PR. -{{% /capture %}} -{{% capture body %}} + + ## Tip konfigurasi secara umum @@ -109,6 +109,6 @@ Semantik caching dari penyedia gambar yang mendasarinya membuat bahkan `imagePul - Gunakan `kubectl run` dan` kubectl expose` untuk dengan cepat membuat Deployment dan Service single-container. Lihat [Use a Service to Access an Application in a Cluster](/docs/tasks/access-application-cluster/service-access-application-cluster/) untuk Contoh. -{{% /capture %}} + diff --git a/content/id/docs/concepts/configuration/pod-overhead.md b/content/id/docs/concepts/configuration/pod-overhead.md index 3c661e4bd5..e59301bb96 100644 --- a/content/id/docs/concepts/configuration/pod-overhead.md +++ b/content/id/docs/concepts/configuration/pod-overhead.md @@ -1,10 +1,10 @@ --- title: Overhead Pod -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.16" state="alpha" >}} @@ -13,10 +13,10 @@ Ketika kamu menjalankan Pod pada Node, Pod itu akan mengambil sejumlah sumber da _Pod Overhead_ adalah fitur yang berfungsi untuk menghitung sumber daya digunakan oleh infrastruktur Pod selain permintaan dan limit Container. -{{% /capture %}} -{{% capture body %}} + + ## Overhead Pod @@ -44,11 +44,12 @@ Pengguna yang dapat mengubah sumber daya RuntimeClass dapat memengaruhi kinerja Lihat [Ringkasan Otorisasi](/docs/reference/access-authn-authz/authorization/) untuk lebih lanjut. 
{{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [RuntimeClass](/docs/concepts/containers/runtime-class/) * [Desain PodOverhead](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md) -{{% /capture %}} + diff --git a/content/id/docs/concepts/configuration/pod-priority-preemption.md b/content/id/docs/concepts/configuration/pod-priority-preemption.md index ba19136fa1..a0c6035482 100644 --- a/content/id/docs/concepts/configuration/pod-priority-preemption.md +++ b/content/id/docs/concepts/configuration/pod-priority-preemption.md @@ -1,10 +1,10 @@ --- title: Prioritas dan Pemindahan Pod -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="1.14" state="stable" >}} @@ -27,9 +27,9 @@ Versi Kubernetes | Keadaan Priority and Pemindahan | Dihidupkan secara Bawaan {{< warning >}}Pada sebuah klaster di mana tidak semua pengguna dipercaya, seorang pengguna yang berniat jahat dapat membuat Pod-pod dengan prioritas paling tinggi, membuat Pod-pod lainnya dipindahkan/tidak dapat dijadwalkan. Untuk mengatasi masalah ini, [ResourceQuota](/docs/concepts/policy/resource-quotas/) ditambahkan untuk mendukung prioritas Pod. Seorang admin dapat membuat ResourceQuota untuk pengguna-pengguna pada tingkat prioritas tertentu, mencegah mereka untuk membuat Pod-pod pada prioritas tinggi. Fitur ini telah beta sejak Kubernetes 1.12. {{< /warning >}} -{{% /capture %}} -{{% capture body %}} + + ## Bagaimana cara menggunakan Priority dan pemindahan Pod @@ -253,4 +253,4 @@ Komponen satu-satunya yang mempertimbangkan baik QoS dan prioritas Pod adalah [p Kubelet menggolongkan Pod-pod untuk pengusiran pertama-tama berdasarkan apakah penggunaan sumber daya mereka melebihi `requests` mereka atau tidak, kemudian berdasarkan Priority, dan kemudian berdasarkan penggunaan sumber daya yang terbatas tersebut relatif terhadap `requests` dari Pod-pod tersebut. Lihat [Mengusir Pod-pod pengguna](/docs/tasks/administer-cluster/out-of-resource/#mengusir-pod-pod-pengguna) untuk lebih detail. Pengusiran oleh Kubelet karena kehabisan sumber daya tidak mengusir Pod-pod yang memiliki penggunaan sumber daya yang tidak melebihi `requests` mereka. Jika sebuah Pod dengan prioritas lebih rendah tidak melebihi `requests`-nya, ia tidak akan diusir. Pod lain dengan prioritas lebih tinggi yang melebihi `requests`-nya boleh diusir. -{{% /capture %}} + diff --git a/content/id/docs/concepts/configuration/resource-bin-packing.md b/content/id/docs/concepts/configuration/resource-bin-packing.md index 26798dccfd..0f5b92784a 100644 --- a/content/id/docs/concepts/configuration/resource-bin-packing.md +++ b/content/id/docs/concepts/configuration/resource-bin-packing.md @@ -1,10 +1,10 @@ --- title: Bin Packing Sumber Daya untuk Sumber Daya Tambahan -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="1.16" state="alpha" >}} @@ -13,9 +13,9 @@ _Kube-scheduler_ dapat dikonfigurasikan untuk mengaktifkan pembungkusan rapat `RequestedToCapacityRatioResourceAllocation`. Fungsi-fungsi prioritas dapat digunakan untuk menyempurnakan _kube-scheduler_ sesuai dengan kebutuhan. 
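A sketch of what such tuning looked like at the time: a kube-scheduler policy file enabling a `RequestedToCapacityRatio`-based priority. Key names follow the English page's example of this era and should be treated as illustrative; `intel.com/foo` is a hypothetical extended resource:

```json
{
  "kind": "Policy",
  "apiVersion": "v1",
  "priorities": [
    {
      "name": "RequestedToCapacityRatioPriority",
      "weight": 2,
      "argument": {
        "requestedToCapacityRatioArguments": {
          "shape": [
            {"utilization": 0, "score": 0},
            {"utilization": 100, "score": 10}
          ],
          "resources": [
            {"name": "intel.com/foo", "weight": 5}
          ]
        }
      }
    }
  ]
}
```

A rising shape like this favors packing (higher score as utilization grows); reversing the scores would favor spreading instead.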
-{{% /capture %}} -{{% capture body %}} + + ## Mengaktifkan _Bin Packing_ menggunakan RequestedToCapacityRatioResourceAllocation @@ -214,4 +214,4 @@ NodeScore = (5 * 5) + (7 * 1) + (10 * 3) / (5 + 1 + 3) ``` -{{% /capture %}} + diff --git a/content/id/docs/concepts/configuration/secret.md b/content/id/docs/concepts/configuration/secret.md index 1cb0622197..a6ca8dca88 100644 --- a/content/id/docs/concepts/configuration/secret.md +++ b/content/id/docs/concepts/configuration/secret.md @@ -1,6 +1,6 @@ --- title: Secret -content_template: templates/concept +content_type: concept feature: title: Secret dan manajemen konfigurasi description: > @@ -9,16 +9,16 @@ weight: 50 --- -{{% capture overview %}} + Objek `secret` pada Kubernetes mengizinkan kamu menyimpan dan mengatur informasi yang sifatnya sensitif, seperti _password_, token OAuth, dan ssh _keys_. Menyimpan informasi yang sifatnya sensitif ini ke dalam `secret` cenderung lebih aman dan fleksible jika dibandingkan dengan menyimpan informasi tersebut secara apa adanya pada definisi {{< glossary_tooltip term_id="pod" >}} atau di dalam {{< glossary_tooltip text="container image" term_id="image" >}}. Silahkan lihat [Dokumen desain Secret](https://git.k8s.io/community/contributors/design-proposals/auth/secrets.md) untuk informasi yang sifatnya mendetail. -{{% /capture %}} -{{% capture body %}} + + ## Ikhtisar Secret @@ -1055,6 +1055,7 @@ dalam keadaan tidak terenkripsi. dengan cara meniru kubelet. Meskipun begitu, terdapat fitur yang direncanakan pada rilis selanjutnya yang memungkinkan pengiriman secret hanya dapat mengirimkan secret pada node yang membutuhkan secret tersebut untuk membatasi adanya eksploitasi akses _root_ pada node ini. -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + + -{{% /capture %}} diff --git a/content/id/docs/concepts/configuration/taint-and-toleration.md b/content/id/docs/concepts/configuration/taint-and-toleration.md index 03fa777fe2..9a30b48f5b 100644 --- a/content/id/docs/concepts/configuration/taint-and-toleration.md +++ b/content/id/docs/concepts/configuration/taint-and-toleration.md @@ -1,11 +1,11 @@ --- title: Taint dan Toleration -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Afinitas Node, seperti yang dideskripsikan [di sini](/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature), adalah salah satu properti dari Pod yang menyebabkan pod tersebut memiliki preferensi untuk ditempatkan di sekelompok Node tertentu (preferensi ini dapat berupa _soft constraints_ atau @@ -16,9 +16,9 @@ _Taint_ dan _toleration_ bekerja sama untuk memastikan Pod dijadwalkan pada Node yang sesuai. Satu atau lebih _taint_ akan diterapkan pada suatu node; hal ini akan menyebabkan node tidak akan menerima pod yang tidak mengikuti _taint_ yang sudah diterapkan. -{{% /capture %}} -{{% capture body %}} + + ## Konsep diff --git a/content/id/docs/concepts/containers/container-environment.md b/content/id/docs/concepts/containers/container-environment.md index 55c1bea6cb..affb371001 100644 --- a/content/id/docs/concepts/containers/container-environment.md +++ b/content/id/docs/concepts/containers/container-environment.md @@ -1,17 +1,17 @@ --- title: Kontainer Environment -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Laman ini menjelaskan berbagai *resource* yang tersedia di dalam Kontainer pada suatu *environment*. 
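A minimal sketch of creating such an object from literal values (names and values hypothetical); note that the values are stored base64-encoded, not encrypted, unless encryption at rest is configured:

```shell
kubectl create secret generic db-user-pass \
  --from-literal=username=admin \
  --from-literal=password='S3cr3t!'
```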
-{{% /capture %}} -{{% capture body %}} + + ## *Environment* Kontainer @@ -48,12 +48,13 @@ FOO_SERVICE_PORT= Semua *Service* memiliki alamat-alamat IP yang bisa didapatkan di dalam Kontainer melalui DNS, jika [*addon* DNS](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/) diaktifkan.  -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Pelajari lebih lanjut tentang [berbagai *hook* pada *lifecycle* Kontainer](/docs/concepts/containers/container-lifecycle-hooks/). * Dapatkan pengalaman praktis soal [memberikan *handler* untuk *event* dari *lifecycle* Kontainer](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). -{{% /capture %}} + diff --git a/content/id/docs/concepts/containers/container-lifecycle-hooks.md b/content/id/docs/concepts/containers/container-lifecycle-hooks.md index 812bd81ec8..a7b5164864 100644 --- a/content/id/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/id/docs/concepts/containers/container-lifecycle-hooks.md @@ -1,18 +1,18 @@ --- title: Lifecyle Hook pada Kontainer -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Laman ini menjelaskan bagaimana semua Kontainer yang diatur kubelet menggunakan *framework lifecycle hook* untuk menjalankan kode yang di-*trigger* oleh *event* selama *lifecycle* berlangsung. -{{% /capture %}} -{{% capture body %}} + + ## Ikhtisar @@ -108,12 +108,13 @@ Events: 1m 22s 2 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Warning FailedPostStartHook ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Pelajari lebih lanjut tentang [*environment* Kontainer](/docs/concepts/containers/container-environment-variables/). * Pelajari bagaimana caranya [melakukan *attach handler* pada *event lifecycle* sebuah Kontainer](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). -{{% /capture %}} + diff --git a/content/id/docs/concepts/containers/images.md b/content/id/docs/concepts/containers/images.md index 59f980a35a..7a5fa28154 100644 --- a/content/id/docs/concepts/containers/images.md +++ b/content/id/docs/concepts/containers/images.md @@ -1,19 +1,19 @@ --- title: Image -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Kamu membuat Docker _image_ dan mengunduhnya ke sebuah registri sebelum digunakan di dalam Kubernetes Pod. Properti `image` dari sebuah Container mendukung sintaksis yang sama seperti perintah `docker`, termasuk registri privat dan _tag_. -{{% /capture %}} -{{% capture body %}} + + ## Memperbarui Image @@ -367,4 +367,4 @@ dan solusi yang disarankan. Jika kamu memiliki akses pada beberapa registri, kamu dapat membuat satu _secret_ untuk setiap registri. Kubelet akan melakukan _merge_ `imagePullSecrets` manapun menjadi sebuah virtual `.docker/config.json`. 
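A sketch of that syntax in a Pod spec, using a hypothetical private registry, port, and tag:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: private-image-demo
spec:
  containers:
  - name: app
    # registry-host[:port]/repository:tag — the same form the docker CLI accepts
    image: registry.example.com:5000/team/app:v1.2.3
```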
-{{% /capture %}} + diff --git a/content/id/docs/concepts/containers/overview.md b/content/id/docs/concepts/containers/overview.md index 7ec5ef55d5..d31c760ee0 100644 --- a/content/id/docs/concepts/containers/overview.md +++ b/content/id/docs/concepts/containers/overview.md @@ -1,10 +1,10 @@ --- title: Ikhtisar Kontainer -content_template: templates/concept +content_type: concept weight: 1 --- -{{% capture overview %}} + Kontainer adalah teknologi untuk mengemas kode (yang telah dikompilasi) menjadi suatu aplikasi beserta dengan dependensi-dependensi yang dibutuhkannya pada saat @@ -15,9 +15,9 @@ sama di mana pun Anda menjalankannya. Kontainer memisahkan aplikasi dari infrastruktur host yang ada dibawahnya. Hal ini membuat penyebaran lebih mudah di lingkungan cloud atau OS yang berbeda. -{{% /capture %}} -{{% capture body %}} + + ## Image-Image Kontainer @@ -46,4 +46,3 @@ menjalankan kontainer. Kubernetes mendukung beberapa kontainer *runtime*: - Baca tentang [image-image kontainer](https://kubernetes.io/docs/concepts/containers/images/) - Baca tentang [Pod](https://kubernetes.io/docs/concepts/workloads/pods/) -{{% /capture %}} \ No newline at end of file diff --git a/content/id/docs/concepts/containers/runtime-class.md b/content/id/docs/concepts/containers/runtime-class.md index dca24e9b6d..31bd8a25ec 100644 --- a/content/id/docs/concepts/containers/runtime-class.md +++ b/content/id/docs/concepts/containers/runtime-class.md @@ -1,10 +1,10 @@ --- title: Runtime Class -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.14" state="beta" >}} @@ -15,10 +15,10 @@ RuntimeClass memiliki _breaking change_ untuk pembaruan ke beta pada v1.14. Jika RuntimeClass sebelum v1.14, lihat [Memperbarui RuntimeClass dari Alpha ke Beta](#memperbarui-runtimeclass-dari-alpha-ke-beta). {{< /warning >}} -{{% /capture %}} -{{% capture body %}} + + ## `Runtime Class` @@ -158,4 +158,4 @@ pembaruan fitur RuntimeClass dari versi alpha ke versi beta: kosong atau menggunakan karakter `.` pada _handler_. Ini harus dimigrasi ke _handler_ dengan konfigurasi yang valid (lihat petunjuk di atas). -{{% /capture %}} + diff --git a/content/id/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md b/content/id/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md index 7276a0841d..fffc8709a6 100644 --- a/content/id/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md +++ b/content/id/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md @@ -1,16 +1,16 @@ --- title: Memperluas Kubernetes API dengan Lapisan Agregasi -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Lapisan agregasi memungkinkan Kubernetes untuk diperluas dengan API tambahan, selain dari yang ditawarkan oleh API inti Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Ikhtisar Lapisan agregasi memungkinkan instalasi tambahan beragam API _Kubernetes-style_ di kluster kamu. Tambahan-tambahan ini dapat berupa solusi-solusi yang sudah dibangun (_prebuilt_) oleh pihak ke-3 yang sudah ada, seperti [_service-catalog_](https://github.com/kubernetes-incubator/service-catalog/blob/master/README.md), atau API yang dibuat oleh pengguna seperti [apiserver-builder](https://github.com/kubernetes-incubator/apiserver-builder/blob/master/README.md), yang dapat membantu kamu memulainya. 
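A sketch of the scenario just described: one Secret per registry, all referenced from a single Pod so the kubelet can merge them (all names hypothetical):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: multi-registry-demo
spec:
  containers:
  - name: app
    image: registry-a.example.com/app:1.0
  - name: helper
    image: registry-b.example.com/helper:2.1
  imagePullSecrets:
  - name: regcred-a   # docker-registry Secret for registry-a
  - name: regcred-b   # docker-registry Secret for registry-b
```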
@@ -25,12 +25,12 @@ Jika implementasi kamu tidak dapat menyanggupinya, kamu harus mempertimbangkan c _feature-gate_ `EnableAggregatedDiscoveryTimeout=false` di kube-apiserver akan menonaktifkan batasan waktu tersebut. Fitur ini akan dihapus dalam rilis mendatang. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Untuk mengaktifkan agregator di lingkungan kamu, aktifkan[konfigurasi lapisan agregasi](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/). * Kemudian, [siapkan ekstensi api-server](/docs/tasks/access-kubernetes-api/setup-extension-api-server/) untuk bekerja dengan lapisan agregasi. * Selain itu, pelajari caranya [mengembangkan API Kubernetes menggunakan _Custom Resource Definition_](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/). -{{% /capture %}} \ No newline at end of file diff --git a/content/id/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/id/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index 7599fecef7..d8be642856 100644 --- a/content/id/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/id/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -1,16 +1,16 @@ --- title: Custom Resource -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + _Custom Resource_ adalah ekstensi dari Kubernetes API. Laman ini mendiskusikan kapan kamu melakukan penambahan sebuah _Custom Resource_ ke klaster Kubernetes dan kapan kamu menggunakan sebuah layanan mandiri. Laman ini mendeskripsikan dua metode untuk menambahkan _Custom Resource_ dan bagaimana cara memilihnya. -{{% /capture %}} -{{% capture body %}} + + ## _Custom Resource_ @@ -211,12 +211,13 @@ Ketika kamu menambahkan sebuah _Custom Resource_, kamu dapat mengaksesnya dengan - Sebuah klien REST yang kamu tulis - Sebuah klien yang dibuat menggunakan [Kubernetes client generation tools](https://github.com/kubernetes/code-generator) (membuat satu adalah usaha lanjutan, tetapi beberapa proyek mungkin menyajikan sebuah klien bersama dengan CRD atau AA). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Belajar bagaimana untuk [Memperluas Kubernetes API dengan lapisan agregasi](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/). * Belajar bagaimana untuk [Memperluas Kubernetes API dengan CustomResourceDefinition](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/). -{{% /capture %}} + diff --git a/content/id/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/id/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index beb972e9bf..014a40171e 100644 --- a/content/id/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/id/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -2,11 +2,11 @@ reviewers: title: Plugin Perangkat description: Gunakan kerangka kerja _plugin_ perangkat Kubernetes untuk mengimplementasikan plugin untuk GPU, NIC, FPGA, InfiniBand, dan sumber daya sejenis yang membutuhkan setelan spesifik vendor. 
-content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.10" state="beta" >}} Kubernetes menyediakan [kerangka kerja _plugin_ perangkat](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/resource-management/device-plugin.md) @@ -17,9 +17,9 @@ _plugin_ perangkat yang di-_deploy_ secara manual atau sebagai {{< glossary_tool Perangkat yang dituju termasuk GPU, NIC berkinerja tinggi, FPGA, adaptor InfiniBand, dan sumber daya komputasi sejenis lainnya yang perlu inisialisasi dan setelan spesifik vendor. -{{% /capture %}} -{{% capture body %}} + + ## Pendaftaran _plugin_ perangkat @@ -223,12 +223,13 @@ Berikut beberapa contoh implementasi _plugin_ perangkat: * [Plugin perangkat SR-IOV Network](https://github.com/intel/sriov-network-device-plugin) * [Plugin perangkat Xilinx FPGA](https://github.com/Xilinx/FPGA_as_a_Service/tree/master/k8s-fpga-device-plugin/trunk) untuk perangkat Xilinx FPGA -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Pelajari bagaimana [menjadwalkan sumber daya GPU](/docs/tasks/manage-gpus/scheduling-gpus/) dengan _plugin_ perangkat * Pelajari bagaimana [mengumumkan sumber daya ekstensi](/docs/tasks/administer-cluster/extended-resource-node/) pada node * Baca tentang penggunaan [akselerasi perangkat keras untuk ingress TLS](https://kubernetes.io/blog/2019/04/24/hardware-accelerated-ssl/tls-termination-in-ingress-controllers-using-kubernetes-device-plugins-and-runtimeclass/) dengan Kubernetes * Pelajari tentang [Topology Manager](/docs/tasks/administer-cluster/topology-manager/) -{{% /capture %}} + diff --git a/content/id/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md b/content/id/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md index 7bf34d22d4..f54e285549 100644 --- a/content/id/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md +++ b/content/id/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md @@ -1,11 +1,11 @@ --- title: Plugin Jaringan -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< feature-state state="alpha" >}} {{< warning >}}Fitur-fitur Alpha berubah dengan cepat. {{< /warning >}} @@ -15,9 +15,9 @@ _Plugin_ jaringan di Kubernetes hadir dalam beberapa varian: * _Plugin_ CNI : mengikuti spesifikasi appc / CNI, yang dirancang untuk interoperabilitas. * _Plugin_ Kubenet : mengimplementasi `cbr0` sederhana menggunakan _plugin_ `bridge` dan `host-local` CNI -{{% /capture %}} -{{% capture body %}} + + ## Instalasi @@ -151,8 +151,9 @@ Opsi ini disediakan untuk _plugin_ jaringan; Saat ini **hanya kubenet yang mendu * `--network-plugin=kubenet` menentukan bahwa kita menggunakan _plugin_ jaringan `kubenet` dengan `bridge` CNI dan _plugin-plugin_ `host-local` yang terletak di `/opt/cni/bin` atau `cni-bin-dir`. * `--network-plugin-mtu=9001` menentukan MTU yang akan digunakan, saat ini hanya digunakan oleh _plugin_ jaringan `kubenet`.
-{{% /capture %}} -{{% capture whatsnext %}} -{{% /capture %}} +## {{% heading "whatsnext" %}} + + + diff --git a/content/id/docs/concepts/extend-kubernetes/extend-cluster.md b/content/id/docs/concepts/extend-kubernetes/extend-cluster.md index 0d979f31d6..b7b07b46ff 100644 --- a/content/id/docs/concepts/extend-kubernetes/extend-cluster.md +++ b/content/id/docs/concepts/extend-kubernetes/extend-cluster.md @@ -1,10 +1,10 @@ --- title: Memperluas Klaster Kubernetes Kamu -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Kubernetes sangat mudah dikonfigurasi dan diperluas. Sehingga, jarang membutuhkan _fork_ atau menambahkan _patch_ ke kode proyek Kubernetes. @@ -16,10 +16,10 @@ memahami bagaimana menyesuaikan klaster Kubernetes dengan kebutuhan lingkungan k Developer yang prospektif {{< glossary_tooltip text="Developer Platform" term_id="platform-developer" >}} atau {{< glossary_tooltip text="Kontributor" term_id="contributor" >}} Proyek Kubernetes juga mendapatkan manfaat dari dokumen ini sebagai pengantar apa saja poin-poin dan pola-pola perluasan yang ada, untung-rugi, dan batasan-batasannya. -{{% /capture %}} -{{% capture body %}} + + ## Ikhtisar @@ -161,10 +161,11 @@ Ini adalah usaha yang signifikan, dan hampir semua pengguna Kubernetes merasa me Penjadwal juga mendukung [_webhook_](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/scheduler_extender.md) yang memperbolehkan sebuah _webhook backend_ (perluasan penjadwal) untuk menyaring dan memprioritaskan Node yang terpilih untuk sebuah Pod. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Pelajari lebih lanjut tentang [Sumber Daya _Custom_](/docs/concepts/api-extension/custom-resources/) * Pelajari tentang [Kontrol Admisi Dinamis](/docs/reference/access-authn-authz/extensible-admission-controllers/) @@ -174,4 +175,4 @@ Penjadwal juga mendukung [_webhook_](https://github.com/kubernetes/community/blo * Pelajari tentang [_Plugin_ kubectl](/docs/tasks/extend-kubectl/kubectl-plugins/) * Pelajari tentang [Pola Operator](/docs/concepts/extend-kubernetes/operator/) -{{% /capture %}} + diff --git a/content/id/docs/concepts/extend-kubernetes/operator.md b/content/id/docs/concepts/extend-kubernetes/operator.md index dd9b803485..02df63bb79 100644 --- a/content/id/docs/concepts/extend-kubernetes/operator.md +++ b/content/id/docs/concepts/extend-kubernetes/operator.md @@ -1,20 +1,20 @@ --- title: Pola Operator -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Operator adalah ekstensi perangkat lunak untuk Kubernetes yang memanfaatkan [_custom resource_](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) untuk mengelola aplikasi dan komponen-komponennya. Operator mengikuti prinsip Kubernetes, khususnya dalam hal [_control loop_](/docs/concepts/#kubernetes-control-plane). -{{% /capture %}} -{{% capture body %}} + + ## Motivasi @@ -124,7 +124,7 @@ Kamu juga dapat mengimplementasikan Operator (yaitu, _Controller_) dengan menggunakan bahasa / _runtime_ yang dapat bertindak sebagai [klien dari API Kubernetes](/docs/reference/using-api/client-libraries/). 
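Sebagai ilustrasi pola di atas, berikut sketsa sumber daya _custom_ rekaan yang lazim diawasi oleh sebuah Operator. Grup `example.com`, jenis `SampleDB`, dan seluruh _field_ `spec`-nya hanyalah asumsi, bukan API yang sungguh ada:

```yaml
apiVersion: example.com/v1alpha1   # grup/versi API hipotetis milik Operator
kind: SampleDB                     # jenis sumber daya kustom hipotetis
metadata:
  name: contoh-db
spec:
  replicas: 2        # state yang diinginkan; Operator menyesuaikan state aktual
  version: "10.3"    # Operator dapat mengotomasi proses upgrade versi
```

Kontroler milik Operator mengamati objek semacam ini melalui API Kubernetes, lalu membuat atau memperbarui sumber daya bawaan (misalnya StatefulSet dan Service) sampai keadaan aktual cocok dengan `spec` di atas.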
-{{% /capture %}} -{{% capture Selanjutnya %}} + +## {{% heading "whatsnext" %}} @@ -143,4 +143,4 @@ menggunakan bahasa / _runtime_ yang dapat bertindak sebagai yang memperkenalkan pola Operator * Baca sebuah [artikel](https://cloud.google.com/blog/products/containers-kubernetes/best-practices-for-building-kubernetes-operators-and-stateful-apps) dari Google Cloud soal panduan terbaik membangun Operator -{{% /capture %}} + diff --git a/content/id/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md b/content/id/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md index a9e49bf4f1..2b4fcdc17b 100644 --- a/content/id/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md +++ b/content/id/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md @@ -1,18 +1,18 @@ --- title: Poseidon-Firmament - Sebuah Penjadwal Alternatif -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + **Rilis saat ini dari Penjadwal Poseidon-Firmament adalah rilis alpha.** Penjadwal Poseidon-Firmament adalah penjadwal alternatif yang dapat digunakan bersama penjadwal Kubernetes bawaan. -{{% /capture %}} -{{% capture body %}} + + ## Pengenalan @@ -111,4 +111,4 @@ Kelemahan dari penjadwal _pod-by-pod_ ini diatasi dengan penjadwalan secara terk Silakan merujuk ke [hasil _benchmark_ terbaru](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/benchmark/README.md) untuk hasil uji perbandingan kinerja _throughput_ terperinci antara penjadwal Poseidon-Firmament dan Penjadwal bawaan Kubernetes. {{< /note >}} -{{% /capture %}} + diff --git a/content/id/docs/concepts/extend-kubernetes/service-catalog.md b/content/id/docs/concepts/extend-kubernetes/service-catalog.md index 2de908812e..efea4eda97 100644 --- a/content/id/docs/concepts/extend-kubernetes/service-catalog.md +++ b/content/id/docs/concepts/extend-kubernetes/service-catalog.md @@ -2,11 +2,11 @@ title: Service Catalog reviewers: - chenopis -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + {{< glossary_definition term_id="service-catalog" length="all" prepend="Service Catalog adalah" >}} Sebuah makelar servis (_service broker_), seperti yang didefinisikan oleh [spesifikasi API makelar servis terbuka] @@ -22,10 +22,10 @@ seorang {{< glossary_tooltip text="pengelola klaster" term_id="cluster-operator" daftar servis terkelola yang ditawarkan oleh makelar servis, melakukan pembuatan terhadap sebuah servis terkelola, dan menghubungkan (_bind_) agar servis tersebut tersedia bagi aplikasi pada suatu klaster Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Contoh kasus penggunaan Seorang {{< glossary_tooltip text="pengembang aplikasi" term_id="application-developer" >}} ingin menggunakan @@ -265,10 +265,11 @@ dengan nama `topic` ke dalam _environment variable_ `TOPIC`. key: topic ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Jika kamu terbiasa dengan {{< glossary_tooltip text="Helm Charts" term_id="helm-chart" >}}, [pasang Service Catalog menggunakan Helm](/docs/tasks/service-catalog/install-service-catalog-using-helm/) ke dalam klaster Kubernetes. Alternatif lain, kamu dapat [memasang Service Catalog dengan SC tool](/docs/tasks/service-catalog/install-service-catalog-using-sc/). @@ -276,7 +277,7 @@ dengan nama `topic` ke dalam _environment variable_ `TOPIC`. * Pelajari mengenai proyek [kubernetes-incubator/service-catalog](https://github.com/kubernetes-incubator/service-catalog).
* Lihat [svc-cat.io](https://svc-cat.io/docs/). -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/components.md b/content/id/docs/concepts/overview/components.md index 276c5b91fa..63e7b4b3af 100644 --- a/content/id/docs/concepts/overview/components.md +++ b/content/id/docs/concepts/overview/components.md @@ -1,19 +1,19 @@ --- title: Komponen-Komponen Kubernetes -content_template: templates/concept +content_type: concept weight: 20 card: name: concepts weight: 20 --- -{{% capture overview %}} + Dokumen ini merupakan ikhtisar yang mencakup berbagai komponen yang dibutuhkan agar klaster Kubernetes dapat berjalan secara fungsional. -{{% /capture %}} -{{% capture body %}} + + ## Komponen Master Komponen master menyediakan control plane bagi klaster. @@ -147,6 +147,6 @@ untuk melakukan pencarian data yang dibutuhkan. penyimpanan log terpusat dengan antar muka yang dapat digunakan untuk melakukan pencarian. -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/kubernetes-api.md b/content/id/docs/concepts/overview/kubernetes-api.md index 304a7e0b5f..35bf3f67dc 100644 --- a/content/id/docs/concepts/overview/kubernetes-api.md +++ b/content/id/docs/concepts/overview/kubernetes-api.md @@ -1,13 +1,13 @@ --- title: API Kubernetes -content_template: templates/concept +content_type: concept weight: 30 card: name: concepts weight: 30 --- -{{% capture overview %}} + Secara keseluruhan standar yang digunakan untuk API dijelaskan di dalam [dokumentasi API standar](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md). @@ -21,10 +21,10 @@ Kubernetes menyimpan bentuk terserialisasi dari obyek API yang dimilikinya di da Kubernetes sendiri dibagi menjadi beberapa komponen yang dapat saling berinteraksi melalui API. -{{% /capture %}} -{{% capture body %}} + + ## Perubahan API @@ -153,4 +153,4 @@ Ekstensi lain dapat diaktifkan dengan penanda `--runtime-config` pada apiserver. Sebagai contoh, untuk menonaktifkan deployments dan ingress, tetapkan `--runtime-config=extensions/v1beta1/deployments=false,extensions/v1beta1/ingresses=false` -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/object-management-kubectl/declarative-config.md b/content/id/docs/concepts/overview/object-management-kubectl/declarative-config.md index ad7c0dcfcf..9599feaf24 100644 --- a/content/id/docs/concepts/overview/object-management-kubectl/declarative-config.md +++ b/content/id/docs/concepts/overview/object-management-kubectl/declarative-config.md @@ -1,14 +1,14 @@ --- title: Pengelolaan Objek Kubernetes secara Deklaratif dengan Menggunakan File Konfigurasi -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Objek-objek Kubernetes dapat dibuat, diperbarui, dan dihapus dengan menjalankan perintah `kubectl apply` terhadap file-file konfigurasi objek yang disimpan dalam sebuah direktori secara rekursif sesuai dengan kebutuhan. Perintah `kubectl diff` bisa digunakan untuk menampilkan pratinjau tentang perubahan apa saja yang akan dibuat oleh perintah `kubectl apply`.
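Sebagai gambaran alur deklaratif yang baru saja dijelaskan, berikut sketsa file konfigurasi sederhana; nama file dan seluruh isinya hanyalah contoh asumsi:

```yaml
# simple_deployment.yaml, disimpan dalam direktori konfigurasi, lalu:
#   kubectl diff -f simple_deployment.yaml    # pratinjau perubahan
#   kubectl apply -f simple_deployment.yaml   # terapkan perubahan
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
```

Untuk memproses seluruh direktori secara rekursif, tambahkan opsi `-R`, misalnya `kubectl apply -R -f <direktori>`.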
-{{% /capture %}} -{{% capture body %}} + + ## Kelebihan dan kekurangan @@ -860,9 +860,10 @@ template: controller-selector: "extensions/v1beta1/deployment/nginx" ``` -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + - [Pengelolaan Objek Kubernetes Menggunakan Perintah Imperatif](/docs/concepts/overview/object-management-kubectl/imperative-command/) - [Pengelolaan Objek Kubernetes secara Imperatif Menggunakan File Konfigurasi](/docs/concepts/overview/object-management-kubectl/imperative-config/) - [Rujukan Perintah Kubectl](/docs/reference/generated/kubectl/kubectl/) - [Rujukan API Kubernetes](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/object-management-kubectl/imperative-command.md b/content/id/docs/concepts/overview/object-management-kubectl/imperative-command.md index 3cf2103122..e77cc9ca63 100644 --- a/content/id/docs/concepts/overview/object-management-kubectl/imperative-command.md +++ b/content/id/docs/concepts/overview/object-management-kubectl/imperative-command.md @@ -1,14 +1,14 @@ --- title: Pengelolaan Objek Kubernetes dengan Perintah Imperatif -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Objek-objek Kubernetes bisa dibuat, diperbarui, dan dihapus secara langsung dengan menggunakan perintah-perintah imperatif yang ada pada *command-line* `kubectl`. Dokumen ini menjelaskan cara perintah-perintah tersebut diorganisir dan cara menggunakan perintah-perintah tersebut untuk mengelola objek *live*. -{{% /capture %}} -{{% capture body %}} + + ## Kelebihan dan kekurangan @@ -122,11 +122,12 @@ kubectl create --edit -f /tmp/srv.yaml 1. Perintah `kubectl create service` membuat konfigurasi untuk objek Service dan menyimpannya di `/tmp/srv.yaml`. 1. Perintah `kubectl create --edit` membuka file konfigurasi untuk diedit sebelum objek dibuat. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [Pengelolaan Objek Kubernetes secara Imperatif dengan Menggunakan Konfigurasi Objek](/docs/concepts/overview/object-management-kubectl/imperative-config/) - [Pengelolaan Objek Kubernetes secara Deklaratif dengan Menggunakan File Konfigurasi](/docs/concepts/overview/object-management-kubectl/declarative-config/) - [Rujukan Perintah Kubectl](/docs/reference/generated/kubectl/kubectl/) - [Rujukan API Kubernetes](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/object-management-kubectl/imperative-config.md b/content/id/docs/concepts/overview/object-management-kubectl/imperative-config.md index 6390200bde..7df68f579d 100644 --- a/content/id/docs/concepts/overview/object-management-kubectl/imperative-config.md +++ b/content/id/docs/concepts/overview/object-management-kubectl/imperative-config.md @@ -1,14 +1,14 @@ --- title: Pengelolaan Objek Kubernetes Secara Imperatif dengan Menggunakan File Konfigurasi -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Objek-objek Kubernetes bisa dibuat, diperbarui, dan dihapus dengan menggunakan perangkat *command-line* `kubectl` dan file konfigurasi objek yang ditulis dalam format YAML atau JSON. Dokumen ini menjelaskan cara mendefinisikan dan mengelola objek dengan menggunakan file konfigurasi.
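Melengkapi penjelasan di atas, berikut sketsa file konfigurasi minimal beserta perintah imperatif yang biasa dipakai terhadapnya; nama file dan Pod hanyalah contoh asumsi:

```yaml
# nginx-pod.yaml
#   kubectl create -f nginx-pod.yaml    # membuat objek dari file
#   kubectl replace -f nginx-pod.yaml   # mengganti objek live dengan isi file
#   kubectl delete -f nginx-pod.yaml    # menghapus objek yang dideskripsikan file
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
spec:
  containers:
  - name: nginx
    image: nginx:1.14.2
```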
-{{% /capture %}} -{{% capture body %}} + + ## Kelebihan dan kekurangan @@ -104,13 +104,14 @@ template: controller-selector: "extensions/v1beta1/deployment/nginx" ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [Pengelolaan Objek Kubernetes Menggunakan Perintah Imperatif](/docs/concepts/overview/object-management-kubectl/imperative-command/) - [Pengelolaan Objek Kubernetes secara Deklaratif dengan Menggunakan File Konfigurasi](/docs/concepts/overview/object-management-kubectl/declarative-config/) - [Rujukan Perintah Kubectl](/docs/reference/generated/kubectl/kubectl/) - [Rujukan API Kubernetes](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/what-is-kubernetes.md b/content/id/docs/concepts/overview/what-is-kubernetes.md index 9a44701803..de35f65b29 100644 --- a/content/id/docs/concepts/overview/what-is-kubernetes.md +++ b/content/id/docs/concepts/overview/what-is-kubernetes.md @@ -1,17 +1,17 @@ --- title: Apa itu Kubernetes? -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 10 --- -{{% capture overview %}} + Laman ini merupakan ikhtisar Kubernetes. -{{% /capture %}} -{{% capture body %}} + + Kubernetes merupakan platform open-source yang digunakan untuk melakukan manajemen workloads aplikasi yang dikontainerisasi, serta menyediakan konfigurasi dan otomatisasi secara deklaratif. Kubernetes berada di dalam ekosistem @@ -179,11 +179,12 @@ Nama **Kubernetes** berasal dari Bahasa Yunani, yang berarti *juru mudi* atau merupakan sebuah singkatan yang didapat dengan mengganti 8 huruf "ubernete" dengan "8". -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Siap untuk [memulai](/docs/setup/)? * Untuk penjelasan lebih rinci, silahkan lihat [Dokumentasi Kubernetes](/docs/home/). -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/working-with-objects/annotations.md b/content/id/docs/concepts/overview/working-with-objects/annotations.md index c756c2d34c..8a822f255d 100644 --- a/content/id/docs/concepts/overview/working-with-objects/annotations.md +++ b/content/id/docs/concepts/overview/working-with-objects/annotations.md @@ -1,16 +1,16 @@ --- title: Anotasi -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + Kamu dapat menggunakan fitur anotasi dari Kubernetes untuk menempelkan sembarang metadata tanpa identitas pada suatu objek. Klien, seperti perangkat dan *library*, dapat memperoleh metadata tersebut. -{{% /capture %}} -{{% capture body %}} + + ## Mengaitkan metadata pada objek Kamu dapat menggunakan label maupun anotasi untuk menempelkan metadata pada suatu @@ -76,8 +76,9 @@ pada objek-objek pengguna harus memiliki sebuah prefiks. Prefiks `kubernetes.io/` dan `k8s.io/` merupakan reservasi dari komponen inti Kubernetes. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Pelajari lebih lanjut tentang [Label dan Selektor](/docs/concepts/overview/working-with-objects/labels/). 
-{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/working-with-objects/common-labels.md b/content/id/docs/concepts/overview/working-with-objects/common-labels.md index 0e1e62c3e1..52350c4e14 100644 --- a/content/id/docs/concepts/overview/working-with-objects/common-labels.md +++ b/content/id/docs/concepts/overview/working-with-objects/common-labels.md @@ -1,9 +1,9 @@ --- title: Label yang Disarankan -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Kamu dapat melakukan visualisasi dan mengatur objek Kubernetes dengan lebih banyak _tools_ dibandingkan dengan perintah kubectl dan dasbor. Sekumpulan label mengizinkan _tools_ untuk bekerja dengan interoperabilitas, mendeskripsikan objek dengan cara yang umum yang dapat @@ -11,9 +11,9 @@ dipahami semua _tools_. Sebagai tambahan bagi _tooling_ tambahan, label yang disarankan ini mendeskripsikan aplikasi sehingga informasi yang ada diapat di-_query_. -{{% /capture %}} -{{% capture body %}} + + Metadata ini diorganisasi berbasis konsep dari sebuah aplikasi. Kubernetes bukan merupakan sebuah platform sebagai sebuah _service_ (_platform as a service_/PaaS) dan tidak mewajibkan sebuah gagasan formal dari sebuah aplikasi. @@ -176,4 +176,4 @@ metadata: Dengan StatefulSet MySQL dan Service kamu dapat mengetahui informasi yang ada pada MySQL dan Wordpress. -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md index 02517e8ef8..57eef5e9c6 100644 --- a/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -1,18 +1,18 @@ --- title: Memahami Konsep Objek-Objek yang ada pada Kubernetes -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 40 --- -{{% capture overview %}} + Laman ini menjelaskan bagaimana objek-objek Kubernetes direpresentasikan di dalam API Kubernetes, dan bagaimana kamu dapat merepresentasikannya di dalam format `.yaml`. -{{% /capture %}} -{{% capture body %}} + + ## Memahami Konsep Objek-Objek yang Ada pada Kubernetes Objek-objek Kubernetes adalah entitas persisten di dalam sistem Kubernetes. @@ -99,10 +99,11 @@ untuk _Pod_ dapat kamu temukan [di sini](/docs/reference/generated/kubernetes-ap dan format _spec_ untuk _Deployment_ dapat ditemukan [di sini](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#deploymentspec-v1-apps). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Pelajari lebih lanjut mengenai dasar-dasar penting bagi objek Kubernetes, seperti [Pod](/docs/concepts/workloads/pods/pod-overview/). -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/working-with-objects/labels.md b/content/id/docs/concepts/overview/working-with-objects/labels.md index 0b6060e2fd..306edc0bfb 100644 --- a/content/id/docs/concepts/overview/working-with-objects/labels.md +++ b/content/id/docs/concepts/overview/working-with-objects/labels.md @@ -1,10 +1,10 @@ --- title: Label dan Selektor -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + _Label_ merupakan pasangan _key/value_ yang melekat pada objek-objek, misalnya pada Pod. 
Label digunakan untuk menentukan atribut identitas dari objek agar memiliki arti dan relevan bagi para pengguna, namun tidak secara langsung memiliki makna terhadap sistem inti. @@ -22,10 +22,10 @@ Setiap objek dapat memiliki satu set label _key/value_. Setiap _Key_ harus unik Label memungkinkan untuk menjalankan kueri dan pengamatan dengan efisien, serta ideal untuk digunakan pada UI dan CLI. Informasi yang tidak digunakan untuk identifikasi sebaiknya menggunakan [anotasi](/id/docs/concepts/overview/working-with-objects/annotations/). -{{% /capture %}} -{{% capture body %}} + + ## Motivasi @@ -222,4 +222,4 @@ selector: Salah satu contoh penggunaan pemilihan dengan menggunakan label yaitu untuk membatasi suatu kumpulan Node tertentu yang dapat digunakan oleh Pod. Lihat dokumentasi pada [pemilihan Node](/id/docs/concepts/configuration/assign-pod-node/) untuk informasi lebih lanjut. -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/working-with-objects/names.md b/content/id/docs/concepts/overview/working-with-objects/names.md index 331387dba5..5527c15b72 100644 --- a/content/id/docs/concepts/overview/working-with-objects/names.md +++ b/content/id/docs/concepts/overview/working-with-objects/names.md @@ -1,10 +1,10 @@ --- title: Nama -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Seluruh objek di dalam REST API Kubernetes secara jelas ditandai dengan nama dan UID. @@ -12,10 +12,10 @@ Apabila pengguna ingin memberikan atribut tidak unik, Kubernetes menyediakan [la Bacalah [dokumentasi desain penanda](https://git.k8s.io/community/contributors/design-proposals/architecture/identifiers.md) agar kamu dapat memahami lebih lanjut sintaks yang digunakan untuk Nama dan UID. -{{% /capture %}} -{{% capture body %}} + + ## Nama @@ -27,4 +27,4 @@ Berdasarkan ketentuan, nama dari _resources_ Kubernetes memiliki panjang maksimu {{< glossary_definition term_id="uid" length="all" >}} -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/working-with-objects/namespaces.md b/content/id/docs/concepts/overview/working-with-objects/namespaces.md index a2315fd12e..5eb358a17a 100644 --- a/content/id/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/id/docs/concepts/overview/working-with-objects/namespaces.md @@ -1,17 +1,17 @@ --- title: Namespace -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Kubernetes mendukung banyak klaster virtual di dalam satu klaster fisik. Klaster virtual tersebut disebut dengan *namespace*. -{{% /capture %}} -{{% capture body %}} + + ## Kapan menggunakan banyak Namespace @@ -91,4 +91,4 @@ kubectl api-resources --namespaced=true kubectl api-resources --namespaced=false ``` -{{% /capture %}} + diff --git a/content/id/docs/concepts/overview/working-with-objects/object-management.md b/content/id/docs/concepts/overview/working-with-objects/object-management.md index aa9d8cbc9e..aadb410473 100644 --- a/content/id/docs/concepts/overview/working-with-objects/object-management.md +++ b/content/id/docs/concepts/overview/working-with-objects/object-management.md @@ -1,16 +1,16 @@ --- title: Pengaturan Objek Kubernetes -content_template: templates/concept +content_type: concept weight: 15 --- -{{% capture overview %}} + Perangkat `kubectl` mendukung beberapa cara untuk membuat dan mengatur objek-objek Kubernetes. Laman ini menggambarkan berbagai macam metodenya. 
Baca [Kubectl gitbook](https://kubectl.docs.kubernetes.io) untuk penjelasan pengaturan objek dengan Kubectl secara detail. -{{% /capture %}} -{{% capture body %}} + + ## Metode pengaturan @@ -170,9 +170,10 @@ Beberapa kekurangan dibandingkan konfigurasi objek imperatif: - Konfigurasi objek deklaratif lebih sulit untuk di-_debug_ dan hasilnya lebih sulit dimengerti untuk perilaku yang tidak diinginkan. - Pembaruan sebagian menggunakan _diff_ menghasilkan operasi _merge_ dan _patch_ yang rumit. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [Mengatur Objek Kubernetes menggunakan Perintah Imperatif](/docs/tasks/manage-kubernetes-objects/imperative-command/) - [Mengatur Objek Kubernetes menggunakan Konfigurasi Objek (Imperatif)](/docs/tasks/manage-kubernetes-objects/imperative-config/) @@ -182,4 +183,4 @@ Beberapa kekurangan dibandingkan konfigurasi objek imperatif: - [Kubectl Gitbook](https://kubectl.docs.kubernetes.io) - [Referensi API Kubernetes](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) -{{% /capture %}} + diff --git a/content/id/docs/concepts/policy/pod-security-policy.md b/content/id/docs/concepts/policy/pod-security-policy.md index 0337db0f6c..2dbbd53144 100644 --- a/content/id/docs/concepts/policy/pod-security-policy.md +++ b/content/id/docs/concepts/policy/pod-security-policy.md @@ -1,18 +1,18 @@ --- title: Pod Security Policy -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state state="beta" >}} Pod Security Policies (kebijakan keamanan Pod) memungkinkan otorisasi secara detil dari pembuatan dan pembaruan Pod. -{{% /capture %}} -{{% capture body %}} + + ## Apa itu Pod Security Policy? @@ -466,4 +466,4 @@ Secara bawaan, semua _sysctl_ yang aman diizinkan. Lihat [dokumentasi Sysctl](/docs/concepts/cluster-administration/sysctl-cluster/#podsecuritypolicy). -{{% /capture %}} + diff --git a/content/id/docs/concepts/policy/resource-quotas.md b/content/id/docs/concepts/policy/resource-quotas.md index b4a3e28ebb..47bfa996bb 100644 --- a/content/id/docs/concepts/policy/resource-quotas.md +++ b/content/id/docs/concepts/policy/resource-quotas.md @@ -1,10 +1,10 @@ --- title: Resource Quota -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Saat beberapa pengguna atau tim berbagi sebuah klaster dengan jumlah Node yang tetap, ada satu hal yang perlu diperhatikan yaitu suatu tim dapat menggunakan sumber daya @@ -13,9 +13,9 @@ lebih dari jatah yang mereka perlukan. _Resource Quota_ (kuota sumber daya) adalah sebuah alat yang dapat digunakan oleh administrator untuk mengatasi hal ini. -{{% /capture %}} -{{% capture body %}} + + Sebuah Resource Quota, didefinisikan oleh objek API `ResourceQuota`, menyediakan batasan-batasan yang membatasi konsumsi gabungan sumber daya komputasi untuk tiap Namespace. Resource Quota dapat @@ -613,10 +613,11 @@ Lihat [LimitedResources](https://github.com/kubernetes/kubernetes/pull/36765) da Lihat [contoh detail cara menggunakan sebuah Resource Quota](/docs/tasks/administer-cluster/quota-api-object/). -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Lihat [dokumen desain ResourceQuota](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_resource_quota.md) untuk informasi lebih lanjut. 
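Sebagai ilustrasi pembahasan Resource Quota di atas, berikut sketsa objek ResourceQuota sederhana; nama Namespace dan angka-angkanya hanyalah asumsi:

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-quota
  namespace: tim-a            # Namespace hipotetis yang dibatasi
spec:
  hard:
    pods: "10"                # jumlah maksimum Pod dalam Namespace
    requests.cpu: "4"         # total request CPU seluruh Pod
    requests.memory: 8Gi
    limits.cpu: "8"
    limits.memory: 16Gi
```

Dengan kuota seperti ini, permintaan pembuatan Pod yang akan membuat total konsumsi Namespace melewati batas `hard` akan ditolak oleh API server.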
-{{% /capture %}} + diff --git a/content/id/docs/concepts/scheduling/kube-scheduler.md b/content/id/docs/concepts/scheduling/kube-scheduler.md index bf6fd768d8..f4cd477608 100644 --- a/content/id/docs/concepts/scheduling/kube-scheduler.md +++ b/content/id/docs/concepts/scheduling/kube-scheduler.md @@ -1,19 +1,19 @@ --- title: Penjadwal Kubernetes -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + Dalam Kubernetes, _scheduling_ atau penjadwalan ditujukan untuk memastikan {{< glossary_tooltip text="Pod" term_id="pod" >}} mendapatkan {{< glossary_tooltip text="Node" term_id="node" >}} sehingga {{< glossary_tooltip term_id="kubelet" >}} dapat menjalankannya. -{{% /capture %}} -{{% capture body %}} + + ## Ikhtisar Penjadwalan {#penjadwalan} @@ -91,12 +91,13 @@ penilaian oleh penjadwal: lainnya. Kamu juga bisa mengonfigurasi _kube-scheduler_ untuk menjalankan profil yang berbeda. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Baca tentang [penyetelan performa penjadwal](/docs/concepts/scheduling/scheduler-perf-tuning/) * Baca tentang [pertimbangan penyebarang topologi pod](/docs/concepts/workloads/pods/pod-topology-spread-constraints/) * Baca [referensi dokumentasi](/docs/reference/command-line-tools-reference/kube-scheduler/) untuk _kube-scheduler_ * Pelajari tentang [mengkonfigurasi beberapa penjadwal](/docs/tasks/administer-cluster/configure-multiple-schedulers/) * Pelajari tentang [aturan manajemen topologi](/docs/tasks/administer-cluster/topology-manager/) * Pelajari tentang [pengeluaran tambahan Pod](/docs/concepts/configuration/pod-overhead/) -{{% /capture %}} + diff --git a/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md b/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md index 11f9a23077..0a20d9050a 100644 --- a/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md +++ b/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md @@ -1,10 +1,10 @@ --- title: Penyetelan Kinerja Penjadwal -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.14" state="beta" >}} @@ -21,9 +21,9 @@ API server soal keputusan ini melalui sebuah proses yang disebut _Binding_. Laman ini menjelaskan optimasi penyetelan (_tuning_) kinerja yang relevan untuk klaster Kubernetes berskala besar. -{{% /capture %}} -{{% capture body %}} + + Pada klaster berskala besar, kamu bisa menyetel perilaku penjadwal untuk menyeimbangkan hasil akhir penjadwalan antara latensi (seberapa cepat Pod-Pod baru ditempatkan) @@ -157,4 +157,4 @@ Node 1, Node 5, Node 2, Node 6, Node 3, Node 4 Setelah semua Node telah dicek, penjadwal akan kembali pada Node 1. -{{% /capture %}} + diff --git a/content/id/docs/concepts/scheduling/scheduling-framework.md b/content/id/docs/concepts/scheduling/scheduling-framework.md index de1772286f..f08f9f40c7 100644 --- a/content/id/docs/concepts/scheduling/scheduling-framework.md +++ b/content/id/docs/concepts/scheduling/scheduling-framework.md @@ -1,10 +1,10 @@ --- title: Kerangka Kerja Penjadwalan (Scheduling Framework) -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="1.15" state="alpha" >}} @@ -20,9 +20,9 @@ tersebut. 
[kep]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-scheduling/20180409-scheduling-framework.md -{{% /capture %}} -{{% capture body %}} + + # Alur kerja kerangka kerja @@ -246,4 +246,4 @@ mengonfigurasi sekumpulan _plugin_ sebagai profil penjadwal dan kemudian menetap beberapa profil agar sesuai dengan berbagai jenis beban kerja. Pelajari lebih lanjut di [multi profil](/docs/reference/scheduling/profiles/#multiple-profiles). -{{% /capture %}} + diff --git a/content/id/docs/concepts/security/overview.md b/content/id/docs/concepts/security/overview.md index e6a00f1cf5..caff040bc5 100644 --- a/content/id/docs/concepts/security/overview.md +++ b/content/id/docs/concepts/security/overview.md @@ -1,16 +1,16 @@ --- title: Ikhtisar Keamanan Cloud Native -content_template: templates/concept +content_type: concept weight: 1 --- {{< toc >}} -{{% capture overview %}} + Keamanan Kubernetes (dan keamanan secara umum) adalah sebuah topik sangat luas yang memiliki banyak bagian yang sangat berkaitan satu sama lain. Pada masa sekarang ini di mana perangkat lunak _open source_ telah diintegrasi ke dalam banyak sistem yang membantu berjalannya aplikasi web, ada beberapa konsep menyeluruh yang dapat membantu intuisimu untuk berpikir tentang konsep keamanan secara menyeluruh. Panduan ini akan mendefinisikan sebuah cara/model berpikir untuk beberapa konsep umum mengenai Keamanan _Cloud Native_. Cara berpikir ini sepenuhnya subjektif dan kamu sebaiknya hanya menggunakannya apabila ini membantumu berpikir tentang di mana harus mengamankan _stack_ perangkat lunakmu. -{{% /capture %}} -{{% capture body %}} + + ## 4C pada Keamanan _Cloud Native_ @@ -103,8 +103,9 @@ Serangan Pengamatan (_probing_) Dinamis | Ada sedikit peralatan otomatis yang da Kebanyakan dari saran yang disebut di atas dapat diotomasi di dalam _delivery pipeline_ kode kamu sebagai bagian dari rangkaian pemeriksaan keamanan. Untuk mempelajari lebih lanjut tentang pendekatan "_Continuous Hacking_" terhadap _delivery_ perangkat lunak, [artikel ini](https://thenewstack.io/beyond-ci-cd-how-continuous-hacking-of-docker-containers-and-pipeline-driven-security-keeps-ygrene-secure/) menyediakan lebih banyak detail. 
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Pelajari tentang [Network Policy untuk Pod](/docs/concepts/services-networking/network-policies/) * Pelajari tentang [mengamankan klaster kamu](/docs/tasks/administer-cluster/securing-a-cluster/) @@ -113,4 +114,4 @@ Kebanyakan dari saran yang disebut di atas dapat diotomasi di dalam _delivery pi * Pelajari tentang [enkripsi data saat diam](/docs/tasks/administer-cluster/encrypt-data/) * Pelajari tentang [Secret (data sensitif) pada Kubernetes](/docs/concepts/configuration/secret/) -{{% /capture %}} + diff --git a/content/id/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md b/content/id/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md index 2245f9f961..26a2473f46 100644 --- a/content/id/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md +++ b/content/id/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md @@ -1,12 +1,12 @@ --- title: Menambahkan Entry pada /etc/hosts Pod dengan HostAliases -content_template: templates/concept +content_type: concept weight: 60 --- {{< toc >}} -{{% capture overview %}} + Menambahkan entri pada berkas /etc/hosts Pod akan melakukan _override_ resolusi _hostname_ pada level Pod ketika DNS dan opsi lainnya tidak tersedia. Pada versi 1.7, pengguna dapat menambahkan entri yang diinginkan beserta _field_ HostAliases @@ -14,9 +14,9 @@ pada PodSpec. Modifikasi yang dilakukan tanpa menggunakan HostAliases tidaklah disarankan karena berkas ini diatur oleh Kubelet dan dapat di-_override_ ketika Pod dibuat/di-_restart_. -{{% /capture %}} -{{% capture body %}} + + ## Isi Default pada Berkas `Hosts` @@ -127,5 +127,5 @@ semua hal yang didefinisikan oleh pengguna akan ditimpa (_overwrite_) ketika ber atau Pod di-_schedule_ ulang. Dengan demikian tidak dianjurkan untuk memodifikasi berkas tersebut secara langsung. -{{% /capture %}} + diff --git a/content/id/docs/concepts/services-networking/connect-applications-service.md b/content/id/docs/concepts/services-networking/connect-applications-service.md index 7104af5409..4bbd0bbf56 100644 --- a/content/id/docs/concepts/services-networking/connect-applications-service.md +++ b/content/id/docs/concepts/services-networking/connect-applications-service.md @@ -1,11 +1,11 @@ --- title: Menghubungkan aplikasi dengan Service -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + ## Model Kubernetes untuk menghubungkan kontainer @@ -17,9 +17,9 @@ Akan sulit untuk mengkoordinasikan *port* yang digunakan oleh banyak pengembang. Panduan ini menggunakan server *nginx* sederhana untuk mendemonstrasikan konsepnya. Konsep yang sama juga ditulis lebih lengkap di [Aplikasi Jenkins CI](https://kubernetes.io/blog/2015/07/strong-simple-ssl-for-kubernetes). -{{% /capture %}} -{{% capture body %}} + + ## Mengekspos Pod ke dalam klaster @@ -357,10 +357,11 @@ LoadBalancer Ingress: a320587ffd19711e5a37606cf4a74574-1142138393.us-east-1.el ... ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Kubernetes juga mendukung *Federated Service*, yang bisa mempengaruhi banyak klaster dan penyedia layanan *cloud*, untuk meningkatkan ketersediaan, peningkatan toleransi kesalahan, dan pengembangan dari *Service* kamu. Lihat [Panduan Federated Service](/docs/concepts/cluster-administration/federation-service-discovery/) untuk informasi lebih lanjut. 
-{{% /capture %}} + diff --git a/content/id/docs/concepts/services-networking/dns-pod-service.md b/content/id/docs/concepts/services-networking/dns-pod-service.md index f6b333319e..52ec19a420 100644 --- a/content/id/docs/concepts/services-networking/dns-pod-service.md +++ b/content/id/docs/concepts/services-networking/dns-pod-service.md @@ -1,13 +1,13 @@ --- title: DNS untuk Service dan Pod -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Laman ini menyediakan ikhtisar dari dukungan DNS oleh Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Pendahuluan @@ -258,11 +258,12 @@ Keberadaan Pod DNS Config dan DNS Policy "`None`"" diilustrasikan pada tabel di | 1.10 | Beta (aktif secara default)| | 1.9 | Alpha | -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Untuk petunjuk lebih lanjut mengenai administrasi konfigurasi DNS, kamu dapat membaca [Cara Melakukan Konfigurasi Service DNS](/docs/tasks/administer-cluster/dns-custom-nameservers/) -{{% /capture %}} + diff --git a/content/id/docs/concepts/services-networking/dual-stack.md b/content/id/docs/concepts/services-networking/dual-stack.md index 0c3993b5ca..6714a00cde 100644 --- a/content/id/docs/concepts/services-networking/dual-stack.md +++ b/content/id/docs/concepts/services-networking/dual-stack.md @@ -5,11 +5,11 @@ feature: description: > Pengalokasian alamat IPv4 dan IPv6 untuk Pod dan Service -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.16" state="alpha" >}} @@ -20,9 +20,9 @@ Jika kamu mengaktifkan jaringan _dual-stack_ IPv4/IPv6 untuk klaster Kubernetes kamu, klaster akan mendukung pengalokasian kedua alamat IPv4 dan IPv6 secara bersamaan. -{{% /capture %}} -{{% capture body %}} + + ## Fitur-fitur yang didukung @@ -131,10 +131,11 @@ _masquerading_ IP dari klaster _dual-stack_. * Kubenet memaksa pelaporan posisi IP untuk IPv4,IPv6 IP (--cluster-cidr) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Validasi jaringan _dual-stack_ IPv4/IPv6](/docs/tasks/network/validate-dual-stack) -{{% /capture %}} + diff --git a/content/id/docs/concepts/services-networking/endpoint-slices.md b/content/id/docs/concepts/services-networking/endpoint-slices.md index 158918a915..224e7b4bbd 100644 --- a/content/id/docs/concepts/services-networking/endpoint-slices.md +++ b/content/id/docs/concepts/services-networking/endpoint-slices.md @@ -5,21 +5,21 @@ feature: description: > Pelacakan _endpoint_ jaringan yang dapat diskalakan pada klaster Kubernetes. -content_template: templates/concept +content_type: concept weight: 15 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.17" state="beta" >}} EndpointSlice menyediakan sebuah cara yang mudah untuk melacak _endpoint_ jaringan dalam sebuah klaster Kubernetes. EndpointSlice memberikan alternatif yang lebih _scalable_ dan lebih dapat diperluas dibandingkan dengan Endpoints. -{{% /capture %}} -{{% capture body %}} + + ## Motivasi @@ -174,11 +174,12 @@ akan segera dibutuhkan. Pembaruan bertahap (_rolling update_) dari Deployment ju pengemasan ulang EndpointSlice yang natural seiring dengan digantikannya seluruh Pod dan _endpoint_ yang bersangkutan. 
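Untuk menggambarkan bentuk EndpointSlice yang dibahas di atas, berikut sketsa objek pada versi beta (`discovery.k8s.io/v1beta1`); nama, alamat IP, dan nama Service-nya hanyalah contoh:

```yaml
apiVersion: discovery.k8s.io/v1beta1
kind: EndpointSlice
metadata:
  name: contoh-abc
  labels:
    kubernetes.io/service-name: contoh   # Service pemilik slice ini
addressType: IPv4
ports:
- name: http
  protocol: TCP
  port: 80
endpoints:
- addresses:
  - "10.1.2.3"
  conditions:
    ready: true                          # endpoint siap menerima trafik
  topology:
    kubernetes.io/hostname: node-1
```

Dalam praktiknya objek semacam ini umumnya dibuat dan dikelola otomatis oleh _control plane_ untuk setiap Service yang memiliki selektor, bukan ditulis tangan.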
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Mengaktifkan EndpointSlice](/docs/tasks/administer-cluster/enabling-endpointslices) * Baca [Menghubungkan Aplikasi dengan Service](/docs/concepts/services-networking/connect-applications-service/) -{{% /capture %}} + diff --git a/content/id/docs/concepts/services-networking/ingress-controllers.md b/content/id/docs/concepts/services-networking/ingress-controllers.md index c6262ec91f..9491f5dc1c 100644 --- a/content/id/docs/concepts/services-networking/ingress-controllers.md +++ b/content/id/docs/concepts/services-networking/ingress-controllers.md @@ -1,10 +1,10 @@ --- title: Kontroler Ingress -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Agar Ingress dapat bekerja sebagaimana mestinya, sebuah klaster harus memiliki paling tidak sebuah kontroler Ingress. @@ -18,9 +18,9 @@ paling sesuai dengan kebutuhan kamu. Kubernetes sebagai sebuah proyek, saat ini, mendukung dan memaintain kontroler-kontroler [GCE](https://git.k8s.io/ingress-gce/README.md) dan [nginx](https://git.k8s.io/ingress-nginx/README.md). -{{% /capture %}} -{{% capture body %}} + + ## Kontroler-kontroler lainnya @@ -66,11 +66,12 @@ kontroler Ingress bisa saja memiliki sedikit perbedaan cara kerja. Pastikan kamu sudah terlebih dahulu memahami dokumentasi kontroler Ingress yang akan kamu pakai sebelum memutuskan untuk memakai kontroler tersebut. {{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Pelajari [Ingress](/docs/concepts/services-networking/ingress/) lebih lanjut. * [Melakukan konfigurasi Ingress pada Minikube dengan kontroler NGINX](/docs/tasks/access-application-cluster/ingress-minikube) -{{% /capture %}} + diff --git a/content/id/docs/concepts/services-networking/ingress.md b/content/id/docs/concepts/services-networking/ingress.md index 905f6b03bb..617581b421 100644 --- a/content/id/docs/concepts/services-networking/ingress.md +++ b/content/id/docs/concepts/services-networking/ingress.md @@ -1,14 +1,14 @@ --- title: Ingress -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + {{< glossary_definition term_id="ingress" length="all" >}} -{{% /capture %}} -{{% capture body %}} + + ## Terminologi Untuk memudahkan, di awal akan dijelaskan beberapa terminologi yang sering dipakai: @@ -467,8 +467,9 @@ Kamu dapat mengekspos sebuah *Service* dalam berbagai cara, tanpa harus mengguna * [Service.Type=NodePort](/docs/concepts/services-networking/service/#nodeport) * [Port Proxy](https://git.k8s.io/contrib/for-demos/proxy-to-service) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Melakukan konfigurasi Ingress pada Minikube dengan kontroler NGINX](/docs/tasks/access-application-cluster/ingress-minikube) -{{% /capture %}} + diff --git a/content/id/docs/concepts/services-networking/network-policies.md b/content/id/docs/concepts/services-networking/network-policies.md index 644a3c8cc2..25f42ddb98 100644 --- a/content/id/docs/concepts/services-networking/network-policies.md +++ b/content/id/docs/concepts/services-networking/network-policies.md @@ -1,20 +1,20 @@ --- title: NetworkPolicy -content_template: templates/concept +content_type: concept weight: 50 --- {{< toc >}} -{{% capture overview %}} + Sebuah NetworkPolicy adalah spesifikasi dari sekelompok Pod atau _endpoint_ yang diizinkan untuk saling berkomunikasi. 
`NetworkPolicy` menggunakan label untuk memilih Pod serta mendefinisikan serangkaian _rule_ yang digunakan untuk mendefinisikan trafik yang diizinkan untuk suatu Pod tertentu. -{{% /capture %}} -{{% capture body %}} + + ## Prasyarat NetworkPolicy diimplementasikan dengan menggunakan _plugin_ jaringan, @@ -275,11 +275,12 @@ Kubernetes mendukung SCTP sebagai _value_ `protocol` pada definisi `NetworkPolic _Plugin_ CNI harus mendukung SCTP sebagai _value_ dari `protocol` pada `NetworkPolicy`. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - Lihat [Deklarasi _Network Policy_](/docs/tasks/administer-cluster/declare-network-policy/) untuk melihat lebih banyak contoh penggunaan. - Baca lebih lanjut soal [panduan](https://github.com/ahmetb/kubernetes-network-policy-recipes) bagi skenario generik _resource_ `NetworkPolicy`. -{{% /capture %}} + diff --git a/content/id/docs/concepts/services-networking/service-topology.md b/content/id/docs/concepts/services-networking/service-topology.md index 1480589589..ef15d1ab3d 100644 --- a/content/id/docs/concepts/services-networking/service-topology.md +++ b/content/id/docs/concepts/services-networking/service-topology.md @@ -5,12 +5,12 @@ feature: description: > Rute lalu lintas layanan berdasarkan topologi klaster. -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.17" state="alpha" >}} @@ -20,9 +20,9 @@ layanan dapat menentukan lalu lintas jaringan yang lebih diutamakan untuk dirute beberapa _endpoint_ yang berada pada Node yang sama dengan klien, atau pada _availability zone_ yang sama. -{{% /capture %}} -{{% capture body %}} + + ## Pengantar @@ -180,11 +180,11 @@ spec: - "*" ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Baca tentang [mengaktifkan topologi Service](/docs/tasks/administer-cluster/enabling-service-topology) * Baca [menghubungkan aplikasi dengan Service](/docs/concepts/services-networking/connect-applications-service/) -{{% /capture %}} \ No newline at end of file diff --git a/content/id/docs/concepts/services-networking/service.md b/content/id/docs/concepts/services-networking/service.md index 7ae2d39b65..97626bf9ce 100644 --- a/content/id/docs/concepts/services-networking/service.md +++ b/content/id/docs/concepts/services-networking/service.md @@ -5,12 +5,12 @@ feature: description: > Kamu tidak perlu memodifikasi aplikasi kamu untuk menggunakan mekanisme _service discovery_ tambahan. Kubernetes menyediakan IP untuk setiap kontainer serta sebuah DNS bagi sebuah sekumpulan kontainer, serta akan melakukan mekanisme _load balance_ bagi sekumpulan kontainer tersebut. -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + [`Pod`](/docs/concepts/workloads/pods/pod/) pada Kubernetes bersifat *mortal*. Artinya apabila _pod-pod_ tersebut dibuat dan kemudian mati, _pod-pod_ tersebut @@ -41,9 +41,9 @@ yang terus diubah apabila _state_ sebuah sekumpulan `Pod` di dalam suatu `Servic aplikasi _non-native_, Kubernetes menyediakan _bridge_ yang berbasis _virtual-IP_ bagi `Service` yang diarahkan pada `Pod` _backend_. -{{% /capture %}} -{{% capture body %}} + + ## Mendefinisikan sebuah `Service` @@ -1056,10 +1056,11 @@ SCTP tidak didukung pada _node_ berbasis Windows. 
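Sebelum masuk ke detail pendefinisian `Service` di bawah, berikut sketsa `Service` paling dasar yang meneruskan trafik ke sekumpulan `Pod` berdasarkan label; nama dan nomor port hanyalah contoh:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  selector:
    app: MyApp        # Pod backend dipilih lewat label ini
  ports:
  - protocol: TCP
    port: 80          # port yang diekspos Service (virtual IP)
    targetPort: 9376  # port pada kontainer di Pod backend
```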
_Kube-proxy_ tidak mendukung manajemen asosiasi SCTP ketika hal ini dilakukan pada mode _userspace_ -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Baca [Bagaimana cara menghubungkan _Front End_ ke _Back End_ menggunakan sebuah `Service`](/docs/tasks/access-application-cluster/connecting-frontend-backend/). -{{% /capture %}} + diff --git a/content/id/docs/concepts/storage/dynamic-provisioning.md b/content/id/docs/concepts/storage/dynamic-provisioning.md index 2d346ff80e..ac206dfacd 100644 --- a/content/id/docs/concepts/storage/dynamic-provisioning.md +++ b/content/id/docs/concepts/storage/dynamic-provisioning.md @@ -1,10 +1,10 @@ --- title: Penyediaan Volume Dinamis -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Penyediaan volume dinamis memungkinkan volume penyimpanan untuk dibuat sesuai permintaan (_on-demand_). Tanpa adanya penyediaan dinamis (_dynamic provisioning_), untuk membuat volume penyimpanan baru, admin klaster secara manual harus @@ -13,10 +13,10 @@ sebagai representasi di Kubernetes. Fitur penyediaan dinamis menghilangkan kebut penyimpanan sebelumnya (_pre-provision_). Dengan demikian, penyimpanan akan tersedia secara otomatis ketika diminta oleh pengguna. -{{% /capture %}} -{{% capture body %}} + + ## Latar Belakang @@ -125,4 +125,4 @@ pada sebuah Region. Penyimpanan dengan *backend* Zona-Tunggal seharusnya disedia Zona-Zona dimana Pod dijalankan. Hal ini dapat dicapai dengan mengatur [Mode Volume Binding](/docs/concepts/storage/storage-classes/#volume-binding-mode). -{{% /capture %}} + diff --git a/content/id/docs/concepts/storage/persistent-volumes.md b/content/id/docs/concepts/storage/persistent-volumes.md index 4063f1a282..f75941b86a 100644 --- a/content/id/docs/concepts/storage/persistent-volumes.md +++ b/content/id/docs/concepts/storage/persistent-volumes.md @@ -5,18 +5,18 @@ feature: description: > Secara otomatis memasang sistem penyimpanan pilihanmu, baik dari penyimpanan lokal, penyedia layanan _cloud_ seperti GCP atau AWS, maupun sebuah sistem penyimpanan jaringan seperti NFS, iSCSI, Gluster, Ceph, Cinder, atau Flocker. -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Dokumen ini menjelaskan kondisi terkini dari `PersistentVolumes` pada Kubernetes. Disarankan telah memiliki familiaritas dengan [volume](/docs/concepts/storage/volumes/). -{{% /capture %}} -{{% capture body %}} + + ## Pengenalan @@ -698,4 +698,4 @@ dan membutuhkan _persistent storage_, kami merekomendasikan agar kamu menggunaka atau klaster tidak memiliki sistem penyimpanan (di mana penggun tidak dapat membuat PVC yang membutuhkan _config_). -{{% /capture %}} + diff --git a/content/id/docs/concepts/storage/storage-classes.md b/content/id/docs/concepts/storage/storage-classes.md index ceeadf2d90..6de85830e8 100644 --- a/content/id/docs/concepts/storage/storage-classes.md +++ b/content/id/docs/concepts/storage/storage-classes.md @@ -1,19 +1,19 @@ --- title: StorageClass -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Dokumen ini mendeskripsikan konsep StorageClass yang ada pada Kubernetes. Sebelum lanjut membaca, sangat dianjurkan untuk memiliki pengetahuan terhadap [volumes](/docs/concepts/storage/volumes/) dan [peristent volume](/docs/concepts/storage/persistent-volumes) terlebih dahulu. -{{% /capture %}} -{{% capture body %}} + + ## Pengenalan @@ -785,4 +785,4 @@ sampai _scheduling_ pod dilakukan. 
Hal ini dispesifikasikan oleh mode _binding_ Memperlambat _binding_ volume mengizinkan _scheduler_ untuk memastikan batasan _scheduling_ semua pod ketika memilih PersistentVolume untuk sebuah PersistentVolumeClaim. -{{% /capture %}} + diff --git a/content/id/docs/concepts/storage/storage-limits.md b/content/id/docs/concepts/storage/storage-limits.md index 45b2ef3b35..d4d7be47f6 100644 --- a/content/id/docs/concepts/storage/storage-limits.md +++ b/content/id/docs/concepts/storage/storage-limits.md @@ -1,9 +1,9 @@ --- title: Limit Volume yang Spesifik terhadap Node -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Laman ini menjelaskan soal jumlah volume maksimal yang dapat dihubungkan ke sebuah Node untuk berbagai penyedia layanan cloud. @@ -14,9 +14,9 @@ sangatlah penting untuk diketahui Kubernetes dalam menentukan keputusan. Jika ti Pod-pod yang telah dijadwalkan pada sebuah Node akan macet dan menunggu terus-menerus untuk terhubung pada volume. -{{% /capture %}} -{{% capture body %}} + + ## Limit _default_ pada Kubernetes @@ -77,4 +77,4 @@ bisa dilihat pada [Ukuran mesin virtual (VM) di Azure](https://docs.microsoft.co sebagai properti Node dan Scheduler tidak akan menjadwalkan Pod dengan volume pada Node manapun yang sudah penuh kapasitasnya. Untuk penjelasan lebih jauh lihat [spek CSI](https://github.com/container-storage-interface/spec/blob/master/spec.md#nodegetinfo). -{{% /capture %}} + diff --git a/content/id/docs/concepts/storage/volume-pvc-datasource.md b/content/id/docs/concepts/storage/volume-pvc-datasource.md index 5f197488bf..4a5f5d8c8c 100644 --- a/content/id/docs/concepts/storage/volume-pvc-datasource.md +++ b/content/id/docs/concepts/storage/volume-pvc-datasource.md @@ -1,17 +1,17 @@ --- title: Pengklonaan Volume CSI -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.16" state="beta" >}} Dokumen ini mendeskripsikan konsep pengklonaan Volume CSI yang telah tersedia di dalam Kubernetes. Pengetahuan tentang [Volume](/docs/concepts/storage/volumes) disarankan. -{{% /capture %}} -{{% capture body %}} + + ## Introduction @@ -59,4 +59,4 @@ Hasilnya adalah sebuah PVC baru dengan nama `clone-of-pvc-1` yang memiliki isi y Setelah tersedianya PVC baru tersebut, PVC baru yang diklonakan tersebut digunakan sama seperti PVC lainnya. Juga diharapkan pada titik ini bahwa PVC baru tersebut adalah sebuah objek terpisah yang independen. Ia dapat digunakan, diklonakan, di-_snapshot_, atau dihapus secara terpisah dan tanpa perlu memikirkan PVC dataSource aslinya. Hal ini juga berarti bahwa sumber tidak terikat sama sekali dengan klona yang baru dibuat tersebut, dan dapat diubah atau dihapus tanpa memengaruhi klona yang baru dibuat tersebut. -{{% /capture %}} + diff --git a/content/id/docs/concepts/storage/volume-snapshot-classes.md b/content/id/docs/concepts/storage/volume-snapshot-classes.md index 5fd92fb42a..0414a9d7de 100644 --- a/content/id/docs/concepts/storage/volume-snapshot-classes.md +++ b/content/id/docs/concepts/storage/volume-snapshot-classes.md @@ -1,19 +1,19 @@ --- title: VolumeSnapshotClass -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Laman ini menjelaskan tentang konsep VolumeSnapshotClass pada Kubernetes. 
Sebelum melanjutkan, sangat disarankan untuk membaca [_snapshot_ volume](/docs/concepts/storage/volume-snapshots/) dan [kelas penyimpanan (_storage class_)](/docs/concepts/storage/storage-classes) terlebih dahulu. -{{% /capture %}} -{{% capture body %}} + + ## Pengenalan @@ -55,4 +55,4 @@ VolumeSnapshotClass memiliki parameter-parameter yang menggambarkan _snapshot_ v di dalam VolumeSnapshotClass. Parameter-parameter yang berbeda diperbolehkan tergantung dari `snapshotter`. -{{% /capture %}} + diff --git a/content/id/docs/concepts/storage/volume-snapshots.md b/content/id/docs/concepts/storage/volume-snapshots.md index c5b7c09e73..39ab3d31aa 100644 --- a/content/id/docs/concepts/storage/volume-snapshots.md +++ b/content/id/docs/concepts/storage/volume-snapshots.md @@ -1,18 +1,18 @@ --- title: VolumeSnapshot -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.12" state="alpha" >}} Laman ini menjelaskan tentang fitur VolumeSnapshot pada Kubernetes. Sebelum lanjut membaca, sangat disarankan untuk memahami [PersistentVolume](/docs/concepts/storage/persistent-volumes/) terlebih dahulu. -{{% /capture %}} -{{% capture body %}} + + ## Pengenalan @@ -129,4 +129,4 @@ menggunakan _field_ `dataSource` pada objek PersistentVolumeClaim. Untuk detailnya bisa dilihat pada [VolumeSnapshot dan Mengembalikan Volume dari _Snapshot_](/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support). -{{% /capture %}} + diff --git a/content/id/docs/concepts/storage/volumes.md b/content/id/docs/concepts/storage/volumes.md index 6c179d508c..5a437cbd45 100644 --- a/content/id/docs/concepts/storage/volumes.md +++ b/content/id/docs/concepts/storage/volumes.md @@ -1,18 +1,18 @@ --- title: Volume -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Berkas-berkas yang disimpan di _disk_ di dalam Container bersifat tidak permanen (akan terhapus seiring dengan dihapusnya Container/Pod), yang menimbulkan beberapa masalah untuk aplikasi biasa saat berjalan di dalam Container. Pertama, saat sebuah Container mengalami kegagalan, Kubelet akan memulai kembali Container tersebut, tetapi semua berkas di dalamnya akan hilang - Container berjalan dalam kondisi yang bersih. Kedua, saat menjalankan banyak Container bersamaan di dalam sebuah `Pod`, biasanya diperlukan untuk saling berbagi berkas-berkas di antara Container-container tersebut. Kedua masalah tersebut dipecahkan oleh abstraksi `Volume` pada Kubernetes. Pengetahuan tentang [Pod](/docs/user-guide/pods) disarankan. -{{% /capture %}} -{{% capture body %}} + + ## Latar Belakang @@ -1144,8 +1144,9 @@ sudo systemctl daemon-reload sudo systemctl restart docker ``` -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + * Ikuti contoh [memasang WordPress dan MySQL dengan Persistent Volume](/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/).
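Sebagai gambaran abstraksi `Volume` yang dijelaskan di atas, berikut sketsa Pod dengan volume `emptyDir` yang dipakai bersama oleh dua Container di dalam satu Pod; nama-nama di dalamnya hanyalah contoh:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: volume-demo
spec:
  containers:
  - name: penulis
    image: busybox
    command: ["/bin/sh", "-c", "echo halo > /data/pesan && sleep 3600"]
    volumeMounts:
    - name: shared-data
      mountPath: /data    # kedua Container melihat direktori yang sama
  - name: pembaca
    image: busybox
    command: ["/bin/sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: shared-data
      mountPath: /data
  volumes:
  - name: shared-data
    emptyDir: {}          # berumur sepanjang Pod; isinya selamat saat Container di-restart
```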
-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/controllers/cron-jobs.md b/content/id/docs/concepts/workloads/controllers/cron-jobs.md
index 9f50e0b42c..29fde331ea 100644
--- a/content/id/docs/concepts/workloads/controllers/cron-jobs.md
+++ b/content/id/docs/concepts/workloads/controllers/cron-jobs.md
@@ -1,10 +1,10 @@
---
title: CronJob
-content_template: templates/concept
+content_type: concept
weight: 80
---

-{{% capture overview %}}
+
Suatu CronJob menciptakan [Job](/docs/concepts/workloads/controllers/jobs-run-to-completion/) yang dijadwalkan berdasarkan waktu tertentu.

@@ -17,10 +17,10 @@ Seluruh waktu `schedule:` pada _**CronJob**_ mengikuti zona waktu dari _master_
Untuk panduan dalam berkreasi dengan _cron job_, dan contoh _spec file_ untuk suatu _cron job_, lihat [Menjalankan otomasi _task_ dengan _cron job_](/docs/tasks/job/automated-tasks-with-cron-jobs).

-{{% /capture %}}

-{{% capture body %}}
+
+
## Limitasi _Cron Job_

@@ -55,4 +55,3 @@ Job akan tetap dijalankan pada 10:22:00. Hal ini terjadi karena CronJob _control
CronJob hanya bertanggung jawab untuk menciptakan Job yang sesuai dengan jadwalnya sendiri, dan Job tersebut bertanggung jawab terhadap pengelolaan Pod yang direpresentasikan olehnya.

-{{% /capture %}}
\ No newline at end of file
diff --git a/content/id/docs/concepts/workloads/controllers/daemonset.md b/content/id/docs/concepts/workloads/controllers/daemonset.md
index c68a207edf..baa79aa3f2 100644
--- a/content/id/docs/concepts/workloads/controllers/daemonset.md
+++ b/content/id/docs/concepts/workloads/controllers/daemonset.md
@@ -1,10 +1,10 @@
---
title: DaemonSet
-content_template: templates/concept
+content_type: concept
weight: 50
---

-{{% capture overview %}}
+
DaemonSet memastikan semua atau sebagian Node memiliki salinan sebuah Pod. Ketika Node baru ditambahkan ke klaster, Pod ditambahkan ke Node tersebut.

@@ -24,10 +24,10 @@ setiap jenis _daemon_.
Pengaturan yang lebih rumit bisa saja menggunakan lebih dari satu DaemonSet untuk satu jenis _daemon_, tapi dengan _flag_ dan/atau permintaan CPU/memori yang berbeda untuk jenis _hardware_ yang berbeda.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Menulis Spek DaemonSet

@@ -233,4 +233,4 @@ host mana Pod berjalan.
Gunakan DaemonSet ketika penting untuk satu salinan Pod selalu berjalan di semua atau sebagian host, dan ketika Pod perlu berjalan sebelum Pod lainnya.

-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/controllers/deployment.md b/content/id/docs/concepts/workloads/controllers/deployment.md
index 5d8d681141..045c04e59b 100644
--- a/content/id/docs/concepts/workloads/controllers/deployment.md
+++ b/content/id/docs/concepts/workloads/controllers/deployment.md
@@ -5,11 +5,11 @@ feature:
description: >
Kubernetes merilis perubahan secara progresif pada aplikasimu atau konfigurasinya sambil memonitor kesehatan aplikasi untuk menjamin bahwa semua instance tidak mati bersamaan. Jika sesuatu yang buruk terjadi, Kubernetes akan melakukan rollback pada perubahanmu. Manfaatkan ekosistem solusi deployment yang terus berkembang.
-content_template: templates/concept
+content_type: concept
weight: 30
---

-{{% capture overview %}}
+
Deployment menyediakan pembaruan [Pods](/id/docs/concepts/workloads/pods/pod/) dan [ReplicaSets](/id/docs/concepts/workloads/controllers/replicaset/) secara deklaratif.

@@ -20,10 +20,10 @@ Kamu mendeskripsikan sebuah state yang diinginkan dalam Deployment, kemudian Dep
Jangan mengganti ReplicaSets milik Deployment.
Pertimbangkan untuk membuat isu pada repositori utama Kubernetes jika kasusmu tidak tercakup dalam kasus-kasus di bawah.
{{< /note >}}

-{{% /capture %}}

-{{% capture body %}}
+
+
## Penggunaan

@@ -1125,4 +1125,4 @@ Deployment umumnya tidak terjeda saat dibuat.
dengan cara yang serupa. Namun, Deployments lebih disarankan karena deklaratif, berjalan di sisi server, dan punya fitur tambahan, seperti pembalikan ke revisi manapun sebelumnya bahkan setelah pembaruan rolling selesai.

-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/controllers/garbage-collection.md b/content/id/docs/concepts/workloads/controllers/garbage-collection.md
index 63592fbe89..5eb00cf987 100644
--- a/content/id/docs/concepts/workloads/controllers/garbage-collection.md
+++ b/content/id/docs/concepts/workloads/controllers/garbage-collection.md
@@ -1,16 +1,16 @@
---
title: Garbage Collection
-content_template: templates/concept
+content_type: concept
weight: 60
---

-{{% capture overview %}}
+
Peran dari _garbage collector_ Kubernetes adalah untuk menghapus objek tertentu yang sebelumnya mempunyai pemilik, tetapi tidak lagi mempunyai pemilik.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Pemilik dan dependen

@@ -125,12 +125,13 @@ Sebelum versi 1.7, ketika menggunakan _cascading delete_ dengan Deployment, kamu
Ditemukan pada [#26120](https://github.com/kubernetes/kubernetes/issues/26120)

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

[Dokumen Desain 1](https://git.k8s.io/community/contributors/design-proposals/api-machinery/garbage-collection.md)

[Dokumen Desain 2](https://git.k8s.io/community/contributors/design-proposals/api-machinery/synchronous-garbage-collection.md)

-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/content/id/docs/concepts/workloads/controllers/jobs-run-to-completion.md
index 1cfe36117c..4aca03535f 100644
--- a/content/id/docs/concepts/workloads/controllers/jobs-run-to-completion.md
+++ b/content/id/docs/concepts/workloads/controllers/jobs-run-to-completion.md
@@ -1,6 +1,6 @@
---
title: Job - Dijalankan Hingga Selesai
-content_template: templates/concept
+content_type: concept
feature:
title: Eksekusi batch
description: >
@@ -8,7 +8,7 @@ feature:
weight: 70
---

-{{% capture overview %}}
+
Sebuah Job membuat satu atau beberapa Pod dan menjamin bahwa jumlah Pod yang telah dispesifikasikan sebelumnya berhasil dijalankan. Pada saat Pod telah dihentikan, Job akan menandainya sebagai Job yang sudah berhasil dijalankan.

@@ -22,10 +22,10 @@ perangkat keras atau terjadinya _reboot_ pada Node).

Kamu juga dapat menggunakan Job untuk menjalankan beberapa Pod secara paralel.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Menjalankan Contoh Job

@@ -502,4 +502,4 @@ dari sebuah Job, tetapi kontrol secara mutlak atas Pod yang dibuat serta tugas y
Kamu dapat menggunakan [`CronJob`](/docs/concepts/workloads/controllers/cron-jobs/) untuk membuat Job yang akan dijalankan pada waktu/tanggal yang spesifik, mirip dengan perangkat lunak `cron` yang ada pada Unix.
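Sebagai gambaran, berikut sketsa minimal sebuah CronJob yang menjalankan Job setiap menit; nama, jadwal, dan _image_ hanyalah contoh asumsi (versi API `batch/v1beta1` sesuai dengan era dokumen ini):

```yaml
# Sketsa CronJob; nama, jadwal, dan image bersifat ilustratif.
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"        # format cron standar: setiap menit
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args: ["/bin/sh", "-c", "date; echo Halo dari klaster Kubernetes"]
          restartPolicy: OnFailure  # Job mengulang Pod yang gagal
```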
-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/controllers/replicaset.md b/content/id/docs/concepts/workloads/controllers/replicaset.md
index 9c62a0fa72..c0c3a83d51 100644
--- a/content/id/docs/concepts/workloads/controllers/replicaset.md
+++ b/content/id/docs/concepts/workloads/controllers/replicaset.md
@@ -1,17 +1,17 @@
---
title: ReplicaSet
-content_template: templates/concept
+content_type: concept
weight: 10
---

-{{% capture overview %}}
+
Tujuan dari ReplicaSet adalah untuk memelihara himpunan stabil dari replika Pod yang sedang berjalan pada satu waktu tertentu. Maka dari itu, ReplicaSet seringkali digunakan untuk menjamin ketersediaan dari beberapa Pod identik dalam jumlah tertentu.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Cara kerja ReplicaSet

@@ -295,4 +295,3 @@ Gunakan [`DaemonSet`](/docs/concepts/workloads/controllers/daemonset/) alih-alih
ReplicaSet adalah suksesor dari [_ReplicationControllers_](/docs/concepts/workloads/controllers/replicationcontroller/). Keduanya memenuhi tujuan yang sama dan memiliki perilaku yang serupa, kecuali bahwa ReplicationController tidak mendukung kebutuhan selektor _set-based_ seperti yang dijelaskan pada [panduan penggunaan label](/docs/concepts/overview/working-with-objects/labels/#label-selectors). Pada kasus tersebut, ReplicaSet lebih direkomendasikan dibandingkan ReplicationController.

-{{% /capture %}}
\ No newline at end of file
diff --git a/content/id/docs/concepts/workloads/controllers/replicationcontroller.md b/content/id/docs/concepts/workloads/controllers/replicationcontroller.md
index 3dad74fb07..f828ff9c64 100644
--- a/content/id/docs/concepts/workloads/controllers/replicationcontroller.md
+++ b/content/id/docs/concepts/workloads/controllers/replicationcontroller.md
@@ -6,11 +6,11 @@ feature:
description: >
Mengulang dan menjalankan kembali kontainer yang gagal, mengganti dan menjadwalkan ulang ketika ada Node yang mati, mematikan kontainer yang tidak memberikan respon terhadap health-check yang telah didefinisikan, dan tidak menunjukkannya ke klien sampai siap untuk digunakan.
-content_template: templates/concept
+content_type: concept
weight: 20
---

-{{% capture overview %}}
+
{{< note >}}
[`Deployment`](/docs/concepts/workloads/controllers/deployment/) yang mengonfigurasi [`ReplicaSet`](/docs/concepts/workloads/controllers/replicaset/) sekarang menjadi cara yang direkomendasikan untuk melakukan replikasi.
@@ -18,10 +18,10 @@ weight: 20

Sebuah _ReplicationController_ memastikan bahwa terdapat sejumlah Pod yang sedang berjalan dalam suatu waktu tertentu. Dengan kata lain, ReplicationController memastikan bahwa sebuah Pod atau sebuah kumpulan Pod yang homogen selalu berjalan dan tersedia.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Bagaimana ReplicationController Bekerja

@@ -240,4 +240,4 @@ Gunakan [`DaemonSet`](/docs/concepts/workloads/controllers/daemonset/) sebagai g
Baca [Menjalankan Kontroler Replikasi Aplikasi _Stateless_](/docs/tutorials/stateless-application/run-stateless-ap-replication-controller/).
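Sebagai gambaran, berikut sketsa minimal sebuah ReplicationController yang menjaga tiga replika Pod; nama dan _image_ hanyalah contoh asumsi:

```yaml
# Sketsa ReplicationController; nama dan image bersifat ilustratif.
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx
spec:
  replicas: 3                # jumlah Pod yang dijaga agar selalu berjalan
  selector:
    app: nginx               # Pod dengan label ini dikelola oleh controller
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
```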
-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/controllers/statefulset.md b/content/id/docs/concepts/workloads/controllers/statefulset.md
index df85a59d39..9d12de91dd 100644
--- a/content/id/docs/concepts/workloads/controllers/statefulset.md
+++ b/content/id/docs/concepts/workloads/controllers/statefulset.md
@@ -1,10 +1,10 @@
---
title: StatefulSet
-content_template: templates/concept
+content_type: concept
weight: 40
---

-{{% capture overview %}}
+
StatefulSet merupakan salah satu objek API _workload_ yang digunakan untuk aplikasi _stateful_.

@@ -13,9 +13,9 @@ StatefulSet merupakan fitur stabil (GA) sejak versi 1.9.
{{< /note >}}

{{< glossary_definition term_id="statefulset" length="all" >}}

-{{% /capture %}}

-{{% capture body %}}
+
+
## Menggunakan StatefulSet

@@ -267,11 +267,12 @@ Setelah melakukan mekanisme _revert_ templat, kamu juga harus menghapus semua Po
StatefulSet tersebut yang telah berusaha untuk menggunakan konfigurasi yang _broken_. StatefulSet akan mulai membuat Pod dengan templat konfigurasi yang sudah di-_revert_.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* Ikuti contoh yang ada pada [bagaimana cara melakukan deploy aplikasi stateful](/docs/tutorials/stateful-application/basic-stateful-set/).
* Ikuti contoh yang ada pada [bagaimana cara melakukan deploy Cassandra dengan StatefulSets](/docs/tutorials/stateful-application/cassandra/).

-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md
index 07abe8e2a7..f2c232faf2 100644
--- a/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md
+++ b/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md
@@ -1,10 +1,10 @@
---
title: Pengendali TTL untuk Sumber Daya yang Telah Selesai Digunakan
-content_template: templates/concept
+content_type: concept
weight: 65
---

-{{% capture overview %}}
+
{{< feature-state for_k8s_version="v1.12" state="alpha" >}}

@@ -19,12 +19,12 @@ Peringatan Fitur Alpha: fitur ini tergolong dalam fitur alpha dan dapat diaktifk
`TTLAfterFinished`.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Pengendali TTL

@@ -78,12 +78,13 @@ Pada Kubernetes, NTP haruslah dilakukan pada semua node untuk mencegah adanya _ti
_Clock_ tidak akan selalu tepat, meskipun begitu perbedaan yang ada haruslah diminimalisasi. Perhatikan bahwa hal ini dapat terjadi apabila TTL diaktifkan dengan nilai selain 0.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

[Membersihkan Job secara Otomatis](/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically)

[Dokumentasi Rancangan](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/0026-ttl-after-finish.md)

-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/pods/disruptions.md b/content/id/docs/concepts/workloads/pods/disruptions.md
index c612405b97..1adde6c949 100644
--- a/content/id/docs/concepts/workloads/pods/disruptions.md
+++ b/content/id/docs/concepts/workloads/pods/disruptions.md
@@ -1,17 +1,17 @@
---
title: Disrupsi
-content_template: templates/concept
+content_type: concept
weight: 60
---

-{{% capture overview %}}
+
Petunjuk ini ditujukan pada pemilik aplikasi yang menginginkan aplikasinya memiliki ketersediaan yang tinggi, sehingga butuh untuk mengerti jenis-jenis Disrupsi yang dapat terjadi pada Pod-pod.
Petunjuk ini juga ditujukan pada administrator klaster yang ingin melakukan berbagai tindakan otomasi pada klaster, seperti pembaruan dan _autoscaling_ klaster.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Disrupsi yang Disengaja dan Tidak Disengaja

@@ -174,12 +174,13 @@ Jika kamu adalah Administrator Klaster, maka kamu mesti melakukan tindakan disru
- Mengizinkan lebih banyak otomasi administrasi klaster.
- Membuat aplikasi yang toleran terhadap disrupsi agak rumit, tetapi usaha yang dilakukan untuk menoleransi disrupsi yang disengaja kebanyakan beririsan dengan usaha untuk mendukung _autoscaling_ dan menoleransi disrupsi yang tidak disengaja.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

- Ikuti langkah-langkah untuk melindungi aplikasimu dengan [membuat sebuah PodDisruptionBudget](/docs/tasks/run-application/configure-pdb/).
- Pelajari lebih lanjut mengenai [melakukan _drain_ terhadap node](/docs/tasks/administer-cluster/safely-drain-node/).

-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/pods/ephemeral-containers.md b/content/id/docs/concepts/workloads/pods/ephemeral-containers.md
index c74e6e63b4..45154caf25 100644
--- a/content/id/docs/concepts/workloads/pods/ephemeral-containers.md
+++ b/content/id/docs/concepts/workloads/pods/ephemeral-containers.md
@@ -1,10 +1,10 @@
---
title: Kontainer Sementara (Ephemeral)
-content_template: templates/concept
+content_type: concept
weight: 80
---

-{{% capture overview %}}
+
{{< feature-state state="alpha" for_k8s_version="v1.16" >}}

@@ -23,9 +23,9 @@ dari suatu kontainer.
Sesuai dengan kebijakan deprekasi Kubernetes, fitur alpha ini dapat berubah secara signifikan di masa depan atau akan dihapus seluruhnya.
{{< /warning >}}

-{{% /capture %}}

-{{% capture body %}}
+
+
## Memahami Kontainer Sementara

@@ -221,4 +221,4 @@ PID USER TIME COMMAND
29 root 0:00 ps auxww
```

-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/pods/init-containers.md b/content/id/docs/concepts/workloads/pods/init-containers.md
index 60ce9d31ce..91807fdaf6 100644
--- a/content/id/docs/concepts/workloads/pods/init-containers.md
+++ b/content/id/docs/concepts/workloads/pods/init-containers.md
@@ -1,16 +1,16 @@
---
title: Init Container
-content_template: templates/concept
+content_type: concept
weight: 40
---

-{{% capture overview %}}
+
Halaman ini menyediakan ikhtisar untuk Init Container, yaitu Container khusus yang dijalankan sebelum Container aplikasi dan berisi skrip peralatan atau _setup_ yang tidak tersedia di dalam _image_ dari Container aplikasi.

-{{% /capture %}}
+
Fitur ini telah keluar dari trek Beta sejak versi 1.6. Init Container dapat dispesifikasikan di dalam PodSpec bersama dengan _array_ `containers` aplikasi. Nilai anotasi _beta_ akan tetap diperhitungkan dan akan menimpa nilai pada PodSpec, tetapi telah ditandai sebagai kedaluwarsa pada versi 1.6 dan 1.7. Pada versi 1.8, anotasi _beta_ tidak didukung lagi dan harus diganti menjadi nilai pada PodSpec.

-{{% capture body %}}
+
## Memahami Init Container

@@ -271,11 +271,12 @@ Sebuah klaster dengan versi Apiserver 1.6.0 ke atas mendukung Init Container mel
Pada Apiserver dan Kubelet versi 1.8.0 ke atas, dukungan untuk anotasi _alpha_ dan _beta_ telah dihapus, sehingga dibutuhkan konversi (manual) dari anotasi yang telah kedaluwarsa tersebut ke dalam bentuk kolom `.spec.initContainers`.
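Sebagai gambaran bentuk kolom tersebut, berikut sketsa minimal sebuah Pod dengan `.spec.initContainers`; nama, _image_, dan perintah hanyalah contoh asumsi:

```yaml
# Sketsa Pod dengan Init Container; nama-nama bersifat ilustratif.
apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
spec:
  initContainers:            # dijalankan sampai selesai sebelum Container aplikasi dimulai
  - name: wait-for-service
    image: busybox:1.28
    command: ['sh', '-c', 'until nslookup myservice; do sleep 2; done']
  containers:
  - name: myapp
    image: busybox:1.28
    command: ['sh', '-c', 'echo aplikasi berjalan && sleep 3600']
```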
-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* [Membuat Pod yang memiliki Init Container](/docs/tasks/configure-pod-container/configure-pod-initialization/#creating-a-pod-that-has-an-init-container)

-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/pods/pod-lifecycle.md b/content/id/docs/concepts/workloads/pods/pod-lifecycle.md
index 59bd066a30..8dac6706a7 100644
--- a/content/id/docs/concepts/workloads/pods/pod-lifecycle.md
+++ b/content/id/docs/concepts/workloads/pods/pod-lifecycle.md
@@ -1,20 +1,20 @@
---
title: Siklus Hidup Pod
-content_template: templates/concept
+content_type: concept
weight: 30
---

-{{% capture overview %}}
+
{{< comment >}}Pembaruan: 4/14/2015{{< /comment >}}
{{< comment >}}Diubah dan dipindahkan ke bagian konsep: 2/2/17{{< /comment >}}

Halaman ini menjelaskan siklus hidup sebuah Pod.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Fase Pod

@@ -334,10 +334,11 @@ spec:
* Pengontrol Node mengisi nilai `phase` Pod menjadi Failed.
* Jika berjalan menggunakan pengontrol, maka Pod akan dibuat ulang di tempat lain.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* Dapatkan pengalaman langsung mengenai [penambahan _handlers_ pada kontainer _lifecycle events_](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/).

@@ -347,7 +348,7 @@ spec:

* Pelajari lebih lanjut mengenai [_lifecycle hooks_ pada kontainer](/docs/concepts/containers/container-lifecycle-hooks/).

-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/pods/pod-overview.md b/content/id/docs/concepts/workloads/pods/pod-overview.md
index e5c6f23c68..0e9593e0d1 100644
--- a/content/id/docs/concepts/workloads/pods/pod-overview.md
+++ b/content/id/docs/concepts/workloads/pods/pod-overview.md
@@ -1,18 +1,18 @@
---
title: Pengenalan Pod
-content_template: templates/concept
+content_type: concept
weight: 10
card:
name: concepts
weight: 60
---

-{{% capture overview %}}
+
Halaman ini menyajikan ikhtisar dari `Pod`, objek terkecil yang dapat di-*deploy* di dalam objek model Kubernetes.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Memahami Pod

Sebuah *Pod* adalah unit dasar di Kubernetes--unit terkecil dan paling sederhana di dalam objek model Kubernetes yang dapat dibuat dan di-*deploy*. Sebuah *Pod* merepresentasikan suatu proses yang berjalan di dalam klaster.

@@ -97,10 +97,11 @@ spec:
Perubahan yang terjadi pada templat atau berganti ke templat yang baru tidak memiliki efek langsung pada *Pod* yang sudah dibuat. *Pod* yang dibuat oleh *replication controller* dapat diperbarui secara langsung.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* Pelajari lebih lanjut tentang perilaku *Pod*:
  * [Terminasi Pod](/docs/concepts/workloads/pods/pod/#termination-of-pods)
  * [Lifecycle Pod](/docs/concepts/workloads/pods/pod-lifecycle/)

-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/pods/pod.md b/content/id/docs/concepts/workloads/pods/pod.md
index 46bfadbe56..3838ec56b5 100644
--- a/content/id/docs/concepts/workloads/pods/pod.md
+++ b/content/id/docs/concepts/workloads/pods/pod.md
@@ -1,18 +1,18 @@
---
reviewers:
title: Pod
-content_template: templates/concept
+content_type: concept
weight: 20
---

-{{% capture overview %}}
+
Pod adalah unit komputasi terkecil yang bisa di-_deploy_ dan dibuat serta dikelola dalam Kubernetes.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Apa Itu Pod?

@@ -260,4 +260,4 @@ pengaturan ini menjadi relevan.
Pod adalah sumber daya tingkat tinggi dalam Kubernetes REST API. Definisi [Objek Pod API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) menjelaskan mengenai objek secara lengkap.

-{{% /capture %}}
+
diff --git a/content/id/docs/concepts/workloads/pods/podpreset.md b/content/id/docs/concepts/workloads/pods/podpreset.md
index d15f3648fb..2fc1b8598b 100644
--- a/content/id/docs/concepts/workloads/pods/podpreset.md
+++ b/content/id/docs/concepts/workloads/pods/podpreset.md
@@ -1,14 +1,14 @@
---
title: Pod Preset
-content_template: templates/concept
+content_type: concept
weight: 50
---

-{{% capture overview %}}
+
Halaman ini menyajikan gambaran umum tentang PodPreset, yang merupakan objek untuk memasukkan informasi tertentu ke dalam Pod pada saat penciptaan. Informasi dapat berupa _secret_, _volume_, _volume mount_, dan variabel _environment_.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Memahami Pod Preset
---
@@ -53,9 +53,10 @@ Dalam rangka untuk menggunakan Pod Preset di dalam klaster kamu, kamu harus mema
   saat menginisialisasi klaster.
1. Kamu telah membuat objek `PodPreset` pada _namespace_ yang kamu gunakan dengan cara mendefinisikan Pod Preset.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* [Memasukkan data ke dalam sebuah Pod dengan PodPreset](/docs/concepts/workloads/pods/pod/#injecting-data-into-a-pod-using-podpreset.md)

-{{% /capture %}}
+
diff --git a/content/id/docs/contribute/_index.md b/content/id/docs/contribute/_index.md
index 43a63c8e68..d793a78967 100644
--- a/content/id/docs/contribute/_index.md
+++ b/content/id/docs/contribute/_index.md
@@ -1,12 +1,12 @@
---
-content_template: templates/concept
+content_type: concept
title: Berkontribusi ke Dokumentasi Kubernetes
linktitle: Berkontribusi
main_menu: true
weight: 80
---

-{{% capture overview %}}
+
Jika kamu ingin membantu dengan berkontribusi ke dokumentasi
atau situs web Kubernetes, kami dengan senang hati menerima bantuan kamu! Siapapun bisa berkontribusi, baik kamu yang masih
@@ -16,7 +16,7 @@ atau bahkan seorang yang tidak tahan melihat saltik (_typo_)!

Untuk informasi mengenai isi dan gaya (penulisan) dokumentasi Kubernetes, lihat [ikhtisar gaya penulisan dokumentasi](/docs/contribute/style/).

-{{% capture body %}}
+
## Jenis-jenis kontributor dokumentasi

@@ -76,4 +76,4 @@ terhadap dokumentasi Kubernetes, tetapi daftar ini dapat membantumu memulainya.
- Untuk berkontribusi ke komunitas Kubernetes melalui forum-forum daring seperti Twitter atau Stack Overflow, atau mengetahui tentang pertemuan komunitas (_meetup_) lokal dan acara-acara Kubernetes, kunjungi [situs komunitas Kubernetes](/community/).
- Untuk mulai berkontribusi ke pengembangan fitur, baca [_cheatsheet_ kontributor](https://github.com/kubernetes/community/tree/master/contributors/guide/contributor-cheatsheet).

-{{% /capture %}}
+
diff --git a/content/id/docs/home/supported-doc-versions.md b/content/id/docs/home/supported-doc-versions.md
index cd90ac42f1..6cecfdaec1 100644
--- a/content/id/docs/home/supported-doc-versions.md
+++ b/content/id/docs/home/supported-doc-versions.md
@@ -1,19 +1,19 @@
---
title: Versi Kubernetes yang Termasuk dalam Dokumentasi
-content_template: templates/concept
+content_type: concept
card:
name: about
weight: 10
title: Versi Kubernetes yang Termasuk dalam Dokumentasi
---

-{{% capture overview %}}
+
Situs ini merupakan dokumentasi dari Kubernetes versi saat ini dan 4 versi sebelumnya.
-{{% /capture %}}

-{{% capture body %}}
+
+
## Versi saat ini

@@ -24,6 +24,6 @@ Versi saat ini adalah

{{< versions-other >}}

-{{% /capture %}}
+
diff --git a/content/id/docs/reference/kubectl/cheatsheet.md b/content/id/docs/reference/kubectl/cheatsheet.md
index 80667814ac..9afe999064 100644
--- a/content/id/docs/reference/kubectl/cheatsheet.md
+++ b/content/id/docs/reference/kubectl/cheatsheet.md
@@ -1,20 +1,20 @@
---
title: Contekan kubectl
-content_template: templates/concept
+content_type: concept
card:
name: reference
weight: 30
---

-{{% capture overview %}}
+
Lihat juga: [Ikhtisar Kubectl](/docs/reference/kubectl/overview/) dan [Panduan JsonPath](/docs/reference/kubectl/jsonpath).

Laman ini merupakan ikhtisar dari perintah `kubectl`.

-{{% /capture %}}

-{{% capture body %}}
+
+
# kubectl - Contekan

@@ -386,9 +386,10 @@ Tingkat kelengkapan keluaran | Deskripsi
`--v=8` | Memperlihatkan konten dari permintaan HTTP.
`--v=9` | Memperlihatkan konten dari permintaan HTTP tanpa dipotong.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* Pelajari lebih lanjut tentang [Ikhtisar kubectl](/docs/reference/kubectl/overview/).

@@ -398,4 +399,4 @@ Tingkat kelengkapan keluaran | Deskripsi

* Pelajari [contekan kubectl](https://github.com/dennyzhang/cheatsheet-kubernetes-A4) dari komunitas.

-{{% /capture %}}
+
diff --git a/content/id/docs/setup/_index.md b/content/id/docs/setup/_index.md
index d170fb24e4..80677a97e4 100644
--- a/content/id/docs/setup/_index.md
+++ b/content/id/docs/setup/_index.md
@@ -3,10 +3,10 @@ no_issue: true
title: Persiapan
main_menu: true
weight: 30
-content_template: templates/concept
+content_type: concept
---

-{{% capture overview %}}
+
Gunakan halaman ini untuk mencari solusi yang paling sesuai dengan kebutuhan kamu.

@@ -14,9 +14,9 @@ Menentukan dimana sebaiknya Kubernetes dijalankan sangat tergantung pada kapasit
Kamu dapat menjalankan Kubernetes hampir dimana saja, mulai dari laptop, VM di penyedia cloud, sampai pada rak-rak berisi server baremetal. Kamu juga bisa menyiapkan klaster yang diatur sepenuhnya (fully-managed), dengan hanya menjalankan satu perintah, ataupun membuat klaster dengan solusi custom kamu sendiri pada server baremetal.

-{{% /capture %}}

-{{% capture body %}}
+
+
## Solusi pada Mesin Lokal

@@ -74,8 +74,9 @@ Solusi-solusi ini cukup beragam, mulai dari bare-metal sampai ke penyedia cloud,
Pilih [solusi custom](/docs/setup/pick-right-solution/#custom-solutions).

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
Lihat [Memilih Solusi Terbaik](/docs/setup/pick-right-solution/) untuk daftar solusi yang lengkap.

-{{% /capture %}}
+
diff --git a/content/id/docs/setup/best-practices/_index.md b/content/id/docs/setup/best-practices/_index.md
new file mode 100755
index 0000000000..e216ed05c2
--- /dev/null
+++ b/content/id/docs/setup/best-practices/_index.md
@@ -0,0 +1,5 @@
+---
+title: "Praktik-praktik Terbaik"
+weight: 60
+---
+
diff --git a/content/id/docs/setup/best-practices/multiple-zones.md b/content/id/docs/setup/best-practices/multiple-zones.md
new file mode 100644
index 0000000000..2727db559d
--- /dev/null
+++ b/content/id/docs/setup/best-practices/multiple-zones.md
@@ -0,0 +1,401 @@
+---
+title: Menjalankan klaster dalam beberapa zona
+weight: 10
+content_template: templates/concept
+---
+
+{{% capture overview %}}
+
+Laman ini menjelaskan tentang bagaimana menjalankan sebuah klaster dalam beberapa zona.
+
+{{% /capture %}}
+
+{{% capture body %}}
+
+## Pendahuluan
+
+Kubernetes 1.2 menambahkan dukungan untuk menjalankan sebuah klaster dalam beberapa zona kegagalan (_multiple failure zones_)
+(GCE secara sederhana menyebutnya sebagai _"zones"_, AWS menyebutnya sebagai _"availability zones"_, dan di sini kita akan menyebutnya sebagai "zona").
+Fitur ini adalah versi sederhana dari fitur federasi klaster yang lebih luas (yang sebelumnya dikenal
+dengan nama panggilan akrab (_affectionate nickname_) ["Ubernetes"](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/multicluster/federation.md)).
+Federasi klaster yang penuh memungkinkan untuk menggabungkan
+klaster Kubernetes terpisah, yang berjalan pada wilayah atau penyedia cloud yang berbeda
+(baik dalam _datacenter_ atau _on-premise_). Namun banyak
+pengguna hanya ingin menjalankan klaster Kubernetes dengan tingkat ketersediaan yang lebih tinggi, dalam beberapa zona
+dari satu penyedia cloud mereka, dan inilah yang disediakan oleh dukungan multi-zona pada Kubernetes 1.2
+(sebelumnya fitur ini dikenal dengan nama panggilan "Ubernetes Lite").
+
+Dukungan multi-zona sengaja dibuat terbatas: satu klaster Kubernetes dapat berjalan
+dalam beberapa zona, tetapi hanya pada wilayah yang sama (dan penyedia cloud yang sama pula). Hanya
+GCE dan AWS yang saat ini mendukung fitur ini secara otomatis (meskipun cukup mudah
+untuk menambahkan dukungan serupa untuk penyedia cloud yang lain atau bahkan untuk perangkat _baremetal_, hanya dengan mengatur
+label yang sesuai untuk ditambahkan ke Node dan volume).
+
+
+## Fungsionalitas
+
+Ketika Node mulai dijalankan, kubelet secara otomatis menambahkan label
+informasi zona pada Node tersebut.
+
+Kubernetes akan menyebarkan Pod secara otomatis dalam sebuah _controller_ replikasi
+atau Service lintas Node dalam sebuah klaster zona tunggal (untuk mengurangi dampak
+kegagalan). Dengan klaster multi-zona, perilaku penyebaran ini akan
+dilanjutkan hingga melintasi zona (untuk mengurangi dampak kegagalan dalam satu zona). (Ini
+dicapai melalui opsi `SelectorSpreadPriority`.) Penyebaran ini dilakukan dengan upaya terbaik (_best effort_),
+sehingga apabila zona pada klaster kamu bersifat heterogen
+(mis. jumlah Node yang berbeda, tipe Node yang berbeda, atau
+persyaratan sumber daya Pod yang berbeda), penyebaran Pod yang merata secara sempurna
+di seluruh zona bisa jadi terhalang. Jika diinginkan, kamu bisa menggunakan
+zona yang homogen (jumlah dan jenis Node yang sama) untuk mengurangi
+probabilitas penyebaran yang tidak merata.
+
+Pada saat volume persisten dibuat, _controller_ penerimaan (_admission controller_) `PersistentVolumeLabel`
+akan secara otomatis menambahkan label zona pada volume tersebut. Penjadwal (melalui
+predikat `VolumeZonePredicate`) kemudian akan memastikan bahwa Pod yang mengklaim
+suatu volume hanya akan ditempatkan pada zona yang sama dengan volume tersebut, karena volume
+tidak dapat di-_attach_ melintasi zona yang berbeda.
+
+## Batasan
+
+Ada beberapa batasan penting dari dukungan multi-zona:
+
+* Kami berasumsi bahwa zona yang berbeda terletak secara berdekatan satu sama lain dalam
+jaringan, jadi kami tidak melakukan _routing_ yang sadar akan zona. Secara khusus, lalu lintas (_traffic_)
+yang berjalan melalui Service mungkin melintasi beberapa zona (bahkan ketika beberapa Pod yang mendukung Service itu
+berada pada zona yang sama dengan klien), dan hal ini dapat menimbulkan latensi dan biaya tambahan.
+
+* Afinitas zona volume (_volume zone-affinity_) hanya akan bekerja dengan PersistentVolume, dan tidak akan
+berfungsi apabila kamu secara langsung menentukan volume EBS dalam spesifikasi Pod (misalnya).
+
+* Klaster tidak dapat menjangkau lintas penyedia cloud atau wilayah (_region_) (fungsi ini akan membutuhkan
+dukungan penuh federasi).
+
+* Meskipun Node kamu berada dalam beberapa zona, saat ini kube-up hanya membuat
+satu Node master secara bawaan (_default_). Meskipun Service memiliki
+ketersediaan (_availability_) yang tinggi dan dapat mentolerir hilangnya sebuah zona, _control plane_
+terletak pada satu zona saja. Pengguna yang menginginkan _control plane_ yang memiliki ketersediaan
+tinggi harus mengikuti instruksi [ketersediaan tinggi](/docs/admin/high-availability).
+
+### Batasan Volume
+
+Batasan-batasan berikut dapat diatasi dengan menggunakan [pengikatan volume yang sadar topologi](/id/docs/concepts/storage/storage-classes/#mode-volume-_binding_).
+
+* Penyebaran zona volume StatefulSet yang menggunakan penyediaan secara dinamis, saat ini tidak sesuai dengan
+  kebijakan afinitas atau anti-afinitas Pod.
+
+* Jika nama StatefulSet berisi tanda hubung ("-"), maka penyebaran zona volume
+  mungkin saja tidak menyediakan distribusi penyimpanan (_storage_) yang seragam di seluruh zona yang berbeda.
+
+* Ketika menentukan beberapa PVC dalam spesifikasi Deployment atau Pod, StorageClass
+  perlu dikonfigurasi untuk zona tunggal tertentu, atau PV perlu
+  disediakan secara statis pada zona tertentu. Solusi lainnya adalah menggunakan sebuah
+  StatefulSet, yang akan memastikan bahwa semua volume untuk sebuah replika
+  disediakan dalam zona yang sama.
+
+## Panduan
+
+Sekarang kita akan menelusuri penyiapan dan penggunaan klaster multi-zona
+pada GCE & AWS. Untuk melakukannya, kamu perlu menyalakan klaster secara penuh
+(dengan menentukan `MULTIZONE=true`), dan kemudian kamu menambahkan Node di zona tambahan
+dengan menjalankan `kube-up` lagi (dengan menetapkan opsi `KUBE_USE_EXISTING_MASTER=true`).
+
+### Mengaktifkan klaster kamu
+
+Buatlah klaster seperti biasa, tetapi teruskan opsi MULTIZONE untuk memberi tahu klaster untuk mengelola beberapa zona;
+dan membuat Node di zona us-central1-a.
+
+GCE:
+
+```shell
+curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a NUM_NODES=3 bash
+```
+
+AWS:
+
+```shell
+curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a NUM_NODES=3 bash
+```
+
+Langkah ini akan mengaktifkan klaster seperti biasa, namun masih berjalan dalam satu zona
+(tetapi opsi `MULTIZONE=true` telah mengaktifkan kapabilitas multi-zona).
+
+### Node yang telah diberi label
+
+Lihatlah Node-Node tersebut; kamu bisa melihat bahwa Node diberi label sesuai dengan informasi zona.
+Node tersebut sejauh ini berada di zona `us-central1-a` (GCE) atau zona `us-west-2a` (AWS).
+Label dari Node itu adalah `failure-domain.beta.kubernetes.io/region` untuk informasi wilayah,
+dan `failure-domain.beta.kubernetes.io/zone` untuk informasi zona:
+
+```shell
+kubectl get nodes --show-labels
+```
+
+Tampilan akan seperti di bawah ini:
+
+```shell
+NAME STATUS ROLES AGE VERSION LABELS
+kubernetes-master Ready,SchedulingDisabled <none> 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master
+kubernetes-minion-87j9 Ready <none> 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9
+kubernetes-minion-9vlv Ready <none> 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
+kubernetes-minion-a12q Ready <none> 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q
+```
+
+### Menambah lebih banyak Node di zona kedua
+
+Mari kita tambahkan sekumpulan Node ke dalam klaster yang ada, dengan menggunakan kembali
+master yang ada, namun dijalankan pada zona yang berbeda (zona `us-central1-b` atau zona `us-west-2b`).
+Kemudian kita jalankan kube-up lagi, tetapi dengan menentukan opsi `KUBE_USE_EXISTING_MASTER=true`
+sehingga kube-up tidak akan membuat master baru, tetapi akan menggunakan kembali master yang dibuat sebelumnya.
+
+GCE:
+
+```shell
+KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-b NUM_NODES=3 kubernetes/cluster/kube-up.sh
+```
+
+Pada AWS, kita juga perlu menentukan CIDR jaringan untuk subnet
+tambahan, bersama dengan alamat IP internal dari master:
+
+```shell
+KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2b NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.1.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh
+```
+
+Lihat lagi Node-Node yang ada; 3 Node lainnya seharusnya sudah berjalan dan ditandai
+berada di `us-central1-b`:
+
+```shell
+kubectl get nodes --show-labels
+```
+
+Hasil tampilan akan terlihat seperti di bawah ini:
+
+```shell
+NAME STATUS ROLES AGE VERSION LABELS
+kubernetes-master Ready,SchedulingDisabled <none> 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master
+kubernetes-minion-281d Ready <none> 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d
+kubernetes-minion-87j9 Ready <none> 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9
+kubernetes-minion-9vlv Ready <none> 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
+kubernetes-minion-a12q Ready <none> 17m v1.13.0
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q
+kubernetes-minion-pp2f Ready <none> 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-pp2f
+kubernetes-minion-wf8i Ready <none> 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-wf8i
+```
+
+### Afinitas Volume
+
+Buatlah sebuah volume dengan menggunakan pembuatan volume yang dinamis (hanya PersistentVolume yang didukung untuk afinitas zona):
+
+```bash
+kubectl apply -f - <<EOF
+{
+  "apiVersion": "v1",
+  "kind": "PersistentVolumeClaim",
+  "metadata": {
+    "name": "claim1",
+    "annotations": {
+      "volume.alpha.kubernetes.io/storage-class": "foo"
+    }
+  },
+  "spec": {
+    "accessModes": [
+      "ReadWriteOnce"
+    ],
+    "resources": {
+      "requests": {
+        "storage": "5Gi"
+      }
+    }
+  }
+}
+EOF
+```
+
+{{< note >}}
+Pada versi 1.3+, Kubernetes akan mendistribusikan klaim PV yang dinamis di seluruh
+zona yang telah dikonfigurasi. Pada versi 1.2, volume persisten yang dinamis selalu dibuat di zona master klaster
+(yaitu `us-central1-a`/`us-west-2a`); masalah tersebut diangkat pada
+([#23330](https://github.com/kubernetes/kubernetes/issues/23330))
+dan telah diselesaikan pada versi 1.3+.
+{{< /note >}}
+
+Sekarang marilah kita memvalidasi bahwa Kubernetes secara otomatis memberikan label zona & wilayah di mana PV itu dibuat.
+
+```shell
+kubectl get pv --show-labels
+```
+
+Hasil tampilan akan terlihat seperti di bawah ini:
+
+```shell
+NAME CAPACITY ACCESSMODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE LABELS
+pv-gce-mj4gm 5Gi RWO Retain Bound default/claim1 manual 46s failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a
+```
+
+Kemudian kita akan membuat sebuah Pod yang menggunakan klaim volume persisten tersebut.
+Karena volume pada GCE PDs / AWS EBS tidak dapat di-_attach_ melewati zona yang berbeda,
+hal ini berarti bahwa Pod ini hanya dapat dibuat pada zona yang sama dengan volume tersebut:
+
+
+```yaml
+kubectl apply -f - <<EOF
+kind: Pod
+apiVersion: v1
+metadata:
+  name: mypod
+spec:
+  containers:
+    - name: myfrontend
+      image: nginx
+      volumeMounts:
+      - mountPath: "/var/www/html"
+        name: mypd
+  volumes:
+    - name: mypd
+      persistentVolumeClaim:
+        claimName: claim1
+EOF
+```
+
+Perhatikan bahwa Pod tersebut secara otomatis dibuat pada zona yang sama dengan volume, karena
+_attachment_ lintas zona umumnya tidak diizinkan oleh penyedia cloud:
+
+```shell
+kubectl describe pod mypod | grep Node
+```
+
+```shell
+Node: kubernetes-minion-9vlv/10.240.0.5
+```
+
+Dan periksa label Node tersebut:
+
+```shell
+kubectl get node kubernetes-minion-9vlv --show-labels
+```
+
+```shell
+NAME STATUS AGE VERSION LABELS
+kubernetes-minion-9vlv Ready 22m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
+```
+
+### Pod tersebar di seluruh zona
+
+Pod dalam sebuah _controller_ replikasi atau Service secara otomatis tersebar
+di seluruh zona. Pertama, mari kita jalankan lebih banyak Node di zona ketiga:
+
+GCE:
+
+```shell
+KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-f NUM_NODES=3 kubernetes/cluster/kube-up.sh
+```
+
+AWS:
+
+```shell
+KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2c NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.2.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh
+```
+
+Verifikasikan bahwa kamu sekarang memiliki Node di 3 zona:
+
+```shell
+kubectl get nodes --show-labels
+```
+
+Buat program contoh guestbook-go, yang menyertakan RC berukuran 3 dan menjalankan aplikasi web sederhana:
+
+```shell
+find kubernetes/examples/guestbook-go/ -name '*.json' | xargs -I {} kubectl apply -f {}
+```
+
+Pod tersebut seharusnya tersebar di ketiga zona:
+
+```shell
+kubectl describe pod -l app=guestbook | grep Node
+```
+
+```shell
+Node: kubernetes-minion-9vlv/10.240.0.5
+Node: kubernetes-minion-281d/10.240.0.8
+Node: kubernetes-minion-olsh/10.240.0.11
+```
+
+```shell
+kubectl get node kubernetes-minion-9vlv kubernetes-minion-281d kubernetes-minion-olsh --show-labels
+```
+
+```shell
+NAME STATUS ROLES AGE VERSION LABELS
+kubernetes-minion-9vlv Ready <none> 34m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
+kubernetes-minion-281d Ready <none> 20m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d
+kubernetes-minion-olsh Ready <none> 3m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-f,kubernetes.io/hostname=kubernetes-minion-olsh
+```
+
+_Load-balancer_ menjangkau semua zona dalam satu klaster; program contoh guestbook-go
+sudah termasuk contoh Service dengan beban seimbang (_load-balanced service_):
+
+```shell
+kubectl describe service guestbook | grep LoadBalancer.Ingress
+```
+
+Hasil tampilan akan terlihat seperti di bawah ini:
+
+```shell
+LoadBalancer Ingress: 130.211.126.21
+```
+
+Atur alamat IP di atas:
+
+```shell
+export IP=130.211.126.21
+```
+
+Telusurilah dengan curl melalui alamat IP tersebut:
+
+```shell
+curl -s http://${IP}:3000/env | grep HOSTNAME
+```
+
+Hasil tampilan akan terlihat seperti di bawah ini:
+
+```shell
+ "HOSTNAME": "guestbook-44sep",
+```
+
+Kemudian, telusurilah beberapa kali:
+
+```shell
+(for i in `seq 20`; do curl -s http://${IP}:3000/env | grep HOSTNAME; done) | sort | uniq
+```
+
+Hasil tampilan akan terlihat seperti di bawah ini:
+
+```shell
+ "HOSTNAME": "guestbook-44sep",
+ "HOSTNAME": "guestbook-hum5n",
+ "HOSTNAME": "guestbook-ppm40",
+```
+
+_Load balancer_ telah menargetkan ke semua Pod dengan benar, meskipun semuanya berada di beberapa zona yang berbeda.
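+Sebagai catatan tambahan, berikut sketsa minimal sebuah StorageClass yang sadar topologi, yang dapat membantu mengatasi batasan-batasan volume yang disebutkan sebelumnya; nama dan _provisioner_ di sini hanyalah contoh asumsi untuk GCE:
+
+```yaml
+# Sketsa StorageClass yang menunda pengikatan volume sampai Pod dijadwalkan,
+# sehingga zona volume mengikuti zona penempatan Pod.
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: standard-topology-aware   # nama contoh (asumsi)
+provisioner: kubernetes.io/gce-pd # provisioner contoh untuk GCE PD
+volumeBindingMode: WaitForFirstConsumer
+```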
+
+### Menghentikan Klaster
+
+Apabila kamu sudah selesai, maka bersihkanlah:
+
+GCE:
+
+```shell
+KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-f kubernetes/cluster/kube-down.sh
+KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-b kubernetes/cluster/kube-down.sh
+KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a kubernetes/cluster/kube-down.sh
+```
+
+AWS:
+
+```shell
+KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2c kubernetes/cluster/kube-down.sh
+KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2b kubernetes/cluster/kube-down.sh
+KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a kubernetes/cluster/kube-down.sh
+```
+
+{{% /capture %}}
diff --git a/content/id/docs/setup/production-environment/_index.md b/content/id/docs/setup/production-environment/_index.md
new file mode 100644
index 0000000000..798024b963
--- /dev/null
+++ b/content/id/docs/setup/production-environment/_index.md
@@ -0,0 +1,4 @@
+---
+title: Lingkungan Produksi
+weight: 30
+---
diff --git a/content/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md
new file mode 100644
index 0000000000..4b7b15e91c
--- /dev/null
+++ b/content/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md
@@ -0,0 +1,638 @@
+---
+title: Membuat sebuah klaster dengan control-plane tunggal menggunakan kubeadm
+content_template: templates/task
+weight: 30
+---
+
+{{% capture overview %}}
+
+Perkakas `kubeadm` membantu kamu membuat sebuah klaster Kubernetes minimum yang layak dan sesuai dengan _best practice_. Bahkan, kamu dapat menggunakan `kubeadm` untuk membuat sebuah klaster yang lolos [uji Kubernetes Conformance](https://kubernetes.io/blog/2017/10/software-conformance-certification).
+`kubeadm` juga mendukung fungsi siklus hidup (_lifecycle_)
+klaster lainnya, seperti [_bootstrap token_](/docs/reference/access-authn-authz/bootstrap-tokens/) dan pembaruan klaster (_cluster upgrade_).
+
+`kubeadm` merupakan perkakas yang bagus jika kamu membutuhkan:
+
+- Sebuah cara yang sederhana untuk kamu mencoba Kubernetes, mungkin untuk pertama kalinya.
+- Sebuah cara bagi pengguna lama (_existing users_) untuk mengotomatiskan penyetelan sebuah klaster dan menguji aplikasi mereka.
+- Sebuah komponen dasar pada ekosistem lain dan/atau perkakas penginstal lain dengan cakupan
+  yang lebih luas.
+
+Kamu dapat menginstal dan menggunakan `kubeadm` pada berbagai macam mesin: laptop milikmu, sekelompok
+server di _cloud_, sebuah Raspberry Pi, dan lain-lain. Baik itu men-_deploy_ pada
+_cloud_ ataupun _on-premise_, kamu dapat mengintegrasikan `kubeadm` pada sistem _provisioning_ seperti
+Ansible atau Terraform.
+
+{{% /capture %}}
+
+{{% capture prerequisites %}}
+
+Untuk mengikuti panduan ini, kamu membutuhkan:
+
+- Satu mesin atau lebih, yang menjalankan sistem operasi Linux yang kompatibel dengan deb atau rpm; sebagai contoh: Ubuntu atau CentOS.
+- 2 GiB atau lebih RAM per mesin--kurang dari nilai tersebut akan menyisakan sedikit ruang untuk
+  aplikasi-aplikasimu.
+- Sedikitnya 2 CPU pada mesin yang akan kamu gunakan sebagai Node _control-plane_.
+- Koneksi internet pada seluruh mesin pada klaster. Kamu dapat menggunakan internet
+  publik ataupun pribadi.
+
+
+Kamu juga harus menggunakan versi `kubeadm` yang dapat men-_deploy_ versi
+Kubernetes yang ingin kamu gunakan pada klaster barumu.
+
+[Kebijakan dukungan versi Kubernetes dan _version skew_](https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions) juga berlaku pada `kubeadm` dan Kubernetes secara umum.
+Periksa kebijakan tersebut untuk mempelajari tentang versi Kubernetes dan `kubeadm`
+mana saja yang didukung. Laman ini ditulis untuk Kubernetes {{< param "version" >}}.
+
+Fitur `kubeadm` secara umum berstatus _General Availability_ (GA). Beberapa sub-fitur sedang
+berada dalam pengembangan. Implementasi pembuatan klaster dapat berubah
+sedikit seiring dengan berevolusinya kubeadm, namun secara umum implementasinya sudah cukup stabil.
+
+{{< note >}}
+Semua perintah di dalam `kubeadm alpha`, sesuai definisi, didukung pada level _alpha_.
+{{< /note >}}
+
+{{% /capture %}}
+
+{{% capture steps %}}
+
+## Tujuan
+
+* Menginstal klaster Kubernetes dengan _control-plane_ tunggal atau [klaster dengan ketersediaan tinggi](/docs/setup/production-environment/tools/kubeadm/high-availability/)
+* Menginstal jaringan Pod pada klaster sehingga Pod dapat
+  berinteraksi satu sama lain
+
+## Instruksi
+
+### Menginstal kubeadm pada hos
+
+Lihat ["Menginstal kubeadm"](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/).
+
+{{< note >}}
+Jika kamu sudah menginstal kubeadm sebelumnya, jalankan `apt-get update &&
+apt-get upgrade` atau `yum update` untuk mendapatkan versi kubeadm paling baru.
+
+Ketika kamu melakukan pembaruan, kubelet melakukan _restart_ setiap beberapa detik sambil menunggu dalam kondisi _crashloop_ sampai
+kubeadm memberikan perintah yang harus dilakukan. _Crashloop_ ini memang diantisipasi dan normal.
+Setelah kamu menginisialisasi _control-plane_, kubelet akan berjalan normal.
+{{< /note >}}
+
+### Menginisialisasi Node _control-plane_
+
+Node _control-plane_ adalah mesin di mana komponen-komponen _control plane_ berjalan, termasuk
+{{< glossary_tooltip term_id="etcd" >}} (basis data klaster) dan
+{{< glossary_tooltip text="API Server" term_id="kube-apiserver" >}}
+(yang akan berkomunikasi dengan perkakas _command line_ {{< glossary_tooltip text="kubectl" term_id="kubectl" >}}).
+
+1. (Direkomendasikan) Jika kamu berencana untuk memperbarui klaster `kubeadm` dengan _control-plane_ tunggal
+menjadi ketersediaan tinggi, kamu harus menentukan `--control-plane-endpoint` agar mengarah ke _endpoint_ yang digunakan bersama
+untuk semua Node _control-plane_. _Endpoint_ tersebut dapat berupa nama DNS atau sebuah alamat IP dari _load-balancer_.
+2. Pilih _add-on_ jaringan Pod, dan pastikan apakah diperlukan argumen untuk
+diberikan pada `kubeadm init`. Tergantung
+penyedia pihak ketiga yang kamu pilih, kamu mungkin harus mengatur `--pod-network-cidr` dengan nilai
+yang spesifik pada penyedia tertentu. Lihat [Menginstal _add-on_ jaringan Pod](#jaringan-pod).
+3. (Opsional) Sejak versi 1.14, `kubeadm` mencoba untuk mendeteksi _runtime_ kontainer pada Linux
+dengan menggunakan daftar _domain socket path_ yang umum diketahui. Untuk menggunakan _runtime_ kontainer yang berbeda atau
+jika ada lebih dari satu yang terpasang pada Node yang digunakan, tentukan argumen `--cri-socket`
+pada `kubeadm init`. Lihat [Menginstal _runtime_](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-runtime).
+4.
(Opsional) Kecuali ditentukan sebelumnya, `kubeadm` akan menggunakan antarmuka jaringan yang diasosiasikan
+dengan _default gateway_ untuk mengatur alamat _advertise_ untuk API Server pada Node _control-plane_ ini.
+Untuk menggunakan antarmuka jaringan yang berbeda, tentukan argumen `--apiserver-advertise-address=<ip-address>`
+pada `kubeadm init`. Untuk men-_deploy_ klaster Kubernetes IPv6 menggunakan pengalamatan IPv6, kamu
+harus menentukan alamat IPv6, sebagai contoh `--apiserver-advertise-address=fd00::101`
+5. (Opsional) Jalankan `kubeadm config images pull` sebelum `kubeadm init` untuk memastikan
+konektivitas ke _container image registry_ gcr.io.
+
+Untuk menginisialisasi Node _control-plane_ jalankan:
+
+```bash
+kubeadm init <args>
+```
+
+### Pertimbangan mengenai apiserver-advertise-address dan ControlPlaneEndpoint
+
+Meski `--apiserver-advertise-address` dapat digunakan untuk mengatur alamat _advertise_ untuk server
+API pada Node _control-plane_ ini, `--control-plane-endpoint` dapat digunakan untuk mengatur _endpoint_ yang digunakan bersama
+untuk seluruh Node _control-plane_.
+
+`--control-plane-endpoint` tidak hanya mengizinkan alamat IP tetapi juga nama DNS yang dapat dipetakan ke alamat IP.
+Silakan hubungi administrator jaringan kamu untuk mengevaluasi solusi-solusi yang mempertimbangkan pemetaan tersebut.
+
+Berikut contoh pemetaannya:
+
+```
+192.168.0.102 cluster-endpoint
+```
+
+Di mana `192.168.0.102` merupakan alamat IP dari Node ini dan `cluster-endpoint` merupakan nama DNS _custom_ yang dipetakan pada IP ini.
+Hal ini memungkinkan kamu untuk memberikan `--control-plane-endpoint=cluster-endpoint` pada `kubeadm init` dan memberikan nama DNS yang sama pada
+`kubeadm join`. Kemudian kamu dapat memodifikasi `cluster-endpoint` untuk mengarah pada alamat _load-balancer_ dalam skenario
+ketersediaan tinggi (_highly available_).
+
+Mengubah klaster _control plane_ tunggal yang dibuat tanpa `--control-plane-endpoint` menjadi klaster dengan ketersediaan tinggi
+tidak didukung oleh kubeadm.
+
+### Informasi lebih lanjut
+
+Untuk informasi lebih lanjut mengenai argumen-argumen `kubeadm init`, lihat [panduan referensi kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/).
+
+Untuk daftar pengaturan konfigurasi yang lengkap, lihat [dokumentasi berkas konfigurasi](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file).
+
+Untuk menyetel komponen-komponen _control plane_, termasuk pemasangan IPv6 opsional pada _liveness probe_ untuk komponen-komponen _control plane_ dan server etcd, berikan argumen ekstra pada tiap komponen seperti yang didokumentasikan pada [argumen-argumen _custom_](/docs/setup/production-environment/tools/kubeadm/control-plane-flags/).
+
+Untuk menjalankan `kubeadm init` lagi, sebelumnya kamu harus [membongkar klaster](#pembongkaran).
+
+Jika kamu menggabungkan sebuah Node dengan arsitektur yang berbeda ke klastermu, pastikan DaemonSets yang di-_deploy_
+memiliki _image_ kontainer yang mendukung arsitektur tersebut.
+
+Pertama-tama `kubeadm init` akan menjalankan sekumpulan _precheck_ untuk memastikan mesin
+siap untuk menjalankan Kubernetes. Kumpulan _precheck_ ini menunjukkan peringatan-peringatan dan akan berhenti jika terjadi kesalahan. Kemudian `kubeadm init`
+akan mengunduh dan menginstal komponen-komponen _control plane_ klaster. Hal ini membutuhkan waktu beberapa menit.
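+Sebagai ilustrasi, opsi-opsi di atas juga dapat diberikan melalui sebuah berkas konfigurasi yang dijalankan dengan `kubeadm init --config <berkas>.yaml`; berikut sketsa minimalnya (nilai-nilai di bawah hanyalah contoh asumsi, dengan asumsi versi API `kubeadm.k8s.io/v1beta2` yang sesuai dengan era dokumen ini):
+
+```yaml
+# Sketsa berkas konfigurasi kubeadm; semua nilai bersifat ilustratif.
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: ClusterConfiguration
+kubernetesVersion: v1.18.0                  # versi contoh (asumsi)
+controlPlaneEndpoint: "cluster-endpoint:6443" # endpoint bersama, seperti contoh pemetaan di atas
+networking:
+  podSubnet: "192.168.0.0/16"               # setara dengan --pod-network-cidr
+```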
+Keluaran yang dihasilkan terlihat seperti berikut ini: + +```none +[init] Using Kubernetes version: vX.Y.Z +[preflight] Running pre-flight checks +[preflight] Pulling images required for setting up a Kubernetes cluster +[preflight] This might take a minute or two, depending on the speed of your internet connection +[preflight] You can also perform this action in beforehand using 'kubeadm config images pull' +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Activating the kubelet service +[certs] Using certificateDir folder "/etc/kubernetes/pki" +[certs] Generating "etcd/ca" certificate and key +[certs] Generating "etcd/server" certificate and key +[certs] etcd/server serving cert is signed for DNS names [kubeadm-cp localhost] and IPs [10.138.0.4 127.0.0.1 ::1] +[certs] Generating "etcd/healthcheck-client" certificate and key +[certs] Generating "etcd/peer" certificate and key +[certs] etcd/peer serving cert is signed for DNS names [kubeadm-cp localhost] and IPs [10.138.0.4 127.0.0.1 ::1] +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] apiserver serving cert is signed for DNS names [kubeadm-cp kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.138.0.4] +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "front-proxy-ca" certificate and key +[certs] Generating "front-proxy-client" certificate and key +[certs] Generating "sa" key and public key +[kubeconfig] Using kubeconfig folder "/etc/kubernetes" +[kubeconfig] Writing "admin.conf" kubeconfig file +[kubeconfig] Writing "kubelet.conf" kubeconfig file +[kubeconfig] Writing "controller-manager.conf" kubeconfig file +[kubeconfig] Writing "scheduler.conf" kubeconfig file +[control-plane] Using manifest folder "/etc/kubernetes/manifests" +[control-plane] Creating static Pod manifest for "kube-apiserver" +[control-plane] Creating static Pod manifest for "kube-controller-manager" +[control-plane] Creating static Pod manifest for "kube-scheduler" +[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". 
This can take up to 4m0s
+[apiclient] All control plane components are healthy after 31.501735 seconds
+[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
+[kubelet] Creating a ConfigMap "kubelet-config-X.Y" in namespace kube-system with the configuration for the kubelets in the cluster
+[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kubeadm-cp" as an annotation
+[mark-control-plane] Marking the node kubeadm-cp as control-plane by adding the label "node-role.kubernetes.io/master=''"
+[mark-control-plane] Marking the node kubeadm-cp as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
+[bootstrap-token] Using token: <token>
+[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
+[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
+[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
+[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
+[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
+[addons] Applied essential addon: CoreDNS
+[addons] Applied essential addon: kube-proxy
+
+Your Kubernetes control-plane has initialized successfully!
+
+To start using your cluster, you need to run the following as a regular user:
+
+  mkdir -p $HOME/.kube
+  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+  sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+You should now deploy a Pod network to the cluster.
+Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
+  /docs/concepts/cluster-administration/addons/
+
+You can now join any number of machines by running the following on each node
+as root:
+
+  kubeadm join <control-plane-host>:<control-plane-port> --token <token> --discovery-token-ca-cert-hash sha256:<hash>
+```
+
+Untuk membuat kubectl bekerja bagi pengguna _non-root_, jalankan perintah-perintah berikut, yang juga merupakan
+bagian dari keluaran `kubeadm init`:
+
+```bash
+mkdir -p $HOME/.kube
+sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+```
+
+Secara alternatif, jika kamu adalah pengguna `root`, kamu dapat menjalankan:
+
+```bash
+export KUBECONFIG=/etc/kubernetes/admin.conf
+```
+
+Buatlah catatan dari perintah `kubeadm join` yang dihasilkan `kubeadm init`. Kamu
+membutuhkan perintah ini untuk [menggabungkan Node-Node ke klaster](#menggabungkan-node).
+
+_Token_ digunakan untuk otentikasi bersama (_mutual authentication_) antara Node _control-plane_ dan Node-Node yang
+akan bergabung. _Token_ yang didapat di sini bersifat rahasia. Simpan dengan aman, karena siapapun yang memiliki token tersebut
+dapat menambahkan Node-Node yang dapat mengotentikasikan diri ke klaster. Kamu dapat menampilkan daftar _token_,
+membuat, dan menghapus _token_ dengan perintah `kubeadm token`. Lihat
+[panduan referensi kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm-token/).
+
+### Menginstal _add-on_ jaringan Pod {#jaringan-pod}
+
+{{< caution >}}
+Bagian ini berisi informasi penting mengenai penyetelan jejaring dan
+urutan _deployment_.
+Baca seluruh saran ini dengan saksama sebelum melanjutkan.
+
+**Kamu harus men-_deploy_
+_add-on_ jaringan Pod berbasis {{< glossary_tooltip text="_Container Network Interface_" term_id="cni" >}}
+(CNI) sehingga Pod dapat berkomunikasi satu sama lain.
+DNS klaster (CoreDNS) tidak akan menyala sebelum jaringan dipasangkan.**
+
+- Perlu diperhatikan bahwa jaringan Pod tidak boleh tumpang tindih dengan jaringan hos
+  manapun: kamu akan menemui beberapa masalah jika terjadi tumpang tindih.
+  (Jika kamu menemukan adanya bentrokan antara jaringan Pod
+  pilihan _plugin_ jaringan dengan jaringan hos, kamu harus memikirkan blok
+  CIDR yang cocok untuk digunakan, kemudian menggunakannya pada saat `kubeadm init` dengan
+  `--pod-network-cidr`, atau sebagai penggantinya pada YAML _plugin_ jaringan kamu).
+
+- Secara bawaan, `kubeadm` mengatur klastermu untuk menggunakan dan melaksanakan penggunaan
+  [RBAC](/docs/reference/access-authn-authz/rbac/) (_role based access control_).
+  Pastikan _plugin_ jaringan Pod mendukung RBAC, dan begitu juga seluruh manifes
+  yang kamu gunakan untuk men-_deploy_-nya.
+
+- Jika kamu ingin menggunakan IPv6--baik jaringan _dual-stack_, ataupun jaringan _single-stack_ IPv6
+  --untuk klastermu, pastikan _plugin_ jaringan Pod
+  mendukung IPv6.
+  Dukungan IPv6 telah ditambahkan pada CNI sejak [v0.6.0](https://github.com/containernetworking/cni/releases/tag/v0.6.0).
+
+{{< /caution >}}
+
+{{< note >}}
+Saat ini Calico adalah satu-satunya _plugin_ CNI yang diuji secara e2e (_end-to-end_) oleh proyek kubeadm.
+Jika kamu menemukan isu terkait _plugin_ CNI kamu harus membuat tiket pada pelacak isu masing-masing _plugin_,
+bukan pada pelacak isu kubeadm maupun kubernetes.
+{{< /note >}}
+
+Beberapa proyek eksternal menyediakan jaringan Pod Kubernetes menggunakan CNI, beberapa di antaranya juga
+mendukung [Network Policy](/docs/concepts/services-networking/networkpolicies/).
+
+Lihat daftar
+[_add-on_ jejaring dan _network policy_](https://kubernetes.io/docs/concepts/cluster-administration/addons/#networking-and-network-policy) yang tersedia.
+
+Kamu dapat menginstal _add-on_ jaringan Pod dengan perintah berikut pada Node
+_control-plane_ atau Node yang memiliki kredensial kubeconfig:
+
+```bash
+kubectl apply -f <add-on.yaml>
+```
+
+Kamu hanya dapat menginstal satu jaringan Pod per klaster.
+Di bawah ini kamu dapat menemukan instruksi instalasi untuk beberapa _plugin_ jaringan Pod yang populer:
+
+{{< tabs name="tabs-pod-install" >}}
+
+{{% tab name="Calico" %}}
+[Calico](https://docs.projectcalico.org/latest/introduction/) merupakan penyedia jejaring dan _network policy_. Calico mendukung sekumpulan opsi jejaring yang fleksibel sehingga kamu dapat memilih opsi yang paling efisien untuk situasimu, termasuk jaringan _non-overlay_ dan _overlay_, dengan atau tanpa BGP. Calico menggunakan mesin yang sama untuk melaksanakan _network policy_ pada hos, Pod, dan (jika menggunakan Istio & Envoy) aplikasi yang berada pada lapisan _service mesh_. Calico bekerja pada beberapa arsitektur, meliputi `amd64`, `arm64`, dan `ppc64le`.
+
+Secara bawaan, Calico menggunakan `192.168.0.0/16` sebagai CIDR jaringan Pod, namun hal ini dapat diatur pada berkas calico.yaml. Agar Calico dapat bekerja dengan benar, kamu perlu memberikan CIDR yang sama pada perintah `kubeadm init` menggunakan opsi `--pod-network-cidr=192.168.0.0/16` atau melalui konfigurasi kubeadm.
+
+```shell
+kubectl apply -f https://docs.projectcalico.org/v3.11/manifests/calico.yaml
+```
+
+{{% /tab %}}
+
+{{% tab name="Cilium" %}}
+Agar Cilium dapat bekerja dengan benar, kamu harus memberikan `--pod-network-cidr=10.217.0.0/16` pada `kubeadm init`.
+
+Untuk men-_deploy_ Cilium kamu hanya perlu menjalankan:
+
+```shell
+kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.6/install/kubernetes/quick-install.yaml
+```
+
+Ketika seluruh Pod Cilium sudah bertanda `READY`, kamu dapat mulai menggunakan klaster.
+
+```shell
+kubectl get pods -n kube-system --selector=k8s-app=cilium
+```
+Keluarannya akan tampil seperti berikut:
+```
+NAME           READY   STATUS    RESTARTS   AGE
+cilium-drxkl   1/1     Running   0          18m
+```
+
+Cilium dapat digunakan sebagai pengganti kube-proxy, lihat [Kubernetes tanpa kube-proxy](https://docs.cilium.io/en/stable/gettingstarted/kubeproxy-free).
+
+Untuk informasi lebih lanjut mengenai penggunaan Cilium dengan Kubernetes, lihat [panduan Instalasi Kubernetes untuk Cilium](https://docs.cilium.io/en/stable/kubernetes/).
+
+{{% /tab %}}
+
+{{% tab name="Contiv-VPP" %}}
+[Contiv-VPP](https://contivpp.io/) menggunakan CNF vSwitch yang dapat diprogram berbasis [FD.io VPP](https://fd.io/),
+menawarkan layanan dan jejaring _cloud-native_ yang kaya fungsi dan berkinerja tinggi.
+
+Contiv-VPP mengimplementasikan Service dan Network Policy Kubernetes pada _user space_ (on VPP).
+
+Silakan merujuk pada panduan pemasangan berikut: [Pemasangan Manual Contiv-VPP](https://github.com/contiv/vpp/blob/master/docs/setup/MANUAL_INSTALL.md)
+{{% /tab %}}
+
+{{% tab name="Kube-router" %}}
+
+Kube-router mengandalkan kube-controller-manager untuk mengalokasikan CIDR Pod untuk Node-Node. Maka dari itu, gunakan `kubeadm init` dengan opsi `--pod-network-cidr`.
+
+Kube-router menyediakan jejaring Pod, _network policy_, dan _service proxy_ berkinerja tinggi berbasis IP Virtual Server (IPVS)/Linux Virtual Server (LVS).
+
+Informasi mengenai penggunaan `kubeadm` untuk menyiapkan klaster Kubernetes dengan Kube-router dapat dilihat di [panduan pemasangan resminya](https://github.com/cloudnativelabs/kube-router/blob/master/docs/kubeadm.md).
+{{% /tab %}}
+
+{{% tab name="Weave Net" %}}
+
+Untuk informasi lebih lanjut mengenai pemasangan klaster Kubernetes menggunakan Weave Net, silakan lihat [Mengintegrasikan Kubernetes melalui Addon](https://www.weave.works/docs/net/latest/kube-addon/).
+
+Weave Net bekerja pada platform `amd64`, `arm`, `arm64` dan `ppc64le` tanpa membutuhkan tindakan ekstra.
+Weave Net menyalakan mode _hairpin_ secara bawaan. Hal ini mengizinkan Pod untuk mengakses dirinya sendiri melalui alamat IP Service
+jika mereka tidak tahu PodIP miliknya.
+
+```shell
+kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
+```
+{{% /tab %}}
+
+{{< /tabs >}}
+
+
+Setelah jaringan Pod dipasangkan, kamu dapat mengonfirmasi hal tersebut dengan
+memastikan Pod CoreDNS berada pada kondisi `Running` pada keluaran `kubectl get pods --all-namespaces`.
+Setelah Pod CoreDNS menyala dan berjalan, kamu dapat melanjutkan (pemasangan klaster) dengan menggabungkan Node-Node yang lain.
+
+Jika jaringan belum bekerja atau CoreDNS tidak berada pada kondisi `Running`, lihatlah
+[panduan penyelesaian masalah](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/)
+untuk `kubeadm`.
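+
+Sebagai contoh pemeriksaan cepat (sketsa; `k8s-app=kube-dns` adalah label bawaan yang dipakai untuk Pod CoreDNS pada klaster kubeadm):
+
+```bash
+# Periksa apakah Pod CoreDNS sudah berada pada kondisi Running
+kubectl get pods -n kube-system -l k8s-app=kube-dns
+```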
+
+### Isolasi Node _control plane_
+
+Secara bawaan, klaster tidak akan menjadwalkan Pod pada Node _control-plane_ untuk alasan
+keamanan. Jika kamu ingin Pod dapat dijadwalkan pada Node _control-plane_, sebagai contoh untuk
+klaster Kubernetes bermesin-tunggal untuk pengembangan, jalankan:
+
+```bash
+kubectl taint nodes --all node-role.kubernetes.io/master-
+```
+
+Dengan keluaran seperti berikut:
+
+```
+node "test-01" untainted
+taint "node-role.kubernetes.io/master:" not found
+taint "node-role.kubernetes.io/master:" not found
+```
+
+Hal ini akan menghapus _taint_ `node-role.kubernetes.io/master` pada Node manapun yang
+memilikinya, termasuk Node _control-plane_, sehingga _scheduler_ akan dapat
+menjadwalkan Pod di manapun.
+
+### Menggabungkan Node-Node {#menggabungkan-node}
+
+Node adalah tempat beban kerja (Container, Pod, dan lain-lain) berjalan. Untuk menambahkan Node baru pada klaster, lakukan hal berikut pada setiap mesin:
+
+* SSH ke mesin
+* Gunakan pengguna _root_ (mis. `sudo su -`)
+* Jalankan perintah hasil keluaran `kubeadm init`. Sebagai contoh:
+
+```bash
+kubeadm join --token <token> <control-plane-host>:<control-plane-port> --discovery-token-ca-cert-hash sha256:<hash>
+```
+
+Jika kamu tidak memiliki _token_, kamu bisa mendapatkannya dengan menjalankan perintah berikut pada Node _control-plane_:
+
+```bash
+kubeadm token list
+```
+
+Keluarannya akan tampil seperti berikut:
+
+```console
+TOKEN                    TTL  EXPIRES                USAGES           DESCRIPTION            EXTRA GROUPS
+8ewj1p.9r9hcjoqgajrj4gi  23h  2018-06-12T02:51:28Z   authentication,  The default bootstrap  system:
+                                                     signing          token generated by     bootstrappers:
+                                                                      'kubeadm init'.        kubeadm:
+                                                                                             default-node-token
+```
+
+Secara bawaan, _token_ akan kedaluwarsa dalam 24 jam. Jika kamu menggabungkan Node ke klaster setelah _token_ kedaluwarsa,
+kamu dapat membuat _token_ baru dengan menjalankan perintah berikut pada Node _control-plane_:
+
+```bash
+kubeadm token create
+```
+
+Keluarannya akan tampil seperti berikut:
+
+```console
+5didvk.d09sbcov8ph2amjw
+```
+
+Jika kamu tidak memiliki nilai `--discovery-token-ca-cert-hash`, kamu bisa mendapatkannya dengan menjalankan perintah berantai berikut pada Node _control-plane_:
+
+```bash
+openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
+   openssl dgst -sha256 -hex | sed 's/^.* //'
+```
+
+Keluaran yang diberikan kurang lebih akan ditampilkan sebagai berikut:
+
+```console
+8cb2de97839780a412b93877f8507ad6c94f73add17d5d7058e91741c9d5ec78
+```
+
+{{< note >}}
+Untuk menentukan _tuple_ IPv6 untuk `<control-plane-host>:<control-plane-port>`, alamat IPv6 harus diapit dengan kurung siku, sebagai contoh: `[fd00::101]:2073`.
+{{< /note >}}
+
+Keluaran yang diberikan kurang lebih akan ditampilkan sebagai berikut:
+
+```
+[preflight] Running pre-flight checks
+
+... (log output of join workflow) ...
+
+Node join complete:
+* Certificate signing request sent to control-plane and response
+  received.
+* Kubelet informed of new secure connection details.
+
+Run 'kubectl get nodes' on control-plane to see this machine join.
+```
+
+Beberapa saat kemudian, kamu akan melihat Node tersebut pada keluaran dari `kubectl get nodes` ketika dijalankan pada Node _control-plane_.
+
+### (Opsional) Mengendalikan klaster dari mesin selain Node _control-plane_
+
+Untuk membuat kubectl bekerja pada mesin lain (mis. laptop) agar dapat berbicara dengan
+klaster, kamu harus menyalin berkas kubeconfig administrator dari Node _control-plane_
+ke mesin tersebut seperti berikut:
+
+```bash
+scp root@<control-plane-host>:/etc/kubernetes/admin.conf .
+kubectl --kubeconfig ./admin.conf get nodes
+```
+
+{{< note >}}
+Contoh di atas mengasumsikan akses SSH dinyalakan untuk _root_. Jika tidak berlaku
+demikian, kamu dapat menyalin berkas `admin.conf` agar dapat diakses oleh pengguna lain
+dan menjalankan `scp` menggunakan pengguna lain tersebut.
+
+Berkas `admin.conf` memberikan penggunanya privilese (_privilege_) _superuser_ terhadap klaster.
+Berkas ini harus digunakan seperlunya. Untuk pengguna biasa, direkomendasikan
+untuk membuat kredensial unik dengan privilese yang di-_whitelist_. Kamu dapat melakukan
+ini dengan perintah `kubeadm alpha kubeconfig user --client-name <CN>`.
+Perintah tersebut akan mencetak berkas KubeConfig ke STDOUT yang harus kamu simpan
+ke dalam sebuah berkas dan mendistribusikannya pada para pengguna. Setelah itu, berikan
+privilese yang di-_whitelist_ menggunakan `kubectl create (cluster)rolebinding`.
+{{< /note >}}
+
+### (Opsional) Memproksi API Server ke localhost
+
+Jika kamu ingin terhubung dengan API Server dari luar klaster, kamu dapat menggunakan
+`kubectl proxy`:
+
+```bash
+scp root@<control-plane-host>:/etc/kubernetes/admin.conf .
+kubectl --kubeconfig ./admin.conf proxy
+```
+
+Kini kamu dapat mengakses API Server secara lokal melalui `http://localhost:8001/api/v1`.
+
+## Pembongkaran
+
+Jika kamu menggunakan server sekali pakai untuk membuat klaster, sebagai uji coba, kamu dapat
+mematikannya tanpa perlu melakukan pembongkaran. Kamu dapat menggunakan
+`kubectl config delete-cluster` untuk menghapus referensi lokal ke
+klaster.
+
+Namun, jika kamu ingin mengatur ulang klaster secara lebih rapi, pertama-tama kamu
+harus [menguras (_drain_) Node](/docs/reference/generated/kubectl/kubectl-commands#drain)
+dan memastikan Node sudah kosong, kemudian mengembalikan pengaturan pada Node kembali seperti semula.
+
+### Menghapus Node
+
+Sambil berbicara dengan Node _control-plane_ menggunakan kredensial yang sesuai, jalankan:
+
+```bash
+kubectl drain <node name> --delete-local-data --force --ignore-daemonsets
+kubectl delete node <node name>
+```
+
+Lalu, pada Node yang dihapus, atur ulang semua kondisi `kubeadm` yang telah dipasang:
+
+```bash
+kubeadm reset
+```
+
+Proses pengaturan ulang tidak mengatur ulang atau membersihkan aturan iptables maupun tabel IPVS. Jika kamu ingin mengatur ulang iptables, kamu harus melakukannya secara manual:
+
+```bash
+iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
+```
+
+Jika kamu ingin mengatur ulang tabel IPVS, kamu harus menjalankan perintah berikut:
+
+```bash
+ipvsadm -C
+```
+
+Jika kamu ingin mengulang dari awal, cukup jalankan `kubeadm init` atau `kubeadm join` dengan
+argumen yang sesuai.
+
+### Membersihkan _control plane_
+
+Kamu dapat menggunakan `kubeadm reset` pada hos _control plane_ untuk memicu pembersihan
+_best-effort_.
+
+Lihat dokumentasi referensi [`kubeadm reset`](/docs/reference/setup-tools/kubeadm/kubeadm-reset/)
+untuk informasi lebih lanjut mengenai sub-perintah ini dan
+opsinya.
+
+{{% /capture %}}
+
+{{% capture discussion %}}
+
+## Selanjutnya
+
+* Pastikan klaster berjalan dengan benar menggunakan [Sonobuoy](https://github.com/heptio/sonobuoy)
+* Lihat [Memperbarui klaster kubeadm](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)
+  untuk detail mengenai pembaruan klaster menggunakan `kubeadm`.
+* Pelajari penggunaan `kubeadm` lebih lanjut pada [dokumentasi referensi kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm)
+* Pelajari lebih lanjut mengenai [konsep-konsep](/docs/concepts/) Kubernetes dan [`kubectl`](/docs/user-guide/kubectl-overview/).
+* Lihat halaman [Cluster Networking](/docs/concepts/cluster-administration/networking/) untuk daftar
+  _add-on_ jaringan Pod yang lebih banyak.
+* Lihat [daftar _add-on_](/docs/concepts/cluster-administration/addons/) untuk
+  mengeksplorasi _add-on_ lainnya, termasuk perkakas untuk _logging_, _monitoring_, _network policy_, visualisasi &
+  pengendalian klaster Kubernetes.
+* Atur bagaimana klaster mengelola log untuk peristiwa-peristiwa klaster dan dari
+  aplikasi-aplikasi yang berjalan pada Pod.
+  Lihat [Arsitektur Logging](/docs/concepts/cluster-administration/logging/) untuk
+  gambaran umum tentang hal-hal yang terlibat.
+
+### Umpan balik
+
+* Untuk masalah kutu (_bug_), kunjungi [kubeadm GitHub issue tracker](https://github.com/kubernetes/kubeadm/issues)
+* Untuk dukungan, kunjungi kanal Slack
+  [#kubeadm](https://kubernetes.slack.com/messages/kubeadm/)
+* Kanal Slack umum pengembangan SIG Cluster Lifecycle:
+  [#sig-cluster-lifecycle](https://kubernetes.slack.com/messages/sig-cluster-lifecycle/)
+* SIG Cluster Lifecycle [SIG information](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle#readme)
+* Milis SIG Cluster Lifecycle:
+  [kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle)
+
+## Kebijakan _version skew_
+
+`kubeadm` versi v{{< skew latestVersion >}} dapat men-_deploy_ klaster dengan _control plane_ versi v{{< skew latestVersion >}} atau v{{< skew prevMinorVersion >}}.
+`kubeadm` v{{< skew latestVersion >}} juga dapat memperbarui klaster yang dibuat dengan kubeadm v{{< skew prevMinorVersion >}}.
+
+Karena kita tidak dapat memprediksi masa depan, CLI kubeadm v{{< skew latestVersion >}} mungkin dapat atau mungkin tidak dapat men-_deploy_ klaster v{{< skew nextMinorVersion >}}.
+
+Sumber daya berikut menyediakan informasi lebih lanjut mengenai _version skew_ yang didukung antara kubelet dan _control plane_, serta komponen Kubernetes lainnya:
+
+* [Kebijakan versi dan _version skew_ Kubernetes](/docs/setup/release/version-skew-policy/)
+* [Panduan instalasi](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-kubeadm-kubelet-and-kubectl) spesifik untuk kubeadm
+
+## Keterbatasan
+
+### Ketahanan klaster
+
+Klaster yang dibuat pada panduan ini hanya memiliki Node _control-plane_ tunggal, dengan basis data etcd tunggal
+yang berjalan di atasnya. Hal ini berarti jika terjadi kegagalan pada Node _control-plane_, klaster dapat kehilangan
+data dan mungkin harus dibuat kembali dari awal.
+
+Solusi:
+
+* Lakukan [_back up_ etcd](https://coreos.com/etcd/docs/latest/admin_guide.html) secara reguler. Direktori data
+  etcd yang dikonfigurasi oleh kubeadm berada di `/var/lib/etcd` pada Node _control-plane_.
+
+* Gunakan banyak Node _control-plane_. Kamu dapat membaca
+  [Opsi untuk topologi dengan ketersediaan tinggi](/docs/setup/production-environment/tools/kubeadm/ha-topology/) untuk memilih topologi
+  klaster yang menyediakan ketersediaan lebih tinggi.
+
+### Kompatibilitas platform
+
+_Package_ deb/rpm dan _binary_ kubeadm dibuat untuk amd64, arm (32-bit), arm64, ppc64le, dan s390x
+mengikuti [proposal multi-platform](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/multi-platform.md).
+
+_Image_ kontainer _multiplatform_ untuk _control plane_ dan _addon_ juga telah didukung sejak v1.12.
+
+Hanya beberapa penyedia jaringan yang menawarkan solusi untuk seluruh platform.
Silakan merujuk pada daftar
+penyedia jaringan di atas atau dokumentasi dari masing-masing penyedia untuk mencari tahu apakah penyedia tersebut
+mendukung platform pilihanmu.
+
+## Penyelesaian masalah
+
+Jika kamu menemui kesulitan dengan kubeadm, silakan merujuk pada [dokumen penyelesaian masalah](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/).
+
+{{% /capture %}}
diff --git a/content/id/docs/tasks/_index.md b/content/id/docs/tasks/_index.md
index 9e213d5a99..8ef5cf36bc 100644
--- a/content/id/docs/tasks/_index.md
+++ b/content/id/docs/tasks/_index.md
@@ -1,21 +1,21 @@
---
-title: Tugas (Tasks)
+title: Tugas
main_menu: true
weight: 50
-content_template: templates/concept
+content_type: concept
---
{{< toc >}}
-{{% capture overview %}}
+
Bagian dokumentasi Kubernetes ini berisi halaman-halaman yang
perlihatkan bagaimana melakukan setiap tugas (_task_). Halaman tugas menunjukkan
cara melakukan satu hal saja, biasanya dengan memberikan urutan langkah
pendek.
-{{% /capture %}}
-{{% capture body %}}
+
+
## Antarmuka Pengguna Berbasis Web (Dashboard)
@@ -84,11 +84,12 @@ oleh Node dalam sebuah klaster.
Mengkonfigurasi dan menjadwalkan _HugePages_ sebagai sumber daya yang dapat
dijadwalkan dalam sebuah klaster.
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
Jika kamu ingin menulis halaman tugas (_task_), silakan lihat
[Membuat Dokumentasi _Pull Request_](/docs/home/contribute/create-pull-request/).
-{{% /capture %}}
+
diff --git a/content/id/docs/tasks/access-application-cluster/access-cluster.md b/content/id/docs/tasks/access-application-cluster/access-cluster.md
index cdb8a70962..148f402402 100644
--- a/content/id/docs/tasks/access-application-cluster/access-cluster.md
+++ b/content/id/docs/tasks/access-application-cluster/access-cluster.md
@@ -1,17 +1,17 @@
---
title: Mengakses Klaster
weight: 20
-content_template: templates/concept
+content_type: concept
---
-{{% capture overview %}}
+
Topik ini membahas tentang berbagai cara untuk berinteraksi dengan klaster.
-{{% /capture %}}
-{{% capture body %}}
+
+
## Mengakses untuk pertama kalinya dengan kubectl
@@ -340,4 +340,4 @@ Ada beberapa proksi berbeda yang mungkin kamu temui saat menggunakan Kubernetes:
Pengguna Kubernetes biasanya tidak perlu khawatir tentang apa pun selain dua jenis pertama. Admin klaster
biasanya akan memastikan bahwa tipe yang terakhir telah diatur dengan benar.
-{{% /capture %}}
+
diff --git a/content/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md b/content/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md
index 23f7dbd3fc..b2b80aacba 100644
--- a/content/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md
+++ b/content/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md
@@ -1,6 +1,6 @@
---
title: Mengkonfigurasi Akses ke Banyak Klaster
-content_template: templates/task
+content_type: task
weight: 30
card:
name: tasks
@@ -8,7 +8,7 @@ card:
---
-{{% capture overview %}}
+
Halaman ini menunjukkan bagaimana mengkonfigurasi akses ke banyak klaster dengan
menggunakan berkas (_file_) konfigurasi. Setelah semua klaster, pengguna, dan konteks didefinisikan di
@@ -21,15 +21,16 @@ berkas *kubeconfig*. Ini adalah cara umum untuk merujuk ke berkas konfigurasi.
Itu tidak berarti bahwa selalu ada berkas bernama `kubeconfig`.
{{< /note >}} -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Mendefinisikan klaster, pengguna, dan konteks @@ -366,13 +367,14 @@ export KUBECONFIG=$KUBECONFIG_SAVED $Env:KUBECONFIG=$ENV:KUBECONFIG_SAVED ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Mengatur Akses Cluster Menggunakan Berkas Kubeconfig](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) * [kubectl config](/docs/reference/generated/kubectl/kubectl-commands#config) -{{% /capture %}} + diff --git a/content/id/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md b/content/id/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md index e1404100b5..6db32dedf8 100644 --- a/content/id/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md +++ b/content/id/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md @@ -1,27 +1,28 @@ --- title: Menggunakan Port Forwarding untuk Mengakses Aplikasi di sebuah Klaster -content_template: templates/task +content_type: task weight: 40 min-kubernetes-server-version: v1.10 --- -{{% capture overview %}} + Halaman ini menunjukkan bagaimana menggunakan `kubectl port-forward` untuk menghubungkan sebuah server Redis yang sedang berjalan di sebuah klaster Kubernetes. Tipe dari koneksi ini dapat berguna untuk melakukan _debugging_ basis data. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * Install [redis-cli](http://redis.io/topics/rediscli). -{{% /capture %}} -{{% capture steps %}} + + ## Membuat Deployment dan Service Redis @@ -177,10 +178,10 @@ Halaman ini menunjukkan bagaimana menggunakan `kubectl port-forward` untuk mengh PONG ``` -{{% /capture %}} -{{% capture discussion %}} + + ## Diskusi @@ -193,9 +194,10 @@ Dukungan untuk protokol UDP bisa dilihat di [issue 47862](https://github.com/kubernetes/kubernetes/issues/47862). {{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Belajar lebih tentang [kubectl port-forward](/docs/reference/generated/kubectl/kubectl-commands/#port-forward). -{{% /capture %}} + diff --git a/content/id/docs/tasks/access-application-cluster/web-ui-dashboard.md b/content/id/docs/tasks/access-application-cluster/web-ui-dashboard.md index 752e43b2f9..a83605db40 100644 --- a/content/id/docs/tasks/access-application-cluster/web-ui-dashboard.md +++ b/content/id/docs/tasks/access-application-cluster/web-ui-dashboard.md @@ -1,6 +1,6 @@ --- title: Antarmuka Pengguna Berbasis Web (Dashboard) -content_template: templates/concept +content_type: concept weight: 10 card: name: tasks @@ -8,7 +8,7 @@ card: title: Menggunakan Antarmuka Pengguna Berbasis Web Dashboard --- -{{% capture overview %}} + Dashboard adalah antarmuka pengguna Kubernetes. Kamu dapat menggunakan Dashboard untuk men-_deploy_ aplikasi yang sudah dikontainerisasi ke klaster Kubernetes, memecahkan masalah pada aplikasi kamu, dan mengatur sumber daya klaster. Kamu dapat menggunakan Dashboard untuk melihat ringkasan dari aplikasi yang sedang berjalan di klaster kamu, dan juga membuat atau mengedit objek individu sumber daya Kubernetes (seperti Deployment, Job, DaemonSet, dll.). 
Sebagai contoh, kamu dapat mengembangkan sebuah Deployment, menginisiasi sebuah pembaruan bertahap (_rolling update_), memulai kembali sebuah Pod atau men-_deploy_ aplikasi baru menggunakan sebuah _deploy wizard_.
@@ -16,10 +16,10 @@ Dashboard juga menyediakan informasi tentang status dari sumber daya Kubernetes
![Antarmuka Pengguna Dashboard Kubernetes](/images/docs/ui-dashboard.png)
-{{% /capture %}}
-{{% capture body %}}
+
+
## Men-_deploy_ Antarmuka Pengguna Dashboard
@@ -158,11 +158,12 @@ Laman daftar dan detail Pod tertaut dengan laman penampil log (_log viewer_). Ka
![Logs viewer](/images/docs/ui-dashboard-logs-view.png)
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
Untuk informasi lebih lanjut, lihat
[Laman proyek Kubernetes Dashboard](https://github.com/kubernetes/dashboard).
-{{% /capture %}}
+
diff --git a/content/id/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md b/content/id/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md
new file mode 100644
index 0000000000..1d36713f7f
--- /dev/null
+++ b/content/id/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md
@@ -0,0 +1,374 @@
+---
+title: Mengatur Probe Liveness, Readiness dan Startup
+content_template: templates/task
+weight: 110
+---
+
+{{% capture overview %}}
+
+Laman ini memperlihatkan bagaimana cara untuk mengatur _probe liveness_, _readiness_, dan
+_startup_ untuk Container.
+
+_Probe liveness_ digunakan oleh [kubelet](/docs/admin/kubelet/) untuk mengetahui
+kapan perlu mengulang kembali (_restart_) sebuah Container. Sebagai contoh, _probe liveness_
+dapat mendeteksi _deadlock_, ketika aplikasi sedang berjalan tapi tidak dapat berfungsi dengan baik.
+Mengulang Container dengan _state_ tersebut dapat membantu ketersediaan aplikasi yang lebih baik
+walaupun ada kutu (_bug_).
+
+_Probe readiness_ digunakan oleh kubelet untuk mengetahui kapan sebuah Container telah siap untuk
+menerima lalu lintas jaringan (_traffic_). Suatu Pod dianggap siap saat semua Container di dalamnya telah
+siap. Sinyal ini berguna untuk mengontrol Pod-Pod mana yang digunakan sebagai _backend_ dari Service.
+Ketika Pod dalam kondisi tidak siap, Pod tersebut dihapus dari Service _load balancer_.
+
+_Probe startup_ digunakan oleh kubelet untuk mengetahui kapan sebuah aplikasi Container telah mulai berjalan.
+Jika _probe_ tersebut dinyalakan, pemeriksaan _liveness_ dan _readiness_ akan dinonaktifkan sampai
+_probe startup_ berhasil, untuk memastikan _probe-probe_ tersebut tidak mengganggu _startup_ dari aplikasi.
+Mekanisme ini dapat digunakan untuk menerapkan pemeriksaan _liveness_ pada Container yang lambat untuk dimulai,
+sehingga Container tidak dimatikan oleh kubelet sebelum sempat menyala dan berjalan.
+
+{{% /capture %}}
+
+{{% capture prerequisites %}}
+
+{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+
+{{% /capture %}}
+
+{{% capture steps %}}
+
+## Mendefinisikan perintah liveness
+
+Kebanyakan aplikasi yang telah berjalan dalam waktu lama pada akhirnya akan
+bertransisi ke _state_ yang rusak (_broken_), dan tidak dapat pulih kecuali diulang kembali.
+Kubernetes menyediakan _probe liveness_ untuk mendeteksi dan memperbaiki situasi tersebut.
+
+Pada latihan ini, kamu akan membuat Pod yang menjalankan Container dari image
+`k8s.gcr.io/busybox`.
Berikut ini adalah berkas konfigurasi untuk Pod tersebut: + +{{< codenew file="pods/probe/exec-liveness.yaml" >}} + +Pada berkas konfigurasi di atas, kamu dapat melihat bahwa Pod memiliki satu `Container`. +_Field_ `periodSeconds` menentukan bahwa kubelet harus melakukan _probe liveness_ setiap 5 detik. +_Field_ `initialDelaySeconds` memberitahu kubelet untuk menunggu 5 detik sebelum mengerjakan +_probe_ yang pertama. Untuk mengerjakan _probe_, kubelet menjalankan perintah `cat /tmp/healthy` +pada Container tujuan. Jika perintah berhasil, kode 0 akan dikembalikan, dan kubelet menganggap +Container sedang dalam kondisi hidup (_alive_) dan sehat (_healthy_). Jika perintah mengembalikan +kode selain 0, maka kubelet akan mematikan Container dan mengulangnya kembali. + +Saat dimulai, Container akan menjalankan perintah berikut: + +```shell +/bin/sh -c "touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600" +``` + +Container memiliki berkas `/tmp/healthy` pada saat 30 detik pertama setelah dijalankan. +Kemudian, perintah `cat /tmp/healthy` mengembalikan kode sukses. Namun setelah 30 detik, +`cat /tmp/healthy` mengembalikan kode gagal. + +Buatlah sebuah Pod: + +```shell +kubectl apply -f https://k8s.io/examples/pods/probe/exec-liveness.yaml +``` + +Dalam 30 detik pertama, lihatlah _event_ dari Pod: + +```shell +kubectl describe pod liveness-exec +``` + +Keluaran dari perintah tersebut memperlihatkan bahwa belum ada _probe liveness_ yang gagal: + +``` +FirstSeen LastSeen Count From SubobjectPath Type Reason Message +--------- -------- ----- ---- ------------- -------- ------ ------- +24s 24s 1 {default-scheduler } Normal Scheduled Successfully assigned liveness-exec to worker0 +23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Pulling pulling image "k8s.gcr.io/busybox" +23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Pulled Successfully pulled image "k8s.gcr.io/busybox" +23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Created Created container with docker id 86849c15382e; Security:[seccomp=unconfined] +23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Started Started container with docker id 86849c15382e +``` + +Setelah 35 detik, lihatlah lagi _event_ Pod tersebut: + +```shell +kubectl describe pod liveness-exec +``` + +Baris terakhir dari keluaran tersebut memperlihatkan pesan bahwa _probe liveness_ +mengalami kegagalan, dan Container telah dimatikan dan dibuat ulang. 
+
+```
+FirstSeen LastSeen Count From              SubobjectPath               Type      Reason     Message
+--------- -------- ----- ----              -------------               --------  ------     -------
+37s       37s      1     {default-scheduler }                          Normal    Scheduled  Successfully assigned liveness-exec to worker0
+36s       36s      1     {kubelet worker0} spec.containers{liveness}   Normal    Pulling    pulling image "k8s.gcr.io/busybox"
+36s       36s      1     {kubelet worker0} spec.containers{liveness}   Normal    Pulled     Successfully pulled image "k8s.gcr.io/busybox"
+36s       36s      1     {kubelet worker0} spec.containers{liveness}   Normal    Created    Created container with docker id 86849c15382e; Security:[seccomp=unconfined]
+36s       36s      1     {kubelet worker0} spec.containers{liveness}   Normal    Started    Started container with docker id 86849c15382e
+2s        2s       1     {kubelet worker0} spec.containers{liveness}   Warning   Unhealthy  Liveness probe failed: cat: can't open '/tmp/healthy': No such file or directory
+```
+
+Tunggu 30 detik lagi, dan verifikasi bahwa Container telah diulang kembali:
+
+```shell
+kubectl get pod liveness-exec
+```
+
+Keluaran perintah tersebut memperlihatkan bahwa jumlah `RESTARTS` telah meningkat:
+
+```
+NAME            READY     STATUS    RESTARTS   AGE
+liveness-exec   1/1       Running   1          1m
+```
+
+## Mendefinisikan probe liveness dengan permintaan HTTP
+
+Jenis kedua dari _probe liveness_ menggunakan sebuah permintaan GET HTTP. Berikut ini
+berkas konfigurasi untuk Pod yang menjalankan Container dari image `k8s.gcr.io/liveness`.
+
+{{< codenew file="pods/probe/http-liveness.yaml" >}}
+
+Pada berkas konfigurasi tersebut, kamu dapat melihat Pod memiliki sebuah Container.
+_Field_ `periodSeconds` menentukan bahwa kubelet harus mengerjakan _probe liveness_ setiap 3 detik.
+_Field_ `initialDelaySeconds` memberitahu kubelet untuk menunggu 3 detik sebelum mengerjakan
+_probe_ yang pertama. Untuk mengerjakan _probe_ tersebut, kubelet mengirimkan sebuah permintaan
+GET HTTP ke server yang sedang berjalan di dalam Container dan mendengarkan (_listen_) pada porta 8080.
+Jika _handler_ untuk _path_ `/healthz` pada server mengembalikan kode sukses, kubelet menganggap
+Container sedang dalam kondisi hidup dan sehat. Jika _handler_ mengembalikan kode gagal,
+kubelet mematikan Container dan mengulangnya kembali.
+
+Kode yang lebih besar atau sama dengan 200 dan kurang dari 400 mengindikasikan kesuksesan.
+Kode selain itu mengindikasikan kegagalan.
+
+Kamu dapat melihat kode program untuk server ini pada [server.go](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/test/images/agnhost/liveness/server.go).
+
+Untuk 10 detik pertama setelah Container hidup (_alive_), _handler_ `/healthz` mengembalikan
+status 200. Setelah itu, _handler_ mengembalikan status 500.
+
+```go
+http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
+    duration := time.Now().Sub(started)
+    if duration.Seconds() > 10 {
+        w.WriteHeader(500)
+        w.Write([]byte(fmt.Sprintf("error: %v", duration.Seconds())))
+    } else {
+        w.WriteHeader(200)
+        w.Write([]byte("ok"))
+    }
+})
+```
+
+Pemeriksaan kesehatan (_health check_) dilakukan kubelet 3 detik setelah Container dimulai,
+sehingga beberapa pemeriksaan pertama akan berhasil. Namun setelah 10 detik,
+pemeriksaan akan gagal, dan kubelet akan mematikan dan mengulang Container kembali.
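+
+Sebagai gambaran, berikut sketsa bagian `livenessProbe` yang dijelaskan di atas (nilai-nilainya diambil dari uraian sebelumnya; versi lengkapnya ada pada berkas contoh `pods/probe/http-liveness.yaml` yang dirujuk di atas):
+
+```yaml
+# Sketsa probe liveness HTTP: GET ke /healthz pada porta 8080 setiap 3 detik
+livenessProbe:
+  httpGet:
+    path: /healthz
+    port: 8080
+  initialDelaySeconds: 3
+  periodSeconds: 3
+```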
+
+Untuk mencoba pemeriksaan _liveness_ HTTP, marilah membuat sebuah Pod:
+
+```shell
+kubectl apply -f https://k8s.io/examples/pods/probe/http-liveness.yaml
+```
+
+Setelah 10 detik, lihatlah _event_ Pod untuk memverifikasi bahwa _probe liveness_
+telah gagal dan Container telah diulang kembali:
+
+```shell
+kubectl describe pod liveness-http
+```
+
+Untuk rilis sebelum v1.13 (termasuk v1.13), jika variabel lingkungan
+`http_proxy` (atau `HTTP_PROXY`) telah diatur pada Node di mana Pod
+berjalan, _probe liveness_ HTTP akan menggunakan proksi tersebut.
+Untuk rilis setelah v1.13, pengaturan variabel lingkungan pada proksi HTTP lokal
+tidak mempengaruhi _probe liveness_ HTTP.
+
+## Mendefinisikan probe liveness TCP
+
+Jenis ketiga dari _probe liveness_ menggunakan sebuah soket TCP. Dengan konfigurasi ini,
+kubelet akan mencoba untuk membuka soket pada Container kamu dengan porta tertentu.
+Jika koneksi dapat terbentuk dengan sukses, maka Container dianggap dalam kondisi sehat.
+Namun jika tidak berhasil terbentuk, maka Container dianggap gagal.
+
+{{< codenew file="pods/probe/tcp-liveness-readiness.yaml" >}}
+
+Seperti yang terlihat, konfigurasi untuk pemeriksaan TCP cukup mirip dengan
+pemeriksaan HTTP. Contoh ini menggunakan _probe readiness_ dan _liveness_.
+_Probe readiness_ yang pertama akan dikirimkan oleh kubelet, 5 detik setelah
+Container mulai dijalankan. Kubelet akan mencoba terhubung dengan Container
+`goproxy` pada porta 8080. Jika _probe_ berhasil, maka Pod akan ditandai menjadi
+_ready_. Pemeriksaan ini akan dilanjutkan oleh kubelet setiap 10 detik.
+
+Selain _probe readiness_, _probe liveness_ juga termasuk di dalam konfigurasi.
+_Probe liveness_ yang pertama akan dijalankan oleh kubelet, 15 detik setelah Container
+mulai dijalankan. Sama seperti _probe readiness_, kubelet akan mencoba untuk
+terhubung dengan Container `goproxy` pada porta 8080. Jika _probe liveness_ gagal,
+maka Container akan diulang kembali.
+
+Untuk mencoba pemeriksaan _liveness_ TCP, marilah membuat sebuah Pod:
+
+```shell
+kubectl apply -f https://k8s.io/examples/pods/probe/tcp-liveness-readiness.yaml
+```
+
+Setelah 15 detik, lihatlah _event_ Pod untuk memverifikasi _probe liveness_ tersebut:
+
+```shell
+kubectl describe pod goproxy
+```
+
+## Menggunakan sebuah porta dengan nama
+
+Kamu dapat menggunakan
+[ContainerPort](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#containerport-v1-core)
+dengan nama untuk melakukan pemeriksaan _liveness_ HTTP atau TCP:
+
+```yaml
+ports:
+- name: liveness-port
+  containerPort: 8080
+  hostPort: 8080
+
+livenessProbe:
+  httpGet:
+    path: /healthz
+    port: liveness-port
+```
+
+## Melindungi Container yang lambat untuk dimulai dengan probe startup {#mendefinisikan-probe-startup}
+
+Terkadang kamu harus berurusan dengan aplikasi peninggalan (_legacy_) yang
+memerlukan waktu tambahan untuk mulai berjalan pada saat pertama kali diinisialisasi.
+Pada kasus ini, cukup rumit untuk mengatur parameter _probe liveness_ tanpa
+mengompromikan respons yang cepat terhadap _deadlock_ yang memotivasi digunakannya
+_probe_ tersebut. Triknya adalah mengatur _probe startup_ dengan perintah yang sama,
+baik pemeriksaan HTTP ataupun TCP, dengan `failureThreshold * periodSeconds` yang
+mencukupi untuk kemungkinan waktu memulai yang terburuk.
+
+Sehingga, contoh sebelumnya menjadi:
+
+```yaml
+ports:
+- name: liveness-port
+  containerPort: 8080
+  hostPort: 8080
+
+livenessProbe:
+  httpGet:
+    path: /healthz
+    port: liveness-port
+  failureThreshold: 1
+  periodSeconds: 10
+
+startupProbe:
+  httpGet:
+    path: /healthz
+    port: liveness-port
+  failureThreshold: 30
+  periodSeconds: 10
+```
+
+Berkat _probe startup_, aplikasi akan memiliki waktu paling lama 5 menit (30 * 10 = 300 detik)
+untuk selesai memulai.
+Ketika _probe startup_ telah berhasil satu kali, maka _probe liveness_ akan
+mengambil alih untuk menyediakan respons cepat terhadap _deadlock_ Container.
+Jika _probe startup_ tidak pernah berhasil, maka Container akan dimatikan setelah
+300 detik dan perilakunya akan bergantung pada `restartPolicy` yang dimiliki Pod.
+
+## Mendefinisikan probe readiness
+
+Terkadang aplikasi tidak dapat melayani lalu lintas jaringan untuk sementara.
+Contohnya, aplikasi mungkin perlu untuk memuat data besar atau berkas konfigurasi
+saat dimulai, atau aplikasi bergantung pada layanan eksternal setelah dimulai.
+Pada kasus-kasus ini, kamu tidak ingin mematikan aplikasi, tetapi kamu juga tidak
+ingin mengirimkan permintaan ke aplikasi tersebut. Kubernetes menyediakan
+_probe readiness_ sebagai solusinya. Sebuah Pod dengan Container yang melaporkan
+dirinya tidak siap, tidak akan menerima lalu lintas jaringan dari Kubernetes Service.
+
+{{< note >}}
+_Probe readiness_ dijalankan di dalam Container selama siklus hidupnya.
+{{< /note >}}
+
+_Probe readiness_ memiliki pengaturan yang mirip dengan _probe liveness_. Perbedaan
+satu-satunya adalah kamu menggunakan _field_ `readinessProbe`, bukan _field_ `livenessProbe`.
+
+```yaml
+readinessProbe:
+  exec:
+    command:
+    - cat
+    - /tmp/healthy
+  initialDelaySeconds: 5
+  periodSeconds: 5
+```
+
+Pengaturan _probe readiness_ untuk HTTP dan TCP sama persis dengan
+pengaturan untuk _probe liveness_.
+
+_Probe readiness_ dan _liveness_ dapat digunakan secara bersamaan untuk
+Container yang sama. Apabila keduanya digunakan sekaligus, lalu lintas jaringan
+tidak akan sampai ke Container yang belum siap, dan Container akan diulang kembali
+(_restart_) saat mengalami kegagalan.
+
+## Mengatur Probe
+
+{{< comment >}}
+Nantinya beberapa bagian dari bab ini dapat berpindah ke topik konsep.
+{{< /comment >}}
+
+[Probe](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#probe-v1-core) memiliki
+beberapa _field_ yang dapat digunakan untuk mengendalikan pemeriksaan _liveness_ dan _readiness_
+secara presisi.
+
+* `initialDelaySeconds`: Durasi dalam detik setelah Container dimulai,
+sebelum _probe liveness_ atau _readiness_ diinisiasi. Nilai bawaannya adalah 0 detik. Nilai minimalnya adalah 0.
+* `periodSeconds`: Seberapa sering (dalam detik) _probe_ dijalankan. Nilai bawaannya adalah 10 detik.
+Nilai minimalnya adalah 1.
+* `timeoutSeconds`: Durasi dalam detik sebelum _probe_ dianggap mengalami _timeout_. Nilai bawaannya adalah 1 detik.
+Nilai minimalnya adalah 1.
+* `successThreshold`: Jumlah minimal sukses yang berurutan agar _probe_ dianggap berhasil
+setelah mengalami kegagalan. Nilai bawaannya adalah 1. Nilainya harus 1 untuk _liveness_.
+Nilai minimalnya adalah 1.
+* `failureThreshold`: Ketika sebuah Pod dimulai dan _probe_ mengalami kegagalan, Kubernetes
+akan mencoba beberapa kali sesuai nilai `failureThreshold` sebelum menyerah. Menyerah dalam
+kasus _probe liveness_ berarti Container akan diulang kembali. Untuk _probe readiness_, menyerah
+akan menandai Pod menjadi "tidak siap" (_Unready_).
Nilai bawaannya adalah 3. Nilai minimalnya adalah 1.
+
+[_Probe_ HTTP](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#httpgetaction-v1-core)
+memiliki _field-field_ tambahan yang bisa diatur melalui `httpGet`:
+
+* `host`: Nama dari host yang akan dihubungi, nilai bawaannya adalah IP dari Pod. Kamu mungkin
+juga ingin mengatur "Host" pada httpHeaders.
+* `scheme`: Skema yang digunakan untuk terhubung pada host (HTTP atau HTTPS). Nilai bawaannya adalah HTTP.
+* `path`: _Path_ untuk mengakses server HTTP.
+* `httpHeaders`: _Header_ khusus yang diatur dalam permintaan HTTP. HTTP memperbolehkan _header_ yang berulang.
+* `port`: Nama atau angka dari porta untuk mengakses Container. Angkanya harus berada di antara 1 sampai 65535.
+
+Untuk sebuah _probe_ HTTP, kubelet mengirimkan permintaan HTTP ke _path_ dan porta
+yang ditentukan untuk mengerjakan pemeriksaan. _Probe_ dikirimkan oleh kubelet ke alamat IP Pod,
+kecuali jika alamat tersebut digantikan oleh _field_ opsional `host` pada `httpGet`. Jika _field_ `scheme`
+diatur menjadi `HTTPS`, maka kubelet mengirimkan permintaan HTTPS dan melewatkan langkah verifikasi
+sertifikat. Pada kebanyakan skenario, kamu tidak memerlukan _field_ `host`.
+Berikut satu skenario yang memerlukan `host`. Misalkan Container mendengarkan permintaan
+melalui 127.0.0.1 dan _field_ `hostNetwork` pada Pod bernilai true. Maka `host`, melalui
+`httpGet`, harus diatur menjadi 127.0.0.1. Jika Pod kamu bergantung pada host virtual, yang
+merupakan kasus yang lebih umum, kamu sebaiknya tidak menggunakan `host`, tetapi mengatur _header_
+`Host` pada `httpHeaders`.
+
+Untuk _probe_ TCP, kubelet membuat koneksi _probe_ pada Node, bukan pada Pod, yang berarti bahwa
+kamu tidak dapat menggunakan nama Service di dalam parameter `host` karena kubelet tidak bisa
+me-_resolve_-nya.
+
+{{% /capture %}}
+
+{{% capture whatsnext %}}
+
+* Pelajari lebih lanjut tentang
+[Probe Container](/id/docs/concepts/workloads/pods/pod-lifecycle/#container-probes).
+
+Kamu juga dapat membaca rujukan API untuk:
+
+* [Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core)
+* [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core)
+* [Probe](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#probe-v1-core)
+
+{{% /capture %}}
diff --git a/content/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md b/content/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md
new file mode 100644
index 0000000000..22e13e2c8a
--- /dev/null
+++ b/content/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md
@@ -0,0 +1,284 @@
+---
+title: Mengatur Pod untuk Penyimpanan dengan PersistentVolume
+content_template: templates/task
+weight: 60
+---
+
+{{% capture overview %}}
+
+Laman ini akan menjelaskan bagaimana kamu dapat mengatur sebuah Pod dengan menggunakan
+{{< glossary_tooltip text="PersistentVolumeClaim" term_id="persistent-volume-claim" >}}
+untuk penyimpanan.
+Berikut ringkasan prosesnya:
+
+1. Kamu, sebagai seorang administrator klaster, membuat sebuah PersistentVolume yang didukung oleh penyimpanan
+fisik. Kamu tidak mengaitkan volume tersebut dengan Pod apapun.
+
+2. Kamu, sekarang mengambil peran sebagai seorang _developer_ / pengguna klaster, membuat sebuah
+PersistentVolumeClaim yang secara otomatis terikat dengan PersistentVolume yang sesuai.
+
+3. Kamu membuat sebuah Pod yang menggunakan PersistentVolumeClaim di atas untuk penyimpanan.
+
+{{% /capture %}}
+
+{{% capture prerequisites %}}
+
+* Kamu membutuhkan sebuah klaster Kubernetes yang hanya memiliki satu Node, dan
+alat baris perintah {{< glossary_tooltip text="kubectl" term_id="kubectl" >}}
+yang sudah diatur untuk berkomunikasi dengan klaster kamu. Jika kamu
+tidak memiliki sebuah klaster dengan Node tunggal, kamu dapat membuatnya dengan
+[Minikube](/docs/getting-started-guides/minikube).
+
+* Pahami materi di
+[Persistent Volumes](/id/docs/concepts/storage/persistent-volumes/).
+
+{{% /capture %}}
+
+{{% capture steps %}}
+
+## Membuat sebuah berkas index.html di dalam Node kamu
+
+Buka sebuah _shell_ ke Node tunggal di klaster kamu. Bagaimana kamu membuka sebuah _shell_ tergantung
+pada bagaimana kamu mengatur klaster kamu. Contohnya, jika kamu menggunakan Minikube, kamu
+dapat membuka sebuah _shell_ ke Node kamu dengan memasukkan `minikube ssh`.
+
+Di dalam _shell_ kamu pada Node itu, buat sebuah direktori dengan nama `/mnt/data`:
+
+```shell
+# Asumsikan Node kamu menggunakan "sudo" untuk menjalankan perintah
+# sebagai superuser
+sudo mkdir /mnt/data
+```
+
+
+Di dalam direktori `/mnt/data`, buat sebuah berkas dengan nama `index.html`:
+
+```shell
+# Di sini kembali diasumsikan bahwa Node kamu menggunakan "sudo" untuk menjalankan perintah
+# sebagai superuser
+sudo sh -c "echo 'Hello from Kubernetes storage' > /mnt/data/index.html"
+```
+
+{{< note >}}
+Jika Node kamu menggunakan alat untuk mengakses _superuser_ selain dengan `sudo`, kamu dapat
+membuat ini bekerja dengan mengganti `sudo` dengan nama dari alat tersebut.
+{{< /note >}}
+
+Uji bahwa berkas `index.html` ada:
+
+```shell
+cat /mnt/data/index.html
+```
+
+Keluarannya akan seperti ini:
+```
+Hello from Kubernetes storage
+```
+
+Sekarang kamu dapat menutup _shell_ di Node kamu.
+
+## Membuat sebuah PersistentVolume
+
+Pada latihan ini, kamu akan membuat sebuah *hostPath* PersistentVolume. Kubernetes mendukung
+hostPath untuk pengembangan dan pengujian di dalam klaster Node tunggal. Sebuah hostPath
+PersistentVolume menggunakan berkas atau direktori di dalam Node untuk meniru penyimpanan terhubung jaringan (NAS, _network-attached storage_).
+
+Di dalam klaster _production_, kamu tidak akan menggunakan hostPath. Sebagai gantinya, seorang administrator klaster
+akan menyediakan sumber daya jaringan seperti Google Compute Engine _persistent disk_,
+_NFS share_, atau sebuah Amazon Elastic Block Store volume. Administrator klaster juga dapat
+menggunakan [StorageClass](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#storageclass-v1-storage)
+untuk mengatur
+[_provisioning_ secara dinamis](https://kubernetes.io/blog/2016/10/dynamic-provisioning-and-storage-in-kubernetes).
+
+Berikut berkas konfigurasi untuk hostPath PersistentVolume:
+
+{{< codenew file="pods/storage/pv-volume.yaml" >}}
+
+Berkas konfigurasi tersebut menentukan bahwa volume berada di `/mnt/data` pada
+Node klaster. Konfigurasi tersebut juga menentukan ukuran sebesar 10 gibibyte dan
+mode akses `ReadWriteOnce`, yang berarti volume dapat dipasang sebagai
+_read-write_ oleh satu Node. Konfigurasi ini menggunakan [nama dari StorageClass](/id/docs/concepts/storage/persistent-volumes/#kelas)
+`manual` untuk PersistentVolume, yang akan digunakan untuk mengikat
+permintaan PersistentVolumeClaim ke PersistentVolume ini.
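+
+Sebagai gambaran, berikut sketsa isi berkas tersebut (nama `task-pv-volume` dan nilai-nilai lainnya mengikuti penjelasan di atas; rujuk berkas contoh `pods/storage/pv-volume.yaml` untuk versi resminya):
+
+```yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: task-pv-volume
+spec:
+  storageClassName: manual    # digunakan untuk mengikat PersistentVolumeClaim
+  capacity:
+    storage: 10Gi             # ukuran sebesar 10 gibibyte
+  accessModes:
+    - ReadWriteOnce           # dapat dipasang read-write oleh satu Node
+  hostPath:
+    path: "/mnt/data"         # direktori pada Node
+```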
+
+Buatlah PersistentVolume tersebut:
+
+```shell
+kubectl apply -f https://k8s.io/examples/pods/storage/pv-volume.yaml
+```
+
+Lihatlah informasi tentang PersistentVolume tersebut:
+
+```shell
+kubectl get pv task-pv-volume
+```
+
+Keluarannya menunjukkan bahwa PersistentVolume memiliki `STATUS` `Available`. Ini
+berarti PersistentVolume belum terikat ke PersistentVolumeClaim.
+
+    NAME             CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS      CLAIM     STORAGECLASS   REASON    AGE
+    task-pv-volume   10Gi       RWO           Retain          Available             manual                   4s
+
+## Membuat sebuah PersistentVolumeClaim
+
+Langkah selanjutnya adalah membuat sebuah PersistentVolumeClaim. Pod menggunakan PersistentVolumeClaim
+untuk meminta penyimpanan fisik. Pada latihan ini, kamu akan membuat sebuah PersistentVolumeClaim
+yang meminta sebuah volume minimal tiga gibibyte dengan mode akses _read-write_
+setidaknya untuk satu Node.
+
+Berikut berkas konfigurasi untuk PersistentVolumeClaim:
+
+{{< codenew file="pods/storage/pv-claim.yaml" >}}
+
+Buatlah PersistentVolumeClaim tersebut:
+
+    kubectl apply -f https://k8s.io/examples/pods/storage/pv-claim.yaml
+
+Setelah kamu membuat PersistentVolumeClaim, Kubernetes _control plane_ akan mencari
+PersistentVolume yang memenuhi persyaratan dari _claim_ tersebut. Jika
+_control plane_ menemukan PersistentVolume yang cocok dengan StorageClass yang sama,
+maka _claim_ tersebut akan diikat ke volume tersebut.
+
+Lihat kembali PersistentVolume:
+
+```shell
+kubectl get pv task-pv-volume
+```
+
+Sekarang keluarannya menunjukkan `STATUS` `Bound`.
+
+    NAME             CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS    CLAIM                   STORAGECLASS   REASON    AGE
+    task-pv-volume   10Gi       RWO           Retain          Bound     default/task-pv-claim   manual                   2m
+
+Lihat PersistentVolumeClaim:
+
+```shell
+kubectl get pvc task-pv-claim
+```
+
+Keluarannya menunjukkan bahwa PersistentVolumeClaim telah terikat dengan PersistentVolume
+`task-pv-volume`.
+
+    NAME            STATUS    VOLUME           CAPACITY   ACCESSMODES   STORAGECLASS   AGE
+    task-pv-claim   Bound     task-pv-volume   10Gi       RWO           manual         30s
+
+## Membuat sebuah Pod
+
+Langkah selanjutnya adalah membuat sebuah Pod yang akan menggunakan PersistentVolumeClaim sebagai volume.
+
+Berikut berkas konfigurasi untuk Pod tersebut:
+
+{{< codenew file="pods/storage/pv-pod.yaml" >}}
+
+Perhatikan bahwa berkas konfigurasi Pod menentukan sebuah PersistentVolumeClaim, tetapi
+tidak menentukan PersistentVolume. Dari sudut pandang Pod, _claim_ adalah volume.
+
+Buatlah Pod tersebut:
+
+```shell
+kubectl apply -f https://k8s.io/examples/pods/storage/pv-pod.yaml
+```
+
+Pastikan bahwa Container di dalam Pod berjalan:
+
+```shell
+kubectl get pod task-pv-pod
+```
+
+Dapatkan sebuah _shell_ ke Container yang sedang berjalan di Pod kamu:
+
+```shell
+kubectl exec -it task-pv-pod -- /bin/bash
+```
+
+Di dalam _shell_ tersebut, pastikan bahwa nginx menyajikan berkas `index.html` dari dalam
+volume hostPath:
+
+```shell
+# Pastikan kamu menjalankan 3 perintah ini di dalam shell root yang berasal dari
+# "kubectl exec" dari langkah sebelumnya
+apt update
+apt install curl
+curl http://localhost/
+```
+
+Keluarannya akan menunjukkan teks yang telah kamu tulis di berkas `index.html`
+di dalam volume hostPath:
+
+    Hello from Kubernetes storage
+
+
+Jika kamu melihat pesan tersebut, berarti kamu telah berhasil mengatur sebuah Pod
+untuk menggunakan penyimpanan dari PersistentVolumeClaim.
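+
+Jika mau, kamu juga dapat memeriksa kembali status keterikatannya dari luar Pod (kedua perintah ini sudah digunakan pada langkah-langkah di atas):
+
+```shell
+# Keduanya seharusnya menampilkan STATUS Bound
+kubectl get pv task-pv-volume
+kubectl get pvc task-pv-claim
+```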
+
+## Membersihkan
+
+Hapus Pod, PersistentVolumeClaim, dan PersistentVolume:
+
+```shell
+kubectl delete pod task-pv-pod
+kubectl delete pvc task-pv-claim
+kubectl delete pv task-pv-volume
+```
+
+Jika kamu belum memiliki _shell_ yang terbuka ke Node di klaster kamu,
+buka _shell_ baru dengan cara yang sama seperti yang telah kamu lakukan sebelumnya.
+
+Di dalam _shell_ Node kamu, hapus berkas dan direktori yang telah kamu buat:
+
+```shell
+# Asumsikan Node kamu menggunakan "sudo" untuk menjalankan perintah
+# sebagai superuser
+sudo rm /mnt/data/index.html
+sudo rmdir /mnt/data
+```
+
+Sekarang kamu dapat menutup _shell_ Node kamu.
+
+{{% /capture %}}
+
+
+{{% capture discussion %}}
+
+## Kontrol akses
+
+Penyimpanan yang telah dikonfigurasi dengan _group ID_ (GID) hanya memungkinkan akses menulis dari Pod yang menggunakan
+GID yang sama. GID yang tidak cocok atau hilang akan menyebabkan kesalahan izin ditolak (_permission denied_). Untuk mengurangi
+kebutuhan koordinasi dengan pengguna, administrator dapat memberikan anotasi GID pada sebuah PersistentVolume.
+Kemudian GID tersebut akan otomatis ditambahkan ke Pod manapun yang menggunakan PersistentVolume tersebut.
+
+Gunakan anotasi `pv.beta.kubernetes.io/gid` sebagai berikut:
+```yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: pv1
+  annotations:
+    pv.beta.kubernetes.io/gid: "1234"
+```
+Ketika sebuah Pod mengonsumsi PersistentVolume yang memiliki anotasi GID, anotasi GID tersebut
+diterapkan ke semua Container di dalam Pod dengan cara yang sama seperti GID yang ditentukan di dalam konteks keamanan (_security context_) Pod.
+Setiap GID, baik yang berasal dari anotasi PersistentVolume maupun dari Pod, diterapkan pada proses pertama yang dijalankan
+di setiap Container.
+
+{{< note >}}
+Ketika sebuah Pod mengonsumsi PersistentVolume, GID yang terkait dengan PersistentVolume
+tidak ada di dalam sumber daya Pod itu sendiri.
+{{< /note >}}
+
+{{% /capture %}}
+
+
+{{% capture whatsnext %}}
+
+* Pelajari lebih lanjut tentang [PersistentVolume](/id/docs/concepts/storage/persistent-volumes/).
+* Baca [dokumen perancangan Penyimpanan _Persistent_](https://git.k8s.io/community/contributors/design-proposals/storage/persistent-storage.md).
+
+### Referensi
+
+* [PersistentVolume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolume-v1-core)
+* [PersistentVolumeSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumespec-v1-core)
+* [PersistentVolumeClaim](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core)
+* [PersistentVolumeClaimSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaimspec-v1-core)
+
+{{% /capture %}}
diff --git a/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md b/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md
index 3d678ad75f..e5175ccf0e 100644
--- a/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md
+++ b/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md
@@ -1,24 +1,25 @@
---
title: Mengatur Pod untuk Menggunakan ConfigMap
-content_template: templates/task
+content_type: task
weight: 150
card:
name: tasks
weight: 50
---
-{{% capture overview %}}
+
ConfigMap mengizinkan kamu untuk memisahkan artifak-artifak konfigurasi dari konten _image_ untuk menjaga aplikasi yang dikontainerisasi tetap portabel.
Artikel ini menyediakan sekumpulan contoh penerapan yang mendemonstrasikan bagaimana cara membuat ConfigMap dan mengatur Pod menggunakan data yang disimpan di dalam ConfigMap.
-{{% /capture %}}
-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
-{{% /capture %}}
-{{% capture steps %}}
+
+
## Membuat ConfigMap
@@ -624,9 +625,9 @@ Ketika sebuah ConfigMap yang sudah dipasang pada sebuah volume diperbarui, kunci
Kontainer yang menggunakan ConfigMap sebagai volume [subPath](/docs/concepts/storage/volumes/#using-subpath) tidak akan menerima pembaruan ConfigMap.
{{< /note >}}
-{{% /capture %}}
-{{% capture discussion %}}
+
+
## Memahami ConfigMap dan Pod
@@ -676,9 +677,9 @@ data:
- Kamu tidak dapat menggunakan ConfigMap untuk {{< glossary_tooltip text="Pod statis" term_id="static-pod" >}}, karena Kubelet tidak mendukung hal ini.
-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
* Ikuti contoh penerapan pada dunia nyata [Mengatur Redis menggunakan ConfigMap](/docs/tutorials/configuration/configure-redis-using-configmap/).
-{{% /capture %}}
+
diff --git a/content/id/docs/tasks/configure-pod-container/security-context.md b/content/id/docs/tasks/configure-pod-container/security-context.md
new file mode 100644
index 0000000000..6ea554f2d6
--- /dev/null
+++ b/content/id/docs/tasks/configure-pod-container/security-context.md
@@ -0,0 +1,416 @@
+---
+title: Mengonfigurasi Konteks Keamanan untuk Pod atau Container
+content_template: templates/task
+weight: 80
+---
+
+{{% capture overview %}}
+
+Konteks keamanan (_security context_) menentukan wewenang (_privilege_) dan aturan kontrol akses untuk sebuah Pod
+atau Container. Aturan konteks keamanan meliputi, namun tidak terbatas pada, hal-hal berikut ini:
+
+* Kontrol akses bersifat diskresi: Izin untuk mengakses objek, seperti sebuah berkas, didasarkan pada
+[ID pengguna atau _user ID_ (UID) dan ID grup atau _group ID_ (GID)](https://wiki.archlinux.org/index.php/users_and_groups).
+
+* [_Security Enhanced Linux_ (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux): Di mana objek diberi label keamanan.
+
+* Menjalankan dengan wewenang (_privileged_) atau tanpa wewenang (_unprivileged_).
+
+* [Kapabilitas Linux (Linux Capabilities)](https://linux-audit.com/linux-capabilities-hardening-linux-binaries-by-removing-setuid/): Memberi sebuah proses beberapa wewenang, namun tidak semua wewenang dari pengguna _root_.
+
+* [AppArmor](/docs/tutorials/clusters/apparmor/): Menggunakan profil program untuk membatasi kemampuan dari masing-masing program.
+
+* [Seccomp](https://en.wikipedia.org/wiki/Seccomp): Menyaring panggilan sistem (_system calls_) dari suatu proses.
+
+* AllowPrivilegeEscalation: Mengontrol apakah suatu proses dapat memperoleh lebih banyak wewenang daripada proses induknya. Pilihan ini mengontrol secara langsung apakah opsi [`no_new_privs`](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt) diaktifkan pada proses dalam Container. AllowPrivilegeEscalation selalu aktif (_true_) ketika Container: 1) berjalan dengan wewenang ATAU 2) memiliki `CAP_SYS_ADMIN`.
+
+* readOnlyRootFilesystem: Menambatkan (_mount_) sistem berkas (_file system_) _root_ dari sebuah Container hanya sebatas untuk dibaca saja (_read-only_). Lihat sketsa gabungan setelah daftar ini.
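+
+Sebagai gambaran (sketsa ilustratif, bukan kutipan dari berkas contoh resmi), beberapa aturan di atas dapat digabungkan dalam satu bagian `securityContext` milik sebuah Container:
+
+```yaml
+securityContext:
+  allowPrivilegeEscalation: false   # proses tidak dapat memperoleh wewenang lebih dari induknya
+  readOnlyRootFilesystem: true      # sistem berkas root hanya untuk dibaca
+  capabilities:
+    drop: ["ALL"]                   # buang seluruh kapabilitas Linux bawaan
+```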
+
+Poin-poin di atas bukanlah kumpulan lengkap dari aturan konteks keamanan - silakan lihat [SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#securitycontext-v1-core) untuk daftar lengkapnya.
+
+Untuk informasi lebih lanjut tentang mekanisme keamanan pada Linux, silakan lihat
+[ikhtisar fitur keamanan pada Kernel Linux](https://www.linux.com/learn/overview-linux-kernel-security-features).
+
+{{% /capture %}}
+
+{{% capture prerequisites %}}
+
+{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+
+{{% /capture %}}
+
+{{% capture steps %}}
+
+## Mengatur konteks keamanan untuk Pod
+
+Untuk menentukan aturan keamanan pada Pod, masukkan bagian `securityContext`
+dalam spesifikasi Pod. Bagian `securityContext` adalah sebuah objek
+[PodSecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritycontext-v1-core).
+Aturan keamanan yang kamu tetapkan untuk Pod akan berlaku untuk semua Container dalam Pod tersebut.
+Berikut sebuah berkas konfigurasi untuk Pod yang memiliki `securityContext` dan volume `emptyDir`:
+
+{{< codenew file="pods/security/security-context.yaml" >}}
+
+Dalam berkas konfigurasi ini, bagian `runAsUser` menentukan bahwa dalam setiap Container pada
+Pod, semua proses dijalankan oleh ID pengguna 1000. Bagian `runAsGroup` menentukan grup utama dengan ID 3000 untuk
+semua proses dalam setiap Container pada Pod. Jika bagian ini diabaikan, maka ID grup utama dari Container
+akan menjadi _root_ (0). Berkas apa pun yang dibuat juga akan dimiliki oleh pengguna dengan ID 1000 dan grup dengan ID 3000 ketika `runAsGroup` ditentukan.
+Karena `fsGroup` ditentukan, semua proses milik Container juga merupakan bagian dari grup tambahan dengan ID 2000.
+Pemilik volume `/data/demo` dan berkas apa pun yang dibuat dalam volume tersebut adalah grup dengan ID 2000.
+
+Buatlah Pod tersebut:
+
+```shell
+kubectl apply -f https://k8s.io/examples/pods/security/security-context.yaml
+```
+
+Periksa apakah Container dari Pod sedang berjalan:
+
+```shell
+kubectl get pod security-context-demo
+```
+
+Masuk ke _shell_ dari Container yang sedang berjalan tersebut:
+
+```shell
+kubectl exec -it security-context-demo -- sh
+```
+
+Pada _shell_ kamu, lihat daftar proses yang berjalan:
+
+```shell
+ps
+```
+
+Keluarannya menunjukkan bahwa proses dijalankan oleh pengguna dengan ID 1000, yang merupakan nilai dari bagian `runAsUser`:
+
+```shell
+PID   USER     TIME  COMMAND
+    1 1000     0:00  sleep 1h
+    6 1000     0:00  sh
+...
+```
+
+Pada _shell_ kamu, pindah ke direktori `/data`, dan lihat isinya:
+
+```shell
+cd /data
+ls -l
+```
+
+Keluarannya menunjukkan bahwa direktori `/data/demo` memiliki grup dengan ID 2000, yang merupakan
+nilai dari bagian `fsGroup`.
+
+```shell
+drwxrwsrwx 2 root 2000 4096 Jun  6 20:08 demo
+```
+
+Pada _shell_ kamu, pindah ke direktori `/data/demo`, dan buatlah sebuah berkas di dalamnya:
+
+```shell
+cd demo
+echo hello > testfile
+```
+
+Lihatlah daftar berkas dalam direktori `/data/demo`:
+
+```shell
+ls -l
+```
+
+Keluarannya menunjukkan bahwa `testfile` memiliki grup dengan ID 2000, yang merupakan nilai dari bagian `fsGroup`.
+
+```shell
+-rw-r--r-- 1 1000 2000 6 Jun  6 20:08 testfile
+```
+
+Jalankan perintah berikut ini:
+
+```shell
+$ id
+uid=1000 gid=3000 groups=2000
+```
+
+Kamu akan melihat bahwa nilai _gid_ adalah 3000, sama dengan bagian `runAsGroup`.
+Jika `runAsGroup` diabaikan, maka nilai _gid_ akan
+tetap bernilai 0 (_root_) dan proses akan dapat berinteraksi dengan berkas-berkas yang dimiliki oleh grup _root_ (0) dan yang memiliki
+izin grup untuk grup _root_ (0).
+
+Keluarlah dari _shell_ kamu:
+
+```shell
+exit
+```
+
+## Melakukan konfigurasi izin volume dan kebijakan perubahan kepemilikan untuk Pod
+
+{{< feature-state for_k8s_version="v1.18" state="alpha" >}}
+
+Secara bawaan, Kubernetes mengubah kepemilikan dan izin secara rekursif untuk konten masing-masing
+volume untuk mencocokkan `fsGroup` yang ditentukan dalam `securityContext` dari Pod pada saat volume itu
+ditambatkan (_mounted_). Untuk volume yang besar, memeriksa dan mengubah kepemilikan dan izin dapat memerlukan waktu yang sangat lama,
+sehingga memperlambat proses menjalankan Pod. Kamu dapat menggunakan bagian `fsGroupChangePolicy` dalam sebuah `securityContext`
+untuk mengontrol cara Kubernetes memeriksa dan mengelola kepemilikan dan izin
+untuk sebuah volume.
+
+**fsGroupChangePolicy** - `fsGroupChangePolicy` mendefinisikan perilaku untuk mengubah kepemilikan dan izin volume
+sebelum diekspos di dalam sebuah Pod. Bagian ini hanya berlaku untuk tipe volume yang mendukung
+`fsGroup` untuk mengontrol kepemilikan dan izin. Bagian ini memiliki dua nilai yang dapat dimasukkan:
+
+* _OnRootMismatch_: Hanya mengubah izin dan kepemilikan jika izin dan kepemilikan dari direktori _root_ tidak sesuai dengan izin volume yang diharapkan. Hal ini dapat membantu mempersingkat waktu yang diperlukan untuk mengubah kepemilikan dan izin sebuah volume.
+* _Always_: Selalu mengubah izin dan kepemilikan volume ketika volume sudah ditambatkan.
+
+Sebagai contoh:
+
+```yaml
+securityContext:
+  runAsUser: 1000
+  runAsGroup: 3000
+  fsGroup: 2000
+  fsGroupChangePolicy: "OnRootMismatch"
+```
+
+Ini adalah fitur alpha. Untuk menggunakannya, silakan aktifkan [gerbang fitur](/docs/reference/command-line-tools-reference/feature-gates/) `ConfigurableFSGroupPolicy` untuk kube-apiserver, kube-controller-manager, dan kubelet.
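+
+Sebagai gambaran saja (bentuk umum flag `--feature-gates`; sesuaikan dengan cara setiap komponen dijalankan pada klaster kamu):
+
+```shell
+# Bentuk umum flag; setiap komponen biasanya juga membutuhkan flag lain.
+kube-apiserver --feature-gates=ConfigurableFSGroupPolicy=true
+kube-controller-manager --feature-gates=ConfigurableFSGroupPolicy=true
+kubelet --feature-gates=ConfigurableFSGroupPolicy=true
+```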
+
+{{< note >}}
+Bagian ini tidak berpengaruh pada tipe volume yang bersifat sementara (_ephemeral_) seperti
+[`secret`](https://kubernetes.io/docs/concepts/storage/volumes/#secret),
+[`configMap`](https://kubernetes.io/docs/concepts/storage/volumes/#configmap),
+dan [`emptydir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir).
+{{< /note >}}
+
+
+## Mengatur konteks keamanan untuk Container
+
+Untuk menentukan aturan keamanan untuk suatu Container, sertakan bagian `securityContext`
+dalam manifes Container. Bagian `securityContext` adalah sebuah objek
+[SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core).
+Aturan keamanan yang kamu tentukan untuk Container hanya berlaku untuk
+Container secara individu, dan aturan tersebut menimpa aturan yang dibuat pada tingkat Pod apabila
+ada aturan yang tumpang tindih. Aturan pada Container tidak memengaruhi Volume dari Pod.
+
+Berikut berkas konfigurasi untuk Pod yang hanya memiliki satu Container. Keduanya, baik Pod
+dan Container memiliki bagian `securityContext` sebagai berikut:
+
+{{< codenew file="pods/security/security-context-2.yaml" >}}
+
+Buatlah Pod tersebut:
+
+```shell
+kubectl apply -f https://k8s.io/examples/pods/security/security-context-2.yaml
+```
+
+Periksa apakah Container dalam Pod sedang berjalan:
+
+```shell
+kubectl get pod security-context-demo-2
+```
+
+Masuk ke dalam _shell_ Container yang sedang berjalan tersebut:
+
+```shell
+kubectl exec -it security-context-demo-2 -- sh
+```
+
+Pada _shell_ kamu, lihat daftar proses yang sedang berjalan:
+
+```
+ps aux
+```
+
+Keluarannya menunjukkan bahwa proses dijalankan oleh pengguna dengan ID 2000, yang merupakan
+nilai dari `runAsUser` seperti yang telah ditentukan untuk Container tersebut. Nilai tersebut menimpa nilai ID 1000 yang
+ditentukan untuk Pod-nya.
+
+```
+USER  PID %CPU %MEM    VSZ   RSS TTY   STAT START   TIME COMMAND
+2000    1  0.0  0.0   4336   764 ?     Ss   20:36   0:00 /bin/sh -c node server.js
+2000    8  0.1  0.5 772124 22604 ?     Sl   20:36   0:00 node server.js
+...
+```
+
+Keluarlah dari _shell_ kamu:
+
+```shell
+exit
+```
+
+## Mengatur Kapabilitas untuk Container
+
+Dengan menggunakan [Kapabilitas Linux (Linux Capabilities)](http://man7.org/linux/man-pages/man7/capabilities.7.html),
+kamu dapat memberikan wewenang tertentu kepada suatu proses tanpa memberikan semua wewenang
+dari pengguna _root_. Untuk menambah atau menghapus Kapabilitas Linux pada suatu Container, masukkan
+bagian `capabilities` pada `securityContext` di manifes Container-nya.
+
+Pertama-tama, mari kita lihat apa yang terjadi ketika kamu tidak menyertakan bagian `capabilities`.
+Berikut ini adalah berkas konfigurasi yang tidak menambah atau mengurangi kapabilitas apa pun dari Container:
+
+{{< codenew file="pods/security/security-context-3.yaml" >}}
+
+Buatlah Pod tersebut:
+
+```shell
+kubectl apply -f https://k8s.io/examples/pods/security/security-context-3.yaml
+```
+
+Periksa apakah Container dari Pod tersebut sedang berjalan:
+
+```shell
+kubectl get pod security-context-demo-3
+```
+
+Masuk ke dalam _shell_ dari Container yang berjalan:
+
+```shell
+kubectl exec -it security-context-demo-3 -- sh
+```
+
+Dalam _shell_ tersebut, lihatlah daftar proses yang berjalan:
+
+```shell
+ps aux
+```
+
+Keluarannya menunjukkan ID dari proses atau _process IDs_ (PIDs) untuk Container tersebut:
+
+```shell
+USER  PID %CPU %MEM    VSZ   RSS TTY   STAT START   TIME COMMAND
+root    1  0.0  0.0   4336   796 ?     Ss   18:17   0:00 /bin/sh -c node server.js
+root    5  0.1  0.5 772124 22700 ?     Sl   18:17   0:00 node server.js
+```
+
+Dalam _shell_ kamu, lihat status dari proses dengan ID 1:
+
+```shell
+cd /proc/1
+cat status
+```
+
+Keluarannya menunjukkan _bitmap_ dari kapabilitas untuk proses tersebut:
+
+```
+...
+CapPrm: 00000000a80425fb
+CapEff: 00000000a80425fb
+...
+```
+
+Buatlah catatan untuk _bitmap_ dari kapabilitas tersebut, dan keluarlah dari _shell_ kamu:
+
+```shell
+exit
+```
+
+Berikutnya, jalankan Container yang sama seperti dengan Container sebelumnya, namun
+Container ini memiliki kapabilitas tambahan yang sudah ditentukan.
+
+Berikut ini adalah berkas konfigurasi untuk Pod yang hanya menjalankan satu Container.
+Konfigurasi ini menambahkan kapabilitas `CAP_NET_ADMIN` dan `CAP_SYS_TIME`:
+
+{{< codenew file="pods/security/security-context-4.yaml" >}}
+
+Buatlah Pod tersebut:
+
+```shell
+kubectl apply -f https://k8s.io/examples/pods/security/security-context-4.yaml
+```
+
+Masuk ke dalam _shell_ dari Container yang berjalan:
+
+```shell
+kubectl exec -it security-context-demo-4 -- sh
+```
+
+Di dalam _shell_ kamu, lihatlah kapabilitas dari proses dengan ID 1:
+
+```shell
+cd /proc/1
+cat status
+```
+
+Keluarannya menunjukkan _bitmap_ kapabilitas untuk proses tersebut:
+
+```shell
+...
+CapPrm: 00000000aa0435fb
+CapEff: 00000000aa0435fb
+...
+```
+
+Bandingkan kapabilitas dari kedua Container tersebut:
+
+```
+00000000a80425fb
+00000000aa0435fb
+```
+
+Dalam _bitmap_ kapabilitas pada Container pertama, bit ke-12 dan bit ke-25 tidak diatur. Sedangkan dalam Container kedua,
+bit ke-12 dan bit ke-25 diatur. Bit ke-12 adalah kapabilitas `CAP_NET_ADMIN`, dan bit ke-25 adalah kapabilitas `CAP_SYS_TIME`.
+Lihatlah [capability.h](https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h)
+untuk nilai dari konstanta kapabilitas-kapabilitas yang lainnya.
+
+{{< note >}}
+Konstanta kapabilitas Linux memiliki format `CAP_XXX`. Tetapi ketika kamu memasukkan daftar kapabilitas dalam manifes Container kamu, kamu harus menghilangkan bagian `CAP_` dari konstantanya. Misalnya, untuk menambahkan `CAP_SYS_TIME`, masukkan `SYS_TIME` ke dalam daftar kapabilitas Container kamu.
+{{< /note >}}
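+
+Sebagai gambaran tambahan (dengan asumsi utilitas `capsh` dari paket libcap tersedia; utilitas ini bukan bagian dari contoh resmi halaman ini), _bitmap_ tersebut dapat diterjemahkan menjadi nama-nama kapabilitas:
+
+```shell
+# Menerjemahkan bitmap kapabilitas menjadi daftar nama kapabilitas
+capsh --decode=00000000aa0435fb
+```
+
+Keluarannya akan memuat antara lain `cap_net_admin` dan `cap_sys_time`, sesuai dengan kedua bit yang dibahas di atas.
+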
+## Memberikan label SELinux pada sebuah Container
+
+Untuk memberikan label SELinux pada sebuah Container, masukkan bagian `seLinuxOptions` pada
+bagian `securityContext` dari manifes Pod atau Container kamu.
+Bagian `seLinuxOptions` adalah sebuah objek [SELinuxOptions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#selinuxoptions-v1-core).
+Berikut ini adalah contoh yang menerapkan sebuah level dari SELinux:
+
+```yaml
+...
+securityContext:
+  seLinuxOptions:
+    level: "s0:c123,c456"
+```
+
+{{< note >}}
+Untuk menetapkan label SELinux, modul keamanan SELinux harus dimuat terlebih dahulu pada sistem operasi dari host-nya.
+{{< /note >}}
+
+## Diskusi
+
+Konteks keamanan untuk sebuah Pod berlaku juga untuk Container yang berada dalam Pod tersebut dan juga untuk
+volume dari Pod tersebut jika ada. Secara khusus, `fsGroup` dan `seLinuxOptions`
+diterapkan pada volume seperti berikut:
+
+* `fsGroup`: Volume yang mendukung manajemen kepemilikan (_ownership_) akan dimodifikasi agar dapat dimiliki
+dan ditulis oleh ID grup (GID) yang disebutkan dalam `fsGroup`. Lihatlah
+[Dokumen Desain untuk Manajemen Kepemilikan](https://git.k8s.io/community/contributors/design-proposals/storage/volume-ownership-management.md)
+untuk lebih lanjut.
+
+* `seLinuxOptions`: Volume yang mendukung pelabelan SELinux akan dilabel ulang agar dapat diakses
+oleh label yang ditentukan pada `seLinuxOptions`. Biasanya kamu hanya
+perlu mengatur bagian `level`. Bagian ini menetapkan label
+[Keamanan multi-kategori (_Multi-Category Security_) (MCS)](https://selinuxproject.org/page/NB_MLS)
+yang diberikan kepada semua Container dalam Pod serta Volume yang ada di dalamnya.
+
+{{< warning >}}
+Setelah kamu menentukan label MCS untuk Pod, maka semua Pod dengan label yang sama dapat mengakses Volume tersebut. Jika kamu membutuhkan perlindungan antar Pod, kamu harus menetapkan label MCS yang unik untuk setiap Pod.
+{{< /warning >}}
+
+## Bersih-bersih (_Clean Up_)
+
+Hapus Pod-Pod tersebut:
+
+```shell
+kubectl delete pod security-context-demo
+kubectl delete pod security-context-demo-2
+kubectl delete pod security-context-demo-3
+kubectl delete pod security-context-demo-4
+```
+
+{{% /capture %}}
+
+{{% capture whatsnext %}}
+
+* [PodSecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritycontext-v1-core)
+* [SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core)
+* [Menyetel Docker dengan peningkatan keamanan terbaru](https://opensource.com/business/15/3/docker-security-tuning)
+* [Dokumen desain konteks keamanan](https://git.k8s.io/community/contributors/design-proposals/auth/security_context.md)
+* [Dokumen desain manajemen kepemilikan](https://git.k8s.io/community/contributors/design-proposals/storage/volume-ownership-management.md)
+* [Kebijakan keamanan Pod](/docs/concepts/policy/pod-security-policy/)
+* [Dokumen desain AllowPrivilegeEscalation](https://git.k8s.io/community/contributors/design-proposals/auth/no-new-privs.md)
+
+{{% /capture %}}
diff --git a/content/id/docs/tasks/example-task-template.md b/content/id/docs/tasks/example-task-template.md
index d5f9c8ca27..a1873501f3 100644
--- a/content/id/docs/tasks/example-task-template.md
+++ b/content/id/docs/tasks/example-task-template.md
@@ -1,10 +1,10 @@
---
title: Contoh Template Tugas (Task)
-content_template: templates/task
+content_type: task
toc_hide: true
---

-{{% capture overview %}}
+
{{< note >}}
Pastikan juga kamu [membuat isian di daftar isi](/docs/home/contribute/write-new-topic/#creating-an-entry-in-the-table-of-contents) untuk dokumen baru kamu.
@@ -12,41 +12,43 @@ Pastikan juga kamu [membuat isian di daftar isi](/docs/home/contribute/write-new

Halaman ini menunjukkan bagaimana ...

-{{% /capture %}}

-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

* {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
* Lakukan ini.
* Lakukan ini juga.

-{{% /capture %}}

-{{% capture steps %}}
+
+

## Menjalankan ...

1. Lakukan ini.
1. Selanjutnya lakukan ini. Bila mungkin silakan baca [penjelasan terkait](...).

-{{% /capture %}}

-{{% capture discussion %}}
+
+

## Memahami ...

**[Bagian opsional]**

Berikut ini hal-hal yang menarik untuk diketahui tentang langkah-langkah yang baru saja kamu lakukan.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

**[Bagian opsional]**

* Pelajari tentang [menulis topik baru](/docs/home/contribute/write-new-topic/).
* Lihat [menggunakan _template_ halaman - _template_ tugas](/docs/home/contribute/page-templates/#task_template) untuk mengetahui cara menggunakan _template_ ini.

-{{% /capture %}}
+
diff --git a/content/id/docs/tasks/inject-data-application/define-command-argument-container.md b/content/id/docs/tasks/inject-data-application/define-command-argument-container.md
new file mode 100644
index 0000000000..28a3a1d7e9
--- /dev/null
+++ b/content/id/docs/tasks/inject-data-application/define-command-argument-container.md
@@ -0,0 +1,156 @@
+---
+title: Mendefinisikan Perintah dan Argumen untuk sebuah Kontainer
+content_template: templates/task
+weight: 10
+---
+
+{{% capture overview %}}
+
+Laman ini menunjukkan bagaimana cara mendefinisikan perintah-perintah
+dan argumen-argumen saat kamu menjalankan Container
+dalam sebuah {{< glossary_tooltip term_id="Pod" >}}.
+
+{{% /capture %}}
+
+
+{{% capture prerequisites %}}
+
+{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+
+{{% /capture %}}
+
+
+{{% capture steps %}}
+
+## Mendefinisikan sebuah perintah dan argumen-argumen saat kamu membuat sebuah Pod
+
+Saat kamu membuat sebuah Pod, kamu dapat mendefinisikan sebuah perintah dan argumen-argumen untuk
+Container-Container yang berjalan di dalam Pod. Untuk mendefinisikan sebuah perintah, sertakan
+bidang `command` di dalam berkas konfigurasi. Untuk mendefinisikan argumen-argumen untuk perintah, sertakan
+bidang `args` di dalam berkas konfigurasi. Perintah dan argumen-argumen yang telah
+kamu definisikan tidak dapat diganti setelah Pod dibuat.
+
+Perintah dan argumen-argumen yang kamu definisikan di dalam berkas konfigurasi
+menggantikan perintah dan argumen-argumen bawaan yang disediakan oleh _image_ Container.
+Jika kamu mendefinisikan argumen-argumen, tetapi tidak mendefinisikan sebuah perintah, maka perintah bawaan digunakan
+dengan argumen-argumen baru kamu.
+
+{{< note >}}
+Bidang `command` menyerupai `entrypoint` di beberapa _runtime_ Container.
+Merujuk pada [catatan](#catatan) di bawah.
+{{< /note >}}
+
+Pada latihan ini, kamu akan membuat sebuah Pod baru yang menjalankan sebuah Container. Berkas konfigurasi
+untuk Pod mendefinisikan sebuah perintah dan dua argumen:
+
+{{< codenew file="pods/commands.yaml" >}}
+
+1. Buat sebuah Pod dengan berkas konfigurasi YAML:
+
+   ```shell
+   kubectl apply -f https://k8s.io/examples/pods/commands.yaml
+   ```
+
+2. Lihat daftar Pod yang sedang berjalan:
+
+   ```shell
+   kubectl get pods
+   ```
+
+   Keluaran menunjukkan bahwa Container yang berjalan di dalam Pod command-demo
+   telah selesai.
+
+3. Untuk melihat keluaran dari perintah yang berjalan di dalam Container, lihat log
+dari Pod tersebut:
+
+   ```shell
+   kubectl logs command-demo
+   ```
+
+   Keluaran menunjukkan nilai dari variabel lingkungan HOSTNAME dan KUBERNETES_PORT:
+
+   ```
+   command-demo
+   tcp://10.3.240.1:443
+   ```
+
+## Menggunakan variabel lingkungan untuk mendefinisikan argumen
+
+Dalam contoh sebelumnya, kamu mendefinisikan langsung argumen-argumen dengan
+menyediakan _string_. Sebagai alternatif dari menyediakan _string_ secara langsung,
+kamu dapat mendefinisikan argumen-argumen dengan menggunakan variabel lingkungan:
+
+```yaml
+env:
+- name: MESSAGE
+  value: "hello world"
+command: ["/bin/echo"]
+args: ["$(MESSAGE)"]
+```
+
+Ini berarti kamu dapat mendefinisikan sebuah argumen untuk sebuah Pod menggunakan
+salah satu teknik yang tersedia untuk mendefinisikan variabel-variabel lingkungan, termasuk
+[ConfigMap](/id/docs/tasks/configure-pod-container/configure-pod-configmap/)
+dan
+[Secret](/id/docs/concepts/configuration/secret/).
+
+{{< note >}}
+Variabel lingkungan muncul dalam tanda kurung, `"$(VAR)"`. Ini
+dibutuhkan agar variabel tersebut diperluas di bidang `command` atau `args`.
+{{< /note >}}
+
+## Menjalankan sebuah perintah di dalam shell
+
+Di beberapa kasus, kamu perlu menjalankan perintah kamu di dalam sebuah _shell_. Contohnya,
+perintah kamu mungkin terdiri dari beberapa perintah yang digabungkan, atau mungkin berupa
+skrip _shell_. Untuk menjalankan perintah kamu di sebuah _shell_, bungkus seperti ini:
+
+```yaml
+command: ["/bin/sh"]
+args: ["-c", "while true; do echo hello; sleep 10;done"]
+```
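+
+Sebagai gambaran utuh (manifes hipotetis; nama Pod dan Container di sini hanya ilustrasi, bukan bagian dari contoh resmi halaman ini), pola _shell_ di atas dapat dipakai dalam sebuah Pod lengkap:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: shell-demo            # nama hanya ilustrasi
+spec:
+  containers:
+  - name: shell-demo-container
+    image: debian
+    # Membungkus beberapa perintah dalam satu shell
+    command: ["/bin/sh"]
+    args: ["-c", "while true; do echo hello; sleep 10; done"]
+```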
+
+## Catatan
+
+Tabel ini merangkum nama-nama bidang yang digunakan oleh Docker dan Kubernetes.
+
+| Deskripsi                                 | Nama bidang pada Docker      | Nama bidang pada Kubernetes |
+|-------------------------------------------|------------------------------|-----------------------------|
+| Perintah yang dijalankan oleh Container   | Entrypoint                   | command                     |
+| Argumen yang diteruskan ke perintah       | Cmd                          | args                        |
+
+Saat kamu mengesampingkan Entrypoint dan Cmd bawaan,
+aturan-aturan ini berlaku:
+
+* Jika kamu tidak menyediakan `command` atau `args` untuk sebuah Container,
+maka `command` dan `args` yang didefinisikan di dalam _image_ Docker akan digunakan.
+
+* Jika kamu menyediakan `command` tetapi tidak menyediakan `args` untuk sebuah Container, maka hanya
+`command` yang disediakan yang akan digunakan. Entrypoint dan Cmd bawaan yang didefinisikan di dalam
+_image_ Docker diabaikan.
+
+* Jika kamu hanya menyediakan `args` untuk sebuah Container, Entrypoint bawaan yang didefinisikan di dalam
+_image_ Docker dijalankan dengan `args` yang kamu sediakan.
+
+* Jika kamu menyediakan `command` dan `args`, Entrypoint dan Cmd bawaan yang didefinisikan
+di dalam _image_ Docker diabaikan. `command` kamu akan dijalankan dengan `args` kamu.
+
+Berikut ini beberapa contoh:
+
+| Image Entrypoint   | Image Cmd        | Container command   | Container args     | Command run      |
+|--------------------|------------------|---------------------|--------------------|------------------|
+| `[/ep-1]`          | `[foo bar]`      | <not set>           | <not set>          | `[ep-1 foo bar]` |
+| `[/ep-1]`          | `[foo bar]`      | `[/ep-2]`           | <not set>          | `[ep-2]`         |
+| `[/ep-1]`          | `[foo bar]`      | <not set>           | `[zoo boo]`        | `[ep-1 zoo boo]` |
+| `[/ep-1]`          | `[foo bar]`      | `[/ep-2]`           | `[zoo boo]`        | `[ep-2 zoo boo]` |
+
+
+{{% /capture %}}
+
+{{% capture whatsnext %}}
+
+* Pelajari lebih lanjut tentang [mengatur Pod dan Container](/id/docs/tasks/).
+* Pelajari lebih lanjut tentang [menjalankan perintah di dalam sebuah Container](/id/docs/tasks/debug-application-cluster/get-shell-running-container/).
+* Lihat [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core).
+
+{{% /capture %}}
diff --git a/content/id/docs/tasks/inject-data-application/define-environment-variable-container.md b/content/id/docs/tasks/inject-data-application/define-environment-variable-container.md
index a9cce7b3e0..0f35ef27f7 100644
--- a/content/id/docs/tasks/inject-data-application/define-environment-variable-container.md
+++ b/content/id/docs/tasks/inject-data-application/define-environment-variable-container.md
@@ -1,24 +1,25 @@
---
title: Mendefinisikan Variabel Lingkungan untuk sebuah Kontainer
-content_template: templates/task
+content_type: task
weight: 20
---

-{{% capture overview %}}
+

Laman ini menunjukkan bagaimana cara untuk mendefinisikan variabel lingkungan (_environment variable_)
untuk sebuah Container di dalam sebuah Pod Kubernetes.

-{{% /capture %}}

-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}

-{{% /capture %}}

-{{% capture steps %}}
+
+

## Mendefinisikan sebuah variabel lingkungan untuk sebuah Container

@@ -108,12 +109,13 @@ spec:

Setelah dibuat, perintah `echo Warm greetings to The Most Honorable Kubernetes`
dijalankan di Container tersebut.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* Pelajari lebih lanjut tentang [variabel lingkungan](/docs/tasks/inject-data-application/environment-variable-expose-pod-information/).
* Pelajari tentang [menggunakan informasi rahasia sebagai variabel lingkungan](/docs/user-guide/secrets/#using-secrets-as-environment-variables). * Lihat [EnvVarSource](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#envvarsource-v1-core). -{{% /capture %}} + diff --git a/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md index d945e156b4..2139f51629 100644 --- a/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md +++ b/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -1,11 +1,11 @@ --- title: Menjalankan Tugas-Tugas Otomatis dengan CronJob min-kubernetes-server-version: v1.8 -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + Kamu dapat menggunakan {{< glossary_tooltip text="CronJob" term_id="cronjob" >}} untuk menjalankan {{< glossary_tooltip text="Job" term_id="job" >}} yang dijadwalkan berbasis waktu. Job akan berjalan seperti pekerjaan-pekerjaan [Cron](https://en.wikipedia.org/wiki/Cron) di Linux atau sistem UNIX. @@ -18,15 +18,16 @@ Karena itu, Job haruslah _idempotent._ Untuk informasi lanjut mengenai keterbatasan, lihat [CronJob](/docs/concepts/workloads/controllers/cron-jobs). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} -{{% /capture %}} -{{% capture steps %}} + + ## Membuat Sebuah CronJob @@ -211,4 +212,4 @@ _Field_ `.spec.successfulJobHistoryLimit` dan `.spec.failedJobHistoryLimit` juga _Field_ tersebut menentukan berapa banyak Job yang sudah selesai dan gagal yang harus disimpan. Secara bawaan, masing-masing _field_ tersebut disetel 3 dan 1. Mensetel batas ke `0` untuk menjaga tidak ada Job yang sesuai setelah Job tersebut selesai. -{{% /capture %}} + diff --git a/content/id/docs/tasks/tools/install-kubectl.md b/content/id/docs/tasks/tools/install-kubectl.md index fc9b672c5e..e4d0019c3e 100644 --- a/content/id/docs/tasks/tools/install-kubectl.md +++ b/content/id/docs/tasks/tools/install-kubectl.md @@ -1,6 +1,6 @@ --- title: Menginstal dan Menyiapkan kubectl -content_template: templates/task +content_type: task weight: 10 card: name: tasks @@ -8,15 +8,16 @@ card: title: Menginstal kubectl --- -{{% capture overview %}} + [Kubectl](/docs/user-guide/kubectl/) adalah alat baris perintah (_command line tool_) Kubernetes yang digunakan untuk menjalankan berbagai perintah untuk klaster Kubernetes. Kamu dapat menggunakan `kubectl` untuk men-_deploy_ aplikasi, mengatur sumber daya klaster, dan melihat log. Daftar operasi `kubectl` dapat dilihat di [Ikhtisar kubectl](/docs/reference/kubectl/overview/). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Kamu harus menggunakan kubectl dengan perbedaan maksimal satu versi minor dengan klaster kamu. Misalnya, klien v1.2 masih dapat digunakan dengan master v1.1, v1.2, dan 1.3. Menggunakan versi terbaru `kubectl` dapat menghindari permasalahan yang tidak terduga. -{{% /capture %}} -{{% capture steps %}} + + ## Menginstal kubectl pada Linux @@ -485,12 +486,13 @@ compinit {{% /tab %}} {{< /tabs >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Menginstal Minikube.](/docs/tasks/tools/install-minikube/) * Lihat [panduan persiapan](/docs/setup/) untuk mencari tahu tentang pembuatan klaster. 
* [Pelajari cara untuk menjalankan dan mengekspos aplikasimu.](/docs/tasks/access-application-cluster/service-access-application-cluster/)
* Jika kamu membutuhkan akses ke klaster yang tidak kamu buat, lihat [dokumen Berbagi Akses Klaster](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
* Baca [dokumen referensi kubectl](/docs/reference/kubectl/kubectl/)

-{{% /capture %}}
+
diff --git a/content/id/docs/tasks/tools/install-minikube.md b/content/id/docs/tasks/tools/install-minikube.md
index f0e676e091..342f05246a 100644
--- a/content/id/docs/tasks/tools/install-minikube.md
+++ b/content/id/docs/tasks/tools/install-minikube.md
@@ -1,19 +1,20 @@
---
title: Menginstal Minikube
-content_template: templates/task
+content_type: task
weight: 20
card:
  name: tasks
  weight: 10
---

-{{% capture overview %}}
+

Halaman ini menunjukkan cara instalasi [Minikube](/docs/tutorials/hello-minikube), sebuah alat untuk menjalankan sebuah klaster Kubernetes dengan satu Node pada mesin virtual yang ada di komputer kamu.

-{{% /capture %}}

-{{% capture prerequisites %}}
+
+## {{% heading "prerequisites" %}}
+

{{< tabs name="minikube_before_you_begin" >}}
{{% tab name="Linux" %}}
@@ -53,11 +54,11 @@ Hyper-V Requirements:     A hypervisor has been detected. Features required for
{{% /tab %}}
{{< /tabs >}}

-{{% /capture %}}

-{{% capture steps %}}

-# Menginstal minikube
+
+
+## Menginstal minikube

{{< tabs name="tab_with_md" >}}
{{% tab name="Linux" %}}

@@ -196,14 +197,6 @@ Untuk menginstal Minikube secara manual pada Windows, unduh [`minikube-windows-a
{{< /tabs >}}

-{{% /capture %}}
-
-{{% capture whatsnext %}}
-
-* [Menjalanakan Kubernetes secara lokal dengan Minikube](/docs/setup/learning-environment/minikube/)
-
-{{% /capture %}}
-
## Memastikan instalasi

Untuk memastikan keberhasilan kedua instalasi hypervisor dan Minikube, kamu bisa menjalankan perintah berikut untuk memulai sebuah klaster Kubernetes lokal:
@@ -254,3 +247,8 @@ maka kamu perlu membersihkan _state_ lokal Minikube:
```shell
minikube delete
```
+
+## {{% heading "whatsnext" %}}
+
+
+* [Menjalankan Kubernetes secara lokal dengan Minikube](/docs/setup/learning-environment/minikube/)
diff --git a/content/id/docs/tutorials/_index.md b/content/id/docs/tutorials/_index.md
index 5645744c39..1093644e15 100644
--- a/content/id/docs/tutorials/_index.md
+++ b/content/id/docs/tutorials/_index.md
@@ -1,20 +1,20 @@
---
-title: Tutorials
+title: Tutorial
main_menu: true
weight: 60
-content_template: templates/concept
+content_type: concept
---

-{{% capture overview %}}
+

Bagian ini membahas tentang tutorial Kubernetes. Tutorial berfungsi untuk memperlihatkan bagaimana
caranya mencapai suatu tujuan yang lebih dari sekedar [task](/docs/tasks/) sederhana. Biasanya, sebuah tutorial punya
beberapa bagian, masing-masing bagian terdiri dari langkah-langkah yang berurutan.
Sebelum melangkah lebih lanjut ke tutorial, sebaiknya tandai dulu halaman
[Kamus Istilah](/docs/reference/glossary/) untuk referensi nanti.

-{{% /capture %}}

-{{% capture body %}}
+
+

## Prinsip Dasar

@@ -64,12 +64,13 @@ Sebelum melangkah lebih lanjut ke tutorial, sebaiknya tandai dulu halaman [Kamus

* [Menggunakan Source IP](/docs/tutorials/services/source-ip/)

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

Tertarik menulis tutorial? Lihat [Menggunakan Template Halaman](/docs/home/contribute/page-templates/)
untuk info mengenai template dan ragam halaman tutorial.
-{{% /capture %}} + diff --git a/content/id/docs/tutorials/hello-minikube.md b/content/id/docs/tutorials/hello-minikube.md index b8281c4c87..f2588e776b 100644 --- a/content/id/docs/tutorials/hello-minikube.md +++ b/content/id/docs/tutorials/hello-minikube.md @@ -1,6 +1,6 @@ --- title: Halo Minikube -content_template: templates/tutorial +content_type: tutorial weight: 5 menu: main: @@ -13,7 +13,7 @@ card: weight: 10 --- -{{% capture overview %}} + Tutorial ini menunjukkan bagaimana caranya menjalankan aplikasi sederhana Node.js Halo Dunia di Kubernetes, dengan [Minikube](/docs/getting-started-guides/minikube) dan Katacoda. Katacoda menyediakan environment Kubernetes secara gratis di dalam browser. @@ -22,17 +22,19 @@ Katacoda menyediakan environment Kubernetes secara gratis di dalam browse Kamupun bisa mengikuti tutorial ini kalau sudah instalasi [Minikube di lokal](/docs/tasks/tools/install-minikube/) kamu. {{< /note >}} -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Deploy aplikasi halo dunia pada Minikube. * Jalankan aplikasinya. * Melihat log aplikasi. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Tutorial ini menyediakan image Kontainer yang dibuat melalui barisan kode berikut: @@ -42,9 +44,9 @@ Tutorial ini menyediakan image Kontainer yang dibuat melalui barisan kode beriku Untuk info lebih lanjut tentang perintah `docker build`, baca [dokumentasi Docker](https://docs.docker.com/engine/reference/commandline/build/). -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Membuat sebuah klaster Minikube @@ -259,12 +261,13 @@ Kamu juga boleh menghapus Minikube VM: minikube delete ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Pelajari lebih lanjut tentang [Deployment](/docs/concepts/workloads/controllers/deployment/). * Pelajari lebih lanjut tentang [Deploy aplikasi](/docs/user-guide/deploying-applications/). * Pelajari lebih lanjut tentang [Servis](/docs/concepts/services-networking/service/). 
-{{% /capture %}}
+
diff --git a/content/id/examples/pods/commands.yaml b/content/id/examples/pods/commands.yaml
new file mode 100644
index 0000000000..2327d25827
--- /dev/null
+++ b/content/id/examples/pods/commands.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: command-demo
+  labels:
+    purpose: demonstrate-command
+spec:
+  containers:
+  - name: command-demo-container
+    image: debian
+    command: ["printenv"]
+    args: ["HOSTNAME", "KUBERNETES_PORT"]
+  restartPolicy: OnFailure
diff --git a/content/id/examples/pods/probe/exec-liveness.yaml b/content/id/examples/pods/probe/exec-liveness.yaml
new file mode 100644
index 0000000000..07bf75f85c
--- /dev/null
+++ b/content/id/examples/pods/probe/exec-liveness.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    test: liveness
+  name: liveness-exec
+spec:
+  containers:
+  - name: liveness
+    image: k8s.gcr.io/busybox
+    args:
+    - /bin/sh
+    - -c
+    - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
+    livenessProbe:
+      exec:
+        command:
+        - cat
+        - /tmp/healthy
+      initialDelaySeconds: 5
+      periodSeconds: 5
diff --git a/content/id/examples/pods/probe/http-liveness.yaml b/content/id/examples/pods/probe/http-liveness.yaml
new file mode 100644
index 0000000000..670af18399
--- /dev/null
+++ b/content/id/examples/pods/probe/http-liveness.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    test: liveness
+  name: liveness-http
+spec:
+  containers:
+  - name: liveness
+    image: k8s.gcr.io/liveness
+    args:
+    - /server
+    livenessProbe:
+      httpGet:
+        path: /healthz
+        port: 8080
+        httpHeaders:
+        - name: Custom-Header
+          value: Awesome
+      initialDelaySeconds: 3
+      periodSeconds: 3
diff --git a/content/id/examples/pods/probe/tcp-liveness-readiness.yaml b/content/id/examples/pods/probe/tcp-liveness-readiness.yaml
new file mode 100644
index 0000000000..08fb77ff0f
--- /dev/null
+++ b/content/id/examples/pods/probe/tcp-liveness-readiness.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goproxy
+  labels:
+    app: goproxy
+spec:
+  containers:
+  - name: goproxy
+    image: k8s.gcr.io/goproxy:0.1
+    ports:
+    - containerPort: 8080
+    readinessProbe:
+      tcpSocket:
+        port: 8080
+      initialDelaySeconds: 5
+      periodSeconds: 10
+    livenessProbe:
+      tcpSocket:
+        port: 8080
+      initialDelaySeconds: 15
+      periodSeconds: 20
diff --git a/content/id/examples/pods/security/hello-apparmor.yaml b/content/id/examples/pods/security/hello-apparmor.yaml
new file mode 100644
index 0000000000..3e9d7e9dbb
--- /dev/null
+++ b/content/id/examples/pods/security/hello-apparmor.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: hello-apparmor
+  annotations:
+    # Menginstruksikan Kubernetes untuk menerapkan profil AppArmor "k8s-apparmor-example-deny-write".
+    # Perlu dicatat bahwa ini akan diabaikan jika Node Kubernetes tidak berjalan pada versi 1.4 atau yang lebih baru.
+    container.apparmor.security.beta.kubernetes.io/hello: localhost/k8s-apparmor-example-deny-write
+spec:
+  containers:
+  - name: hello
+    image: busybox
+    command: [ "sh", "-c", "echo 'Hello AppArmor!'
&& sleep 1h" ] diff --git a/content/id/examples/pods/security/security-context-2.yaml b/content/id/examples/pods/security/security-context-2.yaml new file mode 100644 index 0000000000..0e3185341e --- /dev/null +++ b/content/id/examples/pods/security/security-context-2.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: security-context-demo-2 +spec: + securityContext: + runAsUser: 1000 + containers: + - name: sec-ctx-demo-2 + image: gcr.io/google-samples/node-hello:1.0 + securityContext: + runAsUser: 2000 + allowPrivilegeEscalation: false diff --git a/content/id/examples/pods/security/security-context-3.yaml b/content/id/examples/pods/security/security-context-3.yaml new file mode 100644 index 0000000000..d68b2a783e --- /dev/null +++ b/content/id/examples/pods/security/security-context-3.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: security-context-demo-3 +spec: + containers: + - name: sec-ctx-3 + image: gcr.io/google-samples/node-hello:1.0 diff --git a/content/id/examples/pods/security/security-context-4.yaml b/content/id/examples/pods/security/security-context-4.yaml new file mode 100644 index 0000000000..d725308fec --- /dev/null +++ b/content/id/examples/pods/security/security-context-4.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: security-context-demo-4 +spec: + containers: + - name: sec-ctx-4 + image: gcr.io/google-samples/node-hello:1.0 + securityContext: + capabilities: + add: ["NET_ADMIN", "SYS_TIME"] diff --git a/content/id/examples/pods/security/security-context.yaml b/content/id/examples/pods/security/security-context.yaml new file mode 100644 index 0000000000..35cb1eeebe --- /dev/null +++ b/content/id/examples/pods/security/security-context.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + name: security-context-demo +spec: + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumes: + - name: sec-ctx-vol + emptyDir: {} + containers: + - name: sec-ctx-demo + image: busybox + command: [ "sh", "-c", "sleep 1h" ] + volumeMounts: + - name: sec-ctx-vol + mountPath: /data/demo + securityContext: + allowPrivilegeEscalation: false diff --git a/content/id/examples/pods/storage/pv-claim.yaml b/content/id/examples/pods/storage/pv-claim.yaml new file mode 100644 index 0000000000..b33f6faa4c --- /dev/null +++ b/content/id/examples/pods/storage/pv-claim.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: task-pv-claim +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi diff --git a/content/id/examples/pods/storage/pv-pod.yaml b/content/id/examples/pods/storage/pv-pod.yaml new file mode 100644 index 0000000000..0597be6876 --- /dev/null +++ b/content/id/examples/pods/storage/pv-pod.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: task-pv-pod +spec: + volumes: + - name: task-pv-storage + persistentVolumeClaim: + claimName: task-pv-claim + containers: + - name: task-pv-container + image: nginx + ports: + - containerPort: 80 + name: "http-server" + volumeMounts: + - mountPath: "/usr/share/nginx/html" + name: task-pv-storage diff --git a/content/id/examples/pods/storage/pv-volume.yaml b/content/id/examples/pods/storage/pv-volume.yaml new file mode 100644 index 0000000000..36fe3c5424 --- /dev/null +++ b/content/id/examples/pods/storage/pv-volume.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: task-pv-volume + labels: + type: local +spec: + storageClassName: manual + 
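+  # storageClassName "manual" dipasangkan dengan PersistentVolumeClaim di atas yang meminta kelas yang sama.
+  # hostPath hanya cocok untuk pengujian pada klaster dengan satu Node.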
capacity:
+    storage: 10Gi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: "/mnt/data"
diff --git a/content/it/docs/concepts/_index.md b/content/it/docs/concepts/_index.md
index f471f2cd79..89b80f409d 100644
--- a/content/it/docs/concepts/_index.md
+++ b/content/it/docs/concepts/_index.md
@@ -1,17 +1,17 @@
---
title: Concetti
main_menu: true
-content_template: templates/concept
+content_type: concept
weight: 40
---

-{{% capture overview %}}
+

La sezione Concetti ti aiuta a conoscere le parti del sistema Kubernetes e le astrazioni utilizzate da Kubernetes per rappresentare il tuo cluster e ti aiuta ad ottenere una comprensione più profonda di come funziona Kubernetes.

-{{% /capture %}}

-{{% capture body %}}
+
+

## Overview

@@ -66,12 +66,13 @@ I nodi di un cluster sono le macchine (VM, server fisici, ecc.) Che eseguono i f

* [Annotations](/docs/concepts/overview/working-with-objects/annotations/)

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

Se vuoi scrivere una pagina concettuale, vedi [Uso dei modelli di pagina](/docs/home/contribute/page-templates/)
per informazioni sul tipo di pagina di concetto e il modello di concetto.

-{{% /capture %}}
+
diff --git a/content/it/docs/concepts/architecture/cloud-controller.md b/content/it/docs/concepts/architecture/cloud-controller.md
index 866309ce2d..5d8c044199 100644
--- a/content/it/docs/concepts/architecture/cloud-controller.md
+++ b/content/it/docs/concepts/architecture/cloud-controller.md
@@ -1,10 +1,10 @@
---
title: Concetti alla base del Cloud Controller Manager
-content_template: templates/concept
+content_type: concept
weight: 30
---

-{{% capture overview %}}
+

Il concetto di CCM (cloud controller manager), da non confondere con il binario, è stato originariamente creato per consentire di sviluppare Kubernetes indipendentemente dall'implementazione dello specifico cloud provider. Il cloud controller manager viene eseguito insieme ad altri componenti principali come il Kubernetes controller manager, il server API e lo scheduler. Può anche essere avviato come addon di Kubernetes, nel qual caso viene eseguito su Kubernetes.

@@ -16,10 +16,10 @@ Ecco l'architettura di un cluster Kubernetes senza il gestore del controller clo

![Pre CCM Kube Arch](/images/docs/pre-ccm-arch.png)

-{{% /capture %}}

-{{% capture body %}}
+
+

## Architettura

@@ -242,4 +242,4 @@ I seguenti fornitori di cloud hanno una implementazione di CCM:

Le istruzioni complete per la configurazione e l'esecuzione del CCM sono fornite
[qui](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager).

-{{% /capture %}}
+
diff --git a/content/it/docs/concepts/architecture/master-node-communication.md b/content/it/docs/concepts/architecture/master-node-communication.md
index afbac85793..375c244a6c 100644
--- a/content/it/docs/concepts/architecture/master-node-communication.md
+++ b/content/it/docs/concepts/architecture/master-node-communication.md
@@ -1,11 +1,11 @@
---
draft: True
title: Comunicazione Master-Node
-content_template: templates/concept
+content_type: concept
weight: 20
---

-{{% capture overview %}}
+

Questo documento cataloga i percorsi di comunicazione tra il master (in realtà
l'apiserver) e il cluster Kubernetes. L'intento è di consentire agli utenti di
personalizzare la loro installazione per rafforzare la configurazione di rete in modo che
il cluster possa essere eseguito su una rete non affidabile (o su IP completamente pubblici
di un fornitore di servizi cloud).
-{{% /capture %}} -{{% capture body %}} + + ## Cluster to Master @@ -92,4 +92,4 @@ la connessione verrà crittografata, non fornirà alcuna garanzia di integrità. Queste connessioni ** non sono attualmente al sicuro ** da eseguire su non attendibili e / o reti pubbliche. -{{% /capture %}} + diff --git a/content/it/docs/concepts/architecture/nodes.md b/content/it/docs/concepts/architecture/nodes.md index f881e8a8eb..0494050420 100644 --- a/content/it/docs/concepts/architecture/nodes.md +++ b/content/it/docs/concepts/architecture/nodes.md @@ -1,11 +1,11 @@ --- draft: True title: Nodi -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Un nodo è una macchina worker in Kubernetes, precedentemente noto come `minion`. Un nodo può essere una VM o una macchina fisica, a seconda del cluster. Ogni nodo contiene @@ -14,10 +14,10 @@ componenti. I servizi su un nodo includono il [container runtime](/docs/concepts [The Kubernetes Node](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) sezione in documento di progettazione dell'architettura per maggiori dettagli. -{{% /capture %}} -{{% capture body %}} + + ## Node Status @@ -283,4 +283,4 @@ Il nodo è una risorsa di livello superiore nell'API REST di Kubernetes. Maggior L'oggetto API può essere trovato a: [Node API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#node-v1-core). -{{% /capture %}} + diff --git a/content/it/docs/concepts/cluster-administration/addons.md b/content/it/docs/concepts/cluster-administration/addons.md index 65fe5c582f..3a91ff7b93 100644 --- a/content/it/docs/concepts/cluster-administration/addons.md +++ b/content/it/docs/concepts/cluster-administration/addons.md @@ -1,10 +1,10 @@ --- draft: True title: Installazione dei componenti aggiuntivi -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + I componenti aggiuntivi estendono la funzionalità di Kubernetes. @@ -13,10 +13,10 @@ Questa pagina elenca alcuni componenti aggiuntivi disponibili e collegamenti all I componenti aggiuntivi in ogni sezione sono ordinati alfabeticamente - l'ordine non implica uno stato preferenziale. -{{% /capture %}} -{{% capture body %}} + + ## Networking and Network Policy @@ -49,4 +49,4 @@ qui ci sono molti altri componenti aggiuntivi documentati nella directory deprec Quelli ben mantenuti dovrebbero essere collegati qui. -{{% /capture %}} + diff --git a/content/it/docs/concepts/cluster-administration/certificates.md b/content/it/docs/concepts/cluster-administration/certificates.md index 65bf22cf76..a05a982ccb 100644 --- a/content/it/docs/concepts/cluster-administration/certificates.md +++ b/content/it/docs/concepts/cluster-administration/certificates.md @@ -1,20 +1,20 @@ --- draft: True title: Certificati -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Quando si utilizza l'autenticazione del certificato client, è possibile generare certificati manualmente tramite `easyrsa`,` openssl` o `cfssl`. -{{% /capture %}} -{{% capture body %}} + + ### easyrsa @@ -246,4 +246,4 @@ done. certificati x509 da utilizzare per l'autenticazione come documentato [here](/docs/tasks/tls/managing-tls-in-a-cluster). 
-{{% /capture %}} + diff --git a/content/it/docs/concepts/cluster-administration/cloud-providers.md b/content/it/docs/concepts/cluster-administration/cloud-providers.md index 393c7d3835..78e1f38bd2 100644 --- a/content/it/docs/concepts/cluster-administration/cloud-providers.md +++ b/content/it/docs/concepts/cluster-administration/cloud-providers.md @@ -1,17 +1,17 @@ --- draft: True title: Cloud Providers -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Questa pagina spiega come gestire Kubernetes in esecuzione su uno specifico fornitore di servizi cloud. -{{% /capture %}} -{{% capture body %}} + + ### kubeadm [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) è un'opzione popolare per la creazione di cluster di kuberneti. @@ -342,7 +342,7 @@ File `cloud.conf`: [kubenet]: https://kubernetes.io/docs/concepts/cluster-administration/network-plugins/#kubenet -{{% /capture %}} + ## OVirt diff --git a/content/it/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/it/docs/concepts/cluster-administration/cluster-administration-overview.md index e3b848693f..a7c9974350 100644 --- a/content/it/docs/concepts/cluster-administration/cluster-administration-overview.md +++ b/content/it/docs/concepts/cluster-administration/cluster-administration-overview.md @@ -1,16 +1,16 @@ --- draft: True title: Panoramica sull'amministrazione del cluster -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + La panoramica dell'amministrazione del cluster è per chiunque crei o gestisca un cluster Kubernetes. Presuppone una certa dimestichezza con i core Kubernetes [concetti](/docs/concepts/). -{{% /capture %}} -{{% capture body %}} + + ## Progettare un cluster Consulta le guide di [Setup](/docs/setup) per avere degli esempi su come pianificare, impostare e configurare cluster Kubernetes. Le soluzioni elencate in questo articolo sono chiamate *distribuzioni*. @@ -67,5 +67,5 @@ Nota: non tutte le distro vengono mantenute attivamente. Scegli le distro che so * [Registrazione e monitoraggio delle attività del cluster](/docs/concepts/cluster-administration/logging/) spiega come funziona il logging in Kubernetes e come implementarlo. -{{% /capture %}} + diff --git a/content/it/docs/concepts/cluster-administration/controller-metrics.md b/content/it/docs/concepts/cluster-administration/controller-metrics.md index ced1604da5..5cb6cee50e 100644 --- a/content/it/docs/concepts/cluster-administration/controller-metrics.md +++ b/content/it/docs/concepts/cluster-administration/controller-metrics.md @@ -1,16 +1,16 @@ --- draft: True title: Metriche del responsabile del controller -content_template: templates/concept +content_type: concept weight: 100 --- -{{% capture overview %}} + Le metriche del controller controller forniscono informazioni importanti sulle prestazioni e la salute di il responsabile del controller. -{{% /capture %}} -{{% capture body %}} + + ## Cosa sono le metriche del controller @@ -44,4 +44,4 @@ Le metriche sono emesse in [formato prometheus](https://prometheus.io/docs/instr In un ambiente di produzione è possibile configurare prometheus o altri strumenti di misurazione delle metriche per raccogliere periodicamente queste metriche e renderle disponibili in una sorta di database di serie temporali. 
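+
+A titolo di esempio (ipotizzando la porta 10252, storicamente la porta predefinita delle metriche del kube-controller-manager; verifica la configurazione del tuo cluster), le metriche possono essere lette direttamente:
+
+```shell
+curl http://localhost:10252/metrics
+```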
-{{% /capture %}}
+
diff --git a/content/it/docs/concepts/cluster-administration/federation.md b/content/it/docs/concepts/cluster-administration/federation.md
index 80e0d10b97..7f4bdb5998 100644
--- a/content/it/docs/concepts/cluster-administration/federation.md
+++ b/content/it/docs/concepts/cluster-administration/federation.md
@@ -1,11 +1,11 @@
---
draft: True
title: Federation
-content_template: templates/concept
+content_type: concept
weight: 80
---

-{{% capture overview %}}
+

{{< deprecationfilewarning >}}
{{< include "federation-deprecation-warning-note.md" >}}
@@ -13,9 +13,9 @@ weight: 80

Questa pagina spiega perché e come gestire più cluster di Kubernetes utilizzando
federazione.

-{{% /capture %}}

-{{% capture body %}}
+
+

## Perché la federation

La federation facilita la gestione di più cluster. Lo fa fornendo 2
@@ -170,9 +170,10 @@ Infine, se uno qualsiasi dei tuoi cluster richiederebbe più del numero massimo
potresti aver bisogno di più cluster. Kubernetes v1.3 supporta cluster di dimensioni fino a 1000 nodi. Kubernetes v1.8 supporta
cluster fino a 5000 nodi. Vedi [Costruire cluster di grandi dimensioni](/docs/setup/cluster-large/) per maggiori informazioni.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* Ulteriori informazioni sulla [Federazione proposta](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/multicluster/federation.md).
* Vedi questa [guida alla configurazione](/docs/tutorials/federation/set-up-cluster-federation-kubefed/) per la federazione dei cluster.
* Vedi questo [Kubecon2016 talk on federation](https://www.youtube.com/watch?v=pq9lbkmxpS8)
@@ -180,4 +181,4 @@ cluster fino a 5000 nodi. Vedi [Costruire cluster di grandi dimensioni](/docs/se
* Vedi questo [Kubecon2018 aggiornamento Europa su sig-multicluster](https://www.youtube.com/watch?v=vGZo5DaThQU)
* Vedi questo [Kubecon2018 Europe Federation-v2 presentazione prototipo](https://youtu.be/q27rbaX5Jis?t=7m20s)
* Vedi questo [Federation-v2 Userguide](https://github.com/kubernetes-sigs/federation-v2/blob/master/docs/userguide.md)
-{{% /capture %}}
+
diff --git a/content/it/docs/concepts/cluster-administration/kubelet-garbage-collection.md b/content/it/docs/concepts/cluster-administration/kubelet-garbage-collection.md
index 1aad1b22d9..10e0af08cc 100644
--- a/content/it/docs/concepts/cluster-administration/kubelet-garbage-collection.md
+++ b/content/it/docs/concepts/cluster-administration/kubelet-garbage-collection.md
@@ -1,21 +1,21 @@
---
draft: True
title: Configurazione della raccolta dati kubelet
-content_template: templates/concept
+content_type: concept
weight: 70
---

-{{% capture overview %}}
+

La garbage collection è una funzione utile di kubelet che pulisce le immagini inutilizzate e i contenitori
inutilizzati. Kubelet esegue la garbage collection dei contenitori ogni minuto e quella
delle immagini ogni cinque minuti.

Gli strumenti di garbage collection esterni non sono raccomandati in quanto questi strumenti possono potenzialmente
interrompere il comportamento di kubelet rimuovendo i contenitori che si prevede esistano.
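+
+A titolo puramente indicativo (le soglie mostrate sono solo valori di esempio), le soglie della garbage collection delle immagini si impostano tramite i flag del kubelet:
+
+```shell
+# Avvia la garbage collection delle immagini quando l'uso del disco supera l'85%
+# e riporta l'uso sotto l'80%
+kubelet --image-gc-high-threshold=85 --image-gc-low-threshold=80
+```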
-{{% /capture %}} -{{% capture body %}} + + ## Image Collection @@ -91,10 +91,11 @@ Compreso: | `--low-diskspace-threshold-mb` | `--eviction-hard` o` eviction-soft` | lo sfratto generalizza le soglie del disco ad altre risorse | | `--outofdisk-transition-frequency` | `--eviction-pressure-transition-period` | lo sfratto generalizza la transizione della pressione del disco verso altre risorse | -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Vedi [Configurazione della gestione delle risorse esterne](/docs/tasks/administration-cluster/out-of-resource/) per maggiori dettagli. -{{% /capture %}} + diff --git a/content/it/docs/concepts/cluster-administration/logging.md b/content/it/docs/concepts/cluster-administration/logging.md index 179339ec4c..ea7235d532 100644 --- a/content/it/docs/concepts/cluster-administration/logging.md +++ b/content/it/docs/concepts/cluster-administration/logging.md @@ -1,20 +1,20 @@ --- draft: True title: Log di registrazione -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + I log di applicazioni e sistemi possono aiutarti a capire cosa sta accadendo all'interno del tuo cluster. I log sono particolarmente utili per il debug dei problemi e il monitoraggio delle attività del cluster. La maggior parte delle applicazioni moderne ha una sorta di meccanismo di registrazione; in quanto tale, la maggior parte dei motori di container sono progettati allo stesso modo per supportare alcuni tipi di registrazione. Il metodo di registrazione più semplice e più accettato per le applicazioni containerizzate è scrivere sull'output standard e sui flussi di errore standard. Tuttavia, la funzionalità nativa fornita da un motore contenitore o dal runtime di solito non è sufficiente per una soluzione di registrazione completa. Ad esempio, se un container si arresta in modo anomalo, un pod viene rimosso, o un nodo muore, di solito vuoi comunque accedere ai log dell'applicazione. Pertanto, i registri devono avere una memoria e un ciclo di vita separati, indipendenti da nodi, pod o contenitori. Questo concetto è chiamato _cluster-logging_. La registrazione a livello di cluster richiede un back-end separato per archiviare, analizzare e interrogare i registri. Kubernetes non fornisce alcuna soluzione di archiviazione nativa per i dati di registro, ma è possibile integrare molte soluzioni di registrazione esistenti nel proprio cluster Kubernetes. -{{% /capture %}} -{{% capture body %}} + + Le architetture di registrazione a livello di cluster sono descritte nel presupposto che un back-end per la registrazione è presente all'interno o all'esterno del cluster. Se tu sei @@ -256,4 +256,4 @@ contenitore. ogni applicazione; tuttavia, l'implementazione di un tale meccanismo di registrazione è al di fuori dello scopo di Kubernetes. -{{% /capture %}} + diff --git a/content/it/docs/concepts/cluster-administration/manage-deployment.md b/content/it/docs/concepts/cluster-administration/manage-deployment.md index fabf9ffe35..5e8886ec6f 100644 --- a/content/it/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/it/docs/concepts/cluster-administration/manage-deployment.md @@ -1,21 +1,21 @@ --- draft: True title: Gestione delle risorse -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Hai distribuito la tua applicazione e l'hai esposta tramite un servizio. Ora cosa? 
Kubernetes fornisce una serie di strumenti per aiutarti a gestire la distribuzione delle applicazioni,
compreso il ridimensionamento e l'aggiornamento. Tra le caratteristiche che discuteremo in modo più
approfondito ci sono [file di configurazione](/docs/concepts/configuration/overview/) e
[labels](/docs/concepts/overview/working-with-objects/labels/).

-{{% /capture %}}

-{{% capture body %}}
+
+

## Organizzazione delle configurazioni delle risorse

@@ -437,11 +437,12 @@ dietro la scena. Garantisce che solo un certo numero di vecchie repliche potrebb
aggiornate e solo un certo numero di nuove repliche può essere creato sopra il numero desiderato di
pod. Per ulteriori informazioni su di esso, visitare [Pagina di distribuzione](/docs/concepts/workloads/controller/deployment/).

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

- [Scopri come usare `kubectl` per l'introspezione e il debug delle applicazioni](/docs/tasks/debug-application-cluster/debug-application-introspection/)
- [Best practice e suggerimenti sulla configurazione](/docs/concepts/configuration/overview/)

-{{% /capture %}}
+
diff --git a/content/it/docs/concepts/cluster-administration/networking.md b/content/it/docs/concepts/cluster-administration/networking.md
index 77829b940f..4c83d201e0 100644
--- a/content/it/docs/concepts/cluster-administration/networking.md
+++ b/content/it/docs/concepts/cluster-administration/networking.md
@@ -1,11 +1,11 @@
---
draft: True
title: Cluster Networking
-content_template: templates/concept
+content_type: concept
weight: 50
---

-{{% capture overview %}}
+

Il networking è una parte centrale di Kubernetes, ma può essere difficile
capire esattamente come dovrebbe funzionare. Ci sono 4 distinti problemi di rete da affrontare:

@@ -14,10 +14,10 @@ Ci sono 4 distinti problemi di rete da affrontare:
2. Comunicazioni Pod-to-Pod: questo è l'obiettivo principale di questo documento.
3. Comunicazioni Pod-to-Service: questo è coperto da [servizi](/docs/concepts/services-networking/service/).
4. Comunicazioni da esterno a servizio: questo è coperto da [servizi](/docs/concepts/services-networking/service/).
-{{% /capture %}}

-{{% capture body %}}
+
+

Kubernetes è tutto basato sulla condivisione di macchine tra le applicazioni. Tipicamente,
la condivisione di macchine richiede che due applicazioni non provino a utilizzare il
@@ -334,11 +334,12 @@ sue applicazioni in hosting.
Weave Net funziona come un plug-in [CNI](https://ww
o stand-alone. In entrambe le versioni, non richiede alcuna configurazione o codice aggiuntivo
per eseguire, e in entrambi i casi, la rete fornisce un indirizzo IP per pod, come è standard per Kubernetes.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

Il progetto iniziale del modello di rete e la sua logica, insieme ad alcuni piani futuri,
sono descritti in maggior dettaglio nel [documento di progettazione della rete](https://git.k8s.io/community/contributors/design-proposals/network/networking.md).

-{{% /capture %}}
+
diff --git a/content/it/docs/concepts/cluster-administration/proxies.md b/content/it/docs/concepts/cluster-administration/proxies.md
index beee7cfa8a..58d29e67fb 100644
--- a/content/it/docs/concepts/cluster-administration/proxies.md
+++ b/content/it/docs/concepts/cluster-administration/proxies.md
@@ -1,14 +1,14 @@
---
title: Proxy in Kubernetes
-content_template: templates/concept
+content_type: concept
weight: 90
---

-{{% capture overview %}}
+

Questa pagina spiega i proxy utilizzati con Kubernetes.
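+
+Ad esempio (la porta indicata è solo illustrativa), `kubectl proxy` espone l'API server di Kubernetes in locale:
+
+```shell
+kubectl proxy --port=8080
+```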
-{{% /capture %}}
+

-{{% capture body %}}
+
+

## Proxy

@@ -62,6 +62,6 @@ in genere assicurerà che gli altri tipi di proxy siano impostati correttamente.

I proxy hanno sostituito le funzioni di reindirizzamento. I reindirizzamenti sono stati deprecati.

-{{% /capture %}}
+

diff --git a/content/it/docs/concepts/example-concept-template.md b/content/it/docs/concepts/example-concept-template.md
index db1bc0b960..fd91cdb085 100644
--- a/content/it/docs/concepts/example-concept-template.md
+++ b/content/it/docs/concepts/example-concept-template.md
@@ -1,10 +1,10 @@
---
title: Esempio di modello di concetto
-content_template: templates/concept
+content_type: concept
toc_hide: true
---

-{{% capture overview %}}
+

{{< note >}}
Assicurati anche di [creare una voce nel sommario](/docs/home/contribute/write-new-topic/#creating-an-entry-in-the-table-of-contents) per il tuo nuovo documento.
@@ -12,9 +12,9 @@ Assicurati anche di [creare una voce nel sommario](/docs/home/contribute/write-n

Questa pagina spiega ...

-{{% /capture %}}

-{{% capture body %}}
+
+

## Comprendendo ...

@@ -25,15 +25,16 @@ Kubernetes fornisce ...

Usare

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

**[Sezione opzionale]**

* Ulteriori informazioni su [Scrivere un nuovo argomento](/docs/home/contribute/write-new-topic/).
* Vedi [Uso dei modelli di pagina - Modello di concetto](/docs/home/contribute/page-templates/#concept_template) su come utilizzare questo modello.

-{{% /capture %}}
+
diff --git a/content/it/docs/concepts/overview/components.md b/content/it/docs/concepts/overview/components.md
index 6e896ab21a..da5259971c 100644
--- a/content/it/docs/concepts/overview/components.md
+++ b/content/it/docs/concepts/overview/components.md
@@ -1,13 +1,13 @@
---
title: I componenti di Kubernetes
-content_template: templates/concept
+content_type: concept
weight: 20
card:
  name: concepts
  weight: 20
---

-{{% capture overview %}}
+

Facendo il deployment di Kubernetes, ottieni un cluster.
{{< glossary_definition term_id="cluster" length="all" prepend="Un cluster Kubernetes è">}}

@@ -18,9 +18,9 @@ Questo è un diagramma di un cluster Kubernetes con tutti i componenti e le loro

![I componenti di Kubernetes](/images/docs/components-of-kubernetes.png)

-{{% /capture %}}

-{{% capture body %}}
+
+

## Componenti della Control Plane

I componenti del Control Plane sono responsabili di tutte le decisioni globali sul cluster (ad esempio, lo scheduling) oltre che a rilevare e rispondere agli eventi del cluster (ad esempio, l'avvio di un nuovo {{< glossary_tooltip text="pod" term_id="pod">}} quando il valore `replicas` di un deployment non è soddisfatto).

@@ -113,10 +113,11 @@ Il [Monitoraggio dei Container](/docs/tasks/debug-application-cluster/resource-u

Un [log a livello di cluster](/docs/concepts/cluster-administration/logging/) è responsabile per il salvataggio dei log dei container in un log centralizzato la cui interfaccia permette di cercare e navigare nei log.
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Scopri i concetti relativi ai [Nodi](/docs/concepts/architecture/nodes/) * Scopri i concetti relativi ai [Controller](/docs/concepts/architecture/controller/) * Scopri i concetti relativi al [kube-scheduler](/docs/concepts/scheduling/kube-scheduler/) * Leggi la [documentazione](https://etcd.io/docs/) ufficiale di etcd -{{% /capture %}} + diff --git a/content/it/docs/concepts/overview/kubernetes-api.md b/content/it/docs/concepts/overview/kubernetes-api.md index 7f122bcd14..5214bea53a 100644 --- a/content/it/docs/concepts/overview/kubernetes-api.md +++ b/content/it/docs/concepts/overview/kubernetes-api.md @@ -1,13 +1,13 @@ --- title: Le API di Kubernetes -content_template: templates/concept +content_type: concept weight: 30 card: name: concepts weight: 20 --- -{{% capture overview %}} + Le convenzioni generali seguite dalle API sono descritte in [API conventions doc](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md). @@ -21,10 +21,10 @@ Kubernetes assicura la persistenza del suo stato (al momento in [etcd](https://c Kubernetes stesso è diviso in differenti componenti, i quali interagiscono tra loro attraverso le stesse API. -{{% /capture %}} -{{% capture body %}} + + ## Evoluzione delle API @@ -123,4 +123,4 @@ Per esempio: per abilitare deployments e daemonsets, utilizza la seguente conf {{< note >}}Abilitare/disabilitare una singola risorsa è supportato solo per il gruppo di API `extensions/v1beta1` per ragioni storiche.{{< /note >}} -{{% /capture %}} + diff --git a/content/it/docs/concepts/overview/what-is-kubernetes.md b/content/it/docs/concepts/overview/what-is-kubernetes.md index 399b31da9c..fa511b90c3 100644 --- a/content/it/docs/concepts/overview/what-is-kubernetes.md +++ b/content/it/docs/concepts/overview/what-is-kubernetes.md @@ -2,18 +2,18 @@ title: Cos'è Kubernetes? description: > Kubernetes è una piattaforma portatile, estensibile e open-source per la gestione di carichi di lavoro e servizi containerizzati, in grado di facilitare sia la configurazione dichiarativa che l'automazione. La piattaforma vanta un grande ecosistema in rapida crescita. Servizi, supporto e strumenti sono ampiamente disponibili nel mondo Kubernetes. -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 10 --- -{{% capture overview %}} + Questa pagina è una panoramica generale su Kubernetes. -{{% /capture %}} -{{% capture body %}} + + Kubernetes è una piattaforma portatile, estensibile e open-source per la gestione di carichi di lavoro e servizi containerizzati, in grado di facilitare sia la configurazione dichiarativa che l'automazione. La piattaforma vanta un grande ecosistema in rapida crescita. Servizi, supporto e strumenti sono ampiamente disponibili nel mondo Kubernetes. Il nome Kubernetes deriva dal greco, significa timoniere o pilota. Google ha reso open-source il progetto Kubernetes nel 2014. Kubernetes unisce [oltre quindici anni di esperienza di Google nella gestione di carichi di lavoro di produzione su scala mondiale](https://ai.google/research/pubs/pub43438) con le migliori idee e pratiche della comunità. @@ -84,9 +84,10 @@ Kubernetes: * Non fornisce né adotta alcun sistema di gestione completa della macchina, configurazione, manutenzione, gestione o sistemi di self healing. * Inoltre, Kubernetes non è un semplice sistema di orchestrazione. Infatti, questo sistema elimina la necessità di orchestrazione.
La definizione tecnica di orchestrazione è l'esecuzione di un flusso di lavoro definito: prima si fa A, poi B, poi C. Al contrario, Kubernetes è composto da un insieme di processi di controllo indipendenti e componibili che guidano costantemente lo stato attuale verso lo stato desiderato. Non dovrebbe importare come si passa dalla A alla C. Anche il controllo centralizzato non è richiesto. Questo si traduce in un sistema più facile da usare, più potente, robusto, resiliente ed estensibile. -{{% /capture %}} + +## {{% heading "whatsnext" %}} + * Dai un'occhiata alla pagina [i componenti di Kubernetes](/docs/concepts/overview/components/) * Sai già [Come Iniziare](/docs/setup/)? -{{% /capture %}} + diff --git a/content/it/docs/tutorials/_index.md b/content/it/docs/tutorials/_index.md index cdd1c473e8..88ffbe41e5 100644 --- a/content/it/docs/tutorials/_index.md +++ b/content/it/docs/tutorials/_index.md @@ -2,10 +2,10 @@ title: Tutorials main_menu: true weight: 60 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Questa sezione della documentazione di Kubernetes contiene i tutorials. Un tutorial mostra come raggiungere un obiettivo più complesso di un singolo @@ -14,9 +14,9 @@ consiste in una sequenza di più task. Prima di procedere con vari tutorial, raccomandiamo di aggiungere il [Glossario](/docs/reference/glossary/) ai tuoi bookmark per riferimenti successivi. -{{% /capture %}} -{{% capture body %}} + + ## Per cominciare @@ -64,12 +64,13 @@ Prima di procedere con vari tutorial, raccomandiamo di aggiungere il * [Utilizzare Source IP](/docs/tutorials/services/source-ip/) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Se sei interessato a scrivere un tutorial, vedi [Utilizzare i Page Templates](/docs/home/contribute/page-templates/) per informazioni su come creare una tutorial page e sul tutorial template. -{{% /capture %}} + diff --git a/content/it/docs/tutorials/hello-minikube.md b/content/it/docs/tutorials/hello-minikube.md index 80b9b64b11..3dadc7a0cc 100644 --- a/content/it/docs/tutorials/hello-minikube.md +++ b/content/it/docs/tutorials/hello-minikube.md @@ -1,6 +1,6 @@ --- title: Hello Minikube -content_template: templates/tutorial +content_type: tutorial weight: 5 menu: main: @@ -13,7 +13,7 @@ card: weight: 10 --- -{{% capture overview %}} + Questo tutorial mostra come eseguire una semplice applicazione in Kubernetes utilizzando [Minikube](/docs/setup/learning-environment/minikube) e Katacoda. Katacoda permette di operare su un'installazione di Kubernetes dal tuo browser. {{< note >}} Come alternativa, è possibile eseguire questo tutorial [installando minikube](/docs/tasks/tools/install-minikube/) localmente. {{< /note >}} -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Rilasciare una semplice applicazione su Minikube. * Eseguire l'applicazione. * Visualizzare i log dell'applicazione. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Questo tutorial fornisce una container image che utilizza NGINX per rispondere a tutte le richieste con un echo che visualizza i dati della richiesta stessa.
-{{% /capture %}} -{{% capture lessoncontent %}} + + ## Crea un Minikube cluster @@ -269,12 +271,13 @@ Eventualmente, puoi cancellare la Minikube VM: minikube delete ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Approfondisci la tua conoscenza dei [Deployments](/docs/concepts/workloads/controllers/deployment/). * Approfondisci la tua conoscenza di [Rilasciare applicazioni](/docs/tasks/run-application/run-stateless-application-deployment/). * Approfondisci la tua conoscenza dei [Services](/docs/concepts/services-networking/service/). -{{% /capture %}} + diff --git a/content/ja/docs/concepts/_index.md b/content/ja/docs/concepts/_index.md index a179d79113..ca572d2559 100644 --- a/content/ja/docs/concepts/_index.md +++ b/content/ja/docs/concepts/_index.md @@ -1,17 +1,17 @@ --- title: コンセプト main_menu: true -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + 本セクションは、Kubernetesシステムの各パートと、{{< glossary_tooltip text="クラスター" term_id="cluster" length="all" >}}を表現するためにKubernetesが使用する抽象概念について学習し、Kubernetesの仕組みをより深く理解するのに役立ちます。 -{{% /capture %}} -{{% capture body %}} + + ## 概要 @@ -59,12 +59,13 @@ Kubernetesのマスターは、クラスターの望ましい状態を維持す クラスターのノードは、アプリケーションとクラウドワークフローを実行するマシン(VM、物理サーバーなど)です。Kubernetesのマスターは各ノードを制御します。運用者自身がノードと直接対話することはほとんどありません。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + コンセプトページを追加したい場合は、 [ページテンプレートの使用](/docs/home/contribute/page-templates/) のコンセプトページタイプとコンセプトテンプレートに関する情報を確認してください。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/architecture/cloud-controller.md b/content/ja/docs/concepts/architecture/cloud-controller.md index 9d76076fc7..d722ced7a6 100644 --- a/content/ja/docs/concepts/architecture/cloud-controller.md +++ b/content/ja/docs/concepts/architecture/cloud-controller.md @@ -1,10 +1,10 @@ --- title: クラウドコントローラーマネージャーとそのコンセプト -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + クラウドコントローラマネージャー(CCM)のコンセプト(バイナリと混同しないでください)は、もともとクラウドベンダー固有のソースコードと、Kubernetesのコアソースコードを独立して進化させることが出来るように作られました。クラウドコントローラーマネージャーは、Kubernetesコントローラーマネージャー、APIサーバー、そしてスケジューラーのような他のマスターコンポーネントと並行して動きます。またKubernetesのアドオンとしても動かすことができ、その場合はKubernetes上で動きます。 @@ -16,10 +16,10 @@ weight: 30 ![Pre CCM Kube Arch](/images/docs/pre-ccm-arch.png) -{{% /capture %}} -{{% capture body %}} + + ## 設計 @@ -235,4 +235,4 @@ rules: CCMを設定、動かすための完全な手順は[こちら](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager)で提供されています。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/architecture/master-node-communication.md b/content/ja/docs/concepts/architecture/master-node-communication.md index 711ce4a584..14f0678a20 100644 --- a/content/ja/docs/concepts/architecture/master-node-communication.md +++ b/content/ja/docs/concepts/architecture/master-node-communication.md @@ -1,18 +1,18 @@ --- title: マスターとノード間の通信 -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + 本ドキュメントでは、KubernetesにおけるMaster(実態はAPIサーバー)及びクラスター間のコミュニケーション経路についてまとめます。 この文書の目的は、信頼できないネットワーク上(またはクラウドプロバイダ上の完全にパブリックなIP上)でクラスタを実行できるように、ユーザーがインストールをカスタマイズしてネットワーク構成を強化できるようにすることです。 -{{% /capture %}} -{{% capture body %}} + + ## クラスターからマスターへの通信 @@ -69,4 +69,4 @@ Kubernetesはマスターからクラスターへの通信経路を保護する SSHトンネルは現在非推奨なので、自分がしていることが分からない限り、使用しないでください。この通信チャネルに代わるものが設計されています。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/architecture/nodes.md b/content/ja/docs/concepts/architecture/nodes.md index 
eac8388f41..d5631319a7 100644 --- a/content/ja/docs/concepts/architecture/nodes.md +++ b/content/ja/docs/concepts/architecture/nodes.md @@ -1,17 +1,17 @@ --- title: ノード -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + ノードは、以前には `ミニオン` としても知られていた、Kubernetesにおけるワーカーマシンです。1つのノードはクラスターの性質にもよりますが、1つのVMまたは物理的なマシンです。各ノードには[Pod](/ja/docs/concepts/workloads/pods/pod/)を動かすために必要なサービスが含まれており、マスターコンポーネントによって管理されています。ノード上のサービスには[コンテナランタイム](/ja/docs/concepts/overview/components/#container-runtime)、kubelet、kube-proxyが含まれています。詳細については、設計ドキュメントの[Kubernetes Node](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node)セクションをご覧ください。 -{{% /capture %}} -{{% capture body %}} + + ## ノードのステータス @@ -219,4 +219,4 @@ Pod以外のプロセス用にリソースを明示的に予約したい場合 NodeはKubernetesのREST APIにおけるトップレベルのリソースです。APIオブジェクトに関する詳細は以下の記事にてご覧いただけます: [Node APIオブジェクト](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#node-v1-core). -{{% /capture %}} + diff --git a/content/ja/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/ja/docs/concepts/cluster-administration/cluster-administration-overview.md index 935edba7a3..93ab8c2fa5 100644 --- a/content/ja/docs/concepts/cluster-administration/cluster-administration-overview.md +++ b/content/ja/docs/concepts/cluster-administration/cluster-administration-overview.md @@ -1,15 +1,15 @@ --- reviewers: title: クラスター管理の概要 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + このページはKubernetesクラスターの作成や管理者向けの内容です。Kubernetesのコア[コンセプト](/ja/docs/concepts/)についてある程度精通していることを前提とします。 -{{% /capture %}} -{{% capture body %}} + + ## クラスターのプランニング Kubernetesクラスターの計画、セットアップ、設定の例を知るには[設定](/ja/docs/setup/)のガイドを参照してください。この記事で列挙されているソリューションは*ディストリビューション* と呼ばれます。 @@ -64,6 +64,6 @@ Kubernetesクラスターの計画、セットアップ、設定の例を知る * [クラスターアクティビィのロギングと監視](/docs/concepts/cluster-administration/logging/)では、Kubernetesにおけるロギングがどのように行われ、どう実装されているかについて解説します。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/cluster-administration/controller-metrics.md b/content/ja/docs/concepts/cluster-administration/controller-metrics.md index d8fb5232f9..d77f5bdf44 100644 --- a/content/ja/docs/concepts/cluster-administration/controller-metrics.md +++ b/content/ja/docs/concepts/cluster-administration/controller-metrics.md @@ -1,15 +1,15 @@ --- title: コントローラーマネージャーの指標 -content_template: templates/concept +content_type: concept weight: 100 --- -{{% capture overview %}} + コントローラーマネージャーの指標は、コントローラー内部のパフォーマンスについての重要で正確な情報と、クラウドコントローラーの状態についての情報を提供します。 -{{% /capture %}} -{{% capture body %}} + + ## コントローラーマネージャーの指標とは何か コントローラーマネージャーの指標は、コントローラー内部のパフォーマンスについての重要で正確な情報と、クラウドコントローラーの状態についての情報を提供します。 @@ -39,4 +39,4 @@ cloudprovider_gce_api_request_duration_seconds { request = "list_disk"} 本番環境ではこれらの指標を定期的に収集し、なんらかの時系列データベースで使用できるようにprometheusやその他の指標のスクレイパーを構成することが推奨されます。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/cluster-administration/networking.md b/content/ja/docs/concepts/cluster-administration/networking.md index 80cf72f0ee..2ec89adc4d 100644 --- a/content/ja/docs/concepts/cluster-administration/networking.md +++ b/content/ja/docs/concepts/cluster-administration/networking.md @@ -1,10 +1,10 @@ --- title: クラスターのネットワーク -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + ネットワークはKubernetesにおける中心的な部分ですが、どのように動作するかを正確に理解することは難解な場合もあります。 Kubernetesには、4つの異なる対応すべきネットワークの問題があります: @@ -14,10 +14,10 @@ 
Kubernetesには、4つの異なる対応すべきネットワークの問題が 3. Podからサービスへの通信:これは[Service](/ja/docs/concepts/services-networking/service/)でカバーされています。 4. 外部からサービスへの通信:これは[Service](/ja/docs/concepts/services-networking/service/)でカバーされています。 -{{% /capture %}} -{{% capture body %}} + + Kubernetesは、言ってしまえばアプリケーション間でマシンを共有するためのものです。通常、マシンを共有するには、2つのアプリケーションが同じポートを使用しないようにする必要があります。 複数の開発者間でポートを調整することは、大規模に行うことは非常に難しく、ユーザーが制御できないクラスターレベルの問題に見合うことがあります。 @@ -282,10 +282,11 @@ Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-pl or stand-alone. In either version, it doesn't require any configuration or extra code to run, and in both cases, the network provides one IP address per pod - as is standard for Kubernetes. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ネットワークモデルの初期設計とその根拠、および将来の計画については、[ネットワーク設計ドキュメント](https://git.k8s.io/community/contributors/design-proposals/network/networking.md)で詳細に説明されています。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/configuration/assign-pod-node.md b/content/ja/docs/concepts/configuration/assign-pod-node.md index 0dbde41861..b34ea432e6 100644 --- a/content/ja/docs/concepts/configuration/assign-pod-node.md +++ b/content/ja/docs/concepts/configuration/assign-pod-node.md @@ -1,20 +1,20 @@ --- title: Node上へのPodのスケジューリング -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + [Pod](/ja/docs/concepts/workloads/pods/pod/)が稼働する[Node](/ja/docs/concepts/architecture/nodes/)を特定のものに指定したり、優先条件を指定して制限することができます。 これを実現するためにはいくつかの方法がありますが、推奨されている方法は[ラベルでの選択](/docs/concepts/overview/working-with-objects/labels/)です。 スケジューラーが最適な配置を選択するため、一般的にはこのような制限は不要です(例えば、複数のPodを別々のNodeへデプロイしたり、Podを配置する際にリソースが不十分なNodeにはデプロイされないことが挙げられます)が、 SSDが搭載されているNodeにPodをデプロイしたり、同じアベイラビリティーゾーン内で通信する異なるサービスのPodを同じNodeにデプロイする等、柔軟な制御が必要なこともあります。 -{{% /capture %}} -{{% capture body %}} + + ## nodeSelector @@ -357,9 +357,10 @@ spec: 上記のPodはkube-01という名前のNodeで稼働します。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Taints](/docs/concepts/configuration/taint-and-toleration/)を使うことで、NodeはPodを追い出すことができます。 @@ -367,4 +368,4 @@ spec: [Inter-Pod Affinity/Anti-Affinity](https://git.k8s.io/community/contributors/design-proposals/scheduling/podaffinity.md) には、Taintsの要点に関して様々な背景が紹介されています。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/configuration/overview.md b/content/ja/docs/concepts/configuration/overview.md index 8255db692a..a4b6b57763 100644 --- a/content/ja/docs/concepts/configuration/overview.md +++ b/content/ja/docs/concepts/configuration/overview.md @@ -1,16 +1,16 @@ --- title: 設定のベストプラクティス -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + このドキュメントでは、ユーザーガイド、入門マニュアル、および例を通して紹介されている設定のベストプラクティスを中心に説明します。 このドキュメントは生ものです。このリストには載っていないが他の人に役立つかもしれない何かについて考えている場合、IssueまたはPRを遠慮なく作成してください。 -{{% /capture %}} -{{% capture body %}} + + ## 一般的な設定のTips - 構成を定義する際には、最新の安定したAPIバージョンを指定してください。 @@ -98,6 +98,6 @@ weight: 10 - `get`や`delete`を行う際は、特定のオブジェクト名を指定するのではなくラベルセレクターを使いましょう。[ラベルセレクター](/docs/concepts/overview/working-with-objects/labels/#label-selectors)と[ラベルの効果的な使い方](/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively)のセクションを参照してください。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/containers/container-environment-variables.md b/content/ja/docs/concepts/containers/container-environment-variables.md index 1057cc0518..52e6f7e1ad 100644 --- 
a/content/ja/docs/concepts/containers/container-environment-variables.md +++ b/content/ja/docs/concepts/containers/container-environment-variables.md @@ -1,17 +1,17 @@ --- title: コンテナ環境変数 -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + このページでは、コンテナ環境で利用可能なリソースについて説明します。 -{{% /capture %}} -{{% capture body %}} + + ## コンテナ環境 @@ -45,11 +45,12 @@ FOO_SERVICE_PORT=<サービスが実行されているポート> サービスは専用のIPアドレスを持ち、[DNSアドオン](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/)が有効の場合、DNSを介してコンテナで利用可能です。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [コンテナライフサイクルフック](/docs/concepts/containers/container-lifecycle-hooks/)の詳細 * [コンテナライフサイクルイベントへのハンドラー紐付け](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/)のハンズオン -{{% /capture %}} + diff --git a/content/ja/docs/concepts/containers/container-lifecycle-hooks.md b/content/ja/docs/concepts/containers/container-lifecycle-hooks.md index 943e77aae2..5104ab1efb 100644 --- a/content/ja/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/ja/docs/concepts/containers/container-lifecycle-hooks.md @@ -1,17 +1,17 @@ --- title: コンテナライフサイクルフック -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + このページでは、kubeletにより管理されるコンテナがコンテナライフサイクルフックフレームワークを使用して、管理ライフサイクル中にイベントによって引き起こされたコードを実行する方法について説明します。 -{{% /capture %}} -{{% capture body %}} + + ## 概要 @@ -93,12 +93,13 @@ Events: 1m 22s 2 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Warning FailedPostStartHook ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [コンテナ環境](/docs/concepts/containers/container-environment-variables/)の詳細 * [コンテナライフサイクルイベントへのハンドラー紐付け](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/)のハンズオン -{{% /capture %}} + diff --git a/content/ja/docs/concepts/containers/runtime-class.md b/content/ja/docs/concepts/containers/runtime-class.md index 1acbdcf219..526eb62463 100644 --- a/content/ja/docs/concepts/containers/runtime-class.md +++ b/content/ja/docs/concepts/containers/runtime-class.md @@ -1,11 +1,11 @@ --- reviewers: title: ランタイムクラス(Runtime Class) -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.14" state="beta" >}} @@ -15,10 +15,10 @@ weight: 20 RuntimeClassはKubernetes1.14のβ版アップグレードにおいて*破壊的な* 変更を含んでいます。もしユーザーがKubernetes1.14以前のバージョンを使っていた場合、[RuntimeClassのα版からβ版へのアップグレード](#upgrading-runtimeclass-from-alpha-to-beta)を参照してください。 {{< /warning >}} -{{% /capture %}} -{{% capture body %}} + + ## RuntimeClassについて @@ -139,4 +139,4 @@ RuntimeClassのβ版の機能は、下記の変更点を含みます。 ``` - `runtimeHandler`の指定がないか、もしくは空文字の場合や、ハンドラー名に`.`文字列が使われている場合はα版のRuntimeClassにおいてもはや有効ではありません。正しい形式のハンドラー設定に変更しなくてはなりません(先ほど記載した内容を確認ください)。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md b/content/ja/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md index 5338d3071d..a47d894561 100644 --- a/content/ja/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md +++ b/content/ja/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md @@ -1,16 +1,16 @@ --- title: アグリゲーションレイヤーを使ったKubernetes APIの拡張 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + アグリゲーションレイヤーを使用すると、KubernetesのコアAPIで提供されている機能を超えて、追加のAPIでKubernetesを拡張できます。 -{{% /capture %}} 
-{{% capture body %}} + + ## 概要 @@ -20,13 +20,14 @@ weight: 10 通常、APIServiceは、クラスター上で動いているPod内の *extension-apiserver* で実装されます。このextension-apiserverは、追加されたリソースに対するアクティブな管理が必要な場合、通常、1つか複数のコントローラーとペアになっている必要があります。そのため、実際にapiserver-builderはextension-apiserverとコントローラーの両方のスケルトンを提供します。一例として、service-catalogがインストールされると、extension-apiserverと提供するサービスのコントローラーの両方を提供します。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * アグリゲーターをあなたの環境で動かすには、まず[アグリゲーションレイヤーを設定](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/)します * そして、アグリゲーションレイヤーと一緒に動作させるために[extension api-serverをセットアップ](/docs/tasks/access-kubernetes-api/setup-extension-api-server/)します * また、[Custom Resource Definitionを使いKubernetes APIを拡張する](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/)方法を学んで下さい -{{% /capture %}} + diff --git a/content/ja/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/ja/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index 6d8fbc1e2e..41f96a20ce 100644 --- a/content/ja/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/ja/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -1,16 +1,16 @@ --- title: カスタムリソース -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + *カスタムリソース* はKubernetes APIの拡張です。このページでは、いつKubernetesのクラスターにカスタムリソースを追加するべきなのか、そしていつスタンドアローンのサービスを利用するべきなのかを議論します。カスタムリソースを追加する2つの方法と、それらの選択方法について説明します。 -{{% /capture %}} -{{% capture body %}} + + ## カスタムリソース @@ -213,11 +213,12 @@ Kubernetesの[クライアントライブラリー](/docs/reference/using-api/cl - 自作のRESTクライアント - [Kubernetesクライアント生成ツール](https://github.com/kubernetes/code-generator)を使い生成したクライアント(生成は高度な作業ですが、一部のプロジェクトは、CRDまたはAAとともにクライアントを提供する場合があります) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Kubernetes APIをアグリゲーションレイヤーで拡張する方法](/ja/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)について学ぶ * [Kubernetes APIをCustomResourceDefinitionで拡張する方法](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/)について学ぶ -{{% /capture %}} + diff --git a/content/ja/docs/concepts/extend-kubernetes/extend-cluster.md b/content/ja/docs/concepts/extend-kubernetes/extend-cluster.md index b554f01819..dad9190345 100644 --- a/content/ja/docs/concepts/extend-kubernetes/extend-cluster.md +++ b/content/ja/docs/concepts/extend-kubernetes/extend-cluster.md @@ -1,10 +1,10 @@ --- title: Kubernetesクラスターの拡張 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + Kubernetesは柔軟な設定が可能で、高い拡張性を持っています。 結果として、Kubernetesのプロジェクトソースコードをフォークしたり、パッチを当てて利用することは滅多にありません。 @@ -13,9 +13,9 @@ Kubernetesは柔軟な設定が可能で、高い拡張性を持っています 管理しているKubernetesクラスターを、動作環境の要件にどのように適合させるべきかを理解したい{{< glossary_tooltip text="クラスター管理者" term_id="cluster-operator" >}}を対象にしています。 将来の {{< glossary_tooltip text="プラットフォーム開発者" term_id="platform-developer" >}} 、またはKubernetesプロジェクトの{{< glossary_tooltip text="コントリビューター" term_id="contributor" >}}にとっても、どのような拡張のポイントやパターンが存在するのか、また、それぞれのトレードオフや制限事項を学ぶための導入として役立つでしょう。 -{{% /capture %}} -{{% capture body %}} + + ## 概要 @@ -152,9 +152,10 @@ Kubernetesはいくつかのビルトイン認証方式と、それらが要件 スケジューラは[Webhook](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/scheduler_extender.md)もサポートしており、Webhookバックエンド(スケジューラーエクステンション)を通じてPodを配置するために選択されたノードをフィルタリング、優先度付けすることが可能です。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * 
[カスタムリソース](/docs/concepts/api-extension/custom-resources/)についてより深く学ぶ * [動的Admission control](/docs/reference/access-authn-authz/extensible-admission-controllers/)について学ぶ @@ -164,4 +165,4 @@ Kubernetesはいくつかのビルトイン認証方式と、それらが要件 * [kubectlプラグイン](/docs/tasks/extend-kubectl/kubectl-plugins/)について学ぶ * [オペレーターパターン](/docs/concepts/extend-kubernetes/operator/)について学ぶ -{{% /capture %}} + diff --git a/content/ja/docs/concepts/extend-kubernetes/operator.md b/content/ja/docs/concepts/extend-kubernetes/operator.md index 08c173ddff..cee9d31d5c 100644 --- a/content/ja/docs/concepts/extend-kubernetes/operator.md +++ b/content/ja/docs/concepts/extend-kubernetes/operator.md @@ -1,17 +1,17 @@ --- title: オペレーターパターン -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + オペレーターはサードパーティのアプリケーション、コンポーネントを管理するためのリソースを活用する、Kubernetesへのソフトウェア拡張です。 オペレーターは、特に[制御ループ](/docs/concepts/#kubernetes-control-plane)のようなKubernetesが持つ仕組みに準拠しています。 -{{% /capture %}} -{{% capture body %}} + + ## モチベーション @@ -79,9 +79,10 @@ kubectl edit SampleDB/example-database # 手動でいくつかの設定を変更 オペレーター(すなわち、コントローラー)はどの言語/ランタイムでも実装でき、[Kubernetes APIのクライアント](/docs/reference/using-api/client-libraries/)として機能させることができます。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/)をより深く学びます * ユースケースに合わせた、既製のオペレーターを[OperatorHub.io](https://operatorhub.io/)から見つけます @@ -94,4 +95,4 @@ kubectl edit SampleDB/example-database # 手動でいくつかの設定を変更 * オペレーターパターンを紹介している[CoreOSオリジナル記事](https://coreos.com/blog/introducing-operators.html)を読みます * Google Cloudが出したオペレーター作成のベストプラクティス[記事](https://cloud.google.com/blog/products/containers-kubernetes/best-practices-for-building-kubernetes-operators-and-stateful-apps)を読みます -{{% /capture %}} + diff --git a/content/ja/docs/concepts/overview/components.md b/content/ja/docs/concepts/overview/components.md index 5a4f894c44..933602567a 100644 --- a/content/ja/docs/concepts/overview/components.md +++ b/content/ja/docs/concepts/overview/components.md @@ -1,13 +1,13 @@ --- title: Kubernetesのコンポーネント -content_template: templates/concept +content_type: concept weight: 20 card: name: concepts weight: 20 --- -{{% capture overview %}} + Kubernetesをデプロイすると、クラスターが展開されます。 {{< glossary_definition term_id="cluster" length="all" prepend="クラスターは、">}} @@ -17,9 +17,9 @@ Kubernetesをデプロイすると、クラスターが展開されます。 ![Kubernetesのコンポーネント](/images/docs/components-of-kubernetes.png) -{{% /capture %}} -{{% capture body %}} + + ## マスターコンポーネント @@ -112,10 +112,11 @@ Kubernetesによって開始されたコンテナは、DNS検索にこのDNSサ [クラスターレベルログ](/docs/concepts/cluster-administration/logging/)メカニズムは、コンテナのログを、検索/参照インターフェイスを備えた中央ログストアに保存します。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [ノード](/ja/docs/concepts/architecture/nodes/)について学ぶ * [コントローラー](/docs/concepts/architecture/controller/)について学ぶ * [kube-scheduler](/ja/docs/concepts/scheduling/kube-scheduler/)について学ぶ * etcdの公式 [ドキュメント](https://etcd.io/docs/)を読む -{{% /capture %}} + diff --git a/content/ja/docs/concepts/overview/kubernetes-api.md b/content/ja/docs/concepts/overview/kubernetes-api.md index d7851d954b..b7abd72a9c 100644 --- a/content/ja/docs/concepts/overview/kubernetes-api.md +++ b/content/ja/docs/concepts/overview/kubernetes-api.md @@ -1,14 +1,14 @@ --- reviewers: title: Kubernetes API -content_template: templates/concept +content_type: concept weight: 30 card: name: concepts weight: 30 --- -{{% capture overview %}} + 
全般的なAPIの規則は、[API規則ドキュメント](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md)に記載されています。 @@ -22,9 +22,9 @@ Kubernetes APIは、システムの宣言的設定スキーマの基礎として Kubernetesそれ自身は複数のコンポーネントから構成されており、APIを介して連携しています。 -{{% /capture %}} -{{% capture body %}} + + ## APIの変更 @@ -113,4 +113,4 @@ APIグループは、RESTのパスとシリアライズされたオブジェク DaemonSets、Deployments、HorizontalPodAutoscalers、Ingresses、Jobs、そしてReplicaSetsはデフォルトで有効です。 その他の拡張リソースは、APIサーバーの`--runtime-config`を設定することで有効化できます。`--runtime-config`はカンマ区切りの複数の値を設定可能です。例えば、deploymentsとingressを無効化する場合、`--runtime-config=extensions/v1beta1/deployments=false,extensions/v1beta1/ingresses=false`と設定します。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/overview/what-is-kubernetes.md b/content/ja/docs/concepts/overview/what-is-kubernetes.md index 6299002ac6..f6da4c1b0d 100644 --- a/content/ja/docs/concepts/overview/what-is-kubernetes.md +++ b/content/ja/docs/concepts/overview/what-is-kubernetes.md @@ -1,17 +1,17 @@ --- title: Kubernetesとは何か? -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 10 --- -{{% capture overview %}} + このページでは、Kubernetesの概要について説明します。 -{{% /capture %}} -{{% capture body %}} + + Kubernetesは、宣言的な構成管理と自動化を促進し、コンテナ化されたワークロードやサービスを管理するための、ポータブルで拡張性のあるオープンソースプラットホームです。 Kubernetesは膨大で、急速に成長しているエコシステムを備えており、それらのサービス、サポート、ツールは幅広い形で利用可能です。 @@ -94,11 +94,12 @@ Kubernetesは... **Kubernetes** という名前はギリシャ語で *操舵手* や *パイロット* という意味があり、*知事* や[サイバネティックス](http://www.etymonline.com/index.php?term=cybernetics)の語源にもなっています。*K8s* は、8文字の「ubernete」を「8」に置き換えた略語です。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [はじめる](/docs/setup/)準備はできましたか? * さらなる詳細については、[Kubernetesのドキュメント](/ja/docs/home/)を御覧ください。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/overview/working-with-objects/annotations.md b/content/ja/docs/concepts/overview/working-with-objects/annotations.md index a169bdee03..282a671825 100644 --- a/content/ja/docs/concepts/overview/working-with-objects/annotations.md +++ b/content/ja/docs/concepts/overview/working-with-objects/annotations.md @@ -1,14 +1,14 @@ --- title: アノテーション(Annotations) -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + ユーザーは、識別用途でない任意のメタデータをオブジェクトに割り当てるためにアノテーションを使用できます。ツールやライブラリなどのクライアントは、このメタデータを取得できます。 -{{% /capture %}} -{{% capture body %}} + + ## オブジェクトにメタデータを割り当てる ユーザーは、Kubernetesオブジェクトに対してラベルやアノテーションの両方またはどちらか一方を割り当てることができます。 @@ -59,9 +59,10 @@ _アノテーション_ はキーとバリューのペアです。有効なア `kubernetes.io/`と`k8s.io/`プレフィックスは、Kubernetesコアコンポーネントのために予約されています。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [ラベルとセレクター](/docs/concepts/overview/working-with-objects/labels/)について学習してください。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/overview/working-with-objects/common-labels.md b/content/ja/docs/concepts/overview/working-with-objects/common-labels.md index 9a6c4508df..0e65cffc5d 100644 --- a/content/ja/docs/concepts/overview/working-with-objects/common-labels.md +++ b/content/ja/docs/concepts/overview/working-with-objects/common-labels.md @@ -1,15 +1,15 @@ --- title: 推奨ラベル(Recommended Labels) -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + ユーザーはkubectlやダッシュボード以外に、多くのツールでKubernetesオブジェクトの管理と可視化ができます。共通のラベルセットにより、全てのツールにおいて解釈可能な共通のマナーに沿ってオブジェクトを表現することで、ツールの相互運用を可能にします。 ツール化に対するサポートに加えて、推奨ラベルはクエリ可能な方法でアプリケーションを表現します。 -{{% /capture %}} -{{% capture body %}} + + メタデータは、_アプリケーション_
のコンセプトを中心に構成されています。KubernetesはPaaS(Platform as a Service)でなく、アプリケーションの公式な概念を持たず、またそれを強制することはありません。 そのかわり、アプリケーションは、非公式で、メタデータによって表現されています。単一のアプリケーションが有する項目に対する定義は厳密に決められていません。 @@ -153,4 +153,3 @@ metadata: MySQLの`StatefulSet`と`Service`により、MySQLとWordPressに関するより広範な情報が含まれていることに気づくでしょう。 -{{% /capture %}} \ No newline at end of file diff --git a/content/ja/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/ja/docs/concepts/overview/working-with-objects/kubernetes-objects.md index 81bee4ca68..ace4fae929 100644 --- a/content/ja/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/ja/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -1,17 +1,17 @@ --- title: Kubernetesオブジェクトを理解する -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 40 --- -{{% capture overview %}} + このページでは、KubernetesオブジェクトがKubernetes APIでどのように表現されているか、またそれらを`.yaml`フォーマットでどのように表現するかを説明します。 -{{% /capture %}} -{{% capture body %}} + + ## Kubernetesオブジェクトを理解する *Kubernetesオブジェクト* は、Kubernetes上で永続的なエンティティです。Kubernetesはこれらのエンティティを使い、クラスターの状態を表現します。具体的に言うと、下記のような内容が表現出来ます: @@ -63,10 +63,11 @@ Kubernetesオブジェクトを`.yaml`ファイルに記載して作成する場 またオブジェクトの`spec`の値も指定する必要があります。`spec`の正確なフォーマットは、Kubernetesオブジェクトごとに異なり、オブジェクトごとに特有な入れ子のフィールドを持っています。[Kubernetes API リファレンス](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/)が、Kubernetesで作成出来る全てのオブジェクトに関するspecのフォーマットを探すのに役立ちます。 例えば、`Pod`オブジェクトに関する`spec`のフォーマットは[こちら](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core)を、また`Deployment`オブジェクトに関する`spec`のフォーマットは[こちら](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#deploymentspec-v1-apps)をご確認ください。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * 最も重要、かつ基本的なKubernetesオブジェクト群を学びましょう、例えば、[Pod](/ja/docs/concepts/workloads/pods/pod-overview/)です。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/overview/working-with-objects/labels.md b/content/ja/docs/concepts/overview/working-with-objects/labels.md index 9d42742759..f543ef938c 100644 --- a/content/ja/docs/concepts/overview/working-with-objects/labels.md +++ b/content/ja/docs/concepts/overview/working-with-objects/labels.md @@ -1,10 +1,10 @@ --- title: ラベル(Labels)とセレクター(Selectors) -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + _ラベル(Labels)_ はPodなどのオブジェクトに割り当てられたキーとバリューのペアです。 ラベルはユーザーに関連した意味のあるオブジェクトの属性を指定するために使われることを目的としています。しかしKubernetesのコアシステムに対して直接的にその意味を暗示するものではありません。 @@ -22,10 +22,10 @@ _ラベル(Labels)_ はPodなどのオブジェクトに割り当てられたキ ラベルは効率的な検索・閲覧を可能にし、UIやCLI上での利用に最適です。 識別用途でない情報は、[アノテーション](/docs/concepts/overview/working-with-objects/annotations/)を用いて記録されるべきです。 -{{% /capture %}} -{{% capture body %}} + + ## ラベルを使う動機 @@ -216,4 +216,4 @@ selector: ラベルを選択するための1つのユースケースはPodがスケジュールできるNodeのセットを制限することです。 さらなる情報に関しては、[Node選定](/ja/docs/concepts/configuration/assign-pod-node/) のドキュメントを参照してください。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/overview/working-with-objects/names.md b/content/ja/docs/concepts/overview/working-with-objects/names.md index b8762cb33c..2be57e51a5 100644 --- a/content/ja/docs/concepts/overview/working-with-objects/names.md +++ b/content/ja/docs/concepts/overview/working-with-objects/names.md @@ -1,11 +1,11 @@ --- reviewers: title: 名前 -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + KubernetesのREST API内の全てのオブジェクトは、名前とUIDで明確に識別されます。 @@ -13,9 +13,9 @@ KubernetesのREST 
API内の全てのオブジェクトは、名前とUIDで明 名前とUIDに関する正確な構文については、[識別子デザインドキュメント](https://git.k8s.io/community/contributors/design-proposals/architecture/identifiers.md)を参照してください。 -{{% /capture %}} -{{% capture body %}} + + ## 名前 @@ -27,4 +27,4 @@ KubernetesのREST API内の全てのオブジェクトは、名前とUIDで明 {{< glossary_definition term_id="uid" length="all" >}} -{{% /capture %}} + diff --git a/content/ja/docs/concepts/overview/working-with-objects/namespaces.md b/content/ja/docs/concepts/overview/working-with-objects/namespaces.md index 8e21224587..1cf310f2bd 100644 --- a/content/ja/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/ja/docs/concepts/overview/working-with-objects/namespaces.md @@ -1,18 +1,18 @@ --- title: Namespace(名前空間) -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + Kubernetesは、同一の物理クラスター上で複数の仮想クラスターの動作をサポートします。 この仮想クラスターをNamespaceと呼びます。 -{{% /capture %}} -{{% capture body %}} + + ## 複数のNamespaceを使う時 @@ -97,4 +97,4 @@ kubectl api-resources --namespaced=true kubectl api-resources --namespaced=false ``` -{{% /capture %}} + diff --git a/content/ja/docs/concepts/overview/working-with-objects/object-management.md b/content/ja/docs/concepts/overview/working-with-objects/object-management.md index 356426b375..bbf0085cf1 100644 --- a/content/ja/docs/concepts/overview/working-with-objects/object-management.md +++ b/content/ja/docs/concepts/overview/working-with-objects/object-management.md @@ -1,16 +1,16 @@ --- title: Kubernetesオブジェクト管理 -content_template: templates/concept +content_type: concept weight: 15 --- -{{% capture overview %}} + `kubectl`コマンドラインツールは、Kubernetesオブジェクトを作成、管理するためにいくつかの異なる方法をサポートしています。 このドキュメントでは、それらの異なるアプローチごとの概要を提供します。 Kubectlを使ったオブジェクト管理の詳細は、[Kubectl book](https://kubectl.docs.kubernetes.io)を参照してください。 -{{% /capture %}} -{{% capture body %}} + + ## 管理手法 @@ -157,9 +157,10 @@ kubectl apply -R -f configs/ - 宣言型オブジェクト設定は、デバッグ、そして想定外の結果が出たときに理解するのが困難です - 差分を利用した一部のみの更新は、複雑なマージ、パッチの操作が必要です -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [命令型コマンドを利用したKubernetesオブジェクトの管理](/docs/tasks/manage-kubernetes-objects/imperative-command/) - [オブジェクト設定(命令型)を利用したKubernetesオブジェクトの管理](/docs/tasks/manage-kubernetes-objects/imperative-config/) @@ -169,4 +170,4 @@ kubectl apply -R -f configs/ - [Kubectl Book](https://kubectl.docs.kubernetes.io) - [Kubernetes APIリファレンス](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) -{{% /capture %}} + diff --git a/content/ja/docs/concepts/scheduling/kube-scheduler.md b/content/ja/docs/concepts/scheduling/kube-scheduler.md index 53fd5c67b7..15e2c4e638 100644 --- a/content/ja/docs/concepts/scheduling/kube-scheduler.md +++ b/content/ja/docs/concepts/scheduling/kube-scheduler.md @@ -1,16 +1,16 @@ --- title: Kubernetesのスケジューラー -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + Kubernetesにおいて、_スケジューリング_ とは、{{< glossary_tooltip term_id="kubelet" >}}が{{< glossary_tooltip text="Pod" term_id="pod" >}}を稼働させるために{{< glossary_tooltip text="Node" term_id="node" >}}に割り当てることを意味します。 -{{% /capture %}} -{{% capture body %}} + + ## スケジューリングの概要{#scheduling} @@ -110,9 +110,10 @@ kube-schedulerは、デフォルトで用意されているスケジューリン - `EqualPriorityMap`: 全てのNodeに対して等しい重みを与えます。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [スケジューラーのパフォーマンスチューニング](/docs/concepts/scheduling/scheduler-perf-tuning/)を参照してください。 * kube-schedulerの[リファレンスドキュメント](/docs/reference/command-line-tools-reference/kube-scheduler/)を参照してください。 * 
[複数のスケジューラーの設定](https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/)について学んでください。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/scheduling/scheduler-perf-tuning.md b/content/ja/docs/concepts/scheduling/scheduler-perf-tuning.md index ccc04a54f3..2a096295a1 100644 --- a/content/ja/docs/concepts/scheduling/scheduler-perf-tuning.md +++ b/content/ja/docs/concepts/scheduling/scheduler-perf-tuning.md @@ -1,10 +1,10 @@ --- title: スケジューラーのパフォーマンスチューニング -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="1.14" state="beta" >}} @@ -14,9 +14,9 @@ weight: 70 このページでは、大規模のKubernetesクラスターにおけるパフォーマンス最適化のためのチューニングについて説明します。 -{{% /capture %}} -{{% capture body %}} + + ## スコア付けするノードの割合 @@ -71,4 +71,4 @@ Node 1, Node 5, Node 2, Node 6, Node 3, Node 4 全てのノードのチェックを終えたら、1番目のノードに戻ってチェックをします。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/services-networking/connect-applications-service.md b/content/ja/docs/concepts/services-networking/connect-applications-service.md index e1250bbdea..1bb6d404c1 100644 --- a/content/ja/docs/concepts/services-networking/connect-applications-service.md +++ b/content/ja/docs/concepts/services-networking/connect-applications-service.md @@ -1,11 +1,11 @@ --- title: サービスとアプリケーションの接続 -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + ## コンテナを接続するためのKubernetesモデル @@ -25,9 +25,9 @@ Kubernetesでは、どのホストで稼働するかに関わらず、Podが他 このガイドでは、シンプルなnginxサーバーを使用して概念実証を示します。 同じ原則が、より完全な[Jenkins CIアプリケーション](https://kubernetes.io/blog/2015/07/strong-simple-ssl-for-kubernetes)で具体化されています。 -{{% /capture %}} -{{% capture body %}} + + ## Podをクラスターに公開する @@ -410,11 +410,12 @@ LoadBalancer Ingress: a320587ffd19711e5a37606cf4a74574-1142138393.us-east-1.el ... 
``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Kubernetesは、複数のクラスターおよびクラウドプロバイダーにまたがるフェデレーションサービスもサポートし、可用性の向上、フォールトトレランスの向上、サービスのスケーラビリティの向上を実現します。 詳細については[フェデレーションサービスユーザーガイド](/docs/concepts/cluster-administration/federation-service-discovery/)を参照してください。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/services-networking/dns-pod-service.md b/content/ja/docs/concepts/services-networking/dns-pod-service.md index fa76965e8e..0b2b28da27 100644 --- a/content/ja/docs/concepts/services-networking/dns-pod-service.md +++ b/content/ja/docs/concepts/services-networking/dns-pod-service.md @@ -1,14 +1,14 @@ --- reviewers: title: ServiceとPodに対するDNS -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + このページではKubernetesによるDNSサポートについて概観します。 -{{% /capture %}} -{{% capture body %}} + + ## イントロダクション @@ -191,13 +191,14 @@ PodのDNS設定と"`None`"というDNSポリシーの利用可能なバージョ | 1.10 | β版 (デフォルトで有効)| | 1.9 | α版 | -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + DNS設定の管理方法に関しては、[DNS Serviceの設定](/docs/tasks/administer-cluster/dns-custom-nameservers/) を確認してください。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/services-networking/ingress.md b/content/ja/docs/concepts/services-networking/ingress.md index 7fd3a81bab..273e2dbba2 100644 --- a/content/ja/docs/concepts/services-networking/ingress.md +++ b/content/ja/docs/concepts/services-networking/ingress.md @@ -1,15 +1,15 @@ --- title: Ingress -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.1" state="beta" >}} {{< glossary_definition term_id="ingress" length="all" >}} -{{% /capture %}} -{{% capture body %}} + + ## 用語 @@ -395,9 +395,10 @@ Ingressリソースに直接関与しない複数の方法でServiceを公開で * [Service.Type=LoadBalancer](/ja/docs/concepts/services-networking/service/#loadbalancer) * [Service.Type=NodePort](/ja/docs/concepts/services-networking/service/#nodeport) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Ingressコントローラー](/docs/concepts/services-networking/ingress-controllers/)について学ぶ * [MinikubeとNGINXコントローラーでIngressのセットアップを行う](/docs/tasks/access-application-cluster/ingress-minikube) -{{% /capture %}} + diff --git a/content/ja/docs/concepts/services-networking/service.md b/content/ja/docs/concepts/services-networking/service.md index 4556752eae..ea12c8e0a7 100644 --- a/content/ja/docs/concepts/services-networking/service.md +++ b/content/ja/docs/concepts/services-networking/service.md @@ -5,21 +5,21 @@ feature: description: > Kubernetesでは、なじみのないサービスディスカバリーのメカニズムを使用するためにユーザーがアプリケーションの修正をする必要はありません。KubernetesはPodにそれぞれのIPアドレス割り振りや、Podのセットに対する単一のDNS名を提供したり、それらのPodのセットに対する負荷分散が可能です。 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< glossary_definition term_id="service" length="short" >}} Kubernetesでは、なじみのないサービスディスカバリーのメカニズムを使用するためにユーザーがアプリケーションの修正をする必要はありません。 KubernetesはPodにそれぞれのIPアドレス割り振りや、Podのセットに対する単一のDNS名を提供したり、それらのPodのセットに対する負荷分散が可能です。 -{{% /capture %}} -{{% capture body %}} + + ## Serviceを利用する動機 @@ -941,12 +941,13 @@ Kubernetesプロジェクトは、L7 (HTTP) Serviceへのサポートをもっ Kubernetesプロジェクトは、現在利用可能なClusterIP、NodePortやLoadBalancerタイプのServiceに対して、より柔軟なIngressのモードを追加する予定です。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/)を参照してください。 * 
[Ingress](/docs/concepts/services-networking/ingress/)を参照してください。 * [Endpoint Slices](/docs/concepts/services-networking/endpoint-slices/)を参照してください。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/storage/dynamic-provisioning.md b/content/ja/docs/concepts/storage/dynamic-provisioning.md index e2361e5e83..28aa61209e 100644 --- a/content/ja/docs/concepts/storage/dynamic-provisioning.md +++ b/content/ja/docs/concepts/storage/dynamic-provisioning.md @@ -1,19 +1,19 @@ --- reviewers: title: ボリュームの動的プロビジョニング(Dynamic Volume Provisioning) -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + ボリュームの動的プロビジョニングにより、ストレージ用のボリュームをオンデマンドに作成することができます。 動的プロビジョニングなしでは、クラスター管理者はクラウドプロバイダーまたはストレージプロバイダーに対して新規のストレージ用のボリュームと[`PersistentVolume`オブジェクト](/docs/concepts/storage/persistent-volumes/)を作成するように手動で指示しなければなりません。動的プロビジョニングの機能によって、クラスター管理者がストレージを事前にプロビジョンする必要がなくなります。その代わりに、ユーザーによってリクエストされたときに自動でストレージをプロビジョンします。 -{{% /capture %}} -{{% capture body %}} + + ## バックグラウンド @@ -87,4 +87,4 @@ spec: [マルチゾーン](/docs/setup/multiple-zones)クラスター内では、Podは単一のリージョン内のゾーンをまたいでしか稼働できません。シングルゾーンのStorageバックエンドはPodがスケジュールされるゾーン内でプロビジョンされる必要があります。これは[Volume割り当てモード](/docs/concepts/storage/storage-classes/#volume-binding-mode)を設定することにより可能となります。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/storage/persistent-volumes.md b/content/ja/docs/concepts/storage/persistent-volumes.md index d7969f7d99..f6db1eba38 100644 --- a/content/ja/docs/concepts/storage/persistent-volumes.md +++ b/content/ja/docs/concepts/storage/persistent-volumes.md @@ -5,18 +5,18 @@ feature: description: > ローカルストレージやGCPやAWSなどのパブリッククラウドプロバイダー、もしくはNFS、iSCSI、Gluster、Ceph、Cinder、Flockerのようなネットワークストレージシステムの中から選択されたものを自動的にマウントします。 -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + このドキュメントではKubernetesの`PersistentVolume`について説明します。[ボリューム](/docs/concepts/storage/volumes/)を一読することをおすすめします。 -{{% /capture %}} -{{% capture body %}} + + ## 概要 @@ -658,4 +658,4 @@ spec: - ユーザーがストレージクラス名を指定しない場合、`persistentVolumeClaim.storageClassName`フィールドはnilのままにする。これにより、PVはユーザーにクラスターのデフォルトストレージクラスで自動的にプロビジョニングされる。多くのクラスター環境ではデフォルトのストレージクラスがインストールされているが、管理者は独自のデフォルトストレージクラスを作成することができる。 - ツールがPVCを監視し、しばらくしてもバインドされないことをユーザーに表示する。これはクラスターが動的ストレージをサポートしない(この場合ユーザーは対応するPVを作成するべき)、もしくはクラスターがストレージシステムを持っていない(この場合ユーザーはPVCを必要とする設定をデプロイできない)可能性があることを示す。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/storage/volume-pvc-datasource.md b/content/ja/docs/concepts/storage/volume-pvc-datasource.md index 7b6cb90601..fc742e558f 100644 --- a/content/ja/docs/concepts/storage/volume-pvc-datasource.md +++ b/content/ja/docs/concepts/storage/volume-pvc-datasource.md @@ -1,10 +1,10 @@ --- title: CSI Volume Cloning -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.15" state="alpha" >}} このドキュメントではKubernetesで既存のCSIボリュームの複製についてのコンセプトを説明します。このページを読む前にあらかじめ[ボリューム](/docs/concepts/storage/volumes)についてよく理解していることが望ましいです。 @@ -16,10 +16,10 @@ weight: 30 ``` -{{% /capture %}} -{{% capture body %}} + + ## イントロダクション @@ -61,4 +61,4 @@ spec: 新しいPVCが使用可能になると、複製されたPVCは他のPVCと同じように利用されます。またこの時点で新しく作成されたPVCは独立したオブジェクトであることが期待されます。元のdataSource PVCを考慮せず個別に利用、複製、スナップショット、削除できます。これはまた複製元が新しく作成された複製にリンクされておらず、新しく作成された複製に影響を与えずに変更または削除できることを意味します。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/storage/volume-snapshot-classes.md b/content/ja/docs/concepts/storage/volume-snapshot-classes.md index 829bde8a2e..0fd19e47be 100644 ---
a/content/ja/docs/concepts/storage/volume-snapshot-classes.md +++ b/content/ja/docs/concepts/storage/volume-snapshot-classes.md @@ -1,19 +1,19 @@ --- reviewers: title: VolumeSnapshotClass -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + このドキュメントでは、Kubernetesにおける`VolumeSnapshotClass`のコンセプトについて説明します。 関連する項目として、[Volumeのスナップショット](/docs/concepts/storage/volume-snapshots/)と[ストレージクラス](/docs/concepts/storage/storage-classes)も参照してください。 -{{% /capture %}} -{{% capture body %}} + + ## イントロダクション @@ -45,4 +45,4 @@ VolumeSnapshotClassは、VolumeSnapshotをプロビジョンするときに何 VolumeSnapshotClassは、そのクラスに属するVolumeSnapshotを指定するパラメータを持っています。 `snapshotter`に応じて様々なパラメータを使用できます。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/controllers/cron-jobs.md b/content/ja/docs/concepts/workloads/controllers/cron-jobs.md index 0520b8a97b..5e60e3501f 100644 --- a/content/ja/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/ja/docs/concepts/workloads/controllers/cron-jobs.md @@ -1,10 +1,10 @@ --- title: CronJob -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + _CronJob_ は時刻ベースのスケジュールによって[Job](/docs/concepts/workloads/controllers/jobs-run-to-completion/)を作成します。 @@ -17,9 +17,9 @@ _CronJob_ オブジェクトとは _crontab_ (cron table)ファイルでみら cronジョブを作成し、実行するインストラクション、または、cronジョブ仕様ファイルのサンプルについては、[Running automated tasks with cron jobs](/docs/tasks/job/automated-tasks-with-cron-jobs)をご覧ください。 -{{% /capture %}} -{{% capture body %}} + + ## CronJobの制限 @@ -43,4 +43,4 @@ Cannot determine if job needs to be started. Too many missed start time (> 100). CronJobはスケジュールに一致するJobの作成にのみ関与するのに対して、JobはJobが示すPod管理を担います。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/controllers/daemonset.md b/content/ja/docs/concepts/workloads/controllers/daemonset.md index 1edf7636ce..2f0210c014 100644 --- a/content/ja/docs/concepts/workloads/controllers/daemonset.md +++ b/content/ja/docs/concepts/workloads/controllers/daemonset.md @@ -1,11 +1,11 @@ --- reviewers: title: DaemonSet -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + _DaemonSet_ は全て(またはいくつか)のNodeが単一のPodのコピーを稼働させることを保証します。Nodeがクラスターに追加されるとき、PodがNode上に追加されます。Nodeがクラスターから削除されたとき、それらのPodはガーベージコレクターにより除去されます。DaemonSetの削除により、DaemonSetが作成したPodもクリーンアップします。 @@ -18,10 +18,10 @@ DaemonSetのいくつかの典型的な使用例は以下の通りです。 シンプルなケースとして、各タイプのデーモンにおいて、全てのNodeをカバーする1つのDaemonSetが使用されるケースがあります。 さらに複雑な設定では、単一のタイプのデーモン用ですが、異なるフラグや、異なるハードウェアタイプに対するメモリー、CPUリクエストを要求する複数のDaemonSetを使用するケースもあります。 -{{% /capture %}} -{{% capture body %}} + + ## DaemonSet Specの記述 @@ -164,4 +164,4 @@ DaemonSetは、Podの作成し、そのPodが停止されることのないプ フロントエンドのようなServiceのように、どのホスト上にPodが稼働するか制御するよりも、レプリカ数をスケールアップまたはスケールダウンしたりローリングアップデートする方が重要であるような、状態をもたないServiceに対してDeploymentを使ってください。 Podのコピーが全てまたは特定のホスト上で常に稼働していることが重要な場合や、他のPodの前に起動させる必要があるときにDaemonSetを使ってください。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/controllers/deployment.md b/content/ja/docs/concepts/workloads/controllers/deployment.md index 3146606573..68c1f2439f 100644 --- a/content/ja/docs/concepts/workloads/controllers/deployment.md +++ b/content/ja/docs/concepts/workloads/controllers/deployment.md @@ -5,11 +5,11 @@ feature: description: > Kubernetesはアプリケーションや設定への変更を段階的に行い、アプリケーションの状態を監視しながら、全てのインスタンスが同時停止しないようにします。更新に問題が起きたとき、Kubernetesは変更のロールバックを行います。進化を続けるDeploymentのエコシステムを活用してください。 -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + _Deployment_
コントローラーは[Pod](/ja/docs/concepts/workloads/pods/pod/)と[ReplicaSet](/ja/docs/concepts/workloads/controllers/replicaset/)の宣言的なアップデート機能を提供します。 @@ -19,10 +19,10 @@ _Deployment_ コントローラーは[Pod](/ja/docs/concepts/workloads/pods/pod/ Deploymentによって作成されたReplicaSetを管理しないでください。ユーザーのユースケースが下記の項目をカバーできていない場合はメインのKubernetesリポジトリーにイシューを作成することを検討してください。 {{< /note >}} -{{% /capture %}} -{{% capture body %}} + + ## ユースケース @@ -996,4 +996,4 @@ Deploymentのリビジョン履歴は、Deploymentが管理するReplicaSetに [`kubectl rolling-update`](/docs/reference/generated/kubectl/kubectl-commands#rolling-update)によって、同様の形式でPodとReplicationControllerを更新できます。しかしDeploymentの使用が推奨されます。なぜならDeploymentの作成は宣言的であり、ローリングアップデートが更新された後に過去のリビジョンにロールバックできるなど、いくつかの追加機能があります。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/controllers/garbage-collection.md b/content/ja/docs/concepts/workloads/controllers/garbage-collection.md index 0463849cd0..b7d2f544b3 100644 --- a/content/ja/docs/concepts/workloads/controllers/garbage-collection.md +++ b/content/ja/docs/concepts/workloads/controllers/garbage-collection.md @@ -1,16 +1,16 @@ --- title: ガベージコレクション -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + Kubernetesのガベージコレクターの役割は、かつてオーナーがいたが、現時点でもはやオーナーがいないようなオブジェクトの削除を行うことです。 -{{% /capture %}} -{{% capture body %}} + + ## オーナーとその従属オブジェクト @@ -134,16 +134,17 @@ Kubernetes1.7以前では、Deploymentに対するカスケード削除におい [#26120](https://github.com/kubernetes/kubernetes/issues/26120)にてイシューがトラックされています。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Design Doc 1](https://git.k8s.io/community/contributors/design-proposals/api-machinery/garbage-collection.md) [Design Doc 2](https://git.k8s.io/community/contributors/design-proposals/api-machinery/synchronous-garbage-collection.md) -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/controllers/replicaset.md b/content/ja/docs/concepts/workloads/controllers/replicaset.md index 3c20e295e6..a164182000 100644 --- a/content/ja/docs/concepts/workloads/controllers/replicaset.md +++ b/content/ja/docs/concepts/workloads/controllers/replicaset.md @@ -1,18 +1,18 @@ --- reviewers: title: ReplicaSet -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + ReplicaSetの目的は、どのような時でも安定したレプリカPodのセットを維持することです。これは、理想的なレプリカ数のPodが利用可能であることを保証するものとして使用されます。 -{{% /capture %}} -{{% capture body %}} + + ## ReplicaSetがどのように動くか @@ -312,4 +312,4 @@ ReplicaSetは[_ReplicationControllers_](/docs/concepts/workloads/controllers/rep この2つは、ReplicationControllerが[ラベルについてのユーザーガイド](/docs/concepts/overview/working-with-objects/labels/#label-selectors)に書かれているように、集合ベース(set-based)のセレクター要求をサポートしていないことを除いては、同じ目的を果たし、同じようにふるまいます。 このように、ReplicaSetはReplicationControllerよりも好まれます。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/controllers/statefulset.md b/content/ja/docs/concepts/workloads/controllers/statefulset.md index 90de9d1a40..9f01ece3e3 100644 --- a/content/ja/docs/concepts/workloads/controllers/statefulset.md +++ b/content/ja/docs/concepts/workloads/controllers/statefulset.md @@ -1,11 +1,11 @@ --- reviewers: title: StatefulSet -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + StatefulSetはステートフルなアプリケーションを管理するためのワークロードAPIです。 @@ -14,9 +14,9 @@ StatefulSetはKubernetes1.9において利用可能(GA)です。 {{< /note >}} {{< glossary_definition term_id="statefulset" length="all" >}} -{{% /capture %}} -{{% capture body %}} + + ## StatefulSetの使用 @@ -195,11 +195,12 @@
Kubernetes1.7とそれ以降のバージョンにおいて、StatefulSetの`.spe そのテンプレートを戻したあと、ユーザーはまたStatefulSetが異常状態で稼働しようとしていたPodをすべて削除する必要があります。StatefulSetはその戻されたテンプレートを使ってPodの再作成を始めます。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [ステートフルなアプリケーションのデプロイ](/docs/tutorials/stateful-application/basic-stateful-set/)の例を参考にしてください。 * [StatefulSetを使ったCassandraのデプロイ](/docs/tutorials/stateful-application/cassandra/)の例を参考にしてください。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/ja/docs/concepts/workloads/controllers/ttlafterfinished.md index 3c28fe25ea..f3d55d952c 100644 --- a/content/ja/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/ja/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -1,11 +1,11 @@ --- reviewers: title: 終了したリソースのためのTTLコントローラー(TTL Controller for Finished Resources) -content_template: templates/concept +content_type: concept weight: 65 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.12" state="alpha" >}} @@ -14,12 +14,12 @@ TTLコントローラーは現在[Job](/docs/concepts/workloads/controllers/jobs α版の免責事項: この機能は現在α版の機能で、[Feature Gate](/docs/reference/command-line-tools-reference/feature-gates/)の`TTLAfterFinished`を有効にすることで使用可能です。 -{{% /capture %}} -{{% capture body %}} + + ## TTLコントローラー @@ -45,12 +45,13 @@ TTLコントローラーが、TTL値が期限切れかそうでないかを決 Kubernetesにおいてタイムスキューを避けるために、全てのNode上でNTPの稼働を必須とします([#6159](https://github.com/kubernetes/kubernetes/issues/6159#issuecomment-93844058)を参照してください)。クロックは常に正しいものではありませんが、Node間におけるその差はとても小さいものとなります。TTLに0でない値をセットするときにこのリスクに対して注意してください。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Jobの自動クリーンアップ](/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically) [設計ドキュメント](https://github.com/kubernetes/community/blob/master/keps/sig-apps/0026-ttl-after-finish.md) -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/pods/init-containers.md b/content/ja/docs/concepts/workloads/pods/init-containers.md index 0f25a656b2..f23defc198 100644 --- a/content/ja/docs/concepts/workloads/pods/init-containers.md +++ b/content/ja/docs/concepts/workloads/pods/init-containers.md @@ -1,16 +1,16 @@ --- title: Initコンテナ(Init Containers) -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + このページでは、Initコンテナについて概観します。Initコンテナとは、アプリケーションコンテナの前に実行され、アプリケーションコンテナのイメージに存在しないセットアップスクリプトやユーティリティーを含んだ特別なコンテナです。 -{{% /capture %}} + この機能はKubernetes1.6からβ版の機能として存在しています。InitコンテナはPodSpec内で、アプリケーションの`containers`という配列と並べて指定されます。そのベータ版のアノテーション値はまだ扱われ、PodSpecのフィールド値を上書きします。しかしながら、それらはKubernetesバージョン1.6と1.7において廃止されました。Kubernetesバージョン1.8からはそのアノテーション値はサポートされず、PodSpecフィールドの値に変換する必要があります。 -{{% capture body %}} + ## Initコンテナを理解する 単一の[Pod](/ja/docs/concepts/workloads/pods/pod-overview/)は、Pod内に複数のコンテナを稼働させることができますが、Initコンテナもまた、アプリケーションコンテナが稼働する前に1つまたは複数稼働できます。 @@ -266,11 +266,12 @@ ApiServerのバージョン1.6.0かそれ以上のバージョンのクラスタ ApiServerとKubeletバージョン1.8.0かそれ以上のバージョンでは、α版とβ版のアノテーションは削除されており、廃止されたアノテーションは`.spec.initContainers`フィールドへの移行が必須となります。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Initコンテナを持っているPodの作成](/docs/tasks/configure-pod-container/configure-pod-initialization/#creating-a-pod-that-has-an-init-container) -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/pods/pod-lifecycle.md b/content/ja/docs/concepts/workloads/pods/pod-lifecycle.md index 07437b9847..65e4e10a00 100644 --- 
a/content/ja/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/ja/docs/concepts/workloads/pods/pod-lifecycle.md @@ -1,17 +1,17 @@ --- title: Podのライフサイクル -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + このページではPodのライフサイクルについて説明します。 -{{% /capture %}} -{{% capture body %}} + + ## PodのPhase @@ -317,10 +317,11 @@ spec: * NodeコントローラがPodの`phase`をFailedにします。 * Podがコントローラで作成されていた場合は、別の場所で再作成されます。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [attaching handlers to Container lifecycle events](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/)のハンズオンをやってみる @@ -328,4 +329,4 @@ spec: * [Container lifecycle hooks](/docs/concepts/containers/container-lifecycle-hooks/)についてもっと学ぶ -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/pods/pod-overview.md b/content/ja/docs/concepts/workloads/pods/pod-overview.md index 44388337c7..ad7d531cbd 100644 --- a/content/ja/docs/concepts/workloads/pods/pod-overview.md +++ b/content/ja/docs/concepts/workloads/pods/pod-overview.md @@ -1,18 +1,18 @@ --- title: Podについての概観(Pod Overview) -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 60 --- -{{% capture overview %}} + このページでは、Kubernetesのオブジェクトモデルにおいて、デプロイ可能な最小単位のオブジェクトである`Pod`に関して概観します。 -{{% /capture %}} -{{% capture body %}} + + ## Podについて理解する *Pod* は、Kubernetesアプリケーションの基本的な実行単位です。これは、作成またはデプロイするKubernetesオブジェクトモデルの中で最小かつ最も単純な単位です。Podは、{{< glossary_tooltip term_id="cluster" >}}で実行されているプロセスを表します。 @@ -108,11 +108,12 @@ spec: 全てのレプリカの現在の理想的な状態を指定するというよりも、Podテンプレートはクッキーの抜き型のようなものです。一度クッキーがカットされると、そのクッキーは抜き型から離れて関係が無くなります。そこにはいわゆる”量子もつれ”といったものはありません。テンプレートに対するその後の変更や新しいテンプレートへの切り替えは、すでに作成されたPod上には直接的な影響はありません。 同様に、ReplicationControllerによって作成されたPodは、変更後に直接更新されます。これはPodとの意図的な違いとなり、そのPodに属する全てのコンテナの現在の理想的な状態を指定します。このアプローチは根本的にシステムのセマンティクスを単純化し、機能の柔軟性を高めます。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Pod](/ja/docs/concepts/workloads/pods/pod/)について更に学びましょう * Podの振る舞いに関して学ぶには下記を参照してください * [Podの停止](/ja/docs/concepts/workloads/pods/pod/#podの終了) * [Podのライフサイクル](/ja/docs/concepts/workloads/pods/pod-lifecycle/) -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/pods/pod.md b/content/ja/docs/concepts/workloads/pods/pod.md index 48be657bc8..22b214b06f 100644 --- a/content/ja/docs/concepts/workloads/pods/pod.md +++ b/content/ja/docs/concepts/workloads/pods/pod.md @@ -1,18 +1,18 @@ --- reviewers: title: Pod -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + _Pod_ は、Kubernetesで作成および管理できる、デプロイ可能な最小のコンピューティング単位です。 -{{% /capture %}} -{{% capture body %}} + + ## Podとは @@ -187,4 +187,4 @@ spec.containers[0].securityContext.privileged: forbidden '<*>(0xc20b222db0)true' PodはKubernetes REST APIのトップレベルのリソースです。 APIオブジェクトの詳細については、[Pod APIオブジェクト](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core)を参照してください 。 -{{% /capture %}} + diff --git a/content/ja/docs/concepts/workloads/pods/podpreset.md b/content/ja/docs/concepts/workloads/pods/podpreset.md index 7638d63acb..89b7865e99 100644 --- a/content/ja/docs/concepts/workloads/pods/podpreset.md +++ b/content/ja/docs/concepts/workloads/pods/podpreset.md @@ -1,16 +1,16 @@ --- reviewers: title: Pod Preset -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + 
このページではPodPresetについて概観します。PodPresetは、Podの作成時にそのPodに対して、Secret、Volume、VolumeMountや環境変数など、特定の情報を注入するためのオブジェクトです。 -{{% /capture %}} -{{% capture body %}} + + ## PodPresetを理解する `PodPreset`はPodの作成時に追加のランタイム要求を注入するためのAPIリソースです。 @@ -51,10 +51,11 @@ PodPresetによるPodの変更を受け付けたくないようなインスタ 1. `PodPreset`に対する管理コントローラーを有効にします。これを行うための1つの方法として、API Serverの`--enable-admission-plugins`オプションの値に`PodPreset`を含む方法があります。Minikubeにおいては、クラスターの起動時に`--extra-config=apiserver.enable-admission-plugins=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,PodPreset`を追加することで可能になります。 1. ユーザーが使う予定のNamespaceにおいて、`PodPreset`オブジェクトを作成することによりPodPresetを定義します。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [PodPresetを使ったPodへのデータの注入](/docs/tasks/inject-data-application/podpreset/) -{{% /capture %}} + diff --git a/content/ja/docs/contribute/_index.md b/content/ja/docs/contribute/_index.md index 2f6f64fac2..fbbac548d4 100644 --- a/content/ja/docs/contribute/_index.md +++ b/content/ja/docs/contribute/_index.md @@ -1,19 +1,19 @@ --- -content_template: templates/concept +content_type: concept title: Kubernetesのドキュメントに貢献する linktitle: 貢献 main_menu: true weight: 80 --- -{{% capture overview %}} + ドキュメントやウェブサイトに貢献したい方、ご協力お待ちしています。 はじめての方、久しぶりの方、開発者でもエンドユーザでも、はたまたタイポを見逃せない方でもどなたでも貢献可能です。 ドキュメントのスタイルガイドについては[こちら](/docs/contribute/style/style-guide/)。 -{{% capture body %}} + ## コントリビューターの種類 @@ -60,4 +60,4 @@ weight: 80 - TwitterやStack Overflowといったオンラインフォーラムを通してKubernetesコミュニティに貢献したい方、または各地のミートアップやイベントについて知りたい方は[Kubernetes community site](/community/)へ。 - 機能開発に貢献したい方は、まずはじめに[Kubernetesコントリビューターチートシート](https://github.com/kubernetes/community/blob/master/contributors/guide/contributor-cheatsheet/README-ja.md)を読んでください。 -{{% /capture %}} + diff --git a/content/ja/docs/home/supported-doc-versions.md b/content/ja/docs/home/supported-doc-versions.md index d15db3875b..a4c9ac18ce 100644 --- a/content/ja/docs/home/supported-doc-versions.md +++ b/content/ja/docs/home/supported-doc-versions.md @@ -1,19 +1,19 @@ --- title: Kubernetesドキュメントがサポートしているバージョン -content_template: templates/concept +content_type: concept card: name: about weight: 10 title: ドキュメントがサポートしているバージョン --- -{{% capture overview %}} + 本ウェブサイトでは、現行版とその直前4バージョンのKubernetesドキュメントを含んでいます。 -{{% /capture %}} -{{% capture body %}} + + ## 現行版 @@ -24,6 +24,6 @@ card: {{< versions-other >}} -{{% /capture %}} + diff --git a/content/ja/docs/reference/_index.md b/content/ja/docs/reference/_index.md index 7cbe46514b..d6a8dfc828 100644 --- a/content/ja/docs/reference/_index.md +++ b/content/ja/docs/reference/_index.md @@ -3,16 +3,16 @@ title: リファレンス linkTitle: "リファレンス" main_menu: true weight: 70 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + 本セクションには、Kubernetesのドキュメントのリファレンスが含まれています。 -{{% /capture %}} -{{% capture body %}} + + ## APIリファレンス @@ -52,4 +52,4 @@ content_template: templates/concept Kubernetesの機能に関する設計ドキュメントのアーカイブです。[Kubernetesアーキテクチャ](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md) と[Kubernetesデザイン概要](https://git.k8s.io/community/contributors/design-proposals)から読み始めると良いでしょう。 -{{% /capture %}} + diff --git a/content/ja/docs/reference/command-line-tools-reference/feature-gates.md b/content/ja/docs/reference/command-line-tools-reference/feature-gates.md index 582d432e94..9500ad674c 100644 --- 
a/content/ja/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/ja/docs/reference/command-line-tools-reference/feature-gates.md @@ -1,16 +1,16 @@ --- title: フィーチャーゲート weight: 10 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + このページでは管理者がそれぞれのKubernetesコンポーネントで指定できるさまざまなフィーチャーゲートの概要について説明しています。 各機能におけるステージの説明については、[機能のステージ](#feature-stages)を参照してください。 -{{% /capture %}} -{{% capture body %}} + + ## 概要 フィーチャーゲートはアルファ機能または実験的機能を記述するkey=valueのペアのセットです。管理者は各コンポーネントで`--feature-gates`コマンドラインフラグを使用することで機能をオンまたはオフにできます。 @@ -398,7 +398,8 @@ GAになってからさらなる変更を加えることは現実的ではない - `WinDSR`: kube-proxyがWindows用のDSRロードバランサーを作成できるようにします。 - `WinOverlay`: kube-proxyをWindowsのオーバーレイモードで実行できるようにします。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Kubernetesの[非推奨ポリシー](/docs/reference/using-api/deprecation-policy/)では、機能とコンポーネントを削除するためのプロジェクトのアプローチを説明しています。 -{{% /capture %}} + diff --git a/content/ja/docs/reference/kubectl/cheatsheet.md b/content/ja/docs/reference/kubectl/cheatsheet.md index 9380b50c07..92c6c475c7 100644 --- a/content/ja/docs/reference/kubectl/cheatsheet.md +++ b/content/ja/docs/reference/kubectl/cheatsheet.md @@ -1,20 +1,20 @@ --- title: kubectlチートシート -content_template: templates/concept +content_type: concept card: name: reference weight: 30 --- -{{% capture overview %}} + [Kubectl概要](/docs/reference/kubectl/overview/)と[JsonPathガイド](/docs/reference/kubectl/jsonpath)も合わせてご覧ください。 このページは`kubectl`コマンドの概要です。 -{{% /capture %}} -{{% capture body %}} + + # kubectl - チートシート @@ -369,9 +369,10 @@ kubectlのログレベルは、レベルを表す整数が後に続く`-v`また `--v=8` | HTTPリクエストのコンテンツを表示します `--v=9` | HTTPリクエストのコンテンツをtruncationなしで表示します -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * kubectlについてより深く学びたい方は[kubectl概要](/docs/reference/kubectl/overview/)をご覧ください。 @@ -381,4 +382,4 @@ kubectlのログレベルは、レベルを表す整数が後に続く`-v`また * コミュニティ版[kubectlチートシート](https://github.com/dennyzhang/cheatsheet-kubernetes-A4)もご覧ください。 -{{% /capture %}} + diff --git a/content/ja/docs/setup/_index.md b/content/ja/docs/setup/_index.md index 0508e24afa..15592cbb05 100644 --- a/content/ja/docs/setup/_index.md +++ b/content/ja/docs/setup/_index.md @@ -3,7 +3,7 @@ no_issue: true title: はじめに main_menu: true weight: 20 -content_template: templates/concept +content_type: concept card: name: setup weight: 20 @@ -14,7 +14,7 @@ card: title: 本番環境 --- -{{% capture overview %}} + このセクションではKubernetesをセットアップして動かすための複数のやり方について説明します。 @@ -24,9 +24,9 @@ Kubernetesクラスタはローカルマシン、クラウド、オンプレの 簡潔に言えば、学習用としても、本番環境用としてもKubernetesクラスターを作成することができます。 -{{% /capture %}} -{{% capture body %}} + + ## 環境について学ぶ @@ -110,4 +110,4 @@ Kubernetesクラスタにおける抽象レイヤには {{< glossary_tooltip tex | [VMware](https://cloud.vmware.com/) | [VMware Cloud PKS](https://cloud.vmware.com/vmware-cloud-pks) |[VMware Enterprise PKS](https://cloud.vmware.com/vmware-enterprise-pks) | [VMware Enterprise PKS](https://cloud.vmware.com/vmware-enterprise-pks) | [VMware Essential PKS](https://cloud.vmware.com/vmware-essential-pks) | |[VMware Essential PKS](https://cloud.vmware.com/vmware-essential-pks) | [Z.A.R.V.I.S.](https://zarvis.ai/) | ✔ | | | | | | -{{% /capture %}} + diff --git a/content/ja/docs/setup/best-practices/certificates.md b/content/ja/docs/setup/best-practices/certificates.md index e28f82311b..9f315a9bce 100644 --- a/content/ja/docs/setup/best-practices/certificates.md +++ b/content/ja/docs/setup/best-practices/certificates.md @@ -1,19 +1,19 @@ --- title: PKI証明書とその要件 -content_template: 
templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + Kubernetes requires PKI certificates for authentication over TLS. If you install Kubernetes with [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/), the certificates that your cluster requires are automatically generated. You can also generate your own certificates -- for example, to keep your private keys more secure by not storing them on the API server. This page explains the certificates that your cluster requires. -{{% /capture %}} -{{% capture body %}} + + ## クラスタではどのように証明書が使われているのか @@ -140,4 +140,4 @@ These files are used as follows: [kubeadm]: /docs/reference/setup-tools/kubeadm/kubeadm/ [proxy]: /docs/tasks/access-kubernetes-api/configure-aggregation-layer/ -{{% /capture %}} + diff --git a/content/ja/docs/setup/best-practices/multiple-zones.md b/content/ja/docs/setup/best-practices/multiple-zones.md index 64e28a2762..ded3cad434 100644 --- a/content/ja/docs/setup/best-practices/multiple-zones.md +++ b/content/ja/docs/setup/best-practices/multiple-zones.md @@ -1,16 +1,16 @@ --- title: 複数のゾーンで動かす weight: 10 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + This page describes how to run a cluster in multiple zones. -{{% /capture %}} -{{% capture body %}} + + ## 始めに @@ -397,4 +397,4 @@ KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2b k KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a kubernetes/cluster/kube-down.sh ``` -{{% /capture %}} + diff --git a/content/ja/docs/setup/learning-environment/minikube.md b/content/ja/docs/setup/learning-environment/minikube.md index c626ae23ed..aeac7747d9 100644 --- a/content/ja/docs/setup/learning-environment/minikube.md +++ b/content/ja/docs/setup/learning-environment/minikube.md @@ -1,15 +1,15 @@ --- title: Minikubeを使用してローカル環境でKubernetesを動かす -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Minikubeはローカル環境でKubernetesを簡単に実行するためのツールです。Kubernetesを試したり日々の開発への使用を検討するユーザー向けに、PC上のVM内でシングルノードのKubernetesクラスタを実行することができます。 -{{% /capture %}} -{{% capture body %}} + + ## Minikubeの機能 @@ -441,4 +441,4 @@ Minikubeの詳細については、[proposal](https://git.k8s.io/community/contr コントリビューションや質問、コメントは歓迎・奨励されています! 
Minikubeの開発者は[Slack](https://kubernetes.slack.com)の#minikubeチャンネルにいます(Slackへの招待状は[こちら](http://slack.kubernetes.io/))。[kubernetes-dev Google Groupsメーリングリスト](https://groups.google.com/forum/#!forum/kubernetes-dev)もあります。メーリングリストに投稿する際は件名の最初に "minikube: " をつけてください。 -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/container-runtimes.md b/content/ja/docs/setup/production-environment/container-runtimes.md index 35796a43aa..a9604a7b59 100644 --- a/content/ja/docs/setup/production-environment/container-runtimes.md +++ b/content/ja/docs/setup/production-environment/container-runtimes.md @@ -1,16 +1,16 @@ --- title: CRIのインストール -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.6" state="stable" >}} Podのコンテナを実行するために、Kubernetesはコンテナランタイムを使用します。 様々なランタイムのインストール手順は次のとおりです。 -{{% /capture %}} -{{% capture body %}} + + {{< caution >}} @@ -253,4 +253,4 @@ systemctl start containerd 詳細については[Fraktiのクイックスタートガイド](https://github.com/kubernetes/frakti#quickstart)を参照してください。 -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/on-premises-vm/cloudstack.md b/content/ja/docs/setup/production-environment/on-premises-vm/cloudstack.md index 5b6bd9b3eb..1177bcdd94 100644 --- a/content/ja/docs/setup/production-environment/on-premises-vm/cloudstack.md +++ b/content/ja/docs/setup/production-environment/on-premises-vm/cloudstack.md @@ -1,9 +1,9 @@ --- title: Cloudstack -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + [CloudStack](https://cloudstack.apache.org/) is a software to build public and private clouds based on hardware virtualization principles (traditional IaaS). To deploy Kubernetes on CloudStack there are several possibilities depending on the Cloud being used and what images are made available. CloudStack also has a vagrant plugin available, hence Vagrant could be used to deploy Kubernetes either using the existing shell provisioner or using new Salt based recipes. @@ -11,9 +11,9 @@ content_template: templates/concept This guide uses a single [Ansible playbook](https://github.com/apachecloudstack/k8s), which is completely automated and can deploy Kubernetes on a CloudStack based Cloud using CoreOS images. The playbook, creates an ssh key pair, creates a security group and associated rules and finally starts coreOS instances configured via cloud-init. -{{% /capture %}} -{{% capture body %}} + + ## 前提条件 @@ -115,4 +115,4 @@ IaaS Provider | Config. 
Mgmt | OS | Networking | Docs -------------------- | ------------ | ------ | ---------- | --------------------------------------------- | ---------| ---------------------------- CloudStack | Ansible | CoreOS | flannel | [docs](/docs/setup/production-environment/on-premises-vm/cloudstack/) | | Community ([@Guiques](https://github.com/ltupin/)) -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/on-premises-vm/dcos.md b/content/ja/docs/setup/production-environment/on-premises-vm/dcos.md index 52e6a8b6c9..a41309d23b 100644 --- a/content/ja/docs/setup/production-environment/on-premises-vm/dcos.md +++ b/content/ja/docs/setup/production-environment/on-premises-vm/dcos.md @@ -1,9 +1,9 @@ --- title: DC/OS上のKubernetes -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Mesosphereは[DC/OS](https://mesosphere.com/product/)上にKubernetesを構築する為の簡単な選択肢を提供します。それは @@ -14,12 +14,12 @@ Mesosphereは[DC/OS](https://mesosphere.com/product/)上にKubernetesを構築 です。 -{{% /capture %}} -{{% capture body %}} + + ## 公式Mesosphereガイド DC/OS入門の正規のソースは[クイックスタートリポジトリ](https://github.com/mesosphere/dcos-kubernetes-quickstart)にあります。 -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/on-premises-vm/ovirt.md b/content/ja/docs/setup/production-environment/on-premises-vm/ovirt.md index 9f0c9356f0..167c55a244 100644 --- a/content/ja/docs/setup/production-environment/on-premises-vm/ovirt.md +++ b/content/ja/docs/setup/production-environment/on-premises-vm/ovirt.md @@ -1,15 +1,15 @@ --- title: oVirt -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + oVirt is a virtual datacenter manager that delivers powerful management of multiple virtual machines on multiple hosts. Using KVM and libvirt, oVirt can be installed on Fedora, CentOS, or Red Hat Enterprise Linux hosts to set up and manage your virtual data center. -{{% /capture %}} -{{% capture body %}} + + ## oVirtクラウドプロバイダーによる構築 @@ -65,4 +65,4 @@ IaaS Provider | Config. Mgmt | OS | Networking | Docs -------------------- | ------------ | ------ | ---------- | --------------------------------------------- | ---------| ---------------------------- oVirt | | | | [docs](/docs/setup/production-environment/on-premises-vm/ovirt/) | | Community ([@simon3z](https://github.com/simon3z)) -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/tools/kops.md b/content/ja/docs/setup/production-environment/tools/kops.md index ba2914f966..e0203ca097 100644 --- a/content/ja/docs/setup/production-environment/tools/kops.md +++ b/content/ja/docs/setup/production-environment/tools/kops.md @@ -1,10 +1,10 @@ --- title: kopsを使ったAWS上でのKubernetesのインストール -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + This quickstart shows you how to easily install a Kubernetes cluster on AWS. It uses a tool called [`kops`](https://github.com/kubernetes/kops). @@ -21,9 +21,9 @@ kops is an opinionated provisioning system: If your opinions differ from these you may prefer to build your own cluster using [kubeadm](/docs/admin/kubeadm/) as a building block. kops builds on the kubeadm work. 
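To make the flow concrete before the detailed steps below, here is a minimal sketch of a kops bootstrap. It assumes the `kops` binary is installed, DNS for the cluster name is already delegated, and that `KOPS_STATE_STORE` points at an S3 bucket you control; the bucket and domain names are placeholders rather than values from this guide.

```bash
# Tell kops where to keep cluster state (placeholder bucket name).
export KOPS_STATE_STORE=s3://example-kops-state-store

# Generate a cluster spec for one AWS availability zone (placeholder domain).
kops create cluster --zones=us-east-1a useast1.dev.example.com

# Inspect or adjust the generated spec, then apply it.
kops edit cluster useast1.dev.example.com
kops update cluster useast1.dev.example.com --yes
```

Once the instances converge, `kops validate cluster` reports whether the control plane and nodes are ready.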
-{{% /capture %}} -{{% capture body %}} + + ## クラスタの作成 @@ -224,12 +224,13 @@ See the [list of add-ons](/docs/concepts/cluster-administration/addons/) to expl * Slack Channel: [#kops-users](https://kubernetes.slack.com/messages/kops-users/) * [GitHub Issues](https://github.com/kubernetes/kops/issues) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/user-guide/kubectl-overview/). * Learn about `kops` [advanced usage](https://github.com/kubernetes/kops) * See the `kops` [docs](https://github.com/kubernetes/kops) section for tutorials, best practices and advanced configuration options. -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md b/content/ja/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md index 5393e15b91..b4ff9024f6 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/control-plane-flags.md @@ -1,10 +1,10 @@ --- title: kubeadmを使ったコントロールプレーンの設定のカスタマイズ -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="1.12" state="stable" >}} @@ -27,9 +27,9 @@ kubeadmの`ClusterConfiguration`オブジェクトはAPIServer、ControllerManag `kubeadm config print init-defaults`を実行し、選択したファイルに出力を保存することで、デフォルト値で`ClusterConfiguration`オブジェクトを生成できます。 {{< /note >}} -{{% /capture %}} -{{% capture body %}} + + ## APIServerフラグ @@ -80,4 +80,4 @@ scheduler: kubeconfig: /home/johndoe/kubeconfig.yaml ``` -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index a79e3367d1..f6dd643942 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -1,10 +1,10 @@ --- title: kubeadmを使用したシングルコントロールプレーンクラスターの作成 -content_template: templates/task +content_type: task weight: 30 --- -{{% capture overview %}} + **kubeadm** helps you bootstrap a minimum viable Kubernetes cluster that conforms to best practices. With kubeadm, your cluster should pass [Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification). Kubeadm also supports other cluster lifecycle functions, such as upgrades, downgrade, and managing [bootstrap tokens](/ja/docs/reference/access-authn-authz/bootstrap-tokens/). @@ -53,9 +53,10 @@ timeframe; which also applies to `kubeadm`. | v1.15.x | June 2019 | March 2020 | | v1.16.x | September 2019 | June 2020 | -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + - One or more machines running a deb/rpm-compatible OS, for example Ubuntu or CentOS - 2 GB or more of RAM per machine. Any less leaves little room for your @@ -64,9 +65,9 @@ timeframe; which also applies to `kubeadm`. - Full network connectivity among all machines in the cluster. A public or private network is fine. 
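As a compact preview of the objectives that follow, here is a sketch of the typical bootstrap, under the assumption that `kubeadm`, `kubelet`, and a container runtime are already installed on a host meeting the requirements above:

```bash
# Initialize the control plane; the CIDR shown matches Calico's default
# (use whatever CIDR your chosen pod network add-on expects).
sudo kubeadm init --pod-network-cidr=192.168.0.0/16

# Give your regular user a working kubectl configuration.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Install a pod network add-on, then join workers using the command
# that `kubeadm init` printed, e.g.:
#   kubeadm join <control-plane-host>:6443 --token <token> \
#       --discovery-token-ca-cert-hash sha256:<hash>
```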
-{{% /capture %}} -{{% capture steps %}} + + ## 目的 diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/ha-topology.md b/content/ja/docs/setup/production-environment/tools/kubeadm/ha-topology.md index 429a37f440..ac094e4a92 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/ha-topology.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/ha-topology.md @@ -1,10 +1,10 @@ --- title: Options for Highly Available topology -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + This page explains the two options for configuring the topology of your highly available (HA) Kubernetes clusters. @@ -15,9 +15,9 @@ You can set up an HA cluster: You should carefully consider the advantages and disadvantages of each topology before setting up an HA cluster. -{{% /capture %}} -{{% capture body %}} + + ## Stacked etcd topology @@ -60,10 +60,11 @@ A minimum of three hosts for control plane nodes and three hosts for etcd nodes ![External etcd topology](/images/kubeadm/kubeadm-ha-topology-external-etcd.svg) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [Set up a highly available cluster with kubeadm](/ja/docs/setup/production-environment/tools/kubeadm/high-availability/) -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/high-availability.md b/content/ja/docs/setup/production-environment/tools/kubeadm/high-availability.md index c74e4b806c..b9e82a7838 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/high-availability.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/high-availability.md @@ -1,10 +1,10 @@ --- title: kubeadmを使用した高可用性クラスターの作成 -content_template: templates/task +content_type: task weight: 60 --- -{{% capture overview %}} + このページでは、kubeadmを使用して、高可用性クラスターを作成する、2つの異なるアプローチを説明します: @@ -23,9 +23,10 @@ alpha feature gateである`HighAvailability`はv1.12で非推奨となり、v1. このページはクラウド上でクラスターを構築することには対応していません。ここで説明されているどちらのアプローチも、クラウド上で、LoadBalancerタイプのServiceオブジェクトや、動的なPersistentVolumeを利用して動かすことはできません。 {{< /caution >}} -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + どちらの方法でも、以下のインフラストラクチャーが必要です: @@ -44,9 +45,9 @@ alpha feature gateである`HighAvailability`はv1.12で非推奨となり、v1. 
以下の例では、CalicoをPodネットワーキングプロバイダーとして使用します。別のネットワーキングプロバイダーを使用する場合、必要に応じてデフォルトの値を変更してください。 {{< /note >}} -{{% /capture %}} -{{% capture steps %}} + + ## 両手順における最初のステップ @@ -299,4 +300,4 @@ Podネットワークをインストールするには、[こちらの手順に `kubeadm init`コマンドから返されたコマンドを利用して、workerノードをクラスターに参加させることが可能です。workerノードには、`--experimental-control-plane`フラグを追加する必要はありません。 -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index 426ca84b25..07d23909cd 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -1,6 +1,6 @@ --- title: kubeadmのインストール -content_template: templates/task +content_type: task weight: 20 card: name: setup @@ -8,14 +8,15 @@ card: title: kubeadmセットアップツールのインストール --- -{{% capture overview %}} + このページでは`kubeadm`コマンドをインストールする方法を示します。このインストール処理実行後にkubeadmを使用してクラスターを作成する方法については、[kubeadmを使用したシングルマスタークラスターの作成](/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/)を参照してください。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * 次のいずれかが動作しているマシンが必要です - Ubuntu 16.04+ @@ -32,9 +33,9 @@ card: * マシン内の特定のポートが開いていること。詳細は[ここ](#必須ポートの確認)を参照してください。 * Swapがオフであること。kubeletが正常に動作するためにはswapは**必ず**オフでなければなりません。 -{{% /capture %}} -{{% capture steps %}} + + ## MACアドレスとproduct_uuidが全てのノードでユニークであることの検証 @@ -269,8 +270,9 @@ CRI-Oやcontainerdといった他のコンテナランタイムのcgroup driver kubeadmで問題が発生した場合は、[トラブルシューティング](/ja/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/)を参照してください。 -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + * [kubeadmを使用したシングルコントロールプレーンクラスターの作成](/ja/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md b/content/ja/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md index b53e0462b9..edf95ce712 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md @@ -1,10 +1,10 @@ --- title: kubeadmを使用したクラスター内の各kubeletの設定 -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="1.11" state="stable" >}} @@ -24,9 +24,9 @@ characteristics of a given machine, such as OS, storage, and networking. You can of your kubelets manually, but [kubeadm now provides a `KubeletConfiguration` API type for managing your kubelet configurations centrally](#configure-kubelets-using-kubeadm). -{{% /capture %}} -{{% capture body %}} + + ## Kubeletの設定パターン @@ -197,4 +197,4 @@ The DEB and RPM packages shipped with the Kubernetes releases are: | `kubernetes-cni` | Installs the official CNI binaries into the `/opt/cni/bin` directory. | | `cri-tools` | Installs the `/usr/bin/crictl` binary from the [cri-tools git repository](https://github.com/kubernetes-incubator/cri-tools). 
| -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/self-hosting.md b/content/ja/docs/setup/production-environment/tools/kubeadm/self-hosting.md index da61fca3f9..08f9efe0b8 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/self-hosting.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/self-hosting.md @@ -1,10 +1,10 @@ --- title: Configuring your kubernetes cluster to self-host the control plane -content_template: templates/concept +content_type: concept weight: 100 --- -{{% capture overview %}} + ### Self-hosting the Kubernetes control plane {#self-hosting} @@ -17,9 +17,9 @@ configured in the kubelet via static files. To create a self-hosted cluster see the [kubeadm alpha selfhosting pivot](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-selfhosting) command. -{{% /capture %}} -{{% capture body %}} + + #### Caveats @@ -65,4 +65,4 @@ In summary, `kubeadm alpha selfhosting` works as follows: 1. When the original static control plane stops, the new self-hosted control plane is able to bind to listening ports and become active. -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md b/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md index c0283901b2..90725de1d4 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md @@ -1,10 +1,10 @@ --- title: kubeadmを使用した高可用性etcdクラスターの作成 -content_template: templates/task +content_type: task weight: 70 --- -{{% capture overview %}} + Kubeadm defaults to running a single member etcd cluster in a static pod managed by the kubelet on the control plane node. This is not a high availability setup @@ -13,9 +13,10 @@ becoming unavailable. This task walks through the process of creating a high availability etcd cluster of three members that can be used as an external etcd when using kubeadm to set up a kubernetes cluster. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Three hosts that can talk to each other over ports 2379 and 2380. This document assumes these default ports. However, they are configurable through @@ -26,9 +27,9 @@ when using kubeadm to set up a kubernetes cluster. [toolbox]: /docs/setup/production-environment/tools/kubeadm/install-kubeadm/ -{{% /capture %}} -{{% capture steps %}} + + ## クラスターの構築 @@ -251,14 +252,15 @@ this example. - Set `${ETCD_TAG}` to the version tag of your etcd image. For example `v3.2.24`. - Set `${HOST0}`to the IP address of the host you are testing. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Once you have a working 3 member etcd cluster, you can continue setting up a highly available control plane using the [external etcd method with kubeadm](/ja/docs/setup/production-environment/tools/kubeadm/high-availability/). 
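For reference, a concrete form of that check, assuming the certificates were written under `/etc/kubernetes/pki/etcd` as in this task and that `${ETCD_TAG}` and `${HOST0}` are set as just described, might look like this (the flags are the etcdctl v2 style used by etcd images of this era; the image repository may differ by Kubernetes release):

```bash
docker run --rm -it \
  --net host \
  -v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl \
  --cert-file /etc/kubernetes/pki/etcd/peer.crt \
  --key-file /etc/kubernetes/pki/etcd/peer.key \
  --ca-file /etc/kubernetes/pki/etcd/ca.crt \
  --endpoints https://${HOST0}:2379 cluster-health

# A healthy three-member cluster ends its output with a line similar to:
#   cluster is healthy
```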
-{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/ja/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md index 0021ac6cee..669cc3a302 100644 --- a/content/ja/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md +++ b/content/ja/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md @@ -1,10 +1,10 @@ --- title: kubeadmのトラブルシューティング -content_template: templates/concept +content_type: concept weight: 90 --- -{{% capture overview %}} + As with any program, you might run into an error installing or running kubeadm. This page lists some common failure scenarios and have provided steps that can help you understand and fix the problem. @@ -18,9 +18,9 @@ If your problem is not listed below, please follow the following steps: - If you are unsure about how kubeadm works, you can ask on [Slack](http://slack.k8s.io/) in #kubeadm, or open a question on [StackOverflow](https://stackoverflow.com/questions/tagged/kubernetes). Please include relevant tags like `#kubernetes` and `#kubeadm` so folks can help you. -{{% /capture %}} -{{% capture body %}} + + ## インストール中に`ebtables`もしくは他の似たような実行プログラムが見つからない @@ -318,4 +318,4 @@ There are at least two workarounds: ```bash kubectl taint nodes NODE_NAME role.kubernetes.io/master:NoSchedule- ``` -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/tools/kubespray.md b/content/ja/docs/setup/production-environment/tools/kubespray.md index 624887bd44..921ab0e3d8 100644 --- a/content/ja/docs/setup/production-environment/tools/kubespray.md +++ b/content/ja/docs/setup/production-environment/tools/kubespray.md @@ -1,10 +1,10 @@ --- title: kubesprayを使ったオンプレミス/クラウドプロバイダへのKubernetesのインストール -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + This quickstart helps to install a Kubernetes cluster hosted on GCE, Azure, OpenStack, AWS, vSphere, Oracle Cloud Infrastructure (Experimental) or Baremetal with [Kubespray](https://github.com/kubernetes-incubator/kubespray). @@ -23,9 +23,9 @@ Kubespray is a composition of [Ansible](http://docs.ansible.com/) playbooks, [in To choose a tool which best fits your use case, read [this comparison](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/comparisons.md) to [kubeadm](/docs/admin/kubeadm/) and [kops](../kops). -{{% /capture %}} -{{% capture body %}} + + ## クラスタの作成 @@ -112,10 +112,11 @@ When running the reset playbook, be sure not to accidentally target your product * Slack Channel: [#kubespray](https://kubernetes.slack.com/messages/kubespray/) * [GitHub Issues](https://github.com/kubernetes-incubator/kubespray/issues) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + Check out planned work on Kubespray's [roadmap](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/roadmap.md). 
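For orientation, the Ansible-driven deployment described above usually reduces to a few commands. This is a hedged outline: the inventory path, node IPs, and `hosts.yml` filename are placeholders, and the exact inventory-builder invocation varies between Kubespray releases.

```bash
# Start from the sample inventory shipped with the repository.
cp -r inventory/sample inventory/mycluster

# Generate a hosts file from your node IPs (placeholder addresses).
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.yml \
  python3 contrib/inventory_builder/inventory.py ${IPS[@]}

# Run the main playbook to deploy the cluster.
ansible-playbook -i inventory/mycluster/hosts.yml \
  --become --become-user=root cluster.yml
```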
-{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/turnkey/aws.md b/content/ja/docs/setup/production-environment/turnkey/aws.md index 5367103984..728a6f8ccc 100644 --- a/content/ja/docs/setup/production-environment/turnkey/aws.md +++ b/content/ja/docs/setup/production-environment/turnkey/aws.md @@ -1,15 +1,16 @@ --- title: AWS EC2上でKubernetesを動かす -content_template: templates/task +content_type: task --- -{{% capture overview %}} + このページでは、AWS上でKubernetesクラスターをインストールする方法について説明します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + AWS上でKubernetesクラスターを作成するには、AWSからアクセスキーIDおよびシークレットアクセスキーを入手する必要があります。 @@ -25,9 +26,9 @@ AWS上でKubernetesクラスターを作成するには、AWSからアクセス * [KubeOne](https://github.com/kubermatic/kubeone)は可用性の高いKubernetesクラスターを作成、アップグレード、管理するための、オープンソースのライフサイクル管理ツールです。 -{{% /capture %}} -{{% capture steps %}} + + ## クラスターの始まり @@ -84,4 +85,4 @@ AWS | KubeOne | Ubuntu, CoreOS, CentOS | canal, weave Kubernetesクラスターの利用と管理に関する詳細は、[Kubernetesドキュメント](/ja/docs/)を参照してください。 -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/turnkey/gce.md b/content/ja/docs/setup/production-environment/turnkey/gce.md index a0d590fd57..b00d34ade6 100644 --- a/content/ja/docs/setup/production-environment/turnkey/gce.md +++ b/content/ja/docs/setup/production-environment/turnkey/gce.md @@ -1,15 +1,16 @@ --- title: Google Compute Engine上でKubernetesを動かす -content_template: templates/task +content_type: task --- -{{% capture overview %}} + The example below creates a Kubernetes cluster with 3 worker node Virtual Machines and a master Virtual Machine (i.e. 4 VMs in your cluster). This cluster is set up and controlled from your workstation (or wherever you find convenient). -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + If you want a simplified getting started experience and GUI for managing clusters, please consider trying [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) for hosted cluster installation and management. @@ -31,9 +32,9 @@ If you want to use custom binaries or pure open source Kubernetes, please contin 1. Make sure you can start up a GCE VM from the command line. At least make sure you can do the [Create an instance](https://cloud.google.com/compute/docs/instances/#startinstancegcloud) part of the GCE Quickstart. 1. Make sure you can SSH into the VM without interactive prompts. See the [Log in to the instance](https://cloud.google.com/compute/docs/instances/#sshing) part of the GCE Quickstart. -{{% /capture %}} -{{% capture steps %}} + + ## クラスターの起動 @@ -220,4 +221,4 @@ GCE | Saltstack | Debian | GCE | [docs](/ja/docs/set Please see the [Kubernetes docs](/ja/docs/) for more details on administering and using a Kubernetes cluster. -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/turnkey/stackpoint.md b/content/ja/docs/setup/production-environment/turnkey/stackpoint.md index 8a86f13866..47711bf4d8 100644 --- a/content/ja/docs/setup/production-environment/turnkey/stackpoint.md +++ b/content/ja/docs/setup/production-environment/turnkey/stackpoint.md @@ -1,15 +1,15 @@ --- title: Stackpoint.ioを利用して複数のクラウド上でKubernetesを動かす -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + [StackPointCloud](https://stackpoint.io/) is the universal control plane for Kubernetes Anywhere. 
StackPointCloud allows you to deploy and manage a Kubernetes cluster to the cloud provider of your choice in 3 steps using a web-based interface. -{{% /capture %}} -{{% capture body %}} + + ## AWS @@ -184,4 +184,4 @@ To create a Kubernetes cluster on Packet, you will need a Packet API Key. For information on using and managing a Kubernetes cluster on Packet, consult [the official documentation](/ja/docs/home/). -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/ja/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index c2406c7cc9..ab0181fd49 100644 --- a/content/ja/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/ja/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -1,16 +1,16 @@ --- title: Intro to Windows support in Kubernetes -content_template: templates/concept +content_type: concept weight: 65 --- -{{% capture overview %}} + Windows applications constitute a large portion of the services and applications that run in many organizations. [Windows containers](https://aka.ms/windowscontainers) provide a modern way to encapsulate processes and package dependencies, making it easier to use DevOps practices and follow cloud native patterns for Windows applications. Kubernetes has become the defacto standard container orchestrator, and the release of Kubernetes 1.14 includes production support for scheduling Windows containers on Windows nodes in a Kubernetes cluster, enabling a vast ecosystem of Windows applications to leverage the power of Kubernetes. Organizations with investments in Windows-based applications and Linux-based applications don't have to look for separate orchestrators to manage their workloads, leading to increased operational efficiencies across their deployments, regardless of operating system. -{{% /capture %}} -{{% capture body %}} + + ## Windows containers in Kubernetes @@ -530,9 +530,10 @@ If filing a bug, please include detailed information about how to reproduce the * [Relevant logs](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs) * Tag the issue sig/windows by commenting on the issue with `/sig windows` to bring it to a SIG-Windows member's attention -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + We have a lot of features in our roadmap. An abbreviated high level list is included below, but we encourage you to view our [roadmap project](https://github.com/orgs/kubernetes/projects/8) and help us make Windows support better by [contributing](https://github.com/kubernetes/community/blob/master/sig-windows/). 
@@ -584,4 +585,4 @@ Kubeadm is becoming the de facto standard for users to deploy a Kubernetes clust * More CNIs * More Storage Plugins -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md b/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md index 60429629af..44e926eec3 100644 --- a/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md +++ b/content/ja/docs/setup/production-environment/windows/user-guide-windows-containers.md @@ -1,16 +1,16 @@ --- title: Guide for scheduling Windows containers in Kubernetes -content_template: templates/concept +content_type: concept weight: 75 --- -{{% capture overview %}} + Windows applications constitute a large portion of the services and applications that run in many organizations. This guide walks you through the steps to configure and deploy a Windows container in Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## Objectives @@ -134,4 +134,4 @@ tolerations: effect: "NoSchedule" ``` -{{% /capture %}} + diff --git a/content/ja/docs/setup/production-environment/windows/user-guide-windows-nodes.md b/content/ja/docs/setup/production-environment/windows/user-guide-windows-nodes.md index da91d2c18f..29035e15d9 100644 --- a/content/ja/docs/setup/production-environment/windows/user-guide-windows-nodes.md +++ b/content/ja/docs/setup/production-environment/windows/user-guide-windows-nodes.md @@ -1,19 +1,19 @@ --- title: Guide for adding Windows Nodes in Kubernetes -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + The Kubernetes platform can now be used to run both Linux and Windows containers. One or more Windows nodes can be registered to a cluster. This guide shows how to: * Register a Windows node to the cluster * Configure networking so pods on Linux and Windows can communicate -{{% /capture %}} -{{% capture body %}} + + ## Before you begin @@ -261,4 +261,4 @@ Kubeadm is becoming the de facto standard for users to deploy a Kubernetes clust Now that you've configured a Windows worker in your cluster to run Windows containers you may want to add one or more Linux nodes as well to run Linux containers. You are now ready to schedule Windows containers on your cluster. 
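As a quick first scheduling test once a Windows worker has joined, a pod pinned to Windows nodes might look like the sketch below. This is not this guide's canonical manifest; the image tag and the OS label key (`beta.kubernetes.io/os` on clusters of this vintage, `kubernetes.io/os` later) are assumptions to adjust for your versions.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: iis-smoke-test
spec:
  nodeSelector:
    # Ensure the pod lands on a Windows node.
    "beta.kubernetes.io/os": windows
  containers:
  - name: iis
    # Pick the servercore tag matching your Windows node's OS version.
    image: mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019
    ports:
    - containerPort: 80
```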
-{{% /capture %}} + diff --git a/content/ja/docs/setup/release/building-from-source.md b/content/ja/docs/setup/release/building-from-source.md index e9fc081a25..21f056ce39 100644 --- a/content/ja/docs/setup/release/building-from-source.md +++ b/content/ja/docs/setup/release/building-from-source.md @@ -1,18 +1,18 @@ --- title: リリースのビルド -content_template: templates/concept +content_type: concept card: name: download weight: 20 title: リリースのビルド --- -{{% capture overview %}} + ソースコードからリリースをビルドすることもできますし、既にビルドされたリリースをダウンロードすることも可能です。Kubernetesを開発する予定が無いのであれば、[リリースノート](/docs/setup/release/notes/)内にて既にビルドされたバージョンを使用することを推奨します。 Kubernetes のソースコードは[kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)のリポジトリからダウンロードすることが可能です。 -{{% /capture %}} -{{% capture body %}} + + ## ソースからのビルド 単にソースからリリースをビルドするだけであれば、完全なGOの環境を準備する必要はなく、全てのビルドはDockerコンテナの中で行われます。 @@ -27,4 +27,4 @@ make release リリース手段の詳細な情報はkubernetes/kubernetes内の[`build`](http://releases.k8s.io/{{< param "githubbranch" >}}/build/)ディレクトリを参照して下さい。 -{{% /capture %}} + diff --git a/content/ja/docs/setup/release/version-skew-policy.md b/content/ja/docs/setup/release/version-skew-policy.md index 4573e740a6..dda92c2597 100644 --- a/content/ja/docs/setup/release/version-skew-policy.md +++ b/content/ja/docs/setup/release/version-skew-policy.md @@ -1,14 +1,14 @@ --- title: Kubernetesバージョンとバージョンスキューサポートポリシー -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + このドキュメントでは、さまざまなKubernetesコンポーネント間でサポートされる最大のバージョンの差異(バージョンスキュー)について説明します。特定のクラスターデプロイツールは、バージョンの差異に追加の制限を加える場合があります。 -{{% /capture %}} -{{% capture body %}} + + ## サポートされるバージョン diff --git a/content/ja/docs/tasks/_index.md b/content/ja/docs/tasks/_index.md index 5ff0023dc0..758500260b 100644 --- a/content/ja/docs/tasks/_index.md +++ b/content/ja/docs/tasks/_index.md @@ -2,19 +2,19 @@ title: タスク main_menu: true weight: 50 -content_template: templates/concept +content_type: concept --- {{< toc >}} -{{% capture overview %}} + Kubernetesドキュメントのこのセクションには、個々のタスクの実行方法を示すページが含まれています。 タスクページは、通常、短い手順を実行することにより、1つのことを行う方法を示します。 -{{% /capture %}} -{{% capture body %}} + + ## Web UI (ダッシュボード) @@ -76,10 +76,11 @@ StatefulSetのスケーリング、削除、デバッグなど、ステートフ クラスター内のスケジュール可能なリソースとしてHuge Pageを構成およびスケジュールします。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + タスクページを作成する場合は、[ドキュメントのPull Requestの作成](/docs/home/contribute/create-pull-request/)を参照してください。 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md b/content/ja/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md index d6f2def08b..fa5cc9f976 100644 --- a/content/ja/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md +++ b/content/ja/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md @@ -1,25 +1,26 @@ --- title: 共有ボリュームを使用して同じPod内のコンテナ間で通信する -content_template: templates/task +content_type: task weight: 110 --- -{{% capture overview %}} + このページでは、ボリュームを使用して、同じPodで実行されている2つのコンテナ間で通信する方法を示します。 コンテナ間で[プロセス名前空間を共有する](/ja/docs/tasks/configure-pod-container/share-process-namespace/)ことにより、プロセスが通信できるようにする方法も参照してください。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## 2つのコンテナを実行するPodの作成 @@ -105,10 +106,10 @@ debianコンテナがnginxルートディレクトリに`index.html`ファイル Hello from the 
debian container -{{% /capture %}} -{{% capture discussion %}} + + ## 議論 @@ -121,10 +122,11 @@ Podが複数のコンテナを持つことができる主な理由は、プラ この演習のボリュームは、コンテナがポッドの寿命中に通信する方法を提供します。 Podを削除して再作成すると、共有ボリュームに保存されているデータはすべて失われます。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [複合コンテナのパターン](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns)の詳細 @@ -138,7 +140,7 @@ Podを削除して再作成すると、共有ボリュームに保存されて * [Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core)を参照 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md b/content/ja/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md index fd5784a093..47af3be178 100644 --- a/content/ja/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md +++ b/content/ja/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md @@ -1,6 +1,6 @@ --- title: 複数のクラスターへのアクセスを設定する -content_template: templates/task +content_type: task weight: 30 card: name: tasks @@ -8,7 +8,7 @@ card: --- -{{% capture overview %}} + ここでは、設定ファイルを使って複数のクラスターにアクセスする方法を紹介します。クラスター、ユーザー、contextの情報を一つ以上の設定ファイルにまとめることで、`kubectl config use-context`のコマンドを使ってクラスターを素早く切り替えることができます。 @@ -16,15 +16,16 @@ card: クラスターへのアクセスを設定するファイルを、*kubeconfig* ファイルと呼ぶことがあります。これは設定ファイルの一般的な呼び方です。`kubeconfig`という名前のファイルが存在するわけではありません。 {{< /note >}} -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## クラスター、ユーザー、contextを設定する @@ -325,11 +326,11 @@ Windows PowerShell $Env:KUBECONFIG=$ENV:KUBECONFIG_SAVED ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [kubeconfigファイルを使ってクラスターへのアクセスを管理する](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) * [kubectl config](/docs/reference/generated/kubectl/kubectl-commands#config) -{{% /capture %}} \ No newline at end of file diff --git a/content/ja/docs/tasks/access-application-cluster/connecting-frontend-backend.md b/content/ja/docs/tasks/access-application-cluster/connecting-frontend-backend.md index 9ff0a60455..af02efe7cd 100644 --- a/content/ja/docs/tasks/access-application-cluster/connecting-frontend-backend.md +++ b/content/ja/docs/tasks/access-application-cluster/connecting-frontend-backend.md @@ -1,38 +1,40 @@ --- title: Serviceを使用してフロントエンドをバックエンドに接続する -content_template: templates/tutorial +content_type: tutorial weight: 70 --- -{{% capture overview %}} + このタスクでは、フロントエンドとバックエンドのマイクロサービスを作成する方法を示します。 バックエンドのマイクロサービスは挨拶です。 フロントエンドとバックエンドは、Kubernetes {{< glossary_tooltip term_id="service" >}}オブジェクトを使用して接続されます。 -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * {{< glossary_tooltip term_id="deployment" >}}オブジェクトを使用してマイクロサービスを作成および実行します。 * フロントエンドを経由してトラフィックをバックエンドにルーティングします。 * Serviceオブジェクトを使用して、フロントエンドアプリケーションをバックエンドアプリケーションに接続します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * このタスクでは[Serviceで外部ロードバランサー](/docs/tasks/access-application-cluster/create-external-load-balancer/)を使用しますが、外部ロードバランサーの使用がサポートされている環境である必要があります。 ご使用の環境がこれをサポートしていない場合は、代わりにタイプ[NodePort](/ja/docs/concepts/services-networking/service/#nodeport)のServiceを使用できます。 -{{% /capture %}} -{{% capture lessoncontent %}} + + ### Deploymentを使用したバックエンドの作成 @@ -184,14 +186,15 @@ curl http://${EXTERNAL_IP} 
# これを前に見たEXTERNAL-IPに置き換えま {"message":"Hello"} ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Service](/ja/docs/concepts/services-networking/service/)の詳細 * [ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/)の詳細 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/access-application-cluster/service-access-application-cluster.md b/content/ja/docs/tasks/access-application-cluster/service-access-application-cluster.md index 48be31fdb4..004bf8ab37 100644 --- a/content/ja/docs/tasks/access-application-cluster/service-access-application-cluster.md +++ b/content/ja/docs/tasks/access-application-cluster/service-access-application-cluster.md @@ -1,34 +1,36 @@ --- title: Serviceを利用したクラスター内のアプリケーションへのアクセス -content_template: templates/tutorial +content_type: tutorial weight: 60 --- -{{% capture overview %}} + ここでは、クラスター内で稼働しているアプリケーションに外部からアクセスするために、KubernetesのServiceオブジェクトを作成する方法を紹介します。 例として、2つのインスタンスから成るアプリケーションへのロードバランシングを扱います。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * 2つのHellow Worldアプリケーションを稼働させる。 * Nodeのポートを公開するServiceオブジェクトを作成する。 * 稼働しているアプリケーションにアクセスするためにServiceオブジェクトを使用する。 -{{% /capture %}} -{{% capture lessoncontent %}} + + ## 2つのPodから成るアプリケーションのServiceを作成 @@ -118,10 +120,11 @@ weight: 60 [service configuration file](/ja/docs/concepts/services-networking/service/) を使用してServiceを作成することもできます。 -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + Serviceを削除するには、以下のコマンドを実行します: @@ -131,12 +134,13 @@ Hello Worldアプリケーションが稼働しているDeployment、ReplicaSet kubectl delete deployment hello-world -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + 詳細は [serviceを利用してアプリケーションと接続する](/docs/concepts/services-networking/connect-applications-service/) を確認してください。 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/administer-cluster/developing-cloud-controller-manager.md b/content/ja/docs/tasks/administer-cluster/developing-cloud-controller-manager.md index ea15264c78..e082c41219 100644 --- a/content/ja/docs/tasks/administer-cluster/developing-cloud-controller-manager.md +++ b/content/ja/docs/tasks/administer-cluster/developing-cloud-controller-manager.md @@ -1,9 +1,9 @@ --- title: クラウドコントローラーマネージャーの開発 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.11" state="beta" >}} 今後のリリースで、クラウドコントローラーマネージャーはKubernetesを任意のクラウドと統合するための良い方法となります。これによりクラウドプロバイダーはKubernetesのコアリリースサイクルから独立して機能を開発できるようになります。 @@ -15,10 +15,10 @@ content_template: templates/concept 実装の詳細をもう少し掘り下げてみましょう。すべてのクラウドコントローラーマネージャーはKubernetesコアからパッケージをインポートします。唯一の違いは、各プロジェクトが利用可能なクラウドプロバイダーの情報(グローバル変数)が更新される場所である[cloudprovider.RegisterCloudProvider](https://github.com/kubernetes/cloud-provider/blob/master/plugins.go#L56-L66)を呼び出すことによって独自のクラウドプロバイダーを登録する点です。 -{{% /capture %}} -{{% capture body %}} + + ## 開発 @@ -36,4 +36,4 @@ Kubernetesには登録されていない独自のクラウドプロバイダー Kubernetesに登録されているクラウドプロバイダーであれば、[Daemonset](https://kubernetes.io/examples/admin/cloud/ccm-example.yaml) を使ってあなたのクラスターで動かすことができます。詳細については[Kubernetesクラウドコントローラーマネージャードキュメント](/docs/tasks/administer-cluster/running-cloud-controller/)を参照してください。 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/administer-cluster/enabling-endpointslices.md b/content/ja/docs/tasks/administer-cluster/enabling-endpointslices.md index 
736e6eb1c3..ddab0bb95d 100644 --- a/content/ja/docs/tasks/administer-cluster/enabling-endpointslices.md +++ b/content/ja/docs/tasks/administer-cluster/enabling-endpointslices.md @@ -1,18 +1,19 @@ --- title: EndpointSliceの有効化 -content_template: templates/task +content_type: task --- -{{% capture overview %}} + このページはKubernetesのEndpointSliceの有効化の概要を説明します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## 概要 @@ -36,9 +37,10 @@ EndpointSliceコントローラーはクラスター内にEndpointSliceを作成 クラスター内でEndpointSliceを完全に有効にすると、各Endpointsリソースに対応するEndpointSliceリソースが表示されます。既存のEndpointsの機能をサポートすることに加えて、EndpointSliceはトポロジーなどの新しい情報を含める必要があります。これらにより、クラスター内のネットワークエンドポイントのスケーラビリティと拡張性が大きく向上します。 -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + * [EndpointSlice](/docs/concepts/services-networking/endpoint-slices/)を参照してください。 * [サービスとアプリケーションの接続](/ja/docs/concepts/services-networking/connect-applications-service/)を参照してください。 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/administer-cluster/running-cloud-controller.md b/content/ja/docs/tasks/administer-cluster/running-cloud-controller.md index 331ba92a01..e98cee60d8 100644 --- a/content/ja/docs/tasks/administer-cluster/running-cloud-controller.md +++ b/content/ja/docs/tasks/administer-cluster/running-cloud-controller.md @@ -1,9 +1,9 @@ --- title: Kubernetesクラウドコントローラーマネージャー -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + {{< feature-state state="beta" >}} @@ -11,10 +11,10 @@ Kubernetes v1.6では`cloud-controller-manager`という新しいバイナリが `cloud-controller-manager`は、[cloudprovider.Interface](https://github.com/kubernetes/cloud-provider/blob/master/cloud.go)を満たす任意のクラウドプロバイダーと接続できます。下位互換性のためにKubernetesのコアプロジェクトで提供される[cloud-controller-manager](https://github.com/kubernetes/kubernetes/tree/master/cmd/cloud-controller-manager)は`kube-controller-manager`と同じクラウドライブラリを使用します。Kubernetesのコアリポジトリで既にサポートされているクラウドプロバイダーは、Kubernetesリポジトリにあるcloud-controller-managerを使用してKubernetesのコアから移行することが期待されています。今後のKubernetesのリリースでは、すべてのクラウドコントローラーマネージャーはsigリードまたはクラウドベンダーが管理するKubernetesのコアプロジェクトの外で開発される予定です。 -{{% /capture %}} -{{% capture body %}} + + ## 運用 @@ -87,4 +87,4 @@ Kubernetesのコアリポジトリにないクラウドコントローラーマ 独自のクラウドコントローラーマネージャーを構築および開発するには[クラウドコントローラーマネージャーの開発](/docs/tasks/administer-cluster/developing-cloud-controller-manager.md)のドキュメントを参照してください。 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/configure-pod-container/assign-cpu-resource.md b/content/ja/docs/tasks/configure-pod-container/assign-cpu-resource.md index f88e5e1f10..6626901a02 100644 --- a/content/ja/docs/tasks/configure-pod-container/assign-cpu-resource.md +++ b/content/ja/docs/tasks/configure-pod-container/assign-cpu-resource.md @@ -1,17 +1,18 @@ --- title: コンテナおよびPodへのCPUリソースの割り当て -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + このページでは、CPUの *request* と *limit* をコンテナに割り当てる方法について示します。コンテナは設定された制限を超えてCPUを使用することはできません。システムにCPUの空き時間がある場合、コンテナには要求されたCPUを割り当てられます。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -38,10 +39,10 @@ NAME v1beta1.metrics.k8s.io ``` -{{% /capture %}} -{{% capture steps %}} + + ## namespaceの作成 @@ -207,9 +208,10 @@ namespaceを削除してください: kubectl delete namespace cpu-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### 
アプリケーション開発者向け @@ -234,4 +236,4 @@ kubectl delete namespace cpu-example * [APIオブジェクトのクォータを設定する](/docs/tasks/administer-cluster/quota-api-object/) -{{% /capture %}} + diff --git a/content/ja/docs/tasks/configure-pod-container/assign-memory-resource.md b/content/ja/docs/tasks/configure-pod-container/assign-memory-resource.md index bc68116ad7..fb361dfa72 100644 --- a/content/ja/docs/tasks/configure-pod-container/assign-memory-resource.md +++ b/content/ja/docs/tasks/configure-pod-container/assign-memory-resource.md @@ -1,17 +1,18 @@ --- title: コンテナおよびPodへのメモリーリソースの割り当て -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + このページでは、メモリーの *要求* と *制限* をコンテナに割り当てる方法について示します。コンテナは要求されたメモリーを確保することを保証しますが、その制限を超えるメモリーの使用は許可されません。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -38,9 +39,9 @@ NAME v1beta1.metrics.k8s.io ``` -{{% /capture %}} -{{% capture steps %}} + + ## namespaceの作成 @@ -288,9 +289,10 @@ namespaceを削除してください。これにより、今回のタスクで kubectl delete namespace mem-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### アプリケーション開発者向け @@ -314,7 +316,7 @@ kubectl delete namespace mem-example * [APIオブジェクトのクォータを設定する](/docs/tasks/administer-cluster/quota-api-object/) -{{% /capture %}} + diff --git a/content/ja/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md b/content/ja/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md index e0acddd5f7..f988d81ba9 100644 --- a/content/ja/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md +++ b/content/ja/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md @@ -1,24 +1,25 @@ --- title: コンテナライフサイクルイベントへのハンドラー紐付け -content_template: templates/task +content_type: task weight: 140 --- -{{% capture overview %}} + このページでは、コンテナのライフサイクルイベントにハンドラーを紐付けする方法を説明します。KubernetesはpostStartとpreStopイベントをサポートしています。Kubernetesはコンテナの起動直後にpostStartイベントを送信し、コンテナの終了直前にpreStopイベントを送信します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## postStartハンドラーとpreStopハンドラーを定義する @@ -50,11 +51,11 @@ Pod内で実行されているコンテナでシェルを実行します: Hello from the postStart handler -{{% /capture %}} -{{% capture discussion %}} + + ## 議論 @@ -70,10 +71,11 @@ Kubernetesは、Podが *終了* したときにのみpreStopイベントを送 この制限は[issue #55087](https://github.com/kubernetes/kubernetes/issues/55807)で追跡されています。 {{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [コンテナライフサイクルフック](/ja/docs/concepts/containers/container-lifecycle-hooks/)の詳細 * [Podのライフサイクル](/ja/docs/concepts/workloads/pods/pod-lifecycle/)の詳細 @@ -85,6 +87,6 @@ Kubernetesは、Podが *終了* したときにのみpreStopイベントを送 * [コンテナ](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) * [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core)の`terminationGracePeriodSeconds` -{{% /capture %}} + diff --git a/content/ja/docs/tasks/configure-pod-container/configure-projected-volume-storage.md b/content/ja/docs/tasks/configure-pod-container/configure-projected-volume-storage.md index 5ee17ea721..f8e7341bb2 100644 --- a/content/ja/docs/tasks/configure-pod-container/configure-projected-volume-storage.md +++ b/content/ja/docs/tasks/configure-pod-container/configure-projected-volume-storage.md @@ -1,23 +1,24 @@ 
--- title: ストレージにProjectedボリュームを使用するようPodを設定する -content_template: templates/task +content_type: task weight: 70 --- -{{% capture overview %}} + このページでは、[`projected`](/docs/concepts/storage/volumes/#projected)(投影)ボリュームを使用して、既存の複数のボリュームソースを同一ディレクトリ内にマウントする方法を説明します。 現在、`secret`、`configMap`、`downwardAPI`および`serviceAccountToken`ボリュームを投影できます。 {{< note >}} `serviceAccountToken`はボリュームタイプではありません。 {{< /note >}} -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## ProjectedボリュームをPodに設定する この課題では、ローカルファイルからユーザーネームおよびパスワードの{{< glossary_tooltip text="Secret" term_id="secret" >}}を作成します。 @@ -73,9 +74,10 @@ kubectl delete pod test-projected-volume kubectl delete secret user pass ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [`projected`](/docs/concepts/storage/volumes/#projected)ボリュームについてさらに学ぶ * [all-in-oneボリューム](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/node/all-in-one-volume.md)のデザインドキュメントを読む -{{% /capture %}} + diff --git a/content/ja/docs/tasks/configure-pod-container/configure-volume-storage.md b/content/ja/docs/tasks/configure-pod-container/configure-volume-storage.md index 6b998ca05d..87fa5d965e 100644 --- a/content/ja/docs/tasks/configure-pod-container/configure-volume-storage.md +++ b/content/ja/docs/tasks/configure-pod-container/configure-volume-storage.md @@ -1,10 +1,10 @@ --- title: ストレージにボリュームを使用するPodを構成する -content_template: templates/task +content_type: task weight: 50 --- -{{% capture overview %}} + このページでは、ストレージにボリュームを使用するPodを構成する方法を示します。 @@ -13,15 +13,16 @@ weight: 50 コンテナに依存しない、より一貫したストレージを実現するには、[ボリューム](/docs/concepts/storage/volumes/)を使用できます。 これは、キーバリューストア(Redisなど)やデータベースなどのステートフルアプリケーションにとって特に重要です。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Podのボリュームを構成する @@ -120,9 +121,10 @@ weight: 50 kubectl delete pod redis ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Volume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volume-v1-core)参照 @@ -130,6 +132,6 @@ weight: 50 * `emptyDir`によって提供されるローカルディスクストレージに加えて、Kubernetesは、GCEのPDやEC2のEBSなど、さまざまなネットワーク接続ストレージソリューションをサポートします。これらは、重要なデータに好ましく、ノード上のデバイスのマウントやアンマウントなどの詳細を処理します。詳細は[ボリューム](/docs/concepts/storage/volumes/)を参照してください。 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/configure-pod-container/quality-service-pod.md b/content/ja/docs/tasks/configure-pod-container/quality-service-pod.md index f2a4edb2ee..ed95ed4ce3 100644 --- a/content/ja/docs/tasks/configure-pod-container/quality-service-pod.md +++ b/content/ja/docs/tasks/configure-pod-container/quality-service-pod.md @@ -1,25 +1,26 @@ --- title: PodにQuality of Serviceを設定する -content_template: templates/task +content_type: task weight: 30 --- -{{% capture overview %}} + このページでは、特定のQuality of Service (QoS)クラスをPodに割り当てるための設定方法を示します。Kubernetesは、Podのスケジューリングおよび退役を決定するためにQoSクラスを用います。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## QoSクラス @@ -222,9 +223,10 @@ namespaceを削除してください: kubectl delete namespace qos-example ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + ### アプリケーション開発者向け @@ -248,7 +250,7 @@ kubectl delete namespace qos-example * [NamespaceにPodのクォータを設定する](/docs/tasks/administer-cluster/quota-pod-namespace/) * [APIオブジェクトのクォータを設定する](/docs/tasks/administer-cluster/quota-api-object/) -{{% /capture %}} + diff --git a/content/ja/docs/tasks/configure-pod-container/share-process-namespace.md b/content/ja/docs/tasks/configure-pod-container/share-process-namespace.md index c24df13f1f..513da2365c 100644 --- a/content/ja/docs/tasks/configure-pod-container/share-process-namespace.md +++ b/content/ja/docs/tasks/configure-pod-container/share-process-namespace.md @@ -1,11 +1,11 @@ --- title: Pod内のコンテナ間でプロセス名前空間を共有する min-kubernetes-server-version: v1.10 -content_template: templates/task +content_type: task weight: 160 --- -{{% capture overview %}} + {{< feature-state state="stable" for_k8s_version="v1.17" >}} @@ -14,15 +14,16 @@ weight: 160 この機能を使用して、ログハンドラーサイドカーコンテナなどの協調コンテナを構成したり、シェルなどのデバッグユーティリティを含まないコンテナイメージをトラブルシューティングしたりできます。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Podを構成する @@ -86,9 +87,9 @@ events { worker_connections 1024; ``` -{{% /capture %}} -{{% capture discussion %}} + + ## プロセス名前空間の共有について理解する @@ -106,6 +107,6 @@ Podは多くのリソースを共有するため、プロセスの名前空間 1. **コンテナファイルシステムは、`/proc/$pid/root`リンクを介してPod内の他のコンテナに表示されます。** これによりデバッグが容易になりますが、ファイルシステム内の秘密情報はファイルシステムのアクセス許可によってのみ保護されることも意味します。 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/debug-application-cluster/debug-init-containers.md b/content/ja/docs/tasks/debug-application-cluster/debug-init-containers.md index b9638ff0ba..9de4afeb87 100644 --- a/content/ja/docs/tasks/debug-application-cluster/debug-init-containers.md +++ b/content/ja/docs/tasks/debug-application-cluster/debug-init-containers.md @@ -1,24 +1,25 @@ --- title: Init Containerのデバッグ -content_template: templates/task +content_type: task --- -{{% capture overview %}} + このページでは、Init Containerの実行に関連する問題を調査する方法を説明します。以下のコマンドラインの例では、Podを`<pod-name>`、Init Containerを`<init-container-1>`および`<init-container-2>`として参照しています。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * [Init Container](/docs/concepts/abstractions/init-containers/)の基本を理解しておきましょう。 * [Init Containerを設定](/docs/tasks/configure-pod-container/configure-pod-initialization/#creating-a-pod-that-has-an-init-container/)しておきましょう。 -{{% /capture %}} -{{% capture steps %}} + + ## Init Containerのステータスを確認する @@ -95,9 +96,9 @@ kubectl logs <pod-name> -c <init-container-2> シェルスクリプトを実行するInit Containerは、実行時にコマンドを出力します。たとえば、スクリプトの始めに`set -x`を実行することでBashで同じことができます。 -{{% /capture %}} -{{% capture discussion %}} + + ## Podのステータスを理解する @@ -111,7 +112,7 @@ kubectl logs <pod-name> -c <init-container-2> `Pending` | PodはまだInit Containerの実行を開始していません。 `PodInitializing` or `Running` | PodはすでにInit Containerの実行を終了しています。 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md b/content/ja/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md index 406466cc1c..9a423ce4ca 100644 --- a/content/ja/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md +++ b/content/ja/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md @@ -1,23 +1,24 @@ --- title: PodとReplicationControllerのデバッグ -content_template: templates/task +content_type: task --- -{{% capture overview %}} + このページでは、PodとReplicationControllerをデバッグする方法を説明します。 -{{% /capture
%}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * [Pod](/ja/docs/concepts/workloads/pods/pod/)と[Podのライフサイクル](/ja/docs/concepts/workloads/pods/pod-lifecycle/)の基本を理解している必要があります。 -{{% /capture %}} -{{% capture steps %}} + + ## Podのデバッグ @@ -122,4 +123,4 @@ Podを作成できない場合は、[上述の手順](#Podのデバッグ)を参 `kubectl describe rc ${CONTROLLER_NAME}`を使用して、レプリケーションコントローラーに関連するイベントを調べることもできます。 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/debug-application-cluster/debug-service.md b/content/ja/docs/tasks/debug-application-cluster/debug-service.md index a59bef5a6a..1a1f1b95f9 100644 --- a/content/ja/docs/tasks/debug-application-cluster/debug-service.md +++ b/content/ja/docs/tasks/debug-application-cluster/debug-service.md @@ -1,18 +1,18 @@ --- -content_template: templates/concept +content_type: concept title: Serviceのデバッグ --- -{{% capture overview %}} + 新規にKubernetesをインストールした環境でかなり頻繁に発生する問題は、`Service`が適切に機能しないというものです。 `Deployment`を実行して`Service`を作成したにもかかわらず、アクセスしようとしても応答がありません。 何が問題になっているのかを理解するのに、このドキュメントがきっと役立つでしょう。 -{{% /capture %}} -{{% capture body %}} + + ## 規則 @@ -588,10 +588,11 @@ DNSは動作していて、`iptables`ルールがインストールされてい [Forum](https://discuss.kubernetes.io)または [GitHub](https://github.com/kubernetes/kubernetes)でお問い合わせください。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + 詳細については、[トラブルシューティングドキュメント](/docs/troubleshooting/)をご覧ください。 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/debug-application-cluster/debug-stateful-set.md b/content/ja/docs/tasks/debug-application-cluster/debug-stateful-set.md index 817db0ecde..2aa2ddc4e0 100644 --- a/content/ja/docs/tasks/debug-application-cluster/debug-stateful-set.md +++ b/content/ja/docs/tasks/debug-application-cluster/debug-stateful-set.md @@ -1,22 +1,23 @@ --- title: StatefulSetのデバッグ -content_template: templates/task +content_type: task --- -{{% capture overview %}} + このタスクでは、StatefulSetをデバッグする方法を説明します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Kubernetesクラスターが必要です。また、kubectlコマンドラインツールがクラスターと通信するように設定されている必要があります。 * 調べたいStatefulSetを実行しておきましょう。 -{{% /capture %}} -{{% capture steps %}} + + ## StatefulSetのデバッグ @@ -29,12 +30,13 @@ kubectl get pods -l app=myapp Podが長期間`Unknown`または`Terminating`の状態になっていることがわかった場合は、それらを処理する方法について[StatefulSet Podsの削除](/docs/tasks/manage-stateful-set/delete-pods/)タスクを参照してください。 [Podのデバッグ](/docs/tasks/debug-application-cluster/debug-pod-replication-controller/)ガイドを使用して、StatefulSet内の個々のPodをデバッグできます。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [Init Containerのデバッグ](/ja/docs/tasks/debug-application-cluster/debug-init-containers/)の詳細 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md b/content/ja/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md index 5de335c32b..7ec722caa6 100644 --- a/content/ja/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md +++ b/content/ja/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md @@ -1,25 +1,26 @@ --- title: Pod障害の原因を特定する -content_template: templates/task +content_type: task --- -{{% capture overview %}} + このページでは、コンテナ終了メッセージの読み書き方法を説明します。 終了メッセージは、致命的なイベントに関する情報を、ダッシュボードや監視ソフトウェアなどのツールで簡単に取得して表示できる場所にコンテナが書き込むための手段を提供します。 ほとんどの場合、終了メッセージに入力した情報も一般的な[Kubernetesログ](/docs/concepts/cluster-administration/logging/)に書き込まれるはずです。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% 
heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## 終了メッセージの書き込みと読み取り @@ -82,15 +83,16 @@ spec: さらに、ユーザーは追加のカスタマイズをするためにContainerの`terminationMessagePolicy`フィールドを設定できます。このフィールドのデフォルト値は`File`です。これは、終了メッセージが終了メッセージファイルからのみ取得されることを意味します。`terminationMessagePolicy`を`FallbackToLogsOnError`に設定することで、終了メッセージファイルが空でコンテナがエラーで終了した場合に、コンテナログ出力の最後のチャンクを使用するようにKubernetesに指示できます。ログ出力は、2048バイトまたは80行のどちらか小さい方に制限されています。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [コンテナ](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core)の`terminationMessagePath`フィールド参照 * [ログ取得](/docs/concepts/cluster-administration/logging/)について * [Goテンプレート](https://golang.org/pkg/text/template/)について -{{% /capture %}} + diff --git a/content/ja/docs/tasks/debug-application-cluster/get-shell-running-container.md b/content/ja/docs/tasks/debug-application-cluster/get-shell-running-container.md index 40f903789f..b7bf3c83e0 100644 --- a/content/ja/docs/tasks/debug-application-cluster/get-shell-running-container.md +++ b/content/ja/docs/tasks/debug-application-cluster/get-shell-running-container.md @@ -1,24 +1,25 @@ --- title: 実行中のコンテナへのシェルを取得する -content_template: templates/task +content_type: task --- -{{% capture overview %}} + このページは`kubectl exec`を使用して実行中のコンテナへのシェルを取得する方法を説明します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## コンテナへのシェルの取得 @@ -115,9 +116,9 @@ kubectl exec shell-demo ls / kubectl exec shell-demo cat /proc/1/mounts ``` -{{% /capture %}} -{{% capture discussion %}} + + ## Podが1つ以上のコンテナを持つ場合にシェルを開く @@ -129,14 +130,15 @@ Podが1つ以上のコンテナを持つ場合、`--container`か`-c`を使用 kubectl exec -it my-pod --container main-app -- /bin/bash ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec) -{{% /capture %}} + diff --git a/content/ja/docs/tasks/run-application/delete-stateful-set.md b/content/ja/docs/tasks/run-application/delete-stateful-set.md index d6f7d981e4..530e41fc0c 100644 --- a/content/ja/docs/tasks/run-application/delete-stateful-set.md +++ b/content/ja/docs/tasks/run-application/delete-stateful-set.md @@ -1,22 +1,23 @@ --- title: StatefulSetの削除 -content_template: templates/task +content_type: task weight: 60 --- -{{% capture overview %}} + このタスクでは、StatefulSetを削除する方法を説明します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * このタスクは、クラスター上で、StatefulSetで表現されるアプリケーションが実行されていることを前提としています。 -{{% /capture %}} -{{% capture steps %}} + + ## StatefulSetの削除 @@ -74,12 +75,13 @@ kubectl delete pvc -l app=myapp StatefulSet内の一部のPodが長期間`Terminating`または`Unknown`状態のままになっていることが判明した場合は、手動でapiserverからPodを強制的に削除する必要があります。これは潜在的に危険な作業です。詳細は[StatefulSet Podの強制削除](/docs/tasks/run-application/force-delete-stateful-set-pod/)を参照してください。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [StatefulSet Podの強制削除](/docs/tasks/run-application/force-delete-stateful-set-pod/)の詳細 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/run-application/force-delete-stateful-set-pod.md b/content/ja/docs/tasks/run-application/force-delete-stateful-set-pod.md index be930f23e5..d318813fb4 100644 --- a/content/ja/docs/tasks/run-application/force-delete-stateful-set-pod.md +++ 
b/content/ja/docs/tasks/run-application/force-delete-stateful-set-pod.md @@ -1,20 +1,21 @@ --- title: StatefulSet Podの強制削除 -content_template: templates/task +content_type: task weight: 70 --- -{{% capture overview %}} + このページでは、StatefulSetの一部であるPodを削除する方法と、削除する際に考慮すべき事項について説明します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * これはかなり高度なタスクであり、StatefulSetに固有のいくつかの特性に反する可能性があります。 * 先に進む前に、以下に列挙されている考慮事項をよく理解してください。 -{{% /capture %}} -{{% capture steps %}} + + ## StatefulSetに関する考慮事項 @@ -68,10 +69,11 @@ kubectl patch pod <pod> -p '{"metadata":{"finalizers":null}}' StatefulSet Podの強制削除は、常に慎重に、関連するリスクを完全に把握して実行してください。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [StatefulSetのデバッグ](/docs/tasks/debug-application-cluster/debug-stateful-set/)の詳細 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/run-application/run-replicated-stateful-application.md b/content/ja/docs/tasks/run-application/run-replicated-stateful-application.md index bd430ce44b..d76e60f5c3 100644 --- a/content/ja/docs/tasks/run-application/run-replicated-stateful-application.md +++ b/content/ja/docs/tasks/run-application/run-replicated-stateful-application.md @@ -1,10 +1,10 @@ --- title: レプリカを持つステートフルアプリケーションを実行する -content_template: templates/tutorial +content_type: tutorial weight: 30 --- -{{% capture overview %}} + このページでは、[StatefulSet](/ja/docs/concepts/workloads/controllers/statefulset/) コントローラーを使用して、レプリカを持つステートフルアプリケーションを実行する方法を説明します。 @@ -14,9 +14,10 @@ weight: 30 具体的には、MySQLの設定が安全ではないデフォルトのままとなっています。 これはKubernetesでステートフルアプリケーションを実行するための一般的なパターンに焦点を当てるためです。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * {{< include "default-storage-class-prereqs.md" >}} @@ -29,18 +30,19 @@ weight: 30 * MySQLに関する知識は記事の理解に役立ちますが、 このチュートリアルは他のシステムにも役立つ一般的なパターンを提示することを目的としています。 -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * StatefulSetコントローラーを使用して、レプリカを持つMySQLトポロジーをデプロイします。 * MySQLクライアントトラフィックを送信します。 * ダウンタイムに対する耐性を観察します。 * StatefulSetをスケールアップおよびスケールダウンします。 -{{% /capture %}} -{{% capture lessoncontent %}} + + ## MySQLをデプロイする @@ -437,9 +439,10 @@ kubectl delete pvc data-mysql-3 kubectl delete pvc data-mysql-4 ``` -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + 1.
`SELECT @@server_id`ループを実行している端末で**Ctrl+C**を押すか、 別の端末から次のコマンドを実行して、ループをキャンセルします。 @@ -478,13 +481,14 @@ kubectl delete pvc data-mysql-4 動的プロビジョニング機能を使用した場合は、PersistentVolumeClaimを削除すれば、自動的にPersistentVolumeも削除されます。 一部の動的プロビジョナー(EBSやPDなど)は、PersistentVolumeを削除すると同時に下層にあるリソースも解放します。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * その他のステートフルアプリケーションの例は、[Helm Charts repository](https://github.com/kubernetes/charts)を見てください。 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/run-application/run-single-instance-stateful-application.md b/content/ja/docs/tasks/run-application/run-single-instance-stateful-application.md index c7efac3f8e..91b6f24adb 100644 --- a/content/ja/docs/tasks/run-application/run-single-instance-stateful-application.md +++ b/content/ja/docs/tasks/run-application/run-single-instance-stateful-application.md @@ -1,35 +1,37 @@ --- title: 単一レプリカのステートフルアプリケーションを実行する -content_template: templates/tutorial +content_type: tutorial weight: 20 --- -{{% capture overview %}} + このページでは、PersistentVolumeとDeploymentを使用して、Kubernetesで単一レプリカのステートフルアプリケーションを実行する方法を説明します。アプリケーションはMySQLです。 -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * 自身の環境のディスクを参照するPersistentVolumeを作成します。 * MySQLのDeploymentを作成します。 * MySQLをDNS名でクラスター内の他のPodに公開します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * {{< include "default-storage-class-prereqs.md" >}} -{{% /capture %}} -{{% capture lessoncontent %}} + + ## MySQLをデプロイする @@ -163,10 +165,11 @@ PersistentVolumeを手動でプロビジョニングした場合は、Persistent 動的プロビジョニング機能を使用した場合は、PersistentVolumeClaimを削除すれば、自動的にPersistentVolumeも削除されます。 一部の動的プロビジョナー(EBSやPDなど)は、PersistentVolumeを削除すると同時に下層にあるリソースも解放します。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Deploymentオブジェクト](/ja/docs/concepts/workloads/controllers/deployment/)についてもっと学ぶ @@ -176,6 +179,6 @@ PersistentVolumeを手動でプロビジョニングした場合は、Persistent * [Volumes](/docs/concepts/storage/volumes/)と[Persistent Volumes](/docs/concepts/storage/persistent-volumes/) -{{% /capture %}} + diff --git a/content/ja/docs/tasks/run-application/run-stateless-application-deployment.md b/content/ja/docs/tasks/run-application/run-stateless-application-deployment.md index dd4172138f..88a35b7d84 100644 --- a/content/ja/docs/tasks/run-application/run-stateless-application-deployment.md +++ b/content/ja/docs/tasks/run-application/run-stateless-application-deployment.md @@ -1,34 +1,36 @@ --- title: Deploymentを使用してステートレスアプリケーションを実行する min-kubernetes-server-version: v1.9 -content_template: templates/tutorial +content_type: tutorial weight: 10 --- -{{% capture overview %}} + このページでは、Kubernetes Deploymentオブジェクトを使用してアプリケーションを実行する方法を説明します。 -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * nginx deploymentを作成します。 * kubectlを使ってdeploymentに関する情報を一覧表示します。 * deploymentを更新します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture lessoncontent %}} + + ## nginx deploymentの作成と探検 @@ -138,13 +140,14 @@ Deploymentを名前を指定して削除します: 複製アプリケーションを作成するための好ましい方法はDeploymentを使用することです。そして、DeploymentはReplicaSetを使用します。 DeploymentとReplicaSetがKubernetesに追加される前は、[ReplicationController](/docs/concepts/workloads/controllers/replicationcontroller/)を使用して複製アプリケーションを構成していました。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading 
"whatsnext" %}} + * [Deploymentオブジェクト](/ja/docs/concepts/workloads/controllers/deployment/)の詳細 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/run-application/scale-stateful-set.md b/content/ja/docs/tasks/run-application/scale-stateful-set.md index 7e9352c1c1..155b93d069 100644 --- a/content/ja/docs/tasks/run-application/scale-stateful-set.md +++ b/content/ja/docs/tasks/run-application/scale-stateful-set.md @@ -1,14 +1,15 @@ --- title: StatefulSetのスケール -content_template: templates/task +content_type: task weight: 50 --- -{{% capture overview %}} + このタスクは、StatefulSetをスケールする方法を示します。StatefulSetをスケーリングするとは、レプリカの数を増減することです。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * StatefulSetはKubernetesバージョン1.5以降でのみ利用可能です。 Kubernetesのバージョンを確認するには、`kubectl version`を実行してください。 @@ -17,9 +18,9 @@ weight: 50 * ステートフルアプリケーションクラスターが完全に健全であると確信できる場合にのみ、スケーリングを実行してください。 -{{% /capture %}} -{{% capture steps %}} + + ## StatefulSetのスケール @@ -71,10 +72,11 @@ spec.replicas > 1の場合、Kubernetesは不健康なPodの理由を判断で 一時的な障害によってPodが正常でなくなり、Podが再び使用可能になる可能性がある場合は、一時的なエラーがスケールアップまたはスケールダウン操作の妨げになる可能性があります。一部の分散データベースでは、ノードが同時に参加および脱退するときに問題があります。このような場合は、アプリケーションレベルでスケーリング操作を考えることをお勧めします。また、ステートフルアプリケーションクラスタが完全に健全であることが確実な場合にのみスケーリングを実行してください。 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [StatefulSetの削除](/ja/docs/tasks/run-application/delete-stateful-set/)の詳細 -{{% /capture %}} + diff --git a/content/ja/docs/tasks/service-catalog/install-service-catalog-using-helm.md b/content/ja/docs/tasks/service-catalog/install-service-catalog-using-helm.md index cac6668f16..e597d24a4d 100644 --- a/content/ja/docs/tasks/service-catalog/install-service-catalog-using-helm.md +++ b/content/ja/docs/tasks/service-catalog/install-service-catalog-using-helm.md @@ -1,17 +1,18 @@ --- title: Helmを使用したサービスカタログのインストール -content_template: templates/task +content_type: task --- -{{% capture overview %}} + {{< glossary_definition term_id="service-catalog" length="all" prepend="サービスカタログは" >}} [Helm](https://helm.sh/)を使用してKubernetesクラスターにサービスカタログをインストールします。手順の最新情報は[kubernetes-sigs/service-catalog](https://github.com/kubernetes-sigs/service-catalog/blob/master/docs/install.md)リポジトリーを参照してください。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * [サービスカタログ](/docs/concepts/service-catalog/)の基本概念を理解してください。 * サービスカタログを使用するには、Kubernetesクラスターのバージョンが1.7以降である必要があります。 * KubernetesクラスターのクラスターDNSを有効化する必要があります。 @@ -22,10 +23,10 @@ content_template: templates/task * [Helm install instructions](https://helm.sh/docs/intro/install/)を参考にしてください。 * 上記のバージョンのHelmをすでにインストールしている場合は、`helm init`を実行し、HelmのサーバーサイドコンポーネントであるTillerをインストールしてください。 -{{% /capture %}} -{{% capture steps %}} + + ## Helmリポジトリーにサービスカタログを追加 Helmをインストールし、以下のコマンドを実行することでローカルマシンに*service-catalog*のHelmリポジトリーを追加します。 @@ -106,11 +107,12 @@ helm install svc-cat/catalog --name catalog --namespace catalog ``` {{% /tab %}} {{< /tabs >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [sample service brokers](https://github.com/openservicebrokerapi/servicebroker/blob/master/gettingStarted.md#sample-service-brokers) * [kubernetes-sigs/service-catalog](https://github.com/kubernetes-sigs/service-catalog) -{{% /capture %}} + diff --git a/content/ja/docs/tasks/tools/install-kubectl.md b/content/ja/docs/tasks/tools/install-kubectl.md index 1e6bb6b3a5..c20f0b8880 100644 --- a/content/ja/docs/tasks/tools/install-kubectl.md +++ b/content/ja/docs/tasks/tools/install-kubectl.md @@ -1,6 
+1,6 @@ --- title: kubectlのインストールおよびセットアップ -content_template: templates/task +content_type: task weight: 10 card: name: tasks @@ -8,15 +8,16 @@ card: title: Install kubectl --- -{{% capture overview %}} + Kubernetesのコマンドラインツールである[kubectl](/docs/user-guide/kubectl/)を使用して、Kubernetesクラスターに対してコマンドを実行することができます。kubectlによってアプリケーションのデプロイや、クラスターのリソース管理および検査を行うことができます。kubectlの操作に関する完全なリストは、[Overview of kubectl](/docs/reference/kubectl/overview/)を参照してください。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + kubectlのバージョンは、クラスターのマイナーバージョンとの差分が1つ以内でなければなりません。たとえば、クライアントがv1.2であれば、v1.1、v1.2、v1.3のマスターで動作するはずです。最新バージョンのkubectlを使うことで、不測の事態を避けることができるでしょう。 -{{% /capture %}} -{{% capture steps %}} + + ## Linuxへkubectlをインストールする {#install-kubectl-on-linux} @@ -464,12 +465,13 @@ compinit {{% /tab %}} {{< /tabs >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Minikubeをインストールする](/ja/docs/tasks/tools/install-minikube/) * クラスターの作成に関する詳細を[スタートガイド](/docs/setup/)で確認する * [アプリケーションを起動して公開する方法を学ぶ](/docs/tasks/access-application-cluster/service-access-application-cluster/) * あなたが作成していないクラスターにアクセスする必要がある場合は、[クラスターアクセスドキュメントの共有](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)を参照してください * [kubectlリファレンスドキュメント](/docs/reference/kubectl/kubectl/)を参照する -{{% /capture %}} + diff --git a/content/ja/docs/tasks/tools/install-minikube.md b/content/ja/docs/tasks/tools/install-minikube.md index fa98a198be..39b274b948 100644 --- a/content/ja/docs/tasks/tools/install-minikube.md +++ b/content/ja/docs/tasks/tools/install-minikube.md @@ -1,19 +1,20 @@ --- title: Minikubeのインストール -content_template: templates/task +content_type: task weight: 20 card: name: tasks weight: 10 --- -{{% capture overview %}} + このページでは[Minikube](/ja/docs/tutorials/hello-minikube)のインストール方法を説明し、コンピューターの仮想マシン上で単一ノードのKubernetesクラスターを実行します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< tabs name="minikube_before_you_begin" >}} {{% tab name="Linux" %}} @@ -53,11 +54,11 @@ Hyper-V Requirements: A hypervisor has been detected. 
Features required for {{% /tab %}} {{< /tabs >}} -{{% /capture %}} -{{% capture steps %}} -# minikubeのインストール + + +## minikubeのインストール {{< tabs name="tab_with_md" >}} {{% tab name="Linux" %}} @@ -182,13 +183,7 @@ WindowsにMinikubeを手動でインストールするには、[`minikube-window {{% /tab %}} {{< /tabs >}} -{{% /capture %}} -{{% capture whatsnext %}} - -* [Minikubeを使ってローカルでKubernetesを実行する](/ja/docs/setup/learning-environment/minikube/) - -{{% /capture %}} ## ローカル状態のクリーンアップ {#cleanup-local-state} @@ -206,3 +201,10 @@ minikubeのローカル状態をクリアする必要があります: ```shell minikube delete ``` + + +## {{% heading "whatsnext" %}} + + +* [Minikubeを使ってローカルでKubernetesを実行する](/ja/docs/setup/learning-environment/minikube/) + diff --git a/content/ja/docs/tutorials/_index.md b/content/ja/docs/tutorials/_index.md index dfecf9f192..784e426a99 100644 --- a/content/ja/docs/tutorials/_index.md +++ b/content/ja/docs/tutorials/_index.md @@ -2,16 +2,16 @@ title: チュートリアル main_menu: true weight: 60 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + 本セクションにはチュートリアルが含まれています。チュートリアルでは、単一の[タスク](/docs/tasks/)よりも大きな目標を達成する方法を示します。通常、チュートリアルにはいくつかのセクションがあり、各セクションには一連のステップがあります。各チュートリアルを進める前に、後で参照できるように[標準化された用語集](/docs/reference/glossary/)ページをブックマークしておくことをお勧めします。 -{{% /capture %}} -{{% capture body %}} + + ## 基本 @@ -61,10 +61,11 @@ content_template: templates/concept * [Source IPを使う](/docs/tutorials/services/source-ip/) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + チュートリアルを書きたい場合は、[ページテンプレートの使用](/docs/contribute/style/page-templates/)を参照し、チュートリアルのページタイプとチュートリアルテンプレートについてご確認ください。 -{{% /capture %}} + diff --git a/content/ja/docs/tutorials/configuration/configure-redis-using-configmap.md b/content/ja/docs/tutorials/configuration/configure-redis-using-configmap.md index a113679775..297aba127f 100644 --- a/content/ja/docs/tutorials/configuration/configure-redis-using-configmap.md +++ b/content/ja/docs/tutorials/configuration/configure-redis-using-configmap.md @@ -1,15 +1,16 @@ --- title: ConfigMapを使ったRedisの設定 -content_template: templates/tutorial +content_type: tutorial --- -{{% capture overview %}} + 本ページでは、[ConfigMapを使ったコンテナの設定](/docs/tasks/configure-pod-container/configure-pod-configmap/)に基づき、ConfigMapを使ってRedisの設定を行う実践的な例を提供します。 -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * 以下の要素を含む`kustomization.yaml`ファイルを作成する: * ConfigMapGenerator @@ -17,17 +18,18 @@ content_template: templates/tutorial * `kubectl apply -k ./`コマンドにてディレクトリ全体を適用する * 設定が正しく反映されていることを確認する -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * この例は、バージョン1.14以上での動作を確認しています。 * [ConfigMapを使ったコンテナの設定](/docs/tasks/configure-pod-container/configure-pod-configmap/)を読んで理解しておいてください。 -{{% /capture %}} -{{% capture lessoncontent %}} + + ## 実践例: ConfigMapを使ったRedisの設定 @@ -95,12 +97,13 @@ kubectl exec -it redis redis-cli 2) "allkeys-lru" ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/)について学ぶ -{{% /capture %}} + diff --git a/content/ja/docs/tutorials/hello-minikube.md b/content/ja/docs/tutorials/hello-minikube.md index 807d15ce77..5ea6ef99b4 100644 --- a/content/ja/docs/tutorials/hello-minikube.md +++ b/content/ja/docs/tutorials/hello-minikube.md @@ -1,6 +1,6 @@ --- title: Hello Minikube -content_template: templates/tutorial +content_type: tutorial weight: 5 menu: main: @@ -13,7 +13,7 
@@ card: weight: 10 --- -{{% capture overview %}} + このチュートリアルでは、[Minikube](/docs/getting-started-guides/minikube)とKatacodaを使用して、Kubernetes上でシンプルなHello WorldのNode.jsアプリケーションを動かす方法を紹介します。Katacodaはブラウザで無償のKubernetes環境を提供します。 @@ -21,17 +21,19 @@ card: [Minikubeをローカルにインストール](/ja/docs/tasks/tools/install-minikube/)している場合もこのチュートリアルを進めることが可能です。 {{< /note >}} -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * Minikubeへのhello worldアプリケーションのデプロイ * アプリケーションの実行 * アプリケーションログの確認 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + このチュートリアルは下記のファイルからビルドされるコンテナーイメージを提供します: @@ -41,9 +43,9 @@ card: `docker build`コマンドについての詳細な情報は、[Dockerのドキュメント](https://docs.docker.com/engine/reference/commandline/build/)を参照してください。 -{{% /capture %}} -{{% capture lessoncontent %}} + + ## Minikubeクラスタの作成 @@ -253,12 +255,13 @@ minikube stop minikube delete ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Deploymentオブジェクト](/ja/docs/concepts/workloads/controllers/deployment/)について学ぶ. * [アプリケーションのデプロイ](/ja/docs/tasks/run-application/run-stateless-application-deployment/)について学ぶ. * [Serviceオブジェクト](/ja/docs/concepts/services-networking/service/)について学ぶ. -{{% /capture %}} + diff --git a/content/ja/docs/tutorials/stateless-application/expose-external-ip-address.md b/content/ja/docs/tutorials/stateless-application/expose-external-ip-address.md index 74d973fdf2..45fb8441fd 100644 --- a/content/ja/docs/tutorials/stateless-application/expose-external-ip-address.md +++ b/content/ja/docs/tutorials/stateless-application/expose-external-ip-address.md @@ -1,17 +1,18 @@ --- title: クラスター内のアプリケーションにアクセスするために外部IPアドレスを公開する -content_template: templates/tutorial +content_type: tutorial weight: 10 --- -{{% capture overview %}} + このページでは、外部IPアドレスを公開するKubernetesのServiceオブジェクトを作成する方法を示します。 -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * [kubectl](/ja/docs/tasks/tools/install-kubectl/)をインストールしてください。 @@ -19,19 +20,20 @@ weight: 10 * Kubernetes APIサーバーと通信するために、`kubectl`を設定してください。手順については、各クラウドプロバイダーのドキュメントを参照してください。 -{{% /capture %}} -{{% capture objectives %}} + +## {{% heading "objectives" %}} + * 5つのインスタンスで実際のアプリケーションを起動します。 * 外部IPアドレスを公開するServiceオブジェクトを作成します。 * 起動中のアプリケーションにアクセスするためにServiceオブジェクトを使用します。 -{{% /capture %}} -{{% capture lessoncontent %}} + + ## 5つのPodで起動しているアプリケーションへのServiceの作成 @@ -124,10 +126,11 @@ kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml Hello Kubernetes! -{{% /capture %}} -{{% capture cleanup %}} + +## {{% heading "cleanup" %}} + Serviceを削除する場合、次のコマンドを実行します: @@ -137,10 +140,11 @@ Deployment、ReplicaSet、およびHello Worldアプリケーションが動作 kubectl delete deployment hello-world -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [connecting applications with services](/docs/concepts/services-networking/connect-applications-service/)にて詳細を学ぶことができます。 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/_index.md b/content/ko/docs/concepts/_index.md index 03a1d64ddd..30424ead72 100644 --- a/content/ko/docs/concepts/_index.md +++ b/content/ko/docs/concepts/_index.md @@ -1,17 +1,17 @@ --- title: 개념 main_menu: true -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + 개념 섹션을 통해 쿠버네티스 시스템을 구성하는 요소와 {{< glossary_tooltip text="클러스터" term_id="cluster" length="all" >}}를 표현하는데 사용되는 추상 개념에 대해 배우고 쿠버네티스가 작동하는 방식에 대해 보다 깊이 이해할 수 있다. 
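The `expose-external-ip-address` tutorial re-templated above revolves around a Service of `type: LoadBalancer`. As a reminder of the kind of object that tutorial creates, here is a minimal sketch of such a Service; the name, selector, and ports are illustrative stand-ins, not the tutorial's exact manifest (which lives at https://k8s.io/examples/service/load-balancer-example.yaml).

```yaml
# Sketch of a LoadBalancer Service like the one the tutorial above creates.
# All names and ports here are illustrative placeholders.
apiVersion: v1
kind: Service
metadata:
  name: hello-world-service    # hypothetical name
spec:
  type: LoadBalancer           # asks the cloud provider for an external IP
  selector:
    app: hello-world           # must match the Deployment's pod labels
  ports:
    - port: 8080               # port served on the external IP
      targetPort: 8080         # container port traffic is forwarded to
```

Once the cloud provider provisions the load balancer, `kubectl get service` reports the assigned address in the `EXTERNAL-IP` column.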
-{{% /capture %}} -{{% capture body %}} + + ## 개요 @@ -60,12 +60,13 @@ weight: 40 클러스터 내 노드는 애플리케이션과 클라우드 워크플로우를 구동시키는 머신(VM, 물리 서버 등)이다. 쿠버네티스 마스터는 각 노드를 관리한다. 직접 노드와 직접 상호 작용할 일은 거의 없을 것이다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + 개념 페이지를 작성하기를 원하면, 개념 페이지 유형과 개념 템플릿에 대한 정보가 있는 [페이지 템플릿 사용하기](/docs/home/contribute/page-templates/)를 참조한다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/architecture/cloud-controller.md b/content/ko/docs/concepts/architecture/cloud-controller.md index 83bc1d246c..12b1d714e8 100644 --- a/content/ko/docs/concepts/architecture/cloud-controller.md +++ b/content/ko/docs/concepts/architecture/cloud-controller.md @@ -1,10 +1,10 @@ --- title: 클라우드 컨트롤러 매니저 -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + {{< feature-state state="beta" for_k8s_version="v1.11" >}} @@ -17,10 +17,10 @@ weight: 40 클라우드 컨트롤러 매니저는 다양한 클라우드 공급자가 자신의 플랫폼에 쿠버네티스를 통합할 수 있도록 하는 플러그인 메커니즘을 사용해서 구성된다. -{{% /capture %}} -{{% capture body %}} + + ## 디자인 @@ -200,8 +200,9 @@ rules: - update ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [클라우드 컨트롤러 매니저 관리](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager)에는 클라우드 컨트롤러 매니저의 실행과 관리에 대한 지침이 있다. @@ -212,4 +213,4 @@ rules: 이 문서(노드, 라우트와 서비스)에서 강조된 공유 컨트롤러의 구현과 공유 cloudprovider 인터페이스와 함께 일부 스캐폴딩(scaffolding)은 쿠버네티스 핵심의 일부이다. 클라우드 공급자 전용 구현은 쿠버네티스의 핵심 바깥에 있으며 `CloudProvider` 인터페이스를 구현한다. 플러그인 개발에 대한 자세한 내용은 [클라우드 컨트롤러 매니저 개발하기](/docs/tasks/administer-cluster/developing-cloud-controller-manager/)를 참조한다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/architecture/control-plane-node-communication.md b/content/ko/docs/concepts/architecture/control-plane-node-communication.md index a037452ef3..819ee0c384 100644 --- a/content/ko/docs/concepts/architecture/control-plane-node-communication.md +++ b/content/ko/docs/concepts/architecture/control-plane-node-communication.md @@ -1,18 +1,18 @@ --- title: 컨트롤 플레인-노드 간 통신 -content_template: templates/concept +content_type: concept weight: 20 aliases: - master-node-communication --- -{{% capture overview %}} + 이 문서는 컨트롤 플레인(실제로는 API 서버)과 쿠버네티스 클러스터 사이에 대한 통신 경로의 목록을 작성한다. 이는 사용자가 신뢰할 수 없는 네트워크(또는 클라우드 공급자의 완전한 퍼블릭 IP)에서 클러스터를 실행할 수 있도록 네트워크 구성을 강화하기 위한 맞춤 설치를 할 수 있도록 한다. -{{% /capture %}} -{{% capture body %}} + + ## 노드에서 컨트롤 플레인으로의 통신 노드에서 컨트롤 플레인까지의 모든 통신 경로는 API 서버에서 종료된다(다른 마스터 컴포넌트 중 어느 것도 원격 서비스를 노출하도록 설계되지 않았다). 일반적인 배포에서 API 서버는 하나 이상의 클라이언트 [인증](/docs/reference/access-authn-authz/authentication/) 형식이 활성화된 보안 HTTPS 포트(443)에서 원격 연결을 수신하도록 구성된다. diff --git a/content/ko/docs/concepts/architecture/controller.md b/content/ko/docs/concepts/architecture/controller.md index b8c6556b2a..6688a969d7 100644 --- a/content/ko/docs/concepts/architecture/controller.md +++ b/content/ko/docs/concepts/architecture/controller.md @@ -1,10 +1,10 @@ --- title: 컨트롤러 -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + 로보틱스와 자동화에서 _컨트롤 루프_ 는 시스템 상태를 조절하는 종료되지 않는 루프이다. @@ -18,10 +18,10 @@ weight: 30 {{< glossary_definition term_id="controller" length="short">}} -{{% /capture %}} -{{% capture body %}} + + ## 컨트롤러 패턴 @@ -150,11 +150,12 @@ weight: 30 또는 쿠버네티스 외부에서 실행할 수 있다. 가장 적합한 것은 특정 컨트롤러의 기능에 따라 달라진다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [쿠버네티스 컨트롤 플레인](/ko/docs/concepts/#쿠버네티스-컨트롤-플레인)에 대해 읽기 * [쿠버네티스 오브젝트](/ko/docs/concepts/#쿠버네티스-오브젝트)의 몇 가지 기본 사항을 알아보자. 
* [쿠버네티스 API](/ko/docs/concepts/overview/kubernetes-api/)에 대해 더 배워 보자. * 만약 자신만의 컨트롤러를 작성하기 원한다면, 쿠버네티스 확장하기의 [확장 패턴](/ko/docs/concepts/extend-kubernetes/extend-cluster/#익스텐션-패턴)을 본다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/architecture/nodes.md b/content/ko/docs/concepts/architecture/nodes.md index 34690c7825..197ae71422 100644 --- a/content/ko/docs/concepts/architecture/nodes.md +++ b/content/ko/docs/concepts/architecture/nodes.md @@ -1,10 +1,10 @@ --- title: 노드 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + 쿠버네티스는 컨테이너를 파드내에 배치하고 _노드_ 에서 실행함으로 워크로드를 구동한다. 노드는 클러스터에 따라 가상 또는 물리적 머신일 수 있다. 각 노드에는 @@ -20,9 +20,9 @@ weight: 10 {{< glossary_tooltip text="컨테이너 런타임" term_id="container-runtime" >}} 그리고 {{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}}가 포함된다. -{{% /capture %}} -{{% capture body %}} + + ## 관리 @@ -322,12 +322,13 @@ kubelet은 `NodeStatus` 와 리스 오브젝트를 생성하고 업데이트 할 자세한 내용은 [노드의 컨트롤 토폴로지 관리 정책](/docs/tasks/administer-cluster/topology-manager/)을 본다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * 노드를 구성하는 [컴포넌트](/ko/docs/concepts/overview/components/#노드-컴포넌트)에 대해 알아본다. * [노드에 대한 API 정의](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#node-v1-core)를 읽어본다. * 아키텍처 디자인 문서의 [노드](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) 섹션을 읽어본다. * [테인트와 톨러레이션](/ko/docs/concepts/configuration/taint-and-toleration/)을 읽어본다. * [클러스터 오토스케일링](/ko/docs/tasks/administer-cluster/cluster-management/#클러스터-오토스케일링)을 읽어본다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/cluster-administration/addons.md b/content/ko/docs/concepts/cluster-administration/addons.md index ac66c7baa4..9e6f5ab7ec 100644 --- a/content/ko/docs/concepts/cluster-administration/addons.md +++ b/content/ko/docs/concepts/cluster-administration/addons.md @@ -1,9 +1,9 @@ --- title: 애드온 설치 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + 애드온은 쿠버네티스의 기능을 확장한다. @@ -12,10 +12,10 @@ content_template: templates/concept 각 섹션의 애드온은 알파벳 순으로 정렬되어 있다. 순서는 우선 순위와는 상관없다. -{{% /capture %}} -{{% capture body %}} + + ## 네트워킹과 네트워크 폴리시 @@ -55,4 +55,4 @@ content_template: templates/concept 잘 관리된 것들이 여기에 연결되어 있어야 한다. PR을 환영한다! -{{% /capture %}} + diff --git a/content/ko/docs/concepts/cluster-administration/certificates.md b/content/ko/docs/concepts/cluster-administration/certificates.md index cadcef4b17..d7051e4145 100644 --- a/content/ko/docs/concepts/cluster-administration/certificates.md +++ b/content/ko/docs/concepts/cluster-administration/certificates.md @@ -1,19 +1,19 @@ --- title: 인증서 -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + 클라이언트 인증서로 인증을 사용하는 경우 `easyrsa`, `openssl` 또는 `cfssl` 을 통해 인증서를 수동으로 생성할 수 있다. -{{% /capture %}} -{{% capture body %}} + + ### easyrsa @@ -249,4 +249,4 @@ done. [여기](/docs/tasks/tls/managing-tls-in-a-cluster)에 설명된 대로 인증에 사용할 x509 인증서를 프로비전 할 수 있다. 
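The `certificates.md` page converted just above ends by pointing to the managing-TLS task, which provisions x509 certificates through the `certificates.k8s.io` API rather than by hand with `easyrsa`, `openssl`, or `cfssl`. A hedged sketch of that API object, using the beta API of this era; the name is hypothetical and the request value is a placeholder, not real CSR data:

```yaml
# Sketch of a CertificateSigningRequest as used by the managing-TLS task.
# A real object carries a base64-encoded PKCS#10 CSR in spec.request.
apiVersion: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
metadata:
  name: my-svc.my-namespace                 # hypothetical name
spec:
  request: "<base64-encoded-PKCS10-CSR>"    # placeholder, not real data
  usages:
    - digital signature
    - key encipherment
    - server auth
```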
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/cluster-administration/cloud-providers.md b/content/ko/docs/concepts/cluster-administration/cloud-providers.md index 702dc3a54c..30dc7b230e 100644 --- a/content/ko/docs/concepts/cluster-administration/cloud-providers.md +++ b/content/ko/docs/concepts/cluster-administration/cloud-providers.md @@ -1,16 +1,16 @@ --- title: 클라우드 제공자 -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + 이 페이지에서는 특정 클라우드 제공자에서 실행 중인 쿠버네티스를 관리하는 방법에 대해 설명한다. -{{% /capture %}} -{{% capture body %}} + + ### kubeadm [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/)은 쿠버네티스 클러스터를 생성하는 데 많이 사용하는 옵션이다. kubeadm에는 클라우드 제공자에 대한 구성 정보를 지정하는 구성 옵션이 있다. 예를 들어 @@ -363,7 +363,7 @@ OpenStack 제공자에 대한 다음의 구성 옵션은 [kubenet] [kubenet]: /ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#kubenet -{{% /capture %}} + ## OVirt @@ -400,9 +400,9 @@ IBM 클라우드 쿠버네티스 서비스 제공자를 사용하면, 단일 영 쿠버네티스 노드 오브젝트의 이름은 IBM 클라우드 쿠버네티스 서비스 워커 노드 인스턴스의 프라이빗 IP 주소이다. ### 네트워킹 -IBM 클라우드 쿠버네티스 서비스 제공자는 노드의 네트워크 성능 품질과 네트워크 격리를 위한 VLAN을 제공한다. 사용자 정의 방화벽 및 Calico 네트워크 폴리시를 설정하여 클러스터에 추가적인 보안 계층을 추가하거나 VPN을 통해 온-프레미스 데이터센터에 클러스터를 연결할 수 있다. 자세한 내용은 [인-클러스터(in-cluster) 및 프라이빗 네트워킹 계획](https://cloud.ibm.com/docs/containers?topic=containers-cs_network_cluster#cs_network_cluster)을 참고한다. +IBM 클라우드 쿠버네티스 서비스 제공자는 노드의 네트워크 성능 품질과 네트워크 격리를 위한 VLAN을 제공한다. 사용자 정의 방화벽 및 Calico 네트워크 폴리시를 설정하여 클러스터에 추가적인 보안 계층을 추가하거나 VPN을 통해 온-프레미스 데이터센터에 클러스터를 연결할 수 있다. 자세한 내용은 [클러스터 네트워킹 구성](https://cloud.ibm.com/docs/containers?topic=containers-plan_clusters)을 참고한다. -퍼블릭 또는 클러스터 내에서 앱을 노출하기 위해 노드포트(NodePort), 로드밸런서 또는 인그레스 서비스를 활용할 수 있다. 어노테이션을 사용하여 인그레스 애플리케이션 로드 밸런서를 커스터마이징 할 수도 있다. 자세한 내용은 [외부 네트워킹으로 앱 노출 계획](https://cloud.ibm.com/docs/containers?topic=containers-cs_network_planning#cs_network_planning)을 참고한다. +퍼블릭 또는 클러스터 내에서 앱을 노출하기 위해 노드포트(NodePort), 로드밸런서 또는 인그레스 서비스를 활용할 수 있다. 어노테이션을 사용하여 인그레스 애플리케이션 로드 밸런서를 커스터마이징 할 수도 있다. 자세한 내용은 [앱을 노출할 서비스 선택하기](https://cloud.ibm.com/docs/containers?topic=containers-cs_network_planning#cs_network_planning)을 참고한다. ### 스토리지 IBM 클라우드 쿠버네티스 서비스 제공자는 쿠버네티스-네이티브 퍼시스턴트 볼륨을 활용하여 사용자가 파일, 블록 및 클라우드 오브젝트 스토리지를 앱에 마운트할 수 있도록 한다. 데이터를 지속적으로 저장하기 위해 서비스로서의-데이터베이스(database-as-a-service)와 써드파티 애드온을 사용할 수도 있다. 자세한 정보는 [고가용성 퍼시스턴트 스토리지 계획](https://cloud.ibm.com/docs/containers?topic=containers-storage_planning#storage_planning)을 참고한다. diff --git a/content/ko/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/ko/docs/concepts/cluster-administration/cluster-administration-overview.md index 9a5aba856b..d454b85ca0 100644 --- a/content/ko/docs/concepts/cluster-administration/cluster-administration-overview.md +++ b/content/ko/docs/concepts/cluster-administration/cluster-administration-overview.md @@ -1,15 +1,15 @@ --- title: 클러스터 관리 개요 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + 클러스터 관리 개요는 쿠버네티스 클러스터를 만들거나 관리하는 모든 사람들을 위한 것이다. 여기서는 쿠버네티스의 핵심 [개념](/ko/docs/concepts/)에 대해 잘 알고 있다고 가정한다. -{{% /capture %}} -{{% capture body %}} + + ## 클러스터 계획 [올바른 솔루션 고르기](/ko/docs/setup/pick-right-solution/)에서 쿠버네티스 클러스터를 어떻게 계획하고, 셋업하고, 구성하는 지에 대한 예시를 참조하자. 이 글에 쓰여진 솔루션들은 *배포판* 이라고 부른다. @@ -65,4 +65,4 @@ weight: 10 * [클러스터 활동 로깅과 모니터링](/docs/concepts/cluster-administration/logging/)은 쿠버네티스 로깅이 로깅의 작동 방법과 로깅을 어떻게 구현하는지 설명한다. 
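The Korean cloud-providers page above notes that kubeadm has configuration options for specifying cloud provider details, introducing a sample with "예를 들어" ("for example") that falls outside the hunks shown here. The upstream sample is along the following lines; treat the provider name and config path as environment-specific assumptions rather than values this diff pins down.

```yaml
# kubeadm configuration handing cloud-provider settings to the kubelet,
# in the style of the example the cloud-providers page refers to.
# "openstack" and the cloud.conf path are illustrative assumptions.
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
nodeRegistration:
  kubeletExtraArgs:
    cloud-provider: "openstack"                 # in-tree provider name
    cloud-config: "/etc/kubernetes/cloud.conf"  # provider config on the host
```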
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/cluster-administration/kubelet-garbage-collection.md b/content/ko/docs/concepts/cluster-administration/kubelet-garbage-collection.md index 348b3776f6..a6907ad44c 100644 --- a/content/ko/docs/concepts/cluster-administration/kubelet-garbage-collection.md +++ b/content/ko/docs/concepts/cluster-administration/kubelet-garbage-collection.md @@ -1,19 +1,19 @@ --- title: kubelet 가비지(Garbage) 수집 설정하기 -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + 가비지 수집은 사용되지 않는 이미지들과 컨테이너들을 정리하는 kubelet의 유용한 기능이다. Kubelet은 1분마다 컨테이너들에 대하여 가비지 수집을 수행하며, 5분마다 이미지들에 대하여 가비지 수집을 수행한다. 별도의 가비지 수집 도구들을 사용하는 것은, 이러한 도구들이 존재할 수도 있는 컨테이너들을 제거함으로써 kubelet 을 중단시킬 수도 있으므로 권장하지 않는다. -{{% /capture %}} -{{% capture body %}} + + ## 이미지 수집 @@ -77,10 +77,11 @@ kubelet이 관리하지 않는 컨테이너는 컨테이너 가비지 수집 대 | `--outofdisk-transition-frequency` | `--eviction-pressure-transition-period` | 축출이 다른 리소스로의 디스크 압력전환을 일반화 함 | -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + 자세한 내용은 [리소스 부족 처리 구성](/docs/tasks/administer-cluster/out-of-resource/)를 본다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/cluster-administration/logging.md b/content/ko/docs/concepts/cluster-administration/logging.md index 51526e84cb..5c7ce6cd8d 100644 --- a/content/ko/docs/concepts/cluster-administration/logging.md +++ b/content/ko/docs/concepts/cluster-administration/logging.md @@ -1,19 +1,19 @@ --- title: 로깅 아키텍처 -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + 애플리케이션과 시스템 로그는 클러스터 내부에서 발생하는 상황을 이해하는 데 도움이 된다. 로그는 문제를 디버깅하고 클러스터 활동을 모니터링하는 데 특히 유용하다. 대부분의 최신 애플리케이션에는 일종의 로깅 메커니즘이 있다. 따라서, 대부분의 컨테이너 엔진은 일종의 로깅을 지원하도록 설계되었다. 컨테이너화된 애플리케이션에 가장 쉽고 가장 널리 사용되는 로깅 방법은 표준 출력과 표준 에러 스트림에 작성하는 것이다. 그러나, 일반적으로 컨테이너 엔진이나 런타임에서 제공하는 기본 기능은 완전한 로깅 솔루션으로 충분하지 않다. 예를 들어, 컨테이너가 크래시되거나, 파드가 축출되거나, 노드가 종료된 경우에도 여전히 애플리케이션의 로그에 접근하려고 한다. 따라서, 로그는 노드, 파드 또는 컨테이너와는 독립적으로 별도의 스토리지와 라이프사이클을 가져야 한다. 이 개념을 _클러스터-레벨-로깅_ 이라고 한다. 클러스터-레벨 로깅은 로그를 저장하고, 분석하고, 쿼리하기 위해 별도의 백엔드가 필요하다. 쿠버네티스는 로그 데이터를 위한 네이티브 스토리지 솔루션을 제공하지 않지만, 기존의 많은 로깅 솔루션을 쿠버네티스 클러스터에 통합할 수 있다. -{{% /capture %}} -{{% capture body %}} + + 클러스터-레벨 로깅 아키텍처는 로깅 백엔드가 클러스터 내부 또는 외부에 존재한다고 가정하여 설명한다. 클러스터-레벨 @@ -264,4 +264,4 @@ fluentd를 구성하는 것에 대한 자세한 내용은, 구현할 수 있다. 그러나, 이러한 로깅 메커니즘의 구현은 쿠버네티스의 범위를 벗어난다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/cluster-administration/manage-deployment.md b/content/ko/docs/concepts/cluster-administration/manage-deployment.md index ea5396350f..6bed969e90 100644 --- a/content/ko/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/ko/docs/concepts/cluster-administration/manage-deployment.md @@ -1,17 +1,17 @@ --- title: 리소스 관리 -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + 애플리케이션을 배포하고 서비스를 통해 노출했다. 이제 무엇을 해야 할까? 쿠버네티스는 확장과 업데이트를 포함하여, 애플리케이션 배포를 관리하는 데 도움이 되는 여러 도구를 제공한다. 더 자세히 설명할 기능 중에는 [구성 파일](/ko/docs/concepts/configuration/overview/)과 [레이블](/ko/docs/concepts/overview/working-with-objects/labels/)이 있다. -{{% /capture %}} -{{% capture body %}} + + ## 리소스 구성 구성하기 @@ -154,7 +154,7 @@ deployment.apps/my-deployment created persistentvolumeclaim/my-pvc created ``` -`kubectl` 에 대해 더 자세히 알고 싶다면, [kubectl 개요](/docs/reference/kubectl/overview/)를 참조한다. +`kubectl` 에 대해 더 자세히 알고 싶다면, [kubectl 개요](/ko/docs/reference/kubectl/overview/)를 참조한다. ## 효과적인 레이블 사용 @@ -447,11 +447,12 @@ kubectl edit deployment/my-nginx 이것으로 끝이다! 
디플로이먼트는 배포된 nginx 애플리케이션을 배후에서 점차적으로 업데이트한다. 업데이트되는 동안 특정 수의 이전 레플리카만 중단될 수 있으며, 원하는 수의 파드 위에 특정 수의 새 레플리카만 생성될 수 있다. 이에 대한 더 자세한 내용을 보려면, [디플로이먼트 페이지](/ko/docs/concepts/workloads/controllers/deployment/)를 방문한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [애플리케이션 검사 및 디버깅에 `kubectl` 을 사용하는 방법](/docs/tasks/debug-application-cluster/debug-application-introspection/)에 대해 알아본다. - [구성 모범 사례 및 팁](/ko/docs/concepts/configuration/overview/)을 참고한다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/cluster-administration/networking.md b/content/ko/docs/concepts/cluster-administration/networking.md index bdc0981f59..28508d58f2 100644 --- a/content/ko/docs/concepts/cluster-administration/networking.md +++ b/content/ko/docs/concepts/cluster-administration/networking.md @@ -1,10 +1,10 @@ --- title: 클러스터 네트워킹 -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + 네트워킹은 쿠버네티스의 중심적인 부분이지만, 어떻게 작동하는지 정확하게 이해하기가 어려울 수 있다. 쿠버네티스에는 4가지 대응해야 할 네트워킹 문제가 있다. @@ -15,10 +15,10 @@ weight: 50 3. 파드와 서비스 간 통신: 이 문제는 [서비스](/ko/docs/concepts/services-networking/service/)에서 다룬다. 4. 외부와 서비스 간 통신: 이 문제는 [서비스](/ko/docs/concepts/services-networking/service/)에서 다룬다. -{{% /capture %}} -{{% capture body %}} + + 쿠버네티스는 애플리케이션 간에 머신을 공유하는 것이다. 일반적으로, 머신을 공유하려면 두 애플리케이션이 동일한 포트를 사용하지 않도록 @@ -310,12 +310,13 @@ OVN은 Open vSwitch 커뮤니티에서 개발한 오픈소스 네트워크 독립형으로 실행된다. 두 버전에서, 실행하기 위해 구성이나 추가 코드가 필요하지 않으며, 두 경우 모두, 쿠버네티스의 표준과 같이 네트워크에서 파드별로 하나의 IP 주소를 제공한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + 네트워크 모델의 초기 설계와 그 근거 및 미래의 계획은 [네트워킹 디자인 문서](https://git.k8s.io/community/contributors/design-proposals/network/networking.md)에 자세히 설명되어 있다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/cluster-administration/proxies.md b/content/ko/docs/concepts/cluster-administration/proxies.md index 3b8b2d32a1..df43157578 100644 --- a/content/ko/docs/concepts/cluster-administration/proxies.md +++ b/content/ko/docs/concepts/cluster-administration/proxies.md @@ -1,14 +1,14 @@ --- title: 쿠버네티스에서 프락시(Proxy) -content_template: templates/concept +content_type: concept weight: 90 --- -{{% capture overview %}} + 이 페이지는 쿠버네티스에서 함께 사용되는 프락시(Proxy)를 설명한다. -{{% /capture %}} -{{% capture body %}} + + ## 프락시 @@ -62,6 +62,6 @@ weight: 90 프락시는 리다이렉트 기능을 대체했다. 리다이렉트는 더 이상 사용하지 않는다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/configuration/configmap.md b/content/ko/docs/concepts/configuration/configmap.md index 42beb83ed5..8e5eb3bed1 100644 --- a/content/ko/docs/concepts/configuration/configmap.md +++ b/content/ko/docs/concepts/configuration/configmap.md @@ -1,10 +1,10 @@ --- title: 컨피그맵(ConfigMap) -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< glossary_definition term_id="configmap" prepend="컨피그맵은" length="all" >}} @@ -15,9 +15,9 @@ weight: 20 사용하여 데이터를 비공개로 유지하자. {{< /caution >}} -{{% /capture %}} -{{% capture body %}} + + ## 사용 동기 애플리케이션 코드와 별도로 구성 데이터를 설정하려면 컨피그맵을 사용하자. @@ -158,12 +158,13 @@ spec: {{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [시크릿](/docs/concepts/configuration/secret/)에 대해 읽어본다. * [컨피그맵을 사용하도록 파드 구성하기](/docs/tasks/configure-pod-container/configure-pod-configmap/)를 읽어본다. * 코드를 구성에서 분리하려는 동기를 이해하려면 [Twelve-Factor 앱](https://12factor.net/ko/)을 읽어본다. 
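The `configmap.md` page converted above distinguishes property-like keys from file-like keys in a ConfigMap's `data` section. A small sketch of that shape, with all key names and values invented for illustration:

```yaml
# ConfigMap mixing property-like and file-like keys, as the configmap page
# above describes. Names and values are invented for illustration.
apiVersion: v1
kind: ConfigMap
metadata:
  name: game-demo                  # hypothetical name
data:
  player_initial_lives: "3"        # property-like key: one simple value
  game.properties: |               # file-like key: a whole file's content
    enemy.types=aliens,monsters
    player.maximum-lives=5
```

A pod can consume these keys as environment variables or mount them as files in a volume, which is what the linked task on configuring pods to use a ConfigMap walks through.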
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/configuration/manage-resources-containers.md b/content/ko/docs/concepts/configuration/manage-resources-containers.md index b54f12a48b..90991bc49e 100644 --- a/content/ko/docs/concepts/configuration/manage-resources-containers.md +++ b/content/ko/docs/concepts/configuration/manage-resources-containers.md @@ -1,6 +1,6 @@ --- title: 컨테이너 리소스 관리 -content_template: templates/concept +content_type: concept weight: 40 feature: title: 자동 빈 패킹(bin packing) @@ -8,7 +8,7 @@ feature: 리소스 요구 사항과 기타 제약 조건에 따라 컨테이너를 자동으로 배치하지만, 가용성은 그대로 유지한다. 활용도를 높이고 더 많은 리소스를 절약하기 위해 중요한(critical) 워크로드와 최선의(best-effort) 워크로드를 혼합한다. --- -{{% capture overview %}} + {{< glossary_tooltip text="파드" term_id="pod" >}}를 지정할 때, {{< glossary_tooltip text="컨테이너" term_id="container" >}}에 필요한 각 리소스의 양을 선택적으로 지정할 수 있다. @@ -21,10 +21,10 @@ feature: 컨테이너가 사용할 수 있도록 해당 시스템 리소스의 최소 _요청_ 량을 예약한다. -{{% /capture %}} -{{% capture body %}} + + ## 요청 및 제한 @@ -740,10 +740,11 @@ LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-0 -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [컨테이너와 파드에 메모리 리소스를 할당](/ko/docs/tasks/configure-pod-container/assign-memory-resource/)하는 핸즈온 경험을 해보자. @@ -758,4 +759,4 @@ LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-0 * XFS의 [프로젝트 쿼터](http://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html)에 대해 읽어보기 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md index d24d749d26..0e50a842bb 100644 --- a/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md +++ b/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md @@ -1,10 +1,10 @@ --- title: kubeconfig 파일을 사용하여 클러스터 접근 구성하기 -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + kubeconfig 파일들을 사용하여 클러스터, 사용자, 네임스페이스 및 인증 메커니즘에 대한 정보를 관리하자. `kubectl` 커맨드라인 툴은 kubeconfig 파일을 사용하여 @@ -25,10 +25,10 @@ kubeconfig 파일들을 사용하여 클러스터, 사용자, 네임스페이스 kubeconfig 파일을 생성하고 지정하는 단계별 지시사항은 [다중 클러스터로 접근 구성하기](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)를 참조한다. -{{% /capture %}} -{{% capture body %}} + + ## 다중 클러스터, 사용자와 인증 메커니즘 지원 @@ -143,14 +143,15 @@ kubeconfig 파일에서 파일과 경로 참조는 kubeconfig 파일의 위치 `$HOME/.kube/config`에서 상대 경로는 상대적으로, 절대 경로는 절대적으로 저장한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [다중 클러스터 접근 구성하기](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) * [`kubectl config`](/docs/reference/generated/kubectl/kubectl-commands#config) -{{% /capture %}} + diff --git a/content/ko/docs/concepts/configuration/overview.md b/content/ko/docs/concepts/configuration/overview.md index 7611be8cb6..db45ca2d2c 100644 --- a/content/ko/docs/concepts/configuration/overview.md +++ b/content/ko/docs/concepts/configuration/overview.md @@ -1,16 +1,16 @@ --- title: 구성 모범 사례 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + 이 문서는 사용자 가이드, 시작하기 문서 및 예제들에 걸쳐 소개된 구성 모범 사례를 강조하고 통합한다. 이 문서는 지속적으로 변경 가능하다. 이 목록에 없지만 다른 사람들에게 유용할 것 같은 무엇인가를 생각하고 있다면, 새로운 이슈를 생성하거나 풀 리퀘스트를 제출하는 것을 망설이지 말기를 바란다. -{{% /capture %}} -{{% capture body %}} + + ## 일반적인 구성 팁 - 구성을 정의할 때, 안정된 최신 API 버전을 명시한다. 
@@ -57,8 +57,7 @@ DNS 서버는 새로운 `서비스`를 위한 쿠버네티스 API를 Watch하며 - `hostPort`와 같은 이유로, `hostNetwork`를 사용하는 것을 피한다. -- `kube-proxy` 로드 밸런싱이 필요하지 않을 때, 쉬운 서비스 발견을 위해 [헤드리스 서비스](/ko/docs/concepts/services-networking/service/#헤드리스-headless- -서비스)(`ClusterIP`의 값을 `None`으로 가지는)를 사용한다. +- `kube-proxy` 로드 밸런싱이 필요하지 않을 때, 쉬운 서비스 발견을 위해 [헤드리스 서비스](/ko/docs/concepts/services-networking/service/#헤드리스-headless-서비스)(`ClusterIP`의 값을 `None`으로 가지는)를 사용한다. ## 레이블 사용하기 @@ -76,7 +75,7 @@ DNS 서버는 새로운 `서비스`를 위한 쿠버네티스 API를 Watch하며 - `imagePullPolicy: IfNotPresent`: 이미지가 로컬에 이미 존재하지 않으면 이미지가 풀(Pull) 된다. -- `imagePullPolicy: Always`: 파드가 시작될 때마다 이미지가 풀(Pull) 된다. +- `imagePullPolicy: Always`: kubelet이 컨테이너를 시작할 때마다, kubelet은 컨테이너 이미지 레지스트리를 쿼리해서 이름을 이미지 다이제스트(digest)로 확인한다. kubelet에 정확한 다이제스트가 저장된 컨테이너 이미지가 로컬로 캐시된 경우, kubelet은 캐시된 이미지를 사용한다. 그렇지 않으면, kubelet은 확인한 다이제스트를 사용해서 이미지를 다운로드(pull)하고, 해당 이미지를 사용해서 컨테이너를 시작한다. - `imagePullPolicy`가 생략되어 있고, 이미지 태그가 `:latest` 이거나 생략되어 있다면 `Always`가 적용된다. @@ -104,4 +103,4 @@ DNS 서버는 새로운 `서비스`를 위한 쿠버네티스 API를 Watch하며 - 단일 컨테이너로 구성된 디플로이먼트와 서비스를 빠르게 생성하기 위해 `kubectl run`와 `kubectl expose`를 사용한다. [클러스터 내부의 애플리케이션에 접근하기 위한 서비스 사용](/docs/tasks/access-application-cluster/service-access-application-cluster/)에서 예시를 확인할 수 있다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/configuration/pod-overhead.md b/content/ko/docs/concepts/configuration/pod-overhead.md index c5efc58c1e..cafd3a921d 100644 --- a/content/ko/docs/concepts/configuration/pod-overhead.md +++ b/content/ko/docs/concepts/configuration/pod-overhead.md @@ -1,10 +1,10 @@ --- title: 파드 오버헤드 -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.18" state="beta" >}} @@ -14,10 +14,10 @@ _파드 오버헤드_ 는 컨테이너 리소스 요청과 상한 위에서 파 소비되는 리소스를 계산하는 기능이다. -{{% /capture %}} -{{% capture body %}} + + 쿠버네티스에서 파드의 오버헤드는 파드의 [런타임클래스](/ko/docs/concepts/containers/runtime-class/) 와 관련된 오버헤드에 따라 @@ -183,11 +183,12 @@ sudo crictl inspectp -o=json $POD_ID | grep cgroupsPath 이 기능은 kube-state-metrics 의 1.9 릴리스에서는 사용할 수 없지만, 다음 릴리스에서는 가능할 예정이다. 그 전까지는 소스로부터 kube-state-metric 을 빌드해야 한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [런타임클래스](/ko/docs/concepts/containers/runtime-class/) * [파드오버헤드 디자인](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md) -{{% /capture %}} + diff --git a/content/ko/docs/concepts/configuration/pod-priority-preemption.md b/content/ko/docs/concepts/configuration/pod-priority-preemption.md index 3a6b989c8e..ac39ed6c94 100644 --- a/content/ko/docs/concepts/configuration/pod-priority-preemption.md +++ b/content/ko/docs/concepts/configuration/pod-priority-preemption.md @@ -1,10 +1,10 @@ --- title: 파드 우선순위(priority)와 선점(preemption) -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.14" state="stable" >}} @@ -13,9 +13,9 @@ weight: 70 스케줄러는 우선순위가 낮은 파드를 선점(축출)하여 보류 중인 파드를 스케줄링할 수 있게 한다. -{{% /capture %}} -{{% capture body %}} + + {{< warning >}} @@ -404,7 +404,8 @@ kubelet 리소스 부족 축출은 사용량이 요청을 초과하지 않는 초과하지 않으면, 축출되지 않는다. 요청을 초과하는 우선순위가 더 높은 다른 파드가 축출될 수 있다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * 프라이어리티클래스와 관련하여 리소스쿼터 사용에 대해 [기본적으로 프라이어리티 클래스 소비 제한](/ko/docs/concepts/policy/resource-quotas/#기본적으로-우선-순위-클래스-소비-제한)을 읽어보자. 
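The pod-priority page converted above turns on PriorityClass objects and pods that reference them by name. A minimal sketch of the pair; the class name, value, and image are illustrative, not taken from that page:

```yaml
# PriorityClass plus a pod that opts into it, sketching the mechanism the
# pod-priority page describes. Name, value, and image are illustrative.
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: high-priority          # hypothetical class name
value: 1000000                 # higher values schedule first and may preempt
globalDefault: false
description: "For workloads allowed to preempt lower-priority pods."
---
apiVersion: v1
kind: Pod
metadata:
  name: important-app          # hypothetical pod
spec:
  priorityClassName: high-priority
  containers:
    - name: app
      image: nginx             # placeholder image
```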
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/configuration/resource-bin-packing.md b/content/ko/docs/concepts/configuration/resource-bin-packing.md index 5b6af1c661..4a8a6b7f2f 100644 --- a/content/ko/docs/concepts/configuration/resource-bin-packing.md +++ b/content/ko/docs/concepts/configuration/resource-bin-packing.md @@ -1,18 +1,18 @@ --- title: 확장된 리소스를 위한 리소스 빈 패킹(bin packing) -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.16" state="alpha" >}} kube-scheduler는 `RequestedToCapacityRatioResourceAllocation` 우선 순위 기능을 사용해서 확장된 리소스와 함께 리소스의 빈 패킹이 가능하도록 구성할 수 있다. 우선 순위 기능을 사용해서 맞춤 요구에 따라 kube-scheduler를 미세 조정할 수 있다. -{{% /capture %}} -{{% capture body %}} + + ## RequestedToCapacityRatioResourceAllocation을 사용해서 빈 패킹 활성화하기 @@ -190,4 +190,4 @@ NodeScore = (5 * 5) + (7 * 1) + (10 * 3) / (5 + 1 + 3) ``` -{{% /capture %}} + diff --git a/content/ko/docs/concepts/containers/container-environment.md b/content/ko/docs/concepts/containers/container-environment.md index b5cfaccbfc..95671af60d 100644 --- a/content/ko/docs/concepts/containers/container-environment.md +++ b/content/ko/docs/concepts/containers/container-environment.md @@ -1,17 +1,17 @@ --- title: 컨테이너 환경 변수 -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + 이 페이지는 컨테이너 환경에서 컨테이너에 가용한 리소스에 대해 설명한다. -{{% /capture %}} -{{% capture body %}} + + ## 컨테이너 환경 @@ -50,11 +50,12 @@ FOO_SERVICE_PORT=<서비스가 동작 중인 포트> 서비스에 지정된 IP 주소가 있고 [DNS 애드온](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/)이 활성화된 경우, DNS를 통해서 컨테이너가 서비스를 사용할 수 있다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [컨테이너 라이프사이클 훅(hooks)](/ko/docs/concepts/containers/container-lifecycle-hooks/)에 대해 더 배워 보기. * [컨테이너 라이프사이클 이벤트에 핸들러 부착](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/) 실제 경험 얻기. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/containers/container-lifecycle-hooks.md b/content/ko/docs/concepts/containers/container-lifecycle-hooks.md index 6264621a24..ac29c19c1b 100644 --- a/content/ko/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/ko/docs/concepts/containers/container-lifecycle-hooks.md @@ -1,18 +1,18 @@ --- title: 컨테이너 라이프사이클 훅(Hook) -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + 이 페이지는 kubelet이 관리하는 컨테이너가 관리 라이프사이클 동안의 이벤트에 의해 발동되는 코드를 실행하기 위해서 컨테이너 라이프사이클 훅 프레임워크를 사용하는 방법에 대해서 설명한다. -{{% /capture %}} -{{% capture body %}} + + ## 개요 @@ -109,12 +109,13 @@ Events: 1m 22s 2 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Warning FailedPostStartHook ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [컨테이너 환경](/ko/docs/concepts/containers/container-environment/)에 대해 더 배우기. * [컨테이너 라이프사이클 이벤트에 핸들러 부착](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/) 실습 경험하기. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/containers/images.md b/content/ko/docs/concepts/containers/images.md index 4a84219ff6..afc9a3076a 100644 --- a/content/ko/docs/concepts/containers/images.md +++ b/content/ko/docs/concepts/containers/images.md @@ -1,19 +1,19 @@ --- title: 이미지 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + 사용자 Docker 이미지를 생성하고 레지스트리에 푸시(push)하여 쿠버네티스 파드에서 참조되기 이전에 대비한다. 컨테이너의 `image` 속성은 `docker` 커맨드에서 지원하는 문법과 같은 문법을 지원한다. 
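The ja `images.md` page converted above notes that a container's `image` field accepts the same syntax as `docker`, including private registries and tags, and it later covers `imagePullSecrets`. A sketch combining the two; the registry host, tag, and secret name are placeholders:

```yaml
# Pod pulling a tagged image from a private registry. Registry, tag, and
# secret name are placeholders for illustration.
apiVersion: v1
kind: Pod
metadata:
  name: private-image-demo       # hypothetical name
spec:
  containers:
    - name: app
      image: registry.example.com/team/app:v1.2.3  # private registry + tag
      imagePullPolicy: IfNotPresent                # pull only when not cached
  imagePullSecrets:
    - name: regcred              # docker-registry Secret created beforehand
```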
이는 프라이빗 레지스트리와 태그를 포함한다. -{{% /capture %}} -{{% capture body %}} + + ## 이미지 업데이트 @@ -148,7 +148,7 @@ kubelet은 ECR 자격 증명을 가져오고 주기적으로 갱신할 것이다 ### IBM 클라우드 컨테이너 레지스트리 사용 IBM 클라우드 컨테이너 레지스트리는 멀티-테넌트 프라이빗 이미지 레지스트리를 제공하여 사용자가 이미지를 안전하게 저장하고 공유할 수 있도록 한다. 기본적으로, 프라이빗 레지스트리의 이미지는 통합된 취약점 조언기(Vulnerability Advisor)를 통해 조사되어 보안 이슈와 잠재적 취약성을 검출한다. IBM 클라우드 계정의 모든 사용자가 이미지에 접근할 수 있도록 하거나, IAM 역할과 정책으로 IBM 클라우드 컨테이너 레지스트리 네임스페이스의 접근 권한을 부여해서 사용할 수 있다. -IBM 클라우드 컨테이너 레지스트리 CLI 플러그인을 설치하고 사용자 이미지를 위한 네임스페이스를 생성하기 위해서는, [IBM 클라우드 컨테이너 레지스트리 시작하기](https://cloud.ibm.com/docs/Registry?topic=registry-getting-started)를 참고한다. +IBM 클라우드 컨테이너 레지스트리 CLI 플러그인을 설치하고 사용자 이미지를 위한 네임스페이스를 생성하기 위해서는, [IBM 클라우드 컨테이너 레지스트리 시작하기](https://cloud.ibm.com/docs/Registry?topic=Registry-getting-started)를 참고한다. 다른 추가적인 구성이 없는 IBM 클라우드 쿠버네티스 서비스 클러스터의 IBM 클라우드 컨테이너 레지스트리 내 기본 네임스페이스에 저장되어 있는 배포된 이미지를 동일 계정과 동일 지역에서 사용하려면 [이미지로부터 컨테이너 빌드하기](https://cloud.ibm.com/docs/containers?topic=containers-images)를 본다. 다른 구성 옵션에 대한 것은 [레지스트리부터 클러스터에 이미지를 가져오도록 권한을 부여하는 방법 이해하기](https://cloud.ibm.com/docs/containers?topic=containers-registry#cluster_registry_auth)를 본다. @@ -367,4 +367,4 @@ imagePullSecrets을 셋팅하여 자동화할 수 있다. 다중 레지스트리에 접근해야 하는 경우, 각 레지스트리에 대해 하나의 시크릿을 생성할 수 있다. Kubelet은 모든`imagePullSecrets` 파일을 하나의 가상`.docker / config.json` 파일로 병합한다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/containers/overview.md b/content/ko/docs/concepts/containers/overview.md index 11d29a18ce..7ad30f5749 100644 --- a/content/ko/docs/concepts/containers/overview.md +++ b/content/ko/docs/concepts/containers/overview.md @@ -1,10 +1,10 @@ --- title: 컨테이너 개요 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + 컨테이너는 런타임에 필요한 종속성과 애플리케이션의 컴파일 된 코드를 패키징 하는 기술이다. 실행되는 각각의 @@ -15,10 +15,10 @@ weight: 10 컨테이너는 기본 호스트 인프라 환경에서 애플리케이션의 실행환경을 분리한다. 따라서 다양한 클라우드 환경이나 운영체제에서 쉽게 배포 할 수 있다. -{{% /capture %}} -{{% capture body %}} + + ## 컨테이너 이미지 [컨테이너 이미지](/ko/docs/concepts/containers/images/) 는 즉시 실행할 수 있는 @@ -36,8 +36,9 @@ weight: 10 {{< glossary_definition term_id="container-runtime" length="all" >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [컨테이너 이미지](/ko/docs/concepts/containers/images/)에 대해 읽어보기 * [파드](/ko/docs/concepts/workloads/pods/)에 대해 읽어보기 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/containers/runtime-class.md b/content/ko/docs/concepts/containers/runtime-class.md index f1fd42cad6..8af3bda7a8 100644 --- a/content/ko/docs/concepts/containers/runtime-class.md +++ b/content/ko/docs/concepts/containers/runtime-class.md @@ -1,10 +1,10 @@ --- title: 런타임 클래스 -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.12" state="alpha" >}} @@ -13,10 +13,10 @@ weight: 20 런타임클래스는 컨테이너 런타임을 구성을 선택하는 기능이다. 컨테이너 런타임 구성은 파드의 컨테이너를 실행하는데 사용된다. -{{% /capture %}} -{{% capture body %}} + + ## 동기 @@ -176,12 +176,13 @@ PodOverhead를 사용하려면, PodOverhead [기능 게이트](/docs/reference/c 해당 런타임 클래스를 사용해서 구동 중인 파드의 오버헤드를 특정할 수 있고 이 오버헤드가 쿠버네티스 내에서 처리된다는 것을 보장할 수 있다. 
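As a hedged companion to the overhead discussion above, a minimal RuntimeClass sketch that declares a fixed per-pod overhead. The `kata` handler name and the resource figures are assumptions, and the PodOverhead feature gate must be enabled for the `overhead` field to be honored:

```yaml
apiVersion: node.k8s.io/v1beta1
kind: RuntimeClass
metadata:
  name: kata            # hypothetical name
handler: kata           # must match a handler configured on the nodes
overhead:
  podFixed:             # accounted for per pod on top of container requests
    memory: "120Mi"
    cpu: "250m"
```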
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [런타임 클래스 설계](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/runtime-class.md) - [런타임 클래스 스케줄링 설계](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/runtime-class-scheduling.md) - [파드 오버헤드](/docs/concepts/configuration/pod-overhead/) 개념에 대해 읽기 - [파드 오버헤드 기능 설계](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md) -{{% /capture %}} + diff --git a/content/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md b/content/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md index fe8af20904..3b0b42dfcc 100644 --- a/content/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md +++ b/content/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md @@ -1,19 +1,19 @@ --- title: 애그리게이션 레이어(aggregation layer)로 쿠버네티스 API 확장하기 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + 애그리게이션 레이어는 코어 쿠버네티스 API가 제공하는 기능 이외에 더 많은 기능을 제공할 수 있도록 추가 API를 더해 쿠버네티스를 확장할 수 있게 해준다. 추가 API는 [서비스-카탈로그](/docs/concepts/extend-kubernetes/service-catalog/)와 같이 미리 만들어진 솔루션이거나 사용자가 직접 개발한 API일 수 있다. 애그리게이션 레이어는 [사용자 정의 리소스](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)와는 다르며, 애그리게이션 레이어는 {{< glossary_tooltip term_id="kube-apiserver" text="kube-apiserver" >}} 가 새로운 종류의 오브젝트를 인식하도록 하는 방법이다. -{{% /capture %}} -{{% capture body %}} + + ## 애그리게이션 레이어 @@ -30,13 +30,14 @@ extention API server가 레이턴시 요구 사항을 달성할 수 없는 경 `EnableAggregatedDiscoveryTimeout=false` [기능 게이트](/docs/reference/command-line-tools-reference/feature-gates/)를 설정해서 타임아웃 제한을 비활성화 할 수 있다. 이 사용 중단(deprecated)된 기능 게이트는 향후 릴리스에서 제거될 예정이다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * 사용자의 환경에서 Aggregator를 동작시키려면, [애그리게이션 레이어를 설정한다](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/). * 다음에, [extension api-server를 구성해서](/docs/tasks/access-kubernetes-api/setup-extension-api-server/) 애그리게이션 레이어와 연계한다. * 또한, 어떻게 [쿠버네티스 API를 커스텀 리소스 데피니션으로 확장하는지](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/)를 배워본다. * [API 서비스](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#apiservice-v1-apiregistration-k8s-io)의 사양을 읽어본다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index 84481bdec7..9e9f3e9e29 100644 --- a/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -1,18 +1,18 @@ --- title: 커스텀 리소스 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + *커스텀 리소스* 는 쿠버네티스 API의 익스텐션이다. 이 페이지에서는 쿠버네티스 클러스터에 커스텀 리소스를 추가할 시기와 독립형 서비스를 사용하는 시기에 대해 설명한다. 커스텀 리소스를 추가하는 두 가지 방법과 이들 중에서 선택하는 방법에 대해 설명한다. -{{% /capture %}} -{{% capture body %}} + + ## 커스텀 리소스 *리소스* 는 [쿠버네티스 API](/ko/docs/reference/using-api/api-overview/)에서 특정 종류의 @@ -243,12 +243,13 @@ CRD는 항상 API 서버의 빌트인 리소스와 동일한 인증, 권한 부 - 작성한 REST 클라이언트 - [쿠버네티스 클라이언트 생성 도구](https://github.com/kubernetes/code-generator)를 사용하여 생성된 클라이언트(하나를 생성하는 것은 고급 기능이지만, 일부 프로젝트는 CRD 또는 AA와 함께 클라이언트를 제공할 수 있다). 
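To illustrate the CRD route discussed above, a minimal CustomResourceDefinition sketch. The `stable.example.com` group and the CronTab naming follow the common upstream tutorial convention and are assumptions here:

```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  # Must be <plural>.<group>
  name: crontabs.stable.example.com
spec:
  group: stable.example.com
  scope: Namespaced
  names:
    plural: crontabs
    singular: crontab
    kind: CronTab
  versions:
  - name: v1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            type: object
            properties:
              cronSpec:
                type: string
```

Once the CRD is applied, `CronTab` objects can be created and read with `kubectl` like any built-in resource.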
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [애그리게이션 레이어(aggregation layer)로 쿠버네티스 API 확장](/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)하는 방법에 대해 배우기. * [커스텀리소스데피니션으로 쿠버네티스 API 확장](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/)하는 방법에 대해 배우기. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index 0912bcdcde..d75601de9f 100644 --- a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -1,11 +1,11 @@ --- title: 장치 플러그인 description: GPU, NIC, FPGA, InfiniBand 및 공급 업체별 설정이 필요한 유사한 리소스를 위한 플러그인을 구현하는데 쿠버네티스 장치 플러그인 프레임워크를 사용한다. -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.10" state="beta" >}} 쿠버네티스는 시스템 하드웨어 리소스를 {{< glossary_tooltip term_id="kubelet" >}}에 알리는 데 사용할 수 있는 @@ -18,9 +18,9 @@ weight: 20 및 공급 업체별 초기화 및 설정이 필요할 수 있는 기타 유사한 컴퓨팅 리소스가 포함된다. -{{% /capture %}} -{{% capture body %}} + + ## 장치 플러그인 등록 @@ -224,12 +224,13 @@ pluginapi.Device{ID: "25102017", Health: pluginapi.Healthy, Topology:&pluginapi. * [SR-IOV 네트워크 장치 플러그인](https://github.com/intel/sriov-network-device-plugin) * Xilinx FPGA 장치용 [Xilinx FPGA 장치 플러그인](https://github.com/Xilinx/FPGA_as_a_Service/tree/master/k8s-fpga-device-plugin/trunk) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * 장치 플러그인을 사용한 [GPU 리소스 스케줄링](/docs/tasks/manage-gpus/scheduling-gpus/)에 대해 알아보기 * 노드에서의 [확장 리소스 알리기](/docs/tasks/administer-cluster/extended-resource-node/)에 대해 배우기 * 쿠버네티스에서 [TLS 수신에 하드웨어 가속](https://kubernetes.io/blog/2019/04/24/hardware-accelerated-ssl/tls-termination-in-ingress-controllers-using-kubernetes-device-plugins-and-runtimeclass/) 사용에 대해 읽기 * [토폴로지 관리자](/docs/tasks/adminster-cluster/topology-manager/)에 대해 알아보기 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md index 137195e7e7..df26abccc5 100644 --- a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md +++ b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md @@ -1,11 +1,11 @@ --- title: 네트워크 플러그인 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< feature-state state="alpha" >}} {{< caution >}}알파 기능은 빨리 변경될 수 있다. {{< /caution >}} @@ -15,9 +15,9 @@ weight: 10 * CNI 플러그인: 상호 운용성을 위해 설계된 appc/CNI 명세를 준수한다. * Kubenet 플러그인: `bridge` 와 `host-local` CNI 플러그인을 사용하여 기본 `cbr0` 구현한다. -{{% /capture %}} -{{% capture body %}} + + ## 설치 @@ -79,11 +79,13 @@ CNI 네트워킹 플러그인은 `hostPort` 를 지원한다. CNI 플러그인 #### 트래픽 셰이핑 지원 +**실험적인 기능입니다** + CNI 네트워킹 플러그인은 파드 수신 및 송신 트래픽 셰이핑도 지원한다. CNI 플러그인 팀에서 제공하는 공식 [대역폭(bandwidth)](https://github.com/containernetworking/plugins/tree/master/plugins/meta/bandwidth) 플러그인을 사용하거나 대역폭 제어 기능이 있는 자체 플러그인을 사용할 수 있다. 트래픽 셰이핑 지원을 활성화하려면, CNI 구성 파일 (기본값 `/etc/cni/net.d`)에 `bandwidth` 플러그인을 -추가해야 한다. +추가하고, 바이너리가 CNI 실행 파일 디렉터리(기본값: `/opt/cni/bin`)에 포함되어있는지 확인한다. 
```json { @@ -160,8 +162,9 @@ AWS에서 `eth0` MTU는 일반적으로 9001이므로, `--network-plugin-mtu=900 * `--network-plugin=kubenet` 은 `/opt/cni/bin` 또는 `cni-bin-dir` 에 있는 CNI `bridge` 및 `host-local` 플러그인과 함께 kubenet 네트워크 플러그인을 사용하도록 지정한다. * 현재 kubenet 네트워크 플러그인에서만 사용하는 `--network-plugin-mtu=9001` 은 사용할 MTU를 지정한다. -{{% /capture %}} -{{% capture whatsnext %}} -{{% /capture %}} +## {{% heading "whatsnext" %}} + + + diff --git a/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md b/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md index 408ff70c22..ecf57f49fc 100644 --- a/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md +++ b/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md @@ -1,10 +1,10 @@ --- title: 쿠버네티스 클러스터 확장 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + 쿠버네티스는 매우 유연하게 구성할 수 있고 확장 가능하다. 결과적으로 쿠버네티스 프로젝트를 포크하거나 코드에 패치를 제출할 필요가 @@ -17,10 +17,10 @@ weight: 10 어떤 익스텐션 포인트와 패턴이 있는지, 그리고 그것들의 트레이드오프와 제약에 대한 소개 자료로 유용할 것이다. -{{% /capture %}} -{{% capture body %}} + + ## 개요 @@ -189,10 +189,11 @@ Kubelet이 바이너리 플러그인을 호출하여 볼륨을 마운트하도 [웹훅](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/scheduler_extender.md)을 지원한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [커스텀 리소스](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)에 대해 더 알아보기 * [동적 어드미션 컨트롤](/docs/reference/access-authn-authz/extensible-admission-controllers/)에 대해 알아보기 @@ -202,4 +203,4 @@ Kubelet이 바이너리 플러그인을 호출하여 볼륨을 마운트하도 * [kubectl 플러그인](/docs/tasks/extend-kubectl/kubectl-plugins/)에 대해 알아보기 * [오퍼레이터 패턴](/docs/concepts/extend-kubernetes/operator/)에 대해 알아보기 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/extend-kubernetes/operator.md b/content/ko/docs/concepts/extend-kubernetes/operator.md index 7d7854e05c..c663964e21 100644 --- a/content/ko/docs/concepts/extend-kubernetes/operator.md +++ b/content/ko/docs/concepts/extend-kubernetes/operator.md @@ -1,20 +1,20 @@ --- title: 오퍼레이터(operator) 패턴 -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + 오퍼레이터(Operator)는 [사용자 정의 리소스](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)를 사용하여 애플리케이션 및 해당 컴포넌트를 관리하는 쿠버네티스의 소프트웨어 익스텐션이다. 오퍼레이터는 쿠버네티스 원칙, 특히 [컨트롤 루프](/ko/docs/concepts/#쿠버네티스-컨트롤-플레인)를 따른다. -{{% /capture %}} -{{% capture body %}} + + ## 동기 부여 @@ -113,9 +113,10 @@ kubectl edit SampleDB/example-database # 일부 설정을 수동으로 변경하 또한 [쿠버네티스 API의 클라이언트](/ko/docs/reference/using-api/client-libraries/) 역할을 할 수 있는 모든 언어 / 런타임을 사용하여 오퍼레이터(즉, 컨트롤러)를 구현한다. 
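The `SampleDB` example above implies a custom resource that the operator's control loop reconciles. A minimal sketch of such an object follows; the API group, version, and `replicas` field are invented for illustration, since the page does not define them:

```yaml
# Hypothetical custom resource watched by a SampleDB operator.
apiVersion: example.com/v1alpha1   # assumed group/version
kind: SampleDB
metadata:
  name: example-database
spec:
  replicas: 3   # the operator's control loop would reconcile toward this
```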
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [사용자 정의 리소스](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)에 대해 더 알아보기 * [OperatorHub.io](https://operatorhub.io/)에서 유스케이스에 맞는 이미 만들어진 오퍼레이터 찾기 @@ -129,5 +130,5 @@ kubectl edit SampleDB/example-database # 일부 설정을 수동으로 변경하 * 오퍼레이터 패턴을 소개한 [CoreOS 원본 기사](https://coreos.com/blog/introducing-operators.html) 읽기 * 오퍼레이터 구축을 위한 모범 사례에 대한 구글 클라우드(Google Cloud)의 [기사](https://cloud.google.com/blog/products/containers-kubernetes/best-practices-for-building-kubernetes-operators-and-stateful-apps) 읽기 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/overview/components.md b/content/ko/docs/concepts/overview/components.md index 9db222e271..3d4a8b8370 100644 --- a/content/ko/docs/concepts/overview/components.md +++ b/content/ko/docs/concepts/overview/components.md @@ -1,13 +1,13 @@ --- title: 쿠버네티스 컴포넌트 -content_template: templates/concept +content_type: concept weight: 20 card: name: concepts weight: 20 --- -{{% capture overview %}} + 쿠버네티스를 배포하면 클러스터를 얻는다. {{< glossary_definition term_id="cluster" length="all" prepend="쿠버네티스 클러스터는">}} @@ -18,9 +18,9 @@ card: ![쿠버네티스의 컴포넌트](/images/docs/components-of-kubernetes.png) -{{% /capture %}} -{{% capture body %}} + + ## 컨트롤 플레인 컴포넌트 컨트롤 플레인 컴포넌트는 클러스터에 관한 전반적인 결정(예를 들어, 스케줄링)을 수행하고 클러스터 이벤트(예를 들어, 디플로이먼트의 `replicas` 필드에 대한 요구 조건이 충족되지 않을 경우 새로운 {{< glossary_tooltip text="파드" term_id="pod">}}를 구동시키는 것)를 감지하고 반응한다. @@ -118,10 +118,11 @@ kube-controller-manager와 마찬가지로 cloud-controller-manager는 논리적 [클러스터-레벨 로깅](/docs/concepts/cluster-administration/logging/) 메커니즘은 검색/열람 인터페이스와 함께 중앙 로그 저장소에 컨테이너 로그를 저장하는 책임을 진다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [노드](/ko/docs/concepts/architecture/nodes/)에 대해 더 배우기 * [컨트롤러](/ko/docs/concepts/architecture/controller/)에 대해 더 배우기 * [kube-scheduler](/ko/docs/concepts/scheduling-eviction/kube-scheduler/)에 대해 더 배우기 * etcd의 공식 [문서](https://etcd.io/docs/) 읽기 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/overview/kubernetes-api.md b/content/ko/docs/concepts/overview/kubernetes-api.md index ce0b55e113..aa5d4a043d 100644 --- a/content/ko/docs/concepts/overview/kubernetes-api.md +++ b/content/ko/docs/concepts/overview/kubernetes-api.md @@ -1,13 +1,13 @@ --- title: 쿠버네티스 API -content_template: templates/concept +content_type: concept weight: 30 card: name: concepts weight: 30 --- -{{% capture overview %}} + 전체 API 관례는 [API conventions doc](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md)에 기술되어 있다. @@ -15,16 +15,16 @@ API 엔드포인트, 리소스 타입과 샘플은 [API Reference](/docs/referen API에 원격 접속하는 방법은 [Controlling API Access doc](/docs/reference/access-authn-authz/controlling-access/)에서 논의되었다. -쿠버네티스 API는 시스템을 위한 선언적 설정 스키마를 위한 기초가 되기도 한다. [kubectl](/docs/reference/kubectl/overview/) 커맨드라인 툴을 사용해서 API 오브젝트를 생성, 업데이트, 삭제 및 조회할 수 있다. +쿠버네티스 API는 시스템을 위한 선언적 설정 스키마를 위한 기초가 되기도 한다. [kubectl](/ko/docs/reference/kubectl/overview/) 커맨드라인 툴을 사용해서 API 오브젝트를 생성, 업데이트, 삭제 및 조회할 수 있다. 쿠버네티스는 또한 API 리소스에 대해 직렬화된 상태를 (현재는 [etcd](https://coreos.com/docs/distributed-configuration/getting-started-with-etcd/)에) 저장한다. 쿠버네티스 자체는 여러 컴포넌트로 나뉘어져서 각각의 API를 통해 상호작용한다. -{{% /capture %}} -{{% capture body %}} + + ## API 변경 @@ -137,4 +137,4 @@ API 그룹은 REST 경로와 직렬화된 객체의 `apiVersion` 필드에 명 {{< note >}}개별 리소스의 활성화/비활성화는 레거시 문제로 `extensions/v1beta1` API 그룹에서만 지원된다. 
{{< /note >}} -{{% /capture %}} + diff --git a/content/ko/docs/concepts/overview/what-is-kubernetes.md b/content/ko/docs/concepts/overview/what-is-kubernetes.md index 1bbffa96cc..f94ab988a3 100644 --- a/content/ko/docs/concepts/overview/what-is-kubernetes.md +++ b/content/ko/docs/concepts/overview/what-is-kubernetes.md @@ -2,18 +2,18 @@ title: 쿠버네티스란 무엇인가? description: > 쿠버네티스는 컨테이너화된 워크로드와 서비스를 관리하기 위한 이식할 수 있고, 확장 가능한 오픈소스 플랫폼으로, 선언적 구성과 자동화를 모두 지원한다. 쿠버네티스는 크고 빠르게 성장하는 생태계를 가지고 있다. 쿠버네티스 서비스, 지원 그리고 도구들은 광범위하게 제공된다. -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 10 --- -{{% capture overview %}} + 이 페이지에서는 쿠버네티스 개요를 설명한다. -{{% /capture %}} -{{% capture body %}} + + 쿠버네티스는 컨테이너화된 워크로드와 서비스를 관리하기 위한 이식성이 있고, 확장가능한 오픈소스 플랫폼이다. 쿠버네티스는 선언적 구성과 자동화를 모두 용이하게 해준다. 쿠버네티스는 크고, 빠르게 성장하는 생태계를 가지고 있다. 쿠버네티스 서비스, 기술 지원 및 도구는 어디서나 쉽게 이용할 수 있다. 쿠버네티스란 명칭은 키잡이(helmsman)나 파일럿을 뜻하는 그리스어에서 유래했다. 구글이 2014년에 쿠버네티스 프로젝트를 오픈소스화했다. 쿠버네티스는 프로덕션 워크로드를 대규모로 운영하는 [15년 이상의 구글 경험](/blog/2015/04/borg-predecessor-to-kubernetes/)과 커뮤니티의 최고의 아이디어와 적용 사례가 결합되어 있다. @@ -83,9 +83,10 @@ card: * 포괄적인 머신 설정, 유지보수, 관리, 자동 복구 시스템을 제공하거나 채택하지 않는다. * 추가로, 쿠버네티스는 단순한 오케스트레이션 시스템이 아니다. 사실, 쿠버네티스는 오케스트레이션의 필요성을 없애준다. 오케스트레이션의 기술적인 정의는 A를 먼저 한 다음, B를 하고, C를 하는 것과 같이 정의된 워크플로우를 수행하는 것이다. 반면에, 쿠버네티스는 독립적이고 조합 가능한 제어 프로세스들로 구성되어 있다. 이 프로세스는 지속적으로 현재 상태를 입력받은 의도한 상태로 나아가도록 한다. A에서 C로 어떻게 갔는지는 상관이 없다. 중앙화된 제어도 필요치 않다. 이로써 시스템이 보다 더 사용하기 쉬워지고, 강력해지며, 견고하고, 회복력을 갖추게 되며, 확장 가능해진다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [쿠버네티스 구성요소](/ko/docs/concepts/overview/components/) 살펴보기 * [시작하기](/ko/docs/setup/) 준비가 되었는가? -{{% /capture %}} + diff --git a/content/ko/docs/concepts/overview/working-with-objects/annotations.md b/content/ko/docs/concepts/overview/working-with-objects/annotations.md index 4b238bf313..aa9c29cb64 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/annotations.md +++ b/content/ko/docs/concepts/overview/working-with-objects/annotations.md @@ -1,15 +1,15 @@ --- title: 어노테이션 -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + 쿠버네티스 어노테이션을 사용하여 임의의 비-식별 메타데이터를 오브젝트에 첨부할 수 있다. 도구 및 라이브러리와 같은 클라이언트는 이 메타데이터를 검색할 수 있다. -{{% /capture %}} -{{% capture body %}} + + ## 오브젝트에 메타데이터 첨부 레이블이나 어노테이션을 사용하여 쿠버네티스 @@ -88,10 +88,11 @@ spec: ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [레이블과 셀렉터](/ko/docs/concepts/overview/working-with-objects/labels/)에 대해 알아본다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/overview/working-with-objects/common-labels.md b/content/ko/docs/concepts/overview/working-with-objects/common-labels.md index 450255c37c..be7db19bb5 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/common-labels.md +++ b/content/ko/docs/concepts/overview/working-with-objects/common-labels.md @@ -1,17 +1,17 @@ --- title: 권장 레이블 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + kubectl과 대시보드와 같은 많은 도구들로 쿠버네티스 오브젝트를 시각화 하고 관리할 수 있다. 공통 레이블 셋은 모든 도구들이 이해할 수 있는 공통의 방식으로 오브젝트를 식별하고 도구들이 상호 운용적으로 작동할 수 있도록 한다. 권장 레이블은 지원 도구 외에도 쿼리하는 방식으로 애플리케이션을 식별하게 한다. -{{% /capture %}} -{{% capture body %}} + + 메타데이터는 _애플리케이션_ 의 개념을 중심으로 정리된다. 쿠버네티스는 플랫폼 서비스(PaaS)가 아니며 애플리케이션에 대해 공식적인 개념이 없거나 강요하지 않는다. 대신 애플리케이션은 비공식적이며 메타데이터로 설명된다. @@ -166,4 +166,4 @@ metadata: MySQL `StatefulSet` 과 `Service` 로 MySQL과 WordPress가 더 큰 범위의 애플리케이션에 포함되어 있는 것을 알게 된다. 
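Tying the WordPress and MySQL example together, a sketch of the recommended labels on the MySQL StatefulSet metadata (the spec is omitted and the instance suffix is illustrative):

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
  labels:
    app.kubernetes.io/name: mysql
    app.kubernetes.io/instance: mysql-abcxzy   # illustrative instance name
    app.kubernetes.io/managed-by: helm
    app.kubernetes.io/component: database
    app.kubernetes.io/part-of: wordpress
```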
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/ko/docs/concepts/overview/working-with-objects/kubernetes-objects.md index 3028155e22..1fe7183c29 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/ko/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -1,17 +1,17 @@ --- title: 쿠버네티스 오브젝트 이해하기 -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 40 --- -{{% capture overview %}} + 이 페이지에서는 쿠버네티스 오브젝트가 쿠버네티스 API에서 어떻게 표현되고, 그 오브젝트를 어떻게 `.yaml` 형식으로 표현할 수 있는지에 대해 설명한다. -{{% /capture %}} -{{% capture body %}} + + ## 쿠버네티스 오브젝트 이해하기 {#kubernetes-objects} *쿠버네티스 오브젝트* 는 쿠버네티스 시스템에서 영속성을 가지는 개체이다. 쿠버네티스는 클러스터의 상태를 나타내기 위해 이 개체를 이용한다. 구체적으로 말하자면, 다음을 기술할 수 있다. @@ -86,10 +86,11 @@ deployment.apps/nginx-deployment created 에서 확인할 수 있고, 디플로이먼트에 대한 `spec` 포맷은 [DeploymentSpec v1 apps](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#deploymentspec-v1-apps)에서 확인할 수 있다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * API 개념의 더 많은 설명은 [Kubernetes API 개요](/ko/docs/reference/using-api/api-overview/)를 본다. * [파드(Pod)](/ko/docs/concepts/workloads/pods/pod-overview/)와 같이, 가장 중요하고 기본적인 쿠버네티스 오브젝트에 대해 배운다. * 쿠버네티스의 [컨트롤러](/ko/docs/concepts/architecture/controller/)에 대해 배운다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/overview/working-with-objects/labels.md b/content/ko/docs/concepts/overview/working-with-objects/labels.md index 6fa5790a83..fe8b0ce8fb 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/labels.md +++ b/content/ko/docs/concepts/overview/working-with-objects/labels.md @@ -1,10 +1,10 @@ --- title: 레이블과 셀렉터 -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + _레이블_ 은 파드와 같은 오브젝트에 첨부된 키와 값의 쌍이다. 레이블은 오브젝트의 특성을 식별하는 데 사용되어 사용자에게 중요하지만, 코어 시스템에 직접적인 의미는 없다. @@ -22,10 +22,10 @@ _레이블_ 은 파드와 같은 오브젝트에 첨부된 키와 값의 쌍이 레이블은 UI와 CLI에서 효율적인 쿼리를 사용하고 검색에 사용하기에 적합하다. 식별되지 않는 정보는 [어노테이션](/ko/docs/concepts/overview/working-with-objects/annotations/)으로 기록해야 한다. -{{% /capture %}} -{{% capture body %}} + + ## 사용 동기 @@ -225,4 +225,4 @@ selector: 레이블을 통해 선택하는 사용 사례 중 하나는 파드를 스케줄 할 수 있는 노드 셋을 제한하는 것이다. 자세한 내용은 [노드 선택](/ko/docs/concepts/scheduling-eviction/assign-pod-node/) 문서를 참조한다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/overview/working-with-objects/names.md b/content/ko/docs/concepts/overview/working-with-objects/names.md index 3841e76c1e..0cb3e7656a 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/names.md +++ b/content/ko/docs/concepts/overview/working-with-objects/names.md @@ -1,10 +1,10 @@ --- title: 오브젝트 이름과 ID -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + 클러스터의 각 오브젝트는 해당 유형의 리소스에 대하여 고유한 [_이름_](#names) 을 가지고 있다. 또한, 모든 쿠버네티스 오브젝트는 전체 클러스터에 걸쳐 고유한 [_UID_](#uids) 를 가지고 있다. @@ -13,10 +13,10 @@ weight: 20 유일하지 않은 사용자 제공 속성의 경우 쿠버네티스는 [레이블](/ko/docs/concepts/overview/working-with-objects/labels/)과 [어노테이션](/ko/docs/concepts/overview/working-with-objects/annotations/)을 제공한다. -{{% /capture %}} -{{% capture body %}} + + ## 이름 {#names} @@ -79,8 +79,9 @@ spec: 쿠버네티스 UID는 보편적으로 고유한 식별자이다(또는 UUID라고 한다). UUID는 ISO/IEC 9834-8 과 ITU-T X.667 로 표준화 되어 있다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * 쿠버네티스의 [레이블](/ko/docs/concepts/overview/working-with-objects/labels/)에 대해 읽기. 
* [쿠버네티스의 식별자와 이름](https://git.k8s.io/community/contributors/design-proposals/architecture/identifiers.md) 디자인 문서 읽기. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/overview/working-with-objects/namespaces.md b/content/ko/docs/concepts/overview/working-with-objects/namespaces.md index 4707233f26..d5eb45f21c 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/ko/docs/concepts/overview/working-with-objects/namespaces.md @@ -1,18 +1,18 @@ --- title: 네임스페이스 -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + 쿠버네티스는 동일한 물리 클러스터를 기반으로 하는 여러 가상 클러스터를 지원한다. 이런 가상 클러스터를 네임스페이스라고 한다. -{{% /capture %}} -{{% capture body %}} + + ## 여러 개의 네임스페이스를 사용하는 경우 @@ -47,11 +47,11 @@ weight: 30 kubectl get namespace ``` ``` -NAME STATUS AGE -default Active 1d -kube-system Active 1d -kube-public Active 1d -kube-node-lease Active 1d +NAME STATUS AGE +default Active 1d +kube-node-lease Active 1d +kube-public Active 1d +kube-system Active 1d ``` 쿠버네티스는 처음에 세 개의 초기 네임스페이스를 갖는다. @@ -108,11 +108,12 @@ kubectl api-resources --namespaced=true kubectl api-resources --namespaced=false ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [신규 네임스페이스 생성](/docs/tasks/administer-cluster/namespaces/#creating-a-new-namespace)에 대해 더 배우기. * [네임스페이스 삭제](/docs/tasks/administer-cluster/namespaces/#deleting-a-namespace)에 대해 더 배우기. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/overview/working-with-objects/object-management.md b/content/ko/docs/concepts/overview/working-with-objects/object-management.md index bdb5ac476d..550cbe951c 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/object-management.md +++ b/content/ko/docs/concepts/overview/working-with-objects/object-management.md @@ -1,17 +1,17 @@ --- title: 쿠버네티스 오브젝트 관리 -content_template: templates/concept +content_type: concept weight: 15 --- -{{% capture overview %}} + `kubectl` 커맨드라인 툴은 쿠버네티스 오브젝트를 생성하고 관리하기 위한 몇 가지 상이한 방법을 지원한다. 이 문서는 여러가지 접근법에 대한 개요을 제공한다. Kubectl로 오브젝트 관리하기에 대한 자세한 설명은 [Kubectl 서적](https://kubectl.docs.kubernetes.io)에서 확인한다. -{{% /capture %}} -{{% capture body %}} + + ## 관리 기법 @@ -174,9 +174,10 @@ kubectl apply -R -f configs/ - 선언형 오브젝트 구성은 예상치 못한 결과를 디버깅하고 이해하기가 더 어렵다. - diff를 사용한 부분 업데이트는 복잡한 병합 및 패치 작업을 일으킨다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - [명령형 커맨드를 이용한 쿠버네티스 오브젝트 관리하기](/ko/docs/tasks/manage-kubernetes-objects/imperative-command/) - [오브젝트 구성을 이용한 쿠버네티스 오브젝트 관리하기(명령형)](/ko/docs/tasks/manage-kubernetes-objects/imperative-config/) - [오브젝트 구성을 이용한 쿠버네티스 오브젝트 관리하기(선언형)](/ko/docs/tasks/manage-kubernetes-objects/declarative-config/) @@ -187,4 +188,4 @@ kubectl apply -R -f configs/ {{< comment >}} {{< /comment >}} -{{% /capture %}} + diff --git a/content/ko/docs/concepts/policy/limit-range.md b/content/ko/docs/concepts/policy/limit-range.md index 11356ed3ee..e2bd0a10d3 100644 --- a/content/ko/docs/concepts/policy/limit-range.md +++ b/content/ko/docs/concepts/policy/limit-range.md @@ -1,19 +1,19 @@ --- title: 리밋 레인지(Limit Range) -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + 기본적으로 컨테이너는 쿠버네티스 클러스터에서 무제한 [컴퓨팅 리소스](/docs/user-guide/compute-resources)로 실행된다. 리소스 쿼터을 사용하면 클러스터 관리자는 {{< glossary_tooltip text="네임스페이스" term_id="namespace" >}}별로 리소스 사용과 생성을 제한할 수 있다. 네임스페이스 내에서 파드나 컨테이너는 네임스페이스의 리소스 쿼터에 정의된 만큼의 CPU와 메모리를 사용할 수 있다. 하나의 파드 또는 컨테이너가 사용 가능한 모든 리소스를 독점할 수 있다는 우려가 있다. 
리밋레인지는 네임스페이스에서 리소스 할당(파드 또는 컨테이너)을 제한하는 정책이다.

-{{% /capture %}}

-{{% capture body %}}
+
+

_리밋레인지_ 는 다음과 같은 제약 조건을 제공한다.

@@ -49,9 +49,10 @@ _리밋레인지_ 는 다음과 같은 제약 조건을 제공한다.

경합이나 리밋레인지 변경은 이미 생성된 리소스에 영향을 미치지 않는다.

-{{% /capture %}}
-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

자세한 내용은 [LimitRanger 디자인 문서](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_limit_range.md)를 참조한다.

@@ -65,4 +66,4 @@ _리밋레인지_ 는 다음과 같은 제약 조건을 제공한다.

- [네임스페이스당 할당량을 설정하는 자세한 예시](/docs/tasks/administer-cluster/quota-memory-cpu-namespace/).

-{{% /capture %}}
+
diff --git a/content/ko/docs/concepts/policy/pod-security-policy.md b/content/ko/docs/concepts/policy/pod-security-policy.md
index 4e264da6f1..57a115fc69 100644
--- a/content/ko/docs/concepts/policy/pod-security-policy.md
+++ b/content/ko/docs/concepts/policy/pod-security-policy.md
@@ -1,20 +1,20 @@
---
title: 파드 시큐리티 폴리시
-content_template: templates/concept
+content_type: concept
weight: 20
---

-{{% capture overview %}}
+

{{< feature-state state="beta" >}}

파드 시큐리티 폴리시를 사용하면 파드 생성 및 업데이트에 대한 세분화된
권한을 부여할 수 있다.

-{{% /capture %}}

-{{% capture body %}}
+
+

## 파드 시큐리티 폴리시란?

@@ -371,6 +371,8 @@ podsecuritypolicy "example" deleted

{{< codenew file="policy/restricted-psp.yaml" >}}

+더 많은 예제는 [파드 보안 표준](/docs/concepts/security/pod-security-standards/#policy-instantiation)을 본다.
+
## 정책 레퍼런스

### 특권을 가진

@@ -626,10 +628,13 @@ spec:
[Sysctl 문서](
/docs/concepts/cluster-administration/sysctl-cluster/#podsecuritypolicy)를 참고하길 바란다.

-{{% /capture %}}

-{{% capture whatsnext %}}
-API 세부 정보는 [파드 시큐리티 폴리시 레퍼런스](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritypolicy-v1beta1-policy) 참조

+## {{% heading "whatsnext" %}}
+
+
+폴리시 권장 사항에 대해서는 [파드 보안 표준](/docs/concepts/security/pod-security-standards/)을 참조한다.
+
+API 세부 정보는 [파드 시큐리티 폴리시 레퍼런스](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritypolicy-v1beta1-policy)를 참조한다.
+

-{{% /capture %}}
diff --git a/content/ko/docs/concepts/policy/resource-quotas.md b/content/ko/docs/concepts/policy/resource-quotas.md
index d13c6abbc2..4aec897572 100644
--- a/content/ko/docs/concepts/policy/resource-quotas.md
+++ b/content/ko/docs/concepts/policy/resource-quotas.md
@@ -1,20 +1,20 @@
---
title: 리소스 쿼터
-content_template: templates/concept
+content_type: concept
weight: 10
---

-{{% capture overview %}}
+

여러 사용자나 팀이 정해진 수의 노드로 클러스터를 공유할 때
한 팀이 공정하게 분배된 리소스보다 많은 리소스를 사용할 수 있다는
우려가 있다.

리소스 쿼터는 관리자가 이 문제를 해결하기 위한 도구이다.

-{{% /capture %}}

-{{% capture body %}}
+
+

`ResourceQuota` 오브젝트로 정의된 리소스 쿼터는 네임스페이스별 총 리소스 사용을 제한하는
제약 조건을 제공한다. 유형별로 네임스페이스에서 만들 수 있는 오브젝트 수와

@@ -592,10 +592,11 @@ plugins:

[리소스 쿼터를 사용하는 방법에 대한 자세한 예](/docs/tasks/administer-cluster/quota-api-object/)를 참고하길 바란다.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

자세한 내용은 [리소스쿼터 디자인 문서](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_resource_quota.md)를 참고하길 바란다.
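A minimal ResourceQuota sketch to accompany the quota discussion above; the object name, namespace, and limits are illustrative assumptions:

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-resources   # hypothetical name
  namespace: myspace        # hypothetical namespace
spec:
  hard:
    pods: "10"
    requests.cpu: "1"
    requests.memory: 1Gi
    limits.cpu: "2"
    limits.memory: 2Gi
```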
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md index 6a5eaf68ec..a56dece692 100644 --- a/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -1,11 +1,11 @@ --- title: 노드에 파드 할당하기 -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + {{< glossary_tooltip text="파드" term_id="pod" >}}를 특정한 {{< glossary_tooltip text="노드(들)" term_id="node" >}}에서만 동작하도록 하거나, 특정 노드들을 선호하도록 제한할 수 있다. @@ -17,9 +17,9 @@ weight: 50 예를 들어 SSD가 장착된 머신에 파드가 연결되도록 하거나 또는 동일한 가용성 영역(availability zone)에서 많은 것을 통신하는 두 개의 서로 다른 서비스의 파드를 같이 배치할 수 있다. -{{% /capture %}} -{{% capture body %}} + + ## 노드 셀렉터(nodeSelector) @@ -383,9 +383,10 @@ spec: 위 파드는 kube-01 노드에서 실행될 것이다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [테인트](/docs/concepts/configuration/taint-and-toleration/)는 노드가 특정 파드들을 *쫓아내게* 할 수 있다. @@ -397,4 +398,4 @@ spec: [토폴로지 매니저](/docs/tasks/administer-cluster/topology-manager/)는 노드 수준의 리소스 할당 결정에 참여할 수 있다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md index 24754a5c88..54373e2e2c 100644 --- a/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -1,18 +1,18 @@ --- title: 쿠버네티스 스케줄러 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + 쿠버네티스에서 _스케줄링_ 은 {{< glossary_tooltip term_id="kubelet" >}}이 파드를 실행할 수 있도록 {{< glossary_tooltip text="파드" term_id="pod" >}}가 {{< glossary_tooltip text="노드" term_id="node" >}}에 적합한지 확인하는 것을 말한다. -{{% /capture %}} -{{% capture body %}} + + ## 스케줄링 개요 {#scheduling} @@ -86,12 +86,13 @@ _스코어링_ 단계에서 스케줄러는 목록에 남아있는 노드의 순 다른 스케줄링 단계를 구현하는 플러그인을 구성할 수 있다. 다른 프로파일을 실행하도록 kube-scheduler를 구성할 수도 있다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [스케줄러 성능 튜닝](/ko/docs/concepts/scheduling/scheduler-perf-tuning/)에 대해 읽기 * [파드 토폴로지 분배 제약 조건](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/)에 대해 읽기 * kube-scheduler의 [레퍼런스 문서](/docs/reference/command-line-tools-reference/kube-scheduler/) 읽기 * [멀티 스케줄러 구성하기](/docs/tasks/administer-cluster/configure-multiple-schedulers/)에 대해 배우기 * [토폴로지 관리 정책](/docs/tasks/administer-cluster/topology-manager/)에 대해 배우기 * [파드 오버헤드](/docs/concepts/configuration/pod-overhead/)에 대해 배우기 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md b/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md index 3387bdce43..52db313635 100644 --- a/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md +++ b/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md @@ -1,10 +1,10 @@ --- title: 스케줄러 성능 튜닝 -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="1.14" state="beta" >}} @@ -22,9 +22,9 @@ API 서버에 해당 결정을 통지한다. 본 페이지에서는 상대적으로 큰 규모의 쿠버네티스 클러스터에 대한 성능 튜닝 최적화에 대해 설명한다. -{{% /capture %}} -{{% capture body %}} + + 큰 규모의 클러스터에서는 스케줄러의 동작을 튜닝하여 응답 시간 (새 파드가 빠르게 배치됨)과 정확도(스케줄러가 배치 결정을 잘 못하는 경우가 드물게 됨) @@ -161,4 +161,4 @@ percentageOfNodesToScore: 50 모든 노드를 검토한 후, 노드 1로 돌아간다. 
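The `percentageOfNodesToScore` tuning described above is set through the scheduler's component configuration. A minimal sketch, assuming the `v1alpha1` config API of this release line:

```yaml
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
algorithmSource:
  provider: DefaultProvider
# Score only half of the nodes that pass filtering in a large cluster.
percentageOfNodesToScore: 50
```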
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md b/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md index ed59926d17..0d0a192ddd 100644 --- a/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md +++ b/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md @@ -1,11 +1,11 @@ --- title: 테인트(Taints)와 톨러레이션(Tolerations) -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + [여기](/ko/docs/concepts/configuration/assign-pod-node/#어피니티-affinity-와-안티-어피니티-anti-affinity)에 설명된 노드 어피니티는 노드 셋을 *끌어들이는* (기본 설정 또는 어려운 요구 사항) *파드* 속성이다. 테인트는 그 반대로, *노드* 가 파드 셋을 @@ -17,9 +17,9 @@ weight: 40 톨러레이션은 파드에 적용되며, 파드를 일치하는 테인트가 있는 노드에 스케줄되게 하지만 필수는 아니다. -{{% /capture %}} -{{% capture body %}} + + ## 개요 @@ -72,21 +72,10 @@ tolerations: 두 가지 특별한 경우가 있다. -* operator `Exists` 가 있는 비어있는 `key` 는 모든 키, 값 및 이펙트와 일치하므로 +operator `Exists` 가 있는 비어있는 `key` 는 모든 키, 값 및 이펙트와 일치하므로 모든 것이 톨러레이션 된다. -```yaml -tolerations: -- operator: "Exists" -``` - -* 비어있는 `effect` 는 모든 이펙트를 키 `key` 와 일치시킨다. - -```yaml -tolerations: -- key: "key" - operator: "Exists" -``` +비어있는 `effect` 는 모든 이펙트를 키 `key` 와 일치시킨다. {{< /note >}} diff --git a/content/ko/docs/concepts/security/overview.md b/content/ko/docs/concepts/security/overview.md index 6dcbe57d58..f988d1d5d9 100644 --- a/content/ko/docs/concepts/security/overview.md +++ b/content/ko/docs/concepts/security/overview.md @@ -1,12 +1,12 @@ --- title: 클라우드 네이티브 보안 개요 -content_template: templates/concept +content_type: concept weight: 1 --- {{< toc >}} -{{% capture overview %}} + 쿠버네티스 보안(일반적인 보안)은 관련된 많은 부분이 상호작용하는 방대한 주제다. 오늘날에는 웹 애플리케이션의 실행을 돕는 수많은 시스템에 오픈소스 소프트웨어가 통합되어 있으며, @@ -15,9 +15,9 @@ weight: 1 몇 가지 일반적인 개념에 대한 멘탈 모델(mental model)을 정의한다. 멘탈 모델은 완전히 임의적이며 소프트웨어 스택을 보호할 위치를 생각하는데 도움이되는 경우에만 사용해야 한다. -{{% /capture %}} -{{% capture body %}} + + ## 클라우드 네이티브 보안의 4C 계층적인 보안에 대해서 어떻게 생각할 수 있는지 이해하는 데 도움이 될 수 있는 다이어그램부터 살펴보자. @@ -150,12 +150,13 @@ TLS를 통한 접근 | 코드가 TCP를 통해 통신해야 한다면, 클라이 전달하는 파이프라인에 의해 자동화 될 수 있다. 소프트웨어 전달을 위한 "지속적인 해킹(Continuous Hacking)"에 대한 접근 방식에 대해 알아 보려면, 자세한 설명을 제공하는 [이 기사](https://thenewstack.io/beyond-ci-cd-how-continuous-hacking-of-docker-containers-and-pipeline-driven-security-keeps-ygrene-secure/)를 참고한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [파드에 대한 네트워크 정책](/ko/docs/concepts/services-networking/network-policies/) 알아보기 * [클러스터 보안](/docs/tasks/administer-cluster/securing-a-cluster/)에 대해 알아보기 * [API 접근 통제](/docs/reference/access-authn-authz/controlling-access/)에 대해 알아보기 * 컨트롤 플레인에 대한 [전송 데이터 암호화](/docs/tasks/tls/managing-tls-in-a-cluster/) 알아보기 * [Rest에서 데이터 암호화](/docs/tasks/administer-cluster/encrypt-data/) 알아보기 * [쿠버네티스 시크릿](/docs/concepts/configuration/secret/)에 대해 알아보기 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md b/content/ko/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md index b0cc6b1a93..bc5560b5ba 100644 --- a/content/ko/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md +++ b/content/ko/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md @@ -1,18 +1,18 @@ --- title: HostAliases로 파드의 /etc/hosts 항목 추가하기 -content_template: templates/concept +content_type: concept weight: 60 --- {{< toc >}} -{{% capture overview %}} + 파드의 /etc/hosts 파일에 항목을 추가하는 것은 DNS나 다른 방법들이 적용되지 않을 때 파드 수준의 호스트네임 해석을 제공한다. 
1.7 버전에서는, 사용자들이 PodSpec의 HostAliases 항목을 사용하여 이러한 사용자 정의 항목들을 추가할 수 있다. HostAliases를 사용하지 않은 수정은 권장하지 않는데, 이는 호스트 파일이 Kubelet에 의해 관리되고, 파드 생성/재시작 중에 덮어쓰여질 수 있기 때문이다. -{{% /capture %}} -{{% capture body %}} + + ## 기본 호스트 파일 내용 @@ -123,4 +123,4 @@ fe00::2 ip6-allrouters 덮어쓰여진다. 따라서, 호스트 파일의 내용을 직접 바꾸는 것은 권장하지 않는다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/services-networking/connect-applications-service.md b/content/ko/docs/concepts/services-networking/connect-applications-service.md index ca2440a048..4649577399 100644 --- a/content/ko/docs/concepts/services-networking/connect-applications-service.md +++ b/content/ko/docs/concepts/services-networking/connect-applications-service.md @@ -1,11 +1,11 @@ --- title: 서비스와 애플리케이션 연결하기 -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + ## 컨테이너 연결을 위한 쿠버네티스 모델 @@ -15,11 +15,11 @@ weight: 30 컨테이너를 제공하는 여러 개발자 또는 팀에서 포트를 조정하는 것은 규모면에서 매우 어려우며, 사용자가 제어할 수 없는 클러스터 수준의 문제에 노출된다. 쿠버네티스는 파드가 배치된 호스트와는 무관하게 다른 파드와 통신할 수 있다고 가정한다. 쿠버네티스는 모든 파드에게 자체 클러스터-프라이빗 IP 주소를 제공하기 때문에 파드간에 명시적으로 링크를 만들거나 컨테이너 포트를 호스트 포트에 매핑 할 필요가 없다. 이것은 파드 내의 컨테이너는 모두 로컬호스트에서 서로의 포트에 도달할 수 있으며 클러스터의 모든 파드는 NAT 없이 서로를 볼 수 있다는 의미이다. 이 문서의 나머지 부분에서는 이러한 네트워킹 모델에서 신뢰할 수 있는 서비스를 실행하는 방법에 대해 자세히 설명할 것이다. -이 가이드는 간단한 nginx 서버를 사용해서 개념증명을 보여준다. 동일한 원칙이 보다 완전한 [Jenkins CI 애플리케이션](https://kubernetes.io/blog/2015/07/strong-simple-ssl-for-kubernetes)에서 구현된다. +이 가이드는 간단한 nginx 서버를 사용해서 개념증명을 보여준다. -{{% /capture %}} -{{% capture body %}} + + ## 파드를 클러스터에 노출하기 @@ -414,12 +414,13 @@ LoadBalancer Ingress: a320587ffd19711e5a37606cf4a74574-1142138393.us-east-1.el ... ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [서비스를 사용해서 클러스터 내 애플리케이션에 접근하기](/docs/tasks/access-application-cluster/service-access-application-cluster/)를 더 자세히 알아본다. * [서비스를 사용해서 프론트 엔드부터 백 엔드까지 연결하기](/docs/tasks/access-application-cluster/connecting-frontend-backend/)를 더 자세히 알아본다. * [외부 로드 밸런서를 생성하기](/docs/tasks/access-application-cluster/create-external-load-balancer/)를 더 자세히 알아본다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/services-networking/dns-pod-service.md b/content/ko/docs/concepts/services-networking/dns-pod-service.md index cef017128f..7550473fc2 100644 --- a/content/ko/docs/concepts/services-networking/dns-pod-service.md +++ b/content/ko/docs/concepts/services-networking/dns-pod-service.md @@ -1,16 +1,16 @@ --- title: 서비스 및 파드용 DNS -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + 이 페이지는 쿠버네티스의 DNS 지원에 대한 개요를 설명한다. -{{% /capture %}} -{{% capture body %}} + + ## 소개 @@ -262,13 +262,14 @@ options ndots:5 | 1.10 | 베타 (기본)| | 1.9 | 알파 | -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + DNS 구성 관리에 대한 지침은 [DNS 서비스 구성](/docs/tasks/administer-cluster/dns-custom-nameservers/) 에서 확인 할 수 있다. 
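A minimal sketch of per-pod DNS settings along the lines discussed above; the pod name, nameserver address, and search domain are illustrative:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: dns-example   # hypothetical
spec:
  containers:
  - name: test
    image: nginx
  dnsPolicy: "None"   # ignore cluster DNS defaults and use dnsConfig below
  dnsConfig:
    nameservers:
    - 1.2.3.4
    searches:
    - ns1.svc.cluster-domain.example
    options:
    - name: ndots
      value: "2"
```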
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/services-networking/dual-stack.md b/content/ko/docs/concepts/services-networking/dual-stack.md index f234bcbbc3..11390c04d5 100644 --- a/content/ko/docs/concepts/services-networking/dual-stack.md +++ b/content/ko/docs/concepts/services-networking/dual-stack.md @@ -5,11 +5,11 @@ feature: description: > 파드와 서비스에 IPv4와 IPv6 주소 할당 -content_template: templates/concept +content_type: concept weight: 70 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.16" state="alpha" >}} @@ -17,9 +17,9 @@ weight: 70 만약 쿠버네티스 클러스터에서 IPv4/IPv6 이중 스택 네트워킹을 활성화하면, 클러스터는 IPv4와 IPv6 주소의 동시 할당을 지원하게 된다. -{{% /capture %}} -{{% capture body %}} + + ## 지원되는 기능 @@ -99,10 +99,11 @@ IPv6가 활성화된 외부 로드 밸런서를 지원하는 클라우드 공급 * Kubenet은 IP의 IPv4,IPv6의 위치 보고를 강제로 수행한다. (--cluster-cidr) -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [IPv4/IPv6 이중 스택 확인](/docs/tasks/network/validate-dual-stack) 네트워킹 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/services-networking/endpoint-slices.md b/content/ko/docs/concepts/services-networking/endpoint-slices.md index 635798f5a2..2a2ffc45bf 100644 --- a/content/ko/docs/concepts/services-networking/endpoint-slices.md +++ b/content/ko/docs/concepts/services-networking/endpoint-slices.md @@ -1,11 +1,11 @@ --- title: 엔드포인트슬라이스 -content_template: templates/concept +content_type: concept weight: 15 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.17" state="beta" >}} @@ -13,9 +13,9 @@ _엔드포인트슬라이스_ 는 쿠버네티스 클러스터 내의 네트워 추적하는 간단한 방법을 제공한다. 이것은 엔드포인트를 더 확장하고, 확장 가능한 대안을 제안한다. -{{% /capture %}} -{{% capture body %}} + + ## 사용동기 @@ -173,11 +173,12 @@ text="kube-controller-manager" term_id="kube-controller-manager" >}} 플래그 교체되는 엔드포인트에 대해서 엔드포인트슬라이스를 자연스럽게 재포장한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [엔드포인트슬라이스 활성화하기](/docs/tasks/administer-cluster/enabling-endpointslices) * [애플리케이션을 서비스와 함께 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/) 를 읽는다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/services-networking/ingress-controllers.md b/content/ko/docs/concepts/services-networking/ingress-controllers.md index 92a2387f5d..4600c32bcf 100644 --- a/content/ko/docs/concepts/services-networking/ingress-controllers.md +++ b/content/ko/docs/concepts/services-networking/ingress-controllers.md @@ -1,11 +1,11 @@ --- title: 인그레스 컨트롤러 reviewers: -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + 인그레스 리소스가 작동하려면, 클러스터는 실행 중인 인그레스 컨트롤러가 반드시 필요하다. @@ -15,9 +15,9 @@ kube-controller-manager 바이너리의 일부로 실행되는 컨트롤러의 프로젝트로써 쿠버네티스는 현재 [GCE](https://git.k8s.io/ingress-gce/README.md) 와 [nginx](https://git.k8s.io/ingress-nginx/README.md) 컨트롤러를 지원하고 유지한다. -{{% /capture %}} -{{% capture body %}} + + ## 추가 컨트롤러 @@ -52,11 +52,12 @@ kube-controller-manager 바이너리의 일부로 실행되는 컨트롤러의 인그레스 컨트롤러의 설명서를 검토하여 선택 시 주의 사항을 이해해야한다. {{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [인그레스](/ko/docs/concepts/services-networking/ingress/)에 대해 자세히 알아보기. * [NGINX 컨트롤러로 Minikube에서 Ingress를 설정하기](/docs/tasks/access-application-cluster/ingress-minikube). 
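Because several controllers can coexist in one cluster, an Ingress typically selects one with the `kubernetes.io/ingress.class` annotation. A hedged sketch, assuming an nginx controller and using the `networking.k8s.io/v1beta1` API of this release line; resource names and paths are illustrative:

```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: example-ingress      # hypothetical
  annotations:
    kubernetes.io/ingress.class: "nginx"   # handled only by the nginx controller
spec:
  rules:
  - http:
      paths:
      - path: /testpath
        backend:
          serviceName: test   # hypothetical backing service
          servicePort: 80
```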
-{{% /capture %}}
+
diff --git a/content/ko/docs/concepts/services-networking/ingress.md b/content/ko/docs/concepts/services-networking/ingress.md
index 28f964a4ca..968274bc1c 100644
--- a/content/ko/docs/concepts/services-networking/ingress.md
+++ b/content/ko/docs/concepts/services-networking/ingress.md
@@ -1,15 +1,15 @@
---
title: 인그레스
-content_template: templates/concept
+content_type: concept
weight: 40
---
-{{% capture overview %}}
+
{{< feature-state for_k8s_version="v1.1" state="beta" >}}
{{< glossary_definition term_id="ingress" length="all" >}}
-{{% /capture %}}

-{{% capture body %}}
+
+

## 용어

@@ -541,10 +541,11 @@ Events:
  * [Service.Type=LoadBalancer](/ko/docs/concepts/services-networking/service/#loadbalancer) 사용.
  * [Service.Type=NodePort](/ko/docs/concepts/services-networking/service/#nodeport) 사용.

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+
* [인그레스 API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#ingress-v1beta1-networking-k8s-io)에 대해 배우기
* [인그레스 컨트롤러](/ko/docs/concepts/services-networking/ingress-controllers/)에 대해 배우기
* [NGINX 컨트롤러로 Minikube에서 인그레스 구성하기](/docs/tasks/access-application-cluster/ingress-minikube)
-{{% /capture %}}
+
diff --git a/content/ko/docs/concepts/services-networking/network-policies.md b/content/ko/docs/concepts/services-networking/network-policies.md
index c3f4ee193b..55fde3a3be 100644
--- a/content/ko/docs/concepts/services-networking/network-policies.md
+++ b/content/ko/docs/concepts/services-networking/network-policies.md
@@ -1,19 +1,19 @@
---
title: 네트워크 정책
-content_template: templates/concept
+content_type: concept
weight: 50
---

{{< toc >}}

-{{% capture overview %}}
+
네트워크 정책은 {{< glossary_tooltip text="파드" term_id="pod">}} 그룹이 서로 간에 또는 다른 네트워크 엔드포인트와 통신할 수 있도록 허용하는 방법에 대한 명세이다.

`NetworkPolicy` 리소스는 {{< glossary_tooltip text="레이블" term_id="label">}}을 사용해서 파드를 선택하고 선택한 파드에 허용되는 트래픽을 지정하는 규칙을 정의한다.

-{{% /capture %}}

-{{% capture body %}}
+
+
## 전제 조건

네트워크 정책은 [네트워크 플러그인](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/)으로 구현된다. 네트워크 정책을 사용하려면 NetworkPolicy를 지원하는 네트워킹 솔루션을 사용해야만 한다. 이를 구현하는 컨트롤러 없이 NetworkPolicy 리소스를 생성해도 아무런 효과가 없기 때문이다.

@@ -211,12 +211,13 @@ SCTP 프로토콜 NetworkPolicy을 지원하는 {{< glossary_tooltip text="CNI"

{{< /note >}}

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

- 자세한 설명과 추가 예시는 [네트워크 정책 선언](/docs/tasks/administer-cluster/declare-network-policy/)을 본다.
- NetworkPolicy 리소스에서 사용되는 일반적인 시나리오는 [레시피](https://github.com/ahmetb/kubernetes-network-policy-recipes)를 본다.

-{{% /capture %}}
+
diff --git a/content/ko/docs/concepts/services-networking/service-topology.md b/content/ko/docs/concepts/services-networking/service-topology.md
index 16f140e583..da419f76e4 100644
--- a/content/ko/docs/concepts/services-networking/service-topology.md
+++ b/content/ko/docs/concepts/services-networking/service-topology.md
@@ -5,12 +5,12 @@ feature:
  description: >
    클러스터 토폴로지를 기반으로 서비스 트래픽 라우팅.

-content_template: templates/concept
+content_type: concept
weight: 10
---


-{{% capture overview %}}
+

{{< feature-state for_k8s_version="v1.17" state="alpha" >}}

_서비스 토폴로지_ 를 활성화 하면 서비스는 클러스터의 노드 토폴로지를
기반으로 트래픽을 라우팅한다. 예를 들어, 서비스는 트래픽이
클라이언트와 동일한 노드이거나 동일한 가용성 영역에 있는
엔드포인트로 우선적으로 라우팅되도록 지정할 수 있다.

-{{% /capture %}}

-{{% capture body %}}
+
+

## 소개

@@ -189,11 +189,12 @@ spec:
```

-{{% /capture %}}

-{{% capture whatsnext %}}
+
+## {{% heading "whatsnext" %}}
+

* [서비스 토폴로지 활성화하기](/docs/tasks/administer-cluster/enabling-service-topology)를 읽는다.
* [서비스와 애플리케이션 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/)를 읽는다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/services-networking/service.md b/content/ko/docs/concepts/services-networking/service.md index c81309709f..e1e0d28c7c 100644 --- a/content/ko/docs/concepts/services-networking/service.md +++ b/content/ko/docs/concepts/services-networking/service.md @@ -5,14 +5,14 @@ feature: description: > 쿠버네티스를 사용하면 익숙하지 않은 서비스 디스커버리 메커니즘을 사용하기 위해 애플리케이션을 수정할 필요가 없다. 쿠버네티스는 파드에게 고유한 IP 주소와 파드 집합에 대한 단일 DNS 명을 부여하고, 그것들 간에 로드-밸런스를 수행할 수 있다. -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + {{< glossary_definition term_id="service" length="short" >}} @@ -20,9 +20,9 @@ weight: 10 쿠버네티스는 파드에게 고유한 IP 주소와 파드 집합에 대한 단일 DNS 명을 부여하고, 그것들 간에 로드-밸런스를 수행할 수 있다. -{{% /capture %}} -{{% capture body %}} + + ## 동기 @@ -1226,12 +1226,13 @@ SCTP는 Windows 기반 노드를 지원하지 않는다. kube-proxy는 유저스페이스 모드에 있을 때 SCTP 연결 관리를 지원하지 않는다. {{< /warning >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [서비스와 애플리케이션 연결](/ko/docs/concepts/services-networking/connect-applications-service/) 알아보기 * [인그레스](/ko/docs/concepts/services-networking/ingress/)에 대해 알아보기 * [엔드포인트슬라이스](/ko/docs/concepts/services-networking/endpoint-slices/)에 대해 알아보기 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/storage/dynamic-provisioning.md b/content/ko/docs/concepts/storage/dynamic-provisioning.md index 11564490ec..bf0b257dbf 100644 --- a/content/ko/docs/concepts/storage/dynamic-provisioning.md +++ b/content/ko/docs/concepts/storage/dynamic-provisioning.md @@ -1,10 +1,10 @@ --- title: 동적 볼륨 프로비저닝 -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + 동적 볼륨 프로비저닝을 통해 온-디맨드 방식으로 스토리지 볼륨을 생성할 수 있다. 동적 프로비저닝이 없으면 클러스터 관리자는 클라우드 또는 스토리지 @@ -14,10 +14,10 @@ weight: 40 스토리지를 사전 프로비저닝 할 필요가 없다. 대신 사용자가 스토리지를 요청하면 자동으로 프로비저닝 한다. -{{% /capture %}} -{{% capture body %}} + + ## 배경 @@ -128,4 +128,4 @@ spec: 프로비전 해야 한다. [볼륨 바인딩 모드](/docs/concepts/storage/storage-classes/#volume-binding-mode)를 설정해서 수행할 수 있다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/storage/persistent-volumes.md b/content/ko/docs/concepts/storage/persistent-volumes.md index b24c284ba8..397041842b 100644 --- a/content/ko/docs/concepts/storage/persistent-volumes.md +++ b/content/ko/docs/concepts/storage/persistent-volumes.md @@ -5,18 +5,18 @@ feature: description: > 로컬 스토리지, GCPAWS와 같은 퍼블릭 클라우드 공급자 또는 NFS, iSCSI, Gluster, Ceph, Cinder나 Flocker와 같은 네트워크 스토리지 시스템에서 원하는 스토리지 시스템을 자동으로 마운트한다. -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + 이 페이지는 쿠버네티스의 _퍼시스턴트 볼륨_ 의 현재 상태를 설명한다. [볼륨](/ko/docs/concepts/storage/volumes/)에 대해 익숙해지는 것을 추천한다. -{{% /capture %}} -{{% capture body %}} + + ## 소개 @@ -741,8 +741,9 @@ spec: 않거나(이 경우 사용자가 일치하는 PV를 생성해야 함), 클러스터에 스토리지 시스템이 없음을 나타낸다(이 경우 사용자는 PVC가 필요한 구성을 배포할 수 없음). 
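A minimal PersistentVolumeClaim sketch matching the binding discussion above. The claim name, size, and `storageClassName` are assumptions; the named StorageClass must exist or support dynamic provisioning:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myclaim   # hypothetical
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
  storageClassName: slow   # assumes a StorageClass named "slow" exists
```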
-{{% /capture %}} - {{% capture whatsnext %}} + + ## {{% heading "whatsnext" %}} + * [퍼시스턴트볼륨 생성](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolume)에 대해 자세히 알아보기 * [퍼시스턴트볼륨클레임 생성](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim)에 대해 자세히 알아보기 @@ -754,4 +755,4 @@ spec: * [PersistentVolumeSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumespec-v1-core) * [퍼시스턴트볼륨클레임](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core) * [PersistentVolumeClaimSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaimspec-v1-core) -{{% /capture %}} + diff --git a/content/ko/docs/concepts/storage/storage-classes.md b/content/ko/docs/concepts/storage/storage-classes.md index 0d7416e72a..e73d886ef7 100644 --- a/content/ko/docs/concepts/storage/storage-classes.md +++ b/content/ko/docs/concepts/storage/storage-classes.md @@ -1,18 +1,18 @@ --- title: 스토리지 클래스 -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + 이 문서는 쿠버네티스의 스토리지클래스의 개념을 설명한다. [볼륨](/ko/docs/concepts/storage/volumes/)과 [퍼시스턴트 볼륨](/ko/docs/concepts/storage/persistent-volumes)에 익숙해지는 것을 권장한다. -{{% /capture %}} -{{% capture body %}} + + ## 소개 @@ -816,4 +816,4 @@ volumeBindingMode: WaitForFirstConsumer 적절한 퍼시스턴트볼륨을 선택할 때 파드의 모든 스케줄링 제약 조건을 고려할 수 있다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/storage/volume-pvc-datasource.md b/content/ko/docs/concepts/storage/volume-pvc-datasource.md index b58b882d6d..8b8e1b484f 100644 --- a/content/ko/docs/concepts/storage/volume-pvc-datasource.md +++ b/content/ko/docs/concepts/storage/volume-pvc-datasource.md @@ -1,18 +1,18 @@ --- title: CSI 볼륨 복제하기 -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + 이 문서에서는 쿠버네티스의 기존 CSI 볼륨 복제의 개념을 설명한다. [볼륨] (/ko/docs/concepts/storage/volumes)을 숙지하는 것을 추천한다. -{{% /capture %}} -{{% capture body %}} + + ## 소개 @@ -66,4 +66,4 @@ spec: 새 PVC를 사용할 수 있게 되면, 복제된 PVC는 다른 PVC와 동일하게 소비된다. 또한, 이 시점에서 새롭게 생성된 PVC는 독립된 오브젝트이다. 원본 dataSource PVC와는 무관하게 독립적으로 소비하고, 복제하고, 스냅샷의 생성 또는 삭제를 할 수 있다. 이는 소스가 새롭게 생성된 복제본에 어떤 방식으로든 연결되어 있지 않으며, 새롭게 생성된 복제본에 영향 없이 수정하거나, 삭제할 수도 있는 것을 의미한다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/storage/volume-snapshot-classes.md b/content/ko/docs/concepts/storage/volume-snapshot-classes.md index f4d2991238..801ff624bb 100644 --- a/content/ko/docs/concepts/storage/volume-snapshot-classes.md +++ b/content/ko/docs/concepts/storage/volume-snapshot-classes.md @@ -1,19 +1,19 @@ --- title: 볼륨 스냅샷 클래스 -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + 이 문서는 쿠버네티스의 `VolumeSnapshotClass` 개요를 설명한다. [볼륨 스냅샷](/docs/concepts/storage/volume-snapshots/)과 [스토리지 클래스](/docs/concepts/storage/storage-classes)의 숙지를 추천한다. -{{% /capture %}} -{{% capture body %}} + + ## 소개 @@ -62,4 +62,4 @@ parameters: 설명하는 파라미터를 가지고 있다. `driver` 에 따라 다른 파라미터를 사용할 수 있다. 
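To ground the `driver` discussion above, a minimal VolumeSnapshotClass sketch using the beta snapshot API; the class name and the CSI driver are assumptions:

```yaml
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
  name: csi-hostpath-snapclass   # hypothetical
driver: hostpath.csi.k8s.io      # assumed CSI driver name
deletionPolicy: Delete           # snapshot contents removed with the object
```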
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/storage/volume-snapshots.md b/content/ko/docs/concepts/storage/volume-snapshots.md index 60ad22c7cc..d2d85909e1 100644 --- a/content/ko/docs/concepts/storage/volume-snapshots.md +++ b/content/ko/docs/concepts/storage/volume-snapshots.md @@ -1,18 +1,18 @@ --- title: 볼륨 스냅샷 -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.17" state="beta" >}} 쿠버네티스에서 스토리지 시스템 볼륨 스냅샷은 _VolumeSnapshot_ 을 나타낸다. 이 문서는 이미 쿠버네티스 [퍼시스턴트 볼륨](/docs/concepts/storage/persistent-volumes/)에 대해 잘 알고 있다고 가정한다. -{{% /capture %}} -{{% capture body %}} + + ## 소개 @@ -148,4 +148,4 @@ spec: 보다 자세한 사항은 [볼륨 스냅샷 및 스냅샷에서 볼륨 복원](/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support)에서 확인할 수 있다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/storage/volumes.md b/content/ko/docs/concepts/storage/volumes.md index eb63215bb5..a5a3e8aa23 100644 --- a/content/ko/docs/concepts/storage/volumes.md +++ b/content/ko/docs/concepts/storage/volumes.md @@ -1,10 +1,10 @@ --- title: 볼륨 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + 컨테이너 내의 디스크에 있는 파일은 임시적이며, 컨테이너에서 실행될 때 애플리케이션에 적지 않은 몇 가지 문제가 발생한다. 첫째, 컨테이너가 충돌되면, @@ -15,10 +15,10 @@ kubelet은 컨테이너를 재시작시키지만, 컨테이너는 깨끗한 상 [파드](/ko/docs/concepts/workloads/pods/pod/)에 대해 익숙해지는 것을 추천한다. -{{% /capture %}} -{{% capture body %}} + + ## 배경 @@ -1470,6 +1470,7 @@ sudo systemctl restart docker -{{% capture whatsnext %}} +## {{% heading "whatsnext" %}} + * [퍼시스턴트 볼륨과 함께 워드프레스와 MySQL 배포하기](/ko/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/)의 예시를 따른다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/controllers/cron-jobs.md b/content/ko/docs/concepts/workloads/controllers/cron-jobs.md index b06881c53f..54d15ba050 100644 --- a/content/ko/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/ko/docs/concepts/workloads/controllers/cron-jobs.md @@ -1,10 +1,10 @@ --- title: 크론잡 -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.8" state="beta" >}} @@ -28,8 +28,8 @@ kube-controller-manager 컨테이너에 설정된 시간대는 크론잡 컨트 63자라는 제약 조건이 있기 때문이다. -{{% /capture %}} -{{% capture body %}} + + ## 크론잡 @@ -77,12 +77,13 @@ Cannot determine if job needs to be started. Too many missed start time (> 100). 크론 잡은 오직 그 일정에 맞는 잡 생성에 책임이 있고, 잡은 그 잡이 대표하는 파드 관리에 책임이 있다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [크론 표현 포맷](https://pkg.go.dev/github.com/robfig/cron?tab=doc#hdr-CRON_Expression_Format)은 크론잡 `schedule` 필드의 포맷을 문서화 한다. 크론 잡 생성과 작업에 대한 지침과 크론잡 매니페스트의 예는 [크론 잡으로 자동화된 작업 실행하기](/docs/tasks/job/automated-tasks-with-cron-jobs/)를 참조한다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/controllers/daemonset.md b/content/ko/docs/concepts/workloads/controllers/daemonset.md index 91fbeb8cf8..83f3c428a3 100644 --- a/content/ko/docs/concepts/workloads/controllers/daemonset.md +++ b/content/ko/docs/concepts/workloads/controllers/daemonset.md @@ -1,10 +1,10 @@ --- title: 데몬셋 -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + _데몬셋_ 은 모든(또는 일부) 노드가 파드의 사본을 실행하도록 한다. 노드가 클러스터에 추가되면 파드도 추가된다. 
노드가 클러스터에서 제거되면 해당 파드는 가비지(garbage)로 @@ -20,10 +20,10 @@ _데몬셋_ 은 모든(또는 일부) 노드가 파드의 사본을 실행하도 더 복잡한 구성에서는 단일 유형의 데몬에 여러 데몬셋을 사용할 수 있지만, 각기 다른 하드웨어 유형에 따라 서로 다른 플래그, 메모리, CPU 요구가 달라진다. -{{% /capture %}} -{{% capture body %}} + + ## 데몬셋 사양 작성 @@ -118,7 +118,7 @@ kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml `NodeAffinity` 용어를 추가해서 데몬셋 컨트롤러 대신 기본 스케줄러를 사용해서 데몬셋을 스케줄할 수 있다. 이후에 기본 스케줄러를 사용해서 대상 호스트에 파드를 바인딩 한다. 만약 데몬셋 파드에 -이미 노드 선호도가 존재한다면 교체한다. 데몬셋 컨트롤러는 +이미 노드 선호도가 존재한다면 교체한다(대상 호스트를 선택하기 전에 원래 노드의 어피니티가 고려된다). 데몬셋 컨트롤러는 데몬셋 파드를 만들거나 수정할 때만 이런 작업을 수행하며, 데몬셋의 `spec.template` 은 변경되지 않는다. @@ -226,4 +226,4 @@ Kubelet이 감시하는 특정 디렉토리에 파일을 작성하는 파드를 디플로이먼트를 사용한다. 파드 사본이 항상 모든 호스트 또는 특정 호스트에서 실행되는 것이 중요하고, 다른 파드의 실행 이전에 필요한 경우에는 데몬셋을 사용한다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/controllers/deployment.md b/content/ko/docs/concepts/workloads/controllers/deployment.md index ab730109d5..96f41dc186 100644 --- a/content/ko/docs/concepts/workloads/controllers/deployment.md +++ b/content/ko/docs/concepts/workloads/controllers/deployment.md @@ -5,11 +5,11 @@ feature: description: > 쿠버네티스는 애플리케이션 또는 애플리케이션의 설정 변경시 점진적으로 롤아웃하는 동시에 애플리케이션을 모니터링해서 모든 인스턴스가 동시에 종료되지 않도록 보장한다. 만약 어떤 문제가 발생하면 쿠버네티스는 변경 사항을 롤백한다. 성장하는 디플로이먼트 솔루션 생태계를 이용한다. -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + _디플로이먼트_ 는 [파드](/ko/docs/concepts/workloads/pods/pod/)와 [레플리카셋](/ko/docs/concepts/workloads/controllers/replicaset/)에 대한 선언적 업데이트를 제공한다. @@ -20,10 +20,10 @@ _디플로이먼트_ 는 [파드](/ko/docs/concepts/workloads/pods/pod/)와 디플로이먼트가 소유하는 레플리카셋은 관리하지 말아야 한다. 사용자의 유스케이스가 다음에 포함되지 않는 경우 쿠버네티스 리포지터리에 이슈를 올릴 수 있다. {{< /note >}} -{{% /capture %}} -{{% capture body %}} + + ## 유스케이스 @@ -1165,4 +1165,4 @@ API 버전 `apps/v1` 에서는 `.spec.selector` 와 `.metadata.labels` 이 설 일시 중지된 디플로이먼트는 PodTemplateSpec에 대한 변경 사항이 일시중지 된 경우 새 롤아웃을 트리거 하지 않는다. 디플로이먼트는 생성시 기본적으로 일시 중지되지 않는다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/controllers/garbage-collection.md b/content/ko/docs/concepts/workloads/controllers/garbage-collection.md index 9ccc803dce..f819614a6c 100644 --- a/content/ko/docs/concepts/workloads/controllers/garbage-collection.md +++ b/content/ko/docs/concepts/workloads/controllers/garbage-collection.md @@ -1,18 +1,18 @@ --- title: 가비지(Garbage) 수집 -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + 쿠버네티스의 가비지 수집기는 한때 소유자가 있었지만, 더 이상 소유자가 없는 오브젝트들을 삭제하는 역할을 한다. -{{% /capture %}} -{{% capture body %}} + + ## 소유자(owner)와 종속(dependent) @@ -168,15 +168,16 @@ kubectl delete replicaset my-repset --cascade=false [#26120](https://github.com/kubernetes/kubernetes/issues/26120)을 추적한다. 
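The cascading-deletion behavior above hinges on `metadata.ownerReferences`. A hedged sketch of a pod owned by the `my-repset` ReplicaSet from the example; the pod name and UID are illustrative, since the UID is set by the system rather than by users:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-repset-bv8hp   # hypothetical pod name
  ownerReferences:
  - apiVersion: apps/v1
    kind: ReplicaSet
    name: my-repset
    uid: d9607e19-f88f-11e6-a518-42010a800195   # illustrative UID
    controller: true
    blockOwnerDeletion: true
spec:
  containers:
  - name: app
    image: nginx
```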
-{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [디자인 문서 1](https://git.k8s.io/community/contributors/design-proposals/api-machinery/garbage-collection.md) [디자인 문서 2](https://git.k8s.io/community/contributors/design-proposals/api-machinery/synchronous-garbage-collection.md) -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/content/ko/docs/concepts/workloads/controllers/jobs-run-to-completion.md index 5aba53574b..4d6e93ded5 100644 --- a/content/ko/docs/concepts/workloads/controllers/jobs-run-to-completion.md +++ b/content/ko/docs/concepts/workloads/controllers/jobs-run-to-completion.md @@ -1,6 +1,6 @@ --- title: 잡 - 실행부터 완료까지 -content_template: templates/concept +content_type: concept feature: title: 배치 실행 description: > @@ -8,7 +8,7 @@ feature: weight: 70 --- -{{% capture overview %}} + 잡에서 하나 이상의 파드를 생성하고 지정된 수의 파드가 성공적으로 종료되도록 한다. 파드가 성공적으로 완료되면, 성공적으로 완료된 잡을 추적한다. 지정된 수의 @@ -21,10 +21,10 @@ weight: 70 잡을 사용하면 여러 파드를 병렬로 실행할 수도 있다. -{{% /capture %}} -{{% capture body %}} + + ## 예시 잡 실행하기 @@ -469,10 +469,10 @@ spec: 스파크 드라이버를 실행한 다음, 정리한다. 이 접근 방식의 장점은 전체 프로세스가 잡 오브젝트의 완료를 보장하면서도, -파드 생성과 작업 할당 방법을 완전히 제어할 수 있다는 점이다. +파드 생성과 작업 할당 방법을 완전히 제어하고 유지한다는 것이다. ## 크론 잡 {#cron-jobs} [`크론잡`](/ko/docs/concepts/workloads/controllers/cron-jobs/)을 사용해서 Unix 도구인 `cron`과 유사하게 지정된 시간/일자에 실행되는 잡을 생성할 수 있다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/controllers/replicaset.md b/content/ko/docs/concepts/workloads/controllers/replicaset.md index 80bcf6052b..e99bb4f7c5 100644 --- a/content/ko/docs/concepts/workloads/controllers/replicaset.md +++ b/content/ko/docs/concepts/workloads/controllers/replicaset.md @@ -1,18 +1,18 @@ --- title: 레플리카셋 -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + 레플리카셋의 목적은 레플리카 파드 집합의 실행을 항상 안정적으로 유지하는 것이다. 이처럼 레플리카셋은 보통 명시된 동일 파드 개수에 대한 가용성을 보증하는데 사용한다. -{{% /capture %}} -{{% capture body %}} + + ## 레플리카셋의 작동 방식 @@ -362,4 +362,4 @@ kubectl autoscale rs frontend --max=10 --min=3 --cpu-percent=50 설명된 설정-기반의 셀렉터의 요건을 지원하지 않는다는 점을 제외하면 유사하다. 따라서 레플리카셋이 레플리케이션 컨트롤러보다 선호된다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md b/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md index 4551dc99b4..16146a45b6 100644 --- a/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md @@ -6,11 +6,11 @@ feature: description: > 오류가 발생한 컨테이너를 재시작하고, 노드가 죽었을 때 컨테이너를 교체하기 위해 다시 스케줄하고, 사용자 정의 상태 체크에 응답하지 않는 컨테이너를 제거하며, 서비스를 제공할 준비가 될 때까지 클라이언트에 해당 컨테이너를 알리지 않는다. -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + {{< note >}} [`ReplicaSet`](/ko/docs/concepts/workloads/controllers/replicaset/) 을 구성하는 [`Deployment`](/ko/docs/concepts/workloads/controllers/deployment/) 가 현재 권장되는 레플리케이션 설정 방법이다. @@ -20,10 +20,10 @@ _레플리케이션 컨트롤러_ 는 언제든지 지정된 수의 파드 레 실행 중임을 보장한다. 다시 말하면, 레플리케이션 컨트롤러는 파드 또는 동일 종류의 파드의 셋이 항상 기동되고 사용 가능한지 확인한다. -{{% /capture %}} -{{% capture body %}} + + ## 레플리케이션 컨트롤러의 동작방식 @@ -282,4 +282,4 @@ API 오브젝트에 대한 더 자세한 것은 [스테이트리스 애플리케이션 레플리케이션 컨트롤러 실행하기](/docs/tutorials/stateless-application/run-stateless-ap-replication-controller/) 를 참조하라. 
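
The jobs-run-to-completion hunk above explains that a Job creates pods until a specified number terminate successfully, optionally in parallel. A hedged sketch of such a Job, reusing the pi-computation example from the upstream docs; the field values are illustrative:

```shell
kubectl apply -f - <<EOF
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  completions: 3      # the Job completes once 3 pods terminate successfully
  parallelism: 2      # run at most 2 pods at a time
  backoffLimit: 4     # retries allowed before the Job is marked failed
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
EOF
```
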
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/controllers/statefulset.md b/content/ko/docs/concepts/workloads/controllers/statefulset.md index e83c02b50d..1779ea4f92 100644 --- a/content/ko/docs/concepts/workloads/controllers/statefulset.md +++ b/content/ko/docs/concepts/workloads/controllers/statefulset.md @@ -1,17 +1,17 @@ --- title: 스테이트풀셋 -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + 스테이트풀셋은 애플리케이션의 스테이트풀을 관리하는데 사용하는 워크로드 API 오브젝트이다. {{< glossary_definition term_id="statefulset" length="all" >}} -{{% /capture %}} -{{% capture body %}} + + ## 스테이트풀셋 사용 @@ -262,12 +262,13 @@ web-0이 실패할 경우 web-1은 web-0이 Running 및 Ready 상태가 실행하려고 시도한 모든 파드를 삭제해야 한다. 그러면 스테이트풀셋은 되돌린 템플릿을 사용해서 파드를 다시 생성하기 시작 한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [스테이트풀 애플리케이션의 배포](/ko/docs/tutorials/stateful-application/basic-stateful-set/)의 예시를 따른다. * [카산드라와 스테이트풀셋 배포](/ko/docs/tutorials/stateful-application/cassandra/)의 예시를 따른다. * [레플리케이티드(replicated) 스테이트풀 애플리케이션 실행하기](/docs/tasks/run-application/run-replicated-stateful-application/)의 예시를 따른다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md index aefccc9243..c095dd31c5 100644 --- a/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -1,10 +1,10 @@ --- title: 완료된 리소스를 위한 TTL 컨트롤러 -content_template: templates/concept +content_type: concept weight: 65 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.12" state="alpha" >}} @@ -18,12 +18,12 @@ TTL 컨트롤러는 실행이 완료된 리소스 오브젝트의 수명을 [기능 게이트](/docs/reference/command-line-tools-reference/feature-gates/) 로 `TTLAfterFinished` 를 활성화 할 수 있다. -{{% /capture %}} -{{% capture body %}} + + ## TTL 컨트롤러 @@ -75,12 +75,13 @@ TTL 컨트롤러는 쿠버네티스 리소스에 에서 NTP를 실행해야 한다. 시계가 항상 정확한 것은 아니지만, 그 차이는 아주 작아야 한다. 0이 아닌 TTL을 설정할때는 이 위험에 대해 유의해야 한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [자동으로 잡 정리](/ko/docs/concepts/workloads/controllers/jobs-run-to-completion/#완료된-잡을-자동으로-정리) [디자인 문서](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/0026-ttl-after-finish.md) -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/pods/disruptions.md b/content/ko/docs/concepts/workloads/pods/disruptions.md index d2c91e3ecb..bd2f2023af 100644 --- a/content/ko/docs/concepts/workloads/pods/disruptions.md +++ b/content/ko/docs/concepts/workloads/pods/disruptions.md @@ -1,10 +1,10 @@ --- title: 중단(disruption) -content_template: templates/concept +content_type: concept weight: 60 --- -{{% capture overview %}} + 이 가이드는 고가용성 애플리케이션을 구성하려는 소유자와 파드에서 발생하는 장애 유형을 이해하기 원하는 애플리케이션 소유자를 위한 것이다. @@ -12,10 +12,10 @@ weight: 60 또한 클러스터의 업그레이드와 오토스케일링과 같은 클러스터의 자동화 작업을 하려는 관리자를 위한 것이다. -{{% /capture %}} -{{% capture body %}} + + ## 자발적 중단과 비자발적 중단 @@ -242,13 +242,14 @@ Pod Disruption Budgets를 사용할 필요가 없다. 자발적 중단를 허용하는 작업의 대부분은 오토스케일링과 비자발적 중단를 지원하는 작업과 겹친다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [Pod Disruption Budget 설정하기](/docs/tasks/run-application/configure-pdb/)의 단계를 따라서 애플리케이션을 보호한다. 
* [노드 비우기](/docs/tasks/administer-cluster/safely-drain-node/)에 대해 자세히 알아보기 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/pods/ephemeral-containers.md b/content/ko/docs/concepts/workloads/pods/ephemeral-containers.md index dd061e5130..721405614c 100644 --- a/content/ko/docs/concepts/workloads/pods/ephemeral-containers.md +++ b/content/ko/docs/concepts/workloads/pods/ephemeral-containers.md @@ -1,10 +1,10 @@ --- title: 임시(Ephemeral) 컨테이너 -content_template: templates/concept +content_type: concept weight: 80 --- -{{% capture overview %}} + {{< feature-state state="alpha" for_k8s_version="v1.16" >}} @@ -19,9 +19,9 @@ weight: 80 이 알파 기능은 향후 크게 변경되거나, 완전히 제거될 수 있다. {{< /warning >}} -{{% /capture %}} -{{% capture body %}} + + ## 임시 컨테이너 이해하기 @@ -188,4 +188,4 @@ Ephemeral Containers: kubectl attach -it example-pod -c debugger ``` -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/pods/init-containers.md b/content/ko/docs/concepts/workloads/pods/init-containers.md index c45d3cc838..0e1f614de3 100644 --- a/content/ko/docs/concepts/workloads/pods/init-containers.md +++ b/content/ko/docs/concepts/workloads/pods/init-containers.md @@ -1,19 +1,19 @@ --- title: 초기화 컨테이너 -content_template: templates/concept +content_type: concept weight: 40 --- -{{% capture overview %}} + 이 페이지는 초기화 컨테이너에 대한 개요를 제공한다. 초기화 컨테이너는 {{< glossary_tooltip text="파드" term_id="pod" >}}의 앱 컨테이너들이 실행되기 전에 실행되는 특수한 컨테이너이며, 앱 이미지에는 없는 유틸리티 또는 설정 스크립트 등을 포함할 수 있다. 초기화 컨테이너는 `containers` 배열(앱 컨테이너를 기술하는)과 나란히 파드 스펙에 명시할 수 있다. -{{% /capture %}} -{{% capture body %}} + + ## 초기화 컨테이너 이해하기 @@ -314,12 +314,13 @@ myapp-pod 1/1 Running 0 9m 동안 종료되었다. 그리고 초기화 컨테이너의 완료 기록이 가비지 수집 때문에 유실되었다. -{{% /capture %}} -{{% capture whatsnext %}} -* [초기화 컨테이너를 가진 파드 생성하기](/docs/tasks/configure-pod-container/configure-pod-initialization/#creating-a-pod-that-has-an-init-container) +## {{% heading "whatsnext" %}} + + +* [초기화 컨테이너를 가진 파드 생성하기](/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container) * [초기화 컨테이너 디버깅](/docs/tasks/debug-application-cluster/debug-init-containers/) 알아보기 -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md b/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md index e29e358d97..e8e384a4ab 100644 --- a/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md @@ -1,20 +1,20 @@ --- title: 파드 라이프사이클 -content_template: templates/concept +content_type: concept weight: 30 --- -{{% capture overview %}} + {{< comment >}}Updated: 4/14/2015{{< /comment >}} {{< comment >}}Edited and moved to Concepts section: 2/2/17{{< /comment >}} 이 페이지는 파드의 라이프사이클을 설명한다. -{{% /capture %}} -{{% capture body %}} + + ## 파드의 단계(phase) @@ -388,10 +388,11 @@ spec: * 노드 컨트롤러가 파드의 `phase`를 Failed로 설정한다. * 만약 컨트롤러로 실행되었다면, 파드는 어딘가에서 재생성된다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Hands-on 경험하기 [컨테이너 라이프사이클 이벤트에 핸들러 부착하기](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). @@ -401,6 +402,6 @@ spec: * [컨테이너 라이프사이클 후크(hook)](/ko/docs/concepts/containers/container-lifecycle-hooks/)에 대해 더 배우기. 
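
The pod-lifecycle hunk above links to the hands-on task for attaching handlers to container lifecycle events. A sketch of the kind of manifest that task builds, based on the upstream example; the image and commands are illustrative:

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-demo
spec:
  containers:
  - name: lifecycle-demo-container
    image: nginx
    lifecycle:
      postStart:            # runs immediately after the container is created
        exec:
          command: ["/bin/sh", "-c", "echo Hello > /usr/share/message"]
      preStop:              # runs before the container is terminated
        exec:
          command: ["/usr/sbin/nginx", "-s", "quit"]
EOF
```
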
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/pods/pod-overview.md b/content/ko/docs/concepts/workloads/pods/pod-overview.md index e1239a817e..5b2af22d73 100644 --- a/content/ko/docs/concepts/workloads/pods/pod-overview.md +++ b/content/ko/docs/concepts/workloads/pods/pod-overview.md @@ -1,18 +1,18 @@ --- title: 파드(Pod) 개요 -content_template: templates/concept +content_type: concept weight: 10 card: name: concepts weight: 60 --- -{{% capture overview %}} + 이 페이지는 쿠버네티스 객체 모델 중 가장 작은 배포 가능한 객체인 `파드` 에 대한 개요를 제공한다. -{{% /capture %}} -{{% capture body %}} + + ## 파드에 대해 이해하기 *파드* 는 쿠버네티스 애플리케이션의 기본 실행 단위이다. 쿠버네티스 객체 모델 중 만들고 배포할 수 있는 가장 작고 간단한 단위이다. 파드는 {{< glossary_tooltip term_id="cluster" text="클러스터" >}} 에서의 Running 프로세스를 나타낸다. @@ -104,12 +104,13 @@ metadata: 노드에서 "kubelet"이 파드 템플릿과 업데이트에 관련된 세부 정보를 직접 관찰하거나 관리하지 않으며, 이러한 세부 정보는 추상화되지 않는다. 이러한 추상화와 분리는 시스템 시맨틱을 단순화하며, 기존 코드를 변경하지 않고 클러스터의 동작을 확장할 수 있도록 한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [파드](/ko/docs/concepts/workloads/pods/pod/)에 대해 더 배워보자. * [분산 시스템 툴킷: 복합 컨테이너의 패턴](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns)은 둘 이상의 컨테이너가 있는 파드의 공통 레이아웃에 대해 설명한다. * 파드의 동작에 대해 더 알아보자. * [파드 종료](/ko/docs/concepts/workloads/pods/pod/#파드의-종료) * [파드 라이프사이클](/ko/docs/concepts/workloads/pods/pod-lifecycle/) -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md b/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md index 040a228d70..d7cc7d545b 100644 --- a/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md +++ b/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md @@ -1,18 +1,18 @@ --- title: 파드 토폴로지 분배 제약 조건 -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.18" state="beta" >}} 사용자는 _토폴로지 분배 제약 조건_ 을 사용해서 지역, 영역, 노드 그리고 기타 사용자-정의 토폴로지 도메인과 같이 장애-도메인으로 설정된 클러스터에 걸쳐 파드가 분산되는 방식을 제어할 수 있다. 이를 통해 고가용성뿐만 아니라, 효율적인 리소스 활용의 목적을 이루는 데 도움이 된다. -{{% /capture %}} -{{% capture body %}} + + ## 필수 구성 요소 @@ -245,4 +245,4 @@ profiles: - 디플로이먼트를 스케일링 다운하면 그 결과로 파드의 분포가 불균형이 될 수 있다. - 파드와 일치하는 테인트(taint)가 된 노드가 존중된다. [이슈 80921](https://github.com/kubernetes/kubernetes/issues/80921)을 본다. -{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/pods/pod.md b/content/ko/docs/concepts/workloads/pods/pod.md index b4c7e63fbe..9f7d06d091 100644 --- a/content/ko/docs/concepts/workloads/pods/pod.md +++ b/content/ko/docs/concepts/workloads/pods/pod.md @@ -1,15 +1,15 @@ --- title: 파드 -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + _파드_ 는 쿠버네티스에서 생성되고 관리될 수 있는 배포 가능한 최소 컴퓨팅 단위이다. -{{% /capture %}} -{{% capture body %}} + + ## 파드는 무엇인가? _파드_ 는 (고래 떼(pod of whales)나 콩꼬투리(pea pod)와 마찬가지로) 하나 이상의(도커 컨테이너 같은) 컨테이너 그룹이다. @@ -203,4 +203,4 @@ spec.containers[0].securityContext.privileged: forbidden '<*>(0xc20b222db0)true' 파드 오브젝트에 대한 매니페스트를 생성할때는 지정된 이름이 유효한 [DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)인지 확인해야 한다. 
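
The pod.md hunk above ends by noting that a Pod manifest's name must be a valid DNS subdomain name. A minimal, illustrative manifest that satisfies the rule (lowercase alphanumerics, `-` and `.`, 253 characters or fewer):

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-demo    # valid DNS subdomain name; a name like "Nginx_Demo" would be rejected
spec:
  containers:
  - name: nginx
    image: nginx:1.14.2
EOF
```
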
-{{% /capture %}} + diff --git a/content/ko/docs/concepts/workloads/pods/podpreset.md b/content/ko/docs/concepts/workloads/pods/podpreset.md index 97ed144f68..4b37e0c232 100644 --- a/content/ko/docs/concepts/workloads/pods/podpreset.md +++ b/content/ko/docs/concepts/workloads/pods/podpreset.md @@ -1,19 +1,19 @@ --- title: 파드 프리셋 -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.6" state="alpha" >}} 이 페이지는 파드프리셋(PodPreset)에 대한 개요를 제공한다. 파드프리셋은 파드 생성 시간에 파드에 특정 정보를 주입하기 위한 오브젝트이다. 해당 정보에는 시크릿, 볼륨, 볼륨 마운트, 환경 변수가 포함될 수 있다. -{{% /capture %}} -{{% capture body %}} + + ## 파드 프리셋 이해하기 파드프리셋은 파드 생성 시간에 파드에 추가적인 런타임 요구사항을 @@ -79,12 +79,13 @@ weight: 50 있을 것이다. 이 경우에는, 다음과 같은 양식으로 어노테이션을 파드 스펙에 추가한다. `podpreset.admission.kubernetes.io/exclude: "true"`. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + [파드프리셋을 사용하여 파드에 데이터 주입하기](/docs/tasks/inject-data-application/podpreset/)를 본다. 배경에 대한 자세한 정보를 위해서는, [파드프리셋을 위한 디자인 제안](https://git.k8s.io/community/contributors/design-proposals/service-catalog/pod-preset.md)을 본다. -{{% /capture %}} + diff --git a/content/ko/docs/contribute/_index.md b/content/ko/docs/contribute/_index.md index f059747769..9dd0b23cd3 100644 --- a/content/ko/docs/contribute/_index.md +++ b/content/ko/docs/contribute/_index.md @@ -1,5 +1,5 @@ --- -content_template: templates/concept +content_type: concept title: 쿠버네티스 문서에 기여하기 linktitle: 기여 main_menu: true @@ -10,7 +10,7 @@ card: title: 기여 시작하기 --- -{{% capture overview %}} + 이 웹사이트는 [쿠버네티스 SIG Docs](/docs/contribute/#get-involved-with-sig-docs)에 의해서 관리됩니다. @@ -23,9 +23,9 @@ card: 쿠버네티스 문서는 새롭고 경험이 풍부한 모든 기여자의 개선을 환영합니다! -{{% /capture %}} -{{% capture body %}} + + ## 시작하기 @@ -75,4 +75,4 @@ SIG Docs는 여러가지 방법으로 의견을 나누고 있습니다. - [기여자 치트시트](https://github.com/kubernetes/community/tree/master/contributors/guide/contributor-cheatsheet)를 읽고 쿠버네티스 기능 개발에 참여합니다. - [블로그 게시물 또는 사례 연구](/docs/contribute/new-content/blogs-case-studies/)를 제출합니다. -{{% /capture %}} + diff --git a/content/ko/docs/contribute/advanced.md b/content/ko/docs/contribute/advanced.md index de723b3353..3f30f6eff9 100644 --- a/content/ko/docs/contribute/advanced.md +++ b/content/ko/docs/contribute/advanced.md @@ -1,11 +1,11 @@ --- title: 고급 기여 slug: advanced -content_template: templates/concept +content_type: concept weight: 98 --- -{{% capture overview %}} + 이 페이지에서는 당신이 [새로운 콘텐츠에 기여](/ko/docs/contribute/new-content/overview)하고 @@ -13,9 +13,9 @@ weight: 98 이해한다고 가정한다. 또한 기여하기 위한 더 많은 방법에 대해 배울 준비가 되었다고 가정한다. 이러한 작업 중 일부에는 Git 커맨드 라인 클라이언트와 다른 도구를 사용해야 한다. -{{% /capture %}} -{{% capture body %}} + + ## 일주일 동안 PR 랭글러(Wrangler) 되기 @@ -245,4 +245,4 @@ SIG Docs [승인자](/ko/docs/contribute/participating/#승인자)는 SIG Docs 비디오가 자동으로 유튜브에 업로드된다. -{{% /capture %}} + diff --git a/content/ko/docs/contribute/generate-ref-docs/_index.md b/content/ko/docs/contribute/generate-ref-docs/_index.md new file mode 100644 index 0000000000..756c509206 --- /dev/null +++ b/content/ko/docs/contribute/generate-ref-docs/_index.md @@ -0,0 +1,11 @@ +--- +title: 참조 문서 개요 +main_menu: true +weight: 80 +--- + +이 섹션은 쿠버네티스 참조 가이드를 생성하는 방법에 대해 설명한다. + +참조 문서화 시스템을 빌드하려면, 다음의 가이드를 참고한다. 
+ +* [참조 문서 생성에 대한 퀵스타트 가이드](/docs/contribute/generate-ref-docs/quickstart/) \ No newline at end of file diff --git a/content/ko/docs/contribute/localization_ko.md b/content/ko/docs/contribute/localization_ko.md index 9b56a46dfa..72d6c7f3d7 100644 --- a/content/ko/docs/contribute/localization_ko.md +++ b/content/ko/docs/contribute/localization_ko.md @@ -1,16 +1,54 @@ --- title: 쿠버네티스 문서 한글화 가이드 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + 쿠버네티스 문서 한글화를 위한 가이드 -{{% /capture %}} -{{% capture body %}} + + + +## 팀 마일스톤 관리 + +쿠버네티스 문서 한글화팀은 커뮤니티의 +[현지화 가이드](/docs/contribute/localization/#branching-strategy)에 따라 한글화를 +위한 팀 마일스톤과 개발 브랜치를 관리한다. 본 섹션은 한글화팀의 팀 마일스톤 관리에 특화된 +내용을 다룬다. + +한글화팀은 `master` 브랜치에서 분기한 개발 브랜치를 사용한다. 개발 브랜치 이름은 다음과 같은 +구조를 갖는다. + +`dev-<소스 버전>-ko.<팀 마일스톤>` + +개발 브랜치는 약 2주에서 3주 사이의 팀 마일스톤 기간 동안 공동의 작업을 위해 사용되며, 팀 +마일스톤이 종료될 때 원 브랜치로 병합(merge)된다. + +업스트림(upstream)의 릴리스 주기(약 3개월)에 따라 다음 버전으로 마일스톤을 변경하는 시점에는 +일시적으로 `release-<소스 버전>` 브랜치를 원 브랜치로 사용하는 개발 브랜치를 추가로 운영한다. + +[한글화팀의 정기 화상 회의 일정](https://github.com/kubernetes/community/tree/master/sig-docs#meetings)과 +팀 마일스톤 주기는 대체로 일치하며, 정기 회의를 통해 팀 마일스톤마다 PR 랭글러(wrangler)를 +지정한다. + +한글화팀의 PR 랭글러가 갖는 의무는 업스트림의 +[PR 랭글러](/ko/docs/contribute/advanced/#일주일-동안-pr-랭글러-wrangler-되기)가 갖는 +의무와 유사하다. 단, 업스트림의 PR 랭글러와는 달리 승인자가 아니어도 팀 마일스톤의 PR 랭글러가 +될 수 있다. 그래서, 보다 상위 권한이 필요한 업무가 발생한 경우, PR 랭글러는 해당 권한을 가진 +한글화팀 멤버에게 처리를 요청한다. + +업스트림의 [PR 랭글러에게 유용한 GitHub 쿼리](/ko/docs/contribute/advanced/#랭글러에게-유용한-github-쿼리)를 +기반으로 작성한, 한글화팀의 PR 랭글러에게 유용한 쿼리를 아래에 나열한다. + +- [CLA 서명 없음, 병합할 수 없음](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3A%22cncf-cla%3A+no%22+-label%3Ado-not-merge+label%3Alanguage%2Fko) +- [LGTM 필요](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Aopen+is%3Apr+-label%3Ado-not-merge+label%3Alanguage%2Fko+-label%3Algtm+) +- [LGTM 보유, 문서 승인 필요](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+-label%3Ado-not-merge+label%3Alanguage%2Fko+label%3Algtm) +- [퀵윈(Quick Wins)](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+base%3Amaster+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3A%22cncf-cla%3A+yes%22+label%3A%22size%2FXS%22+label%3A%22language%2Fko%22+) + +팀 마일스톤 일정과 PR 랭글러는 커뮤니티 슬랙 내 [#kubernetes-docs-ko 채널](https://kubernetes.slack.com/archives/CA1MMR86S)에 공지된다. ## 문체 가이드 @@ -67,7 +105,7 @@ content_template: templates/concept + + + title: 쿠버네티스 컴포넌트 -content_template: templates/concept +content_type: concept weight: 10 ``` @@ -87,27 +125,50 @@ weight: 10 기존에 번역된 문서를 참고한다. {{% note %}} -한영 병기는 페이지 내에서 해당 용어가 처음 사용되는 경우에만 적용하고 이후 부터는 한글만 표기한다. +한영 병기는 페이지 내에서 해당 용어가 처음 사용되는 경우에만 1회 적용하고 이후에는 한글만 표기한다. +한영 병기의 대상에는 제목도 포함되므로 제목에 한영 병기가 제공된 경우, 문서 내부에는 +한글만 표기한다. {{% /note %}} -### API 오브젝트 용어 한글화 관련 방침 +### API 오브젝트 용어 한글화 방침 -API 오브젝트는 외래어 표기법에 따라 한글 표기한다. - -쿠버네티스 API 오브젝트는 원 단어를 +API 오브젝트 중 `kubectl api-resources` 결과의 `kind`에 해당하는 오브젝트는 [국립국어원 외래어 표기법](http://kornorms.korean.go.kr/regltn/regltnView.do?regltn_code=0003#a)에 -따라 한글화 한다. 예를 들면 다음과 같다. +따라 한글 표기한다. 예를 들면 다음과 같다. -원 단어 | 외래어 표기 ---- | --- -Deployment | 디플로이먼트 -Pod | 파드 -Service | 서비스 +API 오브젝트(kind) | 한글화(외래어 표기) +--- | --- +ClusterRoleBinding | 클러스터롤바인딩 +ConfigMap | 컨피그맵 +Deployment | 디플로이먼트 +Pod | 파드 +PersistentVolumeClaim | 퍼시스턴트볼륨클레임 +Service | 서비스 +... | ... + +그 이외의 API 오브젝트는, [한글화 용어집](#한글화-용어집)에 등록된 용어인 경우를 제외하고, +모두 원문 그대로 표기한다. 예를 들면 다음과 같다. 
+ +API 오브젝트(kind가 아닌 경우) | 한글화(원문 유지) +--- | --- +ClusterRoleBindingList | ClusterRoleBindingList +ClusterRoleList | ClusterRoleList +ConfigMapEnvSource | ConfigMapEnvSource +ConfigMapKeySelector | ConfigMapKeySelector +PersistentVolumeClaimList | PersistentVolumeClaimList +PersistentVolumeClaimSpec | PersistentVolumeClaimSpec +... | ... {{% note %}} 단, API 오브젝트 한글화 원칙에 예외가 있을 수 있으며, 이 경우에는 가능한 [한글화 용어집](#한글화-용어집)을 준용한다. (예를 들면, Horizontal Pod Autoscaler -는 API 오브젝트에 대해 외래어 표기법 적용하지 않고 원문 그대로 표기한다.) +는 API 오브젝트에 대해 외래어 표기법을 적용하지 않고 원문 그대로 표기한다.) +{{% /note %}} + +{{% note %}} +원문에서는 API 오브젝트를 camelCase(예: configMap)로 표기하는 것을 가이드하고 있다. +그러나 한글에는 대소문자 구분이 없으므로 이를 띄어쓰기 없이 붙여서 처리한다. +(예: configMap -> 컨피그맵, config Map -> 컨피그맵) {{% /note %}} {{% note %}} @@ -116,6 +177,31 @@ API 오브젝트의 필드 이름, 파일 이름, 경로와 같은 내용은 독 단, 주석에 포함된 내용은 한글로 옮길 수 있다. {{% /note %}} +### 기능 게이트(feature gate) 한글화 방침 + +쿠버네티스의 [기능 게이트](/docs/reference/command-line-tools-reference/feature-gates/)를 +의미하는 용어는 한글화하지 않고 원문 형태를 유지한다. + +기능 게이트의 예시는 다음과 같다. +- Accelerators +- AdvancedAuditing +- AffinityInAnnotations +- AllowExtTrafficLocalEndpoints +- ... + +전체 기능 게이트 목록은 +[여기](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates)를 참고한다. + +{{% note %}} +단, 해당 원칙에는 예외가 있을 수 있으며, 이 경우에는 가능한 +[한글화 용어집](#한글화-용어집)을 준용한다. +{{% /note %}} + +{{% note %}} +기능 게이트는 쿠버네티스 버전에 따라 변경될 수 있으므로, +쿠버네티스 및 쿠버네티스 문서의 버전에 맞는 기능 게이트 목록을 적용하여 한글화를 진행한다. +{{% /note %}} + ### 한글화 용어집 정보 쿠버네티스 [한글화 용어집](#한글화-용어집)은 한글화된 쿠버네티스 문서의 일관성을 위해서 @@ -153,6 +239,7 @@ Age | 기간 | Allocation | 할당량 | alphanumeric | 영숫자 | Annotation | 어노테이션 | +APIService | API서비스(APIService) | API 오브젝트인 경우 App | 앱 | Appendix | 부록 | Application | 애플리케이션 | @@ -162,6 +249,7 @@ autoscaler | 오토스케일러 | availability zone | 가용성 영역(availability zone) | bare pod | 베어(bare) 파드 | beta | 베타 | +Binding | 바인딩(Binding) | API 오브젝트인 경우 boilerplate | 상용구 | Boot | 부트 | Build | 빌드 | @@ -169,31 +257,39 @@ Cache | 캐시 | Calico | 캘리코(Calico) | canary | 카나리(canary) | 릴리스 방식에 관련한 용어인 경우에 한함 cascading | 캐스케이딩(cascading) | +CertificateSigningRequest | CertificateSigningRequest | API 오브젝트인 경우 character set | 캐릭터 셋 | Charts | 차트 | checkpoint | 체크포인트 | Cilium | 실리움(Cilium) | CLI | CLI | Cluster | 클러스터 | +ClusterRole | 클러스터롤(ClusterRole) | API 오브젝트인 경우 +ClusterRoleBinding | 클러스터롤바인딩(ClusterRoleBinding) | API 오브젝트인 경우 Command Line Tool | 커맨드라인 툴 | -Config Map | 컨피그 맵 | +ComponentStatus | 컴포넌트스테이터스(ComponentStatus) | API 오브젝트인 경우 +ConfigMap | 컨피그맵(ConfigMap) | API 오브젝트인 경우 configuration | 구성, 설정 | Connection | 연결 | containerized | 컨테이너화 된 | Context | 컨텍스트 | Control Plane | 컨트롤 플레인 | controller | 컨트롤러 | -Cron Job | 크론 잡 | +ControllerRevision | 컨트롤러리비전(ControllerRevision) | API 오브젝트인 경우 +cron job | 크론 잡 | +CronJob | 크론잡(CronJob) | API 오브젝트인 경우 +CSIDriver | CSI드라이버(CSIDriver) | API 오브젝트인 경우 +CSINode | CSI노드(CSINode) | API 오브젝트인 경우 custom metrics | 사용자 정의 메트릭 | -Custom resource | 사용자 정의 리소스 | -CustomResourceDefinition | 커스텀 리소스 데피니션 | +custom resource | 사용자 정의 리소스 | +CustomResourceDefinition | 커스텀리소스데피니션(CustomResourceDefinition) | API 오브젝트인 경우 Daemon | 데몬 | -Daemon Set | 데몬 셋 | +DaemonSet | 데몬셋(DaemonSet) | API 오브젝트인 경우 Dashboard | 대시보드 | Data Plane | 데이터 플레인 | Default Limit | 기본 상한 | Default Request | 기본 요청량 | -Deployment | 디플로이먼트 | +Deployment | 디플로이먼트(Deployment) | API 오브젝트인 경우 deprecated | 사용 중단(deprecated) | descriptor | 디스크립터, 식별자 | Desired number of pods | 의도한 파드의 수 | @@ -206,9 +302,11 @@ Docker Swarm | Docker Swarm | Downward API | 다운워드(Downward) API | draining | 드레이닝(draining) | egress | 이그레스, 
송신(egress) | -Endpoint | 엔드포인트 | +endpoint | 엔드포인트 | +EndpointSlice | 엔드포인트슬라이스(EndpointSlice) | API 오브젝트인 경우 +Endpoints | 엔드포인트(Endpoints) | API 오브젝트인 경우 entry point | 진입점 | -Event | 이벤트 | +Event | 이벤트(Event) | API 오브젝트인 경우 evict | 축출하다 | eviction | 축출 | Exec | Exec | @@ -235,20 +333,24 @@ Hypervisor | 하이퍼바이저 | idempotent | 멱등성 | Image | 이미지 | Image Pull Secrets | 이미지 풀(Pull) 시크릿 | -Ingress | 인그레스 | +Ingress | 인그레스(Ingress) | API 오브젝트인 경우 +IngressClass | 인그레스클래스(IngressClass) | API 오브젝트인 경우 Init Container | 초기화 컨테이너 | Instance group | 인스턴스 그룹 | introspection | 인트로스펙션(introspection) | Istio | 이스티오(Istio) | -Job | 잡 | +Job | 잡(Job) | API 오브젝트인 경우 kube-proxy | kube-proxy | Kubelet | Kubelet | Kubernetes | 쿠버네티스 | Kube-router | Kube-router | label | 레이블 | +Lease | 리스(Lease) | API 오브젝트인 경우 lifecycle | 라이프사이클 | +LimitRange | 리밋레인지(LimitRange) | API 오브젝트인 경우 Linux | 리눅스 | load | 부하 | +LocalSubjectAccessReview | 로컬서브젝트액세스리뷰(LocalSubjectAccessReview) | API 오브젝트인 경우 Log | 로그 | loopback | 루프백(loopback) | Lost | Lost | 클레임의 상태에 한함 @@ -263,11 +365,12 @@ Minikube | Minikube | Mirror pod | 미러 파드(mirror pod) | monitoring | 모니터링 | multihomed | 멀티홈드(multihomed) | +MutatingWebhookConfiguration | MutatingWebhookConfiguration | API 오브젝트인 경우 naked pod | 네이키드(naked) 파드 | -Namespace | 네임스페이스 | +Namespace | 네임스페이스(Namespace) | API 오브젝트인 경우 netfilter | 넷필터(netfilter) | -Network Policy | 네트워크 폴리시 | -Node | 노드 | +NetworkPolicy | 네트워크폴리시(NetworkPolicy) | API 오브젝트인 경우 +Node | 노드(Node) | API 오브젝트인 경우 node lease | 노드 리스(lease) Object | 오브젝트 | Orchestrate | 오케스트레이션하다 | @@ -275,16 +378,19 @@ Output | 출력 | parameter | 파라미터 | patch | 패치 | Pending | Pending | 파드, 클레임의 상태에 한함 -Persistent Volume | 퍼시스턴트 볼륨 | -Persistent Volume Claim | 퍼시스턴트 볼륨 클레임 | +PersistentVolume | 퍼시스턴트볼륨(PersistentVolume) | API 오브젝트인 경우 +PersistentVolumeClaim | 퍼시스턴트볼륨클레임(PersistentVolumeClaim) | API 오브젝트인 경우 pipeline | 파이프라인 | placeholder pod | 플레이스홀더(placeholder) 파드 | -Pod | 파드 | +Pod | 파드(Pod) | API 오브젝트인 경우 Pod Preset | 파드 프리셋 | PodAntiAffinity | 파드안티어피니티(PodAntiAffinity) | -PodDisruptionBudget | PodDisruptionBudget | +PodDisruptionBudget | PodDisruptionBudget | API 오브젝트인 경우 +PodSecurityPolicy | 파드시큐리티폴리시(PodSecurityPolicy) | API 오브젝트인 경우 +PodTemplate | 파드템플릿(PodTemplate) | API 오브젝트인 경우 postfix | 접미사 | prefix | 접두사 | +PriorityClass | 프라이어리티클래스(PriorityClass) | API 오브젝트인 경우 Privileged | 특권을 가진(privileged) | Prometheus | 프로메테우스 | proof of concept | 개념 증명 | @@ -300,29 +406,33 @@ redirect | 리다이렉트(redirect) | redirection | 리다이렉션 | Registry | 레지스트리 | Release | 릴리스 | -Replica Set | 레플리카 셋 | +ReplicaSet | 레플리카셋(ReplicaSet) | API 오브젝트인 경우 replicas | 레플리카 | -Replication Controller | 레플리케이션 컨트롤러 | +ReplicationController | 레플리케이션컨트롤러(ReplicationController) | API 오브젝트인 경우 repository | 리포지터리 | resource | 리소스 | Resource Limit | 리소스 상한 | -Resource Quota | 리소스 쿼터 | +ResourceQuota | 리소스쿼터(ResourceQuota) | API 오브젝트인 경우 return | 반환하다 | revision | 리비전 | -Role | 롤 | +Role | 롤(Role) | API 오브젝트인 경우 +RoleBinding | 롤바인딩(RoleBinding) | API 오브젝트인 경우 rollback | 롤백 | rolling update | 롤링 업데이트 | rollout | 롤아웃 | Romana | 로마나(Romana) | Running | Running | 파드의 상태에 한함 runtime | 런타임 | +RuntimeClass | 런타임클래스(RuntimeClass) | API 오브젝트인 경우 Scale | 스케일 | -Secret | 시크릿 | +Secret | 시크릿(Secret) | API 오브젝트인 경우 segment | 세그먼트 | Selector | 셀렉터 | Self-healing | 자가 치유 | -Service | 서비스 | -Service Account | 서비스 어카운트 | +SelfSubjectAccessReview | 셀프서브젝트액세스리뷰(SelfSubjectAccessReview) | API 오브젝트인 경우 +SelfSubjectRulesReview | SelfSubjectRulesReview | API 오브젝트이지만 용어를 구성하는 단어 중 복수형 Rules를 '룰스'로 외래어 표기하는 경우 
한국어 독자에게 다소 생경할 수 있어 예외적으로 영문 용어를 사용함 +Service | 서비스(Service) | API 오브젝트인 경우 +ServiceAccount | 서비스어카운트(ServiceAccount) | API 오브젝트인 경우 service discovery | 서비스 디스커버리 | service mesh | 서비스 메시 | Session | 세션 | @@ -335,10 +445,11 @@ skew | 차이(skew) | snippet | 스니펫(snippet) | spec | 명세, 스펙, 사양 | specification | 명세 | -Stateful Set | 스테이트풀 셋 | +StatefulSet | 스테이트풀셋(StatefulSet) | API 오브젝트인 경우 stateless | 스테이트리스 | Static pod | 스태틱(static) 파드 | -Storage Class | 스토리지 클래스 | +StorageClass | 스토리지클래스(StorageClass) | API 오브젝트인 경우 +SubjectAccessReview | 서브젝트액세스리뷰(SubjectAccessReview) | API 오브젝트인 경우 Sub-Object | 서브-오브젝트 | support | 지원 | Surge | 증가율 | 롤링업데이트 전략에 한함 @@ -346,6 +457,7 @@ System | 시스템 | taint | 테인트(taint) | Task | 태스크 | Terminated | Terminated | 파드의 상태에 한함 +TokenReview | 토큰리뷰(TokenReview) | API 오브젝트인 경우 tolerations | 톨러레이션(toleration) | Topology spread constraints | 토폴로지 분배 제약 조건 | Tools | 도구 | @@ -355,9 +467,11 @@ ubuntu | 우분투 | use case | 유스케이스 | userspace | 유저스페이스(userspace) | Utilization | 사용량, 사용률 | +ValidatingWebhookConfiguration | ValidatingWebhookConfiguration | API 오브젝트인 경우 verbosity | 로그 상세 레벨(verbosity) | virtualization | 가상화 | Volume | 볼륨 | +VolumeAttachment | 볼륨어태치먼트(VolumeAttachment) | API 오브젝트인 경우 Waiting | Waiting | 파드의 상태에 한함 Walkthrough | 연습 | Weave-net | 위브넷(Weave Net) | Weaveworks 사의 솔루션 공식 명칭은 'Weave Net'이므로 한영병기 시 공식 명칭 사용 @@ -366,4 +480,4 @@ Worker | 워커 | 노드의 형태에 한함 Workload | 워크로드 | YAML | YAML | -{{% /capture %}} + diff --git a/content/ko/docs/contribute/new-content/open-a-pr.md b/content/ko/docs/contribute/new-content/open-a-pr.md index 17ea3342c1..6f01e04135 100644 --- a/content/ko/docs/contribute/new-content/open-a-pr.md +++ b/content/ko/docs/contribute/new-content/open-a-pr.md @@ -1,14 +1,14 @@ --- title: 풀 리퀘스트 열기 slug: new-content -content_template: templates/concept +content_type: concept weight: 10 card: name: contribute weight: 40 --- -{{% capture overview %}} + {{< note >}} **코드 개발자**: 향후 쿠버네티스 릴리스의 @@ -22,9 +22,9 @@ card: 변경 사항이 많으면, [로컬 포크에서 작업하기](#fork-the-repo)를 읽고 컴퓨터에서 로컬로 변경하는 방법을 배운다. -{{% /capture %}} -{{% capture body %}} + + ## GitHub을 사용하여 변경하기 @@ -475,10 +475,11 @@ PR에 여러 커밋이 있는 경우, PR을 병합하기 전에 해당 커밋을 느낌을 얻으려면 열린 이슈와 PR을 살펴보자. 이슈나 PR을 제출할 때 가능한 한 상세하게 템플릿의 내용을 작성한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + - 리뷰 프로세스에 대한 자세한 내용은 [리뷰하기](/ko/docs/contribute/reviewing/revewing-prs)를 읽어본다. -{{% /capture %}} + diff --git a/content/ko/docs/contribute/new-content/overview.md b/content/ko/docs/contribute/new-content/overview.md index ca1bc10737..c17a557c6d 100644 --- a/content/ko/docs/contribute/new-content/overview.md +++ b/content/ko/docs/contribute/new-content/overview.md @@ -1,19 +1,19 @@ --- title: 새로운 콘텐츠 기여하기에 대한 개요 linktitle: 개요 -content_template: templates/concept +content_type: concept main_menu: true weight: 5 --- -{{% capture overview %}} + 이 섹션에는 새로운 콘텐츠를 기여하기 전에 알아야 할 정보가 있다. -{{% /capture %}} -{{% capture body %}} + + ## 기여하기에 대한 기본 @@ -54,5 +54,8 @@ CLA에 서명하지 않은 기여자의 풀 리퀘스트(pull request)는 자동 PR 당 하나의 언어로 풀 리퀘스트를 제한한다. 여러 언어로 동일한 코드 샘플을 동일하게 변경해야 하는 경우 각 언어마다 별도의 PR을 연다. +## 기여자를 위한 도구들 + +`kubernetes/website` 리포지터리의 [문서 기여자를 위한 도구](https://github.com/kubernetes/website/tree/master/content/en/docs/doc-contributor-tools) 디렉터리에는 기여 여정이 좀 더 순조롭게 진행되도록 도와주는 도구들이 포함되어 있다. 
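
The open-a-pr hunk above directs contributors with larger changes to work from a local fork. A hedged sketch of that workflow; `<your-user>` and the branch name are placeholders, and the remote layout is the usual fork convention rather than anything this diff mandates:

```shell
# Clone your fork and track the upstream repository.
git clone git@github.com:<your-user>/website.git
cd website
git remote add upstream https://github.com/kubernetes/website.git

# Branch from upstream master, edit files under content/ko/, then publish.
git fetch upstream
git checkout -b my-ko-docs-fix upstream/master
git commit -a -m "Fix wording in ko localization guide"
git push origin my-ko-docs-fix
```
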
+ -{{% /capture %}} diff --git a/content/ko/docs/contribute/participating.md b/content/ko/docs/contribute/participating.md index 401c91844e..8f9cb0b5f6 100644 --- a/content/ko/docs/contribute/participating.md +++ b/content/ko/docs/contribute/participating.md @@ -1,13 +1,13 @@ --- title: SIG Docs에 참여하기 -content_template: templates/concept +content_type: concept weight: 60 card: name: contribute weight: 60 --- -{{% capture overview %}} + SIG Docs는 쿠버네티스 프로젝트의 [분과회(special interest group)](https://github.com/kubernetes/community/blob/master/sig-list.md) @@ -30,9 +30,9 @@ SIG Docs는 모든 컨트리뷰터의 콘텐츠와 리뷰를 환영한다. 문서를 관리하는 책임을 가지는 SIG Docs에서, 이런 체계가 작동하는 특유의 방식에 대한 윤곽을 잡아보겠다. -{{% /capture %}} -{{% capture body %}} + + ## 역할과 책임 @@ -302,9 +302,10 @@ PR 소유자에게 조언하는데 활용된다. [PR Wrangler](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week) 또는 [SIG Docs 의장](#sig-docs-의장)과 같은 특정 역할도 수행한다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + 쿠버네티스 문서화에 기여하는 일에 대한 보다 많은 정보는 다음 문서를 참고한다. @@ -312,5 +313,5 @@ PR 소유자에게 조언하는데 활용된다. - [컨텐츠 검토하기](/docs/contribute/review/reviewing-prs) - [문서 스타일 가이드](/docs/contribute/style/) -{{% /capture %}} + diff --git a/content/ko/docs/contribute/review/_index.md b/content/ko/docs/contribute/review/_index.md index a79fb6129f..161dcc8511 100644 --- a/content/ko/docs/contribute/review/_index.md +++ b/content/ko/docs/contribute/review/_index.md @@ -3,12 +3,12 @@ title: 변경 사항 리뷰하기 weight: 30 --- -{{% capture overview %}} + 이 섹션은 콘텐츠를 리뷰하는 방법에 대해 설명한다. -{{% /capture %}} -{{% capture body %}} -{{% /capture %}} + + + diff --git a/content/ko/docs/contribute/review/for-approvers.md b/content/ko/docs/contribute/review/for-approvers.md index 6713d5a50b..9b6c01d739 100644 --- a/content/ko/docs/contribute/review/for-approvers.md +++ b/content/ko/docs/contribute/review/for-approvers.md @@ -2,11 +2,11 @@ title: 승인자와 리뷰어의 리뷰 linktitle: 승인자와 리뷰어용 slug: for-approvers -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + SIG Docs [리뷰어](/ko/docs/contribute/participating/#리뷰어)와 [승인자](/ko/docs/contribute/participating/#승인자)는 변경 사항을 리뷰할 때 몇 가지 추가 작업을 수행한다. @@ -19,10 +19,10 @@ SIG Docs [리뷰어](/ko/docs/contribute/participating/#리뷰어)와 [승인자 로테이션 외에도, 봇은 영향을 받는 파일의 소유자를 기반으로 PR에 대한 리뷰어와 승인자를 할당한다. -{{% /capture %}} -{{% capture body %}} + + ## PR 리뷰 @@ -224,4 +224,4 @@ https://github.com/kubernetes/kubernetes 에서 ``` -{{% /capture %}} + diff --git a/content/ko/docs/contribute/review/reviewing-prs.md b/content/ko/docs/contribute/review/reviewing-prs.md index c220e9599c..b7416f505a 100644 --- a/content/ko/docs/contribute/review/reviewing-prs.md +++ b/content/ko/docs/contribute/review/reviewing-prs.md @@ -1,11 +1,11 @@ --- title: 풀 리퀘스트 리뷰 -content_template: templates/concept +content_type: concept main_menu: true weight: 10 --- -{{% capture overview %}} + 누구나 문서화에 대한 풀 리퀘스트를 리뷰할 수 있다. 쿠버네티스 website 리포지터리의 [풀 리퀘스트](https://github.com/kubernetes/website/pulls) 섹션을 방문하여 열린(open) 풀 리퀘스트를 확인한다. @@ -19,9 +19,9 @@ weight: 10 [스타일 가이드](/docs/contribute/style/style-guide/)를 읽는다. - 쿠버네티스 문서화 커뮤니티의 다양한 [역할과 책임](/docs/contribute/participating/#roles-and-responsibilities)을 이해한다. -{{% /capture %}} -{{% capture body %}} + + ## 시작하기 전에 @@ -95,4 +95,4 @@ weight: 10 오타나 공백과 같은 작은 이슈의 PR인 경우, 코멘트 앞에 `nit:` 를 추가한다. 이를 통해 문서의 저자는 이슈가 긴급하지 않다는 것을 알 수 있다. 
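
To accompany the review hunks above, a sketch of checking out a pull request locally before leaving `nit:`-style comments. The PR number is hypothetical; `pull/<id>/head` is GitHub's standard ref for PR heads, not something defined in this repository:

```shell
# Fetch the PR head into a local branch and inspect only the Korean content.
git fetch upstream pull/12345/head:pr-12345
git checkout pr-12345
git diff upstream/master...pr-12345 -- content/ko/
```
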
-{{% /capture %}} + diff --git a/content/ko/docs/contribute/style/write-new-topic.md b/content/ko/docs/contribute/style/write-new-topic.md index 4313b3bd2f..0c8ab86fbf 100644 --- a/content/ko/docs/contribute/style/write-new-topic.md +++ b/content/ko/docs/contribute/style/write-new-topic.md @@ -1,19 +1,20 @@ --- title: 새로운 주제의 문서 작성 -content_template: templates/task +content_type: task weight: 20 --- -{{% capture overview %}} + 이 페이지는 쿠버네티스 문서에서 새로운 주제를 생성하는 방법을 보여준다. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + [기여 시작하기](/docs/contribute/start/)에 설명된 대로 쿠버네티스 문서 저장소의 포크(fork)를 생성하자. -{{% /capture %}} -{{% capture steps %}} + + ## 페이지 타입 선택 @@ -159,9 +160,10 @@ kubectl create -f https://k8s.io/examples/pods/storage/gce-volume.yaml 이미지 파일을 `/images` 디렉토리에 넣는다. 기본 이미지 형식은 SVG 이다. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [페이지 템플릿 사용](/docs/contribute/page-templates/))에 대해 알아보기. * [풀 리퀘스트 작성](/docs/contribute/new-content/open-a-pr/)에 대해 알아보기. -{{% /capture %}} + diff --git a/content/ko/docs/contribute/suggesting-improvements.md b/content/ko/docs/contribute/suggesting-improvements.md index ca2e8862ed..c7fe87ac07 100644 --- a/content/ko/docs/contribute/suggesting-improvements.md +++ b/content/ko/docs/contribute/suggesting-improvements.md @@ -1,14 +1,14 @@ --- title: 콘텐츠 개선 제안 slug: suggest-improvements -content_template: templates/concept +content_type: concept weight: 10 card: name: contribute weight: 20 --- -{{% capture overview %}} + 쿠버네티스 문서에 문제가 있거나, 새로운 내용에 대한 아이디어가 있으면, 이슈를 연다. [GitHub 계정](https://github.com/join)과 웹 브라우저만 있으면 된다. @@ -16,9 +16,9 @@ card: 쿠버네티스 기여자는 필요에 따라 이슈를 리뷰, 분류하고 태그를 지정한다. 다음으로, 여러분이나 다른 쿠버네티스 커뮤니티 멤버가 문제를 해결하기 위한 변경 사항이 있는 풀 리퀘스트를 연다. -{{% /capture %}} -{{% capture body %}} + + ## 이슈 열기 @@ -62,4 +62,4 @@ card: 존중한다. 예를 들어, "문서가 끔찍하다"는 도움이 되지 않거나 예의 바르지 않은 피드백이다. -{{% /capture %}} + diff --git a/content/ko/docs/home/_index.md b/content/ko/docs/home/_index.md index 94927fbfc1..2432f8781c 100644 --- a/content/ko/docs/home/_index.md +++ b/content/ko/docs/home/_index.md @@ -13,7 +13,7 @@ menu: title: "문서" weight: 20 post: > -

개념, 튜토리얼 및 참조 문서와 함께 쿠버네티스 사용하는 방법을 익힐 수 있다. 또한, 문서에 기여하는 것도 도움을 줄 수 있다!

+

개념, 튜토리얼 및 참조 문서와 함께 쿠버네티스 사용하는 방법을 익힐 수 있다. 또한, 문서에 기여하는 것도 도움을 줄 수 있다!

description: > 쿠버네티스는 컨테이너화된 애플리케이션의 배포, 확장 및 관리를 자동화하기 위한 오픈소스 컨테이너 오케스트레이션 엔진이다. 오픈소스 프로젝트는 Cloud Native Computing Foundation에서 주관한다. overview: > diff --git a/content/ko/docs/home/supported-doc-versions.md b/content/ko/docs/home/supported-doc-versions.md index 69245a2f41..9bf7edfedf 100644 --- a/content/ko/docs/home/supported-doc-versions.md +++ b/content/ko/docs/home/supported-doc-versions.md @@ -1,20 +1,20 @@ --- title: 쿠버네티스 문서의 버전 지원 -content_template: templates/concept +content_type: concept card: name: about weight: 10 title: 문서의 버전 지원 --- -{{% capture overview %}} + 이 웹 사이트에는 현재 버전의 쿠버네티스와 이전 4개 버전의 쿠버네티스에 대한 문서가 포함되어 있습니다. -{{% /capture %}} -{{% capture body %}} + + ## 현재 버전 @@ -25,4 +25,4 @@ card: {{< versions-other >}} -{{% /capture %}} + diff --git a/content/ko/docs/reference/_index.md b/content/ko/docs/reference/_index.md index a9ce09b988..148abd29a3 100644 --- a/content/ko/docs/reference/_index.md +++ b/content/ko/docs/reference/_index.md @@ -3,16 +3,16 @@ title: 레퍼런스 linkTitle: "레퍼런스" main_menu: true weight: 70 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + -쿠버네티스 문서의 본 섹션에서는 레퍼런스를 다룬다. +쿠버네티스 문서의 본 섹션에서는 레퍼런스를 다룬다. -{{% /capture %}} -{{% capture body %}} + + ## API 레퍼런스 @@ -21,8 +21,8 @@ content_template: templates/concept ## API 클라이언트 라이브러리 -프로그래밍 언어에서 쿠버네티스 API를 호출하기 위해서, -[클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/)를 사용할 수 있다. +프로그래밍 언어에서 쿠버네티스 API를 호출하기 위해서, +[클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/)를 사용할 수 있다. 공식적으로 지원되는 클라이언트 라이브러리는 다음과 같다. - [쿠버네티스 Go 클라이언트 라이브러리](https://github.com/kubernetes/client-go/) @@ -32,7 +32,7 @@ content_template: templates/concept ## CLI 레퍼런스 -* [kubectl](/docs/reference/kubectl/overview/) - 명령어를 실행하거나 쿠버네티스 클러스터를 관리하기 위해 사용하는 주된 CLI 도구. +* [kubectl](/ko/docs/reference/kubectl/overview/) - 명령어를 실행하거나 쿠버네티스 클러스터를 관리하기 위해 사용하는 주된 CLI 도구. * [JSONPath](/docs/reference/kubectl/jsonpath/) - kubectl에서 [JSONPath 표현](http://goessner.net/articles/JsonPath/)을 사용하기 위한 문법 가이드. * [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) - 안정적인 쿠버네티스 클러스터를 쉽게 프로비전하기 위한 CLI 도구. @@ -50,4 +50,4 @@ content_template: templates/concept 쿠버네티스 기능에 대한 설계 문서의 아카이브. [쿠버네티스 아키텍처](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md)와 [쿠버네티스 디자인 개요](https://git.k8s.io/community/contributors/design-proposals)가 좋은 출발점이다. -{{% /capture %}} + diff --git a/content/ko/docs/reference/issues-security/security.md b/content/ko/docs/reference/issues-security/security.md index ba79a2d473..986af01cf1 100644 --- a/content/ko/docs/reference/issues-security/security.md +++ b/content/ko/docs/reference/issues-security/security.md @@ -1,14 +1,14 @@ --- title: 쿠버네티스 보안과 공개 정보 -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + 이 페이지는 쿠버네티스 보안 및 공개 정보를 설명한다. -{{% /capture %}} -{{% capture body %}} + + ## 보안 공지 보안 및 주요 API 공지에 대한 이메일을 위해 [kubernetes-security-announce](https://groups.google.com/forum/#!forum/kubernetes-security-announce)) 그룹에 가입하세요. @@ -21,7 +21,7 @@ weight: 20 보고서를 작성하려면, [쿠버네티스 버그 현상금 프로그램](https://hackerone.com/kubernetes)에 취약점을 제출한다. 이를 통해 표준화된 응답시간으로 취약점을 분류하고 처리할 수 있다. 또한, 보안 세부 내용과 [모든 쿠버네티스 버그 보고서](https://git.k8s.io/kubernetes/.github/ISSUE_TEMPLATE/bug-report.md)로 부터 예상되는 세부사항을 [security@kubernetes.io](mailto:security@kubernetes.io)로 이메일을 보낸다. -[제품 보안 위원회 구성원](https://git.k8s.io/security/security-release-process.md#product-security-committee-psc)의 GPG 키를 사용하여 이 목록으로 이메일을 암호화할 수 있다. 
GPG를 사용한 암호화는 공개할 필요가 없다. +[제품 보안 위원회 구성원](https://git.k8s.io/security/README.md#product-security-committee-psc)의 GPG 키를 사용하여 이 목록으로 이메일을 암호화할 수 있다. GPG를 사용한 암호화는 공개할 필요가 없다. ### 언제 취약점을 보고해야 하는가? @@ -48,4 +48,4 @@ weight: 20 ## 공개 시기 공개 날짜는 쿠버네티스 제품 보안 위원회와 버그 제출자가 협상한다. 사용자 완화가 가능해지면 가능한 빨리 버그를 완전히 공개하는 것이 좋다. 버그 또는 픽스가 아직 완전히 이해되지 않았거나 솔루션이 제대로 테스트되지 않았거나 벤더 협력을 위해 공개를 지연시키는 것이 합리적이다. 공개 기간은 즉시(특히 이미 공개적으로 알려진 경우)부터 몇 주까지입니다. 간단한 완화 기능이 있는 취약점의 경우 보고 날짜부터 공개 날짜까지는 7일 정도 소요될 것으로 예상된다. 쿠버네티스 제품 보안 위원회는 공개 날짜를 설정할 때 최종 결정권을 갖는다. -{{% /capture %}} + diff --git a/content/ko/docs/reference/kubectl/_index.md b/content/ko/docs/reference/kubectl/_index.md new file mode 100755 index 0000000000..7b6c2d720b --- /dev/null +++ b/content/ko/docs/reference/kubectl/_index.md @@ -0,0 +1,5 @@ +--- +title: "kubectl CLI" +weight: 60 +--- + diff --git a/content/ko/docs/reference/kubectl/cheatsheet.md b/content/ko/docs/reference/kubectl/cheatsheet.md index 8956d72474..bcf654b0bd 100644 --- a/content/ko/docs/reference/kubectl/cheatsheet.md +++ b/content/ko/docs/reference/kubectl/cheatsheet.md @@ -1,20 +1,20 @@ --- title: kubectl 치트 시트 -content_template: templates/concept +content_type: concept card: name: reference weight: 30 --- -{{% capture overview %}} + -참고 항목: [Kubectl 개요](/docs/reference/kubectl/overview/)와 [JsonPath 가이드](/docs/reference/kubectl/jsonpath). +참고 항목: [Kubectl 개요](/ko/docs/reference/kubectl/overview/)와 [JsonPath 가이드](/docs/reference/kubectl/jsonpath). 이 페이지는 `kubectl` 커맨드의 개요이다. -{{% /capture %}} -{{% capture body %}} + + # kubectl - 치트 시트 @@ -203,7 +203,7 @@ kubectl diff -f ./my-manifest.yaml ```bash kubectl set image deployment/frontend www=image:v2 # "frontend" 디플로이먼트의 "www" 컨테이너 이미지를 업데이트하는 롤링 업데이트 -kubectl rollout history deployment/frontend # 현 리비전을 포함한 디플로이먼트의 이력을 체크 +kubectl rollout history deployment/frontend # 현 리비전을 포함한 디플로이먼트의 이력을 체크 kubectl rollout undo deployment/frontend # 이전 디플로이먼트로 롤백 kubectl rollout undo deployment/frontend --to-revision=2 # 특정 리비전으로 롤백 kubectl rollout status -w deployment/frontend # 완료될 때까지 "frontend" 디플로이먼트의 롤링 업데이트 상태를 감시 @@ -355,7 +355,7 @@ kubectl get pods -A -o=custom-columns='DATA:spec.containers[?(@.image!="k8s.gcr. kubectl get pods -A -o=custom-columns='DATA:metadata.*' ``` -More examples in the kubectl [reference documentation](/docs/reference/kubectl/overview/#custom-columns). +더 많은 예제는 kubectl [참조 문서](/ko/docs/reference/kubectl/overview/#custom-columns)를 참고한다. ### Kubectl 출력 로그 상세 레벨(verbosity)과 디버깅 @@ -373,11 +373,12 @@ Kubectl 로그 상세 레벨(verbosity)은 `-v` 또는`--v` 플래그와 로그 `--v=8` | HTTP 요청 내용을 표시. `--v=9` | 내용을 잘라 내지 않고 HTTP 요청 내용을 표시. -{{% /capture %}} -{{% capture whatsnext %}} -* [kubectl 개요](/docs/reference/kubectl/overview/)에 대해 더 배워보자. +## {{% heading "whatsnext" %}} + + +* [kubectl 개요](/ko/docs/reference/kubectl/overview/)에 대해 더 배워보자. * [kubectl](/docs/reference/kubectl/kubectl/) 옵션을 참고한다. @@ -385,4 +386,4 @@ Kubectl 로그 상세 레벨(verbosity)은 `-v` 또는`--v` 플래그와 로그 * 더 많은 [kubectl 치트 시트](https://github.com/dennyzhang/cheatsheet-kubernetes-A4) 커뮤니티 확인 -{{% /capture %}} + diff --git a/content/ko/docs/reference/kubectl/overview.md b/content/ko/docs/reference/kubectl/overview.md new file mode 100644 index 0000000000..d70eb8939a --- /dev/null +++ b/content/ko/docs/reference/kubectl/overview.md @@ -0,0 +1,495 @@ +--- +title: kubectl 개요 +content_template: templates/concept +weight: 20 +card: + name: reference + weight: 20 +--- + +{{% capture overview %}} +Kubectl은 쿠버네티스 클러스터를 제어하기 위한 커맨드 라인 도구이다. `kubectl` 은 config 파일을 $HOME/.kube 에서 찾는다. 
KUBECONFIG 환경 변수를 설정하거나 [`--kubeconfig`](/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/) 플래그를 설정하여 다른 [kubeconfig](/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/) 파일을 지정할 수 있다. + +이 개요는 `kubectl` 구문을 다루고, 커맨드 동작을 설명하며, 일반적인 예제를 제공한다. 지원되는 모든 플래그 및 하위 명령을 포함한 각 명령에 대한 자세한 내용은 [kubectl](/docs/reference/generated/kubectl/kubectl-commands/) 참조 문서를 참고한다. 설치 방법에 대해서는 [kubectl 설치](/ko/docs/tasks/tools/install-kubectl/)를 참고한다. + +{{% /capture %}} + +{{% capture body %}} + +## 구문 + +터미널 창에서 `kubectl` 명령을 실행하려면 다음의 구문을 사용한다. + +```shell +kubectl [command] [TYPE] [NAME] [flags] +``` + +다음은 `command`, `TYPE`, `NAME` 과 `flags` 에 대한 설명이다. + +* `command`: 하나 이상의 리소스에서 수행하려는 동작을 지정한다. 예: `create`, `get`, `describe`, `delete` + +* `TYPE`: [리소스 타입](#리소스-타입)을 지정한다. 리소스 타입은 대소문자를 구분하지 않으며 단수형, 복수형 또는 약어 형식을 지정할 수 있다. 예를 들어, 다음의 명령은 동일한 출력 결과를 생성한다. + + ```shell + kubectl get pod pod1 + kubectl get pods pod1 + kubectl get po pod1 + ``` + +* `NAME`: 리소스 이름을 지정한다. 이름은 대소문자를 구분한다. 이름을 생략하면, 모든 리소스에 대한 세부 사항이 표시된다. 예: `kubectl get pods` + + 여러 리소스에 대한 작업을 수행할 때, 타입 및 이름별로 각 리소스를 지정하거나 하나 이상의 파일을 지정할 수 있다. + + * 타입 및 이름으로 리소스를 지정하려면 다음을 참고한다. + + * 리소스가 모두 동일한 타입인 경우 리소스를 그룹화하려면 다음을 사용한다. `TYPE1 name1 name2 name<#>`
+ 예: `kubectl get pod example-pod1 example-pod2` + + * 여러 리소스 타입을 개별적으로 지정하려면 다음을 사용한다. `TYPE1/name1 TYPE1/name2 TYPE2/name3 TYPE<#>/name<#>`
+ 예: `kubectl get pod/example-pod1 replicationcontroller/example-rc1` + + * 하나 이상의 파일로 리소스를 지정하려면 다음을 사용한다. `-f file1 -f file2 -f file<#>` + + * YAML이 특히 구성 파일에 대해 더 사용자 친화적이므로, [JSON 대신 YAML을 사용한다](/ko/docs/concepts/configuration/overview/#일반적인-구성-팁).
+ 예: `kubectl get pod -f ./pod.yaml` + +* `flags`: 선택적 플래그를 지정한다. 예를 들어, `-s` 또는 `--server` 플래그를 사용하여 쿠버네티스 API 서버의 주소와 포트를 지정할 수 있다.
+ +{{< caution >}} +커맨드 라인에서 지정하는 플래그는 기본값과 해당 환경 변수를 무시한다. +{{< /caution >}} + +도움이 필요하다면, 터미널 창에서 `kubectl help` 를 실행한다. + +## 명령어 + +다음 표에는 모든 `kubectl` 작업에 대한 간단한 설명과 일반적인 구문이 포함되어 있다. + +명령어 | 구문 | 설명 +-------------------- | -------------------- | -------------------- +`alpha` | `kubectl alpha SUBCOMMAND [flags]` | 쿠버네티스 클러스터에서 기본적으로 활성화되어 있지 않은 알파 기능의 사용할 수 있는 명령을 나열한다. +`annotate` | kubectl annotate (-f FILENAME | TYPE NAME | TYPE/NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--overwrite] [--all] [--resource-version=version] [flags] | 하나 이상의 리소스 어노테이션을 추가하거나 업데이트한다. +`api-resources` | `kubectl api-resources [flags]` | 사용 가능한 API 리소스를 나열한다. +`api-versions` | `kubectl api-versions [flags]` | 사용 가능한 API 버전을 나열한다. +`apply` | `kubectl apply -f FILENAME [flags]`| 파일이나 표준입력(stdin)으로부터 리소스에 구성 변경 사항을 적용한다. +`attach` | `kubectl attach POD -c CONTAINER [-i] [-t] [flags]` | 실행 중인 컨테이너에 연결하여 출력 스트림을 보거나 표준입력을 통해 컨테이너와 상호 작용한다. +`auth` | `kubectl auth [flags] [options]` | 승인을 검사한다. +`autoscale` | kubectl autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MAXPODS [--cpu-percent=CPU] [flags] | 레플리케이션 컨트롤러에서 관리하는 파드 집합을 자동으로 조정한다. +`certificate` | `kubectl certificate SUBCOMMAND [options]` | 인증서 리소스를 수정한다. +`cluster-info` | `kubectl cluster-info [flags]` | 클러스터의 마스터와 서비스에 대한 엔드포인트 정보를 표시한다. +`completion` | `kubectl completion SHELL [options]` | 지정된 셸(bash 또는 zsh)에 대한 셸 완성 코드를 출력한다. +`config` | `kubectl config SUBCOMMAND [flags]` | kubeconfig 파일을 수정한다. 세부 사항은 개별 하위 명령을 참고한다. +`convert` | `kubectl convert -f FILENAME [options]` | 다른 API 버전 간에 구성 파일을 변환한다. YAML 및 JSON 형식이 모두 허용된다. +`cordon` | `kubectl cordon NODE [options]` | 노드를 스케줄 불가능(unschedulable)으로 표시한다. +`cp` | `kubectl cp [options]` | 컨테이너에서 그리고 컨테이너로 파일 및 디렉터리를 복사한다. +`create` | `kubectl create -f FILENAME [flags]` | 파일이나 표준입력에서 하나 이상의 리소스를 생성한다. +`delete` | kubectl delete (-f FILENAME | TYPE [NAME | /NAME | -l label | --all]) [flags] | 파일, 표준입력 또는 레이블 셀렉터, 이름, 리소스 셀렉터 또는 리소스를 지정하여 리소스를 삭제한다. +`describe` | kubectl describe (-f FILENAME | TYPE [NAME_PREFIX | /NAME | -l label]) [flags] | 하나 이상의 리소스의 자세한 상태를 표시한다. +`diff` | `kubectl diff -f FILENAME [flags]`| 라이브 구성에 대해 파일이나 표준입력의 차이점을 출력한다. +`drain` | `kubectl drain NODE [options]` | 유지 보수를 준비 중인 노드를 드레인한다. +`edit` | kubectl edit (-f FILENAME | TYPE NAME | TYPE/NAME) [flags] | 기본 편집기를 사용하여 서버에서 하나 이상의 리소스 정의를 편집하고 업데이트한다. +`exec` | `kubectl exec POD [-c CONTAINER] [-i] [-t] [flags] [-- COMMAND [args...]]` | 파드의 컨테이너에 대해 명령을 실행한다. +`explain` | `kubectl explain [--recursive=false] [flags]` | 파드, 노드, 서비스 등의 다양한 리소스에 대한 문서를 출력한다. +`expose` | kubectl expose (-f FILENAME | TYPE NAME | TYPE/NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type] [flags] | 레플리케이션 컨트롤러, 서비스 또는 파드를 새로운 쿠버네티스 서비스로 노출한다. +`get` | kubectl get (-f FILENAME | TYPE [NAME | /NAME | -l label]) [--watch] [--sort-by=FIELD] [[-o | --output]=OUTPUT_FORMAT] [flags] | 하나 이상의 리소스를 나열한다. +`kustomize` | `kubectl kustomize [flags] [options]` | kustomization.yaml 파일의 지시 사항에서 생성된 API 리소스 집합을 나열한다. 인수는 파일을 포함하는 디렉터리의 경로이거나, 리포지터리 루트와 관련하여 경로 접미사가 동일한 git 리포지터리 URL이어야 한다. +`label` | kubectl label (-f FILENAME | TYPE NAME | TYPE/NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--overwrite] [--all] [--resource-version=version] [flags] | 하나 이상의 리소스 레이블을 추가하거나 업데이트한다. +`logs` | `kubectl logs POD [-c CONTAINER] [--follow] [flags]` | 파드의 컨테이너에 대한 로그를 출력한다. +`options` | `kubectl options` | 모든 명령에 적용되는 전역 커맨드 라인 옵션을 나열한다. 
+`patch` | kubectl patch (-f FILENAME | TYPE NAME | TYPE/NAME) --patch PATCH [flags] | 전략적 병합 패치 프로세스를 사용하여 리소스의 하나 이상의 필드를 업데이트한다. +`plugin` | `kubectl plugin [flags] [options]` | 플러그인과 상호 작용하기 위한 유틸리티를 제공한다. +`port-forward` | `kubectl port-forward POD [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N] [flags]` | 하나 이상의 로컬 포트를 파드로 전달한다. +`proxy` | `kubectl proxy [--port=PORT] [--www=static-dir] [--www-prefix=prefix] [--api-prefix=prefix] [flags]` | 쿠버네티스 API 서버에 프록시를 실행한다. +`replace` | `kubectl replace -f FILENAME` | 파일 또는 표준입력에서 리소스를 교체한다. +`rollout` | `kubectl rollout SUBCOMMAND [options]` | 리소스의 롤아웃을 관리한다. 유효한 리소스 타입에는 디플로이먼트(deployment), 데몬셋(daemonset)과 스테이트풀셋(statefulset)이 포함된다. +`run` | kubectl run NAME --image=image [--env="key=value"] [--port=port] [--dry-run=server|client|none] [--overrides=inline-json] [flags] | 클러스터에서 지정된 이미지를 실행한다. +`scale` | kubectl scale (-f FILENAME | TYPE NAME | TYPE/NAME) --replicas=COUNT [--resource-version=version] [--current-replicas=count] [flags] | 지정된 레플리케이션 컨트롤러의 크기를 업데이트한다. +`set` | `kubectl set SUBCOMMAND [options]` | 애플리케이션 리소스를 구성한다. +`taint` | `kubectl taint NODE NAME KEY_1=VAL_1:TAINT_EFFECT_1 ... KEY_N=VAL_N:TAINT_EFFECT_N [options]` | 하나 이상의 노드에서 테인트(taint)를 업데이트한다. +`top` | `kubectl top [flags] [options]` | 리소스(CPU/메모리/스토리지) 사용량을 표시한다. +`uncordon` | `kubectl uncordon NODE [options]` | 노드를 스케줄 가능(schedulable)으로 표시한다. +`version` | `kubectl version [--client] [flags]` | 클라이언트와 서버에서 실행 중인 쿠버네티스 버전을 표시한다. +`wait` | kubectl wait ([-f FILENAME] | resource.group/resource.name | resource.group [(-l label | --all)]) [--for=delete|--for condition=available] [options] | 실험(experimental) 기능: 하나 이상의 리소스에서 특정 조건을 기다린다. + +기억하기: 명령 동작에 대한 자세한 내용은 [kubectl](/docs/user-guide/kubectl/) 참조 문서를 참고한다. + +## 리소스 타입 + +다음 표에는 지원되는 모든 리소스 타입과 해당 약어가 나열되어 있다. + +(이 출력은 `kubectl api-resources` 에서 확인할 수 있으며, 쿠버네티스 1.13.3 부터 일치한다.) 
+ +| 리소스 이름 | 짧은 이름 | API 그룹 | 네임스페이스 | 리소스 종류 | +|---|---|---|---|---| +| `bindings` | | | true | Binding| +| `componentstatuses` | `cs` | | false | ComponentStatus | +| `configmaps` | `cm` | | true | ConfigMap | +| `endpoints` | `ep` | | true | Endpoints | +| `limitranges` | `limits` | | true | LimitRange | +| `namespaces` | `ns` | | false | Namespace | +| `nodes` | `no` | | false | Node | +| `persistentvolumeclaims` | `pvc` | | true | PersistentVolumeClaim | +| `persistentvolumes` | `pv` | | false | PersistentVolume | +| `pods` | `po` | | true | Pod | +| `podtemplates` | | | true | PodTemplate | +| `replicationcontrollers` | `rc` | | true| ReplicationController | +| `resourcequotas` | `quota` | | true | ResourceQuota | +| `secrets` | | | true | Secret | +| `serviceaccounts` | `sa` | | true | ServiceAccount | +| `services` | `svc` | | true | Service | +| `mutatingwebhookconfigurations` | | admissionregistration.k8s.io | false | MutatingWebhookConfiguration | +| `validatingwebhookconfigurations` | | admissionregistration.k8s.io | false | ValidatingWebhookConfiguration | +| `customresourcedefinitions` | `crd`, `crds` | apiextensions.k8s.io | false | CustomResourceDefinition | +| `apiservices` | | apiregistration.k8s.io | false | APIService | +| `controllerrevisions` | | apps | true | ControllerRevision | +| `daemonsets` | `ds` | apps | true | DaemonSet | +| `deployments` | `deploy` | apps | true | Deployment | +| `replicasets` | `rs` | apps | true | ReplicaSet | +| `statefulsets` | `sts` | apps | true | StatefulSet | +| `tokenreviews` | | authentication.k8s.io | false | TokenReview | +| `localsubjectaccessreviews` | | authorization.k8s.io | true | LocalSubjectAccessReview | +| `selfsubjectaccessreviews` | | authorization.k8s.io | false | SelfSubjectAccessReview | +| `selfsubjectrulesreviews` | | authorization.k8s.io | false | SelfSubjectRulesReview | +| `subjectaccessreviews` | | authorization.k8s.io | false | SubjectAccessReview | +| `horizontalpodautoscalers` | `hpa` | autoscaling | true | HorizontalPodAutoscaler | +| `cronjobs` | `cj` | batch | true | CronJob | +| `jobs` | | batch | true | Job | +| `certificatesigningrequests` | `csr` | certificates.k8s.io | false | CertificateSigningRequest | +| `leases` | | coordination.k8s.io | true | Lease | +| `events` | `ev` | events.k8s.io | true | Event | +| `ingresses` | `ing` | extensions | true | Ingress | +| `networkpolicies` | `netpol` | networking.k8s.io | true | NetworkPolicy | +| `poddisruptionbudgets` | `pdb` | policy | true | PodDisruptionBudget | +| `podsecuritypolicies` | `psp` | policy | false | PodSecurityPolicy | +| `clusterrolebindings` | | rbac.authorization.k8s.io | false | ClusterRoleBinding | +| `clusterroles` | | rbac.authorization.k8s.io | false | ClusterRole | +| `rolebindings` | | rbac.authorization.k8s.io | true | RoleBinding | +| `roles` | | rbac.authorization.k8s.io | true | Role | +| `priorityclasses` | `pc` | scheduling.k8s.io | false | PriorityClass | +| `csidrivers` | | storage.k8s.io | false | CSIDriver | +| `csinodes` | | storage.k8s.io | false | CSINode | +| `storageclasses` | `sc` | storage.k8s.io | false | StorageClass | +| `volumeattachments` | | storage.k8s.io | false | VolumeAttachment | + +## 출력 옵션 + +특정 명령의 출력을 서식화하거나 정렬하는 방법에 대한 정보는 다음 섹션을 참고한다. 다양한 출력 옵션을 지원하는 명령에 대한 자세한 내용은 [kubectl](/docs/user-guide/kubectl/) 참조 문서를 참고한다. + +### 출력 서식화 + +모든 `kubectl` 명령의 기본 출력 형식은 사람이 읽을 수 있는 일반 텍스트 형식이다. 특정 형식으로 터미널 창에 세부 정보를 출력하려면, 지원되는 `kubectl` 명령에 `-o` 또는 `--output` 플래그를 추가할 수 있다. 
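
As a quick illustration of the `-o`/`--output` flag introduced above (the pod name `web-0` is hypothetical), the same query rendered in several of the supported formats:

```shell
kubectl get pod web-0 -o wide                         # plain text with extra columns
kubectl get pod web-0 -o yaml                         # the full object as YAML
kubectl get pod web-0 -o jsonpath='{.status.podIP}'   # extract a single field
kubectl get pod web-0 -o custom-columns='NAME:.metadata.name,NODE:.spec.nodeName'
```
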
+ +#### 구문 + +```shell +kubectl [command] [TYPE] [NAME] -o +``` + +`kubectl` 명령에 따라, 다음과 같은 출력 형식이 지원된다. + +출력 형식 | 설명 +--------------| ----------- +`-o custom-columns=` | 쉼표로 구분된 [사용자 정의 열](#custom-columns) 목록을 사용하여 테이블을 출력한다. +`-o custom-columns-file=` | `` 파일에서 [사용자 정의 열](#custom-columns) 템플릿을 사용하여 테이블을 출력한다. +`-o json` | JSON 형식의 API 오브젝트를 출력한다. +`-o jsonpath=