Merge remote-tracking branch 'upstream/master' into dev-1.19
commit f1966fe56a

@ -33,4 +33,4 @@ resources/
# Netlify Functions build output
package-lock.json
functions/
node_modules/
node_modules/
@ -0,0 +1,16 @@
DirectoryPath: public/docs
IgnoreDirectoryMissingTrailingSlash: true
CheckExternal: false
IgnoreAltMissing: true
CheckImages: false
CheckScripts: false
CheckMeta: false
CheckMetaRefresh: false
CheckLinks: false
EnforceHTML5: false
EnforceHTTPS: false
IgnoreDirectoryMissingTrailingSlash: false
IgnoreInternalEmptyHash: true
IgnoreEmptyHref: true
IgnoreDirs:
- "reference/generated/kubernetes-api"
Makefile
@ -45,8 +45,16 @@ docker-build:
	$(DOCKER_RUN) $(DOCKER_IMAGE) hugo

docker-serve:
	$(DOCKER_RUN) -p 1313:1313 $(DOCKER_IMAGE) hugo server --buildFuture --bind 0.0.0.0
	$(DOCKER_RUN) --mount type=tmpfs,destination=/src/resources,tmpfs-mode=0755 -p 1313:1313 $(DOCKER_IMAGE) hugo server --buildFuture --bind 0.0.0.0

test-examples:
	scripts/test_examples.sh install
	scripts/test_examples.sh run

.PHONY: link-checker-setup
link-checker-image-pull:
	docker pull wjdp/htmltest

docker-internal-linkcheck: link-checker-image-pull
	$(DOCKER_RUN) $(DOCKER_IMAGE) hugo --config config.toml,linkcheck-config.toml --buildFuture
	$(DOCKER) run --mount type=bind,source=$(CURDIR),target=/test --rm wjdp/htmltest htmltest
@ -137,6 +137,7 @@ aliases:
- inductor
- nasa9084
sig-docs-ja-reviews: # PR reviews for Japanese content
- bells17
- cstoku
- inductor
- makocchi-git
@ -83,10 +83,12 @@ githubWebsiteRepo = "github.com/kubernetes/website"
githubWebsiteRaw = "raw.githubusercontent.com/kubernetes/website"

# param for displaying an announcement block on every page; see PR #16210
announcement = false
announcement = true
# announcement_message is only displayed when announcement = true; update with your specific message
announcement_message = "The Kubernetes Documentation team would like your feedback! Please take a <a href='https://www.surveymonkey.com/r/8R237FN' target='_blank'>short survey</a> so we can improve the Kubernetes online documentation."

announcement_title = "Black lives matter."
announcement_message_full = "We stand in solidarity with the Black community.<br/>Racism is unacceptable.<br/>It conflicts with the [core values of the Kubernetes project](https://git.k8s.io/community/values.md) and our community does not tolerate it." #appears on homepage. Use md formatting for links and <br/> for line breaks.
announcement_message_compact = "We stand in solidarity with the Black community.<br/>Racism is unacceptable.<br/>It conflicts with the [core values of the Kubernetes project](https://git.k8s.io/community/values.md) and our community does not tolerate it." #appears on subpages
announcement_bg = "#000000" #choose a dark color – text is white

[params.pushAssets]
css = [
@ -3,8 +3,6 @@ title: "Production-Grade Container Orchestration"
abstract: "Automated container deployment, scaling, and management"
cid: home
---
{{< announcement >}}

{{< deprecationwarning >}}

{{< blocks/section id="oceanNodes" >}}

@ -60,4 +58,4 @@ Kubernetes is open source giving you the freedom to take advantage of on-premise

{{< blocks/kubernetes-features >}}

{{< blocks/case-studies >}}
{{< blocks/case-studies >}}
@ -60,9 +60,7 @@ using `kube-apiserver --feature-gates DryRun=true`.
If you have dynamic admission controllers, you might have to fix them to:

- Remove any side-effects when the dry-run parameter is specified on the webhook request,
- Specify in the [`sideEffects`](/docs/reference/generated/kubernetes-api/v1.13/#webhook-v1beta1-admissionregistration)
  field of the `admissionregistration.k8s.io/v1beta1.Webhook` object to indicate that the object doesn't
  have side-effects on dry-run (or at all).
- Specify in the [`sideEffects`](https://v1-13.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#webhook-v1beta1-admissionregistration) field of the `admissionregistration.k8s.io/v1beta1.Webhook` object to indicate that the object doesn't have side-effects on dry-run (or at all).

### How to use it

@ -71,7 +71,7 @@ Introducing CSI plugin support for out-of-tree providers, enabling Windows nodes

## Introducing Endpoint Slices

The release of Kubernetes 1.16 includes an exciting new alpha feature: the EndpointSlice API. This API provides a scalable and extensible alternative to the [Endpoints](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#endpoints-v1-core) resource, which dates back to the very first versions of Kubernetes. Behind the scenes, Endpoints play a big role in network routing within Kubernetes. Each Service endpoint is tracked within these resources - kube-proxy uses them for generating proxy rules that allow pods to communicate with each other so easily in Kubernetes, and many ingress controllers use them to route HTTP traffic directly to pods.
The release of Kubernetes 1.16 includes an exciting new alpha feature: the EndpointSlice API. This API provides a scalable and extensible alternative to the [Endpoints](https://v1-16.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#endpoints-v1-core) resource, which dates back to the very first versions of Kubernetes. Behind the scenes, Endpoints play a big role in network routing within Kubernetes. Each Service endpoint is tracked within these resources - kube-proxy uses them for generating proxy rules that allow pods to communicate with each other so easily in Kubernetes, and many ingress controllers use them to route HTTP traffic directly to pods.

### Providing Greater Scalability

@ -43,14 +43,14 @@ Upon arrival at the handler, a request is assigned to exactly one priority level

* FlowSchema: FlowSchema will identify a PriorityLevelConfiguration object and the way to compute the request’s “flow identifier”. Currently we support matching requests according to: the identity making the request, the verb, and the target object. The identity can match in terms of: a username, a user group name, or a ServiceAccount. And as for the target objects, we can match by apiGroup, resource[/subresource], and namespace.
  * The flow identifier is used for shuffle sharding, so it’s important that requests have the same flow identifier if they are from the same source! We like to consider scenarios with “elephants” (which send many/heavy requests) vs “mice” (which send few/light requests): it is important to make sure the elephant’s requests all get the same flow identifier, otherwise they will look like many different mice to the system!
  * See the API Documentation [here](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#flowschema-v1alpha1-flowcontrol-apiserver-k8s-io)!
  * See the API Documentation [here](https://kubernetes.io/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#flowschema-v1alpha1-flowcontrol-apiserver-k8s-io)!

* PriorityLevelConfiguration: Defines a priority level.
  * For apiserver self requests, and any reentrant traffic (e.g., admission webhooks which themselves make API requests), a Priority Level can be marked “exempt”, which means that no queueing or limiting of any sort is done. This is to prevent priority inversions.
  * Each non-exempt Priority Level is configured with a number of "concurrency shares" and gets an isolated pool of concurrency to use. Requests of that Priority Level run in that pool when it is not full, never anywhere else. Each apiserver is configured with a total concurrency limit (taken to be the sum of the old limits on mutating and readonly requests), and this is then divided among the Priority Levels in proportion to their concurrency shares.
  * A non-exempt Priority Level may select a number of queues and a "hand size" to use for the shuffle sharding. Shuffle sharding maps flows to queues in a way that is better than consistent hashing. A given flow has access to a small collection of queues, and for each incoming request the shortest queue is chosen. When a Priority Level has queues, it also sets a limit on queue length. There is also a limit placed on how long a request can wait in its queue; this is a fixed fraction of the apiserver's request timeout. A request that cannot be executed and cannot be queued (any longer) is rejected.
  * Alternatively, a non-exempt Priority Level may select immediate rejection instead of waiting in a queue.
  * See the [API documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#prioritylevelconfiguration-v1alpha1-flowcontrol-apiserver-k8s-io) for this feature.
  * See the [API documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#prioritylevelconfiguration-v1alpha1-flowcontrol-apiserver-k8s-io) for this feature.

## What’s missing? When will there be a beta?
We’re already planning a few enhancements based on alpha and there will be more as users send feedback to our community. Here’s a list of them:
@ -0,0 +1,67 @@
|
|||
---
|
||||
layout: blog
|
||||
title: "An Introduction to the K8s-Infrastructure Working Group"
|
||||
date: 2020-05-27
|
||||
slug: an-introduction-to-the-k8s-infrastructure-working-group
|
||||
---
|
||||
|
||||
**Author**: [Kiran "Rin" Oliver](https://twitter.com/kiran_oliver) Storyteller, Kubernetes Upstream Marketing Team
|
||||
|
||||
# An Introduction to the K8s-Infrastructure Working Group
|
||||
|
||||
*Welcome to part one of a new series introducing the K8s-Infrastructure working group!*
|
||||
|
||||
When Kubernetes was formed in 2014, Google undertook the task of building and maintaining the infrastructure necessary for keeping the project running smoothly. The tools themselves were open source, but the Google Cloud Platform project used to run the infrastructure was internal-only, preventing contributors from being able to help out. In August 2018, Google granted the Cloud Native Computing Foundation [$9M in credits for the operation of Kubernetes](https://cloud.google.com/blog/products/gcp/google-cloud-grants-9m-in-credits-for-the-operation-of-the-kubernetes-project). The sentiment behind this was that a project such as Kubernetes should be both maintained and operated by the community itself rather than by a single vendor.
|
||||
|
||||
A group of community members enthusiastically undertook the task of collaborating on the path forward, realizing that there was a [more formal infrastructure necessary](https://github.com/kubernetes/community/issues/2715). They joined together as a cross-team working group with ownership spanning across multiple Kubernetes SIGs (Architecture, Contributor Experience, Release, and Testing). [Aaron Crickenberger](https://twitter.com/spiffxp) worked with the Kubernetes Steering Committee to enable the formation of the working group, co-drafting a charter alongside long-time collaborator [Davanum Srinivas](https://twitter.com/dims), and by 2019 the working group was official.
|
||||
|
||||
## What Issues Does the K8s-Infrastructure Working Group Tackle?
|
||||
|
||||
The team took on the complex task of managing the many moving parts of the infrastructure that sustains Kubernetes as a project.
|
||||
|
||||
The work started out of necessity: the first problem they took on was a complete migration of all of the project's infrastructure from Google-owned infrastructure to the Cloud Native Computing Foundation (CNCF). This is being done so that the project is self-sustaining without needing any direct assistance from individual vendors. This breaks down in the following ways:
|
||||
|
||||
* Identifying what infrastructure the Kubernetes project depends on.
|
||||
* What applications are running?
|
||||
* Where does it run?
|
||||
* Where is its source code?
|
||||
* What is custom built?
|
||||
* What is off-the-shelf?
|
||||
* What services depend on each other?
|
||||
* How is it administered?
|
||||
* Documenting guidelines and policies for how to run the infrastructure as a community.
|
||||
* What are our access policies?
|
||||
* How do we keep track of billing?
|
||||
* How do we ensure privacy and security?
|
||||
* Migrating infrastructure over to the CNCF as-is.
|
||||
* What is the path of least resistance to migration?
|
||||
* Improving the state of the infrastructure for sustainability.
|
||||
* Moving from humans running scripts to a more automated GitOps model (YAML all the things!)
|
||||
* Supporting community members who wish to develop new infrastructure
|
||||
* Documenting the state of our efforts, better defining goals, and completeness indicators.
|
||||
* The project and program management necessary to communicate this work to our [massive community of contributors](https://kubernetes.io/blog/2020/04/21/contributor-communication/)
|
||||
|
||||
## The challenge of K8s-Infrastructure is documentation
|
||||
|
||||
The most crucial problem the working group is trying to tackle is that the project is all volunteer-led. This leads to contributors, chairs, and others involved in the project quickly becoming overscheduled. As a result, certain areas such as documentation and organization often lack information, and efforts to make progress are taking longer than the group would like.
|
||||
|
||||
Some of the infrastructure that is being migrated over hasn't been updated in a while, and its original authors or directly responsible individuals have moved on from working on Kubernetes. While it is a testament to the code that it was able to run untouched for such a long period of time, from a migration perspective this makes it difficult to work out how to operate these components and how to move these infrastructure pieces where they need to be.
|
||||
|
||||
The lack of documentation is being addressed head-on by group member [Bart Smykla](https://twitter.com/bartsmykla), but there is a definite need for others to support. If you're looking for a way to [get involved](https://github.com/kubernetes/community/labels/wg%2Fk8s-infra) and learn the infrastructure, you can become a new contributor to the working group!
|
||||
|
||||
## Celebrating some Working Group wins
|
||||
|
||||
The team has made progress in the last few months that is well worth celebrating.
|
||||
|
||||
- The K8s-Infrastructure Working Group released an automated billing report that they start every meeting off by reviewing as a group.
|
||||
- DNS for k8s.io and kubernetes.io is also fully [community-owned](https://groups.google.com/g/kubernetes-dev/c/LZTYJorGh7c/m/u-ydk-yNEgAJ), with community members able to [file issues](https://github.com/kubernetes/k8s.io/issues/new?assignees=&labels=wg%2Fk8s-infra&template=dns-request.md&title=DNS+REQUEST%3A+%3Cyour-dns-record%3E) to manage records.
|
||||
- The container registry [k8s.gcr.io](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io) is also fully community-owned and available for all Kubernetes subprojects to use.
|
||||
- The Kubernetes [publishing-bot](https://github.com/kubernetes/publishing-bot) responsible for keeping k8s.io/kubernetes/staging repositories published to their own top-level repos (For example: [kubernetes/api](https://github.com/kubernetes/api)) runs on a community-owned cluster.
|
||||
- The gcsweb.k8s.io service used to provide anonymous access to GCS buckets for kubernetes artifacts runs on a community-owned cluster.
|
||||
- There is also an automated process of promoting all our container images. This includes a fully documented infrastructure, managed by the Kubernetes community, with automated processes for provisioning permissions.
|
||||
|
||||
These are just a few of the things currently happening in the K8s Infrastructure working group.
|
||||
|
||||
If you're interested in getting involved, be sure to join the [#wg-K8s-infra Slack Channel](https://app.slack.com/client/T09NY5SBT/CCK68P2Q2). Meetings are 60 minutes long, and are held every other Wednesday at 8:30 AM PT/16:30 UTC.
|
||||
|
||||
Join to help with the documentation, stay to learn about the amazing infrastructure supporting the Kubernetes community.
|
||||
|
|
@ -0,0 +1,655 @@
|
|||
---
|
||||
title: "My exciting journey into Kubernetes’ history"
|
||||
date: 2020-05-28
|
||||
slug: kubernetes-history
|
||||
url: /blog/2020/05/my-exciting-journey-into-kubernetes-history
|
||||
---
|
||||
|
||||
**Author:** Sascha Grunert, SUSE Software Solutions
|
||||
|
||||
_Editor's note: Sascha is part of [SIG Release][0] and is working on many other
container runtime related topics. Feel free to reach out to him on
Twitter [@saschagrunert][1]._
|
||||
|
||||
[0]: https://github.com/kubernetes/sig-release
|
||||
[1]: https://twitter.com/saschagrunert
|
||||
|
||||
---
|
||||
|
||||
> A story of data science-ing 90,000 GitHub issues and pull requests by using
|
||||
> Kubeflow, TensorFlow, Prow and a fully automated CI/CD pipeline.
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Getting the Data](#getting-the-data)
|
||||
- [Exploring the Data](#exploring-the-data)
|
||||
- [Labels, Labels, Labels](#labels-labels-labels)
|
||||
- [Building the Machine Learning Model](#building-the-machine-learning-model)
|
||||
- [Doing some first Natural Language Processing (NLP)](#doing-some-first-natural-language-processing-nlp)
|
||||
- [Creating the Multi-Layer Perceptron (MLP) Model](#creating-the-multi-layer-perceptron-mlp-model)
|
||||
- [Training the Model](#training-the-model)
|
||||
- [A first Prediction](#a-first-prediction)
|
||||
- [Automate Everything](#automate-everything)
|
||||
- [Automatic Labeling of new PRs](#automatic-labeling-of-new-prs)
|
||||
- [Summary](#summary)
|
||||
|
||||
# Introduction
|
||||
|
||||
Choosing the right steps when working in the field of data science is truly no
|
||||
silver bullet. Most data scientists might have their custom workflow, which
|
||||
could be more or less automated, depending on their area of work. Using
|
||||
[Kubernetes][10] can be a tremendous enhancement when trying to automate
|
||||
workflows on a large scale. In this blog post, I would like to take you on my
|
||||
journey of doing data science while integrating the overall workflow into
|
||||
Kubernetes.
|
||||
|
||||
The target of the research I did in the past few months was to find any
useful information about all those thousands of GitHub issues and pull requests
(PRs) we have in the [Kubernetes repository][11]. What I ended up with was a
fully automated data science workflow for Continuous Integration (CI) and
Continuous Deployment (CD), running in Kubernetes and powered by [Kubeflow][12]
and [Prow][13]. You may not know both of them yet, but we'll get to the point
where I explain what they're doing in detail. The source code of my work can be
found in the [kubernetes-analysis GitHub repository][14], which contains
everything source code-related as well as the raw data. But how do we retrieve
the data I'm talking about? Well, this is where the story begins.
|
||||
|
||||
[10]: https://kubernetes.io
|
||||
[11]: https://github.com/kubernetes/kubernetes
|
||||
[12]: https://www.kubeflow.org
|
||||
[13]: https://github.com/kubernetes/test-infra/tree/master/prow
|
||||
[14]: https://github.com/kubernetes-analysis/kubernetes-analysis
|
||||
|
||||
# Getting the Data
|
||||
|
||||
The foundation for my experiments is the raw GitHub API data in plain [JSON][23]
|
||||
format. The necessary data can be retrieved via the [GitHub issues
|
||||
endpoint][20], which returns all pull requests as well as regular issues in the
|
||||
[REST][21] API. I exported roughly **91,000** issues and pull requests in
the first iteration into a massive **650 MiB** data blob. This took me about
**8 hours** of data retrieval time because, of course, the GitHub API is [rate
limited][22]. To be able to put this data into a GitHub repository, I chose
to compress it via [`xz(1)`][24]. The result was a roughly [25 MiB sized
tarball][25], which fits well into the repository.
|
||||
|
||||
[20]: https://developer.github.com/v3/issues
|
||||
[21]: https://en.wikipedia.org/wiki/Representational_state_transfer
|
||||
[22]: https://developer.github.com/apps/building-github-apps/understanding-rate-limits-for-github-apps/
|
||||
[23]: https://en.wikipedia.org/wiki/JSON
|
||||
[24]: https://linux.die.net/man/1/xz
|
||||
[25]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/master/data/api.tar.xz
|
||||
|
||||
I had to find a way to regularly update the dataset, because Kubernetes
issues and pull requests are updated by users over time and new ones are
created continuously. To achieve the continuous update without having to wait
8 hours over and over again, I now fetch the delta of GitHub API data between
the [last update][31] and the current time. This way, a Continuous Integration
job can update the data on a regular basis, while I can continue my research
with the latest available set of data.
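For illustration, fetching such a delta essentially boils down to calling the
issues endpoint with the `since` parameter; here is a minimal sketch using
`requests` (not the tool's actual implementation):

```python
import os

import requests

API = "https://api.github.com/repos/kubernetes/kubernetes/issues"
HEADERS = {"Authorization": f"token {os.environ['GITHUB_TOKEN']}"}


def fetch_delta(since: str):
    """Yield all issues and PRs updated after the given ISO 8601 timestamp."""
    page = 1
    while True:
        resp = requests.get(
            API,
            headers=HEADERS,
            params={"state": "all", "since": since, "per_page": 100, "page": page},
        )
        resp.raise_for_status()
        items = resp.json()
        if not items:
            return
        yield from items
        page += 1


for item in fetch_delta("2020-05-09T10:57:40Z"):
    print(item["number"], item["title"])
```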
|
||||
|
||||
From a tooling perspective, I’ve written an [all-in-one Python executable][30],
|
||||
which allows us to trigger the different steps during the data science
|
||||
experiments separately via dedicated subcommands. For example, to run an export
|
||||
of the whole data set, we can call:
|
||||
|
||||
[30]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/master/main
|
||||
[31]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/master/.update
|
||||
|
||||
```
|
||||
> export GITHUB_TOKEN=<MY-SECRET-TOKEN>
|
||||
> ./main export
|
||||
INFO | Getting GITHUB_TOKEN from environment variable
|
||||
INFO | Dumping all issues
|
||||
INFO | Pulling 90929 items
|
||||
INFO | 1: Unit test coverage in Kubelet is lousy. (~30%)
|
||||
INFO | 2: Better error messages if go isn't installed, or if gcloud is old.
|
||||
INFO | 3: Need real cluster integration tests
|
||||
INFO | 4: kubelet should know which containers it is managing
|
||||
… [just wait 8 hours] …
|
||||
```
|
||||
|
||||
To update the data between the last time stamp stored in the repository we can
|
||||
run:
|
||||
|
||||
```
|
||||
> ./main export --update-api
|
||||
INFO | Getting GITHUB_TOKEN from environment variable
|
||||
INFO | Retrieving issues and PRs
|
||||
INFO | Updating API
|
||||
INFO | Got update timestamp: 2020-05-09T10:57:40.854151
|
||||
INFO | 90786: Automated cherry pick of #90749: fix: azure disk dangling attach issue
|
||||
INFO | 90674: Switch core master base images from debian to distroless
|
||||
INFO | 90086: Handling error returned by request.Request.ParseForm()
|
||||
INFO | 90544: configurable weight on the CPU and memory
|
||||
INFO | 87746: Support compiling Kubelet w/o docker/docker
|
||||
INFO | Using already extracted data from data/data.pickle
|
||||
INFO | Loading pickle dataset
|
||||
INFO | Parsed 34380 issues and 55832 pull requests (90212 items)
|
||||
INFO | Updating data
|
||||
INFO | Updating issue 90786 (updated at 2020-05-09T10:59:43Z)
|
||||
INFO | Updating issue 90674 (updated at 2020-05-09T10:58:27Z)
|
||||
INFO | Updating issue 90086 (updated at 2020-05-09T10:58:26Z)
|
||||
INFO | Updating issue 90544 (updated at 2020-05-09T10:57:51Z)
|
||||
INFO | Updating issue 87746 (updated at 2020-05-09T11:01:51Z)
|
||||
INFO | Saving data
|
||||
```
|
||||
|
||||
This gives us an idea of how fast the project is actually moving: On a Saturday
|
||||
at noon (European time), 5 issues and pull requests got updated within literally 5
|
||||
minutes!
|
||||
|
||||
Funnily enough, [Joe Beda][32], one of the founders of Kubernetes, created the
first GitHub issue, [mentioning that the unit test coverage is too low][33]. The
issue has no description beyond its title and none of the richer labeling we
know from more recent issues and pull requests. But now we have to explore the
exported data more deeply to do something useful with it.
|
||||
|
||||
[32]: https://github.com/jbeda
|
||||
[33]: https://github.com/kubernetes/kubernetes/issues/1
|
||||
|
||||
# Exploring the Data
|
||||
|
||||
Before we can start creating machine learning models and train them, we have to
|
||||
get an idea about how our data is structured and what we want to achieve in
|
||||
general.
|
||||
|
||||
To get a better feeling about the amount of data, let’s look at how many issues
|
||||
and pull requests have been created over time inside the Kubernetes repository:
|
||||
|
||||
```
|
||||
> ./main analyze --created
|
||||
INFO | Using already extracted data from data/data.pickle
|
||||
INFO | Loading pickle dataset
|
||||
INFO | Parsed 34380 issues and 55832 pull requests (90212 items)
|
||||
```
|
||||
|
||||
The Python [matplotlib][40] module should pop up a graph which looks like this:
|
||||
|
||||

|
||||
|
||||
[40]: https://matplotlib.org
|
||||
|
||||
Okay, this doesn't look that spectacular, but it gives us an impression of how
the project has grown over the past six years. To get a better idea about the
speed of development of the project, we can look at the _created-vs-closed_
metric. This means that on our timeline, we add one to the y-axis whenever an
issue or pull request is created and subtract one whenever it is closed. Now the
chart looks like this:
|
||||
|
||||
```
|
||||
> ./main analyze --created-vs-closed
|
||||
```
|
||||
|
||||

|
||||
|
||||
At the beginning of 2018, the Kubernetes project introduced enhanced
life-cycle management via the glorious [fejta-bot][41]. It automatically
closes issues and pull requests after they have been stale for a longer period
of time. This resulted in a massive wave of closed issues, which does not show
up for pull requests to the same extent. We can see this by looking at the
_created-vs-closed_ metric only for pull requests:
|
||||
|
||||
[41]: https://github.com/fejta-bot
|
||||
|
||||
```
|
||||
> ./main analyze --created-vs-closed --pull-requests
|
||||
```
|
||||
|
||||

|
||||
|
||||
The overall impact is not that obvious. What we can see is that the increasing
|
||||
number of peaks in the PR chart indicates that the project is moving faster over
|
||||
time. Usually, a candlestick chart would be a better choice for showing this kind
|
||||
of volatility-related information. I’d also like to highlight that it looks like
|
||||
the development of the project slowed down a bit in the beginning of 2020.
|
||||
|
||||
Parsing the raw JSON in every analysis iteration is not the fastest approach in
Python. So I decided to parse the more important information, for example the
content, title and creation time, into dedicated [issue][50] and
[PR classes][51]. This data gets [pickle][58] serialized into the repository as
well, which allows an overall faster startup, independent of the raw JSON blob.
|
||||
|
||||
A pull request is more or less the same as an issue in my analysis, except that
|
||||
it contains a release note.
|
||||
|
||||
[50]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/master/src/issue.py
|
||||
[51]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/master/src/pull_request.py
|
||||
[58]: https://docs.python.org/3/library/pickle.html
|
||||
|
||||
Release notes in Kubernetes are written in the PR's description, in a separate
`release-note` block like this:
|
||||
|
||||
````
|
||||
```release-note
|
||||
I changed something extremely important and you should note that.
|
||||
```
|
||||
````
|
||||
|
||||
Those release notes are parsed by [dedicated Release Engineering Tools like
|
||||
`krel`][52] during the release creation process and will be part of the various
|
||||
[CHANGELOG.md][53] files and the [Release Notes Website][54]. That seems like a
|
||||
lot of magic, but in the end, the quality of the overall release notes is much
|
||||
higher because they’re easy to edit, and the PR reviewers can ensure that we
|
||||
only document real user-facing changes and nothing else.
|
||||
|
||||
[52]: https://github.com/kubernetes/release#tools
|
||||
[53]: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
|
||||
[54]: https://relnotes.k8s.io
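Just to illustrate the idea (a simplified sketch, not how `krel` actually does
it), extracting such a block from a PR description is a small regular-expression
exercise:

```python
import re
from typing import Optional

# Matches a fenced ```release-note ... ``` block inside a PR description.
RELEASE_NOTE_RE = re.compile(
    r"```release-note\s*\n(.*?)\n\s*```", re.DOTALL | re.IGNORECASE
)


def extract_release_note(pr_body: str) -> Optional[str]:
    """Return the text of the release-note block, or None if absent/empty."""
    match = RELEASE_NOTE_RE.search(pr_body or "")
    if not match:
        return None
    note = match.group(1).strip()
    # By convention, "NONE" means the PR intentionally has no release note.
    return None if note.upper() == "NONE" else note


print(extract_release_note("```release-note\nFixed a thing.\n```"))
```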
|
||||
|
||||
The quality of the input data is a key aspect when doing data science. I decided
|
||||
to focus on the release notes because they seem to have the highest amount of
|
||||
overall quality when comparing them to the plain descriptions in issues and PRs.
|
||||
Besides that, they’re easy to parse, and we would not need to strip away
|
||||
the [various issue][55] and [PR template][56] text noise.
|
||||
|
||||
[55]: https://github.com/kubernetes/kubernetes/tree/master/.github/ISSUE_TEMPLATE
|
||||
[56]: https://github.com/kubernetes/kubernetes/blob/master/.github/PULL_REQUEST_TEMPLATE.md
|
||||
|
||||
## Labels, Labels, Labels
|
||||
|
||||
Issues and pull requests in Kubernetes get different labels applied during
their life-cycle. They are usually grouped via a single slash (`/`). For
example, we have `kind/bug` and `kind/api-change` or `sig/node` and
`sig/network`. An easy way to understand which label groups exist and how
they're distributed across the repository is to plot them into a bar chart:
|
||||
|
||||
```
|
||||
> ./main analyze --labels-by-group
|
||||
```
|
||||
|
||||

|
||||
|
||||
It looks like the `sig/`, `kind/` and `area/` labels are pretty common.
Something like `size/` can be ignored for now, because these labels are
applied automatically based on the amount of code changed in a pull request. We
said that we want to focus on release notes as input data, which means that we
have to check out the distribution of the labels for the PRs. These are the top
25 labels on pull requests:
|
||||
|
||||
```
|
||||
> ./main analyze --labels-by-name --pull-requests
|
||||
```
|
||||
|
||||

|
||||
|
||||
Again, we can ignore labels like `lgtm` (looks good to me), because every PR
that should get merged has to look good. Pull requests containing release
notes automatically get the `release-note` label applied, which makes further
filtering easier. This does not mean that every PR carrying that label
also contains the release-note block: the label could have been applied
manually, and the parsing of the release-note block has not existed since the
beginning of the project. This means we will probably lose a decent amount of
input data on the one hand. On the other hand, we can focus on the highest
possible data quality, because applying labels the right way requires a certain
maturity of the project and its contributors.
|
||||
|
||||
From a label group perspective, I have chosen to focus on the `kind/` labels.
These labels have to be applied manually by the author of the PR, they are
available on a good number of pull requests, and they relate to user-facing
changes as well. Besides that, a `kind/` choice has to be made for every pull
request because it is part of the PR template.
|
||||
|
||||
Alright, what does the distribution of those labels look like when focusing
only on pull requests which have release notes?
|
||||
|
||||
```
|
||||
> ./main analyze --release-notes-stats
|
||||
```
|
||||
|
||||

|
||||
|
||||
Interestingly, we have approximately 7,000 overall pull requests containing
|
||||
release notes, but only ~5,000 have a `kind/` label applied. The distribution of
|
||||
the labels is not equal, and one-third of them are labeled as `kind/bug`. This
|
||||
brings me to the next decision in my data science journey: I will build a binary
|
||||
classifier which, for the sake of simplicity, is only able to distinguish between
|
||||
bugs (via `kind/bug`) and non-bugs (where the label is not applied).
|
||||
|
||||
The main target is now to be able to classify newly incoming release notes if
|
||||
they are related to a bug or not, based on the historical data we already have
|
||||
from the community.
|
||||
|
||||
Before doing that, I recommend that you play around with the `./main analyze -h`
|
||||
subcommand as well to explore the latest set of data. You can also check out all
|
||||
the [continuously updated assets][57] I provide within the analysis repository.
|
||||
For example, those are the top 25 PR creators inside the Kubernetes repository:
|
||||
|
||||

|
||||
|
||||
[57]: https://github.com/kubernetes-analysis/kubernetes-analysis/tree/master/assets
|
||||
|
||||
# Building the Machine Learning Model
|
||||
|
||||
Now we have an idea what the data set is about, and we can start building a first
|
||||
machine learning model. Before actually building the model, we have to
|
||||
pre-process all the extracted release notes from the PRs. Otherwise, the model
|
||||
would not be able to understand our input.
|
||||
|
||||
## Doing some first Natural Language Processing (NLP)
|
||||
|
||||
In the beginning, we have to define a vocabulary for which we want to train. I
|
||||
decided to choose the [TfidfVectorizer][60] from the Python scikit-learn machine
|
||||
learning library. This vectorizer is able to take our input texts and create a
|
||||
single huge vocabulary out of it. This is our so-called [bag-of-words][61],
|
||||
which has a chosen n-gram range of `(1, 2)` (unigrams and bigrams). Practically
|
||||
this means that we always use the first word and the next one as a single
|
||||
vocabulary entry (bigrams). We also use the single word as vocabulary entry
|
||||
(unigram). The TfidfVectorizer is able to skip words that occur in too many
documents (`max_df`), and requires a minimum number of occurrences (`min_df`)
before adding a word to the vocabulary. I decided not to change those defaults
at first, simply because I had the intuition that release notes are something
unique to a project.
|
||||
|
||||
Parameters like `min_df`, `max_df` and the n-gram range can be seen as some of
|
||||
our hyperparameters. Those parameters have to be optimized in a dedicated step
|
||||
after the machine learning model has been built. This step is called
|
||||
hyperparameter tuning and basically means that we train multiple times with
|
||||
different parameters and compare the accuracy of the model. Afterwards, we choose
|
||||
the parameters with the best accuracy.
|
||||
|
||||
[60]: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
|
||||
[61]: https://en.wikipedia.org/wiki/Bag-of-words_model
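In scikit-learn terms, setting up such a vectorizer is only a few lines; a
minimal sketch (the parameter values and sample texts here are illustrative,
not necessarily the ones used in the project):

```python
from sklearn.feature_extraction.text import TfidfVectorizer

# Unigrams and bigrams; max_df drops terms that appear in too large a share of
# the documents, min_df requires a minimum number of documents per term.
vectorizer = TfidfVectorizer(ngram_range=(1, 2), max_df=0.95, min_df=2)

release_notes = [
    "fix azure disk dangling attach issue",
    "fix nvml initialization race condition",
    "fix brtfs disk metrics when using a subdirectory of a subvolume",
]
features = vectorizer.fit_transform(release_notes)  # sparse tf-idf matrix

print(len(vectorizer.vocabulary_))  # size of the resulting bag-of-words
```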
|
||||
|
||||
During the training, the vectorizer will produce a `data/features.json` file,
which contains the whole vocabulary. This gives us a good understanding of what
such a vocabulary may look like:
|
||||
|
||||
```json
|
||||
[
|
||||
…
|
||||
"hostname",
|
||||
"hostname address",
|
||||
"hostname and",
|
||||
"hostname as",
|
||||
"hostname being",
|
||||
"hostname bug",
|
||||
…
|
||||
]
|
||||
```
|
||||
|
||||
This produces roughly 50,000 entries in the overall bag-of-words, which is
quite a lot. Previous analyses across different data sets have shown that it is
simply not necessary to take that many features into account. Some general data
sets suggest that an overall vocabulary of 20,000 is enough, and higher amounts
do not influence the accuracy any more. To trim the vocabulary down we can use
the [SelectKBest][62] feature selector, which only keeps the top-scoring
features. Anyway, I still decided to stick to the top 50,000 to avoid negatively
influencing the model accuracy. We have a relatively low amount of data (approx.
7,000 samples) and a low number of words per sample (~15), which already made me
wonder if we have enough data at all.
|
||||
|
||||
[62]: https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SelectKBest.html
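As a rough sketch of what such a feature selection step looks like in
scikit-learn (the dummy data, `k` value, and score function are illustrative):

```python
import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif

# `features` stands in for the tf-idf matrix produced by the vectorizer and
# `labels` for the 0/1 kind/bug target (dummy data here to keep it runnable).
features = np.random.rand(100, 50)   # 100 samples, 50 features
labels = np.random.randint(0, 2, size=100)

# Keep only the k highest-scoring features according to an ANOVA F-test;
# the real data set would use something like k=20000.
selector = SelectKBest(f_classif, k=10)
selected = selector.fit_transform(features, labels)
print(selected.shape)  # (100, 10)
```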
|
||||
|
||||
The vectorizer is not only able to create our bag-of-words, it also encodes the
features in [term frequency–inverse document frequency (tf-idf)][63] format.
That is where the vectorizer got its name, and the output of that encoding is
something the machine learning model can directly consume. All the details of
the vectorization process can be found in the [source code][64].
|
||||
|
||||
[63]: https://en.wikipedia.org/wiki/Tf%e2%80%93idf
|
||||
[64]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/f419ff4a3462bafc0cb067aa6973dc7280409699/src/nlp.py#L193-L235
|
||||
|
||||
## Creating the Multi-Layer Perceptron (MLP) Model
|
||||
|
||||
I decided to choose a simple MLP based model which is built with the help of the
|
||||
popular [TensorFlow][70] framework. Because we do not have that much input data,
|
||||
we just use two hidden layers, so that the model basically looks like this:
|
||||
|
||||

|
||||
|
||||
[70]: https://www.tensorflow.org/api_docs/python/tf/keras
|
||||
|
||||
There are [multiple other][71] hyperparameters to take into account when
creating the model. I will not discuss them in detail here, but they are
important to optimize, also in relation to the number of classes we want the
model to distinguish (only two in our case).
|
||||
|
||||
[71]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/f419ff4a3462bafc0cb067aa6973dc7280409699/src/nlp.py#L95-L100
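To give an idea of the overall shape, here is a minimal tf.keras sketch of such
a two-hidden-layer MLP for binary classification, matching the "2 layers and 64
units" and the single sigmoid output unit mentioned in the training output of
the next section (the dropout rate and other values are illustrative, not the
project's exact settings):

```python
from tensorflow.keras import layers, models


def build_mlp(input_dim: int) -> models.Sequential:
    """Two hidden Dense layers plus a single sigmoid output unit."""
    model = models.Sequential([
        layers.Dropout(0.2, input_shape=(input_dim,)),
        layers.Dense(64, activation="relu"),
        layers.Dropout(0.2),
        layers.Dense(64, activation="relu"),
        layers.Dropout(0.2),
        layers.Dense(1, activation="sigmoid"),  # probability of kind/bug
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy",
                  metrics=["accuracy"])
    return model


model = build_mlp(input_dim=50_000)  # one input per bag-of-words feature
model.summary()
```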
|
||||
|
||||
## Training the Model
|
||||
|
||||
Before starting the actual training, we have to split up our input data into
|
||||
training and validation data sets. I’ve chosen to use ~80% of the data for
|
||||
training and 20% for validation purposes. We have to shuffle our input data as
|
||||
well to ensure that the model is not affected by ordering issues. The technical
|
||||
details of the training process can be found in the [GitHub sources][80]. So now
|
||||
we’re ready to finally start the training:
|
||||
|
||||
```
|
||||
> ./main train
|
||||
INFO | Using already extracted data from data/data.pickle
|
||||
INFO | Loading pickle dataset
|
||||
INFO | Parsed 34380 issues and 55832 pull requests (90212 items)
|
||||
INFO | Training for label 'kind/bug'
|
||||
INFO | 6980 items selected
|
||||
INFO | Using 5584 training and 1395 testing texts
|
||||
INFO | Number of classes: 2
|
||||
INFO | Vocabulary len: 51772
|
||||
INFO | Wrote features to file data/features.json
|
||||
INFO | Using units: 1
|
||||
INFO | Using activation function: sigmoid
|
||||
INFO | Created model with 2 layers and 64 units
|
||||
INFO | Compiling model
|
||||
INFO | Starting training
|
||||
Train on 5584 samples, validate on 1395 samples
|
||||
Epoch 1/1000
|
||||
5584/5584 - 3s - loss: 0.6895 - acc: 0.6789 - val_loss: 0.6856 - val_acc: 0.6860
|
||||
Epoch 2/1000
|
||||
5584/5584 - 2s - loss: 0.6822 - acc: 0.6827 - val_loss: 0.6782 - val_acc: 0.6860
|
||||
Epoch 3/1000
|
||||
…
|
||||
Epoch 68/1000
|
||||
5584/5584 - 2s - loss: 0.2587 - acc: 0.9257 - val_loss: 0.4847 - val_acc: 0.7728
|
||||
INFO | Confusion matrix:
|
||||
[[920 32]
|
||||
[291 152]]
|
||||
INFO | Confusion matrix normalized:
|
||||
[[0.966 0.034]
|
||||
[0.657 0.343]]
|
||||
INFO | Saving model to file data/model.h5
|
||||
INFO | Validation accuracy: 0.7727598547935486, loss: 0.48470408514836355
|
||||
```
|
||||
|
||||
The output of the [Confusion Matrix][81] shows us that we’re pretty good on
|
||||
training accuracy, but the validation accuracy could be a bit higher. We now
|
||||
could start a hyperparameter tuning to see if we can optimize the output of the
|
||||
model even further. I will leave that experiment up to you with the hint to the
|
||||
`./main train --tune` flag.
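As a side note, the normalized confusion matrix printed above can be reproduced
from the validation predictions with a couple of lines of scikit-learn (a sketch
with made-up example values):

```python
import numpy as np
from sklearn.metrics import confusion_matrix

# y_val holds the true 0/1 labels of the validation split and y_prob the
# sigmoid outputs of the model; 0.5 is the usual decision threshold.
y_val = np.array([0, 0, 1, 1, 1, 0])
y_prob = np.array([0.1, 0.4, 0.8, 0.3, 0.9, 0.6])
y_pred = (y_prob > 0.5).astype(int)

cm = confusion_matrix(y_val, y_pred)
cm_normalized = cm / cm.sum(axis=1, keepdims=True)  # row-wise normalization
print(cm_normalized)
```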
|
||||
|
||||
We saved the model (`data/model.h5`), the vectorizer (`data/vectorizer.pickle`)
|
||||
and the feature selector (`data/selector.pickle`) to disk to be able to use them
|
||||
later on for prediction purposes without having a need for additional training
|
||||
steps.
|
||||
|
||||
[80]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/f419ff4a3462bafc0cb067aa6973dc7280409699/src/nlp.py#L91-L170
|
||||
[81]: https://en.wikipedia.org/wiki/Confusion_matrix
|
||||
|
||||
## A first Prediction
|
||||
|
||||
We are now able to test the model by loading it from disk and predicting some
|
||||
input text:
|
||||
|
||||
```
|
||||
> ./main predict --test
|
||||
INFO | Testing positive text:
|
||||
|
||||
Fix concurrent map access panic
|
||||
Don't watch .mount cgroups to reduce number of inotify watches
|
||||
Fix NVML initialization race condition
|
||||
Fix brtfs disk metrics when using a subdirectory of a subvolume
|
||||
|
||||
INFO | Got prediction result: 0.9940581321716309
|
||||
INFO | Matched expected positive prediction result
|
||||
INFO | Testing negative text:
|
||||
|
||||
action required
|
||||
1. Currently, if users were to explicitly specify CacheSize of 0 for
|
||||
KMS provider, they would end-up with a provider that caches up to
|
||||
1000 keys. This PR changes this behavior.
|
||||
Post this PR, when users supply 0 for CacheSize this will result in
|
||||
a validation error.
|
||||
2. CacheSize type was changed from int32 to *int32. This allows
|
||||
defaulting logic to differentiate between cases where users
|
||||
explicitly supplied 0 vs. not supplied any value.
|
||||
3. KMS Provider's endpoint (path to Unix socket) is now validated when
|
||||
the EncryptionConfiguration files is loaded. This used to be handled
|
||||
by the GRPCService.
|
||||
|
||||
INFO | Got prediction result: 0.1251964420080185
|
||||
INFO | Matched expected negative prediction result
|
||||
```
|
||||
|
||||
Both tests are real-world examples which already exist. We could also try
|
||||
something completely different, like this random tweet I found a couple of
|
||||
minutes ago:
|
||||
|
||||
```
|
||||
./main predict "My dudes, if you can understand SYN-ACK, you can understand consent"
|
||||
INFO | Got prediction result: 0.1251964420080185
|
||||
ERROR | Result is lower than selected threshold 0.6
|
||||
```
|
||||
|
||||
Looks like it is not classified as a bug release note, which seems to work.
Selecting a good threshold is not that easy, but sticking to something above
50% should be the bare minimum.
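Under the hood, such a prediction is essentially just loading the persisted
artifacts and pushing the text through them; roughly like this simplified
sketch (not the tool's actual code):

```python
import pickle

from tensorflow.keras.models import load_model

# The artifacts written during training (see above).
model = load_model("data/model.h5")
with open("data/vectorizer.pickle", "rb") as f:
    vectorizer = pickle.load(f)
with open("data/selector.pickle", "rb") as f:
    selector = pickle.load(f)


def predict(text: str, threshold: float = 0.6) -> bool:
    """Return True if the release note is classified as kind/bug."""
    features = selector.transform(vectorizer.transform([text])).toarray()
    result = float(model.predict(features)[0][0])
    print(f"Got prediction result: {result}")
    return result > threshold
```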
|
||||
|
||||
# Automate Everything
|
||||
|
||||
The next step is to find a way to automate continuously updating the model
with new data. If I change any source code within my repository, then I'd like
to get feedback about the test results of the model without having to run the
training on my own machine. I would also like to utilize the GPUs in my
Kubernetes cluster to train faster, and to automatically update the data set
whenever a PR gets merged.
|
||||
|
||||
With the help of [Kubeflow pipelines][90] we can fulfill most of these
|
||||
requirements. The pipeline I built looks like this:
|
||||
|
||||
[90]: https://www.kubeflow.org/docs/pipelines/overview/pipelines-overview
|
||||
|
||||

|
||||
|
||||
First, we check out the source code of the PR, which will be passed on as output
|
||||
artifact to all other steps. Then we incrementally update the API and internal
|
||||
data before we run the training on an always up-to-date data set. The prediction
|
||||
test verifies after the training that we did not badly influence the model with
|
||||
our changes.
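For readers who have not used Kubeflow Pipelines before, such a pipeline is
defined in Python with the kfp SDK; here is a heavily simplified sketch of the
idea (the image name and step wiring are placeholders, not the project's real
pipeline definition):

```python
import kfp
import kfp.dsl as dsl


@dsl.pipeline(name="kubernetes-analysis",
              description="Update the data, train and test the model")
def analysis_pipeline(revision: str = "master"):
    # Each step runs the all-in-one executable inside a container image;
    # "kubernetes-analysis:latest" is a placeholder image name.
    checkout = dsl.ContainerOp(
        name="checkout",
        image="kubernetes-analysis:latest",
        command=["git", "checkout", revision],
    )

    update = dsl.ContainerOp(
        name="update-data",
        image="kubernetes-analysis:latest",
        command=["./main", "export", "--update-api"],
    ).after(checkout)

    train = dsl.ContainerOp(
        name="train",
        image="kubernetes-analysis:latest",
        command=["./main", "train"],
    ).after(update)

    dsl.ContainerOp(
        name="predict-test",
        image="kubernetes-analysis:latest",
        command=["./main", "predict", "--test"],
    ).after(train)


if __name__ == "__main__":
    kfp.compiler.Compiler().compile(analysis_pipeline, "pipeline.yaml")
```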
|
||||
|
||||
We also built a container image within our pipeline. [This container image][91]
|
||||
copies the previously built model, vectorizer, and selector into a container and
|
||||
runs `./main serve`. When doing this, we spin up a [kfserving][92] web server,
|
||||
which can be used for prediction purposes. Do you want to try it out by yourself? Simply
|
||||
do a JSON POST request like this and run the prediction against the endpoint:
|
||||
|
||||
```
|
||||
> curl https://kfserving.k8s.saschagrunert.de/v1/models/kubernetes-analysis:predict \
|
||||
-d '{"text": "my test text"}'
|
||||
{"result": 0.1251964420080185}
|
||||
```
|
||||
|
||||
The [custom kfserving][93] implementation is pretty straightforward, while the
deployment utilizes [Knative Serving][95] and an [Istio][94] ingress gateway
under the hood to correctly route the traffic into the cluster and provide the
right set of services.
|
||||
|
||||
[91]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/master/Dockerfile-deploy
|
||||
[92]: https://www.kubeflow.org/docs/components/serving/kfserving
|
||||
[93]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/master/src/kfserver.py
|
||||
[94]: https://istio.io
|
||||
[95]: https://knative.dev/docs/serving
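For orientation, a custom predictor along these lines usually boils down to a
small class on top of the kfserving Python SDK; a minimal sketch (assuming the
kfserving 0.x `KFModel`/`KFServer` API and the request/response shape from the
curl example above, not the repository's exact code):

```python
import kfserving


class AnalysisModel(kfserving.KFModel):
    def __init__(self, name: str):
        super().__init__(name)
        self.ready = False

    def load(self):
        # Load model, vectorizer and selector from disk here (see above).
        self.ready = True

    def predict(self, request: dict) -> dict:
        text = request["text"]
        # Run the text through vectorizer, selector and model here.
        result = 0.0  # placeholder for the real prediction value
        return {"result": result}


if __name__ == "__main__":
    model = AnalysisModel("kubernetes-analysis")
    model.load()
    kfserving.KFServer(workers=1).start([model])
```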
|
||||
|
||||
The `commit-changes` and `rollout` step will only run if the pipeline runs on
|
||||
the `master` branch. Those steps make sure that we always have the latest data
|
||||
set available on the master branch as well as in the kfserving deployment. The
|
||||
[rollout step][96] creates a new canary deployment, which only accepts 50% of the
|
||||
incoming traffic in the first place. After the canary got deployed successfully,
|
||||
it will be promoted as the new main instance of the service. This is a great way
|
||||
to ensure that the deployment works as intended and allows additional testing
|
||||
after rolling out the canary.
|
||||
|
||||
[96]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/f419ff4a3462bafc0cb067aa6973dc7280409699/src/rollout.py#L30-L51
|
||||
|
||||
But how do we trigger Kubeflow pipelines when creating a pull request? Kubeflow
has no feature for that right now. That's why I decided to use [Prow][100], the
Kubernetes test-infrastructure project, for CI/CD purposes.
|
||||
|
||||
First of all, a [24h periodic job][101] ensures that we have at least daily
|
||||
up-to-date data available within the repository. Then, if we create a pull
|
||||
request, Prow will run the whole Kubeflow pipeline without committing or rolling
|
||||
out any changes. If we merge the pull request via Prow, another job runs on the
|
||||
master and updates the data as well as the deployment. That’s pretty neat, isn’t
|
||||
it?
|
||||
|
||||
[100]: https://github.com/kubernetes/test-infra/tree/master/prow
|
||||
[101]: https://github.com/kubernetes-analysis/kubernetes-analysis/blob/f419ff4a3462bafc0cb067aa6973dc7280409699/ci/config.yaml#L45-L61
|
||||
|
||||
# Automatic Labeling of new PRs
|
||||
|
||||
The prediction API is nice for testing, but now we need a real-world use case.
|
||||
Prow supports external plugins which can be used to take action on any GitHub
|
||||
event. I wrote [a plugin][110] which uses the kfserving API to make predictions
|
||||
based on new pull requests. This means if we now create a new pull request in
|
||||
the kubernetes-analysis repository, we will see the following:
|
||||
|
||||
[110]: https://github.com/kubernetes-analysis/kubernetes-analysis/tree/master/pkg
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||

|
||||
|
||||
Okay cool, so now let’s change the release note based on a real bug from the
|
||||
already existing dataset:
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||

|
||||
|
||||
The bot edits its own comment, predicts the note as `kind/bug` with roughly 90%
confidence and automatically adds the correct label! Now, if we change it again
to some different - obviously wrong - release note:
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||

|
||||
|
||||
The bot does the work for us, removes the label and informs us what it did!
|
||||
Finally, if we change the release note to `None`:
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||

|
||||
|
||||
The bot removed the comment, which is nice and reduces the text noise on the PR.
|
||||
Everything I demonstrated is running inside a single Kubernetes cluster, which
would make it unnecessary to expose the kfserving API to the public at all. This
introduces an indirect API rate limit, because the only access would be
possible via the Prow bot user.
|
||||
|
||||
If you want to try it out for yourself, feel free to open a [new test
|
||||
issue][111] in `kubernetes-analysis`. This works because I enabled the plugin
|
||||
also for issues rather than only for pull requests.
|
||||
|
||||
[111]: https://github.com/kubernetes-analysis/kubernetes-analysis/issues/new?&template=release-notes-test.md
|
||||
|
||||
So then, we have a running CI bot which is able to classify new release notes
based on a machine learning model. If the bot ran in the official
Kubernetes repository, we could correct wrong label predictions manually.
This way, the next training iteration would pick up the correction and result in
a continuously improving model over time. All totally automated!
|
||||
|
||||
# Summary
|
||||
|
||||
Thank you for reading down to here! This was my little data science journey
|
||||
through the Kubernetes GitHub repository. There are a lot of other things to
|
||||
optimize, for example introducing more classes (than just `kind/bug` or nothing)
|
||||
or automatic hyperparameter tuning with Kubeflow's [Katib][120]. If you have any
|
||||
questions or suggestions, then feel free to get in touch with me anytime. See you
|
||||
soon!
|
||||
|
||||
[120]: https://www.kubeflow.org/docs/components/hyperparameter-tuning/hyperparameter
|
||||
|
|
@ -0,0 +1,201 @@
|
|||
---
|
||||
layout: blog
|
||||
title: "K8s KPIs with Kuberhealthy"
|
||||
date: 2020-05-29
|
||||
---
|
||||
|
||||
**Authors:** Joshulyne Park (Comcast), Eric Greer (Comcast)
|
||||
|
||||
### Building Onward from Kuberhealthy v2.0.0
|
||||
|
||||
Last November at KubeCon San Diego 2019, we announced the release of
|
||||
[Kuberhealthy 2.0.0](https://www.youtube.com/watch?v=aAJlWhBtzqY) - transforming Kuberhealthy into a Kubernetes operator
|
||||
for synthetic monitoring. This new ability granted developers the means to create their own Kuberhealthy check
|
||||
containers to synthetically monitor their applications and clusters. The community was quick to adopt this new feature and we're grateful for everyone who implemented and tested Kuberhealthy 2.0.0 in their clusters. Thanks to all of you who reported
|
||||
issues and contributed to discussions on the #kuberhealthy Slack channel. We quickly set to work to address all your feedback
|
||||
with a newer version of Kuberhealthy. Additionally, we created a guide on how to easily install and use Kuberhealthy in order to capture some helpful synthetic [KPIs](https://kpi.org/KPI-Basics).
|
||||
|
||||
### Deploying Kuberhealthy
|
||||
|
||||
To install Kuberhealthy, make sure you have [Helm 3](https://helm.sh/docs/intro/install/) installed. If not, you can use the generated flat spec files located
|
||||
in this [deploy folder](https://github.com/Comcast/kuberhealthy/tree/master/deploy). You should use [kuberhealthy-prometheus.yaml](https://github.com/Comcast/kuberhealthy/blob/master/deploy/kuberhealthy-prometheus.yaml) if you don't use the [Prometheus Operator](https://github.com/coreos/prometheus-operator), and [kuberhealthy-prometheus-operator.yaml](https://github.com/Comcast/kuberhealthy/blob/master/deploy/kuberhealthy-prometheus-operator.yaml) if you do. If you don't use Prometheus at all, you can still use Kuberhealthy with a JSON status page and/or InfluxDB integration using [this spec](https://github.com/Comcast/kuberhealthy/blob/master/deploy/kuberhealthy.yaml).
|
||||
|
||||
#### To install using Helm 3:
|
||||
##### 1. Create namespace "kuberhealthy" in the desired Kubernetes cluster/context:
|
||||
```
|
||||
kubectl create namespace kuberhealthy
|
||||
```
|
||||
##### 2. Set your current namespace to "kuberhealthy":
|
||||
```
|
||||
kubectl config set-context --current --namespace=kuberhealthy
|
||||
```
|
||||
##### 3. Add the kuberhealthy repo to Helm:
|
||||
```
|
||||
helm repo add kuberhealthy https://comcast.github.io/kuberhealthy/helm-repos
|
||||
```
|
||||
##### 4. Depending on your Prometheus implementation, install Kuberhealthy using the appropriate command for your cluster:
|
||||
|
||||
- If you use the [Prometheus Operator](https://github.com/coreos/prometheus-operator):
|
||||
```
|
||||
helm install kuberhealthy kuberhealthy/kuberhealthy --set prometheus.enabled=true,prometheus.enableAlerting=true,prometheus.enableScraping=true,prometheus.serviceMonitor=true
|
||||
```
|
||||
|
||||
- If you use Prometheus, but NOT Prometheus Operator:
|
||||
```
|
||||
helm install kuberhealthy kuberhealthy/kuberhealthy --set prometheus.enabled=true,prometheus.enableAlerting=true,prometheus.enableScraping=true
|
||||
```
|
||||
See additional details about configuring the appropriate scrape annotations in the section [Prometheus Integration Details](#prometheus-integration-details) below.
|
||||
|
||||
- Finally, if you don't use Prometheus:
|
||||
```
|
||||
helm install kuberhealthy kuberhealthy/kuberhealthy
|
||||
```
|
||||
|
||||
Running the Helm command should automatically install the newest version of Kuberhealthy (v2.2.0) along with a few basic checks. If you run `kubectl get pods`, you should see two Kuberhealthy pods. These are the pods that create, coordinate, and track test pods. These two Kuberhealthy pods also serve a JSON status page as well as a `/metrics` endpoint. Every other pod you see created is a checker pod designed to execute and shut down when done.
|
||||
|
||||
### Configuring Additional Checks
|
||||
|
||||
Next, you can run `kubectl get khchecks`. You should see three Kuberhealthy checks installed by default:
|
||||
- [daemonset](https://github.com/Comcast/kuberhealthy/tree/master/cmd/daemonset-check): Deploys and tears down a daemonset to ensure all nodes in the cluster are functional.
|
||||
- [deployment](https://github.com/Comcast/kuberhealthy/tree/master/cmd/deployment-check): Creates a deployment and then triggers a rolling update. Tests that the deployment is reachable via a service and then deletes everything. Any problem in this process will cause this check to report a failure.
|
||||
- [dns-status-internal](https://github.com/Comcast/kuberhealthy/tree/master/cmd/dns-resolution-check): Validates that internal cluster DNS is functioning as expected.
|
||||
|
||||
To view other available external checks, check out the [external checks registry](https://github.com/Comcast/kuberhealthy/blob/master/docs/EXTERNAL_CHECKS_REGISTRY.md) where you can find other yaml files you can apply to your cluster to enable various checks.
|
||||
|
||||
Kuberhealthy check pods should start running shortly after Kuberhealthy starts running (1-2 minutes). Additionally, the check-reaper cronjob runs every few minutes to ensure there are no more than 5 completed checker pods left lying around at a time.
|
||||
|
||||
To get a status page view of these checks, you'll need to either expose the `kuberhealthy` service externally by editing it and setting `Type: LoadBalancer`, or use `kubectl port-forward service/kuberhealthy 8080:80`. When viewed, the service endpoint will display a JSON status page that looks like this:
|
||||
|
||||
```json
|
||||
{
|
||||
"OK": true,
|
||||
"Errors": [],
|
||||
"CheckDetails": {
|
||||
"kuberhealthy/daemonset": {
|
||||
"OK": true,
|
||||
"Errors": [],
|
||||
"RunDuration": "22.512278967s",
|
||||
"Namespace": "kuberhealthy",
|
||||
"LastRun": "2020-04-06T23:20:31.7176964Z",
|
||||
"AuthoritativePod": "kuberhealthy-67bf8c4686-mbl2j",
|
||||
"uuid": "9abd3ec0-b82f-44f0-b8a7-fa6709f759cd"
|
||||
},
|
||||
"kuberhealthy/deployment": {
|
||||
"OK": true,
|
||||
"Errors": [],
|
||||
"RunDuration": "29.142295647s",
|
||||
"Namespace": "kuberhealthy",
|
||||
"LastRun": "2020-04-06T23:20:31.7176964Z",
|
||||
"AuthoritativePod": "kuberhealthy-67bf8c4686-mbl2j",
|
||||
"uuid": "5f0d2765-60c9-47e8-b2c9-8bc6e61727b2"
|
||||
},
|
||||
"kuberhealthy/dns-status-internal": {
|
||||
"OK": true,
|
||||
"Errors": [],
|
||||
"RunDuration": "2.43940936s",
|
||||
"Namespace": "kuberhealthy",
|
||||
"LastRun": "2020-04-06T23:20:44.6294547Z",
|
||||
"AuthoritativePod": "kuberhealthy-67bf8c4686-mbl2j",
|
||||
"uuid": "c85f95cb-87e2-4ff5-b513-e02b3d25973a"
|
||||
}
|
||||
},
|
||||
"CurrentMaster": "kuberhealthy-7cf79bdc86-m78qr"
|
||||
}
|
||||
```
|
||||
|
||||
This JSON page displays all Kuberhealthy checks running in your cluster. If you have Kuberhealthy checks running in different namespaces, you can filter them by appending the `namespace` GET parameter to the status page URL, for example: `?namespace=kuberhealthy,kube-system`.
|
||||
|
||||
|
||||
### Writing Your Own Checks
|
||||
|
||||
Kuberhealthy is designed to be extended with custom check containers that can be written by anyone to check anything. These checks can be written in any language as long as they are packaged in a container. This makes Kuberhealthy an excellent platform for creating your own synthetic checks!
|
||||
|
||||
Creating your own check is a great way to validate your client library, simulate real user workflows, and create a high level of confidence in your service or system uptime.
|
||||
|
||||
To learn more about writing your own checks, along with simple examples, check the [custom check creation](https://github.com/Comcast/kuberhealthy/blob/master/docs/EXTERNAL_CHECK_CREATION.md) documentation.
|
||||
|
||||
|
||||
### Prometheus Integration Details
|
||||
|
||||
When you enable Prometheus (but not Prometheus Operator), the Kuberhealthy service gets the following annotations added:
|
||||
```yaml
|
||||
prometheus.io/path: /metrics
|
||||
prometheus.io/port: "80"
|
||||
prometheus.io/scrape: "true"
|
||||
```
|
||||
|
||||
In your Prometheus configuration, add the following example `scrape_config`, which scrapes the Kuberhealthy service based on the annotations added above:
|
||||
|
||||
```yaml
|
||||
- job_name: 'kuberhealthy'
|
||||
scrape_interval: 1m
|
||||
honor_labels: true
|
||||
metrics_path: /metrics
|
||||
kubernetes_sd_configs:
|
||||
- role: service
|
||||
namespaces:
|
||||
names:
|
||||
- kuberhealthy
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
|
||||
action: keep
|
||||
regex: true
|
||||
```
|
||||
|
||||
You can also specify the target endpoint to be scraped using this example job:
|
||||
```yaml
|
||||
- job_name: kuberhealthy
|
||||
scrape_interval: 1m
|
||||
honor_labels: true
|
||||
metrics_path: /metrics
|
||||
static_configs:
|
||||
- targets:
|
||||
- kuberhealthy.kuberhealthy.svc.cluster.local:80
|
||||
```
|
||||
|
||||
Once the appropriate Prometheus configuration is applied, you should be able to see the following Kuberhealthy metrics:
|
||||
- `kuberhealthy_check`
|
||||
- `kuberhealthy_check_duration_seconds`
|
||||
- `kuberhealthy_cluster_states`
|
||||
- `kuberhealthy_running`
|
||||
|
||||
### Creating Key Performance Indicators
|
||||
|
||||
Using these Kuberhealthy metrics, our team has been able to collect KPIs based on the following definitions, calculations, and PromQL queries.
|
||||
|
||||
*Availability*
|
||||
|
||||
We define availability as the K8s cluster control plane being up and functioning as expected. This is measured by our ability to create a deployment, do a rolling update, and delete the deployment within a set period of time.
|
||||
|
||||
We calculate this by measuring Kuberhealthy's [deployment check](https://github.com/Comcast/kuberhealthy/tree/master/cmd/deployment-check) successes and failures.
|
||||
- Availability = Uptime / (Uptime + Downtime)
|
||||
- Uptime = Number of Deployment Check Passes * Check Run Interval
|
||||
- Downtime = Number of Deployment Check Fails * Check Run Interval
|
||||
- Check Run Interval = how often the check runs (`runInterval` set in your KuberhealthyCheck Spec)
|
||||
|
||||
- PromQL Query (Availability % over the past 30 days):
|
||||
```promql
|
||||
1 - (sum(count_over_time(kuberhealthy_check{check="kuberhealthy/deployment", status="0"}[30d])) OR vector(0))/(sum(count_over_time(kuberhealthy_check{check="kuberhealthy/deployment", status="1"}[30d])) * 100)
|
||||
```
|
||||
|
||||
*Utilization*
|
||||
|
||||
We define utilization as user uptake of the product (Kubernetes) and its resources (pods, services, etc.). This is measured by how many nodes, deployments, statefulsets, persistent volumes, services, pods, and jobs are being utilized by our customers.
|
||||
We calculate this by counting the total number of nodes, deployments, statefulsets, persistent volumes, services, pods, and jobs.
|
||||
|
||||
*Duration (Latency)*
|
||||
|
||||
We define duration as the control plane's capacity and utilization of throughput. We calculate this by capturing the average run duration of a Kuberhealthy [deployment check](https://github.com/Comcast/kuberhealthy/tree/master/cmd/deployment-check) run.
|
||||
|
||||
- PromQL Query (Deployment check average run duration):
|
||||
```promql
|
||||
avg(kuberhealthy_check_duration_seconds{check="kuberhealthy/deployment"})
|
||||
```
|
||||
|
||||
*Errors / Alerts*
|
||||
|
||||
We define errors as all k8s cluster and Kuberhealthy related alerts. Every time one of our Kuberhealthy checks fails, we are alerted of this failure.
|
||||
|
||||
### Thank You!
|
||||
|
||||
Thanks again to everyone in the community for all of your contributions and help! We are excited to see what you build. As always, if you find an issue, have a feature request, or need to open a pull request, please [open an issue](https://github.com/Comcast/kuberhealthy/issues) on the GitHub project.
|
||||
|
|
@ -0,0 +1,56 @@
|
|||
---
|
||||
layout: blog
|
||||
title: Supporting the Evolving Ingress Specification in Kubernetes 1.18
|
||||
date: 2020-06-05
|
||||
slug: Supporting-the-Evolving-Ingress-Specification-in-Kubernetes-1.18
|
||||
---
|
||||
|
||||
**Authors:** Alex Gervais (Datawire.io)
|
||||
|
||||
Earlier this year, the Kubernetes team released [Kubernetes 1.18](https://kubernetes.io/blog/2020/03/25/kubernetes-1-18-release-announcement/), which extended Ingress. In this blog post, we’ll walk through what’s new in the new Ingress specification, what it means for your applications, and how to upgrade to an ingress controller that supports this new specification.
|
||||
|
||||
### What is Kubernetes Ingress
|
||||
When deploying your applications in Kubernetes, one of the first challenges many people encounter is how to get traffic into their cluster. [Kubernetes ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) is a collection of routing rules that govern how external users access services running in a Kubernetes cluster. There are [three general approaches](https://blog.getambassador.io/kubernetes-ingress-nodeport-load-balancers-and-ingress-controllers-6e29f1c44f2d) for exposing your application:
|
||||
|
||||
* Using a `NodePort` to expose your application on a port across each of your nodes
|
||||
* Using a `LoadBalancer` service to create an external load balancer that points to a Kubernetes service in your cluster
|
||||
* Using a Kubernetes Ingress resource
|
||||
|
||||
### What’s new in Kubernetes 1.18 Ingress
|
||||
There are three significant additions to the Ingress API in Kubernetes 1.18:
|
||||
|
||||
* A new `pathType` field
|
||||
* A new `IngressClass` resource
|
||||
* Support for wildcards in hostnames
|
||||
|
||||
The new `pathType` field allows you to specify how Ingress paths should match.
|
||||
The field supports three types: `ImplementationSpecific` (default), `Exact`, and `Prefix`. Explicitly defining the expected behavior of path matching allows every ingress controller to support a user's needs and increases portability between ingress controller implementations.
|
||||
|
||||
The `IngressClass` resource specifies how Ingresses should be implemented by controllers. This was added to formalize the commonly used but never standardized `kubernetes.io/ingress.class` annotation and allow for implementation-specific extensions and configuration.
|
||||
|
||||
You can read more about these changes, as well as the support for wildcards in hostnames in more detail in [a previous blog post](https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/).
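
As a quick illustration (not taken from the release itself), an Ingress that exercises these additions might look like the following sketch; the class, host, and service names are placeholders.

```yaml
# A minimal sketch of an Ingress using the 1.18 additions: an explicit
# IngressClass reference, a pathType, and a wildcard host.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: example-ingress
spec:
  ingressClassName: example-class      # refers to an IngressClass resource
  rules:
    - host: "*.example.com"            # wildcard hosts are new in 1.18
      http:
        paths:
          - path: /app
            pathType: Prefix           # Exact | Prefix | ImplementationSpecific
            backend:
              serviceName: example-service
              servicePort: 80
```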
|
||||
|
||||
## Supporting Kubernetes ingress
|
||||
[Ambassador](https://www.getambassador.io) is an open-source Envoy-based ingress controller. We believe strongly in supporting common standards such as Kubernetes ingress, which we adopted and [announced our initial support for back in 2019](https://blog.getambassador.io/ambassador-ingress-controller-better-config-reporting-updated-envoy-proxy-99dc9139e28f).
|
||||
|
||||
Every Ambassador release goes through rigorous testing. Therefore, we also contributed an [open conformance test suite](https://github.com/kubernetes-sigs/ingress-controller-conformance) supporting Kubernetes ingress. We wrote the initial bits of test code and will keep iterating over the newly added features and different versions of the Ingress specification as it evolves to a stable v1 GA release. Documentation and usage samples are among our top priorities. We understand how complex usage can be, especially when transitioning from a previous version of an API.
|
||||
|
||||
Following a test-driven development approach, the first step we took in supporting Ingress improvements in Ambassador was to translate the revised specification -- both in terms of API and behavior -- into a comprehensible test suite. The test suite, although still under heavy development and going through multiple iterations, was rapidly added to the Ambassador CI infrastructure and acceptance criteria. This means every change to the Ambassador codebase going forward will be compliant with the Ingress API and be tested end-to-end in a lightweight [KIND cluster](https://kind.sigs.k8s.io/). Using KIND allowed us to make rapid improvements while limiting our cloud provider infrastructure bill and testing out unreleased Kubernetes features with pre-release builds.
|
||||
|
||||
### Adopting a new specification
|
||||
With a global comprehension of the additions to Ingress introduced in Kubernetes 1.18 and a test suite on hand, we tackled the task of adapting the Ambassador code so that it would support translating the high-level Ingress API resources into Envoy configurations and constructs. Luckily, Ambassador already supported previous versions of the Ingress functionality, so the development effort was incremental.
|
||||
|
||||
We settled on a controller name of `getambassador.io/ingress-controller`. This value, consistent with Ambassador's domain and CRD versions, must be used to tie in an IngressClass `spec.controller` with an Ambassador deployment. The new IngressClass resource allows for extensibility by setting a `spec.parameters` field. At the moment Ambassador makes no use of this field and its usage is reserved for future development.
|
||||
|
||||
Paths can now define different matching behaviors using the `pathType` field. The field will default to a value of `ImplementationSpecific`, which uses the same matching rules as the [Ambassador Mappings](https://www.getambassador.io/docs/latest/topics/using/mappings/) prefix field and previous Ingress specification for backward compatibility reasons.
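
A hypothetical IngressClass tying Ingresses to an Ambassador deployment might therefore look like the sketch below; the resource name is arbitrary, and the `parameters` block is commented out because, as noted above, Ambassador does not use it yet.

```yaml
# Illustrative IngressClass for the controller name described above.
apiVersion: networking.k8s.io/v1beta1
kind: IngressClass
metadata:
  name: ambassador
spec:
  controller: getambassador.io/ingress-controller
  # parameters:          # reserved for future, implementation-specific use
  #   apiGroup: getambassador.io
  #   kind: ClusterIngressParameter
  #   name: external
```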
|
||||
|
||||
### Kubernetes Ingress Controllers
|
||||
A comprehensive [list of Kubernetes ingress controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) is available in the Kubernetes documentation. Currently, Ambassador is the only ingress controller that supports these new additions to the ingress specification. Powered by the [Envoy Proxy](https://www.envoyproxy.io), Ambassador is the fastest way for you to try out the new ingress specification today.
|
||||
|
||||
Check out the following resources:
|
||||
|
||||
* Ambassador on [GitHub](https://www.github.com/datawire/ambassador)
|
||||
* The Ambassador [documentation](https://www.getambassador.io/docs)
|
||||
* [Improvements to the Ingress API](https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/)
|
||||
|
||||
Or join the community on [Slack](http://d6e.co/slack)!
|
||||
|
|
@ -212,4 +212,4 @@ The cloud controller manager uses Go interfaces to allow implementations from an
|
|||
The implementation of the shared controllers highlighted in this document (Node, Route, and Service), and some scaffolding along with the shared cloudprovider interface, is part of the Kubernetes core. Implementations specific to cloud providers are outside the core of Kubernetes and implement the `CloudProvider` interface.
|
||||
|
||||
For more information about developing plugins, see [Developing Cloud Controller Manager](/docs/tasks/administer-cluster/developing-cloud-controller-manager/).
|
||||
{{% /capture %}}
|
||||
{{% /capture %}}
|
||||
|
|
@ -1,7 +1,6 @@
|
|||
---
|
||||
reviewers:
|
||||
- dchen1107
|
||||
- roberthbailey
|
||||
- liggitt
|
||||
title: Control Plane-Node Communication
|
||||
content_template: templates/concept
|
||||
|
|
|
|||
|
|
@ -400,9 +400,9 @@ By using the IBM Cloud Kubernetes Service provider, you can create clusters with
|
|||
The name of the Kubernetes Node object is the private IP address of the IBM Cloud Kubernetes Service worker node instance.
|
||||
|
||||
### Networking
|
||||
The IBM Cloud Kubernetes Service provider provides VLANs for quality network performance and network isolation for nodes. You can set up custom firewalls and Calico network policies to add an extra layer of security for your cluster, or connect your cluster to your on-prem data center via VPN. For more information, see [Planning in-cluster and private networking](https://cloud.ibm.com/docs/containers?topic=containers-cs_network_cluster#cs_network_cluster).
|
||||
The IBM Cloud Kubernetes Service provider provides VLANs for quality network performance and network isolation for nodes. You can set up custom firewalls and Calico network policies to add an extra layer of security for your cluster, or connect your cluster to your on-prem data center via VPN. For more information, see [Planning your cluster network setup](https://cloud.ibm.com/docs/containers?topic=containers-plan_clusters).
|
||||
|
||||
To expose apps to the public or within the cluster, you can leverage NodePort, LoadBalancer, or Ingress services. You can also customize the Ingress application load balancer with annotations. For more information, see [Planning to expose your apps with external networking](https://cloud.ibm.com/docs/containers?topic=containers-cs_network_planning#cs_network_planning).
|
||||
To expose apps to the public or within the cluster, you can leverage NodePort, LoadBalancer, or Ingress services. You can also customize the Ingress application load balancer with annotations. For more information, see [Choosing an app exposure service](https://cloud.ibm.com/docs/containers?topic=containers-cs_network_planning#cs_network_planning).
|
||||
|
||||
### Storage
|
||||
The IBM Cloud Kubernetes Service provider leverages Kubernetes-native persistent volumes to enable users to mount file, block, and cloud object storage to their apps. You can also use database-as-a-service and third-party add-ons for persistent storage of your data. For more information, see [Planning highly available persistent storage](https://cloud.ibm.com/docs/containers?topic=containers-storage_planning#storage_planning).
|
||||
|
|
|
|||
|
|
@ -136,7 +136,7 @@ classes:
|
|||
controllers.
|
||||
|
||||
* The `workload-low` priority level is for requests from any other service
|
||||
account, which will typically include all requests from controllers runing in
|
||||
account, which will typically include all requests from controllers running in
|
||||
Pods.
|
||||
|
||||
* The `global-default` priority level handles all other traffic, e.g.
|
||||
|
|
@ -375,4 +375,4 @@ the [enhancement proposal](https://github.com/kubernetes/enhancements/blob/maste
|
|||
You can make suggestions and feature requests via [SIG API
|
||||
Machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery).
|
||||
|
||||
{{% /capture %}}
|
||||
{{% /capture %}}
|
||||
|
|
|
|||
|
|
@ -402,7 +402,7 @@ For more information, please see [kubectl edit](/docs/reference/generated/kubect
|
|||
|
||||
You can use `kubectl patch` to update API objects in place. This command supports JSON patch,
|
||||
JSON merge patch, and strategic merge patch. See
|
||||
[Update API Objects in Place Using kubectl patch](/docs/tasks/run-application/update-api-object-kubectl-patch/)
|
||||
[Update API Objects in Place Using kubectl patch](/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/)
|
||||
and
|
||||
[kubectl patch](/docs/reference/generated/kubectl/kubectl-commands/#patch).
|
||||
|
||||
|
|
|
|||
|
|
@ -157,6 +157,91 @@ or {{< glossary_tooltip text="operators" term_id="operator-pattern" >}} that
|
|||
adjust their behavior based on a ConfigMap.
|
||||
{{< /note >}}
|
||||
|
||||
## Using ConfigMaps
|
||||
|
||||
ConfigMaps can be mounted as data volumes. ConfigMaps can also be used by other
|
||||
parts of the system, without being directly exposed to the Pod. For example,
|
||||
ConfigMaps can hold data that other parts of the system should use for configuration.
|
||||
|
||||
### Using ConfigMaps as files from a Pod
|
||||
|
||||
To consume a ConfigMap in a volume in a Pod:
|
||||
|
||||
1. Create a config map or use an existing one. Multiple Pods can reference the same config map.
|
||||
1. Modify your Pod definition to add a volume under `.spec.volumes[]`. Name the volume anything, and have a `.spec.volumes[].configMap.localObjectReference` field set to reference your ConfigMap object.
|
||||
1. Add a `.spec.containers[].volumeMounts[]` to each container that needs the config map. Specify `.spec.containers[].volumeMounts[].readOnly = true` and `.spec.containers[].volumeMounts[].mountPath` to an unused directory name where you would like the config map to appear.
|
||||
1. Modify your image or command line so that the program looks for files in that directory. Each key in the config map `data` map becomes the filename under `mountPath`.
|
||||
|
||||
This is an example of a Pod that mounts a ConfigMap in a volume:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: mypod
|
||||
spec:
|
||||
containers:
|
||||
- name: mypod
|
||||
image: redis
|
||||
volumeMounts:
|
||||
- name: foo
|
||||
mountPath: "/etc/foo"
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: foo
|
||||
configMap:
|
||||
name: myconfigmap
|
||||
```
|
||||
|
||||
Each ConfigMap you want to use needs to be referred to in `.spec.volumes`.
|
||||
|
||||
If there are multiple containers in the Pod, then each container needs its
|
||||
own `volumeMounts` block, but only one `.spec.volumes` is needed per ConfigMap.
|
||||
|
||||
#### Mounted ConfigMaps are updated automatically
|
||||
|
||||
When a config map currently consumed in a volume is updated, projected keys are eventually updated as well.
|
||||
The kubelet checks whether the mounted config map is fresh on every periodic sync.
|
||||
However, the kubelet uses its local cache for getting the current value of the ConfigMap.
|
||||
The type of the cache is configurable using the `ConfigMapAndSecretChangeDetectionStrategy` field in
|
||||
the [KubeletConfiguration struct](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go).
|
||||
Updates to a ConfigMap can be propagated by watch (the default), by TTL-based polling, or by redirecting
|
||||
all requests directly to the API server.
|
||||
As a result, the total delay from the moment when the ConfigMap is updated to the moment
|
||||
when new keys are projected to the Pod can be as long as the kubelet sync period + cache
|
||||
propagation delay, where the cache propagation delay depends on the chosen cache type
|
||||
(it equals the watch propagation delay, the TTL of the cache, or zero, respectively).
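
As an illustration, if you manage the kubelet through a configuration file, the relevant field might look like the following sketch; the value `Watch` shown here is assumed to be the default strategy described above.

```yaml
# Sketch of a kubelet configuration file selecting the change detection strategy.
# Other documented values are "Cache" (TTL-based) and "Get" (ask the API server).
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
configMapAndSecretChangeDetectionStrategy: Watch
```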
|
||||
|
||||
{{< feature-state for_k8s_version="v1.18" state="alpha" >}}
|
||||
|
||||
The Kubernetes alpha feature _Immutable Secrets and ConfigMaps_ provides an option to set
|
||||
individual Secrets and ConfigMaps as immutable. For clusters that extensively use ConfigMaps
|
||||
(at least tens of thousands of unique ConfigMap to Pod mounts), preventing changes to their
|
||||
data has the following advantages:
|
||||
|
||||
- protects you from accidental (or unwanted) updates that could cause application outages
|
||||
- improves performance of your cluster by significantly reducing load on kube-apiserver, by
|
||||
closing watches for config maps marked as immutable.
|
||||
|
||||
To use this feature, enable the `ImmutableEphemeralVolumes`
|
||||
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) and set
|
||||
your Secret or ConfigMap `immutable` field to `true`. For example:
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
...
|
||||
data:
|
||||
...
|
||||
immutable: true
|
||||
```
|
||||
|
||||
{{< note >}}
|
||||
Once a ConfigMap or Secret is marked as immutable, it is _not_ possible to revert this change
|
||||
nor to mutate the contents of the `data` field. You can only delete and recreate the ConfigMap.
|
||||
Existing Pods maintain a mount point to the deleted ConfigMap - it is recommended to recreate
|
||||
these pods.
|
||||
{{< /note >}}
|
||||
|
||||
{{% /capture %}}
|
||||
{{% capture whatsnext %}}
|
||||
|
|
|
|||
|
|
@ -725,7 +725,7 @@ data has the following advantages:
|
|||
- improves performance of your cluster by significantly reducing load on kube-apiserver, by
|
||||
closing watches for secrets marked as immutable.
|
||||
|
||||
To use this feature, enable the `ImmutableEmphemeralVolumes`
|
||||
To use this feature, enable the `ImmutableEphemeralVolumes`
|
||||
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) and set
|
||||
your Secret or ConfigMap `immutable` field to `true`. For example:
|
||||
```yaml
|
||||
|
|
|
|||
|
|
@ -151,7 +151,7 @@ Once you have those variables filled in you can
|
|||
### Using IBM Cloud Container Registry
|
||||
IBM Cloud Container Registry provides a multi-tenant private image registry that you can use to safely store and share your images. By default, images in your private registry are scanned by the integrated Vulnerability Advisor to detect security issues and potential vulnerabilities. Users in your IBM Cloud account can access your images, or you can use IAM roles and policies to grant access to IBM Cloud Container Registry namespaces.
|
||||
|
||||
To install the IBM Cloud Container Registry CLI plug-in and create a namespace for your images, see [Getting started with IBM Cloud Container Registry](https://cloud.ibm.com/docs/Registry?topic=registry-getting-started).
|
||||
To install the IBM Cloud Container Registry CLI plug-in and create a namespace for your images, see [Getting started with IBM Cloud Container Registry](https://cloud.ibm.com/docs/Registry?topic=Registry-getting-started).
|
||||
|
||||
If you are using the same account and region, you can deploy images that are stored in IBM Cloud Container Registry into the default namespace of your IBM Cloud Kubernetes Service cluster without any additional configuration, see [Building containers from images](https://cloud.ibm.com/docs/containers?topic=containers-images). For other configuration options, see [Understanding how to authorize your cluster to pull images from a registry](https://cloud.ibm.com/docs/containers?topic=containers-registry#cluster_registry_auth).
|
||||
|
||||
|
|
|
|||
|
|
@ -186,7 +186,7 @@ Aggregated APIs offer more advanced API features and customization of other feat
|
|||
| Scale Subresource | Allows systems like HorizontalPodAutoscaler and PodDisruptionBudget interact with your new resource | [Yes](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#scale-subresource) | Yes |
|
||||
| Status Subresource | Allows fine-grained access control where user writes the spec section and the controller writes the status section. Allows incrementing object Generation on custom resource data mutation (requires separate spec and status sections in the resource) | [Yes](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#status-subresource) | Yes |
|
||||
| Other Subresources | Add operations other than CRUD, such as "logs" or "exec". | No | Yes |
|
||||
| strategic-merge-patch | The new endpoints support PATCH with `Content-Type: application/strategic-merge-patch+json`. Useful for updating objects that may be modified both locally, and by the server. For more information, see ["Update API Objects in Place Using kubectl patch"](/docs/tasks/run-application/update-api-object-kubectl-patch/) | No | Yes |
|
||||
| strategic-merge-patch | The new endpoints support PATCH with `Content-Type: application/strategic-merge-patch+json`. Useful for updating objects that may be modified both locally, and by the server. For more information, see ["Update API Objects in Place Using kubectl patch"](/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/) | No | Yes |
|
||||
| Protocol Buffers | The new resource supports clients that want to use Protocol Buffers | No | Yes |
|
||||
| OpenAPI Schema | Is there an OpenAPI (swagger) schema for the types that can be dynamically fetched from the server? Is the user protected from misspelling field names by ensuring only allowed fields are set? Are types enforced (in other words, don't put an `int` in a `string` field?) | Yes, based on the [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) schema (GA in 1.16). | Yes |
|
||||
|
||||
|
|
|
|||
|
|
@ -11,73 +11,94 @@ card:
|
|||
|
||||
{{% capture overview %}}
|
||||
|
||||
Overall API conventions are described in the [API conventions doc](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md).
|
||||
The core of Kubernetes' {{< glossary_tooltip text="control plane" term_id="control-plane" >}}
|
||||
is the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}}. The API server
|
||||
exposes an HTTP API that lets end users, different parts of your cluster, and external components
|
||||
communicate with one another.
|
||||
|
||||
API endpoints, resource types and samples are described in [API Reference](/docs/reference).
|
||||
The Kubernetes API lets you query and manipulate the state of objects in the Kubernetes API
|
||||
(for example: Pods, Namespaces, ConfigMaps, and Events).
|
||||
|
||||
Remote access to the API is discussed in the [Controlling API Access doc](/docs/reference/access-authn-authz/controlling-access/).
|
||||
|
||||
The Kubernetes API also serves as the foundation for the declarative configuration schema for the system. The [kubectl](/docs/reference/kubectl/overview/) command-line tool can be used to create, update, delete, and get API objects.
|
||||
|
||||
Kubernetes also stores its serialized state (currently in [etcd](https://coreos.com/docs/distributed-configuration/getting-started-with-etcd/)) in terms of the API resources.
|
||||
|
||||
Kubernetes itself is decomposed into multiple components, which interact through its API.
|
||||
API endpoints, resource types and samples are described in the [API Reference](/docs/reference/kubernetes-api/).
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
{{% capture body %}}
|
||||
|
||||
## API changes
|
||||
|
||||
In our experience, any system that is successful needs to grow and change as new use cases emerge or existing ones change. Therefore, we expect the Kubernetes API to continuously change and grow. However, we intend to not break compatibility with existing clients, for an extended period of time. In general, new API resources and new resource fields can be expected to be added frequently. Elimination of resources or fields will require following the [API deprecation policy](/docs/reference/using-api/deprecation-policy/).
|
||||
Any system that is successful needs to grow and change as new use cases emerge or existing ones change.
|
||||
Therefore, Kubernetes has design features to allow the Kubernetes API to continuously change and grow.
|
||||
The Kubernetes project aims to _not_ break compatibility with existing clients, and to maintain that
|
||||
compatibility for a length of time so that other projects have an opportunity to adapt.
|
||||
|
||||
What constitutes a compatible change and how to change the API are detailed by the [API change document](https://git.k8s.io/community/contributors/devel/sig-architecture/api_changes.md).
|
||||
In general, new API resources and new resource fields can be added frequently.
|
||||
Elimination of resources or fields requires following the
|
||||
[API deprecation policy](/docs/reference/using-api/deprecation-policy/).
|
||||
|
||||
## OpenAPI and Swagger definitions
|
||||
What constitutes a compatible change, and how to change the API, are detailed in
|
||||
[API changes](https://git.k8s.io/community/contributors/devel/sig-architecture/api_changes.md#readme).
|
||||
|
||||
## OpenAPI specification {#api-specification}
|
||||
|
||||
Complete API details are documented using [OpenAPI](https://www.openapis.org/).
|
||||
|
||||
Starting with Kubernetes 1.10, the Kubernetes API server serves an OpenAPI spec via the `/openapi/v2` endpoint.
|
||||
The requested format is specified by setting HTTP headers:
|
||||
The Kubernetes API server serves an OpenAPI spec via the `/openapi/v2` endpoint.
|
||||
You can request the response format using request headers as follows:
|
||||
|
||||
Header | Possible Values
|
||||
------ | ---------------
|
||||
Accept | `application/json`, `application/com.github.proto-openapi.spec.v2@v1.0+protobuf` (the default content-type is `application/json` for `*/*` or not passing this header)
|
||||
Accept-Encoding | `gzip` (not passing this header is acceptable)
|
||||
|
||||
Prior to 1.14, format-separated endpoints (`/swagger.json`, `/swagger-2.0.0.json`, `/swagger-2.0.0.pb-v1`, `/swagger-2.0.0.pb-v1.gz`)
|
||||
serve the OpenAPI spec in different formats. These endpoints are deprecated, and are removed in Kubernetes 1.14.
|
||||
|
||||
**Examples of getting OpenAPI spec**:
|
||||
|
||||
Before 1.10 | Starting with Kubernetes 1.10
|
||||
----------- | -----------------------------
|
||||
GET /swagger.json | GET /openapi/v2 **Accept**: application/json
|
||||
GET /swagger-2.0.0.pb-v1 | GET /openapi/v2 **Accept**: application/com.github.proto-openapi.spec.v2@v1.0+protobuf
|
||||
GET /swagger-2.0.0.pb-v1.gz | GET /openapi/v2 **Accept**: application/com.github.proto-openapi.spec.v2@v1.0+protobuf **Accept-Encoding**: gzip
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Header</th>
|
||||
<th style="min-width: 50%;">Possible values</th>
|
||||
<th>Notes</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><code>Accept-Encoding</code></td>
|
||||
<td><code>gzip</code></td>
|
||||
<td><em>not supplying this header is also acceptable</em></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td rowspan="3"><code>Accept</code></td>
|
||||
<td><code>application/com.github.proto-openapi.spec.v2@v1.0+protobuf</code></td>
|
||||
<td><em>mainly for intra-cluster use</em></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>application/json</code></td>
|
||||
<td><em>default</em></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>*</code></td>
|
||||
<td><em>serves </em><code>application/json</code></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
<caption>Valid request header values for OpenAPI v2 queries</caption>
|
||||
</table>
|
||||
|
||||
Kubernetes implements an alternative Protobuf-based serialization format for the API that is primarily intended for intra-cluster communication. This format is documented in the [design proposal](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/protobuf.md), and the IDL files for each schema are located in the Go packages that define the API objects.
|
||||
|
||||
Prior to 1.14, the Kubernetes apiserver also exposed an API that could be used to retrieve
|
||||
the [Swagger v1.2](http://swagger.io/) Kubernetes API spec at `/swaggerapi`.
|
||||
This endpoint was deprecated and then removed in Kubernetes 1.14.
|
||||
|
||||
## API versioning
|
||||
|
||||
To make it easier to eliminate fields or restructure resource representations, Kubernetes supports
|
||||
multiple API versions, each at a different API path, such as `/api/v1` or
|
||||
`/apis/extensions/v1beta1`.
|
||||
|
||||
We chose to version at the API level rather than at the resource or field level to ensure that the API presents a clear, consistent view of system resources and behavior, and to enable controlling access to end-of-life and/or experimental APIs. The JSON and Protobuf serialization schemas follow the same guidelines for schema changes - all descriptions below cover both formats.
|
||||
Versioning is done at the API level rather than at the resource or field level to ensure that the
|
||||
API presents a clear, consistent view of system resources and behavior, and to enable controlling
|
||||
access to end-of-life and/or experimental APIs.
|
||||
|
||||
Note that API versioning and Software versioning are only indirectly related. The [API and release
|
||||
versioning proposal](https://git.k8s.io/community/contributors/design-proposals/release/versioning.md) describes the relationship between API versioning and
|
||||
software versioning.
|
||||
The JSON and Protobuf serialization schemas follow the same guidelines for schema changes - all descriptions below cover both formats.
|
||||
|
||||
Note that API versioning and Software versioning are only indirectly related. The
|
||||
[Kubernetes Release Versioning](https://git.k8s.io/community/contributors/design-proposals/release/versioning.md)
|
||||
proposal describes the relationship between API versioning and software versioning.
|
||||
|
||||
Different API versions imply different levels of stability and support. The criteria for each level are described
|
||||
in more detail in the [API Changes documentation](https://git.k8s.io/community/contributors/devel/sig-architecture/api_changes.md#alpha-beta-and-stable-versions). They are summarized here:
|
||||
in more detail in the
|
||||
[API Changes](https://git.k8s.io/community/contributors/devel/sig-architecture/api_changes.md#alpha-beta-and-stable-versions)
|
||||
documentation. They are summarized here:
|
||||
|
||||
- Alpha level:
|
||||
- The version names contain `alpha` (e.g. `v1alpha1`).
|
||||
|
|
@ -101,35 +122,36 @@ in more detail in the [API Changes documentation](https://git.k8s.io/community/c
|
|||
|
||||
## API groups
|
||||
|
||||
To make it easier to extend the Kubernetes API, we implemented [*API groups*](https://git.k8s.io/community/contributors/design-proposals/api-machinery/api-group.md).
|
||||
To make it easier to extend its API, Kubernetes implements [*API groups*](https://git.k8s.io/community/contributors/design-proposals/api-machinery/api-group.md).
|
||||
The API group is specified in a REST path and in the `apiVersion` field of a serialized object.
|
||||
|
||||
Currently there are several API groups in use:
|
||||
There are several API groups in a cluster:
|
||||
|
||||
1. The *core* group, often referred to as the *legacy group*, is at the REST path `/api/v1` and uses `apiVersion: v1`.
|
||||
1. The *core* group, also referred to as the *legacy* group, is at the REST path `/api/v1` and uses `apiVersion: v1`.
|
||||
|
||||
1. The named groups are at REST path `/apis/$GROUP_NAME/$VERSION`, and use `apiVersion: $GROUP_NAME/$VERSION`
|
||||
(e.g. `apiVersion: batch/v1`). Full list of supported API groups can be seen in [Kubernetes API reference](/docs/reference/).
|
||||
1. *Named* groups are at REST path `/apis/$GROUP_NAME/$VERSION`, and use `apiVersion: $GROUP_NAME/$VERSION`
|
||||
(e.g. `apiVersion: batch/v1`). The Kubernetes [API reference](/docs/reference/kubernetes-api/) has a
|
||||
full list of available API groups.
|
||||
|
||||
|
||||
There are two supported paths to extending the API with [custom resources](/docs/concepts/api-extension/custom-resources/):
|
||||
There are two paths to extending the API with [custom resources](/docs/concepts/api-extension/custom-resources/):
|
||||
|
||||
1. [CustomResourceDefinition](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/)
|
||||
is for users with very basic CRUD needs.
|
||||
1. Users needing the full set of Kubernetes API semantics can implement their own apiserver
|
||||
lets you declaratively define how the API server should provide your chosen resource API.
|
||||
1. You can also [implement your own extension API server](/docs/tasks/access-kubernetes-api/setup-extension-api-server/)
|
||||
and use the [aggregator](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/)
|
||||
to make it seamless for clients.
|
||||
|
||||
|
||||
## Enabling or disabling API groups
|
||||
|
||||
Certain resources and API groups are enabled by default. They can be enabled or disabled by setting `--runtime-config`
|
||||
on apiserver. `--runtime-config` accepts comma separated values. For example: to disable batch/v1, set
|
||||
`--runtime-config=batch/v1=false`, to enable batch/v2alpha1, set `--runtime-config=batch/v2alpha1`.
|
||||
The flag accepts comma separated set of key=value pairs describing runtime configuration of the apiserver.
|
||||
Certain resources and API groups are enabled by default. They can be enabled or disabled by setting `--runtime-config`
|
||||
as a command line option to the kube-apiserver.
|
||||
|
||||
{{< note >}}Enabling or disabling groups or resources requires restarting apiserver and controller-manager
|
||||
to pick up the `--runtime-config` changes.{{< /note >}}
|
||||
`--runtime-config` accepts comma separated values. For example: to disable batch/v1, set
|
||||
`--runtime-config=batch/v1=false`; to enable batch/v2alpha1, set `--runtime-config=batch/v2alpha1`.
|
||||
The flag accepts a comma-separated set of key=value pairs describing the runtime configuration of the API server.
|
||||
|
||||
{{< note >}}Enabling or disabling groups or resources requires restarting the kube-apiserver and the
|
||||
kube-controller-manager to pick up the `--runtime-config` changes.{{< /note >}}
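
For illustration only, assuming a kubeadm-style static Pod manifest for the API server (typically `/etc/kubernetes/manifests/kube-apiserver.yaml`), the flag might be set like this sketch:

```yaml
# Minimal sketch; all other kube-apiserver flags are omitted for brevity.
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
    - name: kube-apiserver
      image: k8s.gcr.io/kube-apiserver:v1.18.0   # placeholder version
      command:
        - kube-apiserver
        - --runtime-config=batch/v2alpha1=true   # enable the batch/v2alpha1 group
        # ...remaining flags unchanged...
```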
|
||||
|
||||
## Enabling specific resources in the extensions/v1beta1 group
|
||||
|
||||
|
|
@ -139,4 +161,20 @@ For example: to enable deployments and daemonsets, set
|
|||
|
||||
{{< note >}}Individual resource enablement/disablement is only supported in the `extensions/v1beta1` API group for legacy reasons.{{< /note >}}
|
||||
|
||||
## Persistence
|
||||
|
||||
Kubernetes stores its serialized state in terms of the API resources by writing them into
|
||||
{{< glossary_tooltip term_id="etcd" >}}.
|
||||
|
||||
{{% /capture %}}
|
||||
{{% capture whatsnext %}}
|
||||
[Controlling API Access](/docs/reference/access-authn-authz/controlling-access/) describes
|
||||
how the cluster manages authentication and authorization for API access.
|
||||
|
||||
Overall API conventions are described in the
|
||||
[API conventions](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#api-conventions)
|
||||
document.
|
||||
|
||||
API endpoints, resource types and samples are described in the [API Reference](/docs/reference/kubernetes-api/).
|
||||
|
||||
{{% /capture %}}
|
||||
|
|
|
|||
|
|
@ -16,12 +16,7 @@ kubectl get pods --field-selector status.phase=Running
|
|||
```
|
||||
|
||||
{{< note >}}
|
||||
Field selectors are essentially resource *filters*. By default, no selectors/filters are applied, meaning that all resources of the specified type are selected. This makes the following `kubectl` queries equivalent:
|
||||
|
||||
```shell
|
||||
kubectl get pods
|
||||
kubectl get pods --field-selector ""
|
||||
```
|
||||
Field selectors are essentially resource *filters*. By default, no selectors/filters are applied, meaning that all resources of the specified type are selected. This makes the `kubectl` queries `kubectl get pods` and `kubectl get pods --field-selector ""` equivalent.
|
||||
{{< /note >}}
|
||||
|
||||
## Supported fields
|
||||
|
|
|
|||
|
|
@ -56,8 +56,8 @@ developers of non-critical applications. The following listed controls should be
|
|||
enforced/disallowed:
|
||||
|
||||
<table>
|
||||
<caption style="display:none">Baseline policy specification</caption>
|
||||
<tbody>
|
||||
<caption style="display:none">Baseline policy specification</caption>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><strong>Control</strong></td>
|
||||
<td><strong>Policy</strong></td>
|
||||
|
|
@ -115,10 +115,10 @@ enforced/disallowed:
|
|||
<tr>
|
||||
<td>AppArmor <em>(optional)</em></td>
|
||||
<td>
|
||||
On supported hosts, the `runtime/default` AppArmor profile is applied by default. The default policy should prevent overriding or disabling the policy, or restrict overrides to a whitelisted set of profiles.<br>
|
||||
On supported hosts, the 'runtime/default' AppArmor profile is applied by default. The default policy should prevent overriding or disabling the policy, or restrict overrides to a whitelisted set of profiles.<br>
|
||||
<br><b>Restricted Fields:</b><br>
|
||||
metadata.annotations['container.apparmor.security.beta.kubernetes.io/*']<br>
|
||||
<br><b>Allowed Values:</b> runtime/default, undefined<br>
|
||||
<br><b>Allowed Values:</b> 'runtime/default', undefined<br>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
|
|
@ -132,6 +132,31 @@ enforced/disallowed:
|
|||
<br><b>Allowed Values:</b> undefined/nil<br>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>/proc Mount Type</td>
|
||||
<td>
|
||||
The default /proc masks are set up to reduce attack surface, and should be required.<br>
|
||||
<br><b>Restricted Fields:</b><br>
|
||||
spec.containers[*].securityContext.procMount<br>
|
||||
spec.initContainers[*].securityContext.procMount<br>
|
||||
<br><b>Allowed Values:</b> undefined/nil, 'Default'<br>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Sysctls</td>
|
||||
<td>
|
||||
Sysctls can disable security mechanisms or affect all containers on a host, and should be disallowed except for a whitelisted "safe" subset.
|
||||
A sysctl is considered safe if it is namespaced in the container or the Pod, and it is isolated from other Pods or processes on the same Node.<br>
|
||||
<br><b>Restricted Fields:</b><br>
|
||||
spec.securityContext.sysctls<br>
|
||||
<br><b>Allowed Values:</b><br>
|
||||
kernel.shm_rmid_forced<br>
|
||||
net.ipv4.ip_local_port_range<br>
|
||||
net.ipv4.tcp_syncookies<br>
|
||||
net.ipv4.ping_group_range<br>
|
||||
undefined/empty<br>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
|
|
@ -143,7 +168,7 @@ well as lower-trust users.The following listed controls should be enforced/disal
|
|||
|
||||
|
||||
<table>
|
||||
<caption style="display:none">Restricted policy specification</caption>
|
||||
<caption style="display:none">Restricted policy specification</caption>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><strong>Control</strong></td>
|
||||
|
|
@ -184,7 +209,7 @@ well as lower-trust users.The following listed controls should be enforced/disal
|
|||
<tr>
|
||||
<td>Privilege Escalation</td>
|
||||
<td>
|
||||
Privilege escalation to root should not be allowed.<br>
|
||||
Privilege escalation to root should not be allowed.<br>
|
||||
<br><b>Restricted Fields:</b><br>
|
||||
spec.containers[*].securityContext.privileged<br>
|
||||
spec.initContainers[*].securityContext.privileged<br>
|
||||
|
|
@ -194,7 +219,7 @@ well as lower-trust users.The following listed controls should be enforced/disal
|
|||
<tr>
|
||||
<td>Running as Non-root</td>
|
||||
<td>
|
||||
Containers must be required to run as non-root users.<br>
|
||||
Containers must be required to run as non-root users.<br>
|
||||
<br><b>Restricted Fields:</b><br>
|
||||
spec.securityContext.runAsNonRoot<br>
|
||||
spec.containers[*].securityContext.runAsNonRoot<br>
|
||||
|
|
@ -205,7 +230,7 @@ well as lower-trust users.The following listed controls should be enforced/disal
|
|||
<tr>
|
||||
<td>Non-root groups <em>(optional)</em></td>
|
||||
<td>
|
||||
Containers should be forbidden from running with a root primary or supplementary GID.<br>
|
||||
Containers should be forbidden from running with a root primary or supplementary GID.<br>
|
||||
<br><b>Restricted Fields:</b><br>
|
||||
spec.securityContext.runAsGroup<br>
|
||||
spec.securityContext.supplementalGroups[*]<br>
|
||||
|
|
@ -224,12 +249,12 @@ well as lower-trust users.The following listed controls should be enforced/disal
|
|||
<tr>
|
||||
<td>Seccomp</td>
|
||||
<td>
|
||||
The runtime/default seccomp profile must be required, or allow additional whitelisted values.<br>
|
||||
The 'runtime/default' seccomp profile must be required, or allow additional whitelisted values.<br>
|
||||
<br><b>Restricted Fields:</b><br>
|
||||
metadata.annotations['seccomp.security.alpha.kubernetes.io/pod']<br>
|
||||
metadata.annotations['container.seccomp.security.alpha.kubernetes.io/*']<br>
|
||||
<br><b>Allowed Values:</b><br>
|
||||
runtime/default<br>
|
||||
'runtime/default'<br>
|
||||
undefined (container annotation)<br>
|
||||
</td>
|
||||
</tr>
|
||||
|
|
|
|||
|
|
@ -93,7 +93,7 @@ hostaliases-pod 0/1 Completed 0 6s 10.200
|
|||
The `hosts` file content would look like this:
|
||||
|
||||
```shell
|
||||
kubectl exec hostaliases-pod -- cat /etc/hosts
|
||||
kubectl logs hostaliases-pod
|
||||
```
|
||||
|
||||
```none
|
||||
|
|
|
|||
|
|
@ -456,15 +456,13 @@ spec:
|
|||
```
|
||||
|
||||
#### Regional Persistent Disks
|
||||
{{< feature-state for_k8s_version="v1.10" state="beta" >}}
|
||||
|
||||
The [Regional Persistent Disks](https://cloud.google.com/compute/docs/disks/#repds) feature allows the creation of Persistent Disks that are available in two zones within the same region. In order to use this feature, the volume must be provisioned as a PersistentVolume; referencing the volume directly from a pod is not supported.
|
||||
|
||||
#### Manually provisioning a Regional PD PersistentVolume
|
||||
Dynamic provisioning is possible using a [StorageClass for GCE PD](/docs/concepts/storage/storage-classes/#gce).
|
||||
Before creating a PersistentVolume, you must create the PD:
|
||||
```shell
|
||||
gcloud beta compute disks create --size=500GB my-data-disk
|
||||
gcloud compute disks create --size=500GB my-data-disk
|
||||
--region us-central1
|
||||
--replica-zones us-central1-a,us-central1-b
|
||||
```
|
||||
|
|
@ -475,8 +473,6 @@ apiVersion: v1
|
|||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: test-volume
|
||||
labels:
|
||||
failure-domain.beta.kubernetes.io/zone: us-central1-a__us-central1-b
|
||||
spec:
|
||||
capacity:
|
||||
storage: 400Gi
|
||||
|
|
@ -485,6 +481,15 @@ spec:
|
|||
gcePersistentDisk:
|
||||
pdName: my-data-disk
|
||||
fsType: ext4
|
||||
nodeAffinity:
|
||||
required:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: failure-domain.beta.kubernetes.io/zone
|
||||
operator: In
|
||||
values:
|
||||
- us-central1-a
|
||||
- us-central1-b
|
||||
```
|
||||
|
||||
#### CSI Migration
|
||||
|
|
|
|||
|
|
@ -18,9 +18,9 @@ collected. Deleting a DaemonSet will clean up the Pods it created.
|
|||
|
||||
Some typical uses of a DaemonSet are:
|
||||
|
||||
- running a cluster storage daemon, such as `glusterd`, `ceph`, on each node.
|
||||
- running a logs collection daemon on every node, such as `fluentd` or `filebeat`.
|
||||
- running a node monitoring daemon on every node, such as [Prometheus Node Exporter](https://github.com/prometheus/node_exporter), [Flowmill](https://github.com/Flowmill/flowmill-k8s/), [Sysdig Agent](https://docs.sysdig.com), `collectd`, [Dynatrace OneAgent](https://www.dynatrace.com/technologies/kubernetes-monitoring/), [AppDynamics Agent](https://docs.appdynamics.com/display/CLOUD/Container+Visibility+with+Kubernetes), [Datadog agent](https://docs.datadoghq.com/agent/kubernetes/daemonset_setup/), [New Relic agent](https://docs.newrelic.com/docs/integrations/kubernetes-integration/installation/kubernetes-installation-configuration), Ganglia `gmond`, [Instana Agent](https://www.instana.com/supported-integrations/kubernetes-monitoring/) or [Elastic Metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/running-on-kubernetes.html).
|
||||
- running a cluster storage daemon on every node
|
||||
- running a logs collection daemon on every node
|
||||
- running a node monitoring daemon on every node
|
||||
|
||||
In a simple case, one DaemonSet, covering all nodes, would be used for each type of daemon.
|
||||
A more complex setup might use multiple DaemonSets for a single type of daemon, but with
|
||||
|
|
@ -95,7 +95,7 @@ another DaemonSet, or via another workload resource such as ReplicaSet. Otherwi
|
|||
Kubernetes will not stop you from doing this. One case where you might want to do this is to manually
|
||||
create a Pod with a different value on a node for testing.
|
||||
|
||||
### Running Pods on Only Some Nodes
|
||||
### Running Pods on select Nodes
|
||||
|
||||
If you specify a `.spec.template.spec.nodeSelector`, then the DaemonSet controller will
|
||||
create Pods on nodes which match that [node
|
||||
|
|
@ -103,7 +103,7 @@ selector](/docs/concepts/scheduling-eviction/assign-pod-node/). Likewise if you
|
|||
then DaemonSet controller will create Pods on nodes which match that [node affinity](/docs/concepts/scheduling-eviction/assign-pod-node/).
|
||||
If you do not specify either, then the DaemonSet controller will create Pods on all nodes.
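
As a brief, illustrative sketch (not part of the original page), a DaemonSet restricted to nodes carrying a hypothetical `ssd=true` label might look like this:

```yaml
# Daemon Pods are created only on nodes labelled ssd=true.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: example-daemon
spec:
  selector:
    matchLabels:
      name: example-daemon
  template:
    metadata:
      labels:
        name: example-daemon
    spec:
      nodeSelector:
        ssd: "true"                     # the label selector for eligible nodes
      containers:
        - name: daemon
          image: k8s.gcr.io/pause:3.2   # placeholder image
```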
|
||||
|
||||
## How Daemon Pods are Scheduled
|
||||
## How Daemon Pods are scheduled
|
||||
|
||||
### Scheduled by default scheduler
|
||||
|
||||
|
|
@ -144,7 +144,6 @@ In addition, `node.kubernetes.io/unschedulable:NoSchedule` toleration is added
|
|||
automatically to DaemonSet Pods. The default scheduler ignores
|
||||
`unschedulable` Nodes when scheduling DaemonSet Pods.
|
||||
|
||||
|
||||
### Taints and Tolerations
|
||||
|
||||
Although Daemon Pods respect
|
||||
|
|
@ -152,17 +151,14 @@ Although Daemon Pods respect
|
|||
the following tolerations are added to DaemonSet Pods automatically according to
|
||||
the related features.
|
||||
|
||||
| Toleration Key | Effect | Version | Description |
|
||||
| ---------------------------------------- | ---------- | ------- | ------------------------------------------------------------ |
|
||||
| `node.kubernetes.io/not-ready` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. |
|
||||
| `node.kubernetes.io/unreachable` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. |
|
||||
| `node.kubernetes.io/disk-pressure` | NoSchedule | 1.8+ | |
|
||||
| `node.kubernetes.io/memory-pressure` | NoSchedule | 1.8+ | |
|
||||
| `node.kubernetes.io/unschedulable` | NoSchedule | 1.12+ | DaemonSet pods tolerate unschedulable attributes by default scheduler. |
|
||||
| `node.kubernetes.io/network-unavailable` | NoSchedule | 1.12+ | DaemonSet pods, who uses host network, tolerate network-unavailable attributes by default scheduler. |
|
||||
|
||||
|
||||
|
||||
| Toleration Key | Effect | Version | Description |
|
||||
| ---------------------------------------- | ---------- | ------- | ----------- |
|
||||
| `node.kubernetes.io/not-ready` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. |
|
||||
| `node.kubernetes.io/unreachable` | NoExecute | 1.13+ | DaemonSet pods will not be evicted when there are node problems such as a network partition. |
|
||||
| `node.kubernetes.io/disk-pressure` | NoSchedule | 1.8+ | |
|
||||
| `node.kubernetes.io/memory-pressure` | NoSchedule | 1.8+ | |
|
||||
| `node.kubernetes.io/unschedulable` | NoSchedule | 1.12+ | DaemonSet pods tolerate unschedulable attributes by default scheduler. |
|
||||
| `node.kubernetes.io/network-unavailable` | NoSchedule | 1.12+ | DaemonSet pods that use host network tolerate the network-unavailable attribute when scheduled by the default scheduler. |
|
||||
|
||||
## Communicating with Daemon Pods
|
||||
|
||||
|
|
@ -195,7 +191,7 @@ You can [perform a rolling update](/docs/tasks/manage-daemon/update-daemon-set/)
|
|||
|
||||
## Alternatives to DaemonSet
|
||||
|
||||
### Init Scripts
|
||||
### Init scripts
|
||||
|
||||
It is certainly possible to run daemon processes by directly starting them on a node (e.g. using
|
||||
`init`, `upstartd`, or `systemd`). This is perfectly fine. However, there are several advantages to
|
||||
|
|
|
|||
|
|
@ -140,19 +140,19 @@ See section [specifying your own pod selector](#specifying-your-own-pod-selector
|
|||
There are three main types of task suitable to run as a Job:
|
||||
|
||||
1. Non-parallel Jobs
|
||||
- normally, only one Pod is started, unless the Pod fails.
|
||||
- the Job is complete as soon as its Pod terminates successfully.
|
||||
- normally, only one Pod is started, unless the Pod fails.
|
||||
- the Job is complete as soon as its Pod terminates successfully.
|
||||
1. Parallel Jobs with a *fixed completion count*:
|
||||
- specify a non-zero positive value for `.spec.completions`.
|
||||
- the Job represents the overall task, and is complete when there is one successful Pod for each value in the range 1 to `.spec.completions`.
|
||||
- **not implemented yet:** Each Pod is passed a different index in the range 1 to `.spec.completions`.
|
||||
- specify a non-zero positive value for `.spec.completions`.
|
||||
- the Job represents the overall task, and is complete when there is one successful Pod for each value in the range 1 to `.spec.completions`.
|
||||
- **not implemented yet:** Each Pod is passed a different index in the range 1 to `.spec.completions`.
|
||||
1. Parallel Jobs with a *work queue*:
|
||||
- do not specify `.spec.completions`, default to `.spec.parallelism`.
|
||||
- the Pods must coordinate amongst themselves or an external service to determine what each should work on. For example, a Pod might fetch a batch of up to N items from the work queue.
|
||||
- each Pod is independently capable of determining whether or not all its peers are done, and thus that the entire Job is done.
|
||||
- when _any_ Pod from the Job terminates with success, no new Pods are created.
|
||||
- once at least one Pod has terminated with success and all Pods are terminated, then the Job is completed with success.
|
||||
- once any Pod has exited with success, no other Pod should still be doing any work for this task or writing any output. They should all be in the process of exiting.
|
||||
- do not specify `.spec.completions`, default to `.spec.parallelism`.
|
||||
- the Pods must coordinate amongst themselves or an external service to determine what each should work on. For example, a Pod might fetch a batch of up to N items from the work queue.
|
||||
- each Pod is independently capable of determining whether or not all its peers are done, and thus that the entire Job is done.
|
||||
- when _any_ Pod from the Job terminates with success, no new Pods are created.
|
||||
- once at least one Pod has terminated with success and all Pods are terminated, then the Job is completed with success.
|
||||
- once any Pod has exited with success, no other Pod should still be doing any work for this task or writing any output. They should all be in the process of exiting.
|
||||
|
||||
For a _non-parallel_ Job, you can leave both `.spec.completions` and `.spec.parallelism` unset. When both are
|
||||
unset, both are defaulted to 1.
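
As an illustrative sketch, a parallel Job with a fixed completion count might look like the following; the image and command are placeholders.

```yaml
# The Job is complete once 5 Pods have succeeded, with at most 2 running at a time.
apiVersion: batch/v1
kind: Job
metadata:
  name: example-fixed-count
spec:
  completions: 5        # total successful Pods required
  parallelism: 2        # Pods running concurrently
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: worker
          image: busybox
          command: ["sh", "-c", "echo processing one work item"]
```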
|
||||
|
|
|
|||
|
|
@ -322,7 +322,7 @@ reasons:
|
|||
|
||||
{{% capture whatsnext %}}
|
||||
|
||||
* Read about [creating a Pod that has an init container](/docs/tasks/configure-pod-container/configure-pod-initialization/#creating-a-pod-that-has-an-init-container)
|
||||
* Read about [creating a Pod that has an init container](/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container)
|
||||
* Learn how to [debug init containers](/docs/tasks/debug-application-cluster/debug-init-containers/)
|
||||
|
||||
{{% /capture %}}
|
||||
{{% /capture %}}
|
||||
|
|
|
|||
|
|
@ -138,19 +138,19 @@ git status
|
|||
The output is similar to:
|
||||
|
||||
```
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/css/bootstrap.min.css
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/css/font-awesome.min.css
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/css/stylesheet.css
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/FontAwesome.otf
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/fontawesome-webfont.eot
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/fontawesome-webfont.svg
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/fontawesome-webfont.ttf
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/fontawesome-webfont.woff
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/fontawesome-webfont.woff2
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/index.html
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/js/jquery.scrollTo.min.js
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/js/navData.js
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/js/scroll.js
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/bootstrap.min.css
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/font-awesome.min.css
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/stylesheet.css
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/FontAwesome.otf
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.eot
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.svg
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.ttf
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.woff
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.woff2
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/index.html
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/jquery.scrollTo.min.js
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/navData.js
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/scroll.js
|
||||
```
|
||||
|
||||
## Updating the API reference index pages
|
||||
|
|
@ -177,7 +177,7 @@ version number.
|
|||
## Locally test the API reference
|
||||
|
||||
Publish a local version of the API reference.
|
||||
Verify the [local preview](http://localhost:1313/docs/reference/generated/kubernetes-api/v1.17/).
|
||||
Verify the [local preview](http://localhost:1313/docs/reference/generated/kubernetes-api/{{< param "version">}}/).
|
||||
|
||||
```shell
|
||||
cd <web-base>
@ -219,19 +219,19 @@ static/docs/reference/generated/kubectl/css/font-awesome.min.css
|
|||
### Generated Kubernetes API reference directories and files
|
||||
|
||||
```
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/index.html
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/js/navData.js
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/js/scroll.js
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/js/query.scrollTo.min.js
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/css/font-awesome.min.css
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/css/bootstrap.min.css
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/css/stylesheet.css
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/FontAwesome.otf
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/fontawesome-webfont.eot
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/fontawesome-webfont.svg
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/fontawesome-webfont.ttf
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/fontawesome-webfont.woff
|
||||
static/docs/reference/generated/kubernetes-api/v1.17/fonts/fontawesome-webfont.woff2
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/index.html
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/navData.js
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/scroll.js
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/query.scrollTo.min.js
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/font-awesome.min.css
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/bootstrap.min.css
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/stylesheet.css
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/FontAwesome.otf
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.eot
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.svg
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.ttf
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.woff
|
||||
static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.woff2
|
||||
```
|
||||
|
||||
Run `git add` and `git commit` to commit the files.
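For example, assuming the generated reference files shown above are the only pending changes in your working tree, the sequence might look like this sketch (the commit message is illustrative only):

```shell
# Stage the regenerated API reference output and commit it.
git add static/docs/reference/generated/kubernetes-api/
git commit -m "Update generated Kubernetes API reference"
```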
@ -54,5 +54,8 @@ was wrong, you (and only you, the submitter) can change it.
|
|||
|
||||
Limit pull requests to one language per PR. If you need to make an identical change to the same code sample in multiple languages, open a separate PR for each language.
|
||||
|
||||
## Tools for contributors
|
||||
|
||||
The [doc contributors tools](https://github.com/kubernetes/website/tree/master/content/en/docs/doc-contributor-tools) directory in the `kubernetes/website` repository contains tools to help your contribution journey go more smoothly.
|
||||
|
||||
{{% /capture %}}
@ -32,9 +32,14 @@ project](https://github.com/kubernetes/kubernetes).
|
|||
|
||||
## What's allowed
|
||||
|
||||
Kubernetes docs permit only some kinds of content.
|
||||
Kubernetes docs allow content for third-party projects only when:
|
||||
|
||||
- Content documents software in the Kubernetes project
|
||||
- Content documents software that's out of project but necessary for Kubernetes to function
|
||||
- Content is canonical on kubernetes.io, or links to canonical content elsewhere
|
||||
|
||||
### Third party content
|
||||
|
||||
Kubernetes documentation includes applied examples of projects in the Kubernetes project—projects that live in the [kubernetes](https://github.com/kubernetes) and
|
||||
[kubernetes-sigs](https://github.com/kubernetes-sigs) GitHub organizations.
|
||||
|
||||
|
|
@ -43,7 +48,7 @@ Links to active content in the Kubernetes project are always allowed.
|
|||
Kubernetes requires some third party content to function. Examples include container runtimes (containerd, CRI-O, Docker),
|
||||
[networking policy](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) (CNI plugins), [Ingress controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/), and [logging](https://kubernetes.io/docs/concepts/cluster-administration/logging/).
|
||||
|
||||
Docs can link to third-party open source software (OSS) outside the Kubernetes project if it's necessary for Kubernetes to function.
|
||||
Docs can link to third-party open source software (OSS) outside the Kubernetes project only if it's necessary for Kubernetes to function.
|
||||
|
||||
### Dual sourced content
@ -0,0 +1,76 @@
|
|||
# Internal link checking tool
|
||||
|
||||
You can use [htmltest](https://github.com/wjdp/htmltest) to check for broken links in [`/content/en/`](https://git.k8s.io/website/content/en/). This is useful when refactoring sections of content, moving pages around, or renaming files or page headers.
|
||||
|
||||
## How the tool works
|
||||
|
||||
`htmltest` scans links in the generated HTML files of the kubernetes website repository. It runs using a `make` command which does the following:
|
||||
|
||||
- Builds the site and generates output HTML in the `/public` directory of your local `kubernetes/website` repository
|
||||
- Pulls the `wjdp/htmltest` Docker image
|
||||
- Mounts your local `kubernetes/website` repository to the Docker image
|
||||
- Scans the files generated in the `/public` directory and provides command line output when it encounters broken internal links
|
||||
|
||||
## What it does and doesn't check
|
||||
|
||||
The link checker scans generated HTML files, not raw Markdown. The htmltest tool depends on a configuration file, [`.htmltest.yml`](https://git.k8s.io/website/.htmltest.yml), to determine which content to examine.
|
||||
|
||||
The link checker scans the following:
|
||||
|
||||
- All content generated from Markdown in [`/content/en/docs`](https://git.k8s.io/website/content/en/docs/) directory, excluding:
|
||||
- Generated API references, for example https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/
|
||||
- All internal links, excluding:
|
||||
- Empty hashes (`<a href="#">` or `[title](#)`) and empty hrefs (`<a href="">` or `[title]()`)
|
||||
- Internal links to images and other media files
|
||||
|
||||
The link checker does not scan the following:
|
||||
|
||||
- Links included in the top and side nav bars, footer links, or links in a page's `<head>` section, such as links to CSS stylesheets, scripts, and meta information
|
||||
- Top level pages and their children, for example: `/training`, `/community`, `/case-studies/adidas`
|
||||
- Blog posts
|
||||
- API Reference documentation, for example: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/
|
||||
- Localizations
|
||||
|
||||
## Prerequisites and installation
|
||||
|
||||
You must install:
|
||||
* [Docker](https://docs.docker.com/get-docker/)
|
||||
* [make](https://www.gnu.org/software/make/)
|
||||
|
||||
## Running the link checker
|
||||
|
||||
To run the link checker:
|
||||
|
||||
1. Navigate to the root directory of your local `kubernetes/website` repository.
|
||||
|
||||
2. Run the following command:
|
||||
|
||||
```
|
||||
make docker-internal-linkcheck
|
||||
```
|
||||
|
||||
## Understanding the output
|
||||
|
||||
If the link checker finds broken links, the output is similar to the following:
|
||||
|
||||
```
|
||||
tasks/access-kubernetes-api/custom-resources/index.html
|
||||
hash does not exist --- tasks/access-kubernetes-api/custom-resources/index.html --> #preserving-unknown-fields
|
||||
hash does not exist --- tasks/access-kubernetes-api/custom-resources/index.html --> #preserving-unknown-fields
|
||||
```
|
||||
|
||||
This is one set of broken links. The log adds an output for each page with broken links.
|
||||
|
||||
In this output, the file with broken links is `tasks/access-kubernetes-api/custom-resources.md`.
|
||||
|
||||
The tool gives a reason: `hash does not exist`. In most cases, you can ignore this.
|
||||
|
||||
The target URL is `#preserving-unknown-fields`.
|
||||
|
||||
One way to fix this is to:
|
||||
|
||||
1. Navigate to the Markdown file with a broken link.
|
||||
2. Using a text editor, do a full-text search (usually Ctrl+F or Command+F) for the broken link's URL, `#preserving-unknown-fields`.
|
||||
3. Fix the link. For a broken page hash (or _anchor_) link, check whether the topic was renamed or removed.
|
||||
|
||||
Run htmltest to verify that broken links are fixed.
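From the command line, the full-text search described in step 2 can also be done with `grep`, run from the root of your local `kubernetes/website` repository; this is just a convenience sketch:

```shell
# Find every English docs page that still references the broken anchor.
grep -rn "preserving-unknown-fields" content/en/docs/
```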
@ -15,7 +15,7 @@ menu:
|
|||
title: "Documentation"
|
||||
weight: 20
|
||||
post: >
|
||||
<p>Learn how to use Kubernetes with conceptual, tutorial, and reference documentation. You can even <a href="/editdocs/" data-auto-burger-exclude>help contribute to the docs</a>!</p>
|
||||
<p>Learn how to use Kubernetes with conceptual, tutorial, and reference documentation. You can even <a href="/editdocs/" data-auto-burger-exclude data-proofer-ignore>help contribute to the docs</a>!</p>
|
||||
description: >
|
||||
Kubernetes is an open source container orchestration engine for automating deployment, scaling, and management of containerized applications. The open source project is hosted by the Cloud Native Computing Foundation.
|
||||
overview: >
|
||||
|
|
@ -38,7 +38,7 @@ cards:
|
|||
button_path: "/docs/setup"
|
||||
- name: tasks
|
||||
title: "Learn how to use Kubernetes"
|
||||
description: "Look up common tasks and how to perform them using a short sequence of steps."
|
||||
description: "Look up common tasks and how to perform them using a short sequence of steps."
|
||||
button: "View Tasks"
|
||||
button_path: "/docs/tasks"
|
||||
- name: training
|
||||
|
|
@ -62,4 +62,4 @@ cards:
|
|||
- name: about
|
||||
title: About the documentation
|
||||
description: This website contains documentation for the current and previous 4 versions of Kubernetes.
|
||||
---
|
||||
---
|
||||
|
|
@ -18,6 +18,7 @@ This page describes how to build, configure, use, and monitor admission webhooks
|
|||
{{% /capture %}}
|
||||
|
||||
{{% capture body %}}
|
||||
|
||||
## What are admission webhooks?
|
||||
|
||||
Admission webhooks are HTTP callbacks that receive admission requests and do
|
||||
|
|
@ -549,7 +550,7 @@ Example of a minimal response from a webhook to forbid a request:
|
|||
|
||||
When rejecting a request, the webhook can customize the http code and message returned to the user using the `status` field.
|
||||
The specified status object is returned to the user.
|
||||
See the [API documentation](/docs/reference/generated/kubernetes-api/v1.14/#status-v1-meta) for details about the status type.
|
||||
See the [API documentation](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#status-v1-meta) for details about the status type.
|
||||
Example of a response to forbid a request, customizing the HTTP status code and message presented to the user:
|
||||
{{< tabs name="AdmissionReview_response_forbid_details" >}}
|
||||
{{% tab name="admission.k8s.io/v1" %}}
@ -5,7 +5,7 @@ reviewers:
|
|||
- liggitt
|
||||
title: Using RBAC Authorization
|
||||
content_template: templates/concept
|
||||
aliases: [../../../rbac/]
|
||||
aliases: [/rbac/]
|
||||
weight: 70
|
||||
---
|
||||
|
||||
|
|
@ -1210,4 +1210,4 @@ kubectl create clusterrolebinding permissive-binding \
|
|||
After you have transitioned to use RBAC, you should adjust the access controls
|
||||
for your cluster to ensure that these meet your information security needs.
|
||||
|
||||
{{% /capture %}}
|
||||
{{% /capture %}}
|
||||
|
|
@ -105,6 +105,7 @@ different Kubernetes components.
|
|||
| `HugePageStorageMediumSize` | `false` | Alpha | 1.18 | |
|
||||
| `HyperVContainer` | `false` | Alpha | 1.10 | |
|
||||
| `ImmutableEphemeralVolumes` | `false` | Alpha | 1.18 | |
|
||||
| `IPv6DualStack` | `false` | Alpha | 1.16 | |
|
||||
| `KubeletPodResources` | `false` | Alpha | 1.13 | 1.14 |
|
||||
| `KubeletPodResources` | `true` | Beta | 1.15 | |
|
||||
| `LegacyNodeRoleBehavior` | `true` | Alpha | 1.16 | |
@ -2,7 +2,7 @@
|
|||
title: Container Environment Variables
|
||||
id: container-env-variables
|
||||
date: 2018-04-12
|
||||
full_link: /docs/concepts/containers/container-environment-variables/
|
||||
full_link: /docs/concepts/containers/container-environment/
|
||||
short_description: >
|
||||
Container environment variables are name=value pairs that provide useful information into containers running in a Pod.
|
||||
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ To make a report, submit your vulnerability to the [Kubernetes bug bounty progra
|
|||
|
||||
You can also email the private [security@kubernetes.io](mailto:security@kubernetes.io) list with the security details and the details expected for [all Kubernetes bug reports](https://git.k8s.io/kubernetes/.github/ISSUE_TEMPLATE/bug-report.md).
|
||||
|
||||
You may encrypt your email to this list using the GPG keys of the [Product Security Committee members](https://git.k8s.io/security/security-release-process.md#product-security-committee-psc). Encryption using GPG is NOT required to make a disclosure.
|
||||
You may encrypt your email to this list using the GPG keys of the [Product Security Committee members](https://git.k8s.io/security/README.md#product-security-committee-psc). Encryption using GPG is NOT required to make a disclosure.
|
||||
|
||||
### When Should I Report a Vulnerability?
@ -32,11 +32,11 @@ where `command`, `TYPE`, `NAME`, and `flags` are:
|
|||
|
||||
* `TYPE`: Specifies the [resource type](#resource-types). Resource types are case-insensitive and you can specify the singular, plural, or abbreviated forms. For example, the following commands produce the same output:
|
||||
|
||||
```shell
|
||||
kubectl get pod pod1
|
||||
kubectl get pods pod1
|
||||
kubectl get po pod1
|
||||
```
|
||||
```shell
|
||||
kubectl get pod pod1
|
||||
kubectl get pods pod1
|
||||
kubectl get po pod1
|
||||
```
|
||||
|
||||
* `NAME`: Specifies the name of the resource. Names are case-sensitive. If the name is omitted, details for all resources are displayed, for example `kubectl get pods`.
@ -424,7 +424,7 @@ kubectl hello
|
|||
hello world
|
||||
```
|
||||
|
||||
```
|
||||
```shell
|
||||
# we can "uninstall" a plugin, by simply removing it from our PATH
|
||||
sudo rm /usr/local/bin/kubectl-hello
|
||||
```
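As a complement to the removal above, the sketch below shows how such a plugin could be created in the first place; any executable named `kubectl-<name>` on your `PATH` acts as a plugin, and `/usr/local/bin` is just one common install location assumed here:

```shell
# Create a minimal "hello" plugin: an executable named kubectl-hello on PATH.
cat <<'EOF' | sudo tee /usr/local/bin/kubectl-hello >/dev/null
#!/bin/bash
echo "hello world"
EOF
sudo chmod +x /usr/local/bin/kubectl-hello

kubectl hello   # prints: hello world
```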
@ -442,7 +442,7 @@ The following kubectl-compatible plugins are available:
|
|||
/usr/local/bin/kubectl-foo
|
||||
/usr/local/bin/kubectl-bar
|
||||
```
|
||||
```
|
||||
```shell
|
||||
# this command can also warn us about plugins that are
|
||||
# not executable, or that are overshadowed by other
|
||||
# plugins, for example
|
||||
|
|
|
|||
|
|
@ -357,7 +357,7 @@ The modifying verbs (`POST`, `PUT`, `PATCH`, and `DELETE`) can accept requests i
|
|||
|
||||
Dry-run is triggered by setting the `dryRun` query parameter. This parameter is a string, working as an enum, and the only accepted values are:
|
||||
|
||||
* `All`: Every stage runs as normal, except for the final storage stage. Admission controllers are run to check that the request is valid, mutating controllers mutate the request, merge is performed on `PATCH`, fields are defaulted, and schema validation occurs. The changes are not persisted to the underlying storage, but the final object which would have been persisted is still returned to the user, along with the normal status code. If the request would trigger an admission controller which would have side effects, the request will be failed rather than risk an unwanted side effect. All built in admission control plugins support dry-run. Additionally, admission webhooks can declare in their [configuration object](/docs/reference/generated/kubernetes-api/v1.13/#webhook-v1beta1-admissionregistration-k8s-io) that they do not have side effects by setting the sideEffects field to "None". If a webhook actually does have side effects, then the sideEffects field should be set to "NoneOnDryRun", and the webhook should also be modified to understand the `DryRun` field in AdmissionReview, and prevent side effects on dry-run requests.
|
||||
* `All`: Every stage runs as normal, except for the final storage stage. Admission controllers are run to check that the request is valid, mutating controllers mutate the request, merge is performed on `PATCH`, fields are defaulted, and schema validation occurs. The changes are not persisted to the underlying storage, but the final object which would have been persisted is still returned to the user, along with the normal status code. If the request would trigger an admission controller which would have side effects, the request will be failed rather than risk an unwanted side effect. All built in admission control plugins support dry-run. Additionally, admission webhooks can declare in their [configuration object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#webhook-v1beta1-admissionregistration-k8s-io) that they do not have side effects by setting the sideEffects field to "None". If a webhook actually does have side effects, then the sideEffects field should be set to "NoneOnDryRun", and the webhook should also be modified to understand the `DryRun` field in AdmissionReview, and prevent side effects on dry-run requests.
|
||||
* Leave the value empty, which is also the default: Keep the default modifying behavior.
|
||||
|
||||
For example:
|
||||
|
|
@ -450,7 +450,7 @@ request (if not forced, see [Conflicts](#conflicts)).
|
|||
|
||||
Field management is stored in a newly introduced `managedFields` field that is
|
||||
part of an object's
|
||||
[`metadata`](/docs/reference/generated/kubernetes-api/v1.16/#objectmeta-v1-meta).
|
||||
[`metadata`](/docs/reference/generated/kubernetes-api/{{< latest-version >}}/#objectmeta-v1-meta).
|
||||
|
||||
A simple example of an object created by Server Side Apply could look like this:
|
||||
|
||||
|
|
@ -490,7 +490,7 @@ Nevertheless it is possible to change `metadata.managedFields` through an
|
|||
option to try if, for example, the `managedFields` get into an inconsistent
|
||||
state (which clearly should not happen).
|
||||
|
||||
The format of the `managedFields` is described in the [API](/docs/reference/generated/kubernetes-api/v1.16/#fieldsv1-v1-meta).
|
||||
The format of the `managedFields` is described in the [API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#fieldsv1-v1-meta).
|
||||
|
||||
### Conflicts
|
||||
|
||||
|
|
@ -706,9 +706,9 @@ Resource versions are strings that identify the server's internal version of an
|
|||
|
||||
Clients find resource versions in resources, including the resources in watch events, and list responses returned from the server:
|
||||
|
||||
[v1.meta/ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#objectmeta-v1-meta) - The `metadata.resourceVersion` of a resource instance identifies the resource version the instance was last modified at.
|
||||
[v1.meta/ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#objectmeta-v1-meta) - The `metadata.resourceVersion` of a resource instance identifies the resource version the instance was last modified at.
|
||||
|
||||
[v1.meta/ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#listmeta-v1-meta) - The `metadata.resourceVersion` of a resource collection (i.e. a list response) identifies the resource version at which the list response was constructed.
|
||||
[v1.meta/ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#listmeta-v1-meta) - The `metadata.resourceVersion` of a resource collection (i.e. a list response) identifies the resource version at which the list response was constructed.
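As a quick way to see these values on a live cluster, the following sketch (assuming `kubectl` access and at least one Pod in the current namespace; `POD_NAME` is a placeholder) prints the resource version of a single object and of a list response:

```shell
# Resource version of one object; replace POD_NAME with a real Pod name.
kubectl get pod POD_NAME -o jsonpath='{.metadata.resourceVersion}{"\n"}'

# Resource version of the list response returned for a collection.
kubectl get pods -o jsonpath='{.metadata.resourceVersion}{"\n"}'
```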
|
||||
|
||||
### The ResourceVersion Parameter
|
||||
|
||||
|
|
|
|||
|
|
@ -49,6 +49,7 @@ their authors, not the Kubernetes team.
|
|||
| Go | [github.com/ericchiang/k8s](https://github.com/ericchiang/k8s) |
|
||||
| Java (OSGi) | [bitbucket.org/amdatulabs/amdatu-kubernetes](https://bitbucket.org/amdatulabs/amdatu-kubernetes) |
|
||||
| Java (Fabric8, OSGi) | [github.com/fabric8io/kubernetes-client](https://github.com/fabric8io/kubernetes-client) |
|
||||
| Java | [github.com/manusa/yakc](https://github.com/manusa/yakc) |
|
||||
| Lisp | [github.com/brendandburns/cl-k8s](https://github.com/brendandburns/cl-k8s) |
|
||||
| Lisp | [github.com/xh4/cube](https://github.com/xh4/cube) |
|
||||
| Node.js (TypeScript) | [github.com/Goyoo/node-k8s-client](https://github.com/Goyoo/node-k8s-client) |
|
||||
|
|
|
|||
|
|
@ -1,101 +0,0 @@
|
|||
---
|
||||
title: Installing Kubernetes with KRIB
|
||||
krib-version: 2.4
|
||||
author: Rob Hirschfeld (zehicle)
|
||||
weight: 20
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
This guide helps to install a Kubernetes cluster hosted on bare metal with [Digital Rebar Provision](https://github.com/digitalrebar/provision) using only its Content packages and *kubeadm*.
|
||||
|
||||
Digital Rebar Provision (DRP) is an integrated Golang DHCP, bare metal provisioning (PXE/iPXE) and workflow automation platform. While [DRP can be used to invoke](https://provision.readthedocs.io/en/tip/doc/integrations/ansible.html) [kubespray](/docs/setup/custom-cloud/kubespray), it also offers a self-contained Kubernetes installation known as [KRIB (Kubernetes Rebar Integrated Bootstrap)](https://github.com/digitalrebar/provision-content/tree/master/krib).
|
||||
|
||||
{{< note >}}
|
||||
KRIB is not a _stand-alone_ installer: Digital Rebar templates drive a standard *[kubeadm](/docs/admin/kubeadm/)* configuration that manages the Kubernetes installation with the [Digital Rebar cluster pattern](https://provision.readthedocs.io/en/tip/doc/arch/cluster.html#rs-cluster-pattern) to elect leaders _without external supervision_.
|
||||
{{< /note >}}
|
||||
|
||||
|
||||
KRIB features:
|
||||
|
||||
* zero-touch, self-configuring cluster without pre-configuration or inventory
|
||||
* very fast, no-ssh required automation
|
||||
* bare metal, on-premises focused platform
|
||||
* highly available cluster options (including splitting etcd from the controllers)
|
||||
* dynamic generation of a TLS infrastructure
|
||||
* composable attributes and automatic detection of hardware by profile
|
||||
* options for persistent, immutable and image-based deployments
|
||||
* support for Ubuntu 18.04, CentOS/RHEL 7, CoreOS, RancherOS and others
|
||||
|
||||
## Creating a cluster
|
||||
|
||||
Review [Digital Rebar documentation](https://provision.readthedocs.io/en/tip/README.html) for details about installing the platform.
|
||||
|
||||
The Digital Rebar Provision Golang binary should be installed on a Linux-like system with 16 GB of RAM or larger (Packet.net Tiny and Raspberry Pi are also acceptable).
|
||||
|
||||
### (1/5) Discover servers
|
||||
|
||||
Following the [Digital Rebar installation](https://provision.readthedocs.io/en/tip/doc/quickstart.html), allow one or more servers to boot through the _Sledgehammer_ discovery process to register with the API. This automatically installs the Digital Rebar runner and allows for the next steps.
|
||||
|
||||
### (2/5) Install KRIB Content and Certificate Plugin
|
||||
|
||||
Upload the KRIB Content bundle (or build from [source](https://github.com/digitalrebar/provision-content/tree/master/krib)) and the Cert Plugin for your DRP platform. Both are freely available via the [RackN UX](https://portal.rackn.io) or using the upload from catalog feature of the DRPCLI (shown below).
|
||||
|
||||
```
|
||||
drpcli plugin_providers upload certs from catalog:certs-stable
|
||||
drpcli contents upload catalog:krib-stable
|
||||
```
|
||||
|
||||
### (3/5) Start your cluster deployment
|
||||
|
||||
{{< note >}}
|
||||
KRIB documentation is dynamically generated from the source and will be more up to date than this guide.
|
||||
{{< /note >}}
|
||||
|
||||
Following the [KRIB documentation](https://provision.readthedocs.io/en/tip/doc/content-packages/krib.html), create a Profile for your cluster and assign your target servers into the cluster Profile. The Profile must set `krib\cluster-name` and `etcd\cluster-name` Params to be the name of the Profile. Cluster configuration choices can be made by adding additional Params to the Profile; however, safe defaults are provided for all Params.
|
||||
|
||||
Once all target servers are assigned to the cluster Profile, start a KRIB installation Workflow by assigning one of the included Workflows to all cluster servers. For example, selecting `krib-live-cluster` will perform an immutable deployment into the Sledgehammer discovery operating system. You may use one of the pre-created read-only Workflows or choose to build your own custom variation.
|
||||
|
||||
For basic installs, no further action is required. Advanced users may choose to assign the controllers, etcd servers or other configuration values in the relevant Params.
|
||||
|
||||
### (4/5) Monitor your cluster deployment
|
||||
|
||||
Digital Rebar Provision provides detailed logging and live updates during the installation process. Workflow events are available via a websocket connection or monitoring the Jobs list.
|
||||
|
||||
During the installation, KRIB writes cluster configuration data back into the cluster Profile.
|
||||
|
||||
### (5/5) Access your cluster
|
||||
|
||||
The cluster is available for access via *kubectl* once the `krib/cluster-admin-conf` Param has been set. This Param contains the `kubeconfig` information necessary to access the cluster.
|
||||
|
||||
For example, if you named the cluster Profile `krib` then the following commands would allow you to connect to the installed cluster from your local terminal.
|
||||
|
||||
    drpcli profiles get krib params krib/cluster-admin-conf > admin.conf
    export KUBECONFIG=admin.conf
    kubectl get nodes
|
||||
|
||||
|
||||
The installation continues after the `krib/cluster-admin-conf` is set to install the Kubernetes UI and Helm. You may interact with the cluster as soon as the `admin.conf` file is available.
|
||||
|
||||
## Cluster operations
|
||||
|
||||
KRIB provides additional Workflows to manage your cluster. Please see the [KRIB documentation](https://provision.readthedocs.io/en/tip/doc/content-packages/krib.html) for an updated list of advanced cluster operations.
|
||||
|
||||
### Scale your cluster
|
||||
|
||||
You can add servers into your cluster by adding the cluster Profile to the server and running the appropriate Workflow.
|
||||
|
||||
### Cleanup your cluster (for developers)
|
||||
|
||||
You can reset your cluster and wipe out all configuration and TLS certificates using the `krib-reset-cluster` Workflow on any of the servers in the cluster.
|
||||
|
||||
{{< caution >}}
|
||||
When running the reset Workflow, be sure not to accidentally target your production cluster!
|
||||
{{< /caution >}}
|
||||
|
||||
## Feedback
|
||||
|
||||
* Slack Channel: [#community](https://rackn.slack.com/messages/community/)
|
||||
* [GitHub Issues](https://github.com/digitalrebar/provision/issues)
|
||||
|
|
@ -17,6 +17,11 @@ You can set up an HA cluster:
|
|||
|
||||
You should carefully consider the advantages and disadvantages of each topology before setting up an HA cluster.
|
||||
|
||||
{{< note >}}
|
||||
kubeadm bootstraps the etcd cluster statically. Read the etcd [Clustering Guide](https://github.com/etcd-io/etcd/blob/release-3.4/Documentation/op-guide/clustering.md#static)
|
||||
for more details.
|
||||
{{< /note >}}
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture body %}}
|
||||
|
|
|
|||
|
|
@ -77,11 +77,12 @@ option. Your cluster requirements may need a different configuration.
|
|||
on the apiserver port. It must also allow incoming traffic on its
|
||||
listening port.
|
||||
|
||||
- [HAProxy](http://www.haproxy.org/) can be used as a load balancer.
|
||||
|
||||
- Make sure the address of the load balancer always matches
|
||||
the address of kubeadm's `ControlPlaneEndpoint`.
|
||||
|
||||
- Read the [Options for Software Load Balancing](https://github.com/kubernetes/kubeadm/blob/master/docs/ha-considerations.md#options-for-software-load-balancing)
|
||||
guide for more details.
|
||||
|
||||
1. Add the first control plane nodes to the load balancer and test the
|
||||
connection:
|
||||
|
||||
|
|
|
|||
|
|
@ -1,340 +0,0 @@
|
|||
---
|
||||
title: Running Kubernetes on CenturyLink Cloud
|
||||
---
|
||||
|
||||
|
||||
These scripts handle the creation, deletion and expansion of Kubernetes clusters on CenturyLink Cloud.
|
||||
|
||||
You can accomplish all these tasks with a single command. We have made the Ansible playbooks used to perform these tasks available [here](https://github.com/CenturyLinkCloud/adm-kubernetes-on-clc/blob/master/ansible/README.md).
|
||||
|
||||
## Find Help
|
||||
|
||||
If you run into any problems or want help with anything, we are here to help. Reach out to us via any of the following ways:
|
||||
|
||||
- Submit a github issue
|
||||
- Send an email to Kubernetes AT ctl DOT io
|
||||
- Visit [http://info.ctl.io/kubernetes](http://info.ctl.io/kubernetes)
|
||||
|
||||
## Clusters of VMs or Physical Servers, your choice.
|
||||
|
||||
- We support Kubernetes clusters on both Virtual Machines and Physical Servers. If you want to use physical servers for the worker nodes (minions), simply use the --minion_type=bareMetal flag.
|
||||
- For more information on physical servers, visit: [https://www.ctl.io/bare-metal/](https://www.ctl.io/bare-metal/)
|
||||
- Physical servers are only available in the VA1 and GB3 data centers.
|
||||
- VMs are available in all 13 of our public cloud locations
|
||||
|
||||
## Requirements
|
||||
|
||||
The requirements to run this script are:
|
||||
|
||||
- A Linux administrative host (tested on Ubuntu and macOS)
|
||||
- python 2 (tested on 2.7.11)
|
||||
- pip (installed with python as of 2.7.9)
|
||||
- git
|
||||
- A CenturyLink Cloud account with rights to create new hosts
|
||||
- An active VPN connection to the CenturyLink Cloud from your linux host
|
||||
|
||||
## Script Installation
|
||||
|
||||
After you have all the requirements met, please follow these instructions to install this script.
|
||||
|
||||
1) Clone this repository and cd into it.
|
||||
|
||||
```shell
|
||||
git clone https://github.com/CenturyLinkCloud/adm-kubernetes-on-clc
|
||||
```
|
||||
|
||||
2) Install all requirements, including
|
||||
|
||||
* Ansible
|
||||
* CenturyLink Cloud SDK
|
||||
* Ansible Modules
|
||||
|
||||
```shell
|
||||
sudo pip install -r ansible/requirements.txt
|
||||
```
|
||||
|
||||
3) Create the credentials file from the template and use it to set your ENV variables
|
||||
|
||||
```shell
|
||||
cp ansible/credentials.sh.template ansible/credentials.sh
|
||||
vi ansible/credentials.sh
|
||||
source ansible/credentials.sh
|
||||
|
||||
```
|
||||
|
||||
4) Grant your machine access to the CenturyLink Cloud network by using a VM inside the network or [ configuring a VPN connection to the CenturyLink Cloud network.](https://www.ctl.io/knowledge-base/network/how-to-configure-client-vpn/)
|
||||
|
||||
|
||||
#### Script Installation Example: Ubuntu 14 Walkthrough
|
||||
|
||||
If you use Ubuntu 14, for your convenience we have provided a step-by-step
|
||||
guide to install the requirements and install the script.
|
||||
|
||||
```shell
|
||||
# system
|
||||
apt-get update
|
||||
apt-get install -y git python python-crypto
|
||||
curl -O https://bootstrap.pypa.io/get-pip.py
|
||||
python get-pip.py
|
||||
|
||||
# installing this repository
|
||||
mkdir -p ~home/k8s-on-clc
|
||||
cd ~home/k8s-on-clc
|
||||
git clone https://github.com/CenturyLinkCloud/adm-kubernetes-on-clc.git
|
||||
cd adm-kubernetes-on-clc/
|
||||
pip install -r requirements.txt
|
||||
|
||||
# getting started
|
||||
cd ansible
|
||||
cp credentials.sh.template credentials.sh; vi credentials.sh
|
||||
source credentials.sh
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Cluster Creation
|
||||
|
||||
To create a new Kubernetes cluster, simply run the ```kube-up.sh``` script. A complete
|
||||
list of script options and some examples are listed below.
|
||||
|
||||
```shell
|
||||
CLC_CLUSTER_NAME=[name of kubernetes cluster]
|
||||
cd ./adm-kubernetes-on-clc
|
||||
bash kube-up.sh -c="$CLC_CLUSTER_NAME"
|
||||
```
|
||||
|
||||
It takes about 15 minutes to create the cluster. Once the script completes, it
|
||||
will output some commands that will help you setup kubectl on your machine to
|
||||
point to the new cluster.
|
||||
|
||||
When the cluster creation is complete, the configuration files for it are stored
|
||||
locally on your administrative host, in the following directory
|
||||
|
||||
```shell
|
||||
> CLC_CLUSTER_HOME=$HOME/.clc_kube/$CLC_CLUSTER_NAME/
|
||||
```
|
||||
|
||||
|
||||
#### Cluster Creation: Script Options
|
||||
|
||||
```shell
|
||||
Usage: kube-up.sh [OPTIONS]
|
||||
Create servers in the CenturyLinkCloud environment and initialize a Kubernetes cluster
|
||||
Environment variables CLC_V2_API_USERNAME and CLC_V2_API_PASSWD must be set in
|
||||
order to access the CenturyLinkCloud API
|
||||
|
||||
All options (both short and long form) require arguments, and must include "="
|
||||
between option name and option value.
|
||||
|
||||
-h (--help) display this help and exit
|
||||
-c= (--clc_cluster_name=) set the name of the cluster, as used in CLC group names
|
||||
-t= (--minion_type=) standard -> VM (default), bareMetal -> physical]
|
||||
-d= (--datacenter=) VA1 (default)
|
||||
-m= (--minion_count=) number of kubernetes minion nodes
|
||||
-mem= (--vm_memory=) number of GB ram for each minion
|
||||
-cpu= (--vm_cpu=) number of virtual cps for each minion node
|
||||
-phyid= (--server_conf_id=) physical server configuration id, one of
|
||||
physical_server_20_core_conf_id
|
||||
physical_server_12_core_conf_id
|
||||
physical_server_4_core_conf_id (default)
|
||||
-etcd_separate_cluster=yes create a separate cluster of three etcd nodes,
|
||||
otherwise run etcd on the master node
|
||||
```
|
||||
|
||||
## Cluster Expansion
|
||||
|
||||
To expand an existing Kubernetes cluster, run the ```add-kube-node.sh```
|
||||
script. A complete list of script options and some examples are listed [below](#cluster-expansion-script-options).
|
||||
This script must be run from the same host that created the cluster (or a host
|
||||
that has the cluster artifact files stored in ```~/.clc_kube/$cluster_name```).
|
||||
|
||||
```shell
|
||||
cd ./adm-kubernetes-on-clc
|
||||
bash add-kube-node.sh -c="name_of_kubernetes_cluster" -m=2
|
||||
```
|
||||
|
||||
#### Cluster Expansion: Script Options
|
||||
|
||||
```shell
|
||||
Usage: add-kube-node.sh [OPTIONS]
|
||||
Create servers in the CenturyLinkCloud environment and add to an
|
||||
existing CLC kubernetes cluster
|
||||
|
||||
Environment variables CLC_V2_API_USERNAME and CLC_V2_API_PASSWD must be set in
|
||||
order to access the CenturyLinkCloud API
|
||||
|
||||
-h (--help) display this help and exit
|
||||
-c= (--clc_cluster_name=) set the name of the cluster, as used in CLC group names
|
||||
-m= (--minion_count=) number of kubernetes minion nodes to add
|
||||
```
|
||||
|
||||
## Cluster Deletion
|
||||
|
||||
There are two ways to delete an existing cluster:
|
||||
|
||||
1) Use our python script:
|
||||
|
||||
```shell
|
||||
python delete_cluster.py --cluster=clc_cluster_name --datacenter=DC1
|
||||
```
|
||||
|
||||
2) Use the CenturyLink Cloud UI. To delete a cluster, log into the CenturyLink
|
||||
Cloud control portal and delete the parent server group that contains the
|
||||
Kubernetes Cluster. We hope to add a scripted option to do this soon.
|
||||
|
||||
## Examples
|
||||
|
||||
Create a cluster with name of k8s_1, 1 master node and 3 worker minions (on physical machines), in VA1
|
||||
|
||||
```shell
|
||||
bash kube-up.sh --clc_cluster_name=k8s_1 --minion_type=bareMetal --minion_count=3 --datacenter=VA1
|
||||
```
|
||||
|
||||
Create a cluster with name of k8s_2, an ha etcd cluster on 3 VMs and 6 worker minions (on VMs), in VA1
|
||||
|
||||
```shell
|
||||
bash kube-up.sh --clc_cluster_name=k8s_2 --minion_type=standard --minion_count=6 --datacenter=VA1 --etcd_separate_cluster=yes
|
||||
```
|
||||
|
||||
Create a cluster with name of k8s_3, 1 master node, and 10 worker minions (on VMs) with higher mem/cpu, in UC1:
|
||||
|
||||
```shell
|
||||
bash kube-up.sh --clc_cluster_name=k8s_3 --minion_type=standard --minion_count=10 --datacenter=VA1 -mem=6 -cpu=4
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Cluster Features and Architecture
|
||||
|
||||
We configure the Kubernetes cluster with the following features:
|
||||
|
||||
* KubeDNS: DNS resolution and service discovery
|
||||
* Heapster/InfluxDB: For metric collection. Needed for Grafana and auto-scaling.
|
||||
* Grafana: Kubernetes/Docker metric dashboard
|
||||
* KubeUI: Simple web interface to view Kubernetes state
|
||||
* Kube Dashboard: New web interface to interact with your cluster
|
||||
|
||||
We use the following to create the Kubernetes cluster:
|
||||
|
||||
* Kubernetes 1.1.7
|
||||
* Ubuntu 14.04
|
||||
* Flannel 0.5.4
|
||||
* Docker 1.9.1-0~trusty
|
||||
* Etcd 2.2.2
|
||||
|
||||
## Optional add-ons
|
||||
|
||||
* Logging: We offer an integrated centralized logging ELK platform so that all
|
||||
Kubernetes and docker logs get sent to the ELK stack. To install the ELK stack
|
||||
and configure Kubernetes to send logs to it, follow [the log
|
||||
aggregation documentation](https://github.com/CenturyLinkCloud/adm-kubernetes-on-clc/blob/master/log_aggregration.md). Note: We don't install this by default as
|
||||
the footprint isn't trivial.
|
||||
|
||||
## Cluster management
|
||||
|
||||
The most widely used tool for managing a Kubernetes cluster is the command-line
|
||||
utility ```kubectl```. If you do not already have a copy of this binary on your
|
||||
administrative machine, you may run the script ```install_kubectl.sh``` which will
|
||||
download it and install it in ```/usr/bin/local```.
|
||||
|
||||
The script requires that the environment variable ```CLC_CLUSTER_NAME``` be defined. ```install_kubectl.sh``` also writes a configuration file which will embed the necessary
|
||||
authentication certificates for the particular cluster. The configuration file is
|
||||
written to the ```${CLC_CLUSTER_HOME}/kube``` directory
|
||||
|
||||
|
||||
```shell
|
||||
export KUBECONFIG=${CLC_CLUSTER_HOME}/kube/config
|
||||
kubectl version
|
||||
kubectl cluster-info
|
||||
```
|
||||
|
||||
### Accessing the cluster programmatically
|
||||
|
||||
It's possible to use the locally stored client certificates to access the apiserver. For example, you may want to use any of the [Kubernetes API client libraries](/docs/reference/using-api/client-libraries/) to program against your Kubernetes cluster in the programming language of your choice.
|
||||
|
||||
To demonstrate how to use these locally stored certificates, we provide the following example of using ```curl``` to communicate to the master apiserver via https:
|
||||
|
||||
```shell
|
||||
curl \
|
||||
--cacert ${CLC_CLUSTER_HOME}/pki/ca.crt \
|
||||
--key ${CLC_CLUSTER_HOME}/pki/kubecfg.key \
|
||||
--cert ${CLC_CLUSTER_HOME}/pki/kubecfg.crt https://${MASTER_IP}:6443
|
||||
```
|
||||
|
||||
But please note, this *does not* work out of the box with the ```curl``` binary
|
||||
distributed with macOS.
|
||||
|
||||
### Accessing the cluster with a browser
|
||||
|
||||
We install [the kubernetes dashboard](/docs/tasks/web-ui-dashboard/). When you
|
||||
create a cluster, the script should output URLs for these interfaces like this:
|
||||
|
||||
kubernetes-dashboard is running at ```https://${MASTER_IP}:6443/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy```.
|
||||
|
||||
Note on Authentication to the UIs:
|
||||
|
||||
The cluster is set up to use basic authentication for the user _admin_.
|
||||
Hitting the url at ```https://${MASTER_IP}:6443``` will
|
||||
require accepting the self-signed certificate
|
||||
from the apiserver, and then presenting the admin
|
||||
password written to the file at `${CLC_CLUSTER_HOME}/kube/admin_password.txt`.
|
||||
|
||||
|
||||
### Configuration files
|
||||
|
||||
Various configuration files are written into the home directory *CLC_CLUSTER_HOME* under ```.clc_kube/${CLC_CLUSTER_NAME}``` in several subdirectories. You can use these files
|
||||
to access the cluster from machines other than where you created the cluster from.
|
||||
|
||||
* ```config/```: Ansible variable files containing parameters describing the master and minion hosts
|
||||
* ```hosts/```: hosts files listing access information for the Ansible playbooks
|
||||
* ```kube/```: ```kubectl``` configuration files, and the basic-authentication password for admin access to the Kubernetes API
|
||||
* ```pki/```: public key infrastructure files enabling TLS communication in the cluster
|
||||
* ```ssh/```: SSH keys for root access to the hosts
|
||||
|
||||
|
||||
## ```kubectl``` usage examples
|
||||
|
||||
There are a great many features of _kubectl_. Here are a few examples
|
||||
|
||||
List existing nodes, pods, services and more, in all namespaces, or in just one:
|
||||
|
||||
```shell
|
||||
kubectl get nodes
|
||||
kubectl get --all-namespaces pods
|
||||
kubectl get --all-namespaces services
|
||||
kubectl get --namespace=kube-system replicationcontrollers
|
||||
```
|
||||
|
||||
The Kubernetes API server exposes services on web URLs, which are protected by requiring
|
||||
client certificates. If you run a kubectl proxy locally, ```kubectl``` will provide
|
||||
the necessary certificates and serve locally over http.
|
||||
|
||||
```shell
|
||||
kubectl proxy -p 8001
|
||||
```
|
||||
|
||||
Then, you can access urls like ```http://127.0.0.1:8001/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy/``` without the need for client certificates in your browser.
|
||||
|
||||
|
||||
## What Kubernetes features do not work on CenturyLink Cloud
|
||||
|
||||
These are the known items that don't work on CenturyLink cloud but do work on other cloud providers:
|
||||
|
||||
- At this time, there is no support for Services of type [LoadBalancer](/docs/tasks/access-application-cluster/create-external-load-balancer/). We are actively working on this and hope to publish the changes sometime around April 2016.
|
||||
|
||||
- At this time, there is no support for persistent storage volumes provided by
|
||||
CenturyLink Cloud. However, customers can bring their own persistent storage
|
||||
offering. We ourselves use Gluster.
|
||||
|
||||
|
||||
## Ansible Files
|
||||
|
||||
If you want more information about our Ansible files, please [read this file](https://github.com/CenturyLinkCloud/adm-kubernetes-on-clc/blob/master/ansible/README.md)
|
||||
|
||||
## Further reading
|
||||
|
||||
Please see the [Kubernetes docs](/docs/) for more details on administering
|
||||
and using a Kubernetes cluster.
|
||||
|
||||
|
||||
|
||||
|
|
@ -66,4 +66,4 @@ You can install IBM Cloud Private on VMware with either Ubuntu or RHEL images. F
|
|||
|
||||
The IBM Cloud Private Hosted service automatically deploys IBM Cloud Private Hosted on your VMware vCenter Server instances. This service brings the power of microservices and containers to your VMware environment on IBM Cloud. With this service, you can extend the same familiar VMware and IBM Cloud Private operational model and tools from on-premises into the IBM Cloud.
|
||||
|
||||
For more information, see [IBM Cloud Private Hosted service](https://cloud.ibm.com/docs/services/vmwaresolutions/vmonic?topic=vmware-solutions-prod_overview#ibm-cloud-private-hosted).
|
||||
For more information, see [IBM Cloud Private Hosted service](https://cloud.ibm.com/docs/vmwaresolutions?topic=vmwaresolutions-icp_overview).
|
||||
|
|
|
|||
|
|
@ -269,8 +269,8 @@ public class KubeConfigFileClientExample {
|
|||
CoreV1Api api = new CoreV1Api();
|
||||
|
||||
// invokes the CoreV1Api client
|
||||
V1PodList list = api.listPodForAllNamespaces(null, null, null, null, null, null, null, null);
|
||||
System.out.Println("Listing all pods: ");
|
||||
V1PodList list = api.listPodForAllNamespaces(null, null, null, null, null, null, null, null, null);
|
||||
System.out.println("Listing all pods: ");
|
||||
for (V1Pod item : list.getItems()) {
|
||||
System.out.println(item.getMetadata().getName());
|
||||
}
|
||||
|
|
|
|||
|
|
@ -54,8 +54,7 @@ inheriting DNS. Set it to a valid file path to specify a file other than
|
|||
|
||||
## CoreDNS
|
||||
|
||||
CoreDNS is a general-purpose authoritative DNS server that can serve as cluster DNS, complying with the [dns specifications]
|
||||
(https://github.com/kubernetes/dns/blob/master/docs/specification.md).
|
||||
CoreDNS is a general-purpose authoritative DNS server that can serve as cluster DNS, complying with the [dns specifications](https://github.com/kubernetes/dns/blob/master/docs/specification.md).
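One hedged way to inspect the CoreDNS configuration a cluster is actually using, assuming CoreDNS was installed under the conventional ConfigMap name `coredns` in the `kube-system` namespace:

```shell
# Show the Corefile currently used by CoreDNS; the namespace and ConfigMap
# name assume a conventional installation and may differ in your cluster.
kubectl -n kube-system get configmap coredns -o yaml
```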
|
||||
|
||||
### CoreDNS ConfigMap options
|
||||
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ reviewers:
|
|||
title: Using NodeLocal DNSCache in Kubernetes clusters
|
||||
content_template: templates/task
|
||||
---
|
||||
|
||||
|
||||
{{% capture overview %}}
|
||||
{{< feature-state for_k8s_version="v1.18" state="stable" >}}
|
||||
This page provides an overview of NodeLocal DNSCache feature in Kubernetes.
|
||||
|
|
@ -27,7 +27,7 @@ NodeLocal DNSCache improves Cluster DNS performance by running a dns caching age
|
|||
|
||||
## Motivation
|
||||
|
||||
* With the current DNS architecture, it is possible that Pods with the highest DNS QPS have to reach out to a different node, if there is no local kube-dns/CoreDNS instance.
|
||||
* With the current DNS architecture, it is possible that Pods with the highest DNS QPS have to reach out to a different node, if there is no local kube-dns/CoreDNS instance.
|
||||
Having a local cache will help improve the latency in such scenarios.
|
||||
|
||||
* Skipping iptables DNAT and connection tracking will help reduce [conntrack races](https://github.com/kubernetes/kubernetes/issues/56903) and avoid UDP DNS entries filling up conntrack table.
|
||||
|
|
@ -45,7 +45,7 @@ Having a local cache will help improve the latency in such scenarios.
|
|||
This is the path followed by DNS Queries after NodeLocal DNSCache is enabled:
|
||||
|
||||
|
||||
{{< figure src="/images/docs/nodelocaldns.jpg" alt="NodeLocal DNSCache flow" title="Nodelocal DNSCache flow" caption="This image shows how NodeLocal DNSCache handles DNS queries." >}}
|
||||
{{< figure src="/images/docs/nodelocaldns.svg" alt="NodeLocal DNSCache flow" title="Nodelocal DNSCache flow" caption="This image shows how NodeLocal DNSCache handles DNS queries." >}}
|
||||
|
||||
## Configuration
|
||||
{{< note >}} The local listen IP address for NodeLocal DNSCache can be any IP in the 169.254.20.0/16 space or any other IP address that can be guaranteed to not collide with any existing IP. This document uses 169.254.20.10 as an example.
|
||||
|
|
@ -54,33 +54,33 @@ This is the path followed by DNS Queries after NodeLocal DNSCache is enabled:
|
|||
This feature can be enabled using the following steps:
|
||||
|
||||
* Prepare a manifest similar to the sample [`nodelocaldns.yaml`](https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml) and save it as `nodelocaldns.yaml.`
|
||||
* Substitute the variables in the manifest with the right values:
|
||||
* Substitute the variables in the manifest with the right values:
|
||||
|
||||
* kubedns=`kubectl get svc kube-dns -n kube-system -o jsonpath={.spec.clusterIP}`
|
||||
|
||||
* domain=`<cluster-domain>`
|
||||
|
||||
* kubedns=`kubectl get svc kube-dns -n kube-system -o jsonpath={.spec.clusterIP}`
|
||||
|
||||
* domain=`<cluster-domain>`
|
||||
|
||||
* localdns=`<node-local-address>`
|
||||
|
||||
|
||||
`<cluster-domain>` is "cluster.local" by default. `<node-local-address>` is the local listen IP address chosen for NodeLocal DNSCache.
|
||||
|
||||
* If kube-proxy is running in IPTABLES mode:
|
||||
|
||||
* If kube-proxy is running in IPTABLES mode:
|
||||
|
||||
``` bash
|
||||
sed -i "s/__PILLAR__LOCAL__DNS__/$localdns/g; s/__PILLAR__DNS__DOMAIN__/$domain/g; s/__PILLAR__DNS__SERVER__/$kubedns/g" nodelocaldns.yaml
|
||||
```
|
||||
|
||||
`__PILLAR__CLUSTER__DNS__` and `__PILLAR__UPSTREAM__SERVERS__` will be populated by the node-local-dns pods.
|
||||
|
||||
`__PILLAR__CLUSTER__DNS__` and `__PILLAR__UPSTREAM__SERVERS__` will be populated by the node-local-dns pods.
|
||||
In this mode, node-local-dns pods listen on both the kube-dns service IP as well as `<node-local-address>`, so pods can lookup DNS records using either IP address.
|
||||
|
||||
* If kube-proxy is running in IPVS mode:
|
||||
|
||||
* If kube-proxy is running in IPVS mode:
|
||||
|
||||
``` bash
|
||||
sed -i "s/__PILLAR__LOCAL__DNS__/$localdns/g; s/__PILLAR__DNS__DOMAIN__/$domain/g; s/__PILLAR__DNS__SERVER__//g; s/__PILLAR__CLUSTER__DNS__/$kubedns/g" nodelocaldns.yaml
|
||||
```
|
||||
In this mode, node-local-dns pods listen only on `<node-local-address>`. The node-local-dns interface cannot bind the kube-dns cluster IP since the interface used for IPVS loadbalancing already uses this address.
|
||||
In this mode, node-local-dns pods listen only on `<node-local-address>`. The node-local-dns interface cannot bind the kube-dns cluster IP since the interface used for IPVS loadbalancing already uses this address.
|
||||
`__PILLAR__UPSTREAM__SERVERS__` will be populated by the node-local-dns pods.
|
||||
|
||||
|
||||
* Run `kubectl create -f nodelocaldns.yaml`
|
||||
* If using kube-proxy in IPVS mode, `--cluster-dns` flag to kubelet needs to be modified to use `<node-local-address>` that NodeLocal DNSCache is listening on.
|
||||
Otherwise, there is no need to modify the value of the `--cluster-dns` flag, since NodeLocal DNSCache listens on both the kube-dns service IP as well as `<node-local-address>`.
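Putting the steps above together for the IPTABLES-mode case, a condensed sketch could look like this; the values follow the examples used on this page and should be adjusted for your cluster:

```shell
# Condensed IPTABLES-mode setup, using the variables defined above.
kubedns=$(kubectl get svc kube-dns -n kube-system -o jsonpath={.spec.clusterIP})
domain=cluster.local            # default cluster domain
localdns=169.254.20.10          # example local listen address from this page

sed -i "s/__PILLAR__LOCAL__DNS__/$localdns/g; s/__PILLAR__DNS__DOMAIN__/$domain/g; s/__PILLAR__DNS__SERVER__/$kubedns/g" nodelocaldns.yaml
kubectl create -f nodelocaldns.yaml
```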
|
||||
|
|
|
|||
|
|
@ -119,6 +119,7 @@ request to `/apis/batch/v1/namespaces/some-namespace/jobs/some-job-name`.
|
|||
}
|
||||
]
|
||||
```
|
||||
|
||||
{{< /note >}}
|
||||
|
||||
### Log backend
|
||||
|
|
@ -234,7 +235,7 @@ spec:
|
|||
url: "https://audit.app"
|
||||
```
|
||||
|
||||
For the complete API definition, see [AuditSink](/docs/reference/generated/kubernetes-api/v1.13/#auditsink-v1alpha1-auditregistration). Multiple objects will exist as independent solutions.
|
||||
For the complete API definition, see [AuditSink](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#auditsink-v1alpha1-auditregistration). Multiple objects will exist as independent solutions.
|
||||
The name of an AuditSink object must be a valid
|
||||
[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
|
||||
|
||||
|
|
@ -339,12 +340,12 @@ audit policies.
|
|||
[Fluentd][fluentd] is an open source data collector for unified logging layer.
|
||||
In this example, we will use fluentd to split audit events by different namespaces.
|
||||
|
||||
1. install [fluentd][fluentd_install_doc], fluent-plugin-forest and fluent-plugin-rewrite-tag-filter in the kube-apiserver node
|
||||
{{< note >}}
|
||||
Fluent-plugin-forest and fluent-plugin-rewrite-tag-filter are plugins for fluentd. You can get details about plugin installation from [fluentd plugin-management][fluentd_plugin_management_doc].
|
||||
{{< note >}}Fluent-plugin-forest and fluent-plugin-rewrite-tag-filter are plugins for fluentd. You can get details about plugin installation from [fluentd plugin-management][fluentd_plugin_management_doc].
|
||||
{{< /note >}}
|
||||
|
||||
1. create a config file for fluentd
|
||||
1. Install [fluentd][fluentd_install_doc], fluent-plugin-forest and fluent-plugin-rewrite-tag-filter in the kube-apiserver node
|
||||
|
||||
1. Create a config file for fluentd
|
||||
|
||||
```
|
||||
cat <<'EOF' > /etc/fluentd/config
|
||||
|
|
@ -399,19 +400,19 @@ Fluent-plugin-forest and fluent-plugin-rewrite-tag-filter are plugins for fluent
|
|||
EOF
|
||||
```
|
||||
|
||||
1. start fluentd
|
||||
1. Start fluentd
|
||||
|
||||
```shell
|
||||
fluentd -c /etc/fluentd/config -vv
|
||||
```
|
||||
|
||||
1. start kube-apiserver with the following options:
|
||||
1. Start kube-apiserver with the following options:
|
||||
|
||||
```shell
|
||||
--audit-policy-file=/etc/kubernetes/audit-policy.yaml --audit-log-path=/var/log/kube-audit --audit-log-format=json
|
||||
```
|
||||
|
||||
1. check audits for different namespaces in `/var/log/audit-*.log`
|
||||
1. Check audits for different namespaces in `/var/log/audit-*.log`
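A quick way to confirm the split worked, sketched under the assumption that fluentd writes one audit file per namespace matching the pattern above (the specific file name below is only an example):

```shell
# List the per-namespace audit log files produced by fluentd.
ls /var/log/audit-*.log

# Peek at recent events for one namespace; the file name is an assumption.
tail -n 5 /var/log/audit-kube-system.log
```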
|
||||
|
||||
### Use logstash to collect and distribute audit events from webhook backend
|
||||
|
||||
|
|
@ -490,8 +491,7 @@ Note that in addition to file output plugin, logstash has a variety of outputs t
|
|||
let users route data where they want. For example, users can emit audit events to elasticsearch
|
||||
plugin which supports full-text search and analytics.
|
||||
|
||||
|
||||
[kube-apiserver]: /docs/admin/kube-apiserver
|
||||
[kube-apiserver]: /docs/reference/command-line-tools-reference/kube-apiserver/
|
||||
[auditing-proposal]: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/auditing.md
|
||||
[auditing-api]: https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go
|
||||
[configure-helper]: https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh
@ -61,8 +61,7 @@ kubectl scale deployment hostnames --replicas=3
|
|||
deployment.apps/hostnames scaled
|
||||
```
|
||||
|
||||
{{< note >}}
|
||||
This is the same as if you had started the Deployment with the following
|
||||
Note that this is the same as if you had started the Deployment with the following
|
||||
YAML:
|
||||
|
||||
```yaml
|
||||
|
|
@ -89,7 +88,6 @@ spec:
|
|||
|
||||
The label "app" is automatically set by `kubectl create deployment` to the name of the
|
||||
Deployment.
|
||||
{{< /note >}}
|
||||
|
||||
You can confirm your Pods are running:
@ -195,7 +193,6 @@ hostnames ClusterIP 10.0.1.175 <none> 80/TCP 5s
|
|||
|
||||
Now you know that the Service exists.
|
||||
|
||||
{{< note >}}
|
||||
As before, this is the same as if you had started the Service with YAML:
|
||||
|
||||
```yaml
|
||||
|
|
@ -218,7 +215,6 @@ spec:
|
|||
In order to highlight the full range of configuration, the Service you created
|
||||
here uses a different port number than the Pods. For many real-world
|
||||
Services, these values might be the same.
|
||||
{{< /note >}}
|
||||
|
||||
## Does the Service work by DNS name?
@ -82,6 +82,11 @@ value of `/dev/termination-log`. By customizing this field, you can tell Kuberne
|
|||
to use a different file. Kubernetes use the contents from the specified file to
|
||||
populate the Container's status message on both success and failure.
|
||||
|
||||
The termination message is intended to be a brief final status, such as an assertion failure message.
The kubelet truncates messages that are longer than 4096 bytes. The total message length across all
containers is limited to 12 KiB. The default termination message path is `/dev/termination-log`.
You cannot set the termination message path after a Pod is launched.
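As a quick way to read back what the kubelet stored, you can query the Pod status directly; a minimal sketch, assuming a Pod named `termination-demo` (a hypothetical name) whose container has already terminated:

```shell
# Print the termination message recorded for the last terminated container
# in the hypothetical Pod "termination-demo".
kubectl get pod termination-demo \
  -o go-template="{{range .status.containerStatuses}}{{.lastState.terminated.message}}{{end}}"
```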
|
||||
|
||||
In the following example, the container writes termination messages to
|
||||
`/tmp/my-log` for Kubernetes to retrieve:
@ -14,7 +14,7 @@ in a Kubernetes Pod.
|
|||
|
||||
{{% capture prerequisites %}}
|
||||
|
||||
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
|
||||
{{< include "task-tutorial-prereqs.md" >}}
|
||||
|
||||
{{% /capture %}}
@ -29,12 +29,12 @@ that run in the Pod. To set environment variables, include the `env` or
|
|||
|
||||
In this exercise, you create a Pod that runs one container. The configuration
|
||||
file for the Pod defines an environment variable with name `DEMO_GREETING` and
|
||||
value `"Hello from the environment"`. Here is the configuration file for the
|
||||
value `"Hello from the environment"`. Here is the configuration manifest for the
|
||||
Pod:
|
||||
|
||||
{{< codenew file="pods/inject/envars.yaml" >}}
|
||||
|
||||
1. Create a Pod based on the YAML configuration file:
|
||||
1. Create a Pod based on that manifest:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/pods/inject/envars.yaml
|
||||
|
|
@ -46,7 +46,7 @@ Pod:
|
|||
kubectl get pods -l purpose=demonstrate-envars
|
||||
```
|
||||
|
||||
The output is similar to this:
|
||||
The output is similar to:
|
||||
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
|
|
@ -62,7 +62,8 @@ Pod:
|
|||
1. In your shell, run the `printenv` command to list the environment variables.
|
||||
|
||||
```shell
|
||||
root@envar-demo:/# printenv
|
||||
# Run this in the shell inside the container
|
||||
printenv
|
||||
```
|
||||
|
||||
The output is similar to this:
|
||||
|
|
@ -80,12 +81,19 @@ Pod:
|
|||
|
||||
{{< note >}}
|
||||
The environment variables set using the `env` or `envFrom` field
|
||||
will override any environment variables specified in the container image.
|
||||
override any environment variables specified in the container image.
|
||||
{{< /note >}}
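To see the override behaviour in action, you can apply a throwaway Pod; a minimal sketch, where `APP_MODE` is only an illustrative variable name:

```shell
# The value set in the manifest wins over any APP_MODE value baked into the
# container image (APP_MODE is a hypothetical variable name).
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: env-override-demo
spec:
  containers:
  - name: demo
    image: busybox
    command: ["sh", "-c", "echo APP_MODE=\$APP_MODE && sleep 3600"]
    env:
    - name: APP_MODE
      value: "from-manifest"
EOF
```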
|
||||
|
||||
## Using environment variables inside of your config
|
||||
|
||||
Environment variables that you define in a Pod's configuration can be used elsewhere in the configuration, for example in commands and arguments that you set for the Pod's containers. In the example configuration below, the `GREETING`, `HONORIFIC`, and `NAME` environment variables are set to `Warm greetings to`, `The Most Honorable`, and `Kubernetes`, respectively. Those environment variables are then used in the CLI arguments passed to the `env-print-demo` container.
|
||||
Environment variables that you define in a Pod's configuration can be used
|
||||
elsewhere in the configuration, for example in commands and arguments that
|
||||
you set for the Pod's containers.
|
||||
In the example configuration below, the `GREETING`, `HONORIFIC`, and
|
||||
`NAME` environment variables are set to `Warm greetings to`, `The Most
|
||||
Honorable`, and `Kubernetes`, respectively. Those environment variables
|
||||
are then used in the CLI arguments passed to the `env-print-demo`
|
||||
container.
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
@ -2,7 +2,7 @@
|
|||
title: Update API Objects in Place Using kubectl patch
|
||||
description: Use kubectl patch to update Kubernetes API objects in place. Do a strategic merge patch or a JSON merge patch.
|
||||
content_template: templates/task
|
||||
weight: 40
|
||||
weight: 50
|
||||
---
|
||||
|
||||
{{% capture overview %}}
|
||||
|
|
@ -380,7 +380,7 @@ Dans la CLI, les modes d'accès sont abrégés comme suit:
|
|||
Par exemple, un GCEPersistentDisk peut être monté en tant que ReadWriteOnce par un seul nœud ou ReadOnlyMany par plusieurs nœuds, mais pas en même temps.
|
||||
|
||||
| Volume Plugin | ReadWriteOnce | ReadOnlyMany | ReadWriteMany |
|
||||
|-:--------------------|-:-:--------------|-:-:--------------|-:-:----------------------------------------------|
|
||||
| :-: | :-: | :-: | :-: |
|
||||
| AWSElasticBlockStore | ✓ | - | - |
|
||||
| AzureFile | ✓ | ✓ | ✓ |
|
||||
| AzureDisk | ✓ | - | - |
@ -0,0 +1,5 @@
|
|||
---
|
||||
title: "Praktek-praktek Terbaik"
|
||||
weight: 60
|
||||
---
@ -0,0 +1,401 @@
|
|||
---
|
||||
title: Menjalankan klaster dalam beberapa zona
|
||||
weight: 10
|
||||
content_template: templates/concept
|
||||
---
|
||||
|
||||
{{% capture overview %}}
|
||||
|
||||
Laman ini menjelaskan tentang bagaimana menjalankan sebuah klaster dalam beberapa zona.
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture body %}}
|
||||
|
||||
## Pendahuluan
|
||||
|
||||
Kubernetes 1.2 menambahkan dukungan untuk menjalankan sebuah klaster dalam beberapa zona kegagalan (_multiple failure zones_)
|
||||
(GCE secara sederhana menyebutnya sebagai _"zones"_, AWS menyebutnya sebagai _"availability zones"_, dan di sini kita akan menyebutnya sebagai "zona").
|
||||
Fitur ini adalah versi sederhana dari fitur federasi klaster yang lebih luas (yang sebelumnya ditujukan pada
|
||||
sebuah nama panggilan yang ramah (_affectionate nickname_) ["Ubernetes"](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/multicluster/federation.md)).
|
||||
Federasi klaster yang penuh memungkinkan untuk menggabungkan
|
||||
klaster Kubernetes terpisah, yang berjalan pada wilayah atau penyedia cloud yang berbeda
|
||||
(baik dalam _datacenter_ atau _on-premise_). Namun banyak
|
||||
pengguna yang ingin menjalankan klaster Kubernetes dengan tingkat ketersediaan yang lebih, dalam beberapa zona
|
||||
dari satu penyedia cloud mereka, dan dukungan inilah yang akhirnya memperbolehkan fitur multi-zona dalam versi Kubernetes 1.2
|
||||
(sebelumnya fitur ini dikenal dengan nama panggilan "Ubernetes Lite").
|
||||
|
||||
Dukungan multi-zona sengaja dibuat terbatas: dimana satu klaster Kubernetes hanya dapat berjalan
|
||||
dalam beberapa zona, tetapi hanya pada wilayah yang sama (dan penyedia cloud yang sama pula). Hanya
|
||||
GCE dan AWS yang saat ini mendukung fitur ini secara otomatis (meskipun cukup mudah
|
||||
untuk menambahkan dukungan serupa untuk penyedia cloud yang lain atau bahkan untuk perangkat _baremetal_, hanya dengan mengatur
|
||||
label yang sesuai untuk ditambahkan ke Node dan volume).
|
||||
|
||||
|
||||
## Fungsionalitas
|
||||
|
||||
Ketika Node mulai dijalankan, kubelet secara otomatis menambahkan label
|
||||
informasi pada Node tersebut.
|
||||
|
||||
Kubernetes akan menyebarkan Pod secara otomatis dalam sebuah _controller_ replikasi
|
||||
atau Service lintas Node dalam sebuah klaster zona tunggal (untuk mengurangi dampak
|
||||
kegagalan). Dengan klaster multi-zona, perilaku penyebaran ini akan
|
||||
dilanjutkan hingga melintasi zona (untuk mengurangi dampak kegagalan dalam satu zona.) (Ini
|
||||
dicapai melalui opsi `SelectorSpreadPriority`). Hal tersebut adalah untuk upaya penempatan terbaik,
|
||||
apabila zona pada klaster kamu bersifat heterogen
|
||||
(mis. jumlah Node yang berbeda, tipe Node yang berbeda, atau
|
||||
persyaratan sumber daya Pod yang berbeda), yang akan mencegah dengan sempurna
|
||||
penyebaran Pod kamu untuk melintasi zona yang berbeda. Jika diinginkan, kamu bisa menggunakan
|
||||
zona yang homogen (jumlah dan jenis Node yang sama) untuk mengurangi
|
||||
probabilitas penyebaran yang tidak merata.
|
||||
|
||||
Pada saat volume persisten dibuat, _controller_ penerima `PersistentVolumeLabel`
|
||||
akan secara otomatis menambahkan label zona pada volume tersebut. Penjadwal (melalui
|
||||
predikat `VolumeZonePredicate`) kemudian akan memastikan bahwa Pod yang mengklaim
|
||||
suatu volume hanya akan ditempatkan pada zona yang sama dengan volume tersebut, karena volume
|
||||
tidak dapat di-_attach_ melintasi zona yang berbeda.
|
||||
|
||||
## Batasan
|
||||
|
||||
Ada beberapa batasan penting dari dukungan multi-zona:
|
||||
|
||||
* Kami berasumsi bahwa zona yang berbeda terletak secara berdekatan satu sama lain dalam
|
||||
jaringan, jadi kami tidak melakukan _routing_ yang sadar akan zona. Secara khusus, lalu lintas (_traffic_)
|
||||
yang berjalan melalui Service mungkin melintasi beberapa zona (bahkan ketika beberapa Pod yang mendukung Service itu
|
||||
berada pada zona yang sama dengan klien), dan hal ini dapat menimbulkan latensi dan biaya tambahan.
|
||||
|
||||
* Volume _zone-affinity_ hanya akan bekerja dengan PersistentVolume, dan tidak akan
|
||||
berfungsi apabila kamu secara langsung menentukan volume EBS dalam spesifikasi Pod (misalnya).
|
||||
|
||||
* Klaster tidak dapat melebarkan jangkauan cloud atau _region_ (fungsi ini akan membutuhkan
|
||||
dukungan penuh federasi).
|
||||
|
||||
* Meskipun Node kamu berada dalam beberapa zona, saat ini kube-up hanya membuat
|
||||
satu Node master secara bawaan (_default_). Karena Service memerlukan
|
||||
ketersediaan (_availability_) yang tinggi dan dapat mentolerir akan hilangnya sebuah zona, maka _control plane_
|
||||
diletakkan pada setiap zona. Pengguna yang menginginkan _control plane_ yang memiliki ketersediaan
|
||||
tinggi harus mengikuti instruksi [ketersediaan tinggi](/docs/admin/high-availability).
|
||||
|
||||
### Batasan Volume
|
||||
|
||||
Batasan berikut ditunjukkan dengan menggunakan [pengikatan volume yang sadar topologi](/id/docs/concepts/storage/storage-classes/#mode-volume-binding).
|
||||
|
||||
* Penyebaran zona volume StatefulSet yang menggunakan penyediaan secara dinamis, saat ini tidak sesuai dengan
|
||||
kebijakan afinitas atau anti-afinitas Pod.
|
||||
|
||||
* Jika nama StatefulSet berisi tanda hubung ("-"), maka penyebaran zona volume
|
||||
mungkin saja tidak menyediakan distribusi penyimpanan (_storage_) yang seragam di seluruh zona yang berbeda.
|
||||
|
||||
* Ketika menentukan beberapa PVC dalam spesifikasi Deployment atau Pod, StorageClass
|
||||
perlu dikonfigurasi untuk zona tunggal tertentu, atau PV perlu
|
||||
disediakan secara statis pada zona tertentu. Solusi lainnya adalah menggunakan sebuah
|
||||
StatefulSet, yang akan memastikan bahwa semua volume untuk sebuah replika
|
||||
disediakan dalam zona yang sama.
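Sebagai gambaran, berikut sketsa minimal sebuah StorageClass yang dibatasi pada satu zona melalui `allowedTopologies`; nama kelas, _provisioner_, dan zona di bawah ini hanyalah contoh asumsi:

```shell
# Sketsa: StorageClass yang hanya menyediakan volume di zona us-central1-a
# (nama "standard-single-zone" dan provisioner gce-pd hanyalah contoh).
kubectl apply -f - <<EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard-single-zone
provisioner: kubernetes.io/gce-pd
allowedTopologies:
- matchLabelExpressions:
  - key: failure-domain.beta.kubernetes.io/zone
    values:
    - us-central1-a
EOF
```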
|
||||
|
||||
## Panduan
|
||||
|
||||
Kita sekarang akan berjalan melalui pengaturan dan menggunakan multi-zona
|
||||
klaster pada GCE & AWS. Untuk melakukannya, kamu perlu mengaktifkan klaster penuh
|
||||
(dengan menentukan `MULTIZONE=true`), dan kemudian kamu menambahkan Node di zona tambahan
|
||||
dengan menjalankan `kube-up` lagi (dengan menetapkan opsi `KUBE_USE_EXISTING_MASTER=true`).
|
||||
|
||||
### Mengaktifkan klaster kamu
|
||||
|
||||
Buatlah klaster seperti biasa, tetapi teruskan opsi MULTIZONE untuk memberi tahu klaster untuk mengelola beberapa zona;
|
||||
dan membuat Node di zona us-central1-a.
|
||||
|
||||
GCE:
|
||||
|
||||
```shell
|
||||
curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a NUM_NODES=3 bash
|
||||
```
|
||||
|
||||
AWS:
|
||||
|
||||
```shell
|
||||
curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a NUM_NODES=3 bash
|
||||
```
|
||||
|
||||
Langkah ini akan mengaktifkan klaster seperti biasa, namun masih berjalan dalam satu zona
|
||||
(tetapi opsi `MULTIZONE=true` telah mengaktifkan kapabilitas multi-zona).
|
||||
|
||||
### Node yang telah diberi label
|
||||
|
||||
Lihatlah Node; dimana kamu bisa melihat Node tersebut diberi label sesuai dengan informasi zona.
|
||||
Node tersebut sejauh ini berada di zona `us-central1-a` (GCE) atau zona `us-west-2a` (AWS).
|
||||
Label dari Node itu adalah `failure-domain.beta.kubernetes.io/region` untuk informasi wilayah,
|
||||
dan `failure-domain.beta.kubernetes.io/zone` untuk informasi zona:
|
||||
|
||||
```shell
|
||||
kubectl get nodes --show-labels
|
||||
```
|
||||
|
||||
Tampilan akan seperti dibawah ini:
|
||||
|
||||
```shell
|
||||
NAME STATUS ROLES AGE VERSION LABELS
|
||||
kubernetes-master Ready,SchedulingDisabled <none> 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master
|
||||
kubernetes-minion-87j9 Ready <none> 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9
|
||||
kubernetes-minion-9vlv Ready <none> 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
|
||||
kubernetes-minion-a12q Ready <none> 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q
|
||||
```
|
||||
|
||||
### Menambah lebih banyak Node di zona kedua
|
||||
|
||||
Mari kita tambahkan sekumpulan Node ke dalam klaster yang ada, dengan menggunakan kembali
|
||||
master yang ada, namun dijalankan pada zona yang berbeda (zona `us-central1-b` atau zona `us-west-2b`).
|
||||
Kemudian kita jalankan kube-up lagi, tetapi dengan menentukan opsi `KUBE_USE_EXISTING_MASTER=true`
|
||||
sehingga kube-up tidak akan membuat master baru, tetapi akan menggunakan kembali master yang dibuat sebelumnya.
|
||||
|
||||
GCE:
|
||||
|
||||
```shell
|
||||
KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-b NUM_NODES=3 kubernetes/cluster/kube-up.sh
|
||||
```
|
||||
|
||||
Pada AWS, kita juga perlu menentukan CIDR jaringan sebagai tambahan
|
||||
subnet, bersama dengan alamat IP internal dari master:
|
||||
|
||||
```shell
|
||||
KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2b NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.1.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh
|
||||
```
|
||||
|
||||
Lihat lagi Node; dimana 3 Node lainnya harus sudah dijalankan dan ditandai
|
||||
berada di `us-central1-b`:
|
||||
|
||||
```shell
|
||||
kubectl get nodes --show-labels
|
||||
```
|
||||
|
||||
Hasil tampilan akan terlihat seperti dibawah ini:
|
||||
|
||||
```shell
|
||||
NAME STATUS ROLES AGE VERSION LABELS
|
||||
kubernetes-master Ready,SchedulingDisabled <none> 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master
|
||||
kubernetes-minion-281d Ready <none> 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d
|
||||
kubernetes-minion-87j9 Ready <none> 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9
|
||||
kubernetes-minion-9vlv Ready <none> 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
|
||||
kubernetes-minion-a12q Ready <none> 17m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q
|
||||
kubernetes-minion-pp2f Ready <none> 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-pp2f
|
||||
kubernetes-minion-wf8i Ready <none> 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-wf8i
|
||||
```
|
||||
|
||||
### Afinitas Volume
|
||||
|
||||
Buatlah sebuah volume dengan menggunakan pembuatan volume yang dinamis (hanya PersistentVolume yang didukung untuk afinitas zona):
|
||||
|
||||
```bash
|
||||
kubectl apply -f - <<EOF
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "PersistentVolumeClaim",
|
||||
"metadata": {
|
||||
"name": "claim1",
|
||||
"annotations": {
|
||||
"volume.alpha.kubernetes.io/storage-class": "foo"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"accessModes": [
|
||||
"ReadWriteOnce"
|
||||
],
|
||||
"resources": {
|
||||
"requests": {
|
||||
"storage": "5Gi"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
EOF
|
||||
```
|
||||
|
||||
{{< note >}}
|
||||
Kubernetes versi 1.3+ akan mendistribusikan klaim PV yang dinamis di seluruh
|
||||
zona yang telah dikonfigurasi. Untuk versi 1.2, volume persisten yang dinamis selalu dibuat di zona master klaster
|
||||
(yaitu `us-central1-a`/`us-west-2a`); masalah tersebut diangkat pada
|
||||
([#23330](https://github.com/kubernetes/kubernetes/issues/23330))
|
||||
dan telah diselesaikan pada versi 1.3+.
|
||||
{{< /note >}}
|
||||
|
||||
Sekarang marilah kita memvalidasi bahwa Kubernetes secara otomatis memberikan label zona & wilayah di mana PV itu dibuat.
|
||||
|
||||
```shell
|
||||
kubectl get pv --show-labels
|
||||
```
|
||||
|
||||
|
||||
Hasil tampilan akan terlihat seperti dibawah ini:
|
||||
|
||||
```shell
|
||||
NAME CAPACITY ACCESSMODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE LABELS
|
||||
pv-gce-mj4gm 5Gi RWO Retain Bound default/claim1 manual 46s failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a
|
||||
```
|
||||
|
||||
Kemudian sekarang kita akan membuat Pod yang menggunakan klaim akan volume persisten.
|
||||
Karena volume pada GCE PDs / AWS EBS tidak dapat di-_attach_ melewati zona yang berbeda,
|
||||
hal ini berarti bahwa Pod ini hanya dapat dibuat pada zona yang sama dengan volume tersebut:
|
||||
|
||||
|
||||
```shell
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: mypod
|
||||
spec:
|
||||
containers:
|
||||
- name: myfrontend
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- mountPath: "/var/www/html"
|
||||
name: mypd
|
||||
volumes:
|
||||
- name: mypd
|
||||
persistentVolumeClaim:
|
||||
claimName: claim1
|
||||
EOF
|
||||
```
|
||||
|
||||
Perhatikan bahwa Pod secara otomatis dibuat pada zona yang sama dengan volume, karena
|
||||
pada umumnya lampiran lintas zona tidak diizinkan oleh penyedia cloud:
|
||||
|
||||
```shell
|
||||
kubectl describe pod mypod | grep Node
|
||||
```
|
||||
|
||||
```shell
|
||||
Node: kubernetes-minion-9vlv/10.240.0.5
|
||||
```
|
||||
|
||||
Kemudian cek label Node:
|
||||
|
||||
```shell
|
||||
kubectl get node kubernetes-minion-9vlv --show-labels
|
||||
```
|
||||
|
||||
```shell
|
||||
NAME STATUS AGE VERSION LABELS
|
||||
kubernetes-minion-9vlv Ready 22m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
|
||||
```
|
||||
|
||||
### Pod yang tersebar melintasi zona yang berbeda
|
||||
|
||||
Pod dalam _controller_ atau Service replikasi tersebar secara otomatis
|
||||
melintasi zona yang berbeda. Pertama-tama, mari kita luncurkan lebih banyak Node di zona ketiga:
|
||||
|
||||
GCE:
|
||||
|
||||
```shell
|
||||
KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-f NUM_NODES=3 kubernetes/cluster/kube-up.sh
|
||||
```
|
||||
|
||||
AWS:
|
||||
|
||||
```shell
|
||||
KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2c NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.2.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh
|
||||
```
|
||||
|
||||
Pastikan bahwa kamu mempunyai Node dalam 3 zona berbeda:
|
||||
|
||||
```shell
|
||||
kubectl get nodes --show-labels
|
||||
```
|
||||
|
||||
Buatlah contoh dengan program guestbook-go, yang mencakup RC dengan ukuran 3, dan menjalankan sebuah aplikasi web sederhana:
|
||||
|
||||
```shell
|
||||
find kubernetes/examples/guestbook-go/ -name '*.json' | xargs -I {} kubectl apply -f {}
|
||||
```
|
||||
|
||||
Beberapa Pod seharusnya tersebar di ketiga zona semuanya:
|
||||
|
||||
```shell
|
||||
kubectl describe pod -l app=guestbook | grep Node
|
||||
```
|
||||
|
||||
```shell
|
||||
Node: kubernetes-minion-9vlv/10.240.0.5
|
||||
Node: kubernetes-minion-281d/10.240.0.8
|
||||
Node: kubernetes-minion-olsh/10.240.0.11
|
||||
```
|
||||
|
||||
```shell
|
||||
kubectl get node kubernetes-minion-9vlv kubernetes-minion-281d kubernetes-minion-olsh --show-labels
|
||||
```
|
||||
|
||||
```shell
|
||||
NAME STATUS ROLES AGE VERSION LABELS
|
||||
kubernetes-minion-9vlv Ready <none> 34m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
|
||||
kubernetes-minion-281d Ready <none> 20m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d
|
||||
kubernetes-minion-olsh Ready <none> 3m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-f,kubernetes.io/hostname=kubernetes-minion-olsh
|
||||
```
|
||||
|
||||
_Load-balancer_ menjangkau semua zona dalam satu klaster; program contoh guestbook-go
|
||||
sudah termasuk contoh Service dengan beban seimbang (_load-balanced service_):
|
||||
|
||||
```shell
|
||||
kubectl describe service guestbook | grep LoadBalancer.Ingress
|
||||
```
|
||||
|
||||
Hasil tampilan akan terlihat seperti di bawah ini:
|
||||
|
||||
```shell
|
||||
LoadBalancer Ingress: 130.211.126.21
|
||||
```
|
||||
|
||||
Atur alamat IP di atas:
|
||||
|
||||
```shell
|
||||
export IP=130.211.126.21
|
||||
```
|
||||
|
||||
Telusurilah dengan curl melalui alamat IP tersebut:
|
||||
|
||||
```shell
|
||||
curl -s http://${IP}:3000/env | grep HOSTNAME
|
||||
```
|
||||
|
||||
Hasil tampilan akan terlihat seperti di bawah ini:
|
||||
|
||||
```shell
|
||||
"HOSTNAME": "guestbook-44sep",
|
||||
```
|
||||
|
||||
Kemudian, telusurilah beberapa kali:
|
||||
|
||||
```shell
|
||||
(for i in `seq 20`; do curl -s http://${IP}:3000/env | grep HOSTNAME; done) | sort | uniq
|
||||
```
|
||||
|
||||
Hasil tampilan akan terlihat seperti dibawah ini:
|
||||
|
||||
```shell
|
||||
"HOSTNAME": "guestbook-44sep",
|
||||
"HOSTNAME": "guestbook-hum5n",
|
||||
"HOSTNAME": "guestbook-ppm40",
|
||||
```
|
||||
|
||||
_Load balancer_ telah menargetkan ke semua Pod dengan benar, meskipun semuanya berada di beberapa zona yang berbeda.
|
||||
|
||||
### Menghentikan Klaster
|
||||
Apabila kamu sudah selesai, maka bersihkanlah:
|
||||
|
||||
GCE:
|
||||
|
||||
```shell
|
||||
KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-f kubernetes/cluster/kube-down.sh
|
||||
KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-b kubernetes/cluster/kube-down.sh
|
||||
KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a kubernetes/cluster/kube-down.sh
|
||||
```
|
||||
|
||||
AWS:
|
||||
|
||||
```shell
|
||||
KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2c kubernetes/cluster/kube-down.sh
|
||||
KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2b kubernetes/cluster/kube-down.sh
|
||||
KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a kubernetes/cluster/kube-down.sh
|
||||
```
|
||||
|
||||
{{% /capture %}}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
---
|
||||
title: Lingkungan Produksi
|
||||
weight: 30
|
||||
---
|
||||
|
|
@ -0,0 +1,638 @@
|
|||
---
|
||||
title: Membuat sebuah klaster dengan control-plane tunggal menggunakan kubeadm
|
||||
content_template: templates/task
|
||||
weight: 30
|
||||
---
|
||||
|
||||
{{% capture overview %}}
|
||||
|
||||
Perkakas <img src="https://raw.githubusercontent.com/kubernetes/kubeadm/master/logos/stacked/color/kubeadm-stacked-color.png" align="right" width="150px">`kubeadm` membantu kamu membuat sebuah klaster Kubernetes minimum yang layak dan sesuai dengan _best practice_. Bahkan, kamu dapat menggunakan `kubeadm` untuk membuat sebuah klaster yang lolos [uji Kubernetes Conformance](https://kubernetes.io/blog/2017/10/software-conformance-certification).
|
||||
`kubeadm` juga mendukung fungsi siklus hidup (_lifecycle_)
|
||||
klaster lainnya, seperti [_bootstrap token_](/docs/reference/access-authn-authz/bootstrap-tokens/) dan pembaruan klaster (_cluster upgrade_).
|
||||
|
||||
`kubeadm` merupakan perkakas yang bagus jika kamu membutuhkan:
|
||||
|
||||
- Sebuah cara yang sederhana untuk kamu mencoba Kubernetes, mungkin untuk pertama kalinya.
|
||||
- Sebuah cara bagi pengguna lama (_existing users_) untuk mengotomatiskan penyetelan sebuah klaster dan menguji aplikasi mereka.
|
||||
- Sebuah komponen dasar pada ekosistem lain dan/atau perkakas penginstal lain dengan cakupan
|
||||
yang lebih luas.
|
||||
|
||||
Kamu dapat menginstal dan menggunakan `kubeadm` pada berbagai macam mesin: laptop milikmu, sekelompok
|
||||
server di _cloud_, sebuah Raspberry Pi, dan lain-lain. Baik itu men-_deploy_ pada
|
||||
_cloud_ ataupun _on-premise_, kamu dapat mengintegrasikan `kubeadm` pada sistem _provisioning_ seperti
|
||||
Ansible atau Terraform.
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture prerequisites %}}
|
||||
|
||||
Untuk mengikuti panduan ini, kamu membutuhkan:
|
||||
|
||||
- Satu mesin atau lebih, yang menjalankan sistem operasi Linux yang kompatibel dengan deb atau rpm; sebagai contoh: Ubuntu atau CentOS.
|
||||
- 2 GiB atau lebih RAM per mesin--kurang dari nilai tersebut akan menyisakan sedikit ruang untuk
|
||||
aplikasi-aplikasimu.
|
||||
- Sedikitnya 2 CPU pada mesin yang akan kamu gunakan sebagai Node _control-plane_.
|
||||
- Koneksi internet pada seluruh mesin pada klaster. Kamu dapat menggunakan internet
|
||||
publik ataupun pribadi.
|
||||
|
||||
|
||||
Kamu juga harus menggunakan versi `kubeadm` yang dapat men-_deploy_ versi
|
||||
Kubernetes yang ingin kamu gunakan pada klaster barumu.
|
||||
|
||||
[Kebijakan dukungan versi Kubernetes dan _version skew_](https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions) juga berlaku pada `kubeadm` dan Kubernetes secara umum.
|
||||
Periksa kebijakan tersebut untuk mempelajari tentang versi Kubernetes dan `kubeadm`
|
||||
mana saja yang didukung. Laman ini ditulis untuk Kubernetes {{< param "version" >}}.
|
||||
|
||||
Fitur `kubeadm` secara umum berstatus _General Availability_ (GA). Beberapa sub-fitur sedang
|
||||
berada dalam pengembangan. Implementasi pembuatan klaster dapat berubah
|
||||
sedikit seiring dengan berevolusinya kubeadm, namun secara umum implementasinya sudah cukup stabil.
|
||||
|
||||
{{< note >}}
|
||||
Semua perintah di dalam `kubeadm alpha`, sesuai definisi, didukung pada level _alpha_.
|
||||
{{< /note >}}
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture steps %}}
|
||||
|
||||
## Tujuan
|
||||
|
||||
* Menginstal Kubernetes klaster dengan _control-plane_ tunggal atau [klaster dengan ketersediaan tinggi](/docs/setup/production-environment/tools/kubeadm/high-availability/)
|
||||
* Menginstal jaringan Pod pada klaster sehingga Pod dapat
|
||||
berinteraksi satu sama lain
|
||||
|
||||
## Instruksi
|
||||
|
||||
### Menginstal kubeadm pada hos
|
||||
|
||||
Lihat ["Menginstal kubeadm"](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/).
|
||||
|
||||
{{< note >}}
|
||||
Jika kamu sudah menginstal kubeadm sebelumnya, jalankan `apt-get update &&
|
||||
apt-get upgrade` atau `yum update` untuk mendapatkan versi kubeadm paling baru.
|
||||
|
||||
Ketika kamu melakukan pembaruan, kubelet melakukan _restart_ setiap beberapa detik sambil menunggu dalam kondisi _crashloop_ sampai
|
||||
kubeadm memberikan perintah yang harus dilakukan. _Crashloop_ ini memang diantisipasi dan normal.
|
||||
Setelah kamu menginisialisasi _control-plane_, kubelet akan berjalan normal.
|
||||
{{< /note >}}
|
||||
|
||||
### Menginisialisasi Node _control-plane_
|
||||
|
||||
Node _control-plane_ adalah mesin dimana komponen-komponen _control plane_ berjalan, termasuk
|
||||
{{< glossary_tooltip term_id="etcd" >}} (basis data klaster) dan
|
||||
{{< glossary_tooltip text="API Server" term_id="kube-apiserver" >}}
|
||||
(yang akan berkomunikasi dengan perkakas _command line_ {{< glossary_tooltip text="kubectl" term_id="kubectl" >}}.
|
||||
|
||||
1. (Direkomendasikan) Jika kamu berencana untuk memperbarui klaster `kubeadm` dengan _control-plane_ tunggal
|
||||
menjadi ketersediaan tinggi kamu harus menentukan `--control-plane-endpoint` agar mengarah ke _endpoint_ yang digunakan bersama
|
||||
untuk semua Node _control-plane_. _Endpoint_ tersebut dapat berupa nama DNS atau sebuah alamat IP dari _load-balancer_.
|
||||
2. Pilih _add-on_ jaringan Pod, dan pastikan apakah diperlukan argumen untuk
|
||||
diberikan pada `kubeadm init`. Tergantung
|
||||
penyedia pihak ketiga yang kamu pilih, kamu mungkin harus mengatur `--pod-network-cidr` dengan nilai
|
||||
yang spesifik pada penyedia tertentu. Lihat [Menginstal _add-on_ jaringan Pod](#jaringan-pod).
|
||||
3. (Opsional) Sejak versi 1.14, `kubeadm` mencoba untuk mendeteksi _runtime_ kontainer pada Linux
|
||||
dengan menggunakan daftar _domain socket path_ yang umum diketahui. Untuk menggunakan _runtime_ kontainer yang berbeda atau
|
||||
jika ada lebih dari satu yang terpasang pada Node yang digunakan, tentukan argumen `--cri-socket`
|
||||
pada `kubeadm init`. Lihat [Menginstal _runtime_](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-runtime).
|
||||
4. (Opsional) Kecuali ditentukan sebelumnya, `kubeadm` akan menggunakan antarmuka jaringan yang diasosiasikan
|
||||
dengan _default gateway_ untuk mengatur alamat _advertise_ untuk API Server pada Node _control-plane_ ini.
|
||||
Untuk menggunakan antarmuka jaringan yang berbeda, tentukan argumen `--apiserver-advertise-address=<ip-address>`
|
||||
pada `kubeadm init`. Untuk men-_deploy_ klaster Kubernetes IPv6 menggunakan pengalamatan IPv6, kamu
|
||||
harus menentukan alamat IPv6, sebagai contoh `--apiserver-advertise-address=fd00::101`
|
||||
5. (Opsional) Jalankan `kubeadm config images pull` sebelum `kubeadm init` untuk memastikan
|
||||
konektivitas ke _container image registry_ gcr.io.
|
||||
|
||||
Untuk menginisialisasi Node _control-plane_ jalankan:
|
||||
|
||||
```bash
|
||||
kubeadm init <args>
|
||||
```
|
||||
|
||||
### Pertimbangan mengenai apiserver-advertise-address dan ControlPlaneEndpoint
|
||||
|
||||
Meski `--apiserver-advertise-address` dapat digunakan untuk mengatur alamat _advertise_ untuk server
|
||||
API pada Node _control-plane_ ini, `--control-plane-endpoint` dapat digunakan untuk mengatur _endpoint_ yang digunakan bersama
|
||||
untuk seluruh Node _control-plane_.
|
||||
|
||||
`--control-plane-endpoint` tidak hanya mengizinkan alamat IP tetapi juga nama DNS yang dapat dipetakan ke alamat IP.
|
||||
Silakan hubungi administrator jaringan kamu untuk mengevaluasi solusi-solusi yang mempertimbangkan pemetaan tersebut.
|
||||
|
||||
Berikut contoh pemetaannya:
|
||||
|
||||
```
|
||||
192.168.0.102 cluster-endpoint
|
||||
```
|
||||
|
||||
Di mana `192.168.0.102` merupakan alamat IP dari Node ini dan `cluster-endpoint` merupakan nama DNS _custom_ yang dipetakan pada IP ini.
|
||||
Hal ini memungkinkan kamu untuk memberikan `--control-plane-endpoint=cluster-endpoint` pada `kubeadm init` dan memberikan nama DNS yang sama pada
|
||||
`kubeadm join`. Kemudian kamu dapat memodifikasi `cluster-endpoint` untuk mengarah pada alamat _load-balancer_ dalam skenario
|
||||
ketersediaan tinggi (_highly available_).
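Sebagai sketsa minimal yang mengikuti contoh pemetaan di atas (nama `cluster-endpoint` dan CIDR jaringan Pod hanyalah contoh; sesuaikan dengan _add-on_ jaringan yang kamu pilih):

```shell
# Inisialisasi Node control-plane pertama dengan endpoint bersama.
kubeadm init --control-plane-endpoint=cluster-endpoint --pod-network-cidr=192.168.0.0/16
```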
|
||||
|
||||
Mengubah klaster _control plane_ tunggal yang dibuat tanpa `--control-plane-endpoint` menjadi klaster dengan ketersediaan tinggi
|
||||
tidak didukung oleh kubeadm.
|
||||
|
||||
### Informasi lebih lanjut
|
||||
|
||||
Untuk informasi lebih lanjut mengenai argumen-argumen `kubeadm init`, lihat [panduan referensi kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/).
|
||||
|
||||
Untuk daftar pengaturan konfigurasi yang lengkap, lihat [dokumentasi berkas konfigurasi](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file).
|
||||
|
||||
Untuk menyetel komponen-komponen _control plane_, termasuk pemasangan IPv6 opsional pada _liveness probe_ untuk komponen-komponen _control plane_ dan server etcd, berikan argumen ekstra pada tiap komponen seperti yang didokumentasikan pada [argumen-argumen _custom_](/docs/setup/production-environment/tools/kubeadm/control-plane-flags/).
|
||||
|
||||
Untuk menjalankan `kubeadm init` lagi, sebelumnya kamu harus [membongkar klaster](#pembongkaran).
|
||||
|
||||
Jika kamu menggabungkan sebuah Node dengan arsitektur yang berbeda ke klastermu, pastikan DaemonSet yang di-_deploy_
|
||||
memiliki _image_ kontainer yang mendukung arsitektur tersebut.
|
||||
|
||||
Pertama-tama `kubeadm init` akan menjalankan sekumpulan _precheck_ untuk memastikan mesin
|
||||
siap untuk menjalankan Kubernetes. Kumpulan _precheck_ ini menunjukkan peringatan-peringatan dan akan berhenti jika terjadi kesalahan. Kemudian `kubeadm init`
|
||||
akan mengunduh dan menginstal komponen-komponen _control plane_ klaster. Hal ini membutuhkan waktu beberapa menit.
|
||||
Keluaran yang dihasilkan terlihat seperti berikut ini:
|
||||
|
||||
```none
|
||||
[init] Using Kubernetes version: vX.Y.Z
|
||||
[preflight] Running pre-flight checks
|
||||
[preflight] Pulling images required for setting up a Kubernetes cluster
|
||||
[preflight] This might take a minute or two, depending on the speed of your internet connection
|
||||
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
|
||||
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
|
||||
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
|
||||
[kubelet-start] Activating the kubelet service
|
||||
[certs] Using certificateDir folder "/etc/kubernetes/pki"
|
||||
[certs] Generating "etcd/ca" certificate and key
|
||||
[certs] Generating "etcd/server" certificate and key
|
||||
[certs] etcd/server serving cert is signed for DNS names [kubeadm-cp localhost] and IPs [10.138.0.4 127.0.0.1 ::1]
|
||||
[certs] Generating "etcd/healthcheck-client" certificate and key
|
||||
[certs] Generating "etcd/peer" certificate and key
|
||||
[certs] etcd/peer serving cert is signed for DNS names [kubeadm-cp localhost] and IPs [10.138.0.4 127.0.0.1 ::1]
|
||||
[certs] Generating "apiserver-etcd-client" certificate and key
|
||||
[certs] Generating "ca" certificate and key
|
||||
[certs] Generating "apiserver" certificate and key
|
||||
[certs] apiserver serving cert is signed for DNS names [kubeadm-cp kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.138.0.4]
|
||||
[certs] Generating "apiserver-kubelet-client" certificate and key
|
||||
[certs] Generating "front-proxy-ca" certificate and key
|
||||
[certs] Generating "front-proxy-client" certificate and key
|
||||
[certs] Generating "sa" key and public key
|
||||
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
|
||||
[kubeconfig] Writing "admin.conf" kubeconfig file
|
||||
[kubeconfig] Writing "kubelet.conf" kubeconfig file
|
||||
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
|
||||
[kubeconfig] Writing "scheduler.conf" kubeconfig file
|
||||
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
|
||||
[control-plane] Creating static Pod manifest for "kube-apiserver"
|
||||
[control-plane] Creating static Pod manifest for "kube-controller-manager"
|
||||
[control-plane] Creating static Pod manifest for "kube-scheduler"
|
||||
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
|
||||
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
|
||||
[apiclient] All control plane components are healthy after 31.501735 seconds
|
||||
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
|
||||
[kubelet] Creating a ConfigMap "kubelet-config-X.Y" in namespace kube-system with the configuration for the kubelets in the cluster
|
||||
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kubeadm-cp" as an annotation
|
||||
[mark-control-plane] Marking the node kubeadm-cp as control-plane by adding the label "node-role.kubernetes.io/master=''"
|
||||
[mark-control-plane] Marking the node kubeadm-cp as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
|
||||
[bootstrap-token] Using token: <token>
|
||||
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
|
||||
[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
|
||||
[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
|
||||
[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
|
||||
[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
|
||||
[addons] Applied essential addon: CoreDNS
|
||||
[addons] Applied essential addon: kube-proxy
|
||||
|
||||
Your Kubernetes control-plane has initialized successfully!
|
||||
|
||||
To start using your cluster, you need to run the following as a regular user:
|
||||
|
||||
mkdir -p $HOME/.kube
|
||||
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
|
||||
sudo chown $(id -u):$(id -g) $HOME/.kube/config
|
||||
|
||||
You should now deploy a Pod network to the cluster.
|
||||
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
|
||||
/docs/concepts/cluster-administration/addons/
|
||||
|
||||
You can now join any number of machines by running the following on each node
|
||||
as root:
|
||||
|
||||
kubeadm join <control-plane-host>:<control-plane-port> --token <token> --discovery-token-ca-cert-hash sha256:<hash>
|
||||
```
|
||||
|
||||
Untuk membuat kubectl bekerja bagi pengguna _non-root_, jalankan perintah-perintah berikut, yang juga merupakan
|
||||
bagian dari keluaran `kubeadm init`:
|
||||
|
||||
```bash
|
||||
mkdir -p $HOME/.kube
|
||||
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
|
||||
sudo chown $(id -u):$(id -g) $HOME/.kube/config
|
||||
```
|
||||
|
||||
Secara alternatif, jika kamu adalah pengguna `root`, kamu dapat menjalankan:
|
||||
|
||||
```bash
|
||||
export KUBECONFIG=/etc/kubernetes/admin.conf
|
||||
```
|
||||
|
||||
Buatlah catatan dari perintah `kubeadm join` yang dihasilkan `kubeadm init`. Kamu
|
||||
membutuhkan perintah ini untuk [menggabungkan Node-Node ke klaster](#menggabungkan-node).
|
||||
|
||||
_Token_ digunakan untuk otentikasi bersama (_mutual authentication_) antara Node _control-plane_ dan Node-Node yang
|
||||
akan bergabung. _Token_ yang didapat di sini bersifat rahasia. Simpan dengan aman, karena siapapun yang memiliki token tersebut
|
||||
dapat menambahkan Node-Node yang dapat mengotentikasikan diri ke klaster. Kamu dapat menampilkan daftar _token_,
|
||||
membuat, dan menghapus _token_ dengan perintah `kubeadm token`. Lihat
|
||||
[panduan referensi kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm-token/).
|
||||
|
||||
### Menginstal _add-on_ jaringan Pod {#jaringan-pod}
|
||||
|
||||
{{< caution >}}
|
||||
Bagian ini berisi informasi penting mengenai penyetelan jejaring dan
|
||||
urutan _deployment_.
|
||||
Baca seluruh saran ini dengan saksama sebelum melanjutkan.
|
||||
|
||||
**Kamu harus men-_deploy_
|
||||
_add-on_ jaringan Pod berbasis {{< glossary_tooltip text="_Container Network Interface_" term_id="cni" >}}
|
||||
(CNI) sehingga Pod dapat berkomunikasi satu sama lain.
|
||||
DNS klaster (CoreDNS) tidak akan menyala sebelum jaringan dipasangkan.**
|
||||
|
||||
- Perlu diperhatikan bahwa jaringan Pod tidak boleh tumpang tindih dengan jaringan hos
|
||||
manapun: kamu akan menemui beberapa masalah jika terjadi tumpang tindih.
|
||||
(Jika kamu menemukan adanya bentrokan antara jaringan Pod
|
||||
pilihan _plugin_ jaringan dengan jaringan hos, kamu harus memikirkan blok
|
||||
CIDR yang cocok untuk digunakan, kemudian menggunakannya pada saat `kubeadm init` dengan
|
||||
`--pod-network-cidr`, atau sebagai penggantinya pada YAML _plugin_ jaringan kamu).
|
||||
|
||||
- Secara bawaan, `kubeadm` mengatur klastermu untuk menggunakan dan melaksanakan penggunaan
|
||||
[RBAC](/docs/reference/access-authn-authz/rbac/) (_role based access control_).
|
||||
Pastikan _plugin_ jaringan Pod mendukung RBAC, dan begitu juga seluruh manifes
|
||||
yang kamu gunakan untuk men-_deploy_-nya.
|
||||
|
||||
- Jika kamu ingin menggunakan IPv6--baik jaringan _dual-stack_, ataupun jaringan _single-stack_ IPv6
|
||||
--untuk klastermu, pastikan _plugin_ jaringan Pod
|
||||
mendukung IPv6.
|
||||
Dukungan IPv6 telah ditambahkan pada CNI sejak [v0.6.0](https://github.com/containernetworking/cni/releases/tag/v0.6.0).
|
||||
|
||||
{{< /caution >}}
|
||||
|
||||
{{< note >}}
|
||||
Saat ini Calico adalah satu-satunya _plugin_ CNI yang dapat menerima uji e2e (_end-to-end_) oleh proyek kubeadm.
|
||||
Jika kamu menemukan isu terkait _plugin_ CNI kamu harus membuat tiket pada pelacak isu masing-masing _plugin_,
|
||||
bukan pada pelacak isu kubeadm maupun kubernetes.
|
||||
{{< /note >}}
|
||||
|
||||
Beberapa proyek eksternal menyediakan jaringan Pod Kubernetes menggunakan CNI, beberapa di antaranya juga
|
||||
mendukung [Network Policy](/docs/concepts/services-networking/networkpolicies/).
|
||||
|
||||
Lihat daftar
|
||||
[_add-on_ jejaring dan _network policy_](https://kubernetes.io/docs/concepts/cluster-administration/addons/#networking-and-network-policy) yang tersedia.
|
||||
|
||||
Kamu dapat menginstal _add-on_ jaringan Pod dengan perintah berikut pada Node
|
||||
_control-plane_ atau Node yang memiliki kredensial kubeconfig:
|
||||
|
||||
```bash
|
||||
kubectl apply -f <add-on.yaml>
|
||||
```
|
||||
|
||||
Kamu hanya dapat menginstal satu jaringan Pod per klaster.
|
||||
Di bawah ini kamu dapat menemukan instruksi instalasi untuk beberapa _plugin_ jaringan Pod yang populer:
|
||||
|
||||
{{< tabs name="tabs-pod-install" >}}
|
||||
|
||||
{{% tab name="Calico" %}}
|
||||
[Calico](https://docs.projectcalico.org/latest/introduction/) merupakan penyedia jejaring dan _network policy_. Calico mendukung sekumpulan opsi jejaring yang fleksibel sehingga kamu dapat memilih opsi yang paling efisien untuk situasimu, termasuk jaringan _non-overlay_ dan _overlay_, dengan atau tanpa BGP. Calico menggunakan mesin yang sama untuk melaksanakan _network policy_ pada hos, Pod, dan (jika menggunakan Istio & Envoy) aplikasi yang berada pada lapisan _service mesh_. Calico bekerja pada beberapa arsitektur, meliputi `amd64`, `arm64`, dan `ppc64le`.
|
||||
|
||||
Secara bawaan, Calico menggunakan `192.168.0.0/16` sebagai CIDR jaringan Pod, namun hal ini dapat diatur pada berkas calico.yaml. Agar Calico dapat bekerja dengan benar, kamu perlu memberikan CIDR yang sama pada perintah `kubeadm init` menggunakan opsi `--pod-network-cidr=192.168.0.0/16` atau melalui konfigurasi kubeadm.
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://docs.projectcalico.org/v3.11/manifests/calico.yaml
|
||||
```
|
||||
|
||||
{{% /tab %}}
|
||||
|
||||
{{% tab name="Cilium" %}}
|
||||
Agar Cilium dapat bekerja dengan benar, kamu harus memberikan `--pod-network-cidr=10.217.0.0/16` pada `kubeadm init`.
|
||||
|
||||
Untuk men-_deploy_ Cilium kamu hanya perlu menjalankan:
|
||||
|
||||
```shell
|
||||
kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.6/install/kubernetes/quick-install.yaml
|
||||
```
|
||||
|
||||
Ketika seluruh Pod Cilium sudah bertanda `READY`, kamu dapat mulai menggunakan klaster.
|
||||
|
||||
```shell
|
||||
kubectl get pods -n kube-system --selector=k8s-app=cilium
|
||||
```
|
||||
Keluarannya akan tampil seperti berikut:
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
cilium-drxkl 1/1 Running 0 18m
|
||||
```
|
||||
|
||||
Cilium dapat digunakan sebagai kube-proxy, lihat [Kubernetes tanpa kube-proxy](https://docs.cilium.io/en/stable/gettingstarted/kubeproxy-free).
|
||||
|
||||
Untuk informasi lebih lanjut mengenai penggunaan Cilium dengan Kubernetes, lihat [panduan Instalasi Kubernetes untuk Cilium](https://docs.cilium.io/en/stable/kubernetes/).
|
||||
|
||||
{{% /tab %}}
|
||||
|
||||
{{% tab name="Contiv-VPP" %}}
|
||||
[Contiv-VPP](https://contivpp.io/) menggunakan CNF vSwitch berbasis [FD.io VPP](https://fd.io/) yang dapat diprogram,
|
||||
menawarkan layanan dan jejaring _cloud-native_ yang memiliki banyak fungsi dan berkinerja tinggi.
|
||||
|
||||
Contiv-VPP mengimplementasikan Service dan Network Policy Kubernetes pada _user space_ (on VPP).
|
||||
|
||||
Silakan merujuk pada panduan pemasangan berikut: [Pemasangan Manual Contiv-VPP](https://github.com/contiv/vpp/blob/master/docs/setup/MANUAL_INSTALL.md)
|
||||
{{% /tab %}}
|
||||
|
||||
{{% tab name="Kube-router" %}}
|
||||
|
||||
Kube-router mengandalkan kube-controller-manager untuk mengalokasikan CIDR Pod untuk Node-Node. Maka dari itu, gunakan `kubeadm init` dengan opsi `--pod-network-cidr`.
|
||||
|
||||
Kube-router menyediakan jejaring Pod, _network policy_, dan IP Virtual Server(IPVS)/Linux Virtual Server(LVS) berbasis _service proxy_ yang memiliki kinerja tinggi.
|
||||
|
||||
Informasi mengenai penggunaan `kubeadm` untuk mendirikan klaster Kubernetes dengan Kube-router, dapat dilihat di [panduan pemasangan resminya](https://github.com/cloudnativelabs/kube-router/blob/master/docs/kubeadm.md).
|
||||
{{% /tab %}}
|
||||
|
||||
{{% tab name="Weave Net" %}}
|
||||
|
||||
Untuk informasi lebih lanjut mengenai pemasangan klaster Kubernetes menggunakan Weave Net, silakan lihat [Mengintegrasikan Kubernetes melalui Addon](https://www.weave.works/docs/net/latest/kube-addon/).
|
||||
|
||||
Weave Net bekerja pada platform `amd64`, `arm`, `arm64` dan `ppc64le` tanpa membutuhkan tindakan ekstra.
|
||||
Weave Net menyalakan mode _hairpin_ secara bawaan. Hal ini mengizinkan Pod untuk mengakses dirinya sendiri melalui alamat IP Service
|
||||
jika mereka tidak tahu PodIP miliknya.
|
||||
|
||||
```shell
|
||||
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
|
||||
```
|
||||
{{% /tab %}}
|
||||
|
||||
{{< /tabs >}}
|
||||
|
||||
|
||||
Setelah jaringan Pod dipasangkan, kamu dapat mengonfirmasi hal tersebut dengan
|
||||
memastikan Pod CoreDNS berada pada kondisi `Running` pada keluaran `kubectl get pods --all-namespaces`.
|
||||
Dan setelah Pod CoreDNS sudah menyala dan berjalan, kamu dapat melanjutkan (pemasangan klaster) dengan menggabungkan Node-Node yang lain.
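Sebagai contoh pemeriksaan singkat (Pod CoreDNS memakai label `k8s-app=kube-dns`):

```shell
# Pantau Pod CoreDNS sampai berstatus Running.
kubectl get pods --all-namespaces --selector=k8s-app=kube-dns --watch
```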
|
||||
|
||||
Jika jaringan belum bekerja atau CoreDNS tidak berada pada kondisi `Running`, periksalah/lihatlah
|
||||
[panduan penyelesaian masalah](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/)
|
||||
untuk `kubeadm`.
|
||||
|
||||
### Isolasi Node _control plane_
|
||||
|
||||
Secara bawaan, klaster tidak akan menjadwalkan Pod pada Node _control-plane_ untuk alasan
|
||||
keamanan. Jika kamu ingin Pod dapat dijadwalkan pada Node _control-plane_, sebagai contoh untuk
|
||||
klaster Kubernetes bermesin-tunggal untuk pengembangan, jalankan:
|
||||
|
||||
```bash
|
||||
kubectl taint nodes --all node-role.kubernetes.io/master-
|
||||
```
|
||||
|
||||
Dengan keluaran seperti berikut:
|
||||
|
||||
```
|
||||
node "test-01" untainted
|
||||
taint "node-role.kubernetes.io/master:" not found
|
||||
taint "node-role.kubernetes.io/master:" not found
|
||||
```
|
||||
|
||||
Hal ini akan menghapus _taint_ `node-role.kubernetes.io/master` pada Node manapun yang
|
||||
memilikinya, termasuk Node _control-plane_, sehingga _scheduler_ akan dapat
|
||||
menjadwalkan Pod di manapun.
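Untuk memastikan _taint_ tersebut sudah hilang, kamu dapat memeriksanya dengan sketsa berikut (ganti `<nama-node>` dengan nama Node milikmu):

```shell
# Kolom Taints seharusnya menampilkan <none> untuk Node yang sudah di-untaint.
kubectl describe node <nama-node> | grep Taints
```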
|
||||
|
||||
### Menggabungkan Node-Node {#menggabungkan-node}
|
||||
|
||||
Node adalah tempat beban kerja (Container, Pod, dan lain-lain) berjalan. Untuk menambahkan Node baru pada klaster lakukan hal berikut pada setiap mesin:
|
||||
|
||||
* SSH ke mesin
|
||||
* Gunakan pengguna _root_ (mis. `sudo su -`)
|
||||
* Jalankan perintah hasil keluaran `kubeadm init`. Sebagai contoh:
|
||||
|
||||
```bash
|
||||
kubeadm join --token <token> <control-plane-host>:<control-plane-port> --discovery-token-ca-cert-hash sha256:<hash>
|
||||
```
|
||||
|
||||
Jika kamu tidak memiliki _token_, kamu bisa mendapatkannya dengan menjalankan perintah berikut pada Node _control-plane_:
|
||||
|
||||
```bash
|
||||
kubeadm token list
|
||||
```
|
||||
|
||||
Keluarannya akan tampil seperti berikut:
|
||||
|
||||
```console
|
||||
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
|
||||
8ewj1p.9r9hcjoqgajrj4gi 23h 2018-06-12T02:51:28Z authentication, The default bootstrap system:
|
||||
signing token generated by bootstrappers:
|
||||
'kubeadm init'. kubeadm:
|
||||
default-node-token
|
||||
```
|
||||
|
||||
Secara bawaan, _token_ akan kadaluarsa dalam 24 jam. Jika kamu menggabungkan Node ke klaster setelah _token_ kadaluarsa,
|
||||
kamu dapat membuat _token_ baru dengan menjalankan perintah berikut pada Node _control-plane_:
|
||||
|
||||
```bash
|
||||
kubeadm token create
|
||||
```
|
||||
|
||||
Keluarannya akan tampil seperti berikut:
|
||||
|
||||
```console
|
||||
5didvk.d09sbcov8ph2amjw
|
||||
```
|
||||
|
||||
Jika kamu tidak memiliki nilai `--discovery-token-ca-cert-hash`, kamu bisa mendapatkannya dengan menjalankan perintah berantai berikut pada Node _control-plane_:
|
||||
|
||||
```bash
|
||||
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
|
||||
openssl dgst -sha256 -hex | sed 's/^.* //'
|
||||
```
|
||||
|
||||
Keluaran yang diberikan kurang lebih akan ditampilkan sebagai berikut:
|
||||
|
||||
```console
|
||||
8cb2de97839780a412b93877f8507ad6c94f73add17d5d7058e91741c9d5ec78
|
||||
```
|
||||
|
||||
{{< note >}}
|
||||
Untuk menentukan _tuple_ IPv6 untuk `<control-plane-host>:<control-plane-port>`, alamat IPv6 harus ditutup dengan kurung siku, sebagai contoh: `[fd00::101]:2073`.
|
||||
{{< /note >}}
|
||||
|
||||
Keluaran yang diberikan kurang lebih akan ditampilkan sebagai berikut:
|
||||
|
||||
```
|
||||
[preflight] Running pre-flight checks
|
||||
|
||||
... (log output of join workflow) ...
|
||||
|
||||
Node join complete:
|
||||
* Certificate signing request sent to control-plane and response
|
||||
received.
|
||||
* Kubelet informed of new secure connection details.
|
||||
|
||||
Run 'kubectl get nodes' on control-plane to see this machine join.
|
||||
```
|
||||
|
||||
Beberapa saat kemudian, kamu akan melihat Node tersebut pada keluaran dari `kubectl get nodes` ketika dijalankan pada Node _control-plane_.
|
||||
|
||||
### (Opsional) Mengendalikan klaster dari mesin selain Node _control-plane_
|
||||
|
||||
Untuk membuat kubectl bekerja pada mesin lain (mis. laptop) agar dapat berbicara dengan
|
||||
klaster, kamu harus menyalin berkas kubeconfig administrator dari Node _control-plane_
|
||||
ke mesin seperti berikut:
|
||||
|
||||
```bash
|
||||
scp root@<control-plane-host>:/etc/kubernetes/admin.conf .
|
||||
kubectl --kubeconfig ./admin.conf get nodes
|
||||
```
|
||||
|
||||
{{< note >}}
|
||||
Contoh di atas mengasumsikan akses SSH dinyalakan untuk _root_. Jika tidak berlaku
|
||||
demikian, kamu dapat menyalin berkas `admin.conf` untuk dapat diakses oleh pengguna lain
|
||||
dan `scp` menggunakan pengguna lain tersebut.
|
||||
|
||||
Berkas `admin.conf` memberikan penggunanya privilese (_privilege_) _superuser_ terhadap klaster.
|
||||
Berkas ini harus digunakan seperlunya. Untuk pengguna biasa, direkomendasikan
|
||||
untuk membuat kredensial unik dengan privilese _whitelist_. Kamu dapat melakukan
|
||||
ini dengan perintah `kubeadm alpha kubeconfig user --client-name <CN>`.
|
||||
Perintah tersebut akan mencetak berkas KubeConfig ke STDOUT yang harus kamu simpan
|
||||
ke dalam sebuah berkas dan mendistribusikannya pada para pengguna. Setelah itu, whitelist
|
||||
privilese menggunakan `kubectl create (cluster)rolebinding`.
|
||||
{{< /note >}}
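Sebagai sketsa dari alur yang disebut pada catatan di atas (nama pengguna `developer`, berkas keluaran, dan peran `view` hanyalah contoh):

```shell
# Buat kubeconfig untuk pengguna baru, lalu berikan akses hanya-baca.
kubeadm alpha kubeconfig user --client-name developer > developer.conf
kubectl create clusterrolebinding developer-view --clusterrole=view --user=developer
```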
|
||||
|
||||
### (Opsional) Memproksi API Server ke localhost
|
||||
|
||||
Jika kamu ingin terhubung dengan API Server dari luar klaster kamu dapat menggunakan
|
||||
`kubectl proxy`:
|
||||
|
||||
```bash
|
||||
scp root@<control-plane-host>:/etc/kubernetes/admin.conf .
|
||||
kubectl --kubeconfig ./admin.conf proxy
|
||||
```
|
||||
|
||||
Kini kamu dapat mengakses API Server secara lokal melalui `http://localhost:8001/api/v1`
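Misalnya, selama `kubectl proxy` masih berjalan kamu dapat memeriksa versi API Server lewat proksi tersebut:

```shell
# Jalankan pada terminal lain selama kubectl proxy aktif.
curl http://localhost:8001/version
```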
|
||||
|
||||
## Pembongkaran
|
||||
|
||||
Jika kamu menggunakan server sekali pakai untuk membuat klaster, sebagai ujicoba, kamu dapat
|
||||
mematikannya tanpa perlu melakukan pembongkaran. Kamu dapat menggunakan
|
||||
`kubectl config delete-cluster` untuk menghapus referensi lokal ke
|
||||
klaster.
|
||||
|
||||
Namun, jika kamu ingin mengatur ulang klaster secara lebih rapih, pertama-tama kamu
|
||||
harus [menguras (_drain_) Node](/docs/reference/generated/kubectl/kubectl-commands#drain)
|
||||
dan memastikan Node sudah kosong, kemudian mengembalikan pengaturan pada Node kembali seperti semula.
|
||||
|
||||
### Menghapus Node
|
||||
|
||||
Berinteraksi dengan Node _control-plane_ menggunakan kredensial yang sesuai, jalankan:
|
||||
|
||||
```bash
|
||||
kubectl drain <node name> --delete-local-data --force --ignore-daemonsets
|
||||
kubectl delete node <node name>
|
||||
```
|
||||
|
||||
Lalu, pada Node yang dihapus, atur ulang semua kondisi `kubeadm` yang telah dipasang:
|
||||
|
||||
```bash
|
||||
kubeadm reset
|
||||
```
|
||||
|
||||
Proses pengaturan ulang tidak mengatur ulang atau membersihkan kebijakan iptables atau tabel IPVS. Jika kamu ingin mengatur ulang iptables, kamu harus melakukannya secara manual:
|
||||
|
||||
```bash
|
||||
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
|
||||
```
|
||||
|
||||
Jika kamu ingin mengatur ulang tabel IPVS, kamu harus menjalankan perintah berikut:
|
||||
|
||||
```bash
|
||||
ipvsadm -C
|
||||
```
|
||||
|
||||
Jika kamu ingin mengulang dari awal, cukup jalankan `kubeadm init` atau `kubeadm join` dengan
|
||||
argumen yang sesuai.
|
||||
|
||||
### Membersihkan _control plane_
|
||||
|
||||
Kamu dapat menggunakan `kubeadm reset` pada hos _control plane_ untuk memicu pembersihan
|
||||
_best-effort_.
|
||||
|
||||
Lihat dokumentasi referensi [`kubeadm reset`](/docs/reference/setup-tools/kubeadm/kubeadm-reset/)
|
||||
untuk informasi lebih lanjut mengenai sub-perintah ini dan
|
||||
opsinya.
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture discussion %}}
|
||||
|
||||
## Selanjutnya
|
||||
|
||||
* Pastikan klaster berjalan dengan benar menggunakan [Sonobuoy](https://github.com/heptio/sonobuoy)
|
||||
* <a id="lifecycle" />Lihat [Memperbaharui klaster kubeadm](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)
|
||||
untuk detail mengenai pembaruan klaster menggunakan `kubeadm`.
|
||||
* Pelajari penggunaan `kubeadm` lebih lanjut pada [dokumentasi referensi kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm)
|
||||
* Pelajari lebih lanjut mengenai [konsep-konsep](/docs/concepts/) Kubernetes dan [`kubectl`](/docs/user-guide/kubectl-overview/).
|
||||
* Lihat halaman [Cluster Networking](/docs/concepts/cluster-administration/networking/) untuk daftar
|
||||
_add-on_ jaringan Pod yang lebih banyak.
|
||||
* <a id="other-addons" />Lihat [daftar _add-on_](/docs/concepts/cluster-administration/addons/) untuk
|
||||
mengeksplor _add-on_ lainnya, termasuk perkakas untuk _logging_, _monitoring_, _network policy_, visualisasi &
|
||||
pengendalian klaster Kubernetes.
|
||||
* Atur bagaimana klaster mengelola log untuk peristiwa-peristiwa klaster dan dari
|
||||
aplikasi-aplikasi yang berjalan pada Pod.
|
||||
Lihat [Arsitektur Logging](/docs/concepts/cluster-administration/logging/) untuk
|
||||
gambaran umum tentang hal-hal yang terlibat.
|
||||
|
||||
### Umpan balik
|
||||
|
||||
* Untuk masalah kutu (_bug_), kunjungi [kubeadm GitHub issue tracker](https://github.com/kubernetes/kubeadm/issues)
|
||||
* Untuk dukungan, kunjungi kanal Slack
|
||||
[#kubeadm](https://kubernetes.slack.com/messages/kubeadm/)
|
||||
* Kanal Slack umum pengembangan SIG Cluster Lifecycle:
|
||||
[#sig-cluster-lifecycle](https://kubernetes.slack.com/messages/sig-cluster-lifecycle/)
|
||||
* SIG Cluster Lifecycle [SIG information](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle#readme)
|
||||
* Milis SIG Cluster Lifecycle:
|
||||
[kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle)
|
||||
|
||||
## Kebijakan _version skew_
|
||||
|
||||
`kubeadm` versi v{{< skew latestVersion >}} dapat men-_deploy_ klaster dengan _control plane_ versi v{{< skew latestVersion >}} atau v{{< skew prevMinorVersion >}}.
|
||||
`kubeadm` v{{< skew latestVersion >}} juga dapat memperbarui klaster yang dibuat dengan kubeadm v{{< skew prevMinorVersion >}}.
|
||||
|
||||
Karena kita tidak dapat memprediksi masa depan, CLI kubeadm v{{< skew latestVersion >}} mungkin atau tidak mungkin dapat men-_deploy_ klaster v{{< skew nextMinorVersion >}}.
|
||||
|
||||
Sumber daya ini menyediakan informasi lebih lanjut mengenai _version skew_ yang didukung antara kubelet dan _control plane_, serta komponen Kubernetes lainnya:
|
||||
|
||||
* [Kebijakan versi dan version-skew Kubernetes](/docs/setup/release/version-skew-policy/)
|
||||
* [Panduan instalasi](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-kubeadm-kubelet-and-kubectl) spesifik untuk kubeadm
|
||||
|
||||
## Keterbatasan
|
||||
|
||||
### Ketahanan klaster
|
||||
|
||||
Klaster yang dibuat pada panduan ini hanya memiliki Node _control-plane_ tunggal, dengan basis data etcd tunggal
|
||||
yang berjalan di atasnya. Hal ini berarti jika terjadi kegagalan pada Node _control-plane_, klaster dapat kehilangan
|
||||
data dan mungkin harus dibuat kembali dari awal.
|
||||
|
||||
Solusi:
|
||||
|
||||
* Lakukan [back up etcd](https://coreos.com/etcd/docs/latest/admin_guide.html) secara reguler. Direktori data
|
||||
etcd yang dikonfigurasi oleh kubeadm berada di `/var/lib/etcd` pada Node _control-plane_ (lihat sketsa perintah setelah daftar ini).
|
||||
|
||||
* Gunakan banyak Node _control-plane_. Kamu dapat membaca
|
||||
[Opsi untuk topologi dengan ketersediaan tinggi](/docs/setup/production-environment/tools/kubeadm/ha-topology/) untuk memilih topologi
|
||||
klaster yang menyediakan ketersediaan lebih tinggi.
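
Berikut sketsa perintah pencadangan etcd yang disinggung pada poin pertama di atas. Lokasi sertifikat mengikuti bawaan kubeadm dan hanya merupakan asumsi; sesuaikan dengan lingkungan kamu:

```shell
# Sketsa (asumsi): membuat snapshot etcd dari Node control-plane hasil kubeadm
ETCDCTL_API=3 etcdctl snapshot save /var/backups/etcd-snapshot.db \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key
```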
|
||||
|
||||
### Kompatibilitas platform
|
||||
|
||||
_Package_ deb/rpm dan _binary_ kubeadm dibuat untuk amd64, arm (32-bit), arm64, ppc64le, dan s390x
|
||||
mengikuti [proposal multi-platform](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/multi-platform.md).
|
||||
|
||||
_Image_ kontainer _multiplatform_ untuk _control plane_ dan _addon_ juga telah didukung sejak v1.12.
|
||||
|
||||
Hanya beberapa penyedia jaringan yang menawarkan solusi untuk seluruh platform. Silakan merujuk pada daftar
|
||||
penyedia jaringan di atas atau dokumentasi dari masing-masing penyedia untuk mencari tahu apakah penyedia tersebut
|
||||
mendukung platform pilihanmu.
|
||||
|
||||
## Penyelesaian masalah
|
||||
|
||||
Jika kamu menemui kesulitan dengan kubeadm, silakan merujuk pada [dokumen penyelesaian masalah](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/).
|
||||
|
||||
{{% /capture %}}
|
||||
|
|
@ -0,0 +1,284 @@
|
|||
---
|
||||
title: Mengatur Pod untuk Penyimpanan dengan PersistentVolume
|
||||
content_template: templates/task
|
||||
weight: 60
|
||||
---
|
||||
|
||||
{{% capture overview %}}
|
||||
|
||||
Laman ini akan menjelaskan bagaimana kamu dapat mengatur sebuah Pod dengan menggunakan
|
||||
{{< glossary_tooltip text="PersistentVolumeClaim" term_id="persistent-volume-claim" >}}
|
||||
untuk penyimpanan.
|
||||
Berikut ringkasan prosesnya:
|
||||
|
||||
1. Kamu, sebagai seorang administrator klaster, membuat sebuah PersistentVolume yang didukung oleh penyimpanan
|
||||
fisik. Kamu tidak mengaitkan volume dengan Pod apapun.
|
||||
|
||||
2. Kamu, sekarang mengambil peran sebagai seorang _developer_ / pengguna klaster, membuat sebuah
|
||||
PersistentVolumeClaim yang secara otomatis terikat dengan PersistentVolume yang sesuai.
|
||||
|
||||
3. Kamu membuat sebuah Pod yang menggunakan PersistentVolumeClaim di atas untuk penyimpanan.
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture prerequisites %}}
|
||||
|
||||
* Kamu membutuhkan sebuah klaster Kubernetes yang hanya memiliki satu Node, dan
|
||||
{{< glossary_tooltip text="kubectl" term_id="kubectl" >}}
|
||||
alat baris perintah yang sudah diatur untuk berkomunikasi dengan klaster kamu. Jika kamu
|
||||
tidak memiliki sebuah klaster dengan Node tunggal, kamu dapat membuatnya dengan
|
||||
[Minikube](/docs/getting-started-guides/minikube).
|
||||
|
||||
* Familiar dengan materi di
|
||||
[Persistent Volumes](/id/docs/concepts/storage/persistent-volumes/).
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture steps %}}
|
||||
|
||||
## Membuat sebuah berkas index.html di dalam Node kamu
|
||||
|
||||
Buka sebuah _shell_ ke Node tunggal di klaster kamu. Bagaimana kamu membuka sebuah _shell_ tergantung
|
||||
dengan bagaimana kamu mengatur klaster kamu. Contoh, jika kamu menggunakan Minikube, kamu
|
||||
dapat membuka sebuah _shell_ ke Node kamu dengan memasukkan `minikube ssh`.
|
||||
|
||||
Di dalam _shell_ kamu pada Node itu, buat sebuah direktori dengan nama `/mnt/data`:
|
||||
|
||||
```shell
|
||||
# Asumsikan Node kamu menggunakan "sudo" untuk menjalankan perintah
|
||||
# sebagai superuser
|
||||
sudo mkdir /mnt/data
|
||||
```
|
||||
|
||||
|
||||
Di dalam direktori `/mnt/data`, buat sebuah berkas dengan nama `index.html`:
|
||||
|
||||
```shell
|
||||
# Disini kembali asumsikan bahwa Node kamu menggunakan "sudo" untuk menjalankan perintah
|
||||
# sebagai superuser
|
||||
sudo sh -c "echo 'Hello from Kubernetes storage' > /mnt/data/index.html"
|
||||
```
|
||||
|
||||
{{< note >}}
|
||||
Jika Node kamu menggunakan alat untuk mengakses _superuser_ selain dengan `sudo`, kamu dapat
|
||||
membuat ini bekerja jika mengganti `sudo` dengan nama dari alat lainnya.
|
||||
{{< /note >}}
|
||||
|
||||
Menguji bahwa berkas `index.html` ada:
|
||||
|
||||
```shell
|
||||
cat /mnt/data/index.html
|
||||
```
|
||||
|
||||
Keluaran akan seperti ini:
|
||||
```
|
||||
Hello from Kubernetes storage
|
||||
```
|
||||
|
||||
Sekarang kamu dapat menutup _shell_ di Node kamu.
|
||||
|
||||
## Membuat sebuah PersistentVolume
|
||||
|
||||
Pada latihan ini, kamu akan membuat sebuah *hostPath* PersistentVolume. Kubernetes mendukung
|
||||
hostPath untuk pengembangan dan pengujian di dalam klaster Node tunggal. Sebuah hostPath
|
||||
PersistentVolume menggunakan berkas atau direktori di dalam Node untuk meniru penyimpanan terhubung jaringan (NAS, _network-attached storage_).
|
||||
|
||||
Di dalam klaster _production_, kamu tidak akan menggunakan hostPath. Sebagai gantinya, seorang administrator klaster
|
||||
akan menyediakan sumberdaya jaringan seperti Google Compute Engine _persistent disk_,
|
||||
_NFS share_, atau sebuah Amazon Elastic Block Store volume. Administrator klaster juga dapat
|
||||
menggunakan [StorageClass](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#storageclass-v1-storage)
|
||||
untuk mengatur
|
||||
[_provisioning_ secara dinamis](https://kubernetes.io/blog/2016/10/dynamic-provisioning-and-storage-in-kubernetes).
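
Sebagai gambaran saja (bukan bagian dari latihan ini), berikut sketsa StorageClass untuk _provisioning_ dinamis. Nama `fast-ssd` dan _provisioner_ GCE hanyalah contoh asumsi; sesuaikan dengan penyedia penyimpanan kamu:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-ssd                    # nama hanyalah contoh
provisioner: kubernetes.io/gce-pd   # sesuaikan dengan penyedia cloud kamu
parameters:
  type: pd-ssd
```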
|
||||
|
||||
Berikut berkas konfigurasi untuk hostPath PersistentVolume:
|
||||
|
||||
{{< codenew file="pods/storage/pv-volume.yaml" >}}
|
||||
|
||||
Berkas konfigurasi tersebut menentukan bahwa volume berada di `/mnt/data` pada
|
||||
klaster Node. Konfigurasi tersebut juga menentukan ukuran dari 10 gibibytes dan
|
||||
mode akses `ReadWriteOnce`, yang berarti volume dapat dipasang sebagai
|
||||
_read-write_ oleh Node tunggal. Konfigurasi ini menggunakan [nama dari StorageClass](/id/docs/concepts/storage/persistent-volumes/#kelas)
|
||||
`manual` untuk PersistentVolume, yang akan digunakan untuk mengikat
|
||||
permintaan PersistentVolumeClaim ke PersistentVolume ini.
|
||||
|
||||
Membuat sebuah PersistentVolume:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/pods/storage/pv-volume.yaml
|
||||
```
|
||||
|
||||
Melihat informasi tentang PersistentVolume:
|
||||
|
||||
```shell
|
||||
kubectl get pv task-pv-volume
|
||||
```
|
||||
|
||||
Keluaran menunjukkan bahwa PersistentVolume memiliki `STATUS` `Available`. Ini
|
||||
berarti PersistentVolume belum terikat ke PersistentVolumeClaim.
|
||||
|
||||
NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM STORAGECLASS REASON AGE
|
||||
task-pv-volume 10Gi RWO Retain Available manual 4s
|
||||
|
||||
## Membuat sebuah PersistentVolumeClaim
|
||||
|
||||
Langkah selanjutnya adalah membuat sebuah PersistentVolumeClaim. Pod menggunakan PersistentVolumeClaim
|
||||
untuk meminta penyimpanan fisik. Pada latihan ini, kamu akan membuat sebuah PersistentVolumeClaim
|
||||
yang meminta sebuah volume minimal tiga gibibytes dengan mode akses _read-write_
|
||||
setidaknya untuk satu Node.
|
||||
|
||||
Berikut berkas konfigurasi untuk PersistentVolumeClaim:
|
||||
|
||||
{{< codenew file="pods/storage/pv-claim.yaml" >}}
|
||||
|
||||
Membuat sebuah PersistentVolumeClaim:
|
||||
|
||||
kubectl apply -f https://k8s.io/examples/pods/storage/pv-claim.yaml
|
||||
|
||||
Setelah kamu membuat PersistentVolumeClaim, _control plane_ Kubernetes akan mencari
|
||||
PersistentVolume yang memenuhi persyaratan dari _claim_ tersebut. Jika
|
||||
_control plane_ menemukan PersistentVolume yang cocok dengan StorageClass yang sama, maka
|
||||
_claim_ tersebut akan diikat ke volume itu.
|
||||
|
||||
Lihat kembali PersistentVolume:
|
||||
|
||||
```shell
|
||||
kubectl get pv task-pv-volume
|
||||
```
|
||||
|
||||
Sekarang keluaran menunjukkan `STATUS` `Bound`.
|
||||
|
||||
NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM STORAGECLASS REASON AGE
|
||||
task-pv-volume 10Gi RWO Retain Bound default/task-pv-claim manual 2m
|
||||
|
||||
Lihat PersistentVolumeClaim:
|
||||
|
||||
```shell
|
||||
kubectl get pvc task-pv-claim
|
||||
```
|
||||
|
||||
Keluaran menunjukkan bahwa PersistentVolumeClaim telah terikat dengan PersistentVolume,
|
||||
`task-pv-volume`.
|
||||
|
||||
NAME STATUS VOLUME CAPACITY ACCESSMODES STORAGECLASS AGE
|
||||
task-pv-claim Bound task-pv-volume 10Gi RWO manual 30s
|
||||
|
||||
## Membuat sebuah Pod
|
||||
|
||||
Langkah selanjutnya adalah membuat sebuah Pod yang akan menggunakan PersistentVolumeClaim sebagai volume.
|
||||
|
||||
Berikut berkas konfigurasi untuk Pod:
|
||||
|
||||
{{< codenew file="pods/storage/pv-pod.yaml" >}}
|
||||
|
||||
Perhatikan bahwa berkas konfigurasi Pod menentukan sebuah PersistentVolumeClaim, tetapi
|
||||
tidak menentukan PersistentVolume. Dari sudut pandang Pod, _claim_ adalah volume.
|
||||
|
||||
Membuat Pod:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/pods/storage/pv-pod.yaml
|
||||
```
|
||||
|
||||
Pastikan bahwa Container di dalam Pod berjalan:
|
||||
|
||||
```shell
|
||||
kubectl get pod task-pv-pod
|
||||
```
|
||||
|
||||
Mendapatkan sebuah _shell_ ke Container yang sedang berjalan di Pod kamu:
|
||||
|
||||
```shell
|
||||
kubectl exec -it task-pv-pod -- /bin/bash
|
||||
```
|
||||
|
||||
Di dalam _shell_, pastikan bahwa nginx menyajikan berkas `index.html` dari dalam
|
||||
hostPath volume:
|
||||
|
||||
```shell
|
||||
# Pastikan kamu menjalankan 3 perintah ini di dalam shell root yang berasal dari
|
||||
# "kubectl exec" dari langkah sebelumnya
|
||||
apt update
|
||||
apt install curl
|
||||
curl http://localhost/
|
||||
```
|
||||
|
||||
Keluaran akan menunjukkan teks yang telah kamu tulis di berkas `index.html`
|
||||
di dalam hostPath volume:
|
||||
|
||||
Hello from Kubernetes storage
|
||||
|
||||
|
||||
Jika kamu melihat pesan tersebut, kamu telah berhasil mengatur sebuah Pod
|
||||
untuk menggunakan penyimpanan dari PersistentVolumeClaim.
|
||||
|
||||
## Membersihkan
|
||||
|
||||
Hapus Pod, PersistentVolumeClaim dan PersistentVolume:
|
||||
|
||||
```shell
|
||||
kubectl delete pod task-pv-pod
|
||||
kubectl delete pvc task-pv-claim
|
||||
kubectl delete pv task-pv-volume
|
||||
```
|
||||
|
||||
Jika kamu belum memiliki _shell_ yang telah dibuka ke Node di klaster kamu,
|
||||
buka _shell_ baru dengan cara yang sama yang telah kamu lakukan sebelumnya.
|
||||
|
||||
Di dalam _shell_ Node kamu, hapus berkas dan direktori yang telah kamu buat:
|
||||
|
||||
```shell
|
||||
# Asumsikan Node kamu menggunakan "sudo" untuk menjalankan perintah
|
||||
# sebagai superuser
|
||||
sudo rm /mnt/data/index.html
|
||||
sudo rmdir /mnt/data
|
||||
```
|
||||
|
||||
Sekarang kamu dapat menutup _shell_ Node kamu.
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
{{% capture discussion %}}
|
||||
|
||||
## Kontrol akses
|
||||
|
||||
Penyimpanan yang telah terkonfigurasi dengan group ID (GID) memungkinkan akses menulis hanya dari Pod yang menggunakan
|
||||
GID yang sama. GID yang tidak cocok atau hilang akan menyebabkan kesalahan izin ditolak. Untuk mengurangi
|
||||
kebutuhan koordinasi dengan pengguna, administrator dapat membuat anotasi sebuah PersistentVolume
|
||||
dengan GID. Kemudian GID akan otomatis ditambahkan ke Pod yang menggunakan PersistentVolume.
|
||||
|
||||
Gunakan anotasi `pv.beta.kubernetes.io/gid` sebagai berikut:
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: pv1
|
||||
annotations:
|
||||
pv.beta.kubernetes.io/gid: "1234"
|
||||
```
|
||||
Ketika sebuah Pod mengkonsumsi PersistentVolume yang memiliki anotasi GID, anotasi GID tersebut
|
||||
akan diterapkan ke semua Container di dalam Pod dengan cara yang sama seperti GID yang ditentukan di dalam konteks keamanan (_security context_) Pod.
|
||||
Setiap GID, baik berasal dari anotasi PersistentVolume atau Pod, diterapkan pada proses pertama yang dijalankan
|
||||
di setiap container.
|
||||
|
||||
{{< note >}}
|
||||
Ketika sebuah Pod mengkonsumsi PersistentVolume, GID yang terkait dengan PersistentVolume
|
||||
tidak ada di dalam sumberdaya Pod itu sendiri.
|
||||
{{< /note >}}
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
{{% capture whatsnext %}}
|
||||
|
||||
* Belajar lebih lanjut tentang [PersistentVolume](/id/docs/concepts/storage/persistent-volumes/).
|
||||
* Baca [dokumen perancangan Penyimpanan _Persistent_](https://git.k8s.io/community/contributors/design-proposals/storage/persistent-storage.md).
|
||||
|
||||
### Referensi
|
||||
|
||||
* [PersistentVolume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolume-v1-core)
|
||||
* [PersistentVolumeSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumespec-v1-core)
|
||||
* [PersistentVolumeClaim](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core)
|
||||
* [PersistentVolumeClaimSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaimspec-v1-core)
|
||||
|
||||
{{% /capture %}}
|
||||
|
|
@ -0,0 +1,416 @@
|
|||
---
|
||||
title: Mengonfigurasi Konteks Keamanan untuk Pod atau Container
|
||||
content_template: templates/task
|
||||
weight: 80
|
||||
---
|
||||
|
||||
{{% capture overview %}}
|
||||
|
||||
Konteks keamanan (_security context_) menentukan wewenang (_privilege_) dan aturan kontrol akses untuk sebuah Pod
|
||||
atau Container. Aturan konteks keamanan meliputi hal-hal berikut ini namun tidak terbatas pada hal-hal tersebut:
|
||||
|
||||
* Kontrol akses bersifat diskresi: Izin untuk mengakses objek, seperti sebuah berkas, yang didasarkan pada
|
||||
[ID pengguna atau _user ID_ (UID) dan ID grup atau _group ID_ (GID)](https://wiki.archlinux.org/index.php/users_and_groups).
|
||||
|
||||
* [_Security Enhanced Linux_ (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux): Di mana objek diberi label keamanan.
|
||||
|
||||
* Menjalankan dengan wewenang (_privileged_) atau tanpa wewenang (_unprivileged_).
|
||||
|
||||
* [Kapabilitas Linux (Linux Capabilities)](https://linux-audit.com/linux-capabilities-hardening-linux-binaries-by-removing-setuid/): Memberi sebuah proses beberapa wewenang, namun tidak semua wewenang dari pengguna _root_.
|
||||
|
||||
* [AppArmor](/docs/tutorials/clusters/apparmor/): Menggunakan profil program untuk membatasi kemampuan dari masing-masing program.
|
||||
|
||||
* [Seccomp](https://en.wikipedia.org/wiki/Seccomp): Menyaring panggilan sistem (_system calls_) dari suatu proses.
|
||||
|
||||
* AllowPrivilegeEscalation: Mengontrol apakah suatu proses dapat memperoleh lebih banyak wewenang daripada proses induknya. Pilihan ini mengontrol secara langsung apakah opsi [`no_new_privs`](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt) diaktifkan pada proses dalam Container. AllowPrivilegeEscalation selalu aktif (_true_) ketika Container: 1) berjalan dengan wewenang ATAU 2) memiliki `CAP_SYS_ADMIN`.
|
||||
|
||||
* readOnlyRootFilesystem: Menambatkan (_mount_) sistem berkas (_file system_) _root_ dari sebuah Container hanya sebatas untuk dibaca saja (_read-only_).
|
||||
|
||||
Poin-poin di atas bukanlah sekumpulan lengkap dari aturan konteks keamanan - silakan lihat [SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#securitycontext-v1-core) untuk daftar lengkapnya.
|
||||
|
||||
Untuk informasi lebih lanjut tentang mekanisme keamanan pada Linux, silakan lihat
|
||||
[ikhtisar fitur keamanan pada Kernel Linux](https://www.linux.com/learn/overview-linux-kernel-security-features).
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture prerequisites %}}
|
||||
|
||||
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture steps %}}
|
||||
|
||||
## Mengatur konteks keamanan untuk Pod
|
||||
|
||||
Untuk menentukan aturan keamanan pada Pod, masukkan bagian `securityContext`
|
||||
dalam spesifikasi Pod. Bagian `securityContext` adalah sebuah objek
|
||||
[PodSecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritycontext-v1-core).
|
||||
Aturan keamanan yang kamu tetapkan untuk Pod akan berlaku untuk semua Container dalam Pod tersebut.
|
||||
Berikut sebuah berkas konfigurasi untuk Pod yang memiliki volume `securityContext` dan `emptyDir`:
|
||||
|
||||
{{< codenew file="pods/security/security-context.yaml" >}}
|
||||
|
||||
Dalam berkas konfigurasi ini, bagian `runAsUser` menentukan bahwa dalam setiap Container pada
|
||||
Pod, semua proses dijalankan oleh ID pengguna 1000. Bagian `runAsGroup` menentukan grup utama dengan ID 3000 untuk
|
||||
semua proses dalam setiap Container pada Pod. Jika bagian ini diabaikan, maka ID grup utama dari Container
|
||||
akan berubah menjadi _root_(0). Berkas apa pun yang dibuat juga akan dimiliki oleh pengguna dengan ID 1000 dan grup dengan ID 3000 ketika `runAsGroup` ditentukan.
|
||||
Karena `fsGroup` ditentukan, semua proses milik Container juga merupakan bagian dari grup tambahan dengan ID 2000.
|
||||
Pemilik volume `/data/demo` dan berkas apa pun yang dibuat dalam volume tersebut adalah grup dengan ID 2000.
|
||||
|
||||
Buatlah Pod tersebut:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/pods/security/security-context.yaml
|
||||
```
|
||||
|
||||
Periksa apakah Container dari Pod sedang berjalan:
|
||||
|
||||
```shell
|
||||
kubectl get pod security-context-demo
|
||||
```
|
||||
Masuk ke _shell_ dari Container yang sedang berjalan tersebut:
|
||||
|
||||
```shell
|
||||
kubectl exec -it security-context-demo -- sh
|
||||
```
|
||||
|
||||
Pada _shell_ kamu, lihat daftar proses yang berjalan:
|
||||
|
||||
```shell
|
||||
ps
|
||||
```
|
||||
|
||||
Keluarannya menunjukkan bahwa proses dijalankan oleh pengguna dengan ID 1000, yang merupakan nilai dari bagian `runAsUser`:
|
||||
|
||||
```shell
|
||||
PID USER TIME COMMAND
|
||||
1 1000 0:00 sleep 1h
|
||||
6 1000 0:00 sh
|
||||
...
|
||||
```
|
||||
|
||||
Pada _shell_ kamu, pindah ke direktori `/data`, dan lihat isinya:
|
||||
|
||||
```shell
|
||||
cd /data
|
||||
ls -l
|
||||
```
|
||||
|
||||
Keluarannya menunjukkan bahwa direktori `/data/demo` memiliki grup dengan ID 2000, yang merupakan
|
||||
nilai dari bagian `fsGroup`.
|
||||
|
||||
```shell
|
||||
drwxrwsrwx 2 root 2000 4096 Jun 6 20:08 demo
|
||||
```
|
||||
|
||||
Pada _shell_ kamu, pindah ke direktori `/data/demo`, dan buatlah sebuah berkas didalamnya:
|
||||
|
||||
```shell
|
||||
cd demo
|
||||
echo hello > testfile
|
||||
```
|
||||
|
||||
Lihatlah daftar berkas dalam direktori `/data/demo`:
|
||||
|
||||
```shell
|
||||
ls -l
|
||||
```
|
||||
|
||||
Keluarannya menunjukkan bahwa `testfile` memiliki grup dengan ID 2000, dimana merupakan nilai dari bagian `fsGroup`.
|
||||
|
||||
```shell
|
||||
-rw-r--r-- 1 1000 2000 6 Jun 6 20:08 testfile
|
||||
```
|
||||
|
||||
Jalankan perintah berikut ini:
|
||||
|
||||
```shell
|
||||
$ id
|
||||
uid=1000 gid=3000 groups=2000
|
||||
```
|
||||
|
||||
Kamu akan melihat bahwa nilai _gid_ adalah 3000, sama dengan bagian `runAsGroup`. Jika `runAsGroup` diabaikan maka nilai _gid_ akan
|
||||
tetap bernilai 0(_root_) dan proses akan dapat berinteraksi dengan berkas-berkas yang dimiliki oleh grup root(0) dan yang memiliki
|
||||
izin grup untuk grup root(0).
|
||||
|
||||
Keluarlah dari _shell_ kamu:
|
||||
|
||||
```shell
|
||||
exit
|
||||
```
|
||||
|
||||
## Melakukan konfigurasi izin volume dan kebijakan perubahan kepemilikan untuk Pod
|
||||
|
||||
{{< feature-state for_k8s_version="v1.18" state="alpha" >}}
|
||||
|
||||
Secara bawaan, Kubernetes mengubah kepemilikan dan izin secara rekursif untuk konten masing-masing
|
||||
volume untuk mencocokkan `fsGroup` yang ditentukan dalam `securityContext` dari Pod pada saat volume itu
|
||||
ditambatkan (_mounted_). Untuk volume yang besar, memeriksa dan mengubah kepemilikan dan izin dapat memerlukan waktu yang sangat lama,
|
||||
sehingga memperlambat proses menjalankan Pod. Kamu dapat menggunakan bagian `fsGroupChangePolicy` dalam sebuah `securityContext`
|
||||
untuk mengontrol cara Kubernetes memeriksa dan mengelola kepemilikan dan izin
|
||||
untuk sebuah volume.
|
||||
|
||||
**fsGroupChangePolicy** - `fsGroupChangePolicy` mendefinisikan perilaku untuk mengubah kepemilikan dan izin volume
|
||||
sebelum diekspos di dalam sebuah Pod. Bagian ini hanya berlaku untuk tipe volume yang mendukung
|
||||
`fsGroup` untuk mengontrol kepemilikan dan izin. Bagian ini memiliki dua nilai yang dapat dimasukkan:
|
||||
|
||||
* _OnRootMismatch_: Hanya mengubah izin dan kepemilikan jika izin dan kepemilikan dari direktori _root_ tidak sesuai dengan izin volume yang diharapkan. Hal ini dapat membantu mempersingkat waktu yang diperlukan untuk mengubah kepemilikan dan izin sebuah volume.
|
||||
* _Always_: Selalu mengubah izin dan kepemilikan volume ketika volume sudah ditambatkan.
|
||||
|
||||
Sebagai contoh:
|
||||
|
||||
```yaml
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsGroup: 3000
|
||||
fsGroup: 2000
|
||||
fsGroupChangePolicy: "OnRootMismatch"
|
||||
```
|
||||
|
||||
Ini adalah fitur alpha. Untuk menggunakannya, silakan aktifkan [gerbang fitur](/docs/reference/command-line-tools-reference/feature-gates/) `ConfigurableFSGroupPolicy` untuk kube-api-server, kube-controller-manager, dan kubelet.
|
||||
|
||||
{{< note >}}
|
||||
Bagian ini tidak berpengaruh pada tipe volume yang bersifat sementara (_ephemeral_) seperti
|
||||
[`secret`](https://kubernetes.io/docs/concepts/storage/volumes/#secret),
|
||||
[`configMap`](https://kubernetes.io/docs/concepts/storage/volumes/#configmap),
|
||||
dan [`emptydir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir).
|
||||
{{< /note >}}
|
||||
|
||||
|
||||
## Mengatur konteks keamanan untuk Container
|
||||
|
||||
Untuk menentukan aturan keamanan untuk suatu Container, sertakan bagian `securityContext`
|
||||
dalam manifes Container. Bagian `securityContext` adalah sebuah objek
|
||||
[SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core).
|
||||
Aturan keamanan yang kamu tentukan untuk Container hanya berlaku untuk
|
||||
Container secara individu, dan aturan tersebut menimpa aturan yang dibuat pada tingkat Pod apabila
|
||||
ada aturan yang tumpang tindih. Aturan pada Container tidak mempengaruhi volume pada Pod.
|
||||
|
||||
Berikut berkas konfigurasi untuk Pod yang hanya memiliki satu Container. Keduanya, baik Pod
|
||||
dan Container memiliki bagian `securityContext` sebagai berikut:
|
||||
|
||||
{{< codenew file="pods/security/security-context-2.yaml" >}}
|
||||
|
||||
Buatlah Pod tersebut:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/pods/security/security-context-2.yaml
|
||||
```
|
||||
|
||||
Periksa jika Container dalam Pod sedang berjalan:
|
||||
|
||||
```shell
|
||||
kubectl get pod security-context-demo-2
|
||||
```
|
||||
|
||||
Masuk ke dalam _shell_ Container yang sedang berjalan tersebut:
|
||||
|
||||
```shell
|
||||
kubectl exec -it security-context-demo-2 -- sh
|
||||
```
|
||||
|
||||
Pada _shell_ kamu, lihat daftar proses yang sedang berjalan:
|
||||
|
||||
```
|
||||
ps aux
|
||||
```
|
||||
|
||||
Keluarannya menunjukkan bahwa proses dijalankan oleh pengguna dengan ID 2000, yang merupakan
|
||||
nilai dari `runAsUser` seperti yang telah ditentukan untuk Container tersebut. Nilai tersebut menimpa nilai ID 1000 yang
|
||||
ditentukan untuk Pod-nya.
|
||||
|
||||
```
|
||||
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
|
||||
2000 1 0.0 0.0 4336 764 ? Ss 20:36 0:00 /bin/sh -c node server.js
|
||||
2000 8 0.1 0.5 772124 22604 ? Sl 20:36 0:00 node server.js
|
||||
...
|
||||
```
|
||||
|
||||
Keluar dari _shell_ kamu:
|
||||
|
||||
```shell
|
||||
exit
|
||||
```
|
||||
|
||||
## Mengatur Kapabilitas untuk Container
|
||||
|
||||
Dengan menggunakan [Kapabilitas Linux (Linux Capabilities)](http://man7.org/linux/man-pages/man7/capabilities.7.html),
|
||||
kamu dapat memberikan wewenang tertentu kepada suatu proses tanpa memberikan semua wewenang
|
||||
dari pengguna _root_. Untuk menambah atau menghapus Kapabilitas Linux pada suatu Container, masukkan
|
||||
bagian `capabilities` pada `securityContext` di manifes Container-nya.
|
||||
|
||||
Pertama-tama, mari melihat apa yang terjadi ketika kamu tidak menyertakan bagian `capabilities`.
|
||||
Berikut ini adalah berkas konfigurasi yang tidak menambah atau mengurangi kemampuan apa pun dari Container:
|
||||
|
||||
{{< codenew file="pods/security/security-context-3.yaml" >}}
|
||||
|
||||
Buatlah Pod tersebut:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/pods/security/security-context-3.yaml
|
||||
```
|
||||
|
||||
Periksa apakah Container dari Pod tersebut sedang berjalan:
|
||||
|
||||
```shell
|
||||
kubectl get pod security-context-demo-3
|
||||
```
|
||||
|
||||
Masuk ke dalam _shell_ dari Container yang berjalan:
|
||||
|
||||
```shell
|
||||
kubectl exec -it security-context-demo-3 -- sh
|
||||
```
|
||||
|
||||
Dalam _shell_ tersebut, lihatlah daftar proses yang berjalan:
|
||||
|
||||
```shell
|
||||
ps aux
|
||||
```
|
||||
|
||||
Keluarannya menunjukkan ID dari proses atau _process IDs_ (PIDs) untuk Container tersebut:
|
||||
|
||||
```shell
|
||||
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
|
||||
root 1 0.0 0.0 4336 796 ? Ss 18:17 0:00 /bin/sh -c node server.js
|
||||
root 5 0.1 0.5 772124 22700 ? Sl 18:17 0:00 node server.js
|
||||
```
|
||||
|
||||
Dalam _shell_ kamu, lihat status dari proses dengan ID 1:
|
||||
|
||||
```shell
|
||||
cd /proc/1
|
||||
cat status
|
||||
```
|
||||
|
||||
Keluarannya menunjukkan _bitmap_ dari kapabilitas untuk proses tersebut:
|
||||
|
||||
```
|
||||
...
|
||||
CapPrm: 00000000a80425fb
|
||||
CapEff: 00000000a80425fb
|
||||
...
|
||||
```
|
||||
|
||||
Buatlah catatan untuk _bitmap_ dari kapabilitas tersebut, dan keluarlah dari _shell_ kamu:
|
||||
|
||||
```shell
|
||||
exit
|
||||
```
|
||||
|
||||
Berikutnya, jalankan Container yang sama seperti Container sebelumnya, namun
|
||||
Container ini memiliki kapabilitas tambahan yang sudah ditentukan.
|
||||
|
||||
Berikut ini adalah berkas konfigurasi untuk Pod yang hanya menjalankan satu Container. Konfigurasi
|
||||
ini menambahkan kapabilitas `CAP_NET_ADMIN` dan `CAP_SYS_TIME`:
|
||||
|
||||
{{< codenew file="pods/security/security-context-4.yaml" >}}
|
||||
|
||||
Buatlah Pod tersebut:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/pods/security/security-context-4.yaml
|
||||
```
|
||||
|
||||
Masuk ke dalam _shell_ dari Container yang berjalan:
|
||||
|
||||
```shell
|
||||
kubectl exec -it security-context-demo-4 -- sh
|
||||
```
|
||||
|
||||
Di dalam _shell_ kamu, lihatlah kapabilitas dari proses dengan ID 1:
|
||||
|
||||
```shell
|
||||
cd /proc/1
|
||||
cat status
|
||||
```
|
||||
|
||||
Keluarannya menunjukkan _bitmap_ kapabilitas untuk proses tersebut:
|
||||
|
||||
```shell
|
||||
...
|
||||
CapPrm: 00000000aa0435fb
|
||||
CapEff: 00000000aa0435fb
|
||||
...
|
||||
```
|
||||
|
||||
Bandingkan kapabilitas dari kedua Container tersebut:
|
||||
|
||||
```
|
||||
00000000a80425fb
|
||||
00000000aa0435fb
|
||||
```
|
||||
|
||||
Dalam _bitmap_ kapabilitas pada Container pertama, bit ke-12 dan ke-25 tidak diatur. Sedangkan dalam Container kedua,
|
||||
bit ke-12 dan ke-25 diatur. Bit ke-12 adalah kapabilitas `CAP_NET_ADMIN`, dan bit ke-25 adalah kapabilitas `CAP_SYS_TIME`.
|
||||
Lihatlah [capability.h](https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h)
|
||||
untuk nilai dari konstanta kapabilitas-kapabilitas yang lainnya.
|
||||
|
||||
{{< note >}}
|
||||
Konstanta kapabilitas Linux memiliki format `CAP_XXX`. Tetapi ketika kamu memasukkan daftar kemampuan dalam manifes Container kamu, kamu harus menghilangkan bagian `CAP_` dari konstantanya. Misalnya, untuk menambahkan `CAP_SYS_TIME`, masukkan `SYS_TIME` ke dalam daftar kapabilitas Container kamu.
|
||||
{{< /note >}}
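
Selain menambah kapabilitas dengan `add`, kamu juga dapat membuangnya dengan `drop`. Berikut sketsa singkat (hanya contoh, bukan bagian dari latihan di atas):

```yaml
securityContext:
  capabilities:
    drop: ["ALL"]             # buang semua kapabilitas bawaan
    add: ["NET_BIND_SERVICE"] # tambahkan kembali hanya yang diperlukan (contoh)
```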
|
||||
|
||||
## Memberikan label SELinux pada sebuah Container
|
||||
|
||||
Untuk memberikan label SELinux pada sebuah Container, masukkan bagian `seLinuxOptions` pada
|
||||
bagian `securityContext` dari manifes Pod atau Container kamu.
|
||||
Bagian `seLinuxOptions` adalah sebuah objek [SELinuxOptions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#selinuxoptions-v1-core).
|
||||
Berikut ini adalah contoh yang menerapkan sebuah level dari SELinux:
|
||||
|
||||
```yaml
|
||||
...
|
||||
securityContext:
|
||||
seLinuxOptions:
|
||||
level: "s0:c123,c456"
|
||||
```
|
||||
|
||||
{{< note >}}
|
||||
Untuk menetapkan label SELinux, modul keamanan SELinux harus dimuat terlebih dahulu pada sistem operasi dari hosnya.
|
||||
{{< /note >}}
|
||||
|
||||
## Diskusi
|
||||
|
||||
Konteks keamanan untuk sebuah Pod berlaku juga untuk Container yang berada dalam Pod tersebut dan juga untuk
|
||||
volume dari Pod tersebut jika ada. Terkhusus untuk `fsGroup` dan `seLinuxOptions`
|
||||
akan diterapkan pada volume seperti berikut (lihat sketsa singkat setelah daftar ini):
|
||||
|
||||
* `fsGroup`: Volume yang mendukung manajemen kepemilikan (_ownership_) akan dimodifikasi agar dapat dimiliki
|
||||
dan ditulis oleh ID group (GID) yang disebutkan dalam `fsGroup`. Lihatlah
|
||||
[Dokumen Desain untuk Manajemen Kepemilikan](https://git.k8s.io/community/contributors/design-proposals/storage/volume-ownership-management.md)
|
||||
untuk lebih lanjut.
|
||||
|
||||
* `seLinuxOptions`: Volume yang mendukung pelabelan SELinux akan dilabel ulang agar dapat diakses
|
||||
oleh label yang ditentukan pada `seLinuxOptions`. Biasanya kamu hanya
|
||||
perlu mengatur bagian `level`. Bagian ini menetapkan label
|
||||
[Keamanan multi-kategori (_Multi-Category Security_) (MCS)](https://selinuxproject.org/page/NB_MLS)
|
||||
yang diberikan kepada semua Container dalam Pod serta Volume yang ada didalamnya.
|
||||
|
||||
{{< warning >}}
|
||||
Setelah kamu menentukan label MCS untuk Pod, maka semua Pod dengan label yang sama dapat mengakses Volume tersebut. Jika kamu membutuhkan perlindungan antar Pod, kamu harus menetapkan label MCS yang unik untuk setiap Pod.
|
||||
{{< /warning >}}
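
Berikut sketsa gabungan dari kedua bagian di atas pada `securityContext` tingkat Pod; nilai-nilainya hanyalah contoh:

```yaml
securityContext:
  fsGroup: 2000             # diterapkan pada volume yang mendukung manajemen kepemilikan
  seLinuxOptions:
    level: "s0:c123,c456"   # label MCS; gunakan label unik per Pod bila perlu isolasi antar Pod
```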
|
||||
|
||||
## Bersih-bersih (_Clean Up_)
|
||||
|
||||
Hapus Pod-Pod tersebut:
|
||||
|
||||
```shell
|
||||
kubectl delete pod security-context-demo
|
||||
kubectl delete pod security-context-demo-2
|
||||
kubectl delete pod security-context-demo-3
|
||||
kubectl delete pod security-context-demo-4
|
||||
```
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture whatsnext %}}
|
||||
|
||||
* [PodSecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritycontext-v1-core)
|
||||
* [SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core)
|
||||
* [Menyetel Docker dengan peningkatan keamanan terbaru](https://opensource.com/business/15/3/docker-security-tuning)
|
||||
* [Dokumen desain konteks keamanan](https://git.k8s.io/community/contributors/design-proposals/auth/security_context.md)
|
||||
* [Dokumen desain manajemen kepemilikan](https://git.k8s.io/community/contributors/design-proposals/storage/volume-ownership-management.md)
|
||||
* [Kebijakan keamanan Pod](/docs/concepts/policy/pod-security-policy/)
|
||||
* [Dokumen desain AllowPrivilegeEscalation](https://git.k8s.io/community/contributors/design-proposals/auth/no-new-privs.md)
|
||||
|
||||
{{% /capture %}}
|
||||
|
|
@ -0,0 +1,147 @@
|
|||
---
|
||||
title: Mendapatkan Shell Untuk Masuk ke Container yang Sedang Berjalan
|
||||
content_template: templates/task
|
||||
---
|
||||
|
||||
{{% capture overview %}}
|
||||
|
||||
Laman ini menunjukkan bagaimana cara menggunakan `kubectl exec` untuk
|
||||
mendapatkan _shell_ untuk masuk ke dalam Container yang sedang berjalan.
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
{{% capture prerequisites %}}
|
||||
|
||||
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
{{% capture steps %}}
|
||||
|
||||
## Mendapatkan sebuah _shell_ untuk masuk ke sebuah Container
|
||||
|
||||
Dalam latihan ini, kamu perlu membuat Pod yang hanya memiliki satu Container saja. Container
|
||||
tersebut menjalankan _image_ nginx. Berikut ini adalah berkas konfigurasi untuk Pod tersebut:
|
||||
|
||||
{{< codenew file="application/shell-demo.yaml" >}}
|
||||
|
||||
Buatlah Pod tersebut:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/application/shell-demo.yaml
|
||||
```
|
||||
|
||||
Pastikan bahwa Container dalam Pod berjalan:
|
||||
|
||||
```shell
|
||||
kubectl get pod shell-demo
|
||||
```
|
||||
|
||||
Dapatkan _shell_ untuk masuk ke dalam Container:
|
||||
|
||||
```shell
|
||||
kubectl exec -it shell-demo -- /bin/bash
|
||||
```
|
||||
|
||||
{{< note >}}
|
||||
|
||||
Simbol tanda hubung ganda "--" digunakan untuk memisahkan antara argumen perintah yang ingin kamu eksekusi pada Container dan argumen dari kubectl itu sendiri.
|
||||
|
||||
{{< /note >}}
|
||||
|
||||
Di dalam _shell_ kamu, perlihatkan isi dari direktori _root_:
|
||||
|
||||
```shell
|
||||
root@shell-demo:/# ls /
|
||||
```
|
||||
|
||||
Di dalam _shell_ kamu, cobalah perintah-perintah yang lainnya. Berikut beberapa contohnya:
|
||||
|
||||
```shell
|
||||
root@shell-demo:/# ls /
|
||||
root@shell-demo:/# cat /proc/mounts
|
||||
root@shell-demo:/# cat /proc/1/maps
|
||||
root@shell-demo:/# apt-get update
|
||||
root@shell-demo:/# apt-get install -y tcpdump
|
||||
root@shell-demo:/# tcpdump
|
||||
root@shell-demo:/# apt-get install -y lsof
|
||||
root@shell-demo:/# lsof
|
||||
root@shell-demo:/# apt-get install -y procps
|
||||
root@shell-demo:/# ps aux
|
||||
root@shell-demo:/# ps aux | grep nginx
|
||||
```
|
||||
|
||||
## Menulis halaman utama (_root_) untuk nginx
|
||||
|
||||
Lihat kembali berkas konfigurasi untuk Pod kamu. Pod
|
||||
memiliki volume `emptyDir`, dan Container melakukan pemasangan (_mounting_) untuk volume tersebut
|
||||
pada `/usr/share/nginx/html`.
|
||||
|
||||
Pada _shell_ kamu, buatlah berkas `index.html` dalam direktori `/usr/share/nginx/html`:
|
||||
|
||||
```shell
|
||||
root@shell-demo:/# echo Hello shell demo > /usr/share/nginx/html/index.html
|
||||
```
|
||||
|
||||
Pada _shell_ kamu, kirimkan sebuah permintaan (_request_) GET ke server nginx.
|
||||
|
||||
```shell
|
||||
root@shell-demo:/# apt-get update
|
||||
root@shell-demo:/# apt-get install curl
|
||||
root@shell-demo:/# curl localhost
|
||||
```
|
||||
|
||||
Keluarannya akan menunjukkan teks yang kamu tulis pada berkas `index.html`.
|
||||
|
||||
```shell
|
||||
Hello shell demo
|
||||
```
|
||||
|
||||
Setelah kamu selesai dengan _shell_ kamu, ketiklah `exit`.
|
||||
|
||||
## Menjalankan perintah individu di dalam sebuah Container
|
||||
|
||||
Pada jendela (_window_) perintah biasa, bukan pada _shell_ kamu di dalam Container,
|
||||
lihatlah daftar variabel lingkungan (_environment variable_) pada Container yang sedang berjalan:
|
||||
|
||||
```shell
|
||||
kubectl exec shell-demo env
|
||||
```
|
||||
|
||||
Cobalah dengan menjalankan perintah lainnya. Berikut beberapa contohnya:
|
||||
|
||||
```shell
|
||||
kubectl exec shell-demo ps aux
|
||||
kubectl exec shell-demo ls /
|
||||
kubectl exec shell-demo cat /proc/1/mounts
|
||||
```
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture discussion %}}
|
||||
|
||||
## Membuka sebuah _shell_ ketika sebuah Pod memiliki lebih dari satu Container
|
||||
|
||||
Jika sebuah Pod memiliki lebih dari satu Container, gunakanlah `--container` atau `-c` untuk
|
||||
menentukan Container yang dimaksud pada perintah `kubectl exec`. Sebagai contoh,
|
||||
misalkan kamu memiliki Pod yang bernama my-pod, dan Pod tersebut memiliki dua Container
|
||||
yang bernama main-app dan helper-app. Perintah berikut ini akan membuka sebuah
|
||||
_shell_ ke Container dengan nama main-app.
|
||||
|
||||
```shell
|
||||
kubectl exec -it my-pod --container main-app -- /bin/bash
|
||||
```
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
{{% capture whatsnext %}}
|
||||
|
||||
* [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec)
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,156 @@
|
|||
---
|
||||
title: Mendefinisikan Perintah dan Argumen untuk sebuah Kontainer
|
||||
content_template: templates/task
|
||||
weight: 10
|
||||
---
|
||||
|
||||
{{% capture overview %}}
|
||||
|
||||
Laman ini menunjukkan bagaimana cara mendefinisikan perintah-perintah
|
||||
dan argumen-argumen saat kamu menjalankan Container
|
||||
dalam sebuah {{< glossary_tooltip term_id="Pod" >}}.
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
{{% capture prerequisites %}}
|
||||
|
||||
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
{{% capture steps %}}
|
||||
|
||||
## Mendefinisikan sebuah perintah dan argumen-argumen saat kamu membuat sebuah Pod
|
||||
|
||||
Saat kamu membuat sebuah Pod, kamu dapat mendefinisikan sebuah perintah dan argumen-argumen untuk
|
||||
Container-Container yang berjalan di dalam Pod. Untuk mendefinisikan sebuah perintah, sertakan
|
||||
bidang `command` di dalam berkas konfigurasi. Untuk mendefinisikan argumen-argumen untuk perintah, sertakan
|
||||
bidang `args` di dalam berkas konfigurasi. Perintah dan argumen-argumen yang telah
|
||||
kamu definisikan tidak dapat diganti setelah Pod telah terbuat.
|
||||
|
||||
Perintah dan argumen-argumen yang kamu definisikan di dalam berkas konfigurasi
|
||||
menimpa perintah dan argumen-argumen bawaan yang disediakan oleh _image_ Container.
|
||||
Jika kamu mendefinisikan argumen-argumen, tetapi tidak mendefinisikan sebuah perintah, perintah bawaan digunakan
|
||||
dengan argumen-argumen baru kamu.
|
||||
|
||||
{{< note >}}
|
||||
Bidang `command` menyerupai `entrypoint` di beberapa _runtime_ Container.
|
||||
Merujuk pada [catatan](#catatan) di bawah.
|
||||
{{< /note >}}
|
||||
|
||||
Pada latihan ini, kamu akan membuat sebuah Pod baru yang menjalankan sebuah Container. Berkas konfigurasi
|
||||
untuk Pod mendefinisikan sebuah perintah dan dua argumen:
|
||||
|
||||
{{< codenew file="pods/commands.yaml" >}}
|
||||
|
||||
1. Buat sebuah Pod dengan berkas konfigurasi YAML:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/pods/commands.yaml
|
||||
```
|
||||
|
||||
2. Lihat daftar Pod yang sedang berjalan:
|
||||
|
||||
```shell
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
Keluaran menunjukkan bahwa Container yang berjalan di dalam Pod command-demo
|
||||
telah selesai.
|
||||
|
||||
3. Untuk melihat keluaran dari perintah yang berjalan di dalam Container, lihat log
|
||||
dari Pod tersebut:
|
||||
|
||||
```shell
|
||||
kubectl logs command-demo
|
||||
```
|
||||
|
||||
Keluaran menunjukan nilai dari variabel lingkungan HOSTNAME dan KUBERNETES_PORT:
|
||||
|
||||
```
|
||||
command-demo
|
||||
tcp://10.3.240.1:443
|
||||
```
|
||||
|
||||
## Menggunakan variabel lingkungan untuk mendefinisikan argumen
|
||||
|
||||
Dalam contoh sebelumnya, kamu mendefinisikan langsung argumen-argumen dengan
|
||||
menyediakan _string_. Sebagai sebuah alternatif untuk menyediakan _string_ secara langsung,
|
||||
kamu dapat mendefinisikan argumen-argumen dengan menggunakan variabel lingkungan:
|
||||
|
||||
```yaml
|
||||
env:
|
||||
- name: MESSAGE
|
||||
value: "hello world"
|
||||
command: ["/bin/echo"]
|
||||
args: ["$(MESSAGE)"]
|
||||
```
|
||||
|
||||
Ini berarti kamu dapat mendefinisikan sebuah argumen untuk sebuah Pod menggunakan
|
||||
salah satu teknik yang tersedia untuk mendefinisikan variabel-variabel lingkungan, termasuk
|
||||
[ConfigMap](/id/docs/tasks/configure-pod-container/configure-pod-configmap/)
|
||||
dan
|
||||
[Secret](/id/docs/concepts/configuration/secret/).
|
||||
|
||||
{{< note >}}
|
||||
Variabel lingkungan muncul dalam tanda kurung, `"$(VAR)"`. Ini
|
||||
dibutuhkan untuk variabel yang akan diperluas di bidang `command` atau `args`.
|
||||
{{< /note >}}
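
Sebagai gambaran, berikut sketsa yang mengambil nilai variabel lingkungan dari sebuah ConfigMap. Nama `pesan-config` dan kunci `pesan` hanyalah contoh asumsi:

```yaml
env:
- name: MESSAGE
  valueFrom:
    configMapKeyRef:
      name: pesan-config   # nama ConfigMap (contoh)
      key: pesan           # kunci di dalam ConfigMap (contoh)
command: ["/bin/echo"]
args: ["$(MESSAGE)"]
```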
|
||||
|
||||
## Menjalankan sebuah perintah di dalam shell
|
||||
|
||||
Di beberapa kasus, kamu butuh perintah untuk menjalankan sebuah _shell_. Contohnya,
|
||||
perintah kamu mungkin terdiri dari beberapa perintah yang digabungkan, atau mungkin berupa
|
||||
skrip _shell_. Untuk menjalankan perintah kamu di sebuah _shell_, bungkus seperti ini:
|
||||
|
||||
```shell
|
||||
command: ["/bin/sh"]
|
||||
args: ["-c", "while true; do echo hello; sleep 10;done"]
|
||||
```
|
||||
|
||||
## Catatan
|
||||
|
||||
Tabel ini merangkum nama-nama bidang yang digunakan oleh Docker dan Kubernetes.
|
||||
|
||||
| Deskripsi | Nama bidang pada Docker | Nama bidang pada Kubernetes |
|
||||
|-------------------------------------------|------------------------------|-----------------------------|
|
||||
| Perintah yang dijalankan oleh Container | Entrypoint | command |
|
||||
| Argumen diteruskan ke perintah | Cmd | args |
|
||||
|
||||
Saat kamu mengesampingkan Entrypoint dan Cmd standar,
|
||||
aturan-aturan ini berlaku:
|
||||
|
||||
* Jika kamu tidak menyediakan `command` atau `args` untuk sebuah Container,
|
||||
maka `command` dan `args` yang didefinisikan di dalam _image_ Docker akan digunakan.
|
||||
|
||||
* Jika kamu menyediakan `command` tetapi tidak menyediakan `args` untuk sebuah Container, akan digunakan
|
||||
`command` yang disediakan. Entrypoint dan Cmd bawaan yang didefinisikan di dalam
|
||||
_image_ Docker diabaikan.
|
||||
|
||||
* Jika kamu hanya menyediakan `args` untuk sebuah Container, Entrypoint bawaan yang didefinisikan di dalam
|
||||
_image_ Docker dijalankan dengan `args` yang kamu sediakan.
|
||||
|
||||
* Jika kamu menyediakan `command` dan `args`, Entrypoint dan Cmd standar yang didefinisikan
|
||||
di dalam _image_ Docker diabaikan. `command` kamu akan dijalankan dengan `args` kamu.
|
||||
|
||||
Berikut ini beberapa contoh:
|
||||
|
||||
| Image Entrypoint | Image Cmd | Container command | Container args | Command run |
|
||||
|--------------------|------------------|---------------------|--------------------|------------------|
|
||||
| `[/ep-1]`          | `[foo bar]`      | &lt;not set&gt;     | &lt;not set&gt;    | `[ep-1 foo bar]` |
|
||||
| `[/ep-1]`          | `[foo bar]`      | `[/ep-2]`           | &lt;not set&gt;    | `[ep-2]`         |
|
||||
| `[/ep-1]`          | `[foo bar]`      | &lt;not set&gt;     | `[zoo boo]`        | `[ep-1 zoo boo]` |
|
||||
| `[/ep-1]` | `[foo bar]` | `[/ep-2]` | `[zoo boo]` | `[ep-2 zoo boo]` |
|
||||
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture whatsnext %}}
|
||||
|
||||
* Pelajari lebih lanjut tentang [mengatur Pod and Container](/id/docs/tasks/).
|
||||
* Pelajari lebih lanjut tentang [menjalankan perintah di dalam sebuah Container](/id/docs/tasks/debug-application-cluster/get-shell-running-container/).
|
||||
* Lihat [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core).
|
||||
|
||||
{{% /capture %}}
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: shell-demo
|
||||
spec:
|
||||
volumes:
|
||||
- name: shared-data
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /usr/share/nginx/html
|
||||
hostNetwork: true
|
||||
dnsPolicy: Default
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: command-demo
|
||||
labels:
|
||||
purpose: demonstrate-command
|
||||
spec:
|
||||
containers:
|
||||
- name: command-demo-container
|
||||
image: debian
|
||||
command: ["printenv"]
|
||||
args: ["HOSTNAME", "KUBERNETES_PORT"]
|
||||
restartPolicy: OnFailure
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: hello-apparmor
|
||||
annotations:
|
||||
# Mengintruksikan Kubernetes untuk menerapkan profil AppArmor "k8s-apparmor-example-deny-write".
|
||||
# Perlu dicatat bahwa ini akan di abaikan jika Node Kubernetes tidak berjalan pada versi 1.4 atau lebih.
|
||||
container.apparmor.security.beta.kubernetes.io/hello: localhost/k8s-apparmor-example-deny-write
|
||||
spec:
|
||||
containers:
|
||||
- name: hello
|
||||
image: busybox
|
||||
command: [ "sh", "-c", "echo 'Hello AppArmor!' && sleep 1h" ]
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: security-context-demo-2
|
||||
spec:
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
containers:
|
||||
- name: sec-ctx-demo-2
|
||||
image: gcr.io/google-samples/node-hello:1.0
|
||||
securityContext:
|
||||
runAsUser: 2000
|
||||
allowPrivilegeEscalation: false
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: security-context-demo-3
|
||||
spec:
|
||||
containers:
|
||||
- name: sec-ctx-3
|
||||
image: gcr.io/google-samples/node-hello:1.0
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: security-context-demo-4
|
||||
spec:
|
||||
containers:
|
||||
- name: sec-ctx-4
|
||||
image: gcr.io/google-samples/node-hello:1.0
|
||||
securityContext:
|
||||
capabilities:
|
||||
add: ["NET_ADMIN", "SYS_TIME"]
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: security-context-demo
|
||||
spec:
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsGroup: 3000
|
||||
fsGroup: 2000
|
||||
volumes:
|
||||
- name: sec-ctx-vol
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: sec-ctx-demo
|
||||
image: busybox
|
||||
command: [ "sh", "-c", "sleep 1h" ]
|
||||
volumeMounts:
|
||||
- name: sec-ctx-vol
|
||||
mountPath: /data/demo
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: task-pv-claim
|
||||
spec:
|
||||
storageClassName: manual
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 3Gi
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: task-pv-pod
|
||||
spec:
|
||||
volumes:
|
||||
- name: task-pv-storage
|
||||
persistentVolumeClaim:
|
||||
claimName: task-pv-claim
|
||||
containers:
|
||||
- name: task-pv-container
|
||||
image: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
name: "http-server"
|
||||
volumeMounts:
|
||||
- mountPath: "/usr/share/nginx/html"
|
||||
name: task-pv-storage
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: task-pv-volume
|
||||
labels:
|
||||
type: local
|
||||
spec:
|
||||
storageClassName: manual
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
hostPath:
|
||||
path: "/mnt/data"
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
---
|
||||
---
|
||||
title: I componenti di Kubernetes
|
||||
content_template: templates/concept
|
||||
weight: 20
|
||||
|
|
@ -9,9 +9,9 @@ card:
|
|||
|
||||
{{% capture overview %}}
|
||||
Facendo il deployment di Kubernetes, ottieni un cluster.
|
||||
{{< glossary_definition term_id="cluster" length="all" prepend="Un cluster è">}}
|
||||
{{< glossary_definition term_id="cluster" length="all" prepend="Un cluster Kubernetes è">}}
|
||||
|
||||
Questo documento describe i diversi componenti che sono necessari per avere
|
||||
Questo documento descrive i diversi componenti che sono necessari per avere
|
||||
un cluster Kubernetes completo e funzionante.
|
||||
|
||||
Questo è un diagramma di un cluster Kubernetes con tutti i componenti e le loro relazioni.
|
||||
|
|
@ -23,9 +23,9 @@ Questo è un diagramma di un cluster Kubernetes con tutti i componenti e le loro
|
|||
{{% capture body %}}
|
||||
## Componenti della Control Plane
|
||||
|
||||
La Control Plane è responsabile di tutte le decisioni globali sul cluster (ad esempio, lo scheduling), e l'individuazione e la risposta ad eventi derivanti dal cluster (ad esempio, l'avvio di un nuovo {{< glossary_tooltip text="pod" term_id="pod">}} quando il valore `replicas` di un deployment non è soddisfatto).
|
||||
I componenti del Control Plane sono responsabili di tutte le decisioni globali sul cluster (ad esempio, lo scheduling) oltre che a rilevare e rispondere agli eventi del cluster (ad esempio, l'avvio di un nuovo {{< glossary_tooltip text="pod" term_id="pod">}} quando il valore `replicas` di un deployment non è soddisfatto).
|
||||
|
||||
I componenti della Control Plane possono essere eseguiti su qualsiasi nodo del cluster, ma solitamente gli script di installazione tendono a eseguire tutti i componenti della Control Plane sulla stessa macchina, separando la Control Plane dai workload dell'utente.
|
||||
I componenti della Control Plane possono essere eseguiti su qualsiasi nodo del cluster stesso. Solitamente, per semplicità, gli script di installazione tendono a eseguire tutti i componenti della Control Plane sulla stessa macchina, separando la Control Plane dai workload dell'utente.
|
||||
Vedi [creare un cluster in High-Availability](/docs/admin/high-availability/) per un esempio di un'installazione multi-master.
|
||||
|
||||
### kube-apiserver
|
||||
|
|
@ -53,27 +53,25 @@ Alcuni esempi di controller gestiti dal kube-controller-manager sono:
|
|||
|
||||
### cloud-controller-manager
|
||||
|
||||
Il [cloud-controller-manager](/docs/tasks/administer-cluster/running-cloud-controller/) esegue i controller che interagiscono con i cloud provider responsabili per la gestione dell'infrastruttura sottostante al cluster, in caso di deployment in cloud.
|
||||
Il cloud-controller-manager è una funzionalità alpha introdotta in Kubernetes 1.6.
|
||||
{{< glossary_definition term_id="cloud-controller-manager" length="short" >}}
|
||||
|
||||
Il cloud-controller-manager esegue esclusivamente i cicli di controllo specifici dei cloud provider.
|
||||
È possibile disabilitare questi cicli di controllo usando il kube-controller-manager.
|
||||
È inoltre possibile disabilitare i cicli di controllo settando il parametro `--cloud-provider` con il valore `external` durante l'esecuzione del kube-controller-manager.
|
||||
Il cloud-controller-manager esegue dei controller specifici del tuo cloud provider.
|
||||
Se hai una installazione Kubernetes on premises, o un ambiente di laboratorio
|
||||
nel tuo PC, il cluster non ha un cloud-controller-manager.
|
||||
|
||||
Il cloud-controller-manager permette l'evoluzione indipendente al codice di Kubernetes e a quello dei singoli cloud vendor.
|
||||
Precedentemente, il codice core di Kubernetes dipendeva da implementazioni specifiche dei cloud provider.
|
||||
In futuro, implementazioni specifiche per singoli cloud provider devono essere mantenuti dai cloud provider interessati e collegati al cloud-controller-manager.
|
||||
Come nel kube-controller-manager, il cloud-controller-manager combina diversi control loop
|
||||
logicamente indipendenti in un singolo binario che puoi eseguire come un singolo processo. Tu puoi
|
||||
scalare orizzontalmente (eseguire più di una copia) per migliorare la responsività o per migliorare la tolleranza ai fallimenti.
|
||||
|
||||
I seguenti controller hanno dipendenze verso implementazioni di specifici cloud provider:
|
||||
|
||||
* Node Controller: Per controllare se sul cloud provider i nodi che hanno smesso di rispondere sono stati cancellati
|
||||
* Route Controller: Per configurare le regole di route nella sottostante infrastruttura cloud
|
||||
* Service Controller: Per creare, aggiornare ed eliminare i load balancer nella infrastruttura del cloud provider
|
||||
* Volume Controller: Per creare, associare e montare i volumi e per interagire con il cloud provider per orchestrare i volumi
|
||||
|
||||
* Route Controller: Per configurare le network route nella sottostante infrastruttura cloud
|
||||
* Service Controller: Per creare, aggiornare ed eliminare i load balancer del cloud provider
|
||||
|
||||
## Componenti dei Nodi
|
||||
|
||||
I componenti di Kubernetes che girano sui Worker Node sono responsabili dell'esecuzione dei workload degli utenti.
|
||||
I componenti del nodo vengono eseguiti su ogni nodo, mantenendo i pod in esecuzione e fornendo l'ambiente di runtime Kubernetes.
|
||||
|
||||
### kubelet
|
||||
|
||||
|
|
@ -89,10 +87,10 @@ I componenti di Kubernetes che girano sui Worker Node sono responsabili dell'ese
|
|||
|
||||
## Addons

Addons use Kubernetes resources ({{< glossary_tooltip term_id="daemonset" >}}, {{< glossary_tooltip term_id="deployment" >}}, etc.) to implement new cluster-level features.
Addons use Kubernetes resources ({{< glossary_tooltip term_id="daemonset" >}}, {{< glossary_tooltip term_id="deployment" >}}, etc.) to implement cluster features.
Because addons provide cluster-level features, namespaced resources for addons belong in the `kube-system` namespace.

Selected addons are described below; for a more extensive list of addons, refer to [Addons](/docs/concepts/cluster-administration/addons/).
Selected addons are described below; for a more extensive list of addons, please see [Addons](/docs/concepts/cluster-administration/addons/).

### DNS

@ -100,7 +98,7 @@ While the other addons are not strictly required, all Kubernetes clusters

Cluster DNS is a DNS server, in addition to the other DNS server(s) in your environment, which serves DNS records specifically for Kubernetes services.

Containers started by Kubernetes can use this server for DNS resolution.
Containers started by Kubernetes automatically use this server for DNS resolution.
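
As a quick, illustrative way to see cluster DNS at work (the Pod name and image are placeholders; any image that ships `nslookup` would do), a throwaway Pod can resolve a Service name through the cluster DNS server:

```yaml
# Sketch only: resolves the kubernetes.default Service name via the cluster DNS addon.
apiVersion: v1
kind: Pod
metadata:
  name: dns-check
spec:
  restartPolicy: Never
  containers:
  - name: dns-check
    image: busybox:1.28
    # The lookup is answered by the cluster DNS server configured for the Pod.
    command: ["nslookup", "kubernetes.default"]
```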
### Web UI (Dashboard)
@ -0,0 +1,24 @@
---
title: Cloud Controller Manager
id: cloud-controller-manager
date: 2018-04-12
full_link: /docs/concepts/architecture/cloud-controller/
short_description: >
  Control plane component that integrates Kubernetes with third-party cloud providers.
aka:
tags:
- core-object
- architecture
- operation
---
A Kubernetes {{< glossary_tooltip text="control plane" term_id="control-plane" >}} component
that embeds cloud-specific control logic. The cloud-controller-manager lets you link your
cluster into your cloud provider's API, and separates the components that interact
with the cloud platform from the components that only interact with your cluster.

<!--more-->

By decoupling the interoperability logic between Kubernetes and the underlying cloud infrastructure,
the cloud-controller-manager component enables cloud providers to release
features at a pace that differs from the main Kubernetes project.
@ -4,14 +4,14 @@ id: cluster
date: 2019-06-15
full_link:
short_description: >
  A set of machines, called nodes, that run containers and are managed by Kubernetes. A cluster has at least one worker node and one control plane node.
  A set of machines, called nodes, that run containers managed by Kubernetes. A cluster has at least one worker node.

aka:
tags:
- fundamental
- operation
---
A set of machines, called nodes, that run containers and are managed by Kubernetes. A cluster has at least one worker node and one control plane node.
A set of machines, called nodes, that run containers managed by Kubernetes. A cluster has at least one worker node.

<!--more-->
The worker node(s) host the Pods that run the user's workloads. The control plane node(s) manage the worker nodes and everything else that happens inside the cluster. To provide high availability and failover for the cluster, multiple control plane nodes are used.
@ -0,0 +1,19 @@
---
title: Container
id: container
date: 2018-04-12
full_link: /docs/concepts/overview/what-is-kubernetes/#why-containers
short_description: >
  A lightweight, portable, executable image that contains software and all of its dependencies.

aka:
tags:
- fundamental
- workload
---
A lightweight, portable, executable image that contains software and all of its dependencies.

<!--more-->

Containers decouple applications from the underlying host infrastructure, making deployment easier across different clouds or operating systems, and also making scaling simpler.
@ -0,0 +1,26 @@
---
title: Control Plane
id: control-plane
date: 2019-05-12
full_link:
short_description: >
  The container orchestration layer that exposes the API and interfaces to define, deploy, and manage the lifecycle of containers.


aka:
tags:
- fundamental
---
The container orchestration layer that exposes the API and interfaces to define, deploy, and manage the lifecycle of containers.

<!--more-->

This layer is composed of several components, such as (but not limited to):

* {{< glossary_tooltip text="etcd" term_id="etcd" >}}
* {{< glossary_tooltip text="API Server" term_id="kube-apiserver" >}}
* {{< glossary_tooltip text="Scheduler" term_id="kube-scheduler" >}}
* {{< glossary_tooltip text="Controller Manager" term_id="kube-controller-manager" >}}
* {{< glossary_tooltip text="Cloud Controller Manager" term_id="cloud-controller-manager" >}}

These components can run as traditional operating system services (daemons) or as containers. The host(s) running these components were historically called the {{< glossary_tooltip text="master" term_id="master" >}}.
@ -0,0 +1,18 @@
---
title: Container runtime interface (CRI)
id: cri
date: 2019-03-07
full_link: /docs/concepts/overview/components/#container-runtime
short_description: >
  An API for container runtimes to integrate with the kubelet


aka:
tags:
- fundamental
---
The container runtime interface (CRI) is an API for container runtimes
to integrate with the kubelet on a node.
<!--more-->

For more information, see the [CRI](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/container-runtime-interface.md) API and specifications.
@ -0,0 +1,20 @@
---
title: DaemonSet
id: daemonset
date: 2018-04-12
full_link: /docs/concepts/workloads/controllers/daemonset
short_description: >
  Ensures that a copy of a Pod is running on all the nodes of a cluster.

aka:
tags:
- fundamental
- core-object
- workload
---
Ensures that a copy of a {{< glossary_tooltip text="Pod" term_id="pod" >}} is running on all the nodes of a {{< glossary_tooltip text="cluster" term_id="cluster" >}}.

<!--more-->

Used to deploy system daemons, such as log collectors and monitoring agents, that typically run on every {{< glossary_tooltip term_id="node" >}}.
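
To make the entry above concrete, here is a minimal DaemonSet sketch (the name, labels and image are placeholders) that runs one log-collector Pod on every node:

```yaml
# Sketch only; all names and the image are placeholders.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: log-collector
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: log-collector
  template:
    metadata:
      labels:
        app: log-collector
    spec:
      containers:
      - name: collector
        image: example.com/log-collector:latest   # placeholder image
```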
@ -0,0 +1,20 @@
---
title: Deployment
id: deployment
date: 2018-04-12
full_link: /docs/concepts/workloads/controllers/deployment/
short_description: >
  Manages a replicated application on your cluster.

aka:
tags:
- fundamental
- core-object
- workload
---
An API object that manages a replicated application, typically by running Pods with no local state.

<!--more-->
Each replica is represented by a {{< glossary_tooltip term_id="pod" >}}, and the Pods are distributed among the
{{< glossary_tooltip text="nodes" term_id="node" >}} of a cluster.
For workloads that do require local state, consider using a {{< glossary_tooltip term_id="StatefulSet" >}}.
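
For illustration only (name, labels, image and replica count are placeholders), a Deployment that keeps three identical stateless Pods running might look like this:

```yaml
# Sketch only; all names and the image tag are placeholders.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 3                # desired number of identical Pods
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: web
        image: nginx:1.19    # example image
        ports:
        - containerPort: 80
```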
@ -0,0 +1,17 @@
---
title: Docker
id: docker
date: 2018-04-12
full_link: https://docs.docker.com/engine/
short_description: >
  Docker is a software technology providing operating-system-level virtualization, also known as containers.

aka:
tags:
- fundamental
---
Docker (specifically, Docker Engine) is a software technology providing operating-system-level virtualization, also known as {{< glossary_tooltip text="containers" term_id="container" >}}.

<!--more-->

Docker uses the resource isolation features of the Linux kernel, such as cgroups and kernel namespaces, together with a union-capable file system such as OverlayFS, to allow independent containers to run within a single Linux instance, avoiding the overhead of starting and maintaining virtual machines (VMs).
@ -0,0 +1,19 @@
---
title: Kubeadm
id: kubeadm
date: 2018-04-12
full_link: /docs/admin/kubeadm/
short_description: >
  A tool for quickly installing Kubernetes and setting up a secure cluster.

aka:
tags:
- tool
- operation
---
A tool for quickly installing Kubernetes and setting up a secure cluster.

<!--more-->

You can use kubeadm to install both the control plane and the {{< glossary_tooltip text="worker node" term_id="node" >}} components.
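
As a purely illustrative sketch, assuming the `kubeadm.k8s.io/v1beta2` configuration API available around this release (the version string and pod subnet are placeholders), a cluster configuration file passed to `kubeadm init --config` could look like this:

```yaml
# Sketch only; kubernetesVersion and podSubnet are placeholder values.
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
networking:
  podSubnet: 10.244.0.0/16   # must match the pod network addon you install afterwards
```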
@ -0,0 +1,18 @@
---
title: Label
id: label
date: 2018-04-12
full_link: /docs/concepts/overview/working-with-objects/labels
short_description: >
  Tags objects with identifying attributes that are meaningful and relevant to users.

aka:
tags:
- fundamental
---
Tags objects with identifying attributes that are meaningful and relevant to users.

<!--more-->

Labels are key/value pairs attached to objects such as {{< glossary_tooltip text="Pods" term_id="pod" >}}. They are used to organize and to select subsets of objects.
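
To illustrate the entry above (label keys, values and object names are placeholders), labels are set under `metadata.labels` and then matched by selectors, for example by a Service:

```yaml
# Sketch only; label keys/values, names and the image are placeholders.
apiVersion: v1
kind: Pod
metadata:
  name: web-pod
  labels:
    app: web
    tier: frontend
spec:
  containers:
  - name: web
    image: nginx:1.19
---
apiVersion: v1
kind: Service
metadata:
  name: web
spec:
  selector:
    app: web          # selects Pods carrying the label app=web
  ports:
  - port: 80
    targetPort: 80
```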
@ -0,0 +1,15 @@
---
title: Master
id: master
date: 2020-04-16
short_description: >
  Legacy term, used as a synonym for the nodes hosting the control plane.

aka:
tags:
- fundamental
---
Legacy term, used as a synonym for the {{< glossary_tooltip text="nodes" term_id="node" >}} hosting the {{< glossary_tooltip text="control plane" term_id="control-plane" >}}.

<!--more-->
The term is still used by some provisioning tools, such as {{< glossary_tooltip text="kubeadm" term_id="kubeadm" >}}, and by managed services, to apply the `kubernetes.io/role` {{< glossary_tooltip text="label" term_id="label" >}} to {{< glossary_tooltip text="nodes" term_id="node" >}} and control the placement of {{< glossary_tooltip text="control plane" term_id="control-plane" >}} {{< glossary_tooltip text="pods" term_id="pod" >}}.
@ -0,0 +1,19 @@
---
title: Node
id: node
date: 2018-04-12
full_link: /docs/concepts/architecture/nodes/
short_description: >
  A node is a worker machine in Kubernetes.

aka:
tags:
- fundamental
---
A node is a worker machine in Kubernetes.

<!--more-->

A worker node may be a VM or a physical machine, depending on the cluster. It has the local daemons or services necessary to run {{< glossary_tooltip text="Pods" term_id="pod" >}} and is managed by the control plane. The daemons on a node include {{< glossary_tooltip text="kubelet" term_id="kubelet" >}}, {{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}}, and a container runtime implementing the {{< glossary_tooltip text="CRI" term_id="cri" >}}, such as {{< glossary_tooltip term_id="docker" >}}.

In early Kubernetes versions, nodes were called "minions".
@ -0,0 +1,19 @@
---
title: Pod
id: pod
date: 2018-04-12
full_link: /docs/concepts/workloads/pods/pod-overview/
short_description: >
  A Pod represents a group of containers running on your cluster.

aka:
tags:
- core-object
- fundamental
---
The smallest and simplest Kubernetes object. A Pod represents a group of {{< glossary_tooltip text="containers" term_id="container" >}} running on your cluster.

<!--more-->

A Pod is typically set up to run a single primary container. It can optionally run sidecar containers that add supplementary features such as logging. Pods are commonly managed by a {{< glossary_tooltip term_id="deployment" >}}.
@ -0,0 +1,22 @@
---
title: StatefulSet
id: statefulset
date: 2018-04-12
full_link: /docs/concepts/workloads/controllers/statefulset/
short_description: >
  Manages deployment and scaling of a set of Pods, with durable storage and persistent identifiers for each Pod.

aka:
tags:
- fundamental
- core-object
- workload
- storage
---
Manages the deployment and scaling of a set of {{< glossary_tooltip text="Pods" term_id="pod" >}}, *and provides guarantees about the ordering and uniqueness* of these Pods.

<!--more-->

Like a {{< glossary_tooltip term_id="deployment" >}}, a StatefulSet manages Pods that are based on an identical container spec. Unlike a Deployment, a StatefulSet maintains a sticky identity for each of its Pods. These Pods are created from the same spec, but they are not interchangeable: each Pod has a persistent identifier that it maintains across any rescheduling.

If you want to use storage volumes to provide persistence for your workload, you can use a StatefulSet as part of the solution. Although individual Pods in a StatefulSet are susceptible to failure, the persistent Pod identifiers make it easier to match existing volumes to the new Pods that replace any that have failed.
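
As an illustrative sketch only (names, image and storage size are placeholders, and a matching headless Service is assumed to exist), a StatefulSet that gives each replica its own PersistentVolumeClaim might look like this:

```yaml
# Sketch only; names, image and sizes are placeholders. A headless Service
# named "db" is assumed to provide the stable per-Pod network identities.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: db
spec:
  serviceName: db            # headless Service backing the per-Pod DNS names
  replicas: 3
  selector:
    matchLabels:
      app: db
  template:
    metadata:
      labels:
        app: db
    spec:
      containers:
      - name: db
        image: example.com/db:latest   # placeholder image
        volumeMounts:
        - name: data
          mountPath: /var/lib/db
  volumeClaimTemplates:      # one PVC per Pod, reattached when a Pod is rescheduled
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
```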
@ -1,101 +0,0 @@
---
title: Installing Kubernetes with Digital Rebar Provision (DRP) using KRIB
krib-version: 2.4
author: Rob Hirschfeld (zehicle)
weight: 20
---

## Overview

This guide helps to install a Kubernetes cluster hosted on bare metal with [Digital Rebar Provision](https://github.com/digitalrebar/provision) using only its Content packages and *kubeadm*.

Digital Rebar Provision (DRP) is an integrated Golang DHCP, bare metal provisioning (PXE/iPXE) and workflow automation platform. While [DRP can be used to invoke](https://provision.readthedocs.io/en/tip/doc/integrations/ansible.html) [kubespray](/ja/docs/setup/custom-cloud/kubespray), it also offers a self-contained Kubernetes installation known as [KRIB (Kubernetes Rebar Integrated Bootstrap)](https://github.com/digitalrebar/provision-content/tree/master/krib).

{{< note >}}
KRIB is not a _stand-alone_ installer: Digital Rebar templates drive a standard *[kubeadm](/docs/admin/kubeadm/)* configuration that manages the Kubernetes installation with the [Digital Rebar cluster pattern](https://provision.readthedocs.io/en/tip/doc/arch/cluster.html#rs-cluster-pattern) to elect leaders _without external supervision_.
{{< /note >}}


KRIB features:

* zero-touch, self-configuring cluster without pre-configuration or inventory
* very fast, no-ssh required automation
* bare metal, on-premises focused platform
* highly available cluster options (including splitting etcd from the controllers)
* dynamic generation of a TLS infrastructure
* composable attributes and automatic detection of hardware by profile
* options for persistent, immutable and image-based deployments
* support for Ubuntu 18.04, CentOS/RHEL 7, CoreOS, RancherOS and others

## Creating a cluster

Review the [Digital Rebar documentation](https://provision.readthedocs.io/en/tip/README.html) for details about installing the platform.

The Digital Rebar Provision Golang binary should be installed on a Linux-like system with 16 GB of RAM or more (Packet.net Tiny and Raspberry Pi are also acceptable).

### (1/5) Discover servers

Following the [Digital Rebar installation](https://provision.readthedocs.io/en/tip/doc/quickstart.html), allow one or more servers to boot through the _Sledgehammer_ discovery process to register with the API. This automatically installs the Digital Rebar runner and allows for the next steps.

### (2/5) Install KRIB and the Cert plugin

Upload the KRIB Content bundle (or build it from [source](https://github.com/digitalrebar/provision-content/tree/master/krib)) and the Cert Plugin for your DRP platform. Both are freely available via the [RackN UX](https://portal.rackn.io) or by using the upload-from-catalog feature of the DRPCLI (shown below).

```
drpcli plugin_providers upload certs from catalog:certs-stable
drpcli contents upload catalog:krib-stable
```

### (3/5) Start the cluster deployment

{{< note >}}
KRIB documentation is dynamically generated from the source and will be more up to date than this guide.
{{< /note >}}

Following the [KRIB documentation](https://provision.readthedocs.io/en/tip/doc/content-packages/krib.html), create a Profile for your cluster and assign your target servers into the cluster Profile. The Profile must set the `krib/cluster-name` and `etcd/cluster-name` Params to the name of the Profile. Cluster configuration choices can be made by adding additional Params to the Profile; however, safe defaults are provided for all Params.

Once all target servers are assigned to the cluster Profile, start a KRIB installation Workflow by assigning one of the included Workflows to all cluster servers. For example, selecting `krib-live-cluster` performs an immutable deployment into the Sledgehammer discovery operating system. You may use one of the pre-created read-only Workflows or choose to build your own custom variation.

For basic installs, no further action is required. Advanced users may choose to assign the controllers, etcd servers or other configuration values in the relevant Params.

### (4/5) Monitor the cluster deployment

Digital Rebar Provision provides detailed logging and live updates during the installation process. Workflow events are available via a websocket connection or by monitoring the Jobs list.

During the installation, KRIB writes cluster configuration data back into the cluster Profile.

### (5/5) Access the cluster

The cluster is available for access via *kubectl* once the `krib/cluster-admin-conf` Param has been set. This Param contains the `kubeconfig` information necessary to access the cluster.

For example, if you named the cluster Profile `krib`, the following commands would allow you to connect to the installed cluster from your local terminal.

    drpcli profiles get krib params krib/cluster-admin-conf > admin.conf
    export KUBECONFIG=admin.conf
    kubectl get nodes

The installation continues after `krib/cluster-admin-conf` is set, in order to install the Kubernetes UI and Helm. You may interact with the cluster as soon as the `admin.conf` file is available.

## Cluster operations

KRIB provides additional Workflows to manage your cluster. Please see the [KRIB documentation](https://provision.readthedocs.io/en/tip/doc/content-packages/krib.html) for an updated list of advanced cluster operations.

### Scaling the cluster

You can add servers to your cluster by adding the cluster Profile to the server and running the appropriate Workflow.

### Cleaning up the cluster (for developers)

You can reset your cluster and wipe out all configuration and TLS certificates using the `krib-reset-cluster` Workflow on any of the servers in the cluster.

{{< caution >}}
When running the reset Workflow, be sure not to accidentally target your production cluster!
{{< /caution >}}

## Feedback

* Slack Channel: [#community](https://rackn.slack.com/messages/community/)
* [GitHub Issues](https://github.com/digitalrebar/provision/issues)
@ -50,7 +50,7 @@ that let Google run billions of containers a week
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccncna20" button id="desktopKCButton">Attend KubeCon in Boston on November 17-20, 2020</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccnceu20" button id="desktopKCButton">Attend KubeCon EU virtually on August 17-20, 2020</a>
</div>
<div id="videoPlayer">
<iframe data-url="https://www.youtube.com/embed/H06qrNmGqyE?autoplay=1" frameborder="0" allowfullscreen></iframe>
Binary file not shown.
Before Width: | Height: | Size: 20 KiB

@ -1,4 +0,0 @@
---
title: CCP Games
content_url: https://cloud.google.com/customers/ccp-games/
---
Binary file not shown.
Before Width: | Height: | Size: 6.1 KiB

@ -1,4 +0,0 @@
---
title: Comcast
content_url: https://youtu.be/lmeFkH-rHII
---
Binary file not shown.
Before Width: | Height: | Size: 19 KiB

@ -1,4 +0,0 @@
---
title: Concur
content_url: http://searchitoperations.techtarget.com/news/450297178/Tech-firms-roll-out-Kubernetes-in-production
---
Binary file not shown.
Before Width: | Height: | Size: 22 KiB

Some files were not shown because too many files have changed in this diff.