Merge branch 'master' into patch-1

@@ -10,6 +10,8 @@ toc:
    path: /docs/whatisk8s/
  - title: Installing Kubernetes on Linux with kubeadm
    path: /docs/getting-started-guides/kubeadm/
  - title: Installing Kubernetes on AWS with kops
    path: /docs/getting-started-guides/kops/
  - title: Hello World on Google Container Engine
    path: /docs/hellonode/
  - title: Downloading or Building Kubernetes
@@ -68,6 +70,8 @@ toc:
    path: /docs/getting-started-guides/network-policy/walkthrough/
  - title: Using Calico for NetworkPolicy
    path: /docs/getting-started-guides/network-policy/calico/
  - title: Using Romana for NetworkPolicy
    path: /docs/getting-started-guides/network-policy/romana/

  - title: Batch Jobs
    section:
@@ -2,6 +2,12 @@ bigheader: "Tasks"
toc:
- title: Tasks
  path: /docs/tasks/
- title: Configuring Pods and Containers
  section:
  - title: Defining Environment Variables for a Container
    path: /docs/tasks/configure-pod-container/define-environment-variable-container/
  - title: Defining a Command and Arguments for a Container
    path: /docs/tasks/configure-pod-container/define-command-argument-container/
- title: Accessing Applications in a Cluster
  section:
  - title: Using Port Forwarding to Access Applications in a Cluster
@@ -4,6 +4,7 @@
<a href="/docs/hellonode/">Get Started</a>
<a href="/docs/">Documentation</a>
<a href="http://blog.kubernetes.io/">Blog</a>
<a href="/partners/">Partners</a>
<a href="/community/">Community</a>
<a href="/case-studies/">Case Studies</a>
</nav>
@@ -0,0 +1,229 @@
;(function () {
  // type 0 = technology partner (rendered into #isvContainer);
  // type 1 = services partner (rendered into #servContainer).
  var partners = [
    {
      type: 0,
      name: 'CoreOS',
      logo: 'core_os',
      link: 'https://tectonic.com/',
      blurb: 'Tectonic is the enterprise-ready Kubernetes product, by CoreOS. It adds key features to allow you to manage, update, and control clusters in production.'
    },
    {
      type: 0,
      name: 'Deis',
      logo: 'deis',
      link: 'https://deis.com',
      blurb: 'Deis, the creators of Helm, Workflow, and Steward, helps developers and operators build, deploy, manage and scale their applications on top of Kubernetes.'
    },
    {
      type: 0,
      name: 'Sysdig Cloud',
      logo: 'sys_dig',
      link: 'https://sysdig.com/blog/monitoring-kubernetes-with-sysdig-cloud/',
      blurb: 'Container-native monitoring with deep support for Kubernetes.'
    },
    {
      type: 0,
      name: 'Puppet',
      logo: 'puppet',
      link: 'https://puppet.com/blog/managing-kubernetes-configuration-puppet',
      blurb: 'The Puppet module for Kubernetes makes it easy to manage Pods, Replication Controllers, Services and more in Kubernetes, and to build domain-specific interfaces to one\'s Kubernetes configuration.'
    },
    {
      type: 0,
      name: 'Citrix',
      logo: 'citrix',
      link: 'https://www.citrix.com/blogs/2016/07/15/citrix-kubernetes-a-home-run/',
      blurb: 'NetScaler CPX gives app developers all the features they need to load balance their microservices and containerized apps with Kubernetes.'
    },
    {
      type: 0,
      name: 'Wercker',
      logo: 'wercker',
      link: 'http://wercker.com/workflows/partners/kubernetes/',
      blurb: 'Wercker automates your build, test and deploy pipelines for launching containers and triggering rolling updates on your Kubernetes cluster.'
    },
    {
      type: 0,
      name: 'Rancher',
      logo: 'rancher',
      link: 'http://rancher.com/kubernetes/',
      blurb: 'Rancher is an open-source, production-ready container management platform that makes it easy to deploy and leverage Kubernetes in the enterprise.'
    },
    {
      type: 0,
      name: 'Red Hat',
      logo: 'redhat',
      link: 'https://www.openshift.com/',
      blurb: 'Leverage an enterprise Kubernetes platform to orchestrate complex, multi-container apps.'
    },
    {
      type: 0,
      name: 'Intel',
      logo: 'intel',
      link: 'https://tectonic.com/press/intel-coreos-collaborate-on-openstack-with-kubernetes.html',
      blurb: 'Powering the GIFEE (Google’s Infrastructure for Everyone Else), to run OpenStack deployments on Kubernetes.'
    },
    {
      type: 0,
      name: 'ElasticKube',
      logo: 'elastickube',
      link: 'https://www.ctl.io/elastickube-kubernetes/',
      blurb: 'Self-service container management for Kubernetes.'
    },
    {
      type: 0,
      name: 'Platform9',
      logo: 'platform9',
      link: 'https://platform9.com/products/kubernetes/',
      blurb: 'Platform9 is the open source-as-a-service company that takes all of the goodness of Kubernetes and delivers it as a managed service.'
    },
    {
      type: 0,
      name: 'Datadog',
      logo: 'datadog',
      link: 'http://docs.datadoghq.com/integrations/kubernetes/',
      blurb: 'Full-stack observability for dynamic infrastructure & applications. Includes precision alerting, analytics and deep Kubernetes integrations.'
    },
    {
      type: 0,
      name: 'AppFormix',
      logo: 'appformix',
      link: 'http://www.appformix.com/solutions/appformix-for-kubernetes/',
      blurb: 'AppFormix is a cloud infrastructure performance optimization service helping enterprise operators streamline their cloud operations on any Kubernetes cloud.'
    },
    {
      type: 0,
      name: 'Crunchy',
      logo: 'crunchy',
      link: 'http://info.crunchydata.com/blog/advanced-crunchy-containers-for-postgresql',
      blurb: 'Crunchy PostgreSQL Container Suite is a set of containers for managing PostgreSQL with DBA microservices leveraging Kubernetes and Helm.'
    },
    {
      type: 0,
      name: 'Aqua',
      logo: 'aqua',
      link: 'http://blog.aquasec.com/security-best-practices-for-kubernetes-deployment',
      blurb: 'Deep, automated security for your containers running on Kubernetes.'
    },
    {
      type: 0,
      name: 'Canonical',
      logo: 'canonical',
      link: 'https://jujucharms.com/canonical-kubernetes/',
      blurb: 'The Canonical Distribution of Kubernetes enables you to operate Kubernetes clusters on demand on any major public cloud and private infrastructure.'
    },
    {
      type: 0,
      name: 'Distelli',
      logo: 'distelli',
      link: 'https://www.distelli.com/',
      blurb: 'Pipelines from your source repositories to your Kubernetes clusters on any cloud.'
    },
    {
      type: 0,
      name: 'Nuage Networks',
      logo: 'nuagenetworks',
      link: 'https://github.com/nuagenetworks/nuage-kubernetes',
      blurb: 'The Nuage SDN platform provides policy-based networking between Kubernetes Pods and non-Kubernetes environments with visibility and security monitoring.'
    },
    {
      type: 0,
      name: 'Sematext',
      logo: 'sematext',
      link: 'https://sematext.com/kubernetes/',
      blurb: 'Logging & Monitoring: Automatic collection and processing of Metrics, Events and Logs for auto-discovered pods and Kubernetes nodes.'
    },
    {
      type: 0,
      name: 'Diamanti',
      logo: 'diamanti',
      link: 'https://www.diamanti.com/products/',
      blurb: 'Diamanti deploys containers with guaranteed performance using Kubernetes in the first hyperconverged appliance purpose-built for containerized applications.'
    },
    {
      type: 1,
      name: 'Apprenda',
      logo: 'apprenda',
      link: 'https://apprenda.com/kubernetes-support/',
      blurb: 'Apprenda offers a flexible and wide range of support plans for pure play Kubernetes on your choice of infrastructure, cloud provider and operating system.'
    },
    {
      type: 1,
      name: 'Reactive Ops',
      logo: 'reactive_ops',
      link: 'https://www.reactiveops.com/kubernetes/',
      blurb: 'ReactiveOps has written automation on best practices for infrastructure as code on GCP & AWS using Kubernetes, helping you build and maintain a world-class infrastructure at a fraction of the price of an internal hire.'
    },
    {
      type: 1,
      name: 'Livewyer',
      logo: 'livewyer',
      link: 'https://livewyer.io/services/kubernetes-experts/',
      blurb: 'Kubernetes experts that on-board applications and empower IT teams to get the most out of containerised technology.'
    },
    {
      type: 1,
      name: 'Deis',
      logo: 'deis',
      link: 'https://deis.com/services/',
      blurb: 'Deis provides professional services and 24x7 operational support for any Kubernetes cluster managed by our global cluster operations team.'
    },
    {
      type: 1,
      name: 'Samsung SDS',
      logo: 'samsung_sds',
      link: 'http://www.samsungsdsa.com/cloud-infrastructure_kubernetes',
      blurb: 'Samsung SDS’s Cloud Native Computing Team offers expert consulting across the range of technical aspects involved in building services targeted at a Kubernetes cluster.'
    },
    {
      type: 1,
      name: 'Container Solutions',
      logo: 'container_solutions',
      link: 'http://container-solutions.com/resources/kubernetes/',
      blurb: 'Container Solutions is a premium software consultancy that focuses on programmable infrastructure, offering our expertise in software development, strategy and operations to help you innovate at speed and scale.'
    },
    {
      type: 1,
      name: 'Jetstack',
      logo: 'jetstack',
      link: 'https://www.jetstack.io/',
      blurb: 'Jetstack is an organisation focused entirely on Kubernetes. They will help you to get the most out of Kubernetes through expert professional services and open source tooling. Get in touch, and accelerate your project.'
    }
  ]

  var isvContainer = document.getElementById('isvContainer')
  var servContainer = document.getElementById('servContainer')

  // Sort partners alphabetically by name before rendering.
  var sorted = partners.sort(function (a, b) {
    if (a.name > b.name) return 1
    if (a.name < b.name) return -1
    return 0
  })

  // Build a logo + blurb + "Learn more" box for each partner and append it
  // to the container matching its type.
  sorted.forEach(function (obj) {
    var box = document.createElement('div')
    box.className = 'partner-box'

    var img = document.createElement('img')
    img.src = '/images/square-logos/' + obj.logo + '.png'

    var div = document.createElement('div')

    var p = document.createElement('p')
    p.textContent = obj.blurb

    var link = document.createElement('a')
    link.href = obj.link
    link.target = '_blank'
    link.textContent = 'Learn more'

    div.appendChild(p)
    div.appendChild(link)

    box.appendChild(img)
    box.appendChild(div)

    var container = obj.type ? servContainer : isvContainer
    container.appendChild(box)
  })
})();
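The script above renders each partner into one of two page containers chosen by its `type` field. The page that includes it is assumed to provide matching elements, for example:

```html
<div id="isvContainer"></div>
<div id="servContainer"></div>
```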
@@ -0,0 +1,94 @@
h5 {
  font-size: 18px;
  line-height: 1.5em;
  margin-bottom: 2em;
}

#usersGrid a {
  display: inline-block;
  background-color: #f9f9f9;
}

#isvContainer, #servContainer {
  position: relative;
  width: 100%;
  display: flex;
  justify-content: space-between;
  flex-wrap: wrap;
}

#isvContainer {
  margin-bottom: 80px;
}

.partner-box {
  position: relative;
  width: 47%;
  max-width: 48%;
  min-width: 48%;
  margin-bottom: 20px;
  padding: 20px;
  flex: 1;
  display: flex;
  justify-content: space-between;
  align-items: flex-start;
}

.partner-box img {
  background-color: #f9f9f9;
}

.partner-box > div {
  margin-left: 30px;
}

.partner-box a {
  color: #3576E3;
}

@media screen and (max-width: 1024px) {
  .partner-box {
    flex-direction: column;
    justify-content: flex-start;
  }

  .partner-box > div {
    margin: 20px 0 0;
  }
}

@media screen and (max-width: 568px) {
  #isvContainer, #servContainer {
    justify-content: center;
  }

  .partner-box {
    flex-direction: column;
    justify-content: flex-start;
    width: 100%;
    max-width: 100%;
    min-width: 100%;
  }

  .partner-box > div {
    margin: 20px 0 0;
  }
}
@@ -0,0 +1,4 @@
You need to have a Kubernetes cluster, and the kubectl command-line tool must
be configured to communicate with your cluster. If you do not already have a
cluster, you can create one by using
[Minikube](/docs/getting-started-guides/minikube).
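For example (assuming `kubectl` is already installed and on your PATH), you can confirm that it can reach your cluster with:

```shell
kubectl cluster-info
kubectl get nodes
```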
@@ -164,10 +164,11 @@ $video-section-height: 550px
      margin-bottom: 20px

      a
-        width: 20%
+        width: 16.65%
        float: left
        font-size: 24px
        font-weight: 300
+        white-space: nowrap

    .social
      padding: 0 30px
@@ -222,8 +222,7 @@ $feature-box-div-width: 45%
      text-align: center

      a
        font-size: 22px
-        width: auto
+        width: 30%
        padding: 0 20px

    .social
@@ -10,8 +10,6 @@ title: Community
<h1>Community</h1>
</section>

<section id="mainContent">
  <main>
    <div class="content">

@@ -29,20 +27,6 @@ title: Community
      from AWS and Openstack to Big Data and Scalability, there’s a place for you to contribute and instructions
      for forming a new SIG if your special interest isn’t covered (yet).</p>
    </div>
    <div class="content">
      <h3>Customers</h3>
      <div class="company-logos">
        <img src="/images/community_logos/zulily_logo.png">
        <img src="/images/community_logos/we_pay_logo.png">
        <img src="/images/community_logos/goldman_sachs_logo.png">
        <img src="/images/community_logos/ebay_logo.png">
        <img src="/images/community_logos/box_logo.png">
        <img src="/images/community_logos/wikimedia_logo.png">
        <img src="/images/community_logos/soundcloud_logo.png">
        <img src="/images/community_logos/new_york_times_logo.png">
        <img src="/images/community_logos/kabam_logo.png">
      </div>
    </div>
    <div class="content">
      <h3>Events</h3>
      <div id="calendarWrapper">

@@ -50,34 +34,6 @@ title: Community
        frameborder="0" scrolling="no"></iframe>
      </div>
    </div>
    <div class="content">
      <h3>Partners</h3>
      <p>We are working with a broad group of partners who contribute to the Kubernetes core codebase, making it stronger and richer, as well as helping to grow the Kubernetes ecosystem by supporting
      a spectrum of complementing platforms, from open source solutions to market-leading technologies.</p>
      <div class="partner-logos">
        <a href="https://coreos.com/kubernetes"><img src="/images/community_logos/core_os_logo.png"></a>
        <a href="https://deis.com"><img src="/images/community_logos/deis_logo.png"></a>
        <a href="https://sysdig.com/blog/monitoring-kubernetes-with-sysdig-cloud/"><img src="/images/community_logos/sysdig_cloud_logo.png"></a>
        <a href="https://puppet.com/blog/managing-kubernetes-configuration-puppet"><img src="/images/community_logos/puppet_logo.png"></a>
        <a href="https://www.citrix.com/blogs/2016/07/15/citrix-kubernetes-a-home-run/"><img src="/images/community_logos/citrix_logo.png"></a>
        <a href="http://wercker.com/workflows/partners/kubernetes/"><img src="/images/community_logos/wercker_logo.png"></a>
        <a href="http://rancher.com/kubernetes/"><img src="/images/community_logos/rancher_logo.png"></a>
        <a href="https://www.openshift.com/"><img src="/images/community_logos/red_hat_logo.png"></a>
        <a href="https://tectonic.com/press/intel-coreos-collaborate-on-openstack-with-kubernetes.html"><img src="/images/community_logos/intel_logo.png"></a>
        <a href="https://elasticbox.com/kubernetes/"><img src="/images/community_logos/elastickube_logo.png"></a>
        <a href="https://platform9.com/blog/containers-as-a-service-kubernetes-docker"><img src="/images/community_logos/platform9_logo.png"></a>
        <a href="http://www.appformix.com/solutions/appformix-for-kubernetes/"><img src="/images/community_logos/appformix_logo.png"></a>
        <a href="http://kubernetes.io/docs/getting-started-guides/dcos/"><img src="/images/community_logos/mesosphere_logo.png"></a>
        <a href="http://docs.datadoghq.com/integrations/kubernetes/"><img src="/images/community_logos/datadog_logo.png"></a>
        <a href="https://apprenda.com/kubernetes-support/"><img src="/images/community_logos/apprenda_logo.png"></a>
        <a href="http://www.ibm.com/cloud-computing/"><img src="/images/community_logos/ibm_logo.png"></a>
        <a href="http://info.crunchydata.com/blog/advanced-crunchy-containers-for-postgresql"><img src="/images/community_logos/crunchy_data_logo.png"></a>
        <a href="https://content.mirantis.com/Containerizing-OpenStack-on-Kubernetes-Video-Landing-Page.html"><img src="/images/community_logos/mirantis_logo.png"></a>
        <a href="http://blog.aquasec.com/security-best-practices-for-kubernetes-deployment"><img src="/images/community_logos/aqua_logo.png"></a>
        <a href="https://jujucharms.com/canonical-kubernetes/"><img src="/images/community_logos/ubuntu_cannonical_logo.png"></a>
        <a href="https://github.com/nuagenetworks/nuage-kubernetes"><img src="/images/community_logos/nuage_network_logo.png"></a>
      </div>
    </div>
  </main>
</section>
@@ -12,6 +12,8 @@ This page lists some of the available add-ons and links to their respective installation instructions.
* [Weave Net](https://github.com/weaveworks/weave-kube) provides networking and network policy, will carry on working on both sides of a network partition, and does not require an external database.
* [Calico](https://github.com/projectcalico/calico-containers/tree/master/docs/cni/kubernetes/manifests/kubeadm) is a secure L3 networking and network policy provider.
* [Canal](https://github.com/tigera/canal/tree/master/k8s-install/kubeadm) unites Flannel and Calico, providing networking and network policy.
* [Romana](http://romana.io) is a Layer 3 networking solution for pod networks that also supports the [NetworkPolicy API](/docs/user-guide/networkpolicies/). Kubeadm add-on installation details are available [here](https://github.com/romana/romana/tree/master/containerize).

## Visualization & Control
@@ -9,26 +9,43 @@ assignees:

This document provides information on how to use kubeadm's advanced options.

-Running kubeadm init bootstraps a Kubernetes cluster. This consists of the
+Running `kubeadm init` bootstraps a Kubernetes cluster. This consists of the
following steps:

-1. kubeadm generates a token that additional nodes can use to register themselves
-   with the master in future.
+1. kubeadm runs a series of pre-flight checks to validate the system state
+   before making changes. Some checks only trigger warnings, others are
+   considered errors and will exit kubeadm until the problem is corrected or
+   the user specifies `--skip-preflight-checks`.
+
+1. kubeadm generates a token that additional nodes can use to register
+   themselves with the master in future. Optionally, the user can provide a token.

1. kubeadm generates a self-signed CA using openssl to provision identities
   for each node in the cluster, and for the API server to secure communication
   with clients.

-1. Outputting a kubeconfig file for the kubelet to use to connect to the API server,
-   as well as an additional kubeconfig file for administration.
+1. Outputting a kubeconfig file for the kubelet to use to connect to the API
+   server, as well as an additional kubeconfig file for administration.

-1. kubeadm generates Kubernetes resource manifests for the API server, controller manager
-   and scheduler, and placing them in `/etc/kubernetes/manifests`. The kubelet watches
-   this directory for static resources to create on startup. These are the core
-   components of Kubernetes, and once they are up and running we can use `kubectl`
-   to set up/manage any additional components.
+1. kubeadm generates Kubernetes resource manifests for the API server,
+   controller manager and scheduler, and places them in
+   `/etc/kubernetes/manifests`. The kubelet watches this directory for static
+   resources to create on startup. These are the core components of Kubernetes, and
+   once they are up and running we can use `kubectl` to set up/manage any
+   additional components.

-1. kubeadm installs any add-on components, such as DNS or discovery, via the API server.
+1. kubeadm installs any add-on components, such as DNS or discovery, via the API
+   server.

Running `kubeadm join` on each node in the cluster consists of the following steps:

1. Use the token to talk to the API server and securely get the root CA
   certificate.

1. Creates a local key pair. Prepares a certificate signing request (CSR) and
   sends that off to the API server for signing.

1. Configures the local kubelet to connect to the API server.

## Usage
@@ -112,11 +129,17 @@ to change the DNS name suffix. Again, you will need to update the
`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` file accordingly, else DNS will
not function correctly.

- `--skip-preflight-checks`

  By default, `kubeadm` runs a series of preflight checks to validate the system
  before making any changes. Advanced users can use this flag to bypass these if
  necessary.

- `--token`

  By default, `kubeadm init` automatically generates the token used to initialise
  each new node. If you would like to manually specify this token, you can use the
-  `--token` flag. The token must be of the format '<6 character string>.<16 character string>'.
+  `--token` flag. The token must be of the format `<6 character string>.<16 character string>`.

- `--use-kubernetes-version` (default 'v1.4.1') the kubernetes version to initialise
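For illustration, here is a hypothetical `kubeadm init` invocation combining the flags described above (the token value is made up, but matches the required format):

```shell
kubeadm init --skip-preflight-checks --token=f0c861.753c505740ecde4c --use-kubernetes-version=v1.4.1
```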
@@ -127,18 +150,59 @@ for a full list of available versions).

### `kubeadm join`

-`kubeadm join` has one mandatory flag, the token used to secure cluster bootstrap,
-and one mandatory argument, the master IP address.
+When you use kubeadm join, you must supply the token used to secure cluster
+bootstrap as a mandatory flag, and the master IP address as a mandatory argument.

Here's an example of how to use it:

`kubeadm join --token=the_secret_token 192.168.1.1`

- `--skip-preflight-checks`

  By default, `kubeadm` runs a series of preflight checks to validate the system
  before making any changes. Advanced users can use this flag to bypass these if
  necessary.

- `--token=<token>`

  By default, when `kubeadm init` runs, a token is generated and revealed in the output.
  That's the token you should use here.

## Automating kubeadm

Rather than copying the token you obtained from `kubeadm init` to each node, as
in the basic `kubeadm` tutorials, you can parallelize the token distribution for
easier automation. To implement this automation, you must know the IP address
that the master will have after it is started.

1. Generate a token. This token must have the form `<6 character string>.<16 character string>`.

   Here is a simple python one-liner for this:

   ```
   python -c 'import random; print "%06x.%016x" % (random.SystemRandom().getrandbits(3*8), random.SystemRandom().getrandbits(8*8))'
   ```

1. Start both the master node and the worker nodes concurrently with this token. As they come up they should find each other and form the cluster.

Once the cluster is up, you can grab the admin credentials from the master node at `/etc/kubernetes/admin.conf` and use that to talk to the cluster.
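For example (assuming SSH access as root to the master):

```shell
scp root@<master ip>:/etc/kubernetes/admin.conf .
kubectl --kubeconfig ./admin.conf get nodes
```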
## Environment variables

There are some environment variables that modify the way that `kubeadm` works. Most users will have no need to set these.

| Variable | Default | Description |
| --- | --- | --- |
| `KUBE_KUBERNETES_DIR` | `/etc/kubernetes` | Where most configuration files are written to and read from |
| `KUBE_HOST_PKI_PATH` | `/etc/kubernetes/pki` | Directory for master PKI assets |
| `KUBE_HOST_ETCD_PATH` | `/var/lib/etcd` | Local etcd state for the Kubernetes cluster |
| `KUBE_HYPERKUBE_IMAGE` | (unset) | If set, use a single hyperkube image with this name. If not set, individual images per server component will be used. |
| `KUBE_DISCOVERY_IMAGE` | `gcr.io/google_containers/kube-discovery-<arch>:1.0` | The bootstrap discovery helper image to use. |
| `KUBE_ETCD_IMAGE` | `gcr.io/google_containers/etcd-<arch>:2.2.5` | The etcd container image to use. |
| `KUBE_COMPONENT_LOGLEVEL` | `--v=4` | Logging configuration for all Kubernetes components |
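As a sketch of how these variables are consumed (the value shown is illustrative, not a recommendation), export one before invoking `kubeadm`:

```shell
# Lower component verbosity from the default --v=4 before bootstrapping.
export KUBE_COMPONENT_LOGLEVEL='--v=2'
kubeadm init
```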
## Troubleshooting

* Some users on RHEL/CentOS 7 have reported issues with traffic being routed incorrectly due to iptables being bypassed. You should ensure `net.bridge.bridge-nf-call-iptables` is set to 1 in your sysctl config, e.g.:
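A sketch of one way to do this (how you persist the setting depends on your distribution; `/etc/sysctl.conf` is one common location):

```shell
sysctl net.bridge.bridge-nf-call-iptables=1
# One common way to persist the setting across reboots:
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
```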
@@ -36,7 +36,11 @@ Place plugins in `network-plugin-dir/plugin-name/plugin-name`, i.e. if you have a

### CNI

-The CNI plugin is selected by passing Kubelet the `--network-plugin=cni` command-line option. Kubelet reads the first CNI configuration file from `--network-plugin-dir` and uses the CNI configuration from that file to set up each pod's network. The CNI configuration file must match the [CNI specification](https://github.com/containernetworking/cni/blob/master/SPEC.md), and any required CNI plugins referenced by the configuration must be present in `/opt/cni/bin`.
+The CNI plugin is selected by passing Kubelet the `--network-plugin=cni` command-line option. Kubelet reads a file from `--cni-conf-dir` (default `/etc/cni/net.d`) and uses the CNI configuration from that file to set up each pod's network. The CNI configuration file must match the [CNI specification](https://github.com/containernetworking/cni/blob/master/SPEC.md), and any required CNI plugins referenced by the configuration must be present in `--cni-bin-dir` (default `/opt/cni/bin`).

If there are multiple CNI configuration files in the directory, the first one in lexicographic order of file name is used.
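For illustration, a minimal sketch of a CNI configuration file using the standard `bridge` and `host-local` plugins (the file name and all values are hypothetical):

```shell
# Hypothetical example; adjust the subnet for your cluster.
cat <<EOF > /etc/cni/net.d/10-mynet.conf
{
  "name": "mynet",
  "type": "bridge",
  "bridge": "cni0",
  "isGateway": true,
  "ipMasq": true,
  "ipam": {
    "type": "host-local",
    "subnet": "10.22.0.0/16"
  }
}
EOF
```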

In addition to the CNI plugin specified by the configuration file, Kubernetes requires the standard CNI `lo` plugin, at minimum version 0.2.0.

### kubenet
@@ -44,7 +48,7 @@ The Linux-only kubenet plugin provides functionality similar to the `--configure

The plugin requires a few things (a sketch of a matching kubelet invocation follows this list):

-* The standard CNI `bridge` and `host-local` plugins are required. Kubenet will first search for them in `/opt/cni/bin`. Specify `network-plugin-dir` to supply an additional search path. The first match found will take effect.
+* The standard CNI `bridge`, `lo` and `host-local` plugins are required, at minimum version 0.2.0. Kubenet will first search for them in `/opt/cni/bin`. Specify `network-plugin-dir` to supply an additional search path. The first match found will take effect.
* Kubelet must be run with the `--network-plugin=kubenet` argument to enable the plugin.
* Kubelet must also be run with the `--reconcile-cidr` argument to ensure the IP subnet assigned to the node by configuration or the controller-manager is propagated to the plugin.
* The node must be assigned an IP subnet through either the `--pod-cidr` kubelet command-line option or the `--allocate-node-cidrs=true --cluster-cidr=<cidr>` controller-manager command-line options.
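A sketch of a kubelet invocation satisfying these requirements (the CIDR value is illustrative):

```shell
kubelet --network-plugin=kubenet --reconcile-cidr --pod-cidr=10.100.0.0/24
```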
@@ -66,6 +70,6 @@ This option is provided to the network-plugin; currently **only kubenet supports
## Usage Summary

* `--network-plugin=exec` specifies that we use the `exec` plugin, with executables located in `--network-plugin-dir`.
-* `--network-plugin=cni` specifies that we use the `cni` network plugin with actual CNI plugin binaries located in `/opt/cni/bin` and CNI plugin configuration located in `network-plugin-dir`; the config location defaults to `/etc/cni/net.d`.
+* `--network-plugin=cni` specifies that we use the `cni` network plugin with actual CNI plugin binaries located in `--cni-bin-dir` (default `/opt/cni/bin`) and CNI plugin configuration located in `--cni-conf-dir` (default `/etc/cni/net.d`).
* `--network-plugin=kubenet` specifies that we use the `kubenet` network plugin with CNI `bridge` and `host-local` plugins placed in `/opt/cni/bin` or `network-plugin-dir`.
* `--network-plugin-mtu=9001` specifies the MTU to use, currently only used by the `kubenet` network plugin.
@@ -1,4 +1,4 @@
---
---
assignees:
- lavalamp
- thockin
@@ -191,7 +191,7 @@ Calico can also be run in policy enforcement mode in conjunction with other netw

### Romana

-[Romana](http://romana.io) is an open source software defined networking (SDN) solution that lets you deploy Kubernetes without an overlay network.
+[Romana](http://romana.io) is an open source network and security automation solution that lets you deploy Kubernetes without an overlay network. Romana supports Kubernetes [Network Policy](/docs/user-guide/networkpolicies/) to provide isolation across network namespaces.

### Contiv
@@ -123,7 +123,7 @@ KUBE_API_ARGS=""

```shell
$ etcdctl mkdir /kube-centos/network
-$ etcdclt mk /kube-centos/network/config "{ \"Network\": \"172.30.0.0/16\", \"SubnetLen\": 24, \"Backend\": { \"Type\": \"vxlan\" } }"
+$ etcdctl mk /kube-centos/network/config "{ \"Network\": \"172.30.0.0/16\", \"SubnetLen\": 24, \"Backend\": { \"Type\": \"vxlan\" } }"
```

* Configure flannel to overlay the Docker network in /etc/sysconfig/flanneld on the master (also on the nodes, as we'll see):
@@ -196,6 +196,13 @@ for SERVICES in kube-proxy kubelet flanneld docker; do
  systemctl status $SERVICES
done
```

* Configure kubectl

```shell
kubectl config set-cluster default-cluster --server=http://centos-master:8080
kubectl config set-context default-context --cluster=default-cluster --user=default-admin
kubectl config use-context default-context
```

*You should be finished!*
@@ -0,0 +1,160 @@
---
---

<style>
li>.highlighter-rouge {position:relative; top:3px;}
</style>

## Overview

This quickstart shows you how to easily install a Kubernetes cluster on AWS.
It uses a tool called [`kops`](https://github.com/kubernetes/kops).

kops is an opinionated provisioning system:

* Fully automated installation
* Uses DNS to identify clusters
* Self-healing: everything runs in Auto-Scaling Groups
* Limited OS support (Debian preferred, Ubuntu 16.04 supported, early support for CentOS & RHEL)
* High-Availability support
* Can directly provision, or generate terraform manifests

If your opinions differ from these you may prefer to build your own cluster using [kubeadm](kubeadm) as
a building block. kops builds on the kubeadm work.

## Creating a cluster

### (1/5) Install kops

Download kops from the [releases page](https://github.com/kubernetes/kops/releases) (it is also easy to build from source):

On MacOS:

```
wget https://github.com/kubernetes/kops/releases/download/v1.4.1/kops-darwin-amd64
chmod +x kops-darwin-amd64
mv kops-darwin-amd64 /usr/local/bin/kops
```

On Linux:

```
wget https://github.com/kubernetes/kops/releases/download/v1.4.1/kops-linux-amd64
chmod +x kops-linux-amd64
mv kops-linux-amd64 /usr/local/bin/kops
```

### (2/5) Create a route53 domain for your cluster

kops uses DNS for discovery, both inside the cluster and so that you can reach the kubernetes API server
from clients.

kops has a strong opinion on the cluster name: it should be a valid DNS name. By doing so you will
no longer get your clusters confused, you can share clusters with your colleagues unambiguously,
and you can reach them without relying on remembering an IP address.

You can, and probably should, use subdomains to divide your clusters. As our example we will use
`useast1.dev.example.com`. The API server endpoint will then be `api.useast1.dev.example.com`.

A Route53 hosted zone can serve subdomains. Your hosted zone could be `useast1.dev.example.com`,
but also `dev.example.com` or even `example.com`. kops works with any of these, so typically
you choose for organization reasons (e.g. you are allowed to create records under `dev.example.com`,
but not under `example.com`).

Let's assume you're using `dev.example.com` as your hosted zone. You create that hosted zone using
the [normal process](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html), or
with a command such as `aws route53 create-hosted-zone --name dev.example.com --caller-reference 1`.

You must then set up your NS records in the parent domain, so that records in the domain will resolve. Here,
you would create NS records in `example.com` for `dev`. If it is a root domain name you would configure the NS
records at your domain registrar (e.g. `example.com` would need to be configured where you bought `example.com`).

This step is easy to mess up (it is the #1 cause of problems!). You can double-check that
your cluster is configured correctly if you have the dig tool by running:

`dig NS dev.example.com`

You should see the 4 NS records that Route53 assigned your hosted zone.

### (3/5) Create an S3 bucket to store your clusters' state

kops lets you manage your clusters even after installation. To do this, it must keep track of the clusters
that you have created, along with their configuration, the keys they are using etc. This information is stored
in an S3 bucket. S3 permissions are used to control access to the bucket.

Multiple clusters can use the same S3 bucket, and you can share an S3 bucket between the colleagues who
administer the same clusters - this is much easier than passing around kubecfg files. But anyone with access
to the S3 bucket will have administrative access to all your clusters, so you don't want to share it beyond
the operations team.

So typically you have one S3 bucket for each ops team (and often the name will correspond
to the name of the hosted zone above!)

In our example, we chose `dev.example.com` as our hosted zone, so let's pick `clusters.dev.example.com` as
the S3 bucket name.

* Export `AWS_PROFILE` (if you need to select a profile for the AWS CLI to work)

* Create the S3 bucket using `aws s3 mb s3://clusters.dev.example.com`

* You can `export KOPS_STATE_STORE=s3://clusters.dev.example.com` and then kops will use this location by default.
  We suggest putting this in your bash profile or similar, as shown in the example below.
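For example, assuming a bash login shell (the profile path is an assumption; adjust for your shell):

```
echo 'export KOPS_STATE_STORE=s3://clusters.dev.example.com' >> ~/.bash_profile
```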
### (4/5) Build your cluster configuration

Run "kops create cluster" to create your cluster configuration:

`kops create cluster --zones=us-east-1c useast1.dev.example.com`

kops will create the configuration for your cluster. Note that it _only_ creates the configuration, it does
not actually create the cloud resources - you'll do that in the next step with a `kops update cluster`. This
gives you an opportunity to review the configuration or change it.

It prints commands you can use to explore further:

* List your clusters with: `kops get cluster`
* Edit this cluster with: `kops edit cluster useast1.dev.example.com`
* Edit your node instance group: `kops edit ig --name=useast1.dev.example.com nodes`
* Edit your master instance group: `kops edit ig --name=useast1.dev.example.com master-us-east-1c`

If this is your first time using kops, do spend a few minutes to try those out! An instance group is a
set of instances, which will be registered as kubernetes nodes. On AWS this is implemented via auto-scaling-groups.
You can have several instance groups, for example if you wanted nodes that are a mix of spot and on-demand instances, or
GPU and non-GPU instances.

### (5/5) Create the cluster in AWS

Run "kops update cluster" to create your cluster in AWS:

`kops update cluster useast1.dev.example.com --yes`

That takes a few seconds to run, but then your cluster will likely take a few minutes to actually be ready.
`kops update cluster` will be the tool you'll use whenever you change the configuration of your cluster; it
applies the changes you have made to the configuration to your cluster - reconfiguring AWS or kubernetes as needed.

For example, after you run `kops edit ig nodes`, run `kops update cluster --yes` to apply your configuration, and
sometimes you will also have to run `kops rolling-update cluster` to roll out the configuration immediately.

Without `--yes`, `kops update cluster` will show you a preview of what it is going to do. This is handy
for production clusters!

### Explore other add-ons

See the [list of add-ons](/docs/admin/addons/) to explore other add-ons, including tools for logging, monitoring, network policy, visualization & control of your Kubernetes cluster.

## What's next

* Learn more about [Kubernetes concepts and kubectl in Kubernetes 101](/docs/user-guide/walkthrough/).
* Learn about `kops` [advanced usage](https://github.com/kubernetes/kops)

## Cleanup

* To delete your cluster: `kops delete cluster useast1.dev.example.com --yes`

## Feedback

* Slack Channel: [#sig-aws](https://kubernetes.slack.com/messages/sig-aws/) has a lot of kops users
* [GitHub Issues](https://github.com/kubernetes/kops/issues)
@@ -1,4 +1,10 @@
---
assignees:
- mikedanese
- luxas
- errordeveloper
- jbeda

---

<style>
@@ -13,11 +19,26 @@ The installation uses a tool called `kubeadm` which is part of Kubernetes 1.4.
This process works with local VMs, physical servers and/or cloud servers.
It is simple enough that you can easily integrate its use into your own automation (Terraform, Chef, Puppet, etc).

-**The `kubeadm` tool is currently in alpha but please try it out and give us [feedback](/docs/getting-started-guides/kubeadm/#feedback)!**
See the full [`kubeadm` reference](/docs/admin/kubeadm) for information on all `kubeadm` command-line flags and for advice on automating `kubeadm` itself.

+**The `kubeadm` tool is currently in alpha but please try it out and give us [feedback](/docs/getting-started-guides/kubeadm/#feedback)!
+Be sure to read the [limitations](#limitations); in particular note that kubeadm doesn't have great support for
+automatically configuring cloud providers. Please refer to the specific cloud provider documentation or
+use another provisioning system.**

kubeadm assumes you have a set of machines (virtual or real) that are up and running. It is designed
to be part of a larger provisioning system - or just for easy manual provisioning. kubeadm is a great
choice where you have your own infrastructure (e.g. bare metal), or where you have an existing
orchestration system (e.g. Puppet) that you have to integrate with.

If you are not constrained, other tools build on kubeadm to give you complete clusters:

* On GCE, [Google Container Engine](https://cloud.google.com/container-engine/) gives you turn-key Kubernetes
* On AWS, [kops](kops) makes installation and cluster management easy (and supports high availability)

## Prerequisites

-1. One or more machines running Ubuntu 16.04 or CentOS 7
+1. One or more machines running Ubuntu 16.04, CentOS 7 or HypriotOS v1.0.1
1. 1GB or more of RAM per machine (any less will leave little room for your apps)
1. Full network connectivity between all machines in the cluster (public or private network is fine)
@@ -33,24 +54,26 @@ It is simple enough that you can easily integrate its use into your own automation (Terraform, Chef, Puppet, etc).

You will install the following packages on all the machines:

-* `docker`: the container runtime, which Kubernetes depends on.
+* `docker`: the container runtime, which Kubernetes depends on. v1.11.2 is recommended, but v1.10.3 and v1.12.1 are known to work as well.
* `kubelet`: the most core component of Kubernetes.
  It runs on all of the machines in your cluster and does things like starting pods and containers.
* `kubectl`: the command to control the cluster once it's running.
-  You will only use this on the master.
+  You will only need this on the master, but it can be useful to have on the other nodes as well.
* `kubeadm`: the command to bootstrap the cluster.

For each host in turn:

* SSH into the machine and become `root` if you are not already (for example, run `sudo su -`).
-* If the machine is running Ubuntu 16.04, run:
+* If the machine is running Ubuntu 16.04 or HypriotOS v1.0.1, run:

      # curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
      # cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
      deb http://apt.kubernetes.io/ kubernetes-xenial main
      EOF
      # apt-get update
-      # apt-get install -y docker.io kubelet kubeadm kubectl kubernetes-cni
+      # # Install docker if you don't have it already.
+      # apt-get install -y docker.io
+      # apt-get install -y kubelet kubeadm kubectl kubernetes-cni

  If the machine is running CentOS 7, run:
@@ -78,6 +101,8 @@ Note: `setenforce 0` will no longer be necessary on CentOS once [#33555](https:/
The master is the machine where the "control plane" components run, including `etcd` (the cluster database) and the API server (which the `kubectl` CLI communicates with).
All of these components run in pods started by `kubelet`.

Right now you can't run `kubeadm init` twice without turning down the cluster in between; see [Tear down](#tear-down).

To initialize the master, pick one of the machines you previously installed `kubelet` and `kubeadm` on, and run:

    # kubeadm init
@@ -85,6 +110,10 @@ To initialize the master, pick one of the machines you previously installed `kubelet` and `kubeadm` on, and run:
**Note:** this will autodetect the network interface to advertise the master on as the interface with the default gateway.
If you want to use a different interface, specify the `--api-advertise-addresses=<ip-address>` argument to `kubeadm init`.

If you want to use [flannel](https://github.com/coreos/flannel) as the pod network, specify `--pod-network-cidr=10.244.0.0/16` if you're using the daemonset manifest below. _However, please note that this is not required for any other networks, including Weave, which is the recommended pod network._

Please refer to the [kubeadm reference doc](/docs/admin/kubeadm/) if you want to read more about the flags `kubeadm init` provides.

This will download and install the cluster database and "control plane" components.
This may take several minutes.
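For illustration, a hypothetical invocation combining the flags above (the address and CIDR are examples only):

    # kubeadm init --api-advertise-addresses=192.168.1.1 --pod-network-cidr=10.244.0.0/16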
@@ -125,7 +154,30 @@ If you want to be able to schedule pods on the master, for example if you want a

This will remove the "dedicated" taint from any nodes that have it, including the master node, meaning that the scheduler will then be able to schedule pods everywhere.

-### (3/4) Joining your nodes
+### (3/4) Installing a pod network

You must install a pod network add-on so that your pods can communicate with each other.

**It is necessary to do this before you try to deploy any applications to your cluster, and before `kube-dns` will start up. Note also that `kubeadm` only supports CNI-based networks and therefore kubenet-based networks will not work.**

Several projects provide Kubernetes pod networks using CNI, some of which
also support [Network Policy](/docs/user-guide/networkpolicies/). See the [add-ons page](/docs/admin/addons/) for a complete list of available network add-ons.

You can install a pod network add-on with the following command:

    # kubectl apply -f <add-on.yaml>

Please refer to the specific add-on installation guide for exact details. You should only install **one** pod network per cluster.

If you are on an architecture other than amd64, you should use the flannel overlay network as described in [the multi-platform section](#kubeadm-is-multi-platform).

Once a pod network has been installed, you can confirm that it is working by checking that the `kube-dns` pod is `Running` in the output of `kubectl get pods --all-namespaces`.

And once the `kube-dns` pod is up and running, you can continue by joining your nodes.

### (4/4) Joining your nodes

The nodes are where your workloads (containers and pods, etc) run.
If you want to add any new machines as nodes to your cluster, for each machine: SSH to that machine, become root (e.g. `sudo su -`) and run the command that was output by `kubeadm init`.
@@ -149,36 +201,20 @@ For example:

A few seconds later, you should notice that running `kubectl get nodes` on the master shows a cluster with as many machines as you created.

-**YOUR CLUSTER IS NOT READY YET!**
+### (Optional) Control your cluster from machines other than the master

-Before you can deploy applications to it, you need to install a pod network.
+In order to get kubectl on your laptop, for example, to talk to your cluster, you need to copy the `KubeConfig` file from your master to your laptop like this:

-### (4/4) Installing a pod network
-
-You must install a pod network add-on so that your pods can communicate with each other when they are on different hosts.
-**It is necessary to do this before you try to deploy any applications to your cluster.**
-
-Several projects provide Kubernetes pod networks.
-You can see a complete list of available network add-ons on the [add-ons page](/docs/admin/addons/).
-
-By way of example, you can install [Weave Net](https://github.com/weaveworks/weave-kube) by logging in to the master and running:
-
-    # kubectl apply -f https://git.io/weave-kube
-    daemonset "weave-net" created
-
-If you prefer [Calico](https://github.com/projectcalico/calico-containers/tree/master/docs/cni/kubernetes/manifests/kubeadm) or [Canal](https://github.com/tigera/canal/tree/master/k8s-install/kubeadm), please refer to their respective installation guides.
-You should only install one pod network per cluster.
-
-Once a pod network has been installed, you can confirm that it is working by checking that the `kube-dns` pod is `Running` in the output of `kubectl get pods --all-namespaces`.
-**This signifies that your cluster is ready.**
+    # scp root@<master ip>:/etc/kubernetes/admin.conf .
+    # kubectl --kubeconfig ./admin.conf get nodes

### (Optional) Installing a sample application

As an example, install a sample microservices application, a socks shop, to put your cluster through its paces.
To learn more about the sample microservices app, see the [GitHub README](https://github.com/microservices-demo/microservices-demo).

-    # git clone https://github.com/microservices-demo/microservices-demo
-    # kubectl apply -f microservices-demo/deploy/kubernetes/manifests/sock-shop-ns.yml -f microservices-demo/deploy/kubernetes/manifests
+    # kubectl create namespace sock-shop
+    # kubectl apply -n sock-shop -f "https://github.com/microservices-demo/microservices-demo/blob/master/deploy/kubernetes/complete-demo.yaml?raw=true"

You can then find out the port that the [NodePort feature of services](/docs/user-guide/services/) allocated for the front-end service by running:
@@ -202,21 +238,9 @@ In the example above, this was `31869`, but it is a different port for you.

If there is a firewall, make sure it exposes this port to the internet before you try to access it.

-### Explore other add-ons
+## Tear down

-See the [list of add-ons](/docs/admin/addons/) to explore other add-ons, including tools for logging, monitoring, network policy, visualization & control of your Kubernetes cluster.
-
-## What's next
-
-* Learn more about [Kubernetes concepts and kubectl in Kubernetes 101](/docs/user-guide/walkthrough/).
-* Install Kubernetes with [cloud provider configurations](/docs/getting-started-guides/) to add Load Balancer and Persistent Volume support.
-* Learn about `kubeadm`'s advanced usage on the [advanced reference doc](/docs/admin/kubeadm/)
-
-## Cleanup
-
-* To uninstall the socks shop, run `kubectl delete -f microservices-demo/deploy/kubernetes/manifests` on the master.
+* To uninstall the socks shop, run `kubectl delete namespace sock-shop` on the master.

* To undo what `kubeadm` did, simply delete the machines you created for this tutorial, or run the script below and then start over or uninstall the packages.
@@ -230,18 +254,43 @@ See the [list of add-ons](/docs/admin/addons/) to explore other add-ons, including tools for logging, monitoring, network policy, visualization & control of your Kubernetes cluster.

If you wish to start over, run `systemctl start kubelet` followed by `kubeadm init` or `kubeadm join`.
<!-- *syntax-highlighting-hack -->

## Explore other add-ons

See the [list of add-ons](/docs/admin/addons/) to explore other add-ons, including tools for logging, monitoring, network policy, visualization & control of your Kubernetes cluster.

## What's next

* Learn about `kubeadm`'s advanced usage on the [advanced reference doc](/docs/admin/kubeadm/)
* Learn more about [Kubernetes concepts and kubectl in Kubernetes 101](/docs/user-guide/walkthrough/).

## Feedback

* Slack Channel: [#sig-cluster-lifecycle](https://kubernetes.slack.com/messages/sig-cluster-lifecycle/)
* Mailing List: [kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle)
* [GitHub Issues](https://github.com/kubernetes/kubernetes/issues): please tag `kubeadm` issues with `@kubernetes/sig-cluster-lifecycle`

## kubeadm is multi-platform

kubeadm deb packages and binaries are built for amd64, arm and arm64, following the [multi-platform proposal](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/multi-platform.md).

deb packages are released for ARM and ARM 64-bit, but not RPMs (yet; reach out if there's interest).

ARM had some issues when making v1.4; see [#32517](https://github.com/kubernetes/kubernetes/pull/32517), [#33485](https://github.com/kubernetes/kubernetes/pull/33485), [#33117](https://github.com/kubernetes/kubernetes/pull/33117) and [#33376](https://github.com/kubernetes/kubernetes/pull/33376).

However, thanks to the PRs above, `kube-apiserver` works on ARM from the `v1.4.1` release, so make sure you're at least using `v1.4.1` when running on ARM 32-bit.

The multiarch flannel daemonset can be installed this way. Replace `ARCH=amd64` with `ARCH=arm` or `ARCH=arm64` depending on the platform you're running on.

    # ARCH=amd64 curl -sSL https://raw.githubusercontent.com/luxas/flannel/update-daemonset/Documentation/kube-flannel.yml | sed "s/amd64/${ARCH}/g" | kubectl create -f -

## Limitations

Please note: `kubeadm` is a work in progress and these limitations will be addressed in due course.

-1. The cluster created here doesn't have cloud-provider integrations, so for example it won't work with [Load Balancers](/docs/user-guide/load-balancer/) (LBs) or [Persistent Volumes](/docs/user-guide/persistent-volumes/walkthrough/) (PVs).
-   To easily obtain a cluster which works with LBs and PVs, try [the "hello world" GKE tutorial](/docs/hellonode) or [one of the other cloud-specific installation tutorials](/docs/getting-started-guides/).
+1. The cluster created here doesn't have cloud-provider integrations by default, so it doesn't work automatically with, for example, [Load Balancers](/docs/user-guide/load-balancer/) (LBs) or [Persistent Volumes](/docs/user-guide/persistent-volumes/walkthrough/) (PVs).
+   To set up kubeadm with CloudProvider integrations (it's experimental, but try it), refer to the [kubeadm reference](/docs/admin/kubeadm/) document.

   Workaround: use the [NodePort feature of services](/docs/user-guide/services/#type-nodeport) for exposing applications to the internet.
1. The cluster created here has a single master, with a single `etcd` database running on it.
|
|||
1. `kubectl logs` is broken with `kubeadm` clusters due to [#22770](https://github.com/kubernetes/kubernetes/issues/22770).
|
||||
|
||||
Workaround: use `docker logs` on the nodes where the containers are running as a workaround.
|
||||
1. There is not yet an easy way to generate a `kubeconfig` file which can be used to authenticate to the cluster remotely with `kubectl` on, for example, your workstation.
|
||||
|
||||
Workaround: copy the kubelet's `kubeconfig` from the master: use `scp root@<master>:/etc/kubernetes/admin.conf .` and then e.g. `kubectl --kubeconfig ./admin.conf get nodes` from your workstation.
|
||||
|
||||
1. If you are using VirtualBox (directly or via Vagrant), you will need to ensure that `hostname -i` returns a routable IP address (i.e. one on the second network interface, not the first one).
|
||||
By default, it doesn't do this and kubelet ends-up using first non-loopback network interface, which is usually NATed.
|
||||
|
|
|
|||
|
|
@ -138,7 +138,6 @@ Use the built-in Docker daemon with:
|
|||
eval $(minikube docker-env)
|
||||
```
|
||||
This command sets up the Docker environment variables so a Docker client can communicate with the minikube Docker daemon.
|
||||
Minikube currently supports only Docker version 1.11.1 on the server, which is what Kubernetes 1.3 supports. With a newer Docker version, you'll run into this [issue](https://github.com/kubernetes/minikube/issues/338).
|
||||
|
||||
```shell
|
||||
docker ps
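# As a minimal sketch (the image name "my-image" is just an example), you can
# build an image directly against the minikube Docker daemon so the cluster
# can run it without pushing to a registry:
eval $(minikube docker-env)        # target the minikube Docker daemon
docker build -t my-image:v1 .      # build into the cluster's local image cache
docker images | grep my-image      # verify the image is visible to the kubelet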
|
||||
|
|
|
|||
|
|
@ -0,0 +1,17 @@
|
|||
---
|
||||
assignees:
|
||||
- chrismarino
|
||||
|
||||
---
|
||||
|
||||
# Installation with kubeadm
|
||||
|
||||
Begin by following the [kubeadm getting started guide](/docs/getting-started-guides/kubeadm/) and complete steps 1, 2, and 3. Once completed, follow the [containerized installation guide](https://github.com/romana/romana/tree/master/containerize) for kubeadm. Kubernetes network policies can then be applied to pods using the NetworkPolicy API.
|
||||
|
||||
#### Additional Romana Network Policy Options
|
||||
|
||||
In addition to the standard Kubernetes NetworkPolicy API, Romana also supports additional network policy functions.
|
||||
|
||||
* [Romana Network Policy Capabilities](https://github.com/romana/romana/wiki/Romana-policies)
|
||||
* [Example Romana Policies](https://github.com/romana/core/tree/master/policy)
|
||||
|
||||
|
|
@ -6,7 +6,12 @@ assignees:
|
|||
|
||||
Kubernetes can be used to declare network policies which govern how Pods can communicate with each other. This document helps you get started using the Kubernetes [NetworkPolicy API](/docs/user-guide/networkpolicies), and provides a demonstration thereof.
|
||||
|
||||
In this article, we assume a Kubernetes cluster has been created with network policy support. There are a number of network providers that support NetworkPolicy including:
|
||||
|
||||
* [Calico](/docs/getting-started-guides/network-policy/calico/)
|
||||
* [Romana](/docs/getting-started-guides/network-policy/romana/)
|
||||
|
||||
The reference implementation is [Calico](/docs/getting-started-guides/network-policy/calico) running on GCE.
|
||||
|
||||
The following example walkthrough will work on a Kubernetes cluster using any of the listed providers.
|
||||
|
||||
|
|
|
|||
|
|
@ -85,6 +85,7 @@ to implement one of the above options:
|
|||
- [Flannel](https://github.com/coreos/flannel)
|
||||
- [Calico](https://github.com/projectcalico/calico-containers)
|
||||
- [Weave](http://weave.works/)
|
||||
- [Romana](http://romana.io/)
|
||||
- [Open vSwitch (OVS)](http://openvswitch.org/)
|
||||
- [More found here](/docs/admin/networking#how-to-achieve-this)
|
||||
- You can also write your own.
|
||||
|
|
@ -129,14 +130,9 @@ Also, you need to pick a static IP for master node.
|
|||
|
||||
#### Network Policy
|
||||
|
||||
Kubernetes enables the definition of fine-grained network policy between Pods using the [NetworkPolicy](/docs/user-guide/network-policy) resource.
|
||||
|
||||
Not all networking providers support the Kubernetes NetworkPolicy API, see [Using Network Policy](/docs/getting-started-guides/network-policy/walkthrough/) for more information.
|
||||
|
||||
### Cluster Naming
|
||||
|
||||
|
|
|
|||
|
|
@ -150,8 +150,12 @@ gcloud docker push gcr.io/$PROJECT_ID/hello-node:v1
|
|||
|
||||
If all goes well, you should be able to see the container image listed in the console: *Compute > Container Engine > Container Registry*. We now have a project-wide Docker image available which Kubernetes can access and orchestrate.
|
||||
|
||||
If you see an error message like the following: __denied: Unable to create the repository, please check that you have access to do so.__, ensure that you are pushing the image to Container Registry with the correct user credentials: run `gcloud auth list` and, if needed, `gcloud config set account example@gmail.com`.
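For example (`example@gmail.com` stands in for your own account):

```shell
gcloud auth list                                      # list the credentialed accounts
gcloud config set account example@gmail.com           # switch to the account with registry access
gcloud docker push gcr.io/$PROJECT_ID/hello-node:v1   # retry the push
```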
|
||||
|
||||

|
||||
|
||||
**Note:** *Docker for Windows, version 1.12 or 1.12.1, does not yet support this procedure. Instead, it replies with the message 'denied: Unable to access the repository; please check that you have permission to access it'. A workaround is described at http://stackoverflow.com/questions/39277986/unable-to-push-to-google-container-registry-unable-to-access-the-repository?answertab=votes#tab-top.*
|
||||
|
||||
## Create your Kubernetes Cluster
|
||||
|
||||
A cluster consists of a Master API server and a set of worker VMs called Nodes.
|
||||
|
|
|
|||
|
|
@ -83,9 +83,15 @@ h2, h3, h4 {
|
|||
</div>
|
||||
<div class="col3rd">
|
||||
<h3>Installing Kubernetes on Linux with kubeadm</h3>
|
||||
<p>This quickstart will show you how to install a secure Kubernetes cluster on any computers running Linux, using a tool called <code>kubeadm</code>. It'll work with local VMs, physical servers and/or cloud servers, either manually or as a part of your own automation. It is currently in alpha but please try it out and give us feedback!</p>
|
||||
<p>If you are looking for a fully automated solution, note that kubeadm is intended as a building block. Tools such as GKE and kops build on kubeadm to provision a complete cluster.</p>
|
||||
<a href="/docs/getting-started-guides/kubeadm/" class="button">Install Kubernetes with kubeadm</a>
|
||||
</div>
|
||||
<div class="col3rd">
|
||||
<h3>Installing Kubernetes on AWS with kops</h3>
|
||||
<p>This quickstart will show you how to bring up a complete Kubernetes cluster on AWS, using a tool called <code>kops</code>.</p>
|
||||
<a href="/docs/getting-started-guides/kops/" class="button">Install Kubernetes with kops</a>
|
||||
</div>
|
||||
<div class="col3rd">
|
||||
<h3>Guided Tutorial</h3>
|
||||
<p>If you’ve completed one of the quickstarts, a great next step is Kubernetes 101. You will follow a path through the various features of Kubernetes, with code examples along the way, learning all of the core concepts. There's also a <a href="/docs/user-guide/walkthrough/k8s201">Kubernetes 201</a>!</p>
|
||||
|
|
|
|||
|
|
@ -11,14 +11,7 @@ load-balanced access to an application running in a cluster.
|
|||
|
||||
{% capture prerequisites %}
|
||||
|
||||
{% include task-tutorial-prereqs.md %}
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
|
|
|||
|
|
@ -12,14 +12,7 @@ for database debugging.
|
|||
|
||||
{% capture prerequisites %}
|
||||
|
||||
* {% include task-tutorial-prereqs.md %}
|
||||
|
||||
* Install [redis-cli](http://redis.io/topics/rediscli).
|
||||
|
||||
|
|
|
|||
|
|
@ -7,14 +7,7 @@ This page shows how to use an HTTP proxy to access the Kubernetes API.
|
|||
|
||||
{% capture prerequisites %}
|
||||
|
||||
* {% include task-tutorial-prereqs.md %}
|
||||
|
||||
* If you do not already have an application running in your cluster, start
|
||||
a Hello world application by entering this command:
|
||||
|
|
|
|||
|
|
@ -8,14 +8,7 @@ Kubernetes cluster.
|
|||
|
||||
{% capture prerequisites %}
|
||||
|
||||
{% include task-tutorial-prereqs.md %}
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,12 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: command-demo
|
||||
labels:
|
||||
purpose: demonstrate-command
|
||||
spec:
|
||||
containers:
|
||||
- name: command-demo-container
|
||||
image: debian
|
||||
command: ["printenv"]
|
||||
args: ["HOSTNAME", "KUBERNETES_PORT"]
|
||||
|
|
@ -0,0 +1,105 @@
|
|||
---
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
||||
This page shows how to define commands and arguments when you run a container
|
||||
in a Kubernetes Pod.
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture prerequisites %}
|
||||
|
||||
{% include task-tutorial-prereqs.md %}
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture steps %}
|
||||
|
||||
### Defining a command and arguments when you create a Pod
|
||||
|
||||
When you create a Pod, you can define a command and arguments for the
|
||||
containers that run in the Pod. To define a command, include the `command`
|
||||
field in the configuration file. To define arguments for the command, include
|
||||
the `args` field in the configuration file. The command and arguments that
|
||||
you define cannot be changed after the Pod is created.
|
||||
|
||||
The command and arguments that you define in the configuration file
|
||||
override the default command and arguments provided by the container image.
|
||||
If you define args, but do not define a command, the default command is used
|
||||
with your new arguments. For more information, see
|
||||
[Commands and Capabilities](/docs/user-guide/containers/).
|
||||
|
||||
In this exercise, you create a Pod that runs one container. The configuration
|
||||
file for the Pod defines a command and two arguments:
|
||||
|
||||
{% include code.html language="yaml" file="commands.yaml" ghlink="/docs/tasks/configure-pod-container/commands.yaml" %}
|
||||
|
||||
1. Create a Pod based on the YAML configuration file:
|
||||
|
||||
export REPO=https://raw.githubusercontent.com/kubernetes/kubernetes.github.io/master
|
||||
kubectl create -f $REPO/docs/tasks/configure-pod-container/commands.yaml
|
||||
|
||||
1. List the running Pods:
|
||||
|
||||
kubectl get pods
|
||||
|
||||
The output shows that the container that ran in the command-demo Pod has
|
||||
completed.
|
||||
|
||||
1. To see the output of the command that ran in the container, view the logs
|
||||
from the Pod:
|
||||
|
||||
kubectl logs command-demo
|
||||
|
||||
The output shows the values of the HOSTNAME and KUBERNETES_PORT environment
|
||||
variables:
|
||||
|
||||
command-demo
|
||||
tcp://10.3.240.1:443
|
||||
|
||||
### Using environment variables to define arguments
|
||||
|
||||
In the preceding example, you defined the arguments directly by
|
||||
providing strings. As an alternative to providing strings directly,
|
||||
you can define arguments by using environment variables:
|
||||
|
||||
env:
|
||||
- name: MESSAGE
|
||||
value: "hello world"
|
||||
command: ["/bin/echo"]
|
||||
args: ["$(MESSAGE)"]
|
||||
|
||||
This means you can define an argument for a Pod using any of
|
||||
the techniques available for defining environment variables, including
|
||||
[ConfigMaps](/docs/user-guide/configmap/)
|
||||
and
|
||||
[Secrets](/docs/user-guide/secrets/).
|
||||
|
||||
NOTE: The environment variable appears in parentheses, `"$(VAR)"`. This is
|
||||
required for the variable to be expanded in the `command` or `args` field.
|
||||
|
||||
### Running a command in a shell
|
||||
|
||||
In some cases, you need your command to run in a shell. For example, your
|
||||
command might consist of several commands piped together, or it might be a shell
|
||||
script. To run your command in a shell, wrap it like this:
|
||||
|
||||
command: ["/bin/sh"]
|
||||
args: ["-c", "while true; do echo hello; sleep 10;done"]
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% capture whatsnext %}
|
||||
|
||||
* Learn more about [containers and commands](/docs/user-guide/containers/).
|
||||
* Learn more about [configuring containers](/docs/user-guide/configuring-containers/).
|
||||
* Learn more about [running commands in a container](/docs/user-guide/getting-into-containers/).
|
||||
* See [Container](/docs/api-reference/v1/definitions/#_v1_container).
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% include templates/task.md %}
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
---
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
||||
This page shows how to define environment variables when you run a container
|
||||
in a Kubernetes Pod.
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture prerequisites %}
|
||||
|
||||
{% include task-tutorial-prereqs.md %}
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture steps %}
|
||||
|
||||
### Defining an environment variable for a container
|
||||
|
||||
When you create a Pod, you can set environment variables for the containers
|
||||
that run in the Pod. To set environment variables, include the `env` field in
|
||||
the configuration file.
|
||||
|
||||
In this exercise, you create a Pod that runs one container. The configuration
|
||||
file for the Pod defines an environment variable with name `DEMO_GREETING` and
|
||||
value `"Hello from the environment"`. Here is the configuration file for the
|
||||
Pod:
|
||||
|
||||
{% include code.html language="yaml" file="envars.yaml" ghlink="/docs/tasks/configure-pod-container/envars.yaml" %}
|
||||
|
||||
1. Create a Pod based on the YAML configuration file:
|
||||
|
||||
export REPO=https://raw.githubusercontent.com/kubernetes/kubernetes.github.io/master
|
||||
kubectl create -f $REPO/docs/tasks/configure-pod-container/envars.yaml
|
||||
|
||||
1. List the running Pods:
|
||||
|
||||
kubectl get pods
|
||||
|
||||
The output is similar to this:
|
||||
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
envar-demo 1/1 Running 0 9s
|
||||
|
||||
1. Get a shell to the container running in your Pod:
|
||||
|
||||
kubectl exec -it envar-demo -- /bin/bash
|
||||
|
||||
1. In your shell, run the `printenv` command to list the environment variables.
|
||||
|
||||
root@envar-demo:/# printenv
|
||||
|
||||
The output is similar to this:
|
||||
|
||||
NODE_VERSION=4.4.2
|
||||
EXAMPLE_SERVICE_PORT_8080_TCP_ADDR=10.3.245.237
|
||||
HOSTNAME=envar-demo
|
||||
...
|
||||
DEMO_GREETING=Hello from the environment
|
||||
|
||||
1. To exit the shell, enter `exit`.
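You can also read a single variable without opening an interactive shell; for example, with the same `envar-demo` Pod:

```shell
kubectl exec envar-demo -- printenv DEMO_GREETING
```

The output should be `Hello from the environment`.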
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% capture whatsnext %}
|
||||
|
||||
* Learn more about [environment variables](/docs/user-guide/environment-guide/).
|
||||
* Learn about [using secrets as environment variables](/docs/user-guide/secrets/#using-secrets-as-environment-variables).
|
||||
* See [EnvVarSource](/docs/api-reference/v1/definitions/#_v1_envvarsource).
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% include templates/task.md %}
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: envar-demo
|
||||
labels:
|
||||
purpose: demonstrate-envars
|
||||
spec:
|
||||
containers:
|
||||
- name: envar-demo-container
|
||||
image: gcr.io/google-samples/node-hello:1.0
|
||||
env:
|
||||
- name: DEMO_GREETING
|
||||
value: "Hello from the environment"
|
||||
|
|
@ -3,6 +3,11 @@
|
|||
|
||||
The Tasks section of the Kubernetes documentation is a work in progress
|
||||
|
||||
#### Configuring Pods and Containers
|
||||
|
||||
* [Defining Environment Variables for a Container](/docs/tasks/configure-pod-container/define-environment-variable-container/)
|
||||
* [Defining a Command and Arguments for a Container](/docs/tasks/configure-pod-container/define-command-argument-container/)
|
||||
|
||||
#### Accessing Applications in a Cluster
|
||||
|
||||
* [Using Port Forwarding to Access Applications in a Cluster](/docs/tasks/access-application-cluster/port-forward-access-application-cluster/)
|
||||
|
|
|
|||
|
|
@ -35,6 +35,11 @@ Use Helm to:
|
|||
|
||||
### Kompose
|
||||
|
||||
[Kompose](https://github.com/kubernetes-incubator/kompose) is a tool to help users familiar with Docker Compose
move to Kubernetes. A short usage sketch follows the list below.
|
||||
|
||||
Use Kompose to:
|
||||
|
||||
* Translate a Docker Compose file into Kubernetes objects
|
||||
* Go from local Docker development to managing your application via Kubernetes
|
||||
* Convert v1 or v2 Docker Compose `yaml` files or [Distributed Application Bundles](https://docs.docker.com/compose/bundles/)
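As a minimal usage sketch (assuming a `docker-compose.yml` in the current directory):

```shell
kompose convert -f docker-compose.yml    # emit a Kubernetes manifest per Compose service
kubectl create -f frontend-service.yaml  # create one of the generated objects (example file name)
```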
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@
|
|||
</div>
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/deploy-intro.html" role="button">Continue to Module 2<span class="btn__next">›</span></a>
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/deploy-intro/" role="button">Continue to Module 2<span class="btn__next">›</span></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -97,7 +97,7 @@ redirect_from:
|
|||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/cluster-interactive.html" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/cluster-interactive/" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@
|
|||
</div>
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/explore-intro.html" role="button">Continue to Module 3<span class="btn__next">›</span></a>
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/explore-intro/" role="button">Continue to Module 3<span class="btn__next">›</span></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -95,7 +95,7 @@
|
|||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/deploy-interactive.html" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/deploy-interactive/" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@
|
|||
</div>
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/expose-intro.html" role="button">Continue to Module 4<span class="btn__next">›</span></a>
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/expose-intro/" role="button">Continue to Module 4<span class="btn__next">›</span></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -129,7 +129,7 @@
|
|||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/explore-interactive.html" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/explore-interactive/" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@
|
|||
</div>
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/scale-intro.html" role="button">Continue to Module 5<span class="btn__next">›</span></a>
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/scale-intro/" role="button">Continue to Module 5<span class="btn__next">›</span></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -125,7 +125,7 @@
|
|||
<br>
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/expose-interactive.html" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/expose-interactive/" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@
|
|||
<div class="row">
|
||||
<div class="col-md-9">
|
||||
<h2>What can Kubernetes do for you?</h2>
|
||||
<p>With modern web services, users expect applications to be available 24/7, and developers expect to deploy new versions of those applications several times a day. Containerization helps package software to serve these goals, enabling applications to be released and updated in an easy and fast way without downtime. Kubernetes helps you make sure those containerized applications run where and when you want, and helps them find the resources and tools they need to work. <a href="http://kubernetes.io/docs/whatisk8s/">Kubernetes</a> is a production-ready, open source platform designed with Google's accumulated experience in container orchestration, combined with best-of-breed ideas from the community.</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@
|
|||
</div>
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/update-intro.html" role="button">Continue to Module 6<span class="btn__next">›</span></a>
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/update-intro/" role="button">Continue to Module 6<span class="btn__next">›</span></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -107,7 +107,7 @@
|
|||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/scale-interactive.html" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/scale-interactive/" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -122,7 +122,7 @@
|
|||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/update-interactive.html" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
<a class="btn btn-lg btn-success" href="/docs/tutorials/kubernetes-basics/update-interactive/" role="button">Start Interactive Tutorial <span class="btn__next">›</span></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -12,14 +12,7 @@ provides load balancing for an application that has two running instances.
|
|||
|
||||
{% capture prerequisites %}
|
||||
|
||||
{% include task-tutorial-prereqs.md %}
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
|
@ -104,7 +97,7 @@ provides load balancing for an application that has two running instances.
|
|||
|
||||
1. On your chosen node, create a firewall rule that allows TCP traffic
|
||||
on your node port. For example, if your Service has a NodePort value of
|
||||
31568, create a firewall rule that allows TCP traffic on port 31568.
|
||||
|
||||
1. Use the node address and node port to access the Hello World application:
|
||||
|
||||
|
|
|
|||
|
|
@ -11,15 +11,7 @@ external IP address.
|
|||
|
||||
{% capture prerequisites %}
|
||||
|
||||
{% include task-tutorial-prereqs.md %}
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
|
|
|||
|
|
@ -19,14 +19,7 @@ This page shows how to run an application using a Kubernetes Deployment object.
|
|||
|
||||
{% capture prerequisites %}
|
||||
|
||||
{% include task-tutorial-prereqs.md %}
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
|
|
|||
|
|
@ -374,6 +374,6 @@ driver, and then cleans up.
|
|||
An advantage of this approach is that the overall process gets the completion guarantee of a Job
|
||||
object, while maintaining complete control over what pods are created and how work is assigned to them.
|
||||
|
||||
## Scheduled Jobs
|
||||
|
||||
Support for creating Jobs at specified times/dates (i.e. cron) is available in Kubernetes [1.4](https://github.com/kubernetes/kubernetes/pull/11980). More information is available in the [scheduled job documents](http://kubernetes.io/docs/user-guide/scheduled-jobs/).
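As a minimal sketch of what such an object looks like in the 1.4 alpha API (check the scheduled job documents above for the authoritative spec; the name and schedule here are illustrative):

```shell
cat <<EOF | kubectl create -f -
apiVersion: batch/v2alpha1
kind: ScheduledJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"        # run once a minute, standard cron syntax
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            command: ["/bin/sh", "-c", "echo hello from the cluster"]
          restartPolicy: OnFailure
EOF
```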
|
||||
|
|
|
|||
|
|
@ -166,7 +166,7 @@ We will use the `amqp-consume` utility to read the message
|
|||
from the queue and run our actual program. Here is a very simple
|
||||
example program:
|
||||
|
||||
{% include code.html language="python" file="worker.py" ghlink="/docs/user-guide/job/work-queue-1/worker.py" %}
|
||||
{% include code.html language="python" file="worker.py" ghlink="/docs/user-guide/jobs/work-queue-1/worker.py" %}
|
||||
|
||||
Now, build an image. If you are working in the source
|
||||
tree, then change directory to `examples/job/work-queue-1`.
|
||||
|
|
@ -204,7 +204,7 @@ Here is a job definition. You'll need to make a copy of the Job and edit the
|
|||
image to match the name you used, and call it `./job.yaml`.
|
||||
|
||||
|
||||
{% include code.html language="yaml" file="job.yaml" ghlink="/docs/user-guide/job/work-queue-1/job.yaml" %}
|
||||
{% include code.html language="yaml" file="job.yaml" ghlink="/docs/user-guide/jobs/work-queue-1/job.yaml" %}
|
||||
|
||||
In this example, each pod works on one item from the queue and then exits.
|
||||
So, the completion count of the Job corresponds to the number of work items
|
||||
|
|
@ -258,12 +258,12 @@ want to consider one of the other [job patterns](/docs/user-guide/jobs/#job-patt
|
|||
|
||||
This approach creates a pod for every work item. If your work items only take a few seconds,
|
||||
though, creating a Pod for every work item may add a lot of overhead. Consider another
|
||||
[example](/docs/user-guide/jobs/work-queue-2/), that executes multiple work items per Pod.
|
||||
|
||||
In this example, we used the `amqp-consume` utility to read the message
|
||||
from the queue and run our actual program. This has the advantage that you
|
||||
do not need to modify your program to be aware of the queue.
|
||||
A [different example](/docs/user-guide/jobs/work-queue-2/) shows how to
|
||||
communicate with the work queue using a client library.
|
||||
|
||||
## Caveats
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ called rediswq.py ([Download](rediswq.py?raw=true)).
|
|||
The "worker" program in each Pod of the Job uses the work queue
|
||||
client library to get work. Here it is:
|
||||
|
||||
{% include code.html language="python" file="worker.py" ghlink="/docs/user-guide/job/work-queue-2/worker.py" %}
|
||||
{% include code.html language="python" file="worker.py" ghlink="/docs/user-guide/jobs/work-queue-2/worker.py" %}
|
||||
|
||||
If you are working from the source tree,
|
||||
change directory to the `examples/job/work-queue-2` directory.
|
||||
|
|
@ -147,7 +147,7 @@ gcloud docker push gcr.io/<project>/job-wq-2
|
|||
|
||||
Here is the job definition:
|
||||
|
||||
{% include code.html language="yaml" file="job.yaml" ghlink="/docs/user-guide/job/work-queue-2/job.yaml" %}
|
||||
{% include code.html language="yaml" file="job.yaml" ghlink="/docs/user-guide/jobs/work-queue-2/job.yaml" %}
|
||||
|
||||
Be sure to edit the job template to
|
||||
change `gcr.io/myproject` to your own path.
|
||||
|
|
|
|||
|
|
@ -3,24 +3,57 @@ assignees:
|
|||
- bgrant0607
|
||||
- erictune
|
||||
- krousey
|
||||
- clove
|
||||
|
||||
---
|
||||
An assortment of compact kubectl examples
|
||||
|
||||
See also: [Kubectl Overview](/docs/user-guide/kubectl-overview/) and [JsonPath Guide](/docs/user-guide/jsonpath).
|
||||
|
||||
## Kubectl Autocomplete
|
||||
|
||||
```console
|
||||
$ source <(kubectl completion bash) # setup autocomplete in bash
|
||||
$ source <(kubectl completion zsh) # setup autocomplete in zsh
|
||||
```
|
||||
|
||||
## Kubectl Context and Configuration
|
||||
|
||||
Set which Kubernetes cluster `kubectl` communicates with and modify configuration
|
||||
information. See [kubeconfig file](/docs/user-guide/kubeconfig-file/) documentation for
|
||||
detailed config file information.
|
||||
|
||||
```console
|
||||
$ kubectl config view # Show Merged kubeconfig settings.
|
||||
|
||||
# use multiple kubeconfig files at the same time and view merged config
|
||||
$ KUBECONFIG=~/.kube/config:~/.kube/kubconfig2 kubectl config view
|
||||
|
||||
# Get the password for the e2e user
|
||||
$ kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}'
|
||||
|
||||
$ kubectl config current-context # Display the current-context
|
||||
$ kubectl config use-context my-cluster-name # set the default context to my-cluster-name
|
||||
|
||||
# add a new cluster to your kubeconfig that supports basic auth
|
||||
$ kubectl config set-credentials kubeuser/foo.kubernetes.com --username=kubeuser --password=kubepassword
|
||||
|
||||
# set a context utilizing a specific username and namespace.
|
||||
$ kubectl config set-context gce --user=cluster-admin --namespace=foo \
|
||||
&& kubectl config use-context gce
|
||||
```
|
||||
|
||||
## Creating Objects
|
||||
|
||||
Kubernetes manifests can be defined in json or yaml. The file extensions `.yaml`,
`.yml`, and `.json` can all be used.
```console
|
||||
$ kubectl create -f ./my-manifest.yaml # create resource(s)
|
||||
$ kubectl create -f ./my1.yaml -f ./my2.yaml # create from multiple files
|
||||
$ kubectl create -f ./dir # create resource(s) in all manifest files in dir
|
||||
$ kubectl create -f https://git.io/vPieo # create resource(s) from url
|
||||
$ kubectl run nginx --image=nginx # start a single instance of nginx
|
||||
$ kubectl explain pods,svc # get the documentation for pod and svc manifests
|
||||
|
||||
# Create multiple YAML objects from stdin
|
||||
$ cat <<EOF | kubectl create -f -
|
||||
|
|
@ -61,68 +94,181 @@ data:
|
|||
username: $(echo "jane" | base64)
|
||||
EOF
|
||||
|
||||
```
|
||||
|
||||
|
||||
## Viewing, Finding Resources
|
||||
|
||||
```console
# Get commands with basic output
$ kubectl get services                          # List all services in the namespace
$ kubectl get pods --all-namespaces             # List all pods in all namespaces
$ kubectl get pods -o wide                      # List all pods in the namespace, with more details
$ kubectl get replicationcontroller <rc-name>   # List a particular replication controller
$ kubectl get deployment my-dep                 # List a particular deployment

# Describe commands with verbose output
$ kubectl describe nodes my-node
$ kubectl describe pods my-pod

$ kubectl get services --sort-by=.metadata.name # List Services Sorted by Name

# List pods Sorted by Restart Count
$ kubectl get pods --sort-by='.status.containerStatuses[0].restartCount'

# Get the version label of all pods with label app=cassandra
$ kubectl get pods --selector=app=cassandra rc -o \
  jsonpath='{.items[*].metadata.labels.version}'

# Get ExternalIPs of all nodes
$ kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="ExternalIP")].address}'

# List Names of Pods that belong to Particular RC
# "jq" command useful for transformations that are too complex for jsonpath
$ sel=${$(kubectl get rc my-rc --output=json | jq -j '.spec.selector | to_entries | .[] | "\(.key)=\(.value),"')%?}
$ echo $(kubectl get pods --selector=$sel --output=jsonpath={.items..metadata.name})

# Check which nodes are ready
$ JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' \
 && kubectl get nodes -o jsonpath=$JSONPATH | grep "Ready=True"
```
|
||||
|
||||
## Updating Resources

```console
$ kubectl rolling-update frontend-v1 -f frontend-v2.json           # Rolling update pods of frontend-v1
$ kubectl rolling-update frontend-v1 frontend-v2 --image=image:v2  # Change the name of the resource and update the image
$ kubectl rolling-update frontend --image=image:v2                 # Update the pods image of frontend
$ kubectl rolling-update frontend-v1 frontend-v2 --rollback        # Abort existing rollout in progress
$ cat pod.json | kubectl replace -f -                              # Replace a pod based on the JSON passed into stdin

# Force replace, delete and then re-create the resource. Will cause a service outage.
$ kubectl replace --force -f ./pod.json

# Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000
$ kubectl expose rc nginx --port=80 --target-port=8000

# Update a single-container pod's image version (tag) to v4
$ kubectl get pod mypod -o yaml | sed 's/\(image: myimage\):.*$/\1:v4/' | kubectl replace -f -

$ kubectl label pods my-pod new-label=awesome                      # Add a Label
$ kubectl annotate pods my-pod icon-url=http://goo.gl/XXBTWq       # Add an annotation
$ kubectl autoscale deployment foo --min=2 --max=10                # Auto scale a deployment "foo"
```
|
||||
|
||||
## Patching Resources
|
||||
Patch resources with a strategic merge patch.
|
||||
|
||||
```console
|
||||
$ kubectl patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}' # Partially update a node
|
||||
|
||||
# Update a container's image; spec.containers[*].name is required because it's a merge key
|
||||
$ kubectl patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}'
|
||||
|
||||
# Update a container's image using a json patch with positional arrays
|
||||
$ kubectl patch pod valid-pod --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]'
|
||||
```
|
||||
|
||||
## Editing Resources
|
||||
Edit any API resource in an editor.
|
||||
|
||||
```console
|
||||
$ kubectl edit svc/docker-registry # Edit the service named docker-registry
|
||||
$ KUBE_EDITOR="nano" kubectl edit svc/docker-registry # Use an alternative editor
|
||||
```
|
||||
|
||||
## Scaling Resources
|
||||
|
||||
```console
|
||||
$ kubectl scale --replicas=3 rs/foo                                 # Scale a replicaset named 'foo' to 3
|
||||
$ kubectl scale --replicas=3 -f foo.yaml # Scale a resource specified in "foo.yaml" to 3
|
||||
$ kubectl scale --current-replicas=2 --replicas=3 deployment/mysql # If the deployment named mysql's current size is 2, scale mysql to 3
|
||||
$ kubectl scale --replicas=5 rc/foo rc/bar rc/baz # Scale multiple replication controllers
|
||||
```
|
||||
|
||||
## Deleting Resources
|
||||
|
||||
```console
|
||||
$ kubectl delete -f ./pod.json # Delete a pod using the type and name specified in pod.json
|
||||
$ kubectl delete pod,service baz foo # Delete pods and services with same names "baz" and "foo"
|
||||
$ kubectl delete pods,services -l name=myLabel # Delete pods and services with label name=myLabel
|
||||
$ kubectl -n my-ns delete po,svc --all # Delete all pods and services in namespace my-ns
|
||||
```
|
||||
|
||||
## Interacting with running Pods
|
||||
|
||||
```console
|
||||
$ kubectl logs my-pod # dump pod logs (stdout)
|
||||
$ kubectl logs -f my-pod # stream pod logs (stdout)
|
||||
$ kubectl run -i --tty busybox --image=busybox -- sh # Run pod as interactive shell
|
||||
$ kubectl attach my-pod -i # Attach to Running Container
|
||||
$ kubectl port-forward my-pod 5000 6000                  # Forward ports 5000 and 6000 in the Pod to the same ports on your local machine
|
||||
$ kubectl port-forward my-svc 6000 # Forward port to service
|
||||
$ kubectl exec my-pod -- ls / # Run command in existing pod (1 container case)
|
||||
$ kubectl exec my-pod -c my-container -- ls / # Run command in existing pod (multi-container case)
|
||||
$ kubectl top pod POD_NAME --containers # Show metrics for a given pod and its containers
|
||||
```
|
||||
|
||||
## Interacting with Nodes and Cluster
|
||||
|
||||
```console
|
||||
$ kubectl cordon my-node # Mark my-node as unschedulable
|
||||
$ kubectl drain my-node # Drain my-node in preparation for maintenance
|
||||
$ kubectl uncordon my-node # Mark my-node as schedulable
|
||||
$ kubectl top node my-node # Show metrics for a given node
|
||||
$ kubectl cluster-info # Display addresses of the master and services
|
||||
$ kubectl cluster-info dump # Dump current cluster state to stdout
|
||||
$ kubectl cluster-info dump --output-directory=/path/to/cluster-state # Dump current cluster state to /path/to/cluster-state
|
||||
|
||||
# If a taint with that key and effect already exists, its value is replaced as specified.
|
||||
$ kubectl taint nodes foo dedicated=special-user:NoSchedule
|
||||
```
|
||||
|
||||
## Resource types
|
||||
|
||||
The following table includes a list of all the supported resource types and their abbreviated aliases.
|
||||
|
||||
Resource type | Abbreviated alias
|
||||
-------------------- | --------------------
|
||||
`clusters` |
|
||||
`componentstatuses` |`cs`
|
||||
`configmaps` |`cm`
|
||||
`daemonsets` |`ds`
|
||||
`deployments` |`deploy`
|
||||
`endpoints` |`ep`
|
||||
`events` |`ev`
|
||||
`horizontalpodautoscalers` |`hpa`
|
||||
`ingresses` |`ing`
|
||||
`jobs` |
|
||||
`limitranges` |`limits`
|
||||
`namespaces` |`ns`
|
||||
`networkpolicies` |
|
||||
`nodes` |`no`
|
||||
`petset` |
|
||||
`persistentvolumeclaims` |`pvc`
|
||||
`persistentvolumes` |`pv`
|
||||
`pods` |`po`
|
||||
`podsecuritypolicies` |`psp`
|
||||
`podtemplates` |
|
||||
`replicasets` |`rs`
|
||||
`replicationcontrollers` |`rc`
|
||||
`resourcequotas` |`quota`
|
||||
`scheduledjob` |
|
||||
`secrets` |
|
||||
`serviceaccount` |`sa`
|
||||
`services` |`svc`
|
||||
`storageclasses` |
|
||||
`thirdpartyresources` |
|
||||
|
||||
### Formatting output
|
||||
|
||||
To output details to your terminal window in a specific format, you can add either the `-o` or `--output` flag to a supported `kubectl` command.
|
||||
|
||||
Output format | Description
|
||||
--------------| -----------
|
||||
`-o=custom-columns=<spec>` | Print a table using a comma separated list of custom columns
|
||||
`-o=custom-columns-file=<filename>` | Print a table using the custom columns template in the `<filename>` file
|
||||
`-o=json` | Output a JSON formatted API object
|
||||
`-o=jsonpath=<template>` | Print the fields defined in a [jsonpath](/docs/user-guide/jsonpath) expression
|
||||
`-o=jsonpath-file=<filename>` | Print the fields defined by the [jsonpath](/docs/user-guide/jsonpath) expression in the `<filename>` file
|
||||
`-o=name` | Print only the resource name and nothing else
|
||||
`-o=wide` | Output in the plain-text format with any additional information, and for pods, the node name is included
|
||||
`-o=yaml` | Output a YAML formatted API object
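For example, a couple of these flags in action (`my-pod` is a placeholder name):

```console
$ kubectl get pods -o custom-columns=NAME:.metadata.name,STATUS:.status.phase   # two custom columns
$ kubectl get pod my-pod -o jsonpath='{.status.podIP}'                          # print just the pod IP
```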
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ assignees:
|
|||
|
||||
---
|
||||
|
||||
`kubectl` is a command line interface for running commands against Kubernetes clusters. This overview covers `kubectl` syntax, describes the command operations, and provides common examples. For details about each command, including all the supported flags and subcommands, see the [kubectl](/docs/user-guide/kubectl) reference documentation. For installation instructions see [prerequisites](/docs/user-guide/prereqs).
|
||||
|
||||
TODO: Auto-generate this file to ensure it's always in sync with any `kubectl` changes, see [#14177](http://pr.k8s.io/14177).
|
||||
|
||||
|
|
|
|||
|
|
@ -44,9 +44,11 @@ metadata:
|
|||
|
||||
To configure the annotation via `kubectl`:
|
||||
|
||||
```shell
|
||||
{% raw %}
|
||||
kubectl annotate ns <namespace> "net.beta.kubernetes.io/network-policy={\"ingress\": {\"isolation\": \"DefaultDeny\"}}"
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
See the [NetworkPolicy getting started guide](/docs/getting-started-guides/network-policy/walkthrough) for an example.
|
||||
|
||||
|
|
@ -61,12 +63,16 @@ apiVersion: extensions/v1beta1
|
|||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: test-network-policy
|
||||
namespace: default
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
role: db
|
||||
ingress:
|
||||
- from:
|
||||
- namespaceSelector:
|
||||
matchLabels:
|
||||
project: myproject
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
role: frontend
|
||||
|
|
@ -84,3 +90,9 @@ __spec__: `NetworkPolicy` [spec](https://github.com/kubernetes/kubernetes/tree/{
|
|||
__podSelector__: Each `NetworkPolicy` includes a `podSelector` which selects the grouping of pods to which the `ingress` rules in the policy apply.
|
||||
|
||||
__ingress__: Each `NetworkPolicy` includes a list of whitelist `ingress` rules. Each rule allows traffic which matches both the `from` and `ports` sections.
|
||||
|
||||
This example NetworkPolicy has the following characteristics:
|
||||
|
||||
1. applies to all pods in the default namespace with the label "role=db"
|
||||
2. allows tcp/6379 ingress traffic to the "role=db" pods from any pod in the current namespace with the label "role=frontend" (due to the podSelector list element)
|
||||
3. allows tcp/6379 ingress traffic to the "role=db" pods from any pod in the namespace "myproject" (due to the namespaceSelector list element)
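To try this out, you could save the manifest above to a file (the name `nsp.yaml` is arbitrary) and create it:

```shell
kubectl create -f nsp.yaml     # create the NetworkPolicy shown above
kubectl get networkpolicy      # verify that it was created
```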
|
||||
|
|
|
|||
|
|
@ -400,6 +400,20 @@ parameters:
|
|||
* `type`: [VolumeType](http://docs.openstack.org/admin-guide/dashboard-manage-volumes.html) created in Cinder. Default is empty.
|
||||
* `availability`: Availability Zone. Default is empty.
|
||||
|
||||
#### vSphere
|
||||
|
||||
```yaml
|
||||
kind: StorageClass
|
||||
apiVersion: storage.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: fast
|
||||
provisioner: kubernetes.io/vsphere-volume
|
||||
parameters:
|
||||
diskformat: zeroedthick
|
||||
```
|
||||
|
||||
* `diskformat`: `thin`, `zeroedthick` and `eagerzeroedthick`. Default: `"thin"`.
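A claim can then request a volume from this class. As a minimal sketch (the claim name and size are illustrative; in this release a storage class is selected with the `volume.beta.kubernetes.io/storage-class` annotation):

```shell
cat <<EOF | kubectl create -f -
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: myclaim
  annotations:
    volume.beta.kubernetes.io/storage-class: fast   # matches the StorageClass above
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
EOF
```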
|
||||
|
||||
## Writing Portable Configuration
|
||||
|
||||
If you're writing configuration templates or examples that run on a wide range of clusters
|
||||
|
|
|
|||
|
|
@ -226,7 +226,8 @@ Here is a toy example:
|
|||
|
||||
The message is recorded along with the other state of the last (i.e., most recent) termination:
|
||||
|
||||
```shell
|
||||
{% raw %}
|
||||
$ kubectl create -f ./pod-w-message.yaml
|
||||
pod "pod-w-message" created
|
||||
$ sleep 70
|
||||
|
|
@ -234,7 +235,8 @@ $ kubectl get pods/pod-w-message -o go-template="{{range .status.containerStatus
|
|||
Sleep expired
|
||||
$ kubectl get pods/pod-w-message -o go-template="{{range .status.containerStatuses}}{{.lastState.terminated.exitCode}}{{end}}"
|
||||
0
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
## What's next?
|
||||
|
||||
|
|
|
|||
|
|
@ -344,6 +344,9 @@ In this case, the file resulting in `/etc/foo/my-group/my-username` will have
|
|||
permission value of `0777`. Owing to JSON limitations, you must specify the mode
|
||||
in decimal notation.
|
||||
|
||||
Note that this permission value might be displayed in decimal notation if you
|
||||
read it later.
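For example, to make the mounted files readable only by the owner (octal `0400`), you would set a decimal mode of `256`. A minimal sketch, assuming a secret named `mysecret` already exists:

```shell
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: secret-mode-demo
spec:
  containers:
  - name: demo
    image: debian
    command: ["sleep", "3600"]
    volumeMounts:
    - name: foo
      mountPath: /etc/foo
      readOnly: true
  volumes:
  - name: foo
    secret:
      secretName: mysecret
      defaultMode: 256    # 0400 in octal; specified in decimal per the note above
EOF
```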
|
||||
|
||||
**Consuming Secret Values from Volumes**
|
||||
|
||||
Inside the container that mounts a secret volume, the secret keys appear as
|
||||
|
|
|
|||
|
|
@ -475,18 +475,27 @@ More details can be found [here](https://github.com/kubernetes/kubernetes/tree/{
|
|||
|
||||
### vsphereVolume
|
||||
|
||||
A `vsphereVolume` is used to mount a vSphere VMDK volume into your Pod. The contents
of a volume are preserved when it is unmounted. It supports both VMFS and VSAN datastores.

__Prerequisite: Kubernetes with the vSphere Cloud Provider configured. For cloud provider configuration, please refer to the [vSphere getting started guide](http://kubernetes.io/docs/getting-started-guides/vsphere/).__

__Important: You must create a VMDK using one of the following methods before using it with a Pod.__

#### Creating a VMDK volume

Before you can use a vSphere volume with a pod, you need to create it.

* Create using vmkfstools.

  First ssh into ESX, then use the following command to create a VMDK:

  ```shell
  vmkfstools -c 2G /vmfs/volumes/DatastoreName/volumes/myDisk.vmdk
  ```

* Create using vmware-vdiskmanager.

  ```shell
  vmware-vdiskmanager -c -t 0 -s 40GB -a lsilogic myDisk.vmdk
  ```
|
||||
|
||||
#### vSphere VMDK Example configuration
|
||||
|
|
@ -507,9 +516,11 @@ spec:
|
|||
- name: test-volume
|
||||
# This VMDK volume must already exist.
|
||||
vsphereVolume:
|
||||
volumePath: "[DatastoreName] volumes/myDisk"
|
||||
fsType: ext4
|
||||
```
|
||||
More examples can be found [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere).
|
||||
|
||||
|
||||
### Quobyte
|
||||
|
||||
|
|
|
|||
|
|
@ -59,12 +59,14 @@ On most providers, the pod IPs are not externally accessible. The easiest way to
|
|||
|
||||
Provided the pod IP is accessible, you should be able to access its http endpoint with wget on port 80:
|
||||
|
||||
```shell
|
||||
{% raw %}
|
||||
$ kubectl run busybox --image=busybox --restart=Never --tty -i --generator=run-pod/v1 --env "POD_IP=$(kubectl get pod nginx -o go-template='{{.status.podIP}}')"
|
||||
u@busybox$ wget -qO- http://$POD_IP # Run in the busybox container
|
||||
u@busybox$ exit # Exit the busybox container
|
||||
$ kubectl delete pod busybox # Clean up the pod we created with "kubectl run"
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
Delete the pod by name:
|
||||
|
||||
|
|
|
|||
|
|
@ -136,7 +136,8 @@ On most providers, the service IPs are not externally accessible. The easiest wa
|
|||
|
||||
Provided the service IP is accessible, you should be able to access its http endpoint with wget on the exposed port:
|
||||
|
||||
```shell
|
||||
{% raw %}
|
||||
$ export SERVICE_IP=$(kubectl get service nginx-service -o go-template='{{.spec.clusterIP}}')
|
||||
$ export SERVICE_PORT=$(kubectl get service nginx-service -o go-template='{{(index .spec.ports 0).port}}')
|
||||
$ echo "$SERVICE_IP:$SERVICE_PORT"
|
||||
|
|
@ -144,7 +145,8 @@ $ kubectl run busybox --generator=run-pod/v1 --image=busybox --restart=Never --
|
|||
u@busybox$ wget -qO- http://$SERVICE_IP:$SERVICE_PORT # Run in the busybox container
|
||||
u@busybox$ exit # Exit the busybox container
|
||||
$ kubectl delete pod busybox # Clean up the pod we created with "kubectl run"
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
To delete the service by name:
|
||||
|
||||
|
|
|
|||
|
|
|
@ -14,47 +14,24 @@ title: Partners
|
|||
|
||||
<section id="users">
|
||||
<main>
|
||||
<h5>We are working with a broad group of partners who contribute to the Kubernetes core codebase, making it stronger and richer. These partners create a vibrant Kubernetes ecosystem supporting a spectrum of complementary platforms, from open source solutions to market-leading technologies.</h5>
|
||||
<h3>Technology Partners</h3>
|
||||
<div id="isvContainer"></div>
|
||||
<h3>Services Partners</h3>
|
||||
<div id="servContainer"></div>
|
||||
</main>
|
||||
</section>
|
||||
<style>
|
||||
h5 {
|
||||
font-size: 18px;
|
||||
line-height: 1.5em;
|
||||
margin-bottom: 2em;
|
||||
}
|
||||
|
||||
#usersGrid a {
|
||||
display: inline-block;
|
||||
background-color: #f9f9f9;
|
||||
}
|
||||
</style>
|
||||
|
||||
{% include footer.html %}
|
||||
{% include case-study-styles.html %}
|
||||
|
||||
<style>
|
||||
{% include partner-style.css %}
|
||||
</style>
|
||||
|
||||
<script>
|
||||
{% include partner-script.js %}
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
|
|
|||