diff --git a/README.md b/README.md index 9e4aa779..2eec040d 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ Name | Description | Notable Features Used | Complexity Level ------------- | ------------- | ------------ | ------------ [Guestbook](guestbook/) | PHP app with Redis | Replication Controller, Service | Beginner [WordPress](mysql-wordpress-pd/) | WordPress with MySQL | Deployment, Persistent Volume with Claim | Beginner -[Cassandra](cassandra/) | Cloud Native Cassandra | Daemon Set | Intermediate +[Cassandra](storage/cassandra/) | Cloud Native Cassandra | Daemon Set | Intermediate Note: Please add examples to the list above that are maintained. diff --git a/examples_test.go b/examples_test.go index 21feace4..eb05388d 100644 --- a/examples_test.go +++ b/examples_test.go @@ -220,10 +220,10 @@ func TestExampleObjectSchemas(t *testing.T) { "pod": &api.Pod{}, "service": &api.Service{}, }, - "../examples/iscsi": { + "../examples/volumes/iscsi": { "iscsi": &api.Pod{}, }, - "../examples/glusterfs": { + "../examples/volumes/glusterfs": { "glusterfs-pod": &api.Pod{}, "glusterfs-endpoints": &api.Endpoints{}, "glusterfs-service": &api.Service{}, @@ -249,14 +249,14 @@ func TestExampleObjectSchemas(t *testing.T) { "scheduler-policy-config": &schedulerapi.Policy{}, "scheduler-policy-config-with-extender": &schedulerapi.Policy{}, }, - "../examples/rbd/secret": { + "../examples/volumes/rbd/secret": { "ceph-secret": &api.Secret{}, }, - "../examples/rbd": { + "../examples/volumes/rbd": { "rbd": &api.Pod{}, "rbd-with-secret": &api.Pod{}, }, - "../examples/cassandra": { + "../examples/storage/cassandra": { "cassandra-daemonset": &extensions.DaemonSet{}, "cassandra-controller": &api.ReplicationController{}, "cassandra-service": &api.Service{}, @@ -289,7 +289,7 @@ func TestExampleObjectSchemas(t *testing.T) { "../examples/explorer": { "pod": &api.Pod{}, }, - "../examples/hazelcast": { + "../examples/storage/hazelcast": { "hazelcast-controller": &api.ReplicationController{}, "hazelcast-service": &api.Service{}, }, @@ -319,7 +319,7 @@ func TestExampleObjectSchemas(t *testing.T) { "mysql-deployment": &api.Service{}, "wordpress-deployment": &api.Service{}, }, - "../examples/nfs": { + "../examples/volumes/nfs": { "nfs-busybox-rc": &api.ReplicationController{}, "nfs-server-rc": &api.ReplicationController{}, "nfs-server-service": &api.Service{}, @@ -345,7 +345,7 @@ func TestExampleObjectSchemas(t *testing.T) { "phabricator-controller": &api.ReplicationController{}, "phabricator-service": &api.Service{}, }, - "../examples/redis": { + "../examples/storage/redis": { "redis-controller": &api.ReplicationController{}, "redis-master": &api.Pod{}, "redis-proxy": &api.Pod{}, @@ -357,7 +357,7 @@ func TestExampleObjectSchemas(t *testing.T) { "limits": &api.LimitRange{}, "quota": &api.ResourceQuota{}, }, - "../examples/rethinkdb": { + "../examples/storage/rethinkdb": { "admin-pod": &api.Pod{}, "admin-service": &api.Service{}, "driver-service": &api.Service{}, @@ -390,11 +390,11 @@ func TestExampleObjectSchemas(t *testing.T) { "zookeeper-service": &api.Service{}, "zookeeper": &api.Pod{}, }, - "../examples/cephfs/": { + "../examples/volumes/cephfs/": { "cephfs": &api.Pod{}, "cephfs-with-secret": &api.Pod{}, }, - "../examples/fibre_channel": { + "../examples/volumes/fibre_channel": { "fc": &api.Pod{}, }, "../examples/javaweb-tomcat-sidecar": { @@ -409,7 +409,7 @@ func TestExampleObjectSchemas(t *testing.T) { "redis-service": &api.Service{}, "job": &batch.Job{}, }, - "../examples/azure_file": { +
"../examples/volumes/azure_file": { "azure": &api.Pod{}, }, } @@ -486,7 +486,7 @@ func TestReadme(t *testing.T) { }{ {"../README.md", []runtime.Object{&api.Pod{}}}, {"../docs/user-guide/walkthrough/README.md", []runtime.Object{&api.Pod{}}}, - {"../examples/iscsi/README.md", []runtime.Object{&api.Pod{}}}, + {"../examples/volumes/iscsi/README.md", []runtime.Object{&api.Pod{}}}, } for _, path := range paths { diff --git a/job/work-queue-2/README.md b/job/work-queue-2/README.md index d2d72334..d8e7fe88 100644 --- a/job/work-queue-2/README.md +++ b/job/work-queue-2/README.md @@ -58,7 +58,7 @@ Here is an overview of the steps in this example: ## Starting Redis For this example, for simplicitly, we will start a single instance of Redis. -See the [Redis Example](../../../examples/redis/README.md) for an example +See the [Redis Example](../../../examples/storage/redis/README.md) for an example of deploying Redis scaleably and redundantly. Start a temporary Pod running Redis and a service so we can find it. diff --git a/spark/spark-gluster/README.md b/spark/spark-gluster/README.md index 83f0c4cf..7b695e98 100644 --- a/spark/spark-gluster/README.md +++ b/spark/spark-gluster/README.md @@ -34,7 +34,7 @@ Documentation for other releases can be found at # Spark on GlusterFS example -This guide is an extension of the standard [Spark on Kubernetes Guide](../../../examples/spark/) and describes how to run Spark on GlusterFS using the [Kubernetes Volume Plugin for GlusterFS](../../../examples/glusterfs/) +This guide is an extension of the standard [Spark on Kubernetes Guide](../../../examples/spark/) and describes how to run Spark on GlusterFS using the [Kubernetes Volume Plugin for GlusterFS](../../../examples/volumes/glusterfs/) The setup is the same in that you will setup a Spark Master Service in the same way you do with the standard Spark guide but you will deploy a modified Spark Master and a Modified Spark Worker ReplicationController, as they will be modified to use the GlusterFS volume plugin to mount a GlusterFS volume into the Spark Master and Spark Workers containers. Note that this example can be used as a guide for implementing any of the Kubernetes Volume Plugins with the Spark Example. diff --git a/cassandra/README.md b/storage/cassandra/README.md similarity index 92% rename from cassandra/README.md rename to storage/cassandra/README.md index 2beac0d6..20b9f243 100644 --- a/cassandra/README.md +++ b/storage/cassandra/README.md @@ -19,11 +19,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/cassandra/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). @@ -58,17 +53,17 @@ new Cassandra nodes as they join the cluster. 
This example also uses some of the core components of Kubernetes: -- [_Pods_](../../docs/user-guide/pods.md) -- [ _Services_](../../docs/user-guide/services.md) -- [_Replication Controllers_](../../docs/user-guide/replication-controller.md) -- [_Daemon Sets_](../../docs/admin/daemons.md) +- [_Pods_](../../../docs/user-guide/pods.md) +- [ _Services_](../../../docs/user-guide/services.md) +- [_Replication Controllers_](../../../docs/user-guide/replication-controller.md) +- [_Daemon Sets_](../../../docs/admin/daemons.md) ## Prerequisites This example assumes that you have a Kubernetes version >=1.2 cluster installed and running, -and that you have installed the [`kubectl`](../../docs/user-guide/kubectl/kubectl.md) +and that you have installed the [`kubectl`](../../../docs/user-guide/kubectl/kubectl.md) command line tool somewhere in your path. Please see the -[getting started guides](../../docs/getting-started-guides/) +[getting started guides](../../../docs/getting-started-guides/) for installation instructions for your platform. This example also has a few code and configuration files needed. To avoid @@ -109,7 +104,7 @@ how the container docker image was built and what it contains. You may also note that we are setting some Cassandra parameters (`MAX_HEAP_SIZE` and `HEAP_NEWSIZE`), and adding information about the -[namespace](../../docs/user-guide/namespaces.md). +[namespace](../../../docs/user-guide/namespaces.md). We also tell Kubernetes that the container exposes both the `CQL` and `Thrift` API ports. Finally, we tell the cluster manager that we need 0.1 cpu (0.1 core). @@ -121,10 +116,10 @@ here are the steps: ```sh # create a service to track all cassandra nodes -kubectl create -f examples/cassandra/cassandra-service.yaml +kubectl create -f examples/storage/cassandra/cassandra-service.yaml # create a replication controller to replicate cassandra nodes -kubectl create -f examples/cassandra/cassandra-controller.yaml +kubectl create -f examples/storage/cassandra/cassandra-controller.yaml # validate the Cassandra cluster. Substitute the name of one of your pods. kubectl exec -ti cassandra-xxxxx -- nodetool status @@ -136,7 +131,7 @@ kubectl scale rc cassandra --replicas=4 kubectl delete rc cassandra # then, create a daemonset to place a cassandra node on each kubernetes node -kubectl create -f examples/cassandra/cassandra-daemonset.yaml --validate=false +kubectl create -f examples/storage/cassandra/cassandra-daemonset.yaml --validate=false # resource cleanup kubectl delete service -l app=cassandra @@ -145,8 +140,8 @@ kubectl delete daemonset cassandra ## Step 1: Create a Cassandra Service -A Kubernetes _[Service](../../docs/user-guide/services.md)_ describes a set of -[_Pods_](../../docs/user-guide/pods.md) that perform the same task. In +A Kubernetes _[Service](../../../docs/user-guide/services.md)_ describes a set of +[_Pods_](../../../docs/user-guide/pods.md) that perform the same task. In Kubernetes, the atomic unit of an application is a Pod: one or more containers that _must_ be scheduled onto the same host. @@ -184,20 +179,20 @@ selected for membership in this service. We'll see that in action shortly. Create the Cassandra service as follows: ```console -$ kubectl create -f examples/cassandra/cassandra-service.yaml +$ kubectl create -f examples/storage/cassandra/cassandra-service.yaml ``` ## Step 2: Use a Replication Controller to create Cassandra node pods As we noted above, in Kubernetes, the atomic unit of an application is a -[_Pod_](../../docs/user-guide/pods.md). 
+[_Pod_](../../../docs/user-guide/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. A Kubernetes -_[Replication Controller](../../docs/user-guide/replication-controller.md)_ +_[Replication Controller](../../../docs/user-guide/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a Service, it has a selector query which identifies the members of its set. Unlike a Service, it also has a desired number of replicas, and it will create @@ -294,7 +289,7 @@ Create the Replication Controller: ```console -$ kubectl create -f examples/cassandra/cassandra-controller.yaml +$ kubectl create -f examples/storage/cassandra/cassandra-controller.yaml ``` @@ -428,7 +423,7 @@ $ kubectl delete rc cassandra ## Step 5: Use a DaemonSet instead of a Replication Controller -In Kubernetes, a [_Daemon Set_](../../docs/admin/daemons.md) can distribute pods +In Kubernetes, a [_Daemon Set_](../../../docs/admin/daemons.md) can distribute pods onto Kubernetes nodes, one-to-one. Like a _ReplicationController_, it has a selector query which identifies the members of its set. Unlike a _ReplicationController_, it has a node selector to limit which nodes are @@ -520,7 +515,7 @@ Create this daemonset: ```console -$ kubectl create -f examples/cassandra/cassandra-daemonset.yaml +$ kubectl create -f examples/storage/cassandra/cassandra-daemonset.yaml ``` @@ -528,7 +523,7 @@ You may need to disable config file validation, like so: ```console -$ kubectl create -f examples/cassandra/cassandra-daemonset.yaml --validate=false +$ kubectl create -f examples/storage/cassandra/cassandra-daemonset.yaml --validate=false ``` @@ -596,5 +591,5 @@ $ kubectl delete daemonset cassandra -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/cassandra/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/cassandra/README.md?pixel)]() diff --git a/cassandra/cassandra-controller.yaml b/storage/cassandra/cassandra-controller.yaml similarity index 100% rename from cassandra/cassandra-controller.yaml rename to storage/cassandra/cassandra-controller.yaml diff --git a/cassandra/cassandra-daemonset.yaml b/storage/cassandra/cassandra-daemonset.yaml similarity index 100% rename from cassandra/cassandra-daemonset.yaml rename to storage/cassandra/cassandra-daemonset.yaml diff --git a/cassandra/cassandra-service.yaml b/storage/cassandra/cassandra-service.yaml similarity index 100% rename from cassandra/cassandra-service.yaml rename to storage/cassandra/cassandra-service.yaml diff --git a/cassandra/image/Dockerfile b/storage/cassandra/image/Dockerfile similarity index 100% rename from cassandra/image/Dockerfile rename to storage/cassandra/image/Dockerfile diff --git a/cassandra/image/Makefile b/storage/cassandra/image/Makefile similarity index 100% rename from cassandra/image/Makefile rename to storage/cassandra/image/Makefile diff --git a/cassandra/image/cassandra.list b/storage/cassandra/image/cassandra.list similarity index 100% rename from cassandra/image/cassandra.list rename to storage/cassandra/image/cassandra.list diff --git a/cassandra/image/cassandra.yaml b/storage/cassandra/image/cassandra.yaml similarity index 100% rename from cassandra/image/cassandra.yaml rename to storage/cassandra/image/cassandra.yaml diff --git a/cassandra/image/kubernetes-cassandra.jar b/storage/cassandra/image/kubernetes-cassandra.jar 
similarity index 100% rename from cassandra/image/kubernetes-cassandra.jar rename to storage/cassandra/image/kubernetes-cassandra.jar diff --git a/cassandra/image/logback.xml b/storage/cassandra/image/logback.xml similarity index 100% rename from cassandra/image/logback.xml rename to storage/cassandra/image/logback.xml diff --git a/cassandra/image/run.sh b/storage/cassandra/image/run.sh similarity index 100% rename from cassandra/image/run.sh rename to storage/cassandra/image/run.sh diff --git a/cassandra/java/.gitignore b/storage/cassandra/java/.gitignore similarity index 100% rename from cassandra/java/.gitignore rename to storage/cassandra/java/.gitignore diff --git a/cassandra/java/README.md b/storage/cassandra/java/README.md similarity index 92% rename from cassandra/java/README.md rename to storage/cassandra/java/README.md index f35571f7..a5ea2b02 100644 --- a/cassandra/java/README.md +++ b/storage/cassandra/java/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/cassandra/java/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). @@ -64,5 +59,5 @@ This in affect makes every node a seed provider, which is not a recommended best -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/cassandra/java/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/cassandra/java/README.md?pixel)]() diff --git a/cassandra/java/pom.xml b/storage/cassandra/java/pom.xml similarity index 100% rename from cassandra/java/pom.xml rename to storage/cassandra/java/pom.xml diff --git a/cassandra/java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java b/storage/cassandra/java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java similarity index 100% rename from cassandra/java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java rename to storage/cassandra/java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java diff --git a/cassandra/java/src/test/java/io/k8s/cassandra/KubernetesSeedProviderTest.java b/storage/cassandra/java/src/test/java/io/k8s/cassandra/KubernetesSeedProviderTest.java similarity index 100% rename from cassandra/java/src/test/java/io/k8s/cassandra/KubernetesSeedProviderTest.java rename to storage/cassandra/java/src/test/java/io/k8s/cassandra/KubernetesSeedProviderTest.java diff --git a/cassandra/java/src/test/resources/cassandra.yaml b/storage/cassandra/java/src/test/resources/cassandra.yaml similarity index 100% rename from cassandra/java/src/test/resources/cassandra.yaml rename to storage/cassandra/java/src/test/resources/cassandra.yaml diff --git a/cassandra/java/src/test/resources/logback-test.xml b/storage/cassandra/java/src/test/resources/logback-test.xml similarity index 100% rename from cassandra/java/src/test/resources/logback-test.xml rename to storage/cassandra/java/src/test/resources/logback-test.xml diff --git a/hazelcast/README.md b/storage/hazelcast/README.md similarity index 87% rename from hazelcast/README.md rename to storage/hazelcast/README.md index 3c5bcd5c..a12bd697 100644 --- a/hazelcast/README.md +++ b/storage/hazelcast/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. 
- - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/hazelcast/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). @@ -42,7 +37,7 @@ This document also attempts to describe the core components of Kubernetes: _Pods ### Prerequisites -This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the `kubectl` command line tool somewhere in your path. Please see the [getting started](../../docs/getting-started-guides/) for installation instructions for your platform. +This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the `kubectl` command line tool somewhere in your path. Please see the [getting started](../../../docs/getting-started-guides/) for installation instructions for your platform. ### A note for the impatient @@ -57,14 +52,14 @@ Source is freely available at: ### Simple Single Pod Hazelcast Node -In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/user-guide/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. +In Kubernetes, the atomic unit of an application is a [_Pod_](../../../docs/user-guide/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. In this case, we shall not run a single Hazelcast pod, because the discovery mechanism now relies on a service definition. ### Adding a Hazelcast Service -In Kubernetes a _[Service](../../docs/user-guide/services.md)_ describes a set of Pods that perform the same task. For example, the set of nodes in a Hazelcast cluster. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods available via the Kubernetes API. This is actually how our discovery mechanism works, by relying on the service to discover other Hazelcast pods. +In Kubernetes a _[Service](../../../docs/user-guide/services.md)_ describes a set of Pods that perform the same task. For example, the set of nodes in a Hazelcast cluster. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods available via the Kubernetes API. This is actually how our discovery mechanism works, by relying on the service to discover other Hazelcast pods. Here is the service description: @@ -92,14 +87,14 @@ The important thing to note here is the `selector`. It is a query over labels, t Create this service as follows: ```sh -$ kubectl create -f examples/hazelcast/hazelcast-service.yaml +$ kubectl create -f examples/storage/hazelcast/hazelcast-service.yaml ``` ### Adding replicated nodes The real power of Kubernetes and Hazelcast lies in easily building a replicated, resizable Hazelcast cluster. -In Kubernetes a _[Replication Controller](../../docs/user-guide/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of it's set. 
Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with it's desired state. +In Kubernetes a _[Replication Controller](../../../docs/user-guide/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state. Replication Controllers will "adopt" existing pods that match their selector query, so let's create a Replication Controller with a single replica to adopt our existing Hazelcast Pod. @@ -153,7 +148,7 @@ Last but not least, we set `DNS_DOMAIN` environment variable according to your K Create this controller: ```sh -$ kubectl create -f examples/hazelcast/hazelcast-controller.yaml +$ kubectl create -f examples/storage/hazelcast/hazelcast-controller.yaml ``` After the controller successfully provisions the pod, you can query the service endpoints: @@ -264,10 +259,10 @@ For those of you who are impatient, here is the summary of the commands we ran i ```sh # create a service to track all hazelcast nodes -kubectl create -f examples/hazelcast/hazelcast-service.yaml +kubectl create -f examples/storage/hazelcast/hazelcast-service.yaml # create a replication controller to replicate hazelcast nodes -kubectl create -f examples/hazelcast/hazelcast-controller.yaml +kubectl create -f examples/storage/hazelcast/hazelcast-controller.yaml # scale up to 2 nodes kubectl scale rc hazelcast --replicas=2 @@ -282,5 +277,5 @@ See [here](https://github.com/pires/hazelcast-kubernetes-bootstrapper/blob/maste -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/hazelcast/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/hazelcast/README.md?pixel)]() diff --git a/hazelcast/hazelcast-controller.yaml b/storage/hazelcast/hazelcast-controller.yaml similarity index 100% rename from hazelcast/hazelcast-controller.yaml rename to storage/hazelcast/hazelcast-controller.yaml diff --git a/hazelcast/hazelcast-service.yaml b/storage/hazelcast/hazelcast-service.yaml similarity index 100% rename from hazelcast/hazelcast-service.yaml rename to storage/hazelcast/hazelcast-service.yaml diff --git a/hazelcast/image/Dockerfile b/storage/hazelcast/image/Dockerfile similarity index 100% rename from hazelcast/image/Dockerfile rename to storage/hazelcast/image/Dockerfile diff --git a/mysql-galera/README.md b/storage/mysql-galera/README.md similarity index 91% rename from mysql-galera/README.md rename to storage/mysql-galera/README.md index 6ad7b92b..eadbea50 100644 --- a/mysql-galera/README.md +++ b/storage/mysql-galera/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/mysql-galera/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). @@ -38,7 +33,7 @@ This document explains a simple demonstration example of running MySQL synchrono ### Prerequisites -This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path.
Please see the [getting started](../../docs/getting-started-guides/) for installation instructions for your platform. +This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting started](../../../docs/getting-started-guides/) for installation instructions for your platform. Also, this example requires the image found in the ```image``` directory. For your convenience, it is built and available on Docker's public image repository as ```capttofu/percona_xtradb_cluster_5_6```. It can also be built which would merely require that the image in the pod or replication controller files is updated. @@ -56,11 +51,11 @@ Note: Kubernetes best-practices is to pre-create the services for each controlle First, create the overall cluster service that will be used to connect to the cluster: -```kubectl create -f examples/mysql-galera/pxc-cluster-service.yaml``` +```kubectl create -f examples/storage/mysql-galera/pxc-cluster-service.yaml``` Create the service and replication controller for the first node: -```kubectl create -f examples/mysql-galera/pxc-node1.yaml``` +```kubectl create -f examples/storage/mysql-galera/pxc-node1.yaml``` ### Create services and controllers for the remaining nodes @@ -71,21 +66,21 @@ When complete, you should be able connect with a mysql client to the IP address ### An example of creating a cluster -Shown below are examples of Using ```kubectl``` from within the ```./examples/mysql-galera``` directory, the status of the lauched replication controllers and services can be confirmed +Shown below are examples of using ```kubectl``` from within the ```./examples/storage/mysql-galera``` directory; the status of the launched replication controllers and services can be confirmed ``` -$ kubectl create -f examples/mysql-galera/pxc-cluster-service.yaml +$ kubectl create -f examples/storage/mysql-galera/pxc-cluster-service.yaml services/pxc-cluster -$ kubectl create -f examples/mysql-galera/pxc-node1.yaml +$ kubectl create -f examples/storage/mysql-galera/pxc-node1.yaml services/pxc-node1 replicationcontrollers/pxc-node1 -$ kubectl create -f examples/mysql-galera/pxc-node2.yaml +$ kubectl create -f examples/storage/mysql-galera/pxc-node2.yaml services/pxc-node2 replicationcontrollers/pxc-node2 -$ kubectl create -f examples/mysql-galera/pxc-node3.yaml +$ kubectl create -f examples/storage/mysql-galera/pxc-node3.yaml services/pxc-node3 replicationcontrollers/pxc-node3 @@ -167,5 +162,5 @@ This setup certainly can become more fluid and dynamic.
One idea is to perhaps u -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/mysql-galera/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/mysql-galera/README.md?pixel)]() diff --git a/mysql-galera/image/Dockerfile b/storage/mysql-galera/image/Dockerfile similarity index 100% rename from mysql-galera/image/Dockerfile rename to storage/mysql-galera/image/Dockerfile diff --git a/mysql-galera/image/cluster.cnf b/storage/mysql-galera/image/cluster.cnf similarity index 100% rename from mysql-galera/image/cluster.cnf rename to storage/mysql-galera/image/cluster.cnf diff --git a/mysql-galera/image/docker-entrypoint.sh b/storage/mysql-galera/image/docker-entrypoint.sh similarity index 100% rename from mysql-galera/image/docker-entrypoint.sh rename to storage/mysql-galera/image/docker-entrypoint.sh diff --git a/mysql-galera/image/my.cnf b/storage/mysql-galera/image/my.cnf similarity index 100% rename from mysql-galera/image/my.cnf rename to storage/mysql-galera/image/my.cnf diff --git a/mysql-galera/pxc-cluster-service.yaml b/storage/mysql-galera/pxc-cluster-service.yaml similarity index 100% rename from mysql-galera/pxc-cluster-service.yaml rename to storage/mysql-galera/pxc-cluster-service.yaml diff --git a/mysql-galera/pxc-node1.yaml b/storage/mysql-galera/pxc-node1.yaml similarity index 100% rename from mysql-galera/pxc-node1.yaml rename to storage/mysql-galera/pxc-node1.yaml diff --git a/mysql-galera/pxc-node2.yaml b/storage/mysql-galera/pxc-node2.yaml similarity index 100% rename from mysql-galera/pxc-node2.yaml rename to storage/mysql-galera/pxc-node2.yaml diff --git a/mysql-galera/pxc-node3.yaml b/storage/mysql-galera/pxc-node3.yaml similarity index 100% rename from mysql-galera/pxc-node3.yaml rename to storage/mysql-galera/pxc-node3.yaml diff --git a/redis/README.md b/storage/redis/README.md similarity index 77% rename from redis/README.md rename to storage/redis/README.md index 7996e8a1..c030887e 100644 --- a/redis/README.md +++ b/storage/redis/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/redis/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). @@ -38,7 +33,7 @@ The following document describes the deployment of a reliable, multi-node Redis ### Prerequisites -This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting started](../../docs/getting-started-guides/) for installation instructions for your platform. +This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting started](../../../docs/getting-started-guides/) for installation instructions for your platform. ### A note for the impatient @@ -46,7 +41,7 @@ This is a somewhat long tutorial. If you want to jump straight to the "do it no ### Turning up an initial master/sentinel pod. -A [_Pod_](../../docs/user-guide/pods.md) is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. 
+A [_Pod_](../../../docs/user-guide/pods.md) is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. We will use the shared network namespace to bootstrap our Redis cluster. In particular, the very first sentinel needs to know how to find the master (subsequent sentinels just ask the first sentinel). Because all containers in a Pod share a network namespace, the sentinel can simply look at ```$(hostname -i):6379```. @@ -56,12 +51,12 @@ Here is the config for the initial master and sentinel pod: [redis-master.yaml]( Create this master as follows: ```sh -kubectl create -f examples/redis/redis-master.yaml +kubectl create -f examples/storage/redis/redis-master.yaml ``` ### Turning up a sentinel service -In Kubernetes a [_Service_](../../docs/user-guide/services.md) describes a set of Pods that perform the same task. For example, the set of nodes in a Cassandra cluster, or even the single node we created above. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods (or the single Pod we've already created) available via the Kubernetes API. +In Kubernetes a [_Service_](../../../docs/user-guide/services.md) describes a set of Pods that perform the same task. For example, the set of nodes in a Cassandra cluster, or even the single node we created above. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods (or the single Pod we've already created) available via the Kubernetes API. In Redis, we will use a Kubernetes Service to provide discoverable endpoints for the Redis sentinels in the cluster. From the sentinels, Redis clients can find the master, and then the slaves and other relevant info for the cluster. This enables new members to join the cluster when failures occur. @@ -70,14 +65,14 @@ Here is the definition of the sentinel service: [redis-sentinel-service.yaml](re Create this service: ```sh -kubectl create -f examples/redis/redis-sentinel-service.yaml +kubectl create -f examples/storage/redis/redis-sentinel-service.yaml ``` ### Turning up replicated redis servers So far, what we have done is pretty manual, and not very fault-tolerant. If the ```redis-master``` pod that we previously created is destroyed for some reason (e.g. a machine dying) our Redis service goes away with it. -In Kubernetes a [_Replication Controller_](../../docs/user-guide/replication-controller.md) is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of it's set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with it's desired state. +In Kubernetes a [_Replication Controller_](../../../docs/user-guide/replication-controller.md) is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state.
Replication Controllers will "adopt" existing pods that match their selector query, so let's create a Replication Controller with a single replica to adopt our existing Redis server. Here is the replication controller config: [redis-controller.yaml](redis-controller.yaml) @@ -86,7 +81,7 @@ The bulk of this controller config is actually identical to the redis-master pod Create this controller: ```sh -kubectl create -f examples/redis/redis-controller.yaml +kubectl create -f examples/storage/redis/redis-controller.yaml ``` We'll do the same thing for the sentinel. Here is the controller config: [redis-sentinel-controller.yaml](redis-sentinel-controller.yaml) @@ -94,7 +89,7 @@ We'll do the same thing for the sentinel. Here is the controller config: [redis We create it as follows: ```sh -kubectl create -f examples/redis/redis-sentinel-controller.yaml +kubectl create -f examples/storage/redis/redis-sentinel-controller.yaml ``` ### Scale our replicated pods @@ -142,16 +137,16 @@ For those of you who are impatient, here is the summary of commands we ran in th ``` # Create a bootstrap master -kubectl create -f examples/redis/redis-master.yaml +kubectl create -f examples/storage/redis/redis-master.yaml # Create a service to track the sentinels -kubectl create -f examples/redis/redis-sentinel-service.yaml +kubectl create -f examples/storage/redis/redis-sentinel-service.yaml # Create a replication controller for redis servers -kubectl create -f examples/redis/redis-controller.yaml +kubectl create -f examples/storage/redis/redis-controller.yaml # Create a replication controller for redis sentinels -kubectl create -f examples/redis/redis-sentinel-controller.yaml +kubectl create -f examples/storage/redis/redis-sentinel-controller.yaml # Scale both replication controllers kubectl scale rc redis --replicas=3 @@ -163,5 +158,5 @@ kubectl delete pods redis-master -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/redis/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/redis/README.md?pixel)]() diff --git a/redis/image/Dockerfile b/storage/redis/image/Dockerfile similarity index 100% rename from redis/image/Dockerfile rename to storage/redis/image/Dockerfile diff --git a/redis/image/redis-master.conf b/storage/redis/image/redis-master.conf similarity index 100% rename from redis/image/redis-master.conf rename to storage/redis/image/redis-master.conf diff --git a/redis/image/redis-slave.conf b/storage/redis/image/redis-slave.conf similarity index 100% rename from redis/image/redis-slave.conf rename to storage/redis/image/redis-slave.conf diff --git a/redis/image/run.sh b/storage/redis/image/run.sh similarity index 100% rename from redis/image/run.sh rename to storage/redis/image/run.sh diff --git a/redis/redis-controller.yaml b/storage/redis/redis-controller.yaml similarity index 100% rename from redis/redis-controller.yaml rename to storage/redis/redis-controller.yaml diff --git a/redis/redis-master.yaml b/storage/redis/redis-master.yaml similarity index 100% rename from redis/redis-master.yaml rename to storage/redis/redis-master.yaml diff --git a/redis/redis-proxy.yaml b/storage/redis/redis-proxy.yaml similarity index 100% rename from redis/redis-proxy.yaml rename to storage/redis/redis-proxy.yaml diff --git a/redis/redis-sentinel-controller.yaml b/storage/redis/redis-sentinel-controller.yaml similarity index 100% rename from redis/redis-sentinel-controller.yaml rename to storage/redis/redis-sentinel-controller.yaml 
diff --git a/redis/redis-sentinel-service.yaml b/storage/redis/redis-sentinel-service.yaml similarity index 100% rename from redis/redis-sentinel-service.yaml rename to storage/redis/redis-sentinel-service.yaml diff --git a/rethinkdb/README.md b/storage/rethinkdb/README.md similarity index 90% rename from rethinkdb/README.md rename to storage/rethinkdb/README.md index 80317de4..11b41510 100644 --- a/rethinkdb/README.md +++ b/storage/rethinkdb/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/rethinkdb/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). @@ -50,7 +45,7 @@ Rethinkdb will discover its peer using endpoints provided by kubernetes service, so first create a service so the following pod can query its endpoint ```sh -$kubectl create -f examples/rethinkdb/driver-service.yaml +$kubectl create -f examples/storage/rethinkdb/driver-service.yaml ``` check out: @@ -67,7 +62,7 @@ rethinkdb-driver 10.0.27.114 28015/TCP db=rethinkdb start the first server in the cluster ```sh -$kubectl create -f examples/rethinkdb/rc.yaml +$kubectl create -f examples/storage/rethinkdb/rc.yaml ``` Actually, you can start as many servers as you want at one time; just modify the `replicas` in `rc.yaml` @@ -110,8 +105,8 @@ Admin You need a separate pod (labeled as role:admin) to access Web Admin UI ```sh -kubectl create -f examples/rethinkdb/admin-pod.yaml -kubectl create -f examples/rethinkdb/admin-service.yaml +kubectl create -f examples/storage/rethinkdb/admin-pod.yaml +kubectl create -f examples/storage/rethinkdb/admin-service.yaml ``` find the service @@ -153,11 +148,11 @@ since the ui is not stateless when playing with Web Admin UI will cause `Connect **BTW** * `gen_pod.sh` is used to generate pod templates for my local cluster, -the generated pods which is using `nodeSelector` to force k8s to schedule containers to my designate nodes, for I need to access persistent data on my host dirs. Note that one needs to label the node before 'nodeSelector' can work, see this [tutorial](../../docs/user-guide/node-selection/) +the generated pods use `nodeSelector` to force k8s to schedule containers to my designated nodes, as I need to access persistent data on my host dirs.
Note that one needs to label the node before 'nodeSelector' can work, see this [tutorial](../../../docs/user-guide/node-selection/) * see [antmanler/rethinkdb-k8s](https://github.com/antmanler/rethinkdb-k8s) for detail -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/rethinkdb/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/rethinkdb/README.md?pixel)]() diff --git a/rethinkdb/admin-pod.yaml b/storage/rethinkdb/admin-pod.yaml similarity index 100% rename from rethinkdb/admin-pod.yaml rename to storage/rethinkdb/admin-pod.yaml diff --git a/rethinkdb/admin-service.yaml b/storage/rethinkdb/admin-service.yaml similarity index 100% rename from rethinkdb/admin-service.yaml rename to storage/rethinkdb/admin-service.yaml diff --git a/rethinkdb/driver-service.yaml b/storage/rethinkdb/driver-service.yaml similarity index 100% rename from rethinkdb/driver-service.yaml rename to storage/rethinkdb/driver-service.yaml diff --git a/rethinkdb/gen-pod.sh b/storage/rethinkdb/gen-pod.sh similarity index 100% rename from rethinkdb/gen-pod.sh rename to storage/rethinkdb/gen-pod.sh diff --git a/rethinkdb/image/Dockerfile b/storage/rethinkdb/image/Dockerfile similarity index 100% rename from rethinkdb/image/Dockerfile rename to storage/rethinkdb/image/Dockerfile diff --git a/rethinkdb/image/run.sh b/storage/rethinkdb/image/run.sh similarity index 100% rename from rethinkdb/image/run.sh rename to storage/rethinkdb/image/run.sh diff --git a/rethinkdb/rc.yaml b/storage/rethinkdb/rc.yaml similarity index 100% rename from rethinkdb/rc.yaml rename to storage/rethinkdb/rc.yaml diff --git a/vitess/README.md b/storage/vitess/README.md similarity index 91% rename from vitess/README.md rename to storage/vitess/README.md index 59316df5..d9051f0b 100644 --- a/vitess/README.md +++ b/storage/vitess/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/vitess/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). @@ -42,7 +37,7 @@ Kubernetes as simple as launching more pods. The example brings up a database with 2 shards, and then runs a pool of [sharded guestbook](https://github.com/youtube/vitess/tree/master/examples/kubernetes/guestbook) pods. The guestbook app was ported from the original -[guestbook](../../examples/guestbook-go/) +[guestbook](../../../examples/guestbook-go/) example found elsewhere in this tree, modified to use Vitess as the backend. For a more detailed, step-by-step explanation of this example setup, see the @@ -54,17 +49,17 @@ You'll need to install [Go 1.4+](https://golang.org/doc/install) to build `vtctlclient`, the command-line admin tool for Vitess. We also assume you have a running Kubernetes cluster with `kubectl` pointing to -it by default. See the [Getting Started guides](../../docs/getting-started-guides/) +it by default. See the [Getting Started guides](../../../docs/getting-started-guides/) for how to get to that point. Note that your Kubernetes cluster needs to have enough resources (CPU+RAM) to schedule all the pods. By default, this example requires a cluster-wide total of at least 6 virtual CPUs and 10GiB RAM. 
You can tune these requirements in the -[resource limits](../../docs/user-guide/compute-resources.md) +[resource limits](../../../docs/user-guide/compute-resources.md) section of each YAML file. Lastly, you need to open ports 30000-30001 (for the Vitess admin daemon) and 80 (for the guestbook app) in your firewall. See the -[Services and Firewalls](../../docs/user-guide/services-firewalls.md) +[Services and Firewalls](../../../docs/user-guide/services-firewalls.md) guide for examples of how to do that. ### Configure site-local settings @@ -143,5 +138,5 @@ You may also want to remove any firewall rules you created. -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/vitess/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/storage/vitess/README.md?pixel)]() diff --git a/vitess/configure.sh b/storage/vitess/configure.sh similarity index 100% rename from vitess/configure.sh rename to storage/vitess/configure.sh diff --git a/vitess/create_test_table.sql b/storage/vitess/create_test_table.sql similarity index 100% rename from vitess/create_test_table.sql rename to storage/vitess/create_test_table.sql diff --git a/vitess/env.sh b/storage/vitess/env.sh similarity index 100% rename from vitess/env.sh rename to storage/vitess/env.sh diff --git a/vitess/etcd-controller-template.yaml b/storage/vitess/etcd-controller-template.yaml similarity index 100% rename from vitess/etcd-controller-template.yaml rename to storage/vitess/etcd-controller-template.yaml diff --git a/vitess/etcd-down.sh b/storage/vitess/etcd-down.sh similarity index 100% rename from vitess/etcd-down.sh rename to storage/vitess/etcd-down.sh diff --git a/vitess/etcd-service-template.yaml b/storage/vitess/etcd-service-template.yaml similarity index 100% rename from vitess/etcd-service-template.yaml rename to storage/vitess/etcd-service-template.yaml diff --git a/vitess/etcd-up.sh b/storage/vitess/etcd-up.sh similarity index 100% rename from vitess/etcd-up.sh rename to storage/vitess/etcd-up.sh diff --git a/vitess/guestbook-controller.yaml b/storage/vitess/guestbook-controller.yaml similarity index 100% rename from vitess/guestbook-controller.yaml rename to storage/vitess/guestbook-controller.yaml diff --git a/vitess/guestbook-down.sh b/storage/vitess/guestbook-down.sh similarity index 100% rename from vitess/guestbook-down.sh rename to storage/vitess/guestbook-down.sh diff --git a/vitess/guestbook-service.yaml b/storage/vitess/guestbook-service.yaml similarity index 100% rename from vitess/guestbook-service.yaml rename to storage/vitess/guestbook-service.yaml diff --git a/vitess/guestbook-up.sh b/storage/vitess/guestbook-up.sh similarity index 100% rename from vitess/guestbook-up.sh rename to storage/vitess/guestbook-up.sh diff --git a/vitess/vitess-down.sh b/storage/vitess/vitess-down.sh similarity index 100% rename from vitess/vitess-down.sh rename to storage/vitess/vitess-down.sh diff --git a/vitess/vitess-up.sh b/storage/vitess/vitess-up.sh similarity index 100% rename from vitess/vitess-up.sh rename to storage/vitess/vitess-up.sh diff --git a/vitess/vtctld-controller-template.yaml b/storage/vitess/vtctld-controller-template.yaml similarity index 100% rename from vitess/vtctld-controller-template.yaml rename to storage/vitess/vtctld-controller-template.yaml diff --git a/vitess/vtctld-down.sh b/storage/vitess/vtctld-down.sh similarity index 100% rename from vitess/vtctld-down.sh rename to storage/vitess/vtctld-down.sh diff --git 
a/vitess/vtctld-service.yaml b/storage/vitess/vtctld-service.yaml similarity index 100% rename from vitess/vtctld-service.yaml rename to storage/vitess/vtctld-service.yaml diff --git a/vitess/vtctld-up.sh b/storage/vitess/vtctld-up.sh similarity index 100% rename from vitess/vtctld-up.sh rename to storage/vitess/vtctld-up.sh diff --git a/vitess/vtgate-controller-template.yaml b/storage/vitess/vtgate-controller-template.yaml similarity index 100% rename from vitess/vtgate-controller-template.yaml rename to storage/vitess/vtgate-controller-template.yaml diff --git a/vitess/vtgate-down.sh b/storage/vitess/vtgate-down.sh similarity index 100% rename from vitess/vtgate-down.sh rename to storage/vitess/vtgate-down.sh diff --git a/vitess/vtgate-service.yaml b/storage/vitess/vtgate-service.yaml similarity index 100% rename from vitess/vtgate-service.yaml rename to storage/vitess/vtgate-service.yaml diff --git a/vitess/vtgate-up.sh b/storage/vitess/vtgate-up.sh similarity index 100% rename from vitess/vtgate-up.sh rename to storage/vitess/vtgate-up.sh diff --git a/vitess/vttablet-down.sh b/storage/vitess/vttablet-down.sh similarity index 100% rename from vitess/vttablet-down.sh rename to storage/vitess/vttablet-down.sh diff --git a/vitess/vttablet-pod-template.yaml b/storage/vitess/vttablet-pod-template.yaml similarity index 100% rename from vitess/vttablet-pod-template.yaml rename to storage/vitess/vttablet-pod-template.yaml diff --git a/vitess/vttablet-up.sh b/storage/vitess/vttablet-up.sh similarity index 100% rename from vitess/vttablet-up.sh rename to storage/vitess/vttablet-up.sh diff --git a/aws_ebs/README.md b/volumes/aws_ebs/README.md similarity index 86% rename from aws_ebs/README.md rename to volumes/aws_ebs/README.md index 970c5f04..bd3123cb 100644 --- a/aws_ebs/README.md +++ b/volumes/aws_ebs/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/aws_ebs/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). @@ -48,7 +43,7 @@ Create a volume in the same region as your node. Add your volume information in the pod description file aws-ebs-web.yaml then create the pod: ```shell - $ kubectl create -f examples/aws_ebs/aws-ebs-web.yaml + $ kubectl create -f examples/volumes/aws_ebs/aws-ebs-web.yaml ``` Add some data to the volume if it is empty: @@ -66,5 +61,5 @@ You should now be able to query your web server: -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/aws_ebs/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/aws_ebs/README.md?pixel)]() diff --git a/aws_ebs/aws-ebs-web.yaml b/volumes/aws_ebs/aws-ebs-web.yaml similarity index 100% rename from aws_ebs/aws-ebs-web.yaml rename to volumes/aws_ebs/aws-ebs-web.yaml diff --git a/azure_file/README.md b/volumes/azure_file/README.md similarity index 85% rename from azure_file/README.md rename to volumes/azure_file/README.md index 9b891bb4..f6b9bc71 100644 --- a/azure_file/README.md +++ b/volumes/azure_file/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/azure_file/README.md).
- Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). @@ -53,7 +48,7 @@ In the pod, you need to provide the following information: Create the secret: ```console - # kubectl create -f examples/azure_file/secret/azure-secret.yaml + # kubectl create -f examples/volumes/azure_file/secret/azure-secret.yaml ``` You should see the account name and key from `kubectl get secret` @@ -61,9 +56,9 @@ You should see the account name and key from `kubectl get secret` Then create the Pod: ```console - # kubectl create -f examples/azure_file/azure.yaml + # kubectl create -f examples/volumes/azure_file/azure.yaml ``` -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/azure_file/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/azure_file/README.md?pixel)]() diff --git a/azure_file/azure.yaml b/volumes/azure_file/azure.yaml similarity index 100% rename from azure_file/azure.yaml rename to volumes/azure_file/azure.yaml diff --git a/azure_file/secret/azure-secret.yaml b/volumes/azure_file/secret/azure-secret.yaml similarity index 100% rename from azure_file/secret/azure-secret.yaml rename to volumes/azure_file/secret/azure-secret.yaml diff --git a/cephfs/README.md b/volumes/cephfs/README.md similarity index 85% rename from cephfs/README.md rename to volumes/cephfs/README.md index 24dd017f..f070bd7d 100644 --- a/cephfs/README.md +++ b/volumes/cephfs/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/cephfs/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). 
@@ -55,12 +50,12 @@ Once you have installed Ceph and a Kubernetes cluster, you can create a pod base Here are the commands: ```console - # kubectl create -f examples/cephfs/cephfs.yaml + # kubectl create -f examples/volumes/cephfs/cephfs.yaml # create a secret if you want to use Ceph secret instead of secret file - # kubectl create -f examples/cephfs/secret/ceph-secret.yaml + # kubectl create -f examples/volumes/cephfs/secret/ceph-secret.yaml - # kubectl create -f examples/cephfs/cephfs-with-secret.yaml + # kubectl create -f examples/volumes/cephfs/cephfs-with-secret.yaml # kubectl get pods ``` @@ -68,5 +63,5 @@ Here are the commands: -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/cephfs/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/cephfs/README.md?pixel)]() diff --git a/cephfs/cephfs-with-secret.yaml b/volumes/cephfs/cephfs-with-secret.yaml similarity index 100% rename from cephfs/cephfs-with-secret.yaml rename to volumes/cephfs/cephfs-with-secret.yaml diff --git a/cephfs/cephfs.yaml b/volumes/cephfs/cephfs.yaml similarity index 100% rename from cephfs/cephfs.yaml rename to volumes/cephfs/cephfs.yaml diff --git a/cephfs/secret/ceph-secret.yaml b/volumes/cephfs/secret/ceph-secret.yaml similarity index 100% rename from cephfs/secret/ceph-secret.yaml rename to volumes/cephfs/secret/ceph-secret.yaml diff --git a/fibre_channel/README.md b/volumes/fibre_channel/README.md similarity index 91% rename from fibre_channel/README.md rename to volumes/fibre_channel/README.md index e76063a9..aeec67f3 100644 --- a/fibre_channel/README.md +++ b/volumes/fibre_channel/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/fibre_channel/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). @@ -49,7 +44,7 @@ kubectl create -f ./your_new_pod.json Here is my command and output: ```console -# kubectl create -f examples/fibre_channel/fc.yaml +# kubectl create -f examples/volumes/fibre_channel/fc.yaml # kubectl get pods NAME READY STATUS RESTARTS AGE fcpd 2/2 Running 0 10m @@ -74,5 +69,5 @@ CONTAINER ID IMAGE COMMAND C ``` -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/fibre_channel/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/fibre_channel/README.md?pixel)]() diff --git a/fibre_channel/fc.yaml b/volumes/fibre_channel/fc.yaml similarity index 100% rename from fibre_channel/fc.yaml rename to volumes/fibre_channel/fc.yaml diff --git a/flexvolume/README.md b/volumes/flexvolume/README.md similarity index 92% rename from flexvolume/README.md rename to volumes/flexvolume/README.md index 7b3b2fb7..9998494f 100644 --- a/flexvolume/README.md +++ b/volumes/flexvolume/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/flexvolume/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). 
@@ -114,5 +109,5 @@ In addition to the flags specified by the user in the Options field of the FlexV See [nginx.yaml](nginx.yaml) for a quick example on how to use Flexvolume in a pod. -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/flexvolume/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/flexvolume/README.md?pixel)]() diff --git a/flexvolume/lvm b/volumes/flexvolume/lvm similarity index 100% rename from flexvolume/lvm rename to volumes/flexvolume/lvm diff --git a/flexvolume/nginx.yaml b/volumes/flexvolume/nginx.yaml similarity index 100% rename from flexvolume/nginx.yaml rename to volumes/flexvolume/nginx.yaml diff --git a/flocker/README.md b/volumes/flocker/README.md similarity index 96% rename from flocker/README.md rename to volumes/flocker/README.md index 51048648..7361351e 100644 --- a/flocker/README.md +++ b/volumes/flocker/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/flocker/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). @@ -101,7 +96,7 @@ The following *volume* spec from the [example pod](flocker-pod.yml) illustrates Use `kubectl` to create the pod. ```sh -$ kubectl create -f examples/flocker/flocker-pod.yml +$ kubectl create -f examples/volumes/flocker/flocker-pod.yml ``` You should now verify that the pod is running and determine its IP address: @@ -145,5 +140,5 @@ Read more about the [Flocker Cluster Architecture](https://docs.clusterhq.com/en To see a demo example of using Kubernetes and Flocker, visit [Flocker's blog post on High Availability with Kubernetes and Flocker](https://clusterhq.com/2015/12/22/ha-demo-kubernetes-flocker/) -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/flocker/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/flocker/README.md?pixel)]() diff --git a/flocker/flocker-pod-with-rc.yml b/volumes/flocker/flocker-pod-with-rc.yml similarity index 100% rename from flocker/flocker-pod-with-rc.yml rename to volumes/flocker/flocker-pod-with-rc.yml diff --git a/flocker/flocker-pod.yml b/volumes/flocker/flocker-pod.yml similarity index 100% rename from flocker/flocker-pod.yml rename to volumes/flocker/flocker-pod.yml diff --git a/glusterfs/README.md b/volumes/glusterfs/README.md similarity index 90% rename from glusterfs/README.md rename to volumes/glusterfs/README.md index 8f3937f3..2cdc5b1f 100644 --- a/glusterfs/README.md +++ b/volumes/glusterfs/README.md @@ -18,11 +18,6 @@ If you are using a released version of Kubernetes, you should refer to the docs that go with that version. - - -The latest release of this document can be found -[here](http://releases.k8s.io/release-1.3/examples/glusterfs/README.md). - Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io).
diff --git a/glusterfs/README.md b/volumes/glusterfs/README.md
similarity index 90%
rename from glusterfs/README.md
rename to volumes/glusterfs/README.md
index 8f3937f3..2cdc5b1f 100644
--- a/glusterfs/README.md
+++ b/volumes/glusterfs/README.md
@@ -18,11 +18,6 @@
 If you are using a released version of Kubernetes, you should
 refer to the docs that go with that version.
-
-
-The latest release of this document can be found
-[here](http://releases.k8s.io/release-1.3/examples/glusterfs/README.md).
-
 Documentation for other releases can be found at
 [releases.k8s.io](http://releases.k8s.io).
@@ -65,7 +60,7 @@ The "IP" field should be filled with the address of a node in the Glusterfs serv
 Create the endpoints,

 ```sh
-$ kubectl create -f examples/glusterfs/glusterfs-endpoints.json
+$ kubectl create -f examples/volumes/glusterfs/glusterfs-endpoints.json
 ```

 You can verify that the endpoints are successfully created by running
@@ -81,7 +76,7 @@ We also need to create a service for these endpoints, so that the endpoints will be
 Use this command to create the service:

 ```sh
-$ kubectl create -f examples/glusterfs/glusterfs-service.json
+$ kubectl create -f examples/volumes/glusterfs/glusterfs-service.json
 ```

@@ -109,7 +104,7 @@ The parameters are explained as follows.
 Create a pod that has a container using a Glusterfs volume,

 ```sh
-$ kubectl create -f examples/glusterfs/glusterfs-pod.json
+$ kubectl create -f examples/volumes/glusterfs/glusterfs-pod.json
 ```

 You can verify that the pod is running:
@@ -134,5 +129,5 @@ You may also run `docker ps` on the host to see the actual container.

-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/glusterfs/README.md?pixel)]()
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/glusterfs/README.md?pixel)]()
diff --git a/glusterfs/glusterfs-endpoints.json b/volumes/glusterfs/glusterfs-endpoints.json
similarity index 100%
rename from glusterfs/glusterfs-endpoints.json
rename to volumes/glusterfs/glusterfs-endpoints.json
diff --git a/glusterfs/glusterfs-pod.json b/volumes/glusterfs/glusterfs-pod.json
similarity index 100%
rename from glusterfs/glusterfs-pod.json
rename to volumes/glusterfs/glusterfs-pod.json
diff --git a/glusterfs/glusterfs-service.json b/volumes/glusterfs/glusterfs-service.json
similarity index 100%
rename from glusterfs/glusterfs-service.json
rename to volumes/glusterfs/glusterfs-service.json
diff --git a/iscsi/README.md b/volumes/iscsi/README.md
similarity index 95%
rename from iscsi/README.md
rename to volumes/iscsi/README.md
index 25a734f1..f0868625 100644
--- a/iscsi/README.md
+++ b/volumes/iscsi/README.md
@@ -18,11 +18,6 @@
 If you are using a released version of Kubernetes, you should
 refer to the docs that go with that version.
-
-
-The latest release of this document can be found
-[here](http://releases.k8s.io/release-1.3/examples/iscsi/README.md).
-
 Documentation for other releases can be found at
 [releases.k8s.io](http://releases.k8s.io).
@@ -70,7 +65,7 @@ kubectl create -f ./your_new_pod.yaml
 Here is the example pod created and the expected output:

 ```console
-# kubectl create -f examples/iscsi/iscsi.yaml
+# kubectl create -f examples/volumes/iscsi/iscsi.yaml
 # kubectl get pods
 NAME      READY     STATUS    RESTARTS   AGE
 iscsipd   2/2       RUNNING   0          2m
@@ -120,5 +115,5 @@ Run *docker inspect* and verify the container mounted the host directory into th

-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/iscsi/README.md?pixel)]()
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/iscsi/README.md?pixel)]()
diff --git a/iscsi/iscsi.yaml b/volumes/iscsi/iscsi.yaml
similarity index 100%
rename from iscsi/iscsi.yaml
rename to volumes/iscsi/iscsi.yaml
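As a companion to the glusterfs walkthrough above (whose manifests are JSON), here is the same endpoints-plus-pod pairing sketched in YAML; the node IP and Gluster volume name are illustrative assumptions, not the values in the renamed JSON files:

```yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster
subsets:
- addresses:
  - ip: 10.240.106.152    # illustrative address of a GlusterFS server node
  ports:
  - port: 1               # required by the API; the value itself is unused
---
apiVersion: v1
kind: Pod
metadata:
  name: glusterfs-demo
spec:
  containers:
  - name: glusterfs
    image: kubernetes/pause
    volumeMounts:
    - name: glusterfsvol
      mountPath: /mnt/glusterfs
  volumes:
  - name: glusterfsvol
    glusterfs:
      endpoints: glusterfs-cluster   # name of the Endpoints object above
      path: kube_vol                 # illustrative Gluster volume name
      readOnly: true
```

The pod discovers the Gluster servers through the named Endpoints object; the service created in the walkthrough exists only to keep those endpoints persistent.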
diff --git a/nfs/README.md b/volumes/nfs/README.md
similarity index 84%
rename from nfs/README.md
rename to volumes/nfs/README.md
index a1fa2a84..8fa22fdc 100644
--- a/nfs/README.md
+++ b/volumes/nfs/README.md
@@ -21,7 +21,7 @@ refer to the docs that go with that version.
 The latest release of this document can be found
-[here](http://releases.k8s.io/release-1.2/examples/nfs/README.md).
+[here](http://releases.k8s.io/release-1.2/examples/volumes/nfs/README.md).

 Documentation for other releases can be found at
 [releases.k8s.io](http://releases.k8s.io).
@@ -58,16 +58,16 @@ Note, this example uses an NFS container that doesn't support NFSv4.
 ## tl;dr Quickstart

 ```console
-$ kubectl create -f examples/nfs/provisioner/nfs-server-gce-pv.yaml
-$ kubectl create -f examples/nfs/nfs-server-rc.yaml
-$ kubectl create -f examples/nfs/nfs-server-service.yaml
+$ kubectl create -f examples/volumes/nfs/provisioner/nfs-server-gce-pv.yaml
+$ kubectl create -f examples/volumes/nfs/nfs-server-rc.yaml
+$ kubectl create -f examples/volumes/nfs/nfs-server-service.yaml
 # get the cluster IP of the server using the following command
 $ kubectl describe services nfs-server
 # use the NFS server IP to update nfs-pv.yaml and execute the following
-$ kubectl create -f examples/nfs/nfs-pv.yaml
-$ kubectl create -f examples/nfs/nfs-pvc.yaml
+$ kubectl create -f examples/volumes/nfs/nfs-pv.yaml
+$ kubectl create -f examples/volumes/nfs/nfs-pvc.yaml
 # run a fake backend
-$ kubectl create -f examples/nfs/nfs-busybox-rc.yaml
+$ kubectl create -f examples/volumes/nfs/nfs-busybox-rc.yaml
 # get pod name from this command
 $ kubectl get pod -l name=nfs-busybox
 # use the pod name to check the test file
@@ -94,12 +94,12 @@ Define [the NFS Service and Replication Controller](nfs-server-rc.yaml) and
 The NFS server exports an auto-provisioned persistent volume backed by GCE PD:

 ```console
-$ kubectl create -f examples/nfs/provisioner/nfs-server-gce-pv.yaml
+$ kubectl create -f examples/volumes/nfs/provisioner/nfs-server-gce-pv.yaml
 ```

 ```console
-$ kubectl create -f examples/nfs/nfs-server-rc.yaml
-$ kubectl create -f examples/nfs/nfs-server-service.yaml
+$ kubectl create -f examples/volumes/nfs/nfs-server-rc.yaml
+$ kubectl create -f examples/volumes/nfs/nfs-server-service.yaml
 ```

 The directory contains a dummy `index.html`. Wait until the pod is running
@@ -119,14 +119,14 @@ Replace the invalid IP in the [nfs PV](nfs-pv.yaml).
 (In the future, we'll be able to tie these together using the service names,
 but for now, you have to hardcode the IP.)

-Create the the [persistent volume](../../docs/user-guide/persistent-volumes.md)
+Create the [persistent volume](../../../docs/user-guide/persistent-volumes.md)
 and the persistent volume claim for your NFS server. The persistent volume and
 claim give us an indirection that allows multiple pods to refer to the NFS
 server using a symbolic name rather than the hardcoded server address.

 ```console
-$ kubectl create -f examples/nfs/nfs-pv.yaml
-$ kubectl create -f examples/nfs/nfs-pvc.yaml
+$ kubectl create -f examples/volumes/nfs/nfs-pv.yaml
+$ kubectl create -f examples/volumes/nfs/nfs-pvc.yaml
 ```

 ## Set up the fake backend
@@ -135,7 +135,7 @@ The [NFS busybox controller](nfs-busybox-rc.yaml) updates `index.html` on the
 NFS server every 10 seconds. Let's start that now:

 ```console
-$ kubectl create -f examples/nfs/nfs-busybox-rc.yaml
+$ kubectl create -f examples/volumes/nfs/nfs-busybox-rc.yaml
 ```

 Conveniently, it's also a `busybox` pod, so we can get an early check
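To make the persistent volume indirection above concrete, here is a sketch of the PV/PVC pair that nfs-pv.yaml and nfs-pvc.yaml create. The server IP and storage size are illustrative assumptions; as the README says, you must substitute the real cluster IP of the nfs-server service:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs
spec:
  capacity:
    storage: 1Mi            # illustrative size
  accessModes:
  - ReadWriteMany           # NFS lets many pods mount the volume read-write
  nfs:
    server: 10.0.0.1        # illustrative; replace with the nfs-server cluster IP
    path: "/"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
```

Pods then reference the claim by name (`nfs`) instead of hard-coding the server address, which is the indirection the text describes.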
@@ -165,14 +165,14 @@ volume and runs a simple web server on it.
 Define the pod:

 ```console
-$ kubectl create -f examples/nfs/nfs-web-rc.yaml
+$ kubectl create -f examples/volumes/nfs/nfs-web-rc.yaml
 ```

 This creates two pods, each of which serves the `index.html` from above.
 We can then use a simple service to front it:

 ```console
-kubectl create -f examples/nfs/nfs-web-service.yaml
+kubectl create -f examples/volumes/nfs/nfs-web-service.yaml
 ```

 We can then use the busybox container we launched before to check that `nginx`
@@ -200,5 +200,5 @@ nfs-busybox-w3s4t

-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/nfs/README.md?pixel)]()
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/nfs/README.md?pixel)]()
diff --git a/nfs/nfs-busybox-rc.yaml b/volumes/nfs/nfs-busybox-rc.yaml
similarity index 100%
rename from nfs/nfs-busybox-rc.yaml
rename to volumes/nfs/nfs-busybox-rc.yaml
diff --git a/nfs/nfs-data/Dockerfile b/volumes/nfs/nfs-data/Dockerfile
similarity index 100%
rename from nfs/nfs-data/Dockerfile
rename to volumes/nfs/nfs-data/Dockerfile
diff --git a/nfs/nfs-data/README.md b/volumes/nfs/nfs-data/README.md
similarity index 84%
rename from nfs/nfs-data/README.md
rename to volumes/nfs/nfs-data/README.md
index a23032af..af110708 100644
--- a/nfs/nfs-data/README.md
+++ b/volumes/nfs/nfs-data/README.md
@@ -18,11 +18,6 @@
 If you are using a released version of Kubernetes, you should
 refer to the docs that go with that version.
-
-
-The latest release of this document can be found
-[here](http://releases.k8s.io/release-1.3/examples/nfs/nfs-data/README.md).
-
 Documentation for other releases can be found at
 [releases.k8s.io](http://releases.k8s.io).
@@ -43,5 +38,5 @@ Available as `gcr.io/google-samples/nfs-server`

-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/nfs/nfs-data/README.md?pixel)]()
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/nfs/nfs-data/README.md?pixel)]()
diff --git a/nfs/nfs-data/index.html b/volumes/nfs/nfs-data/index.html
similarity index 100%
rename from nfs/nfs-data/index.html
rename to volumes/nfs/nfs-data/index.html
diff --git a/nfs/nfs-data/run_nfs.sh b/volumes/nfs/nfs-data/run_nfs.sh
similarity index 100%
rename from nfs/nfs-data/run_nfs.sh
rename to volumes/nfs/nfs-data/run_nfs.sh
diff --git a/nfs/nfs-pv.png b/volumes/nfs/nfs-pv.png
similarity index 100%
rename from nfs/nfs-pv.png
rename to volumes/nfs/nfs-pv.png
diff --git a/nfs/nfs-pv.yaml b/volumes/nfs/nfs-pv.yaml
similarity index 100%
rename from nfs/nfs-pv.yaml
rename to volumes/nfs/nfs-pv.yaml
diff --git a/nfs/nfs-pvc.yaml b/volumes/nfs/nfs-pvc.yaml
similarity index 100%
rename from nfs/nfs-pvc.yaml
rename to volumes/nfs/nfs-pvc.yaml
diff --git a/nfs/nfs-server-rc.yaml b/volumes/nfs/nfs-server-rc.yaml
similarity index 100%
rename from nfs/nfs-server-rc.yaml
rename to volumes/nfs/nfs-server-rc.yaml
diff --git a/nfs/nfs-server-service.yaml b/volumes/nfs/nfs-server-service.yaml
similarity index 100%
rename from nfs/nfs-server-service.yaml
rename to volumes/nfs/nfs-server-service.yaml
diff --git a/nfs/nfs-web-rc.yaml b/volumes/nfs/nfs-web-rc.yaml
similarity index 100%
rename from nfs/nfs-web-rc.yaml
rename to volumes/nfs/nfs-web-rc.yaml
diff --git a/nfs/nfs-web-service.yaml b/volumes/nfs/nfs-web-service.yaml
similarity index 100%
rename from nfs/nfs-web-service.yaml
rename to volumes/nfs/nfs-web-service.yaml
diff --git a/nfs/provisioner/nfs-server-gce-pv.yaml b/volumes/nfs/provisioner/nfs-server-gce-pv.yaml
similarity index 100%
rename from nfs/provisioner/nfs-server-gce-pv.yaml
rename to volumes/nfs/provisioner/nfs-server-gce-pv.yaml
diff --git a/rbd/README.md b/volumes/rbd/README.md
similarity index 90%
rename from rbd/README.md
rename to volumes/rbd/README.md
index ca7cbcc5..5620b3b2 100644
--- a/rbd/README.md
+++ b/volumes/rbd/README.md
@@ -18,11 +18,6 @@
 If you are using a released version of Kubernetes, you should
 refer to the docs that go with that version.
-
-
-The latest release of this document can be found
-[here](http://releases.k8s.io/release-1.3/examples/rbd/README.md).
-
 Documentation for other releases can be found at
 [releases.k8s.io](http://releases.k8s.io).
@@ -65,7 +60,7 @@ QVFBTWdYaFZ3QkNlRGhBQTlubFBhRnlmVVNhdEdENGRyRldEdlE9PQ==
 An example yaml is provided [here](secret/ceph-secret.yaml). Then post the secret through ```kubectl``` with the following command.

 ```console
- # kubectl create -f examples/rbd/secret/ceph-secret.yaml
+ # kubectl create -f examples/volumes/rbd/secret/ceph-secret.yaml
 ```

 # Get started
@@ -73,7 +68,7 @@ An example yaml is provided [here](secret/ceph-secret.yaml). Then post the secre
 Here are my commands:

 ```console
- # kubectl create -f examples/rbd/rbd.json
+ # kubectl create -f examples/volumes/rbd/rbd.json
 # kubectl get pods
 ```

@@ -89,5 +84,5 @@ On the Kubernetes host, I got these in mount output

-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/rbd/README.md?pixel)]()
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/rbd/README.md?pixel)]()
diff --git a/rbd/rbd-with-secret.json b/volumes/rbd/rbd-with-secret.json
similarity index 100%
rename from rbd/rbd-with-secret.json
rename to volumes/rbd/rbd-with-secret.json
diff --git a/rbd/rbd.json b/volumes/rbd/rbd.json
similarity index 100%
rename from rbd/rbd.json
rename to volumes/rbd/rbd.json
diff --git a/rbd/secret/ceph-secret.yaml b/volumes/rbd/secret/ceph-secret.yaml
similarity index 100%
rename from rbd/secret/ceph-secret.yaml
rename to volumes/rbd/secret/ceph-secret.yaml
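For readers skimming the rbd section: the renamed rbd-with-secret.json mounts a Ceph RBD image using the secret created above. A YAML sketch of the same idea, with illustrative monitor, pool, and image names (not the values in the JSON file):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: rbd-demo
spec:
  containers:
  - name: rbd-rw
    image: kubernetes/pause
    volumeMounts:
    - name: rbdpd
      mountPath: /mnt/rbd
  volumes:
  - name: rbdpd
    rbd:
      monitors:
      - 10.16.154.78:6789   # illustrative Ceph monitor address
      pool: kube            # illustrative RADOS pool
      image: foo            # illustrative RBD image name
      user: admin
      secretRef:
        name: ceph-secret   # the secret posted with kubectl above
      fsType: ext4
      readOnly: true
```

The plain rbd.json variant drops `secretRef` and instead relies on a keyring file on the node (the `keyring` field).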