Remove trailing whitespaces

Zoltán Reegn 2018-10-30 19:34:59 +01:00
parent 78097f9de8
commit 412a68d903
50 changed files with 173 additions and 173 deletions

View File

@@ -12,7 +12,7 @@ use the latest and greatest features, current guidelines and best practices,
and to refresh command syntax, output, changed prerequisites, as needed.
|Name | Description | Notable Features Used | Complexity Level|
------------- | ------------- | ------------ | ------------ |
|[Guestbook](guestbook/) | PHP app with Redis | Deployment, Service | Beginner |
|[WordPress](mysql-wordpress-pd/) | WordPress with MySQL | Deployment, Persistent Volume with Claim | Beginner|
|[Cassandra](cassandra/) | Cloud Native Cassandra | Daemon Set, Stateful Set, Replication Controller | Intermediate

View File

@@ -43,7 +43,7 @@ spec:
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- nodetool drain

View File

@@ -56,7 +56,7 @@ else
fi
mv /kubernetes-cassandra.jar /usr/local/apache-cassandra-${CASSANDRA_VERSION}/lib
mv /cassandra-seed.so /etc/cassandra/
mv /cassandra-seed.h /usr/local/lib/include
apt-get -y purge localepurge

View File

@@ -306,7 +306,7 @@ counter_cache_save_period: 7200
saved_caches_directory: /cassandra_data/saved_caches
# commitlog_sync may be either "periodic" or "batch."
#
# When in batch mode, Cassandra won't ack writes until the commit log
# has been fsynced to disk. It will wait
# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
@@ -980,9 +980,9 @@ transparent_data_encryption_options:
key_alias: testing:1
# CBC IV length for AES needs to be 16 bytes (which is also the default size)
# iv_length: 16
key_provider:
- class_name: org.apache.cassandra.security.JKSKeyProvider
parameters:
- keystore: conf/.keystore
keystore_password: cassandra
store_type: JCEKS

View File

@@ -22,7 +22,7 @@
<plugins>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.5.1</version>
<configuration>
<source>1.8</source>
<target>1.8</target>

View File

@@ -49,7 +49,7 @@ public class KubernetesSeedProvider implements SeedProvider {
/**
* Create new seed provider
*
* @param params
*/
public KubernetesSeedProvider(Map<String, String> params) {
@@ -57,7 +57,7 @@ public class KubernetesSeedProvider implements SeedProvider {
/**
* Call Kubernetes API to collect a list of seed providers
*
* @return list of seed providers
*/
public List<InetAddress> getSeeds() {

View File

@@ -35,7 +35,7 @@ data_file_directories:
- target/cassandra/data
disk_access_mode: mmap
seed_provider:
- class_name: io.k8s.cassandra.KubernetesSeedProvider
parameters:
- seeds: "8.4.4.4,8.8.8.8"
endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch

View File

@@ -59,7 +59,7 @@ Use the `examples/guestbook-go/redis-master-controller.json` file to create a [r
```console
me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-node-bz1p
me@kubernetes-node-3:~$ sudo docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS
d5c458dabe50 redis "/entrypoint.sh redis" 5 minutes ago Up 5 minutes
@@ -195,7 +195,7 @@ This is a simple Go `net/http` ([negroni](https://github.com/codegangsta/negroni
redis-master-xx4uv 1/1 Running 0 23m
redis-slave-b6wj4 1/1 Running 0 6m
redis-slave-iai40 1/1 Running 0 6m
...
```
Result: You see a single Redis master, two Redis slaves, and three guestbook pods.

View File

@@ -7,7 +7,7 @@ metadata:
tier: frontend
spec:
# comment or delete the following line if you want to use a LoadBalancer
type: NodePort
# if your cluster supports it, uncomment the following to automatically create
# an external load-balanced IP for the frontend service.
# type: LoadBalancer

View File

@@ -32,7 +32,7 @@ function kill() {
# Create database on second node (idempotently for convenience).
cat <<EOF | sql 1
CREATE DATABASE IF NOT EXISTS foo;
CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);
UPSERT INTO foo.bar VALUES ('Kuber', 'netes'), ('Cockroach', 'DB');
EOF

View File

@@ -6,23 +6,23 @@ metadata:
name: mysql-pod
context: docker-k8s-lab
spec:
containers:
-
name: mysql
image: mysql:latest
env:
-
name: "MYSQL_USER"
value: "mysql"
-
name: "MYSQL_PASSWORD"
value: "mysql"
-
name: "MYSQL_DATABASE"
value: "sample"
-
name: "MYSQL_ROOT_PASSWORD"
value: "supersecret"
ports:
-
containerPort: 3306

View File

@@ -1,15 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: mysql-service
labels:
name: mysql-pod
context: docker-k8s-lab
spec:
ports:
# the port that this service should serve on
- port: 3306
# label keys and values that must match in order to receive traffic for this service
selector:
name: mysql-pod
context: docker-k8s-lab

View File

@@ -19,7 +19,7 @@ spec:
name: app-volume
ports:
- containerPort: 8080
hostPort: 8001
volumes:
- name: app-volume
emptyDir: {}

View File

@@ -82,7 +82,7 @@ export PUBLIC_OPENSHIFT_IP=""
echo "===> Waiting for public IP to be set for the OpenShift Service."
echo "Mistakes in service setup can cause this to loop infinitely if an"
echo "external IP is never set. Ensure that the OpenShift service"
echo "is set to use an external load balancer. This process may take"
echo "a few minutes. Errors can be found in the log file found at:"
echo ${OPENSHIFT_EXAMPLE}/openshift-startup.log
echo "" > ${OPENSHIFT_EXAMPLE}/openshift-startup.log

View File

@@ -13,7 +13,7 @@ spec:
spec:
containers:
- name: server
image: nginx
volumeMounts:
- mountPath: /var/lib/www/html
name: quobytepvc

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: Secret
metadata:
name: ceph-secret-user
type: "kubernetes.io/rbd"
data:
#Please note this value is base64 encoded.
key: QVFBTWdYaFZ3QkNlRGhBQTlubFBhRnlmVVNhdEdENGRyRldEdlE9PQ==

View File

@@ -127,7 +127,7 @@ In order to create a pod, either the creating user or the service account
specified by the pod must be authorized to use a `PodSecurityPolicy` object
that allows the pod, within the pod's namespace.
That authorization is determined by the ability to perform the `use` verb
on a particular `podsecuritypolicies` resource, at the scope of the pod's namespace.
The `use` verb is a special verb that grants access to use a policy while not permitting any
other access.
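For illustration, a minimal RBAC sketch that grants the `use` verb might look like the following; the policy name `example-psp` and the Role/RoleBinding names are hypothetical, not taken from this example:
```yaml
# Sketch: allow the default service account in "default" to use one PSP.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: psp-user                    # hypothetical name
  namespace: default
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['example-psp']    # hypothetical policy name
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: psp-user-binding            # hypothetical name
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: psp-user
subjects:
- kind: ServiceAccount
  name: default
  namespace: default
```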

View File

@@ -1,7 +1,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: selenium-hub
labels:
app: selenium-hub
spec:
@@ -18,7 +18,7 @@ spec:
- name: selenium-hub
image: selenium/hub:3.11
ports:
- containerPort: 4444
resources:
limits:
memory: "1000Mi"

View File

@@ -6,8 +6,8 @@ metadata:
app: selenium-hub
spec:
ports:
- port: 4444
targetPort: 4444
name: port0
selector:
app: selenium-hub

View File

@@ -1,7 +1,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: selenium-node-chrome
labels:
app: selenium-node-chrome
spec:
@@ -18,11 +18,11 @@ spec:
- name: selenium-node-chrome
image: selenium/node-chrome-debug:3.11
ports:
- containerPort: 5900
env:
- name: HUB_PORT_4444_TCP_ADDR
value: "selenium-hub"
- name: HUB_PORT_4444_TCP_PORT
value: "4444"
resources:
limits:

View File

@@ -1,7 +1,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: selenium-node-firefox
labels:
app: selenium-node-firefox
spec:
@@ -14,7 +14,7 @@ spec:
labels:
app: selenium-node-firefox
spec:
volumes:
- name: dshm
emptyDir:
medium: Memory
@@ -22,14 +22,14 @@ spec:
- name: selenium-node-firefox
image: selenium/node-firefox-debug:3.11
ports:
- containerPort: 5900
volumeMounts:
- mountPath: /dev/shm
name: dshm
env:
- name: HUB_PORT_4444_TCP_ADDR
value: "selenium-hub"
- name: HUB_PORT_4444_TCP_PORT
value: "4444"
resources:
limits:

View File

@@ -43,7 +43,7 @@ metadata:
labels:
name: hazelcast
name: hazelcast
spec:
ports:
- port: 5701
selector:
@@ -74,27 +74,27 @@ Deployments will "adopt" existing pods that match their selector query, so let's
```yaml
apiVersion: "apps/v1" # for k8s versions before 1.9.0 use apps/v1beta2 and before 1.8.0 use extensions/v1beta1
kind: Deployment
metadata:
name: hazelcast
labels:
name: hazelcast
spec:
selector:
matchLabels:
name: hazelcast
template:
metadata:
labels:
name: hazelcast
spec:
containers:
- name: hazelcast
image: quay.io/pires/hazelcast-kubernetes:0.8.0
imagePullPolicy: Always
env:
- name: "DNS_DOMAIN"
value: "cluster.local"
ports:
- name: hazelcast
containerPort: 5701
```
@@ -187,7 +187,7 @@ kubectl logs -f hazelcast-4195412960-0tl3w
2017-03-15 09:42:47.253 INFO 7 --- [cached.thread-3] c.hazelcast.nio.tcp.InitConnectionTask : [172.17.0.6]:5701 [someGroup] [3.8] Connecting to /172.17.0.2:5701, timeout: 0, bind-any: true
2017-03-15 09:42:47.262 INFO 7 --- [cached.thread-3] c.h.nio.tcp.TcpIpConnectionManager : [172.17.0.6]:5701 [someGroup] [3.8] Established socket connection between /172.17.0.6:58073 and /172.17.0.2:5701
2017-03-15 09:42:54.260 INFO 7 --- [ration.thread-0] com.hazelcast.system : [172.17.0.6]:5701 [someGroup] [3.8] Cluster version set to 3.8
2017-03-15 09:42:54.262 INFO 7 --- [ration.thread-0] c.h.internal.cluster.ClusterService : [172.17.0.6]:5701 [someGroup] [3.8]
Members [2] {
Member [172.17.0.2]:5701 - 170f6924-7888-442a-9875-ad4d25659a8a

View File

@@ -1,19 +1,19 @@
apiVersion: apps/v1 # for k8s versions before 1.9.0 use apps/v1beta2 and before 1.8.0 use extensions/v1beta1
kind: Deployment
metadata:
name: hazelcast
labels:
name: hazelcast
spec:
selector:
matchLabels:
name: hazelcast
template:
metadata:
labels:
name: hazelcast
spec:
containers:
- name: hazelcast
image: quay.io/pires/hazelcast-kubernetes:3.8_1
imagePullPolicy: Always
@ -24,6 +24,6 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: hazelcast
containerPort: 5701

View File

@@ -4,7 +4,7 @@ metadata:
labels:
name: hazelcast
name: hazelcast
spec:
ports:
- port: 5701
selector:

View File

@@ -40,18 +40,18 @@ When completed, you should be able to connect with a MySQL client to the IP addr
Shown below are examples of using ```kubectl``` from within the ```./examples/storage/mysql-galera``` directory; the status of the launched replication controllers and services can be confirmed:
```
$ kubectl create -f examples/storage/mysql-galera/pxc-cluster-service.yaml
services/pxc-cluster
$ kubectl create -f examples/storage/mysql-galera/pxc-node1.yaml
services/pxc-node1
replicationcontrollers/pxc-node1
$ kubectl create -f examples/storage/mysql-galera/pxc-node2.yaml
services/pxc-node2
replicationcontrollers/pxc-node2
$ kubectl create -f examples/storage/mysql-galera/pxc-node3.yaml
services/pxc-node3
replicationcontrollers/pxc-node3
@@ -100,7 +100,7 @@ pxc-node3-0b5mc
$ kubectl exec pxc-node3-0b5mc -i -t -- mysql -u root -p -h pxc-cluster
Enter password:
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 5
Server version: 5.6.24-72.2-56-log Percona XtraDB Cluster (GPL), Release rel72.2, Revision 43abf03, WSREP version 25.11, wsrep_25.11

View File

@@ -14,7 +14,7 @@
FROM ubuntu:trusty
# add our user and group first to make sure their IDs get assigned
# consistently, regardless of whatever dependencies get added
RUN groupadd -r mysql && useradd -r -g mysql mysql
@@ -22,7 +22,7 @@ ENV PERCONA_XTRADB_VERSION 5.6
ENV MYSQL_VERSION 5.6
ENV TERM linux
RUN apt-get update
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y perl --no-install-recommends && rm -rf /var/lib/apt/lists/*
RUN apt-key adv --keyserver keys.gnupg.net --recv-keys 8507EFA5
@@ -31,7 +31,7 @@ RUN echo "deb http://repo.percona.com/apt trusty main" > /etc/apt/sources.list.d
RUN echo "deb-src http://repo.percona.com/apt trusty main" >> /etc/apt/sources.list.d/percona.list
# the "/var/lib/mysql" stuff here is because the mysql-server
# postinst doesn't have an explicit way to disable the
# mysql_install_db codepath besides having a database already
# "configured" (ie, stuff in /var/lib/mysql/mysql)
# also, we set debconf keys to make APT a little quieter
@@ -42,7 +42,7 @@ RUN { \
&& apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y percona-xtradb-cluster-client-"${MYSQL_VERSION}" \
percona-xtradb-cluster-common-"${MYSQL_VERSION}" percona-xtradb-cluster-server-"${MYSQL_VERSION}" \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /var/lib/mysql && mkdir -p /var/lib/mysql && chown -R mysql:mysql /var/lib/mysql
VOLUME /var/lib/mysql

View File

@@ -14,13 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script does the following:
#
# 1. Sets up database privileges by building an SQL script
# 2. MySQL is initially started with this script a first time
# 3. Modify my.cnf and cluster.cnf to reflect available nodes to join
#
# if NUM_NODES not passed, default to 3
if [ -z "$NUM_NODES" ]; then
@@ -31,15 +31,15 @@ if [ "${1:0:1}" = '-' ]; then
set -- mysqld "$@"
fi
# if the command passed is 'mysqld' via CMD, then begin processing.
if [ "$1" = 'mysqld' ]; then
# read DATADIR from the MySQL config
DATADIR="$("$@" --verbose --help 2>/dev/null | awk '$1 == "datadir" { print $2; exit }')"
# only check if system tables not created from mysql_install_db and permissions
# set with initial SQL script before proceeding to build SQL script
if [ ! -d "$DATADIR/mysql" ]; then
# fail if user didn't supply a root password
if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then
echo >&2 'error: database is uninitialized and MYSQL_ROOT_PASSWORD not set'
echo >&2 ' Did you forget to add -e MYSQL_ROOT_PASSWORD=... ?'
@@ -50,23 +50,23 @@ if [ "$1" = 'mysqld' ]; then
echo 'Running mysql_install_db ...'
mysql_install_db --datadir="$DATADIR"
echo 'Finished mysql_install_db'
# this script will be run once when MySQL first starts to set up
# prior to creating system tables and will ensure proper user permissions
tempSqlFile='/tmp/mysql-first-time.sql'
cat > "$tempSqlFile" <<-EOSQL
DELETE FROM mysql.user ;
CREATE USER 'root'@'%' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ;
GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION ;
EOSQL
if [ "$MYSQL_DATABASE" ]; then
echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" >> "$tempSqlFile"
fi
if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then
echo "CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;" >> "$tempSqlFile"
if [ "$MYSQL_DATABASE" ]; then
echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* TO '$MYSQL_USER'@'%' ;" >> "$tempSqlFile"
fi
@@ -87,11 +87,11 @@ EOSQL
fi
echo 'FLUSH PRIVILEGES ;' >> "$tempSqlFile"
# Add the SQL file to mysqld's command line args
set -- "$@" --init-file="$tempSqlFile"
fi
chown -R mysql:mysql "$DATADIR"
fi
@@ -114,11 +114,11 @@ if [ -n "$GALERA_CLUSTER" ]; then
if [ -n "$WSREP_NODE_ADDRESS" ]; then
sed -i -e "s|^wsrep_node_address=.*$|wsrep_node_address=${WSREP_NODE_ADDRESS}|" /etc/mysql/conf.d/cluster.cnf
fi
# if the string is not defined or it only is 'gcomm://', this means bootstrap
if [ -z "$WSREP_CLUSTER_ADDRESS" -o "$WSREP_CLUSTER_ADDRESS" == "gcomm://" ]; then
# if empty, set to 'gcomm://'
# NOTE: this list does not imply membership.
# It only means "obtain SST and join from one of these..."
if [ -z "$WSREP_CLUSTER_ADDRESS" ]; then
WSREP_CLUSTER_ADDRESS="gcomm://"
@@ -127,7 +127,7 @@ if [ -n "$GALERA_CLUSTER" ]; then
# loop through number of nodes
for NUM in `seq 1 $NUM_NODES`; do
NODE_SERVICE_HOST="PXC_NODE${NUM}_SERVICE_HOST"
# if set
if [ -n "${!NODE_SERVICE_HOST}" ]; then
# if not its own IP, then add it
@@ -149,7 +149,7 @@ if [ -n "$GALERA_CLUSTER" ]; then
done
fi
# WSREP_CLUSTER_ADDRESS is now complete and will be interpolated into the
# cluster address string (wsrep_cluster_address) in the cluster
# configuration file, cluster.cnf
if [ -n "$WSREP_CLUSTER_ADDRESS" -a "$WSREP_CLUSTER_ADDRESS" != "gcomm://" ]; then
@@ -160,5 +160,5 @@ fi
# random server ID needed
sed -i -e "s/^server\-id=.*$/server-id=${RANDOM}/" /etc/mysql/my.cnf
# finally, start mysql
exec "$@"

View File

@@ -11,27 +11,27 @@ spec:
- port: 4444
name: state-snapshot-transfer
- port: 4567
name: replication-traffic
- port: 4568
name: incremental-state-transfer
selector:
node: pxc-node1
---
apiVersion: v1
kind: ReplicationController
metadata:
name: pxc-node1
spec:
replicas: 1
template:
metadata:
labels:
node: pxc-node1
unit: pxc-cluster
spec:
containers:
- resources:
limits:
cpu: 0.3
image: capttofu/percona_xtradb_cluster_5_6:beta
name: pxc-node1
@@ -54,4 +54,4 @@ spec:
- name: MYSQL_PASSWORD
value: mysql
- name: MYSQL_ROOT_PASSWORD
value: c-krit

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: Service
metadata:
name: pxc-node2
labels:
node: pxc-node2
spec:
ports:
@@ -11,28 +11,28 @@ spec:
- port: 4444
name: state-snapshot-transfer
- port: 4567
name: replication-traffic
- port: 4568
name: incremental-state-transfer
selector:
node: pxc-node2
---
apiVersion: v1
kind: ReplicationController
metadata:
name: pxc-node2
spec:
replicas: 1
template:
metadata:
labels:
node: pxc-node2
unit: pxc-cluster
spec:
containers:
- resources:
limits:
cpu: 0.3
image: capttofu/percona_xtradb_cluster_5_6:beta
name: pxc-node2
@@ -55,4 +55,4 @@ spec:
- name: MYSQL_PASSWORD
value: mysql
- name: MYSQL_ROOT_PASSWORD
value: c-krit

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: Service
metadata:
name: pxc-node3
labels:
node: pxc-node3
spec:
ports:
@@ -11,28 +11,28 @@ spec:
- port: 4444
name: state-snapshot-transfer
- port: 4567
name: replication-traffic
- port: 4568
name: incremental-state-transfer
selector:
node: pxc-node3
---
apiVersion: v1
kind: ReplicationController
metadata:
name: pxc-node3
spec:
replicas: 1
template:
metadata:
labels:
node: pxc-node3
unit: pxc-cluster
spec:
containers:
- resources:
limits:
cpu: 0.3
image: capttofu/percona_xtradb_cluster_5_6:beta
name: pxc-node3
@@ -55,4 +55,4 @@ spec:
- name: MYSQL_PASSWORD
value: mysql
- name: MYSQL_ROOT_PASSWORD
value: c-krit

View File

@@ -124,7 +124,7 @@ kubectl scale rc redis --replicas=3
kubectl scale rc redis-sentinel --replicas=3
# Delete the original master pod
# Note: If you are running all the above commands consecutively including this one in a shell script, it may NOT work out. When you run the above commands, let the pods first come up, especially the redis-master pod. Else, the sentinel pods would never be able to know the master redis server and establish a connection with it.
kubectl delete pods redis-master
```

View File

@@ -203,7 +203,7 @@ dir "./"
# network partition slaves automatically try to reconnect to masters
# and resynchronize with them.
#
slaveof %master-ip% %master-port%
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before

View File

@@ -59,7 +59,7 @@ function launchslave() {
echo "Failed to find master."
sleep 60
exit 1
fi
redis-cli -h ${master} INFO
if [[ "$?" == "0" ]]; then
break

View File

@@ -32,7 +32,7 @@ if [[ ${NAME} == "admin" ]]; then
fi
NODE=""
# One needs to label a node with the same key/value pair,
# i.e., 'kubectl label nodes <node-name> name=${2}'
if [[ ! -z "${2-}" ]]; then
NODE="nodeSelector: { name: ${2} }"

View File

@@ -27,7 +27,7 @@ if [[ -n "${KUBERNETES_SERVICE_HOST}" ]]; then
URL="https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}/api/v1/namespaces/${POD_NAMESPACE}/endpoints/rethinkdb-driver"
echo "Endpoint url: ${URL}"
echo "Looking for IPs..."
token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
# try to pick up first different ip from endpoints
IP=$(curl -s ${URL} --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt --header "Authorization: Bearer ${token}" \
| jq -s -r --arg h "${MYHOST}" '.[0].subsets | .[].addresses | [ .[].ip ] | map(select(. != $h)) | .[0]') || exit 1

View File

@@ -1,21 +1,21 @@
# Kubernetes Persistent Volume Plugin For Blob and Managed Disks Samples
This repo contains samples that work with the new Azure persistent volume plugin for Kubernetes. The plugin is expected to ship in the v1.7.2 release and will then become part of Azure ACS.
## What does the plugin do?
1. Provision PVC based on Azure Managed Disks and Blob Disks
2. Perform consistent attach/detach/mount/unmount and format when needed for disks
3. Supports both standard and premium LRS storage accounts.
## Get Started
### Using the Samples
The sequence of events is generally:
1. Create a storage class
2. Create a PVC
3. Create a pod or a replication controller that uses the PVC
```
@@ -30,7 +30,7 @@ kubectl describe pvc {pvc-name}
# you can use the following command to create a pod with specified pvc
kubectl create -f pod-uses-managed-hdd.yaml
```
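As a sketch of step 2 above, a PVC referencing one of the sample storage classes could look like this; the class name `managed-hdd` and the requested size are assumptions, not values taken from the sample files:
```yaml
# Sketch: a PVC bound to a hypothetical "managed-hdd" storage class.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-managed-hdd          # hypothetical name
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: managed-hdd  # assumed to match the sample storage class
  resources:
    requests:
      storage: 5Gi               # assumed size
```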
To verify, inside of the pod/container, you should see something like this:
@@ -42,18 +42,18 @@ $ df -h
## How does it work?
### Managed Disks
The entire experience of managing disks and storage accounts is offloaded to Azure. You can use a PVC (Kubernetes will automatically create a managed disk for you), or you can use an existing disk as a PV in your pods/RCs.
> Note: as a general rule, use PV disks provisioned in the same Azure resource group where the cluster is provisioned.
### Blob Disks
Blob Disks work in two modes, controlled by the *kind* parameter on the storage class.
### Dedicated (default mode)
When the *kind* parameter is set to *dedicated*, K8S will create a new dedicated storage account for this new disk. No other disks will be allowed in this storage account. The account will be removed when the PVC is removed (according to the K8S PVC reclaim policy).
> Note: You can still use existing VHDs; again, the general rule applies: use storage accounts that are part of the cluster resource group.
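A storage class sketch for dedicated mode might look like the following; only the `kind` and `fstype` parameter names come from this README, and everything else is an assumption:
```yaml
# Sketch: a blob-disk storage class in dedicated mode.
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: blob-dedicated       # hypothetical name
provisioner: kubernetes.io/azure-disk
parameters:
  kind: dedicated            # dedicated | shared, per this README
  fstype: ext4               # file system for the disk (default ext4)
```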
@@ -65,14 +65,14 @@ When *kind* parameter is set to *dedicated* K8S will create a new dedicated stor
4. *fstype* the file system of this disk (default *ext4*)
### Shared
PVC: VHDs are created in shared storage accounts in the same resource group as the cluster, as follows:
```
Resource Group
--Storage Account: pvc{unique-hash}001 // created by K8S as it provisioned PVC, all disks are placed in the same blob container
---pvc-xxx-xxx-xxxx.vhd
---pvc-xxx-xxx-xxxx.vhd
--Storage Account: pvc{unique-hash}002..n
---pvc-xxx-xxx-xxxx.vhd
```
@@ -84,11 +84,11 @@ The following rules apply:
4. K8S will create 2 initial accounts (1 standard and 1 premium) to accelerate the provisioning process.
## Additional Notes
The samples assume that you have a cluster with nodes labeled `disktype=blob` for VMs that are using blob disks and `disktype=managed` for VMs that are using managed disks. You can label your nodes or remove the node selector before using the files.
> Note: You cannot attach managed disks to VMs that are not using managed OS disks. This also applies the other way around: no blob disks on VMs that are using managed OS disks.
To label your nodes, use the following command:
```
kubectl label nodes {node-name-here} disktype=blob
```
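With nodes labeled this way, a pod can be pinned to the matching VMs through a node selector, as in this sketch (the pod name and image are placeholders):
```yaml
# Sketch: schedule a pod only onto nodes labeled for blob disks.
apiVersion: v1
kind: Pod
metadata:
  name: pod-on-blob-node   # hypothetical name
spec:
  nodeSelector:
    disktype: blob         # matches the label applied above
  containers:
  - name: app
    image: nginx           # placeholder image
```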

View File

@@ -17,7 +17,7 @@ spec:
- 10.16.154.82:6789
- 10.16.154.83:6789
# by default the path is /, but you can override and mount a specific path of the filesystem by using the path attribute
# path: /some/path/in/side/cephfs
user: admin
secretFile: "/etc/ceph/admin.secret"
readOnly: true

View File

@@ -34,9 +34,9 @@ If you ssh to that machine, you can run `docker ps` to see the actual pod.
```console
# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
090ac457ddc2 kubernetes/pause "/pause" 12 minutes ago Up 12 minutes k8s_fcpd-rw.aae720ec_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_99eb5415
5e2629cf3e7b kubernetes/pause "/pause" 12 minutes ago Up 12 minutes k8s_fcpd-ro.857720dc_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_c0175742
2948683253f7 k8s.gcr.io/pause:0.8.0 "/pause" 12 minutes ago Up 12 minutes k8s_POD.7be6d81d_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_8d9dd7bf
```
## Multipath

View File

@@ -4,7 +4,7 @@ metadata:
name: fc
spec:
containers:
- image: kubernetes/pause
name: fc
volumeMounts:
- name: fc-vol

View File

@@ -47,7 +47,7 @@ More details regarding cluster authentication can be found at the documentation:
```sh
flocker-volumes create -m name=my-flocker-vol -s 10G -n <node-uuid>
# -n or --node= Is the initial primary node for dataset (any unique
# prefix of node uuid, see flocker-volumes list-nodes)
```

View File

@@ -48,16 +48,16 @@ apiVersion: v1
kind: Secret
metadata:
name: chap-secret
type: "kubernetes.io/iscsi-chap"
data:
discovery.sendtargets.auth.username:
discovery.sendtargets.auth.password:
discovery.sendtargets.auth.username_in:
discovery.sendtargets.auth.password_in:
node.session.auth.username:
node.session.auth.password:
node.session.auth.username_in:
node.session.auth.password_in:
```
These keys map to those used by Open-iSCSI initiator. Detailed documents on these keys can be found at [Open-iSCSI](https://github.com/open-iscsi/open-iscsi/blob/master/etc/iscsid.conf)

View File

@@ -3,7 +3,7 @@ apiVersion: v1
kind: Secret
metadata:
name: chap-secret
type: "kubernetes.io/iscsi-chap"
data:
discovery.sendtargets.auth.username: dXNlcg==
discovery.sendtargets.auth.password: ZGVtbw==

View File

@@ -20,5 +20,5 @@ spec:
readOnly: true
chapAuthDiscovery: true
chapAuthSession: true
secretRef:
name: chap-secret

View File

@@ -26,7 +26,7 @@ function start()
chmod 644 $i/index.html
echo "Serving $i"
done
# start rpcbind if it is not started yet
/usr/sbin/rpcinfo 127.0.0.1 > /dev/null; s=$?
if [ $s -ne 0 ]; then

View File

@@ -204,7 +204,7 @@ The following examples assumes that you already have a running Kubernetes cluste
``` bash
$ kubectl get pod pvpod
NAME READY STATUS RESTARTS AGE
pvpod 1/1 Running 0 48m
```
### Using Dynamic Provisioning
@@ -361,7 +361,7 @@ create Portworx volumes out of band and they will be created automatically.
``` bash
$ kubectl get pod pvpod
NAME READY STATUS RESTARTS AGE
pvpod 1/1 Running 0 48m
```

View File

@@ -12,7 +12,7 @@ spec:
volumes:
- name: rbdpd
rbd:
monitors:
- '10.16.154.78:6789'
- '10.16.154.82:6789'
- '10.16.154.83:6789'

View File

@@ -2,6 +2,6 @@ apiVersion: v1
kind: Secret
metadata:
name: ceph-secret
type: "kubernetes.io/rbd"
data:
key: QVFCMTZWMVZvRjVtRXhBQTVrQ1FzN2JCajhWVUxSdzI2Qzg0SEE9PQ==

View File

@@ -13,10 +13,10 @@ This document shows how to configure Kubernetes resources to consume storage fro
This document assumes you are familiar with ScaleIO and have a cluster ready to go. If you are *not familiar* with ScaleIO, please review *Learn how to set up a 3-node* [ScaleIO cluster on Vagrant](https://github.com/codedellemc/labs/tree/master/setup-scaleio-vagrant) and see *General instructions on* [setting up ScaleIO](https://www.emc.com/products-solutions/trial-software-download/scaleio.htm).
For this demonstration, ensure the following:
- The ScaleIO `SDC` component is installed and properly configured on all Kubernetes nodes where deployed pods will consume ScaleIO-backed storage.
- You have a configured ScaleIO gateway that is accessible from the Kubernetes nodes.
## Deploy Kubernetes Secret for ScaleIO
@@ -28,8 +28,8 @@ c2lvdXNlcg==
$> echo -n "sc@l3I0" | base64
c2NAbDNJMA==
```
The previous commands generate base64-encoded values for the username and password.
Remember to generate the credentials for your own environment and copy them into a secret file similar to the following.
File: [secret.yaml](secret.yaml)
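The contents of that secret file are not reproduced in this hunk; a sketch of the expected shape, reusing the example values encoded above, would be (the secret name is an assumption):
```yaml
# Sketch: ScaleIO gateway credentials as a Kubernetes secret.
apiVersion: v1
kind: Secret
metadata:
  name: sio-secret           # hypothetical name
type: kubernetes.io/scaleio
data:
  username: c2lvdXNlcg==     # base64 of "siouser", from the commands above
  password: c2NAbDNJMA==     # base64 of "sc@l3I0", from the commands above
```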
@@ -114,7 +114,7 @@ $> kubectl get pod
NAME READY STATUS RESTARTS AGE
pod-0 1/1 Running 0 33s
```
Or for more detail, use
```
kubectl describe pod pod-0
```
@@ -128,8 +128,8 @@ scinia 252:0 0 8G 0 disk /var/lib/kubelet/pods/135986c7-dcb7-11e6-9f
## StorageClass and Dynamic Provisioning
The ScaleIO volume plugin can also dynamically provision storage to a Kubernetes cluster.
The ScaleIO dynamic provisioner plugin can be used with a `StorageClass` and is identified as `kubernetes.io/scaleio`.
### ScaleIO StorageClass
The ScaleIO dynamic provisioning plugin supports the following StorageClass parameters:
@@ -197,7 +197,7 @@ spec:
Note the `spec:storageClassName` entry, which specifies the name of the previously defined StorageClass `sio-small`.
Next, deploy the PVC file. This step will cause the Kubernetes ScaleIO plugin to create the volume in the storage system.
```
$> kubectl create -f examples/volumes/scaleio/sc-pvc.yaml
```
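The `sc-pvc.yaml` file itself is not shown in this hunk; a minimal PVC of that shape might be the following, where the claim name and size are assumptions:
```yaml
# Sketch: a PVC that triggers dynamic provisioning via the sio-small class.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-sio-small          # hypothetical name
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: sio-small  # the StorageClass defined earlier
  resources:
    requests:
      storage: 8Gi             # assumed size (an 8G scinia device is shown above)
```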
@@ -242,7 +242,7 @@ kubectl get pod
NAME READY STATUS RESTARTS AGE
pod-0 1/1 Running 0 23m
pod-sio-small 1/1 Running 0 5s
```
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/volumes/scaleio/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View File

@@ -4,7 +4,7 @@ metadata:
name: sio-small
provisioner: kubernetes.io/scaleio
parameters:
gateway: https://localhost:443/api
system: scaleio
protectionDomain: pd01
storagePool: pd01

View File

@@ -26,14 +26,14 @@ The StorageOS provider has been pre-configured to use the StorageOS API defaults
API configuration is set by using Kubernetes secrets. The configuration secret supports the following parameters:
* `apiAddress`: The address of the StorageOS API. This is optional and defaults to `tcp://localhost:5705`, which should be correct if the StorageOS container is running using the default settings.
* `apiUsername`: The username to authenticate to the StorageOS API with.
* `apiPassword`: The password to authenticate to the StorageOS API with.
* `apiVersion`: Optional, string value defaulting to `1`. Only set this if requested in StorageOS documentation.
Multiple credentials can be used by creating different secrets.
For Persistent Volumes, secrets must be created in the Pod namespace. Specify the secret name using the `secretName` parameter when attaching existing volumes in Pods or creating new persistent volumes.
For dynamically provisioned volumes using storage classes, the secret can be created in any namespace. Note that you would want this to be an admin-controlled namespace with restricted access to users. Specify the secret namespace as parameter `adminSecretNamespace` and name as parameter `adminSecretName` in storage classes.
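Put together, a configuration secret sketch using these parameters might look like the following; the secret name and type are assumptions, and the example values simply encode the defaults mentioned above:
```yaml
# Sketch: StorageOS API credentials stored as a Kubernetes secret.
apiVersion: v1
kind: Secret
metadata:
  name: storageos-secret                      # hypothetical name
type: kubernetes.io/storageos                 # assumed secret type
data:
  apiAddress: dGNwOi8vbG9jYWxob3N0OjU3MDU=    # base64 of "tcp://localhost:5705"
  apiUsername: c3RvcmFnZW9z                   # base64 of "storageos" (example)
  apiPassword: c3RvcmFnZW9z                   # base64 of "storageos" (example)
```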