Merge pull request #54 from Jim-Lin/master
Update Elasticsearch to 5.6.2
This commit is contained in:
commit
f223c1e9ad
|
|
@ -1,11 +1,11 @@
|
||||||
# Elasticsearch for Kubernetes
|
# Elasticsearch for Kubernetes
|
||||||
|
|
||||||
Kubernetes makes it trivial for anyone to easily build and scale [Elasticsearch](http://www.elasticsearch.org/) clusters. Here, you'll find how to do so.
|
Kubernetes makes it trivial for anyone to easily build and scale [Elasticsearch](http://www.elasticsearch.org/) clusters. Here, you'll find how to do so.
|
||||||
Current Elasticsearch version is `1.7.1`.
|
Current Elasticsearch version is `5.6.2`.
|
||||||
|
|
||||||
[A more robust example that follows Elasticsearch best-practices of separating nodes concern is also available](production_cluster/README.md).
|
[A more robust example that follows Elasticsearch best-practices of separating node concerns is also available](production_cluster/README.md).
|
||||||
|
|
||||||
<img src="http://kubernetes.io/kubernetes/img/warning.png" alt="WARNING" width="25" height="25"> Current pod descriptors use an `emptyDir` for storing data in each data node container. This is meant to be for the sake of simplicity and [should be adapted according to your storage needs](../../docs/design/persistent-storage.md).
|
Current pod descriptors use an `emptyDir` for storing data in each data node container. This is meant to be for the sake of simplicity and [should be adapted according to your storage needs](../../docs/design/persistent-storage.md).
|
||||||
|
|
||||||
## Docker image
|
## Docker image
|
||||||
|
|
||||||
|
|
@ -16,9 +16,9 @@ The [pre-built image](https://github.com/pires/docker-elasticsearch-kubernetes)
|
||||||
Let's kickstart our cluster with 1 instance of Elasticsearch.
|
Let's kickstart our cluster with 1 instance of Elasticsearch.
|
||||||
|
|
||||||
```
|
```
|
||||||
kubectl create -f examples/elasticsearch/service-account.yaml
|
kubectl create -f staging/elasticsearch/service-account.yaml
|
||||||
kubectl create -f examples/elasticsearch/es-svc.yaml
|
kubectl create -f staging/elasticsearch/es-svc.yaml
|
||||||
kubectl create -f examples/elasticsearch/es-rc.yaml
|
kubectl create -f staging/elasticsearch/es-rc.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
Let's see if it worked:
|
Let's see if it worked:
|
||||||
|
|
@ -26,27 +26,38 @@ Let's see if it worked:
|
||||||
```
|
```
|
||||||
$ kubectl get pods
|
$ kubectl get pods
|
||||||
NAME READY STATUS RESTARTS AGE
|
NAME READY STATUS RESTARTS AGE
|
||||||
es-kfymw 1/1 Running 0 7m
|
es-q8q2v 1/1 Running 0 2m
|
||||||
kube-dns-p3v1u 3/3 Running 0 19m
|
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
```
|
||||||
$ kubectl logs es-kfymw
|
$ kubectl logs es-q8q2v
|
||||||
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
|
[2017-10-02T11:39:22,347][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] initializing ...
|
||||||
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
|
[2017-10-02T11:39:22,579][INFO ][o.e.e.NodeEnvironment ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] using [1] data paths, mounts [[/data (/dev/sda1)]], net usable_space [92.5gb], net total_space [94.3gb], spins? [possibly], types [ext4]
|
||||||
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
|
[2017-10-02T11:39:22,579][INFO ][o.e.e.NodeEnvironment ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] heap size [503.6mb], compressed ordinary object pointers [true]
|
||||||
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] version[1.7.1], pid[7], build[b88f43f/2015-07-29T09:54:16Z]
|
[2017-10-02T11:39:22,581][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] node name [ece3d296-dbd3-46a3-b66c-8b4c282610af], node ID [Rc-odsaESxSAnvOBFg4MNA]
|
||||||
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] initializing ...
|
[2017-10-02T11:39:22,582][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] version[5.6.2], pid[9], build[57e20f3/2017-09-23T13:16:45.703Z], OS[Linux/4.4.64+/amd64], JVM[Oracle Corporation/OpenJDK 64-Bit Server VM/1.8.0_131/25.131-b11]
|
||||||
[2015-08-30 10:01:32,110][INFO ][plugins ] [Hammerhead] loaded [cloud-kubernetes], sites []
|
[2017-10-02T11:39:22,583][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] JVM arguments [-XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:+DisableExplicitGC, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -Djdk.io.permissionsUseCanonicalPath=true, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Dlog4j.skipJansi=true, -XX:+HeapDumpOnOutOfMemoryError, -Xms512m, -Xmx512m, -Des.path.home=/elasticsearch]
|
||||||
[2015-08-30 10:01:32,153][INFO ][env ] [Hammerhead] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4]
|
[2017-10-02T11:39:24,386][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [aggs-matrix-stats]
|
||||||
[2015-08-30 10:01:37,188][INFO ][node ] [Hammerhead] initialized
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [ingest-common]
|
||||||
[2015-08-30 10:01:37,189][INFO ][node ] [Hammerhead] starting ...
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [lang-expression]
|
||||||
[2015-08-30 10:01:37,499][INFO ][transport ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.48.2:9300]}
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [lang-groovy]
|
||||||
[2015-08-30 10:01:37,550][INFO ][discovery ] [Hammerhead] myesdb/n2-6uu_UT3W5XNrjyqBPiA
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [lang-mustache]
|
||||||
[2015-08-30 10:01:43,966][INFO ][cluster.service ] [Hammerhead] new_master [Hammerhead][n2-6uu_UT3W5XNrjyqBPiA][es-kfymw][inet[/10.244.48.2:9300]]{master=true}, reason: zen-disco-join (elected_as_master)
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [lang-painless]
|
||||||
[2015-08-30 10:01:44,010][INFO ][http ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.244.48.2:9200]}
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [parent-join]
|
||||||
[2015-08-30 10:01:44,011][INFO ][node ] [Hammerhead] started
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [percolator]
|
||||||
[2015-08-30 10:01:44,042][INFO ][gateway ] [Hammerhead] recovered [0] indices into cluster_state
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [reindex]
|
||||||
|
[2017-10-02T11:39:24,389][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [transport-netty3]
|
||||||
|
[2017-10-02T11:39:24,389][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [transport-netty4]
|
||||||
|
[2017-10-02T11:39:24,389][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] no plugins loaded
|
||||||
|
[2017-10-02T11:39:27,395][INFO ][o.e.d.DiscoveryModule ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] using discovery type [zen]
|
||||||
|
[2017-10-02T11:39:28,754][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] initialized
|
||||||
|
[2017-10-02T11:39:28,758][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] starting ...
|
||||||
|
[2017-10-02T11:39:29,132][INFO ][o.e.t.TransportService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] publish_address {10.44.2.5:9300}, bound_addresses {10.44.2.5:9300}
|
||||||
|
[2017-10-02T11:39:29,154][INFO ][o.e.b.BootstrapChecks ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks
|
||||||
|
[2017-10-02T11:39:32,264][INFO ][o.e.c.s.ClusterService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] new_master {ece3d296-dbd3-46a3-b66c-8b4c282610af}{Rc-odsaESxSAnvOBFg4MNA}{YvzOdsplT12C-9c7X3O8Xw}{10.44.2.5}{10.44.2.5:9300}, reason: zen-disco-elected-as-master ([0] nodes joined)
|
||||||
|
[2017-10-02T11:39:32,315][INFO ][o.e.h.n.Netty4HttpServerTransport] [ece3d296-dbd3-46a3-b66c-8b4c282610af] publish_address {10.44.2.5:9200}, bound_addresses {10.44.2.5:9200}
|
||||||
|
[2017-10-02T11:39:32,316][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] started
|
||||||
|
[2017-10-02T11:39:32,331][INFO ][o.e.g.GatewayService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] recovered [0] indices into cluster_state
|
||||||
```
|
```
|
||||||
|
|
||||||
So we have a 1-node Elasticsearch cluster ready to handle some work.
|
So we have a 1-node Elasticsearch cluster ready to handle some work.
|
||||||
|
|
@ -64,52 +75,63 @@ Did it work?
|
||||||
```
|
```
|
||||||
$ kubectl get pods
|
$ kubectl get pods
|
||||||
NAME READY STATUS RESTARTS AGE
|
NAME READY STATUS RESTARTS AGE
|
||||||
es-78e0s 1/1 Running 0 8m
|
es-95h78 1/1 Running 0 3m
|
||||||
es-kfymw 1/1 Running 0 17m
|
es-q8q2v 1/1 Running 0 6m
|
||||||
es-rjmer 1/1 Running 0 8m
|
es-qdcnd 1/1 Running 0 3m
|
||||||
kube-dns-p3v1u 3/3 Running 0 30m
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Let's take a look at logs:
|
Let's take a look at logs:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ kubectl logs es-kfymw
|
$ kubectl logs es-q8q2v
|
||||||
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
|
[2017-10-02T11:39:22,347][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] initializing ...
|
||||||
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
|
[2017-10-02T11:39:22,579][INFO ][o.e.e.NodeEnvironment ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] using [1] data paths, mounts [[/data (/dev/sda1)]], net usable_space [92.5gb], net total_space [94.3gb], spins? [possibly], types [ext4]
|
||||||
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
|
[2017-10-02T11:39:22,579][INFO ][o.e.e.NodeEnvironment ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] heap size [503.6mb], compressed ordinary object pointers [true]
|
||||||
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] version[1.7.1], pid[7], build[b88f43f/2015-07-29T09:54:16Z]
|
[2017-10-02T11:39:22,581][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] node name [ece3d296-dbd3-46a3-b66c-8b4c282610af], node ID [Rc-odsaESxSAnvOBFg4MNA]
|
||||||
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] initializing ...
|
[2017-10-02T11:39:22,582][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] version[5.6.2], pid[9], build[57e20f3/2017-09-23T13:16:45.703Z], OS[Linux/4.4.64+/amd64], JVM[Oracle Corporation/OpenJDK 64-Bit Server VM/1.8.0_131/25.131-b11]
|
||||||
[2015-08-30 10:01:32,110][INFO ][plugins ] [Hammerhead] loaded [cloud-kubernetes], sites []
|
[2017-10-02T11:39:22,583][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] JVM arguments [-XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:+DisableExplicitGC, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -Djdk.io.permissionsUseCanonicalPath=true, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Dlog4j.skipJansi=true, -XX:+HeapDumpOnOutOfMemoryError, -Xms512m, -Xmx512m, -Des.path.home=/elasticsearch]
|
||||||
[2015-08-30 10:01:32,153][INFO ][env ] [Hammerhead] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4]
|
[2017-10-02T11:39:24,386][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [aggs-matrix-stats]
|
||||||
[2015-08-30 10:01:37,188][INFO ][node ] [Hammerhead] initialized
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [ingest-common]
|
||||||
[2015-08-30 10:01:37,189][INFO ][node ] [Hammerhead] starting ...
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [lang-expression]
|
||||||
[2015-08-30 10:01:37,499][INFO ][transport ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.48.2:9300]}
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [lang-groovy]
|
||||||
[2015-08-30 10:01:37,550][INFO ][discovery ] [Hammerhead] myesdb/n2-6uu_UT3W5XNrjyqBPiA
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [lang-mustache]
|
||||||
[2015-08-30 10:01:43,966][INFO ][cluster.service ] [Hammerhead] new_master [Hammerhead][n2-6uu_UT3W5XNrjyqBPiA][es-kfymw][inet[/10.244.48.2:9300]]{master=true}, reason: zen-disco-join (elected_as_master)
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [lang-painless]
|
||||||
[2015-08-30 10:01:44,010][INFO ][http ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.244.48.2:9200]}
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [parent-join]
|
||||||
[2015-08-30 10:01:44,011][INFO ][node ] [Hammerhead] started
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [percolator]
|
||||||
[2015-08-30 10:01:44,042][INFO ][gateway ] [Hammerhead] recovered [0] indices into cluster_state
|
[2017-10-02T11:39:24,388][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [reindex]
|
||||||
[2015-08-30 10:08:02,517][INFO ][cluster.service ] [Hammerhead] added {[Tenpin][2gv5MiwhRiOSsrTOF3DhuA][es-78e0s][inet[/10.244.54.4:9300]]{master=true},}, reason: zen-disco-receive(join from node[[Tenpin][2gv5MiwhRiOSsrTOF3DhuA][es-78e0s][inet[/10.244.54.4:9300]]{master=true}])
|
[2017-10-02T11:39:24,389][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [transport-netty3]
|
||||||
[2015-08-30 10:10:10,645][INFO ][cluster.service ] [Hammerhead] added {[Evilhawk][ziTq2PzYRJys43rNL2tbyg][es-rjmer][inet[/10.244.33.3:9300]]{master=true},}, reason: zen-disco-receive(join from node[[Evilhawk][ziTq2PzYRJys43rNL2tbyg][es-rjmer][inet[/10.244.33.3:9300]]{master=true}])
|
[2017-10-02T11:39:24,389][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] loaded module [transport-netty4]
|
||||||
|
[2017-10-02T11:39:24,389][INFO ][o.e.p.PluginsService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] no plugins loaded
|
||||||
|
[2017-10-02T11:39:27,395][INFO ][o.e.d.DiscoveryModule ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] using discovery type [zen]
|
||||||
|
[2017-10-02T11:39:28,754][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] initialized
|
||||||
|
[2017-10-02T11:39:28,758][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] starting ...
|
||||||
|
[2017-10-02T11:39:29,132][INFO ][o.e.t.TransportService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] publish_address {10.44.2.5:9300}, bound_addresses {10.44.2.5:9300}
|
||||||
|
[2017-10-02T11:39:29,154][INFO ][o.e.b.BootstrapChecks ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks
|
||||||
|
[2017-10-02T11:39:32,264][INFO ][o.e.c.s.ClusterService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] new_master {ece3d296-dbd3-46a3-b66c-8b4c282610af}{Rc-odsaESxSAnvOBFg4MNA}{YvzOdsplT12C-9c7X3O8Xw}{10.44.2.5}{10.44.2.5:9300}, reason: zen-disco-elected-as-master ([0] nodes joined)
|
||||||
|
[2017-10-02T11:39:32,315][INFO ][o.e.h.n.Netty4HttpServerTransport] [ece3d296-dbd3-46a3-b66c-8b4c282610af] publish_address {10.44.2.5:9200}, bound_addresses {10.44.2.5:9200}
|
||||||
|
[2017-10-02T11:39:32,316][INFO ][o.e.n.Node ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] started
|
||||||
|
[2017-10-02T11:39:32,331][INFO ][o.e.g.GatewayService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] recovered [0] indices into cluster_state
|
||||||
|
[2017-10-02T11:42:39,410][INFO ][o.e.c.s.ClusterService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] added {{8bcf8744-c48a-4ebb-86d8-e677a61141b7}{nFExy7_bS--Vcd42xrnFrw}{RQyzD2UnR--UUEfyfPuHgg}{10.44.0.5}{10.44.0.5:9300},}, reason: zen-disco-node-join[{8bcf8744-c48a-4ebb-86d8-e677a61141b7}{nFExy7_bS--Vcd42xrnFrw}{RQyzD2UnR--UUEfyfPuHgg}{10.44.0.5}{10.44.0.5:9300}]
|
||||||
|
[2017-10-02T11:42:39,470][WARN ][o.e.d.z.ElectMasterService] [ece3d296-dbd3-46a3-b66c-8b4c282610af] value for setting "discovery.zen.minimum_master_nodes" is too low. This can result in data loss! Please set it to at least a quorum of master-eligible nodes (current value: [1], total number of master-eligible nodes used for publishing in this round: [2])
|
||||||
|
[2017-10-02T11:42:42,586][INFO ][o.e.c.s.ClusterService ] [ece3d296-dbd3-46a3-b66c-8b4c282610af] added {{3b2f3585-7706-416d-bede-c467a46ab30f}{eG6p9sJRQ9yShS97yL3pQg}{JqGe38AeSKmHQfLaICibQA}{10.44.1.5}{10.44.1.5:9300},}, reason: zen-disco-node-join[{3b2f3585-7706-416d-bede-c467a46ab30f}{eG6p9sJRQ9yShS97yL3pQg}{JqGe38AeSKmHQfLaICibQA}{10.44.1.5}{10.44.1.5:9300}]
|
||||||
```
|
```
|
||||||
|
|
||||||
So we have a 3-node Elasticsearch cluster ready to handle more work.
|
So we have a 3-node Elasticsearch cluster ready to handle more work.
|
||||||
|
|
||||||
## Access the service
|
## Access the service
|
||||||
|
|
||||||
*Don't forget* that services in Kubernetes are only acessible from containers in the cluster. For different behavior you should [configure the creation of an external load-balancer](http://kubernetes.io/v1.0/docs/user-guide/services.html#type-loadbalancer). While it's supported within this example service descriptor, its usage is out of scope of this document, for now.
|
*Don't forget* that services in Kubernetes are only accessible from containers in the cluster. For different behavior you should [configure the creation of an external load-balancer](https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer). While it's supported within this example service descriptor, its usage is out of scope of this document, for now.
|
||||||
|
|
||||||
```
|
```
|
||||||
$ kubectl get service elasticsearch
|
$ kubectl get service elasticsearch
|
||||||
NAME LABELS SELECTOR IP(S) PORT(S)
|
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||||
elasticsearch component=elasticsearch component=elasticsearch 10.100.108.94 9200/TCP
|
elasticsearch 10.47.252.248 35.200.115.240 9200:31394/TCP,9300:30907/TCP 6m
|
||||||
9300/TCP
|
|
||||||
```
|
```
|
||||||
|
|
||||||
From any host on your cluster (that's running `kube-proxy`), run:
|
From any host on your cluster (that's running `kube-proxy`), run:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ curl 10.100.108.94:9200
|
$ curl 35.200.115.240:9200
|
||||||
```
|
```
|
||||||
|
|
||||||
You should see something similar to the following:
|
You should see something similar to the following:
|
||||||
|
|
@ -117,15 +139,15 @@ You should see something similar to the following:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"status" : 200,
|
"name" : "ece3d296-dbd3-46a3-b66c-8b4c282610af",
|
||||||
"name" : "Hammerhead",
|
|
||||||
"cluster_name" : "myesdb",
|
"cluster_name" : "myesdb",
|
||||||
|
"cluster_uuid" : "lb76DGaGS1msgwC3w8H9Qg",
|
||||||
"version" : {
|
"version" : {
|
||||||
"number" : "1.7.1",
|
"number" : "5.6.2",
|
||||||
"build_hash" : "b88f43fc40b0bcd7f173a1f9ee2e97816de80b19",
|
"build_hash" : "57e20f3",
|
||||||
"build_timestamp" : "2015-07-29T09:54:16Z",
|
"build_date" : "2017-09-23T13:16:45.703Z",
|
||||||
"build_snapshot" : false,
|
"build_snapshot" : false,
|
||||||
"lucene_version" : "4.10.4"
|
"lucene_version" : "6.6.1"
|
||||||
},
|
},
|
||||||
"tagline" : "You Know, for Search"
|
"tagline" : "You Know, for Search"
|
||||||
}
|
}
|
||||||
|
|
@ -135,7 +157,7 @@ Or if you want to check cluster information:
|
||||||
|
|
||||||
|
|
||||||
```
|
```
|
||||||
curl 10.100.108.94:9200/_cluster/health?pretty
|
curl 35.200.115.240:9200/_cluster/health?pretty
|
||||||
```
|
```
|
||||||
|
|
||||||
You should see something similar to the following:
|
You should see something similar to the following:
|
||||||
|
|
@ -154,7 +176,9 @@ You should see something similar to the following:
|
||||||
"unassigned_shards" : 0,
|
"unassigned_shards" : 0,
|
||||||
"delayed_unassigned_shards" : 0,
|
"delayed_unassigned_shards" : 0,
|
||||||
"number_of_pending_tasks" : 0,
|
"number_of_pending_tasks" : 0,
|
||||||
"number_of_in_flight_fetch" : 0
|
"number_of_in_flight_fetch" : 0,
|
||||||
|
"task_max_waiting_in_queue_millis" : 0,
|
||||||
|
"active_shards_percent_as_number" : 100.0
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -12,13 +12,20 @@ spec:
|
||||||
component: elasticsearch
|
component: elasticsearch
|
||||||
spec:
|
spec:
|
||||||
serviceAccount: elasticsearch
|
serviceAccount: elasticsearch
|
||||||
|
initContainers:
|
||||||
|
- name: init-sysctl
|
||||||
|
image: busybox
|
||||||
|
imagePullPolicy: IfNotPresent
|
||||||
|
command: ["sysctl", "-w", "vm.max_map_count=262144"]
|
||||||
|
securityContext:
|
||||||
|
privileged: true
|
||||||
containers:
|
containers:
|
||||||
- name: es
|
- name: es
|
||||||
securityContext:
|
securityContext:
|
||||||
capabilities:
|
capabilities:
|
||||||
add:
|
add:
|
||||||
- IPC_LOCK
|
- IPC_LOCK
|
||||||
image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4
|
image: quay.io/pires/docker-elasticsearch-kubernetes:5.6.2
|
||||||
env:
|
env:
|
||||||
- name: KUBERNETES_CA_CERTIFICATE_FILE
|
- name: KUBERNETES_CA_CERTIFICATE_FILE
|
||||||
value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue