Add CircleCI for testing links and lint (#200)

This commit is contained in:
Thomas Rampelberg 2019-02-26 08:18:29 -08:00 committed by GitHub
parent 61672965fe
commit 042b45eaa4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
61 changed files with 395 additions and 1422 deletions

28
.circleci/config.yml Normal file
View File

@ -0,0 +1,28 @@
version: 2
jobs:
build:
docker:
- image: circleci/node:10
working_directory: ~/website
steps:
- checkout
- run:
name: Install dependencies
command: |-
wget https://github.com/gohugoio/hugo/releases/download/v0.54.0/hugo_0.54.0_Linux-64bit.deb
sudo dpkg -i hugo*.deb
curl https://htmltest.wjdp.uk | bash
sudo mv bin/htmltest /usr/local/bin
sudo npm install -g markdownlint-cli
- run:
name: Validate markdown
command: |-
make lint
- run:
name: Check for bad links
command: |-
make check
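For reference, the same checks the CI job runs can be reproduced locally via the Makefile targets it calls; a minimal sketch, assuming Hugo, htmltest, and markdownlint-cli are installed as above:

```bash
# Roughly what CI runs, executed from the repository root:
make lint    # markdownlint over linkerd.io/content
make check   # build linkerd.io, then run htmltest against the generated site
```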

View File

@ -4,6 +4,9 @@ RELEASE_URL = https://github.com/linkerd/linkerd2/releases
export L5D2_STABLE_VERSION ?= stable-2.2.1
export L5D2_EDGE_VERSION ?= edge-19.2.4
GIT_BRANCH = $(shell git rev-parse --abbrev-ref HEAD)
GIT_HASH = $(shell git log --pretty=format:'%h' -n 1)
define upload_public
gsutil -m rsync \
-d -r -c $(if $(DRY_RUN),-n,) \
@ -14,6 +17,7 @@ HAS_GSUTIL := $(shell command -v gsutil;)
HAS_FLARECTL := $(shell command -v flarectl;)
HAS_HUGO := $(shell command -v hugo;)
HAS_HTMLTEST := $(shell command -v htmltest;)
HAS_MDLINT := $(shell command -v markdownlint;)
.PHONY: publish
publish: update-version build-linkerd.io deploy
@ -47,6 +51,14 @@ tmp/%/public:
tmp-sites: tmp
cp -R *linkerd.io tmp/
.PHONY: lint
lint:
@# lint the markdown for linkerd.io
ifndef HAS_MDLINT
@printf "Install markdownlint first, run npm install -g markdownlint-cli\n"; exit 1
endif
markdownlint -c linkerd.io/.markdownlint.yaml linkerd.io/content
.PHONY: check
check: build-linkerd.io
@# Check linkerd.io for valid links and standards
@ -55,6 +67,19 @@ ifndef HAS_HTMLTEST
endif
cd tmp/linkerd.io && htmltest
.PHONY: test-ci
test-ci:
@# Test CI configuration without constant commits to config.yml
ifndef CIRCLE_TOKEN
@printf "Create a personal CircleCI token first (CIRCLE_TOKEN). See https://circleci.com/docs/2.0/managing-api-tokens/#creating-a-personal-api-token\n"; exit 1
endif
curl --user $(CIRCLE_TOKEN): \
--request POST \
--form revision=$(GIT_HASH) \
--form config=@.circleci/config.yml \
--form notify=false \
https://circleci.com/api/v1.1/project/github/linkerd/website/tree/$(GIT_BRANCH)
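For reference, a hypothetical local invocation of this target might look like the following (the token value is a placeholder for a personal CircleCI API token):

```bash
# Hypothetical run; CIRCLE_TOKEN must be a real personal API token.
CIRCLE_TOKEN=0123456789abcdef make test-ci
```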
serve-%: build-%
@# Serve the built files locally
cd tmp/$*/public \

View File

@ -1,3 +1,4 @@
IgnoreURLs:
- .*localhost.*
- https://expedia.com
DirectoryPath: public

View File

@ -0,0 +1,8 @@
MD002:
level: 2
MD013:
code_blocks: false
tables: false
MD024: false
MD041:
level: 2

View File

@ -15,6 +15,9 @@ disableKinds = ["taxonomy", "taxonomyTerm"]
l5d2_release_version = "L5D2_STABLE_VERSION"
l5d2_edge_version = "L5D2_EDGE_VERSION"
[blackfriday]
HrefTargetBlank = true
# MENU: top nav bar
[[menu.top]]
name = "Docs"

View File

@ -1 +1,3 @@
<!-- markdownlint-disable -->
<meta http-equiv="Refresh" content="0; url=overview/">
<!-- markdownlint-enable -->

View File

@ -15,4 +15,5 @@ The admin interface also provides a web UI that you can use to help debug
Linkerd instance. This provides valuable insight into how Linkerd will route
your request. The UI is available at `/delegator`, on the configured admin port.
{{< fig src="/images/dtab-playground.png" title="Linkerd admin UI - Dtab playground." >}}
{{< fig src="/images/dtab-playground.png"
title="Linkerd admin UI - Dtab playground." >}}

View File

@ -35,7 +35,7 @@ To enable additional metrics endpoints, such as Prometheus, InfluxDB, or StatsD,
have a look at the
[Telemetry section of the Linkerd config]({{% linkerdconfig "telemetry" %}}).
### Prometheus
## Prometheus
Linkerd provides a metrics endpoint, `/admin/metrics/prometheus`, specifically
for exporting stats to Prometheus. To enable the Prometheus telemeter, add this
@ -65,7 +65,7 @@ scrape_configs:
That configuration would scrape metrics from three separate Linkerd instances.
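As a quick sanity check, the endpoint can also be queried directly; a sketch, assuming the admin interface is on the default port 9990:

```bash
# Confirm the Prometheus endpoint is serving metrics
# (default admin port assumed; adjust if yours differs):
curl -s http://localhost:9990/admin/metrics/prometheus | head
```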
### InfluxDB
## InfluxDB
Linkerd provides a metrics endpoint, `/admin/metrics/influxdb`, specifically
for exporting stats in InfluxDB LINE protocol. You can configure
@ -74,7 +74,7 @@ from your Linkerd instances. Have a look at the
[InfluxDB section of the linkerd-examples repo](https://github.com/linkerd/linkerd-examples/tree/master/influxdb)
for a complete example.
### StatsD
## StatsD
Linkerd supports pushing metrics to a StatsD backend. Simply add a StatsD config
block to your Linkerd configuration file:

View File

@ -47,7 +47,8 @@ Since this model requires high concurrency of Linkerd instances, a larger
resource profile is usually appropriate. In this model, the loss of an
individual Linkerd instance is equivalent to losing the host itself.
{{< fig src="/images/diagram-per-host-deployment.png" title="Linkerd deployed per host." >}}
{{< fig src="/images/diagram-per-host-deployment.png"
title="Linkerd deployed per host." >}}
## Sidecar
@ -63,7 +64,8 @@ resource profile is usually appropriate. In this model, the loss of an
individual Linkerd instance is equivalent to losing the corresponding service
instance.
{{< fig src="/images/diagram-sidecar-deployment.png" title="Linkerd deployed as a sidecar process (service-to-linkerd)." >}}
{{< fig src="/images/diagram-sidecar-deployment.png"
title="Linkerd deployed as a sidecar process (service-to-linkerd)." >}}
There are three configurations for how the application service and Linkerd can
talk to each other: service-to-linker, linker-to-service, and linker-to-linker.

View File

@ -72,6 +72,7 @@ With the dtab:
/california => /USA/CA;
/sanfrancisco => /california/SF;
```
And the path:
```dtab
@ -122,11 +123,12 @@ subsequent path segments as an ip address and port. So the path
`/$/inet/127.0.0.1/4140` would resolve to the bound address `127.0.0.1:4140`
Linkerd also provides a suite of namers for many different service discovery
mechanisms. Some examples are [`/#/io.l5d.consul`]({{% linkerdconfig "consul-service-discovery-experimental" %}}),
[`/#/io.l5d.k8s`]({{% linkerdconfig "kubernetes-service-discovery-experimental" %}}),
and
[`/#/io.l5d.marathon`]({{% linkerdconfig "marathon-service-discovery-experimental" %}}).
See more on these and others in the [Linkerd documentation on namers]({{% linkerdconfig "namers" %}}).
mechanisms. Some examples are [`/#/io.l5d.consul`]({{% linkerdconfig
"consul-service-discovery-experimental" %}}), [`/#/io.l5d.k8s`]({{%
linkerdconfig "kubernetes-service-discovery-experimental" %}}), and
[`/#/io.l5d.marathon`]({{% linkerdconfig
"marathon-service-discovery-experimental" %}}). See more on these and others in
the [Linkerd documentation on namers]({{% linkerdconfig "namers" %}}).
Once a namer converts a path into a bound address, the routing is considered
complete and any residual path segments not used in prefix matching will

View File

@ -11,36 +11,41 @@ aliases = [
parent = "advanced"
+++
namerd is a service that manages routing for multiple Linkerd instances. It
does this by storing [dtabs]({{% ref "/1/advanced/dtabs.md" %}})
and using [namers]({{% ref "/1/advanced/dtabs.md#namers-addresses" %}}) for
service discovery. namerd supports the same suite of service discovery backends that Linkerd does, which include services like [ZooKeeper](https://twitter.github.io/commons/apidocs/com/twitter/common/zookeeper/ServerSet.html), [Consul](https://www.consul.io/), [Kubernetes API](http://kubernetes.io/docs/api), and [Marathon](https://mesosphere.github.io/marathon/).
namerd is a service that manages routing for multiple Linkerd instances. It does
this by storing [dtabs]({{% ref "/1/advanced/dtabs.md" %}}) and using
[namers]({{% ref "/1/advanced/dtabs.md#namers-addresses" %}}) for service
discovery. namerd supports the same suite of service discovery backends that
Linkerd does, which include services like
[ZooKeeper](https://twitter.github.io/commons/apidocs/com/twitter/common/zookeeper/ServerSet.html),
[Consul](https://www.consul.io/), [Kubernetes
API](http://kubernetes.io/docs/api), and
[Marathon](https://mesosphere.github.io/marathon/).
Using namerd, individual Linkerds no longer need to talk directly to service
discovery or have dtabs hardcoded into their config files. Instead, they ask
namerd for any necessary routing information. This provides a number of
benefits, which are outlined below.
### Decreased load on service discovery backends
## Decreased load on service discovery backends
Using namerd means that only a small cluster of namerds need to talk directly
to the service discovery backends instead of every Linkerd in the fleet. namerd
also utilizes caching to further protect the service discovery backend from
excessive load.
### Global routing policy
## Global routing policy
By storing dtabs in namerd instead of hardcoding them in the Linkerd configs, it
ensures that routing policy is in sync across the fleet and gives you one
central source of truth when you need to make changes.
### Dynamic routing policy
## Dynamic routing policy
The other advantage of storing dtabs in namerd is that these dtabs can be
updated dynamically using [namerd's API]({{% namerdconfig "http-controller" %}}) or
[command-line tool](https://github.com/linkerd/namerctl). This allows you to
perform operations like
[canary, staging, or blue-green deploy](https://blog.buoyant.io/2016/05/04/real-world-microservices-when-services-stop-playing-well-and-start-getting-real/#dynamic-routing-with-namerd),
updated dynamically using [namerd's API]({{% namerdconfig "http-controller" %}})
or [command-line tool](https://github.com/linkerd/namerctl). This allows you to
perform operations like [canary, staging, or blue-green
deploy](https://blog.buoyant.io/2016/05/04/real-world-microservices-when-services-stop-playing-well-and-start-getting-real/#dynamic-routing-with-namerd),
all without needing to restart any Linkerds.
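As a rough illustration only (subcommands can differ between namerctl versions, and the namespace name here is a placeholder), a dynamic dtab update from the command line might look like:

```bash
# Fetch the current dtab for a namespace, then push an edited version.
namerctl dtab get default > current.dtab
namerctl dtab update default ./updated.dtab
```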
## More information

View File

@ -103,7 +103,6 @@ simply create a resource file called
[`META-INF/services/io.buoyant.linkerd.ResponseClassifierConfig`](https://github.com/linkerd/linkerd-examples/blob/master/plugins/header-classifier/src/main/resources/META-INF/services/io.buoyant.linkerd.ResponseClassifierInitializer)
and add the fully qualified class name of the config initializer to that file.
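As a sketch, the resource file is nothing more than a list of fully qualified initializer class names, one per line (the class name below is taken from the log line later on this page):

```bash
# Create the service-loader resource file for the example classifier:
mkdir -p META-INF/services
echo "io.buoyant.http.classifiers.HeaderClassifierInitializer" \
  > META-INF/services/io.buoyant.linkerd.ResponseClassifierConfig
```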
## Build & package
We use sbt to build our plugin and the assembly sbt plugin to package it into a
@ -138,7 +137,7 @@ routers:
If you run Linkerd with `-log.level=DEBUG` then you should see a line printed
at startup that indicates the HeaderClassifierInitializer has been loaded:
```
```bash
LoadService: loaded instance of class io.buoyant.http.classifiers.HeaderClassifierInitializer for requested service io.buoyant.linkerd.ResponseClassifierInitializer
```

View File

@ -18,7 +18,6 @@ contains several examples of how to use Linkerd and namerd in various environments
The [Buoyant blog](https://blog.buoyant.io) also contains several examples and
walkthroughs highlighting various Linkerd features.
## Kubernetes
For walkthroughs of various Linkerd features with deployable examples, check out

View File

@ -17,6 +17,8 @@ produced by both Buoyant and Linkerd users is represented below.
You can also check out [Buoyant's blog](https://buoyant.io/blog) for more posts!
<!-- markdownlint-disable MD033 -->
## Linkerd Users
* [Linkerd: the Cloud Native service mesh](https://skillsmatter.com/skillscasts/10912-cloud-native-london-october)<br>
@ -31,8 +33,11 @@ KubeCon EU, March 2017
* [Cloud Native Applications on OpenShift/Azure](https://channel9.msdn.com/Events/TechDaysOnline/MVP-Led-Techdays-Online/Cloud-Native-Applications-on-OpenShiftAzure-)<br>
Microsoft TechDays Online, February 2017
* [Building a Modern Bank Backend](https://monzo.com/blog/2016/09/19/building-a-modern-bank-backend/) (blog)<br>
[Building a Bank with Kubernetes](https://skillsmatter.com/skillscasts/9146-building-a-bank-with-kubernetes) (slides)<br>
* [Building a Modern Bank
Backend](https://monzo.com/blog/2016/09/19/building-a-modern-bank-backend/)
(blog)<br> [Building a Bank with
Kubernetes](https://skillsmatter.com/skillscasts/9146-building-a-bank-with-kubernetes)
(slides)<br>
Monzo, October 2016
## Linkerd Developers
@ -87,8 +92,12 @@ TechRepublic, March 2017
* [Linkerd Celebrates One Year with One Hundred Billion Production Requests](https://www.cncf.io/blog/2017/03/09/linkerd-celebrates-one-year-one-hundred-billion-production-requests/)<br>
Cloud Native Computing Foundation, March 2017
* [Open Source Linkerd Project Celebrates First Anniversary in Quest to Become TCP/IP of Microservices](https://www.infoq.com/news/2017/03/linkerd-celebrates-one-year)<br>
InfoQ, March 2017
* [Open Source Linkerd Project Celebrates First Anniversary in Quest to Become
TCP/IP of
Microservices](https://www.infoq.com/news/2017/03/linkerd-celebrates-one-year)<br>
InfoQ, March 2017
* [Cloud Native Computing Foundation adds Linkerd as its fifth hosted project](https://techcrunch.com/2017/01/23/cloud-native-computing-foundation-adds-linkerd-as-its-fifth-hosted-project/)<br>
TechCrunch, January 2017
<!-- markdownlint-enable MD033 -->

View File

@ -27,7 +27,6 @@ export tracing data to a backend trace aggregator, such as
[Zipkin](http://zipkin.io). This will expose latency, retry, and failure
information for each hop in a request.
## Further reading
If you're ready to start using distributed tracing in your setup, see the

View File

@ -31,7 +31,7 @@ service running elsewhere. In that scenario, you could take advantage of
Linkerd's proxy integration and make a curl request to the "hello" service with:
```bash
$ http_proxy=localhost:4140 curl http://hello/
http_proxy=localhost:4140 curl http://hello/
```
With the `http_proxy` variable set, curl will send the proxy request directly to

View File

@ -40,8 +40,8 @@ For more detail about Metrics instrumentation, see the
"/1/administration/telemetry.md"
%}}).
For configuring your metrics endpoint, see the
[Admin section of the Linkerd config]({{% linkerdconfig "administrative-interface" %}}).
For configuring your metrics endpoint, see the [Admin section of the Linkerd
config]({{% linkerdconfig "administrative-interface" %}}).
For a guide on setting up an end-to-end monitoring pipeline on Kubernetes, see
[A Service Mesh for Kubernetes, Part I: Top-Line Service Metrics](https://blog.buoyant.io/2016/10/04/a-service-mesh-for-kubernetes-part-i-top-line-service-metrics/).

View File

@ -24,7 +24,6 @@ be retried. Thus, even if one instance of a service is failing, clients can
maximize success rates. Retry budgets (the percentage of requests that Linkerd
will retry) are configurable so that you can avoid overloading your server.
## Timeouts
You can also specify a per-request timeout on the

View File

@ -49,6 +49,6 @@ removed from service discovery to stop receiving traffic. If the instance simply
stops accepting requests, Linkerd's load-balancing algorithms are designed to
handle gracefully instances that become unhealthy or disappear.
Lookups in service discovery are controlled by [dtab rules](
{{% ref "/1/advanced/dtabs.md" %}}). This means that these lookups comprise part of
the routing logic for a request.
Lookups in service discovery are controlled by [dtab rules]( {{% ref
"/1/advanced/dtabs.md" %}}). This means that these lookups comprise part of the
routing logic for a request.

View File

@ -27,8 +27,9 @@ In order for Linkerd to send requests with TLS, it's necessary to set the
[client TLS configuration parameter]({{% linkerdconfig "client-tls" %}}) when
configuring Linkerd.
Linkerd supports Static TLS, TLS with Bound Path and No Validation TLS
through different configurations of the [client TLS configuration parameter]({{% linkerdconfig "client-tls" %}}).
Linkerd supports Static TLS, TLS with Bound Path and No Validation TLS through
different configurations of the [client TLS configuration parameter]({{%
linkerdconfig "client-tls" %}}).
## Server TLS

View File

@ -22,6 +22,7 @@ Linkerd running on the node. Note that this setup proxies all outbound traffic
to a single Linkerd port, so it won't work if you are using multiple protocols.
To use `linkerd-inject`:
```bash
# install linkerd-inject
$ go get github.com/linkerd/linkerd-inject

View File

@ -32,7 +32,7 @@ Install the Linkerd DC/OS Universe package with the following command, note
public and private:
```bash
$ dcos package install --options=<(echo '{"linkerd":{"instances":4}}') linkerd
dcos package install --options=<(echo '{"linkerd":{"instances":4}}') linkerd
```
Note that Linkerd boots two servers, `outgoing` on port `4140`, and `incoming`
@ -87,14 +87,14 @@ repo as
To modify a Linkerd config, do the following:
1. Edit `linkerd-config.yml`
2. Convert to JSON using something like http://json2yaml.com
3. Remove all line breaks and escape quotes:
1. Convert to JSON using something like [http://json2yaml.com](http://json2yaml.com)
1. Remove all line breaks and escape quotes:
```bash
cat linkerd-config.json |tr -d '\n '|sed 's/"/\\\\\\\"/g'
```
4. Replace the inner contents of `linkerd-dcos.json`'s `cmd` field with the
1. Replace the inner contents of `linkerd-dcos.json`'s `cmd` field with the
output.
## Deploying linkerd-viz

View File

@ -106,11 +106,11 @@ First we need to create an SSH tunnel to the cluster. The following commands
will choose one of the EC2 hosts, and forward traffic on three local ports to
three remote ports on the EC2 host:
- Traffic to `localhost:9990` will go to the Linkerd dashboard on the remote
* Traffic to `localhost:9990` will go to the Linkerd dashboard on the remote
host
- Traffic to `localhost:8500` will go to the Consul admin dashboard on the
* Traffic to `localhost:8500` will go to the Consul admin dashboard on the
remote host
- Traffic to `localhost:4140` will go to the Linkerd HTTP proxy on the remote
* Traffic to `localhost:4140` will go to the Linkerd HTTP proxy on the remote
host
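The tutorial's exact commands are not shown in this diff; as a rough sketch, such a tunnel could be built with SSH local port forwards (the key path, user, and `EC2_HOST` are placeholders):

```bash
# Hypothetical tunnel to one of the cluster's EC2 hosts. Forwards:
#   9990 -> Linkerd admin dashboard, 8500 -> Consul UI, 4140 -> Linkerd proxy
ssh -i ~/.ssh/my-key.pem -N \
  -L 9990:localhost:9990 \
  -L 8500:localhost:8500 \
  -L 4140:localhost:4140 \
  ec2-user@"$EC2_HOST"
```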
Note that if one of these four ports is already in use on your local machine
@ -170,8 +170,10 @@ Hello (172.31.20.160) World-V2 (172.31.19.35)!!
By setting the `l5d-dtab` header, we instructed Linkerd to dynamically route all
requests destined for `world` to `world-v2`.
{{< fig src="/images/ecs-linkerd-routing.png" title="Linkerd request routing" >}}
For more information, have a look at
{{< fig src="/images/ecs-linkerd-routing.png"
title="Linkerd request routing" >}}
For more information, have a look at
[Dynamic Request Routing]({{% ref "/1/features/routing.md" %}}).
## linkerd-viz

View File

@ -30,15 +30,16 @@ aliases = [
+++
{{< note >}}
This document is specific to Linkerd 1.x. If you're on Kubernetes, you may wish to consider [Linkerd 2.x](/2/getting-started/) instead.
This document is specific to Linkerd 1.x. If you're on Kubernetes, you may wish
to consider [Linkerd 2.x](/2/getting-started/) instead.
{{< /note >}}
If you have a Kubernetes cluster or even just run [Minikube](https://github.com/kubernetes/minikube),
deploying Linkerd as a service mesh is the fastest way to get started. Not only
is it incredibly simple to deploy, it is also suitable for most production use-
cases, providing service discovery, instrumentation, intelligent client-side
load balancing, circuit breakers, and dynamic routing out-of-the-box.
If you have a Kubernetes cluster or even just run
[Minikube](https://github.com/kubernetes/minikube), deploying Linkerd as a
service mesh is the fastest way to get started. Not only is it incredibly
simple to deploy, it is also suitable for most production use cases, providing
service discovery, instrumentation, intelligent client-side load balancing,
circuit breakers, and dynamic routing out-of-the-box.
The Linkerd service mesh is deployed as a Kubernetes
[DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/),
@ -61,8 +62,8 @@ You can verify that Linkerd was deployed successfully by running
kubectl -n linkerd port-forward $(kubectl -n linkerd get pod -l app=l5d -o jsonpath='{.items[0].metadata.name}') 9990 &
```
And then viewing the Linkerd admin dashboard by visiting `http://localhost:9990`
in your browser.
And then viewing the Linkerd admin dashboard by visiting
[http://localhost:9990](http://localhost:9990) in your browser.
Note that if your cluster uses CNI, you will need to make a few small changes
to the Linkerd config to enable CNI compatibility. These are indicated as
@ -119,14 +120,17 @@ respectively.
## Ingress
The Linkerd service mesh is also also configured to act as an
[Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-controllers). Simply create an Ingress resource defining the routes
that you want and then send requests to port 80 (or port 8080 for HTTP/2) of
the ingress address for the cluster. In cloud environments with external load
balancers, the ingress address is the address of the external load balancer.
Otherwise, the address of any node may be used as the ingress address.
The Linkerd service mesh is also configured to act as an [Ingress
Controller](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-controllers).
Simply create an Ingress resource defining the routes that you want and then
send requests to port 80 (or port 8080 for HTTP/2) of the ingress address for
the cluster. In cloud environments with external load balancers, the ingress
address is the address of the external load balancer. Otherwise, the address of
any node may be used as the ingress address.
See our [Ingress blog post](https://buoyant.io/2017/04/06/a-service-mesh-for-kubernetes-part-viii-linkerd-as-an-ingress-controller/) for more details.
See our [Ingress blog
post](https://buoyant.io/2017/04/06/a-service-mesh-for-kubernetes-part-viii-linkerd-as-an-ingress-controller/)
for more details.
## Next Steps

View File

@ -25,9 +25,10 @@ java version "1.8.0_66"
Linkerd works with both Oracle and OpenJDK. If you need to install Java 8, you
can download either one.
<!-- markdownlint-disable MD013 MD033 -->
<p class="text-center">
{{% button "Download Oracle Java 8" "http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html" %}}
or
{{% button "Download Oracle Java 8" "http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html" %}} or
{{% button "Download OpenJDK 8" "http://openjdk.java.net/install/" %}}
</p>
@ -39,11 +40,13 @@ First, download the latest binary release of Linkerd.
{{% button "Download Linkerd" "https://github.com/linkerd/linkerd/releases" %}}
</p>
<!-- markdownlint-enable MD013 MD033 -->
Once you've downloaded the release, extract it:
```bash
$ tar -xzf linkerd-{{% latestversion %}}.tgz
$ cd linkerd-{{% latestversion %}}
tar -xzf linkerd-{{% latestversion %}}.tgz
cd linkerd-{{% latestversion %}}
```
The release will contain these files:
@ -63,7 +66,7 @@ Once you have extracted the release, you can start and stop Linkerd by using
To start Linkerd, run:
```bash
$ ./linkerd-{{% latestversion %}}-exec config/linkerd.yaml
./linkerd-{{% latestversion %}}-exec config/linkerd.yaml
```
## Making sure it works
@ -76,8 +79,8 @@ HTTP calls with a `Host` header set to "web" to a service listening on port
You can test this by running a simple service on port 9999:
```bash
$ echo 'It works!' > index.html
$ python -m SimpleHTTPServer 9999
echo 'It works!' > index.html
python -m SimpleHTTPServer 9999
```
This will be our destination server, and will respond to any HTTP request

View File

@ -16,6 +16,7 @@ mesh: a dedicated layer for managing, controlling, and monitoring service-to-
service communication within an application.
<!--more-->
<!-- markdownlint-disable MD026 -->
## What problems does it solve?
@ -26,7 +27,8 @@ was usually not the services themselves, but the *communication* between
services. Linkerd addresses these problems not just by controlling the mechanics
of this communication but by providing a layer of abstraction on top of it.
{{< fig src="/images/diagram-individual-instance.png" title="Linkerd adds reliability and instrumentation to existing applications." >}}
{{< fig src="/images/diagram-individual-instance.png"
title="Linkerd adds reliability and instrumentation." >}}
By providing a consistent, uniform layer of instrumentation and control across
services, Linkerd frees service owners to choose whichever language is most
@ -67,3 +69,5 @@ service instance) or per-host. Since Linkerd instances are stateless and
independent, they can fit easily into existing deployment topologies. They can
be deployed alongside application code in a variety of configurations and with a
minimum of coordination.
<!-- markdownlint-enable MD026 -->

View File

@ -14,11 +14,13 @@ aliases = [
weight = 8
+++
### How do I pronounce "Linkerd"?
<!-- markdownlint-disable MD026 -->
## How do I pronounce "Linkerd"?
"Linker-DEE".
### Why is it called Linkerd?
## Why is it called Linkerd?
Linkerd can be thought of as a *dynamic linker* for microservices. In an
operating system, the dynamic linker takes runtime information about the name
@ -28,24 +30,24 @@ analogous task for microservices: it takes the name of a service and of a call
to make on that service (HTTP, gRPC, etc.), and does the work required to make
the call successful---including routing, load-balancing, and retrying.
### How do I get to the Linkerd dashboard (or "admin") page?
## How do I get to the Linkerd dashboard (or "admin") page?
By default, the admin page can be accessed at `http://localhost:9990`. You can
specify a different port in the [admin section of the Linkerd config]({{%
linkerdconfig "administrative-interface" %}}).
### How do I get upstream service metrics?
## How do I get upstream service metrics?
Linkerd exposes machine-readable metrics in JSON format at
`http://localhost:9990/admin/metrics.json`. This is configurable--see above.
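For example, a quick look at the first few metrics (default admin port assumed):

```bash
curl -s http://localhost:9990/admin/metrics.json | head -c 400
```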
### Where do Linkerd logs go?
## Where do Linkerd logs go?
Linkerd logs to stderr. For HTTP routers, additional access logging can be
configured via the [`httpAccessLog` key in the config file]({{% linkerdconfig
"http-1-1-protocol" %}}).
### Does Linkerd support dynamic config reloading?
## Does Linkerd support dynamic config reloading?
No. We prefer to avoid this pattern and to offload mutable things to separate
services. For example, Linkerd talks to [service discovery]({{%
@ -53,7 +55,7 @@ ref "/1/features/service-discovery.md" %}}) for changes in deployed instances,
and
to [namerd]({{% ref "/1/advanced/namerd.md" %}}) for changes in routing policy.
### How does Linkerd handle service errors?
## How does Linkerd handle service errors?
Errors generated by downstream services are passed through Linkerd unmodified.
@ -70,7 +72,7 @@ All other failures communicating with downstream services (including timeouts,
inability to establish a connection, etc.) are represented as `503 Bad
Gateway`.
### Why do I see a "No hosts available" error?
## Why do I see a "No hosts available" error?
If you see a `No hosts are available` error message in the Linkerd logs or in
a response, it means that Linkerd was unable to translate the request's
@ -83,6 +85,7 @@ dashboard. Simply visit `<linkerd host>:9990/delegator` in your browser. This UI
will demonstrate each step of how the request's name is transformed by the dtab
and namers.
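For instance, with a locally reachable Linkerd, something like the following opens the playground (a sketch; substitute your Linkerd host and admin port):

```bash
# macOS "open"; use xdg-open or paste the URL into a browser elsewhere.
open http://localhost:9990/delegator
```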
(For more information on how Linkerd handles names, see the [Routing](
{{% ref "/1/advanced/routing.md" %}}) page.)
(For more information on how Linkerd handles names, see the
[Routing]({{% ref "/1/advanced/routing.md" %}}) page.)
<!-- markdownlint-enable MD026 -->

View File

@ -20,6 +20,7 @@ see how these concepts apply against a real environment.
---
## Introduction
One of the most common questions when getting started with Linkerd is: what
exactly is a service mesh? Why is a service mesh a critical component of cloud
native apps, when environments like Kubernetes provide primitives like service

View File

@ -1,253 +0,0 @@
+++
date = "2017-04-06T13:43:54-07:00"
title = "Part VIII: Linkerd as an ingress controller"
description = "Linkerd is designed to make service-to-service communication internal to an application safe, fast and reliable."
weight = 9
draft = true
aliases = [
"/tutorials_staging/part-eight"
]
[menu.docs]
parent = "tutorials"
+++
Author: Sarah Brown
Linkerd is designed to make service-to-service communication internal to an application safe, fast and reliable. However, those same goals are also applicable at the edge. In this post, we'll demonstrate a new feature of Linkerd which allows it to act as a Kubernetes ingress controller, and show how it can handle ingress traffic both with and without TLS.
---
In a [previous installment](/tutorials/part-five) of this series, we explored how to receive external requests by deploying Linkerd as a Kubernetes DaemonSet and routing traffic through the corresponding Service VIP. In this post, we'll simplify this setup by using Linkerd as a [Kubernetes ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-controllers), taking advantage of features introduced in [Linkerd 0.9.1](https://github.com/linkerd/linkerd/releases/tag/0.9.1).
This approach has the benefits of simplicity and a tight integration with the Kubernetes API. However, for more complex requirements like on-demand TLS cert generation, SNI, or routing based on cookie values (e.g. the employee dogfooding approach discussed in [Part V](/tutorials/part-five) of this series), combining Linkerd with a dedicated edge layer such as NGINX is still necessary.
What is a Kubernetes ingress controller? An ingress controller is an edge router that accepts traffic from the outside world and forwards it to services in your Kubernetes cluster. The ingress controller uses HTTP host and path routing rules defined in Kubernetes [ingress resources](https://kubernetes.io/docs/concepts/services-networking/ingress/).
---
## Ingress Hello World
Using a [Kubernetes config](https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd-ingress-controller.yml) from the [linkerd-examples](https://github.com/linkerd/linkerd-examples) repo, we can launch Linkerd as a dedicated ingress controller. The config follows the same pattern as our [previous posts on k8s daemonsets](/tutorials/part-two): it deploys an `l5d-config` ConfigMap, an `l5d` DaemonSet, and an `l5d` Service.
{{< fig src="/images/tutorials/buoyant-k8s-hello-world-ingress-controller-1.png" >}}
---
## Step 1: Deploy Linkerd
First let's deploy Linkerd. You can of course deploy into the default namespace, but here we've put Linkerd in its own namespace for better separation of concerns:
```
$ kubectl create ns l5d-system
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd-ingress-controller.yml -n l5d-system
```
You can verify that the Linkerd pods are up by running:
```
$ kubectl get po -n l5d-system
NAME READY STATUS RESTARTS AGE
l5d-0w0f4 2/2 Running 0 5s
l5d-3cmfp 2/2 Running 0 5s
l5d-dj1sm 2/2 Running 0 5s
```
And take a look at the admin dashboard (this command assumes your cluster supports LoadBalancer services; it may take a few minutes for the ingress LB to become available):
```
$ L5D_SVC_IP=$(kubectl get svc l5d -n l5d-system -o jsonpath="{.status.loadBalancer.ingress[0].*}")
$ open http://$L5D_SVC_IP:9990 # on OS X
```
Or if external load balancer support is unavailable for the cluster, use hostIP:
```
$ HOST_IP=$(kubectl get po -l app=l5d -n l5d-system -o jsonpath="{.items[0].status.hostIP}")
$ L5D_SVC_IP=$HOST_IP:$(kubectl get svc l5d -n l5d-system -o 'jsonpath={.spec.ports[0].nodePort}')
open http://$HOST_IP:$(kubectl get svc l5d -n l5d-system -o 'jsonpath={.spec.ports[1].nodePort}') # on OS X
```
Let's take a closer look at the ConfigMap we just deployed. It stores the `config.yaml` file that Linkerd mounts on startup.
```
$ kubectl get cm l5d-config -n l5d-system -o yaml
apiVersion: v1
data:
config.yaml: |-
namers:
- kind: io.l5d.k8s
routers:
- protocol: http
identifier:
kind: io.l5d.ingress
servers:
- port: 80
ip: 0.0.0.0
clearContext: true
dtab: /svc => /#/io.l5d.k8s
usage:
orgId: linkerd-examples-ingress
```
You can see that this config defines an HTTP router on port 80 that identifies incoming requests using ingress resources (via the [io.l5d.ingress identifier](https://linkerd.io/config/1.0.0/linkerd/index.html#ingress-identifier)). The resulting namespace, port, and service name are then passed to the [Kubernetes namer](https://linkerd.io/config/1.0.0/linkerd/index.html#kubernetes-service-discovery) for resolution. We've also set `clearContext` to `true` in order to remove any incoming Linkerd context headers from untrusted sources.
---
## Step 2: Deploy the Hello World application
Now it's time to deploy our application, so that our ingress controller can route traffic to us. We'll deploy a simple app consisting of a hello and a world service.
```
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world.yml
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/world-v2.yml
```
You can again verify that the pods are up and running:
```
$ kubectl get po
NAME READY STATUS RESTARTS AGE
hello-0v0vx 1/1 Running 0 5s
hello-84wfp 1/1 Running 0 5s
hello-mrcfr 1/1 Running 0 5s
world-v1-105tl 1/1 Running 0 5s
world-v1-1t6jc 1/1 Running 0 5s
world-v1-htwsw 1/1 Running 0 5s
world-v2-5tl10 1/1 Running 0 5s
world-v2-6jc1t 1/1 Running 0 5s
world-v2-wswht 1/1 Running 0 5s
```
At this point, if you try to send an ingress request, you'll see something like:
```
$ curl $L5D_SVC_IP
Unknown destination: Request("GET /", from /184.23.234.210:58081) / no ingress rule matches
```
---
## Step 3: Create the Ingress resource
In order for our Linkerd ingress controller to function properly, we need to create an [ingress resource](https://kubernetes.io/docs/concepts/services-networking/ingress/) that uses it.
```
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world-ingress.yml
```
Verify the resource:
```
$ kubectl get ingress
NAME HOSTS ADDRESS PORTS AGE
hello-world world.v2 80 7s
```
This "hello-world" ingress resource references our backends (we're only using `world-v1` and `world-v2` for this demo):
```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: hello-world
annotations:
kubernetes.io/ingress.class: "linkerd"
spec:
backend:
serviceName: world-v1
servicePort: http
rules:
- host: world.v2
http:
paths:
- backend:
serviceName: world-v2
servicePort: http
```
The resource:
- Specifies `world-v1` as the default backend to route to if a request does not match any of the rules defined.
- Specifies a rule where all requests with the host header `world.v2` will be routed to the `world-v2` service.
- Sets the `kubernetes.io/ingress.class` annotation to "linkerd". Note that this annotation is only required if there are multiple ingress controllers running in the cluster. GCE runs one by default; you may choose to disable it by [following these instructions](https://github.com/kubernetes/ingress/blob/master/docs/faq/gce.md#how-do-i-disable-the-gce-ingress-controller).
That's it! You can exercise these rules by curling the IP assigned to the l5d service load balancer.
```
$ curl $L5D_SVC_IP
world (10.0.4.7)!
$ curl -H "Host: world.v2" $L5D_SVC_IP
earth (10.0.1.5)!
```
While this example starts with totally new instances, it's just as easy to add an ingress identifier router to a pre-existing Linkerd setup. Also, although we employ a DaemonSet here (to be consistent with the rest of the Service Mesh for Kubernetes series), utilizing a Kubernetes [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) for a Linkerd ingress controller works just as well. Using Deployments is left as an exercise for the reader. :)
---
## Ingress with TLS
Linkerd already supports TLS for clients and servers within the cluster. Setting up TLS is described in much more detail in [Part III of this series](/tutorials/part-three). In this ingress controller configuration, Linkerd expects certs to be defined in a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/) named `ingress-certs` and to follow [the format described as part of the ingress user guide](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls). Note that there's no need to specify a TLS section as part of the ingress resource: Linkerd doesn't implement that section of the resource. All TLS configuration happens as part of the `l5d-config` ConfigMap.
The Linkerd config remains largely unchanged, save updating the server port to `443` and adding TLS file paths:
```
...
servers:
- port: 443
ip: 0.0.0.0
clearContext: true
tls:
certPath: /io.buoyant/linkerd/certs/tls.crt
keyPath: /io.buoyant/linkerd/certs/tls.key
...
```
The l5d DaemonSet now mounts a secret volume with the expected name: `ingress-certs`
```
spec:
volumes:
- name: certificates
secret:
secretName: ingress-certs
...
containers:
- name: l5d
...
ports:
- name: tls
containerPort: 443
hostPort: 443
...
volumeMounts:
- name: "certificates"
mountPath: "/io.buoyant/linkerd/certs"
readOnly: true
...
```
And the updated Service config exposes port `443`.
A reminder that the certificates we're using here are for testing purposes only! Create the Secret, delete the DaemonSet and ConfigMap, and re-apply the ingress controller config:
```
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/ingress-certificates.yml -n l5d-system
$ kubectl delete ds/l5d configmap/l5d-config -n l5d-system
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd-tls-ingress-controller.yml -n l5d-system
```
You should now be able to make an encrypted request:
```
# Example requires this development cert: https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/certificates/cert.pem
# The cert expects the "hello.world" host, so we add an /etc/hosts entry, e.g.:
# 104.198.196.230 hello.world
# where "104.198.196.230" is the ip stored in $L5D_SVC_IP
$ curl --cacert cert.pem -H "Host: world.v2" https://hello.world
earth (10.0.1.5)!
```
## Conclusion
Linkerd provides a ton of benefits as an edge router. In addition to the dynamic routing and TLS termination described in this post, it also [pools connections](https://en.wikipedia.org/wiki/Connection_pool), [load balances dynamically](https://buoyant.io/beyond-round-robin-load-balancing-for-latency/), [enables circuit breaking](https://buoyant.io/making-microservices-more-resilient-with-circuit-breaking/), and supports [distributed tracing](http://buoyant.io/a-service-mesh-for-kubernetes-part-vii-distributed-tracing-made-easy/). Using the Linkerd ingress controller and the [Kubernetes configuration](https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd-ingress-controller.yml) referenced in this post, you gain access to all these features in an easy to use, Kubernetes-native approach. Best of all, this method works seamlessly with the rest of the service mesh, allowing for operation, visibility, and high availability in virtually any cloud architecture.
{{< note >}}
There are a myriad of ways to deploy Kubernetes and different environments support different features. Learn more about deployment differences [here](https://discourse.linkerd.io/t/flavors-of-kubernetes).
{{< /note >}}
The [ingress identifier is new](https://github.com/linkerd/linkerd/pull/1116), so we'd love to get your thoughts on what features you want from an ingress controller. You can find us in the [Linkerd community Slack](https://slack.linkerd.io/) or on the [linkerd discourse](https://discourse.linkerd.io/).
## Acknowledgements
Big thanks to [Alex Leong](https://twitter.com/adlleong) and [Andrew Seigner](https://twitter.com/siggy) for feedback on this post.

View File

@ -1,133 +0,0 @@
+++
date = "2017-06-20T13:43:54-07:00"
title = "Part XI: Egress"
description = "explore how Linkerd can be used as an egress as well, handling requests from services within the cluster to services running outside of the #cluster, whether those are legacy non-Kubernetes systems or third-party APIs outside the firewall."
weight = 12
draft = true
aliases = [
"/tutorials_staging/part-eleven"
]
[menu.docs]
parent = "tutorials"
+++
Author: Alex Leong
In previous posts in this series, we've demonstrated how Linkerd can act as an _ingress_ to a Kubernetes cluster, handling all requests coming from outside of the cluster and sending them to the appropriate Kubernetes services.
In this post we'll explore how Linkerd can be used as an egress as well, handling requests from services within the cluster to services running outside of the cluster, whether those are legacy non-Kubernetes systems or third-party APIs outside the firewall.
Using the Linkerd service mesh for egress gives you a uniform, consistent model of request handling independent of where those requests are destined. It also lets you apply the benefits of Linkerd, such as adaptive load balancing, observability, circuit breakers, dynamic routing, and TLS, to services which are running outside of Kubernetes.
---
## Egress naming with dns
Linkerd provides a uniform naming abstraction that encompasses many different service discovery systems, including Kubernetes, Marathon, Consul, and ZooKeeper, as well as DNS and raw IP address resolution. When a service asks Linkerd to route a request to "foo", Linkerd can be configured to resolve the name "foo" in a variety of ways, including arbitrary combinations of any of the above options. (For more on this, read about Linkerd's powerful and sophisticated routing language, called [dtabs](https://linkerd.io/in-depth/dtabs/).)
In Kubernetes terms, most egress names resolve to non-Kubernetes services and must be resolved via DNS. Thus, the most straightforward way to add egress to the Linkerd service mesh is to add DNS lookup as a fallback mechanism. To accomplish this, we'll start with our standard service mesh configuration, but tweak it with an additional rule: if we get a request for a service that doesn't exist in Kubernetes, we'll treat the service name as an external DNS name and send the request to that external address.
In the following sections, we'll talk about how this actually works in terms of Linkerd's configuration. If you just want to play with the end result, jump right to the "Trying it out" section at the bottom.
---
## Splitting the Kubernetes namer
There are a number of changes we need to make to the Linkerd config we've been developing in earlier examples to make this happen.
In our basic service mesh config, we attached the DaemonSet transformer to the outgoing router's interpreter. This was so that all requests from one service would be sent to the Linkerd DaemonSet pod of the destination service (read more about that in [Part II](/tutorials/part-two) of this series). However, this is not appropriate for external services because they are running outside of Kubernetes and don't have a corresponding Linkerd DaemonSet pod. Therefore, we must take the DaemonSet transformer off of the interpreter and put it directly on the `io.l5d.k8s` namer. This makes the DaemonSet transformer apply only to Kubernetes names and not to external ones. We must also add a second `io.l5d.k8s` namer without the DaemonSet transformer for the incoming router to use.
```
namers:
# This namer has the daemonset transformer "built-in"
- kind: io.l5d.k8s
prefix: /io.l5d.k8s.ds # We reference this in the outgoing router's dtab
transformers:
- kind: io.l5d.k8s.daemonset
namespace: default
port: incoming
service: l5d
# The "basic" k8s namer. We reference this in the incoming router's dtab
- kind: io.l5d.k8s
```
---
## Updating the dtab
With those namers in place, we can now update the outgoing dtab to use the DaemonSet-transformed Kubernetes namer and add dtab fallback rules to treat the service name as a DNS name. We use the `io.buoyant.porthostPfx` rewriting namer to extract the port number from the hostname (or use 80 by default if unspecified).
```
dtab: |
/ph => /$/io.buoyant.rinet ; # Lookup the name in DNS
/svc => /ph/80 ; # Use port 80 if unspecified
/srv => /$/io.buoyant.porthostPfx/ph ; # Attempt to extract the port from the hostname
/srv => /#/io.l5d.k8s.ds/default/http ; # Lookup the name in Kubernetes, use the linkerd daemonset pod
/svc => /srv ;
/svc/world => /srv/world-v1 ;
```
Recall that later dtab entries have higher priority so this will prefer:
- The linkerd daemonset pod of the Kubernetes service, if it exists
- An external DNS service on the specified port
- An external DNS service on port 80 if no port specified
{{< fig src="/images/tutorials/buoyant-k8s-egress-dtab.png" >}}
---
## Don't forget TLS!
Most services running on the open internet don't allow plain HTTP. We'll use Linkerd's fine-grained client configuration to add TLS to all egress requests that use port 443.
```
client:
kind: io.l5d.static
configs:
- prefix: "/$/io.buoyant.rinet/443/{service}"
tls:
commonName: "{service}"
```
Putting all that together gives us this config. Let's try it out.
---
## Trying it out
Deploy our usual `hello world` microservice and updated Linkerd service mesh using these commands:
```
kubectl apply -f https://raw.githubusercontent.com/BuoyantIO/linkerd-examples/master/k8s-daemonset/k8s/hello-world.yml
kubectl apply -f https://raw.githubusercontent.com/BuoyantIO/linkerd-examples/master/k8s-daemonset/k8s/linkerd-egress.yaml
```
Once Kubernetes provisions an external LoadBalancer IP for Linkerd, we can test requests to the `hello` and `world` services as well as external services running outside of Kubernetes.
(Note that the examples in these blog posts assume k8s is running on GKE (e.g. external loadbalancer IPs are available, no CNI plugins are being used). Slight modifications may be needed for other environments—see our [Flavors of Kubernetes help page](https://discourse.linkerd.io/t/flavors-of-kubernetes/53) for environments like Minikube or CNI configurations with Calico/Weave.)
```
$ L5D_INGRESS_LB=$(kubectl get svc l5d -o jsonpath="{.status.loadBalancer.ingress[0].*}")
```
A request to a Kubernetes service:
```
$ curl $L5D_INGRESS_LB:4140 -H "Host: hello"
Hello (10.196.1.242) world (10.196.1.243)!!
```
A request to an external service, using port 80 by default:
```
$ curl -sI $L5D_INGRESS_LB:4140/index.html -H "Host: linkerd.io" | head -n 1
HTTP/1.1 301 Moved Permanently
```
A request to an external service using an explicit port and HTTPS:
```
$ curl -sI $L5D_INGRESS_LB:4140/index.html -H "Host: linkerd.io:443" | head -n 1
HTTP/1.1 200 OK
```
## Caveat
In the above configuration, we assume that the Linkerd DaemonSet pods are able to route to the external services in the first place. If this is not the case, e.g. if you have strict firewall rules that restrict L3/L4 traffic, you could instead set up a dedicated egress cluster of Linkerd instances running on nodes with access to the external services. All egress requests would then need to be sent to the egress cluster.
## Conclusion
By using Linkerd for egress, external services are able to share the same benefits that services running inside of Kubernetes get from the Linkerd service mesh. These include adaptive load balancing, circuit breaking, observability, dynamic routing, and TLS initiation. Most importantly, Linkerd gives you a uniform, consistent model of request handling and naming that's independent of whether those requests are destined for internal services, or for external, third-party APIs.
If you have any questions about using Linkerd for egress, please come ask on [Discourse](https://discourse.linkerd.io/) or [Slack](https://slack.linkerd.io/)!

View File

@ -1,233 +0,0 @@
+++
date = "2016-11-18T13:43:54-07:00"
title = "Part V: Dogfood environments, ingress and edge routing"
description = "In this post well show you how to use a service mesh of linkerd instances to handle ingress traffic on Kubernetes, distributing traffic #across every instance in the mesh."
weight = 6
draft = true
aliases = [
"/tutorials_staging/part-five"
]
[menu.docs]
parent = "tutorials"
+++
Author: Risha Mars
In this post we'll show you how to use a service mesh of linkerd instances to handle ingress traffic on Kubernetes, distributing traffic across every instance in the mesh. We'll also walk through an example that showcases linkerd's advanced routing capabilities by creating a dogfood environment that routes certain requests to a newer version of the underlying application, e.g. for internal, pre-release testing.
**Update 2017-04-19: this post is about using linkerd as an ingress point for traffic to a Kubernetes network. As of [0.9.1](https://github.com/linkerd/linkerd/releases/tag/0.9.1), linkerd supports the Kubernetes Ingress resource directly, which is an alternate, and potentially simpler starting point for some of the use cases in this article. For information on how to use linkerd as a [Kubernetes ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-controllers), please see Sarah's blog post, [Linkerd as an ingress controller](https://buoyant.io/a-service-mesh-for-kubernetes-part-viii-linkerd-as-an-ingress-controller/).**
---
In previous installments of this series, we've shown you how you can use linkerd to capture [top-line service metrics](/tutorials/part-one), transparently [add TLS](https://buoyant.io/a-service-mesh-for-kubernetes-part-iii-encrypting-all-the-things//) across service calls, and [perform blue-green deploys](/tutorials/part-four). These posts showed how using linkerd as a service mesh in environments like Kubernetes adds a layer of resilience and performance to internal, service-to-service calls. In this post, we'll extend this model to ingress routing.
Although the examples in this post are Kubernetes-specific, we won't use the built-in [Ingress Resource](https://kubernetes.io/docs/concepts/services-networking/ingress/) that Kubernetes provides (for this, see [Sarah's post](https://buoyant.io/a-service-mesh-for-kubernetes-part-viii-linkerd-as-an-ingress-controller/)). While Ingress Resources are a convenient way of doing basic path and host-based routing, at the time of writing, they are fairly limited. In the examples below, we'll be reaching far beyond what they provide.
---
## Step 1: Deploy the Linkerd service mesh
Starting with our basic linkerd service mesh Kubernetes config from the previous articles, we'll make two changes to support ingress: we'll modify the linkerd config to add an additional logical router, and we'll tweak the VIP in the Kubernetes Service object around linkerd. (The full config is here: [linkerd-ingress.yml](https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd-ingress.yml).)
Here's the new `ingress` logical router on linkerd instances that will handle ingress traffic and route it to the corresponding services:
```
routers:
- protocol: http
label: ingress
dtab: |
/srv => /#/io.l5d.k8s/default/http ;
/domain/world/hello/www => /srv/hello ;
/domain/world/hello/api => /srv/api ;
/host => /$/io.buoyant.http.domainToPathPfx/domain ;
/svc => /host ;
interpreter:
kind: default
transformers:
- kind: io.l5d.k8s.daemonset
namespace: default
port: incoming
service: l5d
servers:
- port: 4142
ip: 0.0.0.0
```
In this config, we're using linkerd's routing syntax, [dtabs](https://linkerd.io/in-depth/dtabs/), to route requests from domain to service: in this case from "api.hello.world" to the `api` service, and from "www.hello.world" to the `world` service. For simplicity's sake, we've added one rule per domain, but this mapping can easily be generified for more complex setups. (If you're a linkerd config aficionado, we're accomplishing this behavior by combining linkerd's [default header token](https://linkerd.io/config/1.0.0/linkerd/index.html#header-identifier) identifier to route on the Host header, the [`domainToPathPfx`](https://linkerd.io/config/1.0.0/linkerd/index.html#domaintopathpfx) namer to turn dotted hostnames into hierarchical paths, and the [`io.l5d.k8s.daemonset` transformer](https://linkerd.io/config/1.0.0/linkerd/index.html#daemonset-kubernetes) to send requests to the corresponding host-local linkerd.)
We've added this ingress router to every linkerd instance; in true service mesh fashion, we'll fully distribute ingress traffic across these instances so that no instance is a single point of failure.
We also need to modify our k8s Service object to replace the `outgoing` VIP with an `ingress` VIP on port 80. This will allow us to send ingress traffic directly to the linkerd service mesh, mainly for debugging purposes, since this traffic will not be sanitized before hitting linkerd. (In the next step, we'll fix this.)
The Kubernetes change looks like this:
```
---
apiVersion: v1
kind: Service
metadata:
name: l5d
spec:
selector:
app: l5d
type: LoadBalancer
ports:
- name: ingress
port: 80
targetPort: 4142
- name: incoming
port: 4141
- name: admin
port: 9990
```
All of the above can be accomplished in one fell swoop by running this command to apply the [full linkerd service mesh plus ingress Kubernetes config](https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd-ingress.yml):
```
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd-ingress.yml
```
---
## Deploy the services
For services in this example, we'll use the same [hello and world configs](https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world.yml) from the previous blog posts, and we'll add two new services: an [api service](https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/api.yml), which calls both `hello` and `world`, and a new version of the world service, `world-v2`, which will return the word "earth" rather than "world"; our growth hacker team has assured us their A/B tests show this change will increase engagement tenfold.
The following commands will deploy the three [hello world services](https://github.com/linkerd/linkerd-examples/tree/master/docker/helloworld) to the default namespace. These apps rely on the nodeName supplied by the [Kubernetes downward API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) to find Linkerd. To check if your cluster supports nodeName, you can run this test job:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/node-name-test.yml
```
And then look at its logs:
```
kubectl logs node-name-test
```
If you see an IP, great! Go ahead and deploy the hello world app using:
```
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world.yml
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/api.yml
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/world-v2.yml
```
If instead you see a "server can't find …" error, deploy the hello-world legacy version that relies on hostIP instead of nodeName:
```
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world-legacy.yml
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/api-legacy.yml
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/world-v2.yml
```
At this point we should be able to test the setup by sending traffic through the ingress Kubernetes VIP. In the absence of futzing with DNS, we'll set a Host header manually on the request:
```
$ INGRESS_LB=$(kubectl get svc l5d -o jsonpath="{.status.loadBalancer.ingress[0].*}")
$ curl -s -H "Host: www.hello.world" $INGRESS_LB
Hello (10.0.5.7) world (10.0.4.7)!!
$ curl -s -H "Host: api.hello.world" $INGRESS_LB
{"api_result":"api (10.0.3.6) Hello (10.0.5.4) world (10.0.1.5)!!"}
```
Or if external load balancer support is unavailable for the cluster, use hostIP:
```
$ INGRESS_LB=$(kubectl get po -l app=l5d -o jsonpath="{.items[0].status.hostIP}"):$(kubectl get svc l5d -o 'jsonpath={.spec.ports[0].nodePort}')
```
Success! We've set up linkerd as our ingress controller, and we've used it to route requests received on different domains to different services. And as you can see, production traffic is hitting the `world-v1` service; we aren't ready to bring `world-v2` out just yet.
---
## Step 3: a layer of nginx
At this point we have functioning ingress. However, we're not ready for production just yet. For one thing, our ingress router doesn't strip headers from requests, which means that external requests may include headers that we do not want to accept. For instance, linkerd allows setting the `l5d-dtab` header to [apply routing rules per-request](https://linkerd.io/features/routing/#per-request-routing). This is a useful feature for ad-hoc staging of new services, but it's probably not appropriate for calls from the outside world!
For example, we can use the `l5d-dtab` header to override the routing logic to use `world-v2` rather than the production `world-v1` service the outside world:
```
$ curl -H "Host: www.hello.world" -H "l5d-dtab: /host/world => /srv/world-v2;" $INGRESS_LB
Hello (10.100.4.3) earth (10.100.5.5)!!
```
Note the **earth** in the response, denoting the result of the `world-v2` service. Thats cool, but definitely not the kind of power we want to give just anyone!
We can address this (and other issues, such as serving static files) by adding [nginx](https://nginx.com/) to the mix. If we configure nginx to strip incoming headers before proxying requests to the linkerd ingress route, well get the best of both worlds: an ingress layer that is capable of safely handling external traffic, and linkerd doing dynamic, service-based routing.
Lets add nginx to the cluster. Well configure it using [this nginx.conf](https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/nginx.yml). Well use the `proxy_pass` directive under our virtual servers `www.hello.world` and `api.hello.world` to send requests to the linkerd instances, and, for maximum fanciness, well strip [linkerds context](https://linkerd.io/config/0.8.3/linkerd/index.html#context-headers) headers using the `more_clear_input_headers` directive (with wildcard matching) provided by the [Headers More](https://github.com/openresty/headers-more-nginx-module) module.
(Alternatively, we could avoid third-party nginx modules by using nginxs `proxy_set_header` directive to clear headers. Wed need separate entries for each `l5d-ctx-` header as well as the `l5d-dtab` and `l5d-sample` headers.)
Note that as of [linkerd 0.9.0](https://buoyant.io/linkerd-0-9-0-released/), we can clear incoming `l5d-*` headers by setting `clearContext: true` on the ingress router [server](https://linkerd.io/config/1.0.0/linkerd/index.html#server-parameters). However, nginx has many features we can make use of (as youll see presently), so it is still valuable to use nginx in conjunction with linkerd.
For those of you following along at home, weve published an nginx Docker image with the _Headers More_ module installed ([Dockerfile here](https://raw.githubusercontent.com/linkerd/linkerd-examples/master/docker/nginx/Dockerfile)) as [buoyantio/nginx:1.11.5](https://hub.docker.com/r/buoyantio/nginx/). We can deploy this image with our config above using this [Kubernetes config](https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/nginx.yml):
```
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/nginx.yml
```
After waiting a bit for the external IP to appear, we can test that nginx is up by hitting the simple test endpoint in the nginx.conf:
```
$ INGRESS_LB=$(kubectl get svc nginx -o jsonpath="{.status.loadBalancer.ingress[0].*}")
$ curl $INGRESS_LB
200 OK
```
Or if external load balancer support is unavailable for the cluster, use hostIP:
```
$ INGRESS_LB=$(kubectl get po -l app=nginx -o jsonpath="{.items[0].status.hostIP}"):$(kubectl get svc nginx -o 'jsonpath={.spec.ports[0].nodePort}')
```
We should be able to now send traffic to our services through nginx:
```
$ curl -s -H "Host: www.hello.world" $INGRESS_LB
Hello (10.0.5.7) world (10.0.4.7)!!
$ curl -s -H "Host: api.hello.world" $INGRESS_LB
{"api_result":"api (10.0.3.6) Hello (10.0.5.4) world (10.0.1.5)!!"}
```
Finally, lets try our header trick and attempt to communicate directly with the world-v2 service:
```
$ curl -H "Host: www.hello.world" -H "l5d-dtab: /host/world => /srv/world-v2;" $INGRESS_LB
Hello (10.196.1.8) world (10.196.2.13)!!
```
Great! No more **earth**. Nginx is sanitizing external traffic.
---
## Step 4: Time for some delicious dogfood!
Ok, were ready for the good part: lets set up a dogfood environment that uses the `world-v2` service, but only for some traffic!
For simplicity, well target traffic that sets a particular cookie, `special_employee_cookie`. In practice, you probably want something more sophisticated than this—authenticate it, require that it come from the corp network IP range, etc.
With nginx and linkerd installed, accomplishing this is quite simple. Well use nginx to check for the presence of that cookie, and set a dtab override header for linkerd to adjust its routing. The relevant nginx config looks like this:
```
if ($cookie_special_employee_cookie ~* "dogfood") {
set $xheader "/host/world => /srv/world-v2;";
}
proxy_set_header 'l5d-dtab' $xheader;
```
If youve been following the steps above, the deployed nginx already contains this configuration. We can test it like so:
```
$ curl -H "Host: www.hello.world" --cookie "special_employee_cookie=dogfood" $INGRESS_LB
Hello (10.196.1.8) earth (10.196.2.13)!!
```
The system works! When this cookie is set, youll be in dogfood mode. Without it, youll be in regular, production traffic mode. Most importantly, dogfood mode can involve new versions of services that appear _anywhere_ in the service stack, even many layers deep—as long as service code [forwards linkerd context headers](https://linkerd.io/config/1.0.0/linkerd/index.html#context-headers), the linkerd service mesh will take care of the rest.
---
## Conclusion
In this post, we saw how to use linkerd to provide powerful and flexible ingress to a Kubernetes cluster. Weve demonstrated how to deploy a nominally production-ready setup that uses linkerd for service routing. And weve demonstrated how to use some of the advanced routing features of linkerd to decouple the _traffic-serving_ topology from the deployment topology, allowing for the creation of dogfood environments without separate clusters or deploy-time complications.
{{< note >}}
There are a myriad of ways to deploy Kubernetes and different environments support different features. Learn more about deployment differences [here](https://discourse.linkerd.io/t/flavors-of-kubernetes).
{{< /note >}}
For more about running linkerd in Kubernetes, or if you have any issues configuring ingress in your setup, feel free to stop by our [linkerd community Slack](http://slack.linkerd.io/), ask a question on [Discourse](https://discourse.linkerd.io/), or [contact us directly](https://linkerd.io/overview/help/)!

View File

@ -17,7 +17,7 @@ pipeline.
---
## Traffic shifting with per-request routing
## Traffic shifting with per-request routing
Beyond service discovery, top-line metrics, and TLS, Linkerd also has a powerful
routing language, called dtabs, that can be used to alter the ways that
@ -57,10 +57,10 @@ automation server, well deploy a new version of the world service using the
## Step 0: Setup and Prerequisites
First, youll need a clean Kubernetes cluster and a functioning `kubectl` command on
your local machine. This tutorial requires a fresh cluster and if you've followed
the previous tutorial, you'll need to tear down your currently running Linkerd
daemonset, because of conflicting configs.
First, youll need a clean Kubernetes cluster and a functioning `kubectl`
command on your local machine. This tutorial requires a fresh cluster and if
you've followed the previous tutorial, you'll need to tear down your currently
running Linkerd daemonset, because of conflicting configs.
**This tutorial will assume you're running on
[GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-container-cluster).
@ -115,7 +115,7 @@ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/mast
You can confirm that installation was successful by viewing Namerds admin page
(note that it may take a few minutes for the ingress IP to become available):
##### GKE
### GKE
```bash
NAMERD_INGRESS_LB=$(kubectl get svc namerd -o jsonpath="{.status.loadBalancer.ingress[0].*}")
@ -140,7 +140,7 @@ The utility uses the `NAMERCTL_BASE_URL` environment variable to connect to
Namerd. In order to connect to the version of Namerd that we just deployed to
Kubernetes, set the variable as follows:
##### GKE
### GKE
```bash
export NAMERCTL_BASE_URL=http://$NAMERD_INGRESS_LB:4180
@ -165,7 +165,9 @@ uniqueness. Well use this dtab entry to safely introduce new versions of the
world service into production.
---
## Step 2: Install Linkerd
Next well install Linkerd and configure it to resolve routes using Namerd. To
install Linkerd as a DaemonSet (i.e., one instance per host) in the default
Kubernetes namespace, run:
@ -177,7 +179,7 @@ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/mast
You can confirm that installation was successful by viewing Linkerds admin UI
(note that it may take a few minutes for the ingress IP to become available):
##### GKE
### GKE
```bash
L5D_INGRESS_LB=$(kubectl get svc l5d -o jsonpath="{.status.loadBalancer.ingress[0].*}")
@ -189,6 +191,7 @@ Well use the admin UI to verify steps of the blue-green deploy.
---
## Step 3: Install the sample apps
Now well install the hello and world apps in the default namespace. These apps
rely on the nodeName supplied by the
[Kubernetes downward API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/)
@ -202,7 +205,7 @@ At this point, we actually have a functioning service mesh and an application
that makes use of it. You can see the entire setup in action by sending traffic
through Linkerds external IP:
##### GKE
### GKE
```bash
$ curl $L5D_INGRESS_LB
@ -215,10 +218,12 @@ above, with the IPs of the pods that served the request.
---
## Continuous deployment
Well now use Jenkins to perform blue-green deploys of the “world” service that
we deployed in the previous step.
### Setup Jenkins
Lets start by deploying the [buoyantio/jenkins-plus](https://hub.docker.com/r/buoyantio/jenkins-plus/)
Docker image to our Kubernetes cluster. This image provides the base `jenkins`
image, along with the `kubectl` and `namerctl` binaries that we need, as well as
@ -236,7 +241,7 @@ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/mast
You can confirm that installation was successful by opening up the Jenkins web
UI (note that it may take a few minutes for the ingress IP to become available):
##### GKE
#### GKE
```bash
JENKINS_LB=$(kubectl get svc jenkins -o jsonpath="{.status.loadBalancer.ingress[0].*}")
@ -299,7 +304,8 @@ To start the deploy, click into the “hello_world” job in the Jenkins UI, and
then click “Build with the parameters” in the sidebar. Youll be taken to a page
that lets you customize the deploy, and it will look something like this:
{{< fig src="/images/tutorials/buoyant-pipeline-build-parameters.png" title="Jenkins deploy customization screen." >}}
{{< fig src="/images/tutorials/buoyant-pipeline-build-parameters.png"
title="Jenkins deploy customization screen." >}}
Change the value of the `gitRepo` form field to point to your fork of the
`linkerd-examples` repo, and then click the “Build” button. Note that if you
@ -349,7 +355,8 @@ request to make sure the new version can be reached. If the test request
succeeds, it pauses the deploy and waits for us to acknowledge that the newly
deployed version looks correct before proceeding.
{{< fig src="/images/tutorials/buoyant-pipeline-integration-testing.png" title="Integration success message." >}}
{{< fig src="/images/tutorials/buoyant-pipeline-integration-testing.png"
title="Integration success message." >}}
At this point, we want to make sure that the new pods are running as expected—not
just by themselves, but in conjunction with the rest of the production environment.
@ -391,12 +398,14 @@ clicking the “Ok, Im done with manual testing” button in the Jenkins UI.
---
### Shift traffic (10%)
After some manual testing, were ready to start the blue-green deployment by
sending 10% of production traffic to the newly deployed version of the service.
The script makes the change in routing policy and again pauses, asking us to
confirm that everything looks OK with 10% traffic before proceeding.
{{< fig src="/images/tutorials/buoyant-pipeline-shift-traffic-10.png" title="Shifting traffic by 10%." >}}
{{< fig src="/images/tutorials/buoyant-pipeline-shift-traffic-10.png"
title="Shifting traffic by 10%." >}}
Note that if the user aborts on any pipeline step, the script assumes there was
something wrong with the new service, and automatically reverts the routing
@ -425,7 +434,8 @@ Looking good! Now is also a good time to check Linkerds admin dashboard, to
verify that the new service is healthy. If your application were receiving a
small amount of steady traffic, then the dashboard would look like this:
{{< fig src="/images/tutorials/buoyant-pipeline-admin-large-1024x737.png" title="Pipeline administration UI." >}}
{{< fig src="/images/tutorials/buoyant-pipeline-admin-large-1024x737.png"
title="Pipeline administration UI." >}}
We can see right away that the `world-v2` service is taking roughly 10% of
traffic, with 100% success rate. If everything looks good, we can proceed to the
@ -441,7 +451,8 @@ service. For a concise example, were moving immediately to 100% of traffic, b
in a typical deployment you could include additional intermediary percentages as
separate steps in the pipeline.
{{< fig src="/images/tutorials/buoyant-pipeline-shift-traffic-100.png" title="Pipeline administration UI." >}}
{{< fig src="/images/tutorials/buoyant-pipeline-shift-traffic-100.png"
title="Pipeline administration UI." >}}
We can verify that the new service is serving traffic by sending it a request
without a dtab override header:
@ -458,12 +469,14 @@ looks good” button in the Jenkins UI.
---
### Cleanup
In the final step, the script finalizes the deploy by making the routing rules
to route traffic to the new version of the service permanent. It also tears down
the previous version of the service that was still running in our cluster but
not receiving any traffic.
{{< fig src="/images/tutorials/buoyant-pipeline-cleanup.png" title="Pipeline cleanup." >}}
{{< fig src="/images/tutorials/buoyant-pipeline-cleanup.png"
title="Pipeline cleanup." >}}
The final version of Namerds dtab is now:
@ -494,6 +507,7 @@ promote it to the current version when the deploy successfully completes.
---
## Conclusion
In this tutorial, weve shown a basic workflow incorporating Linkerd, Namerd,
and Jenkins to progressively shift traffic from an old version to a new version
of a service as the final step of a continuous deployment pipeline. Weve shown
@ -514,4 +528,4 @@ anything else about Linkerd, feel free to stop by our [Linkerd forum](https://di
{{< note >}}
There are a myriad of ways to deploy Kubernetes and different environments
support different features. Learn more about deployment differences [here](https://discourse.linkerd.io/t/flavors-of-kubernetes).
{{< /note >}}
{{< /note >}}

View File

@ -1,140 +0,0 @@
+++
date = "2017-04-19T13:43:54-07:00"
title = "Part IX: gRPC for fun and profit"
description = "As of Linkerd 0.8.5, released earlier this year, Linkerd supports gRPC and HTTP/2!"
weight = 10
draft = true
[menu.docs]
parent = "tutorials"
+++
Author: Risha Mars
As of Linkerd 0.8.5, released earlier this year, [Linkerd supports gRPC and HTTP/2](https://buoyant.io/http2-grpc-and-linkerd/)! These powerful protocols can provide significant benefits to applications that make use of them. In this post, well demonstrate how to use Linkerd with gRPC, allowing applications that speak gRPC to take full advantage of Linkerds load balancing, service discovery, circuit breaking, and distributed tracing logic.
---
For this post well use our familiar `hello world` microservice app and configs, which can be found in the `linkerd-examples` repo ([k8s configs here](https://github.com/BuoyantIO/linkerd-examples/tree/master/k8s-daemonset) and [`hello world` code here](https://github.com/BuoyantIO/linkerd-examples/tree/master/docker/helloworld)).
The `hello world` application consists of two components—a `hello` service which calls a `world` service to complete a request. hello and world use gRPC to talk to each other. Well deploy Linkerd as a DaemonSet (so one Linkerd instance per host), and a request from `hello` to `world` will look like this:
{{< fig src="/images/tutorials/buoyant-grpc-daemonset-1024x617.png" title="DaemonSet deployment model: one Linkerd per host." >}}
As shown above, when the `hello` service wants to call `world`, the request goes through the _outgoing_ router of its host-local Linkerd, which does not send the request directly to the destination `world` service, but to a Linkerd instance running on the same host as `world` (on its _incoming_ router). That Linkerd instance then sends the request to the world service on its host. This three-hop model allows Linkerd to decouple the applications protocol from the transport protocol—for example, [by wrapping cross-node connections in TLS](https://buoyant.io/a-service-mesh-for-kubernetes-part-iii-encrypting-all-the-things/). (For more on this deployment topology, see Part II of this series, [Pods are great until theyre not](https://buoyant.io/a-service-mesh-for-kubernetes-part-ii-pods-are-great-until-theyre-not/).)
---
## Trying this at home
Lets see this setup in action! Deploy the `hello` and `world` to the default k8s namespace. These apps rely on the nodeName supplied by the [Kubernetes downward API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) to find Linkerd. To check if your cluster supports nodeName, you can run this test job:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/node-name-test.yml
```
And then looks at its logs:
```
kubectl logs node-name-test
```
If you see an ip, great! Go ahead and deploy the hello world app using:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world-grpc.yml
```
If instead you see a “server cant find …” error, deploy the hello-world legacy version that relies on hostIP instead of nodeName:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world-grpc-legacy.yml
```
Also deploy Linkerd:
```
kubectl apply -f https://raw.githubusercontent.com/BuoyantIO/linkerd-examples/master/k8s-daemonset/k8s/linkerd-grpc.yml
```
Once Kubernetes provisions an external LoadBalancer IP for Linkerd, we can do some test requests! Note that the examples in these blog posts assume k8s is running on GKE (e.g. external loadbalancer IPs are available, no CNI plugins are being used). Slight modifications may be needed for other environments—see our [Flavors of Kubernetes](https://discourse.linkerd.io/t/flavors-of-kubernetes/53) help page for environments like Minikube or CNI configurations with Calico/Weave.
Well use the helloworld-client provided by the `hello world` [docker image](https://hub.docker.com/r/buoyantio/helloworld/) in order to send test gRPC requests to our `hello world` service:
```
$ L5D_INGRESS_LB=$(kubectl get svc l5d -o jsonpath="{.status.loadBalancer.ingress[0].*}")
$ docker run --rm --entrypoint=helloworld-client buoyantio/helloworld:0.1.3 $L5D_INGRESS_LB:4140
Hello (10.196.1.242) world (10.196.1.243)!!
```
Or if external load balancer support is unavailable for the cluster, use hostIP:
```
$ L5D_INGRESS_LB=$(kubectl get po -l app=l5d -o jsonpath="{.items[0].status.hostIP}")
$ docker run --rm --entrypoint=helloworld-client buoyantio/helloworld:0.1.3 $L5D_INGRESS_LB:$(kubectl get svc l5d -o 'jsonpath={.spec.ports[0].nodePort}')
Hello (10.196.1.242) world (10.196.1.243)!!
```
It works!
We can check out the Linkerd admin dashboard by doing:
```
$ open http://$L5D_INGRESS_LB:9990 # on OSX
```
Or using hostIP:
```
$ open http://$L5D_INGRESS_LB:$(kubectl get svc l5d -o 'jsonpath={.spec.ports[2].nodePort}') # on OSX
```
And thats it! We now have gRPC services talking to each other, with their HTTP/2 requests being routed through Linkerd. Now we can use all of [Linkerds awesome features](https://linkerd.io/features/), including per-request routing, load balancing, circuit-breaking, retries, TLS, distributed tracing, service discovery integration and more, in our gRPC microservice applications!
---
## How did we configure Linkerd for GRPC over HTTP/2?
Lets take a step back and examine our config. Whats different about using gRPC rather than HTTP/1.1? Actually, not very much! If you compare our [Linkerd config for routing gRPC](https://raw.githubusercontent.com/BuoyantIO/linkerd-examples/master/k8s-daemonset/k8s/linkerd-grpc.yml) with the [config for plain old HTTP/1.1](https://raw.githubusercontent.com/BuoyantIO/linkerd-examples/master/k8s-daemonset/k8s/linkerd.yml), theyre quite similar (full documentation on configuring an HTTP/2 router can be found [here](https://linkerd.io/config/0.9.1/linkerd/index.html#http-2-protocol)).
The changes youll notice are:
### Protocol
Weve changed the router `protocol` from `http` to `h2` (naturally!) and set the `experimental` flag to `true` to opt in to experimental HTTP/2 support.
```
routers:
- protocol: h2
experimental: true
```
### Identifier
We use the [header path identifier](https://linkerd.io/config/1.0.0/linkerd/index.html#http-2-header-path-identifier) to assign a logical name based on the gRPC request. gRPC clients set HTTP/2s `:path` pseudo-header to `/package.Service/Method`. The header path identifier uses this pseudo-header to assign a logical name to the request (such as `/svc/helloworld.Hello/Greeting)`. Setting `segments` to 1 means we only take the first segment of the path, in other words, dropping the gRPC `Method`. The resulting name can then be transformed via a [dtab](https://linkerd.io/in-depth/dtabs/) where we extract the gRPC service name, and route the request to a Kubernetes service of the same name. For more on how Linkerd routes requests, see our [routing](https://linkerd.io/in-depth/routing/) docs.
```
identifier:
kind: io.l5d.header.path
segments: 1
```
### DTAB
Weve adjusted the dtab slightly, now that were routing on the `/serviceName` prefix from the header path identifier. The dtab below transforms the logical name assigned by the path identifier (`/svc/helloworld.Hello`) to a name that tells the [io.l5d.k8s namer](https://linkerd.io/config/1.0.0/linkerd/index.html#kubernetes-service-discovery) to query the API for the `grpc` port of the `hello` Service in the default namespace (`/#/io.l5d.k8s/default/grpc/Hello`).
The [domainToPathPfx namer](https://linkerd.io/config/1.0.0/linkerd/index.html#domaintopathpfx) is used to extract the service name from the package-qualified gRPC service name, as seen in the dentry `/svc => /$/io.buoyant.http.domainToPathPfx/grpc`.
Delegation to `world` is similar, however weve decided to version the `world` service, so weve added the additional rule `/grpc/World => /srv/world-v1` to send requests to world-v1.
Our full dtab is now:
```
/srv => /#/io.l5d.k8s/default/grpc;
/grpc => /srv;
/grpc/World => /srv/world-v1;
/svc => /$/io.buoyant.http.domainToPathPfx/grpc;
```
---
## Conclusion
In this article, weve seen how to use Linkerd as a service mesh for gRPC requests, adding latency-aware load balancing, circuit breaking, and request-level routing to gRPC apps. Linkerd and gRPC are a great combination, especially as gRPCs HTTP/2 underpinnings provide it with powerful mechanisms like multiplexed streaming, back pressure, and cancelation, which Linkerd can take full advantage of. Because gRPC includes routing information in the request, its a natural fit for Linkerd, and makes it very easy to set up Linkerd to route gRPC requests. For more on Linkerds roadmap around gRPC, see [Olivers blog post on the topic](https://buoyant.io/http2-grpc-and-linkerd/).
Finally, for a more advanced example of configuring gRPC services, take a look at our [Gob microservice app](https://github.com/BuoyantIO/linkerd-examples/tree/master/gob). In that example, we additionally deploy [Namerd](https://github.com/linkerd/linkerd/tree/master/namerd), which we use to manage our routing rules centrally, and update routing rules without redeploying Linkerd. This lets us to do things like canarying and blue green deploys between different versions of a service.
{{< note >}}
There are a myriad of ways to deploy Kubernetes and different environments support different features. Learn more about deployment differences [here](https://discourse.linkerd.io/t/flavors-of-kubernetes).
{{< /note >}}
For more information on Linkerd, gRPC, and HTTP/2 head to the [Linkerd gRPC documentation](https://linkerd.io/features/grpc/) as well as our [config documentation for HTTP/2](https://linkerd.io/config/1.0.0/linkerd/index.html#http-2-protocol).

View File

@ -25,6 +25,7 @@ latencies without requiring changes to application code.
---
## Using Linkerd for service monitoring in Kubernetes
One of the advantages of operating at the request layer is that the service mesh
has access to protocol-level semantics of success and failure. For example, if
youre running an HTTP service, Linkerd can understand the semantics of 200
@ -37,7 +38,9 @@ automatically capture aggregated, top-line service success rates without
requiring application changes.
---
## Step 0: Setup and Prerequisites
First, youll need a Kubernetes cluster and a functioning `kubectl` command on
your local machine. These following examples will assume you're using either
[GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-container-cluster)
@ -61,19 +64,22 @@ kubectl create clusterrolebinding cluster-admin-binding-$USER --clusterrole=clus
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd-rbac.yml
```
Your cluster should have at least 4 CPU's available for this tutorial to work.
## Step 1: Install Linkerd
Install Linkerd using [this Kubernetes config](https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd.yml).
This will install Linkerd as a DaemonSet (i.e., one instance per host) running
in the default Kubernetes namespace:
```
```bash
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd.yml
```
You can confirm that installation was successful by viewing Linkerds admin page:
##### Minikube
### Minikube
```bash
HOST_IP=$(kubectl get po -l app=l5d -o jsonpath="{.items[0].status.hostIP}")
@ -81,27 +87,29 @@ NODE_PORT_ADMIN=$(kubectl get svc l5d -o 'jsonpath={.spec.ports[2].nodePort}')
open http://$HOST_IP:$NODE_PORT_ADMIN # on OS X
```
#### GKE
### GKE
```bash
INGRESS_LB=$(kubectl get svc l5d -o jsonpath="{.status.loadBalancer.ingress[0].*}")
open http://$INGRESS_LB:9990 # on OS X
```
{{< fig src="/images/tutorials/buoyant-k8s-linkerd-admin-large-1024x737.png"
title="Linkerd admin UI." >}}
title="Linkerd admin UI." >}}
---
## Step 2: Install the sample apps
Install two services, “hello” and “world”, in the default namespace.
##### Minikube
### Minikube
```bash
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world-legacy.yml
```
##### GKE
### GKE
```bash
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world.yml
@ -113,14 +121,14 @@ service to complete its request).
You can see this in action by sending traffic through Linkerds external IP:
##### Minikube
### Minikube
```bash
NODE_PORT=$(kubectl get svc l5d -o 'jsonpath={.spec.ports[0].nodePort}')
http_proxy=$HOST_IP:$NODE_PORT curl -s http://hello
```
##### GKE
### GKE
```bash
http_proxy=$INGRESS_LB:4140 curl -s http://hello
@ -137,17 +145,18 @@ Finally, lets take a look at what our services are doing by installing
supplemental package that includes a simple Prometheus and Grafana setup and
is configured to automatically find Linkerd instances.
Install Linkerd-viz using [this Linkerd-viz config]
(https://raw.githubusercontent.com/linkerd/linkerd-viz/master/k8s/linkerd-viz.yml).
Install Linkerd-viz using
[this Linkerd-viz config](https://raw.githubusercontent.com/linkerd/linkerd-viz/master/k8s/linkerd-viz.yml).
This will install Linkerd-viz into the default namespace:
```bash
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-viz/master/k8s/linkerd-viz.yml
```
Open Linkerd-vizs external IP to view the dashboard:
##### Minikube
### Minikube
```bash
VIZ_HOST_IP=$(kubectl get po -l name=linkerd-viz -o jsonpath="{.items[0].status.hostIP}")
@ -155,7 +164,7 @@ VIZ_NODE_PORT=$(kubectl get svc linkerd-viz -o 'jsonpath={.spec.ports[0].nodePor
open http://$VIZ_HOST_IP:$VIZ_NODE_PORT # on OS X
```
##### GKE
### GKE
```bash
VIZ_INGRESS_LB=$(kubectl get svc linkerd-viz -o jsonpath="{.status.loadBalancer.ingress[0].*}")
@ -165,7 +174,8 @@ open http://$VIZ_INGRESS_LB # on OS X
You should see a dashboard, including selectors by service and instance. All
charts respond to these service and instance selectors:
{{< fig src="/images/tutorials/buoyant-k8s-linkerd-viz-large-1024x739.png" title="Linkerd-Viz dashboard." >}}
{{< fig src="/images/tutorials/buoyant-k8s-linkerd-viz-large-1024x739.png"
title="Linkerd-Viz dashboard." >}}
The Linkerd-viz dashboard includes three sections:
@ -177,7 +187,7 @@ node in your cluster.
---
### Thats all!
### Thats all
With just three simple commands we were able to install Linkerd on our Kubernetes
cluster, install an app, and use Linkerd to gain visibility into the health of

View File

@ -1,145 +0,0 @@
+++
date = "2017-03-14T13:43:54-07:00"
title = "Part VII: Distributed tracing made easy"
description = "Linkerds role as a service mesh makes it a great source of data around system performance and runtime behavior."
weight = 8
draft = true
aliases = [
"/tutorials_staging/part-seven"
]
[menu.docs]
parent = "tutorials"
+++
Author: Kevin Lingerfelt
Linkerds role as a _service mesh_ makes it a great source of data around system performance and runtime behavior. This is especially true in polyglot or heterogeneous environments, where instrumenting each language or framework can be quite difficult. Rather than instrumenting each of your apps directly, the service mesh can provide a uniform, standard layer of application tracing and metrics data, which can be collected by systems like [Zipkin](http://zipkin.io/) and [Prometheus](https://prometheus.io/).
In this post well walk through a simple example how Linkerd and Zipkin can work together in Kubernetes to automatically get distributed traces, with only minor changes to the application.
---
In previous installments of this series, weve shown you how you can use Linkerd to [capture top-line service metrics](/tutorials/part-one). Service metrics are vital for determining the health of individual services, but they dont capture the way that multiple services work (or dont work!) together to serve requests. To see a bigger picture of system-level performance, we need to turn to distributed tracing.
In a previous post, we covered some of the [benefits of distributed tracing](https://buoyant.io/distributed-tracing-for-polyglot-microservices/), and how to configure Linkerd to export tracing data to [Zipkin](http://zipkin.io/). In this post, well show you how to run this setup entirely in Kubernetes, including Zipkin itself, and how to derive meaningful data from traces that are exported by Linkerd.
---
## A Kubernetes Service Mesh
Before we start looking at traces, well need to deploy Linkerd and Zipkin to Kubernetes, along with some sample apps. The [linkerd-examples](https://github.com/linkerd/linkerd-examples/tree/master/k8s-daemonset) repo provides all of the configuration files that well need to get tracing working end-to-end in Kubernetes. Well walk you through the steps below.
---
## Step 1: Install Zipkin
Well start by installing Zipkin, which will be used to collect and display tracing data. In this example, for convenience, well use Zipkins in-memory store. (If you plan to run Zipkin in production, youll want to switch to using one of its persistent backends.)
To install Zipkin in the default Kubernetes namespace, run:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/zipkin.yml
```
You can confirm that installation was successful by viewing Zipkins web UI:
```
ZIPKIN_LB=$(kubectl get svc zipkin -o jsonpath="{.status.loadBalancer.ingress[0].*}")
open http://$ZIPKIN_LB # on OS X
```
Note that it may take a few minutes for the ingress IP to become available. Or if external load balancer support is unavailable for the cluster, use hostIP:
```
ZIPKIN_LB=</code>$(kubectl get po -l app=zipkin -o jsonpath="{.items[0].status.hostIP}"):$(kubectl get svc zipkin -o 'jsonpath={.spec.ports[0].nodePort}') open http://$ZIPKIN_LB # on OS X
```
However, the web UI wont show any traces until we install Linkerd.
---
## Step 2: Install the service mesh
Next well install the Linkerd service mesh, configured to write tracing data to Zipkin. To install Linkerd as a DaemonSet (i.e., one instance per host) in the default Kubernetes namespace, run:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd-zipkin.yml
```
This installed Linkerd as a service mesh, exporting tracing data with Linkerds [Zipkin telemeter](https://linkerd.io/config/0.9.0/linkerd/index.html#zipkin-telemeter). The relevant config snippet is:
```
telemetry:
- kind: io.l5d.zipkin
host: zipkin-collector.default.svc.cluster.local
port: 9410
sampleRate: 1.0
```
Here were telling Linkerd to send tracing data to the Zipkin service that we deployed in the previous step, on port 9410. The configuration also specifies a sample rate, which determines the number of requests that are traced. In this example were tracing all requests, but in a production setting you may want to set the rate to be much lower (the default is 0.001, or 0.1% of all requests).
You can confirm the installation was successful by viewing Linkerds admin UI (note, again, that it may take a few minutes for the ingress IP to become available, depending on the vagaries of your cloud provider):
```
L5D_INGRESS_LB=$(kubectl get svc l5d -o jsonpath="{.status.loadBalancer.ingress[0].*}")
open http://$L5D_INGRESS_LB:9990 # on OS X
```
Or if external load balancer support is unavailable for the cluster, use hostIP:
```
L5D_INGRESS_LB=$(kubectl get po -l app=l5d -o jsonpath="{.items[0].status.hostIP}")
open http://$L5D_INGRESS_LB:$(kubectl get svc l5d -o 'jsonpath={.spec.ports[2].nodePort}') # on OS X
```
---
## Step 3: Install the sample apps
Now well install the “hello” and “world” apps in the default namespace. These apps rely on the nodeName supplied by the [Kubernetes downward API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) to find Linkerd. To check if your cluster supports nodeName, you can run this test job:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/node-name-test.yml
```
And then looks at its logs:
```
kubectl logs node-name-test
```
If you see an ip, great! Go ahead and deploy the hello world app using:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world.yml
```
If instead you see a “server cant find …” error, deploy the hello-world legacy version that relies on hostIP instead of nodeName:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world-legacy.yml
```
Congrats! At this point, we have a functioning service mesh with distributed tracing enabled, and an application that makes use of it.
Lets see the entire setup in action by sending traffic through Linkerds outgoing router running on port 4140:
```
http_proxy=http://$L5D_INGRESS_LB:4140 curl -s http://hello
Hello () world ()!
```
Or if using hostIP:
```
http_proxy=http://$L5D_INGRESS_LB:</code>$(kubectl get svc l5d -o 'jsonpath={.spec.ports[0].nodePort}') curl -s http://hello Hello () world ()!
```
If everything is working, youll see a “Hello world” message similar to that above, with the IPs of the pods that served the request.
---
## Step 4: Enjoy the view
Now its time to see some traces. Lets start by looking at the trace that was emitted by the test request that we sent in the previous section. Zipkins UI allows you to search by “span” name, and in our case, were interested in spans that originated with the Linkerd router running on 0.0.0.0:4140, which is where we sent our initial request. We can search for that span as follows:
```
open http://$ZIPKIN_LB/?serviceName=0.0.0.0%2F4140 # on OS X
```
That should surface 1 trace with 8 spans, and the search results should look like this:
{{< fig src="/images/tutorials/buoyant-k8s-tracing-search-1-large-1024x352.png" >}}
Clicking on the trace from this view will bring up the trace detail view:
{{< fig src="/images/tutorials/buoyant-k8s-tracing-trace-1-large-1024x360.png" >}}
From this view, you can see the timing information for all 8 spans that Linkerd emitted for this trace. The fact that there are 8 spans for a request between 2 services stems from the service mesh configuration, in which each request passes through two Linkerd instances (so that the protocol can be upgraded or downgraded, or [TLS can be added and removed across node boundaries](/tutorials/part-three)). Each Linkerd router emits both a server span and a client span, for a total of 8 spans.
Clicking on a span will bring up additional details for that span. For instance, the last span in the trace above represents how long it took the world service to respond to a request—8 milliseconds. If you click on that span, youll see the span detail view:
{{< fig src="/images/tutorials/buoyant-k8s-tracing-span-1-large-1024x712.png" >}}

View File

@ -1,159 +0,0 @@
+++
date = "2017-01-06T13:43:54-07:00"
title = "Part VI: Staging microservices without the tears"
description = "Staging new code before exposing it to production traffic is a critical part of building reliable, low-downtime software."
weight = 7
draft = true
aliases = [
"/tutorials_staging/part-six"
]
[menu.docs]
parent = "tutorials"
+++
Author: Risha Mars
Staging new code before exposing it to production traffic is a critical part of building reliable, low-downtime software. Unfortunately, with microservices, the addition of each new service increases the complexity of the staging process, as the dependency graph between services grows quadratically with the number of services. In this article, well show you how one of linkerds most powerful features, per-request routing, allows you to neatly sidestep this problem.
---
For a video presentation of the concepts discussed in this article, see [Alex Leong](https://twitter.com/adlleong)s meetup talk, [Microservice Staging without the Tears](https://youtu.be/y0D5EAXvUpg).
Linkerd is a service mesh for cloud-native applications. It acts as a transparent request proxy that adds a layer of resilience to applications by wrapping cross-service calls with features like latency-aware load balancing, retry budgets, deadlines, and circuit breaking.
In addition to improving application resilience, linkerd also provides a powerful routing language that can alter how request traffic flows between services at runtime. In this post, well demonstrate linkerds ability to do this routing, not just globally, but on a per-request basis. Well show how this _per-request_ routing can be used to create ad-hoc staging environments that allow us to test new code in the context of the production application, without actually exposing the new code to production traffic. Finally, well show how (in contrast to staging with a dedicated staging environment) ad-hoc staging requires neither coordination with other teams, nor the costly process of keeping multiple deployment environments in sync.
---
## Why stage?
Why is staging so important? In modern software development, code goes through a rigorous set of practices designed to _assess correctness_: code review, unit tests, integration tests, etc. Having passed these hurdles, we move to _assessing behaviour_: how fast is the new code? How does it behave under load? How does it interact with runtime dependencies, including other services?
These are the questions that a staging environment can answer. The fundamental principle of staging is that the closer to the production environment, the more realistic staging will be. Thus, while mocks and stub implementations make sense for tests, for staging, we ideally want actual running services. The best staging environment is one in which the surrounding environment is exactly the same as it will be in production.
---
## Why is staging hard for microservices?
When your application consists of many services, the interaction between these services becomes a critical component of end-to-end application behaviour. In fact, the more that the application is disaggregated into services, the more that the runtime behaviour of the application is determined not just by the services themselves, but by the interactions between them.
Unfortunately, increasing the number of services doesnt just increase the importance of proper staging, it also increases the difficulty of doing this properly. Lets take a look at a couple common ways of staging, and why they suffer in multi-service environments.
A frequent choice for staging is the shared staging cluster, wherein your staged service is deployed into a dedicated staging environment alongside other staged services. The problem with this approach is that there is no isolation. If, as in the diagram below, Alex deploys his Foo service and sees weird behaviour, its difficult to determine the source—it could be due to the staging deploys of Alex, Alice, or Bob, or simply the mock data in the database. Keeping the staging environment in sync with production can be very difficult, especially as the number of services, teams, and releases all start to increase.
{{< fig src="/images/tutorials/buoyant-1_everyone.png" >}}
An alternative to the shared environment that addresses the lack of isolation is the “personal” or per-developer, staging cluster. In this model, every developer can spin up a staging cluster on demand. To keep our staging effective, staging a service requires staging its upstream and downstream dependencies as well. (For example, in the diagram below, Alex would need to deploy Web FE and API in order to ensure the changes he made to his Foo service are correctly reflected there.) Unfortunately, maintaining the ability to deploy arbitrary subsets of the application topology on demand also becomes very complex, especially as the application topology becomes larger, and as services have independent deployment models.
{{< fig src="/images/tutorials/buoyant-2_personal.png" >}}
Finally, there is the (sadly prevalent!) option of simply deploying fresh code into production and rolling it back when flaws are discovered. Of course, this is rather risky, and may not be an option for applications that handle, e.g., financial transactions. There are many other ways you could obtain a staging environment, but in this article, well describe a straightforward, tear-free approach.
---
## A better path
Fortunately, with linkerd, we can do staging without incurring the costs detailed above, by creating _ad-hoc staging environments_. In fact, one of the prime motivations for the routing layer in Finagle, the library underlying linkerd, was solving this very problem at Twitter!
Lets consider again the goal of staging Alexs Foo service. What if, rather than deploying to a separate environment, we could simply substitute Foo-staging in place of Foo-production, for a specific request? That would give us the ability to stage Foo safely, against the production environment, without requiring any deployment other than that of Foo-staging itself. This is the essence of ad-hoc staging environments. The burden on the developer is now greatly eased: Alex must simply stage his new code, set a header on ingress requests, and voila!
{{< fig src="/images/tutorials/buoyant-3_request_path.png" >}}
Happily, linkerds per-request routing allow us to do just this. With linkerd proxying traffic, we can set a routing “override” for a particular request using the `l5d-dtab` header. This header allows you to set routing rules (called, in Finagle parlance, “[Dtabs](https://linkerd.io/in-depth/dtabs/)”) for that request. For example, the dtab rule `/s/foo => /srv/alex-foo` might override the production routing rule for Foo. Attaching this change to a single requestwould allow us to reach Alexs Foo service, but only for that request. Linkerd propagates this rule, so any usage of Alexs Foo service anywhere in the application topology, for the lifetime of that request, will be properly handled.
{{< fig src="/images/tutorials/buoyant-4_override.png" >}}
---
## Trying this at home
Keen readers of our [Service Mesh for Kubernetes](https://buoyant.io/a-service-mesh-for-kubernetes-part-i-top-line-service-metrics/) series will note that weve already seen an example of this in [our dogfood blog](/tutorials/part-five) post. We deployed a `world-v2` service, and we were able to send individual dogfood requests through this service via a simple header containing a routing override. Now, we can use this same mechanism for something else: setting up an ad hoc staging environment.
Lets deploy two versions of a service and use linkerds routing capabilities to test our new service before using it in production. Well deploy our `hello` and `world-v1` services as our running prod services, and then well create an ad-hoc staging environment to stage and test a new version of world, `world-v2`.
---
## Step 1: Deploy Linkerd and our hello-world services
Well use the hello world service from the previous blog posts. This consists of a hello service that calls a world service. These apps rely on the nodeName supplied by the [Kubernetes downward API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) to find Linkerd. To check if your cluster supports nodeName, you can run this test job:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/node-name-test.yml
```
And then looks at its logs:
```
kubectl logs node-name-test
```
If you see an ip, great! Go ahead and deploy the hello world app using:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world.yml
```
If instead you see a “server cant find …” error, deploy the hello-world legacy version that relies on hostIP instead of nodeName:
```
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world-legacy.yml
```
Lets deploy our prod environment (linkerd, and the hello and world services):
```
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd-ingress.yml
```
Lets also deploy linkerd and the service we want to stage, world-v2, which will return the word “earth” rather than “world”.
```
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/linkerd-ingress.yml
$ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/world-v2.yml
```
---
## Step 2: Use per request overrides in our ad-hoc staging environment
So now that we have a running world-v2, lets test it by running a request through our production topology, except that instead of hitting `world-v1`, well hit `world-v2`. First, lets run an unmodified request through our default topology (you may have to wait for l5ds external IP to appear):
```
$ INGRESS_LB=$(kubectl get svc l5d -o jsonpath="{.status.loadBalancer.ingress[0].*}")
$ curl -H "Host: www.hello.world" $INGRESS_LB
Hello (10.196.2.232) world (10.196.2.233)!!
```
Or if external load balancer support is unavailable for the cluster, use hostIP:
```
INGRESS_LB=$(kubectl get po -l app=l5d -o jsonpath="{.items[0].status.hostIP}"):$(kubectl get svc l5d -o 'jsonpath={.spec.ports[0].nodePort}')
$ curl -H "Host: www.hello.world" $INGRESS_LB
Hello (10.196.2.232) world (10.196.2.233)!!
```
As we expect, this returns `Hello (......) World (.....)` from our production topology.
Now, how do we get to the staging environment? All we have to do is pass the following dtab override and requests through the prod topology will go to `world-v2`! A dtab override is another dtab entry that we pass using headers in the request. Since later dtab rules are applied first, this rule will replace (override) our current “/host/world => /srv/world-v1” rule with a rule to send requests with `/host/world` to `/srv/world-v2` instead.
```
$ curl -H "Host: www.hello.world" -H "l5d-dtab: /host/world =&gt; /srv/world-v2;" $INGRESS_LB
Hello (10.196.2.232) earth (10.196.2.234)!!
```
We now see “earth” instead of “world”! The request is successfully served from the world-v2 service wired up to our existing production topology, with no code changes or additional deploys. Success! Staging is now fun and easy.
[Dtabs](https://linkerd.io/in-depth/dtabs/) and [routing](https://linkerd.io/in-depth/routing/) in linkerd are well documented. During development, you can also make use of linkerds “dtab playground” at `http://$INGRESS_LB:9990/delegator`. By going to the “outgoing” router and testing a request name like /http/1.1/GET/world, you can see linkerds routing policy in action.
---
## In practice
In practice, there are some caveats to using this approach. First, the issue of writes to production databases must be addressed. The same dtab override mechanism can be used to send any writes to a staging database, or, with some application-level intelligence, to /dev/null. It is recommended that these rules are not created by hand so as to avoid expensive mistakes with production data!
Secondly, you application needs to forward [linkerds context headers](https://linkerd.io/features/routing/#per-request-routing) for this to work.
Lastly, its important to ensure that the l5d-dtab header is not settable from the outside world! In our post about [setting up a dogfood environment in Kubernetes](/tutorials/part-five), we gave an example nginx configuration for ingress that would strip unknown headers from the outside world—good practice for a variety of reasons.
---
## Conclusion
Weve demonstrated how to create ad-hoc staging environments with linkerd by setting per-request routing rules. With this approach, we can stage services in the context of production environment, without modifying existing code, provisioning extra resources for our staging environment (other than for the staging instance itself), or maintaining parallel environments for production and staging. For microservices with complex application topologies, this approach can provide an easy, low-cost way to staging services before pushing to production.
{{< note >}}
There are a myriad of ways to deploy Kubernetes and different environments support different features. Learn more about deployment differences [here](https://discourse.linkerd.io/t/flavors-of-kubernetes).
{{< /note >}}
For more about running linkerd in Kubernetes, or if you have any issues configuring ingress in your setup, feel free to stop by our [linkerd community Slack](https://slack.linkerd.io/), ask a question on [Discourse](https://discourse.linkerd.io/), or [contact us directly](https://linkerd.io/overview/help/)!

View File

@ -1,121 +0,0 @@
+++
date = "2017-05-24T13:43:54-07:00"
title = "Part X: The Service Mesh API"
description = "With the 1.0 release happily out of the way, we thought wed take a moment to explain what this API does and what it means for the future of Linkerd."
weight = 11
draft = true
aliases = [
"/tutorials_staging/part-ten"
]
[menu.docs]
parent = "tutorials"
+++
Author: Alex Leong
As part of our Linkerd 1.0 release last month, we snuck in something that a few people have picked up on—Linkerds _service mesh API_. With the 1.0 release happily out of the way, we thought wed take a moment to explain what this API does and what it means for the future of Linkerd. Well also show off one of the upcoming features of this API—dynamic control over Linkerds per-service communications policy.
## The Linkerd service mesh
This morning at [Gluecon](http://gluecon.com/), Buoyant CTO [Oliver Gould](https://twitter.com/olix0r) delivered a keynote entitled **The Service Mesh**. In this keynote, he outlined the vision of the service mesh, as exemplified by [Linkerd](https://linkerd.io/). While Linkerd is often added to systems built on Kubernetes for its ability to add resiliency, the full vision of the service mesh is much more than that. As William Morgan writes in his blog post, [Whats a Service Mesh?](https://buoyant.io/whats-a-service-mesh-and-why-do-i-need-one/):
> The explicit goal of the service mesh is to move service communication out of the realm of the invisible, implied infrastructure, and into the role of a first-class member of the ecosystem—where it can be monitored, managed and controlled.
For Linkerd, this means that every aspect of its behavior should be not only instrumented and observable, but also controllable at runtime. And ideally, this mutability should take place, not via config file edits and hot reloading, but via a unified and well-designed runtime API.
This is, in short, the purpose of Linkerds service mesh API. To that end, weve introduced the [io.l5d.mesh interpreter](https://linkerd.io/config/1.0.0/linkerd/index.html#namerd-mesh) and [a new gRPC API for Namerd](https://linkerd.io/config/1.0.0/namerd/index.html#grpc-mesh-interface). Together, these provide the ability to dynamically control routing policy, and form the core of Linkerds service mesh API. This is a first step towards the eventual goal of providing a unified, global model of control over every aspect of Linkerds behavior.
Linkerd 1.0 also introduced a new type of policy that isnt yet exposed via the service mesh API—per-service _communications policy_. In this post, well show how to configure this policy today, and well describe the future work needed to add this control to Linkerds service mesh API.
---
## Communications Policy
Linkerds new per-service _communications policy_ is an oft-requested feature. Communications policy encompasses many different aspects of how Linkerd proxies a request, including: how long should we wait for a service to process a request before timing out? What kinds of requests are safe to retry? Should we encrypt communication with TLS and which certificates should we use? And so on.
Lets take a look at how this policy can be used today, with the example of two services that have wildly different latencies.
Starting from a fresh Kubernetes cluster, lets deploy two services with different latencies. We can deploy the `hello world` microservice that were familiar with from the other posts in this series, with one small tweak: the `hello` service will be configured to add `500ms` of artificial latency.
```
- name: service
image: buoyantio/helloworld:0.1.2
args:
- "-addr=:7777"
- "-text=Hello"
- "-target=world"
- "-latency=500ms"
```
Deploy it to your Kubernetes cluster with this command:
```
kubectl apply -f https://raw.githubusercontent.com/BuoyantIO/linkerd-examples/master/k8s-daemonset/k8s/hello-world-latency.yml
```
(Note that the examples in these blog posts assume Kubernetes is running in an environment like GKE, where external loadbalancer IPs are available, and no CNI plugins are being used. Slight modifications may be needed for other environments—see our [Flavors of Kubernetes](https://discourse.linkerd.io/t/flavors-of-kubernetes/53) forum posting for how to handle environments like Minikube or CNI configurations with Calico/Weave.)
Our next step will be to deploy the Linkerd service mesh. Wed like to add a timeout so that we can abort (and potentially retry) requests that are taking too long, but were faced with a problem. The `world` service is fast, responding in less than `100ms`, but the `hello` service is slow, taking more than `500ms` to respond. If we set our timeout just above `100ms`, requests to the `world` service will succeed, but requests to the `hello` service are guaranteed to timeout. On the other hand, if we set our timeout above `500ms` then were giving the `world` service a much longer timeout than necessary, which may cause problems to our callers.
To give each service an appropriate timeout, we can use Linkerd 1.0s new fine-grained per-service configuration to set a separate communications policy for each service:
```
service:
kind: io.l5d.static
configs:
- prefix: /svc/hello
totalTimeoutMs: 600ms
- prefix: /svc/world
totalTimeoutMs: 100ms
```
This configuration establishes the following timeouts:
{{< fig src="/images/tutorials/buoyant-k8s-hello-world-timeouts.png" >}}
We can deploy the Linkerd service mesh with this configuration using this command:
```bash
kubectl apply -f https://raw.githubusercontent.com/BuoyantIO/linkerd-examples/master/k8s-daemonset/k8s/linkerd-latency.yml
```
Once Kubernetes provisions an external LoadBalancer IP for Linkerd, we can test requests to both the `hello` and `world` services and make sure both are operating within their timeouts.
```bash
$ L5D_INGRESS_LB=$(kubectl get svc l5d -o jsonpath="{.status.loadBalancer.ingress[0].*}")
$ curl $L5D_INGRESS_LB:4140 -H "Host: hello"
Hello (10.196.1.242) world (10.196.1.243)!!
$ curl $L5D_INGRESS_LB:4140 -H "Host: world"
world (10.196.1.243)!!
```
(Note that the first few requests will be slower because they must establish connections and may time out. Subsequent requests should be successful.)
We can also check that timeouts are being enforced by artificially increasing the latency of the `hello` and `world` services until they violate their timeouts. We'll start by increasing the artificial latency of the `hello` service to `600ms`. Given that the timeout for the `hello` service is `600ms`, this leaves zero overhead for the `hello` service to do things like call the `world` service, so any requests should time out:
```bash
$ curl "$L5D_INGRESS_LB:4140/setLatency?latency=600ms" -X POST -H "Host: hello"
ok
$ curl $L5D_INGRESS_LB:4140 -H "Host: hello"
exceeded 600.milliseconds to unspecified while waiting for a response for the request, including retries (if applicable). Remote Info: Not Available
```
Similarly, we can add `100ms` of artificial latency to the `world` service, which should cause all requests to the `world` service to violate the `100ms` timeout.
```bash
$ curl "$L5D_INGRESS_LB:4140/setLatency?latency=100ms" -X POST -H "Host: world"
ok
$ curl $L5D_INGRESS_LB:4140 -H "Host: world"
exceeded 100.milliseconds to unspecified while waiting for a response for the request, including retries (if applicable). Remote Info: Not Available
```
Success! We've set appropriate timeouts for each service, and demonstrated the expected behavior when these timeouts are (and are not) violated.
In this example, we've only been configuring timeouts, but, as you might expect, this same pattern can be used to configure any kind of per-service communications policy, including [response classification](https://linkerd.io/config/1.0.0/linkerd/index.html#http-response-classifiers) or [retry budgets](https://linkerd.io/config/1.0.0/linkerd/index.html#retries).
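As a sketch of what that could look like, the same `io.l5d.static` block can carry those policies alongside the timeout. The field names come from the linked reference pages; the budget numbers and classifier choice are illustrative assumptions rather than recommendations, so check the reference for the exact schema your Linkerd version supports:

```yaml
service:
  kind: io.l5d.static
  configs:
  - prefix: /svc/hello
    totalTimeoutMs: 600
    # Only reads (GET, HEAD, OPTIONS, TRACE) that fail with a 5XX are retryable.
    responseClassifier:
      kind: io.l5d.http.retryableRead5XX
    # Bound retries as a fraction of live traffic instead of a fixed attempt count.
    retries:
      budget:
        minRetriesPerSec: 5
        percentCanRetry: 0.2
        ttlSecs: 10
```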
---
## Looking forward
In this post, we've seen an example of using Linkerd's new per-service communications policy to handle two services with wildly different expected latencies. The introduction of per-service communications policy solves some immediate use cases for Linkerd users. But what we've seen here is just the beginning of communications policy control in Linkerd—this policy was built from the ground up to be dynamically updatable, with the explicit goal of making it part of the service mesh API.
In the coming months, we'll add this communications policy to Linkerd's service mesh API, alongside routing policy. Looking still further, other forms of policy—including [rate limiting](https://github.com/linkerd/linkerd/issues/1006), [request forking policy](https://github.com/linkerd/linkerd/issues/1277), and [security policy](https://github.com/linkerd/linkerd/issues/1276)—are all on [the Linkerd roadmap](https://github.com/linkerd/linkerd/projects/3), and will form more of Linkerd's service mesh API. A consistent, uniform, well-designed service mesh API with comprehensive control over Linkerd's runtime behavior is central to our vision of Linkerd as the service mesh for cloud native applications.
There's a lot of very exciting work ahead of us and it won't be possible without input and involvement from the amazing Linkerd community. Please comment on an issue, discuss your use case on [Discourse](https://discourse.linkerd.io/), hit us up on [Slack](https://slack.linkerd.io/), or—best of all—submit a [pull request](https://github.com/linkerd/linkerd/pulls)!

View File

@ -16,6 +16,7 @@ will learn how to set up mutual TLS between two endpoints using Linkerd.
---
## Encrypting all the things with protocol upgrades
In this tutorial, we'll show you how to use Linkerd as a service mesh to add TLS
to all service-to-service HTTP calls, without modifying any application code.
Another benefit of the service mesh approach is that it allows you to decouple
@ -38,6 +39,7 @@ integration is slightly more complex.)
---
## Step 0: Setup and Prerequisites
First, you'll need a Kubernetes cluster and a functioning `kubectl` command on
your local machine. These following examples will assume you're using either
[GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-container-cluster)
@ -77,7 +79,7 @@ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/mast
You can confirm that installation was successful by viewing Linkerd's admin page:
##### Minikube
#### Minikube
```bash
HOST_IP=$(kubectl get po -l app=l5d -o jsonpath="{.items[0].status.hostIP}")
@ -101,13 +103,13 @@ title="Linkerd admin UI." >}}
Install two services, “hello” and “world”, in the default namespace.
##### Minikube
#### Minikube
```bash
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world-legacy.yml
```
##### GKE
#### GKE
```bash
kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/hello-world.yml
@ -119,14 +121,14 @@ service to complete its request).
You can see this in action by sending traffic through Linkerd's external IP:
##### Minikube
#### Minikube
```bash
NODE_PORT=$(kubectl get svc l5d -o 'jsonpath={.spec.ports[0].nodePort}')
http_proxy=$HOST_IP:$NODE_PORT curl -s http://hello
```
##### GKE
#### GKE
```bash
http_proxy=$INGRESS_LB:4140 curl -s http://hello
@ -137,6 +139,7 @@ You should see the string “Hello world”.
---
## Linkerd with TLS
Now that Linkerd is installed, let's use it to encrypt traffic. We'll place TLS
certificates on each of the hosts, and configure Linkerd to use those
certificates for TLS.
@ -156,6 +159,7 @@ generate your own self-signed certificates, see our blog post, where we have
---
## Step 1: Deploy certificates and config changes to Kubernetes
We're ready to update Linkerd to encrypt traffic. We will distribute the
[sample certificates](https://raw.githubusercontent.com/linkerd/linkerd-examples/master/k8s-daemonset/k8s/certificates.yml)
as Kubernetes [secrets](https://kubernetes.io/docs/concepts/configuration/secret/).
@ -175,18 +179,19 @@ kubectl apply -f https://raw.githubusercontent.com/linkerd/linkerd-examples/mast
---
## STEP 2: SUCCESS!
## STEP 2: SUCCESS
At this point, Linkerd should be transparently wrapping all communication
between these services in TLS. Let's verify this by running the same command as
before:
##### Minikube
### Minikube
```bash
http_proxy=$HOST_IP:$(kubectl get svc l5d -o 'jsonpath={.spec.ports[0].nodePort}') curl -s http://hello
```
##### GKE
### GKE
```bash
http_proxy=$INGRESS_LB:4140 curl -s http://hello
@ -197,14 +202,14 @@ communication between the hello and world services is being encrypted. We can
verify this by making an HTTPS request directly to port 4141, where Linkerd is
listening for requests from other Linkerd instances:
##### Minikube
### Minikube
```bash
NODE_PORT_INCOMING=$(kubectl get svc l5d -o 'jsonpath={.spec.ports[1].nodePort}')
curl -skH 'l5d-dtab: /svc=>/#/io.l5d.k8s/default/admin/l5d;' https://$HOST_IP:$NODE_PORT_INCOMING/admin/ping
```
##### GKE
### GKE
```bash
curl -skH 'l5d-dtab: /svc=>/#/io.l5d.k8s/default/admin/l5d;' https://$INGRESS_LB:4141/admin/ping

View File

@ -17,6 +17,7 @@ running in Kubernetes.
---
## A service mesh for Kubernetes
As a service mesh, Linkerd is designed to be run alongside application code,
managing and monitoring inter-service communication, including performing
service discovery, retries, load-balancing, and protocol upgrades.
@ -28,7 +29,8 @@ and weve spent a lot of time [optimizing Linkerd for this use case](https://b
However, the sidecar model also has a downside: deploying per pod means that
resource costs scale per pod. If your services are lightweight and you run many
instances, like [Monzo](https://monzo.com/) (who [built an entire bank on top of Linkerd and Kubernetes](https://monzo.com/blog/2016/09/19/building-a-modern-bank-backend/)),
instances, like [Monzo](https://monzo.com/) (who
[built an entire bank on top of Linkerd and Kubernetes](https://monzo.com/blog/2016/09/19/building-a-modern-bank-backend/)),
then the cost of using sidecars can be quite high.
We can reduce this resource cost by deploying Linkerd per host rather than per
@ -44,6 +46,7 @@ in Kubernetes.
---
## Architecture options
One of the defining characteristics of a service mesh is its ability to decouple
application communication from transport communication. For example, if services
A and B speak HTTP, the service mesh may convert that to HTTPS across the wire,
@ -56,12 +59,13 @@ receiving side of each request, proxying to and from local instances. E.g. for
HTTP to HTTPS upgrades, Linkerd must be able to both initiate and terminate TLS.
In a DaemonSet world, a request path through Linkerd looks like the diagram below:
{{< fig src="/images/tutorials/buoyant-k8s-daemonset-mesh.png" title="DaemonSet request path diagram." >}}
{{< fig src="/images/tutorials/buoyant-k8s-daemonset-mesh.png"
title="DaemonSet request path diagram." >}}
As you can see, a request that starts in Pod A on Host 1 and is destined for Pod J
on Host 2 must go through Pod A's host-local Linkerd instance, then to Host 2's
Linkerd instance, and finally to Pod J. This path introduces three problems that
Linkerd must address:
As you can see, a request that starts in Pod A on Host 1 and is destined for
Pod J on Host 2 must go through Pod A's host-local Linkerd instance, then to
Host 2's Linkerd instance, and finally to Pod J. This path introduces three
problems that Linkerd must address:
- How does an application identify its host-local Linkerd?
- How does Linkerd route an outgoing request to the destination's Linkerd?
@ -72,7 +76,9 @@ you just want to get Linkerd working with Kubernetes DaemonSets, see
[part one](/1/tutorials/part-one/)!
---
## How does an application identify its host-local Linkerd?
## Identify the host-local Linkerd
Since DaemonSets use a Kubernetes `hostPort`, we know that Linkerd is running on
a fixed port on the host's IP. Thus, in order to send a request to the Linkerd
process on the same machine that it's running on, we need to determine the IP
@ -131,7 +137,8 @@ set as environment variables in the pod.
---
## How does Linkerd route an outgoing request to the destination's Linkerd?
## Route an outgoing request to the destination's Linkerd
In our service mesh deployment, outgoing requests should not be sent directly to
the destination application, but instead should be sent to the Linkerd running
on that applications host. To do this, we can take advantage of powerful new
@ -159,7 +166,7 @@ routers:
---
## How does Linkerd route an incoming request to the destination application?
## Route an incoming request to the destination application
When a request finally arrives at the destination pod's Linkerd instance, it
must be correctly routed to the pod itself. To do this we use the `localnode`
@ -180,10 +187,11 @@ routers:
---
## Conclusion
Deploying Linkerd as a Kubernetes DaemonSet gives us the best of both worlds—it
allows us to accomplish the full set of goals of a service mesh (such as
transparent TLS, protocol upgrades, latency-aware load balancing, etc), while
scaling Linkerd instances per host rather than per pod.
transparent TLS, protocol upgrades, latency-aware load balancing, etc), while
scaling Linkerd instances per host rather than per pod.
For a full, working example, see the [part one](/1/tutorials/part-one/), or
download

View File

@ -1 +1,3 @@
<!-- markdownlint-disable -->
<meta http-equiv="Refresh" content="0; url=overview/">
<!-- markdownlint-enable -->

View File

@ -9,7 +9,7 @@ choose between stability or getting the latest and greatest functionality. The
latest release for each channel is listed below. The full list of releases can
be found on [GitHub](https://github.com/linkerd/linkerd2/releases).
# Stable (latest version: {{% latestversion %}})
## Stable (latest version: {{% latestversion %}})
Stable releases are periodic, and focus on stability. To install a stable
release, you can run:
@ -18,7 +18,7 @@ release, you can run:
curl -sL https://run.linkerd.io/install | sh
```
# Edge (latest version: {{% latestedge %}})
## Edge (latest version: {{% latestedge %}})
Edge releases are frequent (usually, weekly) and can be used to work with the
latest and greatest features. These releases are intended to be stable, but are

View File

@ -11,7 +11,9 @@ aliases = [
priority = 1.0
+++
# What is Linkerd?
<!-- markdownlint-disable MD026 -->
## What is Linkerd?
Linkerd is a [service
mesh](https://blog.buoyant.io/2017/04/25/whats-a-service-mesh-and-why-do-i-need-one/).
@ -27,13 +29,13 @@ with a uniform point at which they can control and measure the behavior of the
data plane. Operators typically interact with Linkerd using the [CLI](/2/cli/)
and the [web dashboard UI](/2/getting-started/#step-4-explore-linkerd).
# Who owns Linkerd and how is it licensed?
## Who owns Linkerd and how is it licensed?
Linkerd is licensed under Apache v2 and is a [Cloud Native Computing
Foundation](https://cncf.io) (CNCF) project. The CNCF owns the trademark; the
copyright is held by the Linkerd authors themselves.
# Who maintains Linkerd?
## Who maintains Linkerd?
See the [2.x
maintainers](https://github.com/linkerd/linkerd2/blob/master/MAINTAINERS.md)
@ -41,15 +43,15 @@ file, and the [1.x
maintainers](https://github.com/linkerd/linkerd/blob/master/MAINTAINERS.md)
file.
# Is there an Enterprise edition, or a commercial edition?
## Is there an Enterprise edition, or a commercial edition?
No. Everything in Linkerd is fully open source.
# How do I pronounce Linkerd?
## How do I pronounce Linkerd?
The "d" is pronounced separately, i.e. "Linker-DEE". (It's a UNIX thing.)
# What's the difference between Linkerd 1.x and 2.x?
## What's the difference between Linkerd 1.x and 2.x?
Linkerd 1.x is built on the "Twitter stack": Finagle, Netty, Scala, and the
JVM. Linkerd 2.x is built in Rust and Go, and is significantly faster and
@ -58,26 +60,26 @@ support or featureset of 1.x. (See
[the full list of supported platforms](/choose-your-platform/)
across both versions.)
# Is Linkerd 1.x still supported?
## Is Linkerd 1.x still supported?
Yes, the 1.x branch of Linkerd is under active development, and continues
to power the production infrastructure of companies around the globe.
[The full Linkerd 1.x documentation is here](/1/).
# Does Linkerd require Kubernetes?
## Does Linkerd require Kubernetes?
Linkerd 2.x currently requires Kubernetes, though this will change in the
future. Linkerd 1.x can be installed on any platform, and supports Kubernetes,
DC/OS, Mesos, Consul, and ZooKeeper-based environments.
# Where's the Linkerd roadmap?
## Where's the Linkerd roadmap?
As a community project, there is no official roadmap, but a glance at the
[active GitHub issues](https://github.com/linkerd/linkerd2/issues) will give
you a sense of what is in store for the future.
# What happens to Linkerd's proxies if the control plane is down?
## What happens to Linkerd's proxies if the control plane is down?
Linkerd's proxies do not integrate with Kubernetes directly, but rely on the
control plane for service discovery information. The proxies are designed to
@ -95,3 +97,4 @@ If *new* proxies are deployed when the control plane is unreachable, these new
proxies will not be able to operate. They will timeout all new requests until
such time as they can reach the control plane.
<!-- markdownlint-enable MD026 -->

View File

@ -28,12 +28,16 @@ The dashboards that are provided out of the box include:
{{< gallery >}}
{{< gallery-item src="/images/screenshots/grafana-top.png" title="Top Line Metrics" >}}
{{< gallery-item src="/images/screenshots/grafana-top.png"
title="Top Line Metrics" >}}
{{< gallery-item src="/images/screenshots/grafana-deployment.png" title="Deployment Detail" >}}
{{< gallery-item src="/images/screenshots/grafana-deployment.png"
title="Deployment Detail" >}}
{{< gallery-item src="/images/screenshots/grafana-pod.png" title="Pod Detail" >}}
{{< gallery-item src="/images/screenshots/grafana-pod.png"
title="Pod Detail" >}}
{{< gallery-item src="/images/screenshots/grafana-health.png" title="Linkerd Health" >}}
{{< gallery-item src="/images/screenshots/grafana-health.png"
title="Linkerd Health" >}}
{{< /gallery >}}

View File

@ -17,17 +17,20 @@ Here's a short description of what `--ha` does to the `linkerd` install.
* Defaults the controller replicas to `3`
* Set's sane `cpu` + `memory` requests to the linkerd control plane components.
* Defaults to a sensible requests for the sidecar containers for the control plane + [_auto proxy injection_](../proxy-injection/).
* Defaults to sensible requests for the sidecar containers for the control
plane + [_auto proxy injection_](/2/features/proxy-injection/).
## Setup
### Setup
Because it's the control plane that requires the `ha` config, you'll need to use the `install` command with the `ha` flag.
Because it's the control plane that requires the `ha` config, you'll need to
use the `install` command with the `ha` flag.
```bash
linkerd install --ha | kubectl apply -f
```
You can also override the amount of controller replicas that you wish to run by passing in the `--controller-replicas` flag
You can also override the amount of controller replicas that you wish to run by
passing in the `--controller-replicas` flag
```bash
linkerd install --ha --controller-replicas=2 | kubectl apply -f

View File

@ -40,7 +40,6 @@ The following protocols are known to be server-speaks-first:
* 4222 - NATS
* 27017 - MongoDB
If you're working with a protocol that can't be automatically recognized by
Linkerd, use the `--skip-inbound-ports` and `--skip-outbound-ports` flags when
running `linkerd inject`.

View File

@ -26,7 +26,7 @@ These can be setup by following the guides:
- [Configuring Retries](/2/tasks/configuring-retries/)
- [Configuring Timeouts](/2/tasks/configuring-timeouts/)
# How Retries Can Go Wrong
## How Retries Can Go Wrong
Traditionally, when performing retries, you must specify a maximum number of
retry attempts before giving up. Unfortunately, there are two major problems

View File

@ -34,7 +34,7 @@ This data can be consumed in several ways:
[pre-built Grafana dashboards](/2/features/dashboard/#grafana).
* Directly from Linkerd's built-in Prometheus instance
# Lifespan of Linkerd metrics
## Lifespan of Linkerd metrics
Linkerd is not designed as a long-term historical metrics store. While
Linkerd's control plane does include a Prometheus instance, this instance

View File

@ -29,13 +29,11 @@ Before we can do anything, we need to ensure you have access to a Kubernetes
cluster running 1.9 or later, and a functioning `kubectl` command on your local
machine.
You can run Kubernetes on your local machine. We suggest <a
href="https://www.docker.com/products/docker-desktop" target="_blank">Docker
Desktop</a> or <a
href="https://kubernetes.io/docs/tasks/tools/install-minikube/"
target="_blank">Minikube</a>. (For other options, see the <a
href="https://kubernetes.io/docs/setup/pick-right-solution/"
target="_blank">full list</a>.)
You can run Kubernetes on your local machine. We suggest
[Docker Desktop](https://www.docker.com/products/docker-desktop) or
[Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/). For other
options, see the
[full list](https://kubernetes.io/docs/setup/pick-right-solution/).
When ready, make sure you're running a recent version of Kubernetes with:
@ -220,7 +218,7 @@ the way it should with the data plane. To do this check, run:
linkerd -n emojivoto check --proxy
```
## Step 6: Watch it run!
## Step 6: Watch it run
You can glance at the Linkerd dashboard and see all the HTTP/2 (gRPC) and HTTP/1
(web frontend) speaking services in the demo app show up in the list of
@ -260,13 +258,17 @@ browser instead. The dashboard views look like:
{{< gallery >}}
{{< gallery-item src="/images/getting-started/stat.png" title="Top Line Metrics">}}
{{< gallery-item src="/images/getting-started/stat.png"
title="Top Line Metrics">}}
{{< gallery-item src="/images/getting-started/inbound-outbound.png" title="Deployment Detail">}}
{{< gallery-item src="/images/getting-started/inbound-outbound.png"
title="Deployment Detail">}}
{{< gallery-item src="/images/getting-started/top.png" title="Top" >}}
{{< gallery-item src="/images/getting-started/top.png"
title="Top" >}}
{{< gallery-item src="/images/getting-started/tap.png" title="Tap" >}}
{{< gallery-item src="/images/getting-started/tap.png"
title="Tap" >}}
{{< /gallery >}}
@ -276,7 +278,8 @@ metrics collected by Prometheus and ships with some extremely valuable
dashboards. You can get to these by clicking the Grafana icon in the overview
page.
{{< fig src="/images/getting-started/grafana.png" title="Deployment Detail Dashboard">}}
{{< fig src="/images/getting-started/grafana.png"
title="Deployment Detail Dashboard">}}
## Thats it! 👏

View File

@ -4,6 +4,7 @@ title = "Overview"
aliases = [
"/docs",
"/documentation",
"/2/",
"/2/docs/",
"/doc/network-performance/",
"/in-depth/network-performance/",

View File

@ -22,7 +22,7 @@ control signals from, the control plane.
{{< fig src="/images/architecture/control-plane.png" title="Architecture" >}}
# Control Plane
## Control Plane
The Linkerd control plane is a set of services that run in a dedicated
Kubernetes namespace (`linkerd` by default). These services accomplish various
@ -50,7 +50,7 @@ The control plane is made up of four components:
component is used to render and display these dashboards. You can reach these
dashboards via links in the Linkerd dashboard itself.
# Data Plane
## Data Plane
The Linkerd data plane is comprised of lightweight proxies, which are deployed
as sidecar containers alongside each instance of your service code. In order to
@ -98,13 +98,13 @@ The proxy's features include:
The proxy supports service discovery via DNS and the
[destination gRPC API](https://github.com/linkerd/linkerd2-proxy-api).
# CLI
## CLI
The Linkerd CLI is run locally on your machine and is used to interact with the
control and data planes. It can be used to view statistics, debug production
issues in real time and install/upgrade the control and data planes.
# Dashboard
## Dashboard
The Linkerd dashboard provides a high level view of what is happening with your
services in real time. It can be used to view the "golden" metrics (success
@ -114,7 +114,7 @@ running `linkerd dashboard` from the command line.
{{< fig src="/images/architecture/stat.png" title="Top Line Metrics">}}
# Grafana
## Grafana
As a component of the control plane, Grafana provides actionable dashboards for
your services out of the box. It is possible to see high level metrics and dig
@ -124,17 +124,21 @@ The dashboards that are provided out of the box include:
{{< gallery >}}
{{< gallery-item src="/images/screenshots/grafana-top.png" title="Top Line Metrics" >}}
{{< gallery-item src="/images/screenshots/grafana-top.png"
title="Top Line Metrics" >}}
{{< gallery-item src="/images/screenshots/grafana-deployment.png" title="Deployment Detail" >}}
{{< gallery-item src="/images/screenshots/grafana-deployment.png"
title="Deployment Detail" >}}
{{< gallery-item src="/images/screenshots/grafana-pod.png" title="Pod Detail" >}}
{{< gallery-item src="/images/screenshots/grafana-pod.png"
title="Pod Detail" >}}
{{< gallery-item src="/images/screenshots/grafana-health.png" title="Linkerd Health" >}}
{{< gallery-item src="/images/screenshots/grafana-health.png"
title="Linkerd Health" >}}
{{< /gallery >}}
# Prometheus
## Prometheus
Prometheus is a cloud native monitoring solution that is used to collect
and store all the Linkerd metrics. It is installed as part of the control plane

View File

@ -13,7 +13,7 @@ for a full list of all the possible checks, what they do and how to fix them.
{{< cli/examples "check" >}}
# Example output
## Example output
```bash
$ linkerd check

View File

@ -9,7 +9,7 @@ service that is receiving the requests. For more information about how to
create a service profile, see [service profiles](/2/features/service-profiles/).
and the [profile](/2/reference/cli/profile/) command reference.
# Inbound Metrics
## Inbound Metrics
By default, `routes` displays *inbound* metrics for a target. In other
words, it shows information about requests which are sent to the target and
@ -24,7 +24,7 @@ Displays the request volume, success rate, and latency of requests to the
perspective, which means that, for example, these latencies do not include the
network latency between a client and the `webapp` deployment.
# Outbound Metrics
## Outbound Metrics
If you specify the `--to` flag then `linkerd routes` displays *outbound* metrics
from the target resource to the resource in the `--to` flag. In contrast to

View File

@ -13,7 +13,7 @@ The Linkerd proxy exposes metrics that describe the traffic flowing through the
proxy. The following metrics are available at `/metrics` on the proxy's metrics
port (default: `:4191`) in the [Prometheus format][prom-format].
# Protocol-Level Metrics
## Protocol-Level Metrics
* `request_total`: A counter of the number of requests the proxy has received.
This is incremented when the request stream begins.
@ -48,7 +48,7 @@ Note that latency measurements are not exported to Prometheus until the stream
_completes_. This is necessary so that latencies can be labeled with the appropriate
[response classification](#response-labels).
## Labels
### Labels
Each of these metrics has the following labels:
@ -58,7 +58,7 @@ Each of these metrics has the following labels:
`outbound` if the request originated from inside of the pod.
* `tls`: `true` if the request's connection was secured with TLS.
### Response Labels
#### Response Labels
The following labels are only applicable on `response_*` metrics.
@ -70,7 +70,7 @@ The following labels are only applicable on `response_*` metrics.
for gRPC responses.
* `status_code`: The HTTP status code of the response.
### Outbound labels
#### Outbound labels
The following labels are only applicable if `direction=outbound`.
@ -88,7 +88,7 @@ The following labels are only applicable if `direction=outbound`.
selector roughly approximates a pod's `ReplicaSet` or
`ReplicationController`.
### Prometheus Collector labels
#### Prometheus Collector labels
The following labels are added by the Prometheus collector.
@ -96,7 +96,7 @@ The following labels are added by the Prometheus collector.
* `job`: The Prometheus job responsible for the collection, typically
`linkerd-proxy`.
#### Kubernetes labels added at collection time
##### Kubernetes labels added at collection time
Kubernetes namespace, pod name, and all labels are mapped to corresponding
Prometheus labels.
@ -109,7 +109,7 @@ Prometheus labels.
approximates a pod's `ReplicaSet` or
`ReplicationController`.
#### Linkerd labels added at collection time
##### Linkerd labels added at collection time
Kubernetes labels prefixed with `linkerd.io/` are added to your application at
`linkerd inject` time. More specifically, Kubernetes labels prefixed with
@ -154,25 +154,25 @@ request_total{
}
```
# Transport-Level Metrics
## Transport-Level Metrics
The following metrics are collected at the level of the underlying transport
layer.
* `tcp_open_total`: A counter of the total number of opened transport
connections.
connections.
* `tcp_close_total`: A counter of the total number of transport connections
which have closed.
which have closed.
* `tcp_open_connections`: A gauge of the number of transport connections
currently open.
currently open.
* `tcp_write_bytes_total`: A counter of the total number of sent bytes. This is
updated when the connection closes.
updated when the connection closes.
* `tcp_read_bytes_total`: A counter of the total number of received bytes. This
is updated when the connection closes.
is updated when the connection closes.
* `tcp_connection_duration_ms`: A histogram of the duration of the lifetime of a
connection, in milliseconds. This is updated when the connection closes.
connection, in milliseconds. This is updated when the connection closes.
## Labels
### Labels
Each of these metrics has the following labels:
@ -186,7 +186,7 @@ Each of these metrics has the following labels:
Note that the labels described above under the heading "Prometheus Collector labels"
are also added to transport-level metrics, when applicable.
### Connection Close Labels
#### Connection Close Labels
The following labels are added only to metrics which are updated when a
connection closes (`tcp_close_total` and `tcp_connection_duration_ms`):

View File

@ -4,11 +4,14 @@ title = "Service Profiles"
description = "Details on the specification and what is possible with service profiles."
+++
[Service profiles](/2/features/service-profiles/) provide Linkerd additional
information about a service. This is a reference for everything that can be done
with service profiles.
## Spec
A service profile spec must contain the following top level fields:
{{< table >}}
| field| value |
|------|-------|
@ -92,4 +95,3 @@ to this service as a ratio of the original request volume.
| `minRetriesPerSecond` | allowance of retries per second in addition to those allowed by the retryRatio |
| `ttl` | indicates for how long requests should be considered for the purposes of calculating the retryRatio |
{{< /table >}}

View File

@ -66,7 +66,8 @@ itself by port-forwarding `webapp` locally:
kubectl -n booksapp port-forward svc/webapp 7000 &
```
Open http://localhost:7000/ in your browser to see the frontend.
Open [http://localhost:7000/](http://localhost:7000/) in your browser to see the
frontend.
{{< fig src="/images/books/frontend.png" title="Frontend" >}}

View File

@ -10,10 +10,10 @@ aliases = [
The demo application emojivoto has some issues. Let's use that and Linkerd to
diagnose an application that fails in ways which are a little more subtle than
the entire service crashing. This guide assumes that you've followed the steps in the
[Getting Started](/2/getting-started/) guide and have Linkerd and the demo
application running in a Kubernetes cluster. If you've not done that yet, go get
started and come back when you're done!
the entire service crashing. This guide assumes that you've followed the steps
in the [Getting Started](/2/getting-started/) guide and have Linkerd and the
demo application running in a Kubernetes cluster. If you've not done that yet,
go get started and come back when you're done!
If you glance at the Linkerd dashboard (by running the `linkerd dashboard`
command), you should see all the resources in the `emojivoto` namespace,

View File

@ -22,7 +22,7 @@ metrics data from Linkerd:
- [Extracting data via Prometheus's APIs](#api)
- [Gather data from the proxies directly](#proxy)
# Using the Prometheus federation API {#federation}
## Using the Prometheus federation API {#federation}
If you are using Prometheus as your own metrics store, we recommend taking
advantage of Prometheus's *federation* API, which is designed exactly for the
@ -65,7 +65,7 @@ label definitions, have a look at [Proxy Metrics](/2/reference/proxy-metrics/).
For more information on Prometheus' `/federate` endpoint, have a look at the
[Prometheus federation docs](https://prometheus.io/docs/prometheus/latest/federation/).
# Using a Prometheus integration {#integration}
## Using a Prometheus integration {#integration}
If you are not using Prometheus as your own long-term data store, you may be
able to leverage one of Prometheus's [many
@ -73,7 +73,7 @@ integrations](https://prometheus.io/docs/operating/integrations/) to
automatically extract data from Linkerd's Prometheus instance into the data
store of your choice. Please refer to the Prometheus documentation for details.
# Extracting data via Prometheus's APIs {#api}
## Extracting data via Prometheus's APIs {#api}
If neither Prometheus federation nor Prometheus integrations are options for
you, it is possible to call Prometheus's APIs to extract data from Linkerd.
@ -101,7 +101,7 @@ retrieve all metrics:
curl http://linkerd-prometheus.linkerd.svc.cluster.local:9090/api/v1/query?query=request_total
```
# Gathering data from the Linkerd proxies directly {#proxy}
## Gathering data from the Linkerd proxies directly {#proxy}
Finally, if you want to avoid Linkerd's Prometheus entirely, you can query the
Linkerd proxies directly on their `/metrics` endpoint.

View File

@ -92,4 +92,3 @@ This will select only the requests observed and show the `:authority` and
attention to `rt_route`. If it is missing entirely, compare the `:path` to
the regex you'd like for it to match, and use a
[tester](https://regex101.com/) with the Golang flavor of regex.

View File

@ -7,7 +7,7 @@ description = "Troubleshoot issues with your Linkerd installation."
This section provides resolution steps for common problems reported with the
`linkerd check` command.
# The "pre-kubernetes-cluster-setup" checks {#pre-k8s-cluster}
## The "pre-kubernetes-cluster-setup" checks {#pre-k8s-cluster}
These checks only run when the `--pre` flag is set. This flag is intended for
use prior to running `linkerd install`, to verify your cluster is prepared for
@ -42,8 +42,8 @@ create the Kubernetes resources required for Linkerd installation, specifically:
√ can create CustomResourceDefinitions
```
For more information on cluster access, see the [GKE Setup](/2/tasks/install/#gke) section
above.
For more information on cluster access, see the
[GKE Setup](/2/tasks/install/#gke) section above.
# The "pre-kubernetes-setup" checks {#pre-k8s}
@ -58,8 +58,8 @@ permissions to install Linkerd.
√ can create ConfigMaps
```
For more information on cluster access, see the [GKE Setup](/2/tasks/install/#gke) section
above.
For more information on cluster access, see the
[GKE Setup](/2/tasks/install/#gke) section above.
## The "pre-kubernetes-single-namespace-setup" checks {#pre-single}
@ -99,8 +99,8 @@ installation, specifically:
√ can create RoleBindings
```
For more information on cluster access, see the [GKE Setup](/2/tasks/install/#gke) section
above.
For more information on cluster access, see the
[GKE Setup](/2/tasks/install/#gke) section above.
# The "kubernetes-api" checks {#k8s-api}
@ -144,7 +144,7 @@ kubectl config set-cluster ${KUBE_CONTEXT} --insecure-skip-tls-verify=true \
--server=${KUBE_CONTEXT}
```
# The "kubernetes-version" checks {#k8s-version}
## The "kubernetes-version" checks {#k8s-version}
Example failure:
@ -163,7 +163,7 @@ For more information on upgrading Kubernetes, see the page in the Kubernetes
Documentation on
[Upgrading a cluster](https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/#upgrading-a-cluster)
# The "linkerd-existence" checks {#l5d-existence}
## The "linkerd-existence" checks {#l5d-existence}
### √ control plane namespace exists {#l5d-existence-ns}
@ -253,7 +253,7 @@ kubectl -n linkerd port-forward \
curl localhost:9995/metrics
```
# The "linkerd-api" checks {#l5d-api}
## The "linkerd-api" checks {#l5d-api}
### √ control plane pods are ready {#l5d-api-control-ready}
@ -339,7 +339,7 @@ Check the logs on the control-plane's public API:
linkerd logs --control-plane-component controller --container public-api
```
# The "linkerd-service-profile" checks {#l5d-sp}
## The "linkerd-service-profile" checks {#l5d-sp}
Example failure:
@ -357,7 +357,7 @@ bad 51s
linkerd-controller-api.linkerd.svc.cluster.local 1m
```
# The "linkerd-version" checks {#l5d-version}
## The "linkerd-version" checks {#l5d-version}
### √ can determine the latest version {#l5d-version-latest}
@ -400,7 +400,7 @@ Example failures:
See the page on [Upgrading Linkerd](/2/upgrade/).
# The "linkerd-data-plane" checks {#l5d-data-plane}
## The "linkerd-data-plane" checks {#l5d-data-plane}
These checks only run when the `--proxy` flag is set. This flag is intended for
use after running `linkerd inject`, to verify the injected proxies are operating

View File

@ -17,9 +17,7 @@ There are three components that need to be upgraded:
In this guide, we'll walk you through how to upgrade all three components
incrementally without taking down any of your services.
# Upgrade notice: stable-2.2.0
## Breaking changes
## Upgrade notice: stable-2.2.0
There are two breaking changes in `stable-2.2.0`. One relates to
[Service Profiles](/2/features/service-profiles/), the other relates to
@ -53,8 +51,8 @@ of the following:
[Upgrade the data plane](#upgrade-the-data-plane))
- Delete and redeploy the application
Auto-inject support for application updates is tracked at:
https://github.com/linkerd/linkerd2/issues/2260
Auto-inject support for application updates is tracked on
[github](https://github.com/linkerd/linkerd2/issues/2260)
# Upgrade notice: stable-2.1.0
@ -145,9 +143,9 @@ It is expected that the Client and Server versions won't match at this point in
the process. Nothing has been changed on the cluster, only the local CLI has
been updated.
### Notes
- Until you upgrade the control plane, some new CLI commands may not work.
{{< note >}}
Until you upgrade the control plane, some new CLI commands may not work.
{{< /note >}}
## Upgrade the control plane
@ -212,11 +210,11 @@ Client version: {{% latestversion %}}
Server version: {{% latestversion %}}
```
### Notes
- You will lose the historical data from Prometheus. If you would like to have
that data persisted through an upgrade, take a look at the
[persistence documentation](/2/observability/exporting-metrics/)
{{< note >}}
You will lose the historical data from Prometheus. If you would like to have
that data persisted through an upgrade, take a look at the
[persistence documentation](/2/observability/exporting-metrics/)
{{< /note >}}
## Upgrade the data plane

View File

@ -56,7 +56,7 @@
<img src="/images/logos/monzo.svg" alt="Monzo" />
</a>
<a class="col-md-2 col-4 my-auto pb-3"
href="https://www.expedia.com"
href="https://expedia.com"
target="_blank">
<img src="/images/logos/expedia.svg" alt="Expedia" />
</a>