Added a --ha flag to the install CLI (#1852)

This change applies recommended production configuration when installing the control plane. Currently it runs 3 replicas of the controller and adds sensible resource requests to each of the control plane's components and containers.
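
For example, a minimal usage sketch (assuming a configured kubectl context; the HA defaults only apply where no explicit override is given):

    linkerd install --ha | kubectl apply -f -
    linkerd install --ha --controller-replicas 5 | kubectl apply -f -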

Fixes #1101

Signed-off-by: Ben Lambert <ben@blam.sh>
Ben Lambert authored on 2018-11-21 05:03:59 +01:00; committed by Kevin Lingerfelt
parent 7a7f6b6ecb
commit 297cb570f2
7 changed files with 1875 additions and 30 deletions


@@ -23,8 +23,6 @@ type installConfig struct {
PrometheusImage string
GrafanaImage string
ControllerReplicas uint
WebReplicas uint
PrometheusReplicas uint
ImagePullPolicy string
UUID string
CliVersion string
@@ -59,28 +57,31 @@ type installConfig struct {
ProxyResourceRequestMemory string
ProxyBindTimeout string
SingleNamespace bool
EnableHA bool
}
type installOptions struct {
controllerReplicas uint
webReplicas uint
prometheusReplicas uint
controllerLogLevel string
proxyAutoInject bool
singleNamespace bool
highAvailability bool
*proxyConfigOptions
}
const prometheusProxyOutboundCapacity = 10000
const (
prometheusProxyOutboundCapacity = 10000
defaultControllerReplicas = 1
defaultHAControllerReplicas = 3
)
func newInstallOptions() *installOptions {
return &installOptions{
controllerReplicas: 1,
webReplicas: 1,
prometheusReplicas: 1,
controllerReplicas: defaultControllerReplicas,
controllerLogLevel: "info",
proxyAutoInject: false,
singleNamespace: false,
highAvailability: false,
proxyConfigOptions: newProxyConfigOptions(),
}
}
@@ -104,12 +105,10 @@ func newCmdInstall() *cobra.Command {
addProxyConfigFlags(cmd, options.proxyConfigOptions)
cmd.PersistentFlags().UintVar(&options.controllerReplicas, "controller-replicas", options.controllerReplicas, "Replicas of the controller to deploy")
cmd.PersistentFlags().UintVar(&options.webReplicas, "web-replicas", options.webReplicas, "Replicas of the web server to deploy")
cmd.PersistentFlags().UintVar(&options.prometheusReplicas, "prometheus-replicas", options.prometheusReplicas, "Replicas of prometheus to deploy")
cmd.PersistentFlags().StringVar(&options.controllerLogLevel, "controller-log-level", options.controllerLogLevel, "Log level for the controller and web components")
cmd.PersistentFlags().BoolVar(&options.proxyAutoInject, "proxy-auto-inject", options.proxyAutoInject, "Experimental: Enable proxy sidecar auto-injection webhook (default false)")
cmd.PersistentFlags().BoolVar(&options.singleNamespace, "single-namespace", options.singleNamespace, "Experimental: Configure the control plane to only operate in the installed namespace (default false)")
cmd.PersistentFlags().BoolVar(&options.highAvailability, "ha", options.highAvailability, "Experimental: Enable HA deployment config for the control plane")
return cmd
}
@@ -130,6 +129,18 @@ func validateAndBuildConfig(options *installOptions) (*installConfig, error) {
ignoreOutboundPorts = append(ignoreOutboundPorts, fmt.Sprintf("%d", p))
}
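// When --ha is set, raise defaults only where the user has not supplied an
// explicit override (controller replica count and proxy resource requests).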
if options.highAvailability && options.controllerReplicas == defaultControllerReplicas {
options.controllerReplicas = defaultHAControllerReplicas
}
if options.highAvailability && options.proxyCpuRequest == "" {
options.proxyCpuRequest = "10m"
}
if options.highAvailability && options.proxyMemoryRequest == "" {
options.proxyMemoryRequest = "20Mi"
}
return &installConfig{
Namespace: controlPlaneNamespace,
ControllerImage: fmt.Sprintf("%s/controller:%s", options.dockerRegistry, options.linkerdVersion),
@@ -137,8 +148,6 @@ func validateAndBuildConfig(options *installOptions) (*installConfig, error) {
PrometheusImage: "prom/prometheus:v2.4.0",
GrafanaImage: fmt.Sprintf("%s/grafana:%s", options.dockerRegistry, options.linkerdVersion),
ControllerReplicas: options.controllerReplicas,
WebReplicas: options.webReplicas,
PrometheusReplicas: options.prometheusReplicas,
ImagePullPolicy: options.imagePullPolicy,
UUID: uuid.NewV4().String(),
CliVersion: k8s.CreatedByAnnotationValue(),
@@ -173,6 +182,7 @@ func validateAndBuildConfig(options *installOptions) (*installConfig, error) {
ProxyResourceRequestMemory: options.proxyMemoryRequest,
ProxyBindTimeout: "1m",
SingleNamespace: options.singleNamespace,
EnableHA: options.highAvailability,
}, nil
}
@@ -186,6 +196,7 @@ func render(config installConfig, w io.Writer, options *installOptions) error {
if err != nil {
return err
}
if config.EnableTLS {
tlsTemplate, err := template.New("linkerd").Parse(install.TlsTemplate)
if err != nil {


@@ -16,6 +16,7 @@ func TestRender(t *testing.T) {
if err != nil {
t.Fatalf("Unexpected error from validateAndBuildConfig(): %v", err)
}
defaultConfig.UUID = "deaab91a-f4ab-448a-b7d1-c832a2fa0a60"
// A configuration that shows that all config setting strings are honored
@@ -28,8 +29,6 @@ func TestRender(t *testing.T) {
PrometheusImage: "PrometheusImage",
GrafanaImage: "GrafanaImage",
ControllerReplicas: 1,
WebReplicas: 2,
PrometheusReplicas: 3,
ImagePullPolicy: "ImagePullPolicy",
UUID: "UUID",
CliVersion: "CliVersion",
@@ -72,8 +71,6 @@ func TestRender(t *testing.T) {
PrometheusImage: "PrometheusImage",
GrafanaImage: "GrafanaImage",
ControllerReplicas: 1,
WebReplicas: 2,
PrometheusReplicas: 3,
ImagePullPolicy: "ImagePullPolicy",
UUID: "UUID",
CliVersion: "CliVersion",
@@ -92,14 +89,30 @@ func TestRender(t *testing.T) {
SingleNamespace: true,
}
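// Build a config with --ha enabled; the UUID is pinned so the rendered
// output can be compared byte-for-byte against the golden file.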
haOptions := newInstallOptions()
haOptions.highAvailability = true
haConfig, _ := validateAndBuildConfig(haOptions)
haConfig.UUID = "deaab91a-f4ab-448a-b7d1-c832a2fa0a60"
haWithOverridesOptions := newInstallOptions()
haWithOverridesOptions.highAvailability = true
haWithOverridesOptions.controllerReplicas = 2
haWithOverridesOptions.proxyCpuRequest = "400m"
haWithOverridesOptions.proxyMemoryRequest = "300Mi"
haWithOverridesConfig, _ := validateAndBuildConfig(haWithOverridesOptions)
haWithOverridesConfig.UUID = "deaab91a-f4ab-448a-b7d1-c832a2fa0a60"
testCases := []struct {
config installConfig
options *installOptions
controlPlaneNamespace string
goldenFileName string
}{
{*defaultConfig, defaultControlPlaneNamespace, "testdata/install_default.golden"},
{metaConfig, metaConfig.Namespace, "testdata/install_output.golden"},
{singleNamespaceConfig, singleNamespaceConfig.Namespace, "testdata/install_single_namespace_output.golden"},
{*defaultConfig, defaultOptions, defaultControlPlaneNamespace, "testdata/install_default.golden"},
{metaConfig, defaultOptions, metaConfig.Namespace, "testdata/install_output.golden"},
{singleNamespaceConfig, defaultOptions, singleNamespaceConfig.Namespace, "testdata/install_single_namespace_output.golden"},
{*haConfig, haOptions, haConfig.Namespace, "testdata/install_ha_output.golden"},
{*haWithOverridesConfig, haWithOverridesOptions, haWithOverridesConfig.Namespace, "testdata/install_ha_with_overrides_output.golden"},
}
for i, tc := range testCases {
@@ -107,7 +120,7 @@ func TestRender(t *testing.T) {
controlPlaneNamespace = tc.controlPlaneNamespace
var buf bytes.Buffer
err := render(tc.config, &buf, defaultOptions)
err := render(tc.config, &buf, tc.options)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}


@@ -0,0 +1,887 @@
### Namespace ###
kind: Namespace
apiVersion: v1
metadata:
name: linkerd
### Service Account Controller ###
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-controller
namespace: linkerd
### Controller RBAC ###
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-linkerd-controller
rules:
- apiGroups: ["extensions", "apps"]
resources: ["deployments", "replicasets"]
verbs: ["list", "get", "watch"]
- apiGroups: [""]
resources: ["pods", "endpoints", "services", "namespaces", "replicationcontrollers"]
verbs: ["list", "get", "watch"]
- apiGroups: ["linkerd.io"]
resources: ["serviceprofiles"]
verbs: ["list", "get", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-linkerd-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: linkerd-linkerd-controller
subjects:
- kind: ServiceAccount
name: linkerd-controller
namespace: linkerd
### Service Account Prometheus ###
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-prometheus
namespace: linkerd
### Prometheus RBAC ###
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-linkerd-prometheus
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-linkerd-prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: linkerd-linkerd-prometheus
subjects:
- kind: ServiceAccount
name: linkerd-prometheus
namespace: linkerd
### Controller ###
---
kind: Service
apiVersion: v1
metadata:
name: api
namespace: linkerd
labels:
linkerd.io/control-plane-component: controller
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: controller
ports:
- name: http
port: 8085
targetPort: 8085
---
kind: Service
apiVersion: v1
metadata:
name: proxy-api
namespace: linkerd
labels:
linkerd.io/control-plane-component: controller
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: controller
ports:
- name: grpc
port: 8086
targetPort: 8086
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: controller
name: controller
namespace: linkerd
spec:
replicas: 3
strategy: {}
template:
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: controller
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: controller
spec:
containers:
- args:
- public-api
- -prometheus-url=http://prometheus.linkerd.svc.cluster.local:9090
- -controller-namespace=linkerd
- -single-namespace=false
- -log-level=info
image: gcr.io/linkerd-io/controller:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /ping
port: 9995
initialDelaySeconds: 10
name: public-api
ports:
- containerPort: 8085
name: http
- containerPort: 9995
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9995
resources:
requests:
cpu: 20m
memory: 50Mi
- args:
- proxy-api
- -addr=:8086
- -controller-namespace=linkerd
- -single-namespace=false
- -enable-tls=false
- -log-level=info
image: gcr.io/linkerd-io/controller:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /ping
port: 9996
initialDelaySeconds: 10
name: proxy-api
ports:
- containerPort: 8086
name: grpc
- containerPort: 9996
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9996
resources:
requests:
cpu: 20m
memory: 50Mi
- args:
- tap
- -controller-namespace=linkerd
- -single-namespace=false
- -log-level=info
image: gcr.io/linkerd-io/controller:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /ping
port: 9998
initialDelaySeconds: 10
name: tap
ports:
- containerPort: 8088
name: grpc
- containerPort: 9998
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9998
resources:
requests:
cpu: 20m
memory: 50Mi
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://localhost.:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources:
requests:
cpu: 10m
memory: 20Mi
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
serviceAccount: linkerd-controller
status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: serviceprofiles.linkerd.io
namespace: linkerd
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
group: linkerd.io
version: v1alpha1
scope: Namespaced
names:
plural: serviceprofiles
singular: serviceprofile
kind: ServiceProfile
shortNames:
- sp
### Web ###
---
kind: Service
apiVersion: v1
metadata:
name: web
namespace: linkerd
labels:
linkerd.io/control-plane-component: web
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: web
ports:
- name: http
port: 8084
targetPort: 8084
- name: admin-http
port: 9994
targetPort: 9994
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: web
name: web
namespace: linkerd
spec:
replicas: 1
strategy: {}
template:
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: web
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: web
spec:
containers:
- args:
- -api-addr=api.linkerd.svc.cluster.local:8085
- -uuid=deaab91a-f4ab-448a-b7d1-c832a2fa0a60
- -controller-namespace=linkerd
- -log-level=info
image: gcr.io/linkerd-io/web:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /ping
port: 9994
initialDelaySeconds: 10
name: web
ports:
- containerPort: 8084
name: http
- containerPort: 9994
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9994
resources:
requests:
cpu: 20m
memory: 50Mi
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.linkerd.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources:
requests:
cpu: 10m
memory: 20Mi
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
status: {}
---
kind: Service
apiVersion: v1
metadata:
name: prometheus
namespace: linkerd
labels:
linkerd.io/control-plane-component: prometheus
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: prometheus
ports:
- name: admin-http
port: 9090
targetPort: 9090
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: prometheus
name: prometheus
namespace: linkerd
spec:
replicas: 1
strategy: {}
template:
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: prometheus
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: prometheus
spec:
containers:
- args:
- --storage.tsdb.retention=6h
- --config.file=/etc/prometheus/prometheus.yml
image: prom/prometheus:v2.4.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /-/healthy
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
name: prometheus
ports:
- containerPort: 9090
name: admin-http
readinessProbe:
httpGet:
path: /-/ready
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
resources:
requests:
cpu: 300m
memory: 300Mi
volumeMounts:
- mountPath: /etc/prometheus
name: prometheus-config
readOnly: true
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.linkerd.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY
value: "10000"
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources:
requests:
cpu: 10m
memory: 20Mi
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
serviceAccount: linkerd-prometheus
volumes:
- configMap:
name: prometheus-config
name: prometheus-config
status: {}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: prometheus-config
namespace: linkerd
labels:
linkerd.io/control-plane-component: prometheus
annotations:
linkerd.io/created-by: linkerd/cli undefined
data:
prometheus.yml: |-
global:
scrape_interval: 10s
scrape_timeout: 10s
evaluation_interval: 10s
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
- job_name: 'grafana'
kubernetes_sd_configs:
- role: pod
namespaces:
names: ['linkerd']
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_container_name
action: keep
regex: ^grafana$
- job_name: 'linkerd-controller'
kubernetes_sd_configs:
- role: pod
namespaces:
names: ['linkerd']
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_label_linkerd_io_control_plane_component
- __meta_kubernetes_pod_container_port_name
action: keep
regex: (.*);admin-http$
- source_labels: [__meta_kubernetes_pod_container_name]
action: replace
target_label: component
- job_name: 'linkerd-proxy'
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_container_name
- __meta_kubernetes_pod_container_port_name
- __meta_kubernetes_pod_label_linkerd_io_control_plane_ns
action: keep
regex: ^linkerd-proxy;linkerd-metrics;linkerd$
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: pod
# special case k8s' "job" label, to not interfere with prometheus' "job"
# label
# __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo =>
# k8s_job=foo
- source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job]
action: replace
target_label: k8s_job
# __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo =>
# deployment=foo
- action: labelmap
regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+)
# drop all labels that we just made copies of in the previous labelmap
- action: labeldrop
regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+)
# __meta_kubernetes_pod_label_linkerd_io_foo=bar =>
# foo=bar
- action: labelmap
regex: __meta_kubernetes_pod_label_linkerd_io_(.+)
### Grafana ###
---
kind: Service
apiVersion: v1
metadata:
name: grafana
namespace: linkerd
labels:
linkerd.io/control-plane-component: grafana
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: grafana
ports:
- name: http
port: 3000
targetPort: 3000
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: grafana
name: grafana
namespace: linkerd
spec:
replicas: 1
strategy: {}
template:
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: grafana
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: grafana
spec:
containers:
- image: gcr.io/linkerd-io/grafana:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /api/health
port: 3000
name: grafana
ports:
- containerPort: 3000
name: http
readinessProbe:
failureThreshold: 10
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 30
resources:
requests:
cpu: 20m
memory: 50Mi
volumeMounts:
- mountPath: /etc/grafana
name: grafana-config
readOnly: true
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.linkerd.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources:
requests:
cpu: 10m
memory: 20Mi
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
volumes:
- configMap:
items:
- key: grafana.ini
path: grafana.ini
- key: datasources.yaml
path: provisioning/datasources/datasources.yaml
- key: dashboards.yaml
path: provisioning/dashboards/dashboards.yaml
name: grafana-config
name: grafana-config
status: {}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: grafana-config
namespace: linkerd
labels:
linkerd.io/control-plane-component: grafana
annotations:
linkerd.io/created-by: linkerd/cli undefined
data:
grafana.ini: |-
instance_name = linkerd-grafana
[server]
root_url = %(protocol)s://%(domain)s:/api/v1/namespaces/linkerd/services/grafana:http/proxy/
[auth]
disable_login_form = true
[auth.anonymous]
enabled = true
org_role = Editor
[auth.basic]
enabled = false
[analytics]
check_for_updates = false
datasources.yaml: |-
apiVersion: 1
datasources:
- name: prometheus
type: prometheus
access: proxy
orgId: 1
url: http://prometheus.linkerd.svc.cluster.local:9090
isDefault: true
jsonData:
timeInterval: "5s"
version: 1
editable: true
dashboards.yaml: |-
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: true
editable: true
options:
path: /var/lib/grafana/dashboards
homeDashboardId: linkerd-top-line
---


@@ -0,0 +1,887 @@
### Namespace ###
kind: Namespace
apiVersion: v1
metadata:
name: linkerd
### Service Account Controller ###
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-controller
namespace: linkerd
### Controller RBAC ###
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-linkerd-controller
rules:
- apiGroups: ["extensions", "apps"]
resources: ["deployments", "replicasets"]
verbs: ["list", "get", "watch"]
- apiGroups: [""]
resources: ["pods", "endpoints", "services", "namespaces", "replicationcontrollers"]
verbs: ["list", "get", "watch"]
- apiGroups: ["linkerd.io"]
resources: ["serviceprofiles"]
verbs: ["list", "get", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-linkerd-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: linkerd-linkerd-controller
subjects:
- kind: ServiceAccount
name: linkerd-controller
namespace: linkerd
### Service Account Prometheus ###
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-prometheus
namespace: linkerd
### Prometheus RBAC ###
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-linkerd-prometheus
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-linkerd-prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: linkerd-linkerd-prometheus
subjects:
- kind: ServiceAccount
name: linkerd-prometheus
namespace: linkerd
### Controller ###
---
kind: Service
apiVersion: v1
metadata:
name: api
namespace: linkerd
labels:
linkerd.io/control-plane-component: controller
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: controller
ports:
- name: http
port: 8085
targetPort: 8085
---
kind: Service
apiVersion: v1
metadata:
name: proxy-api
namespace: linkerd
labels:
linkerd.io/control-plane-component: controller
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: controller
ports:
- name: grpc
port: 8086
targetPort: 8086
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: controller
name: controller
namespace: linkerd
spec:
replicas: 2
strategy: {}
template:
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: controller
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: controller
spec:
containers:
- args:
- public-api
- -prometheus-url=http://prometheus.linkerd.svc.cluster.local:9090
- -controller-namespace=linkerd
- -single-namespace=false
- -log-level=info
image: gcr.io/linkerd-io/controller:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /ping
port: 9995
initialDelaySeconds: 10
name: public-api
ports:
- containerPort: 8085
name: http
- containerPort: 9995
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9995
resources:
requests:
cpu: 20m
memory: 50Mi
- args:
- proxy-api
- -addr=:8086
- -controller-namespace=linkerd
- -single-namespace=false
- -enable-tls=false
- -log-level=info
image: gcr.io/linkerd-io/controller:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /ping
port: 9996
initialDelaySeconds: 10
name: proxy-api
ports:
- containerPort: 8086
name: grpc
- containerPort: 9996
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9996
resources:
requests:
cpu: 20m
memory: 50Mi
- args:
- tap
- -controller-namespace=linkerd
- -single-namespace=false
- -log-level=info
image: gcr.io/linkerd-io/controller:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /ping
port: 9998
initialDelaySeconds: 10
name: tap
ports:
- containerPort: 8088
name: grpc
- containerPort: 9998
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9998
resources:
requests:
cpu: 20m
memory: 50Mi
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://localhost.:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources:
requests:
cpu: 400m
memory: 300Mi
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
serviceAccount: linkerd-controller
status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: serviceprofiles.linkerd.io
namespace: linkerd
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
group: linkerd.io
version: v1alpha1
scope: Namespaced
names:
plural: serviceprofiles
singular: serviceprofile
kind: ServiceProfile
shortNames:
- sp
### Web ###
---
kind: Service
apiVersion: v1
metadata:
name: web
namespace: linkerd
labels:
linkerd.io/control-plane-component: web
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: web
ports:
- name: http
port: 8084
targetPort: 8084
- name: admin-http
port: 9994
targetPort: 9994
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: web
name: web
namespace: linkerd
spec:
replicas: 1
strategy: {}
template:
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: web
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: web
spec:
containers:
- args:
- -api-addr=api.linkerd.svc.cluster.local:8085
- -uuid=deaab91a-f4ab-448a-b7d1-c832a2fa0a60
- -controller-namespace=linkerd
- -log-level=info
image: gcr.io/linkerd-io/web:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /ping
port: 9994
initialDelaySeconds: 10
name: web
ports:
- containerPort: 8084
name: http
- containerPort: 9994
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9994
resources:
requests:
cpu: 20m
memory: 50Mi
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.linkerd.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources:
requests:
cpu: 400m
memory: 300Mi
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
status: {}
---
kind: Service
apiVersion: v1
metadata:
name: prometheus
namespace: linkerd
labels:
linkerd.io/control-plane-component: prometheus
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: prometheus
ports:
- name: admin-http
port: 9090
targetPort: 9090
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: prometheus
name: prometheus
namespace: linkerd
spec:
replicas: 1
strategy: {}
template:
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: prometheus
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: prometheus
spec:
containers:
- args:
- --storage.tsdb.retention=6h
- --config.file=/etc/prometheus/prometheus.yml
image: prom/prometheus:v2.4.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /-/healthy
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
name: prometheus
ports:
- containerPort: 9090
name: admin-http
readinessProbe:
httpGet:
path: /-/ready
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
resources:
requests:
cpu: 300m
memory: 300Mi
volumeMounts:
- mountPath: /etc/prometheus
name: prometheus-config
readOnly: true
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.linkerd.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY
value: "10000"
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources:
requests:
cpu: 400m
memory: 300Mi
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
serviceAccount: linkerd-prometheus
volumes:
- configMap:
name: prometheus-config
name: prometheus-config
status: {}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: prometheus-config
namespace: linkerd
labels:
linkerd.io/control-plane-component: prometheus
annotations:
linkerd.io/created-by: linkerd/cli undefined
data:
prometheus.yml: |-
global:
scrape_interval: 10s
scrape_timeout: 10s
evaluation_interval: 10s
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
- job_name: 'grafana'
kubernetes_sd_configs:
- role: pod
namespaces:
names: ['linkerd']
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_container_name
action: keep
regex: ^grafana$
- job_name: 'linkerd-controller'
kubernetes_sd_configs:
- role: pod
namespaces:
names: ['linkerd']
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_label_linkerd_io_control_plane_component
- __meta_kubernetes_pod_container_port_name
action: keep
regex: (.*);admin-http$
- source_labels: [__meta_kubernetes_pod_container_name]
action: replace
target_label: component
- job_name: 'linkerd-proxy'
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_container_name
- __meta_kubernetes_pod_container_port_name
- __meta_kubernetes_pod_label_linkerd_io_control_plane_ns
action: keep
regex: ^linkerd-proxy;linkerd-metrics;linkerd$
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: pod
# special case k8s' "job" label, to not interfere with prometheus' "job"
# label
# __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo =>
# k8s_job=foo
- source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job]
action: replace
target_label: k8s_job
# __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo =>
# deployment=foo
- action: labelmap
regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+)
# drop all labels that we just made copies of in the previous labelmap
- action: labeldrop
regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+)
# __meta_kubernetes_pod_label_linkerd_io_foo=bar =>
# foo=bar
- action: labelmap
regex: __meta_kubernetes_pod_label_linkerd_io_(.+)
### Grafana ###
---
kind: Service
apiVersion: v1
metadata:
name: grafana
namespace: linkerd
labels:
linkerd.io/control-plane-component: grafana
annotations:
linkerd.io/created-by: linkerd/cli undefined
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: grafana
ports:
- name: http
port: 3000
targetPort: 3000
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: grafana
name: grafana
namespace: linkerd
spec:
replicas: 1
strategy: {}
template:
metadata:
annotations:
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
linkerd.io/control-plane-component: grafana
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: grafana
spec:
containers:
- image: gcr.io/linkerd-io/grafana:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /api/health
port: 3000
name: grafana
ports:
- containerPort: 3000
name: http
readinessProbe:
failureThreshold: 10
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 30
resources:
requests:
cpu: 20m
memory: 50Mi
volumeMounts:
- mountPath: /etc/grafana
name: grafana-config
readOnly: true
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.linkerd.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources:
requests:
cpu: 400m
memory: 300Mi
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
volumes:
- configMap:
items:
- key: grafana.ini
path: grafana.ini
- key: datasources.yaml
path: provisioning/datasources/datasources.yaml
- key: dashboards.yaml
path: provisioning/dashboards/dashboards.yaml
name: grafana-config
name: grafana-config
status: {}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: grafana-config
namespace: linkerd
labels:
linkerd.io/control-plane-component: grafana
annotations:
linkerd.io/created-by: linkerd/cli undefined
data:
grafana.ini: |-
instance_name = linkerd-grafana
[server]
root_url = %(protocol)s://%(domain)s:/api/v1/namespaces/linkerd/services/grafana:http/proxy/
[auth]
disable_login_form = true
[auth.anonymous]
enabled = true
org_role = Editor
[auth.basic]
enabled = false
[analytics]
check_for_updates = false
datasources.yaml: |-
apiVersion: 1
datasources:
- name: prometheus
type: prometheus
access: proxy
orgId: 1
url: http://prometheus.linkerd.svc.cluster.local:9090
isDefault: true
jsonData:
timeInterval: "5s"
version: 1
editable: true
dashboards.yaml: |-
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: true
editable: true
options:
path: /var/lib/grafana/dashboards
homeDashboardId: linkerd-top-line
---


@@ -336,7 +336,7 @@ metadata:
name: web
namespace: Namespace
spec:
replicas: 2
replicas: 1
strategy: {}
template:
metadata:
@@ -468,7 +468,7 @@ metadata:
name: prometheus
namespace: Namespace
spec:
replicas: 3
replicas: 1
strategy: {}
template:
metadata:


@@ -338,7 +338,7 @@ metadata:
name: web
namespace: Namespace
spec:
replicas: 2
replicas: 1
strategy: {}
template:
metadata:
@@ -470,7 +470,7 @@ metadata:
name: prometheus
namespace: Namespace
spec:
replicas: 3
replicas: 1
strategy: {}
template:
metadata:


@@ -179,6 +179,12 @@ spec:
path: /ready
port: 9995
failureThreshold: 7
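{{/* These resource requests are rendered only when install is run with --ha, which sets EnableHA */}}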
{{- if .EnableHA }}
resources:
requests:
cpu: 20m
memory: 50Mi
{{- end }}
- name: proxy-api
ports:
- name: grpc
@@ -204,6 +210,12 @@ spec:
path: /ready
port: 9996
failureThreshold: 7
{{- if .EnableHA }}
resources:
requests:
cpu: 20m
memory: 50Mi
{{- end }}
- name: tap
ports:
- name: grpc
@@ -227,6 +239,12 @@ spec:
path: /ready
port: 9998
failureThreshold: 7
{{- if .EnableHA }}
resources:
requests:
cpu: 20m
memory: 50Mi
{{- end }}
### Service Profile CRD ###
---
@@ -282,7 +300,7 @@ metadata:
annotations:
{{.CreatedByAnnotation}}: {{.CliVersion}}
spec:
replicas: {{.WebReplicas}}
replicas: 1
template:
metadata:
labels:
@@ -314,6 +332,12 @@ spec:
path: /ready
port: 9994
failureThreshold: 7
{{- if .EnableHA }}
resources:
requests:
cpu: 20m
memory: 50Mi
{{- end }}
### Prometheus ###
---
@@ -346,7 +370,7 @@ metadata:
annotations:
{{.CreatedByAnnotation}}: {{.CliVersion}}
spec:
replicas: {{.PrometheusReplicas}}
replicas: 1
template:
metadata:
labels:
@@ -385,6 +409,12 @@ spec:
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
{{- if .EnableHA }}
resources:
requests:
cpu: 300m
memory: 300Mi
{{- end }}
---
kind: ConfigMap
@@ -546,7 +576,12 @@ spec:
timeoutSeconds: 30
failureThreshold: 10
periodSeconds: 10
{{- if .EnableHA }}
resources:
requests:
cpu: 20m
memory: 50Mi
{{- end }}
---
kind: ConfigMap
apiVersion: v1
@@ -675,7 +710,7 @@ metadata:
annotations:
{{.CreatedByAnnotation}}: {{.CliVersion}}
spec:
replicas: {{.ControllerReplicas}}
replicas: 1
template:
metadata:
labels:
@@ -709,6 +744,12 @@ spec:
path: /ready
port: 9997
failureThreshold: 7
{{- if .EnableHA }}
resources:
requests:
cpu: 20m
memory: 50Mi
{{- end }}
`
const ProxyInjectorTemplate = `
@@ -724,7 +765,7 @@ metadata:
annotations:
{{.CreatedByAnnotation}}: {{.CliVersion}}
spec:
replicas: {{.ControllerReplicas}}
replicas: 1
selector:
matchLabels:
{{.ControllerComponentLabel}}: proxy-injector
@@ -774,6 +815,12 @@ spec:
- name: proxy-spec
configMap:
name: {{.ProxyInjectorSidecarConfig}}
{{- if .EnableHA }}
resources:
requests:
cpu: 20m
memory: 50Mi
{{- end }}
---
### Proxy Injector Service Account ###