linkerd2/cli/cmd/testdata/install_output.golden

### Namespace ###
kind: Namespace
apiVersion: v1
metadata:
name: Namespace
labels:
ProxyAutoInjectLabel: disabled
### Service Account Controller ###
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-controller
namespace: Namespace
### Controller RBAC ###
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-Namespace-controller
rules:
- apiGroups: ["extensions", "apps"]
resources: ["deployments", "replicasets"]
verbs: ["list", "get", "watch"]
- apiGroups: [""]
resources: ["pods", "endpoints", "services", "namespaces", "replicationcontrollers"]
verbs: ["list", "get", "watch"]
- apiGroups: ["linkerd.io"]
resources: ["serviceprofiles"]
verbs: ["list", "get", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-Namespace-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: linkerd-Namespace-controller
subjects:
- kind: ServiceAccount
name: linkerd-controller
namespace: Namespace
### Service Account Prometheus ###
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-prometheus
namespace: Namespace
### Prometheus RBAC ###
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-Namespace-prometheus
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-Namespace-prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: linkerd-Namespace-prometheus
subjects:
- kind: ServiceAccount
name: linkerd-prometheus
namespace: Namespace
### Controller ###
---
kind: Service
apiVersion: v1
metadata:
name: api
namespace: Namespace
labels:
ControllerComponentLabel: controller
annotations:
CreatedByAnnotation: CliVersion
spec:
type: ClusterIP
selector:
ControllerComponentLabel: controller
ports:
- name: http
port: 8085
targetPort: 8085
---
kind: Service
apiVersion: v1
metadata:
name: proxy-api
namespace: Namespace
labels:
ControllerComponentLabel: controller
annotations:
CreatedByAnnotation: CliVersion
spec:
type: ClusterIP
selector:
ControllerComponentLabel: controller
ports:
- name: grpc
port: 123
targetPort: 123
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
CreatedByAnnotation: CliVersion
creationTimestamp: null
labels:
ControllerComponentLabel: controller
name: controller
namespace: Namespace
spec:
replicas: 1
strategy: {}
template:
metadata:
annotations:
CreatedByAnnotation: CliVersion
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
ControllerComponentLabel: controller
linkerd.io/control-plane-ns: Namespace
linkerd.io/proxy-deployment: controller
spec:
containers:
- args:
- public-api
- -prometheus-url=http://prometheus.Namespace.svc.cluster.local:9090
- -controller-namespace=Namespace
- -single-namespace=false
- -log-level=ControllerLogLevel
image: ControllerImage
imagePullPolicy: ImagePullPolicy
livenessProbe:
httpGet:
path: /ping
port: 9995
initialDelaySeconds: 10
name: public-api
ports:
- containerPort: 8085
name: http
- containerPort: 9995
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9995
resources: {}
- args:
- destination
- -controller-namespace=Namespace
- -single-namespace=false
- -enable-tls=true
- -log-level=ControllerLogLevel
image: ControllerImage
imagePullPolicy: ImagePullPolicy
livenessProbe:
httpGet:
path: /ping
port: 9999
initialDelaySeconds: 10
name: destination
ports:
- containerPort: 8089
name: grpc
- containerPort: 9999
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9999
resources: {}
- args:
- proxy-api
- -addr=:123
- -log-level=ControllerLogLevel
image: ControllerImage
imagePullPolicy: ImagePullPolicy
livenessProbe:
httpGet:
path: /ping
port: 9996
initialDelaySeconds: 10
name: proxy-api
ports:
- containerPort: 123
name: grpc
- containerPort: 9996
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9996
resources: {}
- args:
- tap
- -controller-namespace=Namespace
- -single-namespace=false
- -log-level=ControllerLogLevel
image: ControllerImage
imagePullPolicy: ImagePullPolicy
livenessProbe:
httpGet:
path: /ping
port: 9998
initialDelaySeconds: 10
name: tap
ports:
- containerPort: 8088
name: grpc
- containerPort: 9998
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9998
resources: {}
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://localhost.:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources: {}
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
serviceAccount: linkerd-controller
status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: serviceprofiles.linkerd.io
namespace: Namespace
annotations:
CreatedByAnnotation: CliVersion
spec:
group: linkerd.io
version: v1alpha1
scope: Namespaced
names:
plural: serviceprofiles
singular: serviceprofile
kind: ServiceProfile
shortNames:
- sp
### Web ###
---
kind: Service
apiVersion: v1
metadata:
name: web
namespace: Namespace
labels:
ControllerComponentLabel: web
annotations:
CreatedByAnnotation: CliVersion
spec:
type: ClusterIP
selector:
ControllerComponentLabel: web
ports:
- name: http
port: 8084
targetPort: 8084
- name: admin-http
port: 9994
targetPort: 9994
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
CreatedByAnnotation: CliVersion
creationTimestamp: null
labels:
ControllerComponentLabel: web
name: web
namespace: Namespace
spec:
replicas: 2
strategy: {}
template:
metadata:
annotations:
CreatedByAnnotation: CliVersion
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
ControllerComponentLabel: web
linkerd.io/control-plane-ns: Namespace
linkerd.io/proxy-deployment: web
spec:
containers:
- args:
- -api-addr=api.Namespace.svc.cluster.local:8085
- -uuid=UUID
- -controller-namespace=Namespace
- -log-level=ControllerLogLevel
image: WebImage
imagePullPolicy: ImagePullPolicy
livenessProbe:
httpGet:
path: /ping
port: 9994
initialDelaySeconds: 10
name: web
ports:
- containerPort: 8084
name: http
- containerPort: 9994
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9994
resources: {}
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.Namespace.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources: {}
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
status: {}
---
kind: Service
apiVersion: v1
metadata:
name: prometheus
namespace: Namespace
labels:
ControllerComponentLabel: prometheus
annotations:
CreatedByAnnotation: CliVersion
spec:
type: ClusterIP
selector:
ControllerComponentLabel: prometheus
ports:
- name: admin-http
port: 9090
targetPort: 9090
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
CreatedByAnnotation: CliVersion
creationTimestamp: null
labels:
ControllerComponentLabel: prometheus
name: prometheus
namespace: Namespace
spec:
replicas: 3
strategy: {}
template:
metadata:
annotations:
CreatedByAnnotation: CliVersion
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
ControllerComponentLabel: prometheus
linkerd.io/control-plane-ns: Namespace
linkerd.io/proxy-deployment: prometheus
spec:
containers:
- args:
- --storage.tsdb.retention=6h
- --config.file=/etc/prometheus/prometheus.yml
image: PrometheusImage
imagePullPolicy: ImagePullPolicy
livenessProbe:
httpGet:
path: /-/healthy
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
name: prometheus
ports:
- containerPort: 9090
name: admin-http
readinessProbe:
httpGet:
path: /-/ready
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
resources: {}
volumeMounts:
- mountPath: /etc/prometheus
name: prometheus-config
readOnly: true
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.Namespace.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY
value: "10000"
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources: {}
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
serviceAccount: linkerd-prometheus
volumes:
- configMap:
name: prometheus-config
name: prometheus-config
status: {}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: prometheus-config
namespace: Namespace
labels:
ControllerComponentLabel: prometheus
annotations:
CreatedByAnnotation: CliVersion
data:
prometheus.yml: |-
global:
scrape_interval: 10s
scrape_timeout: 10s
evaluation_interval: 10s
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
- job_name: 'grafana'
kubernetes_sd_configs:
- role: pod
namespaces:
names: ['Namespace']
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_container_name
action: keep
regex: ^grafana$
- job_name: 'linkerd-controller'
kubernetes_sd_configs:
- role: pod
namespaces:
names: ['Namespace']
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_label_linkerd_io_control_plane_component
- __meta_kubernetes_pod_container_port_name
action: keep
regex: (.*);admin-http$
- source_labels: [__meta_kubernetes_pod_container_name]
action: replace
target_label: component
- job_name: 'linkerd-proxy'
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_container_name
- __meta_kubernetes_pod_container_port_name
- __meta_kubernetes_pod_label_linkerd_io_control_plane_ns
action: keep
regex: ^ProxyContainerName;linkerd-metrics;Namespace$
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: pod
# special case k8s' "job" label, to not interfere with prometheus' "job"
# label
# __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo =>
# k8s_job=foo
- source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job]
action: replace
target_label: k8s_job
# __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo =>
# deployment=foo
- action: labelmap
regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+)
# drop all labels that we just made copies of in the previous labelmap
- action: labeldrop
regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+)
# __meta_kubernetes_pod_label_linkerd_io_foo=bar =>
# foo=bar
- action: labelmap
regex: __meta_kubernetes_pod_label_linkerd_io_(.+)
### Grafana ###
---
kind: Service
apiVersion: v1
metadata:
name: grafana
namespace: Namespace
labels:
ControllerComponentLabel: grafana
annotations:
CreatedByAnnotation: CliVersion
spec:
type: ClusterIP
selector:
ControllerComponentLabel: grafana
ports:
- name: http
port: 3000
targetPort: 3000
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
CreatedByAnnotation: CliVersion
creationTimestamp: null
labels:
ControllerComponentLabel: grafana
name: grafana
namespace: Namespace
spec:
replicas: 1
strategy: {}
template:
metadata:
annotations:
CreatedByAnnotation: CliVersion
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
ControllerComponentLabel: grafana
linkerd.io/control-plane-ns: Namespace
linkerd.io/proxy-deployment: grafana
spec:
containers:
- image: GrafanaImage
imagePullPolicy: ImagePullPolicy
livenessProbe:
httpGet:
path: /api/health
port: 3000
name: grafana
ports:
- containerPort: 3000
name: http
readinessProbe:
failureThreshold: 10
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 30
resources: {}
volumeMounts:
- mountPath: /etc/grafana
name: grafana-config
readOnly: true
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.Namespace.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources: {}
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
volumes:
- configMap:
items:
- key: grafana.ini
path: grafana.ini
- key: datasources.yaml
path: provisioning/datasources/datasources.yaml
- key: dashboards.yaml
path: provisioning/dashboards/dashboards.yaml
name: grafana-config
name: grafana-config
status: {}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: grafana-config
namespace: Namespace
labels:
ControllerComponentLabel: grafana
annotations:
CreatedByAnnotation: CliVersion
data:
grafana.ini: |-
instance_name = linkerd-grafana
[server]
root_url = %(protocol)s://%(domain)s:/api/v1/namespaces/Namespace/services/grafana:http/proxy/
[auth]
disable_login_form = true
[auth.anonymous]
enabled = true
org_role = Editor
[auth.basic]
enabled = false
[analytics]
check_for_updates = false
datasources.yaml: |-
apiVersion: 1
datasources:
- name: prometheus
type: prometheus
access: proxy
orgId: 1
url: http://prometheus.Namespace.svc.cluster.local:9090
isDefault: true
jsonData:
timeInterval: "5s"
version: 1
editable: true
dashboards.yaml: |-
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: true
editable: true
options:
path: /var/lib/grafana/dashboards
homeDashboardId: linkerd-top-line
### Service Account CA ###
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-ca
namespace: Namespace
### CA RBAC ###
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-Namespace-ca
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: [TLSTrustAnchorConfigMapName]
verbs: ["update"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "get", "watch"]
- apiGroups: ["extensions", "apps"]
resources: ["replicasets"]
verbs: ["list", "get", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create", "update"]
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["mutatingwebhookconfigurations"]
verbs: ["list", "get", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-Namespace-ca
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: linkerd-Namespace-ca
subjects:
- kind: ServiceAccount
name: linkerd-ca
namespace: Namespace
### CA ###
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
CreatedByAnnotation: CliVersion
creationTimestamp: null
labels:
ControllerComponentLabel: ca
name: ca
namespace: Namespace
spec:
replicas: 1
strategy: {}
template:
metadata:
annotations:
CreatedByAnnotation: CliVersion
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
ControllerComponentLabel: ca
linkerd.io/control-plane-ns: Namespace
linkerd.io/proxy-deployment: ca
spec:
containers:
- args:
- ca
- -controller-namespace=Namespace
- -single-namespace=false
- -proxy-auto-inject=true
- -log-level=ControllerLogLevel
image: ControllerImage
imagePullPolicy: ImagePullPolicy
livenessProbe:
httpGet:
path: /ping
port: 9997
initialDelaySeconds: 10
name: ca
ports:
- containerPort: 9997
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9997
resources: {}
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.Namespace.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources: {}
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
serviceAccount: linkerd-ca
status: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
CreatedByAnnotation: CliVersion
creationTimestamp: null
labels:
ControllerComponentLabel: proxy-injector
name: proxy-injector
namespace: Namespace
spec:
replicas: 1
selector:
matchLabels:
ControllerComponentLabel: proxy-injector
strategy: {}
template:
metadata:
annotations:
CreatedByAnnotation: CliVersion
linkerd.io/created-by: linkerd/cli undefined
linkerd.io/proxy-version: undefined
creationTimestamp: null
labels:
ControllerComponentLabel: proxy-injector
linkerd.io/control-plane-ns: Namespace
linkerd.io/proxy-deployment: proxy-injector
spec:
containers:
- args:
- proxy-injector
- -controller-namespace=Namespace
- -log-level=ControllerLogLevel
image: ControllerImage
imagePullPolicy: ImagePullPolicy
livenessProbe:
httpGet:
path: /ping
port: 9995
initialDelaySeconds: 10
name: proxy-injector
ports:
- containerPort: 443
name: proxy-injector
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9995
resources: {}
volumeMounts:
- mountPath: /var/linkerd-io/trust-anchors
name: linkerd-trust-anchors
readOnly: true
- mountPath: /var/linkerd-io/identity
name: webhook-secrets
readOnly: true
- mountPath: /var/linkerd-io/config
name: proxy-spec
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 10s
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.Namespace.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: gcr.io/linkerd-io/proxy:undefined
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources: {}
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
image: gcr.io/linkerd-io/proxy-init:undefined
imagePullPolicy: IfNotPresent
name: linkerd-init
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
serviceAccount: linkerd-proxy-injector
volumes:
- name: webhook-secrets
secret:
optional: true
secretName: ProxyInjectorTLSSecret
- configMap:
name: ProxyInjectorSidecarConfig
name: proxy-spec
status: {}
---
### Proxy Injector Service Account ###
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-proxy-injector
namespace: Namespace
---
### Proxy Injector RBAC ###
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: linkerd-Namespace-proxy-injector
rules:
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["mutatingwebhookconfigurations"]
verbs: ["create", "update", "get", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: linkerd-Namespace-proxy-injector
subjects:
- kind: ServiceAccount
name: linkerd-proxy-injector
namespace: Namespace
apiGroup: ""
roleRef:
kind: ClusterRole
name: linkerd-Namespace-proxy-injector
apiGroup: rbac.authorization.k8s.io
---
### Proxy Injector Service ###
kind: Service
apiVersion: v1
metadata:
name: proxy-injector
namespace: Namespace
labels:
ControllerComponentLabel: proxy-injector
annotations:
CreatedByAnnotation: CliVersion
spec:
type: ClusterIP
selector:
ControllerComponentLabel: proxy-injector
ports:
- name: proxy-injector
port: 443
targetPort: proxy-injector
---
### Proxy Sidecar Container Spec ###
kind: ConfigMap
apiVersion: v1
metadata:
name: ProxyInjectorSidecarConfig
namespace: Namespace
labels:
ControllerComponentLabel: proxy-injector
annotations:
CreatedByAnnotation: CliVersion
data:
ProxyInitSpecFileName: |
args:
- --incoming-proxy-port
- 4143
- --outgoing-proxy-port
- 4140
- --proxy-uid
- 2102
- --inbound-ports-to-ignore
- 4190,4191,1,2,3
- --outbound-ports-to-ignore
- 2,3,4
image: ProxyInitImage
imagePullPolicy: IfNotPresent
name: linkerd-init
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: false
terminationMessagePolicy: FallbackToLogsOnError
ProxySpecFileName: |
env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_BIND_TIMEOUT
value: 1m
- name: LINKERD2_PROXY_CONTROL_URL
value: tcp://proxy-api.Namespace.svc.cluster.local:123
- name: LINKERD2_PROXY_CONTROL_LISTENER
value: tcp://0.0.0.0:4190
- name: LINKERD2_PROXY_METRICS_LISTENER
value: tcp://0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTENER
value: tcp://127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTENER
value: tcp://0.0.0.0:4143
- name: LINKERD2_PROXY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LINKERD2_PROXY_TLS_TRUST_ANCHORS
value: /var/linkerd-io/trust-anchors/TLSTrustAnchorFileName
- name: LINKERD2_PROXY_TLS_CERT
value: /var/linkerd-io/identity/TLSCertFileName
- name: LINKERD2_PROXY_TLS_PRIVATE_KEY
value: /var/linkerd-io/identity/TLSPrivateKeyFileName
- name: LINKERD2_PROXY_TLS_POD_IDENTITY
value: "" # this value will be computed by the webhook
- name: LINKERD2_PROXY_CONTROLLER_NAMESPACE
value: Namespace
- name: LINKERD2_PROXY_TLS_CONTROLLER_IDENTITY
value: "" # this value will be computed by the webhook
image: ProxyImage
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
- containerPort: 4191
name: linkerd-metrics
readinessProbe:
httpGet:
path: /metrics
port: 4191
initialDelaySeconds: 10
resources:
requests:
cpu: RequestCPU
memory: RequestMemory
securityContext:
runAsUser: 2102
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /var/linkerd-io/trust-anchors
name: linkerd-trust-anchors
readOnly: true
- mountPath: /var/linkerd-io/identity
name: linkerd-secrets
readOnly: true
TLSTrustAnchorVolumeSpecFileName: |
name: linkerd-trust-anchors
configMap:
name: TLSTrustAnchorConfigMapName
optional: true
TLSIdentityVolumeSpecFileName: |
name: linkerd-secrets
secret:
secretName: "" # this value will be computed by the webhook
optional: true
---