Add cmd to inject debug sidecar for l5d components only

Signed-off-by: zaharidichev <zaharidichev@gmail.com>
zaharidichev 2019-10-07 18:29:19 +03:00
parent ce49b8c9f7
commit 50b8b3577e
15 changed files with 876 additions and 25 deletions

cli/cmd/debug-sidecar.go Normal file

@@ -0,0 +1,151 @@
package cmd
import (
"fmt"
"io"
"os"
cfg "github.com/linkerd/linkerd2/controller/gen/config"
"github.com/linkerd/linkerd2/pkg/inject"
"github.com/spf13/cobra"
)
func newCmdDebugSidecar() *cobra.Command {
options := &proxyConfigOptions{}
run := func(inject bool) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf("please specify a kubernetes resource file")
}
in, err := read(args[0])
if err != nil {
return err
}
configs, err := options.fetchConfigsOrDefault()
if err != nil {
return err
}
transformer := &resourceTransformerDebugSidecar{
configs: configs,
inject: inject,
}
exitCode := runDebugSidecarCmd(in, stderr, stdout, transformer)
os.Exit(exitCode)
return nil
}
}
root := &cobra.Command{
Use: "debug-sidecar [inject | uninject] CONFIG-FILE",
Short: "Add debug sidecar or remove it from meshed pods",
Long: `Add debug sidecar or remove it from meshed pods.
You can inject or uninject the debug sidecar into resources contained in a single
file, inside a folder and its sub-folders, or coming from stdin.`,
Example: ` # Inject the debug sidecar into the control-plane deployments in the linkerd namespace.
kubectl get deploy -n linkerd -o yaml | linkerd debug-sidecar inject - | kubectl apply -f -
# Download a resource and inject the debug sidecar into it through stdin.
curl http://url.to/yml | linkerd debug-sidecar inject - | kubectl apply -f -
# Uninject the debug sidecar from all the resources inside a folder and its sub-folders.
linkerd debug-sidecar uninject <folder> | kubectl apply -f -`,
}
inject := &cobra.Command{
Use: "inject CONFIG-FILE",
Short: "Adds the debug sidecar to meshed pods.",
Long: "Adds the debug sidecar to meshed pods.",
RunE: run(true),
}
uninject := &cobra.Command{
Use: "uninject CONFIG-FILE",
Short: "Removes the debug sidecar from meshed pods.",
Long: "Removes the debug sidecar from meshed pods.",
RunE: run(false),
}
root.AddCommand(inject)
root.AddCommand(uninject)
return root
}
type resourceTransformerDebugSidecar struct {
configs *cfg.All
inject bool
}
func runDebugSidecarCmd(inputs []io.Reader, errWriter, outWriter io.Writer, transformer *resourceTransformerDebugSidecar) int {
return transformInput(inputs, errWriter, outWriter, transformer)
}
func writeErrors(r inject.Report, output io.Writer) {
if r.Kind != "" {
output.Write([]byte(fmt.Sprintf("%s \"%s\" skipped\n", r.Kind, r.Name)))
} else {
output.Write([]byte(fmt.Sprintf("document missing \"kind\" field, skipped\n")))
}
}
func writeResult(result string, r inject.Report, output io.Writer) {
output.Write([]byte(fmt.Sprintf("%s \"%s\" debug sidecar %s\n", r.Kind, r.Name, result)))
}
func (rt resourceTransformerDebugSidecar) generateReport(reports []inject.Report, output io.Writer) {
// leading newline to separate from yaml output on stdout
output.Write([]byte("\n"))
for _, r := range reports {
if rt.inject {
if r.CanInjectinjectDebugSidecar() {
writeResult("injected", r, output)
} else {
writeErrors(r, output)
}
} else {
if r.Uninjected.DebugSidecar {
writeResult("uninjected", r, output)
} else {
writeErrors(r, output)
}
}
}
// trailing newline to separate from kubectl output if piping
output.Write([]byte("\n"))
}
func (rt resourceTransformerDebugSidecar) transform(bytes []byte) ([]byte, []inject.Report, error) {
conf := inject.NewResourceConfig(rt.configs, inject.OriginCLI)
report, err := conf.ParseMetaAndYAML(bytes)
if err != nil {
return nil, nil, err
}
if !conf.IsControlPlaneComponent() {
return nil, nil, fmt.Errorf("cannot use debug-sidecar command on non linkerd components")
}
var output []byte
if rt.inject {
output, err = conf.InjectDebug(report)
} else {
output, err = conf.UnInjectDebug(report)
}
if err != nil {
return nil, nil, err
}
if output == nil {
output = bytes
report.UnsupportedResource = true
}
return output, []inject.Report{*report}, nil
}


@@ -0,0 +1,83 @@
package cmd
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"testing"
)
func TestDebugSidecar(t *testing.T) {
// TODO: Add tests for non control-plane components and other cases in which injection/uninjection is skipped
testCases := []struct {
inputFileName string
goldenFileName string
reportFileName string
inject bool
exitCode int
}{
{
inputFileName: "debug_inject_linkerd_tap.input.yml",
goldenFileName: "debug_inject_linkerd_tap.golden.yml",
reportFileName: "debug_inject_linkerd_tap.report",
inject: true,
},
{
inputFileName: "debug_inject_linkerd_tap.golden.yml",
goldenFileName: "debug_inject_linkerd_tap.golden.yml",
reportFileName: "debug_inject_skipped_tap.report",
inject: true,
},
{
inputFileName: "debug_inject_linkerd_tap.golden.yml",
goldenFileName: "debug_inject_linkerd_tap.input.yml",
reportFileName: "debug_uninject_linkerd_tap.report",
inject: false,
},
{
inputFileName: "debug_inject_linkerd_tap.input.yml",
goldenFileName: "debug_inject_linkerd_tap.input.yml",
reportFileName: "debug_inject_skipped_tap.report",
inject: false,
},
{
inputFileName: "inject_emojivoto_deployment.input.yml",
reportFileName: "debug_inject_emojivoto_deployment.bad.golden",
inject: true,
exitCode: 1,
},
}
for i, tc := range testCases {
tc := tc // pin
t.Run(fmt.Sprintf("%d: %s", i, tc.inputFileName), func(t *testing.T) {
file, err := os.Open("testdata/" + tc.inputFileName)
if err != nil {
t.Errorf("error opening test input file: %v\n", err)
}
read := []io.Reader{bufio.NewReader(file)}
output := new(bytes.Buffer)
report := new(bytes.Buffer)
transf := &resourceTransformerDebugSidecar{
configs: testInstallConfig(),
inject: tc.inject,
}
exitCode := runDebugSidecarCmd(read, report, output, transf)
if exitCode != tc.exitCode {
t.Errorf("Unexpected exit code. Got %d but was expecting %d\n", exitCode, tc.exitCode)
}
if tc.exitCode == 0 {
diffTestdata(t, tc.goldenFileName, output.String())
}
diffTestdata(t, tc.reportFileName, report.String())
})
}
}
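
The test cases above cover injection into a plain control-plane manifest, a no-op re-inject, uninjection, a no-op uninject, and rejection of a non control-plane resource with exit code 1. Assuming the repository layout implied by the file paths above (the test lives in the cli/cmd package), a sketch of running it locally:

go test ./cli/cmd -run TestDebugSidecar -v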

cli/cmd/internal.go Normal file

@@ -0,0 +1,16 @@
package cmd
import (
"github.com/spf13/cobra"
)
func newCmdInternal() *cobra.Command {
root := &cobra.Command{
Use: "internal",
Short: "Used for managing internal linkerd components",
Long: `Used for managing internal linkerd components.`,
}
root.AddCommand(newCmdDebugSidecar())
return root
}


@@ -104,6 +104,7 @@ func init() {
RootCmd.AddCommand(newCmdEndpoints())
RootCmd.AddCommand(newCmdGet())
RootCmd.AddCommand(newCmdInject())
RootCmd.AddCommand(newCmdInternal())
RootCmd.AddCommand(newCmdInstall())
RootCmd.AddCommand(newCmdInstallCNIPlugin())
RootCmd.AddCommand(newCmdInstallSP())
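
Because newCmdDebugSidecar is registered under newCmdInternal rather than directly on RootCmd, the new sub-commands are reached through the internal command group. A rough usage sketch (the linkerd-tap deployment and the linkerd namespace are used purely for illustration):

kubectl get deploy linkerd-tap -n linkerd -o yaml | linkerd internal debug-sidecar inject - | kubectl apply -f -
kubectl get deploy linkerd-tap -n linkerd -o yaml | linkerd internal debug-sidecar uninject - | kubectl apply -f -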


@@ -0,0 +1 @@
Error transforming resources: cannot use debug-sidecar command on non-linkerd components


@@ -0,0 +1,254 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
linkerd.io/created-by: linkerd/cli edge-19.9.4
creationTimestamp: null
generation: 3
labels:
linkerd.io/control-plane-component: tap
linkerd.io/control-plane-ns: linkerd
name: linkerd-tap
namespace: linkerd
resourceVersion: "19436"
selfLink: /apis/apps/v1/namespaces/linkerd/deployments/linkerd-tap
uid: 37a94545-e5f4-4741-b45f-9da3e0ac78fa
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
linkerd.io/control-plane-component: tap
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: linkerd-tap
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
annotations:
config.linkerd.io/enable-debug-sidecar: "true"
linkerd.io/created-by: linkerd/cli edge-19.9.4
linkerd.io/identity-mode: default
linkerd.io/proxy-version: edge-19.9.4
creationTimestamp: null
labels:
linkerd.io/control-plane-component: tap
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: linkerd-tap
spec:
containers:
- args:
- tap
- -controller-namespace=linkerd
- -log-level=info
image: gcr.io/linkerd-io/controller:edge-19.9.4
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /ping
port: 9998
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: tap
ports:
- containerPort: 8088
name: grpc
protocol: TCP
- containerPort: 8089
name: apiserver
protocol: TCP
- containerPort: 9998
name: admin-http
protocol: TCP
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9998
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
runAsUser: 2103
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/run/linkerd/tls
name: tls
readOnly: true
- mountPath: /var/run/linkerd/config
name: config
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_DESTINATION_SVC_ADDR
value: linkerd-destination.linkerd.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR
value: 0.0.0.0:4190
- name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR
value: 0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR
value: 127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR
value: 0.0.0.0:4143
- name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES
value: svc.cluster.local.
- name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES
value: svc.cluster.local.
- name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE
value: 10000ms
- name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE
value: 10000ms
- name: _pod_ns
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: LINKERD2_PROXY_DESTINATION_CONTEXT
value: ns:$(_pod_ns)
- name: LINKERD2_PROXY_IDENTITY_DIR
value: /var/run/linkerd/identity/end-entity
- name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS
value: |
-----BEGIN CERTIFICATE-----
MIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0
eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDAxMDg1MDM3WhcNMjAwOTMw
MDg1MDU3WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j
YWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQdaLHl4dkpo/vOqH8TalM4sDl5
+kQ/+KXjA3OQLec95pWJYqLNfeyNa1mR8bQMRXN3J+vfvjFaOyGuXVFKukOzo0Iw
QDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC
MA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgFiiOB/CF0FpNhzZs
WQDZbKz2NedwzRYO+TY63fxbxxACIQCqogxlL4avtqdf0pyKHFo/S0eVb/sbyxis
NouiAv3x0g==
-----END CERTIFICATE-----
- name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE
value: /var/run/secrets/kubernetes.io/serviceaccount/token
- name: LINKERD2_PROXY_IDENTITY_SVC_ADDR
value: linkerd-identity.linkerd.svc.cluster.local:8080
- name: _pod_sa
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.serviceAccountName
- name: _l5d_ns
value: linkerd
- name: _l5d_trustdomain
value: cluster.local
- name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME
value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
- name: LINKERD2_PROXY_IDENTITY_SVC_NAME
value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
- name: LINKERD2_PROXY_DESTINATION_SVC_NAME
value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
- name: LINKERD2_PROXY_TAP_SVC_NAME
value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
image: gcr.io/linkerd-io/proxy:edge-19.9.4
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /metrics
port: 4191
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
protocol: TCP
- containerPort: 4191
name: linkerd-admin
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /ready
port: 4191
scheme: HTTP
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 2102
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /var/run/linkerd/identity/end-entity
name: linkerd-identity-end-entity
- image: gcr.io/linkerd-io/debug:install-control-plane-version
imagePullPolicy: IfNotPresent
name: linkerd-debug
resources: {}
terminationMessagePolicy: FallbackToLogsOnError
dnsPolicy: ClusterFirst
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
- --outbound-ports-to-ignore
- "443"
image: gcr.io/linkerd-io/proxy-init:v1.2.0
imagePullPolicy: IfNotPresent
name: linkerd-init
resources:
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 10m
memory: 10Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_ADMIN
- NET_RAW
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: false
runAsUser: 0
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: linkerd-tap
serviceAccountName: linkerd-tap
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 420
name: linkerd-config
name: config
- emptyDir:
medium: Memory
name: linkerd-identity-end-entity
- name: tls
secret:
defaultMode: 420
secretName: linkerd-tap-tls
status: {}
---


@@ -0,0 +1,248 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
linkerd.io/created-by: linkerd/cli edge-19.9.4
creationTimestamp: null
generation: 3
labels:
linkerd.io/control-plane-component: tap
linkerd.io/control-plane-ns: linkerd
name: linkerd-tap
namespace: linkerd
resourceVersion: "19436"
selfLink: /apis/apps/v1/namespaces/linkerd/deployments/linkerd-tap
uid: 37a94545-e5f4-4741-b45f-9da3e0ac78fa
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
linkerd.io/control-plane-component: tap
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: linkerd-tap
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
annotations:
linkerd.io/created-by: linkerd/cli edge-19.9.4
linkerd.io/identity-mode: default
linkerd.io/proxy-version: edge-19.9.4
creationTimestamp: null
labels:
linkerd.io/control-plane-component: tap
linkerd.io/control-plane-ns: linkerd
linkerd.io/proxy-deployment: linkerd-tap
spec:
containers:
- args:
- tap
- -controller-namespace=linkerd
- -log-level=info
image: gcr.io/linkerd-io/controller:edge-19.9.4
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /ping
port: 9998
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: tap
ports:
- containerPort: 8088
name: grpc
protocol: TCP
- containerPort: 8089
name: apiserver
protocol: TCP
- containerPort: 9998
name: admin-http
protocol: TCP
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9998
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
runAsUser: 2103
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/run/linkerd/tls
name: tls
readOnly: true
- mountPath: /var/run/linkerd/config
name: config
- env:
- name: LINKERD2_PROXY_LOG
value: warn,linkerd2_proxy=info
- name: LINKERD2_PROXY_DESTINATION_SVC_ADDR
value: linkerd-destination.linkerd.svc.cluster.local:8086
- name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR
value: 0.0.0.0:4190
- name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR
value: 0.0.0.0:4191
- name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR
value: 127.0.0.1:4140
- name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR
value: 0.0.0.0:4143
- name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES
value: svc.cluster.local.
- name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES
value: svc.cluster.local.
- name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE
value: 10000ms
- name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE
value: 10000ms
- name: _pod_ns
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: LINKERD2_PROXY_DESTINATION_CONTEXT
value: ns:$(_pod_ns)
- name: LINKERD2_PROXY_IDENTITY_DIR
value: /var/run/linkerd/identity/end-entity
- name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS
value: |
-----BEGIN CERTIFICATE-----
MIIBgzCCASmgAwIBAgIBATAKBggqhkjOPQQDAjApMScwJQYDVQQDEx5pZGVudGl0
eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMTkxMDAxMDg1MDM3WhcNMjAwOTMw
MDg1MDU3WjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9j
YWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQdaLHl4dkpo/vOqH8TalM4sDl5
+kQ/+KXjA3OQLec95pWJYqLNfeyNa1mR8bQMRXN3J+vfvjFaOyGuXVFKukOzo0Iw
QDAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC
MA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgFiiOB/CF0FpNhzZs
WQDZbKz2NedwzRYO+TY63fxbxxACIQCqogxlL4avtqdf0pyKHFo/S0eVb/sbyxis
NouiAv3x0g==
-----END CERTIFICATE-----
- name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE
value: /var/run/secrets/kubernetes.io/serviceaccount/token
- name: LINKERD2_PROXY_IDENTITY_SVC_ADDR
value: linkerd-identity.linkerd.svc.cluster.local:8080
- name: _pod_sa
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.serviceAccountName
- name: _l5d_ns
value: linkerd
- name: _l5d_trustdomain
value: cluster.local
- name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME
value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
- name: LINKERD2_PROXY_IDENTITY_SVC_NAME
value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
- name: LINKERD2_PROXY_DESTINATION_SVC_NAME
value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
- name: LINKERD2_PROXY_TAP_SVC_NAME
value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
image: gcr.io/linkerd-io/proxy:edge-19.9.4
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /metrics
port: 4191
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: linkerd-proxy
ports:
- containerPort: 4143
name: linkerd-proxy
protocol: TCP
- containerPort: 4191
name: linkerd-admin
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /ready
port: 4191
scheme: HTTP
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 2102
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /var/run/linkerd/identity/end-entity
name: linkerd-identity-end-entity
dnsPolicy: ClusterFirst
initContainers:
- args:
- --incoming-proxy-port
- "4143"
- --outgoing-proxy-port
- "4140"
- --proxy-uid
- "2102"
- --inbound-ports-to-ignore
- 4190,4191
- --outbound-ports-to-ignore
- "443"
image: gcr.io/linkerd-io/proxy-init:v1.2.0
imagePullPolicy: IfNotPresent
name: linkerd-init
resources:
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 10m
memory: 10Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_ADMIN
- NET_RAW
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: false
runAsUser: 0
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: linkerd-tap
serviceAccountName: linkerd-tap
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 420
name: linkerd-config
name: config
- emptyDir:
medium: Memory
name: linkerd-identity-end-entity
- name: tls
secret:
defaultMode: 420
secretName: linkerd-tap-tls
status: {}
---


@@ -0,0 +1,3 @@
deployment "linkerd-tap" debug sidecar injected


@@ -0,0 +1,3 @@
deployment "linkerd-tap" skipped


@@ -0,0 +1,3 @@
deployment "linkerd-tap" debug sidecar uninjected


@@ -36,3 +36,14 @@ func HasExistingSidecars(podSpec *corev1.PodSpec) bool {
return false
}
// HasExistingDebugSidecar returns true if the pod spec already has the
// debug container present
func HasExistingDebugSidecar(podSpec *corev1.PodSpec) bool {
for _, container := range podSpec.Containers {
if container.Name == k8s.DebugSidecarName && strings.HasPrefix(container.Image, k8s.DebugSidecarImage) {
return true
}
}
return false
}

pkg/inject/debug.go Normal file

@@ -0,0 +1,53 @@
package inject
import (
"github.com/linkerd/linkerd2/pkg/k8s"
corev1 "k8s.io/api/core/v1"
)
// UnInjectDebug removes the debug container from the pod spec (if present)
func (conf *ResourceConfig) UnInjectDebug(report *Report) ([]byte, error) {
if conf.pod.spec == nil || !report.CanUnInjectinjectDebugSidecar() {
return nil, nil
}
report.Uninjected.DebugSidecar = conf.uninjectContainer(k8s.DebugSidecarName)
conf.unInjectDebugMeta()
return conf.YamlMarshalObj()
}
func (conf *ResourceConfig) unInjectDebugMeta() {
newAnnotations := make(map[string]string)
for key, val := range conf.pod.meta.Annotations {
if key != k8s.ProxyEnableDebugAnnotation {
newAnnotations[key] = val
}
}
conf.pod.meta.Annotations = newAnnotations
}
// InjectDebug adds a debug container into the pod spec
func (conf *ResourceConfig) InjectDebug(report *Report) ([]byte, error) {
if conf.pod.spec == nil || !report.CanInjectinjectDebugSidecar() {
return nil, nil
}
conf.pod.meta.Annotations[k8s.ProxyEnableDebugAnnotation] = "true"
conf.pod.spec.Containers = append(conf.pod.spec.Containers, corev1.Container{
Name: k8s.DebugSidecarName,
Image: k8s.DebugSidecarImage + ":" + conf.configs.GetGlobal().GetVersion(),
ImagePullPolicy: corev1.PullIfNotPresent,
TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError,
})
return conf.YamlMarshalObj()
}
// CanInjectinjectDebugSidecar determines whether a debug sidecar can be injected
func (r *Report) CanInjectinjectDebugSidecar() bool {
return !r.UnsupportedResource && r.Sidecar && !r.DebugSidecar
}
// CanUnInjectinjectDebugSidecar returns true if there is a debug sidecar present in the pod
func (r *Report) CanUnInjectinjectDebugSidecar() bool {
return r.DebugSidecar
}


@@ -135,6 +135,13 @@ func NewResourceConfig(configs *config.All, origin Origin) *ResourceConfig {
return config
}
// IsControlPlaneComponent returns true if the linkerd.io/control-plane-component label is
// present on the pod
func (conf *ResourceConfig) IsControlPlaneComponent() bool {
_, b := conf.pod.meta.Labels[k8s.ControllerComponentLabel]
return b
}
// WithKind enriches ResourceConfig with the workload kind
func (conf *ResourceConfig) WithKind(kind string) *ResourceConfig {
conf.workload.metaType = metav1.TypeMeta{Kind: kind}


@@ -37,6 +37,7 @@ type Report struct {
Name string
HostNetwork bool
Sidecar bool
DebugSidecar bool
UDP bool // true if any port in any container has `protocol: UDP`
UnsupportedResource bool
InjectDisabled bool
@@ -52,6 +53,9 @@ type Report struct {
// ProxyInit is true if a proxy-init container has been uninjected
ProxyInit bool
// DebugSidecar is true if a debug container has been uninjected
DebugSidecar bool
}
}
@@ -77,6 +81,7 @@ func newReport(conf *ResourceConfig) *Report {
report.InjectDisabled, report.InjectDisabledReason, report.InjectAnnotationAt = report.disableByAnnotation(conf)
report.HostNetwork = conf.pod.spec.HostNetwork
report.Sidecar = healthcheck.HasExistingSidecars(conf.pod.spec)
report.DebugSidecar = healthcheck.HasExistingDebugSidecar(conf.pod.spec)
report.UDP = checkUDPPorts(conf.pod.spec)
report.TracingEnabled = conf.pod.meta.Annotations[k8s.ProxyTraceCollectorSvcAddr] != "" || conf.nsAnnotations[k8s.ProxyTraceCollectorSvcAddr] != ""
} else {


@@ -39,15 +39,9 @@ func (conf *ResourceConfig) uninjectPodSpec(report *Report) {
}
t.InitContainers = initContainers
- containers := []v1.Container{}
- for _, container := range t.Containers {
- if container.Name != k8s.ProxyContainerName {
- containers = append(containers, container)
- } else {
- report.Uninjected.Proxy = true
- }
- }
- t.Containers = containers
+ if conf.uninjectContainer(k8s.ProxyContainerName) {
+ report.Uninjected.Proxy = true
+ }
volumes := []v1.Volume{}
for _, volume := range t.Volumes {
@@ -58,7 +52,24 @@ func (conf *ResourceConfig) uninjectPodSpec(report *Report) {
t.Volumes = volumes
}
func (conf *ResourceConfig) uninjectContainer(containerName string) bool {
t := conf.pod.spec
unInjected := false
var containers []v1.Container
for _, container := range t.Containers {
if container.Name != containerName {
containers = append(containers, container)
} else {
unInjected = true
}
}
t.Containers = containers
return unInjected
}
func uninjectObjectMeta(t *metav1.ObjectMeta, report *Report) {
// do not uninject meta if this is a control-plane component
if _, ok := t.Labels[k8s.ControllerComponentLabel]; !ok {
newAnnotations := make(map[string]string)
for key, val := range t.Annotations {
if !strings.HasPrefix(key, k8s.Prefix) ||
@@ -79,3 +90,4 @@ func uninjectObjectMeta(t *metav1.ObjectMeta, report *Report) {
}
t.Labels = labels
}
}