mirror of https://github.com/linkerd/linkerd2.git
Fix `linkerd metrics` resource selector (#5567)
The `linkerd metrics` command was selecting pods based on owner resource names. If multiple owners existed with the same name (for example `sts/web` and `deploy/web`), additional pods were incorrectly included in the output. Fix the pod selector code to validate that pods carry owner references to the given workload/owner.

Before:

```
$ linkerd metrics -n emojivoto deploy/web|grep POD
# POD web-0 (1 of 3)
# POD web-d9ffd684f-gnbcx (2 of 3)
# POD web-fs6l7 (3 of 3)
```

After:

```
$ bin/go-run cli metrics -n emojivoto deploy/web|grep POD
# POD web-d9ffd684f-gnbcx (1 of 1)
```

Signed-off-by: Andrew Seigner <siggy@buoyant.io>
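The essence of the fix is that owner references are now matched by UID rather than by name. As a minimal standalone sketch of that check — `isOwner` mirrors the helper in the diff below, while the `filterByOwner` wrapper and the `ownercheck` package name are illustrative, not part of the commit:

```go
package ownercheck

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// isOwner reports whether any owner reference points at the object with
// UID u. UIDs are unique across a cluster, so sts/web and deploy/web can
// never be confused the way a name comparison allows.
func isOwner(u types.UID, ownerRefs []metav1.OwnerReference) bool {
	for _, or := range ownerRefs {
		if u == or.UID {
			return true
		}
	}
	return false
}

// filterByOwner keeps only the pods directly owned by ownerUID
// (illustrative helper; the patch inlines this loop in getPodsFor).
func filterByOwner(pods []corev1.Pod, ownerUID types.UID) []corev1.Pod {
	owned := []corev1.Pod{}
	for _, pod := range pods {
		if isOwner(ownerUID, pod.GetOwnerReferences()) {
			owned = append(owned, pod)
		}
	}
	return owned
}
```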
This commit is contained in:
parent c63fbdf0e4
commit 9c80d4d2a1
```diff
@@ -14,6 +14,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
 )
 
@@ -137,15 +138,17 @@ func getPodsFor(ctx context.Context, clientset kubernetes.Interface, namespace s
 	}
 
 	var matchLabels map[string]string
+	var ownerUID types.UID
 	switch res.GetType() {
 	case k8s.CronJob:
 		jobs, err := clientset.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{})
 		if err != nil {
 			return nil, err
 		}
 
 		var pods []corev1.Pod
 		for _, job := range jobs.Items {
-			if isOwner(res.GetName(), job.GetOwnerReferences()) {
+			if isOwner(job.GetUID(), job.GetOwnerReferences()) {
 				jobPods, err := getPodsFor(ctx, clientset, namespace, fmt.Sprintf("%s/%s", k8s.Job, job.GetName()))
 				if err != nil {
 					return nil, err
@@ -161,6 +164,7 @@ func getPodsFor(ctx context.Context, clientset kubernetes.Interface, namespace s
 			return nil, err
 		}
 		matchLabels = ds.Spec.Selector.MatchLabels
+		ownerUID = ds.GetUID()
 
 	case k8s.Deployment:
 		deployment, err := clientset.AppsV1().Deployments(namespace).Get(ctx, res.GetName(), metav1.GetOptions{})
@@ -168,6 +172,29 @@ func getPodsFor(ctx context.Context, clientset kubernetes.Interface, namespace s
 			return nil, err
 		}
 		matchLabels = deployment.Spec.Selector.MatchLabels
+		ownerUID = deployment.GetUID()
+
+		replicaSets, err := clientset.AppsV1().ReplicaSets(namespace).List(
+			ctx,
+			metav1.ListOptions{
+				LabelSelector: labels.Set(matchLabels).AsSelector().String(),
+			},
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		var pods []corev1.Pod
+		for _, rs := range replicaSets.Items {
+			if isOwner(ownerUID, rs.GetOwnerReferences()) {
+				podsRS, err := getPodsFor(ctx, clientset, namespace, fmt.Sprintf("%s/%s", k8s.ReplicaSet, rs.GetName()))
+				if err != nil {
+					return nil, err
+				}
+				pods = append(pods, podsRS...)
+			}
+		}
+		return pods, nil
 
 	case k8s.Job:
 		job, err := clientset.BatchV1().Jobs(namespace).Get(ctx, res.GetName(), metav1.GetOptions{})
@@ -175,6 +202,7 @@ func getPodsFor(ctx context.Context, clientset kubernetes.Interface, namespace s
 			return nil, err
 		}
 		matchLabels = job.Spec.Selector.MatchLabels
+		ownerUID = job.GetUID()
 
 	case k8s.ReplicaSet:
 		rs, err := clientset.AppsV1().ReplicaSets(namespace).Get(ctx, res.GetName(), metav1.GetOptions{})
@@ -182,6 +210,7 @@ func getPodsFor(ctx context.Context, clientset kubernetes.Interface, namespace s
 			return nil, err
 		}
 		matchLabels = rs.Spec.Selector.MatchLabels
+		ownerUID = rs.GetUID()
 
 	case k8s.ReplicationController:
 		rc, err := clientset.CoreV1().ReplicationControllers(namespace).Get(ctx, res.GetName(), metav1.GetOptions{})
@@ -189,6 +218,7 @@ func getPodsFor(ctx context.Context, clientset kubernetes.Interface, namespace s
 			return nil, err
 		}
 		matchLabels = rc.Spec.Selector
+		ownerUID = rc.GetUID()
 
 	case k8s.StatefulSet:
 		ss, err := clientset.AppsV1().StatefulSets(namespace).Get(ctx, res.GetName(), metav1.GetOptions{})
@@ -196,6 +226,7 @@ func getPodsFor(ctx context.Context, clientset kubernetes.Interface, namespace s
 			return nil, err
 		}
 		matchLabels = ss.Spec.Selector.MatchLabels
+		ownerUID = ss.GetUID()
 
 	default:
 		return nil, fmt.Errorf("unsupported resource type: %s", res.GetType())
@@ -214,12 +245,23 @@ func getPodsFor(ctx context.Context, clientset kubernetes.Interface, namespace s
 		return nil, err
 	}
 
-	return podList.Items, nil
+	if ownerUID == "" {
+		return podList.Items, nil
+	}
+
+	pods := []corev1.Pod{}
+	for _, pod := range podList.Items {
+		if isOwner(ownerUID, pod.GetOwnerReferences()) {
+			pods = append(pods, pod)
+		}
+	}
+
+	return pods, nil
 }
 
-func isOwner(resourceName string, ownerRefs []metav1.OwnerReference) bool {
+func isOwner(u types.UID, ownerRefs []metav1.OwnerReference) bool {
 	for _, or := range ownerRefs {
-		if resourceName == or.Name {
+		if u == or.UID {
 			return true
 		}
 	}
```