Compare commits


18 Commits

Author SHA1 Message Date
Tim Rühsen 79d08a7e27
off-cpu: Use a probability value for the threshold (#460)
Co-authored-by: Florian Lehner <florian.lehner@elastic.co>
2025-07-11 17:43:04 +02:00
Florian Lehner 7a0ae331c4
ebpf: delete entry from map for off cpu sampling (#599)
Signed-off-by: Florian Lehner <florian.lehner@elastic.co>
2025-07-11 14:26:28 +03:00
Tolya Korniltsev e78bcc4514
Reusable amd64 interpreter for extraction of values from disassembly (#447)
Co-authored-by: Timo Teräs <timo.teras@iki.fi>
Co-authored-by: Florian Lehner <florianl@users.noreply.github.com>
2025-07-11 13:54:54 +03:00
Christos Kalkanis ec6ee4599e
processmanager: minor cleanups (#590)
Co-authored-by: Tim Rühsen <tim.ruhsen@elastic.co>
2025-07-10 10:41:13 +03:00
renovate[bot] 5191dbe342
fix(deps): update go dependencies (#591)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-10 09:44:44 +03:00
Trask Stalnaker c4f3ccd22a
Ensure correct CodeQL workflow permissions (#592)
Co-authored-by: otelbot <197425009+otelbot@users.noreply.github.com>
2025-07-10 09:35:44 +03:00
OpenTelemetry Bot e82b9a3b04
Sort contributor listings and remove affiliation from emeriti (#589)
Co-authored-by: otelbot <197425009+otelbot@users.noreply.github.com>
2025-07-10 09:34:24 +03:00
Florian Lehner 26bf9fb804
ebpf: introduce with_debug_output (#560)
Signed-off-by: Florian Lehner <florian.lehner@elastic.co>
Co-authored-by: Christos Kalkanis <christos.kalkanis@elastic.co>
Co-authored-by: Timo Teräs <timo.teras@iki.fi>
2025-07-09 21:26:21 +02:00
Tommy Reilly b2d73337eb
Fix parseMappings wrt anon mappings (#588) 2025-07-09 21:14:02 +02:00
Christos Kalkanis dba0c0e17c
ProcessManager: Extract containerID with the rest of process metadata (#577)
Signed-off-by: Florian Lehner <florian.lehner@elastic.co>
Co-authored-by: Florian Lehner <florian.lehner@elastic.co>
2025-07-09 10:05:53 +02:00
Tolya Korniltsev dd36674acb
fix(tracemgmt): remove unnecessary errno check in rate limit reset condition (#583) 2025-07-07 16:00:16 +02:00
Florian Lehner 418fd9c916
coredump: split out cloudstore (#558)
Signed-off-by: Florian Lehner <florian.lehner@elastic.co>
2025-07-05 20:44:02 +03:00
renovate[bot] 9f83ff4de7
chore(deps): update rust crate lru to 0.16.0 (#578)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-03 22:01:43 +02:00
renovate[bot] 46c2c7702e
fix(deps): update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.83.0 (#579)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-03 22:01:17 +02:00
david dcd30e25e5
offcpu: Fix incorrect OffTimes values in offcpu profiling. (#559) 2025-07-03 21:43:29 +02:00
Florian Lehner d90d670c1b
[ci/codeql] adapt permissions (#576) 2025-07-02 16:02:59 +02:00
Timo Teräs 3c82c64730
elfehframe: dynamically detect entry stub (#573)
Earlier we synthesized a STOP delta for the FDE ending at the entry point. This does not work reliably: 1. in some cases there is no FDE preceding the entry point, and 2. with musl, the second portion of the entry code, immediately following the CALL, often has an FDE of its own that invalidates the STOP delta.

To handle this better, the entry code is now dynamically matched against known stubs when needed, and the whole stub is marked with STOP, overriding any FDEs.
2025-07-02 15:46:38 +03:00
Timo Teräs b3452d13c5
Improve elfehframe handling (#564) 2025-07-02 10:44:53 +03:00
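The entry-stub detection described in commit 3c82c647 (#573) boils down to comparing the bytes at the ELF entry point against a small table of known stub prologues and, on a match, marking the whole stub with a STOP delta so that no FDE-derived deltas apply inside it. A minimal, self-contained Go sketch of that idea (the byte patterns and helper names below are illustrative placeholders, not the profiler's actual tables or API):

```go
package main

import (
	"bytes"
	"fmt"
)

// knownEntryStubs holds byte prefixes of entry-point code that should never
// be unwound through. The patterns here are placeholders for illustration.
var knownEntryStubs = [][]byte{
	{0x31, 0xED, 0x49, 0x89, 0xD1}, // xor ebp,ebp; mov r9,rdx (glibc-like _start)
	{0x48, 0x31, 0xED},             // xor rbp,rbp (musl-like _start)
}

// matchEntryStub returns the length of a recognized stub at the entry point,
// or 0 if none matches. The caller would then mark [entry, entry+length)
// with a STOP delta, overriding any FDE that overlaps the stub.
func matchEntryStub(entryCode []byte) int {
	for _, stub := range knownEntryStubs {
		if bytes.HasPrefix(entryCode, stub) {
			return len(stub)
		}
	}
	return 0
}

func main() {
	entry := []byte{0x31, 0xED, 0x49, 0x89, 0xD1, 0x5E, 0x48, 0x89, 0xE2}
	fmt.Println(matchEntryStub(entry)) // prints 5
}
```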
90 changed files with 2010 additions and 578 deletions


@@ -8,11 +8,12 @@ on:
   schedule:
     - cron: "21 6 * * 1"
-permissions:
-  contents: read
+permissions: read-all
 jobs:
   analyze:
+    permissions:
+      security-events: write # for github/codeql-action/analyze to upload SARIF results
     name: Analyze Go (${{ matrix.target_arch }})
     if: ${{ github.actor != 'dependabot[bot]' && github.repository == 'open-telemetry/opentelemetry-ebpf-profiler' }}
     runs-on: ubuntu-24.04


@@ -99,10 +99,10 @@ jobs:
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Hash binary blobs
        run: |
-         sha256sum support/ebpf/tracer.ebpf.release.* > binary-blobs.hash
+         sha256sum support/ebpf/tracer.ebpf.* > binary-blobs.hash
      - name: Rebuild eBPF blobs
        run: |
-         rm support/ebpf/tracer.ebpf.release.*
+         rm support/ebpf/tracer.ebpf.*
          make amd64 -C support/ebpf
          make arm64 -C support/ebpf
      - name: Check for differences
@@ -111,6 +111,11 @@
          echo "Please rebuild and commit the updated binary blobs."
          exit 1
        fi
+     - if: failure()
+       uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+       with:
+         name: binary-blobs
+         path: support/ebpf/tracer.ebpf.*
  build-integration-test-binaries:
    name: Build integration test binaries (${{ matrix.target_arch }})


@@ -145,10 +145,10 @@ For more information about the maintainer role, see the [community repository](h
 ### Approvers
+- [Damien Mathieu](https://github.com/dmathieu), Elastic
 - [Florian Lehner](https://github.com/florianl), Elastic
 - [Joel Höner](https://github.com/athre0z)
 - [Tim Rühsen](https://github.com/rockdaboot), Elastic
-- [Damien Mathieu](https://github.com/dmathieu), Elastic
 For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).

4
Cargo.lock generated

@@ -280,9 +280,9 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
 [[package]]
 name = "lru"
-version = "0.15.0"
+version = "0.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0281c2e25e62316a5c9d98f2d2e9e95a37841afdaf4383c177dbb5c1dfab0568"
+checksum = "86ea4e65087ff52f3862caff188d489f1fab49a0cb09e01b2e3f1a617b10aaed"
 [[package]]
 name = "memchr"

@@ -63,7 +63,7 @@ default-features = false
 features = ["std"]
 [workspace.dependencies.lru]
-version = "0.15.0"
+version = "0.16.0"
 default-features = false
 [workspace.dependencies.object]

1
asm/amd/.gitignore vendored Normal file

@ -0,0 +1 @@
testdata/fuzz

179
asm/amd/interpreter.go Normal file

@ -0,0 +1,179 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package amd // import "go.opentelemetry.io/ebpf-profiler/asm/amd"
import (
"fmt"
"io"
"math"
"go.opentelemetry.io/ebpf-profiler/asm/expression"
"golang.org/x/arch/x86/x86asm"
)
type CodeBlock struct {
Address expression.Expression
Code []byte
}
type Interpreter struct {
Regs Registers
code []byte
CodeAddress expression.Expression
pc int
}
func NewInterpreter() *Interpreter {
it := &Interpreter{}
it.initRegs()
return it
}
func NewInterpreterWithCode(code []byte) *Interpreter {
it := &Interpreter{code: code, CodeAddress: expression.Named("code address")}
it.initRegs()
return it
}
func (i *Interpreter) ResetCode(code []byte, address expression.Expression) {
i.code = code
i.CodeAddress = address
i.pc = 0
}
func (i *Interpreter) Loop() (x86asm.Inst, error) {
return i.LoopWithBreak(func(x86asm.Inst) bool { return false })
}
func (i *Interpreter) LoopWithBreak(breakLoop func(op x86asm.Inst) bool) (x86asm.Inst, error) {
prev := x86asm.Inst{}
for {
op, err := i.Step()
if err != nil {
return prev, err
}
if breakLoop(op) {
return op, nil
}
prev = op
}
}
func (i *Interpreter) Step() (x86asm.Inst, error) {
if len(i.code) == 0 {
return x86asm.Inst{}, io.EOF
}
var inst x86asm.Inst
var err error
if ok, instLen := DecodeSkippable(i.code); ok {
inst = x86asm.Inst{Op: x86asm.NOP, Len: instLen}
} else {
inst, err = x86asm.Decode(i.code, 64)
if err != nil {
return inst, fmt.Errorf("at 0x%x : %v", i.pc, err)
}
}
i.pc += inst.Len
i.code = i.code[inst.Len:]
i.Regs.setX86asm(x86asm.RIP, expression.Add(i.CodeAddress, expression.Imm(uint64(i.pc))))
switch inst.Op {
case x86asm.ADD:
if dst, ok := inst.Args[0].(x86asm.Reg); ok {
left := i.Regs.getX86asm(dst)
switch src := inst.Args[1].(type) {
case x86asm.Imm:
right := expression.Imm(uint64(src))
i.Regs.setX86asm(dst, expression.Add(left, right))
case x86asm.Reg:
right := i.Regs.getX86asm(src)
i.Regs.setX86asm(dst, expression.Add(left, right))
case x86asm.Mem:
right := i.MemArg(src)
right = expression.MemWithSegment(src.Segment, right, inst.MemBytes)
i.Regs.setX86asm(dst, expression.Add(left, right))
}
}
case x86asm.SHL:
if dst, ok := inst.Args[0].(x86asm.Reg); ok {
if src, imm := inst.Args[1].(x86asm.Imm); imm {
v := expression.Multiply(
i.Regs.getX86asm(dst),
expression.Imm(uint64(math.Pow(2, float64(src)))),
)
i.Regs.setX86asm(dst, v)
}
}
case x86asm.MOV, x86asm.MOVZX, x86asm.MOVSXD, x86asm.MOVSX:
if dst, ok := inst.Args[0].(x86asm.Reg); ok {
switch src := inst.Args[1].(type) {
case x86asm.Imm:
i.Regs.setX86asm(dst, expression.Imm(uint64(src)))
case x86asm.Reg:
i.Regs.setX86asm(dst, i.Regs.getX86asm(src))
case x86asm.Mem:
v := i.MemArg(src)
dataSizeBits := inst.DataSize
v = expression.MemWithSegment(src.Segment, v, inst.MemBytes)
if inst.Op == x86asm.MOVSXD || inst.Op == x86asm.MOVSX {
v = expression.SignExtend(v, dataSizeBits)
} else {
v = expression.ZeroExtend(v, dataSizeBits)
}
i.Regs.setX86asm(dst, v)
}
}
case x86asm.XOR:
if dst, ok := inst.Args[0].(x86asm.Reg); ok {
if src, reg := inst.Args[1].(x86asm.Reg); reg {
if src == dst {
i.Regs.setX86asm(dst, expression.Imm(0))
}
}
}
case x86asm.AND:
if dst, ok := inst.Args[0].(x86asm.Reg); ok {
if src, imm := inst.Args[1].(x86asm.Imm); imm {
if src == 3 { // todo other cases
i.Regs.setX86asm(dst, expression.ZeroExtend(i.Regs.getX86asm(dst), 2))
}
}
}
case x86asm.LEA:
if dst, ok := inst.Args[0].(x86asm.Reg); ok {
if src, mem := inst.Args[1].(x86asm.Mem); mem {
v := i.MemArg(src)
i.Regs.setX86asm(dst, v)
}
}
default:
}
return inst, nil
}
func (i *Interpreter) MemArg(src x86asm.Mem) expression.Expression {
vs := make([]expression.Expression, 0, 3)
if src.Disp != 0 {
vs = append(vs, expression.Imm(uint64(src.Disp)))
}
if src.Base != 0 {
vs = append(vs, i.Regs.getX86asm(src.Base))
}
if src.Index != 0 {
v := expression.Multiply(
i.Regs.getX86asm(src.Index),
expression.Imm(uint64(src.Scale)),
)
vs = append(vs, v)
}
v := expression.Add(vs...)
return v
}
func (i *Interpreter) initRegs() {
for j := 0; j < len(i.Regs.regs); j++ {
i.Regs.regs[j] = expression.Named(Reg(j).String())
}
}

171
asm/amd/interpreter_test.go Normal file

@ -0,0 +1,171 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package amd
import (
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/ebpf-profiler/asm/expression"
)
func BenchmarkPythonInterpreter(b *testing.B) {
for i := 0; i < b.N; i++ {
testPythonInterpreter(b)
}
}
func TestPythonInterpreter(t *testing.T) {
testPythonInterpreter(t)
}
func testPythonInterpreter(t testing.TB) {
// 00010000 4D 89 F2 mov r10, r14
// 00010003 45 0F B6 36 movzx r14d, byte ptr [r14]
// 00010007 48 8D 05 2D B3 35 00 lea rax, [rip + 0x35b32d]
// 0001000E 4C 8B 6C 24 08 mov r13, qword ptr [rsp + 8]
// 00010013 48 89 C1 mov rcx, rax
// 00010016 48 89 44 24 10 mov qword ptr [rsp + 0x10], rax
// 0001001B 45 0F B6 5A 01 movzx r11d, byte ptr [r10 + 1]
// 00010020 41 0F B6 C6 movzx eax, r14b
// 00010024 48 8B 04 C1 mov rax, qword ptr [rcx + rax*8]
// 00010028 FF E0 jmp rax
code := []byte{
0x4d, 0x89, 0xf2, 0x45, 0x0f, 0xb6, 0x36, 0x48, 0x8d, 0x05, 0x2d, 0xb3, 0x35,
0x00, 0x4c, 0x8b, 0x6c, 0x24, 0x08, 0x48, 0x89, 0xc1, 0x48, 0x89, 0x44, 0x24,
0x10, 0x45, 0x0f, 0xb6, 0x5a, 0x01, 0x41, 0x0f, 0xb6, 0xc6, 0x48, 0x8b, 0x04,
0xc1, 0xff, 0xe0,
}
it := NewInterpreterWithCode(code)
it.CodeAddress = expression.Imm(0x8AF05)
r14 := it.Regs.Get(R14)
_, err := it.Loop()
if err == nil || err != io.EOF {
t.Fatal(err)
}
actual := it.Regs.Get(RAX)
expected := expression.Mem(
expression.Add(
expression.Multiply(
expression.ZeroExtend8(expression.Mem1(r14)),
expression.Imm(8),
),
expression.NewImmediateCapture("switch table"),
),
8,
)
if !actual.Match(expected) {
t.Fatal()
}
}
func TestRecoverSwitchCase(t *testing.T) {
blocks := []CodeBlock{
{
Address: expression.Imm(0x3310E3),
// 003310E3 48 8B 44 24 20 mov rax, qword ptr [rsp + 0x20]
// 003310E8 48 89 18 mov qword ptr [rax], rbx
// 003310EB 49 83 C2 02 add r10, 2
// 003310EF 44 89 E0 mov eax, r12d
// 003310F2 83 E0 03 and eax, 3
// 003310F5 31 DB xor ebx, ebx
// 003310F7 41 F6 C4 04 test r12b, 4
// 003310FB 4C 89 74 24 10 mov qword ptr [rsp + 0x10], r14
// 00331100 74 08 je 0x33110a
Code: []byte{0x48, 0x8b, 0x44, 0x24, 0x20, 0x48, 0x89, 0x18, 0x49,
0x83, 0xc2, 0x02, 0x44, 0x89, 0xe0, 0x83, 0xe0, 0x03, 0x31, 0xdb,
0x41, 0xf6, 0xc4, 0x04, 0x4c, 0x89, 0x74, 0x24, 0x10, 0x74, 0x08},
},
{
Address: expression.Imm(0x33110a),
// 0033110A 4D 89 DC mov r12, r11
// 0033110D 4D 8D 47 F8 lea r8, [r15 - 8]
// 00331111 4C 89 7C 24 60 mov qword ptr [rsp + 0x60], r15
// 00331116 4D 8B 7F F8 mov r15, qword ptr [r15 - 8]
// 0033111A 48 8B 0D 87 06 17 01 mov rcx, qword ptr [rip + 0x1170687]
// 00331121 89 C0 mov eax, eax
// 00331123 48 8D 15 02 E7 C0 00 lea rdx, [rip + 0xc0e702]
// 0033112A 48 63 04 82 movsxd rax, dword ptr [rdx + rax*4]
// 0033112E 48 01 D0 add rax, rdx
// 00331131 4C 89 D5 mov rbp, r10
// 00331134 4D 89 C5 mov r13, r8
// 00331137 FF E0 jmp rax
Code: []byte{
0x4d, 0x89, 0xdc, 0x4d, 0x8d, 0x47, 0xf8, 0x4c, 0x89, 0x7c, 0x24,
0x60, 0x4d, 0x8b, 0x7f, 0xf8, 0x48, 0x8b, 0x0d, 0x87, 0x06, 0x17,
0x01, 0x89, 0xc0, 0x48, 0x8d, 0x15, 0x02, 0xe7, 0xc0, 0x00, 0x48,
0x63, 0x04, 0x82, 0x48, 0x01, 0xd0, 0x4c, 0x89, 0xd5, 0x4d, 0x89,
0xc5, 0xff, 0xe0,
},
},
}
it := NewInterpreter()
initR12 := it.Regs.Get(R12)
it.ResetCode(blocks[0].Code, blocks[0].Address)
_, err := it.Loop()
require.ErrorIs(t, err, io.EOF)
expected := expression.ZeroExtend(initR12, 2)
assertEval(t, it.Regs.Get(RAX), expected)
it.ResetCode(blocks[1].Code, blocks[1].Address)
_, err = it.Loop()
require.ErrorIs(t, err, io.EOF)
table := expression.NewImmediateCapture("table")
base := expression.NewImmediateCapture("base")
expected = expression.Add(
expression.SignExtend(
expression.Mem(
expression.Add(
expression.Multiply(
expression.ZeroExtend(initR12, 2),
expression.Imm(4),
),
table,
),
4,
),
64,
),
base,
)
assertEval(t, it.Regs.Get(RAX), expected)
assert.EqualValues(t, 0xf3f82c, table.CapturedValue())
assert.EqualValues(t, 0xf3f82c, base.CapturedValue())
}
func assertEval(t *testing.T, left, right expression.Expression) {
if !left.Match(right) {
assert.Failf(t, "failed to eval %s to %s", left.DebugString(), right.DebugString())
t.Logf("left %s", left.DebugString())
t.Logf("right %s", right.DebugString())
}
}
func FuzzInterpreter(f *testing.F) {
f.Fuzz(func(_ *testing.T, code []byte) {
i := NewInterpreterWithCode(code)
_, _ = i.Loop()
})
}
func TestMoveSignExtend(t *testing.T) {
i := NewInterpreterWithCode([]byte{
// 00000000 B8 01 00 00 00 mov eax, 1
// 00000005 8B 40 04 mov eax, dword ptr [rax + 4]
// 00000008 B8 02 00 00 00 mov eax, 2
// 0000000D 48 0F B6 40 04 movzx rax, byte ptr [rax + 4]
// 00000012 B8 03 00 00 00 mov eax, 3
// 00000017 48 0F BF 40 04 movsx rax, word ptr [rax + 4]
0xB8, 0x01, 0x00, 0x00, 0x00, 0x8B, 0x40, 0x04,
0xB8, 0x02, 0x00, 0x00, 0x00, 0x48, 0x0F, 0xB6,
0x40, 0x04, 0xB8, 0x03, 0x00, 0x00, 0x00, 0x48,
0x0F, 0xBF, 0x40, 0x04,
})
_, err := i.Loop()
require.ErrorIs(t, err, io.EOF)
pattern := expression.SignExtend(expression.Mem(expression.Imm(7), 2), 64)
require.True(t, i.Regs.Get(RAX).Match(pattern))
}


@@ -3,64 +3,230 @@
 package amd // import "go.opentelemetry.io/ebpf-profiler/asm/amd"
-import "golang.org/x/arch/x86/x86asm"
-// regIndex returns index into RegsState.regs
-func regIndex(reg x86asm.Reg) int {
+import (
+	"fmt"
+	"go.opentelemetry.io/ebpf-profiler/asm/expression"
+	"golang.org/x/arch/x86/x86asm"
+)
+type Registers struct {
+	regs [int(registersCount)]expression.Expression
+}
+type regEntry struct {
+	idx  Reg
+	bits int
+}
+type Reg uint8
+const (
+	_ Reg = iota
+	RAX
+	RCX
+	RDX
+	RBX
+	RSP
+	RBP
+	RSI
+	RDI
+	R8
+	R9
+	R10
+	R11
+	R12
+	R13
+	R14
+	R15
+	RIP
+	registersCount
+)
+var regNames = [...]string{
+	RAX: "RAX",
+	RCX: "RCX",
+	RDX: "RDX",
+	RBX: "RBX",
+	RSP: "RSP",
+	RBP: "RBP",
+	RSI: "RSI",
+	RDI: "RDI",
+	R8:  "R8",
+	R9:  "R9",
+	R10: "R10",
+	R11: "R11",
+	R12: "R12",
+	R13: "R13",
+	R14: "R14",
+	R15: "R15",
+	RIP: "RIP",
+}
+func (r Reg) String() string {
+	i := int(r)
+	if r == 0 || i >= len(regNames) || regNames[i] == "" {
+		return fmt.Sprintf("Reg(%d)", i)
+	}
+	return regNames[i]
+}
+func regMappingFor(reg x86asm.Reg) regEntry {
 	switch reg {
-	case x86asm.RAX, x86asm.EAX:
-		return 1
-	case x86asm.RBX, x86asm.EBX:
-		return 2
-	case x86asm.RCX, x86asm.ECX:
-		return 3
-	case x86asm.RDX, x86asm.EDX:
-		return 4
-	case x86asm.RDI, x86asm.EDI:
-		return 5
-	case x86asm.RSI, x86asm.ESI:
-		return 6
-	case x86asm.RBP, x86asm.EBP:
-		return 7
-	case x86asm.R8, x86asm.R8L:
-		return 8
-	case x86asm.R9, x86asm.R9L:
-		return 9
-	case x86asm.R10, x86asm.R10L:
-		return 10
-	case x86asm.R11, x86asm.R11L:
-		return 11
-	case x86asm.R12, x86asm.R12L:
-		return 12
-	case x86asm.R13, x86asm.R13L:
-		return 13
-	case x86asm.R14, x86asm.R14L:
-		return 14
-	case x86asm.R15, x86asm.R15L:
-		return 15
-	case x86asm.RSP, x86asm.ESP:
-		return 16
+	case x86asm.AL:
+		return regEntry{idx: RAX, bits: 8}
+	case x86asm.CL:
+		return regEntry{idx: RCX, bits: 8}
+	case x86asm.DL:
+		return regEntry{idx: RDX, bits: 8}
+	case x86asm.BL:
+		return regEntry{idx: RBX, bits: 8}
+	case x86asm.SPB:
+		return regEntry{idx: RSP, bits: 8}
+	case x86asm.BPB:
+		return regEntry{idx: RBP, bits: 8}
+	case x86asm.SIB:
+		return regEntry{idx: RSI, bits: 8}
+	case x86asm.DIB:
+		return regEntry{idx: RDI, bits: 8}
+	case x86asm.R8B:
+		return regEntry{idx: R8, bits: 8}
+	case x86asm.R9B:
+		return regEntry{idx: R9, bits: 8}
+	case x86asm.R10B:
+		return regEntry{idx: R10, bits: 8}
+	case x86asm.R11B:
+		return regEntry{idx: R11, bits: 8}
+	case x86asm.R12B:
+		return regEntry{idx: R12, bits: 8}
+	case x86asm.R13B:
+		return regEntry{idx: R13, bits: 8}
+	case x86asm.R14B:
+		return regEntry{idx: R14, bits: 8}
+	case x86asm.R15B:
+		return regEntry{idx: R15, bits: 8}
+	case x86asm.AX:
+		return regEntry{idx: RAX, bits: 16}
+	case x86asm.CX:
+		return regEntry{idx: RCX, bits: 16}
+	case x86asm.DX:
+		return regEntry{idx: RDX, bits: 16}
+	case x86asm.BX:
+		return regEntry{idx: RBX, bits: 16}
+	case x86asm.SP:
+		return regEntry{idx: RSP, bits: 16}
+	case x86asm.BP:
+		return regEntry{idx: RBP, bits: 16}
+	case x86asm.SI:
+		return regEntry{idx: RSI, bits: 16}
+	case x86asm.DI:
+		return regEntry{idx: RDI, bits: 16}
+	case x86asm.R8W:
+		return regEntry{idx: R8, bits: 16}
+	case x86asm.R9W:
+		return regEntry{idx: R9, bits: 16}
+	case x86asm.R10W:
+		return regEntry{idx: R10, bits: 16}
+	case x86asm.R11W:
+		return regEntry{idx: R11, bits: 16}
+	case x86asm.R12W:
+		return regEntry{idx: R12, bits: 16}
+	case x86asm.R13W:
+		return regEntry{idx: R13, bits: 16}
+	case x86asm.R14W:
+		return regEntry{idx: R14, bits: 16}
+	case x86asm.R15W:
+		return regEntry{idx: R15, bits: 16}
+	case x86asm.EAX:
+		return regEntry{idx: RAX, bits: 32}
+	case x86asm.ECX:
+		return regEntry{idx: RCX, bits: 32}
+	case x86asm.EDX:
+		return regEntry{idx: RDX, bits: 32}
+	case x86asm.EBX:
+		return regEntry{idx: RBX, bits: 32}
+	case x86asm.ESP:
+		return regEntry{idx: RSP, bits: 32}
+	case x86asm.EBP:
+		return regEntry{idx: RBP, bits: 32}
+	case x86asm.ESI:
+		return regEntry{idx: RSI, bits: 32}
+	case x86asm.EDI:
+		return regEntry{idx: RDI, bits: 32}
+	case x86asm.R8L:
+		return regEntry{idx: R8, bits: 32}
+	case x86asm.R9L:
+		return regEntry{idx: R9, bits: 32}
+	case x86asm.R10L:
+		return regEntry{idx: R10, bits: 32}
+	case x86asm.R11L:
+		return regEntry{idx: R11, bits: 32}
+	case x86asm.R12L:
+		return regEntry{idx: R12, bits: 32}
+	case x86asm.R13L:
+		return regEntry{idx: R13, bits: 32}
+	case x86asm.R14L:
+		return regEntry{idx: R14, bits: 32}
+	case x86asm.R15L:
+		return regEntry{idx: R15, bits: 32}
+	case x86asm.RAX:
+		return regEntry{idx: RAX, bits: 64}
+	case x86asm.RCX:
+		return regEntry{idx: RCX, bits: 64}
+	case x86asm.RDX:
+		return regEntry{idx: RDX, bits: 64}
+	case x86asm.RBX:
+		return regEntry{idx: RBX, bits: 64}
+	case x86asm.RSP:
+		return regEntry{idx: RSP, bits: 64}
+	case x86asm.RBP:
+		return regEntry{idx: RBP, bits: 64}
+	case x86asm.RSI:
+		return regEntry{idx: RSI, bits: 64}
+	case x86asm.RDI:
+		return regEntry{idx: RDI, bits: 64}
+	case x86asm.R8:
+		return regEntry{idx: R8, bits: 64}
+	case x86asm.R9:
+		return regEntry{idx: R9, bits: 64}
+	case x86asm.R10:
+		return regEntry{idx: R10, bits: 64}
+	case x86asm.R11:
+		return regEntry{idx: R11, bits: 64}
+	case x86asm.R12:
+		return regEntry{idx: R12, bits: 64}
+	case x86asm.R13:
+		return regEntry{idx: R13, bits: 64}
+	case x86asm.R14:
+		return regEntry{idx: R14, bits: 64}
+	case x86asm.R15:
+		return regEntry{idx: R15, bits: 64}
 	case x86asm.RIP:
-		return 17
+		return regEntry{idx: RIP, bits: 64}
 	default:
-		return 0
+		return regEntry{idx: 0, bits: 64}
 	}
 }
-type RegsState struct {
-	regs [18]regState
+func (r *Registers) setX86asm(reg x86asm.Reg, v expression.Expression) {
+	e := regMappingFor(reg)
+	if e.bits != 64 {
+		v = expression.ZeroExtend(v, e.bits)
+	}
+	r.regs[e.idx] = v
 }
-func (r *RegsState) Set(reg x86asm.Reg, value, loadedFrom uint64) {
-	r.regs[regIndex(reg)].Value = value
-	r.regs[regIndex(reg)].LoadedFrom = loadedFrom
+func (r *Registers) getX86asm(reg x86asm.Reg) expression.Expression {
+	e := regMappingFor(reg)
+	res := r.regs[e.idx]
+	if e.bits != 64 {
+		res = expression.ZeroExtend(res, e.bits)
+	}
+	return res
 }
-func (r *RegsState) Get(reg x86asm.Reg) (value, loadedFrom uint64) {
-	return r.regs[regIndex(reg)].Value, r.regs[regIndex(reg)].LoadedFrom
-}
-type regState struct {
-	LoadedFrom uint64
-	Value      uint64
+func (r *Registers) Get(reg Reg) expression.Expression {
+	if int(reg) >= len(r.regs) {
+		return r.regs[0]
+	}
+	return r.regs[int(reg)]
 }

36
asm/expression/add.go Normal file

@ -0,0 +1,36 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package expression // import "go.opentelemetry.io/ebpf-profiler/asm/expression"
func Add(vs ...Expression) Expression {
oss := make(operands, 0, len(vs)+1)
v := uint64(0)
for _, it := range vs {
if o, ok := it.(*op); ok && o.typ == opAdd {
for _, jit := range o.operands {
if imm, immOk := jit.(*immediate); immOk {
v += imm.Value
} else {
oss = append(oss, jit)
}
}
} else {
if imm, immOk := it.(*immediate); immOk {
v += imm.Value
} else {
oss = append(oss, it)
}
}
}
if len(oss) == 0 {
return Imm(v)
}
if v != 0 {
oss = append(oss, Imm(v))
}
if len(oss) == 1 {
return oss[0]
}
return newOp(opAdd, oss)
}

29
asm/expression/capture.go Normal file

@ -0,0 +1,29 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package expression // import "go.opentelemetry.io/ebpf-profiler/asm/expression"
var _ Expression = &ImmediateCapture{}
func NewImmediateCapture(name string) *ImmediateCapture {
return &ImmediateCapture{
name: name,
}
}
type ImmediateCapture struct {
name string
capturedValue immediate
}
func (v *ImmediateCapture) CapturedValue() uint64 {
return v.capturedValue.Value
}
func (v *ImmediateCapture) DebugString() string {
return "@" + v.name
}
func (v *ImmediateCapture) Match(_ Expression) bool {
return false
}


@ -0,0 +1,72 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package expression // import "go.opentelemetry.io/ebpf-profiler/asm/expression"
import "sort"
// Expression is an interface representing a 64-bit size value. It can be immediate
type Expression interface {
// Match compares this Expression value against a pattern Expression.
// The order of the arguments matters: a.Match(b) or b.Match(a) may
// produce different results. The intended order The pattern should be passed as
// an argument, not the other way around.
// It returns true if the values are considered equal or compatible according to
// the type-specific rules:
// - For operations (add, mul): checks if operation types and operands match
// - For immediate: checks if values are equal and extracts value into a ImmediateCapture
// - For mem references: checks if segments and addresses match
// - For extend operations: checks if sizes and inner values match
// - For named: checks if they are pointing to the same object instance.
// - For ImmediateCapture: matches nothing - see immediate
Match(pattern Expression) bool
DebugString() string
}
type operands []Expression
func (os *operands) Match(other operands) bool {
if len(*os) != len(other) {
return false
}
sort.Sort(sortedOperands(*os))
sort.Sort(sortedOperands(other))
for i := 0; i < len(*os); i++ {
if !(*os)[i].Match(other[i]) {
return false
}
}
return true
}
type sortedOperands operands
func (s sortedOperands) Len() int {
return len(s)
}
func (s sortedOperands) Less(i, j int) bool {
o1 := cmpOrder(s[i])
o2 := cmpOrder(s[j])
return o1 < o2
}
func (s sortedOperands) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func cmpOrder(u Expression) int {
switch u.(type) {
case *mem:
return 1
case *op:
return 2
case *ImmediateCapture:
return 3
case *named:
return 4
case *immediate:
return 5
default:
return 0
}
}
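The Match contract documented in this file is what the new Python stub decoder (interpreter/python, later in this diff) relies on: a value built up by the amd64 interpreter is matched against a pattern containing ImmediateCapture placeholders, and the captured constants are read back afterwards. A small usage sketch using only functions that appear in this change set (the address 0x1234 is an arbitrary example value):

```go
package main

import (
	"fmt"

	e "go.opentelemetry.io/ebpf-profiler/asm/expression"
)

func main() {
	// A value as the interpreter might compute it: an 8-byte memory load
	// from address 0x1234, zero-extended to 32 bits.
	value := e.ZeroExtend32(e.Mem8(e.Imm(0x1234)))

	// The pattern leaves the load address as a capture slot.
	addr := e.NewImmediateCapture("addr")
	pattern := e.ZeroExtend32(e.Mem8(addr))

	if value.Match(pattern) {
		fmt.Printf("loaded from %#x\n", addr.CapturedValue()) // 0x1234
	}
}
```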


@ -0,0 +1,116 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package expression
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestExpression(t *testing.T) {
t.Run("add sort-summ-immediate", func(t *testing.T) {
v := Named("v")
require.Equal(t, Add(v, Imm(14)), Add(Imm(1), Imm(3), Imm(1), v, Imm(9)))
})
t.Run("named match", func(t *testing.T) {
n := Named("v")
require.True(t, n.Match(n))
require.False(t, n.Match(Imm(239)))
})
t.Run("add 0", func(t *testing.T) {
v := Named("v")
require.Equal(t, v, Add(Imm(0), v))
})
t.Run("add nested", func(t *testing.T) {
s1 := Named("s1")
s2 := Named("s2")
s3 := Named("s3")
performAssertions := func(e Expression) {
opp, ok := e.(*op)
require.True(t, ok)
require.Len(t, opp.operands, 3)
require.Contains(t, opp.operands, s1)
require.Contains(t, opp.operands, s2)
require.Contains(t, opp.operands, s3)
}
performAssertions(Add(Add(s1, s3), s2))
performAssertions(Add(Add(s1, s3), s2))
})
t.Run("add opt", func(t *testing.T) {
v := Named("v")
require.Equal(t, Add(Add(Imm(2), v), Imm(7)), Add(v, Imm(9)))
})
t.Run("add 1 element", func(t *testing.T) {
require.Equal(t, Add(Imm(2)), Imm(2))
})
t.Run("mul immediate", func(t *testing.T) {
v := Named("v")
require.Equal(t, Multiply(v, Imm(27)), Multiply(Imm(1), Imm(3), Imm(1), v, Imm(9)))
})
t.Run("mul 1", func(t *testing.T) {
v := Named("v")
require.Equal(t, v, Multiply(Imm(1), v))
})
t.Run("mul add", func(t *testing.T) {
v1 := Named("v1")
v2 := Named("v2")
v3 := Named("v3")
require.Equal(t, Add(Multiply(v1, v3), Multiply(v2, v3)), Multiply(Add(v1, v2), v3))
})
t.Run("op order", func(t *testing.T) {
v := Named("v")
v2 := Mem8(Named("v2"))
require.True(t, Multiply(v, v2).Match(Multiply(v2, v)))
})
t.Run("mul order", func(t *testing.T) {
v := Named("v")
var a Expression = &op{opMul, []Expression{v, Imm(239)}}
require.Equal(t, a, Multiply(Imm(239), v))
})
t.Run("mul 0", func(t *testing.T) {
v := Named("v")
require.Equal(t, Imm(0), Multiply(Imm(0), Imm(3), Imm(1), v, Imm(9)))
})
t.Run("extend nested", func(t *testing.T) {
v := Named("v")
require.Equal(t, ZeroExtend(v, 7), ZeroExtend(ZeroExtend(v, 7), 7))
})
t.Run("extend nested smaller", func(t *testing.T) {
v := Named("v")
require.Equal(t, ZeroExtend(v, 5), ZeroExtend(ZeroExtend(v, 7), 5))
})
t.Run("extend nested smaller", func(t *testing.T) {
v := Named("v")
require.Equal(t, ZeroExtend(v, 5), ZeroExtend(ZeroExtend(v, 5), 7))
})
t.Run("extend 0", func(t *testing.T) {
require.Equal(t, Imm(0), ZeroExtend(Named("v1"), 0))
})
t.Run("nested extend ", func(t *testing.T) {
v1 := Named("v1")
require.Equal(t, ZeroExtend(v1, 8), ZeroExtend(ZeroExtend(v1, 8), 8))
})
}

84
asm/expression/extend.go Normal file

@ -0,0 +1,84 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package expression // import "go.opentelemetry.io/ebpf-profiler/asm/expression"
import (
"fmt"
"math"
)
var _ Expression = &extend{}
func SignExtend(v Expression, bits int) Expression {
return &extend{v, bits, true}
}
func ZeroExtend32(v Expression) Expression {
return ZeroExtend(v, 32)
}
func ZeroExtend8(v Expression) Expression {
return ZeroExtend(v, 8)
}
func ZeroExtend(v Expression, bits int) Expression {
if bits >= 64 {
bits = 64
}
c := &extend{
v: v,
bits: bits,
}
if c.bits == 0 {
return Imm(0)
}
if c.bits == 64 {
return c.v
}
switch typed := c.v.(type) {
case *immediate:
return Imm(typed.Value & c.MaxValue())
case *extend:
if typed.sign {
return c
}
if typed.bits <= c.bits {
return typed
}
return &extend{typed.v, c.bits, false}
default:
return c
}
}
type extend struct {
v Expression
bits int
sign bool
}
func (c *extend) MaxValue() uint64 {
if c.bits >= 64 || c.sign {
return math.MaxUint64
}
return 1<<c.bits - 1
}
func (c *extend) Match(pattern Expression) bool {
switch typedPattern := pattern.(type) {
case *extend:
return typedPattern.bits == c.bits &&
typedPattern.sign == c.sign &&
c.v.Match(typedPattern.v)
default:
return false
}
}
func (c *extend) DebugString() string {
s := "zero"
if c.sign {
s = "sign"
}
return fmt.Sprintf("%s-extend(%s, %d bits)", s, c.v.DebugString(), c.bits)
}

43
asm/expression/imm.go Normal file

@ -0,0 +1,43 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package expression // import "go.opentelemetry.io/ebpf-profiler/asm/expression"
import "fmt"
var zero Expression = &immediate{0}
var one Expression = &immediate{1}
func Imm(v uint64) Expression {
switch v {
case 0:
return zero
case 1:
return one
default:
return &immediate{v}
}
}
type immediate struct {
Value uint64
}
func (v *immediate) MaxValue() uint64 {
return v.Value
}
func (v *immediate) DebugString() string {
return fmt.Sprintf("0x%x", v.Value)
}
func (v *immediate) Match(pattern Expression) bool {
switch typedPattern := pattern.(type) {
case *immediate:
return v.Value == typedPattern.Value
case *ImmediateCapture:
typedPattern.capturedValue = *v
return true
default:
return false
}
}

57
asm/expression/mem.go Normal file

@ -0,0 +1,57 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package expression // import "go.opentelemetry.io/ebpf-profiler/asm/expression"
import (
"fmt"
"golang.org/x/arch/x86/x86asm"
)
var _ Expression = &mem{}
func MemWithSegment8(segment x86asm.Reg, at Expression) Expression {
return MemWithSegment(segment, at, 8)
}
func MemWithSegment(segment x86asm.Reg, at Expression, sizeBytes int) Expression {
return &mem{at: at, segment: segment, sizeBytes: sizeBytes}
}
func Mem8(at Expression) Expression {
return Mem(at, 8)
}
func Mem1(at Expression) Expression {
return Mem(at, 1)
}
func Mem(at Expression, sizeBytes int) Expression {
return &mem{at: at, segment: 0, sizeBytes: sizeBytes}
}
type mem struct {
segment x86asm.Reg
at Expression
sizeBytes int
}
func (v *mem) DebugString() string {
if v.segment == 0 {
return fmt.Sprintf("[%s : %d bits]", v.at.DebugString(), v.sizeBytes*8)
}
return fmt.Sprintf("[%s : %s : %d bits]", v.segment, v.at.DebugString(), v.sizeBytes*8)
}
func (v *mem) Match(pattern Expression) bool {
switch typedPattern := pattern.(type) {
case *mem:
if v.segment != typedPattern.segment {
return false
}
return v.at.Match(typedPattern.at)
default:
return false
}
}

43
asm/expression/mul.go Normal file

@ -0,0 +1,43 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package expression // import "go.opentelemetry.io/ebpf-profiler/asm/expression"
func Multiply(vs ...Expression) Expression {
oss := make(operands, 0, len(vs)+1)
v := uint64(1)
for _, it := range vs {
if it == zero {
return zero
}
if it == one {
continue
}
if imm, immOk := it.(*immediate); immOk {
v *= imm.Value
} else {
oss = append(oss, it)
}
}
if len(oss) == 0 {
return Imm(v)
}
if v != 1 {
oss = append(oss, Imm(v))
}
if len(oss) == 1 {
return oss[0]
}
if len(oss) == 2 {
if a, ok := oss[0].(*op); ok && a.typ == opAdd {
var res []Expression
for _, ait := range a.operands {
res = append(res, Multiply(ait, oss[1]))
}
return Add(res...)
}
}
return newOp(opMul, oss)
}

24
asm/expression/named.go Normal file

@ -0,0 +1,24 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package expression // import "go.opentelemetry.io/ebpf-profiler/asm/expression"
var _ Expression = &named{}
func Named(name string) Expression {
return &named{
name: name,
}
}
type named struct {
name string
}
func (v *named) DebugString() string {
return "@" + v.name
}
func (v *named) Match(pattern Expression) bool {
return pattern == v
}

56
asm/expression/op.go Normal file

@ -0,0 +1,56 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package expression // import "go.opentelemetry.io/ebpf-profiler/asm/expression"
import (
"fmt"
"strings"
)
type opType int
const (
_ opType = iota
opAdd
opMul
)
type op struct {
typ opType
operands operands
}
func newOp(typ opType, operands operands) Expression {
res := &op{typ: typ, operands: operands}
return res
}
func (o *op) Match(pattern Expression) bool {
switch typedPattern := pattern.(type) {
case *op:
if o.typ != typedPattern.typ ||
len(o.operands) != len(typedPattern.operands) {
return false
}
return o.operands.Match(typedPattern.operands)
default:
return false
}
}
func (o *op) DebugString() string {
ss := make([]string, len(o.operands))
for i := range o.operands {
ss[i] = o.operands[i].DebugString()
}
sep := ""
switch o.typ {
case opAdd:
sep = "+"
case opMul:
sep = "*"
}
return fmt.Sprintf("( %s )", strings.Join(ss, sep))
}


@@ -12,7 +12,6 @@ import (
 	"github.com/peterbourgon/ff/v3"
 	"go.opentelemetry.io/ebpf-profiler/internal/controller"
-	"go.opentelemetry.io/ebpf-profiler/support"
 	"go.opentelemetry.io/ebpf-profiler/tracer"
 )
@@ -64,10 +63,10 @@ var (
 		"If zero, monotonic-realtime clock sync will be performed once, " +
 		"on agent startup, but not periodically."
 	sendErrorFramesHelp = "Send error frames (devfiler only, breaks Kibana)"
-	offCPUThresholdHelp = fmt.Sprintf("The per-mille chance for an off-cpu event being recorded. "+
-		"Valid values are in the range [1..%d], and 0 to disable off-cpu profiling."+
+	offCPUThresholdHelp = fmt.Sprintf("The probability for an off-cpu event being recorded. "+
+		"Valid values are in the range [0..1]. 0 disables off-cpu profiling. "+
 		"Default is %d.",
-		support.OffCPUThresholdMax, defaultOffCPUThreshold)
+		defaultOffCPUThreshold)
 	envVarsHelp = "Comma separated list of environment variables that will be reported with the" +
 		"captured profiling samples."
 )
@@ -123,7 +122,7 @@ func parseArgs() (*controller.Config, error) {
 	fs.BoolVar(&args.VerboseMode, "verbose", false, verboseModeHelp)
 	fs.BoolVar(&args.Version, "version", false, versionHelp)
-	fs.UintVar(&args.OffCPUThreshold, "off-cpu-threshold",
+	fs.Float64Var(&args.OffCPUThreshold, "off-cpu-threshold",
 		defaultOffCPUThreshold, offCPUThresholdHelp)
 	fs.StringVar(&args.IncludeEnvVars, "env-vars", defaultEnvVarsValue, envVarsHelp)


@@ -38,7 +38,6 @@ func NewController(cfg *controller.Config,
 		ExecutablesCacheElements: 16384,
 		// Next step: Calculate FramesCacheElements from numCores and samplingRate.
 		FramesCacheElements: 131072,
-		CGroupCacheElements: 1024,
 		SamplesPerSecond: cfg.SamplesPerSecond,
 	}, nextConsumer)
 	if err != nil {

10
go.mod

@@ -5,7 +5,7 @@ go 1.23.6
 require (
 	github.com/aws/aws-sdk-go-v2 v1.36.5
 	github.com/aws/aws-sdk-go-v2/config v1.29.17
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.82.0
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0
 	github.com/cespare/xxhash/v2 v2.3.0
 	github.com/cilium/ebpf v0.19.0
 	github.com/elastic/go-freelru v0.16.0
@@ -29,11 +29,11 @@ require (
 	go.opentelemetry.io/collector/receiver/xreceiver v0.129.0
 	go.opentelemetry.io/otel v1.37.0
 	go.opentelemetry.io/otel/metric v1.37.0
-	golang.org/x/arch v0.18.0
+	golang.org/x/arch v0.19.0
 	golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
-	golang.org/x/mod v0.25.0
-	golang.org/x/sync v0.15.0
-	golang.org/x/sys v0.33.0
+	golang.org/x/mod v0.26.0
+	golang.org/x/sync v0.16.0
+	golang.org/x/sys v0.34.0
 	google.golang.org/grpc v1.73.0
 )

10
go.sum

@@ -30,6 +30,8 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.81.0 h1:1GmCadhKR3J2sMVKs2bAYq9VnwYeC
 github.com/aws/aws-sdk-go-v2/service/s3 v1.81.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU=
 github.com/aws/aws-sdk-go-v2/service/s3 v1.82.0 h1:JubM8CGDDFaAOmBrd8CRYNr49ZNgEAiLwGwgNMdS0nw=
 github.com/aws/aws-sdk-go-v2/service/s3 v1.82.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0 h1:5Y75q0RPQoAbieyOuGLhjV9P3txvYgXv2lg0UwJOfmE=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU=
 github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg=
 github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg=
@@ -220,6 +222,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
 go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 golang.org/x/arch v0.18.0 h1:WN9poc33zL4AzGxqf8VtpKUnGvMi8O9lhNyBMF/85qc=
 golang.org/x/arch v0.18.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
+golang.org/x/arch v0.19.0 h1:LmbDQUodHThXE+htjrnmVD73M//D9GTH6wFZjyDkjyU=
+golang.org/x/arch v0.19.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -231,6 +235,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
 golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
+golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -246,6 +252,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
 golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -258,6 +266,8 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
 golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
+golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=


@@ -50,6 +50,7 @@ type Trace struct {
 	Comm           string
 	ProcessName    string
 	ExecutablePath string
+	ContainerID    string
 	Frames         []Frame
 	Hash           TraceHash
 	KTime          times.KTime


@@ -10,7 +10,6 @@ import (
 	log "github.com/sirupsen/logrus"
 	"go.opentelemetry.io/ebpf-profiler/reporter"
-	"go.opentelemetry.io/ebpf-profiler/support"
 	"go.opentelemetry.io/ebpf-profiler/tracer"
 )
@@ -32,7 +31,7 @@ type Config struct {
 	Tracers     string
 	VerboseMode bool
 	Version     bool
-	OffCPUThreshold uint
+	OffCPUThreshold float64
 	Reporter reporter.Reporter
@@ -89,12 +88,10 @@ func (cfg *Config) Validate() error {
 		)
 	}
-	if cfg.OffCPUThreshold > support.OffCPUThresholdMax {
-		return fmt.Errorf(
-			"invalid argument for off-cpu-threshold. Value "+
-				"should be between 1 and %d, or 0 to disable off-cpu profiling",
-			support.OffCPUThresholdMax,
-		)
+	if cfg.OffCPUThreshold < 0.0 || cfg.OffCPUThreshold > 1.0 {
+		return errors.New(
+			"invalid argument for off-cpu-threshold. The value " +
+				"should be in the range [0..1]. 0 disables off-cpu profiling")
 	}
 	if !cfg.NoKernelVersionCheck {


@@ -3,6 +3,7 @@ package controller // import "go.opentelemetry.io/ebpf-profiler/internal/control
 import (
 	"context"
 	"fmt"
+	"math"
 	"strings"
 	"time"
@@ -95,7 +96,7 @@ func (c *Controller) Start(ctx context.Context) error {
 		BPFVerifierLogLevel:    uint32(c.config.BpfVerifierLogLevel),
 		ProbabilisticInterval:  c.config.ProbabilisticInterval,
 		ProbabilisticThreshold: c.config.ProbabilisticThreshold,
-		OffCPUThreshold:        uint32(c.config.OffCPUThreshold),
+		OffCPUThreshold:        uint32(c.config.OffCPUThreshold * float64(math.MaxUint32)),
 		IncludeEnvVars:         envVars,
 	})
 	if err != nil {
@@ -117,11 +118,11 @@
 	}
 	log.Info("Attached tracer program")
-	if c.config.OffCPUThreshold > 0 {
+	if c.config.OffCPUThreshold > 0.0 {
 		if err := trc.StartOffCPUProfiling(); err != nil {
 			return fmt.Errorf("failed to start off-cpu profiling: %v", err)
 		}
-		log.Printf("Enabled off-cpu profiling")
+		log.Printf("Enabled off-cpu profiling with p=%f", c.config.OffCPUThreshold)
 	}
 	if c.config.ProbabilisticThreshold < tracer.ProbabilisticThresholdMax {
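The OffCPUThreshold conversion above maps the user-facing probability p in [0..1] onto the full uint32 range; the eBPF side can then compare a uniformly random 32-bit value against that threshold so that roughly a fraction p of off-cpu events is recorded. A hedged user-space sketch of the same arithmetic (the helper names are illustrative, not the profiler's kernel-side code):

```go
package main

import (
	"fmt"
	"math"
	"math/rand/v2"
)

// thresholdFromProbability mirrors the conversion done in Controller.Start:
// p in [0..1] is scaled to the full uint32 range.
func thresholdFromProbability(p float64) uint32 {
	return uint32(p * float64(math.MaxUint32))
}

// sampleOffCPUEvent models the kernel-side decision: draw a random 32-bit
// value and record the event only if it falls below the threshold.
func sampleOffCPUEvent(threshold uint32) bool {
	return rand.Uint32() < threshold
}

func main() {
	p := 0.01 // record roughly 1% of off-cpu scheduling events
	th := thresholdFromProbability(p)
	hits := 0
	for i := 0; i < 1_000_000; i++ {
		if sampleOffCPUEvent(th) {
			hits++
		}
	}
	fmt.Printf("threshold=%d, observed rate=%.4f\n", th, float64(hits)/1e6)
}
```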


@ -0,0 +1,65 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package python // import "go.opentelemetry.io/ebpf-profiler/interpreter/python"
import (
"fmt"
"go.opentelemetry.io/ebpf-profiler/asm/amd"
e "go.opentelemetry.io/ebpf-profiler/asm/expression"
"go.opentelemetry.io/ebpf-profiler/libpf"
"golang.org/x/arch/x86/x86asm"
)
func decodeStubArgumentAMD64(
code []byte,
codeAddress, memBase uint64,
) (
libpf.SymbolValue, error,
) {
it := amd.NewInterpreterWithCode(code)
it.CodeAddress = e.Imm(codeAddress)
_, err := it.LoopWithBreak(func(op x86asm.Inst) bool {
return op.Op == x86asm.JMP || op.Op == x86asm.CALL
})
if err != nil {
return 0, err
}
answer, err := evaluateStubAnswerAMD64(it.Regs.Get(amd.RDI), memBase)
if err != nil {
return 0, err
}
return libpf.SymbolValue(answer), err
}
func evaluateStubAnswerAMD64(res e.Expression, memBase uint64) (uint64, error) {
answer := e.NewImmediateCapture("answer")
if res.Match(e.ZeroExtend32(e.Mem8(answer))) {
return answer.CapturedValue(), nil
}
if res.Match(
e.Add(
e.Mem8(e.NewImmediateCapture("mem")),
answer,
),
) {
return memBase + answer.CapturedValue(), nil
}
if res.Match(
e.ZeroExtend32(
e.Mem8(
e.Add(
e.Mem8(e.NewImmediateCapture("mem")),
answer,
),
),
),
) {
return memBase + answer.CapturedValue(), nil
}
if res.Match(answer) {
return answer.CapturedValue(), nil
}
return 0, fmt.Errorf("not found %s", res.DebugString())
}


@@ -4,15 +4,9 @@
 package python // import "go.opentelemetry.io/ebpf-profiler/interpreter/python"
 import (
-	"errors"
-	"fmt"
-	"runtime"
 	ah "go.opentelemetry.io/ebpf-profiler/armhelpers"
-	"go.opentelemetry.io/ebpf-profiler/asm/amd"
 	"go.opentelemetry.io/ebpf-profiler/libpf"
 	aa "golang.org/x/arch/arm64/arm64asm"
-	"golang.org/x/arch/x86/x86asm"
 )
 // decodeStubArgumentARM64 disassembles arm64 code and decodes the assumed value
@@ -106,96 +100,3 @@ func decodeStubArgumentARM64(code []byte,
 	}
 	return libpf.SymbolValueInvalid
 }
-func decodeStubArgumentAMD64(code []byte, codeAddress, memoryBase uint64) (
-	libpf.SymbolValue, error) {
-	targetRegister := x86asm.RDI
-	instructionOffset := 0
-	regs := amd.RegsState{}
-	for instructionOffset < len(code) {
-		rem := code[instructionOffset:]
-		if ok, insnLen := amd.DecodeSkippable(rem); ok {
-			instructionOffset += insnLen
-			continue
-		}
-		inst, err := x86asm.Decode(rem, 64)
-		if err != nil {
-			return 0, fmt.Errorf("failed to decode instruction at 0x%x : %w",
-				instructionOffset, err)
-		}
-		instructionOffset += inst.Len
-		regs.Set(x86asm.RIP, codeAddress+uint64(instructionOffset), 0)
-		if inst.Op == x86asm.CALL || inst.Op == x86asm.JMP {
-			value, loadedFrom := regs.Get(targetRegister)
-			if loadedFrom != 0 {
-				return libpf.SymbolValue(loadedFrom), nil
-			}
-			return libpf.SymbolValue(value), nil
-		}
-		if (inst.Op == x86asm.LEA || inst.Op == x86asm.MOV) && inst.Args[0] != nil {
-			if reg, ok := inst.Args[0].(x86asm.Reg); ok {
-				var value uint64
-				var loadedFrom uint64
-				switch src := inst.Args[1].(type) {
-				case x86asm.Imm:
-					value = uint64(src)
-				case x86asm.Mem:
-					baseAddr, _ := regs.Get(src.Base)
-					displacement := uint64(src.Disp)
-					if inst.Op == x86asm.MOV {
-						value = memoryBase
-						loadedFrom = baseAddr + displacement
-						if src.Index != 0 {
-							indexValue, _ := regs.Get(src.Index)
-							loadedFrom += indexValue * uint64(src.Scale)
-						}
-					} else if inst.Op == x86asm.LEA {
-						value = baseAddr + displacement
-						if src.Index != 0 {
-							indexValue, _ := regs.Get(src.Index)
-							value += indexValue * uint64(src.Scale)
-						}
-					}
-				case x86asm.Reg:
-					value, _ = regs.Get(src)
-				}
-				regs.Set(reg, value, loadedFrom)
-			}
-		}
-		if inst.Op == x86asm.ADD && inst.Args[0] != nil && inst.Args[1] != nil {
-			if reg, ok0 := inst.Args[0].(x86asm.Reg); ok0 {
-				if _, ok1 := inst.Args[1].(x86asm.Mem); ok1 {
-					oldValue, _ := regs.Get(reg)
-					value := oldValue + memoryBase
-					regs.Set(reg, value, 0)
-				}
-			}
-		}
-	}
-	return 0, errors.New("no call/jump instructions found")
-}
-func decodeStubArgumentWrapper(
-	code []byte,
-	codeAddress libpf.SymbolValue,
-	memoryBase libpf.SymbolValue,
-) (libpf.SymbolValue, error) {
-	switch runtime.GOARCH {
-	case "arm64":
-		return decodeStubArgumentARM64(code, memoryBase), nil
-	case "amd64":
-		return decodeStubArgumentAMD64(code, uint64(codeAddress), uint64(memoryBase))
-	default:
-		return libpf.SymbolValueInvalid, fmt.Errorf("unsupported arch %s", runtime.GOARCH)
-	}
-}


@@ -56,11 +56,7 @@ func BenchmarkDecodeAmd64(b *testing.B) {
 		0xe9, 0xe7, 0xea, 0xe9, 0xff, // 1bbbb4: jmp 5a6a0 <pthread_getspecific@plt>
 	}
 	rip := uint64(0x1bbba0)
-	val, _ := decodeStubArgumentAMD64(
-		code,
-		rip,
-		0,
-	)
+	val, _ := decodeStubArgumentAMD64(code, rip, 0)
 	if val != 0x3a4c2c {
 		b.Fail()
 	}
@@ -72,6 +68,7 @@ func TestAmd64DecodeStub(t *testing.T) {
 		name          string
 		code          []byte
 		rip           uint64
+		memBase       uint64
 		expected      uint64
 		expectedError string
 	}{
@@ -113,7 +110,8 @@ func TestAmd64DecodeStub(t *testing.T) {
 				0xe9, 0x2e, 0x41, 0xeb, 0xff, // 1adcad: jmp 61de0 <PyThread_tss_get@plt>
 			},
 			rip:      0x1adc90,
-			expected: 0x248,
+			memBase:  0xcafe0000,
+			expected: 0xcafe0248,
 		},
 		{
 			name: "3.12.8 gcc12 disable-optimizations enabled-shared",
@@ -126,7 +124,8 @@ func TestAmd64DecodeStub(t *testing.T) {
 				0xe8, 0x95, 0x78, 0xe2, 0xff, // 2e25e6: call 109e80 <PyThread_tss_is_created@plt>
 			},
 			rip:      0x2e25d0,
-			expected: 0x608,
+			expected: 0x608 + 0xef00000,
+			memBase:  0xef00000,
 		},
 		{
 			name: "3.10.16 clang18 enable-optimizations enabled-shared",
@@ -139,7 +138,8 @@ func TestAmd64DecodeStub(t *testing.T) {
 				0xe9, 0x24, 0x55, 0xf9, 0xff, // cac67: jmp 60190 <pthread_getspecific@plt>
 			},
 			rip:      0xcac50,
-			expected: 0x24c,
+			expected: 0x24c + 0xef00000,
+			memBase:  0xef00000,
 		},
 		{
 			name: "3.10.16 clang18 enable-optimizations disable-shared",
@@ -222,14 +222,14 @@ func TestAmd64DecodeStub(t *testing.T) {
 		{
 			name:          "empty code",
 			code:          nil,
-			expectedError: "no call/jump instructions found",
+			expectedError: "EOF",
 		},
 		{
 			name: "no call/jump instructions found",
 			code: []byte{
 				0x48, 0xC7, 0xC7, 0xEF, 0xEF, 0xEF, 0x00, // mov rdi, 0xefefef
 			},
-			expectedError: "no call/jump instructions found",
+			expectedError: "EOF",
 		},
 		{
 			name: "bad instruction",
@@ -237,17 +237,7 @@ func TestAmd64DecodeStub(t *testing.T) {
 				0x48, 0xC7, 0xC7, 0xEF, 0xEF, 0xEF, 0x00, // mov rdi, 0xefefef
 				0xea, // :shrug:
 			},
-			expectedError: "failed to decode instruction at 0x7",
+			expectedError: "at 0x7",
 		},
-		{
-			name: "synthetic mov scale index",
-			code: []byte{
-				0x48, 0xC7, 0xC0, 0xCA, 0xCA, 0x00, 0x00, // mov rax, 0xcaca
-				0xBB, 0x00, 0x00, 0x00, 0x5E, // mov ebx, 0x5e000000
-				0x67, 0x48, 0x8B, 0x7C, 0x43, 0x05, // mov rdi, qword ptr [ebx + eax*2 + 5]
-				0xEB, 0x00, // jmp 0x14
-			},
-			expected: 0xCACA*2 + 0x5E000000 + 5,
-		},
 		{
 			name: "synthetic lea scale index",
@@ -276,7 +266,7 @@ func TestAmd64DecodeStub(t *testing.T) {
 			val, err := decodeStubArgumentAMD64(
 				td.code,
 				td.rip,
-				0, // NULL pointer as mem
+				td.memBase,
 			)
 			if td.expectedError != "" {
 				require.Error(t, err)


@@ -666,7 +666,15 @@ func decodeStub(ef *pfelf.File, memoryBase libpf.SymbolValue,
 		return libpf.SymbolValueInvalid, fmt.Errorf("unable to read '%s': %v",
 			symbolName, err)
 	}
-	value, err := decodeStubArgumentWrapper(code, sym.Address, memoryBase)
+	var value libpf.SymbolValue
+	switch ef.Machine {
+	case elf.EM_AARCH64:
+		value, err = decodeStubArgumentARM64(code, memoryBase), nil
+	case elf.EM_X86_64:
+		value, err = decodeStubArgumentAMD64(code, uint64(sym.Address), uint64(memoryBase))
+	default:
+		return libpf.SymbolValueInvalid, fmt.Errorf("unsupported arch %s", ef.Machine.String())
+	}
 	// Sanity check the value range and alignment
 	if err != nil || value%4 != 0 {


@ -1,59 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package libpf // import "go.opentelemetry.io/ebpf-profiler/libpf"
import (
"bufio"
"fmt"
"os"
"regexp"
lru "github.com/elastic/go-freelru"
log "github.com/sirupsen/logrus"
)
var (
cgroupv2PathPattern = regexp.MustCompile(`0:.*?:(.*)`)
)
// LookupCgroupv2 returns the cgroupv2 ID for pid.
func LookupCgroupv2(cgrouplru *lru.SyncedLRU[PID, string], pid PID) (string, error) {
id, ok := cgrouplru.Get(pid)
if ok {
return id, nil
}
// Slow path
f, err := os.Open(fmt.Sprintf("/proc/%d/cgroup", pid))
if err != nil {
return "", err
}
defer f.Close()
var genericCgroupv2 string
scanner := bufio.NewScanner(f)
buf := make([]byte, 512)
// Providing a predefined buffer overrides the internal buffer that Scanner uses (4096 bytes).
// We can do that and also set a maximum allocation size on the following call.
// With a maximum of 4096 characters path in the kernel, 8192 should be fine here. We don't
// expect lines in /proc/<PID>/cgroup to be longer than that.
scanner.Buffer(buf, 8192)
var pathParts []string
for scanner.Scan() {
line := scanner.Text()
pathParts = cgroupv2PathPattern.FindStringSubmatch(line)
if pathParts == nil {
log.Debugf("Could not extract cgroupv2 path from line: %s", line)
continue
}
genericCgroupv2 = pathParts[1]
break
}
// Cache the cgroupv2 information.
// To avoid busy lookups, also empty cgroupv2 information is cached.
cgrouplru.Add(pid, genericCgroupv2)
return genericCgroupv2, nil
}


@ -115,7 +115,6 @@ func mainWithExitCode() exitCode {
ExecutablesCacheElements: 16384, ExecutablesCacheElements: 16384,
// Next step: Calculate FramesCacheElements from numCores and samplingRate. // Next step: Calculate FramesCacheElements from numCores and samplingRate.
FramesCacheElements: 131072, FramesCacheElements: 131072,
CGroupCacheElements: 1024,
SamplesPerSecond: cfg.SamplesPerSecond, SamplesPerSecond: cfg.SamplesPerSecond,
}) })
if err != nil { if err != nil {


@ -38,8 +38,10 @@ var errEmptyEntry = errors.New("FDE/CIE empty")
// ehframeHooks interface provides hooks for filtering and debugging eh_frame parsing // ehframeHooks interface provides hooks for filtering and debugging eh_frame parsing
type ehframeHooks interface { type ehframeHooks interface {
// fdeUnsorted is called if FDE entries from unsorted area are found.
fdeUnsorted()
// fdeHook is called for each FDE. Returns false if the FDE should be filtered out. // fdeHook is called for each FDE. Returns false if the FDE should be filtered out.
fdeHook(cie *cieInfo, fde *fdeInfo) bool fdeHook(cie *cieInfo, fde *fdeInfo, deltas *sdtypes.StackDeltaArray) bool
// deltaHook is called for each stack delta found // deltaHook is called for each stack delta found
deltaHook(ip uintptr, regs *vmRegs, delta sdtypes.StackDelta) deltaHook(ip uintptr, regs *vmRegs, delta sdtypes.StackDelta)
// golangHook is called if .gopclntab is found to report its coverage // golangHook is called if .gopclntab is found to report its coverage
@ -369,7 +371,6 @@ type fdeInfo struct {
ciePos uint64 ciePos uint64
ipLen uintptr ipLen uintptr
ipStart uintptr ipStart uintptr
sorted bool
} }
const ( const (
@ -976,14 +977,12 @@ func (ee *elfExtractor) parseFDE(r *reader, ef *pfelf.File, ipStart uintptr,
return uintptr(fdeLen), err return uintptr(fdeLen), err
} }
st := state{cie: cie, cur: cie.initialState} st := state{cie: cie, cur: cie.initialState}
fde.sorted = sorted
// Process the FDE opcodes // Process the FDE opcodes
if !ee.hooks.fdeHook(st.cie, &fde) { if !ee.hooks.fdeHook(st.cie, &fde, ee.deltas) {
return uintptr(fdeLen), nil return uintptr(fdeLen), nil
} }
st.loc = fde.ipStart st.loc = fde.ipStart
if st.cie.isSignalHandler || isSignalTrampoline(ee.file, &fde) { if st.cie.isSignalHandler || isSignalTrampoline(ee.file, &fde) {
delta := sdtypes.StackDelta{ delta := sdtypes.StackDelta{
Address: uint64(st.loc), Address: uint64(st.loc),
@ -1006,6 +1005,7 @@ func (ee *elfExtractor) parseFDE(r *reader, ef *pfelf.File, ipStart uintptr,
} }
ee.hooks.deltaHook(ip, &st.cur, delta) ee.hooks.deltaHook(ip, &st.cur, delta)
ee.deltas.AddEx(delta, sorted) ee.deltas.AddEx(delta, sorted)
sorted = true
hint = sdtypes.UnwindHintNone hint = sdtypes.UnwindHintNone
} }
@ -1021,17 +1021,12 @@ func (ee *elfExtractor) parseFDE(r *reader, ef *pfelf.File, ipStart uintptr,
} }
} }
info := sdtypes.UnwindInfoInvalid
if ef.Entry == uint64(fde.ipStart+fde.ipLen) {
info = sdtypes.UnwindInfoStop
}
// Add end-of-function stop delta. This might later get removed if there is // Add end-of-function stop delta. This might later get removed if there is
// another function starting on this address. // another function starting on this address.
ee.deltas.AddEx(sdtypes.StackDelta{ ee.deltas.AddEx(sdtypes.StackDelta{
Address: uint64(fde.ipStart + fde.ipLen), Address: uint64(fde.ipStart + fde.ipLen),
Hints: sdtypes.UnwindHintGap, Hints: sdtypes.UnwindHintGap,
Info: info, Info: sdtypes.UnwindInfoInvalid,
}, sorted) }, sorted)
return uintptr(fdeLen), nil return uintptr(fdeLen), nil
@ -1178,16 +1173,13 @@ func (ee *elfExtractor) walkBinSearchTable(ef *pfelf.File, ehFrameHdrSec *elfReg
if err != nil { if err != nil {
return err return err
} }
r := t.entryAt(0)
for f := uintptr(0); f < t.fdeCount; f++ { for f := uintptr(0); f < t.fdeCount; f++ {
var ( ipStart, fr, entryErr := t.decodeEntry(&r)
ipStart uintptr
fr reader
)
ipStart, fr, entryErr := t.parseHdrEntry()
if entryErr != nil { if entryErr != nil {
return err return entryErr
} }
_, err = ee.parseFDE(&fr, ef, ipStart, t.cieCache, true) _, err = ee.parseFDE(&fr, ef, ipStart, t.cieCache, f > 0)
if err != nil && !errors.Is(err, errEmptyEntry) { if err != nil && !errors.Is(err, errEmptyEntry) {
return fmt.Errorf("failed to parse FDE: %v", err) return fmt.Errorf("failed to parse FDE: %v", err)
} }
@ -1204,6 +1196,8 @@ func (ee *elfExtractor) walkFDEs(ef *pfelf.File, ehFrameSec *elfRegion, debugFra
return err return err
} }
ee.hooks.fdeUnsorted()
// Walk the section, and process each FDE it contains // Walk the section, and process each FDE it contains
var entryLen uintptr var entryLen uintptr
for f := uintptr(0); f < uintptr(len(ehFrameSec.data)); f += entryLen { for f := uintptr(0); f < uintptr(len(ehFrameSec.data)); f += entryLen {


@ -8,11 +8,13 @@ package elfunwindinfo // import "go.opentelemetry.io/ebpf-profiler/nativeunwind/
// can be taken into account regardless of the target build platform. // can be taken into account regardless of the target build platform.
import ( import (
"bytes"
"debug/elf" "debug/elf"
"fmt" "fmt"
sdtypes "go.opentelemetry.io/ebpf-profiler/nativeunwind/stackdeltatypes" sdtypes "go.opentelemetry.io/ebpf-profiler/nativeunwind/stackdeltatypes"
"go.opentelemetry.io/ebpf-profiler/support" "go.opentelemetry.io/ebpf-profiler/support"
"golang.org/x/arch/arm64/arm64asm"
) )
const ( const (
@ -160,3 +162,39 @@ func (regs *vmRegs) getUnwindInfoARM() sdtypes.UnwindInfo {
return info return info
} }
func detectEntryARM(code []byte) int {
// Refer to test cases for the seen assembly dumps.
// On both glibc and musl there is no FDE for the entry code. This code tries
// to match both. The main difference is that glibc uses BL (Branch with Link),
// i.e. a proper function call that maintains the frame, whereas musl uses B
// (Branch), i.e. a plain jump, so the entry is not seen on traces.
// Match the prolog for clearing LR/FP
if len(code) < 32 ||
!bytes.Equal(code[:8], []byte{0x1d, 0x00, 0x80, 0xd2, 0x1e, 0x00, 0x80, 0xd2}) {
return 0
}
// Search for the second B or BL
numBranch := 0
for pos := 8; pos < len(code); pos += 4 {
inst, err := arm64asm.Decode(code[pos:])
if err != nil {
return 0
}
switch inst.Op {
case arm64asm.ADD, arm64asm.ADRP, arm64asm.AND, arm64asm.LDR,
arm64asm.MOV, arm64asm.MOVK, arm64asm.MOVZ:
// nop, allowed instruction
case arm64asm.B, arm64asm.BL:
numBranch++
if numBranch == 2 {
return pos + 4
}
default:
return 0
}
}
return 0
}


@ -20,7 +20,10 @@ type ehtester struct {
found int found int
} }
func (e *ehtester) fdeHook(cie *cieInfo, fde *fdeInfo) bool { func (e *ehtester) fdeUnsorted() {
}
func (e *ehtester) fdeHook(cie *cieInfo, fde *fdeInfo, _ *sdtypes.StackDeltaArray) bool {
e.t.Logf("FDE ciePos %x, ip %x...%x, ipLen %d (enc %x, cf %d, df %d, ra %d)", e.t.Logf("FDE ciePos %x, ip %x...%x, ipLen %d (enc %x, cf %d, df %d, ra %d)",
fde.ciePos, fde.ipStart, fde.ipStart+fde.ipLen, fde.ipLen, fde.ciePos, fde.ipStart, fde.ipStart+fde.ipLen, fde.ipLen,
cie.enc, cie.codeAlign, cie.dataAlign, cie.regRA) cie.enc, cie.codeAlign, cie.dataAlign, cie.regRA)


@ -8,11 +8,13 @@ package elfunwindinfo // import "go.opentelemetry.io/ebpf-profiler/nativeunwind/
// can be taken into account regardless of the target build platform. // can be taken into account regardless of the target build platform.
import ( import (
"bytes"
"debug/elf" "debug/elf"
"fmt" "fmt"
sdtypes "go.opentelemetry.io/ebpf-profiler/nativeunwind/stackdeltatypes" sdtypes "go.opentelemetry.io/ebpf-profiler/nativeunwind/stackdeltatypes"
"go.opentelemetry.io/ebpf-profiler/support" "go.opentelemetry.io/ebpf-profiler/support"
"golang.org/x/arch/x86/x86asm"
) )
const ( const (
@ -165,3 +167,34 @@ func (regs *vmRegs) getUnwindInfoX86() sdtypes.UnwindInfo {
} }
return info return info
} }
func detectEntryX86(code []byte) int {
// Refer to test cases for the actual assembly code seen.
// On glibc, the entry has an FDE. No fixup is needed.
// On musl, the entry has no FDE, or possibly has an FDE covering only part of it.
// Detect the musl case and return the length of the entry stub.
// Match the assembly exactly except the LEA call offset
if len(code) < 32 ||
!bytes.Equal(code[:9], []byte{0x48, 0x31, 0xed, 0x48, 0x89, 0xe7, 0x48, 0x8d, 0x35}) ||
!bytes.Equal(code[13:22], []byte{0x48, 0x83, 0xe4, 0xf0, 0xe8, 0x00, 0x00, 0x00, 0x00}) {
return 0
}
// Decode the second portion, allowing only whitelisted opcodes until the JMP is found
for pos := 22; pos < len(code); {
inst, err := x86asm.Decode(code[pos:], 64)
if err != nil {
return 0
}
switch inst.Op {
case x86asm.MOV, x86asm.LEA, x86asm.XOR:
pos += inst.Len
case x86asm.JMP:
return pos + inst.Len
default:
return 0
}
}
return 0
}


@ -21,17 +21,21 @@ type FDE struct {
} }
type EhFrameTable struct { type EhFrameTable struct {
r reader fdeCount uintptr
hdr *ehFrameHdr tableStartPos uintptr
fdeCount uintptr tableEntrySize int
tableStartPos uintptr tableEnc encoding
ehFrameSec *elfRegion ehFrameHdrSec *elfRegion
efm elf.Machine ehFrameSec *elfRegion
cieCache *lru.LRU[uint64, *cieInfo] efm elf.Machine
// cieCache holds the CIEs decoded so far. This is the only piece that is
// not concurrent safe, and could be made into a sync lru if needed.
cieCache *lru.LRU[uint64, *cieInfo]
} }
// NewEhFrameTable creates a new EhFrameTable from the given pfelf.File // NewEhFrameTable creates a new EhFrameTable from the given pfelf.File.
// The returned EhFrameTable must not be used concurrently // The returned EhFrameTable is not concurrent safe.
func NewEhFrameTable(ef *pfelf.File) (*EhFrameTable, error) { func NewEhFrameTable(ef *pfelf.File) (*EhFrameTable, error) {
ehFrameHdrSec, ehFrameSec, err := findEhSections(ef) ehFrameHdrSec, ehFrameSec, err := findEhSections(ef)
if err != nil { if err != nil {
@ -49,15 +53,14 @@ func NewEhFrameTable(ef *pfelf.File) (*EhFrameTable, error) {
// LookupFDE performs a binary search in .eh_frame_hdr for an FDE covering the given addr. // LookupFDE performs a binary search in .eh_frame_hdr for an FDE covering the given addr.
func (e *EhFrameTable) LookupFDE(addr libpf.Address) (FDE, error) { func (e *EhFrameTable) LookupFDE(addr libpf.Address) (FDE, error) {
idx := sort.Search(e.count(), func(idx int) bool { idx := sort.Search(e.count(), func(idx int) bool {
e.position(idx) r := e.entryAt(idx)
ipStart, _, _ := e.parseHdrEntry() // ignoring error, check bounds later ipStart, _ := r.ptr(e.tableEnc) // ignoring error, check bounds later
return ipStart > uintptr(addr) return ipStart > uintptr(addr)
}) })
if idx <= 0 { if idx <= 0 {
return FDE{}, errors.New("FDE not found") return FDE{}, errors.New("FDE not found")
} }
e.position(idx - 1) ipStart, fr, entryErr := e.decodeEntryAt(idx - 1)
ipStart, fr, entryErr := e.parseHdrEntry()
if entryErr != nil { if entryErr != nil {
return FDE{}, entryErr return FDE{}, entryErr
} }
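As an aside for orientation (not part of the diff): a minimal usage sketch of this lookup API from another package, assuming an already opened *pfelf.File. Only NewEhFrameTable and LookupFDE shown above are relied on; everything else (ef, pc, the surrounding error handling) is illustrative.
    ehf, err := elfunwindinfo.NewEhFrameTable(ef) // ef is an open *pfelf.File
    if err != nil {
        return err
    }
    // Binary search in .eh_frame_hdr; returns an error if no FDE covers pc.
    fde, err := ehf.LookupFDE(libpf.Address(pc))
    if err != nil {
        return err
    }
    _ = fde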
@ -78,23 +81,30 @@ func (e *EhFrameTable) LookupFDE(addr libpf.Address) (FDE, error) {
func newEhFrameTableFromSections(ehFrameHdrSec *elfRegion, func newEhFrameTableFromSections(ehFrameHdrSec *elfRegion,
ehFrameSec *elfRegion, efm elf.Machine, ehFrameSec *elfRegion, efm elf.Machine,
) (hp *EhFrameTable, err error) { ) (hp *EhFrameTable, err error) {
hp = &EhFrameTable{ hdr := (*ehFrameHdr)(unsafe.Pointer(&ehFrameHdrSec.data[0]))
hdr: (*ehFrameHdr)(unsafe.Pointer(&ehFrameHdrSec.data[0])),
r: ehFrameHdrSec.reader(unsafe.Sizeof(ehFrameHdr{}), false), r := ehFrameHdrSec.reader(unsafe.Sizeof(ehFrameHdr{}), false)
if _, err = r.ptr(hdr.ehFramePtrEnc); err != nil {
return nil, err
} }
if _, err = hp.r.ptr(hp.hdr.ehFramePtrEnc); err != nil { fdeCount, err := r.ptr(hdr.fdeCountEnc)
return hp, err if err != nil {
return nil, err
} }
if hp.fdeCount, err = hp.r.ptr(hp.hdr.fdeCountEnc); err != nil { cieCache, err := lru.New[uint64, *cieInfo](cieCacheSize, hashUint64)
return hp, err if err != nil {
return nil, err
} }
if hp.cieCache, err = lru.New[uint64, *cieInfo](cieCacheSize, hashUint64); err != nil { return &EhFrameTable{
return hp, err fdeCount: fdeCount,
} tableStartPos: r.pos,
hp.ehFrameSec = ehFrameSec tableEntrySize: formatLen(hdr.tableEnc) * 2,
hp.tableStartPos = hp.r.pos tableEnc: hdr.tableEnc,
hp.efm = efm ehFrameHdrSec: ehFrameHdrSec,
return hp, nil ehFrameSec: ehFrameSec,
efm: efm,
cieCache: cieCache,
}, nil
} }
// returns FDE count // returns FDE count
@ -102,21 +112,19 @@ func (e *EhFrameTable) count() int {
return int(e.fdeCount) return int(e.fdeCount)
} }
// position adjusts the reader position to point at the table entry with idx index // entryAt returns a reader for the binary search table at given index.
func (e *EhFrameTable) position(idx int) { func (e *EhFrameTable) entryAt(idx int) reader {
tableEntrySize := formatLen(e.hdr.tableEnc) * 2 return e.ehFrameHdrSec.reader(e.tableStartPos+uintptr(e.tableEntrySize*idx), false)
e.r.pos = e.tableStartPos + uintptr(tableEntrySize*idx)
} }
// parseHdrEntry parsers an entry in the .eh_frame_hdr binary search table and the corresponding // decodeEntry decodes one entry of the binary search table from the reader.
// entry in the .eh_frame section func (e *EhFrameTable) decodeEntry(r *reader) (ipStart uintptr, fr reader, err error) {
func (e *EhFrameTable) parseHdrEntry() (ipStart uintptr, fr reader, err error) { ipStart, err = r.ptr(e.tableEnc)
ipStart, err = e.r.ptr(e.hdr.tableEnc)
if err != nil { if err != nil {
return 0, reader{}, err return 0, reader{}, err
} }
var fdeAddr uintptr var fdeAddr uintptr
fdeAddr, err = e.r.ptr(e.hdr.tableEnc) fdeAddr, err = r.ptr(e.tableEnc)
if err != nil { if err != nil {
return 0, reader{}, err return 0, reader{}, err
} }
@ -125,10 +133,15 @@ func (e *EhFrameTable) parseHdrEntry() (ipStart uintptr, fr reader, err error) {
fdeAddr, e.ehFrameSec.vaddr) fdeAddr, e.ehFrameSec.vaddr)
} }
fr = e.ehFrameSec.reader(fdeAddr-e.ehFrameSec.vaddr, false) fr = e.ehFrameSec.reader(fdeAddr-e.ehFrameSec.vaddr, false)
return ipStart, fr, err return ipStart, fr, err
} }
// decodeEntryAt decodes the entry from given index.
func (e *EhFrameTable) decodeEntryAt(idx int) (ipStart uintptr, fr reader, err error) {
r := e.entryAt(idx)
return e.decodeEntry(&r)
}
// formatLen returns the length of a field encoded with enc encoding. // formatLen returns the length of a field encoded with enc encoding.
func formatLen(enc encoding) int { func formatLen(enc encoding) int {
switch enc & encFormatMask { switch enc & encFormatMask {


@ -4,14 +4,11 @@
package elfunwindinfo package elfunwindinfo
import ( import (
"bytes"
"encoding/base64"
"fmt" "fmt"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.opentelemetry.io/ebpf-profiler/libpf" "go.opentelemetry.io/ebpf-profiler/libpf"
"go.opentelemetry.io/ebpf-profiler/libpf/pfelf"
) )
func TestLookupFDE(t *testing.T) { func TestLookupFDE(t *testing.T) {
@ -51,9 +48,7 @@ func TestLookupFDE(t *testing.T) {
{at: 0x1000, expected: FDE{}}, {at: 0x1000, expected: FDE{}},
{at: 0xcafe000, expected: FDE{}}, {at: 0xcafe000, expected: FDE{}},
} }
buffer, err := base64.StdEncoding.DecodeString(usrBinVolname) elf, err := getUsrBinPfelf()
require.NoError(t, err)
elf, err := pfelf.NewFile(bytes.NewReader(buffer), 0, false)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { t.Cleanup(func() {
err = elf.Close() err = elf.Close()


@ -27,6 +27,13 @@ type extractionFilter struct {
// should be excluded from .eh_frame extraction. // should be excluded from .eh_frame extraction.
start, end uintptr start, end uintptr
// entryStart and entryEnd contain the virtual address range of the entry
// stub code for which stack deltas are synthesized.
entryStart, entryEnd uintptr
// entryPending is true if the entry stub stack delta has not been added.
entryPending bool
// ehFrames is true if .eh_frame stack deltas are found // ehFrames is true if .eh_frame stack deltas are found
ehFrames bool ehFrames bool
@ -39,23 +46,47 @@ type extractionFilter struct {
var _ ehframeHooks = &extractionFilter{} var _ ehframeHooks = &extractionFilter{}
// addEntryDeltas generates the entry stub stack deltas.
func (f *extractionFilter) addEntryDeltas(deltas *sdtypes.StackDeltaArray) {
deltas.AddEx(sdtypes.StackDelta{
Address: uint64(f.entryStart),
Hints: sdtypes.UnwindHintKeep,
Info: sdtypes.UnwindInfoStop,
}, !f.unsortedFrames)
deltas.Add(sdtypes.StackDelta{
Address: uint64(f.entryEnd),
Info: sdtypes.UnwindInfoInvalid,
})
f.ehFrames = true
f.entryPending = false
}
func (f *extractionFilter) fdeUnsorted() {
f.unsortedFrames = true
}
// fdeHook filters out .eh_frame data that is superseded by .gopclntab data // fdeHook filters out .eh_frame data that is superseded by .gopclntab data
func (f *extractionFilter) fdeHook(_ *cieInfo, fde *fdeInfo) bool { func (f *extractionFilter) fdeHook(_ *cieInfo, fde *fdeInfo, deltas *sdtypes.StackDeltaArray) bool {
if !fde.sorted { // Drop FDEs inside the gopclntab area
// Seems .debug_frame sometimes has broken FDEs for zero address if f.start <= fde.ipStart && fde.ipStart+fde.ipLen <= f.end {
if fde.ipStart == 0 { return false
return false
}
f.unsortedFrames = true
} }
// Parse functions outside the gopclntab area // Seems .debug_frame sometimes has broken FDEs for zero address
if fde.ipStart < f.start || fde.ipStart > f.end { if f.unsortedFrames && fde.ipStart == 0 {
// This is here to set the flag only when we have collected at least return false
// one stack delta from the relevant source.
f.ehFrames = true
return true
} }
return false // Insert entry stub deltas to their sorted position.
if f.entryPending && fde.ipStart >= f.entryStart {
f.addEntryDeltas(deltas)
}
// Drop FDEs overlapping with the detected entry stub.
if fde.ipStart+fde.ipLen > f.entryStart && f.entryEnd >= fde.ipStart {
return false
}
// This is here to set the flag only when we have collected at least
// one stack delta from the relevant source.
f.ehFrames = true
return true
} }
// deltaHook is a stub to satisfy ehframeHooks interface // deltaHook is a stub to satisfy ehframeHooks interface
@ -85,9 +116,7 @@ type elfExtractor struct {
allowGenericRegs bool allowGenericRegs bool
} }
func (ee *elfExtractor) extractDebugDeltas() error { func (ee *elfExtractor) extractDebugDeltas() (err error) {
var err error
// Attempt finding the associated debug information file with .debug_frame, // Attempt finding the associated debug information file with .debug_frame,
// but ignore errors if it's not available; many production systems // but ignore errors if it's not available; many production systems
// do not intentionally have debug packages installed. // do not intentionally have debug packages installed.
@ -115,6 +144,32 @@ func Extract(filename string, interval *sdtypes.IntervalData) error {
return ExtractELF(elfRef, interval) return ExtractELF(elfRef, interval)
} }
// detectEntryCode matches the machine code against known entry stubs and returns the stub length.
func detectEntryCode(machine elf.Machine, code []byte) int {
switch machine {
case elf.EM_X86_64:
return detectEntryX86(code)
case elf.EM_AARCH64:
return detectEntryARM(code)
default:
return 0
}
}
// detectEntry reads the code at the ELF DSO entry point and matches it against known stubs.
func detectEntry(ef *pfelf.File) int {
if ef.Entry == 0 {
return 0
}
// Typically 52-80 bytes, allow for a bit of variance
code, err := ef.VirtualMemory(int64(ef.Entry), 128, 128)
if err != nil {
return 0
}
return detectEntryCode(ef.Machine, code)
}
// ExtractELF takes a pfelf.Reference and provides the stack delta // ExtractELF takes a pfelf.Reference and provides the stack delta
// intervals for it in the interval parameter. // intervals for it in the interval parameter.
func ExtractELF(elfRef *pfelf.Reference, interval *sdtypes.IntervalData) error { func ExtractELF(elfRef *pfelf.Reference, interval *sdtypes.IntervalData) error {
@ -122,7 +177,13 @@ func ExtractELF(elfRef *pfelf.Reference, interval *sdtypes.IntervalData) error {
if err != nil { if err != nil {
return err return err
} }
return extractFile(elfFile, elfRef, interval)
}
// extractFile extracts the elfFile stack deltas and uses the optional elfRef to resolve
// debug link references if needed.
func extractFile(elfFile *pfelf.File, elfRef *pfelf.Reference,
interval *sdtypes.IntervalData) (err error) {
// Parse the stack deltas from the ELF // Parse the stack deltas from the ELF
filter := extractionFilter{} filter := extractionFilter{}
deltas := sdtypes.StackDeltaArray{} deltas := sdtypes.StackDeltaArray{}
@ -134,6 +195,12 @@ func ExtractELF(elfRef *pfelf.Reference, interval *sdtypes.IntervalData) error {
allowGenericRegs: isLibCrypto(elfFile), allowGenericRegs: isLibCrypto(elfFile),
} }
if entryLength := detectEntry(elfFile); entryLength != 0 {
filter.entryStart = uintptr(elfFile.Entry)
filter.entryEnd = filter.entryStart + uintptr(entryLength)
filter.entryPending = true
}
if err = ee.parseGoPclntab(); err != nil { if err = ee.parseGoPclntab(); err != nil {
return fmt.Errorf("failure to parse golang stack deltas: %v", err) return fmt.Errorf("failure to parse golang stack deltas: %v", err)
} }
@ -143,13 +210,16 @@ func ExtractELF(elfRef *pfelf.Reference, interval *sdtypes.IntervalData) error {
if err = ee.parseDebugFrame(elfFile); err != nil { if err = ee.parseDebugFrame(elfFile); err != nil {
return fmt.Errorf("failure to parse debug_frame stack deltas: %v", err) return fmt.Errorf("failure to parse debug_frame stack deltas: %v", err)
} }
if len(deltas) < numIntervalsToOmitDebugLink { if ee.ref != nil && len(deltas) < numIntervalsToOmitDebugLink {
// There is only few stack deltas. See if we find the .gnu_debuglink // There is only few stack deltas. See if we find the .gnu_debuglink
// debug information for additional .debug_frame stack deltas. // debug information for additional .debug_frame stack deltas.
if err = ee.extractDebugDeltas(); err != nil { if err = ee.extractDebugDeltas(); err != nil {
return fmt.Errorf("failure to parse debug stack deltas: %v", err) return fmt.Errorf("failure to parse debug stack deltas: %v", err)
} }
} }
if filter.entryPending {
filter.addEntryDeltas(ee.deltas)
}
// If multiple sources were merged, sort them. // If multiple sources were merged, sort them.
if filter.unsortedFrames || (filter.ehFrames && filter.golangFrames) { if filter.unsortedFrames || (filter.ehFrames && filter.golangFrames) {
@ -157,9 +227,12 @@ func ExtractELF(elfRef *pfelf.Reference, interval *sdtypes.IntervalData) error {
if deltas[i].Address != deltas[j].Address { if deltas[i].Address != deltas[j].Address {
return deltas[i].Address < deltas[j].Address return deltas[i].Address < deltas[j].Address
} }
// Make sure that the potential duplicate stop delta is sorted // Make sure that the potential duplicate "invalid" delta is sorted
// after the real delta. // after the real delta so the proper delta is removed in the next stage.
return deltas[i].Info.Opcode < deltas[j].Info.Opcode if deltas[i].Info.Opcode != deltas[j].Info.Opcode {
return deltas[i].Info.Opcode < deltas[j].Info.Opcode
}
return deltas[i].Info.Param < deltas[j].Info.Param
}) })
maxDelta := 0 maxDelta := 0


@ -4,11 +4,14 @@
package elfunwindinfo package elfunwindinfo
import ( import (
"bytes"
"debug/elf"
"encoding/base64" "encoding/base64"
"os"
"testing" "testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.opentelemetry.io/ebpf-profiler/libpf/pfelf"
sdtypes "go.opentelemetry.io/ebpf-profiler/nativeunwind/stackdeltatypes" sdtypes "go.opentelemetry.io/ebpf-profiler/nativeunwind/stackdeltatypes"
) )
@ -133,26 +136,166 @@ var firstDeltas = sdtypes.StackDeltaArray{
{Address: 0x8e7, Info: deltaRSP(80, 16)}, {Address: 0x8e7, Info: deltaRSP(80, 16)},
} }
func TestExtractStackDeltasFromFilename(t *testing.T) { func getUsrBinPfelf() (*pfelf.File, error) {
buffer, err := base64.StdEncoding.DecodeString(usrBinVolname) buffer, err := base64.StdEncoding.DecodeString(usrBinVolname)
if err != nil {
return nil, err
}
return pfelf.NewFile(bytes.NewReader(buffer), 0, false)
}
func TestExtractStackDeltasFromFilename(t *testing.T) {
elf, err := getUsrBinPfelf()
require.NoError(t, err) require.NoError(t, err)
// Write the executable file to a temporary file, and the symbol
// file, too.
exeFile, err := os.CreateTemp("/tmp", "dwarf_extract_elf_")
require.NoError(t, err)
defer exeFile.Close()
_, err = exeFile.Write(buffer)
require.NoError(t, err)
err = exeFile.Sync()
require.NoError(t, err)
defer os.Remove(exeFile.Name())
filename := exeFile.Name()
var data sdtypes.IntervalData var data sdtypes.IntervalData
err = Extract(filename, &data) err = extractFile(elf, nil, &data)
require.NoError(t, err) require.NoError(t, err)
for _, delta := range data.Deltas { for _, delta := range data.Deltas {
t.Logf("%#v", delta) t.Logf("%#v", delta)
} }
require.Equal(t, data.Deltas[:len(firstDeltas)], firstDeltas) require.Equal(t, data.Deltas[:len(firstDeltas)], firstDeltas)
} }
func TestEntryDetection(t *testing.T) {
testCases := map[string]struct {
machine elf.Machine
code []byte
len int
}{
"musl 1.2.5 / x86_64": {
machine: elf.EM_X86_64,
code: []byte{
// 1. assembly code from crt_arch.h (no FDE at all):
// 48 31 ed xor %rbp,%rbp
// 48 89 e7 mov %rsp,%rdi
// 48 8d 35 b2 c2 00 00 lea 0xc2b2(%rip),%rsi
// 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
// e8 00 00 00 00 call 0x4587
// 2. followed with C code from [r]crt1.c (maybe with FDE):
// 8b 37 mov (%rdi),%esi
// 48 8d 57 08 lea 0x8(%rdi),%rdx
// 4c 8d 05 d0 62 00 00 lea 0x62d0(%rip),%r8
// 45 31 c9 xor %r9d,%r9d
// 48 8d 0d 62 fa ff ff lea -0x59e(%rip),%rcx
// 48 8d 3d 8b fa ff ff lea -0x575(%rip),%rdi
// e9 76 fa ff ff jmp 0x4020 <__libc_start_main@plt>
0x48, 0x31, 0xed, 0x48, 0x89, 0xe7, 0x48, 0x8d,
0x35, 0xb2, 0xc2, 0x00, 0x00, 0x48, 0x83, 0xe4,
0xf0, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x8b, 0x37,
0x48, 0x8d, 0x57, 0x08, 0x4c, 0x8d, 0x05, 0xd0,
0x62, 0x00, 0x00, 0x45, 0x31, 0xc9, 0x48, 0x8d,
0x0d, 0x62, 0xfa, 0xff, 0xff, 0x48, 0x8d, 0x3d,
0x8b, 0xfa, 0xff, 0xff, 0xe9, 0x76, 0xfa, 0xff,
0xff,
},
len: 57,
},
"musl 1.2.5 / arm64": {
machine: elf.EM_AARCH64,
code: []byte{
// 1. assembly code from crt_arch.h (no FDE):
// mov x29, #0x0
// mov x30, #0x0
// mov x0, sp
// adrp x1, 0x1f000
// add x1, x1, #0x7d0
// and sp, x0, #0xfffffffffffffff0
// b 0x413c
// 2. followed with C code from [r]crt1.c (no FDE):
// mov x2, x0
// mov x5, #0x0
// adrp x4, 0x1f000
// ldr x4, [x4, #3928]
// ldr x1, [x2], #8
// adrp x3, 0x1f000
// ldr x3, [x3, #4080]
// adrp x0, 0x1f000
// ldr x0, [x0, #4072]
// b 0x35a0 <__libc_start_main@plt>
0x1d, 0x00, 0x80, 0xd2, 0x1e, 0x00, 0x80, 0xd2,
0xe0, 0x03, 0x00, 0x91, 0xc1, 0x00, 0x00, 0xf0,
0x21, 0x40, 0x1f, 0x91, 0x1f, 0xec, 0x9c, 0x92,
0x01, 0x00, 0x00, 0x14, 0xe2, 0x03, 0x00, 0xaa,
0x05, 0x00, 0x80, 0xd2, 0xc3, 0x00, 0x00, 0xf0,
0x84, 0xac, 0x47, 0xf9, 0x41, 0x84, 0x40, 0xf8,
0xc3, 0x00, 0x00, 0xf0, 0x63, 0xf8, 0x47, 0xf9,
0xc0, 0x00, 0x00, 0xf0, 0x00, 0xf4, 0x47, 0xf9,
0x10, 0xfd, 0xff, 0x17,
},
len: 68,
},
"glibc 2.31 / arm64": {
machine: elf.EM_AARCH64,
code: []byte{
// mov x29, #0x0
// mov x30, #0x0
// mov x5, x0
// ldr x1, [sp]
// add x2, sp, #0x8
// mov x6, sp
// adrp x0, 0x11000
// ldr x0, [x0, #4064]
// adrp x3, 0x11000
// ldr x3, [x3, #4056]
// adrp x4, 0x11000
// ldr x4, [x4, #4008]
// bl 0xa90 <__libc_start_main@plt>
// bl 0xae0 <abort@plt>
0x1d, 0x00, 0x80, 0xd2, 0x1e, 0x00, 0x80, 0xd2,
0xe5, 0x03, 0x00, 0xaa, 0xe1, 0x03, 0x40, 0xf9,
0xe2, 0x23, 0x00, 0x91, 0xe6, 0x03, 0x00, 0x91,
0x80, 0x00, 0x00, 0xb0, 0x00, 0xf0, 0x47, 0xf9,
0x83, 0x00, 0x00, 0xb0, 0x63, 0xec, 0x47, 0xf9,
0x84, 0x00, 0x00, 0xb0, 0x84, 0xd4, 0x47, 0xf9,
0xab, 0xff, 0xff, 0x97, 0xbe, 0xff, 0xff, 0x97,
},
len: 56,
},
"glibc 2.35 / arm64": {
machine: elf.EM_AARCH64,
code: []byte{
// mov x29, #0x0
// mov x30, #0x0
// mov x5, x0
// ldr x1, [sp]
// add x2, sp, #0x8
// mov x6, sp
// movz x0, #0x0, lsl #48
// movk x0, #0x0, lsl #32
// movk x0, #0xb9, lsl #16
// movk x0, #0x1f90
// movz x3, #0x0, lsl #48
// movk x3, #0x0, lsl #32
// movk x3, #0x236, lsl #16
// movk x3, #0x65d0
// movz x4, #0x0, lsl #48
// movk x4, #0x0, lsl #32
// movk x4, #0x236, lsl #16
// movk x4, #0x6650
// bl 0xb614e0 <__libc_start_main@plt>
// bl 0xb61460 <abort@plt>
0x1d, 0x00, 0x80, 0xd2, 0x1e, 0x00, 0x80, 0xd2,
0xe5, 0x03, 0x00, 0xaa, 0xe1, 0x03, 0x40, 0xf9,
0xe2, 0x23, 0x00, 0x91, 0xe6, 0x03, 0x00, 0x91,
0x00, 0x00, 0xe0, 0xd2, 0x00, 0x00, 0xc0, 0xf2,
0x20, 0x17, 0xa0, 0xf2, 0x00, 0xf2, 0x83, 0xf2,
0x03, 0x00, 0xe0, 0xd2, 0x03, 0x00, 0xc0, 0xf2,
0xc3, 0x46, 0xa0, 0xf2, 0x03, 0xba, 0x8c, 0xf2,
0x04, 0x00, 0xe0, 0xd2, 0x04, 0x00, 0xc0, 0xf2,
0xc4, 0x46, 0xa0, 0xf2, 0x04, 0xca, 0x8c, 0xf2,
0x7d, 0x1c, 0xff, 0x97, 0x5c, 0x1c, 0xff, 0x97,
},
len: 80,
},
}
for name, test := range testCases {
name := name
test := test
t.Run(name, func(t *testing.T) {
entryLen := detectEntryCode(test.machine, test.code)
assert.Equal(t, test.len, entryLen)
})
}
}


@ -172,8 +172,10 @@ func parseMappings(mapsFile io.Reader) ([]Mapping, uint32, error) {
path = VdsoPathName path = VdsoPathName
device = 0 device = 0
inode = vdsoInode inode = vdsoInode
} else if path == "" {
// This is an anonymous mapping, keep it
} else { } else {
// Ignore mappings that are invalid, non-existent or are special pseudo-files // Ignore other mappings that are invalid, non-existent or are special pseudo-files
continue continue
} }
} else { } else {


@ -26,7 +26,8 @@ var testMappings = `55fe82710000-55fe8273c000 r--p 00000000 fd:01 1068432
7f63c8eef000-7f63c8fdf000 r-xp 0001c000 1fd:01 7f63c8eef000-7f63c8fdf000 r-xp 0001c000 1fd:01
7f63c8eef000-7f63c8fdf000 r-xp 0001c000 1fd.01 1075944 7f63c8eef000-7f63c8fdf000 r-xp 0001c000 1fd.01 1075944
7f63c8eef000-7f63c8fdf000 r- 0001c000 1fd:01 1075944 7f63c8eef000-7f63c8fdf000 r- 0001c000 1fd:01 1075944
7f63c8eef000 r-xp 0001c000 1fd:01 1075944` 7f63c8eef000 r-xp 0001c000 1fd:01 1075944
7f8b929f0000-7f8b92a00000 r-xp 00000000 00:00 0 `
func TestParseMappings(t *testing.T) { func TestParseMappings(t *testing.T) {
mappings, numParseErrors, err := parseMappings(strings.NewReader(testMappings)) mappings, numParseErrors, err := parseMappings(strings.NewReader(testMappings))
@ -98,6 +99,15 @@ func TestParseMappings(t *testing.T) {
FileOffset: 114688, FileOffset: 114688,
Path: "/tmp/usr_lib_x86_64-linux-gnu_libopensc.so.6.0.0", Path: "/tmp/usr_lib_x86_64-linux-gnu_libopensc.so.6.0.0",
}, },
{
Vaddr: 0x7f8b929f0000,
Device: 0x0,
Flags: elf.PF_R + elf.PF_X,
Inode: 0,
Length: 0x10000,
FileOffset: 0,
Path: "",
},
} }
assert.Equal(t, expected, mappings) assert.Equal(t, expected, mappings)
} }


@ -22,7 +22,7 @@ import (
func loadTracers(t *testing.T) *ebpfMapsImpl { func loadTracers(t *testing.T) *ebpfMapsImpl {
t.Helper() t.Helper()
coll, err := support.LoadCollectionSpec(false) coll, err := support.LoadCollectionSpec()
require.NoError(t, err) require.NoError(t, err)
restoreRlimit, err := rlimit.MaximizeMemlock() restoreRlimit, err := rlimit.MaximizeMemlock()


@ -4,6 +4,13 @@
package processmanager // import "go.opentelemetry.io/ebpf-profiler/processmanager" package processmanager // import "go.opentelemetry.io/ebpf-profiler/processmanager"
import ( import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"regexp"
lru "github.com/elastic/go-freelru" lru "github.com/elastic/go-freelru"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@ -11,6 +18,11 @@ import (
"go.opentelemetry.io/ebpf-profiler/libpf" "go.opentelemetry.io/ebpf-profiler/libpf"
) )
//nolint:lll
var (
cgroupv2ContainerIDPattern = regexp.MustCompile(`0:.*?:.*?([0-9a-fA-F]{64})(?:\.scope)?(?:/[a-z]+)?$`)
)
type lruFileIDMapper struct { type lruFileIDMapper struct {
cache *lru.SyncedLRU[host.FileID, libpf.FileID] cache *lru.SyncedLRU[host.FileID, libpf.FileID]
} }
@ -79,3 +91,41 @@ type FileIDMapper interface {
// Set adds a mapping from the 64-bit file ID to the 128-bit file ID. // Set adds a mapping from the 64-bit file ID to the 128-bit file ID.
Set(pre host.FileID, post libpf.FileID) Set(pre host.FileID, post libpf.FileID)
} }
// parseContainerID parses cgroup v2 container IDs
func parseContainerID(cgroupFile io.Reader) string {
scanner := bufio.NewScanner(cgroupFile)
buf := make([]byte, 512)
// Providing a predefined buffer overrides the internal buffer that Scanner uses (4096 bytes).
// We can do that and also set a maximum allocation size on the following call.
// With a maximum of 4096 characters path in the kernel, 8192 should be fine here. We don't
// expect lines in /proc/<PID>/cgroup to be longer than that.
scanner.Buffer(buf, 8192)
var pathParts []string
for scanner.Scan() {
b := scanner.Bytes()
if bytes.Equal(b, []byte("0::/")) {
continue // Skip a common case
}
line := string(b)
pathParts = cgroupv2ContainerIDPattern.FindStringSubmatch(line)
if pathParts == nil {
log.Debugf("Could not extract cgroupv2 path from line: %s", line)
continue
}
return pathParts[1]
}
// No containerID could be extracted
return ""
}
// extractContainerID returns the containerID for pid if cgroup v2 is used.
func extractContainerID(pid libpf.PID) (string, error) {
cgroupFile, err := os.Open(fmt.Sprintf("/proc/%d/cgroup", pid))
if err != nil {
return "", err
}
return parseContainerID(cgroupFile), nil
}


@ -0,0 +1,54 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package processmanager // import "go.opentelemetry.io/ebpf-profiler/processmanager"
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
//nolint:lll
func TestExtractContainerID(t *testing.T) {
tests := []struct {
line string
expectedContainerID string
}{
{
line: "0::/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf6f2d169_f2ae_4afa-95ed_06ff2ed6b288.slice/cri-containerd-b4d6d161c62525d726fa394b27df30e14f8ea5646313ada576b390de70cfc8cc.scope",
expectedContainerID: "b4d6d161c62525d726fa394b27df30e14f8ea5646313ada576b390de70cfc8cc",
},
{
line: "0::/kubepods/besteffort/pod05e102bf-8744-4942-a241-9b6f07983a53/f52a212505a606972cf8614c3cb856539e71b77ecae33436c5ac442232fbacf8",
expectedContainerID: "f52a212505a606972cf8614c3cb856539e71b77ecae33436c5ac442232fbacf8",
},
{
line: "0::/kubepods/besteffort/pod897277d4-5e6f-4999-a976-b8340e8d075e/crio-a4d6b686848a610472a2eed3ae20d4d64b6b4819feb9fdfc7fd7854deaf59ef3",
expectedContainerID: "a4d6b686848a610472a2eed3ae20d4d64b6b4819feb9fdfc7fd7854deaf59ef3",
},
{
line: "0::/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c9f1974_5c46_44c2_b42f_3bbf0e98eef9.slice/cri-containerd-bacb920470900725e0aa7d914fee5eb0854315448b024b6b8420ad8429c607ba.scope",
expectedContainerID: "bacb920470900725e0aa7d914fee5eb0854315448b024b6b8420ad8429c607ba",
},
{
line: "0::/user.slice/user-1000.slice/user@1000.service/app.slice/app-org.gnome.Terminal.slice/vte-spawn-868f9513-eee8-457d-8e36-1b37ae8ae622.scope",
},
{
line: "0::/../../user.slice/user-501.slice/session-3.scope",
},
{
line: "0::/system.slice/docker-b1eba9dfaeba29d8b80532a574a03ea3cac29384327f339c26da13649e2120df.scope/init",
expectedContainerID: "b1eba9dfaeba29d8b80532a574a03ea3cac29384327f339c26da13649e2120df",
},
}
for _, tc := range tests {
t.Run(tc.expectedContainerID, func(t *testing.T) {
reader := strings.NewReader(tc.line)
gotContainerID := parseContainerID(reader)
assert.Equal(t, tc.expectedContainerID, gotContainerID)
})
}
}


@ -145,10 +145,16 @@ func (pm *ProcessManager) updatePidInformation(pid libpf.PID, m *Mapping) (bool,
} }
} }
containerID, err := extractContainerID(pid)
if err != nil {
log.Debugf("Failed extracting containerID for %d: %v", pid, err)
}
info = &processInfo{ info = &processInfo{
meta: ProcessMeta{ meta: ProcessMeta{
Name: processName, Name: processName,
Executable: exePath, Executable: exePath,
ContainerID: containerID,
EnvVariables: envVarMap}, EnvVariables: envVarMap},
mappings: make(map[libpf.Address]*Mapping), mappings: make(map[libpf.Address]*Mapping),
mappingsByFileID: make(map[host.FileID]map[libpf.Address]*Mapping), mappingsByFileID: make(map[host.FileID]map[libpf.Address]*Mapping),


@ -147,6 +147,8 @@ type ProcessMeta struct {
Executable string Executable string
// process env vars from /proc/PID/environ // process env vars from /proc/PID/environ
EnvVariables map[string]string EnvVariables map[string]string
// container ID retrieved from /proc/PID/cgroup
ContainerID string
} }
// processInfo contains information about the executable mappings // processInfo contains information about the executable mappings


@ -34,9 +34,6 @@ type baseReporter struct {
// pdata holds the generator for the data being exported. // pdata holds the generator for the data being exported.
pdata *pdata.Pdata pdata *pdata.Pdata
// cgroupv2ID caches PID to container ID information for cgroupv2 containers.
cgroupv2ID *lru.SyncedLRU[libpf.PID, string]
// traceEvents stores reported trace events (trace metadata with frames and counts) // traceEvents stores reported trace events (trace metadata with frames and counts)
traceEvents xsync.RWMutex[samples.TraceEventsTree] traceEvents xsync.RWMutex[samples.TraceEventsTree]
@ -96,12 +93,7 @@ func (b *baseReporter) ReportTraceEvent(trace *libpf.Trace, meta *samples.TraceE
extraMeta = b.cfg.ExtraSampleAttrProd.CollectExtraSampleMeta(trace, meta) extraMeta = b.cfg.ExtraSampleAttrProd.CollectExtraSampleMeta(trace, meta)
} }
containerID, err := libpf.LookupCgroupv2(b.cgroupv2ID, meta.PID) containerID := meta.ContainerID
if err != nil {
log.Debugf("Failed to get a cgroupv2 ID as container ID for PID %d: %v",
meta.PID, err)
}
key := samples.TraceAndMetaKey{ key := samples.TraceAndMetaKey{
Hash: trace.Hash, Hash: trace.Hash,
Comm: meta.Comm, Comm: meta.Comm,


@ -5,7 +5,6 @@ package reporter // import "go.opentelemetry.io/ebpf-profiler/reporter"
import ( import (
"context" "context"
"time"
lru "github.com/elastic/go-freelru" lru "github.com/elastic/go-freelru"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@ -29,14 +28,6 @@ type CollectorReporter struct {
// NewCollector builds a new CollectorReporter // NewCollector builds a new CollectorReporter
func NewCollector(cfg *Config, nextConsumer xconsumer.Profiles) (*CollectorReporter, error) { func NewCollector(cfg *Config, nextConsumer xconsumer.Profiles) (*CollectorReporter, error) {
cgroupv2ID, err := lru.NewSynced[libpf.PID, string](cfg.CGroupCacheElements,
func(pid libpf.PID) uint32 { return uint32(pid) })
if err != nil {
return nil, err
}
// Set a lifetime to reduce the risk of invalid data in case of PID reuse.
cgroupv2ID.SetLifetime(90 * time.Second)
// Next step: Dynamically configure the size of this LRU. // Next step: Dynamically configure the size of this LRU.
// Currently, we use the length of the JSON array in // Currently, we use the length of the JSON array in
// hostmetadata/hostmetadata.json. // hostmetadata/hostmetadata.json.
@ -63,7 +54,6 @@ func NewCollector(cfg *Config, nextConsumer xconsumer.Profiles) (*CollectorRepor
name: cfg.Name, name: cfg.Name,
version: cfg.Version, version: cfg.Version,
pdata: data, pdata: data,
cgroupv2ID: cgroupv2ID,
traceEvents: xsync.NewRWMutex(tree), traceEvents: xsync.NewRWMutex(tree),
hostmetadata: hostmetadata, hostmetadata: hostmetadata,
runLoop: &runLoop{ runLoop: &runLoop{
@ -85,7 +75,6 @@ func (r *CollectorReporter) Start(ctx context.Context) error {
}, func() { }, func() {
// Allow the GC to purge expired entries to avoid memory leaks. // Allow the GC to purge expired entries to avoid memory leaks.
r.pdata.Purge() r.pdata.Purge()
r.cgroupv2ID.PurgeExpired()
}) })
// When Stop() is called and a signal to 'stop' is received, then: // When Stop() is called and a signal to 'stop' is received, then:


@ -54,7 +54,6 @@ func TestCollectorReporterReportTraceEvent(t *testing.T) {
r, err := NewCollector(&Config{ r, err := NewCollector(&Config{
ExecutablesCacheElements: 1, ExecutablesCacheElements: 1,
FramesCacheElements: 1, FramesCacheElements: 1,
CGroupCacheElements: 1,
}, next) }, next)
require.NoError(t, err) require.NoError(t, err)
if err := r.ReportTraceEvent(tt.trace, tt.meta); err != nil && if err := r.ReportTraceEvent(tt.trace, tt.meta); err != nil &&


@ -30,8 +30,6 @@ type Config struct {
ExecutablesCacheElements uint32 ExecutablesCacheElements uint32
// FramesCacheElements defines the item capacity of the frames cache. // FramesCacheElements defines the item capacity of the frames cache.
FramesCacheElements uint32 FramesCacheElements uint32
// CGroupCacheElements defines the item capacity of the cgroup cache.
CGroupCacheElements uint32
// samplesPerSecond defines the number of samples per second. // samplesPerSecond defines the number of samples per second.
SamplesPerSecond int SamplesPerSecond int


@ -5,8 +5,8 @@ package pdata // import "go.opentelemetry.io/ebpf-profiler/reporter/internal/pda
import ( import (
"fmt" "fmt"
"math"
"path/filepath" "path/filepath"
"slices"
"time" "time"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@ -131,14 +131,15 @@ func (p *Pdata) setProfile(
attrMgr := samples.NewAttrTableManager(dic.AttributeTable()) attrMgr := samples.NewAttrTableManager(dic.AttributeTable())
locationIndex := int32(profile.LocationIndices().Len()) locationIndex := int32(profile.LocationIndices().Len())
var startTS, endTS pcommon.Timestamp startTS, endTS := uint64(math.MaxUint64), uint64(0)
for traceKey, traceInfo := range events { for traceKey, traceInfo := range events {
sample := profile.Sample().AppendEmpty() sample := profile.Sample().AppendEmpty()
sample.SetLocationsStartIndex(locationIndex) sample.SetLocationsStartIndex(locationIndex)
slices.Sort(traceInfo.Timestamps) for _, ts := range traceInfo.Timestamps {
startTS = pcommon.Timestamp(traceInfo.Timestamps[0]) startTS = min(startTS, ts)
endTS = pcommon.Timestamp(traceInfo.Timestamps[len(traceInfo.Timestamps)-1]) endTS = max(endTS, ts)
}
sample.TimestampsUnixNano().FromRaw(traceInfo.Timestamps) sample.TimestampsUnixNano().FromRaw(traceInfo.Timestamps)
switch origin { switch origin {
@ -283,8 +284,8 @@ func (p *Pdata) setProfile(
log.Debugf("Reporting OTLP profile with %d samples", profile.Sample().Len()) log.Debugf("Reporting OTLP profile with %d samples", profile.Sample().Len())
profile.SetDuration(endTS - startTS) profile.SetDuration(pcommon.Timestamp(endTS - startTS))
profile.SetStartTime(startTS) profile.SetStartTime(pcommon.Timestamp(startTS))
return nil return nil
} }
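Side note on this hunk: the timestamps are no longer sorted in place; the start and end are now tracked with a running min/max instead. For the values used in the test added further below, {2, 1, 3, 4, 7} and {8}, this yields StartTime 1 and Duration 8 - 1 = 7, matching the assertions.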


@ -5,6 +5,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/pprofile"
semconv "go.opentelemetry.io/otel/semconv/v1.34.0" semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
@ -217,3 +218,38 @@ func TestFunctionTableOrder(t *testing.T) {
}) })
} }
} }
func TestProfileDuration(t *testing.T) {
for _, tt := range []struct {
name string
events map[libpf.Origin]samples.KeyToEventMapping
}{
{
name: "profile duration",
events: map[libpf.Origin]samples.KeyToEventMapping{
support.TraceOriginSampling: map[samples.TraceAndMetaKey]*samples.TraceEvents{
{Pid: 1}: {
Timestamps: []uint64{2, 1, 3, 4, 7},
},
{Pid: 2}: {
Timestamps: []uint64{8},
},
},
},
},
} {
t.Run(tt.name, func(t *testing.T) {
d, err := New(100, 100, 100, nil)
require.NoError(t, err)
tree := make(samples.TraceEventsTree)
tree[""] = tt.events
res, err := d.Generate(tree, tt.name, "version")
require.NoError(t, err)
profile := res.ResourceProfiles().At(0).ScopeProfiles().At(0).Profiles().At(0)
require.Equal(t, pcommon.Timestamp(7), profile.Duration())
require.Equal(t, pcommon.Timestamp(1), profile.StartTime())
})
}
}


@ -44,14 +44,6 @@ type OTLPReporter struct {
// NewOTLP returns a new instance of OTLPReporter // NewOTLP returns a new instance of OTLPReporter
func NewOTLP(cfg *Config) (*OTLPReporter, error) { func NewOTLP(cfg *Config) (*OTLPReporter, error) {
cgroupv2ID, err := lru.NewSynced[libpf.PID, string](cfg.CGroupCacheElements,
func(pid libpf.PID) uint32 { return uint32(pid) })
if err != nil {
return nil, err
}
// Set a lifetime to reduce risk of invalid data in case of PID reuse.
cgroupv2ID.SetLifetime(90 * time.Second)
// Next step: Dynamically configure the size of this LRU. // Next step: Dynamically configure the size of this LRU.
// Currently, we use the length of the JSON array in // Currently, we use the length of the JSON array in
// hostmetadata/hostmetadata.json. // hostmetadata/hostmetadata.json.
@ -78,7 +70,6 @@ func NewOTLP(cfg *Config) (*OTLPReporter, error) {
name: cfg.Name, name: cfg.Name,
version: cfg.Version, version: cfg.Version,
pdata: data, pdata: data,
cgroupv2ID: cgroupv2ID,
traceEvents: xsync.NewRWMutex(eventsTree), traceEvents: xsync.NewRWMutex(eventsTree),
hostmetadata: hostmetadata, hostmetadata: hostmetadata,
runLoop: &runLoop{ runLoop: &runLoop{
@ -113,7 +104,6 @@ func (r *OTLPReporter) Start(ctx context.Context) error {
}, func() { }, func() {
// Allow the GC to purge expired entries to avoid memory leaks. // Allow the GC to purge expired entries to avoid memory leaks.
r.pdata.Purge() r.pdata.Purge()
r.cgroupv2ID.PurgeExpired()
}) })
// When Stop() is called and a signal to 'stop' is received, then: // When Stop() is called and a signal to 'stop' is received, then:


@ -11,6 +11,7 @@ type TraceEventMeta struct {
ProcessName string ProcessName string
ExecutablePath string ExecutablePath string
APMServiceName string APMServiceName string
ContainerID string
PID, TID libpf.PID PID, TID libpf.PID
CPU int CPU int
Origin libpf.Origin Origin libpf.Origin
@ -39,7 +40,7 @@ type TraceAndMetaKey struct {
// comm and apmServiceName are provided by the eBPF programs // comm and apmServiceName are provided by the eBPF programs
Comm string Comm string
ApmServiceName string ApmServiceName string
// containerID is annotated based on PID information // ContainerID is annotated based on PID information
ContainerID string ContainerID string
Pid int64 Pid int64
Tid int64 Tid int64


@ -1,13 +0,0 @@
//go:build amd64 && debugtracer
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package support // import "go.opentelemetry.io/ebpf-profiler/support"
import (
_ "embed"
)
//go:embed ebpf/tracer.ebpf.debug.amd64
var debugTracerData []byte


@ -1,13 +0,0 @@
//go:build arm64 && debugtracer
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package support // import "go.opentelemetry.io/ebpf-profiler/support"
import (
_ "embed"
)
//go:embed ebpf/tracer.ebpf.debug.arm64
var debugTracerData []byte


@ -1,10 +0,0 @@
//go:build !debugtracer
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package support // import "go.opentelemetry.io/ebpf-profiler/support"
// debugtracer_dummy.go satisfies build requirements where the eBPF debug tracers
// file does not exist.
var debugTracerData []byte


@ -1,11 +1,10 @@
SHELL ?= bash SHELL ?= bash
BPF_CLANG ?= clang-17 BPF_CLANG ?= clang-17
BPF_LINK ?= llvm-link-17 BPF_LINK ?= llvm-link-17
STRIP ?= llvm-strip-17
LLC ?= llc-17 LLC ?= llc-17
CLANG_FORMAT ?= clang-format-17 CLANG_FORMAT ?= clang-format-17
DEBUG_FLAGS = -DOPTI_DEBUG -g
# Detect native architecture and translate to GOARCH. # Detect native architecture and translate to GOARCH.
NATIVE_ARCH := $(shell uname -m) NATIVE_ARCH := $(shell uname -m)
ifeq ($(NATIVE_ARCH),x86_64) ifeq ($(NATIVE_ARCH),x86_64)
@ -16,13 +15,13 @@ else
$(error Unsupported architecture: $(NATIVE_ARCH)) $(error Unsupported architecture: $(NATIVE_ARCH))
endif endif
# Use a placeholder like '.' or '/' as the new prefix.
REPRODUCIBLE_PREFIX := .
# Valid values: amd64, arm64. # Valid values: amd64, arm64.
TARGET_ARCH ?= $(NATIVE_ARCH) TARGET_ARCH ?= $(NATIVE_ARCH)
# Valid values: release, debug TRACER_NAME ?= tracer.ebpf.$(TARGET_ARCH)
BUILD_TYPE ?= release
TRACER_NAME ?= tracer.ebpf.$(BUILD_TYPE).$(TARGET_ARCH)
ifeq ($(TARGET_ARCH),arm64) ifeq ($(TARGET_ARCH),arm64)
TARGET_FLAGS = -target aarch64-linux-gnu TARGET_FLAGS = -target aarch64-linux-gnu
@ -30,11 +29,8 @@ else
TARGET_FLAGS = -target x86_64-linux-gnu TARGET_FLAGS = -target x86_64-linux-gnu
endif endif
ifeq ($(BUILD_TYPE),debug) # Use -g to generate the btf section in the resulting binary.
TARGET_FLAGS+=$(DEBUG_FLAGS) FLAGS=$(TARGET_FLAGS) -g \
endif
FLAGS=$(TARGET_FLAGS) \
-fno-jump-tables \ -fno-jump-tables \
-nostdlib \ -nostdlib \
-nostdinc \ -nostdinc \
@ -45,40 +41,38 @@ FLAGS=$(TARGET_FLAGS) \
-Wno-unused-label \ -Wno-unused-label \
-Wno-unused-parameter \ -Wno-unused-parameter \
-Wno-sign-compare \ -Wno-sign-compare \
-fno-stack-protector -fno-stack-protector \
-D__SOURCE_DATE_EPOCH__=0 \
-Xclang -fdebug-prefix-map=$(CURDIR)=$(REPRODUCIBLE_PREFIX) \
-Xclang -fmacro-prefix-map=$(CURDIR)=$(REPRODUCIBLE_PREFIX)
SRCS := $(wildcard *.ebpf.c) SRCS := $(wildcard *.ebpf.c)
OBJS := $(SRCS:.c=.$(BUILD_TYPE).$(TARGET_ARCH).o) OBJS := $(SRCS:.c=.$(TARGET_ARCH).o)
.DEFAULT_GOAL := all .DEFAULT_GOAL := all
all: $(TRACER_NAME) all: $(TRACER_NAME)
debug:
$(MAKE) BUILD_TYPE=debug
amd64: amd64:
$(MAKE) TARGET_ARCH=amd64 $(MAKE) TARGET_ARCH=amd64
arm64: arm64:
$(MAKE) TARGET_ARCH=arm64 $(MAKE) TARGET_ARCH=arm64
debug-amd64:
$(MAKE) BUILD_TYPE=debug TARGET_ARCH=amd64
debug-arm64:
$(MAKE) BUILD_TYPE=debug TARGET_ARCH=arm64
errors.h: ../../tools/errors-codegen/errors.json errors.h: ../../tools/errors-codegen/errors.json
go run ../../tools/errors-codegen/main.go bpf $@ go run ../../tools/errors-codegen/main.go bpf $@
%.ebpf.c: errors.h ; %.ebpf.c: errors.h ;
%.ebpf.$(BUILD_TYPE).$(TARGET_ARCH).o: %.ebpf.c %.ebpf.$(TARGET_ARCH).o: %.ebpf.c
$(BPF_CLANG) $(FLAGS) -o $@ $(BPF_CLANG) $(FLAGS) -o $@
$(TRACER_NAME): $(OBJS) $(TRACER_NAME): $(OBJS)
$(BPF_LINK) $^ -o - | $(LLC) -march=bpf -mcpu=v2 -filetype=obj -o $@ $(BPF_LINK) $^ -o - | $(LLC) -march=bpf -mcpu=v2 -filetype=obj -o $@
# With the compile flag -g not only the btf section is added to the
# binary but also additional debug sections. As these debug sections
# are not relevant for BPF and increase the binary size remove them here.
$(STRIP) --strip-debug --enable-deterministic-archives $@
@./print_instruction_count.sh $@ @./print_instruction_count.sh $@
baseline: $(TRACER_NAME) baseline: $(TRACER_NAME)


@ -4,7 +4,20 @@
#include "bpf_map.h" #include "bpf_map.h"
#include "kernel.h" #include "kernel.h"
// with_debug_output is declared in native_stack_trace.ebpf.c
extern u32 with_debug_output;
#if defined(TESTING_COREDUMP) #if defined(TESTING_COREDUMP)
// BPF_RODATA_VAR declares a global variable in the .rodata section,
// ensuring it's not optimized away by the compiler or linker.
//
// Arguments:
// _type: The data type of the variable (e.g., u32, int, struct my_config).
// _name: The name of the global variable.
// _value: The initial value for the variable.
#define BPF_RODATA_VAR(_type, _name, _value) _type _name = _value;
// tools/coredump uses CGO to build the eBPF code. Provide here the glue to // tools/coredump uses CGO to build the eBPF code. Provide here the glue to
// dispatch the BPF API to helpers implemented in ebpfhelpers.go. // dispatch the BPF API to helpers implemented in ebpfhelpers.go.
#define SEC(NAME) #define SEC(NAME)
@ -12,7 +25,6 @@
#define printt(fmt, ...) bpf_log(fmt, ##__VA_ARGS__) #define printt(fmt, ...) bpf_log(fmt, ##__VA_ARGS__)
#define DEBUG_PRINT(fmt, ...) bpf_log(fmt, ##__VA_ARGS__) #define DEBUG_PRINT(fmt, ...) bpf_log(fmt, ##__VA_ARGS__)
#define OPTI_DEBUG
// BPF helpers. Mostly stubs to dispatch the call to Go code with the context ID. // BPF helpers. Mostly stubs to dispatch the call to Go code with the context ID.
int bpf_tail_call(void *ctx, bpf_map_def *map, int index); int bpf_tail_call(void *ctx, bpf_map_def *map, int index);
@ -64,7 +76,17 @@ static inline int bpf_get_stackid(void *ctx, bpf_map_def *map, u64 flags)
#else // TESTING_COREDUMP #else // TESTING_COREDUMP
// Native eBPF build // Native eBPF build
// BPF_RODATA_VAR declares a global variable in the .rodata section,
// ensuring it's not optimized away by the compiler or linker.
//
// Arguments:
// _type: The data type of the variable (e.g., u32, int, struct my_config).
// _name: The name of the global variable.
// _value: The initial value for the variable.
#define BPF_RODATA_VAR(_type, _name, _value) \
_type _name __attribute__((section(".rodata.var"), used)) = _value;
// definitions of bpf helper functions we need, as found in // definitions of bpf helper functions we need, as found in
// https://elixir.bootlin.com/linux/v4.11/source/samples/bpf/bpf_helpers.h // https://elixir.bootlin.com/linux/v4.11/source/samples/bpf/bpf_helpers.h
@ -103,39 +125,41 @@ static long (*bpf_probe_read_kernel)(void *dst, int size, const void *unsafe_ptr
bpf_trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \ bpf_trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \
}) })
#ifdef OPTI_DEBUG #define DEBUG_PRINT(fmt, ...) \
#define DEBUG_PRINT(fmt, ...) printt(fmt, ##__VA_ARGS__); ({ \
if (__builtin_expect(with_debug_output, 0)) { \
printt(fmt, ##__VA_ARGS__); \
} \
})
// Sends `SIGTRAP` to the current task, killing it and capturing a coredump. // Sends `SIGTRAP` to the current task, killing it and capturing a coredump.
// //
// Only use this in code paths that you expect to be hit by a very specific process that you // Only use this in code paths that you expect to be hit by a very specific process that you
// intend to debug. Placing it into frequently taken code paths might otherwise take down // intend to debug. Placing it into frequently taken code paths might otherwise take down
// important system processes like sshd or your window manager. For frequently taken cases, // important system processes like sshd or your window manager. For frequently taken cases,
// prefer using the `DEBUG_CAPTURE_COREDUMP_IF_TGID` macro. // prefer using the `DEBUG_CAPTURE_COREDUMP_IF_TGID` macro.
// //
// This macro requires linking against kernel headers >= 5.6. // This macro requires linking against kernel headers >= 5.6.
#define DEBUG_CAPTURE_COREDUMP() \ #define DEBUG_CAPTURE_COREDUMP() \
({ \ ({ \
if (__builtin_expect(with_debug_output, 0)) { \
/* We don't define `bpf_send_signal_thread` globally because it requires a */ \ /* We don't define `bpf_send_signal_thread` globally because it requires a */ \
/* rather recent kernel (>= 5.6) and otherwise breaks builds of older versions. */ \ /* rather recent kernel (>= 5.6) and otherwise breaks builds of older versions. */ \
long (*bpf_send_signal_thread)(u32 sig) = (void *)BPF_FUNC_send_signal_thread; \ long (*bpf_send_signal_thread)(u32 sig) = (void *)BPF_FUNC_send_signal_thread; \
bpf_send_signal_thread(SIGTRAP); \ bpf_send_signal_thread(SIGTRAP); \
}) } \
})
// Like `DEBUG_CAPTURE_COREDUMP`, but only coredumps if the current task is a member of the // Like `DEBUG_CAPTURE_COREDUMP`, but only coredumps if the current task is a member of the
// given thread group ID ("process"). // given thread group ID ("process").
#define DEBUG_CAPTURE_COREDUMP_IF_TGID(tgid) \ #define DEBUG_CAPTURE_COREDUMP_IF_TGID(tgid) \
({ \ ({ \
if (bpf_get_current_pid_tgid() >> 32 == (tgid)) { \ if (__builtin_expect(with_debug_output, 0) && bpf_get_current_pid_tgid() >> 32 == (tgid)) { \
DEBUG_PRINT("coredumping process %d", (tgid)); \ printt("coredumping process %d", (tgid)); \
DEBUG_CAPTURE_COREDUMP(); \ long (*bpf_send_signal_thread)(u32 sig) = (void *)BPF_FUNC_send_signal_thread; \
} \ bpf_send_signal_thread(SIGTRAP); \
}) } \
#else })
#define DEBUG_PRINT(fmt, ...)
#define DEBUG_CAPTURE_COREDUMP()
#define DEBUG_CAPTURE_COREDUMP_IF_TGID(tgid)
#endif
// Definition of SEC as used by the Linux kernel in tools/lib/bpf/bpf_helpers.h for clang // Definition of SEC as used by the Linux kernel in tools/lib/bpf/bpf_helpers.h for clang
// compilations. // compilations.

View File

@ -4,6 +4,9 @@
#include "tracemgmt.h" #include "tracemgmt.h"
#include "types.h" #include "types.h"
// with_debug_output is set at load time.

BPF_RODATA_VAR(u32, with_debug_output, 0)
// Macro to create a map named exe_id_to_X_stack_deltas that is a nested maps with a fileID for the // Macro to create a map named exe_id_to_X_stack_deltas that is a nested maps with a fileID for the
// outer map and an array as inner map that holds up to 2^X stack delta entries for the given // outer map and an array as inner map that holds up to 2^X stack delta entries for the given
// fileID. // fileID.

View File

@ -37,7 +37,7 @@ int tracepoint__sched_switch(void *ctx)
return ERR_UNREACHABLE; return ERR_UNREACHABLE;
} }
if (bpf_get_prandom_u32() % OFF_CPU_THRESHOLD_MAX >= syscfg->off_cpu_threshold) { if (bpf_get_prandom_u32() > syscfg->off_cpu_threshold) {
return 0; return 0;
} }
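With OFF_CPU_THRESHOLD_MAX gone, the threshold is now compared against the full u32 range of bpf_get_prandom_u32(), so a scheduling event is sampled with probability roughly off_cpu_threshold / 2^32. A hedged sketch of how a user-facing probability could be turned into that threshold (the helper name is hypothetical and not part of this change):

import "math"

// offCPUThresholdFromProbability maps a sampling probability in [0, 1]
// onto the u32 range that is compared against bpf_get_prandom_u32().
func offCPUThresholdFromProbability(p float64) uint32 {
	switch {
	case p <= 0:
		return 0 // effectively disables off-CPU profiling
	case p >= 1:
		return math.MaxUint32 // every scheduling event is eligible
	default:
		return uint32(p * float64(math.MaxUint32))
	}
}

For example, a 1% probability maps to roughly math.MaxUint32/100, the value the integration test setup further below passes as OffCPUThreshold.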
@ -82,6 +82,10 @@ int finish_task_switch(struct pt_regs *ctx)
return 0; return 0;
} }
// Remove the entry from the map so that the stack for the same pid_tgid does not get unwound and
// reported again without the start timestamp having been updated in tracepoint/sched/sched_switch.
bpf_map_delete_elem(&sched_times, &pid_tgid);
u64 diff = ts - *start_ts; u64 diff = ts - *start_ts;
DEBUG_PRINT("==== finish_task_switch ===="); DEBUG_PRINT("==== finish_task_switch ====");

View File

@ -173,7 +173,7 @@ static inline EBPF_INLINE bool report_pid(void *ctx, u64 pid_tgid, int ratelimit
increment_metric(metricID_PIDEventsErr); increment_metric(metricID_PIDEventsErr);
return false; return false;
} }
if (ratelimit_action == RATELIMIT_ACTION_RESET || errNo != 0) { if (ratelimit_action == RATELIMIT_ACTION_RESET) {
bpf_map_delete_elem(&reported_pids, &pid); bpf_map_delete_elem(&reported_pids, &pid);
} }

Binary file not shown.

Binary file not shown.

View File

@ -346,9 +346,6 @@ typedef enum TraceOrigin {
TRACE_OFF_CPU, TRACE_OFF_CPU,
} TraceOrigin; } TraceOrigin;
// OFF_CPU_THRESHOLD_MAX defines the maximum threshold.
#define OFF_CPU_THRESHOLD_MAX 1000
// MAX_FRAME_UNWINDS defines the maximum number of frames per // MAX_FRAME_UNWINDS defines the maximum number of frames per
// Trace we can unwind and respect the limit of eBPF instructions, // Trace we can unwind and respect the limit of eBPF instructions,
// limit of tail calls and limit of stack size per eBPF program. // limit of tail calls and limit of stack size per eBPF program.

View File

@ -7,19 +7,11 @@ import (
"bytes" "bytes"
cebpf "github.com/cilium/ebpf" cebpf "github.com/cilium/ebpf"
log "github.com/sirupsen/logrus"
) )
// LoadCollectionSpec is a wrapper around ebpf.LoadCollectionSpecFromReader and loads the eBPF // LoadCollectionSpec is a wrapper around ebpf.LoadCollectionSpecFromReader and loads the eBPF
// Spec from the embedded file. // Spec from the embedded file.
// We expect tracerData to hold all possible eBPF maps and programs. // We expect tracerData to hold all possible eBPF maps and programs.
func LoadCollectionSpec(debugTracer bool) (*cebpf.CollectionSpec, error) { func LoadCollectionSpec() (*cebpf.CollectionSpec, error) {
if debugTracer {
if len(debugTracerData) > 0 {
log.Warnf("Using debug eBPF tracers")
return cebpf.LoadCollectionSpecFromReader(bytes.NewReader(debugTracerData))
}
log.Warnf("Debug eBPF tracers not found, using release tracers instead")
}
return cebpf.LoadCollectionSpecFromReader(bytes.NewReader(tracerData)) return cebpf.LoadCollectionSpecFromReader(bytes.NewReader(tracerData))
} }

View File

@ -9,5 +9,5 @@ import (
_ "embed" _ "embed"
) )
//go:embed ebpf/tracer.ebpf.release.amd64 //go:embed ebpf/tracer.ebpf.amd64
var tracerData []byte var tracerData []byte

View File

@ -9,5 +9,5 @@ import (
_ "embed" _ "embed"
) )
//go:embed ebpf/tracer.ebpf.release.arm64 //go:embed ebpf/tracer.ebpf.arm64
var tracerData []byte var tracerData []byte

View File

@ -85,8 +85,6 @@ const (
TraceOriginOffCPU = 0x2 TraceOriginOffCPU = 0x2
) )
const OffCPUThresholdMax = 0x3e8
type ApmIntProcInfo struct { type ApmIntProcInfo struct {
Offset uint64 Offset uint64
} }

View File

@ -95,8 +95,6 @@ const (
TraceOriginOffCPU = C.TRACE_OFF_CPU TraceOriginOffCPU = C.TRACE_OFF_CPU
) )
const OffCPUThresholdMax = C.OFF_CPU_THRESHOLD_MAX
type ApmIntProcInfo C.ApmIntProcInfo type ApmIntProcInfo C.ApmIntProcInfo
type DotnetProcInfo C.DotnetProcInfo type DotnetProcInfo C.DotnetProcInfo
type PHPProcInfo C.PHPProcInfo type PHPProcInfo C.PHPProcInfo

View File

@ -5,6 +5,7 @@ import (
"context" "context"
"errors" "errors"
"io" "io"
"math"
"os" "os"
"strings" "strings"
"testing" "testing"
@ -12,6 +13,7 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.opentelemetry.io/ebpf-profiler/host" "go.opentelemetry.io/ebpf-profiler/host"
"go.opentelemetry.io/ebpf-profiler/libpf" "go.opentelemetry.io/ebpf-profiler/libpf"
"go.opentelemetry.io/ebpf-profiler/reporter" "go.opentelemetry.io/ebpf-profiler/reporter"
@ -49,6 +51,7 @@ func StartTracer(ctx context.Context, t *testing.T, et tracertypes.IncludedTrace
SamplesPerSecond: 20, SamplesPerSecond: 20,
ProbabilisticInterval: 100, ProbabilisticInterval: 100,
ProbabilisticThreshold: 100, ProbabilisticThreshold: 100,
OffCPUThreshold: uint32(math.MaxUint32 / 100),
DebugTracer: true, DebugTracer: true,
}) })
require.NoError(t, err) require.NoError(t, err)

View File

@ -0,0 +1,52 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// cloudstore provides access to the cloud-based storage used by the tests.
package cloudstore // import "go.opentelemetry.io/ebpf-profiler/tools/coredump/cloudstore"
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3"
)
// moduleStoreRegion defines the S3 bucket OCI region.
const moduleStoreRegion = "us-sanjose-1"
// moduleStoreObjectNamespace defines the S3 bucket OCI object namespace.
const moduleStoreObjectNamespace = "axtwf1hkrwcy"
// modulePublicReadURL defines the S3 bucket OCI public read-only base path.
//
//nolint:lll
const modulePublicReadURL = "sm-wftyyzHJkBghWeexmK1o5ArimNwZC-5eBej5Lx4e46sLVHtO_y7Zf7FZgoIu_/n/axtwf1hkrwcy"
// moduleStoreS3Bucket defines the S3 bucket used for the module store.
const moduleStoreS3Bucket = "ebpf-profiling-coredumps"
func PublicReadURL() string {
return fmt.Sprintf("https://%s.objectstorage.%s.oci.customer-oci.com/p/%s/b/%s/o/",
moduleStoreObjectNamespace, moduleStoreRegion, modulePublicReadURL, moduleStoreS3Bucket)
}
func ModulestoreS3Bucket() string {
return moduleStoreS3Bucket
}
func Client() (*s3.Client, error) {
cfg, err := awsconfig.LoadDefaultConfig(context.Background())
if err != nil {
return nil, err
}
return s3.NewFromConfig(cfg, func(o *s3.Options) {
baseEndpoint := fmt.Sprintf("https://%s.compat.objectstorage.%s.oraclecloud.com/",
moduleStoreObjectNamespace, moduleStoreRegion)
o.Region = moduleStoreRegion
o.BaseEndpoint = aws.String(baseEndpoint)
o.UsePathStyle = true
}), nil
}
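For reference, the intended wiring of this package, mirroring the call sites updated in the hunks below ("modulecache" is the local cache directory); treat it as a sketch rather than additional API:

cloudClient, err := cloudstore.Client()
if err != nil {
	log.Fatalf("%v", err)
}
store, err := modulestore.New(cloudClient,
	cloudstore.PublicReadURL(), cloudstore.ModulestoreS3Bucket(), "modulecache")
if err != nil {
	log.Fatalf("%v", err)
}
_ = store // used to fetch and cache coredump modules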

View File

@ -8,6 +8,8 @@ import (
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.opentelemetry.io/ebpf-profiler/tools/coredump/cloudstore"
"go.opentelemetry.io/ebpf-profiler/tools/coredump/modulestore"
) )
func TestCoreDumps(t *testing.T) { func TestCoreDumps(t *testing.T) {
@ -15,7 +17,10 @@ func TestCoreDumps(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.NotEmpty(t, cases) require.NotEmpty(t, cases)
store, err := initModuleStore() cloudClient, err := cloudstore.Client()
require.NoError(t, err)
store, err := modulestore.New(cloudClient,
cloudstore.PublicReadURL(), cloudstore.ModulestoreS3Bucket(), "modulecache")
require.NoError(t, err) require.NoError(t, err)
for _, filename := range cases { for _, filename := range cases {

View File

@ -11,37 +11,24 @@ import (
"context" "context"
"errors" "errors"
"flag" "flag"
"fmt"
"os" "os"
"github.com/aws/aws-sdk-go-v2/aws"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/peterbourgon/ff/v3/ffcli" "github.com/peterbourgon/ff/v3/ffcli"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"go.opentelemetry.io/ebpf-profiler/tools/coredump/cloudstore"
"go.opentelemetry.io/ebpf-profiler/tools/coredump/modulestore" "go.opentelemetry.io/ebpf-profiler/tools/coredump/modulestore"
) )
// moduleStoreRegion defines the S3 bucket OCI region.
const moduleStoreRegion = "us-sanjose-1"
// moduleStoreObjectNamespace defines the S3 bucket OCI object name space.
const moduleStoreObjectNamespace = "axtwf1hkrwcy"
// modulePublicReadUrl defines the S3 bucket OCI public read only base path.
//
//nolint:lll
const modulePublicReadURL = "sm-wftyyzHJkBghWeexmK1o5ArimNwZC-5eBej5Lx4e46sLVHtO_y7Zf7FZgoIu_/n/axtwf1hkrwcy"
// moduleStoreS3Bucket defines the S3 bucket used for the module store.
const moduleStoreS3Bucket = "ebpf-profiling-coredumps"
func main() { func main() {
log.SetReportCaller(false) log.SetReportCaller(false)
log.SetFormatter(&log.TextFormatter{}) log.SetFormatter(&log.TextFormatter{})
store, err := initModuleStore() cloudClient, err := cloudstore.Client()
if err != nil {
log.Fatalf("%v", err)
}
store, err := modulestore.New(cloudClient,
cloudstore.PublicReadURL(), cloudstore.ModulestoreS3Bucket(), "modulecache")
if err != nil { if err != nil {
log.Fatalf("%v", err) log.Fatalf("%v", err)
} }
@ -70,22 +57,3 @@ func main() {
} }
} }
} }
func initModuleStore() (*modulestore.Store, error) {
publicReadURL := fmt.Sprintf("https://%s.objectstorage.%s.oci.customer-oci.com/p/%s/b/%s/o/",
moduleStoreObjectNamespace, moduleStoreRegion, modulePublicReadURL, moduleStoreS3Bucket)
cfg, err := awsconfig.LoadDefaultConfig(context.Background())
if err != nil {
return nil, err
}
s3Client := s3.NewFromConfig(cfg, func(o *s3.Options) {
baseEndpoint := fmt.Sprintf("https://%s.compat.objectstorage.%s.oraclecloud.com/",
moduleStoreObjectNamespace, moduleStoreRegion)
o.Region = moduleStoreRegion
o.BaseEndpoint = aws.String(baseEndpoint)
o.UsePathStyle = true
})
return modulestore.New(s3Client, publicReadURL, moduleStoreS3Bucket, "modulecache")
}

View File

@ -31,8 +31,7 @@
"helloworld+0x13db8", "helloworld+0x13db8",
"helloworld+0x140a1", "helloworld+0x140a1",
"ld-musl-x86_64.so.1+0x1c6d0", "ld-musl-x86_64.so.1+0x1c6d0",
"helloworld+0x73e5", "helloworld+0x73e5"
"<unwinding aborted due to error native_exceeded_delta_lookup_iterations>"
] ]
}, },
{ {

View File

@ -18,8 +18,7 @@
"ld-musl-x86_64.so.1+0x5846a", "ld-musl-x86_64.so.1+0x5846a",
"sig+0x11f8", "sig+0x11f8",
"ld-musl-x86_64.so.1+0x1b87f", "ld-musl-x86_64.so.1+0x1b87f",
"sig+0x1065", "sig+0x1065"
"<unwinding aborted due to error native_small_pc>"
] ]
} }
], ],

View File

@ -30,8 +30,7 @@
"libpython3.11.so.1.0+0x270c06", "libpython3.11.so.1.0+0x270c06",
"libpython3.11.so.1.0+0x235b06", "libpython3.11.so.1.0+0x235b06",
"ld-musl-x86_64.so.1+0x1c9c9", "ld-musl-x86_64.so.1+0x1c9c9",
"python3.11+0x1075", "python3.11+0x1075"
"<unwinding aborted due to error native_stack_delta_invalid>"
] ]
} }
], ],

View File

@ -53,8 +53,7 @@
"libpython3.12.so.1.0+0x21f742", "libpython3.12.so.1.0+0x21f742",
"libpython3.12.so.1.0+0x1d6ed6", "libpython3.12.so.1.0+0x1d6ed6",
"ld-musl-x86_64.so.1+0x1c709", "ld-musl-x86_64.so.1+0x1c709",
"python3.12+0x1045", "python3.12+0x1045"
"<unwinding aborted due to error native_small_pc>"
] ]
} }
], ],

View File

@ -51,8 +51,7 @@
"libpython3.12.so.1.0+0x21f742", "libpython3.12.so.1.0+0x21f742",
"libpython3.12.so.1.0+0x1d6ed6", "libpython3.12.so.1.0+0x1d6ed6",
"ld-musl-x86_64.so.1+0x1c709", "ld-musl-x86_64.so.1+0x1c709",
"python3.12+0x1045", "python3.12+0x1045"
"<unwinding aborted due to error native_small_pc>"
] ]
} }
], ],

View File

@ -23,7 +23,6 @@
"runtime.mstart1+0 in /usr/local/go/src/runtime/proc.go:1428", "runtime.mstart1+0 in /usr/local/go/src/runtime/proc.go:1428",
"runtime.mstart0+0 in /usr/local/go/src/runtime/proc.go:1359", "runtime.mstart0+0 in /usr/local/go/src/runtime/proc.go:1359",
"runtime.mstart+0 in /usr/local/go/src/runtime/asm_arm64.s:129" "runtime.mstart+0 in /usr/local/go/src/runtime/asm_arm64.s:129"
] ]
}, },
{ {
@ -50,7 +49,6 @@
"runtime.schedule+0 in /usr/local/go/src/runtime/proc.go:3206", "runtime.schedule+0 in /usr/local/go/src/runtime/proc.go:3206",
"runtime.park_m+0 in /usr/local/go/src/runtime/proc.go:3356", "runtime.park_m+0 in /usr/local/go/src/runtime/proc.go:3356",
"runtime.mcall+0 in /usr/local/go/src/runtime/asm_arm64.s:193" "runtime.mcall+0 in /usr/local/go/src/runtime/asm_arm64.s:193"
] ]
} }
], ],

View File

@ -61,8 +61,7 @@
"node+0xad25bf", "node+0xad25bf",
"libc.so.6+0x273fb", "libc.so.6+0x273fb",
"libc.so.6+0x274cb", "libc.so.6+0x274cb",
"node+0xa583e3", "node+0xa583e3"
"<unwinding aborted due to error native_stack_delta_invalid>"
] ]
}, },
{ {

View File

@ -59,8 +59,7 @@
"node+0xabc237", "node+0xabc237",
"libc.so.6+0x273fb", "libc.so.6+0x273fb",
"libc.so.6+0x274cb", "libc.so.6+0x274cb",
"node+0xa41c3b", "node+0xa41c3b"
"<unwinding aborted due to error native_stack_delta_invalid>"
] ]
}, },
{ {

View File

@ -61,8 +61,7 @@
"node+0xae0fe7", "node+0xae0fe7",
"libc.so.6+0x273fb", "libc.so.6+0x273fb",
"libc.so.6+0x274cb", "libc.so.6+0x274cb",
"node+0xa66af3", "node+0xa66af3"
"<unwinding aborted due to error native_stack_delta_invalid>"
] ]
}, },
{ {

View File

@ -61,8 +61,7 @@
"node+0xabbf67", "node+0xabbf67",
"libc.so.6+0x273fb", "libc.so.6+0x273fb",
"libc.so.6+0x274cb", "libc.so.6+0x274cb",
"node+0xa41cb3", "node+0xa41cb3"
"<unwinding aborted due to error native_stack_delta_invalid>"
] ]
}, },
{ {

View File

@ -125,6 +125,7 @@ func (m *traceHandler) HandleTrace(bpfTrace *host.Trace) {
CPU: bpfTrace.CPU, CPU: bpfTrace.CPU,
ProcessName: bpfTrace.ProcessName, ProcessName: bpfTrace.ProcessName,
ExecutablePath: bpfTrace.ExecutablePath, ExecutablePath: bpfTrace.ExecutablePath,
ContainerID: bpfTrace.ContainerID,
Origin: bpfTrace.Origin, Origin: bpfTrace.Origin,
OffTime: bpfTrace.OffTime, OffTime: bpfTrace.OffTime,
EnvVars: bpfTrace.EnvVars, EnvVars: bpfTrace.EnvVars,

View File

@ -7,6 +7,7 @@ package tracer_test
import ( import (
"context" "context"
"math"
"runtime" "runtime"
"sync" "sync"
"testing" "testing"
@ -46,7 +47,7 @@ func forceContextSwitch() {
// runKernelFrameProbe executes a perf event on the sched/sched_switch tracepoint // runKernelFrameProbe executes a perf event on the sched/sched_switch tracepoint
// that sends a selection of hand-crafted, predictable traces. // that sends a selection of hand-crafted, predictable traces.
func runKernelFrameProbe(t *testing.T, tr *tracer.Tracer) { func runKernelFrameProbe(t *testing.T, tr *tracer.Tracer) {
coll, err := support.LoadCollectionSpec(false) coll, err := support.LoadCollectionSpec()
require.NoError(t, err) require.NoError(t, err)
//nolint:staticcheck //nolint:staticcheck
@ -114,7 +115,7 @@ func TestTraceTransmissionAndParsing(t *testing.T) {
BPFVerifierLogLevel: 0, BPFVerifierLogLevel: 0,
ProbabilisticInterval: 100, ProbabilisticInterval: 100,
ProbabilisticThreshold: 100, ProbabilisticThreshold: 100,
OffCPUThreshold: support.OffCPUThresholdMax, OffCPUThreshold: 1 * math.MaxUint32,
DebugTracer: true, DebugTracer: true,
}) })
require.NoError(t, err) require.NoError(t, err)

View File

@ -9,6 +9,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"math"
"math/rand/v2" "math/rand/v2"
"strings" "strings"
"sync/atomic" "sync/atomic"
@ -279,11 +280,17 @@ func initializeMapsAndPrograms(kmod *kallsyms.Module, cfg *Config) (
// References to eBPF maps in the eBPF programs are just placeholders that need to be // References to eBPF maps in the eBPF programs are just placeholders that need to be
// replaced by the actual loaded maps later on with RewriteMaps before loading the // replaced by the actual loaded maps later on with RewriteMaps before loading the
// programs into the kernel. // programs into the kernel.
coll, err := support.LoadCollectionSpec(cfg.DebugTracer) coll, err := support.LoadCollectionSpec()
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to load specification for tracers: %v", err) return nil, nil, fmt.Errorf("failed to load specification for tracers: %v", err)
} }
if cfg.DebugTracer {
if err = coll.Variables["with_debug_output"].Set(uint32(1)); err != nil {
return nil, nil, fmt.Errorf("failed to set debug output: %v", err)
}
}
err = buildStackDeltaTemplates(coll) err = buildStackDeltaTemplates(coll)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
@ -442,11 +449,7 @@ func loadAllMaps(coll *cebpf.CollectionSpec, cfg *Config,
adaption["stack_delta_page_to_info"] = adaption["stack_delta_page_to_info"] =
1 << uint32(stackDeltaPageToInfoSize+cfg.MapScaleFactor) 1 << uint32(stackDeltaPageToInfoSize+cfg.MapScaleFactor)
// To not lose too many scheduling events but also not oversize sched_times, adaption["sched_times"] = schedTimesSize(cfg.OffCPUThreshold)
// calculate a size based on an assumed upper bound of scheduler events per
// second (1000hz) multiplied by an average time a task remains off CPU (3s),
// scaled by the probability of capturing a trace.
adaption["sched_times"] = (4096 * cfg.OffCPUThreshold) / support.OffCPUThresholdMax
for i := support.StackDeltaBucketSmallest; i <= support.StackDeltaBucketLargest; i++ { for i := support.StackDeltaBucketSmallest; i <= support.StackDeltaBucketLargest; i++ {
mapName := fmt.Sprintf("exe_id_to_%d_stack_deltas", i) mapName := fmt.Sprintf("exe_id_to_%d_stack_deltas", i)
@ -472,6 +475,25 @@ func loadAllMaps(coll *cebpf.CollectionSpec, cfg *Config,
return nil return nil
} }
// schedTimesSize calculates the size of the sched_times map based on the
// configured off-CPU threshold.
// To avoid losing too many scheduling events while not oversizing sched_times,
// the size is derived from an assumed upper bound of scheduler events per
// second (1000 Hz) multiplied by the average time a task remains off CPU (3 s),
// scaled by the probability of capturing a trace.
func schedTimesSize(threshold uint32) uint32 {
size := uint32((4096 * uint64(threshold)) / math.MaxUint32)
if size < 16 {
// Guarantee a minimal size of 16.
return 16
}
if size > 4096 {
// Guarantee a maximum size of 4096.
return 4096
}
return size
}
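Plugging a few representative thresholds into schedTimesSize gives the following sizes (illustrative arithmetic only, not additional configuration):

schedTimesSize(math.MaxUint32 / 100) // ≈ 40 entries for a 1% sampling probability
schedTimesSize(math.MaxUint32)       // 4096 entries: upper clamp when sampling every event
schedTimesSize(0)                    // 16 entries: lower clamp when off-CPU profiling is disabled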
// loadPerfUnwinders loads all perf eBPF Programs and their tail call targets. // loadPerfUnwinders loads all perf eBPF Programs and their tail call targets.
func loadPerfUnwinders(coll *cebpf.CollectionSpec, ebpfProgs map[string]*cebpf.Program, func loadPerfUnwinders(coll *cebpf.CollectionSpec, ebpfProgs map[string]*cebpf.Program,
tailcallMap *cebpf.Map, tailCallProgs []progLoaderHelper, tailcallMap *cebpf.Map, tailCallProgs []progLoaderHelper,
@ -871,6 +893,7 @@ func (t *Tracer) loadBpfTrace(raw []byte, cpu int) *host.Trace {
trace := &host.Trace{ trace := &host.Trace{
Comm: C.GoString((*C.char)(unsafe.Pointer(&ptr.comm))), Comm: C.GoString((*C.char)(unsafe.Pointer(&ptr.comm))),
ExecutablePath: procMeta.Executable, ExecutablePath: procMeta.Executable,
ContainerID: procMeta.ContainerID,
ProcessName: procMeta.Name, ProcessName: procMeta.Name,
APMTraceID: *(*libpf.APMTraceID)(unsafe.Pointer(&ptr.apm_trace_id)), APMTraceID: *(*libpf.APMTraceID)(unsafe.Pointer(&ptr.apm_trace_id)),
APMTransactionID: *(*libpf.APMTransactionID)(unsafe.Pointer(&ptr.apm_transaction_id)), APMTransactionID: *(*libpf.APMTransactionID)(unsafe.Pointer(&ptr.apm_transaction_id)),