Compare commits


33 Commits

Author SHA1 Message Date
Alexandru-Liviu Bratosin 063107068c
fix(async-processor): concurrent exports actually serialised (#3028) 2025-07-14 10:37:21 -07:00
Reiley Yang 8925f064d2
chore: Remove file .github/repository-settings.md (#3067) 2025-07-14 10:31:30 -07:00
Lalit Kumar Bhasin 8aba0913e9
chore: Bump semantic-conventions to v1.36.0 (#3064) 2025-07-14 10:04:21 -07:00
OpenTelemetry Bot 34d6d5082e
Sort contributor listings and remove affiliation from emeriti (#3060) 2025-07-09 22:11:59 +02:00
Berkus Decker 5e447d02cc
chore: Switch from unmaintained hex dependency to const-hex (#3053) 2025-07-09 08:54:12 -07:00
Whoemoon Jang 8d46c40b60
fix: Support HttpClient implementation for HyperClient with custom connectors (#3057) 2025-07-07 11:36:38 -07:00
Copilot eac368a7e4
chore: Fix spelling errors and typos in documentation (#3044)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-07-02 09:53:05 -07:00
dependabot[bot] 2bf8175d07
chore(deps): bump taiki-e/install-action from 2.52.4 to 2.56.0 (#3051)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-07-01 13:33:02 -07:00
dependabot[bot] 3fc7194796
chore(deps): bump EmbarkStudios/cargo-deny-action from 2.0.11 to 2.0.12 (#3052)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-01 13:24:26 -07:00
dependabot[bot] db15ecb541
chore(deps): bump obi1kenobi/cargo-semver-checks-action from 2.6 to 2.8 (#3050)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-01 13:24:14 -07:00
dependabot[bot] 674914a8ef
chore(deps): bump github/codeql-action from 3.28.16 to 3.29.2 (#3049)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-07-01 13:23:57 -07:00
dependabot[bot] 6bc2b19b85
chore(deps): bump step-security/harden-runner from 2.12.0 to 2.12.2 (#3048)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-01 12:49:44 -07:00
Lushe Shipkov d59aded375
docs: A few small doc touch-ups in some of the various in_memory_exporter modules (#3042) 2025-06-30 09:24:40 -07:00
OpenTelemetry Bot e7784bb78f
docs: Update community member listings (#3038)
Co-authored-by: otelbot <197425009+otelbot@users.noreply.github.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-06-27 09:43:55 -06:00
Anton Grübel 5e29598369
chore: fix format lint (#3039) 2025-06-27 09:33:16 -06:00
yoshi-taka af2f1449e8
chore: remove unused glob (#3035) 2025-06-22 17:00:42 -07:00
Scott Gerring 0c2f808ec2
ci: Run benchmarks on main on the new oracle dedicated workers (#2942) 2025-06-20 08:37:17 -07:00
Cijo Thomas d4eb35a0cc
docs: on how to set right cardinality limit (#2998)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-06-12 17:03:39 -07:00
Lalit Kumar Bhasin 1f0d9a9f62
chore: Prepare for opentelemetry-appender-tracing 0.30.1 - bump tracing-opentelemetry to 0.31 (#3022) 2025-06-05 11:43:18 -07:00
dependabot[bot] 51dc2f04b7
chore(deps): update dtolnay/rust-toolchain requirement to b3b07ba8b418998c39fb20f53e8b695cdcc8de1b (#3016)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-06-05 10:43:48 -07:00
Igor Unanua eaca267d04
feat: support multi-value key propagation extraction (#3008)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-06-04 14:52:57 -07:00
Lalit Kumar Bhasin 7b3db0b6a6
chore: Bump otel-proto v1.7.0 (#3018) 2025-06-03 08:20:25 -07:00
Lalit Kumar Bhasin 082213e4e9
chore: bump semcon 1.34.0 (#3019) 2025-06-02 11:24:51 -07:00
dependabot[bot] c473db0788
chore(deps): bump taiki-e/install-action from 2.50.4 to 2.52.4 (#3015) 2025-06-01 21:31:03 -07:00
dependabot[bot] 85e639aef9
chore(deps): bump ossf/scorecard-action from 2.4.1 to 2.4.2 (#3014) 2025-06-01 21:21:45 -07:00
dependabot[bot] f1a541c3ca
chore(deps): bump codecov/codecov-action from 5.4.2 to 5.4.3 (#3013)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-06-01 20:34:16 -07:00
dependabot[bot] c30dc37002
chore(deps): bump fossas/fossa-action from 1.6.0 to 1.7.0 (#3012)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-01 13:22:45 -07:00
Gabriel 28becc0674
fix: with_cleared_baggage (#3006)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-05-30 08:19:46 -07:00
Joonas Bergius cab5565ba1
fix: use default endpoint for endpoint when provided empty string (#3000)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-05-30 07:51:40 -07:00
Cijo Thomas 62790608e1
fix: Small improvement to OTLP Exporter logs (#3007) 2025-05-30 07:13:26 -07:00
paullegranddc 167c94663a
fix(span_processor): only call on_start with recording spans (#3011) 2025-05-30 06:53:57 -07:00
Cijo Thomas 8e47d84922
chore: Add release notes for 0.30 (#3001)
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
2025-05-27 06:43:34 -07:00
Cijo Thomas 8882c31c95
chore: Nit fixes to examples (#3002) 2025-05-23 10:56:42 -07:00
102 changed files with 2797 additions and 963 deletions

View File

@@ -12,7 +12,8 @@
   "ignoreWords": [
     "otel",
     "rustdoc",
-    "rustfilt"
+    "rustfilt",
+    "webkpi"
   ],
   // these are words that are always considered incorrect.
   "flagWords": [
@@ -34,47 +35,61 @@
     "Bhasin",
     "Björn",
     "BLRP",
+    "chrono",
     "Cijo",
     "clippy",
     "clonable",
     "codecov",
+    "dashmap",
     "datapoint",
     "deque",
     "Dirkjan",
     "docsrs",
     "Dwarnings",
+    "eprintln",
     "EPYC",
     "flamegraph",
     "Gerring",
+    "grpcio",
     "Grübel",
     "hasher",
+    "impls",
+    "isahc",
     "Isobel",
     "jaegertracing",
     "Kühle",
     "Kumar",
     "Lalit",
     "LIBCLANG",
+    "logrecord",
     "MILLIS",
+    "mpsc",
     "msrv",
     "mykey",
+    "myunit",
     "myvalue",
     "nocapture",
     "Ochtman",
     "opentelemetry",
+    "openzipkin",
+    "otcorrelations",
     "OTELCOL",
     "OTLP",
     "periodicreader",
     "Pillai",
     "pprof",
+    "protos",
     "prost",
     "protoc",
     "quantile",
+    "quantiles",
     "Redelmeier",
     "reqwest",
     "rstest",
     "runtimes",
     "rustc",
     "rustls",
+    "schemars",
     "semconv",
     "serde",
     "shoppingcart",
@@ -83,10 +98,18 @@
     "testcontainers",
     "testresults",
     "thiserror",
+    "traceparent",
+    "Traceparent",
     "tracerprovider",
+    "tracestate",
+    "UCUM",
     "Umesan",
+    "unsampled",
     "updown",
+    "urlencoding",
+    "usize",
     "Utkarsh",
+    "webpki",
     "Zhongyang",
     "zipkin"
   ],

View File

@@ -1,18 +0,0 @@
-# Log of local changes
-
-Maintainers are expected to maintain this log. This is required as per
-[OpenTelemetry Community
-guidelines](https://github.com/open-telemetry/community/blob/main/docs/how-to-configure-new-repository.md#collaborators-and-teams).
-
-## May 6th 2024
-
-Modified branch protection for main branch to require the following CI checks as
-we now added Windows to CI.
-
-test (ubuntu-latest, stable)
-test (stable, windows-latest)
-
-## April 30th 2024
-
-Modified branch protection for main branch to require the following CI checks:
-docs
-test (stable)

View File

@@ -15,40 +15,45 @@ on:
 name: benchmark pull requests
 
 permissions:
   contents: read
 
 jobs:
   runBenchmark:
     name: run benchmark
     permissions:
       pull-requests: write
-    # If we're running on a PR, use ubuntu-latest - a shared runner. We can't use the self-hosted
-    # runners on arbitrary PRs, and we don't want to unleash that load on the pool anyway.
-    # If we're running on main, use the OTEL self-hosted runner pool.
-    # TODO - temporarily move main to the shared workers, until we've resolved the instance setup issue
-    # runs-on: ${{ github.event_name == 'pull_request' && 'ubuntu-latest' || 'self-hosted' }}
-    runs-on: 'ubuntu-latest'
+    # If we're running on main, use our oracle bare-metal runner for accuracy.
+    # If we're running on a PR, use github's shared workers to save resources.
+    runs-on: ${{ github.event_name == 'pull_request' && 'ubuntu-latest' || 'oracle-bare-metal-64cpu-512gb-x86-64' }}
     if: ${{ (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'performance')) || github.event_name == 'push' }}
+    container:
+      image: rust:slim-bullseye
     env:
       # For PRs, compare against the base branch - e.g., 'main'.
       # For pushes to main, compare against the previous commit
       BRANCH_NAME: ${{ github.event_name == 'pull_request' && github.base_ref || github.event.before }}
+      GIT_DISCOVERY_ACROSS_FILESYSTEM: 1
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
+      - name: Setup container environment
+        run: |
+          apt-get update && apt-get install --fix-missing -y unzip cmake build-essential pkg-config curl git
+          cargo install cargo-criterion
+      - name: Make repo safe for Git inside container
+        run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
-          fetch-depth: 10 # Fetch current commit and its parent
+          fetch-depth: 10 # Fetch a bit of history so we can do perf diffs
       - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
+        with:
+          toolchain: stable
       - uses: boa-dev/criterion-compare-action@adfd3a94634fe2041ce5613eb7df09d247555b87 # v3.2.4
         with:
           branchName: ${{ env.BRANCH_NAME }}

View File

@@ -32,7 +32,7 @@ jobs:
     continue-on-error: ${{ matrix.rust == 'beta' }}
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
@@ -61,7 +61,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
@@ -72,7 +72,7 @@ jobs:
         with:
           toolchain: stable
           components: rustfmt, clippy
-      - uses: taiki-e/install-action@33734a118689b0b418824fb78ea2bf18e970b43b # v2.50.4
+      - uses: taiki-e/install-action@0eee80d37f55e834144deec670972c19e81a85b0 # v2.56.0
         with:
           tool: cargo-hack
       - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
@@ -89,7 +89,7 @@ jobs:
     runs-on: ubuntu-latest # TODO: Check if this could be covered for Windows. The step used currently fails on Windows.
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
@@ -100,7 +100,7 @@ jobs:
           # https://github.com/awslabs/cargo-check-external-types/releases
           toolchain: nightly-2025-05-04
           components: rustfmt
-      - uses: taiki-e/install-action@33734a118689b0b418824fb78ea2bf18e970b43b # v2.50.4
+      - uses: taiki-e/install-action@0eee80d37f55e834144deec670972c19e81a85b0 # v2.56.0
         with:
          tool: cargo-check-external-types@0.2.0
       - name: external-type-check
@@ -114,7 +114,7 @@ jobs:
     continue-on-error: true
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
@@ -124,7 +124,7 @@ jobs:
       - uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
         with:
           toolchain: stable
-      - uses: taiki-e/install-action@33734a118689b0b418824fb78ea2bf18e970b43b # v2.50.4
+      - uses: taiki-e/install-action@0eee80d37f55e834144deec670972c19e81a85b0 # v2.56.0
         with:
           tool: cargo-msrv
       - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
@@ -137,29 +137,29 @@ jobs:
     continue-on-error: true # Prevent sudden announcement of a new advisory from failing ci
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Check advisories
-        uses: EmbarkStudios/cargo-deny-action@34899fc7ba81ca6268d5947a7a16b4649013fea1 # v2.0.11
+        uses: EmbarkStudios/cargo-deny-action@30f817c6f72275c6d54dc744fbca09ebc958599f # v2.0.12
         with:
           command: check advisories
       - name: Check licenses
-        uses: EmbarkStudios/cargo-deny-action@34899fc7ba81ca6268d5947a7a16b4649013fea1 # v2.0.11
+        uses: EmbarkStudios/cargo-deny-action@30f817c6f72275c6d54dc744fbca09ebc958599f # v2.0.12
         with:
           command: check licenses
       - name: Check bans
-        uses: EmbarkStudios/cargo-deny-action@34899fc7ba81ca6268d5947a7a16b4649013fea1 # v2.0.11
+        uses: EmbarkStudios/cargo-deny-action@30f817c6f72275c6d54dc744fbca09ebc958599f # v2.0.12
         with:
           command: check bans
       - name: Check sources
-        uses: EmbarkStudios/cargo-deny-action@34899fc7ba81ca6268d5947a7a16b4649013fea1 # v2.0.11
+        uses: EmbarkStudios/cargo-deny-action@30f817c6f72275c6d54dc744fbca09ebc958599f # v2.0.12
         with:
           command: check sources
@@ -168,7 +168,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
@@ -191,7 +191,7 @@ jobs:
     if: ${{ ! contains(github.event.pull_request.labels.*.name, 'dependencies') }}
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
@@ -206,7 +206,7 @@ jobs:
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install cargo-llvm-cov
-        uses: taiki-e/install-action@33734a118689b0b418824fb78ea2bf18e970b43b # v2.50.4
+        uses: taiki-e/install-action@0eee80d37f55e834144deec670972c19e81a85b0 # v2.56.0
         with:
           tool: cargo-llvm-cov
       - name: cargo generate-lockfile
@@ -215,7 +215,7 @@ jobs:
       - name: cargo llvm-cov
         run: cargo llvm-cov --locked --all-features --workspace --lcov --lib --output-path lcov.info
       - name: Upload to codecov.io
-        uses: codecov/codecov-action@ad3126e916f78f00edff4ed0317cf185271ccc2d # v5.4.2
+        uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
         env:
           CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
         with:
@@ -224,8 +224,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: dtolnay/rust-toolchain@4305c38b25d97ef35a8ad1f985ccf2d2242004f2 # stable
+      - uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # stable
         with:
+          toolchain: stable
           components: rustfmt
       - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
         with:
@@ -243,7 +244,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
@@ -253,7 +254,7 @@ jobs:
       - uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
         with:
           toolchain: stable
-      - uses: taiki-e/install-action@33734a118689b0b418824fb78ea2bf18e970b43b # v2.50.4
+      - uses: taiki-e/install-action@0eee80d37f55e834144deec670972c19e81a85b0 # v2.56.0
         with:
           tool: cargo-machete
       - name: cargo machete

View File

@@ -24,7 +24,7 @@ jobs:
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
@@ -34,12 +34,12 @@ jobs:
           submodules: true
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
+        uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
         with:
           languages: rust
       - name: Autobuild
-        uses: github/codeql-action/autobuild@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
+        uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
+        uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2

View File

@@ -13,13 +13,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: fossas/fossa-action@c0a7d013f84c8ee5e910593186598625513cc1e4 # v1.6.0
+      - uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0
         with:
           api-key: ${{secrets.FOSSA_API_KEY}}
           team: OpenTelemetry

View File

@@ -14,7 +14,7 @@ jobs:
     timeout-minutes: 10
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit

View File

@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit

View File

@@ -21,7 +21,7 @@ jobs:
       id-token: write
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
@@ -29,7 +29,7 @@ jobs:
         with:
           persist-credentials: false
-      - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
+      - uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
         with:
           results_file: results.sarif
           results_format: sarif
@@ -48,6 +48,6 @@ jobs:
       # Upload the results to GitHub's code scanning dashboard (optional).
       # Commenting out will disable upload of results to your repo's Code Scanning dashboard
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
+        uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
         with:
           sarif_file: results.sarif

View File

@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit

View File

@@ -13,7 +13,7 @@ jobs:
     if: ${{ github.event.label.name == 'semver-check' || contains(github.event.pull_request.labels.*.name, 'semver-check') }}
     steps:
       - name: Harden the runner (Audit all outbound calls)
-        uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
+        uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
         with:
           egress-policy: audit
@@ -26,4 +26,4 @@ jobs:
           toolchain: stable
           components: rustfmt
       - name: cargo-semver-checks
-        uses: obi1kenobi/cargo-semver-checks-action@7272cc2caa468d3e009a2b0a9cc366839348237b # v2.6
+        uses: obi1kenobi/cargo-semver-checks-action@5b298c9520f7096a4683c0bd981a7ac5a7e249ae # v2.8

View File

@@ -45,11 +45,11 @@ tonic = { version = "0.13", default-features = false }
 tonic-build = "0.13"
 tokio = { version = "1", default-features = false }
 tokio-stream = "0.1"
 # Using `tracing 0.1.40` because 0.1.39 (which is yanked) introduces the ability to set event names in macros,
 # required for OpenTelemetry's internal logging macros.
 tracing = { version = ">=0.1.40", default-features = false }
 # `tracing-core >=0.1.33` is required for compatibility with `tracing >=0.1.40`.
 tracing-core = { version = ">=0.1.33", default-features = false }
 tracing-subscriber = { version = "0.3", default-features = false }
 url = { version = "2.5", default-features = false }
 anyhow = "1.0.94"
@@ -59,8 +59,7 @@ ctor = "0.2.9"
 ctrlc = "3.2.5"
 futures-channel = "0.3"
 futures-sink = "0.3"
-glob = "0.3.1"
-hex = "0.4.3"
+const-hex = "1.14.1"
 lazy_static = "1.4.0"
 num-format = "0.4.4"
 num_cpus = "1.15.0"
@@ -74,7 +73,7 @@ sysinfo = "0.32"
 tempfile = "3.3.0"
 testcontainers = "0.23.1"
 tracing-log = "0.2"
-tracing-opentelemetry = "0.30"
+tracing-opentelemetry = "0.31"
 typed-builder = "0.20"
 uuid = "1.3"

View File

@@ -186,21 +186,27 @@ you're more than welcome to participate!
 * [Utkarsh Umesan Pillai](https://github.com/utpilla), Microsoft
 * [Zhongyang Wu](https://github.com/TommyCpp)
 
+For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
+
 ### Approvers
 
 * [Anton Grübel](https://github.com/gruebel), Baz
 * [Björn Antonsson](https://github.com/bantonsson), Datadog
-* [Shaun Cox](https://github.com/shaun-cox), Microsoft
 * [Scott Gerring](https://github.com/scottgerring), Datadog
+* [Shaun Cox](https://github.com/shaun-cox), Microsoft
+
+For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
 
 ### Emeritus
 
 * [Dirkjan Ochtman](https://github.com/djc)
+* [Isobel Redelmeier](https://github.com/iredelmeier)
 * [Jan Kühle](https://github.com/frigus02)
 * [Julian Tescher](https://github.com/jtescher)
-* [Isobel Redelmeier](https://github.com/iredelmeier)
 * [Mike Goldsmith](https://github.com/MikeGoldsmith)
+
+For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
 
 ### Thanks to all the people who have contributed
 
 [![contributors](https://contributors-img.web.app/image?repo=open-telemetry/opentelemetry-rust)](https://github.com/open-telemetry/opentelemetry-rust/graphs/contributors)

View File

@@ -4,13 +4,13 @@
 
 ## Summary
 
-This ADR describes the general pattern we will follow when modelling errors in public API interfaces - that is, APIs that are exposed to users of the project's published crates. It summarises the discussion and final option from [#2571](https://github.com/open-telemetry/opentelemetry-rust/issues/2571); for more context check out that issue.
+This ADR describes the general pattern we will follow when modelling errors in public API interfaces - that is, APIs that are exposed to users of the project's published crates. It summarizes the discussion and final option from [#2571](https://github.com/open-telemetry/opentelemetry-rust/issues/2571); for more context check out that issue.
 
 We will focus on the exporter traits in this example, but the outcome should be applied to _all_ public traits and their fallible operations.
 
 These include [SpanExporter](https://github.com/open-telemetry/opentelemetry-rust/blob/eca1ce87084c39667061281e662d5edb9a002882/opentelemetry-sdk/src/trace/export.rs#L18), [LogExporter](https://github.com/open-telemetry/opentelemetry-rust/blob/eca1ce87084c39667061281e662d5edb9a002882/opentelemetry-sdk/src/logs/export.rs#L115), and [PushMetricExporter](https://github.com/open-telemetry/opentelemetry-rust/blob/eca1ce87084c39667061281e662d5edb9a002882/opentelemetry-sdk/src/metrics/exporter.rs#L11) which form part of the API surface of `opentelemetry-sdk`.
 
-There are various ways to handle errors on trait methods, including swallowing them and logging, panicing, returning a shared global error, or returning a method-specific error. We strive for consistency, and we want to be sure that we've put enough thought into what this looks like that we don't have to make breaking interface changes unecessarily in the future.
+There are various ways to handle errors on trait methods, including swallowing them and logging, panicking, returning a shared global error, or returning a method-specific error. We strive for consistency, and we want to be sure that we've put enough thought into what this looks like that we don't have to make breaking interface changes unnecessarily in the future.
 
 ## Design Guidance
@@ -69,7 +69,7 @@ trait MyTrait {
 
 ## 3. Consolidate error types between signals where we can, let them diverge where we can't
 
-Consider the `Exporter`s mentioned earlier. Each of them has the same failure indicators - as dicated by the OpenTelemetry spec - and we will
+Consider the `Exporter`s mentioned earlier. Each of them has the same failure indicators - as dictated by the OpenTelemetry spec - and we will
 share the error types accordingly:
 
 **Don't do this** - each signal has its own error type, despite having exactly the same failure cases:
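
For readers skimming the compare view, the consolidated-error pattern this ADR settles on can be sketched as follows. This is a minimal illustration of the idea only; `ExportError` and its variants are hypothetical stand-ins, not the crate's actual type names:

```rust
use std::time::Duration;

// Hypothetical shared error type: one enum covering the failure cases the
// OpenTelemetry spec defines for every exporter, regardless of signal.
#[derive(Debug)]
pub enum ExportError {
    /// The exporter was asked to do work after shutdown completed.
    AlreadyShutdown,
    /// The operation did not finish within the allotted time.
    Timeout(Duration),
    /// Anything else; carries a human-readable explanation.
    InternalFailure(String),
}

// Each signal's exporter trait returns the same shared type instead of
// defining its own near-identical error enum per signal.
pub trait Exporter {
    fn force_flush(&mut self) -> Result<(), ExportError>;
    fn shutdown(&mut self) -> Result<(), ExportError>;
}
```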

View File

@@ -485,9 +485,9 @@ practice is important:
 * **Delta Temporality**: The SDK "forgets" the state after each
   collection/export cycle. This means in each new interval, the SDK can track
-  up to the cardinality limit of distinct attribute combinations.
-  Over time, your metrics backend might see far more than the configured limit
-  of distinct combinations from a single process.
+  up to the cardinality limit of distinct attribute combinations. Over time,
+  your metrics backend might see far more than the configured limit of
+  distinct combinations from a single process.
 
 * **Cumulative Temporality**: Since the SDK maintains state across export
   intervals, once the cardinality limit is reached, new attribute combinations
@@ -560,7 +560,108 @@ The exported metrics would be:
 words, attributes used to create `Meter` or `Resource` attributes are not
 subject to this cap.
 
-// TODO: Document how to pick cardinality limit.
+#### Cardinality Limits - How to Choose the Right Limit
+
+Choosing the right cardinality limit is crucial for maintaining efficient memory
+usage and predictable performance in your metrics system. The optimal limit
+depends on your temporality choice and application characteristics.
+
+Setting the limit incorrectly can have consequences:
+
+* **Limit too high**: Due to the SDK's [memory
+  preallocation](#memory-preallocation) strategy, excess memory will be
+  allocated upfront and remain unused, leading to resource waste.
+* **Limit too low**: Measurements will be folded into the overflow bucket
+  (`{"otel.metric.overflow": true}`), losing granular attribute information and
+  making attribute-based queries unreliable.
+
+Consider these guidelines when determining the appropriate limit:
+
+##### Choosing the Right Limit for Cumulative Temporality
+
+Cumulative metrics retain every unique attribute combination that has *ever*
+been observed since the start of the process.
+
+* You must account for the theoretical maximum number of attribute combinations.
+* This can be estimated by multiplying the number of possible values for each
+  attribute.
+* If certain attribute combinations are invalid or will never occur in practice,
+  you can reduce the limit accordingly.
+
+###### Example - Fruit Sales Scenario
+
+Attributes:
+
+* `name` can be "apple" or "lemon" (2 values)
+* `color` can be "red", "yellow", or "green" (3 values)
+
+The theoretical maximum is 2 × 3 = 6 unique attribute sets.
+
+For this example, the simplest approach is to use the theoretical maximum and
+**set the cardinality limit to 6**.
+
+However, if you know that certain combinations will never occur (for example,
+if "red lemons" don't exist in your application domain), you could reduce the
+limit to only account for valid combinations. In this case, if only 5
+combinations are valid, **setting the cardinality limit to 5** would be more
+memory-efficient.
+
+##### Choosing the Right Limit for Delta Temporality
+
+Delta metrics reset their aggregation state after every export interval. This
+approach enables more efficient memory utilization by focusing only on
+attributes observed during each interval rather than maintaining state for all
+combinations.
+
+* **When attributes are low-cardinality** (as in the fruit example), use the
+  same calculation method as with cumulative temporality.
+* **When high-cardinality attribute(s) exist** like `user_id`, leverage Delta
+  temporality's "forget state" nature to set a much lower limit based on active
+  usage patterns. This is where Delta temporality truly excels - when the set
+  of active values changes dynamically and only a small subset is active during
+  any given interval.
+
+###### Example - High Cardinality Attribute Scenario
+
+Export interval: 60 sec
+
+Attributes:
+
+* `user_id` (up to 1 million unique users)
+* `success` (true or false, 2 values)
+
+Theoretical limit: 1 million users × 2 = 2 million attribute sets
+
+But if only 10,000 users are typically active during a 60 sec export interval:
+10,000 × 2 = 20,000
+
+**You can set the limit to 20,000, dramatically reducing memory usage during
+normal operation.**
+
+###### Export Interval Tuning
+
+Shorter export intervals further reduce the required cardinality:
+
+* If your interval is halved (e.g., from 60 sec to 30 sec), the number of
+  unique attribute sets seen per interval may also be halved.
+
+> [!NOTE] More frequent exports increase CPU/network overhead due to
+> serialization and transmission costs.
+
+##### Choosing the Right Limit - Backend Considerations
+
+While delta temporality offers certain advantages for cardinality management,
+your choice may be constrained by backend support:
+
+* **Backend Restrictions:** Some metrics backends only support cumulative
+  temporality. For example, Prometheus requires cumulative temporality and
+  cannot directly consume delta metrics.
+* **Collector Conversion:** To leverage delta temporality's memory advantages
+  while maintaining backend compatibility, configure your SDK to use delta
+  temporality and deploy an OpenTelemetry Collector with a delta-to-cumulative
+  conversion processor. This approach pushes the memory overhead from your
+  application to the collector, which can be more easily scaled and managed
+  independently.
+
+TODO: Add the memory cost incurred by each data point, so users can know the
+memory impact of setting a higher limit.
+
+TODO: Add an example of how queries can be affected when overflow occurs, using
+the [Aspire](https://github.com/dotnet/aspire/pull/7784) tool.
### Memory Preallocation ### Memory Preallocation
@@ -622,7 +723,7 @@ Follow these guidelines when deciding where to attach metric attributes:
 
 * **Meter-level attributes**: If the dimension applies only to a subset of
   metrics (e.g., library version), model it as meter-level attributes via
   `meter_with_scope`.
 
 ```rust
 // Example: Setting meter-level attributes
 let scope = InstrumentationScope::builder("payment_library")
@@ -660,3 +761,7 @@ Common pitfalls that can result in missing metrics include:
   used, some metrics may be placed in the overflow bucket.
 
 // TODO: Add more specific examples
+
+## References
+
+[OTel Metrics Specification - Supplementary Guidelines](https://opentelemetry.io/docs/specs/otel/metrics/supplementary-guidelines/)
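
To make the sizing guidance above concrete, here is a minimal sketch of wiring a per-instrument cardinality limit through a view, assuming the 0.30 view API (`SdkMeterProvider::builder().with_view(...)` returning an optional `Stream` built with `Stream::builder().with_cardinality_limit(...)`); the instrument name and the limit of 20,000 are illustrative, echoing the high-cardinality example:

```rust
use opentelemetry::{global, KeyValue};
use opentelemetry_sdk::metrics::{Instrument, SdkMeterProvider, Stream};

fn main() {
    // Cap the hypothetical "request.duration" histogram at the 20,000 sets
    // estimated above; other instruments keep the SDK's default limit.
    let provider = SdkMeterProvider::builder()
        .with_view(|i: &Instrument| {
            if i.name() == "request.duration" {
                // build() fails on an invalid Stream; `.ok()` treats that as
                // "this view does not match the instrument".
                Stream::builder().with_cardinality_limit(20_000).build().ok()
            } else {
                None
            }
        })
        .build();
    global::set_meter_provider(provider.clone());

    let histogram = global::meter("example")
        .f64_histogram("request.duration")
        .build();
    histogram.record(
        0.042,
        &[KeyValue::new("user_id", "u123"), KeyValue::new("success", true)],
    );

    provider.shutdown().expect("shutdown failed");
}
```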

docs/release_0.30.md (new file, 68 lines)
View File

@@ -0,0 +1,68 @@
+# Release Notes 0.30
+
+OpenTelemetry Rust 0.30 introduces a few breaking changes to the
+`opentelemetry_sdk` crate in the `metrics` feature. These changes were essential
+to drive the Metrics SDK towards stability. With this release, the Metrics SDK
+is officially declared stable. The Metrics API was declared stable last year,
+and previously, the Logs API, SDK, and OTel-Appender-Tracing were also marked
+stable. Importantly, no breaking changes have been introduced to components
+already marked as stable.
+
+It is worth noting that the `opentelemetry-otlp` crate remains in a
+Release-Candidate state and is not yet considered stable. With the API and SDK
+for Logs and Metrics now stable, the focus will shift towards further refining
+and stabilizing the OTLP Exporters in upcoming releases. Additionally,
+Distributed Tracing is expected to progress towards stability, addressing key
+interoperability challenges.
+
+For detailed changelogs of individual crates, please refer to their respective
+changelog files. This document serves as a summary of the main changes.
+
+## Key Changes
+
+### Metrics SDK Improvements
+
+1. **Stabilized "view" features**: Previously under an experimental feature
+   flag, views can now be used to modify the name, unit, description, and
+   cardinality limit of a metric. Advanced view capabilities, such as changing
+   aggregation or dropping attributes, remain under the experimental feature
+   flag.
+2. **Cardinality capping**: Introduced the ability to cap cardinality and
+   configure limits using views.
+3. **Polished public API**: Refined the public API to hide implementation
+   details from exporters, enabling future internal optimizations and ensuring
+   consistency. Some APIs related to authoring custom metric readers have been
+   moved behind experimental feature flags. These advanced use cases require
+   more time to finalize the API surface before being included in the stable
+   release.
+
+### Context-Based Suppression
+
+Added the ability to suppress telemetry based on Context. This feature prevents
+telemetry-induced-telemetry scenarios and addresses a long-standing issue. Note
+that suppression relies on proper context propagation. Certain libraries used in
+OTLP Exporters utilize `tracing` but do not adopt OpenTelemetry's context
+propagation. As a result, not all telemetry is automatically suppressed with
+this feature. Improvements in this area are expected in future releases.
+
+## Next Release
+
+In the [next
+release](https://github.com/open-telemetry/opentelemetry-rust/milestone/22), the
+focus will shift to OTLP Exporters and Distributed Tracing, specifically
+resolving
+[interoperability](https://github.com/open-telemetry/opentelemetry-rust/issues/2420)
+issues with `tokio-tracing` and other fixes required to drive Distributed
+Tracing towards stability.
+
+## Acknowledgments
+
+Thank you to everyone who contributed to this milestone. We welcome your
+feedback through GitHub issues or discussions in the OTel-Rust Slack channel
+[here](https://cloud-native.slack.com/archives/C03GDP0H023).
+
+We are also excited to announce that [Anton Grübel](https://github.com/gruebel)
+and [Björn Antonsson](https://github.com/bantonsson) have joined the OTel Rust
+project as Approvers.
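
The suppression feature described in these notes is opt-in at the call site. A minimal sketch of how an exporter might use it, assuming the `Context::enter_telemetry_suppressed_scope()` entry point added in 0.30 (verify the exact name against the `opentelemetry` changelog before relying on it):

```rust
use opentelemetry::Context;

fn export_batch(payload: &[u8]) {
    // While the guard is alive, telemetry emitted on this thread is marked
    // suppressed, so the exporter's own `tracing` events don't loop back
    // into the pipeline (telemetry-induced-telemetry).
    let _guard = Context::enter_telemetry_suppressed_scope();

    // ... perform the actual network send of `payload` here ...
    let _ = payload;
}
```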

View File

@@ -12,6 +12,3 @@ Run the following, and the Metrics will be written out to stdout.
 
 ```shell
 $ cargo run
 ```

View File

@@ -85,15 +85,23 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
         .with_description("My histogram example description")
         .build();
 
-    // Record measurements using the histogram instrument.
-    // This metric will have a cardinality limit of 2,
-    // as set in the view. Because of this, only the first two
-    // measurements will be recorded, and the rest will be folded
-    // into the overflow attribute.
+    // Record measurements using the histogram instrument. This metric will have
+    // a cardinality limit of 2, as set in the view. Because of this, only the
+    // first two distinct attribute combinations will be recorded, and the rest
+    // will be folded into the overflow attribute. Any number of measurements
+    // can be recorded as long as they use the same or already-seen attribute
+    // combinations.
     histogram2.record(1.5, &[KeyValue::new("mykey1", "v1")]);
     histogram2.record(1.2, &[KeyValue::new("mykey1", "v2")]);
+
+    // Repeatedly emitting measurements for "v1" and "v2" will not
+    // trigger overflow, as they are already seen attribute combinations.
+    histogram2.record(1.7, &[KeyValue::new("mykey1", "v1")]);
+    histogram2.record(1.8, &[KeyValue::new("mykey1", "v2")]);
+
+    // Emitting measurements for new attribute combinations will trigger
+    // overflow, as the cardinality limit of 2 has been reached.
+    // All the below measurements will be folded into the overflow attribute.
     histogram2.record(1.23, &[KeyValue::new("mykey1", "v3")]);
     histogram2.record(1.4, &[KeyValue::new("mykey1", "v4")]);
@@ -104,9 +112,9 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
     histogram2.record(1.8, &[KeyValue::new("mykey1", "v7")]);
 
-    // Metrics are exported by default every 30 seconds when using stdout exporter,
+    // Metrics are exported by default every 60 seconds when using stdout exporter,
     // however shutting down the MeterProvider here instantly flushes
-    // the metrics, instead of waiting for the 30 sec interval.
+    // the metrics, instead of waiting for the 60 sec interval.
     meter_provider.shutdown()?;
 
     Ok(())
 }

View File

@@ -11,6 +11,3 @@ Run the following, and the Metrics will be written out to stdout.
 
 ```shell
 $ cargo run
 ```

View File

@@ -136,9 +136,9 @@ async fn main() -> Result<(), Box<dyn Error>> {
         })
         .build();
 
-    // Metrics are exported by default every 30 seconds when using stdout
+    // Metrics are exported by default every 60 seconds when using stdout
     // exporter, however shutting down the MeterProvider here instantly flushes
-    // the metrics, instead of waiting for the 30 sec interval. Shutdown returns
+    // the metrics, instead of waiting for the 60 sec interval. Shutdown returns
     // a result, which is bubbled up to the caller. The commented code below
     // demonstrates handling the shutdown result, instead of bubbling up the
     // error.
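
The 60-second default these comments refer to comes from the periodic reader. A minimal sketch of overriding it, assuming the 0.30 `PeriodicReader` builder and the `opentelemetry-stdout` metric exporter (verify method names against those crates' docs):

```rust
use std::time::Duration;

use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider};

fn main() {
    // Export every 5 seconds instead of the 60-second default.
    let exporter = opentelemetry_stdout::MetricExporter::default();
    let reader = PeriodicReader::builder(exporter)
        .with_interval(Duration::from_secs(5))
        .build();
    let provider = SdkMeterProvider::builder().with_reader(reader).build();

    // ... create meters and instruments, record measurements ...

    // Shutdown flushes immediately instead of waiting for the interval.
    provider.shutdown().expect("shutdown failed");
}
```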

View File

@@ -687,7 +687,7 @@ mod any_value {
         ) -> Result<(), Self::Error> {
             let key = match key.serialize(ValueSerializer)? {
                 Some(AnyValue::String(key)) => Key::from(String::from(key)),
-                key => Key::from(format!("{:?}", key)),
+                key => Key::from(format!("{key:?}")),
             };
 
             self.key = Some(key);

View File

@@ -2,6 +2,12 @@
 
 ## vNext
 
+## 0.30.1
+
+Released 2025-June-05
+
+- Bump `tracing-opentelemetry` to 0.31
+
 ## 0.30.0
 
 Released 2025-May-23

View File

@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-appender-tracing"
-version = "0.30.0"
+version = "0.30.1"
 edition = "2021"
 description = "An OpenTelemetry log appender for the tracing crate"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-appender-tracing"
@@ -18,7 +18,7 @@ tracing = { workspace = true, features = ["std"]}
 tracing-core = { workspace = true }
 tracing-log = { workspace = true, optional = true }
 tracing-subscriber = { workspace = true, features = ["registry", "std"] }
-# tracing-opentelemetry = { workspace = true, optional = true }
+tracing-opentelemetry = { workspace = true, optional = true }
 
 [dev-dependencies]
 log = { workspace = true }
@@ -37,8 +37,7 @@ pprof = { version = "0.14", features = ["flamegraph", "criterion"] }
 default = []
 experimental_metadata_attributes = ["dep:tracing-log"]
 spec_unstable_logs_enabled = ["opentelemetry/spec_unstable_logs_enabled"]
-# TODO: Enable this back in 0.30.1 after tracing-opentelemetry is released
-# experimental_use_tracing_span_context = ["tracing-opentelemetry"]
+experimental_use_tracing_span_context = ["tracing-opentelemetry"]
 
 [[bench]]
View File

@@ -60,7 +60,7 @@ fn create_benchmark(c: &mut Criterion, num_attributes: usize) {
     let subscriber = Registry::default().with(ot_layer);
 
     tracing::subscriber::with_default(subscriber, || {
-        c.bench_function(&format!("otel_{}_attributes", num_attributes), |b| {
+        c.bench_function(&format!("otel_{num_attributes}_attributes"), |b| {
             b.iter(|| {
                 // Dynamically generate the error! macro call based on the number of attributes
                 match num_attributes {

View File

@@ -73,7 +73,7 @@ impl<LR: LogRecord> tracing::field::Visit for EventVisitor<'_, LR> {
             return;
         }
         if field.name() == "message" {
-            self.log_record.set_body(format!("{:?}", value).into());
+            self.log_record.set_body(format!("{value:?}").into());
         } else {
             self.log_record
                 .add_attribute(Key::new(field.name()), AnyValue::from(format!("{value:?}")));
@@ -244,28 +244,28 @@ where
         // Visit fields.
         event.record(&mut visitor);
 
-        // #[cfg(feature = "experimental_use_tracing_span_context")]
-        // if let Some(span) = _ctx.event_span(event) {
-        //     use opentelemetry::trace::TraceContextExt;
-        //     use tracing_opentelemetry::OtelData;
-        //     if let Some(otd) = span.extensions().get::<OtelData>() {
-        //         if let Some(span_id) = otd.builder.span_id {
-        //             let opt_trace_id = if otd.parent_cx.has_active_span() {
-        //                 Some(otd.parent_cx.span().span_context().trace_id())
-        //             } else {
-        //                 span.scope().last().and_then(|root_span| {
-        //                     root_span
-        //                         .extensions()
-        //                         .get::<OtelData>()
-        //                         .and_then(|otd| otd.builder.trace_id)
-        //                 })
-        //             };
-        //             if let Some(trace_id) = opt_trace_id {
-        //                 log_record.set_trace_context(trace_id, span_id, None);
-        //             }
-        //         }
-        //     }
-        // }
+        #[cfg(feature = "experimental_use_tracing_span_context")]
+        if let Some(span) = _ctx.event_span(event) {
+            use opentelemetry::trace::TraceContextExt;
+            use tracing_opentelemetry::OtelData;
+            if let Some(otd) = span.extensions().get::<OtelData>() {
+                if let Some(span_id) = otd.builder.span_id {
+                    let opt_trace_id = if otd.parent_cx.has_active_span() {
+                        Some(otd.parent_cx.span().span_context().trace_id())
+                    } else {
+                        span.scope().last().and_then(|root_span| {
+                            root_span
+                                .extensions()
+                                .get::<OtelData>()
+                                .and_then(|otd| otd.builder.trace_id)
+                        })
+                    };
+                    if let Some(trace_id) = opt_trace_id {
+                        log_record.set_trace_context(trace_id, span_id, None);
+                    }
+                }
+            }
+        }
 
         //emit record
         self.logger.emit(log_record);
@@ -611,117 +611,117 @@ mod tests {
         }
     }
 
-    // #[cfg(feature = "experimental_use_tracing_span_context")]
-    // #[test]
-    // fn tracing_appender_inside_tracing_crate_context() {
-    //     use opentelemetry::{trace::SpanContext, Context, SpanId, TraceId};
-    //     use opentelemetry_sdk::trace::InMemorySpanExporterBuilder;
-    //     use tracing_opentelemetry::OpenTelemetrySpanExt;
-    //
-    //     // Arrange
-    //     let exporter: InMemoryLogExporter = InMemoryLogExporter::default();
-    //     let logger_provider = SdkLoggerProvider::builder()
-    //         .with_simple_exporter(exporter.clone())
-    //         .build();
-    //
-    //     // setup tracing layer to compare trace/span IDs against
-    //     let span_exporter = InMemorySpanExporterBuilder::new().build();
-    //     let tracer_provider = SdkTracerProvider::builder()
-    //         .with_simple_exporter(span_exporter.clone())
-    //         .build();
-    //     let tracer = tracer_provider.tracer("test-tracer");
-    //
-    //     let level_filter = tracing_subscriber::filter::LevelFilter::ERROR;
-    //     let log_layer =
-    //         layer::OpenTelemetryTracingBridge::new(&logger_provider).with_filter(level_filter);
-    //
-    //     let subscriber = tracing_subscriber::registry()
-    //         .with(log_layer)
-    //         .with(tracing_opentelemetry::layer().with_tracer(tracer));
-    //
-    //     // Avoiding global subscriber.init() as that does not play well with unit tests.
-    //     let _guard = tracing::subscriber::set_default(subscriber);
-    //
-    //     // Act
-    //     tracing::error_span!("outer-span").in_scope(|| {
-    //         error!("first-event");
-    //
-    //         tracing::error_span!("inner-span").in_scope(|| {
-    //             error!("second-event");
-    //         });
-    //     });
-    //
-    //     assert!(logger_provider.force_flush().is_ok());
-    //
-    //     let logs = exporter.get_emitted_logs().expect("No emitted logs");
-    //     assert_eq!(logs.len(), 2, "Expected 2 logs, got: {logs:?}");
-    //
-    //     let spans = span_exporter.get_finished_spans().unwrap();
-    //     assert_eq!(spans.len(), 2);
-    //
-    //     let trace_id = spans[0].span_context.trace_id();
-    //     assert_eq!(trace_id, spans[1].span_context.trace_id());
-    //     let inner_span_id = spans[0].span_context.span_id();
-    //     let outer_span_id = spans[1].span_context.span_id();
-    //     assert_eq!(outer_span_id, spans[0].parent_span_id);
-    //
-    //     let trace_ctx0 = logs[0].record.trace_context().unwrap();
-    //     let trace_ctx1 = logs[1].record.trace_context().unwrap();
-    //
-    //     assert_eq!(trace_ctx0.trace_id, trace_id);
-    //     assert_eq!(trace_ctx1.trace_id, trace_id);
-    //     assert_eq!(trace_ctx0.span_id, outer_span_id);
-    //     assert_eq!(trace_ctx1.span_id, inner_span_id);
-    //
-    //     // Set context from remote.
-    //     let remote_trace_id = TraceId::from_u128(233);
-    //     let remote_span_id = SpanId::from_u64(2333);
-    //     let remote_span_context = SpanContext::new(
-    //         remote_trace_id,
-    //         remote_span_id,
-    //         TraceFlags::SAMPLED,
-    //         true,
-    //         Default::default(),
-    //     );
-    //
-    //     // Act again.
-    //     tracing::error_span!("outer-span").in_scope(|| {
-    //         let span = tracing::Span::current();
-    //         let parent_context = Context::current().with_remote_span_context(remote_span_context);
-    //         span.set_parent(parent_context);
-    //
-    //         error!("first-event");
-    //
-    //         tracing::error_span!("inner-span").in_scope(|| {
-    //             error!("second-event");
-    //         });
-    //     });
-    //
-    //     assert!(logger_provider.force_flush().is_ok());
-    //
-    //     let logs = exporter.get_emitted_logs().expect("No emitted logs");
-    //     assert_eq!(logs.len(), 4, "Expected 4 logs, got: {logs:?}");
-    //     let logs = &logs[2..];
-    //
-    //     let spans = span_exporter.get_finished_spans().unwrap();
-    //     assert_eq!(spans.len(), 4);
-    //     let spans = &spans[2..];
-    //
-    //     let trace_id = spans[0].span_context.trace_id();
-    //     assert_eq!(trace_id, remote_trace_id);
-    //     assert_eq!(trace_id, spans[1].span_context.trace_id());
-    //     let inner_span_id = spans[0].span_context.span_id();
-    //     let outer_span_id = spans[1].span_context.span_id();
-    //     assert_eq!(outer_span_id, spans[0].parent_span_id);
-    //
-    //     let trace_ctx0 = logs[0].record.trace_context().unwrap();
-    //     let trace_ctx1 = logs[1].record.trace_context().unwrap();
-    //
-    //     assert_eq!(trace_ctx0.trace_id, trace_id);
-    //     assert_eq!(trace_ctx1.trace_id, trace_id);
-    //     assert_eq!(trace_ctx0.span_id, outer_span_id);
-    //     assert_eq!(trace_ctx1.span_id, inner_span_id);
+    #[cfg(feature = "experimental_use_tracing_span_context")]
+    #[test]
+    fn tracing_appender_inside_tracing_crate_context() {
+        use opentelemetry::{trace::SpanContext, Context, SpanId, TraceId};
+        use opentelemetry_sdk::trace::InMemorySpanExporterBuilder;
+        use tracing_opentelemetry::OpenTelemetrySpanExt;
+
+        // Arrange
+        let exporter: InMemoryLogExporter = InMemoryLogExporter::default();
+        let logger_provider = SdkLoggerProvider::builder()
+            .with_simple_exporter(exporter.clone())
+            .build();
+
+        // setup tracing layer to compare trace/span IDs against
+        let span_exporter = InMemorySpanExporterBuilder::new().build();
+        let tracer_provider = SdkTracerProvider::builder()
+            .with_simple_exporter(span_exporter.clone())
+            .build();
+        let tracer = tracer_provider.tracer("test-tracer");
+
+        let level_filter = tracing_subscriber::filter::LevelFilter::ERROR;
+        let log_layer =
+            layer::OpenTelemetryTracingBridge::new(&logger_provider).with_filter(level_filter);
+
+        let subscriber = tracing_subscriber::registry()
+            .with(log_layer)
+            .with(tracing_opentelemetry::layer().with_tracer(tracer));
+
+        // Avoiding global subscriber.init() as that does not play well with unit tests.
+        let _guard = tracing::subscriber::set_default(subscriber);
+
+        // Act
+        tracing::error_span!("outer-span").in_scope(|| {
+            error!("first-event");
+
+            tracing::error_span!("inner-span").in_scope(|| {
+                error!("second-event");
+            });
+        });
+
+        assert!(logger_provider.force_flush().is_ok());
+
+        let logs = exporter.get_emitted_logs().expect("No emitted logs");
+        assert_eq!(logs.len(), 2, "Expected 2 logs, got: {logs:?}");
+
+        let spans = span_exporter.get_finished_spans().unwrap();
+        assert_eq!(spans.len(), 2);
+
+        let trace_id = spans[0].span_context.trace_id();
+        assert_eq!(trace_id, spans[1].span_context.trace_id());
+        let inner_span_id = spans[0].span_context.span_id();
+        let outer_span_id = spans[1].span_context.span_id();
+        assert_eq!(outer_span_id, spans[0].parent_span_id);
+
+        let trace_ctx0 = logs[0].record.trace_context().unwrap();
+        let trace_ctx1 = logs[1].record.trace_context().unwrap();
+
+        assert_eq!(trace_ctx0.trace_id, trace_id);
+        assert_eq!(trace_ctx1.trace_id, trace_id);
+        assert_eq!(trace_ctx0.span_id, outer_span_id);
+        assert_eq!(trace_ctx1.span_id, inner_span_id);
+
+        // Set context from remote.
+        let remote_trace_id = TraceId::from_u128(233);
+        let remote_span_id = SpanId::from_u64(2333);
+        let remote_span_context = SpanContext::new(
+            remote_trace_id,
+            remote_span_id,
+            TraceFlags::SAMPLED,
+            true,
+            Default::default(),
+        );
+
+        // Act again.
+        tracing::error_span!("outer-span").in_scope(|| {
+            let span = tracing::Span::current();
+            let parent_context = Context::current().with_remote_span_context(remote_span_context);
+            span.set_parent(parent_context);
+
+            error!("first-event");
+
+            tracing::error_span!("inner-span").in_scope(|| {
+                error!("second-event");
+            });
+        });
+
+        assert!(logger_provider.force_flush().is_ok());
+
+        let logs = exporter.get_emitted_logs().expect("No emitted logs");
+        assert_eq!(logs.len(), 4, "Expected 4 logs, got: {logs:?}");
+        let logs = &logs[2..];
+
+        let spans = span_exporter.get_finished_spans().unwrap();
+        assert_eq!(spans.len(), 4);
+        let spans = &spans[2..];
+
+        let trace_id = spans[0].span_context.trace_id();
+        assert_eq!(trace_id, remote_trace_id);
+        assert_eq!(trace_id, spans[1].span_context.trace_id());
+        let inner_span_id = spans[0].span_context.span_id();
+        let outer_span_id = spans[1].span_context.span_id();
+        assert_eq!(outer_span_id, spans[0].parent_span_id);
+
+        let trace_ctx0 = logs[0].record.trace_context().unwrap();
+        let trace_ctx1 = logs[1].record.trace_context().unwrap();
+
+        assert_eq!(trace_ctx0.trace_id, trace_id);
+        assert_eq!(trace_ctx1.trace_id, trace_id);
+        assert_eq!(trace_ctx0.span_id, outer_span_id);
+        assert_eq!(trace_ctx1.span_id, inner_span_id);
// } }
#[test] #[test]
fn tracing_appender_standalone_with_tracing_log() { fn tracing_appender_standalone_with_tracing_log() {

View File

@@ -2,6 +2,9 @@
 ## vNext
+- Implementation of `Extractor::get_all` for `HeaderExtractor`
+- Support `HttpClient` implementation for `HyperClient<C>` with custom connectors beyond `HttpConnector`, enabling Unix Domain Socket connections and other custom transports
 ## 0.30.0
 Released 2025-May-23

@@ -51,7 +54,7 @@ Released 2024-Sep-30
 ## v0.12.0
-- Add `reqwest-rustls-webkpi-roots` feature flag to configure [`reqwest`](https://docs.rs/reqwest/0.11.27/reqwest/index.html#optional-features) to use embedded `webkpi-roots`.
+- Add `reqwest-rustls-webpki-roots` feature flag to configure [`reqwest`](https://docs.rs/reqwest/0.11.27/reqwest/index.html#optional-features) to use embedded `webpki-roots`.
 - Update `opentelemetry` dependency version to 0.23
 ## v0.11.1
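Both vNext entries land in the `lib.rs` hunks below. For the first, a minimal usage sketch of the new method; everything here comes from the diff itself (`HeaderExtractor` wraps an `http::HeaderMap`, and `get_all` is the trait method added on `Extractor`), and note that `HeaderMap` key lookups are case-insensitive:

```rust
use opentelemetry::propagation::Extractor;
use opentelemetry_http::HeaderExtractor;

fn main() {
    let mut headers = http::HeaderMap::new();
    headers.append("baggage", http::HeaderValue::from_static("k1=v1"));
    headers.append("baggage", http::HeaderValue::from_static("k2=v2"));

    let extractor = HeaderExtractor(&headers);
    // Every value recorded for the key, where `get` would return only the first.
    assert_eq!(extractor.get_all("baggage"), Some(vec!["k1=v1", "k2=v2"]));
    // A missing key yields None rather than Some(vec![]).
    assert_eq!(extractor.get_all("missing"), None);
}
```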

View File

@@ -43,6 +43,16 @@ impl Extractor for HeaderExtractor<'_> {
             .map(|value| value.as_str())
             .collect::<Vec<_>>()
     }
+
+    /// Get all the values for a key from the HeaderMap
+    fn get_all(&self, key: &str) -> Option<Vec<&str>> {
+        let all_iter = self.0.get_all(key).iter();
+        if let (0, Some(0)) = all_iter.size_hint() {
+            return None;
+        }
+        Some(all_iter.filter_map(|value| value.to_str().ok()).collect())
+    }
 }

 pub type HttpError = Box<dyn std::error::Error + Send + Sync + 'static>;

@@ -169,7 +179,11 @@ pub mod hyper {
     }

     #[async_trait]
-    impl HttpClient for HyperClient {
+    impl<C> HttpClient for HyperClient<C>
+    where
+        C: Connect + Clone + Send + Sync + 'static,
+        HyperClient<C>: Debug,
+    {
         async fn send_bytes(&self, request: Request<Bytes>) -> Result<Response<Bytes>, HttpError> {
             otel_debug!(name: "HyperClient.Send");
             let (parts, body) = request.into_parts();

@@ -236,6 +250,8 @@ impl<T> ResponseExt for Response<T> {
 #[cfg(test)]
 mod tests {
+    use http::HeaderValue;
+
     use super::*;

     #[test]

@@ -250,6 +266,32 @@ mod tests {
         )
     }

+    #[test]
+    fn http_headers_get_all() {
+        let mut carrier = http::HeaderMap::new();
+        carrier.append("headerName", HeaderValue::from_static("value"));
+        carrier.append("headerName", HeaderValue::from_static("value2"));
+        carrier.append("headerName", HeaderValue::from_static("value3"));
+        assert_eq!(
+            HeaderExtractor(&carrier).get_all("HEADERNAME"),
+            Some(vec!["value", "value2", "value3"]),
+            "all values from a key extraction"
+        )
+    }
+
+    #[test]
+    fn http_headers_get_all_missing_key() {
+        let mut carrier = http::HeaderMap::new();
+        carrier.append("headerName", HeaderValue::from_static("value"));
+        assert_eq!(
+            HeaderExtractor(&carrier).get_all("not_existing"),
+            None,
+            "all values from a missing key extraction"
+        )
+    }
+
     #[test]
     fn http_headers_keys() {
         let mut carrier = http::HeaderMap::new();
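For the second vNext entry, the widened impl above means any connector meeting hyper-util's `Connect` bounds can now back the client. A type-level sketch of what that buys; the `HyperClient` and `HttpClient` paths follow the `pub mod hyper` shown above, the `Connect` import path is assumed from hyper-util, and the function itself is illustrative rather than part of the crate:

```rust
use std::fmt::Debug;

use hyper_util::client::legacy::connect::Connect;
use opentelemetry_http::{hyper::HyperClient, HttpClient};

// Only compiles thanks to the generic `impl<C> HttpClient for HyperClient<C>`
// above: a Unix-domain-socket connector (or any other custom transport) can be
// boxed behind the same `HttpClient` object as the default `HttpConnector`.
fn into_dyn_client<C>(client: HyperClient<C>) -> Box<dyn HttpClient>
where
    C: Connect + Clone + Send + Sync + 'static,
    HyperClient<C>: Debug,
{
    Box::new(client)
}
```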

View File

@@ -325,7 +325,7 @@ mod tests {
                 true,
                 TraceState::default(),
             ),
-            format!("{}:{}:0:1", LONG_TRACE_ID_STR, SPAN_ID_STR),
+            format!("{LONG_TRACE_ID_STR}:{SPAN_ID_STR}:0:1"),
         ),
         (
             SpanContext::new(

@@ -335,7 +335,7 @@ mod tests {
                 true,
                 TraceState::default(),
             ),
-            format!("{}:{}:0:0", LONG_TRACE_ID_STR, SPAN_ID_STR),
+            format!("{LONG_TRACE_ID_STR}:{SPAN_ID_STR}:0:0"),
         ),
         (
             SpanContext::new(

@@ -345,7 +345,7 @@ mod tests {
                 true,
                 TraceState::default(),
             ),
-            format!("{}:{}:0:3", LONG_TRACE_ID_STR, SPAN_ID_STR),
+            format!("{LONG_TRACE_ID_STR}:{SPAN_ID_STR}:0:3"),
         ),
     ]
 }

@@ -356,7 +356,7 @@ mod tests {
     let propagator = Propagator::with_custom_header(construct_header);
     for (trace_id, span_id, flag, expected) in get_extract_data() {
         let mut map: HashMap<String, String> = HashMap::new();
-        map.set(context_key, format!("{}:{}:0:{}", trace_id, span_id, flag));
+        map.set(context_key, format!("{trace_id}:{span_id}:0:{flag}"));
         let context = propagator.extract(&map);
         assert_eq!(context.span().span_context(), &expected);
     }

@@ -392,7 +392,7 @@ mod tests {
     // Propagators implement debug
     assert_eq!(
-        format!("{:?}", default_propagator),
+        format!("{default_propagator:?}"),
         format!(
             "Propagator {{ baggage_prefix: \"{}\", header_name: \"{}\", fields: [\"{}\"] }}",
             JAEGER_BAGGAGE_PREFIX, JAEGER_HEADER, JAEGER_HEADER

@@ -641,10 +641,7 @@ mod tests {
     }
     for (trace_id, span_id, flag, expected) in get_extract_data() {
         let mut map: HashMap<String, String> = HashMap::new();
-        map.set(
-            JAEGER_HEADER,
-            format!("{}:{}:0:{}", trace_id, span_id, flag),
-        );
+        map.set(JAEGER_HEADER, format!("{trace_id}:{span_id}:0:{flag}"));
         let context = propagator.extract(&map);
         assert_eq!(context.span().span_context(), &expected);
     }

@@ -655,7 +652,7 @@ mod tests {
     let mut map: HashMap<String, String> = HashMap::new();
     map.set(
         JAEGER_HEADER,
-        format!("{}:{}:0:1:aa", LONG_TRACE_ID_STR, SPAN_ID_STR),
+        format!("{LONG_TRACE_ID_STR}:{SPAN_ID_STR}:0:1:aa"),
     );
     let propagator = Propagator::new();
     let context = propagator.extract(&map);

@@ -667,7 +664,7 @@ mod tests {
     let mut map: HashMap<String, String> = HashMap::new();
     map.set(
         JAEGER_HEADER,
-        format!("{}:{}:0:aa", LONG_TRACE_ID_STR, SPAN_ID_STR),
+        format!("{LONG_TRACE_ID_STR}:{SPAN_ID_STR}:0:aa"),
     );
     let propagator = Propagator::new();
     let context = propagator.extract(&map);

@@ -679,7 +676,7 @@ mod tests {
     let mut map: HashMap<String, String> = HashMap::new();
     map.set(
         JAEGER_HEADER,
-        format!("{}%3A{}%3A0%3A1", LONG_TRACE_ID_STR, SPAN_ID_STR),
+        format!("{LONG_TRACE_ID_STR}%3A{SPAN_ID_STR}%3A0%3A1"),
    );
     let propagator = Propagator::new();
     let context = propagator.extract(&map);
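This file and most of the remaining hunks in the compare are the same mechanical cleanup: positional `format!` arguments replaced by inline captured identifiers, available since Rust 1.58. A quick sketch of the equivalence, with illustrative values:

```rust
fn main() {
    let trace_id = "0af7651916cd43dd8448eb211c80319c";
    let span_id = "b7ad6b7169203331";
    let flag = 1;

    // Positional arguments and inline captured identifiers render identically;
    // the inline form simply reads closer to the produced string.
    assert_eq!(
        format!("{}:{}:0:{}", trace_id, span_id, flag),
        format!("{trace_id}:{span_id}:0:{flag}")
    );
}
```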

View File

@@ -27,7 +27,7 @@ Released 2025-Mar-21
 - Update `opentelemetry-http` dependency version to 0.29
 - Update `opentelemetry-proto` dependency version to 0.29
-- The `OTEL_EXPORTER_OTLP_TIMEOUT`, `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`, `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` and `OTEL_EXPORTER_OTLP_LOGS_TIMEOUT` are changed from seconds to miliseconds.
+- The `OTEL_EXPORTER_OTLP_TIMEOUT`, `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`, `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` and `OTEL_EXPORTER_OTLP_LOGS_TIMEOUT` are changed from seconds to milliseconds.
 - Fixed `.with_headers()` in `HttpExporterBuilder` to correctly support multiple key/value pairs. [#2699](https://github.com/open-telemetry/opentelemetry-rust/pull/2699)
 - Fixed
   [#2770](https://github.com/open-telemetry/opentelemetry-rust/issues/2770)

@@ -199,7 +199,7 @@ now use `.with_resource(RESOURCE::default())` to configure Resource when using
 ### Added
 - Added `DeltaTemporalitySelector` ([#1568])
-- Add `webkpi-roots` features to `reqwest` and `tonic` backends
+- Add `webpki-roots` features to `reqwest` and `tonic` backends

 [#1568]: https://github.com/open-telemetry/opentelemetry-rust/pull/1568

View File

@@ -45,8 +45,7 @@ serde_json = { workspace = true, optional = true }
 [dev-dependencies]
 tokio-stream = { workspace = true, features = ["net"] }
-# need tokio runtime to run smoke tests.
-opentelemetry_sdk = { features = ["trace", "rt-tokio", "testing"], path = "../opentelemetry-sdk" }
+opentelemetry_sdk = { features = ["trace", "testing"], path = "../opentelemetry-sdk" }
 tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
 futures-util = { workspace = true }
 temp-env = { workspace = true }

View File

@@ -169,15 +169,15 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
     // Collect all shutdown errors
     let mut shutdown_errors = Vec::new();
     if let Err(e) = tracer_provider.shutdown() {
-        shutdown_errors.push(format!("tracer provider: {}", e));
+        shutdown_errors.push(format!("tracer provider: {e}"));
     }
     if let Err(e) = meter_provider.shutdown() {
-        shutdown_errors.push(format!("meter provider: {}", e));
+        shutdown_errors.push(format!("meter provider: {e}"));
     }
     if let Err(e) = logger_provider.shutdown() {
-        shutdown_errors.push(format!("logger provider: {}", e));
+        shutdown_errors.push(format!("logger provider: {e}"));
     }
     // Return an error if any shutdown failed
View File

@@ -162,15 +162,15 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
     // Collect all shutdown errors
     let mut shutdown_errors = Vec::new();
     if let Err(e) = tracer_provider.shutdown() {
-        shutdown_errors.push(format!("tracer provider: {}", e));
+        shutdown_errors.push(format!("tracer provider: {e}"));
     }
     if let Err(e) = meter_provider.shutdown() {
-        shutdown_errors.push(format!("meter provider: {}", e));
+        shutdown_errors.push(format!("meter provider: {e}"));
     }
     if let Err(e) = logger_provider.shutdown() {
-        shutdown_errors.push(format!("logger provider: {}", e));
+        shutdown_errors.push(format!("logger provider: {e}"));
     }
     // Return an error if any shutdown failed

View File

@@ -10,7 +10,7 @@ impl LogExporter for OtlpHttpClient {
         let client = self
             .client
             .lock()
-            .map_err(|e| OTelSdkError::InternalFailure(format!("Mutex lock failed: {}", e)))?
+            .map_err(|e| OTelSdkError::InternalFailure(format!("Mutex lock failed: {e}")))?
             .clone()
             .ok_or(OTelSdkError::AlreadyShutdown)?;

@@ -30,11 +30,12 @@ impl LogExporter for OtlpHttpClient {
         }
         let request_uri = request.uri().to_string();
-        otel_debug!(name: "HttpLogsClient.CallingExport");
+        otel_debug!(name: "HttpLogsClient.ExportStarted");
         let response = client
             .send_bytes(request)
             .await
             .map_err(|e| OTelSdkError::InternalFailure(format!("{e:?}")))?;
         if !response.status().is_success() {
             let error = format!(
                 "OpenTelemetry logs export failed. Url: {}, Status Code: {}, Response: {:?}",

@@ -42,14 +43,17 @@ impl LogExporter for OtlpHttpClient {
                 response.status().as_u16(),
                 response.body()
             );
+            otel_debug!(name: "HttpLogsClient.ExportFailed", error = &error);
             return Err(OTelSdkError::InternalFailure(error));
         }
+        otel_debug!(name: "HttpLogsClient.ExportSucceeded");
         Ok(())
     }

     fn shutdown_with_timeout(&self, _timeout: time::Duration) -> OTelSdkResult {
         let mut client_guard = self.client.lock().map_err(|e| {
-            OTelSdkError::InternalFailure(format!("Failed to acquire client lock: {}", e))
+            OTelSdkError::InternalFailure(format!("Failed to acquire client lock: {e}"))
         })?;
         if client_guard.take().is_none() {

View File

@@ -33,19 +33,36 @@ impl MetricsClient for OtlpHttpClient {
             request.headers_mut().insert(k.clone(), v.clone());
         }
-        otel_debug!(name: "HttpMetricsClient.CallingExport");
-        client
-            .send_bytes(request)
-            .await
-            .map_err(|e| OTelSdkError::InternalFailure(format!("{e:?}")))?;
-        Ok(())
+        otel_debug!(name: "HttpMetricsClient.ExportStarted");
+        let result = client.send_bytes(request).await;
+
+        match result {
+            Ok(response) => {
+                if response.status().is_success() {
+                    otel_debug!(name: "HttpMetricsClient.ExportSucceeded");
+                    Ok(())
+                } else {
+                    let error = format!(
+                        "OpenTelemetry metrics export failed. Status Code: {}, Response: {:?}",
+                        response.status().as_u16(),
+                        response.body()
+                    );
+                    otel_debug!(name: "HttpMetricsClient.ExportFailed", error = &error);
+                    Err(OTelSdkError::InternalFailure(error))
+                }
+            }
+            Err(e) => {
+                let error = format!("{e:?}");
+                otel_debug!(name: "HttpMetricsClient.ExportFailed", error = &error);
+                Err(OTelSdkError::InternalFailure(error))
+            }
+        }
     }

     fn shutdown(&self) -> OTelSdkResult {
         self.client
             .lock()
-            .map_err(|e| OTelSdkError::InternalFailure(format!("Failed to acquire lock: {}", e)))?
+            .map_err(|e| OTelSdkError::InternalFailure(format!("Failed to acquire lock: {e}")))?
             .take();
         Ok(())

View File

@@ -365,7 +365,7 @@ fn resolve_http_endpoint(
     provided_endpoint: Option<&str>,
 ) -> Result<Uri, ExporterBuildError> {
     // programmatic configuration overrides any value set via environment variables
-    if let Some(provider_endpoint) = provided_endpoint {
+    if let Some(provider_endpoint) = provided_endpoint.filter(|s| !s.is_empty()) {
         provider_endpoint
             .parse()
             .map_err(|er: http::uri::InvalidUri| {

@@ -528,6 +528,15 @@ mod tests {
         );
     }

+    #[test]
+    fn test_use_default_when_empty_string_for_option() {
+        run_env_test(vec![], || {
+            let endpoint =
+                super::resolve_http_endpoint("non_existent_var", "/v1/traces", Some("")).unwrap();
+            assert_eq!(endpoint, "http://localhost:4318/v1/traces");
+        });
+    }
+
     #[test]
     fn test_use_default_when_others_missing() {
         run_env_test(vec![], || {

@@ -606,17 +615,14 @@ mod tests {
         assert_eq!(
             headers.len(),
             expected_headers.len(),
-            "Failed on input: {}",
-            input_str
+            "Failed on input: {input_str}"
         );
         for (expected_key, expected_value) in expected_headers {
             assert_eq!(
                 headers.get(&HeaderName::from_static(expected_key)),
                 Some(&HeaderValue::from_static(expected_value)),
-                "Failed on key: {} with input: {}",
-                expected_key,
-                input_str
+                "Failed on key: {expected_key} with input: {input_str}"
             );
         }
     }

@@ -656,17 +662,14 @@ mod tests {
         assert_eq!(
             headers.len(),
             expected_headers.len(),
-            "Failed on input: {}",
-            input_str
+            "Failed on input: {input_str}"
         );
         for (expected_key, expected_value) in expected_headers {
             assert_eq!(
                 headers.get(&HeaderName::from_static(expected_key)),
                 Some(&HeaderValue::from_static(expected_value)),
-                "Failed on key: {} with input: {}",
-                expected_key,
-                input_str
+                "Failed on key: {expected_key} with input: {input_str}"
            );
         }
     }
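The `resolve_http_endpoint` change above hinges on `Option::filter`: an explicitly configured empty endpoint now collapses to `None` and falls through to the environment variable or built-in default, which is exactly what the new `test_use_default_when_empty_string_for_option` asserts. The idiom in isolation, with illustrative values:

```rust
fn main() {
    // Some("") fails the predicate and collapses to None...
    let empty: Option<&str> = Some("");
    assert_eq!(empty.filter(|s| !s.is_empty()), None);

    // ...while a real endpoint passes through untouched.
    let set: Option<&str> = Some("http://collector:4318");
    assert_eq!(set.filter(|s| !s.is_empty()), Some("http://collector:4318"));
}
```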

View File

@@ -13,7 +13,7 @@ impl SpanExporter for OtlpHttpClient {
         let client = match self
             .client
             .lock()
-            .map_err(|e| OTelSdkError::InternalFailure(format!("Mutex lock failed: {}", e)))
+            .map_err(|e| OTelSdkError::InternalFailure(format!("Mutex lock failed: {e}")))
             .and_then(|g| match &*g {
                 Some(client) => Ok(Arc::clone(client)),
                 _ => Err(OTelSdkError::AlreadyShutdown),

@@ -42,7 +42,7 @@ impl SpanExporter for OtlpHttpClient {
         }
         let request_uri = request.uri().to_string();
-        otel_debug!(name: "HttpTracesClient.CallingExport");
+        otel_debug!(name: "HttpTracesClient.ExportStarted");
         let response = client
             .send_bytes(request)
             .await

@@ -51,19 +51,21 @@ impl SpanExporter for OtlpHttpClient {
         if !response.status().is_success() {
             let error = format!(
                 "OpenTelemetry trace export failed. Url: {}, Status Code: {}, Response: {:?}",
-                response.status().as_u16(),
                 request_uri,
+                response.status().as_u16(),
                 response.body()
             );
+            otel_debug!(name: "HttpTracesClient.ExportFailed", error = &error);
             return Err(OTelSdkError::InternalFailure(error));
         }
+        otel_debug!(name: "HttpTracesClient.ExportSucceeded");
         Ok(())
     }

     fn shutdown(&mut self) -> OTelSdkResult {
         let mut client_guard = self.client.lock().map_err(|e| {
-            OTelSdkError::InternalFailure(format!("Failed to acquire client lock: {}", e))
+            OTelSdkError::InternalFailure(format!("Failed to acquire client lock: {e}"))
         })?;
         if client_guard.take().is_none() {

View File

@@ -396,8 +396,7 @@ mod tests {
             exporter_result,
             Err(crate::exporter::ExporterBuildError::InvalidUri(_, _))
         ),
-        "Expected InvalidUri error, but got {:?}",
-        exporter_result
+        "Expected InvalidUri error, but got {exporter_result:?}"
     );
 }

View File

@@ -63,7 +63,7 @@ impl LogExporter for TonicLogsClient {
             let (m, e, _) = inner
                 .interceptor
                 .call(Request::new(()))
-                .map_err(|e| OTelSdkError::InternalFailure(format!("error: {:?}", e)))?
+                .map_err(|e| OTelSdkError::InternalFailure(format!("error: {e:?}")))?
                 .into_parts();
             (inner.client.clone(), m, e)
         }

@@ -72,17 +72,27 @@ impl LogExporter for TonicLogsClient {
         let resource_logs = group_logs_by_resource_and_scope(batch, &self.resource);
-        otel_debug!(name: "TonicsLogsClient.CallingExport");
-        client
+        otel_debug!(name: "TonicLogsClient.ExportStarted");
+        let result = client
             .export(Request::from_parts(
                 metadata,
                 extensions,
                 ExportLogsServiceRequest { resource_logs },
             ))
-            .await
-            .map_err(|e| OTelSdkError::InternalFailure(format!("export error: {:?}", e)))?;
-        Ok(())
+            .await;
+
+        match result {
+            Ok(_) => {
+                otel_debug!(name: "TonicLogsClient.ExportSucceeded");
+                Ok(())
+            }
+            Err(e) => {
+                let error = format!("export error: {e:?}");
+                otel_debug!(name: "TonicLogsClient.ExportFailed", error = &error);
+                Err(OTelSdkError::InternalFailure(error))
+            }
+        }
     }

     fn shutdown_with_timeout(&self, _timeout: time::Duration) -> OTelSdkResult {
fn shutdown_with_timeout(&self, _timeout: time::Duration) -> OTelSdkResult { fn shutdown_with_timeout(&self, _timeout: time::Duration) -> OTelSdkResult {

View File

@@ -75,24 +75,33 @@ impl MetricsClient for TonicMetricsClient {
                 )),
             })?;
-        otel_debug!(name: "TonicsMetricsClient.CallingExport");
-        client
+        otel_debug!(name: "TonicMetricsClient.ExportStarted");
+        let result = client
             .export(Request::from_parts(
                 metadata,
                 extensions,
                 ExportMetricsServiceRequest::from(metrics),
             ))
-            .await
-            .map_err(|e| OTelSdkError::InternalFailure(format!("{e:?}")))?;
-        Ok(())
+            .await;
+
+        match result {
+            Ok(_) => {
+                otel_debug!(name: "TonicMetricsClient.ExportSucceeded");
+                Ok(())
+            }
+            Err(e) => {
+                let error = format!("{e:?}");
+                otel_debug!(name: "TonicMetricsClient.ExportFailed", error = &error);
+                Err(OTelSdkError::InternalFailure(error))
+            }
+        }
     }

     fn shutdown(&self) -> OTelSdkResult {
         self.inner
             .lock()
-            .map_err(|e| OTelSdkError::InternalFailure(format!("Failed to acquire lock: {}", e)))?
+            .map_err(|e| OTelSdkError::InternalFailure(format!("Failed to acquire lock: {e}")))?
             .take();
         Ok(())

View File

@@ -225,7 +225,7 @@ impl TonicExporterBuilder {
         // If users for some reason want to use a custom path, they can use env var or builder to pass it
         //
         // programmatic configuration overrides any value set via environment variables
-        if let Some(endpoint) = provided_endpoint {
+        if let Some(endpoint) = provided_endpoint.filter(|s| !s.is_empty()) {
             endpoint
         } else if let Ok(endpoint) = env::var(default_endpoint_var) {
             endpoint

@@ -666,4 +666,15 @@ mod tests {
             assert_eq!(url, "http://localhost:4317");
         });
     }
+
+    #[test]
+    fn test_use_default_when_empty_string_for_option() {
+        run_env_test(vec![], || {
+            let url = TonicExporterBuilder::resolve_endpoint(
+                OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
+                Some(String::new()),
+            );
+            assert_eq!(url, "http://localhost:4317");
+        });
+    }
 }

View File

@@ -67,7 +67,7 @@ impl SpanExporter for TonicTracesClient {
                 .lock()
                 .await // tokio::sync::Mutex doesn't return a poisoned error, so we can safely use the interceptor here
                 .call(Request::new(()))
-                .map_err(|e| OTelSdkError::InternalFailure(format!("error: {:?}", e)))?
+                .map_err(|e| OTelSdkError::InternalFailure(format!("error: {e:?}")))?
                 .into_parts();
             (inner.client.clone(), m, e)
         }

@@ -76,17 +76,27 @@ impl SpanExporter for TonicTracesClient {
         let resource_spans = group_spans_by_resource_and_scope(batch, &self.resource);
-        otel_debug!(name: "TonicsTracesClient.CallingExport");
-        client
+        otel_debug!(name: "TonicTracesClient.ExportStarted");
+        let result = client
             .export(Request::from_parts(
                 metadata,
                 extensions,
                 ExportTraceServiceRequest { resource_spans },
             ))
-            .await
-            .map_err(|e| OTelSdkError::InternalFailure(e.to_string()))?;
-        Ok(())
+            .await;
+
+        match result {
+            Ok(_) => {
+                otel_debug!(name: "TonicTracesClient.ExportSucceeded");
+                Ok(())
+            }
+            Err(e) => {
+                let error = e.to_string();
+                otel_debug!(name: "TonicTracesClient.ExportFailed", error = &error);
+                Err(OTelSdkError::InternalFailure(error))
+            }
+        }
     }

     fn shutdown(&mut self) -> OTelSdkResult {
fn shutdown(&mut self) -> OTelSdkResult { fn shutdown(&mut self) -> OTelSdkResult {

View File

@@ -101,9 +101,7 @@ pub fn assert_metrics_results_contains(expected_content: &str) -> Result<()> {
     reader.read_to_string(&mut contents)?;
     assert!(
         contents.contains(expected_content),
-        "Expected content {} not found in actual content {}",
-        expected_content,
-        contents
+        "Expected content {expected_content} not found in actual content {contents}"
     );
     Ok(())
 }

@@ -162,12 +160,7 @@ pub fn fetch_latest_metrics_for_scope(scope_name: &str) -> Result<Value> {
                 None
             })
         })
-        .with_context(|| {
-            format!(
-                "No valid JSON line containing scope `{}` found.",
-                scope_name
-            )
-        })?;
+        .with_context(|| format!("No valid JSON line containing scope `{scope_name}` found."))?;
     Ok(json_line)
 }

@@ -178,18 +171,16 @@ pub fn fetch_latest_metrics_for_scope(scope_name: &str) -> Result<Value> {
 ///
 pub fn validate_metrics_against_results(scope_name: &str) -> Result<()> {
     // Define the results file path
-    let results_file_path = format!("./expected/metrics/{}.json", scope_name);
+    let results_file_path = format!("./expected/metrics/{scope_name}.json");
     // Fetch the actual metrics for the given scope
     let actual_metrics = fetch_latest_metrics_for_scope(scope_name)
-        .context(format!("Failed to fetch metrics for scope: {}", scope_name))?;
+        .context(format!("Failed to fetch metrics for scope: {scope_name}"))?;
     // Read the expected metrics from the results file
     let expected_metrics = {
-        let file = File::open(&results_file_path).context(format!(
-            "Failed to open results file: {}",
-            results_file_path
-        ))?;
+        let file = File::open(&results_file_path)
+            .context(format!("Failed to open results file: {results_file_path}"))?;
         read_metrics_from_json(file)
     }?;

View File

@@ -119,7 +119,7 @@ impl SpanForest {
     }
     if !spans.is_empty() {
-        panic!("found spans with invalid parent: {:?}", spans);
+        panic!("found spans with invalid parent: {spans:?}");
     }
     forest

View File

@@ -38,7 +38,7 @@ mod metrictests_roundtrip {
     let metrics: MetricsData = serde_json::from_str(metrics_in)?;
     let metrics_out = serde_json::to_string(&metrics)?;
-    println!("{:}", metrics_out);
+    println!("{metrics_out:}");
     let metrics_in_json: Value = serde_json::from_str(metrics_in)?;
     let metrics_out_json: Value = serde_json::from_str(&metrics_out)?;

View File

@@ -89,7 +89,7 @@ async fn smoke_tracer() {
             opentelemetry_otlp::SpanExporter::builder()
                 .with_tonic()
                 .with_compression(opentelemetry_otlp::Compression::Gzip)
-                .with_endpoint(format!("http://{}", addr))
+                .with_endpoint(format!("http://{addr}"))
                 .with_metadata(metadata)
                 .build()
                 .expect("gzip-tonic SpanExporter failed to build"),

View File

@@ -26,10 +26,10 @@ pub(crate) fn get_unit_suffixes(unit: &str) -> Option<Cow<'static, str>> {
             get_prom_per_unit(second),
         ) {
             (true, _, Some(second_part)) | (false, None, Some(second_part)) => {
-                Some(Cow::Owned(format!("per_{}", second_part)))
+                Some(Cow::Owned(format!("per_{second_part}")))
             }
             (false, Some(first_part), Some(second_part)) => {
-                Some(Cow::Owned(format!("{}_per_{}", first_part, second_part)))
+                Some(Cow::Owned(format!("{first_part}_per_{second_part}")))
             }
             _ => None,
         };

View File

@@ -2,6 +2,8 @@
 ## vNext
+- Update proto definitions to v1.7.0.
 ## 0.30.0
 Released 2025-May-23

View File

@@ -49,7 +49,7 @@ testing = ["opentelemetry/testing"]
 # add ons
 internal-logs = ["opentelemetry/internal-logs"]
 with-schemars = ["schemars"]
-with-serde = ["serde", "hex", "base64"]
+with-serde = ["serde", "const-hex", "base64"]

 [dependencies]
 tonic = { workspace = true, optional = true, features = ["codegen", "prost"] }

@@ -58,7 +58,7 @@ opentelemetry = { version = "0.30", default-features = false, path = "../opentel
 opentelemetry_sdk = { version = "0.30", default-features = false, path = "../opentelemetry-sdk" }
 schemars = { workspace = true, optional = true }
 serde = { workspace = true, optional = true, features = ["serde_derive"] }
-hex = { workspace = true, optional = true }
+const-hex = { workspace = true, optional = true }
 base64 = { workspace = true, optional = true }

 [dev-dependencies]

View File

@@ -16,7 +16,7 @@ pub(crate) mod serializers {
     where
         S: Serializer,
     {
-        let hex_string = hex::encode(bytes);
+        let hex_string = const_hex::encode(bytes);
         serializer.serialize_str(&hex_string)
     }

@@ -37,7 +37,7 @@ pub(crate) mod serializers {
     where
         E: de::Error,
     {
-        hex::decode(value).map_err(E::custom)
+        const_hex::decode(value).map_err(E::custom)
     }
 }
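`const-hex` is a maintained drop-in for the unmaintained `hex` crate, and the `encode`/`decode` calls above are the whole surface this repository uses. A small round-trip sketch of those two functions, with illustrative bytes:

```rust
fn main() {
    let bytes = [0x5b, 0x8e, 0xff, 0xf7];

    // Encode to a lowercase hex string, as the serializer above does.
    let encoded = const_hex::encode(bytes);
    assert_eq!(encoded, "5b8efff7");

    // Decode back to bytes; errors surface as a Result, which the
    // deserializer above maps into a serde error.
    let decoded = const_hex::decode(&encoded).expect("valid hex");
    assert_eq!(decoded, bytes);
}
```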

@@ -1 +1 @@
-Subproject commit f7676e812035aa8a67478c6d740cb09f4c50f86a
+Subproject commit 8654ab7a5a43ca25fe8046e59dcd6935c3f76de0

View File

@@ -1,4 +1,35 @@
 // This file is @generated by prost-build.
+/// ProfilesDictionary represents the profiles data shared across the
+/// entire message being sent.
+#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
+#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProfilesDictionary {
+    /// Mappings from address ranges to the image/binary/library mapped
+    /// into that address range referenced by locations via Location.mapping_index.
+    #[prost(message, repeated, tag = "1")]
+    pub mapping_table: ::prost::alloc::vec::Vec<Mapping>,
+    /// Locations referenced by samples via Profile.location_indices.
+    #[prost(message, repeated, tag = "2")]
+    pub location_table: ::prost::alloc::vec::Vec<Location>,
+    /// Functions referenced by locations via Line.function_index.
+    #[prost(message, repeated, tag = "3")]
+    pub function_table: ::prost::alloc::vec::Vec<Function>,
+    /// Links referenced by samples via Sample.link_index.
+    #[prost(message, repeated, tag = "4")]
+    pub link_table: ::prost::alloc::vec::Vec<Link>,
+    /// A common table for strings referenced by various messages.
+    /// string_table\[0\] must always be "".
+    #[prost(string, repeated, tag = "5")]
+    pub string_table: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+    /// A common table for attributes referenced by various messages.
+    #[prost(message, repeated, tag = "6")]
+    pub attribute_table: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
+    /// Represents a mapping between Attribute Keys and Units.
+    #[prost(message, repeated, tag = "7")]
+    pub attribute_units: ::prost::alloc::vec::Vec<AttributeUnit>,
+}
 /// ProfilesData represents the profiles data that can be stored in persistent storage,
 /// OR can be embedded by other protocols that transfer OTLP profiles data but do not
 /// implement the OTLP protocol.

@@ -23,29 +54,9 @@ pub struct ProfilesData {
     /// Resource.attributes and semantic conventions.
     #[prost(message, repeated, tag = "1")]
     pub resource_profiles: ::prost::alloc::vec::Vec<ResourceProfiles>,
-    /// Mappings from address ranges to the image/binary/library mapped
-    /// into that address range referenced by locations via Location.mapping_index.
-    #[prost(message, repeated, tag = "2")]
-    pub mapping_table: ::prost::alloc::vec::Vec<Mapping>,
-    /// Locations referenced by samples via Profile.location_indices.
-    #[prost(message, repeated, tag = "3")]
-    pub location_table: ::prost::alloc::vec::Vec<Location>,
-    /// Functions referenced by locations via Line.function_index.
-    #[prost(message, repeated, tag = "4")]
-    pub function_table: ::prost::alloc::vec::Vec<Function>,
-    /// Links referenced by samples via Sample.link_index.
-    #[prost(message, repeated, tag = "5")]
-    pub link_table: ::prost::alloc::vec::Vec<Link>,
-    /// A common table for strings referenced by various messages.
-    /// string_table\[0\] must always be "".
-    #[prost(string, repeated, tag = "6")]
-    pub string_table: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
-    /// A common table for attributes referenced by various messages.
-    #[prost(message, repeated, tag = "7")]
-    pub attribute_table: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
-    /// Represents a mapping between Attribute Keys and Units.
-    #[prost(message, repeated, tag = "8")]
-    pub attribute_units: ::prost::alloc::vec::Vec<AttributeUnit>,
+    /// One instance of ProfilesDictionary
+    #[prost(message, optional, tag = "2")]
+    pub dictionary: ::core::option::Option<ProfilesDictionary>,
 }
 /// A collection of ScopeProfiles from a Resource.
 #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]

@@ -116,7 +127,7 @@ pub struct Profile {
     /// The set of samples recorded in this profile.
     #[prost(message, repeated, tag = "2")]
     pub sample: ::prost::alloc::vec::Vec<Sample>,
-    /// References to locations in ProfilesData.location_table.
+    /// References to locations in ProfilesDictionary.location_table.
     #[prost(int32, repeated, tag = "3")]
     pub location_indices: ::prost::alloc::vec::Vec<i32>,
     /// Time of collection (UTC) represented as nanoseconds past the epoch.

@@ -145,7 +156,7 @@ pub struct Profile {
     /// for human-friendly content. The profile must stay functional if this field
     /// is cleaned.
     ///
-    /// Indices into ProfilesData.string_table.
+    /// Indices into ProfilesDictionary.string_table.
     #[prost(int32, repeated, tag = "8")]
     pub comment_strindices: ::prost::alloc::vec::Vec<i32>,
     /// Index into the sample_type array to the default sample type.

@@ -230,10 +241,10 @@ pub struct Link {
 #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
 #[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct ValueType {
-    /// Index into ProfilesData.string_table.
+    /// Index into ProfilesDictionary.string_table.
     #[prost(int32, tag = "1")]
     pub type_strindex: i32,
-    /// Index into ProfilesData.string_table.
+    /// Index into ProfilesDictionary.string_table.
     #[prost(int32, tag = "2")]
     pub unit_strindex: i32,
     #[prost(enumeration = "AggregationTemporality", tag = "3")]

@@ -263,10 +274,10 @@ pub struct Sample {
     /// lists of the originals.
     #[prost(int64, repeated, tag = "3")]
     pub value: ::prost::alloc::vec::Vec<i64>,
-    /// References to attributes in ProfilesData.attribute_table. \[optional\]
+    /// References to attributes in ProfilesDictionary.attribute_table. \[optional\]
     #[prost(int32, repeated, tag = "4")]
     pub attribute_indices: ::prost::alloc::vec::Vec<i32>,
-    /// Reference to link in ProfilesData.link_table. \[optional\]
+    /// Reference to link in ProfilesDictionary.link_table. \[optional\]
     #[prost(int32, optional, tag = "5")]
     pub link_index: ::core::option::Option<i32>,
     /// Timestamps associated with Sample represented in nanoseconds. These timestamps are expected

@@ -301,10 +312,10 @@ pub struct Mapping {
     /// disk for the main binary and shared libraries, or virtual
     /// abstractions like "\[vdso\]".
     ///
-    /// Index into ProfilesData.string_table.
+    /// Index into ProfilesDictionary.string_table.
     #[prost(int32, tag = "4")]
     pub filename_strindex: i32,
-    /// References to attributes in ProfilesData.attribute_table. \[optional\]
+    /// References to attributes in ProfilesDictionary.attribute_table. \[optional\]
     #[prost(int32, repeated, tag = "5")]
     pub attribute_indices: ::prost::alloc::vec::Vec<i32>,
     /// The following fields indicate the resolution of symbolic info.

@@ -323,7 +334,7 @@ pub struct Mapping {
 #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Location {
-    /// Reference to mapping in ProfilesData.mapping_table.
+    /// Reference to mapping in ProfilesDictionary.mapping_table.
     /// It can be unset if the mapping is unknown or not applicable for
     /// this profile type.
     #[prost(int32, optional, tag = "1")]

@@ -351,7 +362,7 @@ pub struct Location {
     /// profile changes.
     #[prost(bool, tag = "4")]
     pub is_folded: bool,
-    /// References to attributes in ProfilesData.attribute_table. \[optional\]
+    /// References to attributes in ProfilesDictionary.attribute_table. \[optional\]
     #[prost(int32, repeated, tag = "5")]
     pub attribute_indices: ::prost::alloc::vec::Vec<i32>,
 }

@@ -361,7 +372,7 @@ pub struct Location {
 #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
 #[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct Line {
-    /// Reference to function in ProfilesData.function_table.
+    /// Reference to function in ProfilesDictionary.function_table.
     #[prost(int32, tag = "1")]
     pub function_index: i32,
     /// Line number in source code. 0 means unset.
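The net effect of the restructuring above: the shared lookup tables move out of `ProfilesData` into a single optional `ProfilesDictionary` (field 2), and everything else references them by index. A construction sketch; the module path is an assumption (adjust to wherever the generated profiles types live in `opentelemetry-proto`), while the field names and the `string_table[0]` rule come from the generated docs above:

```rust
// NOTE: module path below is assumed, not taken from this diff.
use opentelemetry_proto::tonic::profiles::v1development::{ProfilesData, ProfilesDictionary};

fn main() {
    let mut dictionary = ProfilesDictionary::default();
    // Per the generated docs, string_table[0] must always be "".
    dictionary.string_table.push(String::new());
    dictionary.string_table.push("cpu".to_string());

    // Profiles under resource_profiles now point into these shared tables by
    // index (e.g. Profile.location_indices, ValueType.type_strindex).
    let data = ProfilesData {
        resource_profiles: Vec::new(),
        dictionary: Some(dictionary),
    };
    assert_eq!(data.dictionary.as_ref().unwrap().string_table[1], "cpu");
}
```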

View File

@@ -61,10 +61,11 @@ mod json_serde {
                 dropped_attributes_count: 0,
             }),
             spans: vec![Span {
-                trace_id: hex::decode("5b8efff798038103d269b633813fc60c").unwrap(),
-                span_id: hex::decode("eee19b7ec3c1b174").unwrap(),
+                trace_id: const_hex::decode("5b8efff798038103d269b633813fc60c")
+                    .unwrap(),
+                span_id: const_hex::decode("eee19b7ec3c1b174").unwrap(),
                 trace_state: String::new(),
-                parent_span_id: hex::decode("eee19b7ec3c1b173").unwrap(),
+                parent_span_id: const_hex::decode("eee19b7ec3c1b173").unwrap(),
                 flags: 0,
                 name: String::from("I'm a server span"),
                 kind: 2,

@@ -267,10 +268,11 @@ mod json_serde {
                 dropped_attributes_count: 1,
             }),
             spans: vec![Span {
-                trace_id: hex::decode("5b8efff798038103d269b633813fc60c").unwrap(),
-                span_id: hex::decode("eee19b7ec3c1b174").unwrap(),
+                trace_id: const_hex::decode("5b8efff798038103d269b633813fc60c")
+                    .unwrap(),
+                span_id: const_hex::decode("eee19b7ec3c1b174").unwrap(),
                 trace_state: String::from("browser=firefox,os=linux"),
-                parent_span_id: hex::decode("eee19b7ec3c1b173").unwrap(),
+                parent_span_id: const_hex::decode("eee19b7ec3c1b173").unwrap(),
                 flags: 1,
                 name: String::from("I'm a server span"),
                 kind: 2,

@@ -308,9 +310,9 @@ mod json_serde {
                 }],
                 dropped_events_count: 1,
                 links: vec![Link {
-                    trace_id: hex::decode("5b8efff798038103d269b633813fc60b")
+                    trace_id: const_hex::decode("5b8efff798038103d269b633813fc60b")
                         .unwrap(),
-                    span_id: hex::decode("eee19b7ec3c1b172").unwrap(),
+                    span_id: const_hex::decode("eee19b7ec3c1b172").unwrap(),
                     trace_state: String::from("food=pizza,color=red"),
                     attributes: vec![KeyValue {
                         key: String::from("my.link.attr"),

@@ -1272,8 +1274,9 @@ mod json_serde {
             ],
             dropped_attributes_count: 0,
             flags: 0,
-            trace_id: hex::decode("5b8efff798038103d269b633813fc60c").unwrap(),
-            span_id: hex::decode("eee19b7ec3c1b174").unwrap(),
+            trace_id: const_hex::decode("5b8efff798038103d269b633813fc60c")
+                .unwrap(),
+            span_id: const_hex::decode("eee19b7ec3c1b174").unwrap(),
         }],
         schema_url: String::new(),
     }],

View File

@@ -2,6 +2,10 @@
 ## vNext
+- TODO: Placeholder for Span processor related things
+- *Fix* SpanProcessor::on_start is no longer called on non-recording spans
+- **Fix**: Restore true parallel exports in the async-native `BatchSpanProcessor` by honoring `OTEL_BSP_MAX_CONCURRENT_EXPORTS` ([#3028](https://github.com/open-telemetry/opentelemetry-rust/pull/3028)). A regression in [#2685](https://github.com/open-telemetry/opentelemetry-rust/pull/2685) inadvertently awaited the `export()` future directly in `opentelemetry-sdk/src/trace/span_processor_with_async_runtime.rs` instead of spawning it on the runtime, forcing all exports to run sequentially.
 ## 0.30.0
 Released 2025-May-23

@@ -186,7 +190,7 @@ Released 2025-Mar-21
   needs to mutate state, it should rely on interior mutability.
   [2764](https://github.com/open-telemetry/opentelemetry-rust/pull/2764)
 - *Breaking (Affects custom Exporter/Processor authors only)* Removed
-  `opentelelemetry_sdk::logs::error::{LogError, LogResult}`. These were not
+  `opentelemetry_sdk::logs::error::{LogError, LogResult}`. These were not
   intended to be public. If you are authoring custom processor/exporters, use
   `opentelemetry_sdk::error::OTelSdkError` and
   `opentelemetry_sdk::error::OTelSdkResult`.
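The parallel-exports fix above is easier to see in miniature. The sketch below is illustrative only, not the SDK's processor code: awaiting each `export()` future in a loop serialises the batches, while spawning them as tasks bounded by a semaphore (standing in for `OTEL_BSP_MAX_CONCURRENT_EXPORTS`) lets up to N exports overlap.

```rust
use std::sync::Arc;
use std::time::Duration;

use tokio::sync::Semaphore;

// Stand-in for SpanExporter::export(): sleeps to simulate network I/O.
async fn export(batch_id: usize) {
    tokio::time::sleep(Duration::from_millis(50)).await;
    println!("exported batch {batch_id}");
}

#[tokio::main]
async fn main() {
    // The regression: awaiting each future directly runs exports one by one
    // (4 batches x 50 ms = ~200 ms).
    for id in 0..4 {
        export(id).await;
    }

    // The fixed behaviour, sketched: spawn exports as tasks and bound the
    // concurrency with a semaphore (limit of 2 assumed here), so batches overlap.
    let permits = Arc::new(Semaphore::new(2));
    let mut handles = Vec::new();
    for id in 0..4 {
        let permits = Arc::clone(&permits);
        handles.push(tokio::spawn(async move {
            let _permit = permits.acquire().await.expect("semaphore closed");
            export(id).await;
        }));
    }
    for handle in handles {
        handle.await.expect("export task panicked");
    }
}
```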

View File

@@ -22,7 +22,7 @@ serde = { workspace = true, features = ["derive", "rc"], optional = true }
 serde_json = { workspace = true, optional = true }
 thiserror = { workspace = true }
 url = { workspace = true, optional = true }
-tokio = { workspace = true, features = ["rt", "time"], optional = true }
+tokio = { workspace = true, default-features = false, optional = true }
 tokio-stream = { workspace = true, optional = true }
 http = { workspace = true, optional = true }

@@ -47,15 +47,15 @@ spec_unstable_logs_enabled = ["logs", "opentelemetry/spec_unstable_logs_enabled"
 metrics = ["opentelemetry/metrics"]
 testing = ["opentelemetry/testing", "trace", "metrics", "logs", "rt-tokio", "rt-tokio-current-thread", "tokio/macros", "tokio/rt-multi-thread"]
 experimental_async_runtime = []
-rt-tokio = ["tokio", "tokio-stream", "experimental_async_runtime"]
-rt-tokio-current-thread = ["tokio", "tokio-stream", "experimental_async_runtime"]
+rt-tokio = ["tokio/rt", "tokio/time", "tokio-stream", "experimental_async_runtime"]
+rt-tokio-current-thread = ["tokio/rt", "tokio/time", "tokio-stream", "experimental_async_runtime"]
 internal-logs = ["opentelemetry/internal-logs"]
 experimental_metrics_periodicreader_with_async_runtime = ["metrics", "experimental_async_runtime"]
 spec_unstable_metrics_views = ["metrics"]
 experimental_metrics_custom_reader = ["metrics"]
 experimental_logs_batch_log_processor_with_async_runtime = ["logs", "experimental_async_runtime"]
 experimental_logs_concurrent_log_processor = ["logs"]
-experimental_trace_batch_span_processor_with_async_runtime = ["trace", "experimental_async_runtime"]
+experimental_trace_batch_span_processor_with_async_runtime = ["tokio/sync", "trace", "experimental_async_runtime"]
 experimental_metrics_disable_name_validation = ["metrics"]

 [[bench]]

View File

@@ -42,7 +42,7 @@ fn criterion_benchmark(c: &mut Criterion) {
     for task_num in [1, 2, 4, 8, 16, 32].iter() {
         group.bench_with_input(
-            BenchmarkId::from_parameter(format!("with {} concurrent task", task_num)),
+            BenchmarkId::from_parameter(format!("with {task_num} concurrent task")),
             task_num,
             |b, &task_num| {
                 b.iter(|| {

View File

@@ -291,7 +291,7 @@ fn bench_histogram(bound_count: usize) -> (SharedReader, Histogram<u64>) {
     let mtr = builder.build().meter("test_meter");
     let hist = mtr
-        .u64_histogram(format!("histogram_{}", bound_count))
+        .u64_histogram(format!("histogram_{bound_count}"))
         .build();
     (r, hist)

@@ -307,15 +307,14 @@ fn histograms(c: &mut Criterion) {
             let mut attributes: Vec<KeyValue> = Vec::new();
             for i in 0..*attr_size {
                 attributes.push(KeyValue::new(
-                    format!("K,{},{}", bound_size, attr_size),
-                    format!("V,{},{},{}", bound_size, attr_size, i),
+                    format!("K,{bound_size},{attr_size}"),
+                    format!("V,{bound_size},{attr_size},{i}"),
                 ))
             }
             let value: u64 = rng.random_range(0..MAX_BOUND).try_into().unwrap();
-            group.bench_function(
-                format!("Record{}Attrs{}bounds", attr_size, bound_size),
-                |b| b.iter(|| hist.record(value, &attributes)),
-            );
+            group.bench_function(format!("Record{attr_size}Attrs{bound_size}bounds"), |b| {
+                b.iter(|| hist.record(value, &attributes))
+            });
         }
     }
     group.bench_function("CollectOne", |b| benchmark_collect_histogram(b, 1));

View File

@@ -55,7 +55,7 @@ fn criterion_benchmark(c: &mut Criterion) {
     counter_add_unsorted(c);
     let attribute_values: [String; 10] = (1..=10)
-        .map(|i| format!("value{}", i))
+        .map(|i| format!("value{i}"))
         .collect::<Vec<String>>()
         .try_into()
         .expect("Expected a Vec of length 10");

View File

@@ -51,7 +51,7 @@ fn criterion_benchmark(c: &mut Criterion) {
     histogram_record(c);
     let attribute_values: [String; 10] = (1..=10)
-        .map(|i| format!("value{}", i))
+        .map(|i| format!("value{i}"))
         .collect::<Vec<String>>()
         .try_into()
         .expect("Expected a Vec of length 10");

View File

@@ -164,6 +164,6 @@ pub enum InMemoryExporterError {
 #[cfg(any(feature = "testing", test))]
 impl<T> From<std::sync::PoisonError<T>> for InMemoryExporterError {
     fn from(err: std::sync::PoisonError<T>) -> Self {
-        InMemoryExporterError::InternalFailure(format!("Mutex poison error: {}", err))
+        InMemoryExporterError::InternalFailure(format!("Mutex poison error: {err}"))
     }
 }

View File

@@ -229,7 +229,7 @@ impl LogProcessor for BatchLogProcessor {
                     if err == RecvTimeoutError::Timeout {
                         OTelSdkError::Timeout(self.forceflush_timeout)
                     } else {
-                        OTelSdkError::InternalFailure(format!("{}", err))
+                        OTelSdkError::InternalFailure(format!("{err}"))
                     }
                 })?,
             Err(mpsc::TrySendError::Full(_)) => {

@@ -291,7 +291,7 @@ impl LogProcessor for BatchLogProcessor {
                         name: "BatchLogProcessor.Shutdown.Error",
                         error = format!("{}", err)
                     );
-                    OTelSdkError::InternalFailure(format!("{}", err))
+                    OTelSdkError::InternalFailure(format!("{err}"))
                 }
             })?

View File

@@ -9,7 +9,7 @@ use std::sync::atomic::AtomicBool;
 use std::sync::{Arc, Mutex};
 use std::time;

-/// An in-memory logs exporter that stores logs data in memory..
+/// An in-memory logs exporter that stores logs data in memory.
 ///
 /// This exporter is useful for testing and debugging purposes.
 /// It stores logs in a `Vec<OwnedLogData>`. Logs can be retrieved using

@@ -73,7 +73,7 @@ pub struct LogDataWithResource {
     pub resource: Cow<'static, Resource>,
 }

-///Builder for ['InMemoryLogExporter'].
+///Builder for [`InMemoryLogExporter`].
 /// # Example
 ///
 /// ```no_run

@@ -187,14 +187,14 @@ impl InMemoryLogExporter {
             .logs
             .lock()
             .map(|mut logs_guard| logs_guard.clear())
-            .map_err(|e| OTelSdkError::InternalFailure(format!("Failed to reset logs: {}", e)));
+            .map_err(|e| OTelSdkError::InternalFailure(format!("Failed to reset logs: {e}")));
     }
 }

 impl LogExporter for InMemoryLogExporter {
     async fn export(&self, batch: LogBatch<'_>) -> OTelSdkResult {
         let mut logs_guard = self.logs.lock().map_err(|e| {
-            OTelSdkError::InternalFailure(format!("Failed to lock logs for export: {}", e))
+            OTelSdkError::InternalFailure(format!("Failed to lock logs for export: {e}"))
         })?;
         for (log_record, instrumentation) in batch.iter() {
             let owned_log = OwnedLogData {
@@ -80,10 +80,10 @@ impl<R: RuntimeChannel> LogProcessor for BatchLogProcessor<R> {
         let (res_sender, res_receiver) = oneshot::channel();
         self.message_sender
             .try_send(BatchMessage::Flush(Some(res_sender)))
-            .map_err(|err| OTelSdkError::InternalFailure(format!("{:?}", err)))?;
+            .map_err(|err| OTelSdkError::InternalFailure(format!("{err:?}")))?;
         futures_executor::block_on(res_receiver)
-            .map_err(|err| OTelSdkError::InternalFailure(format!("{:?}", err)))
+            .map_err(|err| OTelSdkError::InternalFailure(format!("{err:?}")))
             .and_then(std::convert::identity)
     }
@@ -101,10 +101,10 @@ impl<R: RuntimeChannel> LogProcessor for BatchLogProcessor<R> {
         let (res_sender, res_receiver) = oneshot::channel();
         self.message_sender
             .try_send(BatchMessage::Shutdown(res_sender))
-            .map_err(|err| OTelSdkError::InternalFailure(format!("{:?}", err)))?;
+            .map_err(|err| OTelSdkError::InternalFailure(format!("{err:?}")))?;
         futures_executor::block_on(res_receiver)
-            .map_err(|err| OTelSdkError::InternalFailure(format!("{:?}", err)))
+            .map_err(|err| OTelSdkError::InternalFailure(format!("{err:?}")))
             .and_then(std::convert::identity)
     }
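Both flush and shutdown follow the same request-response pattern: a message carrying a oneshot sender goes to the background task, and the caller blocks on the matching receiver. A stripped-down sketch of that round trip, assuming the futures-channel and futures-executor crates and a hypothetical Msg type:

    use futures_channel::oneshot;
    use std::sync::mpsc;

    enum Msg {
        Flush(oneshot::Sender<Result<(), String>>),
    }

    fn force_flush(tx: &mpsc::Sender<Msg>) -> Result<(), String> {
        let (res_tx, res_rx) = oneshot::channel();
        tx.send(Msg::Flush(res_tx))
            .map_err(|err| format!("failed to send flush message: {err}"))?;
        // Block until the worker reports the flush result; the outer Result
        // covers a dropped channel, the inner one the flush itself.
        futures_executor::block_on(res_rx)
            .map_err(|err| format!("flush response channel error: {err:?}"))
            .and_then(std::convert::identity)
    }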

@@ -92,7 +92,7 @@ impl SdkLoggerProvider {
         if result.iter().all(|r| r.is_ok()) {
             Ok(())
         } else {
-            Err(OTelSdkError::InternalFailure(format!("errs: {:?}", result)))
+            Err(OTelSdkError::InternalFailure(format!("errs: {result:?}")))
         }
     }
@@ -798,7 +798,7 @@ mod tests {
         // Explicitly shut down the logger provider
         let shutdown_result = logger_provider1.shutdown();
-        println!("---->Result: {:?}", shutdown_result);
+        println!("---->Result: {shutdown_result:?}");
         assert!(shutdown_result.is_ok());
         // Verify that shutdown was called exactly once

@@ -110,8 +110,8 @@ mod tests {
         assert_eq!(log.record.attributes_len(), 10);
         for i in 1..=10 {
             assert!(log.record.attributes_contains(
-                &Key::new(format!("key{}", i)),
-                &AnyValue::String(format!("value{}", i).into())
+                &Key::new(format!("key{i}")),
+                &AnyValue::String(format!("value{i}").into())
             ));
         }

@@ -340,8 +340,7 @@ mod tests {
         assert!(
             panic_message.contains("no reactor running")
                 || panic_message.contains("must be called from the context of a Tokio 1.x runtime"),
-            "Expected panic message about missing Tokio runtime, but got: {}",
-            panic_message
+            "Expected panic message about missing Tokio runtime, but got: {panic_message}"
         );
     }

@@ -120,8 +120,7 @@ impl Aggregation {
                 for x in boundaries.windows(2) {
                     if x[0] >= x[1] {
                         return Err(MetricError::Config(format!(
-                            "aggregation: explicit bucket histogram: non-monotonic boundaries: {:?}",
-                            boundaries,
+                            "aggregation: explicit bucket histogram: non-monotonic boundaries: {boundaries:?}",
                         )));
                     }
                 }
@@ -131,14 +130,12 @@ impl Aggregation {
             Aggregation::Base2ExponentialHistogram { max_scale, .. } => {
                 if *max_scale > EXPO_MAX_SCALE {
                     return Err(MetricError::Config(format!(
-                        "aggregation: exponential histogram: max scale ({}) is greater than 20",
-                        max_scale,
+                        "aggregation: exponential histogram: max scale ({max_scale}) is greater than 20",
                     )));
                 }
                 if *max_scale < EXPO_MIN_SCALE {
                     return Err(MetricError::Config(format!(
-                        "aggregation: exponential histogram: max scale ({}) is less than -10",
-                        max_scale,
+                        "aggregation: exponential histogram: max scale ({max_scale}) is less than -10",
                     )));
                 }
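The boundary check relies on slice::windows(2), which yields every overlapping pair, so one pass is enough to verify that the histogram buckets are strictly increasing. A minimal sketch:

    // Returns true when every boundary is strictly greater than its predecessor.
    fn strictly_increasing(boundaries: &[f64]) -> bool {
        boundaries.windows(2).all(|pair| pair[0] < pair[1])
    }

    fn main() {
        assert!(strictly_increasing(&[0.0, 5.0, 10.0]));
        assert!(!strictly_increasing(&[0.0, 10.0, 10.0])); // equal neighbours are rejected
    }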

@@ -436,10 +436,7 @@ mod tests {
             let err_str = err.to_string();
             assert!(
                 err_str == expected_error,
-                "For name '{}', expected error '{}', but got '{}'",
-                name,
-                expected_error,
-                err_str
+                "For name '{name}', expected error '{expected_error}', but got '{err_str}'"
             );
         }
     }
@@ -478,10 +475,7 @@ mod tests {
             let err_str = err.to_string();
             assert!(
                 err_str == expected_error,
-                "For unit '{}', expected error '{}', but got '{}'",
-                unit,
-                expected_error,
-                err_str
+                "For unit '{unit}', expected error '{expected_error}', but got '{err_str}'"
             );
         }
     }

@@ -1505,8 +1505,7 @@ mod tests {
                 assert_eq!(
                     a.data_points.len(),
                     b.data_points.len(),
-                    "{} gauge counts",
-                    test_name
+                    "{test_name} gauge counts"
                 );
                 for (a, b) in a.data_points.iter().zip(b.data_points.iter()) {
                     assert_gauge_data_points_eq(a, b, "mismatching gauge data points", test_name);
@@ -1515,19 +1514,16 @@ mod tests {
             (MetricData::Sum(a), MetricData::Sum(b)) => {
                 assert_eq!(
                     a.temporality, b.temporality,
-                    "{} mismatching sum temporality",
-                    test_name
+                    "{test_name} mismatching sum temporality"
                 );
                 assert_eq!(
                     a.is_monotonic, b.is_monotonic,
-                    "{} mismatching sum monotonicity",
-                    test_name,
+                    "{test_name} mismatching sum monotonicity",
                 );
                 assert_eq!(
                     a.data_points.len(),
                     b.data_points.len(),
-                    "{} sum counts",
-                    test_name
+                    "{test_name} sum counts"
                 );
                 for (a, b) in a.data_points.iter().zip(b.data_points.iter()) {
                     assert_sum_data_points_eq(a, b, "mismatching sum data points", test_name);
@@ -1536,14 +1532,12 @@ mod tests {
             (MetricData::Histogram(a), MetricData::Histogram(b)) => {
                 assert_eq!(
                     a.temporality, b.temporality,
-                    "{}: mismatching hist temporality",
-                    test_name
+                    "{test_name}: mismatching hist temporality"
                 );
                 assert_eq!(
                     a.data_points.len(),
                     b.data_points.len(),
-                    "{} hist counts",
-                    test_name
+                    "{test_name} hist counts"
                 );
                 for (a, b) in a.data_points.iter().zip(b.data_points.iter()) {
                     assert_hist_data_points_eq(a, b, "mismatching hist data points", test_name);
@@ -1552,14 +1546,12 @@ mod tests {
             (MetricData::ExponentialHistogram(a), MetricData::ExponentialHistogram(b)) => {
                 assert_eq!(
                     a.temporality, b.temporality,
-                    "{} mismatching hist temporality",
-                    test_name
+                    "{test_name} mismatching hist temporality"
                 );
                 assert_eq!(
                     a.data_points.len(),
                     b.data_points.len(),
-                    "{} hist counts",
-                    test_name
+                    "{test_name} hist counts"
                 );
                 for (a, b) in a.data_points.iter().zip(b.data_points.iter()) {
                     assert_exponential_hist_data_points_eq(
@@ -1574,8 +1566,7 @@ mod tests {
                 assert_eq!(
                     a.type_id(),
                     b.type_id(),
-                    "{} Aggregation types not equal",
-                    test_name
+                    "{test_name} Aggregation types not equal"
                 );
             }
         }
@@ -1589,10 +1580,9 @@ mod tests {
     ) {
         assert_eq!(
             a.attributes, b.attributes,
-            "{}: {} attributes",
-            test_name, message
+            "{test_name}: {message} attributes"
         );
-        assert_eq!(a.value, b.value, "{}: {} value", test_name, message);
+        assert_eq!(a.value, b.value, "{test_name}: {message} value");
     }
     fn assert_gauge_data_points_eq<T: Number>(
@@ -1603,10 +1593,9 @@ mod tests {
     ) {
         assert_eq!(
            a.attributes, b.attributes,
-            "{}: {} attributes",
-            test_name, message
+            "{test_name}: {message} attributes"
         );
-        assert_eq!(a.value, b.value, "{}: {} value", test_name, message);
+        assert_eq!(a.value, b.value, "{test_name}: {message} value");
     }
     fn assert_hist_data_points_eq<T: Number>(
@@ -1617,19 +1606,17 @@ mod tests {
     ) {
         assert_eq!(
             a.attributes, b.attributes,
-            "{}: {} attributes",
-            test_name, message
+            "{test_name}: {message} attributes"
         );
-        assert_eq!(a.count, b.count, "{}: {} count", test_name, message);
-        assert_eq!(a.bounds, b.bounds, "{}: {} bounds", test_name, message);
+        assert_eq!(a.count, b.count, "{test_name}: {message} count");
+        assert_eq!(a.bounds, b.bounds, "{test_name}: {message} bounds");
         assert_eq!(
             a.bucket_counts, b.bucket_counts,
-            "{}: {} bucket counts",
-            test_name, message
+            "{test_name}: {message} bucket counts"
         );
-        assert_eq!(a.min, b.min, "{}: {} min", test_name, message);
-        assert_eq!(a.max, b.max, "{}: {} max", test_name, message);
-        assert_eq!(a.sum, b.sum, "{}: {} sum", test_name, message);
+        assert_eq!(a.min, b.min, "{test_name}: {message} min");
+        assert_eq!(a.max, b.max, "{test_name}: {message} max");
+        assert_eq!(a.sum, b.sum, "{test_name}: {message} sum");
     }
     fn assert_exponential_hist_data_points_eq<T: Number>(
@@ -1640,30 +1627,23 @@ mod tests {
     ) {
         assert_eq!(
             a.attributes, b.attributes,
-            "{}: {} attributes",
-            test_name, message
+            "{test_name}: {message} attributes"
         );
-        assert_eq!(a.count, b.count, "{}: {} count", test_name, message);
-        assert_eq!(a.min, b.min, "{}: {} min", test_name, message);
-        assert_eq!(a.max, b.max, "{}: {} max", test_name, message);
-        assert_eq!(a.sum, b.sum, "{}: {} sum", test_name, message);
-        assert_eq!(a.scale, b.scale, "{}: {} scale", test_name, message);
-        assert_eq!(
-            a.zero_count, b.zero_count,
-            "{}: {} zeros",
-            test_name, message
-        );
+        assert_eq!(a.count, b.count, "{test_name}: {message} count");
+        assert_eq!(a.min, b.min, "{test_name}: {message} min");
+        assert_eq!(a.max, b.max, "{test_name}: {message} max");
+        assert_eq!(a.sum, b.sum, "{test_name}: {message} sum");
+        assert_eq!(a.scale, b.scale, "{test_name}: {message} scale");
+        assert_eq!(a.zero_count, b.zero_count, "{test_name}: {message} zeros");
         assert_eq!(
             a.positive_bucket, b.positive_bucket,
-            "{}: {} pos",
-            test_name, message
+            "{test_name}: {message} pos"
         );
         assert_eq!(
             a.negative_bucket, b.negative_bucket,
-            "{}: {} neg",
-            test_name, message
+            "{test_name}: {message} neg"
         );
     }
 }

@@ -118,7 +118,7 @@ impl MetricReader for ManualReader {
         let mut inner = self
             .inner
             .lock()
-            .map_err(|e| OTelSdkError::InternalFailure(format!("Failed to acquire lock: {}", e)))?;
+            .map_err(|e| OTelSdkError::InternalFailure(format!("Failed to acquire lock: {e}")))?;
         // Any future call to collect will now return an error.
         inner.sdk_producer = None;

@@ -545,7 +545,7 @@ mod tests {
         };
         // The Observable counter reports values[0], values[1],....values[n] on each flush.
         let values: Vec<u64> = (0..length).map(|i| start + i * increment).collect();
-        println!("Testing with observable values: {:?}", values);
+        println!("Testing with observable values: {values:?}");
         let values = Arc::new(values);
         let values_clone = values.clone();
         let i = Arc::new(Mutex::new(0));
@@ -834,7 +834,7 @@ mod tests {
         let resource_metrics = exporter
             .get_finished_metrics()
             .expect("metrics are expected to be exported.");
-        println!("resource_metrics: {:?}", resource_metrics);
+        println!("resource_metrics: {resource_metrics:?}");
         assert!(
             resource_metrics[0].scope_metrics.len() == 1,
             "There should be a single scope as the meters are identical"
@@ -1272,7 +1272,7 @@ mod tests {
             .exporter
             .get_finished_metrics()
             .expect("metrics are expected to be exported.");
-        println!("resource_metrics: {:?}", resource_metrics);
+        println!("resource_metrics: {resource_metrics:?}");
         assert!(resource_metrics.is_empty(), "No metrics should be exported as no new measurements were recorded since last collect.");
     }
@@ -1666,18 +1666,15 @@ mod tests {
         let metric = &resource_metrics[0].scope_metrics[0].metrics[0];
         assert_eq!(
             metric.name, expected_name,
-            "Expected name: {}.",
-            expected_name
+            "Expected name: {expected_name}."
         );
         assert_eq!(
             metric.unit, expected_unit,
-            "Expected unit: {}.",
-            expected_unit
+            "Expected unit: {expected_unit}."
         );
         assert_eq!(
             metric.description, expected_description,
-            "Expected description: {}.",
-            expected_description
+            "Expected description: {expected_description}."
         );
     }
@@ -3337,8 +3334,7 @@ mod tests {
         assert_eq!(
             self.resource_metrics.len(),
             invocation_count,
-            "Expected collect to be called {} times",
-            invocation_count
+            "Expected collect to be called {invocation_count} times"
         );
         let result = self

@@ -255,8 +255,7 @@ mod tests {
             assert_eq!(
                 propagator.extract(&extractor).span().span_context(),
                 &SpanContext::empty_context(),
-                "{}",
-                reason
+                "{reason}"
             )
         }
     }

@@ -411,10 +411,7 @@ mod tests {
         assert_eq!(
             result_schema_url.map(|s| s as &str),
             expected_schema_url,
-            "Merging schema_url_a {:?} with schema_url_b {:?} did not yield expected result {:?}",
-            schema_url_a,
-            schema_url_b,
-            expected_schema_url
+            "Merging schema_url_a {schema_url_a:?} with schema_url_b {schema_url_b:?} did not yield expected result {expected_schema_url:?}"
         );
     }
@@ -510,10 +507,7 @@ mod tests {
         assert_eq!(
             resource.schema_url().map(|s| s as &str),
             expected_schema_url,
-            "Merging schema_url_a {:?} with schema_url_b {:?} did not yield expected result {:?}",
-            schema_url_a,
-            schema_url_b,
-            expected_schema_url
+            "Merging schema_url_a {schema_url_a:?} with schema_url_b {schema_url_b:?} did not yield expected result {expected_schema_url:?}"
         );
     }

@@ -45,7 +45,7 @@ impl SpanExporter for TokioSpanExporter {
         batch.into_iter().try_for_each(|span_data| {
             self.tx_export
                 .send(span_data)
-                .map_err(|err| OTelSdkError::InternalFailure(format!("Export failed: {:?}", err)))
+                .map_err(|err| OTelSdkError::InternalFailure(format!("Export failed: {err:?}")))
         })
     }

@@ -8,7 +8,7 @@ use std::time::Duration;
 /// An in-memory span exporter that stores span data in memory.
 ///
 /// This exporter is useful for testing and debugging purposes. It stores
-/// metric data in a `Vec<SpanData>`. Metrics can be retrieved
+/// span data in a `Vec<SpanData>`. Spans can be retrieved
 /// using the `get_finished_spans` method.
 /// # Example
 /// ```
@@ -135,9 +135,7 @@ impl SpanExporter for InMemorySpanExporter {
             .spans
             .lock()
             .map(|mut spans_guard| spans_guard.append(&mut batch.clone()))
-            .map_err(|err| {
-                OTelSdkError::InternalFailure(format!("Failed to lock spans: {:?}", err))
-            });
+            .map_err(|err| OTelSdkError::InternalFailure(format!("Failed to lock spans: {err:?}")));
         result
     }

@@ -236,7 +236,7 @@ impl SdkTracerProvider {
         if result.iter().all(|r| r.is_ok()) {
             Ok(())
         } else {
-            Err(OTelSdkError::InternalFailure(format!("errs: {:?}", result)))
+            Err(OTelSdkError::InternalFailure(format!("errs: {result:?}")))
         }
     }

@@ -351,12 +351,7 @@ mod tests {
         let diff = (got - expectation).abs();
         assert!(
             diff <= tolerance,
-            "{} got {:?} (diff: {}), expected {} (w/tolerance: {})",
-            name,
-            got,
-            diff,
-            expectation,
-            tolerance
+            "{name} got {got:?} (diff: {diff}), expected {expectation} (w/tolerance: {tolerance})"
         );
     }
 }

@@ -236,7 +236,7 @@ impl JaegerRemoteSampler {
         let resp = client
             .send_bytes(request)
             .await
-            .map_err(|err| format!("the request is failed to send {}", err))?;
+            .map_err(|err| format!("the request is failed to send {err}"))?;
         // process failures
         if resp.status() != http::StatusCode::OK {
@@ -248,7 +248,7 @@ impl JaegerRemoteSampler {
         // deserialize the response
         serde_json::from_slice(&resp.body()[..])
-            .map_err(|err| format!("cannot deserialize the response, {}", err))
+            .map_err(|err| format!("cannot deserialize the response, {err}"))
     }
 }
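The sampler's fetch path is: send the request, reject non-200 statuses, then deserialize the body with error context attached. A reduced sketch of the same shape, with a caller-supplied status and body standing in for the real HTTP client:

    fn parse_strategy<T: serde::de::DeserializeOwned>(
        status: http::StatusCode,
        body: &[u8],
    ) -> Result<T, String> {
        // Reject anything other than a successful response up front.
        if status != http::StatusCode::OK {
            return Err(format!("unexpected status code: {status}"));
        }
        // Attach context so a bad payload is distinguishable from a bad request.
        serde_json::from_slice(body)
            .map_err(|err| format!("cannot deserialize the response, {err}"))
    }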

@@ -538,7 +538,7 @@ mod tests {
         let mut initial_attributes = Vec::new();
         let mut expected_dropped_count = 1;
         for i in 0..(DEFAULT_MAX_ATTRIBUTES_PER_SPAN + 1) {
-            initial_attributes.push(KeyValue::new(format!("key {}", i), i.to_string()))
+            initial_attributes.push(KeyValue::new(format!("key {i}"), i.to_string()))
         }
         let span_builder = SpanBuilder::from_name("test_span").with_attributes(initial_attributes);
@@ -578,7 +578,7 @@ mod tests {
         for i in 0..(DEFAULT_MAX_ATTRIBUTES_PER_EVENT * 2) {
             event1
                 .attributes
-                .push(KeyValue::new(format!("key {}", i), i.to_string()))
+                .push(KeyValue::new(format!("key {i}"), i.to_string()))
         }
         let event2 = event1.clone();
@@ -619,7 +619,7 @@ mod tests {
         ));
         for i in 0..(DEFAULT_MAX_ATTRIBUTES_PER_LINK * 2) {
             link.attributes
-                .push(KeyValue::new(format!("key {}", i), i.to_string()));
+                .push(KeyValue::new(format!("key {i}"), i.to_string()));
         }
         let span_builder = tracer.span_builder("test").with_links(vec![link]);
@@ -719,7 +719,7 @@ mod tests {
         let exported_data = span.exported_data();
         assert!(exported_data.is_some());
         let res = provider.shutdown();
-        println!("{:?}", res);
+        println!("{res:?}");
         assert!(res.is_ok());
         let dropped_span = tracer.start("span_with_dropped_provider");
         // return none if the provider has already been dropped

@@ -606,8 +606,7 @@ impl SpanProcessor for BatchSpanProcessor {
         if let Some(handle) = self.handle.lock().unwrap().take() {
             if let Err(err) = handle.join() {
                 return Err(OTelSdkError::InternalFailure(format!(
-                    "Background thread failed to join during shutdown. This may indicate a panic or unexpected termination: {:?}",
-                    err
+                    "Background thread failed to join during shutdown. This may indicate a panic or unexpected termination: {err:?}"
                 )));
             }
         }
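JoinHandle::join only returns Err when the background thread panicked, and the payload is a Box<dyn Any + Send>, which is why it is Debug-formatted rather than displayed. A minimal sketch:

    use std::thread;

    fn shutdown_worker(handle: thread::JoinHandle<()>) -> Result<(), String> {
        handle.join().map_err(|err| {
            // `err` is Box<dyn Any + Send>; Debug formatting is all that is
            // available since the panic payload's concrete type is unknown.
            format!("background thread failed to join during shutdown: {err:?}")
        })
    }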

@@ -6,19 +6,21 @@ use crate::trace::Span;
 use crate::trace::SpanProcessor;
 use crate::trace::{SpanData, SpanExporter};
 use futures_channel::oneshot;
-use futures_util::pin_mut;
 use futures_util::{
     future::{self, BoxFuture, Either},
-    select,
+    pin_mut, select,
     stream::{self, FusedStream, FuturesUnordered},
     StreamExt as _,
 };
 use opentelemetry::Context;
 use opentelemetry::{otel_debug, otel_error, otel_warn};
 use std::fmt;
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::Arc;
+use std::sync::{
+    atomic::{AtomicUsize, Ordering},
+    Arc,
+};
 use std::time::Duration;
+use tokio::sync::RwLock;
 /// A [`SpanProcessor`] that asynchronously buffers finished spans and reports
 /// them at a preconfigured interval.
@@ -126,11 +128,11 @@ impl<R: RuntimeChannel> SpanProcessor for BatchSpanProcessor<R> {
         self.message_sender
             .try_send(BatchMessage::Flush(Some(res_sender)))
             .map_err(|err| {
-                OTelSdkError::InternalFailure(format!("Failed to send flush message: {}", err))
+                OTelSdkError::InternalFailure(format!("Failed to send flush message: {err}"))
             })?;
         futures_executor::block_on(res_receiver).map_err(|err| {
-            OTelSdkError::InternalFailure(format!("Flush response channel error: {}", err))
+            OTelSdkError::InternalFailure(format!("Flush response channel error: {err}"))
         })?
     }
@@ -150,11 +152,11 @@ impl<R: RuntimeChannel> SpanProcessor for BatchSpanProcessor<R> {
         self.message_sender
             .try_send(BatchMessage::Shutdown(res_sender))
             .map_err(|err| {
-                OTelSdkError::InternalFailure(format!("Failed to send shutdown message: {}", err))
+                OTelSdkError::InternalFailure(format!("Failed to send shutdown message: {err}"))
             })?;
         futures_executor::block_on(res_receiver).map_err(|err| {
-            OTelSdkError::InternalFailure(format!("Shutdown response channel error: {}", err))
+            OTelSdkError::InternalFailure(format!("Shutdown response channel error: {err}"))
        })?
     }
@@ -188,13 +190,22 @@ struct BatchSpanProcessorInternal<E, R> {
     spans: Vec<SpanData>,
     export_tasks: FuturesUnordered<BoxFuture<'static, OTelSdkResult>>,
     runtime: R,
-    exporter: E,
     config: BatchConfig,
+    // TODO: Redesign the `SpanExporter` trait to use immutable references (`&self`)
+    // for all methods. This would allow us to remove the `RwLock` and just use `Arc<E>`,
+    // similar to how `crate::logs::LogExporter` is implemented.
+    exporter: Arc<RwLock<E>>,
 }
-impl<E: SpanExporter, R: RuntimeChannel> BatchSpanProcessorInternal<E, R> {
+impl<E: SpanExporter + 'static, R: RuntimeChannel> BatchSpanProcessorInternal<E, R> {
     async fn flush(&mut self, res_channel: Option<oneshot::Sender<OTelSdkResult>>) {
-        let export_result = self.export().await;
+        let export_result = Self::export(
+            self.spans.split_off(0),
+            self.exporter.clone(),
+            self.runtime.clone(),
+            self.config.max_export_timeout,
+        )
+        .await;
         let task = Box::pin(async move {
             if let Some(channel) = res_channel {
                 // If a response channel is provided, attempt to send the export result through it.
@@ -243,9 +254,15 @@ impl<E: SpanExporter, R: RuntimeChannel> BatchSpanProcessorInternal<E, R> {
                     self.export_tasks.next().await;
                 }
-                let export_result = self.export().await;
+                let batch = self.spans.split_off(0);
+                let exporter = self.exporter.clone();
+                let runtime = self.runtime.clone();
+                let max_export_timeout = self.config.max_export_timeout;
                 let task = async move {
-                    if let Err(err) = export_result {
+                    if let Err(err) =
+                        Self::export(batch, exporter, runtime, max_export_timeout).await
+                    {
                         otel_error!(
                             name: "BatchSpanProcessor.Export.Error",
                             reason = format!("{}", err)
@@ -254,6 +271,7 @@ impl<E: SpanExporter, R: RuntimeChannel> BatchSpanProcessorInternal<E, R> {
                     Ok(())
                 };
+                // Special case when not using concurrent exports
                 if self.config.max_concurrent_exports == 1 {
                     let _ = task.await;
@@ -288,34 +306,39 @@ impl<E: SpanExporter, R: RuntimeChannel> BatchSpanProcessorInternal<E, R> {
             // Stream has terminated or processor is shutdown, return to finish execution.
             BatchMessage::Shutdown(ch) => {
                 self.flush(Some(ch)).await;
-                let _ = self.exporter.shutdown();
+                let _ = self.exporter.write().await.shutdown();
                 return false;
             }
             // propagate the resource
             BatchMessage::SetResource(resource) => {
-                self.exporter.set_resource(&resource);
+                self.exporter.write().await.set_resource(&resource);
            }
         }
         true
     }
-    async fn export(&mut self) -> OTelSdkResult {
+    async fn export(
+        batch: Vec<SpanData>,
+        exporter: Arc<RwLock<E>>,
+        runtime: R,
+        max_export_timeout: Duration,
+    ) -> OTelSdkResult {
         // Batch size check for flush / shutdown. Those methods may be called
         // when there's no work to do.
-        if self.spans.is_empty() {
+        if batch.is_empty() {
             return Ok(());
         }
-        let export = self.exporter.export(self.spans.split_off(0));
-        let timeout = self.runtime.delay(self.config.max_export_timeout);
-        let time_out = self.config.max_export_timeout;
+        let exporter_guard = exporter.read().await;
+        let export = exporter_guard.export(batch);
+        let timeout = runtime.delay(max_export_timeout);
         pin_mut!(export);
         pin_mut!(timeout);
         match future::select(export, timeout).await {
             Either::Left((export_res, _)) => export_res,
-            Either::Right((_, _)) => Err(OTelSdkError::Timeout(time_out)),
+            Either::Right((_, _)) => Err(OTelSdkError::Timeout(max_export_timeout)),
         }
     }
@@ -368,7 +391,7 @@ impl<R: RuntimeChannel> BatchSpanProcessor<R> {
             export_tasks: FuturesUnordered::new(),
             runtime: timeout_runtime,
             config,
-            exporter,
+            exporter: Arc::new(RwLock::new(exporter)),
         };
         processor.run(messages).await
@@ -435,6 +458,8 @@ mod tests {
     use crate::trace::{SpanData, SpanExporter};
     use futures_util::Future;
     use std::fmt::Debug;
+    use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+    use std::sync::Arc;
     use std::time::Duration;
     struct BlockingExporter<D> {
@@ -463,6 +488,39 @@ mod tests {
         }
     }
+    /// Exporter that records whether two exports overlap in time.
+    struct TrackingExporter {
+        /// Artificial delay to keep each export alive for a while.
+        delay: Duration,
+        /// Current number of in-flight exports.
+        active: Arc<AtomicUsize>,
+        /// Set to true the first time we see overlap.
+        concurrent_seen: Arc<AtomicBool>,
+    }
+    impl Debug for TrackingExporter {
+        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+            f.write_str("tracking exporter")
+        }
+    }
+    impl SpanExporter for TrackingExporter {
+        async fn export(&self, _batch: Vec<SpanData>) -> crate::error::OTelSdkResult {
+            // Increment in-flight counter and note any overlap.
+            let inflight = self.active.fetch_add(1, Ordering::SeqCst) + 1;
+            if inflight > 1 {
+                self.concurrent_seen.store(true, Ordering::SeqCst);
+            }
+            // Keep the export "busy" for a bit.
+            tokio::time::sleep(self.delay).await;
+            // Decrement counter.
+            self.active.fetch_sub(1, Ordering::SeqCst);
+            Ok(())
+        }
+    }
     #[test]
     fn test_build_batch_span_processor_builder() {
         let mut env_vars = vec![
@@ -532,8 +590,8 @@ mod tests {
         );
     }
-    // If the time_out is true, then the result suppose to ended with timeout.
-    // otherwise the exporter should be able to export within time out duration.
+    // If `time_out` is `true`, then the export should fail with a timeout.
+    // Else, the exporter should be able to export within the timeout duration.
     async fn timeout_test_tokio(time_out: bool) {
         let config = BatchConfig {
             max_export_timeout: Duration::from_millis(if time_out { 5 } else { 60 }),
@@ -557,24 +615,92 @@ mod tests {
         assert!(shutdown_res.is_ok());
     }
-    #[test]
-    fn test_timeout_tokio_timeout() {
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_timeout_tokio_timeout() {
         // If time_out is true, then we ask exporter to block for 60s and set timeout to 5s.
         // If time_out is false, then we ask the exporter to block for 5s and set timeout to 60s.
         // Either way, the test should be finished within 5s.
-        let runtime = tokio::runtime::Builder::new_multi_thread()
-            .enable_all()
-            .build()
-            .unwrap();
-        runtime.block_on(timeout_test_tokio(true));
+        timeout_test_tokio(true).await;
     }
-    #[test]
-    fn test_timeout_tokio_not_timeout() {
-        let runtime = tokio::runtime::Builder::new_multi_thread()
-            .enable_all()
-            .build()
-            .unwrap();
-        runtime.block_on(timeout_test_tokio(false));
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_timeout_tokio_not_timeout() {
+        timeout_test_tokio(false).await;
+    }
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_concurrent_exports_expected() {
+        // Shared state for the exporter.
+        let active = Arc::new(AtomicUsize::new(0));
+        let concurrent_seen = Arc::new(AtomicBool::new(false));
+        let exporter = TrackingExporter {
+            delay: Duration::from_millis(50),
+            active: active.clone(),
+            concurrent_seen: concurrent_seen.clone(),
+        };
+        // Intentionally tiny batch-size so every span forces an export.
+        let config = BatchConfig {
+            max_export_batch_size: 1,
+            max_queue_size: 16,
+            scheduled_delay: Duration::from_secs(3600), // effectively disabled
+            max_export_timeout: Duration::from_secs(5),
+            max_concurrent_exports: 2, // what we want to verify
+        };
+        // Spawn the processor.
+        let processor = BatchSpanProcessor::new(exporter, config, runtime::Tokio);
+        // Finish three spans in rapid succession.
+        processor.on_end(new_test_export_span_data());
+        processor.on_end(new_test_export_span_data());
+        processor.on_end(new_test_export_span_data());
+        // Wait until everything has been exported.
+        processor.force_flush().expect("force flush failed");
+        processor.shutdown().expect("shutdown failed");
+        // Expect at least one period with >1 export in flight.
+        assert!(
+            concurrent_seen.load(Ordering::SeqCst),
+            "exports never overlapped, processor is still serialising them"
+        );
+    }
+    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+    async fn test_exports_serial_when_max_concurrent_exports_1() {
+        let active = Arc::new(AtomicUsize::new(0));
+        let concurrent_seen = Arc::new(AtomicBool::new(false));
+        let exporter = TrackingExporter {
+            delay: Duration::from_millis(50),
+            active: active.clone(),
+            concurrent_seen: concurrent_seen.clone(),
+        };
+        let config = BatchConfig {
+            max_export_batch_size: 1,
+            max_queue_size: 16,
+            scheduled_delay: Duration::from_secs(3600),
+            max_export_timeout: Duration::from_secs(5),
+            max_concurrent_exports: 1, // what we want to verify
+        };
+        let processor = BatchSpanProcessor::new(exporter, config, runtime::Tokio);
+        // Finish several spans quickly.
+        processor.on_end(new_test_export_span_data());
+        processor.on_end(new_test_export_span_data());
+        processor.on_end(new_test_export_span_data());
+        processor.force_flush().expect("force flush failed");
+        processor.shutdown().expect("shutdown failed");
+        // There must never have been more than one export in flight.
+        assert!(
+            !concurrent_seen.load(Ordering::SeqCst),
+            "exports overlapped even though max_concurrent_exports was 1"
+        );
     }
 }
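The heart of the concurrency fix above is that export no longer takes &mut self: each export task holds only a read lock on the shared exporter, so several exports can be in flight at once, while shutdown and set_resource take the write lock and wait for in-flight exports to drain. A standalone sketch of that locking scheme, using a stand-in exporter type rather than the SDK trait:

    use std::sync::Arc;
    use std::time::Duration;
    use tokio::sync::RwLock;

    struct Exporter;

    impl Exporter {
        async fn export(&self, batch: Vec<u32>) {
            // &self receiver: concurrent callers are fine.
            tokio::time::sleep(Duration::from_millis(50)).await;
            drop(batch);
        }
    }

    #[tokio::main]
    async fn main() {
        let exporter = Arc::new(RwLock::new(Exporter));
        let mut tasks = Vec::new();
        for batch in [vec![1], vec![2], vec![3]] {
            let exporter = exporter.clone();
            tasks.push(tokio::spawn(async move {
                // Read locks are shared, so these exports overlap in time.
                exporter.read().await.export(batch).await;
            }));
        }
        for task in tasks {
            task.await.unwrap();
        }
        // A write lock (shutdown, set_resource) waits for all readers to finish.
        let _guard = exporter.write().await;
    }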

@@ -13,7 +13,10 @@ use crate::trace::{
     IdGenerator, ShouldSample, SpanEvents, SpanLimits, SpanLinks,
 };
 use opentelemetry::{
-    trace::{SamplingDecision, SpanBuilder, SpanContext, SpanKind, TraceContextExt, TraceFlags},
+    trace::{
+        SamplingDecision, Span as _, SpanBuilder, SpanContext, SpanKind, TraceContextExt,
+        TraceFlags,
+    },
     Context, InstrumentationScope, KeyValue,
 };
 use std::fmt;
@@ -281,9 +284,11 @@ impl opentelemetry::trace::Tracer for SdkTracer {
             }
         };
-        // Call `on_start` for all processors
-        for processor in provider.span_processors() {
-            processor.on_start(&mut span, parent_cx)
+        if span.is_recording() {
+            // Call `on_start` for all processors
+            for processor in provider.span_processors() {
+                processor.on_start(&mut span, parent_cx)
+            }
         }
         span
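Gating on_start behind is_recording() means non-recorded spans (for example, after a Drop sampling decision) skip the processor pipeline entirely. A small sketch of the control flow with simplified types:

    struct Span {
        recording: bool,
    }

    impl Span {
        fn is_recording(&self) -> bool {
            self.recording
        }
    }

    fn start_span(span: &mut Span, processors: &[fn(&mut Span)]) {
        // Non-recording spans never reach the processors.
        if span.is_recording() {
            for on_start in processors {
                on_start(span);
            }
        }
    }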

@@ -2,6 +2,8 @@
 ## vNext
+- Update to [v1.34.0](https://github.com/open-telemetry/semantic-conventions/releases/tag/v1.34.0) of the semantic conventions.
 ## 0.30.0
 Released 2025-May-23

@@ -5,8 +5,8 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 CRATE_DIR="${SCRIPT_DIR}/../"
 # freeze the spec version and generator version to make generation reproducible
-SPEC_VERSION=1.33.0
-WEAVER_VERSION=v0.15.0
+SPEC_VERSION=1.36.0
+WEAVER_VERSION=v0.16.1
 cd "$CRATE_DIR"
@@ -58,4 +58,9 @@ expression='
 # TODO: This workaround should be removed once the upstream generator handles this correctly.
 "${SED[@]}" 's/<key>/`key`/g' src/attribute.rs
+# Patch: rustdoc warns about bare URLs in doc comments.
+# The following line wraps the specific Kubernetes ResourceRequirements URL with <...>
+# as suggested by rustdoc warnings, so it becomes a clickable link and the warning goes away.
+"${SED[@]}" -E 's|(/// See )(https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)( for details)|\1<\2>\3|g' src/metric.rs
 cargo fmt
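rustdoc's bare-URL warning is silenced by wrapping the URL in angle brackets, which also turns it into a clickable autolink; the added sed line performs exactly that rewrite on the generated source. An illustration of the before and after (the URL is the one from the script; the item carrying the comment is hypothetical):

    /// Before: rustdoc warns that this URL is not a hyperlink.
    /// See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details
    /// After: angle brackets make it an autolink and the warning disappears.
    /// See <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core> for details
    pub struct Placeholder;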

@@ -14,7 +14,7 @@ comment_formats:
 default_comment_format: rust
 params:
-  schema_url: "https://opentelemetry.io/schemas/1.33.0"
+  schema_url: "https://opentelemetry.io/schemas/1.36.0"
 exclude_root_namespace: []
 excluded_attributes: ["messaging.client_id"]

File diff suppressed because it is too large

@@ -22,4 +22,4 @@ pub mod trace;
 /// The schema URL that matches the version of the semantic conventions that
 /// this crate defines.
-pub const SCHEMA_URL: &str = "https://opentelemetry.io/schemas/1.33.0";
+pub const SCHEMA_URL: &str = "https://opentelemetry.io/schemas/1.36.0";

File diff suppressed because it is too large

@@ -350,6 +350,15 @@ pub use crate::attribute::K8S_DEPLOYMENT_UID;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_HPA_NAME;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::K8S_HPA_SCALETARGETREF_API_VERSION;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::K8S_HPA_SCALETARGETREF_KIND;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::K8S_HPA_SCALETARGETREF_NAME;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_HPA_UID;
@@ -434,6 +443,9 @@ pub use crate::attribute::K8S_STATEFULSET_NAME;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_STATEFULSET_UID;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::MAINFRAME_LPAR_NAME;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::OCI_MANIFEST_DIGEST;
@@ -539,3 +551,9 @@ pub use crate::attribute::WEBENGINE_NAME;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::WEBENGINE_VERSION;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::ZOS_SMF_ID;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::ZOS_SYSPLEX_NAME;

@@ -43,6 +43,12 @@ pub use crate::attribute::APP_WIDGET_ID;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::APP_WIDGET_NAME;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::AWS_BEDROCK_GUARDRAIL_ID;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::AWS_BEDROCK_KNOWLEDGE_BASE_ID;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_ATTRIBUTE_DEFINITIONS;
@@ -115,6 +121,9 @@ pub use crate::attribute::AWS_EXTENDED_REQUEST_ID;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_LAMBDA_INVOKED_ARN;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::AWS_LAMBDA_RESOURCE_MAPPING_ID;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_REQUEST_ID;
@@ -137,9 +146,7 @@ pub use crate::attribute::AWS_S3_PART_NUMBER;
 pub use crate::attribute::AWS_S3_UPLOAD_ID;
 #[cfg(feature = "semconv_experimental")]
-pub use crate::attribute::AZ_NAMESPACE;
-#[cfg(feature = "semconv_experimental")]
+#[allow(deprecated)]
 pub use crate::attribute::AZ_SERVICE_REQUEST_ID;
 #[cfg(feature = "semconv_experimental")]
@@ -163,6 +170,12 @@ pub use crate::attribute::AZURE_COSMOSDB_REQUEST_BODY_SIZE;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AZURE_COSMOSDB_RESPONSE_SUB_STATUS_CODE;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::AZURE_RESOURCE_PROVIDER_NAMESPACE;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::AZURE_SERVICE_REQUEST_ID;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CASSANDRA_CONSISTENCY_LEVEL;
@@ -203,6 +216,9 @@ pub use crate::attribute::CLIENT_ADDRESS;
 pub use crate::attribute::CLIENT_PORT;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CLOUD_REGION;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CLOUD_RESOURCE_ID;
@@ -233,6 +249,12 @@ pub use crate::attribute::DB_STORED_PROCEDURE_NAME;
 pub use crate::attribute::DB_SYSTEM_NAME;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::DNS_ANSWERS;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::DNS_QUESTION_NAME;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::ELASTICSEARCH_NODE_NAME;
@@ -295,6 +317,9 @@ pub use crate::attribute::FEATURE_FLAG_PROVIDER_NAME;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FEATURE_FLAG_RESULT_REASON;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::FEATURE_FLAG_RESULT_VALUE;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FEATURE_FLAG_RESULT_VARIANT;
@@ -313,6 +338,12 @@ pub use crate::attribute::GEN_AI_AGENT_ID;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_AGENT_NAME;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::GEN_AI_CONVERSATION_ID;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::GEN_AI_DATA_SOURCE_ID;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_OPENAI_REQUEST_SERVICE_TIER;
@@ -488,6 +519,12 @@ pub use crate::attribute::SESSION_ID;
 #[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::SESSION_PREVIOUS_ID;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::TLS_PROTOCOL_NAME;
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::TLS_PROTOCOL_VERSION;
 pub use crate::attribute::URL_FULL;
 pub use crate::attribute::URL_PATH;

@@ -33,7 +33,7 @@ opentelemetry_sdk = { version = "0.30", path = "../opentelemetry-sdk" }
 [dev-dependencies]
 opentelemetry = { path = "../opentelemetry", features = ["metrics"] }
-opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["rt-tokio", "metrics"] }
+opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["metrics"] }
 opentelemetry-appender-tracing = { workspace = true }
 tracing = { workspace = true, features = ["std"]}
 tracing-subscriber = { workspace = true, features = ["registry", "std"] }

@@ -46,10 +46,10 @@ impl opentelemetry_sdk::logs::LogExporter for LogExporter {
         } else {
             println!("Resource");
             if let Some(schema_url) = self.resource.schema_url() {
-                println!("\t Resource SchemaUrl: {:?}", schema_url);
+                println!("\t Resource SchemaUrl: {schema_url:?}");
             }
             self.resource.iter().for_each(|(k, v)| {
-                println!("\t -> {}={:?}", k, v);
+                println!("\t -> {k}={v:?}");
             });
             print_logs(batch);
         }
@@ -70,22 +70,22 @@ impl opentelemetry_sdk::logs::LogExporter for LogExporter {
 fn print_logs(batch: LogBatch<'_>) {
     for (i, log) in batch.iter().enumerate() {
-        println!("Log #{}", i);
+        println!("Log #{i}");
         let (record, library) = log;
-        println!("\t Instrumentation Scope: {:?}", library);
+        println!("\t Instrumentation Scope: {library:?}");
         if let Some(event_name) = record.event_name() {
-            println!("\t EventName: {:?}", event_name);
+            println!("\t EventName: {event_name:?}");
         }
         if let Some(target) = record.target() {
-            println!("\t Target (Scope): {:?}", target);
+            println!("\t Target (Scope): {target:?}");
         }
         if let Some(trace_context) = record.trace_context() {
             println!("\t TraceId: {:?}", trace_context.trace_id);
             println!("\t SpanId: {:?}", trace_context.span_id);
             if let Some(trace_flags) = trace_context.trace_flags {
-                println!("\t TraceFlags: {:?}", trace_flags);
+                println!("\t TraceFlags: {trace_flags:?}");
             }
         }
         if let Some(timestamp) = record.timestamp() {
@@ -100,18 +100,18 @@ fn print_logs(batch: LogBatch<'_>) {
             );
         }
         if let Some(severity) = record.severity_text() {
-            println!("\t SeverityText: {:?}", severity);
+            println!("\t SeverityText: {severity:?}");
         }
         if let Some(severity) = record.severity_number() {
-            println!("\t SeverityNumber: {:?}", severity);
+            println!("\t SeverityNumber: {severity:?}");
         }
         if let Some(body) = record.body() {
-            println!("\t Body: {:?}", body);
+            println!("\t Body: {body:?}");
         }
         println!("\t Attributes:");
         for (k, v) in record.attributes_iter() {
-            println!("\t\t -> {}: {:?}", k, v);
+            println!("\t\t -> {k}: {v:?}");
         }
     }
 }

@@ -49,11 +49,11 @@ impl PushMetricExporter for MetricExporter {
         println!("Metrics");
         println!("Resource");
         if let Some(schema_url) = metrics.resource().schema_url() {
-            println!("\tResource SchemaUrl: {:?}", schema_url);
+            println!("\tResource SchemaUrl: {schema_url:?}");
         }
         metrics.resource().iter().for_each(|(k, v)| {
-            println!("\t -> {}={:?}", k, v);
+            println!("\t -> {k}={v:?}");
         });
         print_metrics(metrics.scope_metrics());
         Ok(())
@@ -81,14 +81,14 @@ impl PushMetricExporter for MetricExporter {
 fn print_metrics<'a>(metrics: impl Iterator<Item = &'a ScopeMetrics>) {
     for (i, metric) in metrics.enumerate() {
-        println!("\tInstrumentation Scope #{}", i);
+        println!("\tInstrumentation Scope #{i}");
         let scope = metric.scope();
         println!("\t\tName : {}", scope.name());
         if let Some(version) = scope.version() {
-            println!("\t\tVersion : {:?}", version);
+            println!("\t\tVersion : {version:?}");
         }
         if let Some(schema_url) = scope.schema_url() {
-            println!("\t\tSchemaUrl: {:?}", schema_url);
+            println!("\t\tSchemaUrl: {schema_url:?}");
         }
         scope.attributes().enumerate().for_each(|(index, kv)| {
             if index == 0 {
@@ -98,7 +98,7 @@ fn print_metrics<'a>(metrics: impl Iterator<Item = &'a ScopeMetrics>) {
         });
         metric.metrics().enumerate().for_each(|(i, metric)| {
-            println!("Metric #{}", i);
+            println!("Metric #{i}");
             println!("\t\tName : {}", metric.name());
             println!("\t\tDescription : {}", metric.description());
             println!("\t\tUnit : {}", metric.unit());
@@ -197,7 +197,7 @@ fn print_sum_data_points<'a, T: Debug + Copy + 'a>(
     data_points: impl Iterator<Item = &'a SumDataPoint<T>>,
 ) {
     for (i, data_point) in data_points.enumerate() {
-        println!("\t\tDataPoint #{}", i);
+        println!("\t\tDataPoint #{i}");
         println!("\t\t\tValue : {:#?}", data_point.value());
         println!("\t\t\tAttributes :");
         for kv in data_point.attributes() {
@@ -210,7 +210,7 @@ fn print_gauge_data_points<'a, T: Debug + Copy + 'a>(
     data_points: impl Iterator<Item = &'a GaugeDataPoint<T>>,
 ) {
     for (i, data_point) in data_points.enumerate() {
-        println!("\t\tDataPoint #{}", i);
+        println!("\t\tDataPoint #{i}");
         println!("\t\t\tValue : {:#?}", data_point.value());
         println!("\t\t\tAttributes :");
         for kv in data_point.attributes() {
@@ -223,15 +223,15 @@ fn print_hist_data_points<'a, T: Debug + Copy + 'a>(
     data_points: impl Iterator<Item = &'a HistogramDataPoint<T>>,
 ) {
     for (i, data_point) in data_points.enumerate() {
-        println!("\t\tDataPoint #{}", i);
+        println!("\t\tDataPoint #{i}");
         println!("\t\t\tCount : {}", data_point.count());
         println!("\t\t\tSum : {:?}", data_point.sum());
         if let Some(min) = &data_point.min() {
-            println!("\t\t\tMin : {:?}", min);
+            println!("\t\t\tMin : {min:?}");
         }
         if let Some(max) = &data_point.max() {
-            println!("\t\t\tMax : {:?}", max);
+            println!("\t\t\tMax : {max:?}");
         }
         println!("\t\t\tAttributes :");
@@ -254,14 +254,14 @@ fn print_hist_data_points<'a, T: Debug + Copy + 'a>(
             // Get the count for this bucket, or 0 if not available
             let count = bucket_counts_iter.next().unwrap_or(0);
-            println!("\t\t\t\t {} to {} : {}", lower_bound, upper_bound, count);
+            println!("\t\t\t\t {lower_bound} to {upper_bound} : {count}");
             lower_bound = upper_bound;
         }
         // Handle the final +Infinity bucket if we processed any buckets
         if header_printed {
             let last_count = bucket_counts_iter.next().unwrap_or(0);
-            println!("\t\t\t\t{} to +Infinity : {}", lower_bound, last_count);
+            println!("\t\t\t\t{lower_bound} to +Infinity : {last_count}");
         }
     }
 }
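An explicit-bucket histogram with N boundaries carries N + 1 counts; the final count is the overflow bucket up to +Infinity, which is why the loop above is followed by one extra print. A self-contained sketch of that pairing:

    fn print_buckets(bounds: &[f64], counts: &[u64]) {
        // counts.len() == bounds.len() + 1: the last slot is the +Infinity bucket.
        let mut counts_iter = counts.iter().copied();
        let mut lower = f64::NEG_INFINITY;
        for upper in bounds.iter().copied() {
            let count = counts_iter.next().unwrap_or(0);
            println!("{lower} to {upper} : {count}");
            lower = upper;
        }
        let last = counts_iter.next().unwrap_or(0);
        println!("{lower} to +Infinity : {last}");
    }

    fn main() {
        // Three boundaries, four counts.
        print_buckets(&[0.0, 10.0, 100.0], &[1, 5, 3, 2]);
    }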

@@ -45,11 +45,11 @@ impl opentelemetry_sdk::trace::SpanExporter for SpanExporter {
         } else {
             println!("Resource");
             if let Some(schema_url) = self.resource.schema_url() {
-                println!("\tResource SchemaUrl: {:?}", schema_url);
+                println!("\tResource SchemaUrl: {schema_url:?}");
             }
             self.resource.iter().for_each(|(k, v)| {
-                println!("\t -> {}={:?}", k, v);
+                println!("\t -> {k}={v:?}");
             });
             print_spans(batch);
@@ -71,17 +71,17 @@ impl opentelemetry_sdk::trace::SpanExporter for SpanExporter {
 fn print_spans(batch: Vec<SpanData>) {
     for (i, span) in batch.into_iter().enumerate() {
-        println!("Span #{}", i);
+        println!("Span #{i}");
         println!("\tInstrumentation Scope");
         println!(
             "\t\tName : {:?}",
             &span.instrumentation_scope.name()
         );
         if let Some(version) = &span.instrumentation_scope.version() {
-            println!("\t\tVersion : {:?}", version);
+            println!("\t\tVersion : {version:?}");
         }
         if let Some(schema_url) = &span.instrumentation_scope.schema_url() {
-            println!("\t\tSchemaUrl: {:?}", schema_url);
+            println!("\t\tSchemaUrl: {schema_url:?}");
         }
         span.instrumentation_scope
             .attributes()
@@ -120,7 +120,7 @@ fn print_spans(batch: Vec<SpanData>) {
             if index == 0 {
                 println!("\tEvents:");
             }
-            println!("\tEvent #{}", index);
+            println!("\tEvent #{index}");
             println!("\tName : {}", event.name);
             let datetime: DateTime<Utc> = event.timestamp.into();
             println!("\tTimestamp : {}", datetime.format("%Y-%m-%d %H:%M:%S%.6f"));
@@ -137,7 +137,7 @@ fn print_spans(batch: Vec<SpanData>) {
             if index == 0 {
                 println!("\tLinks:");
             }
-            println!("\tLink #{}", index);
+            println!("\tLink #{index}");
             println!("\tTraceId: {}", link.span_context.trace_id());
             println!("\tSpanId : {}", link.span_context.span_id());

@@ -21,7 +21,7 @@ pub(crate) fn get_timeout() -> Duration {
         Some(timeout) => match timeout.parse() {
             Ok(timeout) => Duration::from_millis(timeout),
             Err(e) => {
-                eprintln!("{} malformed defaulting to 10000: {}", ENV_TIMEOUT, e);
+                eprintln!("{ENV_TIMEOUT} malformed defaulting to 10000: {e}");
                 DEFAULT_COLLECTOR_TIMEOUT
             }
         },
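The surrounding function is the usual parse-or-default pattern for environment configuration: a missing variable silently falls back, while a malformed one logs before falling back. A standalone sketch with hypothetical constant names:

    use std::time::Duration;

    const ENV_TIMEOUT: &str = "EXAMPLE_COLLECTOR_TIMEOUT"; // hypothetical name
    const DEFAULT_COLLECTOR_TIMEOUT: Duration = Duration::from_millis(10_000);

    fn get_timeout() -> Duration {
        match std::env::var(ENV_TIMEOUT) {
            Ok(raw) => match raw.parse::<u64>() {
                Ok(millis) => Duration::from_millis(millis),
                Err(e) => {
                    eprintln!("{ENV_TIMEOUT} malformed defaulting to 10000: {e}");
                    DEFAULT_COLLECTOR_TIMEOUT
                }
            },
            // Unset variable: use the default without logging.
            Err(_) => DEFAULT_COLLECTOR_TIMEOUT,
        }
    }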

View File

```diff
@@ -38,25 +38,24 @@ pub(crate) struct JsonV2Client {
 impl JsonV2Client {
     async fn upload(&self, spans: Vec<Span>) -> OTelSdkResult {
         let body = serde_json::to_vec(&spans).map_err(|e| {
-            OTelSdkError::InternalFailure(format!("JSON serialization failed: {}", e))
+            OTelSdkError::InternalFailure(format!("JSON serialization failed: {e}"))
         })?;
 
         let req = Request::builder()
             .method(Method::POST)
             .uri(self.collector_endpoint.clone())
             .header(CONTENT_TYPE, "application/json")
             .body(body.into())
-            .map_err(|e| {
-                OTelSdkError::InternalFailure(format!("Failed to create request: {}", e))
-            })?;
+            .map_err(|e| OTelSdkError::InternalFailure(format!("Failed to create request: {e}")))?;
 
-        let response =
-            self.client.send_bytes(req).await.map_err(|e| {
-                OTelSdkError::InternalFailure(format!("HTTP request failed: {}", e))
-            })?;
+        let response = self
+            .client
+            .send_bytes(req)
+            .await
+            .map_err(|e| OTelSdkError::InternalFailure(format!("HTTP request failed: {e}")))?;
 
         response
             .error_for_status()
-            .map_err(|e| OTelSdkError::InternalFailure(format!("HTTP response error: {}", e)))?;
+            .map_err(|e| OTelSdkError::InternalFailure(format!("HTTP response error: {e}")))?;
         Ok(())
     }
 }
```
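The refactor above only reshapes the `map_err` closures; the underlying pattern is flattening each step's own error type into a single `InternalFailure(String)` variant so `?` works uniformly. A stripped-down sketch (this `SdkError` is a stand-in, not the crate's `OTelSdkError`):

```rust
// Stand-in error type, shaped like the one-variant mapping used above.
#[derive(Debug)]
enum SdkError {
    InternalFailure(String),
}

// Each fallible step maps its own error into the single SDK variant.
fn parse_port(raw: &str) -> Result<u16, SdkError> {
    raw.parse::<u16>()
        .map_err(|e| SdkError::InternalFailure(format!("invalid port: {e}")))
}

fn main() {
    assert!(parse_port("4318").is_ok());
    assert!(matches!(
        parse_port("not-a-port"),
        Err(SdkError::InternalFailure(_))
    ));
}
```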

View File

```diff
@@ -237,7 +237,7 @@ impl TextMapPropagator for Propagator {
         } else {
             "0"
         };
-        value = format!("{}-{:01}", value, flag)
+        value = format!("{value}-{flag:01}")
     }
 
     injector.set(B3_SINGLE_HEADER, value);
```
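This hunk appends the sampling flag to the single-header value, producing B3's `{trace_id}-{span_id}-{sampling_state}` layout. A rough sketch of how such a value is assembled; the helper below is illustrative, not the propagator's actual code:

```rust
// Illustrative helper: builds a B3 single-header value from pre-formatted IDs.
fn b3_single_header(trace_id: &str, span_id: &str, sampled: Option<bool>) -> String {
    let mut value = format!("{trace_id}-{span_id}");
    if let Some(sampled) = sampled {
        // Only append the sampling state when a decision was actually made.
        let flag = if sampled { "1" } else { "0" };
        value = format!("{value}-{flag:01}");
    }
    value
}

fn main() {
    let header =
        b3_single_header("4bf92f3577b34da6a3ce929d0e0e4736", "00f067aa0ba902b7", Some(true));
    assert_eq!(header, "4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-1");
}
```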

View File

```diff
@@ -2,6 +2,8 @@
 ## vNext
 
+- Add `get_all` method to `opentelemetry::propagation::Extractor` to return all values of the given propagation key and provide a default implementation.
+
 ## 0.30.0
 
 Released 2025-May-23
@@ -33,7 +35,7 @@ predictable and efficient observability pipelines.
 Release 2025-Apr-01
 
-- Bug Fix: Re-export `WithContext` at `opentelemetry::trace::context::WithContext` [#2879](https://github.com/open-telemetry/opentelemetry-rust/pull/2879) to restore backwards compatability
+- Bug Fix: Re-export `WithContext` at `opentelemetry::trace::context::WithContext` [#2879](https://github.com/open-telemetry/opentelemetry-rust/pull/2879) to restore backwards compatibility
 - The new path for `WithContext` and `FutureExt` are in `opentelemetry::context` as they are independent of the trace signal. Users should prefer this path.
 
 ## 0.29.0
@@ -502,7 +504,7 @@ and SDK are still unstable.
 - Use current span for SDK-less context propagation #510
 - Always export span batch when limit reached #519
 - Rename message events to events #530
-- Update resource merge behaviour #537
+- Update resource merge behavior #537
 - Ignore links with invalid context #538
 
 ## Removed
@@ -527,7 +529,7 @@ use `opentelemetry::global::shutdown_tracer_provider` explicitly instead.
 ## Changed
 
-- Pull configrations from environment variables by default when creating BatchSpanProcessor #445
+- Pull configurations from environment variables by default when creating BatchSpanProcessor #445
 - Convert doc links to intra-doc #466
 - Switch to Cow for event names #471
 - Use API to configure async runtime instead of features #481
@@ -824,7 +826,7 @@ use `opentelemetry::global::shutdown_tracer_provider` explicitly instead.
 - Make trace and metrics features optional
 - ExportResult as specified in the specification
 - Add Futures compatibility API
-- Added serde serialise support to SpanData
+- Added serde serialize support to SpanData
 - Separate OpenTelemetry Jaeger crate
 
 ### Changed
```

View File

```diff
@@ -309,7 +309,7 @@ fn encode(s: &str) -> String {
                 encoded_string.push(*byte as char)
             }
             b' ' => encoded_string.push_str("%20"),
-            _ => encoded_string.push_str(&format!("%{:02X}", byte)),
+            _ => encoded_string.push_str(&format!("%{byte:02X}")),
         }
     }
     encoded_string
@@ -412,7 +412,7 @@ impl BaggageExt for Context {
     }
 
     fn with_cleared_baggage(&self) -> Self {
-        self.with_value(Baggage::new())
+        self.with_baggage(Baggage::new())
     }
 
     fn baggage(&self) -> &Baggage {
@@ -653,4 +653,28 @@ mod tests {
         baggage.insert("(example)", "1");
         assert!(baggage.is_empty());
     }
+
+    #[test]
+    fn test_context_clear_baggage() {
+        let ctx = Context::new();
+        let ctx = ctx.with_baggage([KeyValue::new("foo", 1)]);
+        let _guard = ctx.attach();
+        {
+            let ctx = Context::current();
+            let baggage = ctx.baggage();
+            // At this point baggage should still contain the initial value.
+            assert_eq!(baggage.len(), 1);
+
+            // Baggage gets cleared.
+            let ctx = ctx.with_cleared_baggage();
+            let _guard = ctx.attach();
+            {
+                let ctx = Context::current();
+                let baggage = ctx.baggage();
+                // Baggage should contain no entries.
+                assert_eq!(baggage.len(), 0);
+            }
+        }
+    }
 }
```
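The second hunk here is a genuine bug fix rather than a style change: clearing now goes through `with_baggage` instead of the generic `with_value`, and the new test exercises exactly that. The first hunk is again format-arg inlining, inside a percent-encoder; a compilable, simplified sketch of that encoding loop follows (the crate's real pass-through character set is broader than shown):

```rust
// Simplified percent-encoder: alphanumerics pass through, spaces become %20,
// and every other byte is written as %XX via the inlined `{byte:02X}` argument.
fn encode(s: &str) -> String {
    let mut encoded_string = String::with_capacity(s.len());
    for byte in s.as_bytes() {
        match byte {
            b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' => encoded_string.push(*byte as char),
            b' ' => encoded_string.push_str("%20"),
            _ => encoded_string.push_str(&format!("%{byte:02X}")),
        }
    }
    encoded_string
}

fn main() {
    assert_eq!(encode("hello world!"), "hello%20world%21");
}
```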

View File

```diff
@@ -177,7 +177,7 @@ impl fmt::Display for Array {
             if i > 0 {
                 write!(fmt, ",")?;
             }
-            write!(fmt, "\"{}\"", t)?;
+            write!(fmt, "\"{t}\"")?;
         }
         write!(fmt, "]")
     }
@@ -191,7 +191,7 @@ fn display_array_str<T: fmt::Display>(slice: &[T], fmt: &mut fmt::Formatter<'_>)
         if i > 0 {
             write!(fmt, ",")?;
         }
-        write!(fmt, "{}", t)?;
+        write!(fmt, "{t}")?;
     }
     write!(fmt, "]")
 }
@@ -305,11 +305,11 @@ impl From<Cow<'static, str>> for StringValue {
 impl From<Value> for StringValue {
     fn from(s: Value) -> Self {
         match s {
-            Value::Bool(v) => format!("{}", v).into(),
-            Value::I64(v) => format!("{}", v).into(),
-            Value::F64(v) => format!("{}", v).into(),
+            Value::Bool(v) => format!("{v}").into(),
+            Value::I64(v) => format!("{v}").into(),
+            Value::F64(v) => format!("{v}").into(),
             Value::String(v) => v,
-            Value::Array(v) => format!("{}", v).into(),
+            Value::Array(v) => format!("{v}").into(),
         }
     }
 }
@@ -320,11 +320,11 @@ impl Value {
     /// This will allocate if the underlying value is not a `String`.
     pub fn as_str(&self) -> Cow<'_, str> {
         match self {
-            Value::Bool(v) => format!("{}", v).into(),
-            Value::I64(v) => format!("{}", v).into(),
-            Value::F64(v) => format!("{}", v).into(),
+            Value::Bool(v) => format!("{v}").into(),
+            Value::I64(v) => format!("{v}").into(),
+            Value::F64(v) => format!("{v}").into(),
             Value::String(v) => Cow::Borrowed(v.as_str()),
-            Value::Array(v) => format!("{}", v).into(),
+            Value::Array(v) => format!("{v}").into(),
         }
     }
 }
```
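Beyond the format-arg inlining, the `as_str` hunk illustrates the `Cow` contract spelled out in its doc comment: borrow when the value is already a string, allocate otherwise. A pared-down stand-in for the crate's `Value` enum makes the behavior observable:

```rust
use std::borrow::Cow;

// Pared-down stand-in for the crate's Value enum; F64/Array omitted for brevity.
enum Value {
    Bool(bool),
    I64(i64),
    String(String),
}

impl Value {
    /// Borrows for `String`; formats (allocates) for everything else.
    fn as_str(&self) -> Cow<'_, str> {
        match self {
            Value::Bool(v) => format!("{v}").into(),
            Value::I64(v) => format!("{v}").into(),
            Value::String(v) => Cow::Borrowed(v.as_str()),
        }
    }
}

fn main() {
    assert!(matches!(Value::String("x".into()).as_str(), Cow::Borrowed(_)));
    assert!(matches!(Value::I64(42).as_str(), Cow::Owned(_)));
}
```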

View File

```diff
@@ -40,6 +40,11 @@ pub trait Extractor {
     /// Collect all the keys from the underlying data.
     fn keys(&self) -> Vec<&str>;
+
+    /// Get all values from a key from the underlying data.
+    fn get_all(&self, key: &str) -> Option<Vec<&str>> {
+        self.get(key).map(|value| vec![value])
+    }
 }
 
 impl<S: std::hash::BuildHasher> Injector for HashMap<String, String, S> {
@@ -77,6 +82,30 @@ mod tests {
         );
     }
 
+    #[test]
+    fn hash_map_get_all() {
+        let mut carrier = HashMap::new();
+        carrier.set("headerName", "value".to_string());
+        assert_eq!(
+            Extractor::get_all(&carrier, "HEADERNAME"),
+            Some(vec!["value"]),
+            "case insensitive get_all extraction"
+        );
+    }
+
+    #[test]
+    fn hash_map_get_all_missing_key() {
+        let mut carrier = HashMap::new();
+        carrier.set("headerName", "value".to_string());
+        assert_eq!(
+            Extractor::get_all(&carrier, "missing_key"),
+            None,
+            "case insensitive get_all extraction"
+        );
+    }
+
     #[test]
     fn hash_map_keys() {
         let mut carrier = HashMap::new();
```
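The default `get_all` simply wraps the single `get` result, which is all a `HashMap<String, String>` carrier can offer. The method earns its keep on carriers that hold several values per key, which is where an override makes sense. A sketch with a hypothetical multi-value carrier; the trait is restated locally so the example compiles on its own:

```rust
use std::collections::HashMap;

// Local restatement of the Extractor shape shown above, for a self-contained example.
trait Extractor {
    fn get(&self, key: &str) -> Option<&str>;
    fn keys(&self) -> Vec<&str>;
    fn get_all(&self, key: &str) -> Option<Vec<&str>> {
        self.get(key).map(|value| vec![value])
    }
}

// Hypothetical carrier that can store multiple values under one key.
struct MultiMap(HashMap<String, Vec<String>>);

impl Extractor for MultiMap {
    fn get(&self, key: &str) -> Option<&str> {
        self.0.get(key).and_then(|v| v.first()).map(String::as_str)
    }
    fn keys(&self) -> Vec<&str> {
        self.0.keys().map(String::as_str).collect()
    }
    // Override the default so every value is surfaced, not just the first.
    fn get_all(&self, key: &str) -> Option<Vec<&str>> {
        self.0.get(key).map(|v| v.iter().map(String::as_str).collect())
    }
}

fn main() {
    let mut m = HashMap::new();
    m.insert("baggage".to_string(), vec!["a=1".to_string(), "b=2".to_string()]);
    let carrier = MultiMap(m);
    assert_eq!(carrier.get_all("baggage"), Some(vec!["a=1", "b=2"]));
}
```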

Some files were not shown because too many files have changed in this diff.