Compare commits

...

461 Commits

Author SHA1 Message Date
Alexandru-Liviu Bratosin 063107068c
fix(async-processor): concurrent exports actually serialised (#3028) 2025-07-14 10:37:21 -07:00
Reiley Yang 8925f064d2
chore: Remove file .github/repository-settings.md (#3067) 2025-07-14 10:31:30 -07:00
Lalit Kumar Bhasin 8aba0913e9
chore: Bump semantic-conventions to v1.36.0 (#3064) 2025-07-14 10:04:21 -07:00
OpenTelemetry Bot 34d6d5082e
Sort contributor listings and remove affiliation from emeriti (#3060) 2025-07-09 22:11:59 +02:00
Berkus Decker 5e447d02cc
chore: Switch from unmaintained hex dependency to const-hex (#3053) 2025-07-09 08:54:12 -07:00
Whoemoon Jang 8d46c40b60
fix: Support HttpClient implementation for HyperClient with custom connectors (#3057) 2025-07-07 11:36:38 -07:00
Copilot eac368a7e4
chore: Fix spelling errors and typos in documentation (#3044)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-07-02 09:53:05 -07:00
dependabot[bot] 2bf8175d07
chore(deps): bump taiki-e/install-action from 2.52.4 to 2.56.0 (#3051)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-07-01 13:33:02 -07:00
dependabot[bot] 3fc7194796
chore(deps): bump EmbarkStudios/cargo-deny-action from 2.0.11 to 2.0.12 (#3052)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-01 13:24:26 -07:00
dependabot[bot] db15ecb541
chore(deps): bump obi1kenobi/cargo-semver-checks-action from 2.6 to 2.8 (#3050)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-01 13:24:14 -07:00
dependabot[bot] 674914a8ef
chore(deps): bump github/codeql-action from 3.28.16 to 3.29.2 (#3049)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-07-01 13:23:57 -07:00
dependabot[bot] 6bc2b19b85
chore(deps): bump step-security/harden-runner from 2.12.0 to 2.12.2 (#3048)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-01 12:49:44 -07:00
Lushe Shipkov d59aded375
docs: A few small doc touch-ups in some of the various in_memory_exporter modules (#3042) 2025-06-30 09:24:40 -07:00
OpenTelemetry Bot e7784bb78f
docs: Update community member listings (#3038)
Co-authored-by: otelbot <197425009+otelbot@users.noreply.github.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-06-27 09:43:55 -06:00
Anton Grübel 5e29598369
chore: fix format lint (#3039) 2025-06-27 09:33:16 -06:00
yoshi-taka af2f1449e8
chore: remove unused glob (#3035) 2025-06-22 17:00:42 -07:00
Scott Gerring 0c2f808ec2
ci: Run benchmarks on main on the new oracle dedicated workers (#2942) 2025-06-20 08:37:17 -07:00
Cijo Thomas d4eb35a0cc
docs: on how to set right cardinality limit (#2998)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-06-12 17:03:39 -07:00
Lalit Kumar Bhasin 1f0d9a9f62
chore: Prepare for opentelemetry-appender-tracing 0.30.1 - bump tracing-opentelemetry to 0.31 (#3022) 2025-06-05 11:43:18 -07:00
dependabot[bot] 51dc2f04b7
chore(deps): update dtolnay/rust-toolchain requirement to b3b07ba8b418998c39fb20f53e8b695cdcc8de1b (#3016)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-06-05 10:43:48 -07:00
Igor Unanua eaca267d04
feat: support multi-value key propagation extraction (#3008)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-06-04 14:52:57 -07:00
Lalit Kumar Bhasin 7b3db0b6a6
chore: Bump otel-proto v1.7.0 (#3018) 2025-06-03 08:20:25 -07:00
Lalit Kumar Bhasin 082213e4e9
chore: bump semcon 1.34.0 (#3019) 2025-06-02 11:24:51 -07:00
dependabot[bot] c473db0788
chore(deps): bump taiki-e/install-action from 2.50.4 to 2.52.4 (#3015) 2025-06-01 21:31:03 -07:00
dependabot[bot] 85e639aef9
chore(deps): bump ossf/scorecard-action from 2.4.1 to 2.4.2 (#3014) 2025-06-01 21:21:45 -07:00
dependabot[bot] f1a541c3ca
chore(deps): bump codecov/codecov-action from 5.4.2 to 5.4.3 (#3013)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-06-01 20:34:16 -07:00
dependabot[bot] c30dc37002
chore(deps): bump fossas/fossa-action from 1.6.0 to 1.7.0 (#3012)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-01 13:22:45 -07:00
Gabriel 28becc0674
fix: with_cleared_baggage (#3006)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-05-30 08:19:46 -07:00
Joonas Bergius cab5565ba1
fix: use default endpoint for endpoint when provided empty string (#3000)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-05-30 07:51:40 -07:00
Cijo Thomas 62790608e1
fix: Small improvement to OTLP Exporter logs (#3007) 2025-05-30 07:13:26 -07:00
paullegranddc 167c94663a
fix(span_processor): only call on_start with recording spans (#3011) 2025-05-30 06:53:57 -07:00
Cijo Thomas 8e47d84922
chore: Add release notes for 0.30 (#3001)
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
2025-05-27 06:43:34 -07:00
Cijo Thomas 8882c31c95
chore: Nit fixes to examples (#3002) 2025-05-23 10:56:42 -07:00
Cijo Thomas c811cde1ae
chore: Prepare release 0.30.0 (#2999) 2025-05-23 09:52:50 -07:00
SF-Zhou 200885a6c3
fix: fix trace id in logs when using set_parent nested in a trace span (#2924)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-05-23 08:01:46 -07:00
Cijo Thomas c24369e86a
chore: Update metric sdk to stable status (#2996) 2025-05-23 07:51:41 -07:00
Cijo Thomas bf22aeb7cc
fix: Remove pub fields and replace with getter method consistently across … (#2997) 2025-05-22 22:41:49 -07:00
Cijo Thomas 4be1a32d3f
fix: remove cardinality capping via instrument advice (#2995) 2025-05-22 13:46:27 -07:00
Cijo Thomas 3d04c16e39
docs: Add metric doc (#2946)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-05-22 13:32:37 -07:00
Cijo Thomas 2018959eec
fix: Fix validation in Metric stream (#2991) 2025-05-22 11:18:44 -07:00
Anton Grübel 8c29ca7e21
chore: leverage fallback resolver for MSRV check (#2993) 2025-05-22 08:39:53 -07:00
Cijo Thomas 4b3a383267
chore: add required features to benches (#2990) 2025-05-21 20:45:20 -07:00
Cijo Thomas ebbebf57ba
fix: Further trim public API on views (#2989)
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-05-21 19:07:08 -07:00
Cijo Thomas e123996d80
feat: View cleanups (#2988) 2025-05-21 16:40:32 -07:00
Adrian Garcia Badaracco 3cdc62e716
feat: add generated proto models for profiles signal (#2979)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-05-21 09:24:47 -07:00
Cijo Thomas f04e9ec6cd
feat: Use builder pattern for constructing Metric Streams (#2984) 2025-05-21 07:37:16 -07:00
Cijo Thomas 7cfe8cd883
chore: fix changelogs (#2983) 2025-05-21 07:07:07 -07:00
Cijo Thomas aeb38a02c1
feat: Promote subset of Metric Views to stable (#2982) 2025-05-20 21:09:20 -07:00
Elichai Turkel 857a38b191
fix: Expose SpanExporterBuilder and MetricExporterBuilder (#2966)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-05-20 20:21:04 -07:00
Cijo Thomas cc93ead2df
fix: Metrics Views - fix a bug that causes unit, description to be lost when applying views that influence other aspects (#2981) 2025-05-20 17:59:17 -07:00
Cijo Thomas d52dcef07d
fix: MetricExporters use getter methods instead of direct access (#2973)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-05-20 10:07:07 -07:00
Lalit Kumar Bhasin 84999140a7
chore: bump semconv 1.33.0 (#2975) 2025-05-16 20:14:16 -07:00
Cijo Thomas 9a0099ab8d
chore: remove unused (and incorrect!) doc links (#2974) 2025-05-16 13:21:00 -07:00
Mohammad Vatandoost c5f97180a3
feat: add shutdown with timeout for traces (#2956)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-05-16 07:12:50 -07:00
Cijo Thomas 8f4fe23bb1
fix: Avoid exposing HistogramBuckets and bounds (#2969) 2025-05-14 22:11:59 -07:00
StepSecurity Bot fed6fee190
ci: Harden GitHub Actions (#2971)
Signed-off-by: StepSecurity Bot <bot@stepsecurity.io>
2025-05-14 22:03:27 -07:00
Cijo Thomas 970bb1e4b6
fix: Avoid exposing implementation detail in public API for PushMetricExporter (#2968) 2025-05-14 10:15:50 -07:00
Utkarsh Umesan Pillai a4575af593
fix: Update ResourceMetrics public API (#2965)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-05-14 07:39:02 -07:00
Cijo Thomas 82e5ed405e
fix: PeriodicReader to reuse data structures across collect (#2963) 2025-05-14 07:29:37 -07:00
paullegranddc 8fe3dcccc4
fix(CI): patch dependencies before running external-types check (#2967) 2025-05-14 06:44:27 -07:00
Cijo Thomas 3f29f6d8bf
chore: fix cargo deny check by updating unicode allowed list (#2964) 2025-05-14 06:39:46 +02:00
Björn Antonsson f771404f82
fix: allow span links to be added to a SpanRef (#2959) 2025-05-07 08:34:42 -07:00
Lalit Kumar Bhasin 1d9bd25ec8
chore: Fix CI coverage error for failing to install llvm-cov (#2958) 2025-05-05 11:32:10 -07:00
Lalit Kumar Bhasin 377fe5db7c
chore: publish otel-proto v1.6.0 (#2955) 2025-05-05 10:32:16 -07:00
Cijo Thomas 3d589d6449
ci: Try to build examples in CI (#2711)
Co-authored-by: Harold Dost <h.dost@criteo.com>
2025-05-05 08:08:50 +02:00
dependabot[bot] fa692d8c5c
chore(deps): bump taiki-e/install-action from 2.49.45 to 2.50.4 (#2952)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-05-01 14:13:49 -07:00
dependabot[bot] c6c2453ac4
chore(deps): update dtolnay/rust-toolchain requirement to b3b07ba8b418998c39fb20f53e8b695cdcc8de1b (#2953)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-05-01 14:05:18 -07:00
dependabot[bot] 06c6dfd6a8
chore(deps): bump github/codeql-action from 3.28.13 to 3.28.16 (#2954)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-05-01 13:57:25 -07:00
dependabot[bot] 175c7c6e9c
chore(deps): bump step-security/harden-runner from 2.11.1 to 2.12.0 (#2951)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-05-01 13:43:03 -07:00
dependabot[bot] 225bc0ebfa
chore(deps): bump codecov/codecov-action from 4.6.0 to 5.4.2 (#2950)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-01 13:17:20 -07:00
Mohammad Vatandoost 1d37e07529
feat: add-shutdown-with-timeout-for-log-provider-and-processor (#2941)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-05-01 09:09:01 -07:00
Cijo Thomas 4f2de12350
fix: Pass immutable metrics to PushMetricExporter (#2947) 2025-04-30 10:01:52 -07:00
Mathieu Tricoire 1d610a211a
feat(otlp): Re-export tonic crate (#2898)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-04-30 09:44:23 -07:00
Cijo Thomas 409713d2d9
fix: Allow Histograms with no buckets (#2948) 2025-04-30 09:19:05 -07:00
bestgopher 7f8adcc3f5
docs(semconv): fix some invalid urls (#2944) 2025-04-29 10:49:48 -07:00
Anton Grübel 02c290de84
chore: change webpki-roots exception license to CDLA (#2945) 2025-04-28 16:18:24 -07:00
Cijo Thomas 9dc727ef58
chore: Add Bjorn as approver (#2937)
Björn has been actively helping the repo for last few months, and is leading the exploration/development of tokio-tracing interop, among many other contributions/reviews.
He has agreed to volunteer time as an Approver for the repo.

In my view, https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#requirements-2 requirements have been met.
2025-04-22 22:25:22 +02:00
Mohammad Vatandoost 5c60f12f04
feat: add shutdown with timeout for log exporter (#2909)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-04-21 18:17:42 -07:00
Utkarsh Umesan Pillai d7f824486a
ci: Add cargo deny license check (#2936) 2025-04-21 14:55:29 -07:00
Utkarsh Umesan Pillai d8d57d834d
ci: Add cargo deny checks for bans and sources (#2935) 2025-04-21 11:34:20 +02:00
Gilles Henaux b5d31f11fa
docs: fix the HTTP and gRPC transports quickstart guides (#2933) 2025-04-17 07:43:06 -07:00
Lalit Kumar Bhasin 9cdc93161d
chore: bump semconv 1.32.0 (#2932) 2025-04-15 13:28:43 -07:00
Cijo Thomas 10cf02c458
chore: Fix changelogs and few nits (#2929) 2025-04-13 18:24:26 -07:00
Cijo Thomas 4ce765567c
feat: Hide MetricReader and friends (#2928) 2025-04-11 16:48:34 -07:00
Cijo Thomas 64cf2916c4
chore: Patch release prometheus to fix security vulnerability (#2927)
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-04-10 21:55:25 -07:00
houseme 431689dd04
chore: Upgrade `prometheus` to 0.14 and clean up protobuf-related code in `lib.rs` (#2920)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-04-10 19:33:36 -07:00
Utkarsh Umesan Pillai 8b3fc06555
ci: Update permissions for workflow (#2923) 2025-04-09 15:51:59 -07:00
Utkarsh Umesan Pillai 130e178ad3
docs: Add openssf scorecard badge (#2919) 2025-04-08 16:32:20 -07:00
Utkarsh Umesan Pillai 6b5251f0d0
ci: Update CodeQL workflow (#2918) 2025-04-08 14:46:01 -07:00
Cijo Thomas 4ff8e02031
fix: Cardinality overflow to use bool value instead of string (#2916) 2025-04-08 12:52:16 -07:00
Cijo Thomas d4c646738f
fix: cleanup MetricError (#2906) 2025-04-08 08:23:51 -07:00
Cijo Thomas df262401da
feat: Add ability to specify cardinality limit via Instrument advice (#2903) 2025-04-07 22:52:25 -07:00
Utkarsh Umesan Pillai 1760889e27
ci: Harden GitHub Actions (#2915) 2025-04-07 22:41:11 -07:00
Utkarsh Umesan Pillai e680514e4f
ci: Harden GitHub Actions (#2913) 2025-04-07 19:55:50 -07:00
Utkarsh Umesan Pillai bef0523b68
ci: Harden GitHub Actions (#2914) 2025-04-07 19:46:09 -07:00
StepSecurity Bot 72fc1b60a5
ci: [StepSecurity] Harden GitHub Actions (#2910)
Signed-off-by: StepSecurity Bot <bot@stepsecurity.io>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-04-07 18:03:12 -07:00
StepSecurity Bot f99f20a87d
ci: [StepSecurity] Harden GitHub Actions (#2912)
Signed-off-by: StepSecurity Bot <bot@stepsecurity.io>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-04-07 17:28:49 -07:00
Utkarsh Umesan Pillai 940ec2304b
ci: Harden GitHub Actions (#2911) 2025-04-07 15:25:42 -07:00
StepSecurity Bot 9a0ffc4adf
fix: [StepSecurity] ci: Harden GitHub Actions (#2907)
Signed-off-by: StepSecurity Bot <bot@stepsecurity.io>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-04-07 13:01:32 -07:00
Lalit Kumar Bhasin 16ff4b0575
chore: CI lint fix for dead link (#2908) 2025-04-07 12:22:18 -07:00
Cijo Thomas bc82d4f66d
fix: Cleanup MetricError and use OTelSdkResult instead (#2905) 2025-04-06 11:29:55 -07:00
Mohammad Vatandoost e9ae9f90ef
feat: add shutdown with timeout for metric reader and provider (#2890)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-04-06 09:37:37 -07:00
Cijo Thomas 4791aae19f
test: Add ignored code in test to prove known issue (#2872) 2025-04-06 07:59:30 -07:00
Cijo Thomas 4b56ee354c
fix: Remove logging for cardinality overflow (#2904) 2025-04-06 07:38:55 -07:00
Cijo Thomas 2564a71808
feat: Add and enabled Metric cardinality capping by default (#2901) 2025-04-04 16:04:45 -07:00
Björn Antonsson 86e842ca5e
chore: fix clippy lint errors for rust 1.86.0 (#2896) 2025-04-03 09:14:04 -07:00
dependabot[bot] 24b92cb7c7
chore(deps): bump fossas/fossa-action from 1.5.0 to 1.6.0 (#2892)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-01 12:50:07 -07:00
dependabot[bot] 93a151e720
chore(deps): bump github/codeql-action from 3.28.12 to 3.28.13 (#2891)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-01 12:49:36 -07:00
Scott Gerring 7bdd2f4160
fix: re-export WithContext in the same place (#2879)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-04-01 08:26:38 -07:00
Mohammad Vatandoost af3a33e1b3
feat: Add shutdown with timeout for metric exporter (#2854)
Co-authored-by: Braden Steffaniak <BradenSteffaniak+github@gmail.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Cijo Thomas <cithomas@microsoft.com>
2025-04-01 08:02:17 -07:00
Scott Gerring 37d794788e
chore: Move 'main' benchmark to shared workers temporarily (#2889) 2025-04-01 07:49:30 -07:00
OpenTelemetry Bot 36633015ab
ci: Add ossf-scorecard scanning workflow (#2887)
Co-authored-by: otelbot <197425009+otelbot@users.noreply.github.com>
2025-04-01 07:43:12 -07:00
Björn Antonsson 867e2a172c
perf: Run all benchmarks in one action (#2885)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-31 08:03:31 -07:00
Anton Grübel d5e409ce1f
refactor: re-export tracing for internal-logs (#2867)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-31 07:47:41 -07:00
Cijo Thomas 99cb67d19c
perf: Nit fix to benchmarks (#2884) 2025-03-28 15:33:23 -07:00
Cijo Thomas 303803e304
chore: Add Anton Grübel as approver (#2863) 2025-03-28 14:55:40 -07:00
Cijo Thomas 62e43c5489
feat: Leverage Suppression Context in Sdk (#2868)
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-03-28 10:51:40 -07:00
Björn Antonsson 50f0bb82f8
ci: run clippy on features separately to find issues (#2866)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-28 09:38:51 -07:00
Björn Antonsson da2029ea17
perf: Run all benchmarks for shorter time (#2870) 2025-03-28 09:23:06 -07:00
Anton Grübel b2de6cc5a3
chore: update tonic to 0.13 (#2876) 2025-03-27 14:47:02 -07:00
Anton Grübel a071d8fc39
ci: update deny GHA and its config (#2875) 2025-03-27 12:03:54 -07:00
Cijo Thomas 297146701d
feat: Add Suppression flag to context (#2821)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-03-26 16:14:41 -07:00
Mindaugas Vinkelis f3e93a09ea
refactor: AggregatedMetrics as enum instead of dyn Aggregation (#2857)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-26 10:39:57 -07:00
Cijo Thomas f12833f383
docs: Modify example to use logs, baggage (#2855)
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
2025-03-26 07:55:06 -07:00
Scott Gerring fb3699bc05
chore: Update error handling ADR - mention non_exhaustive (#2865) 2025-03-26 06:41:38 -07:00
Anton Grübel a711ae91c7
ci: add cargo machete and remove unused dependencies (#2864) 2025-03-25 15:01:24 -07:00
Cijo Thomas 5bfa70ef23
chore: Add company affiliation to maintainers and approvers (#2859) 2025-03-25 10:26:49 -07:00
houseme e9b27a4df6
chore: update from tracing-opentelemetry 0.29.0 to 0.30.0 (#2856)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-24 09:56:56 -07:00
Cijo Thomas f1c7ce9225
chore: fix few nit build warnings (#2848) 2025-03-24 08:11:49 -07:00
Cijo Thomas fa170f3258
fix: LogEnabled benchmarks to use blackbox (#2853) 2025-03-23 17:09:58 -07:00
tison 369b952baf
chore: Add link to sdk's CHANGELOG.md (#2850) 2025-03-22 10:28:12 -07:00
Braden Steffaniak e994d5237e
chore: Upgrade opentelemetry-prometheus to 0.29 (#2851) 2025-03-22 10:15:28 -07:00
Anton Grübel c5d5a1cc69
perf: small perf improvements in OTel API (#2842)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-21 20:20:52 -07:00
Cijo Thomas d32d34c8e0
chore: fix and release appender-tracing (#2847) 2025-03-21 17:47:50 -07:00
Cijo Thomas ad0552a6a9
chore: Prepare 0.29 release (#2844) 2025-03-21 17:10:01 -07:00
Anton Grübel 14d8f749ac
ci: replace actions-rs/cargo GHA (#2846) 2025-03-21 15:18:06 -07:00
Lalit Kumar Bhasin 5881f99528
chore: bump semconv 1.31.0 (#2845) 2025-03-21 15:03:28 -07:00
Cijo Thomas 34835a89e5
chore: Prepare docs before 0.29 (#2837)
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-03-21 12:10:29 -07:00
Cijo Thomas 1ecd1a8718
fix: KeyValueMetadata to not expose its fields public (#2843) 2025-03-21 12:00:35 -07:00
Cijo Thomas 3e77a22ca3
fix: Error cleanups continued (#2838) 2025-03-21 11:42:30 -07:00
Scott Gerring 36c48db5fd
chore: context stabilization tests & logging pieces (#2833)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-20 16:04:05 -07:00
Cijo Thomas b03296cc3a
chore: Add warning on Prometheus crate (#2831) 2025-03-20 07:26:48 -07:00
Cijo Thomas 969bedf1a7
fix: Cleanup zipkin build error (#2839) 2025-03-20 07:09:01 -07:00
Anton Grübel 702c61dcee
test: validate code-based config is preferred over env vars in ZipkinExporterBuilder (#2836)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-19 18:54:03 -07:00
Cijo Thomas dc580ff54b
fix: Report error using OTel convention (#2808)
Co-authored-by: Anton Grübel <anton.gruebel@gmail.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-03-19 17:24:41 -07:00
Anton Grübel bece03ba59
fix: validate Baggage key by W3C standards (#2804)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-19 15:46:11 -07:00
Scott Gerring 31b494bccb
ci: fix filter for benchmark pushes on branch (#2834) 2025-03-19 08:25:15 -07:00
Scott Gerring 75485dcd07
ci: Use bare-metal OTEL runners for benchmark CI for pushes to main (#2832) 2025-03-19 07:19:53 -07:00
Anton Grübel 5be79c7477
refactor: prioritize code-based config over env vars in OTLP exporter (#2827)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
Co-authored-by: Cijo Thomas <cithomas@microsoft.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-18 21:02:29 -07:00
Anton Grübel 06abe3dbf9
ci: use GHCR otel-collector image (#2830) 2025-03-18 16:03:41 -07:00
Cijo Thomas d2859dc8c0
docs: Add docs to tracing appender (#2811) 2025-03-18 13:02:53 -07:00
Cijo Thomas 01898be24a
perf: LogEnabled inline more (#2828) 2025-03-18 10:32:43 -07:00
Cijo Thomas e43f3dfa4a
perf: Inline enabled check in Logger (#2823) 2025-03-18 10:08:56 -07:00
Cijo Thomas 40effae08a
fix: Remove async std (#2820)
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-03-18 07:47:43 -07:00
Cijo Thomas 9569b0f816
docs: Add few more notes about Logs design doc (#2816) 2025-03-17 16:18:00 -07:00
Cijo Thomas 2070e6cf90
chore: Modify OTLP doc to show jaeger usage instead of a full example (#2805) 2025-03-17 14:05:03 -07:00
Julian Tescher 0ff8c34b06
chore: Move jtescher from Maintainer to Emeritus (#2815) 2025-03-17 13:02:41 -07:00
Cijo Thomas a80dc0c686
test: Improve OTLP Builder test (#2817) 2025-03-17 12:54:00 -07:00
Cijo Thomas 233d11a5fd
docs: OTLP Examples to shutdown all signals (#2806) 2025-03-17 09:49:56 -07:00
Scott Gerring 1bce83fcda
chore: Move FutureExt into context (#2776)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-17 07:45:22 -07:00
Anton Grübel e48e6f4e16
chore: remove deprecated functions/methods in trace::Config (#2810) 2025-03-16 06:19:40 -07:00
Anton Grübel e5f8a48d3b
test: validate code based config is preferred over env vars in trace BatchConfig (#2809) 2025-03-15 18:46:03 -07:00
Cijo Thomas a442d623e5
fix: Byte arrays represented as Anyvalue::Byte instead of string (#2807) 2025-03-14 19:05:34 -07:00
Cijo Thomas 6e58810080
docs: Include context, baggage etc to main readme (#2785)
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
2025-03-14 08:28:46 -07:00
Cijo Thomas 8872bf810d
feat: OTLP Exporter builders to return specific Error type (#2790) 2025-03-14 08:22:23 -07:00
Cijo Thomas ddac8e1ef3
fix: LogProcessor.SetResource to require mutable self (#2801) 2025-03-13 18:06:18 -07:00
Cijo Thomas 07b3b4056d
perf: Small perf optimization for logs (#2799) 2025-03-13 10:55:21 -07:00
Cijo Thomas 6b221e496b
fix: Avoid stringifying int values unless necessary (#2795) 2025-03-13 10:11:49 -07:00
Anton Grübel 66579ac47e
fix: Change logger scope in log appender (#2796) 2025-03-13 07:48:50 -07:00
Marcus Griep ad886152d5
docs: fix unit in metrics semantic conventions example (#2794)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-03-12 09:40:06 -07:00
Cijo Thomas 5bcaf30581
chore: Update BUG-REPORT.yml (#2791) 2025-03-12 08:41:34 -07:00
Cijo Thomas 8aa5b00493
feat: Add experimental concurrent processor for logs (#2780) 2025-03-11 16:01:38 -07:00
Cijo Thomas 52cd0e9ef1
test: Add test to confirm programmatic config wins over env in BatchConfig (#2781) 2025-03-11 11:04:04 -07:00
paullegranddc 51dda2f0a1
test: fix otlp log integration tests failing with some set of features (#2784) 2025-03-11 10:51:44 -07:00
paullegranddc dac8bd5cde
test: fix span_processor test failing with default features (#2782) 2025-03-11 07:42:33 -07:00
Cijo Thomas 68c9133867
fix: Remove mut ref requirement for shutdown LogExporter (#2764)
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-03-10 14:58:15 -07:00
Cijo Thomas 9d3a5079f6
test: Add test showing Baggage and Context interaction with Tracing (#2732)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-03-06 05:57:25 -08:00
Scott Gerring 69c4adee46
docs: Add ADR dir and error handling ADR (#2664)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
Co-authored-by: Cijo Thomas <cithomas@microsoft.com>
2025-03-06 05:42:05 -08:00
Pixels b33f0cc56c
fix: typo in metrics.rs (#2761)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-05 12:19:11 -08:00
Cijo Thomas 36cbe8dc1f
test: Modify perf tests to pass event name correctly to avoid string allocation (#2762) 2025-03-05 11:09:11 -08:00
Cijo Thomas f013b3d474
test: Stress test for logs modified to show throughput when enabled()… (#2760) 2025-03-05 10:31:32 -08:00
Anton Grübel a317856787
refactor: refactor Baggage with Context interaction (#2748)
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-05 06:40:55 -08:00
Björn Antonsson baf4bfd61e
fix: Allow overlapping context scopes (#2378)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-04 17:03:45 -08:00
Cijo Thomas ff33638d1e
feat: Allow event name to be provided to IsEnabled check in Logger (#2756) 2025-03-04 15:39:58 -08:00
Cijo Thomas c5a4b7f436
doc: Update CONTRIBUTING.md with meeting notes (#2755) 2025-03-04 10:04:48 -08:00
Cijo Thomas 88cae2cf7d
test: Add test to show how to add baggage to logrecords via processor (#2738) 2025-03-03 21:40:49 -08:00
Cijo Thomas f15a3379f8
fix: Fix Logger scope in Tracing Appender (#2735) 2025-03-03 19:11:33 -08:00
Anton Grübel 5a77fb2b50
chore: enable Rust 2024 edition lints (#2728)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-03 13:46:22 -08:00
Cijo Thomas 1ddecb04d9
perf: Nit additions to Baggage benchmarks (#2740) 2025-03-03 11:33:41 -08:00
Anton Grübel 2493fecc40
fix: align baggage.remove() implementation (#2734) 2025-03-03 11:31:25 -08:00
Cijo Thomas 46a7cd623c
perf: Add benchmarks to show the cost of attributes in Log (#2741) 2025-03-03 11:13:09 -08:00
Anton Grübel 92303b6f72
test: add benchmark tests for baggage (#2737) 2025-03-02 11:00:51 -08:00
Shunsuke Tokunaga 13fafcb0ab
test: Add Scope attributes test cases for metrics (#2736)
Signed-off-by: Shunpoco <tkngsnsk313320@gmail.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-02 09:41:42 -08:00
Mindaugas Vinkelis 1583e70c2f
perf: Baggage insert & BaggageExt::with_baggage & updated constants to latest standard (#2284) 2025-03-02 08:02:34 -08:00
Anton Grübel 5e47487427
fix: change value type of Baggage to StringValue (#2729)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-03-01 12:03:32 -08:00
dependabot[bot] 382bad420b
Bump ytanikin/pr-conventional-commits from 1.4.0 to 1.4.1 (#2733)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-01 12:01:46 -08:00
Anton Grübel a423ad2aa9
ci: fix rate limiting of setup-protoc in CI (#2731) 2025-02-28 12:47:02 -08:00
Anton Grübel d581ae170d
test: fix and enable doctests (#2730) 2025-02-28 12:46:30 -08:00
Cijo Thomas 91ae096206
test: Add test to confirm Context expected behavior (#2724) 2025-02-28 07:04:54 -08:00
martintmk fb74565453
feat: Refine runtime trait (#2641) 2025-02-27 10:09:43 -08:00
Cijo Thomas 367e484686
ci: Run perf tests on demand (#2722) 2025-02-26 14:00:26 -08:00
Cijo Thomas edb0d5857f
docs: Modify examples to show best practices - reuse tracer (#2709) 2025-02-26 12:08:01 -08:00
Cijo Thomas 11ed8e0af7
docs: Fix tracing grpc example (#2710) 2025-02-26 11:05:55 -08:00
Cijo Thomas 08a1f52e18
test: Try fix a flaky test (#2719) 2025-02-26 10:05:28 -08:00
Scott Gerring 4830a3cf3b
ci: comment out intermittent failing assertion (#2714) 2025-02-26 08:01:43 -08:00
Björn Antonsson f9ccdfff1c
chore: Fix small nits on benchmarks and remove throughput (#2713) 2025-02-26 07:18:48 -08:00
Scott Gerring 261ac75ab5
ci: Add criterion performance regressions to PR workflows (#2706) 2025-02-25 23:37:37 -08:00
Björn Antonsson 2bf05f6161
perf: More Context benchmarks (#2707) 2025-02-25 23:03:55 -08:00
Cijo Thomas 7954252154
test: Nit improvement to Logger test logic (#2708) 2025-02-25 15:32:44 -08:00
Mohammad Vatandoost 29eda05337
Modify batch processor test use flush instead of sleeptime (#2675)
Co-authored-by: Braden Steffaniak <BradenSteffaniak+github@gmail.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-25 14:43:58 -08:00
Dimitris Apostolou 81fea0747c
Avoid duplicate crates (#2703)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-25 08:07:55 -08:00
Scott Gerring bfd9f3139b
ci: add pr naming workflow (#2698) 2025-02-25 07:15:18 -08:00
Scott Gerring 59af7e2277
chore: Prevent libtest being added to bench builds (#2705) 2025-02-25 07:13:26 -08:00
Josh W Lewis 64549d741f
Fix `.with_headers` to support multiple k/v pairs (#2699)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-24 15:52:59 -08:00
Spencer Judge edab145d6a
Reduce various info logs to debug (#2700) 2025-02-24 11:28:38 -08:00
Cijo Thomas bc5e6cea61
InstrumentationScope to include attributes in hash and eq check (#2701) 2025-02-21 12:06:10 -08:00
Cijo Thomas 27d364b105
Fix: BatchLogProcessor to invoke shutdown on exporter (#2696) 2025-02-21 11:55:36 -08:00
Cijo Thomas cb81eb6873
Add Scott Gerring as an Approver (#2679)
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
2025-02-21 07:07:10 -08:00
Cijo Thomas ee76205a8a
Add few more notes to design docs for Logs (#2694) 2025-02-20 17:45:41 -08:00
Lalit Kumar Bhasin a70affd3e3
Remove `mut` self reference from SpanExporter::export() method. (#2695) 2025-02-20 17:39:03 -08:00
Cijo Thomas c072629648
Use Duration for const for delays (#2688) 2025-02-20 16:43:31 -08:00
Cijo Thomas 19dba6964e
Appender-Tracing - extend spl treatment of message when recording str (#2689)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-02-20 16:10:15 -08:00
Cijo Thomas 6648d74cb2
Nit fix to changelogs (#2693) 2025-02-20 16:02:05 -08:00
Anton Grübel 5ce50ae4ed
simplify async trait impl (#2692) 2025-02-20 15:42:43 -08:00
Anton Grübel 37efb88e55
move TraceError, TraceResult and ExportError to opentelemetry-sdk (#2680)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-20 11:11:02 -08:00
martintmk f01e8f4682
Calls to `with_resource` for signal builders (Metrics, Logs, Traces) are now additive (#2677)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
Co-authored-by: Cijo Thomas <cithomas@microsoft.com>
2025-02-20 10:25:43 -08:00
Lalit Kumar Bhasin dc9a5c87d6
Span Exporter async native (#2685) 2025-02-20 00:16:48 -08:00
Anton Grübel ac69af6a0a
update rand dependency (#2681)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-02-19 16:35:06 -08:00
Lalit Kumar Bhasin f4690f18b3
chore: CI fix - msrv patching for native-tls. (#2687) 2025-02-19 13:42:34 -08:00
Luc van Kampen 64868d239e
Update typo (#2686) 2025-02-19 11:32:58 -08:00
houseme 1f0e361dff
Fix typos (#2684) 2025-02-18 21:35:36 -08:00
Shunsuke Tokunaga b5c9fdc409
Fix 2638: Use absolute URL for LICENSE/CHANGELOG in README (#2682)
Signed-off-by: Shunpoco <tkngsnsk313320@gmail.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-18 19:00:29 -08:00
OpenTelemetry Bot 58c7644ac8
Add FOSSA scanning workflow (#2676)
Co-authored-by: otelbot <197425009+otelbot@users.noreply.github.com>
2025-02-18 14:58:21 -08:00
Anton Grübel 1b1ba899cf
use workspace lint configuration (#2674) 2025-02-17 18:18:13 -08:00
Cijo Thomas eca1ce8708
Temp fix flaky test for BatchSpanprocessors (#2671) 2025-02-15 10:26:53 -08:00
Anton Grübel a9a5cbf7a0
leverage native async trait in MetricsClient (#2672) 2025-02-15 09:50:47 -08:00
Cijo Thomas 06ca4a12f2
Nit cleanup of method signature for async export (#2669) 2025-02-14 19:31:27 -08:00
Anton Grübel bc931b151f
make force_flush() in PushMetricExporter synchronous (#2663)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-02-14 16:37:29 -08:00
Cijo Thomas dbd44a3486
Fix OTel Appender for tracing from suppressing other layers (#2668) 2025-02-14 15:26:08 -08:00
Mohammad Vatandoost 41b381ae80
Change timeout value to milliseconds (#2656)
Co-authored-by: Braden Steffaniak <BradenSteffaniak+github@gmail.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-02-14 12:58:35 -08:00
Cijo Thomas 2997c4bb89
Show using tracing layer filters in examples (#2667) 2025-02-14 09:44:33 -08:00
tison 32a43caada
build: drop unused tonic server deps (#2666)
Signed-off-by: tison <wander4096@gmail.com>
2025-02-14 06:59:42 -08:00
Drew Relmas cd9bbcfa0d
Split SimpleLogProcessor and BatchLogProcessor out of single file (#2661) 2025-02-13 18:00:24 -08:00
Anton Grübel 00ebc98e63
leverage native async trait in PushMetricExporter (#2662) 2025-02-13 17:45:14 -08:00
Cijo Thomas ac66848ae8
Add design docs (#2657)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-02-13 10:56:06 -08:00
Cijo Thomas 1aca212a72
Simplify OTLP Example Resource creation (#2660) 2025-02-13 10:13:11 -08:00
martintmk e68fe94500
Enable per-thread instances for histogram stress test (#2659) 2025-02-13 08:46:54 -08:00
Drew Relmas 0592075930
Update appender-tracing pprof usage with Windows exclusion (#2655) 2025-02-12 14:11:23 -08:00
Lalit Kumar Bhasin f88d930be7
chore: Prepare for opentelemetry-appender-tracing v0.28.1 (#2653) 2025-02-12 10:05:20 -08:00
Lalit Kumar Bhasin f657b5522d
chore: fix doc tests (#2652) 2025-02-12 09:33:28 -08:00
Cijo Thomas 6ea208652a
Fix link in migration guide (#2647) 2025-02-11 14:36:43 -08:00
Braden Steffaniak b3582021b0
Upgrade opentelemetry-prometheus to 0.28 (#2650) 2025-02-11 12:01:16 -08:00
Cijo Thomas f3a375cb48
Update CI for ARM to use public image (#2646) 2025-02-11 10:48:08 -08:00
Drew Relmas 2d7e7d6936
Add missing Windows exclusion for pprof crate in sdk/benches (#2648) 2025-02-11 10:35:46 -08:00
tison 5236670f8d
chore: fix CHANGELOG.md (#2642) 2025-02-11 07:03:05 -08:00
Cijo Thomas de197e4011
Fix zipkin deadlink (#2640) 2025-02-10 12:26:29 -08:00
Lalit Kumar Bhasin 189078d7a9
Bump otel-appender-tracing version to 0.28 (#2637) 2025-02-10 10:18:49 -08:00
Lalit Kumar Bhasin 062c9d6589
Prepare v0.28.0 (#2635) 2025-02-10 09:52:55 -08:00
Cijo Thomas b6783a1098
Changelog refactoring (#2632) 2025-02-07 15:25:55 -08:00
Cijo Thomas c56a03ebbc
Separate logger and loggerprovider mod (#2633) 2025-02-07 11:53:55 -08:00
Tom Tan 34742d02dc
Set pwd for precommit.sh script to support running it in any directory (#2535)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-02-07 11:32:25 -08:00
Cijo Thomas eeb367d933
Nit fixes to doc and tests (#2626) 2025-02-06 22:05:43 -08:00
Cijo Thomas c2a2b8dd9b
Fix stress test for Logs to use OTelSdkResult (#2627) 2025-02-06 20:43:02 -08:00
Cijo Thomas 3a8ceddd5e
Rearrange trace integration tests (#2628) 2025-02-06 20:16:06 -08:00
Cijo Thomas 1fc86da1c6
0.28 migration guide and OTLP Example fixes (#2622) 2025-02-06 19:11:38 -08:00
Anton Grübel 61e539f491
leverage tracer provider to set service name for Zipkin exporter (#2624)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-06 17:12:11 -08:00
Lalit Kumar Bhasin 420416f2bf
Logs SDK - use OTelSdkError, OTelSdkResult (#2625) 2025-02-06 16:33:35 -08:00
Cijo Thomas 41464d1c1a
Readme updates in prep for release (#2623) 2025-02-06 12:48:05 -08:00
Cijo Thomas 69e7e89b47
Remove ExportResult and TraceError as no longer required/used (#2620) 2025-02-05 14:35:43 -08:00
Mindaugas Vinkelis d79950da59
ObservableGauge collect data points since previous collection (#2618)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-05 13:53:44 -08:00
Cijo Thomas ad3830383f
Add conditional compilation to avoid build warnings (#2619) 2025-02-05 13:01:24 -08:00
Cijo Thomas 1fbaaf2ac3
Rename LogRecord to SdkLogRecord (#2617) 2025-02-05 12:02:23 -08:00
Lalit Kumar Bhasin 5f7f2d5b7f
Remove export timeout configuration for PeriodicReader (#2598) 2025-02-05 11:52:47 -08:00
Cijo Thomas 15b5fa4e20
Move traceid spanid traceflags to common (#2609) 2025-02-05 11:07:56 -08:00
Cijo Thomas dfeeac7a7d
Rename TracerProvider to SdkTracerProvider (#2614)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-02-05 11:01:29 -08:00
Lalit Kumar Bhasin d01c1137cf
Trace subsystem - use OTelSdkResult/OTelSdkError (#2613) 2025-02-05 10:20:34 -08:00
Adrian Garcia Badaracco 0e751b4a2b
json: ser/de bytes as base64 strings not an array of bytes (#2471)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-05 09:09:34 -08:00
anujnegi270 775f1f9a3e
Disabling the Instrument Name Validation under a new feature flag (#2543)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-02-05 08:08:18 -08:00
Cijo Thomas fdc1151251
Rename LoggerProvider, Logger to SdkLoggerProvider, SdkLogger (#2612) 2025-02-05 07:37:45 -08:00
Lalit Kumar Bhasin 5bebbcc6d7
Refine error messages for OTelSdkError variants (#2608)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-04 16:05:57 -08:00
Mohammad Vatandoost d481be5a4f
Use opentelemetry:time::now instead of systemtime (final part) (#2607)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-04 15:57:48 -08:00
Cijo Thomas 4a76c96344
Use OTelSdkResult for metric flush (#2606) 2025-02-04 15:49:47 -08:00
Cijo Thomas 4783b64467
Assert explicitly that shutdown succeeded in Integration tests (#2605) 2025-02-04 13:33:33 -08:00
Cijo Thomas 4cdc64fc77
Metric export to use common OTelSdkResult (#2604) 2025-02-04 13:08:09 -08:00
Lalit Kumar Bhasin f5b44a54ea
Shutdown in separate thread for Logs integration tests in current_thread mode. (#2601) 2025-02-04 12:19:54 -08:00
Cijo Thomas 6bdb4c69b6
Make a common Error enum for export shutdown and flush operations (#2603) 2025-02-04 12:00:57 -08:00
Cijo Thomas b017c7b9c4
Fix PeriodicReader panic due to timeout (#2586)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-02-03 15:42:25 -08:00
Cijo Thomas 5e6b3d622e
Add easier way to add periodicreader to meterprovider (#2597) 2025-02-03 13:50:23 -08:00
Cijo Thomas 013d51af6e
Doc additions to warn about tokio current (#2594)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-02-03 13:35:53 -08:00
Lalit Kumar Bhasin 7ca4a85a24
Remove export timeout configuration for BatchSpanProcessor (#2596)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-03 13:01:46 -08:00
Mohammad Vatandoost 6c88d31aa2
Use opentelemetry:time::now instead of systemtime (#2595) 2025-02-03 12:35:25 -08:00
Lalit Kumar Bhasin a81ad249ec
Enable reqwest and reqwest-blocking client creation with custom timeout (#2584)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-03 11:17:01 -08:00
Lalit Kumar Bhasin 7c9447ff6c
Remove export timeout configuration for BatchLogProcessor. (#2587)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-02-03 10:22:41 -08:00
Cijo Thomas dde68a073c
Nit fix to log message on LogProcessor (#2590)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-02-03 09:47:39 -08:00
Cijo Thomas aece6419eb
Few fixes to PeriodicReader tests (#2589) 2025-02-03 09:38:02 -08:00
Cijo Thomas 3fc8485067
Add more metric integration tests (#2582) 2025-02-03 06:54:29 -08:00
Shunsuke Tokunaga 8c5daa2557
Version up sample code/Cargo.toml on README.md (#2585)
Signed-off-by: Shunpoco <tkngsnsk313320@gmail.com>
2025-02-02 09:41:23 -08:00
Cijo Thomas a1860eb7a8
Better handling of shutdown in BatchLogProcessor (#2581) 2025-01-31 11:36:25 -08:00
Cijo Thomas 9dfcff1f55
Use dedicated ShutdownResult for Metric SDK shutdown (#2573) 2025-01-31 10:20:22 -08:00
Cijo Thomas b50da912b4
Remove unwanted Arc inside PeriodicReader (#2579) 2025-01-31 09:59:38 -08:00
Cijo Thomas 732b938e59
Removed unnecessary public methods from OTLP Exporter (#2575) 2025-01-30 10:01:31 -08:00
Yury Yarashevich 85206105f8
Use bytes::Bytes as the HTTP request body in HttpClient. (#2485)
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-01-29 08:41:22 -08:00
Anton Grübel 17cce839c3
feat: Replace Zipkin pipeline with exporter builders (#2565)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-01-28 21:57:48 -08:00
Cijo Thomas 144fdd961c
Print histogram bucket info in stdout exporter (#2569) 2025-01-28 12:09:20 -08:00
Lalit Kumar Bhasin 45f4d7c24a
chore: bump semantic convention 1.30.0 (#2572) 2025-01-28 11:40:17 -08:00
Cijo Thomas 3ec4c186ad
Nit doc update for OTLP Exporter (#2567) 2025-01-27 12:59:29 -08:00
David Pitoniak 6fa9ae2f09
doc(resource): update docs to be accurate with current behavior (#2568) 2025-01-27 09:23:51 -08:00
Dylan Thacker-Smith 18834f53af
appender-tracing: Include trace ID and span ID in logs when nested in tracing crate's spans (#2438)
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Cijo Thomas <cithomas@microsoft.com>
2025-01-25 12:00:15 -08:00
Cijo Thomas b3879b6781
Minor fixes, stdout to print scope info for logs (#2563) 2025-01-24 18:46:10 -08:00
Cijo Thomas 879f4d7964
Rename provider builders (#2562) 2025-01-24 16:40:50 -08:00
Cijo Thomas bd0ee846e5
Rename log emitter to provider (#2559) 2025-01-24 11:30:05 -08:00
Cijo Thomas a1ac8065c5
Use opentelemetry:time::now instead of systemtime in LogEmitter (#2558) 2025-01-24 10:58:00 -08:00
Cijo Thomas b2d3d74962
Remove unsupported feature from BatchSpanProcessor (#2556) 2025-01-24 09:46:31 -08:00
Cijo Thomas 14faca3fbe
Resource.get modified to only need a reference (#2552) 2025-01-24 09:27:30 -08:00
Cijo Thomas 4f472a8c67
Run integration tests on all PRs (#2554) 2025-01-24 08:35:49 -08:00
Cijo Thomas de2b25ee89
Update pprof version (#2553) 2025-01-24 08:20:14 -08:00
Lalit Kumar Bhasin 8a4e5ca419
Restrict LogRecord creation access outside opentelemetry_sdk crate. (#2549) 2025-01-24 07:08:49 -08:00
Cijo Thomas 6a8db83871
Move Metric inmemory exporter (#2548) 2025-01-23 21:39:47 -08:00
Cijo Thomas 6a13678421
Move inmemory exporter for spans to diff namespace (#2544) 2025-01-23 18:44:14 -08:00
Cijo Thomas 97cf951396
Cleanup unused Error struct - PropagationError (#2547) 2025-01-23 18:33:01 -08:00
Lalit Kumar Bhasin 4c77c89999
More integration tests added (#2545)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-01-23 18:04:37 -08:00
Cijo Thomas 95fa209669
Remove unused Error enum (#2546) 2025-01-23 17:47:24 -08:00
Cijo Thomas fdf4a63a5b
Move inmemory exporter for logs to diff namespace (#2538) 2025-01-23 11:44:00 -08:00
Cijo Thomas 5935ccac93
Move span exporter module (#2540) 2025-01-23 11:34:10 -08:00
Utkarsh Umesan Pillai a0e50615cd
Update documentation for Metrics API (#2280)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
2025-01-23 11:24:42 -08:00
Lalit Kumar Bhasin 927a08c902
Enable hyper client tests for SimpleLogProcessor integration tests (#2541) 2025-01-22 18:47:38 -08:00
Cijo Thomas 38be4d98f4
Nit leftovers from previous PR (#2537) 2025-01-22 17:37:00 -08:00
Cijo Thomas 3042aa0cac
Simplify metric::data imports (#2536) 2025-01-22 17:23:10 -08:00
Cijo Thomas 57d129734c
Refactor LogExporter mod (#2534) 2025-01-22 16:50:18 -08:00
Tom Tan b1debf0797
Short circuit the event_enabled check (#2533) 2025-01-22 15:10:02 -08:00
Cijo Thomas acaa98d814
Doc additions for Simple and Batch processors (#2529)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2025-01-22 10:15:42 -08:00
Cijo Thomas 90b0dd46c8
Remove cardinality capping in Metrics (#2528) 2025-01-21 17:38:46 -08:00
Lalit Kumar Bhasin d2a6b3b2fc
BatchSpanProcessor optimizations - Separate control signal queue, and wake up background thread only when required. (#2526)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-01-21 15:00:40 -08:00
Lalit Kumar Bhasin 78db32c58d
Add the full log validation test in Integration test (#2525) 2025-01-21 14:33:20 -08:00
Cijo Thomas 68af3bb038
Add reqwest client to integration test (#2523) 2025-01-16 19:23:59 -08:00
Cijo Thomas dace0ca7fa
Add simple processor to integration test (#2522) 2025-01-16 17:18:34 -08:00
Cijo Thomas 27410b0031
Integration test for non tokio main (#2520) 2025-01-16 16:07:25 -08:00
Lalit Kumar Bhasin c51c4b2c71
Add runtime traits under experimental flag. (#2519) 2025-01-15 17:18:08 -08:00
William Perron a719d0d1a6
Allow values containing '=' in OTEL_RESOURCE_ATTRIBUTES (#2120)
Values passed in to OTEL_RESOURCE_ATTRIBUTES containing an equal sign "=" are currently ignored by the Resource constructor, but should be accepted as it is part of the W3C Baggage octet
range.
2025-01-16 00:22:53 +01:00
Cijo Thomas b53c19e2e6
Few fixes to OTLP Exporter Examples (#2518) 2025-01-15 12:41:41 -08:00
Cijo Thomas 1904d4b8eb
Remove redundant shutdown check in batchlogprocessor (#2514) 2025-01-15 10:39:34 -08:00
Cijo Thomas 9aea82698a
OTLPExporter - change to http as default feature flag (#2516) 2025-01-15 09:52:30 -08:00
Cijo Thomas f46bccceab
PeriodicReader shutdown modified to enforce timeout (#2513) 2025-01-14 20:21:53 -08:00
Utkarsh Umesan Pillai 5aa9120f1e
Fix BatchLogProcessor (#2510) 2025-01-14 13:53:35 -08:00
Utkarsh Umesan Pillai 888d5a34ad
Refactor BatchLogProcessor (#2494)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2025-01-14 03:31:39 -08:00
Utkarsh Umesan Pillai 73859c3bd7
Add internal-logs feature to opentelemetry-proto (#2507) 2025-01-13 18:01:25 -08:00
Utkarsh Umesan Pillai 6cc327d555
Update metrics benchmarks (#2502) 2025-01-09 11:42:56 -08:00
Utkarsh Umesan Pillai e05979d6d7
Update the link to Meeting Notes doc (#2501) 2025-01-07 22:57:07 -08:00
Joe Burnard a1dda220de
OTLP documentation: Fix typo where webkpi is used instead of webpki (#2498) 2025-01-07 07:58:36 -08:00
Mindaugas Vinkelis 82ed8e05f4
Separate Aggregate measure and collect functions (#2492) 2025-01-04 16:40:33 -08:00
Lalit Kumar Bhasin 37d2e51c23
chore: disable failing integration tests (#2495)
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2025-01-04 02:02:28 +05:30
Utkarsh Umesan Pillai 42b4f2fbd8
Minor refactoring- Rename the variants of LogBatchData enum (#2490) 2025-01-01 06:14:37 -08:00
Takahiro Ebato 17855be169
Retain empty tracer name as is instead of using default name (#2486)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-31 07:53:33 +00:00
Utkarsh Umesan Pillai 56e7fae000
Avoid vec allocation during each export for BatchLogProcessor - Part 2 (#2488) 2024-12-31 07:46:06 +00:00
Utkarsh Umesan Pillai 9a8ad95ee8
Avoid vec allocation during each export for BatchLogProcessor (#2483)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-27 17:56:14 -08:00
Utkarsh Umesan Pillai f3b5fd3af2
Minor refactoring (#2484) 2024-12-27 17:48:11 -08:00
Cijo Thomas d52fec7939
Minor cleanup in Stdout exporter (#2481) 2024-12-27 12:21:03 -08:00
Cijo Thomas 8fde6eb3b2
Remove unnecessary featureflag for eventname (#2480) 2024-12-27 10:45:39 -08:00
Cijo Thomas e378bc8776
Fix PeriodicReader shutdown to invoke shutdown on exporter (#2477) 2024-12-26 14:55:07 -08:00
Cijo Thomas 36f9caf45e
Fix changelog and few spell check issues (#2474)
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
2024-12-26 11:56:36 -08:00
Lalit Kumar Bhasin 5b86b7fd2c
Add doc-comment for SimpleLogProcessor and BatchLogProcessor (#2468) 2024-12-24 09:33:19 -08:00
Cijo Thomas 6e1032f9af
Nit fix to LogProcessor tests (#2467) 2024-12-24 05:23:32 -08:00
Cijo Thomas 8d5f2226df
Nit improvements to batchprocessors (#2466) 2024-12-23 17:17:37 -08:00
Cijo Thomas ef49833f4d
Minor followups to LogProcessor (#2464) 2024-12-23 12:46:53 -08:00
Lalit Kumar Bhasin 1f354674d6
BatchSpanProcessor with dedicated thread. (#2456) 2024-12-23 11:15:48 -08:00
Cijo Thomas 6209c06db5
Move BatchLogProcessor with runtime to separate module (#2463) 2024-12-20 22:14:39 -08:00
Lalit Kumar Bhasin 23f6ae2992
Use native Rust support for async traits in LogExporter::export() method (11% improvement) (#2374)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
2024-12-20 08:03:49 -08:00
Cijo Thomas 80629c8161
Minor update to stdout exporter example to show log correlation (#2459) 2024-12-19 20:50:27 -08:00
Cijo Thomas 3694ff9fe7
Modify OTLP Http example to use reqwest blocking (#2460) 2024-12-19 20:27:57 -08:00
Cijo Thomas f911ed5d7b
Cleanup leftover feature flag (#2458) 2024-12-19 10:38:38 -08:00
Lalit Kumar Bhasin c617be7449
chore: bring back BatchLogProcessorWithAsyncRuntime unit tests (#2457) 2024-12-19 09:27:42 -08:00
Scott Gerring 0fc0764ffd
chore: Test sync exporters (#2455) 2024-12-19 07:47:23 -08:00
Cijo Thomas acf16ed451
Tweaks to logs integration test (#2453) 2024-12-18 18:33:15 -08:00
Cijo Thomas 0a7ad690fc
Add internal logging to HTTP libraries (#2452) 2024-12-18 18:21:06 -08:00
Tom Tan 938893c526
Handle batch log processing in a dedicated background thread (#2436) 2024-12-18 16:12:02 -08:00
Mindaugas Vinkelis 06053411bd
Directly implement Measure trait for metric aggregates (#2371)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-18 10:52:38 -08:00
Cijo Thomas fbcba3b003
Enable metric integration test for req-blocking (#2445) 2024-12-18 06:28:32 -08:00
Cijo Thomas 9011f63ff6
Nit spelling fixes (#2443) 2024-12-17 18:00:31 -08:00
Mindaugas Vinkelis b9a422b39e
Directly implement ComputeAggregation (#2425)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Cijo Thomas <cithomas@microsoft.com>
2024-12-17 17:36:29 -08:00
Lalit Kumar Bhasin 551760b220
chore: publish otel-proto v1.5.0 (#2439)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-17 08:43:25 -08:00
Cijo Thomas 6a3b04dc0b
Add observable instruments to periodicreader tests (#2428) 2024-12-17 08:15:33 -08:00
Lalit Kumar Bhasin 4e5255401f
chore: cleanup unused dependencies (#2440)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-17 07:26:31 -08:00
Scott Gerring 9173ddf93f
chore: refactor integration tests and add metrics coverage (#2432)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-17 07:08:31 -08:00
David Pitoniak eb8d7c69ce
otlp: spawn thread to create blocking reqwest client (#2431)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2024-12-17 06:59:02 -08:00
Cijo Thomas 9cf7a40b21
Add a minor log to help debug when Meter is obtained from noops (#2435) 2024-12-16 17:16:43 -08:00
Cijo Thomas 967dc93055
Move shutdown checks to MeterProvider (#2433) 2024-12-16 07:51:09 -08:00
Lalit Kumar Bhasin c726c4d97a
Version pinning for tracing and tracing-core (#2418)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-15 22:28:53 +05:30
Lalit Kumar Bhasin fbc3c70c27
Make LogRecord fields private and add getters for encapsulation (#2314) 2024-12-15 20:28:55 +05:30
David Pitoniak b8380ebfa4
resource: remove vec allocations for attributes (#2429) 2024-12-13 17:15:21 -08:00
Mindaugas Vinkelis d2e179eb5d
Aggregate time initiator (#2423)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-13 12:33:59 -08:00
David Pitoniak 9aeae0f03f
Feat/add resource builder (#2322)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2024-12-13 12:16:03 -08:00
Cijo Thomas 15d69b1840
Improve shutdown handling in PeriodicReader (#2422) 2024-12-13 07:56:19 -08:00
Cijo Thomas 9b0ccce04e
Small improvements to PeriodicReader (#2421)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2024-12-13 05:44:26 -08:00
Lalit Kumar Bhasin ce550e36bb
bump MSRV to 1.75.0 for all crates (#2417) 2024-12-12 21:12:41 -08:00
Mindaugas Vinkelis dcaff0d7c1
Update CHANGELOGs for time changes in metrics aggregations (#2412)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: Cijo Thomas <cithomas@microsoft.com>
2024-12-12 09:00:54 -08:00
Björn Antonsson d0ef36510f
Include SpanContext information in Context Debug (#2365)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-11 20:03:40 -08:00
Cijo Thomas 1da35a99a2
Enable Feature in otlp http example (#2416) 2024-12-11 17:40:58 -08:00
Lalit Kumar Bhasin e8a28788b9
fix: missing thiserror crate when building with --no-default-features (#2413)
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-11 09:32:34 -08:00
Utkarsh Umesan Pillai f9581f545f
Add internal logging for OTLP Exporter tonic client (#2415) 2024-12-11 09:05:26 -08:00
Mindaugas Vinkelis e4cba9457e
Move time from DataPoint to Histogram/ExpoHistogram (#2411) 2024-12-11 07:27:43 -08:00
Cijo Thomas 1a4e931fe5
Minor fixes to PeriodicReader and examples (#2409) 2024-12-10 20:13:46 -08:00
Cijo Thomas 238a8f2a74
Make PeriodicReader with own Thread the default (#2403) 2024-12-10 16:08:00 -08:00
Mindaugas Vinkelis d67d1fc558
Move time from DataPoint to Sum/Gauge (#2377) 2024-12-10 13:14:58 -08:00
David Pitoniak f513768305
test: use rstest for table testing sdk resource (#2407)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-10 12:16:48 -08:00
Cijo Thomas 540cdb3613
Remove self diagnostics example (#2404) 2024-12-10 11:53:13 -08:00
Dirkjan Ochtman 257377315d
Upgrade to thiserror 2 (#2406)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-10 07:53:08 -08:00
Dirkjan Ochtman e8ccb01676
sdk: set correct span limit in builder (#2405) 2024-12-10 07:36:29 -08:00
Mindaugas Vinkelis 902baa9161
Gauge start-time is optional (#2389)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-09 15:20:31 -08:00
Lalit Kumar Bhasin de3ea4ee97
Remove deprecated LogData structure (#2398) 2024-12-09 18:33:31 +00:00
Cijo Thomas a16fac6ca6
OTLP Example fixes (#2394) 2024-12-09 10:19:05 -08:00
Cijo Thomas 2030f8f77f
Modify logs basic example to use tracing (#2397)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2024-12-09 09:38:48 -08:00
Lalit Kumar Bhasin 3df15a1b59
chore: Update semantic conventions v1.29.0, weaver v0.11.0 (#2395) 2024-12-08 11:02:31 -08:00
Lalit Kumar Bhasin b417e848dd
Remove Deprecated Logger Method (#2396) 2024-12-08 06:45:43 -08:00
Karol Fuksiewicz 01561465cb
feat: update otel versions for prometheus to 0.27 (#2309)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
Co-authored-by: ttys3 <ttys3.rust@gmail.com>
2024-12-05 16:12:33 -08:00
Lalit Kumar Bhasin 96b7acc73d
Remove `mut` self reference from LogExporter::export() method. (#2380) 2024-12-05 13:36:08 -08:00
Lalit Kumar Bhasin 957659fac9
chore: ARM64 CI fix. (#2382) 2024-12-04 09:48:31 -08:00
David Pitoniak 6d1a765c41
chore: remove the global::shutdown_tracer_provider function (#2369) 2024-12-04 06:03:46 -08:00
David Pitoniak b35c0d6b99
Feat/remove timeout from detectors (#2332) 2024-12-03 21:09:16 -08:00
Utkarsh Umesan Pillai 506a4f9fb2
[Metrics] Remove unnecessary box (#2376)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-02 20:01:35 -08:00
Utkarsh Umesan Pillai c73c938c4a
Fix CI (#2375) 2024-12-02 19:27:37 -08:00
Lalit Kumar Bhasin c225c82784
Add OTLP integration test for hyper and request client for logs and traces (#2312)
Co-authored-by: Zhongyang Wu <zhongyang.wu@outlook.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-02 15:47:57 -08:00
Mindaugas Vinkelis 5b6e9b952f
Time is not optional in DataPoints (#2367)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-12-02 12:15:56 -08:00
Cijo Thomas 6b71301b99
Use static dispatch in SimpleLogProcessor (#2338)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2024-12-02 10:01:08 -08:00
Lalit Kumar Bhasin a3c469b1a6
Fix CI lint error from new stable toolchain v1.83 (#2370) 2024-12-02 08:20:55 -08:00
Cijo Thomas e0159ad91f
Bump version of sdk release 0.27.1 (#2362) 2024-11-27 12:05:00 -08:00
Cijo Thomas b7276d824e
Minor Internal logs (#2361) 2024-11-27 11:45:03 -08:00
Scott Gerring cbe9ebe91e
Track dropped spans and logs due to full buffer (#2357)
Co-authored-by: Cijo Thomas <cithomas@microsoft.com>
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-11-27 10:24:06 -08:00
Cijo Thomas 195dea8070
Few doc updates for opentelemetry (#2353) 2024-11-27 10:04:27 -08:00
Cijo Thomas ab332b06c9
Let CI check for stable run even if beta run fails (#2360) 2024-11-27 09:56:47 -08:00
Cijo Thomas 53e892a2ca
Nit fix by avoiding bound validation unless View feature is enabled (#2355) 2024-11-26 17:55:06 -08:00
Cijo Thomas 0e221c1d43
Bugfix - add validation for custom buckets provided for Histograms (#2351)
Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com>
2024-11-26 17:30:29 -08:00
Lalit Kumar Bhasin fa6e6cd289
Replace once_cell::Lazy with std::sync::OnceLock for global Initialization in otel-sdk crate (#2342) 2024-11-26 17:09:04 -08:00
Utkarsh Umesan Pillai 8e6b479684
Avoid additional HashMap allocation for Cumulative aggregation (#2352) 2024-11-26 16:00:51 -08:00
Cijo Thomas 1cecaeabf4
Add few internal logs for Metrics sdks (#2350) 2024-11-26 14:05:26 -08:00
Lalit Kumar Bhasin 91370d23cf
Deprecate logger::provider() and logger::instrumentation_scope() (#2349)
Co-authored-by: Cijo Thomas <cithomas@microsoft.com>
2024-11-26 13:04:18 -08:00
Cijo Thomas c9388e4753
Preallocate and keep memory for HashMap in Metric aggregation (#2343) 2024-11-26 10:25:12 -08:00
Cijo Thomas 3a4d12af9a
Minor fix to main readme (#2348) 2024-11-26 10:15:49 -08:00
Björn Antonsson 9a85ce1c0c
Remove non-default feature from doc test (#2346) 2024-11-26 07:22:15 -08:00
Lalit Kumar Bhasin 1541a330e2
attempt to fix stress test CI by pinning libc (#2344) 2024-11-25 19:20:32 -08:00
Lalit Kumar Bhasin 1119549b0d
Remove async-trait crate as dependency for traces (#2339) 2024-11-25 12:24:15 -08:00
Cijo Thomas e6c351f2d7
More consistent readme for msrv (#2336)
Co-authored-by: Lalit Kumar Bhasin <lalit_fin@yahoo.com>
2024-11-25 10:40:27 -08:00
David Pitoniak 129ca03128
feat: add concurrency options to ci workflow (#2337) 2024-11-24 17:38:29 -08:00
Cijo Thomas dd982e3501
InternalLog fixes for GlobalMeterProvider (#2333) 2024-11-24 12:35:48 -08:00
Lalit Kumar Bhasin a6e2cd6e91
Remove default logger name (#2316)
Co-authored-by: Cijo Thomas <cithomas@microsoft.com>
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-11-24 11:39:18 -08:00
Cijo Thomas 882e1c2d25
Empty MeterName retained as is instead of using default name (#2334) 2024-11-24 10:33:03 -08:00
Cijo Thomas 5de2caf5de
Emit internal log for global meter provider (#2331) 2024-11-23 18:37:01 -08:00
Cijo Thomas 8c9babbda8
Minor additions to internal logs (#2330) 2024-11-23 12:52:32 -08:00
Lalit Kumar Bhasin ebeeea1724
Replace `once_cell::Lazy` with `std::sync::OnceLock` for global Initialization in OpenTelemetry API crate (#2326)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-11-23 12:22:53 -08:00
Lalit Kumar Bhasin 3d352d870a
[Logs SDK] Deprecate LogData struct (#2325)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-11-22 18:42:22 -08:00
Lalit Kumar Bhasin 2ba2e7fffe
[CI fix] msrv patch for rustls (#2327) 2024-11-22 17:05:54 -08:00
Lalit Kumar Bhasin 6ca7881f7c
CI fix: patch msrv for rustls-native-certs (#2318) 2024-11-22 13:43:41 -08:00
Cijo Thomas 465fcc2eda
AttributeSet cleanup, better perf for overflows (#2313) 2024-11-20 19:41:40 -08:00
Lalit Kumar Bhasin af9d925736
chore: update proto definition to v1.4.0 (#2315) 2024-11-20 17:33:37 -08:00
Cijo Thomas ca6c67e752
Remove building examples twice (#2311) 2024-11-20 11:27:48 -08:00
Mindaugas Vinkelis 3b652aca29
Use ValueMap in ExpoHistogram (#2282)
Co-authored-by: Cijo Thomas <cijo.thomas@gmail.com>
2024-11-20 11:13:24 -08:00
Cijo Thomas cd54f24224
Update benchmark results for metrics (#2307) 2024-11-20 10:36:09 -08:00
Cijo Thomas a4f602b4cb
Doc CI to use stable rust (#2310) 2024-11-20 10:25:09 -08:00
David Pitoniak 849778d524
Refactor/deprecate trace config (#2303) 2024-11-18 18:11:46 -08:00
Utkarsh Umesan Pillai 41afd7f916
Make stress tests independent of batch size (#2306) 2024-11-18 10:53:52 -08:00
Raphael Deem 3ac2d9fb64
cleanup imports in examples (#2300) 2024-11-14 07:49:32 -08:00
Lalit Kumar Bhasin 45ed2e061a
Publish otel-appender-log : add version to semconv (#2299) 2024-11-11 17:43:34 -08:00
330 changed files with 32426 additions and 12425 deletions

3
.cargo/config.toml Normal file
View File

@ -0,0 +1,3 @@
[resolver]
# https://doc.rust-lang.org/cargo/reference/config.html#resolverincompatible-rust-versions
incompatible-rust-versions = "fallback"

View File

@ -12,7 +12,8 @@
"ignoreWords": [
"otel",
"rustdoc",
"rustfilt"
"rustfilt",
"webkpi"
],
// these are words that are always considered incorrect.
"flagWords": [
@ -26,41 +27,89 @@
// workspace dictionary.
"words": [
"actix",
"Antonsson",
"anyvalue",
"appender",
"appenders",
"autobenches",
"Bhasin",
"Björn",
"BLRP",
"chrono",
"Cijo",
"clippy",
"clonable",
"codecov",
"dashmap",
"datapoint",
"deque",
"Dirkjan",
"docsrs",
"Dwarnings",
"eprintln",
"EPYC",
"flamegraph",
"Gerring",
"grpcio",
"Grübel",
"hasher",
"impls",
"isahc",
"Isobel",
"jaegertracing",
"Kühle",
"Kumar",
"Lalit",
"LIBCLANG",
"logrecord",
"MILLIS",
"mpsc",
"msrv",
"mykey",
"myunit",
"myvalue",
"nocapture",
"Ochtman",
"opentelemetry",
"openzipkin",
"otcorrelations",
"OTELCOL",
"OTLP",
"periodicreader",
"Pillai",
"pprof",
"protos",
"prost",
"protoc",
"quantile",
"quantiles",
"Redelmeier",
"reqwest",
"rstest",
"runtimes",
"rustc",
"rustls",
"schemars",
"semconv",
"serde",
"shoppingcart",
"struct",
"Tescher",
"testcontainers",
"testresults",
"thiserror",
"traceparent",
"Traceparent",
"tracerprovider",
"tracestate",
"UCUM",
"Umesan",
"unsampled",
"updown",
"urlencoding",
"usize",
"Utkarsh",
"webpki",
"Zhongyang",
"zipkin"
],

View File

@ -28,7 +28,7 @@ body:
- type: textarea
id: sdk-version
attributes:
label: label: OpenTelemetry SDK Version (i.e version of `opentelemetry_sdk` crate)
label: OpenTelemetry SDK Version (i.e version of `opentelemetry_sdk` crate)
description: What version of the `opentelemetry_sdk` crate are you using?
placeholder: 0.x, 1.x, etc.
validations:

View File

@ -1,18 +0,0 @@
# Log of local changes
Maintainers are expected to maintain this log. This is required as per
[OpenTelemetry Community
guidelines](https://github.com/open-telemetry/community/blob/main/docs/how-to-configure-new-repository.md#collaborators-and-teams).
## May 6th 2024
Modified branch protection for main branch to require the following CI checks as
we now added Windows to CI.
test (ubuntu-latest, stable)
test (stable, windows-latest)
## April 30th 2024
Modified branch protection for main branch to require the following CI checks:
docs
test (stable)

59
.github/workflows/benchmark.yml vendored Normal file
View File

@ -0,0 +1,59 @@
# This workflow runs a Criterion benchmark on a PR and compares the results against the base branch.
# It is triggered on a PR or a push to main.
#
# The workflow is gated on the presence of the "performance" label on the PR.
#
# The workflow runs on a self-hosted runner pool. We can't use the shared runners for this,
# because they are only permitted to run on the default branch to preserve resources.
#
# In the future, we might like to consider using bencher.dev or the framework used by otel-golang here.
on:
pull_request:
push:
branches:
- main
name: benchmark pull requests
permissions:
contents: read
jobs:
runBenchmark:
name: run benchmark
permissions:
pull-requests: write
# If we're running on main, use our oracle bare-metal runner for accuracy.
# If we're running on a PR, use github's shared workers to save resources.
runs-on: ${{ github.event_name == 'pull_request' && 'ubuntu-latest' || 'oracle-bare-metal-64cpu-512gb-x86-64' }}
if: ${{ (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'performance')) || github.event_name == 'push' }}
container:
image: rust:slim-bullseye
env:
# For PRs, compare against the base branch - e.g., 'main'.
# For pushes to main, compare against the previous commit
BRANCH_NAME: ${{ github.event_name == 'pull_request' && github.base_ref || github.event.before }}
GIT_DISCOVERY_ACROSS_FILESYSTEM: 1
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- name: Setup container environment
run: |
apt-get update && apt-get install --fix-missing -y unzip cmake build-essential pkg-config curl git
cargo install cargo-criterion
- name: Make repo safe for Git inside container
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 10 # Fetch a bit of history so we can do perf diffs
- uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: boa-dev/criterion-compare-action@adfd3a94634fe2041ce5613eb7df09d247555b87 # v3.2.4
with:
branchName: ${{ env.BRANCH_NAME }}

View File

@ -1,6 +1,8 @@
name: CI
env:
CI: true
permissions:
contents: read
on:
pull_request:
push:
@ -8,6 +10,9 @@ on:
- main
paths-ignore:
- '**.md'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
test:
strategy:
@ -22,9 +27,15 @@ jobs:
- rust: stable
os: macos-latest
- rust: stable
os: actuated-arm64-4cpu-16gb
os: ubuntu-22.04-arm
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.rust == 'beta' }}
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- name: Free disk space
if: ${{ matrix.os == 'ubuntu-latest'}}
run: |
@ -32,16 +43,16 @@ jobs:
sudo rm -rf /usr/local/lib/android
sudo rm -rf /usr/share/dotnet
df -h
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: true
- uses: dtolnay/rust-toolchain@master
- uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
with:
toolchain: ${{ matrix.rust }}
components: rustfmt
- name: "Set rustup profile"
run: rustup set profile minimal
- uses: arduino/setup-protoc@v3
- uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Test
@ -49,93 +60,124 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: true
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
with:
components: rustfmt
- uses: arduino/setup-protoc@v3
toolchain: stable
components: rustfmt, clippy
- uses: taiki-e/install-action@0eee80d37f55e834144deec670972c19e81a85b0 # v2.56.0
with:
tool: cargo-hack
- uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
- name: Format
run: cargo fmt --all -- --check
- name: Lint
run: bash ./scripts/lint.sh
external-types:
strategy:
matrix:
example: [opentelemetry, opentelemetry-sdk, opentelemetry-otlp, opentelemetry-zipkin]
member: [opentelemetry, opentelemetry-sdk, opentelemetry-otlp, opentelemetry-zipkin]
runs-on: ubuntu-latest # TODO: Check if this could be covered for Windows. The step used currently fails on Windows.
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
toolchain: nightly-2024-06-30
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
with:
# Rust version should be kept in sync with the one the release was tested with
# https://github.com/awslabs/cargo-check-external-types/releases
toolchain: nightly-2025-05-04
components: rustfmt
- uses: taiki-e/install-action@0eee80d37f55e834144deec670972c19e81a85b0 # v2.56.0
with:
tool: cargo-check-external-types@0.2.0
- name: external-type-check
run: |
cargo install cargo-check-external-types@0.1.13
cd ${{ matrix.example }}
cargo check-external-types --config allowed-external-types.toml
non-default-examples:
strategy:
matrix:
os: [ windows-latest, ubuntu-latest ]
example: [opentelemetry-otlp/examples/basic-otlp]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Build
run: |
cd ${{ matrix.example }}
cargo build --verbose
working-directory: ${{ matrix.member }}
run: cargo check-external-types --all-features --config allowed-external-types.toml
msrv:
strategy:
matrix:
os: [windows-latest, ubuntu-latest]
rust: [1.70.0, 1.71.1]
runs-on: ${{ matrix.os }}
continue-on-error: true
steps:
- uses: actions/checkout@v4
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: true
- name: Set up Rust ${{ matrix.rust }}
uses: dtolnay/rust-toolchain@master
- uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
with:
toolchain: ${{ matrix.rust }}
- name: Patch dependencies versions
run: bash ./scripts/patch_dependencies.sh
toolchain: stable
- uses: taiki-e/install-action@0eee80d37f55e834144deec670972c19e81a85b0 # v2.56.0
with:
tool: cargo-msrv
- uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Check MSRV for all crates
run: bash ./scripts/msrv.sh ${{ matrix.rust }}
run: bash ./scripts/msrv.sh
cargo-deny:
runs-on: ubuntu-latest # This uses the step `EmbarkStudios/cargo-deny-action@v1` which is only supported on Linux
continue-on-error: true # Prevent sudden announcement of a new advisory from failing ci
steps:
- uses: actions/checkout@v4
- uses: EmbarkStudios/cargo-deny-action@v1
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Check advisories
uses: EmbarkStudios/cargo-deny-action@30f817c6f72275c6d54dc744fbca09ebc958599f # v2.0.12
with:
command: check advisories
- name: Check licenses
uses: EmbarkStudios/cargo-deny-action@30f817c6f72275c6d54dc744fbca09ebc958599f # v2.0.12
with:
command: check licenses
- name: Check bans
uses: EmbarkStudios/cargo-deny-action@30f817c6f72275c6d54dc744fbca09ebc958599f # v2.0.12
with:
command: check bans
- name: Check sources
uses: EmbarkStudios/cargo-deny-action@30f817c6f72275c6d54dc744fbca09ebc958599f # v2.0.12
with:
command: check sources
docs:
continue-on-error: true
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
with:
toolchain: stable
components: rustfmt
- uses: arduino/setup-protoc@v3
- uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: doc
@ -148,26 +190,72 @@ jobs:
runs-on: ubuntu-latest
if: ${{ ! contains(github.event.pull_request.labels.*.name, 'dependencies') }}
steps:
- uses: actions/checkout@v4
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: true
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
with:
toolchain: stable
components: rustfmt,llvm-tools-preview
- uses: arduino/setup-protoc@v3
- uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: cargo install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@0eee80d37f55e834144deec670972c19e81a85b0 # v2.56.0
with:
tool: cargo-llvm-cov
- name: cargo generate-lockfile
if: hashFiles('Cargo.lock') == ''
run: cargo generate-lockfile
- name: cargo llvm-cov
run: cargo llvm-cov --locked --all-features --workspace --lcov --output-path lcov.info
run: cargo llvm-cov --locked --all-features --workspace --lcov --lib --output-path lcov.info
- name: Upload to codecov.io
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
fail_ci_if_error: true
build-examples:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # stable
with:
toolchain: stable
components: rustfmt
- uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Build examples
run: |
for example in examples/*; do
if [ -d "$example" ]; then
echo "Building $example"
cargo build
fi
done
cargo-machete:
continue-on-error: true
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: true
- uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
with:
toolchain: stable
- uses: taiki-e/install-action@0eee80d37f55e834144deec670972c19e81a85b0 # v2.56.0
with:
tool: cargo-machete
- name: cargo machete
run: cargo machete

45
.github/workflows/codeql-analysis.yml vendored Normal file
View File

@ -0,0 +1,45 @@
name: "CodeQL Analysis"
env:
CODEQL_ENABLE_EXPERIMENTAL_FEATURES : true # CodeQL support for Rust is experimental
permissions:
contents: read
on:
pull_request:
push:
branches: [main]
workflow_dispatch:
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
security-events: write # for github/codeql-action/autobuild to send a status report
strategy:
fail-fast: false
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: true
- name: Initialize CodeQL
uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
with:
languages: rust
- name: Autobuild
uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2

25
.github/workflows/fossa.yml vendored Normal file
View File

@ -0,0 +1,25 @@
name: FOSSA scanning
on:
push:
branches:
- main
permissions:
contents: read
jobs:
fossa:
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0
with:
api-key: ${{secrets.FOSSA_API_KEY}}
team: OpenTelemetry

View File

@ -5,24 +5,34 @@ on:
pull_request:
types: [ labeled, synchronize, opened, reopened ]
permissions:
contents: read
jobs:
integration_tests:
runs-on: ubuntu-latest
timeout-minutes: 10
if: ${{ github.event.label.name == 'integration tests' || contains(github.event.pull_request.labels.*.name, 'integration tests') }}
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- name: Free disk space
run: |
df -h
sudo rm -rf /usr/local/lib/android
sudo rm -rf /usr/share/dotnet
df -h
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: true
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
with:
toolchain: stable
components: rustfmt
- uses: arduino/setup-protoc@v3
- name: Run integration tests using docker compose
- uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run integration tests
run: ./scripts/integration_tests.sh

View File

@ -8,14 +8,22 @@ on:
paths:
- '**/*.md'
permissions:
contents: read
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install markdown-link-check
run: npm install -g markdown-link-check@3.11.2
run: npm install -g "git://github.com/tcort/markdown-link-check.git#ef7e09486e579ba7479700b386e7ca90f34cbd0a" # v3.13.7
- name: Run markdown-link-check
run: |

53
.github/workflows/ossf-scorecard.yml vendored Normal file
View File

@ -0,0 +1,53 @@
name: OSSF Scorecard
on:
push:
branches:
- main
schedule:
- cron: "50 3 * * 0" # once a week
workflow_dispatch:
permissions:
contents: read
jobs:
analysis:
runs-on: ubuntu-latest
permissions:
# Needed for Code scanning upload
security-events: write
# Needed for GitHub OIDC token if publish_results is true
id-token: write
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
with:
results_file: results.sarif
results_format: sarif
publish_results: true
# Upload the results as artifacts (optional). Commenting out will disable
# uploads of run results in SARIF format to the repository Actions tab.
# https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts
- name: "Upload artifact"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: SARIF file
path: results.sarif
retention-days: 5
# Upload the results to GitHub's code scanning dashboard (optional).
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
with:
sarif_file: results.sarif

23
.github/workflows/pr_naming.yml vendored Normal file
View File

@ -0,0 +1,23 @@
name: PR Conventional Commit Validation
on:
pull_request:
types: [opened, synchronize, reopened, edited]
permissions:
contents: read
jobs:
validate-pr-title:
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- name: PR Conventional Commit Validation
uses: ytanikin/pr-conventional-commits@8267db1bacc237419f9ed0228bb9d94e94271a1d # 1.4.1
with:
task_types: '["build","chore","ci","docs","feat","fix","perf","refactor","revert","test"]'
add_label: 'false'

View File

@ -1,22 +1,29 @@
name: Semver compliance
env:
CI: true
permissions:
contents: read
on:
pull_request:
types: [ labeled, synchronize, opened, reopened ]
jobs:
semver-compliance: # This job uses the latest published crate as baseline for comparison.
runs-on: ubuntu-latest
timeout-minutes: 10
if: ${{ github.event.label.name == 'semver-check' || contains(github.event.pull_request.labels.*.name, 'semver-check') }}
steps:
- uses: actions/checkout@v4
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
with:
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: true
- name: Install stable
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b
with:
toolchain: stable
components: rustfmt
- name: cargo-semver-checks
uses: obi1kenobi/cargo-semver-checks-action@v2.6
uses: obi1kenobi/cargo-semver-checks-action@5b298c9520f7096a4683c0bd981a7ac5a7e249ae # v2.8

4
.gitignore vendored
View File

@ -5,4 +5,6 @@
Cargo.lock
/.idea/
.cosine
.cosine
opentelemetry-otlp/tests/integration_test/result.json

2
.gitmodules vendored
View File

@ -1,4 +1,4 @@
[submodule "opentelemetry-proto/src/proto/opentelemetry-proto"]
path = opentelemetry-proto/src/proto/opentelemetry-proto
url = https://github.com/open-telemetry/opentelemetry-proto
branch = tags/v1.0.0
branch = tags/v1.5.0

View File

@ -8,7 +8,7 @@ for specific dates and for Zoom meeting links. "OTel Rust SIG" is the name of
meeting for this group.
Meeting notes are available as a public [Google
doc](https://docs.google.com/document/d/1tGKuCsSnyT2McDncVJrMgg74_z8V06riWZa0Sr79I_4/edit).
doc](https://docs.google.com/document/d/12upOzNk8c3SFTjsL6IRohCWMgzLKoknSCOOdMakbWo4/edit).
If you have trouble accessing the doc, please get in touch on
[Slack](https://cloud-native.slack.com/archives/C03GDP0H023).
@ -78,9 +78,13 @@ Open a pull request against the main
[opentelemetry-rust](https://github.com/open-telemetry/opentelemetry-rust)
repo.
Your pull request should be named according to the
[conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) standard. This ensures that
when the PR is squashed into `main`, the resulting commit message is consistent and makes it easier
for us to generate a changelog.
> **Note**
> It is recommended to run [pre-commit script](scripts/precommit.sh) from the root of
the repo to catch any issues locally.
> It is recommended to run [pre-commit script](scripts/precommit.sh) to catch any issues locally.
### How to Receive Comments
@ -168,7 +172,7 @@ It's important to regularly review and remove the `otel_unstable` flag from the
The potential features include:
- Stable and non-experimental features that compliant to specification, and have a feature flag to minimize compilation size. Example: feature flags for signals (like `logs`, `traces`, `metrics`) and runtimes (`rt-tokio`, `rt-tokio-current-thread`, `rt-async-std`).
- Stable and non-experimental features that are compliant with the specification and have a feature flag to minimize compilation size. Example: feature flags for signals (like `logs`, `traces`, `metrics`) and runtimes (`rt-tokio`, `rt-tokio-current-thread`).
- Stable and non-experimental features, although not part of the specification, are crucial for enhancing the tracing/log crate's functionality or boosting performance. These features are also subject to discussion and approval by the OpenTelemetry Rust Maintainers.
All such features should adhere to naming convention `<signal>_<feature_name>`

View File

@ -8,6 +8,9 @@ members = [
"stress",
]
resolver = "2"
# Avoid applying patch to force use of workspace members for this
# not actively maintained crate
exclude = ["opentelemetry-prometheus"]
[profile.bench]
# https://doc.rust-lang.org/cargo/reference/profiles.html#bench
@ -16,7 +19,6 @@ resolver = "2"
debug = 1
[workspace.dependencies]
async-std = "1.13"
async-trait = "0.1"
bytes = "1"
criterion = "0.5"
@ -29,22 +31,64 @@ hyper = { version = "1.3", default-features = false }
hyper-util = "0.1"
log = "0.4.21"
once_cell = "1.13"
ordered-float = "4.0"
pin-project-lite = "0.2"
prost = "0.13"
prost-build = "0.13"
prost-types = "0.13"
rand = { version = "0.8", default-features = false }
rand = { version = "0.9", default-features = false }
reqwest = { version = "0.12", default-features = false }
serde = { version = "1.0", default-features = false }
serde_json = "1.0"
temp-env = "0.3.6"
thiserror = { version = "1", default-features = false }
tonic = { version = "0.12.3", default-features = false }
tonic-build = "0.12"
thiserror = { version = "2", default-features = false }
tonic = { version = "0.13", default-features = false }
tonic-build = "0.13"
tokio = { version = "1", default-features = false }
tokio-stream = "0.1"
tracing = { version = "0.1", default-features = false }
tracing-core = { version = "0.1", default-features = false }
# Using `tracing 0.1.40` because 0.1.39 (which is yanked) introduces the ability to set event names in macros,
# required for OpenTelemetry's internal logging macros.
tracing = { version = ">=0.1.40", default-features = false }
# `tracing-core >=0.1.33` is required for compatibility with `tracing >=0.1.40`.
tracing-core = { version = ">=0.1.33", default-features = false }
tracing-subscriber = { version = "0.3", default-features = false }
url = { version = "2.5.2", default-features = false } #https://github.com/servo/rust-url/issues/992
url = { version = "2.5", default-features = false }
anyhow = "1.0.94"
base64 = "0.22.1"
chrono = { version = "0.4.34", default-features = false }
ctor = "0.2.9"
ctrlc = "3.2.5"
futures-channel = "0.3"
futures-sink = "0.3"
const-hex = "1.14.1"
lazy_static = "1.4.0"
num-format = "0.4.4"
num_cpus = "1.15.0"
opentelemetry-appender-tracing = { path = "opentelemetry-appender-tracing", default-features = false }
opentelemetry-otlp = { path = "opentelemetry-otlp" }
opentelemetry-stdout = { path = "opentelemetry-stdout" }
percent-encoding = "2.0"
rstest = "0.23.0"
schemars = "0.8"
sysinfo = "0.32"
tempfile = "3.3.0"
testcontainers = "0.23.1"
tracing-log = "0.2"
tracing-opentelemetry = "0.31"
typed-builder = "0.20"
uuid = "1.3"
# Avoid use of crates.io version of these crates through the tracing-opentelemetry dependencies
[patch.crates-io]
opentelemetry = { path = "opentelemetry" }
opentelemetry_sdk = { path = "opentelemetry-sdk" }
opentelemetry-stdout = { path = "opentelemetry-stdout" }
[workspace.lints.rust]
rust_2024_compatibility = { level = "warn", priority = -1 }
# No need to enable those, because it either not needed or results in ugly syntax
edition_2024_expr_fragment_specifier = "allow"
if_let_rescope = "allow"
tail_expr_drop_order = "allow"
[workspace.lints.clippy]
all = { level = "warn", priority = 1 }

View File

@ -3,10 +3,11 @@
The Rust [OpenTelemetry](https://opentelemetry.io/) implementation.
[![Crates.io: opentelemetry](https://img.shields.io/crates/v/opentelemetry.svg)](https://crates.io/crates/opentelemetry)
[![Documentation](https://docs.rs/opentelemetry/badge.svg)](https://docs.rs/opentelemetry)
[![LICENSE](https://img.shields.io/crates/l/opentelemetry)](./LICENSE)
[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain)
[![Documentation](https://docs.rs/opentelemetry/badge.svg)](https://docs.rs/opentelemetry)
[![codecov](https://codecov.io/gh/open-telemetry/opentelemetry-rust/branch/main/graph/badge.svg)](https://codecov.io/gh/open-telemetry/opentelemetry-rust)
[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-rust/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-rust)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023)
## Overview
@ -17,23 +18,29 @@ analysis in order to understand your software's performance and behavior. You
can export and analyze them using [Prometheus], [Jaeger], and other
observability tools.
*Compiler support: [requires `rustc` 1.70+][msrv]*
*[Supported Rust Versions](#supported-rust-versions)*
[Prometheus]: https://prometheus.io
[Jaeger]: https://www.jaegertracing.io
[msrv]: #supported-rust-versions
## Project Status
The table below summarizes the overall status of each component. Some components
include unstable features, which are documented in their respective crate
documentation.
| Signal/Component | Overall Status |
| -------------------- | ------------------ |
| Logs-API | RC* |
| Logs-SDK | Beta |
| Logs-OTLP Exporter | Beta |
| Logs-Appender-Tracing | Beta |
| Metrics-API | RC |
| Metrics-SDK | Beta |
| Metrics-OTLP Exporter | Beta |
| Context | Beta |
| Baggage | RC |
| Propagators | Beta |
| Logs-API | Stable* |
| Logs-SDK | Stable |
| Logs-OTLP Exporter | RC |
| Logs-Appender-Tracing | Stable |
| Metrics-API | Stable |
| Metrics-SDK | Stable |
| Metrics-OTLP Exporter | RC |
| Traces-API | Beta |
| Traces-SDK | Beta |
| Traces-OTLP Exporter | Beta |
@ -52,49 +59,28 @@ If you already use the logging APIs from above, continue to use them, and use
the appenders above to bridge the logs to OpenTelemetry. If you are using a
library not listed here, feel free to contribute a new appender for the same.
If you are starting fresh, then consider using
If you are starting fresh, we recommend using
[tracing](https://github.com/tokio-rs/tracing) as your logging API. It supports
structured logging and is actively maintained.
structured logging and is actively maintained. `OpenTelemetry` itself uses
`tracing` for its internal logging.
Project versioning information and stability guarantees can be found
[here](VERSIONING.md).
## Getting Started
```rust
use opentelemetry::{
global,
trace::{Tracer, TracerProvider as _},
};
use opentelemetry_sdk::trace::TracerProvider;
If you are new to OpenTelemetry, start with the [Stdout
Example](./opentelemetry-stdout/examples/basic.rs). This example demonstrates
how to use OpenTelemetry for logs, metrics, and traces, and display
telemetry data on your console.
fn main() {
// Create a new trace pipeline that prints to stdout
let provider = TracerProvider::builder()
.with_simple_exporter(opentelemetry_stdout::SpanExporter::default())
.build();
let tracer = provider.tracer("readme_example");
For those using OTLP, the recommended OpenTelemetry Exporter for production
scenarios, refer to the [OTLP Example -
HTTP](./opentelemetry-otlp/examples/basic-otlp-http/README.md) and the [OTLP
Example - gRPC](./opentelemetry-otlp/examples/basic-otlp/README.md).
tracer.in_span("doing_work", |cx| {
// Traced app logic here...
});
// Shutdown trace pipeline
global::shutdown_tracer_provider();
}
```
The example above requires the following packages:
```toml
# Cargo.toml
[dependencies]
opentelemetry = "0.22"
opentelemetry_sdk = "0.22"
opentelemetry-stdout = { version = "0.3", features = ["trace"] }
```
See the [examples](./examples) directory for different integration patterns.
Additional examples for various integration patterns can be found in the
[examples](./examples) directory.
## Overview of crates
@ -159,7 +145,7 @@ Registry](https://opentelemetry.io/ecosystem/registry/?language=rust).
## Supported Rust Versions
OpenTelemetry is built against the latest stable release. The minimum supported
version is 1.70. The current OpenTelemetry version is not guaranteed to build
version is 1.75. The current OpenTelemetry version is not guaranteed to build
on Rust versions earlier than the minimum supported version.
The current stable Rust compiler and the three most recent minor versions
@ -181,7 +167,7 @@ for specific dates and for Zoom meeting links. "OTel Rust SIG" is the name of
meeting for this group.
Meeting notes are available as a public [Google
doc](https://docs.google.com/document/d/1tGKuCsSnyT2McDncVJrMgg74_z8V06riWZa0Sr79I_4/edit).
doc](https://docs.google.com/document/d/12upOzNk8c3SFTjsL6IRohCWMgzLKoknSCOOdMakbWo4/edit).
If you have trouble accessing the doc, please get in touch on
[Slack](https://cloud-native.slack.com/archives/C03GDP0H023).
@ -194,24 +180,33 @@ you're more than welcome to participate!
### Maintainers
* [Cijo Thomas](https://github.com/cijothomas)
* [Cijo Thomas](https://github.com/cijothomas), Microsoft
* [Harold Dost](https://github.com/hdost)
* [Julian Tescher](https://github.com/jtescher)
* [Lalit Kumar Bhasin](https://github.com/lalitb)
* [Utkarsh Umesan Pillai](https://github.com/utpilla)
* [Lalit Kumar Bhasin](https://github.com/lalitb), Microsoft
* [Utkarsh Umesan Pillai](https://github.com/utpilla), Microsoft
* [Zhongyang Wu](https://github.com/TommyCpp)
For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
### Approvers
* [Shaun Cox](https://github.com/shaun-cox)
* [Anton Grübel](https://github.com/gruebel), Baz
* [Björn Antonsson](https://github.com/bantonsson), Datadog
* [Scott Gerring](https://github.com/scottgerring), Datadog
* [Shaun Cox](https://github.com/shaun-cox), Microsoft
For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
### Emeritus
* [Dirkjan Ochtman](https://github.com/djc)
* [Jan Kühle](https://github.com/frigus02)
* [Isobel Redelmeier](https://github.com/iredelmeier)
* [Jan Kühle](https://github.com/frigus02)
* [Julian Tescher](https://github.com/jtescher)
* [Mike Goldsmith](https://github.com/MikeGoldsmith)
For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
### Thanks to all the people who have contributed
[![contributors](https://contributors-img.web.app/image?repo=open-telemetry/opentelemetry-rust)](https://github.com/open-telemetry/opentelemetry-rust/graphs/contributors)

View File

@ -1,20 +1,39 @@
exclude=[
"actix-http",
"actix-http-tracing",
"actix-udp",
"actix-udp-example",
"tracing-grpc",
"http"
]
[graph]
exclude=[]
[licenses]
unlicensed = "deny"
allow = [
"MIT",
"Apache-2.0",
"ISC",
"BSD-3-Clause",
"OpenSSL"
]
exceptions = [
{ allow = ["CDLA-Permissive-2.0"], crate = "webpki-roots" }, # This crate is a dependency of `reqwest`.
{ allow = ["Unicode-3.0"], crate = "icu_collections" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "icu_locid" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "icu_locid_transform" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "icu_locid_transform_data" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "icu_locale_core" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "icu_normalizer" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "icu_normalizer_data" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "icu_properties" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "icu_properties_data" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "icu_provider" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "icu_provider_macros" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "potential_utf" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "litemap" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "tinystr" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "writeable" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "unicode-ident" }, # This crate gets used transitively by `reqwest` and other crates.
{ allow = ["Unicode-3.0"], crate = "yoke" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "yoke-derive" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "zerovec" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "zerotrie" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "zerovec-derive" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "zerofrom" }, # This crate gets used transitively by `reqwest`.
{ allow = ["Unicode-3.0"], crate = "zerofrom-derive" }, # This crate gets used transitively by `reqwest`.
]
[licenses.private]
@ -28,11 +47,31 @@ license-files = [
{ path = "LICENSE", hash = 0xbd0eed23 }
]
# This section is considered when running `cargo deny check advisories`
# More documentation for the advisories section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html
[advisories]
ignore = [
# unsoundness in indirect dependencies without a safe upgrade below
"RUSTSEC-2021-0145",
"RUSTSEC-2019-0036"
]
unmaintained = "allow"
yanked = "allow"
unmaintained = "none"
yanked = "deny"
# This section is considered when running `cargo deny check bans`.
# More documentation about the 'bans' section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html
[bans]
# Lint level for when multiple versions of the same crate are detected
multiple-versions = "warn"
# Lint level for when a crate version requirement is `*`
wildcards = "warn"
# The graph highlighting used when creating dotgraphs for crates
# with multiple versions
# * lowest-version - The path to the lowest versioned duplicate is highlighted
# * simplest-path - The path to the version with the fewest edges is highlighted
# * all - Both lowest-version and simplest-path are used
highlight = "all"
# This section is considered when running `cargo deny check sources`.
# More documentation about the 'sources' section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html
[sources]
unknown-registry = "deny"
unknown-git = "deny"

View File

@ -0,0 +1,175 @@
# Error handling patterns in public API interfaces
## Date
27 Feb 2025
## Summary
This ADR describes the general pattern we will follow when modelling errors in public API interfaces - that is, APIs that are exposed to users of the project's published crates. It summarizes the discussion and final option from [#2571](https://github.com/open-telemetry/opentelemetry-rust/issues/2571); for more context check out that issue.
We will focus on the exporter traits in this example, but the outcome should be applied to _all_ public traits and their fallible operations.
These include [SpanExporter](https://github.com/open-telemetry/opentelemetry-rust/blob/eca1ce87084c39667061281e662d5edb9a002882/opentelemetry-sdk/src/trace/export.rs#L18), [LogExporter](https://github.com/open-telemetry/opentelemetry-rust/blob/eca1ce87084c39667061281e662d5edb9a002882/opentelemetry-sdk/src/logs/export.rs#L115), and [PushMetricExporter](https://github.com/open-telemetry/opentelemetry-rust/blob/eca1ce87084c39667061281e662d5edb9a002882/opentelemetry-sdk/src/metrics/exporter.rs#L11) which form part of the API surface of `opentelemetry-sdk`.
There are various ways to handle errors on trait methods, including swallowing them and logging, panicking, returning a shared global error, or returning a method-specific error. We strive for consistency, and we want to be sure that we've put enough thought into what this looks like that we don't have to make breaking interface changes unnecessarily in the future.
## Design Guidance
### 1. No panics from SDK APIs
Failures during regular operation should not panic, instead returning errors to the caller where appropriate, _or_ logging an error if not appropriate.
Some of the OpenTelemetry SDK interfaces are dictated by the specification in such a way that they may not return errors.
### 2. Consolidate error types within a trait where we can, let them diverge when we can't
We aim to consolidate error types where possible _without indicating a function may return more errors than it can actually return_.
**Don't do this** - each function's signature indicates that it returns errors it will _never_ return, forcing the caller to write handlers for dead paths:
```rust
enum MegaError {
TooBig,
TooSmall,
TooLong,
TooShort
}
trait MyTrait {
// Will only ever return TooBig,TooSmall errors
fn action_one() -> Result<(), MegaError>;
// These will only ever return TooLong,TooShort errors
fn action_two() -> Result<(), MegaError>;
fn action_three() -> Result<(), MegaError>;
}
```
**Instead, do this** - each function's signature indicates only the errors it can return, providing an accurate contract to the caller:
```rust
enum ErrorOne {
TooBig,
TooSmall,
}
enum ErrorTwo {
TooLong,
TooShort
}
trait MyTrait {
fn action_one() -> Result<(), ErrorOne>;
// Action two and three share the same error type.
// We do not introduce a common error MyTraitError for all operations, as this would
// force all methods on the trait to indicate they return errors they do not return,
// complicating things for the caller.
fn action_two() -> Result<(), ErrorTwo>;
fn action_three() -> Result<(), ErrorTwo>;
}
```
### 3. Consolidate error types between signals where we can, let them diverge where we can't
Consider the `Exporter`s mentioned earlier. Each of them has the same failure indicators - as dictated by the OpenTelemetry spec - and we will
share the error types accordingly:
**Don't do this** - each signal has its own error type, despite having exactly the same failure cases:
```rust
#[derive(Error, Debug)]
pub enum OtelTraceError {
#[error("Shutdown already invoked")]
AlreadyShutdown,
#[error("Operation failed: {0}")]
InternalFailure(String),
/** ... additional errors ... **/
}
#[derive(Error, Debug)]
pub enum OtelLogError {
#[error("Shutdown already invoked")]
AlreadyShutdown,
#[error("Operation failed: {0}")]
InternalFailure(String),
/** ... additional errors ... **/
}
```
**Instead, do this** - error types are consolidated between signals where this can be done appropriately:
```rust
/// opentelemetry-sdk::error
#[derive(Error, Debug)]
pub enum OTelSdkError {
#[error("Shutdown already invoked")]
AlreadyShutdown,
#[error("Operation failed: {0}")]
InternalFailure(String),
/** ... additional errors ... **/
}
pub type OTelSdkResult = Result<(), OTelSdkError>;
/// signal-specific exporter traits all share the same
/// result types for the exporter operations.
// pub trait LogExporter {
// pub trait SpanExporter {
pub trait PushMetricExporter {
fn export(&self, /* ... */) -> OTelSdkResult;
fn force_flush(&self, /* ... */ ) -> OTelSdkResult;
fn shutdown(&self, /* ... */ ) -> OTelSdkResult;
```
If this were _not_ the case - if we needed to add an extra error variant, for instance for `LogExporter`, that the caller could reasonably handle -
we would let the error types diverge at that point.
### 4. Box custom errors where a savvy caller may be able to handle them, stringify them if not
Note above that we do not box any `Error` into `InternalFailure`. Our rule here is that if the caller cannot reasonably be expected to handle a particular error variant, we will use a simplified interface that returns only a descriptive string. In the concrete example we are using with the exporters, we have a [strong signal in the opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/sdk.md#export) that indicates that the error types _are not actionable_ by the caller.
If the caller may potentially recover from an error, we will follow the generally-accepted best practice (e.g., see [canonical's guide](https://canonical.github.io/rust-best-practices/error-and-panic-discipline.html)) and instead preserve the nested error:
**Don't do this if the OtherError is potentially recoverable by a savvy caller**:
```rust
#[derive(Debug, Error)]
pub enum MyError {
#[error("Error one occurred")]
ErrorOne,
#[error("Operation failed: {0}")]
OtherError(String),
```
**Instead, do this**, allowing the caller to match on the nested error:
```rust
#[derive(Debug, Error)]
pub enum MyError {
#[error("Error one occurred")]
ErrorOne,
#[error("Operation failed: {source}")]
OtherError {
#[from]
source: Box<dyn Error + Send + Sync>,
},
}
```
Note that at the time of writing, there is no instance we have identified within the project that has required this.
### 5. Use thiserror by default
We will use [thiserror](https://docs.rs/thiserror/latest/thiserror/) by default to implement Rust's [error trait](https://doc.rust-lang.org/core/error/trait.Error.html).
This keeps our code clean, and as it does not appear in our interface, we can choose to replace any particular usage with a hand-rolled implementation should we need to.
### 6. Don't use `#[non_exhaustive]` by default
If an `Error` response set is closed - if we can confidently say it is very unlikely to gain new variants in the future - we should not annotate it with `#[non_exhaustive]`. By way of example, the variants of the exporter error types described above are exhaustively documented in the OpenTelemetry Specification, and we can confidently say that we do not expect new variants.

5
docs/adr/README.md Normal file
View File

@ -0,0 +1,5 @@
# Architectural Decision Records
This directory contains architectural decision records made for the opentelemetry-rust project. These allow us to consolidate discussion, options, and outcomes, around key architectural decisions.
* [001 - Error Handling](001_error_handling.md)

378
docs/design/logs.md Normal file
View File

@ -0,0 +1,378 @@
# OpenTelemetry Rust Logs Design
Status:
[Development](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/document-status.md)
## Overview
[OpenTelemetry (OTel)
Logs](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/README.md)
support differs from Metrics and Traces as it does not introduce a new logging
API for end users. Instead, OTel recommends leveraging existing logging
libraries such as [log](https://crates.io/crates/log) and
[tracing](https://crates.io/crates/tracing), while providing bridges (appenders)
to route logs through OpenTelemetry.
OTel took this different approach due to the long history of existing logging
solutions. In Rust, these are [log](https://crates.io/crates/log) and
[tracing](https://crates.io/crates/tracing), and have been embraced in the
community for some time. OTel Rust maintains appenders for these libraries,
allowing users to seamlessly integrate with OpenTelemetry without changing their
existing logging instrumentation.
The `tracing` appender is particularly optimized for performance due to its
widespread adoption and the fact that `tracing` itself has a bridge from the
`log` crate. Notably, OpenTelemetry Rust itself is instrumented using `tracing`
for internal logs. Additionally, when OTel began supporting logging as a signal,
the `log` crate lacked structured logging support, reinforcing the decision to
prioritize `tracing`.
## Benefits of OpenTelemetry Logs
- **Unified configuration** across Traces, Metrics, and Logs.
- **Automatic correlation** with Traces.
- **Consistent Resource attributes** across signals.
- **Multiple destinations support**: Logs can continue flowing to existing
destinations like stdout etc. while also being sent to an
OpenTelemetry-capable backend, typically via an OTLP Exporter or exporters
that export to operating system native systems like `Windows ETW` or `Linux
user_events`.
- **Standalone logging support** for applications that use OpenTelemetry as
their primary logging mechanism.
## Key Design Principles
- High performance - no locks/contention in the hot path with minimal/no heap
allocation where possible.
- Capped resource (memory) usage - well-defined behavior when overloaded.
- Self-observable - exposes telemetry about itself to aid in troubleshooting
etc.
- Robust error handling, returning Result where possible instead of panicking.
- Minimal public API, exposing based on need only.
## Architecture Overview
```mermaid
graph TD
subgraph Application
A1[Application Code]
end
subgraph Logging Libraries
B1[log crate]
B2[tracing crate]
end
subgraph OpenTelemetry
C1[OpenTelemetry Appender for log]
C2[OpenTelemetry Appender for tracing]
C3[OpenTelemetry Logs API]
C4[OpenTelemetry Logs SDK]
C5[OTLP Exporter]
end
subgraph Observability Backend
D1[OTLP-Compatible Backend]
end
A1 --> |Emits Logs| B1
A1 --> |Emits Logs| B2
B1 --> |Bridged by| C1
B2 --> |Bridged by| C2
C1 --> |Sends to| C3
C2 --> |Sends to| C3
C3 --> |Processes with| C4
C4 --> |Exports via| C5
C5 --> |Sends to| D1
```
## Logs API
Logs API is part of the [opentelemetry](https://crates.io/crates/opentelemetry)
crate.
The OTel Logs API is not intended for direct end-user usage. Instead, it is
designed for appender/bridge authors to integrate existing logging libraries
with OpenTelemetry. However, there is nothing preventing it from being used by
end-users.
### API Components
1. **Key-Value Structs**: Used in `LogRecord`, where the `Key` struct is shared
across signals but the `Value` struct differs from Metrics and Traces. This is
because values in Logs can contain more complex structures than those in
Traces and Metrics.
2. **Traits**:
- `LoggerProvider` - provides methods to obtain Logger.
- `Logger` - provides methods to create LogRecord and emit the created
LogRecord.
- `LogRecord` - provides methods to populate LogRecord.
3. **No-Op Implementations**: By default, the API performs no operations until
an SDK is attached.
### Logs Flow
1. Obtain a `LoggerProvider` implementation.
2. Use the `LoggerProvider` to create `Logger` instances, specifying a scope
name (module/component emitting logs). Optional attributes and version are
also supported.
3. Use the `Logger` to create an empty `LogRecord` instance.
4. Populate the `LogRecord` with body, timestamp, attributes, etc.
5. Call `Logger.emit(LogRecord)` to process and export the log.
If only the Logs API is used (without an SDK), all the above steps result in no
operations, following OpenTelemetry's philosophy of separating the API from the SDK. The
official Logs SDK provides real implementations to process and export logs.
Users or vendors can also provide alternative SDK implementations.
## Logs SDK
Logs SDK is part of the
[opentelemetry_sdk](https://crates.io/crates/opentelemetry_sdk) crate.
The OpenTelemetry Logs SDK provides an OTel specification-compliant
implementation of the Logs API, handling log processing and export.
### Core Components
#### `SdkLoggerProvider`
This is the implementation of the `LoggerProvider` and deals with concerns such
as processing and exporting Logs.
- Implements the `LoggerProvider` trait.
- Creates and manages `SdkLogger` instances.
- Holds logging configuration, including `Resource` and processors.
- Does not retain a list of created loggers. Instead, it passes an owned clone
of itself to each logger created. This is done so that loggers get a hold of
the configuration (like which processor to invoke).
- Uses an `Arc<LoggerProviderInner>` and delegates all configuration to
`LoggerProviderInner`. This allows cheap cloning of itself and ensures all
clones point to the same underlying configuration.
- As `SdkLoggerProvider` only holds an `Arc` of its inner, it can only take
`&self` in its methods like flush and shutdown. Else it needs to rely on
interior mutability that comes with runtime performance costs. Since methods
like shutdown usually need to mutate interior state, but this component can
only take `&self`, it defers to components like exporter to use interior
mutability to handle shutdown. (More on this in the exporter section)
- An alternative design was to let `SdkLogger` hold a `Weak` reference to the
`SdkLoggerProvider`. This would be a `weak->arc` upgrade in every log
emission, significantly affecting throughput.
- `LoggerProviderInner` implements `Drop`, triggering `shutdown()` when no
references remain. However, in practice, loggers are often stored statically
inside appenders (like tracing-appender), so explicit shutdown by the user is
required.
#### `SdkLogger`
This is an implementation of the `Logger`, and contains functionality to create
and emit logs.
- Implements the `Logger` trait.
- Creates `SdkLogRecord` instances and emits them.
- Calls `OnEmit()` on all registered processors when emitting logs.
- Passes mutable references to each processor (`&mut log_record`), i.e.,
ownership is not passed to the processor. This ensures that the logger avoids
cloning costs. Since a mutable reference is passed, processors can modify the
log, and it will be visible to the next processor in the chain.
- Since the processor only gets a reference to the log, it cannot store it
beyond the `OnEmit()`. If a processor needs to buffer logs, it must explicitly
copy them to the heap.
- This design allows for stack-only log processing when exporting to operating
system native facilities like `Windows ETW` or `Linux user_events`.
- OTLP Exporting requires network calls (HTTP/gRPC) and batching of logs for
efficiency purposes. These exporters buffer log records by copying them to the
heap. (More on this in the BatchLogRecordProcessor section)
#### `LogRecord`
- Holds log data, including attributes.
- Uses an inline array for up to 5 attributes to optimize stack usage.
- Falls back to a heap-allocated `Vec` if more attributes are required.
- Inspired by Go's `slog` library for efficiency.
#### LogRecord Processors
`SdkLoggerProvider` allows being configured with any number of LogProcessors.
They get called in the order of registration. Log records are passed to the
`OnEmit` method of LogProcessor. LogProcessors can be used to process the log
records, enrich them, filter them, and export to destinations by leveraging
LogRecord Exporters.
Similar to [LoggerProvider](#sdkloggerprovider), methods on the `LogProcessor`
trait also take an immutable self (`&self`) only, forcing the need to use
interior mutability, if any mutation is required. The exception to this is
`set_resource`, which takes a `&mut self`. This is acceptable as `set_resource`
is called by the `SdkLoggerProvider` during build() method only, and is not
required after that.
The following built-in log processors are provided in the Log SDK:
##### SimpleLogProcessor
This processor is designed to be used for exporting purposes. Export is handled
by an Exporter (which is a separate component). SimpleLogProcessor is "simple"
in the sense that it does not attempt to do any processing - it just calls the
exporter and passes the log record to it. To comply with OTel specification, it
synchronizes calls to the `Export()` method, i.e., only one `Export()` call will
be done at any given time.
SimpleLogProcessor is only used for test/learning purposes and is often used
along with a `stdout` exporter.
##### BatchLogProcessor
This is another "exporting" processor. As with SimpleLogProcessor, a different
component named LogExporter handles the actual export logic. BatchLogProcessor
buffers/batches the logs it receives into an in-memory buffer. It invokes the
exporter every 1 second or when 512 items are in the batch (customizable). It
uses a background thread to do the export, and communication between the user
thread (where logs are emitted) and the background thread occurs with `mpsc`
channels.
The max amount of items the buffer holds is 2048 (customizable). Once the limit
is reached, any *new* logs are dropped. It *does not* apply back-pressure to the
user thread and instead drops logs.
As with SimpleLogProcessor, this component also ensures only one export is
active at a given time. A modified version of this is required to achieve higher
throughput in some environments.
In this design, at most 2048+512 logs can be in memory at any given point. In
other words, that many logs can be lost if the app crashes in the middle.
## LogExporters
LogExporters are responsible for exporting logs to a destination.
`SdkLoggerProvider` does not have direct knowledge of the `LogExporter`, as it
only deals with `LogProcessor`s. It is the `LogProcessor`s that invoke
`LogExporter` methods. Most methods on the `LogExporter` trait also only take
`&self`, following the same reasoning as [LogProcessors](#logrecord-processors).
Some of the exporters are:
1. **InMemoryExporter** - exports to an in-memory list, primarily for
unit-testing. This is used extensively in the repo itself, and external users
are also encouraged to use this.
2. **Stdout exporter** - prints telemetry to stdout. Only for debugging/learning
purposes. The output format is not defined and also is not performance
optimized. A production-recommended version with a standardized output format
is in the plan.
3. **OTLP Exporter** - OTel's official exporter which uses the OTLP protocol
that is designed with the OTel data model in mind. Both HTTP and gRPC-based
exporting is offered.
4. **Exporters to OS Kernel facilities** - These exporters are not maintained in
the core repo but are listed for completeness. They export telemetry to Windows ETW
or Linux user_events. They are designed for high-performance workloads. Due
to their nature of synchronous exporting, they do not require
buffering/batching. This allows logs to operate entirely on the stack and can
scale easily with the number of CPU cores. (Kernel uses per-CPU buffers for
the events, ensuring no contention)
## `tracing` Log Appender
Tracing appender is part of the
[opentelemetry-appender-tracing](https://crates.io/crates/opentelemetry-appender-tracing)
crate.
The `tracing` appender bridges `tracing` logs to OpenTelemetry. Logs emitted via
`tracing` macros (`info!`, `warn!`, etc.) are forwarded to OpenTelemetry through
this integration.
- `tracing` is designed for high performance, using *layers* or *subscribers* to
handle emitted logs (events).
- The appender implements a `Layer`, receiving logs from `tracing`.
- Uses the OTel Logs API to create `LogRecord`, populate it, and emit it via
`Logger.emit(LogRecord)`.
- If no Logs SDK is present, the process is a no-op.
Note on terminology: Within OpenTelemetry, "tracing" refers to distributed
tracing (i.e., the creation of Spans) and not in-process structured logging and
execution traces. The "tracing" crate has a notion of creating Spans as well as
Events. The Events from the "tracing" crate are what get converted to OTel Logs
when using this appender. Spans created using the "tracing" crate are not
handled by this crate.
## Performance
// Call out things done specifically for performance
// Rough draft
1. `LogRecord` is stack allocated and not Boxed unless required by the component
needing to store it beyond the logging call. (eg: BatchProcessor)
2. LogRecord's attribute storage is a specially designed struct that holds up to
five attributes on the stack.
3. When passing `LogRecord`s to processor, a mutable ref is passed. This allows
calling multiple processors one after another, without the need for cloning.
4. `Logger` provides an `Enabled` check which can optimize performance when
no-one is interested in the log. The check is passed from `Logger` to the
processor, which may consult its exporter to make the decision. An example use
case - an ETW or user-events exporter can check for the presence of listener and
convey that decision back to logger, allowing appender to avoid even the cost of
creating a `LogRecord` in the first place if there is no listener. This check is
done for each log emission, and can react dynamically to changes in interest, by
enabling/disabling ETW/user-event listener.
5. `tracing` has a notion of "target", which is expected to be mapped to OTel's
concept of Instrumentation Scope for Logs, when `OpenTelemetry-Tracing-Appender`
bridges `tracing` to OpenTelemetry. Since scopes are tied to Loggers, a naive
approach would require creating a separate logger for each unique target. This
would necessitate an RWLock-protected HashMap lookup, introducing contention and
reducing throughput. To avoid this, `OpenTelemetry-Tracing-Appender` instead
stores the target directly in the LogRecord as a top-level field, ensuring fast
access in the hot path. Components processing the LogRecord can retrieve the
target via LogRecord.target(), treating it as the scope. The OTLP Exporter
already handles this automatically, so end-users will see “target” reflected in
the Instrumentation Scope. An alternative design would be to use thread-local
HashMaps - but it can cause increased memory usage, as there can be 100s of
unique targets. (because `tracing` defaults to using module path as target).
### Perf test - benchmarks
// Share ~~ numbers
### Perf test - stress test
// Share ~~ numbers
## Internal logs
OTel itself is instrumented with `tracing` crate to emit internal logs about its
operations. This is feature gated under "internal-logs", and is enabled by
default for all components. The `opentelemetry` crate provides a few helper macros
`otel_warn` etc., which in turn invokes various `tracing` macros like `warn!`
etc. The cargo package name will be set as `target` when using `tracing`. For
example, logs from `opentelemetry-otlp` will have target set to
"opentelemetry-otlp".
The helper macros are part of the public API, so they can be used by anyone. However,
they are only meant for OTel components themselves and anyone writing extensions
like custom Exporters etc.
// TODO: Document the principles followed when selecting severity for internal
logs
When OpenTelemetry components generate logs that could potentially feed back
into OpenTelemetry, this can result in what is known as "telemetry-induced
telemetry." To address this, OpenTelemetry provides a mechanism to suppress such
telemetry using the `Context`. Components are expected to mark telemetry as
suppressed within a specific `Context` by invoking
`Context::enter_telemetry_suppressed_scope()`. The Logs SDK implementation
checks this flag in the current `Context` and ignores logs if suppression is
enabled.
This mechanism relies on proper in-process propagation of the `Context`.
However, external libraries like `hyper` and `tonic`, which are used by
OpenTelemetry in its OTLP Exporters, do not propagate OpenTelemetry's `Context`.
As a result, the suppression mechanism does not work out-of-the-box to suppress
logs originating from these libraries.
// TODO: Document how OTLP can solve this issue without asking external
crates to respect and propagate OTel Context.
## Summary
- OpenTelemetry Logs does not provide a user-facing logging API.
- Instead, it integrates with existing logging libraries (`log`, `tracing`).
- The Logs API defines key traits but performs no operations unless an SDK is
installed.
- The Logs SDK enables log processing, transformation, and export.
- The Logs SDK is performance optimized to minimize copying and heap allocation,
wherever feasible.
- The `tracing` appender efficiently routes logs to OpenTelemetry without
modifying existing logging workflows.

6
docs/design/metrics.md Normal file
View File

@ -0,0 +1,6 @@
# OpenTelemetry Rust Metrics Design
Status:
[Development](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/document-status.md)
TODO:

6
docs/design/traces.md Normal file
View File

@ -0,0 +1,6 @@
# OpenTelemetry Rust Traces Design
Status:
[Development](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/document-status.md)
TODO:

767
docs/metrics.md Normal file
View File

@ -0,0 +1,767 @@
# OpenTelemetry Rust Metrics
Status: **Work-In-Progress**
<details>
<summary>Table of Contents</summary>
* [Introduction](#introduction)
* [Best Practices](#best-practices)
* [Metrics API](#metrics-api)
* [Meter](#meter)
* [Instruments](#instruments)
* [Reporting measurements - use array slices for
attributes](#reporting-measurements---use-array-slices-for-attributes)
* [Reporting measurements via synchronous
instruments](#reporting-measurements-via-synchronous-instruments)
* [Reporting measurements via asynchronous
instruments](#reporting-measurements-via-asynchronous-instruments)
* [MeterProvider Management](#meterprovider-management)
* [Memory Management](#memory-management)
* [Example](#example)
* [Pre-Aggregation](#pre-aggregation)
* [Pre-Aggregation Benefits](#pre-aggregation-benefits)
* [Cardinality Limits](#cardinality-limits)
* [Cardinality Limits - Implications](#cardinality-limits---implications)
* [Cardinality Limits - Example](#cardinality-limits---example)
* [Memory Preallocation](#memory-preallocation)
* [Metrics Correlation](#metrics-correlation)
* [Modelling Metric Attributes](#modelling-metric-attributes)
* [Common Issues Leading to Missing
Metrics](#common-issues-that-lead-to-missing-metrics)
</details>
## Introduction
This document provides comprehensive guidance on leveraging OpenTelemetry
metrics in Rust applications. Whether you're tracking request counts, monitoring
response times, or analyzing resource utilization, this guide equips you with
the knowledge to implement robust and efficient metrics collection.
It covers best practices, API usage patterns, memory management techniques, and
advanced topics to help you design effective metrics solutions while steering
clear of common challenges.
## Best Practices
// TODO: Add link to the examples, once they are modified to show best
practices.
## Metrics API
### Meter
[Meter](https://docs.rs/opentelemetry/latest/opentelemetry/metrics/struct.Meter.html)
provides the ability to create instruments for recording measurements or
accepting callbacks to report measurements.
:stop_sign: You should avoid creating duplicate
[`Meter`](https://docs.rs/opentelemetry/latest/opentelemetry/metrics/struct.Meter.html)
instances with the same name. `Meter` is fairly expensive and meant to be reused
throughout the application. For most applications, a `Meter` should be obtained
from `global` and saved for re-use.
> [!IMPORTANT] Create your `Meter` instance once at initialization time and
> store it for reuse throughout your application's lifecycle.
The fully qualified module name might be a good option for the Meter name.
Optionally, one may create a meter with version, schema_url, and additional
meter-level attributes as well. Both approaches are demonstrated below.
```rust
use opentelemetry::global;
use opentelemetry::InstrumentationScope;
use opentelemetry::KeyValue;
let scope = InstrumentationScope::builder("my_company.my_product.my_library")
.with_version("0.17")
.with_schema_url("https://opentelemetry.io/schema/1.2.0")
.with_attributes([KeyValue::new("key", "value")])
.build();
// creating Meter with InstrumentationScope, comprising of
// name, version, schema and attributes.
let meter = global::meter_with_scope(scope);
// creating Meter with just name
let meter = global::meter("my_company.my_product.my_library");
```
### Instruments
OpenTelemetry defines several types of metric instruments, each optimized for
specific usage patterns. The following table maps OpenTelemetry Specification
instruments to their corresponding Rust SDK types.
:heavy_check_mark: You should understand and pick the right instrument type.
> [!NOTE] Picking the right instrument type for your use case is crucial to
> ensure the correct semantics and performance. Check the [Instrument
Selection](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/supplementary-guidelines.md#instrument-selection)
section from the supplementary guidelines for more information.
| OpenTelemetry Specification | OpenTelemetry Rust Instrument Type |
| --------------------------- | -------------------- |
| [Asynchronous Counter](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#asynchronous-counter) | [`ObservableCounter`](https://docs.rs/opentelemetry/latest/opentelemetry/metrics/struct.ObservableCounter.html) |
| [Asynchronous Gauge](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#asynchronous-gauge) | [`ObservableGauge`](https://docs.rs/opentelemetry/latest/opentelemetry/metrics/struct.ObservableGauge.html) |
| [Asynchronous UpDownCounter](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#asynchronous-updowncounter) | [`ObservableUpDownCounter`](https://docs.rs/opentelemetry/latest/opentelemetry/metrics/struct.ObservableUpDownCounter.html) |
| [Counter](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#counter) | [`Counter`](https://docs.rs/opentelemetry/latest/opentelemetry/metrics/struct.Counter.html) |
| [Gauge](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#gauge) | [`Gauge`](https://docs.rs/opentelemetry/latest/opentelemetry/metrics/struct.Gauge.html) |
| [Histogram](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#histogram) | [`Histogram`](https://docs.rs/opentelemetry/latest/opentelemetry/metrics/struct.Histogram.html) |
| [UpDownCounter](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#updowncounter) | [`UpDownCounter`](https://docs.rs/opentelemetry/latest/opentelemetry/metrics/struct.UpDownCounter.html) |
:stop_sign: You should avoid creating duplicate instruments (e.g., `Counter`)
with the same name. Instruments are fairly expensive and meant to be reused
throughout the application. For most applications, an instrument should be
created once and saved for re-use. Instruments can also be cloned to create
multiple handles to the same instrument, but cloning should not occur on the hot
path. Instead, the cloned instance should be stored and reused.
:stop_sign: Do NOT use invalid instrument names.
> [!NOTE] OpenTelemetry will not collect metrics from instruments that are using
> invalid names. Refer to the [OpenTelemetry
Specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#instrument-name-syntax)
for the valid syntax.
:stop_sign: You should avoid changing the order of attributes while reporting
measurements.
> [!WARNING] The last line of code has bad performance since the attributes are
> not following the same order as before:
```rust
let counter = meter.u64_counter("fruits_sold").build();
counter.add(2, &[KeyValue::new("color", "red"), KeyValue::new("name", "apple")]);
counter.add(3, &[KeyValue::new("color", "green"), KeyValue::new("name", "lime")]);
counter.add(5, &[KeyValue::new("color", "yellow"), KeyValue::new("name", "lemon")]);
counter.add(8, &[KeyValue::new("name", "lemon"), KeyValue::new("color", "yellow")]); // bad performance
```
:heavy_check_mark: If feasible, provide the attributes sorted by `Key`s in
ascending order to minimize memory usage within the Metrics SDK. Using
consistent attribute ordering allows the SDK to efficiently reuse internal data
structures.
```rust
// Good practice: Consistent attribute ordering
let counter = meter.u64_counter("fruits_sold").build();
counter.add(2, &[KeyValue::new("color", "red"), KeyValue::new("name", "apple")]);
```
### Reporting measurements - use array slices for attributes
:heavy_check_mark: When reporting measurements, use array slices for attributes
rather than creating vectors. Arrays are more efficient as they avoid
unnecessary heap allocations on the measurement path. This is true for both
synchronous and observable instruments.
```rust
// Good practice: Using an array slice directly
counter.add(2, &[KeyValue::new("color", "red"), KeyValue::new("name", "apple")]);
// Observable instrument
let _observable_counter = meter
.u64_observable_counter("request_count")
.with_description("Counts HTTP requests")
.with_unit("requests") // Optional: Adding units improves context
.with_callback(|observer| {
// Good practice: Using an array slice directly
observer.observe(
100,
&[KeyValue::new("endpoint", "/api")]
)
})
.build();
// Avoid this: Creating a Vec is unnecessary, and it allocates on the heap each time
// counter.add(2, &vec![KeyValue::new("color", "red"), KeyValue::new("name", "apple")]);
```
### Reporting measurements via synchronous instruments
:heavy_check_mark: Use synchronous Counter when you need to increment counts at
specific points in your code:
```rust
// Example: Using Counter when incrementing at specific code points
use opentelemetry::KeyValue;
fn process_item(counter: &opentelemetry::metrics::Counter<u64>, item_type: &str) {
// Process item...
// Increment the counter with the item type as an attribute
counter.add(1, &[KeyValue::new("type", item_type)]);
}
```
### Reporting measurements via asynchronous instruments
Asynchronous instruments like `ObservableCounter` are ideal for reporting
metrics that are already being tracked or stored elsewhere in your application.
These instruments allow you to observe and report the current state of such
metric.
:heavy_check_mark: Use `ObservableCounter` when you already have a variable
tracking a count:
```rust
// Example: Using ObservableCounter when you already have a variable tracking counts
use opentelemetry::KeyValue;
use std::sync::atomic::{AtomicU64, Ordering};
// An existing variable in your application
static REQUEST_COUNT: AtomicU64 = AtomicU64::new(0);
// In your application code, you update this directly
fn handle_request() {
// Process request...
REQUEST_COUNT.fetch_add(1, Ordering::SeqCst);
}
// When setting up metrics, register an observable counter that reads from your variable
fn setup_metrics(meter: &opentelemetry::metrics::Meter) {
let _observable_counter = meter
.u64_observable_counter("request_count")
.with_description("Number of requests processed")
.with_unit("requests")
.with_callback(|observer| {
// Read the current value from your existing counter
observer.observe(
REQUEST_COUNT.load(Ordering::SeqCst),
&[KeyValue::new("endpoint", "/api")]
)
})
.build();
}
```
> [!NOTE] The callbacks in the Observable instruments are invoked by the SDK
> during each export cycle.
## MeterProvider Management
Most use-cases require you to create ONLY one instance of MeterProvider. You
should NOT create multiple instances of MeterProvider unless you have some
unusual requirement of having different export strategies within the same
application. Using multiple instances of MeterProvider requires users to
exercise caution.
// TODO: Mention creating a per-thread MeterProvider, as shown in
[this PR](https://github.com/open-telemetry/opentelemetry-rust/pull/2659)
:heavy_check_mark: Properly manage the lifecycle of `MeterProvider` instances if
you create them. Creating a MeterProvider is typically done at application
startup. Follow these guidelines:
* **Cloning**: A `MeterProvider` is a handle to an underlying provider. Cloning
  it creates a new handle pointing to the same provider. Clone the
  `MeterProvider` when necessary, but re-use the cloned instance instead of
  repeatedly cloning.
* **Set as Global Provider**: Use `opentelemetry::global::set_meter_provider` to
set a clone of the `MeterProvider` as the global provider. This ensures
consistent usage across the application, allowing applications and libraries
to obtain `Meter` from the global instance.
* **Shutdown**: Explicitly call `shutdown` on the `MeterProvider` at the end of
your application to ensure all metrics are properly flushed and exported.
> [!NOTE] If you did not use `opentelemetry::global::set_meter_provider` to set
> a clone of the `MeterProvider` as the global provider, then you should be
> aware that dropping the last instance of `MeterProvider` implicitly calls
> shutdown on the provider.
:heavy_check_mark: Always call `shutdown` on the `MeterProvider` at the end of
your application to ensure proper cleanup.
## Memory Management
In OpenTelemetry,
[measurements](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#measurement)
are reported via the metrics API. The SDK
[aggregates](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#aggregation)
metrics using certain algorithms and memory management strategies to achieve
good performance and efficiency. Here are the rules which OpenTelemetry Rust
follows while implementing the metrics aggregation logic:
1. [**Pre-Aggregation**](#pre-aggregation): aggregation occurs within the SDK.
2. [**Cardinality Limits**](#cardinality-limits): the aggregation logic respects
[cardinality
limits](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#cardinality-limits),
so the SDK does not use an indefinite amount of memory in the event of a
cardinality explosion.
3. [**Memory Preallocation**](#memory-preallocation): SDK tries to pre-allocate
the memory it needs at each instrument creation time.
### Example
Let us take the following example of OpenTelemetry Rust metrics being used to
track the number of fruits sold:
* During the time range (T0, T1]:
* value = 1, color = `red`, name = `apple`
* value = 2, color = `yellow`, name = `lemon`
* During the time range (T1, T2]:
* no fruit has been sold
* During the time range (T2, T3]:
* value = 5, color = `red`, name = `apple`
* value = 2, color = `green`, name = `apple`
* value = 4, color = `yellow`, name = `lemon`
* value = 2, color = `yellow`, name = `lemon`
* value = 1, color = `yellow`, name = `lemon`
* value = 3, color = `yellow`, name = `lemon`
### Example - Cumulative Aggregation Temporality
If we aggregate and export the metrics using [Cumulative Aggregation
Temporality](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#temporality):
* (T0, T1]
* attributes: {color = `red`, name = `apple`}, count: `1`
* attributes: {color = `yellow`, name = `lemon`}, count: `2`
* (T0, T2]
* attributes: {color = `red`, name = `apple`}, count: `1`
* attributes: {color = `yellow`, name = `lemon`}, count: `2`
* (T0, T3]
* attributes: {color = `red`, name = `apple`}, count: `6`
* attributes: {color = `green`, name = `apple`}, count: `2`
* attributes: {color = `yellow`, name = `lemon`}, count: `12`
Note that the start time is not advanced, and the exported values are the
cumulative total of what happened since the beginning.
### Example - Delta Aggregation Temporality
If we aggregate and export the metrics using [Delta Aggregation
Temporality](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#temporality):
* (T0, T1]
* attributes: {color = `red`, name = `apple`}, count: `1`
* attributes: {color = `yellow`, name = `lemon`}, count: `2`
* (T1, T2]
* nothing since we do not have any measurement received
* (T2, T3]
* attributes: {color = `red`, name = `apple`}, count: `5`
* attributes: {color = `green`, name = `apple`}, count: `2`
* attributes: {color = `yellow`, name = `lemon`}, count: `10`
Note that the start time is advanced after each export, and only the delta since
last export is exported, allowing the SDK to "forget" previous state.
### Pre-Aggregation
Rather than exporting every individual measurement to the backend, OpenTelemetry
Rust aggregates data locally and only exports the aggregated metrics.
Using the [fruit example](#example), there are six measurements reported during
the time range `(T2, T3]`. Instead of exporting each individual measurement
event, the SDK aggregates them and exports only the summarized results. This
summarization process, illustrated in the following diagram, is known as
pre-aggregation:
```mermaid
graph LR
subgraph SDK
Instrument --> | Measurements | Pre-Aggregation[Pre-Aggregation]
end
subgraph Collector
Aggregation
end
Pre-Aggregation --> | Metrics | Aggregation
```
In addition to the in-process aggregation performed by the OpenTelemetry Rust
Metrics SDK, further aggregations can be carried out by the Collector and/or the
metrics backend.
### Pre-Aggregation Benefits
Pre-aggregation offers several advantages:
1. **Reduced Data Volume**: Summarizes measurements before export, minimizing
network overhead and improving efficiency.
2. **Predictable Resource Usage**: Ensures consistent resource consumption by
applying [cardinality limits](#cardinality-limits) and [memory
preallocation](#memory-preallocation) during SDK initialization. In other
words, metrics memory/network usage remains capped, regardless of the volume
of measurements being made. This ensures that resource utilization remains
stable despite fluctuations in traffic volume.
3. **Improved Performance**: Reduces serialization costs as we work with
aggregated data and not the numerous individual measurements. It also reduces
computational load on downstream systems, enabling them to focus on analysis
and storage.
> [!NOTE] There is no ability to opt out of pre-aggregation in OpenTelemetry.
### Cardinality Limits
The number of distinct combinations of attributes for a given metric is referred
to as the cardinality of that metric. Taking the [fruit example](#example), if
we know that we can only have apple/lemon as the name, red/yellow/green as the
color, then we can say the cardinality is 6 (i.e., 2 names × 3 colors = 6
combinations). No matter how many fruits we sell, we can always use the
following table to summarize the total number of fruits based on the name and
color.
| Color | Name | Count |
| ------ | ----- | ----- |
| red | apple | 6 |
| yellow | apple | 0 |
| green | apple | 2 |
| red | lemon | 0 |
| yellow | lemon | 12 |
| green | lemon | 0 |
In other words, we know how much memory and network are needed to collect and
transmit these metrics, regardless of the traffic pattern or volume.
In real world applications, the cardinality can be extremely high. Imagine if we
have a long running service and we collect metrics with 7 attributes and each
attribute can have 30 different values. We might eventually end up having to
remember the complete set of 30⁷ - or 21.87 billion combinations! This
cardinality explosion is a well-known challenge in the metrics space. For
example, it can cause:
* Surprisingly high costs in the observability system
* Excessive memory consumption in your application
* Poor query performance in your metrics backend
* Potential denial-of-service vulnerability that could be exploited by bad
actors
[Cardinality
limit](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#cardinality-limits)
is a throttling mechanism which allows the metrics collection system to have a
predictable and reliable behavior when there is a cardinality explosion, be it
due to a malicious attack or developer making mistakes while writing code.
OpenTelemetry has a default cardinality limit of `2000` per metric. This limit
can be configured at the individual metric level using the [View
API](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#view)
leveraging the
[`cardinality_limit`](https://docs.rs/opentelemetry_sdk/latest/opentelemetry_sdk/metrics/struct.Stream.html#structfield.cardinality_limit)
setting.
It's important to understand that this cardinality limit applies only at the
OpenTelemetry SDK level, not to the ultimate cardinality of the metric as seen
by the backend system. For example, while a single process might be limited to
2000 attribute combinations per metric, the actual backend metrics system might
see much higher cardinality due to:
1. Resource attributes (such as `service.instance.id`, `host.name`, etc.) that
can be added to each metric
2. Multiple process instances running the same application across your
infrastructure
3. The possibility of reporting different key-value pair combinations in each
export interval, as the cardinality limit only applies to the number of
distinct attribute combinations tracked during a single export interval.
(This is only applicable to Delta temporality)
Therefore, the actual cardinality in your metrics backend can be orders of
magnitude higher than what any single OpenTelemetry SDK process handles in an
export cycle.
#### Cardinality Limits - Implications
Cardinality limits are enforced for each export interval, meaning the metrics
aggregation system only allows up to the configured cardinality limit of
distinct attribute combinations per metric. Understanding how this works in
practice is important:
* **Cardinality Capping**: When the limit is reached within an export interval,
any new attribute combination is not individually tracked but instead folded
into a single aggregation with the attribute `{"otel.metric.overflow": true}`.
This preserves the overall accuracy of aggregates (such as Sum, Count, etc.)
even though information about specific attribute combinations is lost. Every
measurement is accounted for - either with its original attributes or within
the overflow bucket.
* **Temporality Effects**: The impact of cardinality limits differs based on the
temporality mode:
* **Delta Temporality**: The SDK "forgets" the state after each
collection/export cycle. This means in each new interval, the SDK can track
up to the cardinality limit of distinct attribute combinations. Over time,
your metrics backend might see far more than the configured limit of
distinct combinations from a single process.
* **Cumulative Temporality**: Since the SDK maintains state across export
intervals, once the cardinality limit is reached, new attribute combinations
will continue to be folded into the overflow bucket. The total number of
distinct attribute combinations exported cannot exceed the cardinality limit
for the lifetime of that metric instrument.
* **Impact on Monitoring**: While cardinality limits protect your system from
unbounded resource consumption, they do mean that high-cardinality attributes
may not be fully represented in your metrics. Since cardinality capping can
cause metrics to be folded into the overflow bucket, it becomes impossible to
predict which specific attribute combinations were affected across multiple
collection cycles or different service instances.
This unpredictability creates several important considerations when querying
metrics in any backend system:
* **Total Accuracy**: OpenTelemetry Metrics always ensures the total
aggregation (sum of metric values across all attributes) remains accurate,
even when overflow occurs.
* **Attribute-Based Query Limitations**: Any metric query based on specific
attributes could be misleading, as it's possible that measurements recorded
with a superset of those attributes were folded into the overflow bucket due
to cardinality capping.
* **All Attributes Affected**: When overflow occurs, it's not just
high-cardinality attributes that are affected. The entire attribute
combination is replaced with the `{"otel.metric.overflow": true}` attribute,
meaning queries for any attribute in that combination will miss data points.
#### Cardinality Limits - Example
Extending our fruit sales tracking example, imagine we set a cardinality limit
of 3 and we're tracking sales with attributes for `name`, `color`, and
`store_location`:
During a busy sales period at time (T3, T4], we record:
1. 10 red apples sold at Downtown store
2. 5 yellow lemons sold at Uptown store
3. 8 green apples sold at Downtown store
4. 3 red apples sold at Midtown store (at this point, the cardinality limit is
hit, and attributes are replaced with overflow attribute.)
The exported metrics would be:
* attributes: {color = `red`, name = `apple`, store_location = `Downtown`},
count: `10`
* attributes: {color = `yellow`, name = `lemon`, store_location = `Uptown`},
count: `5`
* attributes: {color = `green`, name = `apple`, store_location = `Downtown`},
count: `8`
* attributes: {`otel.metric.overflow` = `true`}, count: `3` ← Notice this
special overflow attribute
If we later query "How many red apples were sold?" the answer would be 10, not
13, because the Midtown sales were folded into the overflow bucket. Similarly,
queries about "How many items were sold in Midtown?" would return 0, not 3.
However, the total count across all attributes (i.e., the total number of
fruits sold in (T3, T4], which is 26) would still be accurate.
This limitation applies regardless of whether the attribute in question is
naturally high-cardinality. Even low-cardinality attributes like "color"
become unreliable for querying if they were part of attribute combinations
that triggered overflow.
OpenTelemetry's cardinality capping is only applied to attributes provided
when reporting measurements via the [Metrics API](#metrics-api). In other
words, attributes used to create `Meter` or `Resource` attributes are not
subject to this cap.
#### Cardinality Limits - How to Choose the Right Limit
Choosing the right cardinality limit is crucial for maintaining efficient memory
usage and predictable performance in your metrics system. The optimal limit
depends on your temporality choice and application characteristics.
Setting the limit incorrectly can have consequences:
* **Limit too high**: Due to the SDK's [memory
preallocation](#memory-preallocation) strategy, excess memory will be
allocated upfront and remain unused, leading to resource waste.
* **Limit too low**: Measurements will be folded into the overflow bucket
(`{"otel.metric.overflow": true}`), losing granular attribute information and
making attribute-based queries unreliable.
Consider these guidelines when determining the appropriate limit:
##### Choosing the Right Limit for Cumulative Temporality
Cumulative metrics retain every unique attribute combination that has *ever*
been observed since the start of the process.
* You must account for the theoretical maximum number of attribute combinations.
* This can be estimated by multiplying the number of possible values for each
attribute.
* If certain attribute combinations are invalid or will never occur in practice,
you can reduce the limit accordingly.
###### Example - Fruit Sales Scenario
Attributes:
* `name` can be "apple" or "lemon" (2 values)
* `color` can be "red", "yellow", or "green" (3 values)
The theoretical maximum is 2 × 3 = 6 unique attribute sets.
For this example, the simplest approach is to use the theoretical maximum and **set the cardinality limit to 6**.
However, if you know that certain combinations will never occur (for example, if "red lemons" don't exist in your application domain), you could reduce the limit to only account for valid combinations. In this case, if only 5 combinations are valid, **setting the cardinality limit to 5** would be more memory-efficient.
##### Choosing the Right Limit for Delta Temporality
Delta metrics reset their aggregation state after every export interval. This
approach enables more efficient memory utilization by focusing only on attributes
observed during each interval rather than maintaining state for all combinations.
* **When attributes are low-cardinality** (as in the fruit example), use the
same calculation method as with cumulative temporality.
* **When high-cardinality attribute(s) exist** like `user_id`, leverage Delta
temporality's "forget state" nature to set a much lower limit based on active
usage patterns. This is where Delta temporality truly excels - when the set of
active values changes dynamically and only a small subset is active during any
given interval.
###### Example - High Cardinality Attribute Scenario
Export interval: 60 sec
Attributes:
* `user_id` (up to 1 million unique users)
* `success` (true or false, 2 values)
Theoretical limit: 1 million users × 2 = 2 million attribute sets
But if only 10,000 users are typically active during a 60 sec export interval:
10,000 × 2 = 20,000
**You can set the limit to 20,000, dramatically reducing memory usage during
normal operation.**
###### Export Interval Tuning
Shorter export intervals further reduce the required cardinality:
* If your interval is halved (e.g., from 60 sec to 30 sec), the number of unique
attribute sets seen per interval may also be halved.
> [!NOTE] More frequent exports increase CPU/network overhead due to
> serialization and transmission costs.
##### Choosing the Right Limit - Backend Considerations
While delta temporality offers certain advantages for cardinality management,
your choice may be constrained by backend support:
* **Backend Restrictions:** Some metrics backends only support cumulative
temporality. For example, Prometheus requires cumulative temporality and
cannot directly consume delta metrics.
* **Collector Conversion:** To leverage delta temporality's memory advantages
while maintaining backend compatibility, configure your SDK to use delta
temporality and deploy an OpenTelemetry Collector with a delta-to-cumulative
conversion processor. This approach pushes the memory overhead from your
application to the collector, which can be more easily scaled and managed
independently.
TODO: Add the memory cost incurred by each data point, so users can know the
memory impact of setting a higher limit.
TODO: Add example of how query can be affected when overflow occurs, use
[Aspire](https://github.com/dotnet/aspire/pull/7784) tool.
### Memory Preallocation
OpenTelemetry Rust SDK aims to avoid memory allocation on the hot code path.
When this is combined with [proper use of Metrics API](#metrics-api), heap
allocation can be avoided on the hot code path.
## Metrics Correlation
Including `TraceId` and `SpanId` as attributes in metrics might seem like an
intuitive way to achieve correlation with traces or logs. However, this approach
is ineffective and can make metrics practically unusable. Moreover, it can
quickly lead to cardinality issues, resulting in metrics being capped.
A better alternative is to use a concept in OpenTelemetry called
[Exemplars](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar).
Exemplars provide a mechanism to correlate metrics with traces by sampling
specific measurements and attaching trace context to them.
> [!NOTE] Currently, exemplars are not yet implemented in the OpenTelemetry Rust
> SDK.
## Modelling Metric Attributes
When metrics are being collected, they normally get stored in a [time series
database](https://en.wikipedia.org/wiki/Time_series_database). From storage and
consumption perspective, metrics can be multi-dimensional. Taking the [fruit
example](#example), there are two attributes - "name" and "color". For basic
scenarios, all the attributes can be reported during the [Metrics
API](#metrics-api) invocation, however, for less trivial scenarios, the
attributes can come from different sources:
* [Measurements](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#measurement)
reported via the [Metrics API](#metrics-api).
* Additional attributes provided at meter creation time via
[`meter_with_scope`](https://docs.rs/opentelemetry/latest/opentelemetry/metrics/trait.MeterProvider.html#tymethod.meter_with_scope).
* [Resources](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md)
configured at the `MeterProvider` level.
* Additional attributes provided by the collector. For example, [jobs and
instances](https://prometheus.io/docs/concepts/jobs_instances/) in Prometheus.
### Best Practices for Modeling Attributes
Follow these guidelines when deciding where to attach metric attributes:
* **For static attributes** (constant throughout the process lifetime):
* **Resource-level attributes**: If the dimension applies to all metrics
(e.g., hostname, datacenter), model it as a Resource attribute, or better
yet, let the collector add these automatically.
```rust
// Example: Setting resource-level attributes
let resource = Resource::new(vec![
KeyValue::new("service.name", "payment-processor"),
KeyValue::new("deployment.environment", "production"),
]);
```
* **Meter-level attributes**: If the dimension applies only to a subset of
metrics (e.g., library version), model it as meter-level attributes via
`meter_with_scope`.
```rust
// Example: Setting meter-level attributes
let scope = InstrumentationScope::builder("payment_library")
.with_version("1.2.3")
.with_attributes([KeyValue::new("payment.gateway", "stripe")])
.build();
let meter = global::meter_with_scope(scope);
```
* **For dynamic attributes** (values that change during execution):
* Report these via the Metrics API with each measurement.
* Be mindful that [cardinality limits](#cardinality-limits) apply to these
attributes.
```rust
// Example: Using dynamic attributes with each measurement
counter.add(1, &[
KeyValue::new("customer.tier", customer.tier),
KeyValue::new("transaction.status", status.to_string()),
]);
```
## Common issues that lead to missing metrics
Common pitfalls that can result in missing metrics include:
1. **Invalid instrument names** - OpenTelemetry will not collect metrics from
instruments using invalid names. See the [specification for valid
syntax](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#instrument-name-syntax).
2. **Not calling `shutdown` on the MeterProvider** - Ensure you properly call
`shutdown` at application termination to flush any pending metrics.
3. **Cardinality explosion** - When too many unique attribute combinations are
used, some metrics may be placed in the overflow bucket.
// TODO: Add more specific examples
## References
[OTel Metrics Specification - Supplementary Guidelines](https://opentelemetry.io/docs/specs/otel/metrics/supplementary-guidelines/)

187
docs/migration_0.28.md Normal file
View File

@ -0,0 +1,187 @@
# Migration guide from 0.27 to 0.28
OpenTelemetry Rust 0.28 introduces a large number of breaking changes that
impact all signals (logs/metrics/traces). This guide is intended to help with a
smooth migration for the common use cases of using `opentelemetry`,
`opentelemetry_sdk`, `opentelemetry-otlp`, and `opentelemetry-appender-tracing`
crates. The detailed changelog for each crate that you use can be consulted for
the full set of changes. This doc covers only the common scenario.
## Tracing Shutdown changes
`opentelemetry::global::shutdown_tracer_provider()` is removed. Now, you should
explicitly call shutdown() on the created tracer provider.
Before (0.27):
```rust
opentelemetry::global::shutdown_tracer_provider();
```
After (0.28):
```rust
let tracer_provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
.build();
// Clone and set the tracer provider globally. Retain the original to invoke shutdown later.
opentelemetry::global::set_tracer_provider(tracer_provider.clone());
// Shutdown the provider when application is exiting.
tracer_provider.shutdown();
```
This now makes shutdown consistent across signals.
## Rename SDK Structs
`LoggerProvider`, `TracerProvider` are renamed to `SdkLoggerProvider` and
`SdkTracerProvider` respectively. `MeterProvider` was already named
`SdkMeterProvider` and this now ensures consistency across signals.
### Async Runtime Requirements removed
When using the OTLP Exporter for Logs and Traces, a "batching" exporter is
recommended. Also, Metrics always required a component named `PeriodicReader`.
These components previously required the user to pass in an async runtime and
enable the appropriate feature flag depending on the runtime.
These components have been re-written to no longer require an async runtime.
Instead, they operate by spawning a dedicated background thread and making
blocking calls from it.
PeriodicReader, BatchSpanProcessor, BatchLogProcessor are the components
affected.
For Logs and Traces, replace `.with_batch_exporter(exporter, runtime::Tokio)` with
`.with_batch_exporter(exporter)`.
For Metrics, replace `let reader =
PeriodicReader::builder(exporter, runtime::Tokio).build();` with `let reader =
PeriodicReader::builder(exporter).build();` or more conveniently,
`.with_periodic_exporter(exporter)`.
Please note the following:
* With the new approach, only the following grpc/http clients are supported in
`opentelemetry-otlp`.
`grpc-tonic` (OTLP
Exporter must be created from within a Tokio runtime)
`reqwest-blocking-client`
In other words,
`reqwest` and `hyper` are not supported.
If using exporters other than `opentelemetry-otlp`, consult the docs
for the same to know if there are any restrictions/requirements on async
runtime.
* Timeout enforcement is now moved to the Exporters, i.e.,
BatchProcessor and PeriodicReader no longer enforce timeouts. For logs and traces,
`max_export_timeout` (on Processors) or `OTEL_BLRP_EXPORT_TIMEOUT` or
`OTEL_BSP_EXPORT_TIMEOUT` is no longer supported. For metrics, `with_timeout` on
PeriodicReader is no longer supported.
`OTEL_EXPORTER_OTLP_TIMEOUT` can be used to setup timeout for OTLP Exporters
via environment variables, or `.with_tonic().with_timeout()` or
`.with_http().with_timeout()` programmatically.
* If you need the old behavior (your application cannot spawn a new thread, or
need to use another networking client etc.) use appropriate feature flag(s) from
below.
"experimental_metrics_periodicreader_with_async_runtime"
"experimental_logs_batch_log_processor_with_async_runtime"
"experimental_trace_batch_span_processor_with_async_runtime"
**and** adjust the namespace:
Example, when using Tokio runtime.
```rust
let reader = opentelemetry_sdk::metrics::periodic_reader_with_async_runtime::PeriodicReader::builder(exporter, runtime::Tokio).build();
let tracer_provider = SdkTracerProvider::builder()
.with_span_processor(span_processor_with_async_runtime::BatchSpanProcessor::builder(exporter, runtime::Tokio).build())
.build();
let logger_provider = SdkLoggerProvider::builder()
.with_log_processor(log_processor_with_async_runtime::BatchLogProcessor::builder(exporter, runtime::Tokio).build())
.build();
```
## OTLP Default change
"grpc-tonic" feature flag is no longer enabled by default in
`opentelemetry-otlp`. "http-proto" and "reqwest-blocking-client" features are
added as default, to align with the OTel specification.
## Resource Changes
`Resource` creation is moved to a builder pattern, and `Resource::{new, empty,
from_detectors, new_with_defaults, from_schema_url, merge, default}` are
replaced with `Resource::builder()`.
Before:
```rust
Resource::default().with_attributes([
KeyValue::new("service.name", "test_service"),
KeyValue::new("key", "value"),
]);
```
After:
```rust
Resource::builder()
.with_service_name("test_service")
.with_attribute(KeyValue::new("key", "value"))
.build();
```
## Improved internal logging
OpenTelemetry internally used `tracing` to emit its internal logs. This is under
feature-flag "internal-logs" that is enabled by default in all crates. When
using OTel Logging, care must be taken to avoid OTel's own internal log being
fed back to OTel, creating a circular dependency. This can be achieved via proper
filtering. The OTLP Examples in the repo shows how to achieve this. It also
shows how to send OTel's internal logs to stdout using `tracing::Fmt`.
## Full example
A fully runnable example application using OTLP Exporter is provided in this
repo. Comparing the 0.27 vs 0.28 of the example would give a good overview of
the changes required to be made.
[Basic OTLP Example
(0.27)](https://github.com/open-telemetry/opentelemetry-rust/tree/opentelemetry-otlp-0.27.0/opentelemetry-otlp/examples)
[Basic OTLP Example
(0.28)](https://github.com/open-telemetry/opentelemetry-rust/tree/opentelemetry-otlp-0.28.0/opentelemetry-otlp/examples)
This guide covers only the most common breaking changes. If you're using custom
exporters or processors (or authoring one), please consult the changelog for
additional migration details.
## Notes on Breaking Changes and the Path to 1.0
We understand that breaking changes can be challenging, but they are essential
for the growth and stability of the project. With the release of 0.28, the
Metric API (`opentelemetry` crate, "metrics" feature flag) and LogBridge API
(`opentelemetry` crate, "logs" feature flag) are now stable, and we do not
anticipate further breaking changes for these components.
Moreover, the `opentelemetry_sdk` crate for "logs" and "metrics" will have a
very high bar for any future breaking changes. Any changes are expected to
primarily impact those developing custom components, such as custom exporters.
In the upcoming releases, we aim to bring the "traces" feature to the same level
of stability as "logs" and "metrics". Additionally, "opentelemetry-otlp", the
official exporter, will also receive stability guarantees.
We are excited to announce that a 1.0 release, encompassing logs, metrics, and
traces, is planned for June 2025. We appreciate your patience and support as we
work towards this milestone. The 1.0 release will cover the API
(`opentelemetry`), SDK (`opentelemetry_sdk`), OTLP Exporter
(`opentelemetry-otlp`), and Tracing-Bridge (`opentelemetry-appender-tracing`).
We encourage you to share your feedback via GitHub issues or the OTel-Rust Slack
channel [here](https://cloud-native.slack.com/archives/C03GDP0H023).

92
docs/migration_0.29.md Normal file
View File

@ -0,0 +1,92 @@
# Migration Guide from 0.28 to 0.29
OpenTelemetry Rust 0.29 introduces a few breaking changes. This guide aims to
facilitate a smooth migration for common use cases involving the
`opentelemetry`, `opentelemetry_sdk`, `opentelemetry-otlp`, and
`opentelemetry-appender-tracing` crates. For a comprehensive list of changes,
please refer to the detailed changelog for each crate. This document covers only
the most common scenarios. Note that changes that only affect custom
exporter/processor authors are not mentioned in this doc.
OpenTelemetry Metrics API and Log-Bridge API were declared stable in 0.28, and have
no breaking changes.
## Baggage Changes
The Baggage API has been redesigned to align with the OpenTelemetry
specification. While the core API for interacting with Baggage remains the same,
the accepted data types have changed. Baggage Keys now only allow strings (ASCII
printable characters), and Baggage values are restricted to strings.
For detailed changes, see the [changelog](../opentelemetry/CHANGELOG.md). With
version 0.29, the Baggage API has reached "Release Candidate" status, meaning
further breaking changes will be highly restricted.
## Appender-Tracing Changes
The `opentelemetry-appender-tracing` crate, which bridges `tracing` events to
OpenTelemetry logs, has been updated to properly map `tracing` data types to the
OpenTelemetry model. As of version 0.29, this crate is considered "Stable," and
no further breaking changes will be made without a major version bump.
## Configuration via Environment Variables
The 0.29 release aligns OpenTelemetry Rust with the rest of the OpenTelemetry
ecosystem by treating any code-based configuration as final (i.e., it cannot be
overridden by environment variables). This policy was partially true before but
is now applied consistently. If you prefer to configure your application via
environment variables, avoid configuring it programmatically.
## Discontinuing Dedicated Prometheus Exporter
The `opentelemetry-prometheus` crate will be discontinued with the 0.29 release.
Active development on this crate ceased a few months ago. Given that Prometheus
now natively supports OTLP, and considering that the OpenTelemetry Rust project
is still working towards a 1.0 release, we need to focus on essential components
to maintain scope and ensure timely delivery.
Prometheus interoperability remains a key goal for OpenTelemetry. However, the
current `opentelemetry-prometheus` crate requires a major rewrite to eliminate
dependencies on unmaintained crates. We may reintroduce a dedicated Prometheus
exporter in the future once these issues are resolved.
### Migration Guide
For those using Prometheus as a backend, you can integrate with Prometheus using
the following methods:
1. Use the OTLP Exporter to push metrics directly to Prometheus.
2. If you require a pull (scrape) model, push metrics to an OpenTelemetry
Collector using the OTLP Exporter, and configure Prometheus to scrape the
OpenTelemetry Collector.
These alternatives ensure continued Prometheus integration while allowing us to
focus on achieving a stable 1.0 release for OpenTelemetry Rust.
## Next Release
In the [next
release](https://github.com/open-telemetry/opentelemetry-rust/milestone/21), we
expect to stabilize the Metrics SDK and resolve the long-standing question of
`tokio-tracing` vs. `opentelemetry tracing`, which is a prerequisite before
stabilizing Distributed Tracing. Additionally, `Context` is also expected to be
enhanced with the ability to suppress telemetry-induced-telemetry.
## Instrumentation Libraries
Unlike other OpenTelemetry language implementations, OpenTelemetry Rust historically did not
maintain any instrumentations directly. This has recently changed with a
[contribution](https://github.com/open-telemetry/opentelemetry-rust-contrib/pull/202)
from one of the founding members of the OpenTelemetry Rust project to the
contrib repository, providing an instrumentation library for
[`actix-web`](https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-instrumentation-actix-web).
We expect that this instrumentation will serve as a reference implementation demonstrating best practices for
creating OpenTelemetry instrumentations in Rust.
We welcome additional contributions of instrumentation libraries to the contrib repository.
## Thanks
Thank you to everyone who contributed to this milestone. Please share your feedback
through GitHub issues or join the discussion in the OTel-Rust Slack channel
[here](https://cloud-native.slack.com/archives/C03GDP0H023).

68
docs/release_0.30.md Normal file
View File

@ -0,0 +1,68 @@
# Release Notes 0.30
OpenTelemetry Rust 0.30 introduces a few breaking changes to the
`opentelemetry_sdk` crate in the `metrics` feature. These changes were essential
to drive the Metrics SDK towards stability. With this release, the Metrics SDK
is officially declared stable. The Metrics API was declared stable last year,
and previously, the Logs API, SDK, and OTel-Appender-Tracing were also marked
stable. Importantly, no breaking changes have been introduced to components
already marked as stable.
It is worth noting that the `opentelemetry-otlp` crate remains in a
Release-Candidate state and is not yet considered stable. With the API and SDK
for Logs and Metrics now stable, the focus will shift towards further refining
and stabilizing the OTLP Exporters in upcoming releases. Additionally,
Distributed Tracing is expected to progress towards stability, addressing key
interoperability challenges.
For detailed changelogs of individual crates, please refer to their respective
changelog files. This document serves as a summary of the main changes.
## Key Changes
### Metrics SDK Improvements
1. **Stabilized "view" features**: Previously under an experimental feature
flag, views can now be used to modify the name, unit, description, and
cardinality limit of a metric. Advanced view capabilities, such as changing
aggregation or dropping attributes, remain under the experimental feature
flag.
2. **Cardinality capping**: Introduced the ability to cap cardinality and
configure limits using views.
3. **Polished public API**: Refined the public API to hide implementation
details from exporters, enabling future internal optimizations and ensuring
consistency. Some APIs related to authoring custom metric readers have been
moved behind experimental feature flags. These advanced use cases require
more time to finalize the API surface before being included in the stable
release.
### Context-Based Suppression
Added the ability to suppress telemetry based on Context. This feature prevents
telemetry-induced-telemetry scenarios and addresses a long-standing issue. Note
that suppression relies on proper context propagation. Certain libraries used in
OTLP Exporters utilize `tracing` but do not adopt OpenTelemetry's context
propagation. As a result, not all telemetry is automatically suppressed with
this feature. Improvements in this area are expected in future releases.
## Next Release
In the [next
release](https://github.com/open-telemetry/opentelemetry-rust/milestone/22), the
focus will shift to OTLP Exporters and Distributed Tracing, specifically
resolving
[interoperability](https://github.com/open-telemetry/opentelemetry-rust/issues/2420)
issues with `tokio-tracing` and other fixes required to drive Distributed
Tracing towards stability.
## Acknowledgments
Thank you to everyone who contributed to this milestone. We welcome your
feedback through GitHub issues or discussions in the OTel-Rust Slack channel
[here](https://cloud-native.slack.com/archives/C03GDP0H023).
We are also excited to announce that [Anton Grübel](https://github.com/gruebel)
and [Björn Antonsson](https://github.com/bantonsson) have joined the OTel Rust
project as Approvers.

View File

@ -7,7 +7,7 @@ This directory contains some examples that should help you get start crates from
This example uses following crates from this repo:
- opentelemetry(log)
- opentelemetry-appender-log
- opentelemetry-appender-tracing
- opentelemetry-stdout
Check this example if you want to understand *how to instrument logs using opentelemetry*.

View File

@ -3,13 +3,18 @@ name = "logs-basic"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
rust-version = "1.75.0"
publish = false
autobenches = false
[[bin]]
name = "logs-basic"
path = "src/main.rs"
bench = false
[dependencies]
opentelemetry = { path = "../../opentelemetry", features = ["logs"] }
opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["logs"] }
opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["logs"]}
opentelemetry-appender-log = { path = "../../opentelemetry-appender-log", default-features = false}
opentelemetry-semantic-conventions = { path = "../../opentelemetry-semantic-conventions" }
log = { workspace = true }
serde_json = { workspace = true }
opentelemetry-stdout = { workspace = true, features = ["logs"] }
opentelemetry-appender-tracing = { workspace = true }
tracing = { workspace = true, features = ["std"]}
tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std", "fmt"] }

View File

@ -1,15 +1,15 @@
# OpenTelemetry Log Appender for log - Example
# OpenTelemetry Log Appender for tracing - Example
This example shows how to use the opentelemetry-appender-log crate, which is a
This example shows how to use the opentelemetry-appender-tracing crate, which is a
[logging
appender](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/glossary.md#log-appender--bridge)
that bridges logs from the [log crate](https://docs.rs/log/latest/log/) to
that bridges logs from the [tracing crate](https://tracing.rs/tracing/#events) to
OpenTelemetry. The example sets up a LoggerProvider with a stdout exporter, so logs
are emitted to stdout.
## Usage
Run the following, and Logs emitted using [log](https://docs.rs/log/latest/log/)
Run the following, and Logs emitted using [tracing](https://docs.rs/tracing/latest/tracing/)
will be written out to stdout.
```shell

View File

@ -1,27 +1,53 @@
use log::{error, Level};
use opentelemetry::KeyValue;
use opentelemetry_appender_log::OpenTelemetryLogBridge;
use opentelemetry_sdk::logs::LoggerProvider;
use opentelemetry_appender_tracing::layer;
use opentelemetry_sdk::logs::SdkLoggerProvider;
use opentelemetry_sdk::Resource;
use opentelemetry_semantic_conventions::resource::SERVICE_NAME;
use tracing::error;
use tracing_subscriber::{prelude::*, EnvFilter};
fn main() {
// Setup LoggerProvider with a stdout exporter
let exporter = opentelemetry_stdout::LogExporter::default();
let logger_provider = LoggerProvider::builder()
.with_resource(Resource::new([KeyValue::new(
SERVICE_NAME,
"logs-basic-example",
)]))
let provider: SdkLoggerProvider = SdkLoggerProvider::builder()
.with_resource(
Resource::builder()
.with_service_name("log-appender-tracing-example")
.build(),
)
.with_simple_exporter(exporter)
.build();
// Setup Log Appender for the log crate.
let otel_log_appender = OpenTelemetryLogBridge::new(&logger_provider);
log::set_boxed_logger(Box::new(otel_log_appender)).unwrap();
log::set_max_level(Level::Error.to_level_filter());
// To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal
// logging is properly suppressed. However, logs emitted by external components
// (such as reqwest, tonic, etc.) are not suppressed as they do not propagate
// OpenTelemetry context. Until this issue is addressed
// (https://github.com/open-telemetry/opentelemetry-rust/issues/2877),
// filtering like this is the best way to suppress such logs.
//
// The filter levels are set as follows:
// - Allow `info` level and above by default.
// - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`.
//
// Note: This filtering will also drop logs from these components even when
// they are used outside of the OTLP Exporter.
let filter_otel = EnvFilter::new("info")
.add_directive("hyper=off".parse().unwrap())
.add_directive("tonic=off".parse().unwrap())
.add_directive("h2=off".parse().unwrap())
.add_directive("reqwest=off".parse().unwrap());
let otel_layer = layer::OpenTelemetryTracingBridge::new(&provider).with_filter(filter_otel);
// Emit logs using macros from the log crate.
// These logs gets piped through OpenTelemetry bridge and gets exported to stdout.
error!(target: "my-target", "hello from {}. My price is {}", "apple", 2.99);
// Create a new tracing::Fmt layer to print the logs to stdout. It has a
// default filter of `info` level and above, and `debug` and above for logs
// from OpenTelemetry crates. The filter levels can be customized as needed.
let filter_fmt = EnvFilter::new("info").add_directive("opentelemetry=debug".parse().unwrap());
let fmt_layer = tracing_subscriber::fmt::layer()
.with_thread_names(true)
.with_filter(filter_fmt);
tracing_subscriber::registry()
.with(otel_layer)
.with(fmt_layer)
.init();
error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io", message = "This is an example message");
let _ = provider.shutdown();
}

View File

@ -3,11 +3,17 @@ name = "metrics-advanced"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
rust-version = "1.75.0"
publish = false
autobenches = false
[[bin]]
name = "metrics-advanced"
path = "src/main.rs"
bench = false
[dependencies]
opentelemetry = { path = "../../opentelemetry", features = ["metrics"] }
opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["spec_unstable_metrics_views", "rt-tokio"] }
opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["metrics"] }
opentelemetry_sdk = { path = "../../opentelemetry-sdk" }
opentelemetry-stdout = { workspace = true, features = ["metrics"] }
tokio = { workspace = true, features = ["full"] }
serde_json = { workspace = true }

View File

@ -12,6 +12,3 @@ Run the following, and the Metrics will be written out to stdout.
```shell
$ cargo run
```

View File

@ -1,20 +1,19 @@
use opentelemetry::global;
use opentelemetry::Key;
use opentelemetry::KeyValue;
use opentelemetry_sdk::metrics::{
Aggregation, Instrument, PeriodicReader, SdkMeterProvider, Stream, Temporality,
};
use opentelemetry_sdk::{runtime, Resource};
use opentelemetry_sdk::metrics::{Instrument, SdkMeterProvider, Stream, Temporality};
use opentelemetry_sdk::Resource;
use std::error::Error;
fn init_meter_provider() -> opentelemetry_sdk::metrics::SdkMeterProvider {
// for example 1
let my_view_rename_and_unit = |i: &Instrument| {
if i.name == "my_histogram" {
if i.name() == "my_histogram" {
Some(
Stream::new()
.name("my_histogram_renamed")
.unit("milliseconds"),
Stream::builder()
.with_name("my_histogram_renamed")
.with_unit("milliseconds")
.build()
.unwrap(),
)
} else {
None
@ -22,23 +21,13 @@ fn init_meter_provider() -> opentelemetry_sdk::metrics::SdkMeterProvider {
};
// for example 2
let my_view_drop_attributes = |i: &Instrument| {
if i.name == "my_counter" {
Some(Stream::new().allowed_attribute_keys(vec![Key::from("mykey1")]))
} else {
None
}
};
// for example 3
let my_view_change_aggregation = |i: &Instrument| {
if i.name == "my_second_histogram" {
Some(
Stream::new().aggregation(Aggregation::ExplicitBucketHistogram {
boundaries: vec![0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5],
record_min_max: false,
}),
)
let my_view_change_cardinality = |i: &Instrument| {
if i.name() == "my_second_histogram" {
// Note: If Stream is invalid, build() will return an error. By
// calling `.ok()`, any such error is ignored and treated as if the
// view does not match the instrument. If this is not the desired
// behavior, consider handling the error explicitly.
Stream::builder().with_cardinality_limit(2).build().ok()
} else {
None
}
@ -49,16 +38,15 @@ fn init_meter_provider() -> opentelemetry_sdk::metrics::SdkMeterProvider {
.with_temporality(Temporality::Delta)
.build();
let reader = PeriodicReader::builder(exporter, runtime::Tokio).build();
let resource = Resource::builder()
.with_service_name("metrics-advanced-example")
.build();
let provider = SdkMeterProvider::builder()
.with_reader(reader)
.with_resource(Resource::new([KeyValue::new(
"service.name",
"metrics-advanced-example",
)]))
.with_periodic_exporter(exporter)
.with_resource(resource)
.with_view(my_view_rename_and_unit)
.with_view(my_view_drop_attributes)
.with_view(my_view_change_aggregation)
.with_view(my_view_change_cardinality)
.build();
global::set_meter_provider(provider.clone());
provider
@ -90,69 +78,43 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
],
);
// Example 2 - Drop unwanted attributes using view.
let counter = meter.u64_counter("my_counter").build();
// Record measurements using the Counter instrument.
// Though we are passing 4 attributes here, only 1 will be used
// for aggregation as view is configured to use only "mykey1"
// attribute.
counter.add(
10,
&[
KeyValue::new("mykey1", "myvalue1"),
KeyValue::new("mykey2", "myvalue2"),
KeyValue::new("mykey3", "myvalue3"),
KeyValue::new("mykey4", "myvalue4"),
],
);
// Example 3 - Change Aggregation configuration using View.
// Histograms are by default aggregated using ExplicitBucketHistogram
// with default buckets. The configured view will change the aggregation to
// use a custom set of boundaries, and min/max values will not be recorded.
// Example 2 - Change cardinality using View.
let histogram2 = meter
.f64_histogram("my_second_histogram")
.with_unit("ms")
.with_description("My histogram example description")
.build();
// Record measurements using the histogram instrument.
// The values recorded are in the range of 1.2 to 1.5, warranting
// the change of boundaries.
histogram2.record(
1.5,
&[
KeyValue::new("mykey1", "myvalue1"),
KeyValue::new("mykey2", "myvalue2"),
KeyValue::new("mykey3", "myvalue3"),
KeyValue::new("mykey4", "myvalue4"),
],
);
// Record measurements using the histogram instrument. This metric will have
// a cardinality limit of 2, as set in the view. Because of this, only the
// first two distinct attribute combinations will be recorded, and the rest
// will be folded into the overflow attribute. Any number of measurements
// can be recorded as long as they use the same or already-seen attribute
// combinations.
histogram2.record(1.5, &[KeyValue::new("mykey1", "v1")]);
histogram2.record(1.2, &[KeyValue::new("mykey1", "v2")]);
histogram2.record(
1.2,
&[
KeyValue::new("mykey1", "myvalue1"),
KeyValue::new("mykey2", "myvalue2"),
KeyValue::new("mykey3", "myvalue3"),
KeyValue::new("mykey4", "myvalue4"),
],
);
// Repeatedly emitting measurements for "v1" and "v2" will not
// trigger overflow, as they are already seen attribute combinations.
histogram2.record(1.7, &[KeyValue::new("mykey1", "v1")]);
histogram2.record(1.8, &[KeyValue::new("mykey1", "v2")]);
histogram2.record(
1.23,
&[
KeyValue::new("mykey1", "myvalue1"),
KeyValue::new("mykey2", "myvalue2"),
KeyValue::new("mykey3", "myvalue3"),
KeyValue::new("mykey4", "myvalue4"),
],
);
// Emitting measurements for new attribute combinations will trigger
// overflow, as the cardinality limit of 2 has been reached.
// All the below measurements will be folded into the overflow attribute.
histogram2.record(1.23, &[KeyValue::new("mykey1", "v3")]);
// Metrics are exported by default every 30 seconds when using stdout exporter,
histogram2.record(1.4, &[KeyValue::new("mykey1", "v4")]);
histogram2.record(1.6, &[KeyValue::new("mykey1", "v5")]);
histogram2.record(1.7, &[KeyValue::new("mykey1", "v6")]);
histogram2.record(1.8, &[KeyValue::new("mykey1", "v7")]);
// Metrics are exported by default every 60 seconds when using stdout exporter,
// however shutting down the MeterProvider here instantly flushes
// the metrics, instead of waiting for the 30 sec interval.
// the metrics, instead of waiting for the 60 sec interval.
meter_provider.shutdown()?;
Ok(())
}

View File

@ -3,15 +3,18 @@ name = "metrics-basic"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
rust-version = "1.75.0"
publish = false
autobenches = false
[[bin]]
name = "metrics-basic"
path = "src/main.rs"
bench = false
[dependencies]
opentelemetry = { path = "../../opentelemetry", features = ["metrics", "otel_unstable"] }
opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["metrics", "rt-tokio"] }
opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["metrics"]}
opentelemetry = { path = "../../opentelemetry", features = ["metrics"] }
opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["metrics"] }
opentelemetry-stdout = { workspace = true, features = ["metrics"] }
tokio = { workspace = true, features = ["full"] }
serde_json = { workspace = true }
[features]
default = ["otel_unstable"]
otel_unstable = ["opentelemetry/otel_unstable"]

View File

@ -11,6 +11,3 @@ Run the following, and the Metrics will be written out to stdout.
```shell
$ cargo run
```

View File

@ -1,7 +1,6 @@
use opentelemetry::global;
use opentelemetry::KeyValue;
use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider};
use opentelemetry_sdk::{runtime, Resource};
use opentelemetry::{global, KeyValue};
use opentelemetry_sdk::metrics::SdkMeterProvider;
use opentelemetry_sdk::Resource;
use std::error::Error;
use std::vec;
@ -10,20 +9,20 @@ fn init_meter_provider() -> opentelemetry_sdk::metrics::SdkMeterProvider {
// Build exporter using Delta Temporality (Defaults to Temporality::Cumulative)
// .with_temporality(opentelemetry_sdk::metrics::Temporality::Delta)
.build();
let reader = PeriodicReader::builder(exporter, runtime::Tokio).build();
let provider = SdkMeterProvider::builder()
.with_reader(reader)
.with_resource(Resource::new([KeyValue::new(
"service.name",
"metrics-basic-example",
)]))
.with_periodic_exporter(exporter)
.with_resource(
Resource::builder()
.with_service_name("metrics-basic-example")
.build(),
)
.build();
global::set_meter_provider(provider.clone());
provider
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
async fn main() -> Result<(), Box<dyn Error>> {
// Initialize the MeterProvider with the stdout Exporter.
let meter_provider = init_meter_provider();
@ -137,9 +136,41 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
})
.build();
// Metrics are exported by default every 30 seconds when using stdout exporter,
// however shutting down the MeterProvider here instantly flushes
// the metrics, instead of waiting for the 30 sec interval.
// Metrics are exported by default every 60 seconds when using stdout
// exporter, however shutting down the MeterProvider here instantly flushes
// the metrics, instead of waiting for the 60 sec interval. Shutdown returns
// a result, which is bubbled up to the caller. The commented code below
// demonstrates handling the shutdown result, instead of bubbling up the
// error.
meter_provider.shutdown()?;
// let shutdown_result = meter_provider.shutdown();
// Handle the shutdown result.
// match shutdown_result {
// Ok(_) => println!("MeterProvider shutdown successfully"),
// Err(e) => {
// match e {
// opentelemetry_sdk::error::ShutdownError::InternalFailure(message) => {
// // This indicates some internal failure during shutdown. The
// // error message is intended for logging purposes only and
// // should not be used to make programmatic decisions.
// println!("MeterProvider shutdown failed: {}", message)
// }
// opentelemetry_sdk::error::ShutdownError::AlreadyShutdown => {
// // This indicates some user code tried to shutdown
// // elsewhere. The user needs to review their code to ensure
// // shutdown is called only once.
// println!("MeterProvider already shutdown")
// }
// opentelemetry_sdk::error::ShutdownError::Timeout(e) => {
// // This indicates the shutdown timed out, and a good hint to
// // user to increase the timeout. (Shutdown method does not
// // allow custom timeout today, but that is temporary)
// println!("MeterProvider shutdown timed out after {:?}", e)
// }
// }
// }
// }
Ok(())
}

View File

@ -1,15 +0,0 @@
[package]
name = "self-diagnostics"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
publish = false
[dependencies]
opentelemetry = { path = "../../opentelemetry" }
opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["rt-tokio"]}
opentelemetry-stdout = { path = "../../opentelemetry-stdout"}
tokio = { workspace = true, features = ["full"] }
tracing = { workspace = true, features = ["std"]}
tracing-core = { workspace = true }
tracing-subscriber = { version = "0.3.18", features = ["env-filter","registry", "std"]}

View File

@ -1,28 +0,0 @@
# Basic OpenTelemetry metrics example with custom error handler:
This example shows how to self-diagnose OpenTelemetry by enabling its internal
logs. OpenTelemetry crates publish internal logs when "internal-logs" feature is
enabled. This feature is enabled by default. Internal logs are published using
`tracing` events, and hence, a `tracing` subscriber must be configured without
which the logs are simply discarded.
## Filtering logs from external dependencies of OTLP Exporter:
The example configures a tracing `filter` to restrict logs from external crates
(`hyper`, `tonic`, and `reqwest` etc.) used by the OTLP Exporter to the `error`
level. This helps prevent an infinite loop of log generation when these crates
emit logs that are picked up by the tracing subscriber. This is only a
workaround until [the root
issue](https://github.com/open-telemetry/opentelemetry-rust/issues/761) is
resolved.
## Filtering logs to be send to OpenTelemetry itself
If you use [OpenTelemetry Tracing
Appender](../../opentelemetry-appender-tracing/README.md) to send `tracing` logs
to OpenTelemetry, then enabling OpenTelemetry internal logs can also cause
infinite, recursive logging. You can filter out all OpenTelemetry internal logs
from being sent to [OpenTelemetry Tracing
Appender](../../opentelemetry-appender-tracing/README.md) using a filter, like
"add_directive("opentelemetry=off".parse().unwrap())" being done for tracing's
`FmtSubscriber`.

View File

@ -1,65 +0,0 @@
use opentelemetry::global;
use opentelemetry::KeyValue;
use opentelemetry_sdk::metrics::PeriodicReader;
use std::error::Error;
use tracing::info;
use tracing_subscriber::fmt;
use tracing_subscriber::prelude::*;
use tracing_subscriber::EnvFilter;
fn init_meter_provider() -> opentelemetry_sdk::metrics::SdkMeterProvider {
let exporter = opentelemetry_stdout::MetricExporterBuilder::default().build();
let reader = PeriodicReader::builder(exporter, opentelemetry_sdk::runtime::Tokio).build();
let provider = opentelemetry_sdk::metrics::SdkMeterProvider::builder()
.with_reader(reader)
.build();
let cloned_provider = provider.clone();
global::set_meter_provider(cloned_provider);
provider
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
// OpenTelemetry uses `tracing` crate for its internal logging. Unless a
// tracing subscriber is set, the logs will be discarded. In this example,
// we configure a `tracing` subscriber to:
// 1. Print logs of level INFO or higher to stdout.
// 2. Filter logs from OpenTelemetry's dependencies (like tonic, hyper,
// reqwest etc. which are commonly used by the OTLP exporter) to only print
// ERROR-level logs. This filtering helps reduce repetitive log messages
// that could otherwise create an infinite loop of log output. This is a
// workaround until
// https://github.com/open-telemetry/opentelemetry-rust/issues/761 is
// resolved.
// Target name used by OpenTelemetry always start with "opentelemetry".
// Hence, one may use "add_directive("opentelemetry=off".parse().unwrap())"
// to turn off all logs from OpenTelemetry.
let filter = EnvFilter::new("info")
.add_directive("hyper=error".parse().unwrap())
.add_directive("tonic=error".parse().unwrap())
.add_directive("h2=error".parse().unwrap())
.add_directive("tower=error".parse().unwrap())
.add_directive("reqwest=error".parse().unwrap());
tracing_subscriber::registry()
.with(fmt::layer().with_thread_names(true).with_filter(filter))
.init();
// Initialize the MeterProvider with the stdout Exporter.
let meter_provider = init_meter_provider();
info!("Starting self-diagnostics example");
let meter = global::meter("example");
// Create a counter using an invalid name to trigger
// internal log about the same.
let counter = meter.u64_counter("my_counter with_space").build();
counter.add(10, &[KeyValue::new("key", "value")]);
meter_provider.shutdown()?;
info!("Shutdown complete. Bye!");
Ok(())
}

View File

@ -3,24 +3,32 @@ name = "tracing-grpc"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
rust-version = "1.75.0"
publish = false
autobenches = false
[[bin]] # Bin to run the gRPC server
name = "grpc-server"
path = "src/server.rs"
bench = false
[[bin]] # Bin to run the gRPC client
name = "grpc-client"
path = "src/client.rs"
bench = false
[dependencies]
opentelemetry = { path = "../../opentelemetry" }
opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["rt-tokio"] }
opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["trace"] }
opentelemetry-stdout = { workspace = true, features = ["trace"] }
prost = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tonic = { workspace = true }
serde_json = { workspace = true }
tonic = { workspace = true, features = ["server", "codegen", "channel", "prost", "router"] }
[build-dependencies]
tonic-build = { workspace = true }
[package.metadata.cargo-machete]
ignored = [
"prost" # needed for `tonic-build`
]

View File

@ -1,9 +1,7 @@
use hello_world::greeter_client::GreeterClient;
use hello_world::HelloRequest;
use opentelemetry::{global, propagation::Injector};
use opentelemetry_sdk::{
propagation::TraceContextPropagator, runtime::Tokio, trace::TracerProvider,
};
use opentelemetry_sdk::{propagation::TraceContextPropagator, trace as sdktrace};
use opentelemetry_stdout::SpanExporter;
use opentelemetry::{
@ -11,19 +9,20 @@ use opentelemetry::{
Context, KeyValue,
};
fn init_tracer() {
fn init_tracer() -> sdktrace::SdkTracerProvider {
global::set_text_map_propagator(TraceContextPropagator::new());
// Install stdout exporter pipeline to be able to retrieve the collected spans.
let provider = TracerProvider::builder()
.with_batch_exporter(SpanExporter::default(), Tokio)
let provider = sdktrace::SdkTracerProvider::builder()
.with_simple_exporter(SpanExporter::default())
.build();
global::set_tracer_provider(provider);
global::set_tracer_provider(provider.clone());
provider
}
struct MetadataMap<'a>(&'a mut tonic::metadata::MetadataMap);
impl<'a> Injector for MetadataMap<'a> {
impl Injector for MetadataMap<'_> {
/// Set a key and value in the MetadataMap. Does nothing if the key or value are not valid inputs
fn set(&mut self, key: &str, value: String) {
if let Ok(key) = tonic::metadata::MetadataKey::from_bytes(key.as_bytes()) {
@ -44,10 +43,14 @@ async fn greet() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static
let span = tracer
.span_builder("Greeter/client")
.with_kind(SpanKind::Client)
.with_attributes([KeyValue::new("component", "grpc")])
.with_attributes([
KeyValue::new("rpc.system", "grpc"),
KeyValue::new("server.port", 50052),
KeyValue::new("rpc.method", "say_hello"),
])
.start(&tracer);
let cx = Context::current_with_span(span);
let mut client = GreeterClient::connect("http://[::1]:50051").await?;
let mut client = GreeterClient::connect("http://[::1]:50052").await?;
let mut request = tonic::Request::new(HelloRequest {
name: "Tonic".into(),
@ -59,25 +62,33 @@ async fn greet() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static
let response = client.say_hello(request).await;
let span = cx.span();
let status = match response {
Ok(_res) => "OK".to_string(),
Ok(_res) => {
span.set_attribute(KeyValue::new("response", "OK"));
"OK".to_string()
}
Err(status) => {
// Access the status code
let status_code = status.code();
span.set_attribute(KeyValue::new(
"response_code_desc",
status_code.description(),
));
status_code.to_string()
}
};
cx.span()
.add_event("Got response!", vec![KeyValue::new("status", status)]);
span.add_event("Got response!", vec![KeyValue::new("status", status)]);
Ok(())
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
init_tracer();
let provider = init_tracer();
greet().await?;
opentelemetry::global::shutdown_tracer_provider();
provider.shutdown()?;
Ok(())
}

View File

@ -1,24 +1,24 @@
use hello_world::greeter_server::{Greeter, GreeterServer};
use hello_world::{HelloReply, HelloRequest};
use opentelemetry::KeyValue;
use opentelemetry::{
global,
propagation::Extractor,
trace::{Span, SpanKind, Tracer},
};
use opentelemetry_sdk::{
propagation::TraceContextPropagator, runtime::Tokio, trace::TracerProvider,
};
use opentelemetry_sdk::{propagation::TraceContextPropagator, trace::SdkTracerProvider};
use opentelemetry_stdout::SpanExporter;
use tonic::{transport::Server, Request, Response, Status};
fn init_tracer() {
fn init_tracer() -> SdkTracerProvider {
global::set_text_map_propagator(TraceContextPropagator::new());
// Install stdout exporter pipeline to be able to retrieve the collected spans.
let provider = TracerProvider::builder()
.with_batch_exporter(SpanExporter::default(), Tokio)
let provider = SdkTracerProvider::builder()
.with_simple_exporter(SpanExporter::default())
.build();
global::set_tracer_provider(provider);
global::set_tracer_provider(provider.clone());
provider
}
#[allow(clippy::derive_partial_eq_without_eq)] // tonic don't derive Eq for generated types. We shouldn't manually change it.
@ -28,7 +28,7 @@ pub mod hello_world {
struct MetadataMap<'a>(&'a tonic::metadata::MetadataMap);
impl<'a> Extractor for MetadataMap<'a> {
impl Extractor for MetadataMap<'_> {
/// Get a value for a key from the MetadataMap. If the value can't be converted to &str, returns None
fn get(&self, key: &str) -> Option<&str> {
self.0.get(key).and_then(|metadata| metadata.to_str().ok())
@ -66,6 +66,11 @@ impl Greeter for MyGreeter {
let mut span = tracer
.span_builder("Greeter/server")
.with_kind(SpanKind::Server)
.with_attributes([
KeyValue::new("rpc.system", "grpc"),
KeyValue::new("server.port", 50052),
KeyValue::new("rpc.method", "say_hello"),
])
.start_with_context(&tracer, &parent_cx);
let name = request.into_inner().name;
@ -82,9 +87,9 @@ impl Greeter for MyGreeter {
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
init_tracer();
let provider = init_tracer();
let addr = "[::1]:50051".parse()?;
let addr = "[::1]:50052".parse()?;
let greeter = MyGreeter::default();
Server::builder()
@ -92,7 +97,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>
.serve(addr)
.await?;
opentelemetry::global::shutdown_tracer_provider();
provider.shutdown()?;
Ok(())
}

View File

@ -3,17 +3,21 @@ name = "tracing-http-propagator"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
rust-version = "1.75.0"
publish = false
autobenches = false
[[bin]] # Bin to run the http server
name = "http-server"
path = "src/server.rs"
doc = false
bench = false
[[bin]] # Bin to run the client
name = "http-client"
path = "src/client.rs"
doc = false
bench = false
[dependencies]
http-body-util = { workspace = true }
@ -23,5 +27,8 @@ tokio = { workspace = true, features = ["full"] }
opentelemetry = { path = "../../opentelemetry" }
opentelemetry_sdk = { path = "../../opentelemetry-sdk" }
opentelemetry-http = { path = "../../opentelemetry-http" }
opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["trace"] }
opentelemetry-stdout = { workspace = true, features = ["trace", "logs"] }
opentelemetry-semantic-conventions = { path = "../../opentelemetry-semantic-conventions" }
opentelemetry-appender-tracing = { workspace = true }
tracing = { workspace = true, features = ["std"]}
tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std", "fmt"] }

View File

@ -3,21 +3,42 @@ use hyper_util::{client::legacy::Client, rt::TokioExecutor};
use opentelemetry::{
global,
trace::{SpanKind, TraceContextExt, Tracer},
Context, KeyValue,
Context,
};
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use opentelemetry_http::{Bytes, HeaderInjector};
use opentelemetry_sdk::{propagation::TraceContextPropagator, trace::TracerProvider};
use opentelemetry_stdout::SpanExporter;
use opentelemetry_sdk::{
logs::SdkLoggerProvider, propagation::TraceContextPropagator, trace::SdkTracerProvider,
};
use opentelemetry_stdout::{LogExporter, SpanExporter};
use tracing::info;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
fn init_tracer() {
fn init_tracer() -> SdkTracerProvider {
global::set_text_map_propagator(TraceContextPropagator::new());
// Install stdout exporter pipeline to be able to retrieve the collected spans.
// For the demonstration, use `Sampler::AlwaysOn` sampler to sample all traces.
let provider = TracerProvider::builder()
let provider = SdkTracerProvider::builder()
.with_simple_exporter(SpanExporter::default())
.build();
global::set_tracer_provider(provider);
global::set_tracer_provider(provider.clone());
provider
}
fn init_logs() -> SdkLoggerProvider {
// Setup tracerprovider with stdout exporter
// that prints the spans to stdout.
let logger_provider = SdkLoggerProvider::builder()
.with_simple_exporter(LogExporter::default())
.build();
let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider);
tracing_subscriber::registry()
.with(otel_layer)
.with(tracing_subscriber::filter::LevelFilter::INFO)
.init();
logger_provider
}
async fn send_request(
@ -37,21 +58,22 @@ async fn send_request(
global::get_text_map_propagator(|propagator| {
propagator.inject_context(&cx, &mut HeaderInjector(req.headers_mut().unwrap()))
});
req.headers_mut()
.unwrap()
.insert("baggage", "is_synthetic=true".parse().unwrap());
let res = client
.request(req.body(Full::new(Bytes::from(body_content.to_string())))?)
.await?;
cx.span().add_event(
"Got response!",
vec![KeyValue::new("status", res.status().to_string())],
);
info!(name: "ResponseReceived", status = res.status().to_string(), message = "Response received");
Ok(())
}
#[tokio::main]
async fn main() -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
init_tracer();
let tracer_provider = init_tracer();
let logger_provider = init_logs();
send_request(
"http://127.0.0.1:3000/health",
@ -66,5 +88,11 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error + Send + Sy
)
.await?;
tracer_provider
.shutdown()
.expect("Shutdown provider failed");
logger_provider
.shutdown()
.expect("Shutdown provider failed");
Ok(())
}

View File

@ -2,16 +2,33 @@ use http_body_util::{combinators::BoxBody, BodyExt, Full};
use hyper::{body::Incoming, service::service_fn, Request, Response, StatusCode};
use hyper_util::rt::{TokioExecutor, TokioIo};
use opentelemetry::{
global,
baggage::BaggageExt,
global::{self, BoxedTracer},
logs::LogRecord,
propagation::TextMapCompositePropagator,
trace::{FutureExt, Span, SpanKind, TraceContextExt, Tracer},
Context, KeyValue,
Context, InstrumentationScope, KeyValue,
};
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use opentelemetry_http::{Bytes, HeaderExtractor};
use opentelemetry_sdk::{propagation::TraceContextPropagator, trace::TracerProvider};
use opentelemetry_sdk::{
error::OTelSdkResult,
logs::{LogProcessor, SdkLogRecord, SdkLoggerProvider},
propagation::{BaggagePropagator, TraceContextPropagator},
trace::{SdkTracerProvider, SpanProcessor},
};
use opentelemetry_semantic_conventions::trace;
use opentelemetry_stdout::SpanExporter;
use std::{convert::Infallible, net::SocketAddr};
use opentelemetry_stdout::{LogExporter, SpanExporter};
use std::time::Duration;
use std::{convert::Infallible, net::SocketAddr, sync::OnceLock};
use tokio::net::TcpListener;
use tracing::info;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
fn get_tracer() -> &'static BoxedTracer {
static TRACER: OnceLock<BoxedTracer> = OnceLock::new();
TRACER.get_or_init(|| global::tracer("example/server"))
}
// Utility function to extract the context from the incoming request headers
fn extract_context_from_request(req: &Request<Incoming>) -> Context {
@ -24,12 +41,12 @@ fn extract_context_from_request(req: &Request<Incoming>) -> Context {
async fn handle_health_check(
_req: Request<Incoming>,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, Infallible> {
let tracer = global::tracer("example/server");
let mut span = tracer
let tracer = get_tracer();
let _span = tracer
.span_builder("health_check")
.with_kind(SpanKind::Internal)
.start(&tracer);
span.add_event("Health check accessed", vec![]);
.start(tracer);
info!(name: "health_check", message = "Health check endpoint hit");
let res = Response::new(
Full::new(Bytes::from_static(b"Server is up and running!"))
@ -44,12 +61,12 @@ async fn handle_health_check(
async fn handle_echo(
req: Request<Incoming>,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, Infallible> {
let tracer = global::tracer("example/server");
let mut span = tracer
let tracer = get_tracer();
let _span = tracer
.span_builder("echo")
.with_kind(SpanKind::Internal)
.start(&tracer);
span.add_event("Echoing back the request", vec![]);
.start(tracer);
info!(name = "echo", message = "Echo endpoint hit");
let res = Response::new(req.into_body().boxed());
@ -63,15 +80,15 @@ async fn router(
let parent_cx = extract_context_from_request(&req);
let response = {
// Create a span parenting the remote client span.
let tracer = global::tracer("example/server");
let mut span = tracer
let tracer = get_tracer();
let span = tracer
.span_builder("router")
.with_kind(SpanKind::Server)
.start_with_context(&tracer, &parent_cx);
.start_with_context(tracer, &parent_cx);
span.add_event("dispatching request", vec![]);
info!(name = "router", message = "Dispatching request");
let cx = Context::default().with_span(span);
let cx = parent_cx.with_span(span);
match (req.method(), req.uri().path()) {
(&hyper::Method::GET, "/health") => handle_health_check(req).with_context(cx).await,
(&hyper::Method::GET, "/echo") => handle_echo(req).with_context(cx).await,
@ -88,23 +105,86 @@ async fn router(
response
}
fn init_tracer() {
global::set_text_map_propagator(TraceContextPropagator::new());
/// A custom log processor that enriches LogRecords with baggage attributes.
/// Baggage information is not added automatically without this processor.
#[derive(Debug)]
struct EnrichWithBaggageLogProcessor;
impl LogProcessor for EnrichWithBaggageLogProcessor {
fn emit(&self, data: &mut SdkLogRecord, _instrumentation: &InstrumentationScope) {
Context::map_current(|cx| {
for (kk, vv) in cx.baggage().iter() {
data.add_attribute(kk.clone(), vv.0.clone());
}
});
}
fn force_flush(&self) -> OTelSdkResult {
Ok(())
}
}
/// A custom span processor that enriches spans with baggage attributes. Baggage
/// information is not added automatically without this processor.
#[derive(Debug)]
struct EnrichWithBaggageSpanProcessor;
impl SpanProcessor for EnrichWithBaggageSpanProcessor {
fn force_flush(&self) -> OTelSdkResult {
Ok(())
}
fn shutdown_with_timeout(&self, _timeout: Duration) -> OTelSdkResult {
Ok(())
}
fn on_start(&self, span: &mut opentelemetry_sdk::trace::Span, cx: &Context) {
for (kk, vv) in cx.baggage().iter() {
span.set_attribute(KeyValue::new(kk.clone(), vv.0.clone()));
}
}
fn on_end(&self, _span: opentelemetry_sdk::trace::SpanData) {}
}
fn init_tracer() -> SdkTracerProvider {
let baggage_propagator = BaggagePropagator::new();
let trace_context_propagator = TraceContextPropagator::new();
let composite_propagator = TextMapCompositePropagator::new(vec![
Box::new(baggage_propagator),
Box::new(trace_context_propagator),
]);
global::set_text_map_propagator(composite_propagator);
// Setup tracerprovider with stdout exporter
// that prints the spans to stdout.
let provider = TracerProvider::builder()
let provider = SdkTracerProvider::builder()
.with_span_processor(EnrichWithBaggageSpanProcessor)
.with_simple_exporter(SpanExporter::default())
.build();
global::set_tracer_provider(provider);
global::set_tracer_provider(provider.clone());
provider
}
fn init_logs() -> SdkLoggerProvider {
// Setup tracerprovider with stdout exporter
// that prints the spans to stdout.
let logger_provider = SdkLoggerProvider::builder()
.with_log_processor(EnrichWithBaggageLogProcessor)
.with_simple_exporter(LogExporter::default())
.build();
let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider);
tracing_subscriber::registry().with(otel_layer).init();
logger_provider
}
#[tokio::main]
async fn main() {
use hyper_util::server::conn::auto::Builder;
init_tracer();
let provider = init_tracer();
let logger_provider = init_logs();
let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
let listener = TcpListener::bind(addr).await.unwrap();
@ -116,4 +196,9 @@ async fn main() {
eprintln!("{err}");
}
}
provider.shutdown().expect("Shutdown provider failed");
logger_provider
.shutdown()
.expect("Shutdown provider failed");
}

View File

@ -1,13 +0,0 @@
[package]
name = "tracing-jaeger"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
publish = false
[dependencies]
opentelemetry = { path = "../../opentelemetry" }
opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["rt-tokio"] }
opentelemetry-otlp = { path = "../../opentelemetry-otlp", features = ["tonic"] }
opentelemetry-semantic-conventions = { path = "../../opentelemetry-semantic-conventions" }
tokio = { workspace = true, features = ["full"] }

View File

@ -1,21 +0,0 @@
# Exporting traces to Jaeger
This example shows how to export spans to Jaeger agent using OTLPExporter.
## Usage
Launch the example app with Jaeger running in background via docker:
```shell
# Run jaeger in background with native OTLP Ingestion
$ docker run -d -p16686:16686 -p4317:4317 -e COLLECTOR_OTLP_ENABLED=true jaegertracing/all-in-one:latest
# Run the app
$ cargo run
# View spans
$ firefox http://localhost:16686/
```
![Jaeger UI](jaeger.png)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 165 KiB

View File

@ -1,50 +0,0 @@
use opentelemetry::global::shutdown_tracer_provider;
use opentelemetry::{
global,
trace::{TraceContextExt, TraceError, Tracer},
KeyValue,
};
use opentelemetry_sdk::trace::TracerProvider;
use opentelemetry_sdk::{runtime, trace as sdktrace, Resource};
use opentelemetry_semantic_conventions::resource::SERVICE_NAME;
use std::error::Error;
fn init_tracer_provider() -> Result<opentelemetry_sdk::trace::TracerProvider, TraceError> {
let exporter = opentelemetry_otlp::SpanExporter::builder()
.with_tonic()
.build()?;
Ok(TracerProvider::builder()
.with_batch_exporter(exporter, runtime::Tokio)
.with_config(
sdktrace::Config::default().with_resource(Resource::new(vec![KeyValue::new(
SERVICE_NAME,
"tracing-jaeger",
)])),
)
.build())
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
let tracer_provider = init_tracer_provider().expect("Failed to initialize tracer provider.");
global::set_tracer_provider(tracer_provider.clone());
let tracer = global::tracer("tracing-jaeger");
tracer.in_span("main-operation", |cx| {
let span = cx.span();
span.set_attribute(KeyValue::new("my-attribute", "my-value"));
span.add_event(
"Main span event".to_string(),
vec![KeyValue::new("foo", "1")],
);
tracer.in_span("child-operation...", |cx| {
let span = cx.span();
span.add_event("Sub span event", vec![KeyValue::new("bar", "1")]);
});
});
shutdown_tracer_provider();
Ok(())
}

View File

@ -2,6 +2,32 @@
## vNext
## 0.30.0
Released 2025-May-23
- Updated `opentelemetry` and `opentelemetry-semantic-conventions` dependencies to version 0.30.0.
## 0.29.0
Released 2025-Mar-21
- Similar to the `opentelemetry-appender-tracing` fix [2658](https://github.com/open-telemetry/opentelemetry-rust/issues/2658)
InstrumentationScope(Logger) used by the appender now uses an empty ("") named Logger.
Previously, a Logger with name and version of the crate was used.
Receivers (processors, exporters) are expected to use `LogRecord.target()` as scope name.
This is already done in OTLP Exporters, so this change should be transparent to most users.
- Update `opentelemetry` dependency version to 0.29.
- Update `opentelemetry-semantic-conventions` dependency version to 0.29.
## 0.28.0
Released 2025-Feb-10
- Update `opentelemetry` dependency version to 0.28.
- Update `opentelemetry-semantic-conventions` dependency version to 0.28.
- Bump msrv to 1.75.0.
## 0.27.0
Released 2024-Nov-11

View File

@ -1,22 +1,26 @@
[package]
name = "opentelemetry-appender-log"
version = "0.27.0"
version = "0.30.0"
description = "An OpenTelemetry appender for the log crate"
homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-appender-log"
repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-appender-log"
readme = "README.md"
keywords = ["opentelemetry", "log", "logs"]
license = "Apache-2.0"
rust-version = "1.70"
rust-version = "1.75.0"
edition = "2021"
autobenches = false
[lib]
bench = false
[dependencies]
opentelemetry = { version = "0.27", path = "../opentelemetry", features = [
opentelemetry = { version = "0.30", path = "../opentelemetry", features = [
"logs",
] }
log = { workspace = true, features = ["kv", "std"] }
serde = { workspace = true, optional = true, features = ["std"] }
opentelemetry-semantic-conventions = { path = "../opentelemetry-semantic-conventions", optional = true, features = [
opentelemetry-semantic-conventions = { version = "0.30", path = "../opentelemetry-semantic-conventions", optional = true, features = [
"semconv_experimental",
] }
@ -30,7 +34,10 @@ opentelemetry_sdk = { path = "../opentelemetry-sdk", features = [
"testing",
"spec_unstable_logs_enabled",
] }
opentelemetry-stdout = { path = "../opentelemetry-stdout", features = ["logs"] }
opentelemetry-stdout = { workspace = true, features = ["logs"] }
log = { workspace = true, features = ["kv_serde"] }
tokio = { workspace = true }
serde = { workspace = true, features = ["std", "derive"] }
[lints]
workspace = true

View File

@ -8,7 +8,7 @@ This crate contains a [Log Appender](https://github.com/open-telemetry/opentelem
[![Crates.io: opentelemetry-appender-log](https://img.shields.io/crates/v/opentelemetry-appender-log.svg)](https://crates.io/crates/opentelemetry-appender-log)
[![Documentation](https://docs.rs/opentelemetry-appender-log/badge.svg)](https://docs.rs/opentelemetry-appender-log)
[![LICENSE](https://img.shields.io/crates/l/opentelemetry-appender-log)](./LICENSE)
[![LICENSE](https://img.shields.io/crates/l/opentelemetry-appender-log)](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-appender-log/LICENSE)
[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023)
@ -26,3 +26,7 @@ management, and export of telemetry. A major goal of OpenTelemetry is that you
can easily instrument your applications or systems, no matter their language,
infrastructure, or runtime environment. Crucially, the storage and visualization
of telemetry is intentionally left to other tools.
## Release Notes
You can find the release notes (changelog) [here](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-appender-log/CHANGELOG.md).

View File

@ -6,8 +6,7 @@
///
use log::{error, info, warn, Level};
use opentelemetry_appender_log::OpenTelemetryLogBridge;
use opentelemetry_sdk::logs::{BatchLogProcessor, LoggerProvider};
use opentelemetry_sdk::runtime;
use opentelemetry_sdk::logs::{BatchLogProcessor, SdkLoggerProvider};
use opentelemetry_stdout::LogExporter;
#[tokio::main]
@ -15,8 +14,8 @@ async fn main() {
//Create an exporter that writes to stdout
let exporter = LogExporter::default();
//Create a LoggerProvider and register the exporter
let logger_provider = LoggerProvider::builder()
.with_log_processor(BatchLogProcessor::builder(exporter, runtime::Tokio).build())
let logger_provider = SdkLoggerProvider::builder()
.with_log_processor(BatchLogProcessor::builder(exporter).build())
.build();
// Setup Log Appender for the log crate.

View File

@ -2,6 +2,8 @@
//!
//! This library implements a log appender for the [`log`] crate using the [Logs Bridge API].
//!
//! *[Supported Rust Versions](#supported-rust-versions)*
//!
//! # Getting Started
//!
//! The bridge requires configuration on both the `log` and OpenTelemetry sides.
@ -10,12 +12,12 @@
//!
//! ```
//! # #[tokio::main] async fn main() {
//! # use opentelemetry_sdk::logs::{BatchLogProcessor, LoggerProvider};
//! # use opentelemetry_sdk::logs::{BatchLogProcessor, SdkLoggerProvider};
//! # use opentelemetry_sdk::runtime;
//! let exporter = opentelemetry_stdout::LogExporter::default();
//!
//! let logger_provider = LoggerProvider::builder()
//! .with_log_processor(BatchLogProcessor::builder(exporter, runtime::Tokio).build())
//! let logger_provider = SdkLoggerProvider::builder()
//! .with_log_processor(BatchLogProcessor::builder(exporter).build())
//! .build();
//! # }
//! ```
@ -24,12 +26,12 @@
//!
//! ```
//! # #[tokio::main] async fn main() {
//! # use opentelemetry_sdk::logs::{BatchLogProcessor, LoggerProvider};
//! # use opentelemetry_sdk::logs::{BatchLogProcessor, SdkLoggerProvider};
//! # use opentelemetry_sdk::runtime;
//! # use opentelemetry_appender_log::OpenTelemetryLogBridge;
//! # let exporter = opentelemetry_stdout::LogExporter::default();
//! # let logger_provider = LoggerProvider::builder()
//! # .with_log_processor(BatchLogProcessor::builder(exporter, runtime::Tokio).build())
//! # let logger_provider = SdkLoggerProvider::builder()
//! # .with_log_processor(BatchLogProcessor::builder(exporter).build())
//! # .build();
//! let otel_log_appender = OpenTelemetryLogBridge::new(&logger_provider);
//!
@ -93,14 +95,30 @@
//! - `with-serde`: Support complex values as attributes without stringifying them.
//!
//! [Logs Bridge API]: https://opentelemetry.io/docs/specs/otel/logs/bridge-api/
//!
//! ## Supported Rust Versions
//!
//! OpenTelemetry is built against the latest stable release. The minimum
//! supported version is 1.70. The current OpenTelemetry version is not
//! guaranteed to build on Rust versions earlier than the minimum supported
//! version.
//!
//! The current stable Rust compiler and the three most recent minor versions
//! before it will always be supported. For example, if the current stable
//! compiler version is 1.49, the minimum supported version will not be
//! increased past 1.46, three minor versions prior. Increasing the minimum
//! supported compiler version is not considered a semver breaking change as
//! long as doing so complies with this policy.
use log::{Level, Metadata, Record};
use opentelemetry::{
logs::{AnyValue, LogRecord, Logger, LoggerProvider, Severity},
InstrumentationScope, Key,
Key,
};
#[cfg(feature = "experimental_metadata_attributes")]
use opentelemetry_semantic_conventions::attribute::{CODE_FILEPATH, CODE_LINENO, CODE_NAMESPACE};
use opentelemetry_semantic_conventions::attribute::{
CODE_FILE_PATH, CODE_FUNCTION_NAME, CODE_LINE_NUMBER,
};
pub struct OpenTelemetryLogBridge<P, L>
where
@ -118,9 +136,11 @@ where
{
fn enabled(&self, _metadata: &Metadata) -> bool {
#[cfg(feature = "spec_unstable_logs_enabled")]
return self
.logger
.event_enabled(severity_of_level(_metadata.level()), _metadata.target());
return self.logger.event_enabled(
severity_of_level(_metadata.level()),
_metadata.target(),
None,
);
#[cfg(not(feature = "spec_unstable_logs_enabled"))]
true
}
@ -136,18 +156,18 @@ where
{
if let Some(filepath) = record.file() {
log_record.add_attribute(
Key::new(CODE_FILEPATH),
Key::new(CODE_FILE_PATH),
AnyValue::from(filepath.to_string()),
);
}
if let Some(line_no) = record.line() {
log_record.add_attribute(Key::new(CODE_LINENO), AnyValue::from(line_no));
log_record.add_attribute(Key::new(CODE_LINE_NUMBER), AnyValue::from(line_no));
}
if let Some(module) = record.module_path() {
log_record.add_attribute(
Key::new(CODE_NAMESPACE),
Key::new(CODE_FUNCTION_NAME),
AnyValue::from(module.to_string()),
);
}
@ -169,12 +189,13 @@ where
L: Logger + Send + Sync,
{
pub fn new(provider: &P) -> Self {
let scope = InstrumentationScope::builder("opentelemetry-log-appender")
.with_version(env!("CARGO_PKG_VERSION"))
.build();
OpenTelemetryLogBridge {
logger: provider.logger_with_scope(scope),
Self {
// Using empty scope name.
// The name/version of this library itself can be added
// as a Scope attribute once a semantic convention is
// defined for the same.
// See https://github.com/open-telemetry/semantic-conventions/issues/1550
logger: provider.logger(""),
_phantom: Default::default(),
}
}
@ -223,7 +244,7 @@ mod any_value {
pub(crate) fn serialize(value: log::kv::Value) -> Option<AnyValue> {
struct ValueVisitor(Option<AnyValue>);
impl<'kvs> log::kv::VisitValue<'kvs> for ValueVisitor {
impl log::kv::VisitValue<'_> for ValueVisitor {
fn visit_any(&mut self, value: log::kv::Value) -> Result<(), log::kv::Error> {
self.0 = Some(AnyValue::String(StringValue::from(value.to_string())));
@ -666,7 +687,7 @@ mod any_value {
) -> Result<(), Self::Error> {
let key = match key.serialize(ValueSerializer)? {
Some(AnyValue::String(key)) => Key::from(String::from(key)),
key => Key::from(format!("{:?}", key)),
key => Key::from(format!("{key:?}")),
};
self.key = Some(key);
@ -753,7 +774,7 @@ mod tests {
use super::OpenTelemetryLogBridge;
use opentelemetry::{logs::AnyValue, StringValue};
use opentelemetry_sdk::{logs::LoggerProvider, testing::logs::InMemoryLogExporter};
use opentelemetry_sdk::{logs::InMemoryLogExporter, logs::SdkLoggerProvider};
use log::Log;
@ -761,7 +782,7 @@ mod tests {
fn logbridge_with_default_metadata_is_enabled() {
let exporter = InMemoryLogExporter::default();
let logger_provider = LoggerProvider::builder()
let logger_provider = SdkLoggerProvider::builder()
.with_simple_exporter(exporter)
.build();
@ -780,7 +801,7 @@ mod tests {
fn logbridge_with_record_can_log() {
let exporter = InMemoryLogExporter::default();
let logger_provider = LoggerProvider::builder()
let logger_provider = SdkLoggerProvider::builder()
.with_simple_exporter(exporter.clone())
.build();
@ -830,11 +851,11 @@ mod tests {
assert_eq!(logs.len(), 5);
for log in logs {
let body: String = match log.record.body.as_ref().unwrap() {
let body: String = match log.record.body().unwrap() {
super::AnyValue::String(s) => s.to_string(),
_ => panic!("AnyValue::String expected"),
};
assert_eq!(body, log.record.severity_text.unwrap());
assert_eq!(body, log.record.severity_text().unwrap());
}
}
@ -894,7 +915,7 @@ mod tests {
let exporter = InMemoryLogExporter::default();
let logger_provider = LoggerProvider::builder()
let logger_provider = SdkLoggerProvider::builder()
.with_simple_exporter(exporter.clone())
.build();
@ -965,9 +986,13 @@ mod tests {
);
let logs = exporter.get_emitted_logs().unwrap();
assert_eq!(logs.len(), 1);
let log = logs.first().unwrap();
assert_eq!(log.instrumentation.name(), "");
let get = |needle: &str| -> Option<AnyValue> {
logs[0].record.attributes_iter().find_map(|(k, v)| {
log.record.attributes_iter().find_map(|(k, v)| {
if k.as_str() == needle {
Some(v.clone())
} else {
@ -1155,12 +1180,12 @@ mod tests {
#[test]
fn logbridge_code_attributes() {
use opentelemetry_semantic_conventions::attribute::{
CODE_FILEPATH, CODE_LINENO, CODE_NAMESPACE,
CODE_FILE_PATH, CODE_FUNCTION_NAME, CODE_LINE_NUMBER,
};
let exporter = InMemoryLogExporter::default();
let logger_provider = LoggerProvider::builder()
let logger_provider = SdkLoggerProvider::builder()
.with_simple_exporter(exporter.clone())
.build();
@ -1177,9 +1202,13 @@ mod tests {
);
let logs = exporter.get_emitted_logs().unwrap();
assert_eq!(logs.len(), 1);
let log = logs.first().unwrap();
assert_eq!(log.instrumentation.name(), "");
let get = |needle: &str| -> Option<AnyValue> {
logs[0].record.attributes_iter().find_map(|(k, v)| {
log.record.attributes_iter().find_map(|(k, v)| {
if k.as_str() == needle {
Some(v.clone())
} else {
@ -1190,20 +1219,20 @@ mod tests {
assert_eq!(
Some(AnyValue::String(StringValue::from("src/main.rs"))),
get(CODE_FILEPATH)
get(CODE_FILE_PATH)
);
assert_eq!(
Some(AnyValue::String(StringValue::from("service"))),
get(CODE_NAMESPACE)
get(CODE_FUNCTION_NAME)
);
assert_eq!(Some(AnyValue::Int(101)), get(CODE_LINENO));
assert_eq!(Some(AnyValue::Int(101)), get(CODE_LINE_NUMBER));
}
#[test]
fn test_flush() {
let exporter = InMemoryLogExporter::default();
let logger_provider = LoggerProvider::builder()
let logger_provider = SdkLoggerProvider::builder()
.with_simple_exporter(exporter)
.build();

View File

@ -2,6 +2,96 @@
## vNext
## 0.30.1
Released 2025-June-05
- Bump `tracing-opentelemetry` to 0.31
## 0.30.0
Released 2025-May-23
- Updated `opentelemetry` dependency to version 0.30.0.
## 0.29.1
Released 2025-Mar-24
- Bump `tracing-opentelemetry` to 0.30
## 0.29.0
Released 2025-Mar-21
Fixes [1682](https://github.com/open-telemetry/opentelemetry-rust/issues/1682).
"spec_unstable_logs_enabled" feature now do not suppress logs for other layers.
The special treatment of the "message" field has been extended when recording
string values. With this change, when a log is emitted with a field named
"message" (and string value), its value is directly assigned to the LogRecords
body rather than being stored as an attribute named "message". This offers a
slight performance improvement over previous.
For example, the below will now produce LogRecord with the message value
populated as LogRecord's body:
```rust
error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io", message = "This is an example message");
```
Previously, Body was only populated when the below style was used.
```rust
error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io", "This is an example message");
```
This style, while slightly slower, should still be used when the value is not a
simple string, but require format arguments as in the below example.
```rust
error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io", "This is an example message with format arguments {} and {}", "foo", "bar");
```
Fixes [2658](https://github.com/open-telemetry/opentelemetry-rust/issues/2658)
InstrumentationScope(Logger) used by the appender now uses an empty ("") named
Logger. Previously, a Logger with name and version of the crate was used.
Receivers (processors, exporters) are expected to use `LogRecord.target()` as
scope name. This is already done in OTLP Exporters, so this change should be
transparent to most users.
- Passes event name to the `event_enabled` method on the `Logger`. This allows
implementations (SDK, processor, exporters) to leverage this additional
information to determine if an event is enabled.
- `u64`, `i128`, `u128` and `usize` values are stored as `opentelemetry::logs::AnyValue::Int`
when conversion is feasible. Otherwise stored as
`opentelemetry::logs::AnyValue::String`. This avoids unnecessary string
allocation when values can be represented in their original types.
- Byte arrays are stored as `opentelemetry::logs::AnyValue::Bytes` instead
of string.
- `Error` fields are reported using attribute named "exception.message". For
example, the below will now report an attribute named "exception.message",
instead of previously reporting the user provided attribute "error".
`error!(....error = &OTelSdkError::AlreadyShutdown as &dyn std::error::Error...)`
- perf - small perf improvement by avoiding string allocation of `target`
- Update `opentelemetry` dependency version to 0.29.
## 0.28.1
Released 2025-Feb-12
- New *experimental* feature to use trace_id & span_id from spans created through the [tracing](https://crates.io/crates/tracing) crate (experimental_use_tracing_span_context) [#2438](https://github.com/open-telemetry/opentelemetry-rust/pull/2438)
## 0.28.0
Released 2025-Feb-10
- Update `opentelemetry` dependency version to 0.28.
- Bump msrv to 1.75.0.
## 0.27.0
Released 2024-Nov-11
@ -12,6 +102,7 @@ Released 2024-Nov-11
- **Breaking** [2291](https://github.com/open-telemetry/opentelemetry-rust/pull/2291) Rename `logs_level_enabled flag` to `spec_unstable_logs_enabled`. Please enable this updated flag if the feature is needed. This flag will be removed once the feature is stabilized in the specifications.
## v0.26.0
Released 2024-Sep-30
- Update `opentelemetry` dependency version to 0.26
@ -30,7 +121,7 @@ Released 2024-Sep-30
Exporters might use the target to override the instrumentation scope, which previously contained "opentelemetry-appender-tracing".
- **Breaking** [1928](https://github.com/open-telemetry/opentelemetry-rust/pull/1928) Insert tracing event name into LogRecord::event_name instead of attributes.
- If using a custom exporter, then they must serialize this field directly from LogRecord::event_name instead of iterating over the attributes. OTLP Exporter is modified to handle this.
- If using a custom exporter, then they must serialize this field directly from LogRecord::event_name instead of iterating over the attributes. OTLP Exporter is modified to handle this.
- Update `opentelemetry` dependency version to 0.24
## v0.4.0

View File

@ -1,6 +1,6 @@
[package]
name = "opentelemetry-appender-tracing"
version = "0.27.0"
version = "0.30.1"
edition = "2021"
description = "An OpenTelemetry log appender for the tracing crate"
homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-appender-tracing"
@ -8,35 +8,49 @@ repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/ope
readme = "README.md"
keywords = ["opentelemetry", "log", "logs", "tracing"]
license = "Apache-2.0"
rust-version = "1.70"
rust-version = "1.75.0"
autobenches = false
[dependencies]
log = { workspace = true, optional = true }
opentelemetry = { version = "0.27", path = "../opentelemetry", features = ["logs"] }
opentelemetry = { version = "0.30", path = "../opentelemetry", features = ["logs"] }
tracing = { workspace = true, features = ["std"]}
tracing-core = { workspace = true }
tracing-log = { version = "0.2", optional = true }
tracing-log = { workspace = true, optional = true }
tracing-subscriber = { workspace = true, features = ["registry", "std"] }
tracing-opentelemetry = { workspace = true, optional = true }
[dev-dependencies]
log = { workspace = true }
opentelemetry-stdout = { path = "../opentelemetry-stdout", features = ["logs"] }
opentelemetry-stdout = { workspace = true, features = ["logs"] }
opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["logs", "testing"] }
tracing-subscriber = { workspace = true, features = ["registry", "std", "env-filter"] }
tracing-log = "0.2"
async-trait = { workspace = true }
tracing = { workspace = true, features = ["std"]}
tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std", "fmt"] }
tracing-log = { workspace = true }
criterion = { workspace = true }
tokio = { workspace = true, features = ["full"]}
[target.'cfg(not(target_os = "windows"))'.dev-dependencies]
pprof = { version = "0.13", features = ["flamegraph", "criterion"] }
pprof = { version = "0.14", features = ["flamegraph", "criterion"] }
[features]
default = []
experimental_metadata_attributes = ["dep:tracing-log"]
spec_unstable_logs_enabled = ["opentelemetry/spec_unstable_logs_enabled"]
experimental_use_tracing_span_context = ["tracing-opentelemetry"]
[[bench]]
name = "logs"
harness = false
required-features = ["spec_unstable_logs_enabled"]
[[bench]]
name = "log-attributes"
harness = false
[lib]
bench = false
[lints]
workspace = true

View File

@ -14,7 +14,7 @@ traces.
[![Crates.io: opentelemetry-appender-tracing](https://img.shields.io/crates/v/opentelemetry-appender-tracing.svg)](https://crates.io/crates/opentelemetry-appender-tracing)
[![Documentation](https://docs.rs/opentelemetry-appender-tracing/badge.svg)](https://docs.rs/opentelemetry-appender-tracing)
[![LICENSE](https://img.shields.io/crates/l/opentelemetry-appender-tracing)](./LICENSE)
[![LICENSE](https://img.shields.io/crates/l/opentelemetry-appender-tracing)](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-appender-tracing/LICENSE)
[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023)
@ -32,3 +32,22 @@ management, and export of telemetry. A major goal of OpenTelemetry is that you
can easily instrument your applications or systems, no matter their language,
infrastructure, or runtime environment. Crucially, the storage and visualization
of telemetry is intentionally left to other tools.
*[Supported Rust Versions](#supported-rust-versions)*
## Release Notes
You can find the release notes (changelog) [here](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-appender-tracing/CHANGELOG.md).
## Supported Rust Versions
OpenTelemetry is built against the latest stable release. The minimum supported
version is 1.75.0. The current OpenTelemetry version is not guaranteed to build
on Rust versions earlier than the minimum supported version.
The current stable Rust compiler and the three most recent minor versions
before it will always be supported. For example, if the current stable compiler
version is 1.49, the minimum supported version will not be increased past 1.46,
three minor versions prior. Increasing the minimum supported compiler version
is not considered a semver breaking change as long as doing so complies with
this policy.

View File

@ -0,0 +1,273 @@
/*
// Run this benchmark with:
// cargo bench --bench log-attributes
// Adding results in comments for a quick reference.
// Apple M4 Pro
// Total Number of Cores: 14 (10 performance and 4 efficiency)
| Test | Average time | Increment |
|----------------------|--------------|-----------|
| otel_0_attributes | 72 ns | - |
| otel_1_attributes | 117 ns | +45 ns |
| otel_2_attributes | 155 ns | +38 ns |
| otel_3_attributes | 196 ns | +41 ns |
| otel_4_attributes | 240 ns | +44 ns |
| otel_5_attributes | 278 ns | +38 ns |
| otel_6_attributes | 346 ns | +68 ns | // Array is full. 6th attribute causes vec! to be allocated
| otel_7_attributes | 390 ns | +44 ns |
| otel_8_attributes | 431 ns | +41 ns |
| otel_9_attributes | 480 ns | +49 ns |
| otel_10_attributes | 519 ns | +39 ns |
| otel_11_attributes | 625 ns | +106 ns | // vec! initial capacity is 5. 11th attribute causes vec! to be reallocated
| otel_12_attributes | 676 ns | +51 ns |
*/
use criterion::{criterion_group, criterion_main, Criterion};
use opentelemetry::InstrumentationScope;
use opentelemetry_appender_tracing::layer as tracing_layer;
use opentelemetry_sdk::error::OTelSdkResult;
use opentelemetry_sdk::logs::{LogProcessor, SdkLogRecord, SdkLoggerProvider};
use opentelemetry_sdk::Resource;
#[cfg(not(target_os = "windows"))]
use pprof::criterion::{Output, PProfProfiler};
use tracing::error;
use tracing_subscriber::prelude::*;
use tracing_subscriber::Registry;
#[derive(Debug)]
struct NoopProcessor;
impl LogProcessor for NoopProcessor {
fn emit(&self, _: &mut SdkLogRecord, _: &InstrumentationScope) {}
fn force_flush(&self) -> OTelSdkResult {
Ok(())
}
}
/// Creates a single benchmark for a specific number of attributes
fn create_benchmark(c: &mut Criterion, num_attributes: usize) {
let provider = SdkLoggerProvider::builder()
.with_resource(
Resource::builder_empty()
.with_service_name("benchmark")
.build(),
)
.with_log_processor(NoopProcessor)
.build();
let ot_layer = tracing_layer::OpenTelemetryTracingBridge::new(&provider);
let subscriber = Registry::default().with(ot_layer);
tracing::subscriber::with_default(subscriber, || {
c.bench_function(&format!("otel_{num_attributes}_attributes"), |b| {
b.iter(|| {
// Dynamically generate the error! macro call based on the number of attributes
match num_attributes {
0 => {
error!(
name : "CheckoutFailed",
message = "Unable to process checkout."
);
}
1 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
message = "Unable to process checkout."
);
}
2 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
message = "Unable to process checkout."
);
}
3 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
field3 = "field3",
message = "Unable to process checkout."
);
}
4 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
field3 = "field3",
field4 = "field4",
message = "Unable to process checkout."
);
}
5 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
field3 = "field3",
field4 = "field4",
field5 = "field5",
message = "Unable to process checkout."
);
}
6 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
field3 = "field3",
field4 = "field4",
field5 = "field5",
field6 = "field6",
message = "Unable to process checkout."
);
}
7 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
field3 = "field3",
field4 = "field4",
field5 = "field5",
field6 = "field6",
field7 = "field7",
message = "Unable to process checkout."
);
}
8 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
field3 = "field3",
field4 = "field4",
field5 = "field5",
field6 = "field6",
field7 = "field7",
field8 = "field8",
message = "Unable to process checkout."
);
}
9 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
field3 = "field3",
field4 = "field4",
field5 = "field5",
field6 = "field6",
field7 = "field7",
field8 = "field8",
field9 = "field9",
message = "Unable to process checkout."
);
}
10 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
field3 = "field3",
field4 = "field4",
field5 = "field5",
field6 = "field6",
field7 = "field7",
field8 = "field8",
field9 = "field9",
field10 = "field10",
message = "Unable to process checkout."
);
}
11 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
field3 = "field3",
field4 = "field4",
field5 = "field5",
field6 = "field6",
field7 = "field7",
field8 = "field8",
field9 = "field9",
field10 = "field10",
field11 = "field11",
message = "Unable to process checkout."
);
}
12 => {
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
field3 = "field3",
field4 = "field4",
field5 = "field5",
field6 = "field6",
field7 = "field7",
field8 = "field8",
field9 = "field9",
field10 = "field10",
field11 = "field11",
field12 = "field12",
message = "Unable to process checkout."
);
}
_ => {
// Fall back to 10 attributes for any higher number
error!(
name : "CheckoutFailed",
field1 = "field1",
field2 = "field2",
field3 = "field3",
field4 = "field4",
field5 = "field5",
field6 = "field6",
field7 = "field7",
field8 = "field8",
field9 = "field9",
field10 = "field10",
message = "Unable to process checkout."
);
}
}
});
});
});
}
fn criterion_benchmark(c: &mut Criterion) {
create_benchmark(c, 2);
// Run benchmarks for 0 to 12 attributes
// for num_attributes in 0..=12 {
// create_benchmark(c, 2);
// }
}
#[cfg(not(target_os = "windows"))]
criterion_group! {
name = benches;
config = Criterion::default()
.warm_up_time(std::time::Duration::from_secs(1))
.measurement_time(std::time::Duration::from_secs(2))
.with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = criterion_benchmark
}
#[cfg(target_os = "windows")]
criterion_group! {
name = benches;
config = Criterion::default()
.warm_up_time(std::time::Duration::from_secs(1))
.measurement_time(std::time::Duration::from_secs(2));
targets = criterion_benchmark
}
criterion_main!(benches);

View File

@ -10,70 +10,57 @@
| noop_layer_disabled | 12 ns |
| noop_layer_enabled | 25 ns |
| ot_layer_disabled | 19 ns |
| ot_layer_enabled | 196 ns |
| ot_layer_enabled | 155 ns |
Hardware: Apple M4 Pro
Total Number of Cores: 14 (10 performance and 4 efficiency)
| Test | Average time|
|-----------------------------|-------------|
| log_no_subscriber | 285 ps |
| noop_layer_disabled | 8 ns |
| noop_layer_enabled | 14 ns |
| ot_layer_disabled | 12 ns |
| ot_layer_enabled | 130 ns |
*/
use async_trait::async_trait;
use criterion::{criterion_group, criterion_main, Criterion};
use opentelemetry::{InstrumentationScope, KeyValue};
use opentelemetry::InstrumentationScope;
use opentelemetry_appender_tracing::layer as tracing_layer;
use opentelemetry_sdk::export::logs::{LogBatch, LogExporter};
use opentelemetry_sdk::logs::LogResult;
use opentelemetry_sdk::logs::{LogProcessor, LogRecord, LoggerProvider};
use opentelemetry_sdk::error::OTelSdkResult;
use opentelemetry_sdk::logs::{LogProcessor, SdkLogRecord, SdkLoggerProvider};
use opentelemetry_sdk::Resource;
#[cfg(not(target_os = "windows"))]
use pprof::criterion::{Output, PProfProfiler};
use tracing::error;
use tracing_subscriber::prelude::*;
use tracing_subscriber::Layer;
use tracing_subscriber::Registry;
#[derive(Debug, Clone)]
struct NoopExporter {
#[derive(Debug)]
struct NoopProcessor {
enabled: bool,
}
#[async_trait]
impl LogExporter for NoopExporter {
async fn export(&mut self, _: LogBatch<'_>) -> LogResult<()> {
LogResult::Ok(())
}
fn event_enabled(&self, _: opentelemetry::logs::Severity, _: &str, _: &str) -> bool {
self.enabled
}
}
#[derive(Debug)]
struct NoopProcessor {
exporter: Box<dyn LogExporter>,
}
impl NoopProcessor {
fn new(exporter: Box<dyn LogExporter>) -> Self {
Self { exporter }
fn new(enabled: bool) -> Self {
Self { enabled }
}
}
impl LogProcessor for NoopProcessor {
fn emit(&self, _: &mut LogRecord, _: &InstrumentationScope) {
// no-op
}
fn emit(&self, _: &mut SdkLogRecord, _: &InstrumentationScope) {}
fn force_flush(&self) -> LogResult<()> {
Ok(())
}
fn shutdown(&self) -> LogResult<()> {
fn force_flush(&self) -> OTelSdkResult {
Ok(())
}
fn event_enabled(
&self,
level: opentelemetry::logs::Severity,
target: &str,
name: &str,
_level: opentelemetry::logs::Severity,
_target: &str,
_name: Option<&str>,
) -> bool {
self.exporter.event_enabled(level, target, name)
self.enabled
}
}
@ -113,7 +100,7 @@ fn benchmark_no_subscriber(c: &mut Criterion) {
c.bench_function("log_no_subscriber", |b| {
b.iter(|| {
error!(
name = "CheckoutFailed",
name : "CheckoutFailed",
book_id = "12345",
book_title = "Rust Programming Adventures",
message = "Unable to process checkout."
@ -123,13 +110,13 @@ fn benchmark_no_subscriber(c: &mut Criterion) {
}
fn benchmark_with_ot_layer(c: &mut Criterion, enabled: bool, bench_name: &str) {
let exporter = NoopExporter { enabled };
let processor = NoopProcessor::new(Box::new(exporter));
let provider = LoggerProvider::builder()
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
"benchmark",
)]))
let processor = NoopProcessor::new(enabled);
let provider = SdkLoggerProvider::builder()
.with_resource(
Resource::builder_empty()
.with_service_name("benchmark")
.build(),
)
.with_log_processor(processor)
.build();
let ot_layer = tracing_layer::OpenTelemetryTracingBridge::new(&provider);
@ -139,7 +126,7 @@ fn benchmark_with_ot_layer(c: &mut Criterion, enabled: bool, bench_name: &str) {
c.bench_function(bench_name, |b| {
b.iter(|| {
error!(
name = "CheckoutFailed",
name : "CheckoutFailed",
book_id = "12345",
book_title = "Rust Programming Adventures",
message = "Unable to process checkout."
@ -156,10 +143,10 @@ fn benchmark_with_noop_layer(c: &mut Criterion, enabled: bool, bench_name: &str)
c.bench_function(bench_name, |b| {
b.iter(|| {
error!(
name = "CheckoutFailed",
name : "CheckoutFailed",
book_id = "12345",
book_title = "Rust Programming Adventures",
"Unable to process checkout."
message = "Unable to process checkout."
);
});
});
@ -177,13 +164,18 @@ fn criterion_benchmark(c: &mut Criterion) {
#[cfg(not(target_os = "windows"))]
criterion_group! {
name = benches;
config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
config = Criterion::default()
.warm_up_time(std::time::Duration::from_secs(1))
.measurement_time(std::time::Duration::from_secs(2))
.with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = criterion_benchmark
}
#[cfg(target_os = "windows")]
criterion_group! {
name = benches;
config = Criterion::default();
config = Criterion::default()
.warm_up_time(std::time::Duration::from_secs(1))
.measurement_time(std::time::Duration::from_secs(2));
targets = criterion_benchmark
}
criterion_main!(benches);

View File

@ -1,22 +1,54 @@
//! run with `$ cargo run --example basic
use opentelemetry::KeyValue;
use opentelemetry_appender_tracing::layer;
use opentelemetry_sdk::{logs::LoggerProvider, Resource};
use opentelemetry_sdk::{logs::SdkLoggerProvider, Resource};
use tracing::error;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{prelude::*, EnvFilter};
fn main() {
let exporter = opentelemetry_stdout::LogExporter::default();
let provider: LoggerProvider = LoggerProvider::builder()
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
"log-appender-tracing-example",
)]))
let provider: SdkLoggerProvider = SdkLoggerProvider::builder()
.with_resource(
Resource::builder()
.with_service_name("log-appender-tracing-example")
.build(),
)
.with_simple_exporter(exporter)
.build();
let layer = layer::OpenTelemetryTracingBridge::new(&provider);
tracing_subscriber::registry().with(layer).init();
// To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal
// logging is properly suppressed. However, logs emitted by external components
// (such as reqwest, tonic, etc.) are not suppressed as they do not propagate
// OpenTelemetry context. Until this issue is addressed
// (https://github.com/open-telemetry/opentelemetry-rust/issues/2877),
// filtering like this is the best way to suppress such logs.
//
// The filter levels are set as follows:
// - Allow `info` level and above by default.
// - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`.
//
// Note: This filtering will also drop logs from these components even when
// they are used outside of the OTLP Exporter.
let filter_otel = EnvFilter::new("info")
.add_directive("hyper=off".parse().unwrap())
.add_directive("opentelemetry=off".parse().unwrap())
.add_directive("tonic=off".parse().unwrap())
.add_directive("h2=off".parse().unwrap())
.add_directive("reqwest=off".parse().unwrap());
let otel_layer = layer::OpenTelemetryTracingBridge::new(&provider).with_filter(filter_otel);
// Create a new tracing::Fmt layer to print the logs to stdout. It has a
// default filter of `info` level and above, and `debug` and above for logs
// from OpenTelemetry crates. The filter levels can be customized as needed.
let filter_fmt = EnvFilter::new("info").add_directive("opentelemetry=debug".parse().unwrap());
let fmt_layer = tracing_subscriber::fmt::layer()
.with_thread_names(true)
.with_filter(filter_fmt);
tracing_subscriber::registry()
.with(otel_layer)
.with(fmt_layer)
.init();
error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io", message = "This is an example message");
let _ = provider.shutdown();

View File

@ -1,16 +1,13 @@
use opentelemetry::{
logs::{AnyValue, LogRecord, Logger, LoggerProvider, Severity},
InstrumentationScope, Key,
Key,
};
use std::borrow::Cow;
use tracing_core::Level;
#[cfg(feature = "experimental_metadata_attributes")]
use tracing_core::Metadata;
#[cfg(feature = "experimental_metadata_attributes")]
use tracing_log::NormalizeEvent;
use tracing_subscriber::Layer;
const INSTRUMENTATION_LIBRARY_NAME: &str = "opentelemetry-appender-tracing";
use tracing_subscriber::{registry::LookupSpan, Layer};
/// Visitor to record the fields from the event record.
struct EventVisitor<'a, LR: LogRecord> {
@ -69,34 +66,50 @@ impl<'a, LR: LogRecord> EventVisitor<'a, LR> {
}
}
impl<'a, LR: LogRecord> tracing::field::Visit for EventVisitor<'a, LR> {
impl<LR: LogRecord> tracing::field::Visit for EventVisitor<'_, LR> {
fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) {
#[cfg(feature = "experimental_metadata_attributes")]
if is_duplicated_metadata(field.name()) {
return;
}
if field.name() == "message" {
self.log_record.set_body(format!("{:?}", value).into());
self.log_record.set_body(format!("{value:?}").into());
} else {
self.log_record
.add_attribute(Key::new(field.name()), AnyValue::from(format!("{value:?}")));
}
}
fn record_error(
&mut self,
_field: &tracing_core::Field,
value: &(dyn std::error::Error + 'static),
) {
self.log_record.add_attribute(
Key::new("exception.message"),
AnyValue::from(value.to_string()),
);
// No ability to get exception.stacktrace or exception.type from the error today.
}
fn record_bytes(&mut self, field: &tracing_core::Field, value: &[u8]) {
self.log_record
.add_attribute(Key::new(field.name()), AnyValue::from(value));
}
fn record_str(&mut self, field: &tracing_core::Field, value: &str) {
#[cfg(feature = "experimental_metadata_attributes")]
if is_duplicated_metadata(field.name()) {
return;
}
//TODO: Consider special casing "message" to populate body and document
// to users to use message field for log message, to avoid going to the
// record_debug, which has dyn dispatch, string allocation and
// formatting cost.
//TODO: Fix heap allocation. Check if lifetime of &str can be used
// to optimize sync exporter scenario.
self.log_record
.add_attribute(Key::new(field.name()), AnyValue::from(value.to_owned()));
if field.name() == "message" {
self.log_record.set_body(AnyValue::from(value.to_owned()));
} else {
self.log_record
.add_attribute(Key::new(field.name()), AnyValue::from(value.to_owned()));
}
}
fn record_bool(&mut self, field: &tracing_core::Field, value: bool) {
@ -118,6 +131,50 @@ impl<'a, LR: LogRecord> tracing::field::Visit for EventVisitor<'a, LR> {
.add_attribute(Key::new(field.name()), AnyValue::from(value));
}
// TODO: We might need to do similar for record_i128,record_u128 too
// to avoid stringification, unless needed.
fn record_u64(&mut self, field: &tracing::field::Field, value: u64) {
#[cfg(feature = "experimental_metadata_attributes")]
if is_duplicated_metadata(field.name()) {
return;
}
if let Ok(signed) = i64::try_from(value) {
self.log_record
.add_attribute(Key::new(field.name()), AnyValue::from(signed));
} else {
self.log_record
.add_attribute(Key::new(field.name()), AnyValue::from(format!("{value:?}")));
}
}
fn record_i128(&mut self, field: &tracing::field::Field, value: i128) {
#[cfg(feature = "experimental_metadata_attributes")]
if is_duplicated_metadata(field.name()) {
return;
}
if let Ok(signed) = i64::try_from(value) {
self.log_record
.add_attribute(Key::new(field.name()), AnyValue::from(signed));
} else {
self.log_record
.add_attribute(Key::new(field.name()), AnyValue::from(format!("{value:?}")));
}
}
fn record_u128(&mut self, field: &tracing::field::Field, value: u128) {
#[cfg(feature = "experimental_metadata_attributes")]
if is_duplicated_metadata(field.name()) {
return;
}
if let Ok(signed) = i64::try_from(value) {
self.log_record
.add_attribute(Key::new(field.name()), AnyValue::from(signed));
} else {
self.log_record
.add_attribute(Key::new(field.name()), AnyValue::from(format!("{value:?}")));
}
}
// TODO: Remaining field types from AnyValue : Bytes, ListAny, Boolean
}
@ -136,12 +193,13 @@ where
L: Logger + Send + Sync,
{
pub fn new(provider: &P) -> Self {
let scope = InstrumentationScope::builder(INSTRUMENTATION_LIBRARY_NAME)
.with_version(Cow::Borrowed(env!("CARGO_PKG_VERSION")))
.build();
OpenTelemetryTracingBridge {
logger: provider.logger_with_scope(scope),
// Using empty scope name.
// The name/version of this library itself can be added
// as a Scope attribute, once a semantic convention is
// defined for the same.
// See https://github.com/open-telemetry/semantic-conventions/issues/1550
logger: provider.logger(""),
_phantom: Default::default(),
}
}
@ -149,7 +207,7 @@ where
impl<S, P, L> Layer<S> for OpenTelemetryTracingBridge<P, L>
where
S: tracing::Subscriber,
S: tracing::Subscriber + for<'a> LookupSpan<'a>,
P: LoggerProvider<Logger = L> + Send + Sync + 'static,
L: Logger + Send + Sync + 'static,
{
@ -158,42 +216,60 @@ where
event: &tracing::Event<'_>,
_ctx: tracing_subscriber::layer::Context<'_, S>,
) {
let metadata = event.metadata();
let severity = severity_of_level(metadata.level());
let target = metadata.target();
let name = metadata.name();
#[cfg(feature = "spec_unstable_logs_enabled")]
if !self.logger.event_enabled(severity, target, Some(name)) {
// TODO: See if we need internal logs or track the count.
return;
}
#[cfg(feature = "experimental_metadata_attributes")]
let normalized_meta = event.normalized_metadata();
#[cfg(feature = "experimental_metadata_attributes")]
let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata());
#[cfg(not(feature = "experimental_metadata_attributes"))]
let meta = event.metadata();
let mut log_record = self.logger.create_log_record();
// TODO: Fix heap allocation
log_record.set_target(meta.target().to_string());
log_record.set_event_name(meta.name());
log_record.set_severity_number(severity_of_level(meta.level()));
log_record.set_severity_text(meta.level().as_str());
log_record.set_target(target);
log_record.set_event_name(name);
log_record.set_severity_number(severity);
log_record.set_severity_text(metadata.level().as_str());
let mut visitor = EventVisitor::new(&mut log_record);
#[cfg(feature = "experimental_metadata_attributes")]
visitor.visit_experimental_metadata(meta);
// Visit fields.
event.record(&mut visitor);
#[cfg(feature = "experimental_use_tracing_span_context")]
if let Some(span) = _ctx.event_span(event) {
use opentelemetry::trace::TraceContextExt;
use tracing_opentelemetry::OtelData;
if let Some(otd) = span.extensions().get::<OtelData>() {
if let Some(span_id) = otd.builder.span_id {
let opt_trace_id = if otd.parent_cx.has_active_span() {
Some(otd.parent_cx.span().span_context().trace_id())
} else {
span.scope().last().and_then(|root_span| {
root_span
.extensions()
.get::<OtelData>()
.and_then(|otd| otd.builder.trace_id)
})
};
if let Some(trace_id) = opt_trace_id {
log_record.set_trace_context(trace_id, span_id, None);
}
}
}
}
//emit record
self.logger.emit(log_record);
}
#[cfg(feature = "spec_unstable_logs_enabled")]
fn event_enabled(
&self,
_event: &tracing_core::Event<'_>,
_ctx: tracing_subscriber::layer::Context<'_, S>,
) -> bool {
let severity = severity_of_level(_event.metadata().level());
self.logger
.event_enabled(severity, _event.metadata().target())
}
}
const fn severity_of_level(level: &Level) -> Severity {
@ -209,31 +285,27 @@ const fn severity_of_level(level: &Level) -> Severity {
#[cfg(test)]
mod tests {
use crate::layer;
use async_trait::async_trait;
use opentelemetry::logs::Severity;
use opentelemetry::trace::TracerProvider as _;
use opentelemetry::trace::TracerProvider;
use opentelemetry::trace::{TraceContextExt, TraceFlags, Tracer};
use opentelemetry::InstrumentationScope;
use opentelemetry::{logs::AnyValue, Key};
use opentelemetry_sdk::export::logs::{LogBatch, LogExporter};
use opentelemetry_sdk::logs::{LogRecord, LogResult, LoggerProvider};
use opentelemetry_sdk::testing::logs::InMemoryLogExporter;
use opentelemetry_sdk::trace;
use opentelemetry_sdk::trace::{Sampler, TracerProvider};
use tracing::{error, warn};
use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};
use opentelemetry_sdk::logs::{InMemoryLogExporter, LogProcessor};
use opentelemetry_sdk::logs::{SdkLogRecord, SdkLoggerProvider};
use opentelemetry_sdk::trace::{Sampler, SdkTracerProvider};
use tracing::error;
use tracing_subscriber::prelude::__tracing_subscriber_SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{EnvFilter, Layer};
use tracing_subscriber::Layer;
pub fn attributes_contains(log_record: &LogRecord, key: &Key, value: &AnyValue) -> bool {
pub fn attributes_contains(log_record: &SdkLogRecord, key: &Key, value: &AnyValue) -> bool {
log_record
.attributes_iter()
.any(|(k, v)| k == key && v == value)
}
fn create_tracing_subscriber(
_exporter: InMemoryLogExporter,
logger_provider: &LoggerProvider,
) -> impl tracing::Subscriber {
#[allow(impl_trait_overcaptures)] // can only be fixed with Rust 1.82+
fn create_tracing_subscriber(logger_provider: &SdkLoggerProvider) -> impl tracing::Subscriber {
let level_filter = tracing_subscriber::filter::LevelFilter::WARN; // Capture WARN and ERROR levels
let layer =
layer::OpenTelemetryTracingBridge::new(logger_provider).with_filter(level_filter); // No filter based on target, only based on log level
@ -242,87 +314,31 @@ mod tests {
}
// cargo test --features=testing
#[derive(Clone, Debug, Default)]
struct ReentrantLogExporter;
#[async_trait]
impl LogExporter for ReentrantLogExporter {
async fn export(&mut self, _batch: LogBatch<'_>) -> LogResult<()> {
// This will cause a deadlock as the export itself creates a log
// while still within the lock of the SimpleLogProcessor.
warn!(name: "my-event-name", target: "reentrant", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io");
Ok(())
}
}
#[test]
#[ignore = "See issue: https://github.com/open-telemetry/opentelemetry-rust/issues/1745"]
fn simple_processor_deadlock() {
let exporter: ReentrantLogExporter = ReentrantLogExporter;
let logger_provider = LoggerProvider::builder()
.with_simple_exporter(exporter.clone())
.build();
let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
// Setting subscriber as global as that is the only way to test this scenario.
tracing_subscriber::registry().with(layer).init();
warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io");
}
#[test]
#[ignore = "While this test runs fine, this uses global subscriber and does not play well with other tests."]
fn simple_processor_no_deadlock() {
let exporter: ReentrantLogExporter = ReentrantLogExporter;
let logger_provider = LoggerProvider::builder()
.with_simple_exporter(exporter.clone())
.build();
let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
// This filter will prevent the deadlock as the reentrant log will be
// ignored.
let filter = EnvFilter::new("debug").add_directive("reentrant=error".parse().unwrap());
// Setting subscriber as global as that is the only way to test this scenario.
tracing_subscriber::registry()
.with(filter)
.with(layer)
.init();
warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[ignore = "While this test runs fine, this uses global subscriber and does not play well with other tests."]
async fn batch_processor_no_deadlock() {
let exporter: ReentrantLogExporter = ReentrantLogExporter;
let logger_provider = LoggerProvider::builder()
.with_batch_exporter(exporter.clone(), opentelemetry_sdk::runtime::Tokio)
.build();
let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
tracing_subscriber::registry().with(layer).init();
warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io");
}
#[test]
fn tracing_appender_standalone() {
// Arrange
let exporter: InMemoryLogExporter = InMemoryLogExporter::default();
let logger_provider = LoggerProvider::builder()
let logger_provider = SdkLoggerProvider::builder()
.with_simple_exporter(exporter.clone())
.build();
let subscriber = create_tracing_subscriber(exporter.clone(), &logger_provider);
let subscriber = create_tracing_subscriber(&logger_provider);
// avoiding setting tracing subscriber as global as that does not
// play well with unit tests.
let _guard = tracing::subscriber::set_default(subscriber);
// Act
error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io");
logger_provider.force_flush();
let small_u64value: u64 = 42;
let big_u64value: u64 = u64::MAX;
let small_usizevalue: usize = 42;
let big_usizevalue: usize = usize::MAX;
let small_u128value: u128 = 42;
let big_u128value: u128 = u128::MAX;
let small_i128value: i128 = 42;
let big_i128value: i128 = i128::MAX;
error!(name: "my-event-name", target: "my-system", event_id = 20, bytes = &b"abc"[..], error = &OTelSdkError::AlreadyShutdown as &dyn std::error::Error, small_u64value, big_u64value, small_usizevalue, big_usizevalue, small_u128value, big_u128value, small_i128value, big_i128value, user_name = "otel", user_email = "otel@opentelemetry.io");
assert!(logger_provider.force_flush().is_ok());
// Assert TODO: move to helper methods
let exported_logs = exporter
@ -334,17 +350,27 @@ mod tests {
.expect("Atleast one log is expected to be present.");
// Validate common fields
assert_eq!(log.instrumentation.name(), "opentelemetry-appender-tracing");
assert_eq!(log.record.severity_number, Some(Severity::Error));
assert_eq!(log.instrumentation.name(), "");
assert_eq!(log.record.severity_number(), Some(Severity::Error));
// Validate target
assert_eq!(
log.record.target().expect("target is expected").to_string(),
"my-system"
);
// Validate event name
assert_eq!(
log.record.event_name().expect("event_name is expected"),
"my-event-name"
);
// Validate trace context is none.
assert!(log.record.trace_context.is_none());
assert!(log.record.trace_context().is_none());
// Validate attributes
#[cfg(not(feature = "experimental_metadata_attributes"))]
assert_eq!(log.record.attributes_iter().count(), 3);
assert_eq!(log.record.attributes_iter().count(), 13);
#[cfg(feature = "experimental_metadata_attributes")]
assert_eq!(log.record.attributes_iter().count(), 7);
assert_eq!(log.record.attributes_iter().count(), 17);
assert!(attributes_contains(
&log.record,
&Key::new("event_id"),
@ -360,6 +386,56 @@ mod tests {
&Key::new("user_email"),
&AnyValue::String("otel@opentelemetry.io".into())
));
assert!(attributes_contains(
&log.record,
&Key::new("exception.message"),
&AnyValue::String(OTelSdkError::AlreadyShutdown.to_string().into())
));
assert!(attributes_contains(
&log.record,
&Key::new("small_u64value"),
&AnyValue::Int(42.into())
));
assert!(attributes_contains(
&log.record,
&Key::new("big_u64value"),
&AnyValue::String(format!("{}", u64::MAX).into())
));
assert!(attributes_contains(
&log.record,
&Key::new("small_usizevalue"),
&AnyValue::Int(42.into())
));
assert!(attributes_contains(
&log.record,
&Key::new("big_usizevalue"),
&AnyValue::String(format!("{}", u64::MAX).into())
));
assert!(attributes_contains(
&log.record,
&Key::new("small_u128value"),
&AnyValue::Int(42.into())
));
assert!(attributes_contains(
&log.record,
&Key::new("big_u128value"),
&AnyValue::String(format!("{}", u128::MAX).into())
));
assert!(attributes_contains(
&log.record,
&Key::new("small_i128value"),
&AnyValue::Int(42.into())
));
assert!(attributes_contains(
&log.record,
&Key::new("big_i128value"),
&AnyValue::String(format!("{}", i128::MAX).into())
));
assert!(attributes_contains(
&log.record,
&Key::new("bytes"),
&AnyValue::Bytes(Box::new(b"abc".to_vec()))
));
#[cfg(feature = "experimental_metadata_attributes")]
{
assert!(attributes_contains(
@ -384,25 +460,58 @@ mod tests {
assert!(attributes_key.contains(&Key::new("code.lineno")));
assert!(!attributes_key.contains(&Key::new("log.target")));
}
// Test when target, eventname are not explicitly provided
exporter.reset();
error!(
event_id = 20,
user_name = "otel",
user_email = "otel@opentelemetry.io"
);
assert!(logger_provider.force_flush().is_ok());
// Assert TODO: move to helper methods
let exported_logs = exporter
.get_emitted_logs()
.expect("Logs are expected to be exported.");
assert_eq!(exported_logs.len(), 1);
let log = exported_logs
.first()
.expect("Atleast one log is expected to be present.");
// Validate target - tracing defaults to module path
assert_eq!(
log.record.target().expect("target is expected").to_string(),
"opentelemetry_appender_tracing::layer::tests"
);
// Validate event name - tracing defaults to event followed source & line number
// Assert is doing "contains" check to avoid tests failing when line number changes.
// and also account for the fact that the module path is different on different platforms.
// Ex.: The path will be different on a Windows and Linux machine.
assert!(log
.record
.event_name()
.expect("event_name is expected")
.contains("event opentelemetry-appender-tracing"),);
}
#[test]
fn tracing_appender_inside_tracing_context() {
// Arrange
let exporter: InMemoryLogExporter = InMemoryLogExporter::default();
let logger_provider = LoggerProvider::builder()
let logger_provider = SdkLoggerProvider::builder()
.with_simple_exporter(exporter.clone())
.build();
let subscriber = create_tracing_subscriber(exporter.clone(), &logger_provider);
let subscriber = create_tracing_subscriber(&logger_provider);
// avoiding setting tracing subscriber as global as that does not
// play well with unit tests.
let _guard = tracing::subscriber::set_default(subscriber);
// setup tracing as well.
let tracer_provider = TracerProvider::builder()
.with_config(trace::Config::default().with_sampler(Sampler::AlwaysOn))
let tracer_provider = SdkTracerProvider::builder()
.with_sampler(Sampler::AlwaysOn)
.build();
let tracer = tracer_provider.tracer("test-tracer");
@ -416,7 +525,7 @@ mod tests {
(trace_id, span_id)
});
logger_provider.force_flush();
assert!(logger_provider.force_flush().is_ok());
// Assert TODO: move to helper methods
let exported_logs = exporter
@ -428,26 +537,31 @@ mod tests {
.expect("Atleast one log is expected to be present.");
// validate common fields.
assert_eq!(log.instrumentation.name(), "opentelemetry-appender-tracing");
assert_eq!(log.record.severity_number, Some(Severity::Error));
assert_eq!(log.instrumentation.name(), "");
assert_eq!(log.record.severity_number(), Some(Severity::Error));
// Validate target
assert_eq!(
log.record.target().expect("target is expected").to_string(),
"my-system"
);
// Validate event name
assert_eq!(
log.record.event_name().expect("event_name is expected"),
"my-event-name"
);
// validate trace context.
assert!(log.record.trace_context.is_some());
assert!(log.record.trace_context().is_some());
assert_eq!(
log.record.trace_context.as_ref().unwrap().trace_id,
log.record.trace_context().unwrap().trace_id,
trace_id_expected
);
assert_eq!(
log.record.trace_context.as_ref().unwrap().span_id,
log.record.trace_context().unwrap().span_id,
span_id_expected
);
assert_eq!(
log.record
.trace_context
.as_ref()
.unwrap()
.trace_flags
.unwrap(),
log.record.trace_context().unwrap().trace_flags.unwrap(),
TraceFlags::SAMPLED
);
@ -497,15 +611,127 @@ mod tests {
}
}
#[cfg(feature = "experimental_use_tracing_span_context")]
#[test]
fn tracing_appender_inside_tracing_crate_context() {
use opentelemetry::{trace::SpanContext, Context, SpanId, TraceId};
use opentelemetry_sdk::trace::InMemorySpanExporterBuilder;
use tracing_opentelemetry::OpenTelemetrySpanExt;
// Arrange
let exporter: InMemoryLogExporter = InMemoryLogExporter::default();
let logger_provider = SdkLoggerProvider::builder()
.with_simple_exporter(exporter.clone())
.build();
// setup tracing layer to compare trace/span IDs against
let span_exporter = InMemorySpanExporterBuilder::new().build();
let tracer_provider = SdkTracerProvider::builder()
.with_simple_exporter(span_exporter.clone())
.build();
let tracer = tracer_provider.tracer("test-tracer");
let level_filter = tracing_subscriber::filter::LevelFilter::ERROR;
let log_layer =
layer::OpenTelemetryTracingBridge::new(&logger_provider).with_filter(level_filter);
let subscriber = tracing_subscriber::registry()
.with(log_layer)
.with(tracing_opentelemetry::layer().with_tracer(tracer));
// Avoiding global subscriber.init() as that does not play well with unit tests.
let _guard = tracing::subscriber::set_default(subscriber);
// Act
tracing::error_span!("outer-span").in_scope(|| {
error!("first-event");
tracing::error_span!("inner-span").in_scope(|| {
error!("second-event");
});
});
assert!(logger_provider.force_flush().is_ok());
let logs = exporter.get_emitted_logs().expect("No emitted logs");
assert_eq!(logs.len(), 2, "Expected 2 logs, got: {logs:?}");
let spans = span_exporter.get_finished_spans().unwrap();
assert_eq!(spans.len(), 2);
let trace_id = spans[0].span_context.trace_id();
assert_eq!(trace_id, spans[1].span_context.trace_id());
let inner_span_id = spans[0].span_context.span_id();
let outer_span_id = spans[1].span_context.span_id();
assert_eq!(outer_span_id, spans[0].parent_span_id);
let trace_ctx0 = logs[0].record.trace_context().unwrap();
let trace_ctx1 = logs[1].record.trace_context().unwrap();
assert_eq!(trace_ctx0.trace_id, trace_id);
assert_eq!(trace_ctx1.trace_id, trace_id);
assert_eq!(trace_ctx0.span_id, outer_span_id);
assert_eq!(trace_ctx1.span_id, inner_span_id);
// Set context from remote.
let remote_trace_id = TraceId::from_u128(233);
let remote_span_id = SpanId::from_u64(2333);
let remote_span_context = SpanContext::new(
remote_trace_id,
remote_span_id,
TraceFlags::SAMPLED,
true,
Default::default(),
);
// Act again.
tracing::error_span!("outer-span").in_scope(|| {
let span = tracing::Span::current();
let parent_context = Context::current().with_remote_span_context(remote_span_context);
span.set_parent(parent_context);
error!("first-event");
tracing::error_span!("inner-span").in_scope(|| {
error!("second-event");
});
});
assert!(logger_provider.force_flush().is_ok());
let logs = exporter.get_emitted_logs().expect("No emitted logs");
assert_eq!(logs.len(), 4, "Expected 4 logs, got: {logs:?}");
let logs = &logs[2..];
let spans = span_exporter.get_finished_spans().unwrap();
assert_eq!(spans.len(), 4);
let spans = &spans[2..];
let trace_id = spans[0].span_context.trace_id();
assert_eq!(trace_id, remote_trace_id);
assert_eq!(trace_id, spans[1].span_context.trace_id());
let inner_span_id = spans[0].span_context.span_id();
let outer_span_id = spans[1].span_context.span_id();
assert_eq!(outer_span_id, spans[0].parent_span_id);
let trace_ctx0 = logs[0].record.trace_context().unwrap();
let trace_ctx1 = logs[1].record.trace_context().unwrap();
assert_eq!(trace_ctx0.trace_id, trace_id);
assert_eq!(trace_ctx1.trace_id, trace_id);
assert_eq!(trace_ctx0.span_id, outer_span_id);
assert_eq!(trace_ctx1.span_id, inner_span_id);
}
#[test]
fn tracing_appender_standalone_with_tracing_log() {
// Arrange
let exporter: InMemoryLogExporter = InMemoryLogExporter::default();
let logger_provider = LoggerProvider::builder()
let logger_provider = SdkLoggerProvider::builder()
.with_simple_exporter(exporter.clone())
.build();
let subscriber = create_tracing_subscriber(exporter.clone(), &logger_provider);
let subscriber = create_tracing_subscriber(&logger_provider);
// avoiding setting tracing subscriber as global as that does not
// play well with unit tests.
@ -513,8 +739,8 @@ mod tests {
drop(tracing_log::LogTracer::init());
// Act
log::error!(target: "my-system", "log from log crate");
logger_provider.force_flush();
log::error!("log from log crate");
assert!(logger_provider.force_flush().is_ok());
// Assert TODO: move to helper methods
let exported_logs = exporter
@ -526,11 +752,22 @@ mod tests {
.expect("Atleast one log is expected to be present.");
// Validate common fields
assert_eq!(log.instrumentation.name(), "opentelemetry-appender-tracing");
assert_eq!(log.record.severity_number, Some(Severity::Error));
assert_eq!(log.instrumentation.name(), "");
assert_eq!(log.record.severity_number(), Some(Severity::Error));
// Target and EventName from Log crate are "log" and "log event" respectively.
// Validate target
assert_eq!(
log.record.target().expect("target is expected").to_string(),
"log"
);
// Validate event name
assert_eq!(
log.record.event_name().expect("event_name is expected"),
"log event"
);
// Validate trace context is none.
assert!(log.record.trace_context.is_none());
assert!(log.record.trace_context().is_none());
// Attributes can be polluted when we don't use this feature.
#[cfg(feature = "experimental_metadata_attributes")]
@ -566,11 +803,11 @@ mod tests {
fn tracing_appender_inside_tracing_context_with_tracing_log() {
// Arrange
let exporter: InMemoryLogExporter = InMemoryLogExporter::default();
let logger_provider = LoggerProvider::builder()
let logger_provider = SdkLoggerProvider::builder()
.with_simple_exporter(exporter.clone())
.build();
let subscriber = create_tracing_subscriber(exporter.clone(), &logger_provider);
let subscriber = create_tracing_subscriber(&logger_provider);
// avoiding setting tracing subscriber as global as that does not
// play well with unit tests.
@ -578,8 +815,8 @@ mod tests {
drop(tracing_log::LogTracer::init());
// setup tracing as well.
let tracer_provider = TracerProvider::builder()
.with_config(trace::Config::default().with_sampler(Sampler::AlwaysOn))
let tracer_provider = SdkTracerProvider::builder()
.with_sampler(Sampler::AlwaysOn)
.build();
let tracer = tracer_provider.tracer("test-tracer");
@ -593,7 +830,7 @@ mod tests {
(trace_id, span_id)
});
logger_provider.force_flush();
assert!(logger_provider.force_flush().is_ok());
// Assert TODO: move to helper methods
let exported_logs = exporter
@ -605,29 +842,28 @@ mod tests {
.expect("Atleast one log is expected to be present.");
// validate common fields.
assert_eq!(log.instrumentation.name(), "opentelemetry-appender-tracing");
assert_eq!(log.record.severity_number, Some(Severity::Error));
assert_eq!(log.instrumentation.name(), "");
assert_eq!(log.record.severity_number(), Some(Severity::Error));
// validate trace context.
assert!(log.record.trace_context.is_some());
assert!(log.record.trace_context().is_some());
assert_eq!(
log.record.trace_context.as_ref().unwrap().trace_id,
log.record.trace_context().unwrap().trace_id,
trace_id_expected
);
assert_eq!(
log.record.trace_context.as_ref().unwrap().span_id,
log.record.trace_context().unwrap().span_id,
span_id_expected
);
assert_eq!(
log.record
.trace_context
.as_ref()
.unwrap()
.trace_flags
.unwrap(),
log.record.trace_context().unwrap().trace_flags.unwrap(),
TraceFlags::SAMPLED
);
for attribute in log.record.attributes_iter() {
println!("key: {:?}, value: {:?}", attribute.0, attribute.1);
}
// Attributes can be polluted when we don't use this feature.
#[cfg(feature = "experimental_metadata_attributes")]
assert_eq!(log.record.attributes_iter().count(), 4);
@ -657,4 +893,66 @@ mod tests {
assert!(!attributes_key.contains(&Key::new("log.target")));
}
}
#[derive(Debug)]
struct LogProcessorWithIsEnabled {
severity_level: Severity,
name: String,
target: String,
}
impl LogProcessorWithIsEnabled {
fn new(severity_level: Severity, name: String, target: String) -> Self {
LogProcessorWithIsEnabled {
severity_level,
name,
target,
}
}
}
impl LogProcessor for LogProcessorWithIsEnabled {
fn emit(&self, _record: &mut SdkLogRecord, _scope: &InstrumentationScope) {
// no-op
}
#[cfg(feature = "spec_unstable_logs_enabled")]
fn event_enabled(&self, level: Severity, target: &str, name: Option<&str>) -> bool {
// assert that passed in arguments are same as the ones set in the test.
assert_eq!(self.severity_level, level);
assert_eq!(self.target, target);
assert_eq!(
self.name,
name.expect("name is expected from tracing appender")
);
true
}
fn force_flush(&self) -> OTelSdkResult {
Ok(())
}
}
#[cfg(feature = "spec_unstable_logs_enabled")]
#[test]
fn is_enabled() {
// Arrange
let logger_provider = SdkLoggerProvider::builder()
.with_log_processor(LogProcessorWithIsEnabled::new(
Severity::Error,
"my-event-name".to_string(),
"my-system".to_string(),
))
.build();
let subscriber = create_tracing_subscriber(&logger_provider);
// avoiding setting tracing subscriber as global as that does not
// play well with unit tests.
let _guard = tracing::subscriber::set_default(subscriber);
// Name, Target and Severity are expected to be passed to the IsEnabled check
// The validation is done in the LogProcessorWithIsEnabled struct.
error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io");
}
}

View File

@ -1 +1,155 @@
//! # OpenTelemetry-Appender-Tracing
//!
//! This crate provides a bridge between the [`tracing`](https://docs.rs/tracing/latest/tracing/) crate and OpenTelemetry logs.
//! It converts `tracing` events into OpenTelemetry `LogRecords`, allowing applications using `tracing` to seamlessly integrate
//! with OpenTelemetry logging backends.
//!
//! ## Background
//!
//! Unlike traces and metrics, OpenTelemetry does not provide a dedicated logging API for end-users. Instead, it recommends using
//! existing logging libraries and bridging them to OpenTelemetry logs. This crate serves as such a bridge for `tracing` users.
//!
//! ## Features
//!
//! - Converts `tracing` events into OpenTelemetry [`LogRecords`](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#log-and-event-record-definition)
//! - Integrates as a [`Layer`](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/layer/trait.Layer.html)
//! from [`tracing-subscriber`](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/), allowing
//! to be used alongside other `tracing` layers, such as `fmt`
//! - Automatically attaches OpenTelemetry trace context (`TraceId`, `SpanId`, `TraceFlags`) to logs
//! - Automatically associates OpenTelemetry Resource to logs
//! - Supports exporting logs to OpenTelemetry-compatible backends (OTLP, stdout, etc.)
//!
//! ## Getting Started
//!
//! ### 1. Install Dependencies
//!
//! Add the following dependencies to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! tracing = ">=0.1.40"
//! tracing-core = { version = ">=0.1.33" }
//! tracing-subscriber = { version = "0.3", features = ["registry", "std", "fmt"] }
//! opentelemetry = { version = "0.28", features = ["logs"] }
//! opentelemetry-sdk = { version = "0.28", features = ["logs"] }
//! opentelemetry-appender-tracing = { version = "0.28.1" }
//! ```
//!
//! ### 2. Set Up the OpenTelemetry Logger Provider
//!
//! Before integrating with `tracing`, create an OpenTelemetry [`SdkLoggerProvider`](https://docs.rs/opentelemetry_sdk/latest/opentelemetry_sdk/logs/struct.SdkLoggerProvider.html):
//!
//! ```rust
//! use opentelemetry_sdk::logs::SdkLoggerProvider;
//! use opentelemetry_stdout::LogExporter;
//!
//! let exporter = LogExporter::default();
//! let provider = SdkLoggerProvider::builder()
//! .with_simple_exporter(exporter)
//! .build();
//! ```
//!
//! In this example, `SdkLoggerProvider` is configured to use the `opentelemetry_stdout` crate to export logs to stdout. You can replace it with any other OpenTelemetry-compatible exporter.
//! Any additional OpenTelemetry configuration (e.g., setting up a resource, additional processors etc.) can be done at this stage.
//!
//! ### 3. Create the OpenTelemetry-Tracing Bridge
//!
//! Create `OpenTelemetryTracingBridge` layer using the `SdkLoggerProvider` created in the previous step.
//!
//! ```rust
//! # use opentelemetry_sdk::logs::SdkLoggerProvider;
//! # use opentelemetry_stdout::LogExporter;
//! # use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
//! # let exporter = LogExporter::default();
//! # let provider = SdkLoggerProvider::builder()
//! # .with_simple_exporter(exporter)
//! # .build();
//! let otel_layer = OpenTelemetryTracingBridge::new(&provider);
//! ```
//!
//! ### 4. Register the `tracing` Subscriber
//!
//! Since this crate provides a `Layer` for `tracing`, you can register it with the `tracing` subscriber as shown below.
//!
//! ```rust
//! # use opentelemetry_sdk::logs::SdkLoggerProvider;
//! # use opentelemetry_stdout::LogExporter;
//! # let exporter = LogExporter::default();
//! # let provider = SdkLoggerProvider::builder().with_simple_exporter(exporter).build();
//! # use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
//! # let otel_layer = OpenTelemetryTracingBridge::new(&provider);
//! use tracing_subscriber::prelude::*;
//!
//! tracing_subscriber::registry()
//! .with(otel_layer)
//! .with(tracing_subscriber::fmt::layer()) // In this example, `fmt` layer is also added.
//! .init();
//! ```
//!
//! ### 5. Log Events Using `tracing`
//!
//! ```rust
//! use tracing::error;
//! error!(name: "my-event-name1", target: "my-system", event_id = 10, user_name = "otel", user_email = "otel@opentelemetry.io", message = "This is an example message");
//! ```
//!
//!
//! ## Mapping details
//!
//! Since OpenTelemetry and `tracing` have their own data models, this bridge performs the following mappings:
//!
//! | `tracing` | OpenTelemetry | Notes |
//! |-----------------------|-------------------------|-----------------------------------------------------------------------------------------|
//! | name of the event | `EventName` | OpenTelemetry treats a log record that carries a name as an Event, so every named `tracing` Event maps to an OTel Event |
//! | target | `target` | Groups logs from the same module/crate. At recording time, `target` is stored in a top-level field. But exporters treat this information as OpenTelemetry `InstrumentationScope` |
//! | level of the event | `Severity`, `SeverityText` | |
//! | Fields | `Attributes` | Converted into OpenTelemetry log attributes. Field with "message" as key is specially treated and stored as `LogRecord::Body` |
//! | Message | `Body` | The body/message of the log. This is done only if body was not already populated from "message" field above |
//!
//! ### Data Type Mapping
//!
//! The data types supported by `tracing` and OpenTelemetry are different and the following conversions are applied:
//!
//! | `tracing` Type | OpenTelemetry `AnyValue` Type |
//! |----------------|-------------------------------|
//! | `i64` | `Int` |
//! | `f32`, `f64` | `Double` |
//! | `u64`, `u128`, `i128` | `Int` (if convertible to `i64` without loss), else `String` |
//! | `&str` | `String` |
//! | `bool` | `Bool` |
//! | `&[u8]` | `Bytes` |
//! | `&dyn Debug` | `String` (via `Debug` formatting) |
//! | `&dyn Error` | `String` (via `Debug` formatting). This is stored into an attribute with key "exception.message", following [OTel conventions](https://opentelemetry.io/docs/specs/semconv/attributes-registry/exception/) |
//!
//! In future, additional types may be supported.
//!
//! > **Note:** This crate does not support `tracing` Spans. One may use [`tracing-opentelemetry`](https://docs.rs/tracing-opentelemetry/latest/tracing_opentelemetry/) to
//! > convert `tracing` spans into OpenTelemetry spans. This is a third-party crate
//! > that is not maintained by the OpenTelemetry project.
//! > `tracing-opentelemetry`:
//! > - Converts `tracing` spans into OpenTelemetry spans
//! > - Converts `tracing` events into OpenTelemetry `SpanEvents` rather than logs
//! >
//! > Depending on the outcome of the
//! > [discussion](https://github.com/open-telemetry/opentelemetry-rust/issues/1571),
//! > the OpenTelemetry project may provide direct support to map `tracing`
//! > spans to OpenTelemetry in the future.
//!
//! ## Feature Flags
//! `spec_unstable_logs_enabled`: TODO
//!
//! `experimental_metadata_attributes`: TODO
//!
//! `experimental_use_tracing_span_context`: TODO
//!
//! ## Limitations
//! 1. There is no support for `Valuable` crate. [2819](https://github.com/open-telemetry/opentelemetry-rust/issues/2819)
//!
//! ## Stability Guarantees
//! // TODO
//!
//! ## Further Reading
//!
//! - OpenTelemetry Rust: [opentelemetry-rust](https://github.com/open-telemetry/opentelemetry-rust)
//! - Tracing: [tracing](https://docs.rs/tracing/)
//! - OpenTelemetry Logs: [OpenTelemetry Logging Specification](https://opentelemetry.io/docs/specs/otel/logs/)
pub mod layer;

View File

@ -2,6 +2,30 @@
## vNext
- Implementation of `Extractor::get_all` for `HeaderExtractor`
- Support `HttpClient` implementation for `HyperClient<C>` with custom connectors beyond `HttpConnector`, enabling Unix Domain Socket connections and other custom transports
## 0.30.0
Released 2025-May-23
- Updated `opentelemetry` dependency to version 0.30.0.
## 0.29.0
Released 2025-Mar-21
- Update `opentelemetry` dependency version to 0.29.
## 0.28.0
Released 2025-Feb-10
- Update `opentelemetry` dependency version to 0.28.
- Bump msrv to 1.75.0.
- Add "internal-logs" feature flag (enabled by default), and emit internal logs via `tracing` crate.
- Add `HttpClient::send_bytes` with `bytes::Bytes` request payload and deprecate old `HttpClient::send` function.
## 0.27.0
Released 2024-Nov-08
@ -30,7 +54,7 @@ Released 2024-Sep-30
## v0.12.0
- Add `reqwest-rustls-webkpi-roots` feature flag to configure [`reqwest`](https://docs.rs/reqwest/0.11.27/reqwest/index.html#optional-features) to use embedded `webkpi-roots`.
- Add `reqwest-rustls-webpki-roots` feature flag to configure [`reqwest`](https://docs.rs/reqwest/0.11.27/reqwest/index.html#optional-features) to use embedded `webpki-roots`.
- Update `opentelemetry` dependency version to 0.23
## v0.11.1

View File

@ -1,18 +1,21 @@
[package]
name = "opentelemetry-http"
version = "0.27.0"
version = "0.30.0"
description = "Helper implementations for sending HTTP requests. Uses include propagating and extracting context over http, exporting telemetry, requesting sampling strategies."
homepage = "https://github.com/open-telemetry/opentelemetry-rust"
repository = "https://github.com/open-telemetry/opentelemetry-rust"
homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-http"
repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-http"
keywords = ["opentelemetry", "tracing", "context", "propagation"]
license = "Apache-2.0"
edition = "2021"
rust-version = "1.70"
rust-version = "1.75.0"
autobenches = false
[features]
default = ["internal-logs"]
hyper = ["dep:http-body-util", "dep:hyper", "dep:hyper-util", "dep:tokio"]
reqwest-rustls = ["reqwest", "reqwest/rustls-tls-native-roots"]
reqwest-rustls-webpki-roots = ["reqwest", "reqwest/rustls-tls-webpki-roots"]
internal-logs = ["opentelemetry/internal-logs"]
[dependencies]
async-trait = { workspace = true }
@ -21,6 +24,12 @@ http = { workspace = true }
http-body-util = { workspace = true, optional = true }
hyper = { workspace = true, optional = true }
hyper-util = { workspace = true, features = ["client-legacy", "http1", "http2"], optional = true }
opentelemetry = { version = "0.27", path = "../opentelemetry", features = ["trace"] }
opentelemetry = { version = "0.30", path = "../opentelemetry", features = ["trace"] }
reqwest = { workspace = true, features = ["blocking"], optional = true }
tokio = { workspace = true, features = ["time"], optional = true }
[lints]
workspace = true
[lib]
bench = false

View File

@ -10,7 +10,7 @@ requesting sampling strategies.
[![Crates.io: opentelemetry-http](https://img.shields.io/crates/v/opentelemetry-http.svg)](https://crates.io/crates/opentelemetry-http)
[![Documentation](https://docs.rs/opentelemetry-http/badge.svg)](https://docs.rs/opentelemetry-http)
[![LICENSE](https://img.shields.io/crates/l/opentelemetry-http)](./LICENSE)
[![LICENSE](https://img.shields.io/crates/l/opentelemetry-http)](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-http/LICENSE)
[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023)
@ -28,3 +28,22 @@ management, and export of telemetry. A major goal of OpenTelemetry is that you
can easily instrument your applications or systems, no matter their language,
infrastructure, or runtime environment. Crucially, the storage and visualization
of telemetry is intentionally left to other tools.
*[Supported Rust Versions](#supported-rust-versions)*
## Release Notes
You can find the release notes (changelog) [here](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-http/CHANGELOG.md).
## Supported Rust Versions
OpenTelemetry is built against the latest stable release. The minimum supported
version is 1.75.0. The current OpenTelemetry version is not guaranteed to build
on Rust versions earlier than the minimum supported version.
The current stable Rust compiler and the three most recent minor versions
before it will always be supported. For example, if the current stable compiler
version is 1.49, the minimum supported version will not be increased past 1.46,
three minor versions prior. Increasing the minimum supported compiler version
is not considered a semver breaking change as long as doing so complies with
this policy.

View File

@ -13,7 +13,7 @@ use opentelemetry::propagation::{Extractor, Injector};
/// for example usage.
pub struct HeaderInjector<'a>(pub &'a mut http::HeaderMap);
impl<'a> Injector for HeaderInjector<'a> {
impl Injector for HeaderInjector<'_> {
/// Set a key and value in the HeaderMap. Does nothing if the key or value are not valid inputs.
fn set(&mut self, key: &str, value: String) {
if let Ok(name) = http::header::HeaderName::from_bytes(key.as_bytes()) {
@ -30,7 +30,7 @@ impl<'a> Injector for HeaderInjector<'a> {
/// for example usage.
pub struct HeaderExtractor<'a>(pub &'a http::HeaderMap);
impl<'a> Extractor for HeaderExtractor<'a> {
impl Extractor for HeaderExtractor<'_> {
/// Get a value for a key from the HeaderMap. If the value is not valid ASCII, returns None.
fn get(&self, key: &str) -> Option<&str> {
self.0.get(key).and_then(|value| value.to_str().ok())
@ -43,6 +43,16 @@ impl<'a> Extractor for HeaderExtractor<'a> {
.map(|value| value.as_str())
.collect::<Vec<_>>()
}
/// Get all the values for a key from the HeaderMap
fn get_all(&self, key: &str) -> Option<Vec<&str>> {
let all_iter = self.0.get_all(key).iter();
if let (0, Some(0)) = all_iter.size_hint() {
return None;
}
Some(all_iter.filter_map(|value| value.to_str().ok()).collect())
}
}
pub type HttpError = Box<dyn std::error::Error + Send + Sync + 'static>;
@ -55,22 +65,36 @@ pub type HttpError = Box<dyn std::error::Error + Send + Sync + 'static>;
/// users to bring their choice of HTTP client.
#[async_trait]
pub trait HttpClient: Debug + Send + Sync {
/// Send the specified HTTP request
/// Send the specified HTTP request with `Vec<u8>` payload
///
/// Returns the HTTP response including the status code and body.
///
/// Returns an error if it can't connect to the server or the request could not be completed,
/// e.g. because of a timeout, infinite redirects, or a loss of connection.
async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError>;
#[deprecated(note = "Use `send_bytes` with `Bytes` payload instead.")]
async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError> {
self.send_bytes(request.map(Into::into)).await
}
/// Send the specified HTTP request with `Bytes` payload.
///
/// Returns the HTTP response including the status code and body.
///
/// Returns an error if it can't connect to the server or the request could not be completed,
/// e.g. because of a timeout, infinite redirects, or a loss of connection.
async fn send_bytes(&self, request: Request<Bytes>) -> Result<Response<Bytes>, HttpError>;
}
#[cfg(feature = "reqwest")]
mod reqwest {
use opentelemetry::otel_debug;
use super::{async_trait, Bytes, HttpClient, HttpError, Request, Response};
#[async_trait]
impl HttpClient for reqwest::Client {
async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError> {
async fn send_bytes(&self, request: Request<Bytes>) -> Result<Response<Bytes>, HttpError> {
otel_debug!(name: "ReqwestClient.Send");
let request = request.try_into()?;
let mut response = self.execute(request).await?.error_for_status()?;
let headers = std::mem::take(response.headers_mut());
@ -86,7 +110,8 @@ mod reqwest {
#[cfg(not(target_arch = "wasm32"))]
#[async_trait]
impl HttpClient for reqwest::blocking::Client {
async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError> {
async fn send_bytes(&self, request: Request<Bytes>) -> Result<Response<Bytes>, HttpError> {
otel_debug!(name: "ReqwestBlockingClient.Send");
let request = request.try_into()?;
let mut response = self.execute(request)?.error_for_status()?;
let headers = std::mem::take(response.headers_mut());
@ -102,9 +127,8 @@ mod reqwest {
#[cfg(feature = "hyper")]
pub mod hyper {
use crate::ResponseExt;
use super::{async_trait, Bytes, HttpClient, HttpError, Request, Response};
use crate::ResponseExt;
use http::HeaderValue;
use http_body_util::{BodyExt, Full};
use hyper::body::{Body as HttpBody, Frame};
@ -112,6 +136,7 @@ pub mod hyper {
connect::{Connect, HttpConnector},
Client,
};
use opentelemetry::otel_debug;
use std::fmt::Debug;
use std::pin::Pin;
use std::task::{self, Poll};
@ -154,8 +179,13 @@ pub mod hyper {
}
#[async_trait]
impl HttpClient for HyperClient {
async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError> {
impl<C> HttpClient for HyperClient<C>
where
C: Connect + Clone + Send + Sync + 'static,
HyperClient<C>: Debug,
{
async fn send_bytes(&self, request: Request<Bytes>) -> Result<Response<Bytes>, HttpError> {
otel_debug!(name: "HyperClient.Send");
let (parts, body) = request.into_parts();
let mut request = Request::from_parts(parts, Body(Full::from(body)));
if let Some(ref authorization) = self.authorization {
@ -220,6 +250,8 @@ impl<T> ResponseExt for Response<T> {
#[cfg(test)]
mod tests {
use http::HeaderValue;
use super::*;
#[test]
@ -234,6 +266,32 @@ mod tests {
)
}
#[test]
fn http_headers_get_all() {
let mut carrier = http::HeaderMap::new();
carrier.append("headerName", HeaderValue::from_static("value"));
carrier.append("headerName", HeaderValue::from_static("value2"));
carrier.append("headerName", HeaderValue::from_static("value3"));
assert_eq!(
HeaderExtractor(&carrier).get_all("HEADERNAME"),
Some(vec!["value", "value2", "value3"]),
"all values from a key extraction"
)
}
#[test]
fn http_headers_get_all_missing_key() {
let mut carrier = http::HeaderMap::new();
carrier.append("headerName", HeaderValue::from_static("value"));
assert_eq!(
HeaderExtractor(&carrier).get_all("not_existing"),
None,
"all values from a missing key extraction"
)
}
#[test]
fn http_headers_keys() {
let mut carrier = http::HeaderMap::new();

View File

@ -2,6 +2,25 @@
## vNext
## 0.30.0
Released 2025-May-23
- Updated `opentelemetry` dependency to version 0.30.0.
## 0.29.0
Released 2025-Mar-21
- Update `opentelemetry` dependency version to 0.29.
## 0.28.0
Released 2025-Feb-10
- Update `opentelemetry` dependency version to 0.28.
- Bump msrv to 1.75.0.
## 0.27.0
Released 2024-Nov-11

View File

@ -1,6 +1,6 @@
[package]
name = "opentelemetry-jaeger-propagator"
version = "0.27.0"
version = "0.30.0"
description = "Jaeger propagator for OpenTelemetry"
homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger-propagator"
repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger-propagator"
@ -13,14 +13,15 @@ categories = [
keywords = ["opentelemetry", "jaeger", "propagator"]
license = "Apache-2.0"
edition = "2021"
rust-version = "1.70"
rust-version = "1.75.0"
autobenches = false
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
opentelemetry = { version = "0.27", default-features = false, features = [
opentelemetry = { version = "0.30", default-features = false, features = [
"trace",
], path = "../opentelemetry" }
@ -28,4 +29,11 @@ opentelemetry = { version = "0.27", default-features = false, features = [
opentelemetry = { features = ["testing"], path = "../opentelemetry" }
[features]
default = []
default = ["internal-logs"]
internal-logs = ["opentelemetry/internal-logs"]
[lints]
workspace = true
[lib]
bench = false

View File

@ -10,7 +10,7 @@ opentelemetry-otlp crate.
[![Crates.io: opentelemetry-jaeger-propagator](https://img.shields.io/crates/v/opentelemetry-jaeger-propagator.svg)](https://crates.io/crates/opentelemetry-jaeger-propagator)
[![Documentation](https://docs.rs/opentelemetry-jaeger-propagator/badge.svg)](https://docs.rs/opentelemetry-jaeger-propagator)
[![LICENSE](https://img.shields.io/crates/l/opentelemetry-jaeger-propagator)](./LICENSE)
[![LICENSE](https://img.shields.io/crates/l/opentelemetry-jaeger-propagator)](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-jaeger-propagator/LICENSE)
[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023)
@ -28,3 +28,22 @@ management, and export of telemetry. A major goal of OpenTelemetry is that you
can easily instrument your applications or systems, no matter their language,
infrastructure, or runtime environment. Crucially, the storage and visualization
of telemetry is intentionally left to other tools.
*[Supported Rust Versions](#supported-rust-versions)*
## Release Notes
You can find the release notes (changelog) [here](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-jaeger-propagator/CHANGELOG.md).
## Supported Rust Versions
OpenTelemetry is built against the latest stable release. The minimum supported
version is 1.75.0. The current OpenTelemetry version is not guaranteed to build
on Rust versions earlier than the minimum supported version.
The current stable Rust compiler and the three most recent minor versions
before it will always be supported. For example, if the current stable compiler
version is 1.49, the minimum supported version will not be increased past 1.46,
three minor versions prior. Increasing the minimum supported compiler version
is not considered a semver breaking change as long as doing so complies with
this policy.

View File

@ -1,10 +1,9 @@
//! *Compiler support: [requires `rustc` 1.64+][msrv]*
//! *[Supported Rust Versions](#supported-rust-versions)*
//!
//! [Jaeger Docs]: https://www.jaegertracing.io/docs/
//! [jaeger-deprecation]: https://github.com/open-telemetry/opentelemetry-specification/pull/2858/files
//! [jaeger-otlp]: https://www.jaegertracing.io/docs/1.38/apis/#opentelemetry-protocol-stable
//! [otlp-exporter]: https://docs.rs/opentelemetry-otlp/latest/opentelemetry_otlp/
//! [msrv]: #supported-rust-versions
//! [jaeger propagation format]: https://www.jaegertracing.io/docs/1.18/client-libraries/#propagation-format
//!
//! # Supported Rust Versions

View File

@ -325,7 +325,7 @@ mod tests {
true,
TraceState::default(),
),
format!("{}:{}:0:1", LONG_TRACE_ID_STR, SPAN_ID_STR),
format!("{LONG_TRACE_ID_STR}:{SPAN_ID_STR}:0:1"),
),
(
SpanContext::new(
@ -335,7 +335,7 @@ mod tests {
true,
TraceState::default(),
),
format!("{}:{}:0:0", LONG_TRACE_ID_STR, SPAN_ID_STR),
format!("{LONG_TRACE_ID_STR}:{SPAN_ID_STR}:0:0"),
),
(
SpanContext::new(
@ -345,7 +345,7 @@ mod tests {
true,
TraceState::default(),
),
format!("{}:{}:0:3", LONG_TRACE_ID_STR, SPAN_ID_STR),
format!("{LONG_TRACE_ID_STR}:{SPAN_ID_STR}:0:3"),
),
]
}
@ -356,7 +356,7 @@ mod tests {
let propagator = Propagator::with_custom_header(construct_header);
for (trace_id, span_id, flag, expected) in get_extract_data() {
let mut map: HashMap<String, String> = HashMap::new();
map.set(context_key, format!("{}:{}:0:{}", trace_id, span_id, flag));
map.set(context_key, format!("{trace_id}:{span_id}:0:{flag}"));
let context = propagator.extract(&map);
assert_eq!(context.span().span_context(), &expected);
}
@ -392,7 +392,7 @@ mod tests {
// Propagators implement debug
assert_eq!(
format!("{:?}", default_propagator),
format!("{default_propagator:?}"),
format!(
"Propagator {{ baggage_prefix: \"{}\", header_name: \"{}\", fields: [\"{}\"] }}",
JAEGER_BAGGAGE_PREFIX, JAEGER_HEADER, JAEGER_HEADER
@ -641,10 +641,7 @@ mod tests {
}
for (trace_id, span_id, flag, expected) in get_extract_data() {
let mut map: HashMap<String, String> = HashMap::new();
map.set(
JAEGER_HEADER,
format!("{}:{}:0:{}", trace_id, span_id, flag),
);
map.set(JAEGER_HEADER, format!("{trace_id}:{span_id}:0:{flag}"));
let context = propagator.extract(&map);
assert_eq!(context.span().span_context(), &expected);
}
@ -655,7 +652,7 @@ mod tests {
let mut map: HashMap<String, String> = HashMap::new();
map.set(
JAEGER_HEADER,
format!("{}:{}:0:1:aa", LONG_TRACE_ID_STR, SPAN_ID_STR),
format!("{LONG_TRACE_ID_STR}:{SPAN_ID_STR}:0:1:aa"),
);
let propagator = Propagator::new();
let context = propagator.extract(&map);
@ -667,7 +664,7 @@ mod tests {
let mut map: HashMap<String, String> = HashMap::new();
map.set(
JAEGER_HEADER,
format!("{}:{}:0:aa", LONG_TRACE_ID_STR, SPAN_ID_STR),
format!("{LONG_TRACE_ID_STR}:{SPAN_ID_STR}:0:aa"),
);
let propagator = Propagator::new();
let context = propagator.extract(&map);
@ -679,7 +676,7 @@ mod tests {
let mut map: HashMap<String, String> = HashMap::new();
map.set(
JAEGER_HEADER,
format!("{}%3A{}%3A0%3A1", LONG_TRACE_ID_STR, SPAN_ID_STR),
format!("{LONG_TRACE_ID_STR}%3A{SPAN_ID_STR}%3A0%3A1"),
);
let propagator = Propagator::new();
let context = propagator.extract(&map);

View File

@ -2,6 +2,82 @@
## vNext
## 0.30.0
Released 2025-May-23
- Update `opentelemetry` dependency version to 0.30
- Update `opentelemetry_sdk` dependency version to 0.30
- Update `opentelemetry-http` dependency version to 0.30
- Update `opentelemetry-proto` dependency version to 0.30
- Update `tonic` dependency version to 0.13
- Re-export `tonic` types under `tonic_types`
[2898](https://github.com/open-telemetry/opentelemetry-rust/pull/2898)
- Publicly re-exported `MetricExporterBuilder`, `SpanExporterBuilder`, and
`LogExporterBuilder` types, enabling users to directly reference and use these
builder types for metrics, traces, and logs exporters.
[2966](https://github.com/open-telemetry/opentelemetry-rust/pull/2966)
## 0.29.0
Released 2025-Mar-21
- Update `opentelemetry` dependency version to 0.29
- Update `opentelemetry_sdk` dependency version to 0.29
- Update `opentelemetry-http` dependency version to 0.29
- Update `opentelemetry-proto` dependency version to 0.29
- The `OTEL_EXPORTER_OTLP_TIMEOUT`, `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`, `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` and `OTEL_EXPORTER_OTLP_LOGS_TIMEOUT` are changed from seconds to milliseconds.
- Fixed `.with_headers()` in `HttpExporterBuilder` to correctly support multiple key/value pairs. [#2699](https://github.com/open-telemetry/opentelemetry-rust/pull/2699)
- Fixed
[#2770](https://github.com/open-telemetry/opentelemetry-rust/issues/2770)
partially to properly handle `shutdown()` when using `http`. (`tonic` still
does not do proper shutdown)
- *Breaking*
ExporterBuilder's build() method now returns `Result` with `ExporterBuildError` being the
Error variant. Previously it returned signal specific errors like `LogError`
from the `opentelemetry_sdk`, which are no longer part of the sdk. No changes
required if you were using unwrap/expect. If you were matching on the returned
Error enum, replace with the enum `ExporterBuildError`. Unlike the previous
`Error` which contained many variants unrelated to building an exporter, the
new one returns specific variants applicable to building an exporter. Some
variants might be applicable only on select features.
Also, the now-unused `Error` enum is removed.
- **Breaking** `ExportConfig`'s `timeout` field is now optional(`Option<Duration>`)
- **Breaking** Export configuration done via code is final. ENV variables cannot be used to override the code config.
Do not use code based config, if there is desire to control the settings via ENV variables.
List of ENV variables and corresponding setting being affected by this change.
- `OTEL_EXPORTER_OTLP_ENDPOINT` -> `ExportConfig.endpoint`
- `OTEL_EXPORTER_OTLP_TIMEOUT` -> `ExportConfig.timeout`
## 0.28.0
Released 2025-Feb-10
- Update `opentelemetry` dependency version to 0.28.
- Update `opentelemetry_sdk` dependency version to 0.28.
- Update `opentelemetry-http` dependency version to 0.28.
- Update `opentelemetry-proto` dependency version to 0.28.
- Bump msrv to 1.75.0.
- Feature flag "populate-logs-event-name" is removed as no longer relevant.
LogRecord's `event_name()` is now automatically populated on the newly added
"event_name" field in LogRecord proto definition.
- Remove "grpc-tonic" feature from default, and instead add "http-proto" and
"reqwest-blocking-client" features as default, to align with the
specification.
[2516](https://github.com/open-telemetry/opentelemetry-rust/pull/2516)
- Remove unnecessarily public trait `opentelemetry_otlp::metrics::MetricsClient`
and `MetricExporter::new(..)` method. Use
`MetricExporter::builder()...build()` to obtain `MetricExporter`.
- The HTTP clients (reqwest, reqwest-blocking, hyper) now support the
export timeout interval configured in below order
- Signal specific env variable `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`,
`OTEL_EXPORTER_OTLP_LOGS_TIMEOUT` or `OTEL_EXPORTER_OTLP_TIMEOUT`.
- `OTEL_EXPORTER_OTLP_TIMEOUT` env variable.
- `with_http().with_timeout()` API method of
`LogExporterBuilder` and `SpanExporterBuilder` and `MetricsExporterBuilder`.
- The default interval of 10 seconds is used if none is configured.
## 0.27.0
Released 2024-Nov-11
@ -11,7 +87,7 @@ Released 2024-Nov-11
- Update `opentelemetry-http` dependency version to 0.27
- Update `opentelemetry-proto` dependency version to 0.27
- **BREAKING**:
- **BREAKING**:
- ([#2217](https://github.com/open-telemetry/opentelemetry-rust/pull/2217)) **Replaced**: The `MetricsExporterBuilder` interface is modified from `with_temporality_selector` to `with_temporality` example can be seen below:
Previous Signature:
```rust
@ -58,9 +134,9 @@ Released 2024-Nov-11
- `MetricsExporterBuilder` -> `MetricExporterBuilder`
- [#2263](https://github.com/open-telemetry/opentelemetry-rust/pull/2263)
Support `hyper` client for opentelemetry-otlp. This can be enabled using flag `hyper-client`.
Support `hyper` client for opentelemetry-otlp. This can be enabled using flag `hyper-client`.
Refer example: https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-otlp/examples/basic-otlp-http
## v0.26.0
Released 2024-Sep-30
@ -123,7 +199,7 @@ now use `.with_resource(RESOURCE::default())` to configure Resource when using
### Added
- Added `DeltaTemporalitySelector` ([#1568])
- Add `webkpi-roots` features to `reqwest` and `tonic` backends
- Add `webpki-roots` features to `reqwest` and `tonic` backends
[#1568]: https://github.com/open-telemetry/opentelemetry-rust/pull/1568

View File

@ -1,6 +1,6 @@
[package]
name = "opentelemetry-otlp"
version = "0.27.0"
version = "0.30.0"
description = "Exporter for the OpenTelemetry Collector"
homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-otlp"
repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-otlp"
@ -13,8 +13,9 @@ categories = [
keywords = ["opentelemetry", "otlp", "logging", "tracing", "metrics"]
license = "Apache-2.0"
edition = "2021"
rust-version = "1.71.1"
rust-version = "1.75.0"
autotests = false
autobenches = false
[[test]]
name = "smoke"
@ -26,12 +27,10 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
async-trait = { workspace = true }
futures-core = { workspace = true }
opentelemetry = { version = "0.27", default-features = false, path = "../opentelemetry" }
opentelemetry_sdk = { version = "0.27", default-features = false, path = "../opentelemetry-sdk" }
opentelemetry-http = { version = "0.27", path = "../opentelemetry-http", optional = true }
opentelemetry-proto = { version = "0.27", path = "../opentelemetry-proto", default-features = false }
opentelemetry = { version = "0.30", default-features = false, path = "../opentelemetry" }
opentelemetry_sdk = { version = "0.30", default-features = false, path = "../opentelemetry-sdk" }
opentelemetry-http = { version = "0.30", path = "../opentelemetry-http", optional = true }
opentelemetry-proto = { version = "0.30", path = "../opentelemetry-proto", default-features = false }
tracing = {workspace = true, optional = true}
prost = { workspace = true, optional = true }
@ -46,31 +45,30 @@ serde_json = { workspace = true, optional = true }
[dev-dependencies]
tokio-stream = { workspace = true, features = ["net"] }
# need tokio runtime to run smoke tests.
opentelemetry_sdk = { features = ["trace", "rt-tokio", "testing"], path = "../opentelemetry-sdk" }
opentelemetry_sdk = { features = ["trace", "testing"], path = "../opentelemetry-sdk" }
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
futures-util = { workspace = true }
temp-env = { workspace = true }
tonic = { workspace = true, features = ["router", "server"] }
[features]
# telemetry pillars and functions
trace = ["opentelemetry/trace", "opentelemetry_sdk/trace", "opentelemetry-proto/trace"]
metrics = ["opentelemetry/metrics", "opentelemetry_sdk/metrics", "opentelemetry-proto/metrics"]
logs = ["opentelemetry/logs", "opentelemetry_sdk/logs", "opentelemetry-proto/logs"]
populate-logs-event-name = ["opentelemetry-proto/populate-logs-event-name"]
internal-logs = ["tracing"]
internal-logs = ["tracing", "opentelemetry/internal-logs"]
# add ons
serialize = ["serde", "serde_json"]
default = ["grpc-tonic", "trace", "metrics", "logs", "internal-logs"]
default = ["http-proto", "reqwest-blocking-client", "trace", "metrics", "logs", "internal-logs"]
# grpc using tonic
grpc-tonic = ["tonic", "prost", "http", "tokio", "opentelemetry-proto/gen-tonic"]
gzip-tonic = ["tonic/gzip"]
zstd-tonic = ["tonic/zstd"]
tls = ["tonic/tls"]
tls-roots = ["tls", "tonic/tls-roots"]
tls = ["tonic/tls-ring"]
tls-roots = ["tls", "tonic/tls-native-roots"]
tls-webpki-roots = ["tls", "tonic/tls-webpki-roots"]
# http binary
@ -83,4 +81,10 @@ reqwest-rustls-webpki-roots = ["reqwest", "opentelemetry-http/reqwest-rustls-web
hyper-client = ["opentelemetry-http/hyper"]
# test
integration-testing = ["tonic", "prost", "tokio/full", "trace"]
integration-testing = ["tonic", "prost", "tokio/full", "trace", "logs"]
[lints]
workspace = true
[lib]
bench = false

View File

@ -10,7 +10,7 @@ implementation for
[![Crates.io: opentelemetry-otlp](https://img.shields.io/crates/v/opentelemetry-otlp.svg)](https://crates.io/crates/opentelemetry-otlp)
[![Documentation](https://docs.rs/opentelemetry-otlp/badge.svg)](https://docs.rs/opentelemetry-otlp)
[![LICENSE](https://img.shields.io/crates/l/opentelemetry-otlp)](./LICENSE)
[![LICENSE](https://img.shields.io/crates/l/opentelemetry-otlp)](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-otlp/LICENSE)
[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain)
[![codecov](https://codecov.io/gh/open-telemetry/opentelemetry-rust/branch/main/graph/badge.svg)](https://codecov.io/gh/open-telemetry/opentelemetry-rust)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023)
@ -30,20 +30,23 @@ can easily instrument your applications or systems, no matter their language,
infrastructure, or runtime environment. Crucially, the storage and visualization
of telemetry is intentionally left to other tools.
*Compiler support: [requires `rustc` 1.71.1+][msrv]*
*[Supported Rust Versions](#supported-rust-versions)*
[Prometheus]: https://prometheus.io
[Jaeger]: https://www.jaegertracing.io
[msrv]: #supported-rust-versions
## Getting started
See [docs](https://docs.rs/opentelemetry-otlp).
## Release Notes
You can find the release notes (changelog) [here](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-otlp/CHANGELOG.md).
## Supported Rust Versions
OpenTelemetry is built against the latest stable release. The minimum supported
version is 1.71.1. The current OpenTelemetry version is not guaranteed to build
version is 1.75.0. The current OpenTelemetry version is not guaranteed to build
on Rust versions earlier than the minimum supported version.
The current stable Rust compiler and the three most recent minor versions

View File

@ -3,20 +3,16 @@
# This is used with cargo-check-external-types to reduce the surface area of downstream crates from
# the public API. Ideally this can have a few exceptions as possible.
allowed_external_types = [
"opentelemetry::*",
"opentelemetry_http::*",
"opentelemetry_sdk::*",
# http is a pre 1.0 crate
"http::uri::InvalidUri",
"http::header::name::InvalidHeaderName",
"http::header::value::InvalidHeaderValue",
# prost is a pre 1.0 crate
"prost::error::EncodeError",
# serde
"serde::de::Deserialize",
"serde::ser::Serialize",
# tonic is a pre 1.0 crate
"tonic::status::Code",
"tonic::status::Status",
"tonic::metadata::map::MetadataMap",
"tonic::transport::channel::tls::ClientTlsConfig",
"tonic::transport::tls::Certificate",
"tonic::transport::tls::Identity",
"tonic::transport::channel::Channel",
"tonic::transport::error::Error",
"tonic::service::interceptor::Interceptor",
]

View File

@ -3,28 +3,28 @@ name = "basic-otlp-http"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
rust-version = "1.75.0"
publish = false
autobenches = false
[[bin]]
name = "basic-otlp-http"
path = "src/main.rs"
bench = false
[features]
default = ["reqwest"]
reqwest = ["opentelemetry-otlp/reqwest-client"]
hyper = ["opentelemetry-otlp/hyper-client"]
default = ["reqwest-blocking"]
reqwest-blocking = ["opentelemetry-otlp/reqwest-blocking-client"]
[dependencies]
once_cell = { workspace = true }
opentelemetry = { path = "../../../opentelemetry" }
opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio", "metrics", "logs"]}
opentelemetry-http = { path = "../../../opentelemetry-http", optional = true, default-features = false}
opentelemetry-otlp = { path = "../..", features = ["http-proto", "http-json", "logs"] , default-features = false}
opentelemetry-appender-tracing = { path = "../../../opentelemetry-appender-tracing", default-features = false}
opentelemetry-semantic-conventions = { path = "../../../opentelemetry-semantic-conventions" }
opentelemetry_sdk = { path = "../../../opentelemetry-sdk" }
opentelemetry-otlp = { workspace = true }
opentelemetry-appender-tracing = { workspace = true }
async-trait = { workspace = true, optional = true }
bytes = { workspace = true, optional = true }
http = { workspace = true, optional = true }
http-body-util = { workspace = true, optional = true }
tokio = { workspace = true, features = ["full"] }
tracing = { workspace = true, features = ["std"]}
tracing-core = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std"] }
tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std", "fmt"] }
[lints]
workspace = true

View File

@ -1,6 +0,0 @@
FROM rust:1.51
COPY . /usr/src/basic-otlp-http/
WORKDIR /usr/src/basic-otlp-http/
RUN cargo build --release
RUN cargo install --path .
CMD ["/usr/local/cargo/bin/basic-otlp-http"]

View File

@ -1,49 +1,40 @@
# Basic OTLP exporter Example
# Basic OTLP Exporter Example
This example shows how to setup OpenTelemetry OTLP exporter for logs, metrics
and traces to export them to the [OpenTelemetry
This example demonstrates how to set up an OpenTelemetry OTLP exporter for logs,
metrics, and traces to send data to the [OpenTelemetry
Collector](https://github.com/open-telemetry/opentelemetry-collector) via OTLP
over selected protocol such as HTTP/protobuf or HTTP/json. The Collector then sends the data to the appropriate
backend, in this case, the logging Exporter, which displays data to console.
over HTTP (using `protobuf` encoding by default but can be changed to use
`json`). The Collector then forwards the data to the configured backend, which
in this case is the logging exporter, displaying data on the console.
Additionally, the example configures a `tracing::fmt` layer to output logs
emitted via `tracing` to `stdout`. For demonstration, this layer uses a filter
to display `DEBUG` level logs from various OpenTelemetry components. In real
applications, these filters should be adjusted appropriately.
The example employs a `BatchExporter` for logs and traces, which is the
recommended approach when using OTLP exporters. While it can be modified to use
a `SimpleExporter`, this requires making the main function a regular main and
*not* tokio main.
// TODO: Document how to use hyper client.
## Usage
### `docker-compose`
By default runs against the `otel/opentelemetry-collector:latest` image, and uses `reqwest-client`
as the http client, using http as the transport.
```shell
docker-compose up
```
In another terminal run the application `cargo run`
The docker-compose terminal will display logs, traces, metrics.
Press Ctrl+C to stop the collector, and then tear it down:
```shell
docker-compose down
```
### Manual
If you don't want to use `docker-compose`, you can manually run the `otel/opentelemetry-collector` container
and inspect the logs to see traces being transferred.
Run the `otel/opentelemetry-collector` container using docker
and inspect the logs to see the exported telemetry.
On Unix based systems use:
```shell
# From the current directory, run `opentelemetry-collector`
docker run --rm -it -p 4318:4318 -v $(pwd):/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
docker run --rm -it -p 4317:4317 -p 4318:4318 -v $(pwd):/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
```
On Windows use:
```shell
# From the current directory, run `opentelemetry-collector`
docker run --rm -it -p 4318:4318 -v "%cd%":/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
docker run --rm -it -p 4317:4317 -p 4318:4318 -v "%cd%":/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
```
Run the app which exports logs, metrics and traces via OTLP to the collector
@ -52,13 +43,7 @@ Run the app which exports logs, metrics and traces via OTLP to the collector
cargo run
```
By default the app will use a `reqwest` client to send. A hyper 0.14 client can be used with the `hyper` feature enabled
```shell
cargo run --no-default-features --features=hyper
```
The app will use a `reqwest-blocking` client to send.
## View results
@ -121,7 +106,7 @@ SpanEvent #0
-> Timestamp: 2024-05-14 02:15:56.824201397 +0000 UTC
-> DroppedAttributesCount: 0
-> Attributes::
-> bogons: Int(100)
-> some.key: Int(100)
{"kind": "exporter", "data_type": "traces", "name": "logging"}
...
```

View File

@ -1,15 +0,0 @@
version: "2"
services:
# Collector
otel-collector:
image: otel/opentelemetry-collector:latest
command: ["--config=/etc/otel-collector-config.yaml", "${OTELCOL_ARGS}"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "4318:4318" # OTLP HTTP receiver

View File

@ -1,123 +1,133 @@
/// To use hyper as the HTTP client - cargo run --features="hyper" --no-default-features
use once_cell::sync::Lazy;
use opentelemetry::{
global,
trace::{TraceContextExt, TraceError, Tracer},
trace::{TraceContextExt, Tracer},
InstrumentationScope, KeyValue,
};
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_otlp::{LogExporter, MetricExporter, Protocol, SpanExporter};
use opentelemetry_sdk::Resource;
use opentelemetry_sdk::{
logs::LoggerProvider,
metrics::{MetricError, PeriodicReader, SdkMeterProvider},
runtime,
trace::{self as sdktrace, Config, TracerProvider},
logs::SdkLoggerProvider, metrics::SdkMeterProvider, trace::SdkTracerProvider,
};
use opentelemetry_sdk::{
logs::{self as sdklogs},
Resource,
};
use std::error::Error;
use std::{error::Error, sync::OnceLock};
use tracing::info;
use tracing_subscriber::prelude::*;
use tracing_subscriber::EnvFilter;
static RESOURCE: Lazy<Resource> = Lazy::new(|| {
Resource::new(vec![KeyValue::new(
opentelemetry_semantic_conventions::resource::SERVICE_NAME,
"basic-otlp-example",
)])
});
fn init_logs() -> Result<sdklogs::LoggerProvider, opentelemetry_sdk::logs::LogError> {
let exporter = LogExporter::builder()
.with_http()
.with_endpoint("http://localhost:4318/v1/logs")
.with_protocol(Protocol::HttpBinary)
.build()?;
Ok(LoggerProvider::builder()
.with_batch_exporter(exporter, runtime::Tokio)
.with_resource(RESOURCE.clone())
.build())
fn get_resource() -> Resource {
static RESOURCE: OnceLock<Resource> = OnceLock::new();
RESOURCE
.get_or_init(|| {
Resource::builder()
.with_service_name("basic-otlp-example-grpc")
.build()
})
.clone()
}
fn init_tracer_provider() -> Result<sdktrace::TracerProvider, TraceError> {
fn init_logs() -> SdkLoggerProvider {
let exporter = LogExporter::builder()
.with_http()
.with_protocol(Protocol::HttpBinary)
.build()
.expect("Failed to create log exporter");
SdkLoggerProvider::builder()
.with_batch_exporter(exporter)
.with_resource(get_resource())
.build()
}
fn init_traces() -> SdkTracerProvider {
let exporter = SpanExporter::builder()
.with_http()
.with_protocol(Protocol::HttpBinary) //can be changed to `Protocol::HttpJson` to export in JSON format
.with_endpoint("http://localhost:4318/v1/traces")
.build()?;
.build()
.expect("Failed to create trace exporter");
Ok(TracerProvider::builder()
.with_batch_exporter(exporter, runtime::Tokio)
.with_config(Config::default().with_resource(RESOURCE.clone()))
.build())
SdkTracerProvider::builder()
.with_batch_exporter(exporter)
.with_resource(get_resource())
.build()
}
fn init_metrics() -> Result<opentelemetry_sdk::metrics::SdkMeterProvider, MetricError> {
fn init_metrics() -> SdkMeterProvider {
let exporter = MetricExporter::builder()
.with_http()
.with_protocol(Protocol::HttpBinary) //can be changed to `Protocol::HttpJson` to export in JSON format
.with_endpoint("http://localhost:4318/v1/metrics")
.build()?;
.build()
.expect("Failed to create metric exporter");
Ok(SdkMeterProvider::builder()
.with_reader(PeriodicReader::builder(exporter, runtime::Tokio).build())
.with_resource(RESOURCE.clone())
.build())
SdkMeterProvider::builder()
.with_periodic_exporter(exporter)
.with_resource(get_resource())
.build()
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
let result = init_tracer_provider();
assert!(
result.is_ok(),
"Init tracer failed with error: {:?}",
result.err()
);
let tracer_provider = result.unwrap();
global::set_tracer_provider(tracer_provider.clone());
let result = init_metrics();
assert!(
result.is_ok(),
"Init metrics failed with error: {:?}",
result.err()
);
let meter_provider = result.unwrap();
global::set_meter_provider(meter_provider.clone());
// Opentelemetry will not provide a global API to manage the logger
// provider. Application users must manage the lifecycle of the logger
// provider on their own. Dropping logger providers will disable log
// emitting.
let logger_provider = init_logs().unwrap();
let logger_provider = init_logs();
// Create a new OpenTelemetryTracingBridge using the above LoggerProvider.
let layer = OpenTelemetryTracingBridge::new(&logger_provider);
let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider);
// Add a tracing filter to filter events from crates used by opentelemetry-otlp.
// To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal
// logging is properly suppressed. However, logs emitted by external components
// (such as reqwest, tonic, etc.) are not suppressed as they do not propagate
// OpenTelemetry context. Until this issue is addressed
// (https://github.com/open-telemetry/opentelemetry-rust/issues/2877),
// filtering like this is the best way to suppress such logs.
//
// The filter levels are set as follows:
// - Allow `info` level and above by default.
// - Restrict `hyper`, `tonic`, and `reqwest` to `error` level logs only.
// This ensures events generated from these crates within the OTLP Exporter are not looped back,
// thus preventing infinite event generation.
// Note: This will also drop events from these crates used outside the OTLP Exporter.
// For more details, see: https://github.com/open-telemetry/opentelemetry-rust/issues/761
let filter = EnvFilter::new("info")
.add_directive("hyper=error".parse().unwrap())
.add_directive("tonic=error".parse().unwrap())
.add_directive("reqwest=error".parse().unwrap());
// - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`.
//
// Note: This filtering will also drop logs from these components even when
// they are used outside of the OTLP Exporter.
let filter_otel = EnvFilter::new("info")
.add_directive("hyper=off".parse().unwrap())
.add_directive("tonic=off".parse().unwrap())
.add_directive("h2=off".parse().unwrap())
.add_directive("reqwest=off".parse().unwrap());
let otel_layer = otel_layer.with_filter(filter_otel);
// Create a new tracing::Fmt layer to print the logs to stdout. It has a
// default filter of `info` level and above, and `debug` and above for logs
// from OpenTelemetry crates. The filter levels can be customized as needed.
let filter_fmt = EnvFilter::new("info").add_directive("opentelemetry=debug".parse().unwrap());
let fmt_layer = tracing_subscriber::fmt::layer()
.with_thread_names(true)
.with_filter(filter_fmt);
// Initialize the tracing subscriber with the OpenTelemetry layer and the
// Fmt layer.
tracing_subscriber::registry()
.with(filter)
.with(layer)
.with(otel_layer)
.with(fmt_layer)
.init();
// At this point Logs (OTel Logs and Fmt Logs) are initialized, which will
// allow internal-logs from Tracing/Metrics initializer to be captured.
let tracer_provider = init_traces();
// Set the global tracer provider using a clone of the tracer_provider.
// Setting global tracer provider is required if other parts of the application
// uses global::tracer() or global::tracer_with_version() to get a tracer.
// Cloning simply creates a new reference to the same tracer provider. It is
// important to hold on to the tracer_provider here, so as to invoke
// shutdown on it when application ends.
global::set_tracer_provider(tracer_provider.clone());
let meter_provider = init_metrics();
// Set the global meter provider using a clone of the meter_provider.
// Setting global meter provider is required if other parts of the application
// uses global::meter() or global::meter_with_version() to get a meter.
// Cloning simply creates a new reference to the same meter provider. It is
// important to hold on to the meter_provider here, so as to invoke
// shutdown on it when application ends.
global::set_meter_provider(meter_provider.clone());
let common_scope_attributes = vec![KeyValue::new("scope-key", "scope-value")];
let scope = InstrumentationScope::builder("basic")
.with_version("1.0")
@ -141,7 +151,7 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
let span = cx.span();
span.add_event(
"Nice operation!".to_string(),
vec![KeyValue::new("bogons", 100)],
vec![KeyValue::new("some.key", 100)],
);
span.set_attribute(KeyValue::new("another.key", "yes"));
@ -156,9 +166,27 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
info!(target: "my-target", "hello from {}. My price is {}", "apple", 1.99);
global::shutdown_tracer_provider();
logger_provider.shutdown()?;
meter_provider.shutdown()?;
// Collect all shutdown errors
let mut shutdown_errors = Vec::new();
if let Err(e) = tracer_provider.shutdown() {
shutdown_errors.push(format!("tracer provider: {e}"));
}
if let Err(e) = meter_provider.shutdown() {
shutdown_errors.push(format!("meter provider: {e}"));
}
if let Err(e) = logger_provider.shutdown() {
shutdown_errors.push(format!("logger provider: {e}"));
}
// Return an error if any shutdown failed
if !shutdown_errors.is_empty() {
return Err(format!(
"Failed to shutdown providers:{}",
shutdown_errors.join("\n")
)
.into());
}
Ok(())
}

View File

@ -3,16 +3,23 @@ name = "basic-otlp"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
rust-version = "1.75.0"
publish = false
autobenches = false
[[bin]]
name = "basic-otlp"
path = "src/main.rs"
bench = false
[dependencies]
once_cell = { workspace = true }
opentelemetry = { path = "../../../opentelemetry", features = ["metrics", "logs"] }
opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio", "logs"] }
opentelemetry-otlp = { path = "../../../opentelemetry-otlp", features = ["tonic", "metrics", "logs"] }
opentelemetry-semantic-conventions = { path = "../../../opentelemetry-semantic-conventions" }
tokio = { version = "1.0", features = ["full"] }
opentelemetry-appender-tracing = { path = "../../../opentelemetry-appender-tracing", default-features = false}
opentelemetry = { path = "../../../opentelemetry" }
opentelemetry_sdk = { path = "../../../opentelemetry-sdk" }
opentelemetry-otlp = { workspace = true, features = ["grpc-tonic"] }
tokio = { workspace = true, features = ["full"] }
opentelemetry-appender-tracing = { workspace = true }
tracing = { workspace = true, features = ["std"]}
tracing-core = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std"] }
tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std", "fmt"] }
[lints]
workspace = true

View File

@ -1,6 +0,0 @@
FROM rust:1.51
COPY . /usr/src/basic-otlp/
WORKDIR /usr/src/basic-otlp/
RUN cargo build --release
RUN cargo install --path .
CMD ["/usr/local/cargo/bin/basic-otlp"]

View File

@ -1,49 +1,69 @@
# Basic OTLP exporter Example
# Basic OTLP Exporter Example
This example shows how to setup OpenTelemetry OTLP exporter for logs, metrics
and traces to exports them to the [OpenTelemetry
Collector](https://github.com/open-telemetry/opentelemetry-collector) via OTLP over gRPC.
The Collector then sends the data to the appropriate backend, in this case,
the logging Exporter, which displays data to console.
This example demonstrates how to set up an OpenTelemetry OTLP exporter for logs,
metrics, and traces to send data to the [OpenTelemetry
Collector](https://github.com/open-telemetry/opentelemetry-collector) via OTLP
over gRPC. The Collector then forwards the data to the configured backend, which
in this case is the logging exporter, displaying data on the console.
Additionally, the example configures a `tracing::fmt` layer to output logs
emitted via `tracing` to `stdout`. For demonstration, this layer uses a filter
to display `DEBUG` level logs from various OpenTelemetry components. In real
applications, these filters should be adjusted appropriately.
The example employs a `BatchExporter` for logs and traces, which is the
recommended approach when using OTLP exporters. While it can be modified to use
a `SimpleExporter`, this requires the main method to be a `tokio::main` function
since the `tonic` client requires a Tokio runtime. If you prefer not to use
`tokio::main`, then the `init_logs` and `init_traces` functions must be executed
within a Tokio runtime.
This examples uses the default `PeriodicReader` for metrics, which uses own
thread for background processing/exporting. Since the `tonic` client requires a
Tokio runtime, the main method must be a `tokio::main` function. If you prefer not
to use `tokio::main`, then the `init_metrics` function must be executed within a
Tokio runtime.
Below is an example on how to use non `tokio::main`:
```rust
fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
let rt = tokio::runtime::Runtime::new()?;
let tracer_provider = rt.block_on(async {
init_traces()
})?;
global::set_tracer_provider(tracer_provider.clone());
let meter_provider = rt.block_on(async {
init_metrics()
})?;
global::set_meter_provider(meter_provider.clone());
let logger_provider = rt.block_on(async {
init_logs()
})?;
// Ensure the runtime (`rt`) remains active until the program ends
// Additional code goes here...
}
```
## Usage
### `docker-compose`
By default runs against the `otel/opentelemetry-collector:latest` image, and uses the `tonic`'s
`grpc` example as the transport.
```shell
docker-compose up
```
In another terminal run the application `cargo run`
The docker-compose terminal will display logs, traces, metrics.
Press Ctrl+C to stop the collector, and then tear it down:
```shell
docker-compose down
```
### Manual
If you don't want to use `docker-compose`, you can manually run the `otel/opentelemetry-collector` container
and inspect the logs to see traces being transferred.
Run the `otel/opentelemetry-collector` container using docker
and inspect the logs to see the exported telemetry.
On Unix based systems use:
```shell
# From the current directory, run `opentelemetry-collector`
docker run --rm -it -p 4317:4317 -v $(pwd):/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
docker run --rm -it -p 4317:4317 -p 4318:4318 -v $(pwd):/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
```
On Windows use:
```shell
# From the current directory, run `opentelemetry-collector`
docker run --rm -it -p 4317:4317 -v "%cd%":/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
docker run --rm -it -p 4317:4317 -p 4318:4318 -v "%cd%":/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml
```
Run the app which exports logs, metrics and traces via OTLP to the collector
@ -113,7 +133,7 @@ SpanEvent #0
-> Timestamp: 2024-05-22 20:25:42.8770471 +0000 UTC
-> DroppedAttributesCount: 0
-> Attributes::
-> bogons: Int(100)
-> some.key: Int(100)
{"kind": "exporter", "data_type": "traces", "name": "logging"}
```

View File

@ -1,15 +0,0 @@
version: "2"
services:
# Collector
otel-collector:
image: otel/opentelemetry-collector:latest
command: ["--config=/etc/otel-collector-config.yaml", "${OTELCOL_ARGS}"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "4317:4317" # OTLP gRPC receiver

View File

@ -1,109 +1,127 @@
use once_cell::sync::Lazy;
use opentelemetry::trace::{TraceContextExt, TraceError, Tracer};
use opentelemetry::trace::{TraceContextExt, Tracer};
use opentelemetry::KeyValue;
use opentelemetry::{global, InstrumentationScope};
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use opentelemetry_otlp::{LogExporter, MetricExporter, SpanExporter, WithExportConfig};
use opentelemetry_sdk::logs::LogError;
use opentelemetry_sdk::logs::LoggerProvider;
use opentelemetry_sdk::metrics::MetricError;
use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider};
use opentelemetry_sdk::trace::Config;
use opentelemetry_sdk::{runtime, trace as sdktrace, Resource};
use opentelemetry_otlp::{LogExporter, MetricExporter, SpanExporter};
use opentelemetry_sdk::logs::SdkLoggerProvider;
use opentelemetry_sdk::metrics::SdkMeterProvider;
use opentelemetry_sdk::trace::SdkTracerProvider;
use opentelemetry_sdk::Resource;
use std::error::Error;
use std::sync::OnceLock;
use tracing::info;
use tracing_subscriber::prelude::*;
use tracing_subscriber::EnvFilter;
static RESOURCE: Lazy<Resource> = Lazy::new(|| {
Resource::new(vec![KeyValue::new(
opentelemetry_semantic_conventions::resource::SERVICE_NAME,
"basic-otlp-example",
)])
});
fn get_resource() -> Resource {
static RESOURCE: OnceLock<Resource> = OnceLock::new();
RESOURCE
.get_or_init(|| {
Resource::builder()
.with_service_name("basic-otlp-example-grpc")
.build()
})
.clone()
}
fn init_tracer_provider() -> Result<sdktrace::TracerProvider, TraceError> {
fn init_traces() -> SdkTracerProvider {
let exporter = SpanExporter::builder()
.with_tonic()
.with_endpoint("http://localhost:4317")
.build()?;
Ok(sdktrace::TracerProvider::builder()
.with_config(Config::default().with_resource(RESOURCE.clone()))
.with_batch_exporter(exporter, runtime::Tokio)
.build())
.build()
.expect("Failed to create span exporter");
SdkTracerProvider::builder()
.with_resource(get_resource())
.with_batch_exporter(exporter)
.build()
}
fn init_metrics() -> Result<opentelemetry_sdk::metrics::SdkMeterProvider, MetricError> {
let exporter = MetricExporter::builder().with_tonic().build()?;
fn init_metrics() -> SdkMeterProvider {
let exporter = MetricExporter::builder()
.with_tonic()
.build()
.expect("Failed to create metric exporter");
let reader = PeriodicReader::builder(exporter, runtime::Tokio).build();
Ok(SdkMeterProvider::builder()
.with_reader(reader)
.with_resource(RESOURCE.clone())
.build())
SdkMeterProvider::builder()
.with_periodic_exporter(exporter)
.with_resource(get_resource())
.build()
}
fn init_logs() -> Result<opentelemetry_sdk::logs::LoggerProvider, LogError> {
fn init_logs() -> SdkLoggerProvider {
let exporter = LogExporter::builder()
.with_tonic()
.with_endpoint("http://localhost:4317")
.build()?;
.build()
.expect("Failed to create log exporter");
Ok(LoggerProvider::builder()
.with_resource(RESOURCE.clone())
.with_batch_exporter(exporter, runtime::Tokio)
.build())
SdkLoggerProvider::builder()
.with_resource(get_resource())
.with_batch_exporter(exporter)
.build()
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
// By binding the result to an unused variable, the lifetime of the variable
// matches the containing block, reporting traces and metrics during the whole
// execution.
let result = init_tracer_provider();
assert!(
result.is_ok(),
"Init tracer failed with error: {:?}",
result.err()
);
let tracer_provider = result.unwrap();
global::set_tracer_provider(tracer_provider.clone());
let result = init_metrics();
assert!(
result.is_ok(),
"Init metrics failed with error: {:?}",
result.err()
);
let meter_provider = result.unwrap();
global::set_meter_provider(meter_provider.clone());
// Initialize logs and save the logger_provider.
let logger_provider = init_logs().unwrap();
let logger_provider = init_logs();
// Create a new OpenTelemetryTracingBridge using the above LoggerProvider.
let layer = OpenTelemetryTracingBridge::new(&logger_provider);
let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider);
// Add a tracing filter to filter events from crates used by opentelemetry-otlp.
// To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal
// logging is properly suppressed. However, logs emitted by external components
// (such as reqwest, tonic, etc.) are not suppressed as they do not propagate
// OpenTelemetry context. Until this issue is addressed
// (https://github.com/open-telemetry/opentelemetry-rust/issues/2877),
// filtering like this is the best way to suppress such logs.
//
// The filter levels are set as follows:
// - Allow `info` level and above by default.
// - Restrict `hyper`, `tonic`, and `reqwest` to `error` level logs only.
// This ensures events generated from these crates within the OTLP Exporter are not looped back,
// thus preventing infinite event generation.
// Note: This will also drop events from these crates used outside the OTLP Exporter.
// For more details, see: https://github.com/open-telemetry/opentelemetry-rust/issues/761
let filter = EnvFilter::new("info")
.add_directive("hyper=error".parse().unwrap())
.add_directive("tonic=error".parse().unwrap())
.add_directive("reqwest=error".parse().unwrap());
// - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`.
//
// Note: This filtering will also drop logs from these components even when
// they are used outside of the OTLP Exporter.
let filter_otel = EnvFilter::new("info")
.add_directive("hyper=off".parse().unwrap())
.add_directive("tonic=off".parse().unwrap())
.add_directive("h2=off".parse().unwrap())
.add_directive("reqwest=off".parse().unwrap());
let otel_layer = otel_layer.with_filter(filter_otel);
// Create a new tracing::Fmt layer to print the logs to stdout. It has a
// default filter of `info` level and above, and `debug` and above for logs
// from OpenTelemetry crates. The filter levels can be customized as needed.
let filter_fmt = EnvFilter::new("info").add_directive("opentelemetry=debug".parse().unwrap());
let fmt_layer = tracing_subscriber::fmt::layer()
.with_thread_names(true)
.with_filter(filter_fmt);
// Initialize the tracing subscriber with the OpenTelemetry layer and the
// Fmt layer.
tracing_subscriber::registry()
.with(filter)
.with(layer)
.with(otel_layer)
.with(fmt_layer)
.init();
// At this point Logs (OTel Logs and Fmt Logs) are initialized, which will
// allow internal-logs from Tracing/Metrics initializer to be captured.
let tracer_provider = init_traces();
// Set the global tracer provider using a clone of the tracer_provider.
// Setting global tracer provider is required if other parts of the application
// uses global::tracer() or global::tracer_with_version() to get a tracer.
// Cloning simply creates a new reference to the same tracer provider. It is
// important to hold on to the tracer_provider here, so as to invoke
// shutdown on it when application ends.
global::set_tracer_provider(tracer_provider.clone());
let meter_provider = init_metrics();
// Set the global meter provider using a clone of the meter_provider.
// Setting global meter provider is required if other parts of the application
// uses global::meter() or global::meter_with_version() to get a meter.
// Cloning simply creates a new reference to the same meter provider. It is
// important to hold on to the meter_provider here, so as to invoke
// shutdown on it when application ends.
global::set_meter_provider(meter_provider.clone());
let common_scope_attributes = vec![KeyValue::new("scope-key", "scope-value")];
let scope = InstrumentationScope::builder("basic")
.with_version("1.0")
@ -141,9 +159,28 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
info!(name: "my-event", target: "my-target", "hello from {}. My price is {}", "apple", 1.99);
global::shutdown_tracer_provider();
meter_provider.shutdown()?;
logger_provider.shutdown()?;
// Collect all shutdown errors
let mut shutdown_errors = Vec::new();
if let Err(e) = tracer_provider.shutdown() {
shutdown_errors.push(format!("tracer provider: {e}"));
}
if let Err(e) = meter_provider.shutdown() {
shutdown_errors.push(format!("meter provider: {e}"));
}
if let Err(e) = logger_provider.shutdown() {
shutdown_errors.push(format!("logger provider: {e}"));
}
// Return an error if any shutdown failed
if !shutdown_errors.is_empty() {
return Err(format!(
"Failed to shutdown providers:{}",
shutdown_errors.join("\n")
)
.into());
}
Ok(())
}

View File

@ -1,54 +1,66 @@
use std::sync::Arc;
use async_trait::async_trait;
use http::{header::CONTENT_TYPE, Method};
use opentelemetry_sdk::export::logs::{LogBatch, LogExporter};
use opentelemetry_sdk::logs::{LogError, LogResult};
use super::OtlpHttpClient;
use http::{header::CONTENT_TYPE, Method};
use opentelemetry::otel_debug;
use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};
use opentelemetry_sdk::logs::{LogBatch, LogExporter};
use std::time;
#[async_trait]
impl LogExporter for OtlpHttpClient {
async fn export(&mut self, batch: LogBatch<'_>) -> LogResult<()> {
async fn export(&self, batch: LogBatch<'_>) -> OTelSdkResult {
let client = self
.client
.lock()
.map_err(|e| LogError::Other(e.to_string().into()))
.and_then(|g| match &*g {
Some(client) => Ok(Arc::clone(client)),
_ => Err(LogError::Other("exporter is already shut down".into())),
})?;
.map_err(|e| OTelSdkError::InternalFailure(format!("Mutex lock failed: {e}")))?
.clone()
.ok_or(OTelSdkError::AlreadyShutdown)?;
let (body, content_type) = self
.build_logs_export_body(batch)
.map_err(OTelSdkError::InternalFailure)?;
let (body, content_type) = { self.build_logs_export_body(batch)? };
let mut request = http::Request::builder()
.method(Method::POST)
.uri(&self.collector_endpoint)
.header(CONTENT_TYPE, content_type)
.body(body)
.map_err(|e| crate::Error::RequestFailed(Box::new(e)))?;
.body(body.into())
.map_err(|e| OTelSdkError::InternalFailure(e.to_string()))?;
for (k, v) in &self.headers {
request.headers_mut().insert(k.clone(), v.clone());
}
let request_uri = request.uri().to_string();
let response = client.send(request).await?;
otel_debug!(name: "HttpLogsClient.ExportStarted");
let response = client
.send_bytes(request)
.await
.map_err(|e| OTelSdkError::InternalFailure(format!("{e:?}")))?;
if !response.status().is_success() {
let error = format!(
"OpenTelemetry logs export failed. Url: {}, Status Code: {}, Response: {:?}",
response.status().as_u16(),
request_uri,
response.status().as_u16(),
response.body()
);
return Err(LogError::Other(error.into()));
otel_debug!(name: "HttpLogsClient.ExportFailed", error = &error);
return Err(OTelSdkError::InternalFailure(error));
}
otel_debug!(name: "HttpLogsClient.ExportSucceeded");
Ok(())
}
fn shutdown(&mut self) {
let _ = self.client.lock().map(|mut c| c.take());
fn shutdown_with_timeout(&self, _timeout: time::Duration) -> OTelSdkResult {
let mut client_guard = self.client.lock().map_err(|e| {
OTelSdkError::InternalFailure(format!("Failed to acquire client lock: {e}"))
})?;
if client_guard.take().is_none() {
return Err(OTelSdkError::AlreadyShutdown);
}
Ok(())
}
fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) {

View File

@ -1,48 +1,69 @@
use std::sync::Arc;
use async_trait::async_trait;
use crate::metric::MetricsClient;
use http::{header::CONTENT_TYPE, Method};
use opentelemetry::otel_debug;
use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};
use opentelemetry_sdk::metrics::data::ResourceMetrics;
use opentelemetry_sdk::metrics::{MetricError, MetricResult};
use crate::{metric::MetricsClient, Error};
use super::OtlpHttpClient;
#[async_trait]
impl MetricsClient for OtlpHttpClient {
async fn export(&self, metrics: &mut ResourceMetrics) -> MetricResult<()> {
async fn export(&self, metrics: &ResourceMetrics) -> OTelSdkResult {
let client = self
.client
.lock()
.map_err(Into::into)
.map_err(|e| OTelSdkError::InternalFailure(format!("Failed to acquire lock: {e:?}")))
.and_then(|g| match &*g {
Some(client) => Ok(Arc::clone(client)),
_ => Err(MetricError::Other("exporter is already shut down".into())),
_ => Err(OTelSdkError::AlreadyShutdown),
})?;
let (body, content_type) = self.build_metrics_export_body(metrics)?;
let (body, content_type) = self.build_metrics_export_body(metrics).ok_or_else(|| {
OTelSdkError::InternalFailure("Failed to serialize metrics".to_string())
})?;
let mut request = http::Request::builder()
.method(Method::POST)
.uri(&self.collector_endpoint)
.header(CONTENT_TYPE, content_type)
.body(body)
.map_err(|e| crate::Error::RequestFailed(Box::new(e)))?;
.body(body.into())
.map_err(|e| OTelSdkError::InternalFailure(format!("{e:?}")))?;
for (k, v) in &self.headers {
request.headers_mut().insert(k.clone(), v.clone());
}
client
.send(request)
.await
.map_err(|e| MetricError::ExportErr(Box::new(Error::RequestFailed(e))))?;
otel_debug!(name: "HttpMetricsClient.ExportStarted");
let result = client.send_bytes(request).await;
Ok(())
match result {
Ok(response) => {
if response.status().is_success() {
otel_debug!(name: "HttpMetricsClient.ExportSucceeded");
Ok(())
} else {
let error = format!(
"OpenTelemetry metrics export failed. Status Code: {}, Response: {:?}",
response.status().as_u16(),
response.body()
);
otel_debug!(name: "HttpMetricsClient.ExportFailed", error = &error);
Err(OTelSdkError::InternalFailure(error))
}
}
Err(e) => {
let error = format!("{e:?}");
otel_debug!(name: "HttpMetricsClient.ExportFailed", error = &error);
Err(OTelSdkError::InternalFailure(error))
}
}
}
fn shutdown(&self) -> MetricResult<()> {
let _ = self.client.lock()?.take();
fn shutdown(&self) -> OTelSdkResult {
self.client
.lock()
.map_err(|e| OTelSdkError::InternalFailure(format!("Failed to acquire lock: {e}")))?
.take();
Ok(())
}

View File

@ -1,12 +1,11 @@
use super::{
default_headers, default_protocol, parse_header_string,
default_headers, default_protocol, parse_header_string, resolve_timeout, ExporterBuildError,
OTEL_EXPORTER_OTLP_HTTP_ENDPOINT_DEFAULT,
};
use crate::{
ExportConfig, Protocol, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS,
OTEL_EXPORTER_OTLP_TIMEOUT,
};
use crate::{ExportConfig, Protocol, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS};
use http::{HeaderName, HeaderValue, Uri};
#[cfg(feature = "http-json")]
use opentelemetry::otel_debug;
use opentelemetry_http::HttpClient;
use opentelemetry_proto::transform::common::tonic::ResourceAttributesWithSchema;
#[cfg(feature = "logs")]
@ -14,9 +13,9 @@ use opentelemetry_proto::transform::logs::tonic::group_logs_by_resource_and_scop
#[cfg(feature = "trace")]
use opentelemetry_proto::transform::trace::tonic::group_spans_by_resource_and_scope;
#[cfg(feature = "logs")]
use opentelemetry_sdk::export::logs::LogBatch;
use opentelemetry_sdk::logs::LogBatch;
#[cfg(feature = "trace")]
use opentelemetry_sdk::export::trace::SpanData;
use opentelemetry_sdk::trace::SpanData;
use prost::Message;
use std::collections::HashMap;
use std::env;
@ -27,8 +26,11 @@ use std::time::Duration;
#[cfg(feature = "metrics")]
mod metrics;
#[cfg(feature = "metrics")]
use opentelemetry_sdk::metrics::data::ResourceMetrics;
#[cfg(feature = "logs")]
mod logs;
pub(crate) mod logs;
#[cfg(feature = "trace")]
mod trace;
@ -41,15 +43,7 @@ mod trace;
use opentelemetry_http::hyper::HyperClient;
/// Configuration of the http transport
#[derive(Debug)]
#[cfg_attr(
all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
not(feature = "hyper-client")
),
derive(Default)
)]
#[derive(Debug, Default)]
pub struct HttpConfig {
/// Select the HTTP client
client: Option<Arc<dyn HttpClient>>,
@ -58,51 +52,16 @@ pub struct HttpConfig {
headers: Option<HashMap<String, String>>,
}
#[cfg(any(
feature = "reqwest-blocking-client",
feature = "reqwest-client",
feature = "hyper-client"
))]
impl Default for HttpConfig {
fn default() -> Self {
#[cfg(feature = "reqwest-blocking-client")]
let default_client =
Some(Arc::new(reqwest::blocking::Client::new()) as Arc<dyn HttpClient>);
#[cfg(all(not(feature = "reqwest-blocking-client"), feature = "reqwest-client"))]
let default_client = Some(Arc::new(reqwest::Client::new()) as Arc<dyn HttpClient>);
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "hyper-client"
))]
// TODO - support configuring custom connector and executor
let default_client = Some(Arc::new(HyperClient::with_default_connector(
Duration::from_secs(10),
None,
)) as Arc<dyn HttpClient>);
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
not(feature = "hyper-client")
))]
let default_client = None;
HttpConfig {
client: default_client,
headers: None,
}
}
}
/// Configuration for the OTLP HTTP exporter.
///
/// ## Examples
///
/// ```
/// ```no_run
/// # #[cfg(feature="metrics")]
/// use opentelemetry_sdk::metrics::Temporality;
///
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// // Create a span exporter you can use to when configuring tracer providers
/// // Create a span exporter you can use when configuring tracer providers
/// # #[cfg(feature="trace")]
/// let span_exporter = opentelemetry_otlp::SpanExporter::builder().with_http().build()?;
///
@ -148,28 +107,64 @@ impl HttpExporterBuilder {
signal_endpoint_path: &str,
signal_timeout_var: &str,
signal_http_headers_var: &str,
) -> Result<OtlpHttpClient, crate::Error> {
) -> Result<OtlpHttpClient, ExporterBuildError> {
let endpoint = resolve_http_endpoint(
signal_endpoint_var,
signal_endpoint_path,
self.exporter_config.endpoint.clone(),
self.exporter_config.endpoint.as_deref(),
)?;
let timeout = match env::var(signal_timeout_var)
.ok()
.or(env::var(OTEL_EXPORTER_OTLP_TIMEOUT).ok())
{
Some(val) => match val.parse() {
Ok(seconds) => Duration::from_secs(seconds),
Err(_) => self.exporter_config.timeout,
},
None => self.exporter_config.timeout,
};
let http_client = self
.http_config
.client
.take()
.ok_or(crate::Error::NoHttpClient)?;
let timeout = resolve_timeout(signal_timeout_var, self.exporter_config.timeout.as_ref());
#[allow(unused_mut)] // TODO - clippy thinks mut is not needed, but it is
let mut http_client = self.http_config.client.take();
if http_client.is_none() {
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "hyper-client"
))]
{
// TODO - support configuring custom connector and executor
http_client = Some(Arc::new(HyperClient::with_default_connector(timeout, None))
as Arc<dyn HttpClient>);
}
#[cfg(all(
not(feature = "hyper-client"),
not(feature = "reqwest-blocking-client"),
feature = "reqwest-client"
))]
{
http_client = Some(Arc::new(
reqwest::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_default(),
) as Arc<dyn HttpClient>);
}
#[cfg(all(
not(feature = "hyper-client"),
not(feature = "reqwest-client"),
feature = "reqwest-blocking-client"
))]
{
let timeout_clone = timeout;
http_client = Some(Arc::new(
std::thread::spawn(move || {
reqwest::blocking::Client::builder()
.timeout(timeout_clone)
.build()
.unwrap_or_else(|_| reqwest::blocking::Client::new())
})
.join()
.unwrap(), // TODO: Return ExporterBuildError::ThreadSpawnFailed
) as Arc<dyn HttpClient>);
}
}
let http_client = http_client.ok_or(ExporterBuildError::NoHttpClient)?;
#[allow(clippy::mutable_key_type)] // http headers are not mutated
let mut headers: HashMap<HeaderName, HeaderValue> = self
.http_config
@ -203,9 +198,7 @@ impl HttpExporterBuilder {
/// Create a log exporter with the current configuration
#[cfg(feature = "trace")]
pub fn build_span_exporter(
mut self,
) -> Result<crate::SpanExporter, opentelemetry::trace::TraceError> {
pub fn build_span_exporter(mut self) -> Result<crate::SpanExporter, ExporterBuildError> {
use crate::{
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_HEADERS,
OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,
@ -218,12 +211,12 @@ impl HttpExporterBuilder {
OTEL_EXPORTER_OTLP_TRACES_HEADERS,
)?;
Ok(crate::SpanExporter::new(client))
Ok(crate::SpanExporter::from_http(client))
}
/// Create a log exporter with the current configuration
#[cfg(feature = "logs")]
pub fn build_log_exporter(mut self) -> opentelemetry_sdk::logs::LogResult<crate::LogExporter> {
pub fn build_log_exporter(mut self) -> Result<crate::LogExporter, ExporterBuildError> {
use crate::{
OTEL_EXPORTER_OTLP_LOGS_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_HEADERS,
OTEL_EXPORTER_OTLP_LOGS_TIMEOUT,
@ -236,7 +229,7 @@ impl HttpExporterBuilder {
OTEL_EXPORTER_OTLP_LOGS_HEADERS,
)?;
Ok(crate::LogExporter::new(client))
Ok(crate::LogExporter::from_http(client))
}
/// Create a metrics exporter with the current configuration
@ -244,7 +237,7 @@ impl HttpExporterBuilder {
pub fn build_metrics_exporter(
mut self,
temporality: opentelemetry_sdk::metrics::Temporality,
) -> opentelemetry_sdk::metrics::MetricResult<crate::MetricExporter> {
) -> Result<crate::MetricExporter, ExporterBuildError> {
use crate::{
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_HEADERS,
OTEL_EXPORTER_OTLP_METRICS_TIMEOUT,
@ -257,12 +250,12 @@ impl HttpExporterBuilder {
OTEL_EXPORTER_OTLP_METRICS_HEADERS,
)?;
Ok(crate::MetricExporter::new(client, temporality))
Ok(crate::MetricExporter::from_http(client, temporality))
}
}
#[derive(Debug)]
struct OtlpHttpClient {
pub(crate) struct OtlpHttpClient {
client: Mutex<Option<Arc<dyn HttpClient>>>,
collector_endpoint: Uri,
headers: HashMap<HeaderName, HeaderValue>,
@ -296,7 +289,7 @@ impl OtlpHttpClient {
fn build_trace_export_body(
&self,
spans: Vec<SpanData>,
) -> opentelemetry::trace::TraceResult<(Vec<u8>, &'static str)> {
) -> Result<(Vec<u8>, &'static str), String> {
use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
let resource_spans = group_spans_by_resource_and_scope(spans, &self.resource);
@ -304,8 +297,8 @@ impl OtlpHttpClient {
match self.protocol {
#[cfg(feature = "http-json")]
Protocol::HttpJson => match serde_json::to_string_pretty(&req) {
Ok(json) => Ok((json.into(), "application/json")),
Err(e) => Err(opentelemetry::trace::TraceError::from(e.to_string())),
Ok(json) => Ok((json.into_bytes(), "application/json")),
Err(e) => Err(e.to_string()),
},
_ => Ok((req.encode_to_vec(), "application/x-protobuf")),
}
@ -315,7 +308,7 @@ impl OtlpHttpClient {
fn build_logs_export_body(
&self,
logs: LogBatch<'_>,
) -> opentelemetry_sdk::logs::LogResult<(Vec<u8>, &'static str)> {
) -> Result<(Vec<u8>, &'static str), String> {
use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
let resource_logs = group_logs_by_resource_and_scope(logs, &self.resource);
let req = ExportLogsServiceRequest { resource_logs };
@ -324,7 +317,7 @@ impl OtlpHttpClient {
#[cfg(feature = "http-json")]
Protocol::HttpJson => match serde_json::to_string_pretty(&req) {
Ok(json) => Ok((json.into(), "application/json")),
Err(e) => Err(opentelemetry_sdk::logs::LogError::from(e.to_string())),
Err(e) => Err(e.to_string()),
},
_ => Ok((req.encode_to_vec(), "application/x-protobuf")),
}
@ -333,64 +326,69 @@ impl OtlpHttpClient {
#[cfg(feature = "metrics")]
fn build_metrics_export_body(
&self,
metrics: &mut opentelemetry_sdk::metrics::data::ResourceMetrics,
) -> opentelemetry_sdk::metrics::MetricResult<(Vec<u8>, &'static str)> {
metrics: &ResourceMetrics,
) -> Option<(Vec<u8>, &'static str)> {
use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest;
let req: ExportMetricsServiceRequest = (&*metrics).into();
let req: ExportMetricsServiceRequest = metrics.into();
match self.protocol {
#[cfg(feature = "http-json")]
Protocol::HttpJson => match serde_json::to_string_pretty(&req) {
Ok(json) => Ok((json.into(), "application/json")),
Err(e) => Err(opentelemetry_sdk::metrics::MetricError::Other(
e.to_string(),
)),
Ok(json) => Some((json.into(), "application/json")),
Err(e) => {
otel_debug!(name: "JsonSerializationFaied", error = e.to_string());
None
}
},
_ => Ok((req.encode_to_vec(), "application/x-protobuf")),
_ => Some((req.encode_to_vec(), "application/x-protobuf")),
}
}
}
fn build_endpoint_uri(endpoint: &str, path: &str) -> Result<Uri, crate::Error> {
fn build_endpoint_uri(endpoint: &str, path: &str) -> Result<Uri, ExporterBuildError> {
let path = if endpoint.ends_with('/') && path.starts_with('/') {
path.strip_prefix('/').unwrap()
} else {
path
};
format!("{endpoint}{path}").parse().map_err(From::from)
let endpoint = format!("{endpoint}{path}");
endpoint.parse().map_err(|er: http::uri::InvalidUri| {
ExporterBuildError::InvalidUri(endpoint, er.to_string())
})
}
// see https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#endpoint-urls-for-otlphttp
fn resolve_http_endpoint(
signal_endpoint_var: &str,
signal_endpoint_path: &str,
provided_endpoint: Option<String>,
) -> Result<Uri, crate::Error> {
// per signal env var is not modified
if let Some(endpoint) = env::var(signal_endpoint_var)
provided_endpoint: Option<&str>,
) -> Result<Uri, ExporterBuildError> {
// programmatic configuration overrides any value set via environment variables
if let Some(provider_endpoint) = provided_endpoint.filter(|s| !s.is_empty()) {
provider_endpoint
.parse()
.map_err(|er: http::uri::InvalidUri| {
ExporterBuildError::InvalidUri(provider_endpoint.to_string(), er.to_string())
})
} else if let Some(endpoint) = env::var(signal_endpoint_var)
.ok()
.and_then(|s| s.parse().ok())
{
return Ok(endpoint);
}
// if signal env var is not set, then we check if the OTEL_EXPORTER_OTLP_ENDPOINT is set
if let Some(endpoint) = env::var(OTEL_EXPORTER_OTLP_ENDPOINT)
// per signal env var is not modified
Ok(endpoint)
} else if let Some(endpoint) = env::var(OTEL_EXPORTER_OTLP_ENDPOINT)
.ok()
.and_then(|s| build_endpoint_uri(&s, signal_endpoint_path).ok())
{
return Ok(endpoint);
// if signal env var is not set, then we check if the OTEL_EXPORTER_OTLP_ENDPOINT env var is set
Ok(endpoint)
} else {
build_endpoint_uri(
OTEL_EXPORTER_OTLP_HTTP_ENDPOINT_DEFAULT,
signal_endpoint_path,
)
}
provided_endpoint
.map(|e| e.parse().map_err(From::from))
.unwrap_or_else(|| {
build_endpoint_uri(
OTEL_EXPORTER_OTLP_HTTP_ENDPOINT_DEFAULT,
signal_endpoint_path,
)
})
}
#[allow(clippy::mutable_key_type)] // http headers are not mutated
@ -444,13 +442,13 @@ impl<B: HasHttpConfig> WithHttpConfig for B {
fn with_headers(mut self, headers: HashMap<String, String>) -> Self {
// headers will be wrapped, so we must do some logic to unwrap first.
self.http_client_config()
let http_client_headers = self
.http_client_config()
.headers
.iter_mut()
.zip(headers)
.for_each(|(http_client_headers, (key, value))| {
http_client_headers.insert(key, super::url_decode(&value).unwrap_or(value));
});
.get_or_insert(HashMap::new());
headers.into_iter().for_each(|(key, value)| {
http_client_headers.insert(key, super::url_decode(&value).unwrap_or(value));
});
self
}
}
@ -471,12 +469,9 @@ mod tests {
run_env_test(
vec![(OTEL_EXPORTER_OTLP_ENDPOINT, "http://example.com")],
|| {
let endpoint = resolve_http_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
"/v1/traces",
Some("http://localhost:4317".to_string()),
)
.unwrap();
let endpoint =
resolve_http_endpoint(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "/v1/traces", None)
.unwrap();
assert_eq!(endpoint, "http://example.com/v1/traces");
},
)
@ -487,12 +482,9 @@ mod tests {
run_env_test(
vec![(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "http://example.com")],
|| {
let endpoint = super::resolve_http_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
"/v1/traces",
Some("http://localhost:4317".to_string()),
)
.unwrap();
let endpoint =
resolve_http_endpoint(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "/v1/traces", None)
.unwrap();
assert_eq!(endpoint, "http://example.com");
},
)
@ -509,7 +501,7 @@ mod tests {
let endpoint = super::resolve_http_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
"/v1/traces",
Some("http://localhost:4317".to_string()),
None,
)
.unwrap();
assert_eq!(endpoint, "http://example.com");
@ -518,15 +510,39 @@ mod tests {
}
#[test]
fn test_use_provided_or_default_when_others_missing() {
fn test_priority_of_code_based_config_over_envs() {
run_env_test(
vec![
(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "http://example.com"),
(OTEL_EXPORTER_OTLP_ENDPOINT, "http://wrong.com"),
],
|| {
let endpoint = super::resolve_http_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
"/v1/traces",
Some("http://localhost:4317"),
)
.unwrap();
assert_eq!(endpoint, "http://localhost:4317");
},
);
}
#[test]
fn test_use_default_when_empty_string_for_option() {
run_env_test(vec![], || {
let endpoint = super::resolve_http_endpoint(
"NON_EXISTENT_VAR",
"/v1/traces",
Some("http://localhost:4317".to_string()),
)
.unwrap();
assert_eq!(endpoint, "http://localhost:4317/");
let endpoint =
super::resolve_http_endpoint("non_existent_var", "/v1/traces", Some("")).unwrap();
assert_eq!(endpoint, "http://localhost:4318/v1/traces");
});
}
#[test]
fn test_use_default_when_others_missing() {
run_env_test(vec![], || {
let endpoint =
super::resolve_http_endpoint("NON_EXISTENT_VAR", "/v1/traces", None).unwrap();
assert_eq!(endpoint, "http://localhost:4318/v1/traces");
});
}
@ -558,7 +574,7 @@ mod tests {
let endpoint = super::resolve_http_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
"/v1/traces",
Some("http://localhost:4317".to_string()),
None,
)
.unwrap();
assert_eq!(endpoint, "http://example.com/v1/traces");
@ -572,7 +588,7 @@ mod tests {
let result = super::resolve_http_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
"/v1/traces",
Some("-*/*-/*-//-/-/yet-another-invalid-uri".to_string()),
Some("-*/*-/*-//-/-/yet-another-invalid-uri"),
);
assert!(result.is_err());
// You may also want to assert on the specific error type if applicable
@ -599,17 +615,14 @@ mod tests {
assert_eq!(
headers.len(),
expected_headers.len(),
"Failed on input: {}",
input_str
"Failed on input: {input_str}"
);
for (expected_key, expected_value) in expected_headers {
assert_eq!(
headers.get(&HeaderName::from_static(expected_key)),
Some(&HeaderValue::from_static(expected_value)),
"Failed on key: {} with input: {}",
expected_key,
input_str
"Failed on key: {expected_key} with input: {input_str}"
);
}
}
@ -649,28 +662,28 @@ mod tests {
assert_eq!(
headers.len(),
expected_headers.len(),
"Failed on input: {}",
input_str
"Failed on input: {input_str}"
);
for (expected_key, expected_value) in expected_headers {
assert_eq!(
headers.get(&HeaderName::from_static(expected_key)),
Some(&HeaderValue::from_static(expected_value)),
"Failed on key: {} with input: {}",
expected_key,
input_str
"Failed on key: {expected_key} with input: {input_str}"
);
}
}
}
#[test]
fn test_http_exporter_builder_with_header() {
fn test_http_exporter_builder_with_headers() {
use std::collections::HashMap;
// Arrange
let initial_headers = HashMap::from([("k1".to_string(), "v1".to_string())]);
let extra_headers = HashMap::from([("k2".to_string(), "v2".to_string())]);
let extra_headers = HashMap::from([
("k2".to_string(), "v2".to_string()),
("k3".to_string(), "v3".to_string()),
]);
let expected_headers = initial_headers.iter().chain(extra_headers.iter()).fold(
HashMap::new(),
|mut acc, (k, v)| {
@ -709,7 +722,7 @@ mod tests {
let url = resolve_http_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
"/v1/traces",
exporter.exporter_config.endpoint,
exporter.exporter_config.endpoint.as_deref(),
)
.unwrap();
@ -724,7 +737,7 @@ mod tests {
let url = resolve_http_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
"/v1/traces",
exporter.exporter_config.endpoint,
exporter.exporter_config.endpoint.as_deref(),
)
.unwrap();

View File

@ -1,70 +1,78 @@
use std::sync::Arc;
use futures_core::future::BoxFuture;
use http::{header::CONTENT_TYPE, Method};
use opentelemetry::trace::TraceError;
use opentelemetry_sdk::export::trace::{ExportResult, SpanData, SpanExporter};
use super::OtlpHttpClient;
use http::{header::CONTENT_TYPE, Method};
use opentelemetry::otel_debug;
use opentelemetry_sdk::{
error::{OTelSdkError, OTelSdkResult},
trace::{SpanData, SpanExporter},
};
impl SpanExporter for OtlpHttpClient {
fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
async fn export(&self, batch: Vec<SpanData>) -> OTelSdkResult {
let client = match self
.client
.lock()
.map_err(|e| TraceError::Other(e.to_string().into()))
.map_err(|e| OTelSdkError::InternalFailure(format!("Mutex lock failed: {e}")))
.and_then(|g| match &*g {
Some(client) => Ok(Arc::clone(client)),
_ => Err(TraceError::Other("exporter is already shut down".into())),
_ => Err(OTelSdkError::AlreadyShutdown),
}) {
Ok(client) => client,
Err(err) => return Box::pin(std::future::ready(Err(err))),
Err(err) => return Err(err),
};
let (body, content_type) = match self.build_trace_export_body(batch) {
Ok(body) => body,
Err(e) => return Box::pin(std::future::ready(Err(e))),
Err(e) => return Err(OTelSdkError::InternalFailure(e.to_string())),
};
let mut request = match http::Request::builder()
.method(Method::POST)
.uri(&self.collector_endpoint)
.header(CONTENT_TYPE, content_type)
.body(body)
.body(body.into())
{
Ok(req) => req,
Err(e) => {
return Box::pin(std::future::ready(Err(crate::Error::RequestFailed(
Box::new(e),
)
.into())))
}
Err(e) => return Err(OTelSdkError::InternalFailure(e.to_string())),
};
for (k, v) in &self.headers {
request.headers_mut().insert(k.clone(), v.clone());
}
Box::pin(async move {
let request_uri = request.uri().to_string();
let response = client.send(request).await?;
let request_uri = request.uri().to_string();
otel_debug!(name: "HttpTracesClient.ExportStarted");
let response = client
.send_bytes(request)
.await
.map_err(|e| OTelSdkError::InternalFailure(format!("{e:?}")))?;
if !response.status().is_success() {
let error = format!(
"OpenTelemetry trace export failed. Url: {}, Status Code: {}, Response: {:?}",
response.status().as_u16(),
request_uri,
response.body()
);
return Err(TraceError::Other(error.into()));
}
if !response.status().is_success() {
let error = format!(
"OpenTelemetry trace export failed. Url: {}, Status Code: {}, Response: {:?}",
request_uri,
response.status().as_u16(),
response.body()
);
otel_debug!(name: "HttpTracesClient.ExportFailed", error = &error);
return Err(OTelSdkError::InternalFailure(error));
}
Ok(())
})
otel_debug!(name: "HttpTracesClient.ExportSucceeded");
Ok(())
}
fn shutdown(&mut self) {
let _ = self.client.lock().map(|mut c| c.take());
fn shutdown(&mut self) -> OTelSdkResult {
let mut client_guard = self.client.lock().map_err(|e| {
OTelSdkError::InternalFailure(format!("Failed to acquire client lock: {e}"))
})?;
if client_guard.take().is_none() {
return Err(OTelSdkError::AlreadyShutdown);
}
Ok(())
}
fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) {

View File

@ -6,12 +6,13 @@
use crate::exporter::http::HttpExporterBuilder;
#[cfg(feature = "grpc-tonic")]
use crate::exporter::tonic::TonicExporterBuilder;
use crate::{Error, Protocol};
use crate::Protocol;
#[cfg(feature = "serialize")]
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Formatter};
use std::str::FromStr;
use std::time::Duration;
use thiserror::Error;
/// Target to which the exporter is going to send signals, defaults to https://localhost:4317.
/// Learn about the relationship between this constant and metrics/spans/logs at
@ -52,7 +53,7 @@ const OTEL_EXPORTER_OTLP_PROTOCOL_HTTP_JSON: &str = "http/json";
/// Max waiting time for the backend to process each signal batch, defaults to 10 seconds.
pub const OTEL_EXPORTER_OTLP_TIMEOUT: &str = "OTEL_EXPORTER_OTLP_TIMEOUT";
/// Default max waiting time for the backend to process each signal batch.
pub const OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT: u64 = 10;
pub const OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT: Duration = Duration::from_millis(10000);
// Endpoints per protocol https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md
#[cfg(feature = "grpc-tonic")]
@ -67,31 +68,74 @@ pub(crate) mod tonic;
/// Configuration for the OTLP exporter.
#[derive(Debug)]
pub struct ExportConfig {
/// The address of the OTLP collector. If it's not provided via builder or environment variables.
/// The address of the OTLP collector.
/// Default address will be used based on the protocol.
///
/// Note: Programmatically setting this will override any value set via the environment variable.
pub endpoint: Option<String>,
/// The protocol to use when communicating with the collector.
pub protocol: Protocol,
/// The timeout to the collector.
pub timeout: Duration,
/// The default value is 10 seconds.
///
/// Note: Programmatically setting this will override any value set via the environment variable.
pub timeout: Option<Duration>,
}
impl Default for ExportConfig {
fn default() -> Self {
let protocol = default_protocol();
ExportConfig {
Self {
endpoint: None,
// don't use default_endpoint(protocol) here otherwise we
// won't know if user provided a value
protocol,
timeout: Duration::from_secs(OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT),
timeout: None,
}
}
}
#[derive(Error, Debug)]
/// Errors that can occur while building an exporter.
// TODO: Refine and polish this.
// Non-exhaustive to allow for future expansion without breaking changes.
// This could be refined after polishing and finalizing the errors.
#[non_exhaustive]
pub enum ExporterBuildError {
/// Spawning a new thread failed.
#[error("Spawning a new thread failed. Unable to create Reqwest-Blocking client.")]
ThreadSpawnFailed,
/// Feature required to use the specified compression algorithm.
#[cfg(any(not(feature = "gzip-tonic"), not(feature = "zstd-tonic")))]
#[error("feature '{0}' is required to use the compression algorithm '{1}'")]
FeatureRequiredForCompressionAlgorithm(&'static str, Compression),
/// No Http client specified.
#[error("no http client specified")]
NoHttpClient,
/// Unsupported compression algorithm.
#[error("unsupported compression algorithm '{0}'")]
UnsupportedCompressionAlgorithm(String),
/// Invalid URI.
#[cfg(any(feature = "grpc-tonic", feature = "http-proto", feature = "http-json"))]
#[error("invalid URI {0}. Reason {1}")]
InvalidUri(String, String),
/// Failed due to an internal error.
/// The error message is intended for logging purposes only and should not
/// be used to make programmatic decisions. It is implementation-specific
/// and subject to change without notice. Consumers of this error should not
/// rely on its content beyond logging.
#[error("Reason: {0}")]
InternalFailure(String),
}
/// The compression algorithm to use when sending data.
#[cfg_attr(feature = "serialize", derive(Deserialize, Serialize))]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
@ -112,13 +156,15 @@ impl Display for Compression {
}
impl FromStr for Compression {
type Err = Error;
type Err = ExporterBuildError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"gzip" => Ok(Compression::Gzip),
"zstd" => Ok(Compression::Zstd),
_ => Err(Error::UnsupportedCompressionAlgorithm(s.to_string())),
_ => Err(ExporterBuildError::UnsupportedCompressionAlgorithm(
s.to_string(),
)),
}
}
}
@ -182,6 +228,8 @@ impl HasExportConfig for HttpExporterBuilder {
/// ```
pub trait WithExportConfig {
/// Set the address of the OTLP collector. If not set or set to empty string, the default address is used.
///
/// Note: Programmatically setting this will override any value set via the environment variable.
fn with_endpoint<T: Into<String>>(self, endpoint: T) -> Self;
/// Set the protocol to use when communicating with the collector.
///
@ -189,11 +237,15 @@ pub trait WithExportConfig {
/// will use default protocol in this case.
///
/// ## Note
/// All exporters in this crate only support one protocol, thus choosing the protocol is an no-op at the moment.
/// All exporters in this crate only support one protocol, thus choosing the protocol is a no-op at the moment.
fn with_protocol(self, protocol: Protocol) -> Self;
/// Set the timeout to the collector.
///
/// Note: Programmatically setting this will override any value set via the environment variable.
fn with_timeout(self, timeout: Duration) -> Self;
/// Set export config. This will override all previous configuration.
/// Set export config. This will override all previous configurations.
///
/// Note: Programmatically setting this will override any value set via environment variables.
fn with_export_config(self, export_config: ExportConfig) -> Self;
}
@ -209,7 +261,7 @@ impl<B: HasExportConfig> WithExportConfig for B {
}
fn with_timeout(mut self, timeout: Duration) -> Self {
self.export_config().timeout = timeout;
self.export_config().timeout = Some(timeout);
self
}
@ -221,6 +273,28 @@ impl<B: HasExportConfig> WithExportConfig for B {
}
}
#[cfg(any(feature = "grpc-tonic", feature = "http-proto", feature = "http-json"))]
fn resolve_timeout(signal_timeout_var: &str, provided_timeout: Option<&Duration>) -> Duration {
// programmatic configuration overrides any value set via environment variables
if let Some(timeout) = provided_timeout {
*timeout
} else if let Some(timeout) = std::env::var(signal_timeout_var)
.ok()
.and_then(|s| s.parse().ok())
{
// per signal env var is not modified
Duration::from_millis(timeout)
} else if let Some(timeout) = std::env::var(OTEL_EXPORTER_OTLP_TIMEOUT)
.ok()
.and_then(|s| s.parse().ok())
{
// if signal env var is not set, then we check if the OTEL_EXPORTER_OTLP_TIMEOUT env var is set
Duration::from_millis(timeout)
} else {
OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT
}
}
#[cfg(any(feature = "grpc-tonic", feature = "http-proto", feature = "http-json"))]
fn parse_header_string(value: &str) -> impl Iterator<Item = (&str, String)> {
value
@ -298,6 +372,58 @@ mod tests {
assert_eq!(exporter_builder.exporter_config.endpoint, None);
}
#[cfg(feature = "logs")]
#[cfg(any(feature = "http-proto", feature = "http-json"))]
#[test]
fn export_builder_error_invalid_http_endpoint() {
use std::time::Duration;
use crate::{ExportConfig, LogExporter, Protocol, WithExportConfig};
let ex_config = ExportConfig {
endpoint: Some("invalid_uri/something".to_string()),
protocol: Protocol::HttpBinary,
timeout: Some(Duration::from_secs(10)),
};
let exporter_result = LogExporter::builder()
.with_http()
.with_export_config(ex_config)
.build();
assert!(
matches!(
exporter_result,
Err(crate::exporter::ExporterBuildError::InvalidUri(_, _))
),
"Expected InvalidUri error, but got {exporter_result:?}"
);
}
#[cfg(feature = "grpc-tonic")]
#[tokio::test]
async fn export_builder_error_invalid_grpc_endpoint() {
use std::time::Duration;
use crate::{ExportConfig, LogExporter, Protocol, WithExportConfig};
let ex_config = ExportConfig {
endpoint: Some("invalid_uri/something".to_string()),
protocol: Protocol::Grpc,
timeout: Some(Duration::from_secs(10)),
};
let exporter_result = LogExporter::builder()
.with_tonic()
.with_export_config(ex_config)
.build();
assert!(matches!(
exporter_result,
Err(crate::exporter::ExporterBuildError::InvalidUri(_, _))
));
}
#[cfg(feature = "grpc-tonic")]
#[test]
fn test_default_tonic_endpoint() {
@ -404,4 +530,44 @@ mod tests {
)
}
}
#[test]
fn test_priority_of_signal_env_over_generic_env_for_timeout() {
run_env_test(
vec![
(crate::OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, "3000"),
(super::OTEL_EXPORTER_OTLP_TIMEOUT, "2000"),
],
|| {
let timeout =
super::resolve_timeout(crate::OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, None);
assert_eq!(timeout.as_millis(), 3000);
},
);
}
#[test]
fn test_priority_of_code_based_config_over_envs_for_timeout() {
run_env_test(
vec![
(crate::OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, "3000"),
(super::OTEL_EXPORTER_OTLP_TIMEOUT, "2000"),
],
|| {
let timeout = super::resolve_timeout(
crate::OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,
Some(&std::time::Duration::from_millis(1000)),
);
assert_eq!(timeout.as_millis(), 1000);
},
);
}
#[test]
fn test_use_default_when_others_missing_for_timeout() {
run_env_test(vec![], || {
let timeout = super::resolve_timeout(crate::OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, None);
assert_eq!(timeout.as_millis(), 10_000);
});
}
}

View File

@ -1,10 +1,12 @@
use async_trait::async_trait;
use core::fmt;
use opentelemetry::otel_debug;
use opentelemetry_proto::tonic::collector::logs::v1::{
logs_service_client::LogsServiceClient, ExportLogsServiceRequest,
};
use opentelemetry_sdk::export::logs::{LogBatch, LogExporter};
use opentelemetry_sdk::logs::{LogError, LogResult};
use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};
use opentelemetry_sdk::logs::{LogBatch, LogExporter};
use std::time;
use tokio::sync::Mutex;
use tonic::{codegen::CompressionEncoding, service::Interceptor, transport::Channel, Request};
use opentelemetry_proto::transform::logs::tonic::group_logs_by_resource_and_scope;
@ -12,7 +14,7 @@ use opentelemetry_proto::transform::logs::tonic::group_logs_by_resource_and_scop
use super::BoxInterceptor;
pub(crate) struct TonicLogsClient {
inner: Option<ClientInner>,
inner: Mutex<Option<ClientInner>>,
#[allow(dead_code)]
// <allow dead> would be removed once we support set_resource for metrics.
resource: opentelemetry_proto::transform::common::tonic::ResourceAttributesWithSchema,
@ -42,47 +44,66 @@ impl TonicLogsClient {
.accept_compressed(compression);
}
otel_debug!(name: "TonicsLogsClientBuilt");
TonicLogsClient {
inner: Some(ClientInner {
inner: Mutex::new(Some(ClientInner {
client,
interceptor,
}),
})),
resource: Default::default(),
}
}
}
#[async_trait]
impl LogExporter for TonicLogsClient {
async fn export(&mut self, batch: LogBatch<'_>) -> LogResult<()> {
let (mut client, metadata, extensions) = match &mut self.inner {
async fn export(&self, batch: LogBatch<'_>) -> OTelSdkResult {
let (mut client, metadata, extensions) = match self.inner.lock().await.as_mut() {
Some(inner) => {
let (m, e, _) = inner
.interceptor
.call(Request::new(()))
.map_err(|e| LogError::Other(Box::new(e)))?
.map_err(|e| OTelSdkError::InternalFailure(format!("error: {e:?}")))?
.into_parts();
(inner.client.clone(), m, e)
}
None => return Err(LogError::Other("exporter is already shut down".into())),
None => return Err(OTelSdkError::AlreadyShutdown),
};
let resource_logs = group_logs_by_resource_and_scope(batch, &self.resource);
client
otel_debug!(name: "TonicLogsClient.ExportStarted");
let result = client
.export(Request::from_parts(
metadata,
extensions,
ExportLogsServiceRequest { resource_logs },
))
.await
.map_err(crate::Error::from)?;
.await;
Ok(())
match result {
Ok(_) => {
otel_debug!(name: "TonicLogsClient.ExportSucceeded");
Ok(())
}
Err(e) => {
let error = format!("export error: {e:?}");
otel_debug!(name: "TonicLogsClient.ExportFailed", error = &error);
Err(OTelSdkError::InternalFailure(error))
}
}
}
fn shutdown(&mut self) {
let _ = self.inner.take();
fn shutdown_with_timeout(&self, _timeout: time::Duration) -> OTelSdkResult {
// TODO: Implement actual shutdown
// Due to the use of tokio::sync::Mutex to guard
// the inner client, we need to await the call to lock the mutex
// and that requires async runtime.
// It is possible to fix this by using
// a dedicated thread just to handle shutdown.
// But for now, we just return Ok.
Ok(())
}
fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) {

View File

@ -1,12 +1,12 @@
use core::fmt;
use std::sync::Mutex;
use async_trait::async_trait;
use opentelemetry::otel_debug;
use opentelemetry_proto::tonic::collector::metrics::v1::{
metrics_service_client::MetricsServiceClient, ExportMetricsServiceRequest,
};
use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};
use opentelemetry_sdk::metrics::data::ResourceMetrics;
use opentelemetry_sdk::metrics::{MetricError, MetricResult};
use tonic::{codegen::CompressionEncoding, service::Interceptor, transport::Channel, Request};
use super::BoxInterceptor;
@ -40,6 +40,8 @@ impl TonicMetricsClient {
.accept_compressed(compression);
}
otel_debug!(name: "TonicsMetricsClientBuilt");
TonicMetricsClient {
inner: Mutex::new(Some(ClientInner {
client,
@ -49,43 +51,58 @@ impl TonicMetricsClient {
}
}
#[async_trait]
impl MetricsClient for TonicMetricsClient {
async fn export(&self, metrics: &mut ResourceMetrics) -> MetricResult<()> {
let (mut client, metadata, extensions) =
self.inner
.lock()
.map_err(Into::into)
.and_then(|mut inner| match &mut *inner {
Some(inner) => {
let (m, e, _) = inner
.interceptor
.call(Request::new(()))
.map_err(|e| {
MetricError::Other(format!(
"unexpected status while exporting {e:?}"
))
})?
.into_parts();
Ok((inner.client.clone(), m, e))
}
None => Err(MetricError::Other("exporter is already shut down".into())),
})?;
async fn export(&self, metrics: &ResourceMetrics) -> OTelSdkResult {
let (mut client, metadata, extensions) = self
.inner
.lock()
.map_err(|e| OTelSdkError::InternalFailure(format!("Failed to acquire lock: {e:?}")))
.and_then(|mut inner| match &mut *inner {
Some(inner) => {
let (m, e, _) = inner
.interceptor
.call(Request::new(()))
.map_err(|e| {
OTelSdkError::InternalFailure(format!(
"unexpected status while exporting {e:?}"
))
})?
.into_parts();
Ok((inner.client.clone(), m, e))
}
None => Err(OTelSdkError::InternalFailure(
"exporter is already shut down".into(),
)),
})?;
client
otel_debug!(name: "TonicMetricsClient.ExportStarted");
let result = client
.export(Request::from_parts(
metadata,
extensions,
ExportMetricsServiceRequest::from(&*metrics),
ExportMetricsServiceRequest::from(metrics),
))
.await
.map_err(crate::Error::from)?;
.await;
Ok(())
match result {
Ok(_) => {
otel_debug!(name: "TonicMetricsClient.ExportSucceeded");
Ok(())
}
Err(e) => {
let error = format!("{e:?}");
otel_debug!(name: "TonicMetricsClient.ExportFailed", error = &error);
Err(OTelSdkError::InternalFailure(error))
}
}
}
fn shutdown(&self) -> MetricResult<()> {
let _ = self.inner.lock()?.take();
fn shutdown(&self) -> OTelSdkResult {
self.inner
.lock()
.map_err(|e| OTelSdkError::InternalFailure(format!("Failed to acquire lock: {e}")))?
.take();
Ok(())
}

View File

@ -1,9 +1,9 @@
use std::env;
use std::fmt::{Debug, Formatter};
use std::str::FromStr;
use std::time::Duration;
use http::{HeaderMap, HeaderName, HeaderValue};
use opentelemetry::otel_debug;
use tonic::codec::CompressionEncoding;
use tonic::metadata::{KeyAndValueRef, MetadataMap};
use tonic::service::Interceptor;
@ -12,17 +12,18 @@ use tonic::transport::Channel;
use tonic::transport::ClientTlsConfig;
use super::{default_headers, parse_header_string, OTEL_EXPORTER_OTLP_GRPC_ENDPOINT_DEFAULT};
use super::{resolve_timeout, ExporterBuildError};
use crate::exporter::Compression;
use crate::{
ExportConfig, OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_ENDPOINT,
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT,
OTEL_EXPORTER_OTLP_HEADERS,
};
#[cfg(feature = "logs")]
mod logs;
pub(crate) mod logs;
#[cfg(feature = "metrics")]
mod metrics;
pub(crate) mod metrics;
#[cfg(feature = "trace")]
pub(crate) mod trace;
@ -45,21 +46,21 @@ pub struct TonicConfig {
}
impl TryFrom<Compression> for tonic::codec::CompressionEncoding {
type Error = crate::Error;
type Error = ExporterBuildError;
fn try_from(value: Compression) -> Result<Self, Self::Error> {
fn try_from(value: Compression) -> Result<Self, ExporterBuildError> {
match value {
#[cfg(feature = "gzip-tonic")]
Compression::Gzip => Ok(tonic::codec::CompressionEncoding::Gzip),
#[cfg(not(feature = "gzip-tonic"))]
Compression::Gzip => Err(crate::Error::FeatureRequiredForCompressionAlgorithm(
Compression::Gzip => Err(ExporterBuildError::FeatureRequiredForCompressionAlgorithm(
"gzip-tonic",
Compression::Gzip,
)),
#[cfg(feature = "zstd-tonic")]
Compression::Zstd => Ok(tonic::codec::CompressionEncoding::Zstd),
#[cfg(not(feature = "zstd-tonic"))]
Compression::Zstd => Err(crate::Error::FeatureRequiredForCompressionAlgorithm(
Compression::Zstd => Err(ExporterBuildError::FeatureRequiredForCompressionAlgorithm(
"zstd-tonic",
Compression::Zstd,
)),
@ -144,16 +145,18 @@ impl Default for TonicExporterBuilder {
}
impl TonicExporterBuilder {
// This is for clippy to work with only the grpc-tonic feature enabled
#[allow(unused)]
fn build_channel(
self,
signal_endpoint_var: &str,
signal_timeout_var: &str,
signal_compression_var: &str,
signal_headers_var: &str,
) -> Result<(Channel, BoxInterceptor, Option<CompressionEncoding>), crate::Error> {
) -> Result<(Channel, BoxInterceptor, Option<CompressionEncoding>), ExporterBuildError> {
let compression = self.resolve_compression(signal_compression_var)?;
let headers_from_env = parse_headers_from_env(signal_headers_var);
let (headers_from_env, headers_for_logging) = parse_headers_from_env(signal_headers_var);
let metadata = merge_metadata_with_headers_from_env(
self.tonic_config.metadata.unwrap_or_default(),
headers_from_env,
@ -190,23 +193,18 @@ impl TonicExporterBuilder {
let endpoint = Self::resolve_endpoint(signal_endpoint_var, config.endpoint);
let endpoint = Channel::from_shared(endpoint).map_err(crate::Error::from)?;
let timeout = match env::var(signal_timeout_var)
.ok()
.or(env::var(OTEL_EXPORTER_OTLP_TIMEOUT).ok())
{
Some(val) => match val.parse() {
Ok(seconds) => Duration::from_secs(seconds),
Err(_) => config.timeout,
},
None => config.timeout,
};
// Used for logging the endpoint
let endpoint_clone = endpoint.clone();
let endpoint = Channel::from_shared(endpoint)
.map_err(|op| ExporterBuildError::InvalidUri(endpoint_clone.clone(), op.to_string()))?;
let timeout = resolve_timeout(signal_timeout_var, config.timeout.as_ref());
#[cfg(feature = "tls")]
let channel = match self.tonic_config.tls_config {
Some(tls_config) => endpoint
.tls_config(tls_config)
.map_err(crate::Error::from)?,
.map_err(|er| ExporterBuildError::InternalFailure(er.to_string()))?,
None => endpoint,
}
.timeout(timeout)
@ -215,6 +213,7 @@ impl TonicExporterBuilder {
#[cfg(not(feature = "tls"))]
let channel = endpoint.timeout(timeout).connect_lazy();
otel_debug!(name: "TonicChannelBuilt", endpoint = endpoint_clone, timeout_in_millisecs = timeout.as_millis(), compression = format!("{:?}", compression), headers = format!("{:?}", headers_for_logging));
Ok((channel, interceptor, compression))
}
@ -224,21 +223,23 @@ impl TonicExporterBuilder {
// the path of grpc calls are based on the protobuf service definition
// so we won't append one for default grpc endpoints
// If users for some reason want to use a custom path, they can use env var or builder to pass it
match env::var(default_endpoint_var)
.ok()
.or(env::var(OTEL_EXPORTER_OTLP_ENDPOINT).ok())
{
Some(val) => val,
None => {
provided_endpoint.unwrap_or(OTEL_EXPORTER_OTLP_GRPC_ENDPOINT_DEFAULT.to_string())
}
//
// programmatic configuration overrides any value set via environment variables
if let Some(endpoint) = provided_endpoint.filter(|s| !s.is_empty()) {
endpoint
} else if let Ok(endpoint) = env::var(default_endpoint_var) {
endpoint
} else if let Ok(endpoint) = env::var(OTEL_EXPORTER_OTLP_ENDPOINT) {
endpoint
} else {
OTEL_EXPORTER_OTLP_GRPC_ENDPOINT_DEFAULT.to_string()
}
}
fn resolve_compression(
&self,
env_override: &str,
) -> Result<Option<CompressionEncoding>, crate::Error> {
) -> Result<Option<CompressionEncoding>, ExporterBuildError> {
if let Some(compression) = self.tonic_config.compression {
Ok(Some(compression.try_into()?))
} else if let Ok(compression) = env::var(env_override) {
@ -252,11 +253,11 @@ impl TonicExporterBuilder {
/// Build a new tonic log exporter
#[cfg(feature = "logs")]
pub(crate) fn build_log_exporter(
self,
) -> Result<crate::logs::LogExporter, opentelemetry_sdk::logs::LogError> {
pub(crate) fn build_log_exporter(self) -> Result<crate::logs::LogExporter, ExporterBuildError> {
use crate::exporter::tonic::logs::TonicLogsClient;
otel_debug!(name: "LogsTonicChannelBuilding");
let (channel, interceptor, compression) = self.build_channel(
crate::logs::OTEL_EXPORTER_OTLP_LOGS_ENDPOINT,
crate::logs::OTEL_EXPORTER_OTLP_LOGS_TIMEOUT,
@ -266,7 +267,7 @@ impl TonicExporterBuilder {
let client = TonicLogsClient::new(channel, interceptor, compression);
Ok(crate::logs::LogExporter::new(client))
Ok(crate::logs::LogExporter::from_tonic(client))
}
/// Build a new tonic metrics exporter
@ -274,10 +275,12 @@ impl TonicExporterBuilder {
pub(crate) fn build_metrics_exporter(
self,
temporality: opentelemetry_sdk::metrics::Temporality,
) -> opentelemetry_sdk::metrics::MetricResult<crate::MetricExporter> {
) -> Result<crate::MetricExporter, ExporterBuildError> {
use crate::MetricExporter;
use metrics::TonicMetricsClient;
otel_debug!(name: "MetricsTonicChannelBuilding");
let (channel, interceptor, compression) = self.build_channel(
crate::metric::OTEL_EXPORTER_OTLP_METRICS_ENDPOINT,
crate::metric::OTEL_EXPORTER_OTLP_METRICS_TIMEOUT,
@ -287,16 +290,16 @@ impl TonicExporterBuilder {
let client = TonicMetricsClient::new(channel, interceptor, compression);
Ok(MetricExporter::new(client, temporality))
Ok(MetricExporter::from_tonic(client, temporality))
}
/// Build a new tonic span exporter
#[cfg(feature = "trace")]
pub(crate) fn build_span_exporter(
self,
) -> Result<crate::SpanExporter, opentelemetry::trace::TraceError> {
pub(crate) fn build_span_exporter(self) -> Result<crate::SpanExporter, ExporterBuildError> {
use crate::exporter::tonic::trace::TonicTracesClient;
otel_debug!(name: "TracesTonicChannelBuilding");
let (channel, interceptor, compression) = self.build_channel(
crate::span::OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
crate::span::OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,
@ -306,7 +309,7 @@ impl TonicExporterBuilder {
let client = TonicTracesClient::new(channel, interceptor, compression);
Ok(crate::SpanExporter::new(client))
Ok(crate::SpanExporter::from_tonic(client))
}
}
@ -324,20 +327,26 @@ fn merge_metadata_with_headers_from_env(
}
}
fn parse_headers_from_env(signal_headers_var: &str) -> HeaderMap {
env::var(signal_headers_var)
.or_else(|_| env::var(OTEL_EXPORTER_OTLP_HEADERS))
.map(|input| {
parse_header_string(&input)
.filter_map(|(key, value)| {
Some((
HeaderName::from_str(key).ok()?,
HeaderValue::from_str(&value).ok()?,
))
})
.collect::<HeaderMap>()
})
.unwrap_or_default()
fn parse_headers_from_env(signal_headers_var: &str) -> (HeaderMap, Vec<(String, String)>) {
let mut headers = Vec::new();
(
env::var(signal_headers_var)
.or_else(|_| env::var(OTEL_EXPORTER_OTLP_HEADERS))
.map(|input| {
parse_header_string(&input)
.filter_map(|(key, value)| {
headers.push((key.to_owned(), value.clone()));
Some((
HeaderName::from_str(key).ok()?,
HeaderValue::from_str(&value).ok()?,
))
})
.collect::<HeaderMap>()
})
.unwrap_or_default(),
headers,
)
}
/// Expose interface for modifying [TonicConfig] fields within the exporter builders.
@ -441,7 +450,7 @@ mod tests {
use crate::exporter::tonic::WithTonicConfig;
#[cfg(feature = "grpc-tonic")]
use crate::exporter::Compression;
use crate::{TonicExporterBuilder, WithExportConfig, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT};
use crate::{TonicExporterBuilder, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT};
use crate::{OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS};
use http::{HeaderMap, HeaderName, HeaderValue};
use tonic::metadata::{MetadataMap, MetadataValue};
@ -507,6 +516,56 @@ mod tests {
assert!(tonic::codec::CompressionEncoding::try_from(Compression::Zstd).is_err());
}
#[cfg(feature = "zstd-tonic")]
#[test]
fn test_priority_of_signal_env_over_generic_env_for_compression() {
run_env_test(
vec![
(crate::OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, "zstd"),
(super::OTEL_EXPORTER_OTLP_COMPRESSION, "gzip"),
],
|| {
let builder = TonicExporterBuilder::default();
let compression = builder
.resolve_compression(crate::OTEL_EXPORTER_OTLP_TRACES_COMPRESSION)
.unwrap();
assert_eq!(compression, Some(tonic::codec::CompressionEncoding::Zstd));
},
);
}
#[cfg(feature = "zstd-tonic")]
#[test]
fn test_priority_of_code_based_config_over_envs_for_compression() {
run_env_test(
vec![
(crate::OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, "gzip"),
(super::OTEL_EXPORTER_OTLP_COMPRESSION, "gzip"),
],
|| {
let builder = TonicExporterBuilder::default().with_compression(Compression::Zstd);
let compression = builder
.resolve_compression(crate::OTEL_EXPORTER_OTLP_TRACES_COMPRESSION)
.unwrap();
assert_eq!(compression, Some(tonic::codec::CompressionEncoding::Zstd));
},
);
}
#[test]
fn test_use_default_when_others_missing_for_compression() {
run_env_test(vec![], || {
let builder = TonicExporterBuilder::default();
let compression = builder
.resolve_compression(crate::OTEL_EXPORTER_OTLP_TRACES_COMPRESSION)
.unwrap();
assert!(compression.is_none());
});
}
#[test]
fn test_parse_headers_from_env() {
run_env_test(
@ -516,7 +575,7 @@ mod tests {
],
|| {
assert_eq!(
super::parse_headers_from_env(OTEL_EXPORTER_OTLP_TRACES_HEADERS),
super::parse_headers_from_env(OTEL_EXPORTER_OTLP_TRACES_HEADERS).0,
HeaderMap::from_iter([
(
HeaderName::from_static("k1"),
@ -530,7 +589,7 @@ mod tests {
);
assert_eq!(
super::parse_headers_from_env("EMPTY_ENV"),
super::parse_headers_from_env("EMPTY_ENV").0,
HeaderMap::from_iter([(
HeaderName::from_static("k3"),
HeaderValue::from_static("v3")
@ -553,7 +612,7 @@ mod tests {
metadata.insert("k1", "v0".parse().unwrap());
let result =
super::merge_metadata_with_headers_from_env(metadata, headers_from_env);
super::merge_metadata_with_headers_from_env(metadata, headers_from_env.0);
assert_eq!(
result.get("foo").unwrap(),
@ -566,29 +625,56 @@ mod tests {
}
#[test]
fn test_tonic_exporter_endpoint() {
// default endpoint for grpc should not add signal path.
fn test_priority_of_signal_env_over_generic_env_for_endpoint() {
run_env_test(
vec![
(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "http://localhost:1234"),
(super::OTEL_EXPORTER_OTLP_ENDPOINT, "http://localhost:2345"),
],
|| {
let url = TonicExporterBuilder::resolve_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
None,
);
assert_eq!(url, "http://localhost:1234");
},
);
}
#[test]
fn test_priority_of_code_based_config_over_envs_for_endpoint() {
run_env_test(
vec![
(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "http://localhost:1234"),
(super::OTEL_EXPORTER_OTLP_ENDPOINT, "http://localhost:2345"),
],
|| {
let url = TonicExporterBuilder::resolve_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
Some("http://localhost:3456".to_string()),
);
assert_eq!(url, "http://localhost:3456");
},
);
}
#[test]
fn test_use_default_when_others_missing_for_endpoint() {
run_env_test(vec![], || {
let exporter = TonicExporterBuilder::default();
let url = TonicExporterBuilder::resolve_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
exporter.exporter_config.endpoint,
);
let url =
TonicExporterBuilder::resolve_endpoint(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, None);
assert_eq!(url, "http://localhost:4317");
});
}
// if builder endpoint is set, it should not use default.
#[test]
fn test_use_default_when_empty_string_for_option() {
run_env_test(vec![], || {
let exporter = TonicExporterBuilder::default().with_endpoint("http://localhost:1234");
let url = TonicExporterBuilder::resolve_endpoint(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
exporter.exporter_config.endpoint,
Some(String::new()),
);
assert_eq!(url, "http://localhost:1234");
assert_eq!(url, "http://localhost:4317");
});
}
}

View File

@ -1,14 +1,17 @@
use core::fmt;
use tokio::sync::Mutex;
use futures_core::future::BoxFuture;
use opentelemetry::trace::TraceError;
use opentelemetry::otel_debug;
use opentelemetry_proto::tonic::collector::trace::v1::{
trace_service_client::TraceServiceClient, ExportTraceServiceRequest,
};
use opentelemetry_sdk::export::trace::{ExportResult, SpanData, SpanExporter};
use tonic::{codegen::CompressionEncoding, service::Interceptor, transport::Channel, Request};
use opentelemetry_proto::transform::trace::tonic::group_spans_by_resource_and_scope;
use opentelemetry_sdk::error::OTelSdkError;
use opentelemetry_sdk::{
error::OTelSdkResult,
trace::{SpanData, SpanExporter},
};
use tonic::{codegen::CompressionEncoding, service::Interceptor, transport::Channel, Request};
use super::BoxInterceptor;
@ -21,7 +24,7 @@ pub(crate) struct TonicTracesClient {
struct ClientInner {
client: TraceServiceClient<Channel>,
interceptor: BoxInterceptor,
interceptor: Mutex<BoxInterceptor>,
}
impl fmt::Debug for TonicTracesClient {
@ -43,10 +46,12 @@ impl TonicTracesClient {
.accept_compressed(compression);
}
otel_debug!(name: "TonicsTracesClientBuilt");
TonicTracesClient {
inner: Some(ClientInner {
client,
interceptor,
interceptor: Mutex::new(interceptor),
}),
resource: Default::default(),
}
@ -54,42 +59,51 @@ impl TonicTracesClient {
}
impl SpanExporter for TonicTracesClient {
fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
let (mut client, metadata, extensions) = match &mut self.inner {
async fn export(&self, batch: Vec<SpanData>) -> OTelSdkResult {
let (mut client, metadata, extensions) = match &self.inner {
Some(inner) => {
let (m, e, _) = match inner.interceptor.call(Request::new(())) {
Ok(res) => res.into_parts(),
Err(e) => {
return Box::pin(std::future::ready(Err(TraceError::Other(Box::new(e)))))
}
};
let (m, e, _) = inner
.interceptor
.lock()
.await // tokio::sync::Mutex doesn't return a poisoned error, so we can safely use the interceptor here
.call(Request::new(()))
.map_err(|e| OTelSdkError::InternalFailure(format!("error: {e:?}")))?
.into_parts();
(inner.client.clone(), m, e)
}
None => {
return Box::pin(std::future::ready(Err(TraceError::Other(
"exporter is already shut down".into(),
))))
}
None => return Err(OTelSdkError::AlreadyShutdown),
};
let resource_spans = group_spans_by_resource_and_scope(batch, &self.resource);
Box::pin(async move {
client
.export(Request::from_parts(
metadata,
extensions,
ExportTraceServiceRequest { resource_spans },
))
.await
.map_err(crate::Error::from)?;
otel_debug!(name: "TonicTracesClient.ExportStarted");
Ok(())
})
let result = client
.export(Request::from_parts(
metadata,
extensions,
ExportTraceServiceRequest { resource_spans },
))
.await;
match result {
Ok(_) => {
otel_debug!(name: "TonicTracesClient.ExportSucceeded");
Ok(())
}
Err(e) => {
let error = e.to_string();
otel_debug!(name: "TonicTracesClient.ExportFailed", error = &error);
Err(OTelSdkError::InternalFailure(error))
}
}
}
fn shutdown(&mut self) {
let _ = self.inner.take();
fn shutdown(&mut self) -> OTelSdkResult {
match self.inner.take() {
Some(_) => Ok(()), // Successfully took `inner`, indicating a successful shutdown.
None => Err(OTelSdkError::AlreadyShutdown), // `inner` was already `None`, meaning it's already shut down.
}
}
fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) {

View File

@ -1,35 +1,218 @@
//! The OTLP Exporter supports exporting logs, metrics and traces in the OTLP
//! format to the OpenTelemetry collector or other compatible backend.
//! # OpenTelemetry OTLP Exporter
//!
//! The OpenTelemetry Collector offers a vendor-agnostic implementation on how
//! to receive, process, and export telemetry data. In addition, it removes
//! the need to run, operate, and maintain multiple agents/collectors in
//! order to support open-source telemetry data formats (e.g. Jaeger,
//! Prometheus, etc.) sending to multiple open-source or commercial back-ends.
//! The OTLP Exporter enables exporting telemetry data (logs, metrics, and traces) in the
//! OpenTelemetry Protocol (OTLP) format to compatible backends. These backends include:
//!
//! Currently, this crate only support sending telemetry in OTLP
//! via grpc and http (in binary format). Supports for other format and protocol
//! will be added in the future. The details of what's currently offering in this
//! crate can be found in this doc.
//! - OpenTelemetry Collector
//! - Open-source observability tools (Prometheus, Jaeger, etc.)
//! - Vendor-specific monitoring platforms
//!
//! # Quickstart
//! This crate supports sending OTLP data via:
//! - gRPC
//! - HTTP (binary protobuf or JSON)
//!
//! First make sure you have a running version of the opentelemetry collector
//! you want to send data to:
//! ## Quickstart with OpenTelemetry Collector
//!
//! ### HTTP Transport (Port 4318)
//!
//! Run the OpenTelemetry Collector:
//!
//! ```shell
//! $ docker run -p 4318:4318 otel/opentelemetry-collector:latest
//! ```
//!
//! Configure your application to export traces via HTTP:
//!
//! ```no_run
//! # #[cfg(all(feature = "trace", feature = "http-proto"))]
//! # {
//! use opentelemetry::global;
//! use opentelemetry::trace::Tracer;
//! use opentelemetry_otlp::Protocol;
//! use opentelemetry_otlp::WithExportConfig;
//!
//! fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
//! // Initialize OTLP exporter using HTTP binary protocol
//! let otlp_exporter = opentelemetry_otlp::SpanExporter::builder()
//! .with_http()
//! .with_protocol(Protocol::HttpBinary)
//! .build()?;
//!
//! // Create a tracer provider with the exporter
//! let tracer_provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
//! .with_simple_exporter(otlp_exporter)
//! .build();
//!
//! // Set it as the global provider
//! global::set_tracer_provider(tracer_provider);
//!
//! // Get a tracer and create spans
//! let tracer = global::tracer("my_tracer");
//! tracer.in_span("doing_work", |_cx| {
//! // Your application logic here...
//! });
//!
//! Ok(())
//! # }
//! }
//! ```
//!
//! ### gRPC Transport (Port 4317)
//!
//! Run the OpenTelemetry Collector:
//!
//! ```shell
//! $ docker run -p 4317:4317 otel/opentelemetry-collector:latest
//! ```
//!
//! Then create a new `Exporter`, and `Provider` with the recommended defaults to start exporting
//! telemetry.
//! Configure your application to export traces via gRPC (the tonic client requires a Tokio runtime):
//!
//! You will have to build a OTLP exporter first. Create the correct exporter based on the signal
//! you are looking to export `SpanExporter::builder()`, `MetricExporter::builder()`,
//! `LogExporter::builder()` respectively for traces, metrics, and logs.
//! - With `[tokio::main]`
//!
//! Once you have the exporter, you can create your `Provider` by starting with `TracerProvider::builder()`,
//! `SdkMeterProvider::builder()`, and `LoggerProvider::builder()` respectively for traces, metrics, and logs.
//! ```no_run
//! # #[cfg(all(feature = "trace", feature = "grpc-tonic"))]
//! # {
//! use opentelemetry::{global, trace::Tracer};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
//! // Initialize OTLP exporter using gRPC (Tonic)
//! let otlp_exporter = opentelemetry_otlp::SpanExporter::builder()
//! .with_tonic()
//! .build()?;
//!
//! // Create a tracer provider with the exporter
//! let tracer_provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
//! .with_simple_exporter(otlp_exporter)
//! .build();
//!
//! // Set it as the global provider
//! global::set_tracer_provider(tracer_provider);
//!
//! // Get a tracer and create spans
//! let tracer = global::tracer("my_tracer");
//! tracer.in_span("doing_work", |_cx| {
//! // Your application logic here...
//! });
//!
//! Ok(())
//! # }
//! }
//! ```
//!
//! - Without `[tokio::main]`
//!
//! ```no_run
//! # #[cfg(all(feature = "trace", feature = "grpc-tonic"))]
//! # {
//! use opentelemetry::{global, trace::Tracer};
//!
//! fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
//! // Initialize OTLP exporter using gRPC (Tonic)
//! let rt = tokio::runtime::Runtime::new()?;
//! let tracer_provider = rt.block_on(async {
//! let exporter = opentelemetry_otlp::SpanExporter::builder()
//! .with_tonic()
//! .build()
//! .expect("Failed to create span exporter");
//! opentelemetry_sdk::trace::SdkTracerProvider::builder()
//! .with_simple_exporter(exporter)
//! .build()
//! });
//!
//! // Set it as the global provider
//! global::set_tracer_provider(tracer_provider);
//!
//! // Get a tracer and create spans
//! let tracer = global::tracer("my_tracer");
//! tracer.in_span("doing_work", |_cx| {
//! // Your application logic here...
//! });
//!
//! // Ensure the runtime (`rt`) remains active until the program ends
//! Ok(())
//! # }
//! }
//! ```
//!
//! ## Using with Jaeger
//!
//! Jaeger natively supports the OTLP protocol, making it easy to send traces directly:
//!
//! ```shell
//! $ docker run -p 16686:16686 -p 4317:4317 -e COLLECTOR_OTLP_ENABLED=true jaegertracing/all-in-one:latest
//! ```
//!
//! After running your application configured with the OTLP exporter, view traces at:
//! `http://localhost:16686`
//!
//! ## Using with Prometheus
//!
//! Prometheus natively supports accepting metrics via the OTLP protocol
//! (HTTP/protobuf). You can [run
//! Prometheus](https://prometheus.io/docs/prometheus/latest/installation/) with
//! the following command:
//!
//! ```shell
//! docker run -p 9090:9090 -v ./prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus --config.file=/etc/prometheus/prometheus.yml --web.enable-otlp-receiver
//! ```
//!
//! (An empty prometheus.yml file is sufficient for this example.)
//!
//! Modify your application to export metrics via OTLP:
//!
//! ```no_run
//! # #[cfg(all(feature = "metrics", feature = "http-proto"))]
//! # {
//! use opentelemetry::global;
//! use opentelemetry::metrics::Meter;
//! use opentelemetry::KeyValue;
//! use opentelemetry_otlp::Protocol;
//! use opentelemetry_otlp::WithExportConfig;
//!
//! fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
//! // Initialize OTLP exporter using HTTP binary protocol
//! let exporter = opentelemetry_otlp::MetricExporter::builder()
//! .with_http()
//! .with_protocol(Protocol::HttpBinary)
//! .with_endpoint("http://localhost:9090/api/v1/otlp/v1/metrics")
//! .build()?;
//!
//! // Create a meter provider with the OTLP Metric exporter
//! let meter_provider = opentelemetry_sdk::metrics::SdkMeterProvider::builder()
//! .with_periodic_exporter(exporter)
//! .build();
//! global::set_meter_provider(meter_provider.clone());
//!
//! // Get a meter
//! let meter = global::meter("my_meter");
//!
//! // Create a metric
//! let counter = meter.u64_counter("my_counter").build();
//! counter.add(1, &[KeyValue::new("key", "value")]);
//!
//! // Shutdown the meter provider. This will trigger an export of all metrics.
//! meter_provider.shutdown()?;
//!
//! Ok(())
//! # }
//! }
//! ```
//!
//! After running your application configured with the OTLP exporter, view metrics at:
//! `http://localhost:9090`
//! ## Show Logs, Metrics too (TODO)
//!
//! ## Performance
//!
//! For optimal performance, a batch exporting processor is recommended as the simple
//! processor will export each span synchronously on dropping, and is only good
//! for test/debug purposes.
//!
//! ```toml
//! [dependencies]
//! opentelemetry-otlp = { version = "*", features = ["grpc-tonic"] }
//! ```
//!
//! ```no_run
//! # #[cfg(all(feature = "trace", feature = "grpc-tonic"))]
@ -41,8 +224,8 @@
//! // First, create a OTLP exporter builder. Configure it as you need.
//! let otlp_exporter = opentelemetry_otlp::SpanExporter::builder().with_tonic().build()?;
//! // Then pass it into provider builder
//! let _ = opentelemetry_sdk::trace::TracerProvider::builder()
//! .with_simple_exporter(otlp_exporter)
//! let _ = opentelemetry_sdk::trace::SdkTracerProvider::builder()
//! .with_batch_exporter(otlp_exporter)
//! .build();
//! let tracer = global::tracer("my_tracer");
//! tracer.in_span("doing_work", |cx| {
@ -54,69 +237,34 @@
//! }
//! ```
//!
//! ## Performance
//!
//! For optimal performance, a batch exporter is recommended as the simple
//! exporter will export each span synchronously on dropping. You can enable the
//! [`rt-tokio`], [`rt-tokio-current-thread`] or [`rt-async-std`] features and
//! specify a runtime on the pipeline builder to have a batch exporter
//! configured for you automatically.
//!
//! ```toml
//! [dependencies]
//! opentelemetry_sdk = { version = "*", features = ["async-std"] }
//! opentelemetry-otlp = { version = "*", features = ["grpc-tonic"] }
//! ```
//!
//! ```no_run
//! # #[cfg(all(feature = "trace", feature = "grpc-tonic"))]
//! # {
//! # fn main() -> Result<(), opentelemetry::trace::TraceError> {
//! let tracer = opentelemetry_sdk::trace::TracerProvider::builder()
//! .with_batch_exporter(
//! opentelemetry_otlp::SpanExporter::builder()
//! .with_tonic()
//! .build()?,
//! opentelemetry_sdk::runtime::Tokio,
//! )
//! .build();
//!
//! # Ok(())
//! # }
//! # }
//! ```
//!
//! [`tokio`]: https://tokio.rs
//! [`async-std`]: https://async.rs
//!
//! # Feature Flags
//! The following feature flags can enable exporters for different telemetry signals:
//!
//! * `trace`: Includes the trace exporters (enabled by default).
//! * `trace`: Includes the trace exporters.
//! * `metrics`: Includes the metrics exporters.
//! * `logs`: Includes the logs exporters.
//!
//! The following feature flags generate additional code and types:
//! * `serialize`: Enables serialization support for type defined in this create via `serde`.
//! * `populate-logs-event-name`: Enables sending `LogRecord::event_name` as an attribute
//! with the key `name`
//! * `serialize`: Enables serialization support for type defined in this crate via `serde`.
//!
//! The following feature flags offer additional configurations on gRPC:
//!
//! For users uses `tonic` as grpc layer:
//! * `grpc-tonic`: Use `tonic` as grpc layer. This is enabled by default.
//! For users using `tonic` as grpc layer:
//! * `grpc-tonic`: Use `tonic` as grpc layer.
//! * `gzip-tonic`: Use gzip compression for `tonic` grpc layer.
//! * `zstd-tonic`: Use zstd compression for `tonic` grpc layer.
//! * `tls-roots`: Adds system trust roots to rustls-based gRPC clients using the rustls-native-certs crate
//! * `tls-webkpi-roots`: Embeds Mozilla's trust roots to rustls-based gRPC clients using the webkpi-roots crate
//! * `tls-webpki-roots`: Embeds Mozilla's trust roots to rustls-based gRPC clients using the webpki-roots crate
//!
//! The following feature flags offer additional configurations on http:
//!
//! * `http-proto`: Use http as transport layer, protobuf as body format.
//! * `reqwest-blocking-client`: Use reqwest blocking http client.
//! * `http-proto`: Use http as transport layer, protobuf as body format. This feature is enabled by default.
//! * `reqwest-blocking-client`: Use reqwest blocking http client. This feature is enabled by default.
//! * `reqwest-client`: Use reqwest http client.
//! * `reqwest-rustls`: Use reqwest with TLS with system trust roots via `rustls-native-certs` crate.
//! * `reqwest-rustls-webkpi-roots`: Use reqwest with TLS with Mozilla's trust roots via `webkpi-roots` crate.
//! * `reqwest-rustls-webpki-roots`: Use reqwest with TLS with Mozilla's trust roots via `webpki-roots` crate.
//!
//! # Kitchen Sink Full Configuration
//!
@ -125,7 +273,7 @@
//! Generally there are two parts of configuration. One is the exporter, the other is the provider.
//! Users can configure the exporter using [SpanExporter::builder()] for traces,
//! and [MetricExporter::builder()] + [opentelemetry_sdk::metrics::PeriodicReader::builder()] for metrics.
//! Once you have an exporter, you can add it to either a [opentelemetry_sdk::trace::TracerProvider::builder()] for traces,
//! Once you have an exporter, you can add it to either a [opentelemetry_sdk::trace::SdkTracerProvider::builder()] for traces,
//! or [opentelemetry_sdk::metrics::SdkMeterProvider::builder()] for metrics.
//!
//! ```no_run
@ -153,18 +301,15 @@
//! .with_metadata(map)
//! .build()?;
//!
//! let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder()
//! .with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio)
//! .with_config(
//! trace::Config::default()
//! .with_sampler(Sampler::AlwaysOn)
//! .with_id_generator(RandomIdGenerator::default())
//! .with_max_events_per_span(64)
//! .with_max_attributes_per_span(16)
//! .with_max_events_per_span(16)
//! .with_resource(Resource::new(vec![KeyValue::new("service.name", "example")])),
//! ).build();
//! global::set_tracer_provider(tracer_provider);
//! let tracer_provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
//! .with_batch_exporter(exporter)
//! .with_sampler(Sampler::AlwaysOn)
//! .with_id_generator(RandomIdGenerator::default())
//! .with_max_events_per_span(64)
//! .with_max_attributes_per_span(16)
//! .with_resource(Resource::builder_empty().with_attributes([KeyValue::new("service.name", "example")]).build())
//! .build();
//! global::set_tracer_provider(tracer_provider.clone());
//! let tracer = global::tracer("tracer-name");
//! # tracer
//! # };
@ -179,14 +324,9 @@
//! .build()
//! .unwrap();
//!
//! let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter, opentelemetry_sdk::runtime::Tokio)
//! .with_interval(std::time::Duration::from_secs(3))
//! .with_timeout(Duration::from_secs(10))
//! .build();
//!
//! let provider = opentelemetry_sdk::metrics::SdkMeterProvider::builder()
//! .with_reader(reader)
//! .with_resource(Resource::new(vec![KeyValue::new("service.name", "example")]))
//! .with_periodic_exporter(exporter)
//! .with_resource(Resource::builder_empty().with_attributes([KeyValue::new("service.name", "example")]).build())
//! .build();
//! # }
//!
@ -230,25 +370,29 @@ mod span;
pub use crate::exporter::Compression;
pub use crate::exporter::ExportConfig;
pub use crate::exporter::ExporterBuildError;
#[cfg(feature = "trace")]
#[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))]
pub use crate::span::{
SpanExporter, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
OTEL_EXPORTER_OTLP_TRACES_HEADERS, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,
SpanExporter, SpanExporterBuilder, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_HEADERS,
OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,
};
#[cfg(feature = "metrics")]
#[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))]
pub use crate::metric::{
MetricExporter, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT,
OTEL_EXPORTER_OTLP_METRICS_HEADERS, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT,
MetricExporter, MetricExporterBuilder, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION,
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_HEADERS,
OTEL_EXPORTER_OTLP_METRICS_TIMEOUT,
};
#[cfg(feature = "logs")]
#[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))]
pub use crate::logs::{
LogExporter, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT,
OTEL_EXPORTER_OTLP_LOGS_HEADERS, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT,
LogExporter, LogExporterBuilder, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION,
OTEL_EXPORTER_OTLP_LOGS_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_HEADERS,
OTEL_EXPORTER_OTLP_LOGS_TIMEOUT,
};
#[cfg(any(feature = "http-proto", feature = "http-json"))]
@ -264,8 +408,6 @@ pub use crate::exporter::{
OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT,
};
use opentelemetry_sdk::export::ExportError;
/// Type to indicate the builder does not have a client set.
#[derive(Debug, Default, Clone)]
pub struct NoExporterBuilderSet;
@ -274,6 +416,8 @@ pub struct NoExporterBuilderSet;
///
/// Allowing access to [TonicExporterBuilder] specific configuration methods.
#[cfg(feature = "grpc-tonic")]
// This is for clippy to work with only the grpc-tonic feature enabled
#[allow(unused)]
#[derive(Debug, Default)]
pub struct TonicExporterBuilderSet(TonicExporterBuilder);
@ -293,110 +437,6 @@ pub use crate::exporter::tonic::{TonicConfig, TonicExporterBuilder};
#[cfg(feature = "serialize")]
use serde::{Deserialize, Serialize};
/// Wrap type for errors from this crate.
#[derive(thiserror::Error, Debug)]
pub enum Error {
/// Wrap error from [`tonic::transport::Error`]
#[cfg(feature = "grpc-tonic")]
#[error("transport error {0}")]
Transport(#[from] tonic::transport::Error),
/// Wrap the [`tonic::codegen::http::uri::InvalidUri`] error
#[cfg(any(feature = "grpc-tonic", feature = "http-proto", feature = "http-json"))]
#[error("invalid URI {0}")]
InvalidUri(#[from] http::uri::InvalidUri),
/// Wrap type for [`tonic::Status`]
#[cfg(feature = "grpc-tonic")]
#[error("the grpc server returns error ({code}): {message}")]
Status {
/// grpc status code
code: tonic::Code,
/// error message
message: String,
},
/// Http requests failed because no http client is provided.
#[cfg(any(feature = "http-proto", feature = "http-json"))]
#[error(
"no http client, you must select one from features or provide your own implementation"
)]
NoHttpClient,
/// Http requests failed.
#[cfg(any(feature = "http-proto", feature = "http-json"))]
#[error("http request failed with {0}")]
RequestFailed(#[from] opentelemetry_http::HttpError),
/// The provided value is invalid in HTTP headers.
#[cfg(any(feature = "grpc-tonic", feature = "http-proto", feature = "http-json"))]
#[error("http header value error {0}")]
InvalidHeaderValue(#[from] http::header::InvalidHeaderValue),
/// The provided name is invalid in HTTP headers.
#[cfg(any(feature = "grpc-tonic", feature = "http-proto", feature = "http-json"))]
#[error("http header name error {0}")]
InvalidHeaderName(#[from] http::header::InvalidHeaderName),
/// Prost encode failed
#[cfg(any(
feature = "http-proto",
all(feature = "http-json", not(feature = "trace"))
))]
#[error("prost encoding error {0}")]
EncodeError(#[from] prost::EncodeError),
/// The lock in exporters has been poisoned.
#[cfg(feature = "metrics")]
#[error("the lock of the {0} has been poisoned")]
PoisonedLock(&'static str),
/// Unsupported compression algorithm.
#[error("unsupported compression algorithm '{0}'")]
UnsupportedCompressionAlgorithm(String),
/// Feature required to use the specified compression algorithm.
#[cfg(any(not(feature = "gzip-tonic"), not(feature = "zstd-tonic")))]
#[error("feature '{0}' is required to use the compression algorithm '{1}'")]
FeatureRequiredForCompressionAlgorithm(&'static str, Compression),
}
#[cfg(feature = "grpc-tonic")]
impl From<tonic::Status> for Error {
fn from(status: tonic::Status) -> Error {
Error::Status {
code: status.code(),
message: {
if !status.message().is_empty() {
let mut result = ", detailed error message: ".to_string() + status.message();
if status.code() == tonic::Code::Unknown {
let source = (&status as &dyn std::error::Error)
.source()
.map(|e| format!("{:?}", e));
result.push(' ');
result.push_str(source.unwrap_or_default().as_ref());
}
result
} else {
String::new()
}
},
}
}
}
impl ExportError for Error {
fn exporter_name(&self) -> &'static str {
"otlp"
}
}
impl opentelemetry::trace::ExportError for Error {
fn exporter_name(&self) -> &'static str {
"otlp"
}
}
/// The communication protocol to use when exporting data.
#[cfg_attr(feature = "serialize", derive(Deserialize, Serialize))]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
@ -413,3 +453,20 @@ pub enum Protocol {
#[doc(hidden)]
/// Placeholder type when no exporter pipeline has been configured in telemetry pipeline.
pub struct NoExporterConfig(());
/// Re-exported types from the `tonic` crate.
#[cfg(feature = "grpc-tonic")]
pub mod tonic_types {
/// Re-exported types from `tonic::metadata`.
pub mod metadata {
#[doc(no_inline)]
pub use tonic::metadata::MetadataMap;
}
/// Re-exported types from `tonic::transport`.
#[cfg(feature = "tls")]
pub mod transport {
#[doc(no_inline)]
pub use tonic::transport::{Certificate, ClientTlsConfig, Identity};
}
}

View File

@ -2,14 +2,13 @@
//!
//! Defines a [LogExporter] to send logs via the OpenTelemetry Protocol (OTLP)
use async_trait::async_trait;
#[cfg(feature = "grpc-tonic")]
use opentelemetry::otel_debug;
use opentelemetry_sdk::{error::OTelSdkResult, logs::LogBatch};
use std::fmt::Debug;
use std::time;
use opentelemetry_sdk::logs::LogResult;
use opentelemetry_sdk::export::logs::LogBatch;
use crate::{HasExportConfig, NoExporterBuilderSet};
use crate::{ExporterBuildError, HasExportConfig, NoExporterBuilderSet};
#[cfg(feature = "grpc-tonic")]
use crate::{HasTonicConfig, TonicExporterBuilder, TonicExporterBuilderSet};
@ -32,6 +31,7 @@ pub const OTEL_EXPORTER_OTLP_LOGS_TIMEOUT: &str = "OTEL_EXPORTER_OTLP_LOGS_TIMEO
/// Note: this is only supported for HTTP.
pub const OTEL_EXPORTER_OTLP_LOGS_HEADERS: &str = "OTEL_EXPORTER_OTLP_LOGS_HEADERS";
/// Builder for creating a new [LogExporter].
#[derive(Debug, Default, Clone)]
pub struct LogExporterBuilder<C> {
client: C,
@ -39,10 +39,12 @@ pub struct LogExporterBuilder<C> {
}
impl LogExporterBuilder<NoExporterBuilderSet> {
/// Create a new [LogExporterBuilder] with default settings.
pub fn new() -> Self {
LogExporterBuilder::default()
}
/// With the gRPC Tonic transport.
#[cfg(feature = "grpc-tonic")]
pub fn with_tonic(self) -> LogExporterBuilder<TonicExporterBuilderSet> {
LogExporterBuilder {
@ -51,6 +53,7 @@ impl LogExporterBuilder<NoExporterBuilderSet> {
}
}
/// With the HTTP transport.
#[cfg(any(feature = "http-proto", feature = "http-json"))]
pub fn with_http(self) -> LogExporterBuilder<HttpExporterBuilderSet> {
LogExporterBuilder {
@ -62,14 +65,18 @@ impl LogExporterBuilder<NoExporterBuilderSet> {
#[cfg(feature = "grpc-tonic")]
impl LogExporterBuilder<TonicExporterBuilderSet> {
pub fn build(self) -> Result<LogExporter, opentelemetry_sdk::logs::LogError> {
self.client.0.build_log_exporter()
/// Build the [LogExporter] with the gRPC Tonic transport.
pub fn build(self) -> Result<LogExporter, ExporterBuildError> {
let result = self.client.0.build_log_exporter();
otel_debug!(name: "LogExporterBuilt", result = format!("{:?}", &result));
result
}
}
#[cfg(any(feature = "http-proto", feature = "http-json"))]
impl LogExporterBuilder<HttpExporterBuilderSet> {
pub fn build(self) -> Result<LogExporter, opentelemetry_sdk::logs::LogError> {
/// Build the [LogExporter] with the HTTP transport.
pub fn build(self) -> Result<LogExporter, ExporterBuildError> {
self.client.0.build_log_exporter()
}
}
@ -105,7 +112,15 @@ impl HasHttpConfig for LogExporterBuilder<HttpExporterBuilderSet> {
/// OTLP exporter that sends log data
#[derive(Debug)]
pub struct LogExporter {
client: Box<dyn opentelemetry_sdk::export::logs::LogExporter>,
client: SupportedTransportClient,
}
#[derive(Debug)]
enum SupportedTransportClient {
#[cfg(feature = "grpc-tonic")]
Tonic(crate::exporter::tonic::logs::TonicLogsClient),
#[cfg(any(feature = "http-proto", feature = "http-json"))]
Http(crate::exporter::http::OtlpHttpClient),
}
impl LogExporter {
@ -114,21 +129,46 @@ impl LogExporter {
LogExporterBuilder::default()
}
/// Create a new log exporter
pub fn new(client: impl opentelemetry_sdk::export::logs::LogExporter + 'static) -> Self {
#[cfg(any(feature = "http-proto", feature = "http-json"))]
pub(crate) fn from_http(client: crate::exporter::http::OtlpHttpClient) -> Self {
LogExporter {
client: Box::new(client),
client: SupportedTransportClient::Http(client),
}
}
#[cfg(feature = "grpc-tonic")]
pub(crate) fn from_tonic(client: crate::exporter::tonic::logs::TonicLogsClient) -> Self {
LogExporter {
client: SupportedTransportClient::Tonic(client),
}
}
}
#[async_trait]
impl opentelemetry_sdk::export::logs::LogExporter for LogExporter {
async fn export(&mut self, batch: LogBatch<'_>) -> LogResult<()> {
self.client.export(batch).await
impl opentelemetry_sdk::logs::LogExporter for LogExporter {
async fn export(&self, batch: LogBatch<'_>) -> OTelSdkResult {
match &self.client {
#[cfg(feature = "grpc-tonic")]
SupportedTransportClient::Tonic(client) => client.export(batch).await,
#[cfg(any(feature = "http-proto", feature = "http-json"))]
SupportedTransportClient::Http(client) => client.export(batch).await,
}
}
fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) {
self.client.set_resource(resource);
match &mut self.client {
#[cfg(feature = "grpc-tonic")]
SupportedTransportClient::Tonic(client) => client.set_resource(resource),
#[cfg(any(feature = "http-proto", feature = "http-json"))]
SupportedTransportClient::Http(client) => client.set_resource(resource),
}
}
fn shutdown_with_timeout(&self, _timeout: time::Duration) -> OTelSdkResult {
match &self.client {
#[cfg(feature = "grpc-tonic")]
SupportedTransportClient::Tonic(client) => client.shutdown(),
#[cfg(any(feature = "http-proto", feature = "http-json"))]
SupportedTransportClient::Http(client) => client.shutdown(),
}
}
}

View File

@ -12,16 +12,16 @@ use crate::{exporter::http::HttpExporterBuilder, HasHttpConfig, HttpExporterBuil
#[cfg(feature = "grpc-tonic")]
use crate::{exporter::tonic::TonicExporterBuilder, HasTonicConfig, TonicExporterBuilderSet};
use crate::NoExporterBuilderSet;
use crate::{ExporterBuildError, NoExporterBuilderSet};
use async_trait::async_trait;
use core::fmt;
use opentelemetry_sdk::metrics::MetricResult;
use opentelemetry_sdk::error::OTelSdkResult;
use opentelemetry_sdk::metrics::{
data::ResourceMetrics, exporter::PushMetricExporter, Temporality,
};
use std::fmt::{Debug, Formatter};
use std::time::Duration;
/// Target to which the exporter is going to send metrics, defaults to https://localhost:4317/v1/metrics.
/// Learn about the relationship between this constant and default/spans/logs at
@ -37,6 +37,7 @@ pub const OTEL_EXPORTER_OTLP_METRICS_COMPRESSION: &str = "OTEL_EXPORTER_OTLP_MET
/// Note: this is only supported for HTTP.
pub const OTEL_EXPORTER_OTLP_METRICS_HEADERS: &str = "OTEL_EXPORTER_OTLP_METRICS_HEADERS";
/// A builder for creating a new [MetricExporter].
#[derive(Debug, Default, Clone)]
pub struct MetricExporterBuilder<C> {
client: C,
@ -44,12 +45,14 @@ pub struct MetricExporterBuilder<C> {
}
impl MetricExporterBuilder<NoExporterBuilderSet> {
/// Create a new [MetricExporterBuilder] with default settings.
pub fn new() -> Self {
MetricExporterBuilder::default()
}
}
impl<C> MetricExporterBuilder<C> {
/// With the gRPC Tonic transport.
#[cfg(feature = "grpc-tonic")]
pub fn with_tonic(self) -> MetricExporterBuilder<TonicExporterBuilderSet> {
MetricExporterBuilder {
@ -58,6 +61,7 @@ impl<C> MetricExporterBuilder<C> {
}
}
/// With the HTTP transport.
#[cfg(any(feature = "http-proto", feature = "http-json"))]
pub fn with_http(self) -> MetricExporterBuilder<HttpExporterBuilderSet> {
MetricExporterBuilder {
@ -66,6 +70,7 @@ impl<C> MetricExporterBuilder<C> {
}
}
/// Set the temporality for the metrics.
pub fn with_temporality(self, temporality: Temporality) -> MetricExporterBuilder<C> {
MetricExporterBuilder {
client: self.client,
@ -76,15 +81,18 @@ impl<C> MetricExporterBuilder<C> {
#[cfg(feature = "grpc-tonic")]
impl MetricExporterBuilder<TonicExporterBuilderSet> {
pub fn build(self) -> MetricResult<MetricExporter> {
/// Build the [MetricExporter] with the gRPC Tonic transport.
pub fn build(self) -> Result<MetricExporter, ExporterBuildError> {
let exporter = self.client.0.build_metrics_exporter(self.temporality)?;
opentelemetry::otel_debug!(name: "MetricExporterBuilt");
Ok(exporter)
}
}
#[cfg(any(feature = "http-proto", feature = "http-json"))]
impl MetricExporterBuilder<HttpExporterBuilderSet> {
pub fn build(self) -> MetricResult<MetricExporter> {
/// Build the [MetricExporter] with the HTTP transport.
pub fn build(self) -> Result<MetricExporter, ExporterBuildError> {
let exporter = self.client.0.build_metrics_exporter(self.temporality)?;
Ok(exporter)
}
@ -119,37 +127,60 @@ impl HasHttpConfig for MetricExporterBuilder<HttpExporterBuilderSet> {
}
/// An interface for OTLP metrics clients
#[async_trait]
pub trait MetricsClient: fmt::Debug + Send + Sync + 'static {
async fn export(&self, metrics: &mut ResourceMetrics) -> MetricResult<()>;
fn shutdown(&self) -> MetricResult<()>;
pub(crate) trait MetricsClient: fmt::Debug + Send + Sync + 'static {
fn export(
&self,
metrics: &ResourceMetrics,
) -> impl std::future::Future<Output = OTelSdkResult> + Send;
fn shutdown(&self) -> OTelSdkResult;
}
/// Export metrics in OTEL format.
pub struct MetricExporter {
client: Box<dyn MetricsClient>,
client: SupportedTransportClient,
temporality: Temporality,
}
#[derive(Debug)]
enum SupportedTransportClient {
#[cfg(feature = "grpc-tonic")]
Tonic(crate::exporter::tonic::metrics::TonicMetricsClient),
#[cfg(any(feature = "http-proto", feature = "http-json"))]
Http(crate::exporter::http::OtlpHttpClient),
}
impl Debug for MetricExporter {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("MetricExporter").finish()
}
}
#[async_trait]
impl PushMetricExporter for MetricExporter {
async fn export(&self, metrics: &mut ResourceMetrics) -> MetricResult<()> {
self.client.export(metrics).await
async fn export(&self, metrics: &ResourceMetrics) -> OTelSdkResult {
match &self.client {
#[cfg(feature = "grpc-tonic")]
SupportedTransportClient::Tonic(client) => client.export(metrics).await,
#[cfg(any(feature = "http-proto", feature = "http-json"))]
SupportedTransportClient::Http(client) => client.export(metrics).await,
}
}
async fn force_flush(&self) -> MetricResult<()> {
fn force_flush(&self) -> OTelSdkResult {
// this component is stateless
Ok(())
}
fn shutdown(&self) -> MetricResult<()> {
self.client.shutdown()
fn shutdown(&self) -> OTelSdkResult {
self.shutdown_with_timeout(Duration::from_secs(5))
}
fn shutdown_with_timeout(&self, _timeout: std::time::Duration) -> OTelSdkResult {
match &self.client {
#[cfg(feature = "grpc-tonic")]
SupportedTransportClient::Tonic(client) => client.shutdown(),
#[cfg(any(feature = "http-proto", feature = "http-json"))]
SupportedTransportClient::Http(client) => client.shutdown(),
}
}
fn temporality(&self) -> Temporality {
@ -163,10 +194,24 @@ impl MetricExporter {
MetricExporterBuilder::default()
}
/// Create a new metrics exporter
pub fn new(client: impl MetricsClient, temporality: Temporality) -> MetricExporter {
MetricExporter {
client: Box::new(client),
#[cfg(feature = "grpc-tonic")]
pub(crate) fn from_tonic(
client: crate::exporter::tonic::metrics::TonicMetricsClient,
temporality: Temporality,
) -> Self {
Self {
client: SupportedTransportClient::Tonic(client),
temporality,
}
}
#[cfg(any(feature = "http-proto", feature = "http-json"))]
pub(crate) fn from_http(
client: crate::exporter::http::OtlpHttpClient,
temporality: Temporality,
) -> Self {
Self {
client: SupportedTransportClient::Http(client),
temporality,
}
}

View File

@ -4,9 +4,10 @@
use std::fmt::Debug;
use futures_core::future::BoxFuture;
use opentelemetry_sdk::export::trace::{ExportResult, SpanData};
use opentelemetry_sdk::error::OTelSdkResult;
use opentelemetry_sdk::trace::SpanData;
use crate::ExporterBuildError;
#[cfg(feature = "grpc-tonic")]
use crate::{
exporter::tonic::{HasTonicConfig, TonicExporterBuilder},
@ -35,16 +36,19 @@ pub const OTEL_EXPORTER_OTLP_TRACES_COMPRESSION: &str = "OTEL_EXPORTER_OTLP_TRAC
/// Note: this is only supported for HTTP.
pub const OTEL_EXPORTER_OTLP_TRACES_HEADERS: &str = "OTEL_EXPORTER_OTLP_TRACES_HEADERS";
/// OTLP span exporter builder
#[derive(Debug, Default, Clone)]
pub struct SpanExporterBuilder<C> {
client: C,
}
impl SpanExporterBuilder<NoExporterBuilderSet> {
/// Create a new [SpanExporterBuilder] with default settings.
pub fn new() -> Self {
SpanExporterBuilder::default()
}
/// With the gRPC Tonic transport.
#[cfg(feature = "grpc-tonic")]
pub fn with_tonic(self) -> SpanExporterBuilder<TonicExporterBuilderSet> {
SpanExporterBuilder {
@ -52,6 +56,7 @@ impl SpanExporterBuilder<NoExporterBuilderSet> {
}
}
/// With the HTTP transport.
#[cfg(any(feature = "http-proto", feature = "http-json"))]
pub fn with_http(self) -> SpanExporterBuilder<HttpExporterBuilderSet> {
SpanExporterBuilder {
@ -62,17 +67,20 @@ impl SpanExporterBuilder<NoExporterBuilderSet> {
#[cfg(feature = "grpc-tonic")]
impl SpanExporterBuilder<TonicExporterBuilderSet> {
pub fn build(self) -> Result<SpanExporter, opentelemetry::trace::TraceError> {
/// Build the [SpanExporter] with the gRPC Tonic transport.
pub fn build(self) -> Result<SpanExporter, ExporterBuildError> {
let span_exporter = self.client.0.build_span_exporter()?;
Ok(SpanExporter::new(span_exporter))
opentelemetry::otel_debug!(name: "SpanExporterBuilt");
Ok(span_exporter)
}
}
#[cfg(any(feature = "http-proto", feature = "http-json"))]
impl SpanExporterBuilder<HttpExporterBuilderSet> {
pub fn build(self) -> Result<SpanExporter, opentelemetry::trace::TraceError> {
/// Build the [SpanExporter] with the HTTP transport.
pub fn build(self) -> Result<SpanExporter, ExporterBuildError> {
let span_exporter = self.client.0.build_span_exporter()?;
Ok(SpanExporter::new(span_exporter))
Ok(span_exporter)
}
}
@ -104,9 +112,19 @@ impl HasHttpConfig for SpanExporterBuilder<HttpExporterBuilderSet> {
}
}
/// OTLP exporter that sends tracing information
/// OTLP exporter that sends tracing data
#[derive(Debug)]
pub struct SpanExporter(Box<dyn opentelemetry_sdk::export::trace::SpanExporter>);
pub struct SpanExporter {
client: SupportedTransportClient,
}
#[derive(Debug)]
enum SupportedTransportClient {
#[cfg(feature = "grpc-tonic")]
Tonic(crate::exporter::tonic::trace::TonicTracesClient),
#[cfg(any(feature = "http-proto", feature = "http-json"))]
Http(crate::exporter::http::OtlpHttpClient),
}
impl SpanExporter {
/// Obtain a builder to configure a [SpanExporter].
@ -114,18 +132,37 @@ impl SpanExporter {
SpanExporterBuilder::default()
}
/// Build a new span exporter from a client
pub fn new(client: impl opentelemetry_sdk::export::trace::SpanExporter + 'static) -> Self {
SpanExporter(Box::new(client))
#[cfg(any(feature = "http-proto", feature = "http-json"))]
pub(crate) fn from_http(client: crate::exporter::http::OtlpHttpClient) -> Self {
SpanExporter {
client: SupportedTransportClient::Http(client),
}
}
#[cfg(feature = "grpc-tonic")]
pub(crate) fn from_tonic(client: crate::exporter::tonic::trace::TonicTracesClient) -> Self {
SpanExporter {
client: SupportedTransportClient::Tonic(client),
}
}
}
impl opentelemetry_sdk::export::trace::SpanExporter for SpanExporter {
fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
self.0.export(batch)
impl opentelemetry_sdk::trace::SpanExporter for SpanExporter {
async fn export(&self, batch: Vec<SpanData>) -> OTelSdkResult {
match &self.client {
#[cfg(feature = "grpc-tonic")]
SupportedTransportClient::Tonic(client) => client.export(batch).await,
#[cfg(any(feature = "http-proto", feature = "http-json"))]
SupportedTransportClient::Http(client) => client.export(batch).await,
}
}
fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) {
self.0.set_resource(resource);
match &mut self.client {
#[cfg(feature = "grpc-tonic")]
SupportedTransportClient::Tonic(client) => client.set_resource(resource),
#[cfg(any(feature = "http-proto", feature = "http-json"))]
SupportedTransportClient::Http(client) => client.set_resource(resource),
}
}
}

Some files were not shown because too many files have changed in this diff Show More