Compare commits
970 Commits
Author | SHA1 | Date |
---|---|---|
|
5d702ed0d3 | |
|
1773b8f778 | |
|
a820cad474 | |
|
36156f4d2e | |
|
6a3094f18a | |
|
6f3a18a109 | |
|
83d5cd843d | |
|
45ea93180c | |
|
5aaf88fa0a | |
|
c3fc90f076 | |
|
97382709ba | |
|
65179dc9db | |
|
8c6089aa59 | |
|
8add72d056 | |
|
b2c64b3765 | |
|
81ef37bb55 | |
|
8a65c8cbcf | |
|
a6ff9a030c | |
|
01680d8df5 | |
|
358e039e26 | |
|
81a36172bb | |
|
a6e06d0049 | |
|
26143b21d2 | |
|
5ae1d570ec | |
|
52c1ac978b | |
|
c2ad96df7a | |
|
88f07b6afd | |
|
a8338679e7 | |
|
8bf35d0894 | |
|
c6e20dba85 | |
|
9ec6ff7540 | |
|
3c1269ef4e | |
|
032e0e67cc | |
|
fc0fcb8dfa | |
|
4b43cb610d | |
|
9b6fa69bdf | |
|
b3a3b2cd17 | |
|
0b7c5de951 | |
|
da7268339a | |
|
37f0278c84 | |
|
70d2c6520a | |
|
b9be5bdd62 | |
|
0d0f513348 | |
|
a26d257a44 | |
|
3856e1d003 | |
|
4b992a7a61 | |
|
b73618392b | |
|
6c31f42ba8 | |
|
a4cc53feef | |
|
f3fe52b300 | |
|
9c487385ac | |
|
49fa87b916 | |
|
9abec35e31 | |
|
2ef34b8804 | |
|
ce537f25e1 | |
|
9033751161 | |
|
2f84857694 | |
|
ebfe715c2c | |
|
0a37bb9ed2 | |
|
82cdc8959d | |
|
e1c2db4aa7 | |
|
7b8bbf7d45 | |
|
91276a92f9 | |
|
5581d561ab | |
|
312ecac752 | |
|
a3deeb2e86 | |
|
3be75ed83e | |
|
23a2a7046a | |
|
805a784874 | |
|
8d9b238a14 | |
|
124993ee04 | |
|
2a7610aad1 | |
|
4e7590ed5b | |
|
793a3ab3aa | |
|
f4ff5f4b02 | |
|
f597cbe4ab | |
|
fc6507e5e2 | |
|
8871a07a11 | |
|
1ea60283f9 | |
|
d7185495de | |
|
d169a753b3 | |
|
98f5f837dc | |
|
d2ebf96a6b | |
|
f59725bffd | |
|
775c5adf7e | |
|
7b9925be4f | |
|
dda8cbf9d9 | |
|
0b1780df27 | |
|
11c3dda26b | |
|
858912df28 | |
|
62da4c9ced | |
|
0073d20a54 | |
|
d1beba98e3 | |
|
117eabb7df | |
|
edf87b1226 | |
|
3e2515164f | |
|
05a4eac804 | |
|
dba60a384f | |
|
5a3c5f6cd1 | |
|
60ffcf2b9c | |
|
55bd524d0c | |
|
34937d2e24 | |
|
b897a0ec76 | |
|
7445910478 | |
|
bd739679ea | |
|
4e8db837c1 | |
|
9b8178d1ce | |
|
fc9d836f0b | |
|
22daafd325 | |
|
e047c1ae62 | |
|
4ae092cf6a | |
|
d8c3f95e14 | |
|
3fc4309a4a | |
|
e437944860 | |
|
5971fcc6f7 | |
|
32c6de1c84 | |
|
aaf822c38a | |
|
451f9bf95b | |
|
a096565d4e | |
|
9076a8bfc4 | |
|
c253efa539 | |
|
fc199eb190 | |
|
806d540045 | |
|
017bf8777c | |
|
30265cd40a | |
|
b29e9cbcc8 | |
|
ca69001f42 | |
|
13c94405ad | |
|
1defd8a965 | |
|
7fbfeb2ab5 | |
|
d4f9c1e3f4 | |
|
95c17fe822 | |
|
7b95733bf2 | |
|
14efda12f6 | |
|
0f65ece890 | |
|
0a121fc089 | |
|
ddfe0d35c3 | |
|
2fe0301087 | |
|
0154bd1387 | |
|
e009336016 | |
|
b3fbfe3888 | |
|
67220b0f2d | |
|
d6cdd3e810 | |
|
5aa878193c | |
|
f0fadf0c2b | |
|
a79c3ee79d | |
|
673e45eacd | |
|
d5790ebc55 | |
|
8405e30819 | |
|
40c3ab55a4 | |
|
9ff34de29d | |
|
8a09f8b250 | |
|
bc4348d974 | |
|
afc3568016 | |
|
f3876c021c | |
|
cb666e44fd | |
|
c6d53b77e1 | |
|
2eccd3200b | |
|
aeade81452 | |
|
47076186a0 | |
|
1e8c9a39b5 | |
|
354dc82ffe | |
|
0caea40bc2 | |
|
82372b7d53 | |
|
2936dfbc68 | |
|
f013ec31da | |
|
93149757c2 | |
|
e5fcba07f0 | |
|
c7cd15f066 | |
|
8e63ae5aef | |
|
0c445ec562 | |
|
e65f4960ba | |
|
c9b1312855 | |
|
43b7618375 | |
|
f183c87b95 | |
|
e42b000c2e | |
|
0e3861a7da | |
|
226758b9c0 | |
|
4ac3ed10a9 | |
|
99f80e450e | |
|
2b31033720 | |
|
ed5e5bd15f | |
|
d2cdd2910f | |
|
96dad3ed06 | |
|
eaeaeb95a4 | |
|
9f40e41b09 | |
|
f7d3cfb46f | |
|
d7d0b9faeb | |
|
a87f4c9547 | |
|
dae9543fd7 | |
|
7cf5404b76 | |
|
4e7f0de705 | |
|
945a2ef14b | |
|
7a0e505a4e | |
|
14b73128a6 | |
|
e3a2d5c46b | |
|
d4125ec723 | |
|
2dcb26b4df | |
|
dc020cc456 | |
|
80adbe1cf8 | |
|
e26d2cf08f | |
|
9dbb7f1680 | |
|
77ca1ad0cf | |
|
2858a2c66b | |
|
539e0ef688 | |
|
4ff90552aa | |
|
023c02b4ab | |
|
453c0c12a1 | |
|
15be8178f6 | |
|
b079b133de | |
|
2ae70759ff | |
|
fb98ed26c5 | |
|
3af0b8ecea | |
|
5c6be9de3e | |
|
4ce7187391 | |
|
572575e7b7 | |
|
f1ea742f99 | |
|
79ba066f64 | |
|
e0cdc569e0 | |
|
6e411e659a | |
|
96a34e3f6a | |
|
b3f93ad580 | |
|
a52809ad43 | |
|
cfaa97a490 | |
|
b92a465bc8 | |
|
a5e9082385 | |
|
331f6052b4 | |
|
b3af09ee84 | |
|
aa90fa25bd | |
|
20673f8247 | |
|
218623b2ff | |
|
be72231f79 | |
|
4ed5a1432f | |
|
e0db18eb22 | |
|
da490c40b9 | |
|
36ab34f825 | |
|
59d248267e | |
|
92afa67be3 | |
|
2afd376e95 | |
|
735e39f121 | |
|
e28de1a3e0 | |
|
422c4a1c2b | |
|
53c273d250 | |
|
7431248f54 | |
|
1e9d506831 | |
|
d0b12d8512 | |
|
ebe78ff1b4 | |
|
9c38680126 | |
|
a801a43c86 | |
|
5cdcd59fcd | |
|
01f7d32de6 | |
|
3aa5488989 | |
|
2c8ea61bc0 | |
|
0c1bb9aab7 | |
|
45dc94ea61 | |
|
3e332acd18 | |
|
f6106a77c7 | |
|
69fc24ac67 | |
|
5ac63800fd | |
|
f3ec89caec | |
|
4153e5b579 | |
|
54e7e5be60 | |
|
7986fa93dc | |
|
4af0ec7b7c | |
|
a7876ca243 | |
|
97b6ae0b77 | |
|
4a215393be | |
|
e3ac19fc79 | |
|
d2a97f4f48 | |
|
a2bb444d83 | |
|
92ec93c4bc | |
|
1615862cee | |
|
4cd6a0a777 | |
|
a334a79dc8 | |
|
84dd8191fb | |
|
067f547d69 | |
|
6e24f35ccb | |
|
3db20f7bc4 | |
|
ca282f2e79 | |
|
782b540b3b | |
|
8137c72b18 | |
|
fafb86aec5 | |
|
2a4d37afe2 | |
|
808b769cc5 | |
|
581295361f | |
|
1f13340083 | |
|
258befe037 | |
|
7cc0189833 | |
|
8e2cd39852 | |
|
31d1443017 | |
|
512d0854fe | |
|
6a66cacb56 | |
|
c19a97334a | |
|
1b006501c0 | |
|
6ff8600f6c | |
|
72dc156391 | |
|
c00f094556 | |
|
23119a9d66 | |
|
eed18063b4 | |
|
a094efc1c6 | |
|
92f84e69de | |
|
09ab59b79f | |
|
d9cddcc647 | |
|
7480d43b1f | |
|
deb9746f80 | |
|
239e6a8fd2 | |
|
6069c1df8b | |
|
ec755da378 | |
|
aefc3a3f84 | |
|
17a01f1884 | |
|
f69f453a79 | |
|
ac36bb6ede | |
|
7a01c8c56e | |
|
59355babef | |
|
6d3a142cf3 | |
|
0d775871f4 | |
|
b600c74e85 | |
|
f3b7877c09 | |
|
7febaa8168 | |
|
889cb35050 | |
|
16222fe189 | |
|
2d14db9503 | |
|
426adfcc66 | |
|
49d738d652 | |
|
be9aee0b69 | |
|
5257aa0635 | |
|
3a16d3031e | |
|
d06d470d06 | |
|
6cf3b19018 | |
|
6595228b96 | |
|
d7889a9769 | |
|
cd32fec3ef | |
|
065e7559e0 | |
|
951d5a3171 | |
|
24d12b3af5 | |
|
0ff1851ecf | |
|
5b0e7691ea | |
|
9ad072c556 | |
|
f4ccd2af05 | |
|
9cf28ad6fe | |
|
fa217e54c3 | |
|
10efe16ed9 | |
|
6c3add9517 | |
|
10e8994151 | |
|
b149716c31 | |
|
f9774f329b | |
|
27643de13a | |
|
bfd739042d | |
|
2b6625ef8a | |
|
f798481818 | |
|
2b46b8c1e7 | |
|
cd9c7d2d2e | |
|
c5948f6b2d | |
|
3f6ac3ed0f | |
|
596a965d74 | |
|
a713492cda | |
|
b2f04ab9ed | |
|
1fa07602fe | |
|
1d7f4824b3 | |
|
436b166585 | |
|
66f07b9f63 | |
|
6ea7309598 | |
|
d06b99f9ca | |
|
174f443242 | |
|
f342cd3dbb | |
|
6ca397a839 | |
|
4c72e59721 | |
|
c2f8c17c45 | |
|
905c82af90 | |
|
33719e1b53 | |
|
c9c3f5cd75 | |
|
a12ab631f0 | |
|
035d0c84e6 | |
|
857e4ae8c5 | |
|
41a07af284 | |
|
5d530d21af | |
|
f71173841f | |
|
26db3d44c5 | |
|
23d2e88365 | |
|
7465c837e2 | |
|
ba4994008b | |
|
9bb3e5fcf4 | |
|
20d8d6dd2c | |
|
0b7233e1da | |
|
74a3d786b0 | |
|
6ea0fa4c6e | |
|
ca2c14460b | |
|
e9f1ac89d5 | |
|
b1f46bb03b | |
|
df4960bced | |
|
e0fbc717fc | |
|
0a2166ef28 | |
|
f05a039884 | |
|
2a9da21970 | |
|
8ae2a4bc33 | |
|
15230a7b7a | |
|
b4c91566d0 | |
|
40c2a73c38 | |
|
29be7f9f09 | |
|
d4e0738ffd | |
|
d1cc766de2 | |
|
5afa9c835d | |
|
2251a88a2f | |
|
cdde090cde | |
|
b364250fb6 | |
|
4df6d2bb6b | |
|
a8b64c56d7 | |
|
c6631e331d | |
|
5a790f2ae6 | |
|
541d5d8906 | |
|
55bd8475e1 | |
|
011835a850 | |
|
f8c7cb43f9 | |
|
e464516d8d | |
|
ef0caea27e | |
|
193f1f53eb | |
|
4192794f53 | |
|
9e680116fa | |
|
dc66266936 | |
|
b0aae230cc | |
|
ec5b64837a | |
|
7e81432e96 | |
|
26c31db412 | |
|
be97e0db27 | |
|
6ed999e66f | |
|
b3058721d1 | |
|
d754367635 | |
|
7bb0a73107 | |
|
a0d3e6869f | |
|
71ea1d5fd3 | |
|
e6256339a8 | |
|
d4c6e8b04f | |
|
6bf15df5f3 | |
|
704421fd6b | |
|
5699d83dfc | |
|
d975b9df98 | |
|
3324eea71c | |
|
a3c6bd9422 | |
|
5e192e8d9f | |
|
3e75fe580b | |
|
1151437228 | |
|
b733b5ac1f | |
|
a36c217c18 | |
|
81fce14af5 | |
|
8ece6d7d19 | |
|
f691d32584 | |
|
c8a9e2cb6c | |
|
b51cedf925 | |
|
d45b740b69 | |
|
e957a15ee8 | |
|
8d96fa5495 | |
|
be32409fe0 | |
|
c7ec823b0a | |
|
34c624c045 | |
|
ca609c8999 | |
|
0edcfae368 | |
|
1d05953b2f | |
|
75db558e79 | |
|
41c889141e | |
|
a76679bd0e | |
|
711f6177b0 | |
|
5cb581de6a | |
|
25b195fe99 | |
|
f6fa90977a | |
|
15e430779d | |
|
e7a39b8090 | |
|
daa885e297 | |
|
1d133a4671 | |
|
b9bbae6703 | |
|
c64a09c2cc | |
|
ad8b412372 | |
|
7bb7be1f3b | |
|
5d224dbd50 | |
|
3f50359058 | |
|
4eb047394f | |
|
682ccd20ae | |
|
e257fa3baf | |
|
336a10747b | |
|
06545f7e47 | |
|
0f728dfea8 | |
|
cf1490e066 | |
|
829ad73283 | |
|
adcc624d25 | |
|
b9fd5fac57 | |
|
6e3f837a2a | |
|
179e5fdd12 | |
|
1942659046 | |
|
cf3589eb2f | |
|
66bf38aaa9 | |
|
dd08543443 | |
|
8ad4ca8c15 | |
|
9117be4944 | |
|
26915f780d | |
|
c890bd2e5c | |
|
49fb5cddcd | |
|
1d9f1f3489 | |
|
13f407f922 | |
|
90efefc220 | |
|
1619e4f08f | |
|
66e43cc594 | |
|
dcfdebe2ab | |
|
01ee20c5bd | |
|
bd46cde7dd | |
|
d7b4dd91b0 | |
|
8ca403c5f5 | |
|
3856df0a59 | |
|
5a7f6a63e4 | |
|
8b039bbf93 | |
|
f3d84b5f88 | |
|
2c6467779a | |
|
32c7e89e8e | |
|
332b4f4b52 | |
|
2c174bb263 | |
|
9491564138 | |
|
857032ecf1 | |
|
33c35a642e | |
|
d3d522ffe9 | |
|
143566905a | |
|
e5df2f37ce | |
|
000a61f68c | |
|
2c9ce711a4 | |
|
1c0168c5ae | |
|
928faffa63 | |
|
380f8ee7f0 | |
|
644a297bcc | |
|
5c2a17b0da | |
|
63361ccbe8 | |
|
6e7673b11f | |
|
eb5f49619c | |
|
0e19b62078 | |
|
6d30e15ff2 | |
|
08e0a8a0cf | |
|
aea4251e90 | |
|
4df156af75 | |
|
dee0ea9f0a | |
|
b7345dc9ac | |
|
cdfe1a175b | |
|
b7ba0a6e45 | |
|
62ba6db9e3 | |
|
c755b34703 | |
|
852fa50120 | |
|
24cc64f95b | |
|
d23bc116c0 | |
|
6f3cfa3c39 | |
|
8a9d9c65ba | |
|
7d108fa804 | |
|
8ff5ce0e37 | |
|
21f8fe899d | |
|
18e580e5bb | |
|
3950997ae5 | |
|
456e2debe4 | |
|
013c03161b | |
|
dd0758f77e | |
|
a1425acb41 | |
|
cc938add05 | |
|
42859963d2 | |
|
f0fb98e32b | |
|
ebf7738d36 | |
|
650e0ceb62 | |
|
225d5ff7c3 | |
|
2e9d8816a7 | |
|
aa5b65a649 | |
|
516aed9fdf | |
|
14c385e374 | |
|
72d7976820 | |
|
71e5417431 | |
|
3e27e2bb6f | |
|
34b505e1dd | |
|
d91dfc6ef1 | |
|
7d6f0e762a | |
|
0deabdde10 | |
|
4a6a3e0d71 | |
|
64285c24ef | |
|
7d910371b5 | |
|
a07677d19f | |
|
1dba100ab8 | |
|
22f7ff40cd | |
|
93266c4440 | |
|
55ec98bffa | |
|
dd2c1fbaf5 | |
|
24032f15fd | |
|
a63979ff8e | |
|
b8da3f8fcd | |
|
8552ff052b | |
|
36386ddcee | |
|
4a3dfac549 | |
|
92dd8ace2b | |
|
799ac2bb2a | |
|
9964152efb | |
|
734941cd81 | |
|
6346658e02 | |
|
0d98eee1f9 | |
|
5cac862fe3 | |
|
7643e54b2a | |
|
fbe36b6410 | |
|
9d1f243f3c | |
|
adce4f35f0 | |
|
62fc6442a1 | |
|
bf9f3f8b39 | |
|
7d732c8ebf | |
|
936bc7547e | |
|
a3a8f7030f | |
|
39833e50e0 | |
|
07f0cbe42c | |
|
baf0eaa36e | |
|
425bfb99fb | |
|
0909264e15 | |
|
6bbb1d77c6 | |
|
f67784ffb7 | |
|
f0039ce66a | |
|
15a956b400 | |
|
e1ab43c9c7 | |
|
58aba1f940 | |
|
37994588e1 | |
|
74a10283b5 | |
|
24081f2fb2 | |
|
d3d3a72bba | |
|
12acfe438e | |
|
e8e85d3650 | |
|
483f9d1935 | |
|
7ec295cf6e | |
|
57161de608 | |
|
732d0eaf9b | |
|
81f3d31693 | |
|
1c0515b0da | |
|
43987908b7 | |
|
422f552a3e | |
|
6d09a9baf2 | |
|
bc6f2d6817 | |
|
b676e0a782 | |
|
3f71b8804f | |
|
531ba2b64e | |
|
9dcefdd967 | |
|
341421d6ec | |
|
727844036d | |
|
96ac40257e | |
|
3a3d9e1ef4 | |
|
d2add82cec | |
|
b186c8190c | |
|
7c3180843c | |
|
d48736a1cd | |
|
1348bf3030 | |
|
7349fd6049 | |
|
c13ec2889f | |
|
62f080f73d | |
|
0c68cce9da | |
|
c1c005fda6 | |
|
4427011684 | |
|
fa59b691fa | |
|
5c46b08427 | |
|
6a44e04a76 | |
|
eece7e8ef0 | |
|
d9e07bf460 | |
|
b7736a6087 | |
|
a7154944f4 | |
|
d08d553be0 | |
|
4dd6b38553 | |
|
78c0811641 | |
|
8903ed5d32 | |
|
e11e6ea628 | |
|
b0ffa06d3a | |
|
3d30b10683 | |
|
de017df5db | |
|
a65e8103b7 | |
|
5191316ae0 | |
|
f6c352e989 | |
|
2d33aeaf99 | |
|
494ff1f7e6 | |
|
721bdbb15f | |
|
b6f755a55e | |
|
2a66e05af6 | |
|
1c0c19d030 | |
|
4d2c3b5e1e | |
|
b602428be4 | |
|
038a7be975 | |
|
680074ddde | |
|
fbfca12cb2 | |
|
798487db81 | |
|
597132355a | |
|
f98f48f254 | |
|
76468a3171 | |
|
afaa13283a | |
|
f494280393 | |
|
4a0e0c5b5c | |
|
7ed5ab0c34 | |
|
51b595f466 | |
|
dd816187d0 | |
|
ec7a933d60 | |
|
5b0b51b4b1 | |
|
9e8a902a35 | |
|
e435ffefc2 | |
|
cdaadbe8c2 | |
|
5000adfc23 | |
|
aaf0446d4a | |
|
3f7db8b748 | |
|
fa5d16193b | |
|
e676145176 | |
|
756e72ebf7 | |
|
1555484dc6 | |
|
563a92011d | |
|
2b6e3edfb4 | |
|
a1e01077da | |
|
69c749994e | |
|
8f22e5ab3f | |
|
6ecf34d28a | |
|
2de945bae1 | |
|
a1771e8d80 | |
|
5d7449d089 | |
|
17b6dc6a32 | |
|
f035f7e290 | |
|
8fe0252f4c | |
|
b495ac8ac7 | |
|
b0e279719c | |
|
4918e51c78 | |
|
c3dd1dfc61 | |
|
76a2063045 | |
|
44f3cb47b3 | |
|
4c2baebc76 | |
|
6b9bfef7f6 | |
|
9fe400e6b3 | |
|
2b42848e38 | |
|
2a29c9360d | |
|
0f356bc1ec | |
|
50dc6a2b5c | |
|
6f618560e9 | |
|
23090f2b6d | |
|
763d8f8153 | |
|
8d9ba4ec8e | |
|
21eee6f25f | |
|
73a352f96c | |
|
43bd74c02a | |
|
c236ad5877 | |
|
a670383519 | |
|
e51b9eae18 | |
|
f4903a1aa4 | |
|
c95d38f8bf | |
|
99e857fd60 | |
|
3b070384aa | |
|
08cfe5c292 | |
|
b27aa691a0 | |
|
f4cdee01ba | |
|
cd19fcb917 | |
|
4f9416376e | |
|
60301158eb | |
|
95cbaaea34 | |
|
3839342a87 | |
|
7487fcbdc0 | |
|
870d5263b4 | |
|
9c1d3bf758 | |
|
d336f4f495 | |
|
6e48ee09d8 | |
|
8e347489c4 | |
|
6424f1713d | |
|
07579b203f | |
|
4a00e328fe | |
|
2c48b1378f | |
|
2983ea70e9 | |
|
1b7d0c638f | |
|
1459cf3bed | |
|
c31ccd40dc | |
|
dc3b6e78a8 | |
|
ae0c8c2871 | |
|
a18177050f | |
|
c0b35d751d | |
|
066a16eebf | |
|
928b5a4756 | |
|
aeaa483561 | |
|
91789860f6 | |
|
62045cce44 | |
|
5c6ac6fa11 | |
|
e46643159a | |
|
27e17fd0e8 | |
|
c2ea6096c6 | |
|
5041ea585a | |
|
b9a5775386 | |
|
8b223f8abe | |
|
7f1560254e | |
|
f0f9abb12a | |
|
acbeb1c842 | |
|
9e372c06dc | |
|
0639f51206 | |
|
b9be6693c1 | |
|
b4fbdf6cf7 | |
|
7ea1ef8507 | |
|
ba896e1e0b | |
|
30492bff7f | |
|
9f8ff361a1 | |
|
1d14305fb0 | |
|
0f74a01809 | |
|
105e50c9d5 | |
|
5ad1035f54 | |
|
0a6184590f | |
|
0f2112f3cf | |
|
2acbe763f6 | |
|
538d860d17 | |
|
cecff4e195 | |
|
811d8d7e1c | |
|
d983e7f515 | |
|
bb019ac15e | |
|
424fbddd1d | |
|
b05023dc63 | |
|
b13576dca4 | |
|
43cabe43be | |
|
d56ea370f1 | |
|
7e4637a56a | |
|
2480afd12e | |
|
c193c0b1a1 | |
|
37e85b9aa8 | |
|
72c6603c62 | |
|
414a036e3d | |
|
9c58a399a6 | |
|
2fc1a5024a | |
|
9d93cd902f | |
|
9ae650b2bc | |
|
8bfad76187 | |
|
493f3c5b1a | |
|
8306007749 | |
|
ceb5a82d18 | |
|
863fb725e5 | |
|
30665f73d4 | |
|
be3aa3b157 | |
|
3af6cf32c1 | |
|
87825466e7 | |
|
c8917e4d2d | |
|
67ea05899b | |
|
ace301ed98 | |
|
224cf30680 | |
|
52658537aa | |
|
2fcaa202fd | |
|
bc93856e1b | |
|
06b613fe96 | |
|
015dd0a00c | |
|
5d18984248 | |
|
26760e7e8a | |
|
aa0ca10242 | |
|
d50127bd1a | |
|
f75d7b5c4d | |
|
9aa5505baa | |
|
c91adec0c8 | |
|
175effc610 | |
|
d0236147c7 | |
|
35b4e7a045 | |
|
fe3a41353b | |
|
47f05dee61 | |
|
ef14fb0ca7 | |
|
9e4de617d7 | |
|
895d43d8a3 | |
|
38b14d9b21 | |
|
eb5d1771ec | |
|
8d8ab736bf | |
|
99201134f6 | |
|
8e4b2caca3 | |
|
8ac6527752 | |
|
bda6cb0fec | |
|
e02149cbbc | |
|
ee7d0079c2 | |
|
cf0c35cde3 | |
|
29c09984d9 | |
|
994063d823 | |
|
5580960042 | |
|
2843f7ea55 | |
|
ffb2afe358 | |
|
80e4a8ee77 | |
|
2b8888dc38 | |
|
3623f98906 | |
|
16bc43e13b | |
|
a4a06efac1 | |
|
5035431b22 | |
|
d9aaf2d587 | |
|
17d03c84c0 | |
|
e9bdf1b397 | |
|
5bb0074c96 | |
|
9ddf337361 | |
|
b7fbaae0ba | |
|
401a85d475 | |
|
440cda1f8c | |
|
f683783a46 | |
|
d39cc1df40 | |
|
9aa0b3ff40 | |
|
3fda8acbe2 | |
|
f3f9e260fc | |
|
c4645f7d96 | |
|
047bd89f39 | |
|
763ddea48a | |
|
76a62a74c6 | |
|
1b1d85e8b3 | |
|
d1bbf13af3 | |
|
a6bdc99c76 | |
|
b901c6ee3b | |
|
1685c1dfbc | |
|
98a8ac877e | |
|
89bbca0536 | |
|
f741558fb7 | |
|
456e8c9d52 | |
|
889b5e2596 | |
|
92352b9998 | |
|
2857be5c45 | |
|
44824cf28c | |
|
fed25dd3e7 | |
|
117cced24b | |
|
733397ec17 | |
|
4b09932bdb | |
|
128d67cb5c | |
|
eecb9964ec | |
|
44a94dfbd4 | |
|
1f9784c186 | |
|
459f1012fc | |
|
722acacd44 | |
|
48ce0485e4 | |
|
2a65bc101c | |
|
22d0f8229b | |
|
c2566ce909 | |
|
390efd7be6 | |
|
591fe5084b | |
|
1c5e32c208 | |
|
5da7c48fee | |
|
db44bbb2a7 | |
|
4dbdf78633 | |
|
a55d0b2168 | |
|
e8edd8e300 | |
|
0ddf1de679 | |
|
2867fed05b | |
|
cd4eb24ed1 | |
|
5d70cdf60a | |
|
586086be0d | |
|
fbf8304688 | |
|
d305bf68c9 | |
|
475a47d112 | |
|
a12d572018 | |
|
6bb047b67d | |
|
3ab5f74307 | |
|
3ce85a2c2b | |
|
e24705d688 | |
|
fdc3060a81 | |
|
7c205af4be | |
|
fecac63e49 | |
|
ad2f76aa5a | |
|
ce96fb1c8e | |
|
2c65e25299 | |
|
43886c2029 | |
|
3634a770f0 | |
|
b34c6cbc10 | |
|
f01a619b4d | |
|
3ecbf02f43 | |
|
b3d13db3c2 | |
|
eb24f80e35 | |
|
721d1d57f5 | |
|
a31d919b6a | |
|
670769fc6b | |
|
cd5dabae29 | |
|
7b35d9b14a | |
|
fe2d9a0f9a | |
|
d03c3b5c2d | |
|
e9c52ab6cc | |
|
16b3c085da | |
|
eda4cfa6de | |
|
dc2bd02c2b | |
|
2fcdc054f5 | |
|
3c16977492 | |
|
b97c51f129 | |
|
99c217c29e | |
|
09887697a6 | |
|
a05520876b | |
|
4316fbbb41 | |
|
7be6a0ed1e | |
|
0ff7751f04 | |
|
2d135605f9 | |
|
73e6f43c60 | |
|
e1a86db303 | |
|
c5871a47de |
|
@ -0,0 +1,6 @@
|
|||
# see https://github.com/cncf/clomonitor/blob/main/docs/checks.md#exemptions
|
||||
exemptions:
|
||||
- check: artifacthub_badge
|
||||
reason: "Artifact Hub doesn't support Java packages"
|
||||
- check: openssf_badge
|
||||
reason: "ETOOMANYBADGES, but the work has been done: https://www.bestpractices.dev/projects/9992"
|
|
@ -0,0 +1,11 @@
|
|||
version: 3
|
||||
|
||||
targets:
|
||||
only:
|
||||
- type: gradle
|
||||
|
||||
experimental:
|
||||
gradle:
|
||||
configurations-only:
|
||||
# consumer will only be exposed to these dependencies
|
||||
- runtimeClasspath
|
|
@ -1,9 +1,9 @@
|
|||
#
|
||||
# Learn about membership in OpenTelemetry community:
|
||||
# https://github.com/open-telemetry/community/blob/main/community-membership.md
|
||||
#
|
||||
# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md
|
||||
#
|
||||
# Learn about CODEOWNERS file format:
|
||||
#
|
||||
# Learn about CODEOWNERS file format:
|
||||
# https://help.github.com/en/articles/about-code-owners
|
||||
#
|
||||
|
||||
|
|
|
@ -1,28 +0,0 @@
|
|||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Description**
|
||||
A clear and concise description of what the bug is or observed behavior.
|
||||
|
||||
**Steps to reproduce**
|
||||
Provide a (runnable) recipe for reproducing the error.
|
||||
|
||||
**Expectation**
|
||||
A clear and concise description of what you expected to see.
|
||||
|
||||
**What applicable config did you use?**
|
||||
Config: (e.g. the yaml config file)
|
||||
|
||||
**Relevant Environment Information**
|
||||
Version: (e.g., `v0.0.1`, `2bef20a`, etc.)
|
||||
OS: (e.g., "Ubuntu 20.04")
|
||||
Compiler (if manually compiled): (e.g., "openJDK 1.8.0_252")
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the problem here.
|
|
@ -0,0 +1,59 @@
|
|||
name: Bug report
|
||||
description: Create a report to help us improve
|
||||
labels: [ bug ]
|
||||
body:
|
||||
- type: dropdown
|
||||
id: component
|
||||
attributes:
|
||||
label: Component(s)
|
||||
description: Which component(s) does your bug report concern?
|
||||
multiple: true
|
||||
options:
|
||||
- aws-resources
|
||||
- aws-xray
|
||||
- aws-xray-propagator
|
||||
- consistent-sampling
|
||||
- disk-buffering
|
||||
- gcp-auth-extension
|
||||
- gcp-resources
|
||||
- jfr-connection
|
||||
- jfr-events
|
||||
- jmx-metrics
|
||||
- jmx-scraper
|
||||
- maven-extension
|
||||
- micrometer-meter-provider
|
||||
- noop-api
|
||||
- processors
|
||||
- prometheus-client-bridge
|
||||
- resource-providers
|
||||
- runtime-attach
|
||||
- samplers
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: What happened?
|
||||
description: Please provide as much detail as you reasonably can.
|
||||
value: |
|
||||
## Description
|
||||
|
||||
## Steps to Reproduce
|
||||
|
||||
## Expected Result
|
||||
|
||||
## Actual Result
|
||||
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
attributes:
|
||||
label: Component version
|
||||
description: What version did you use? (e.g., `v1.26.0`, `1eb551b`, etc)
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Log output
|
||||
description: Please copy and paste any relevant log output.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional context
|
||||
description: Any additional information you think may be relevant to this issue.
|
|
@ -0,0 +1,7 @@
|
|||
contact_links:
|
||||
- name: StackOverflow
|
||||
url: https://stackoverflow.com/questions/ask?tags=open-telemetry+java
|
||||
about: Please ask questions here.
|
||||
- name: Slack
|
||||
url: https://cloud-native.slack.com/archives/C014L2KCTE3
|
||||
about: Or here.
|
|
@ -1,20 +0,0 @@
|
|||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: feature
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
|
@ -0,0 +1,50 @@
|
|||
name: Feature request
|
||||
description: Suggest an idea for this project
|
||||
labels: [ enhancement ]
|
||||
body:
|
||||
- type: dropdown
|
||||
id: component
|
||||
attributes:
|
||||
label: Component(s)
|
||||
description: Which component(s) does your feature request concern?
|
||||
multiple: true
|
||||
options:
|
||||
- aws-resources
|
||||
- aws-xray
|
||||
- aws-xray-propagator
|
||||
- consistent-sampling
|
||||
- disk-buffering
|
||||
- gcp-auth-extension
|
||||
- gcp-resources
|
||||
- jfr-connection
|
||||
- jfr-events
|
||||
- jmx-metrics
|
||||
- jmx-scraper
|
||||
- maven-extension
|
||||
- micrometer-meter-provider
|
||||
- noop-api
|
||||
- processors
|
||||
- prometheus-client-bridge
|
||||
- resource-providers
|
||||
- runtime-attach
|
||||
- samplers
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Is your feature request related to a problem? Please describe.
|
||||
description: A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe the solution you'd like
|
||||
description: A clear and concise description of what you want to happen.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe alternatives you've considered
|
||||
description: A clear and concise description of any alternative solutions or features you've considered.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional context
|
||||
description: Add any other context or screenshots about the feature request here.
|
|
@ -1,4 +1,4 @@
|
|||
# this file is used by .github/workflows/component-codeowners.yml
|
||||
# this file is used by .github/workflows/assign-reviewers.yml
|
||||
#
|
||||
# NOTE component owners must be members of the GitHub OpenTelemetry organization
|
||||
# so that they can be added to @open-telemetry/java-contrib-triagers
|
||||
|
@ -11,28 +11,49 @@
|
|||
# `comp:*` labels
|
||||
components:
|
||||
aws-resources:
|
||||
- willarmiros
|
||||
- wangzlei
|
||||
- srprash
|
||||
aws-xray:
|
||||
- willarmiros
|
||||
- wangzlei
|
||||
- srprash
|
||||
aws-xray-propagator:
|
||||
- willarmiros
|
||||
- wangzlei
|
||||
- srprash
|
||||
azure-resources:
|
||||
- trask
|
||||
- zeitlinger
|
||||
baggage-processor:
|
||||
- mikegoldsmith
|
||||
- zeitlinger
|
||||
cloudfoundry-resources:
|
||||
- KarstenSchnitter
|
||||
compressors:
|
||||
- jack-berg
|
||||
consistent-sampling:
|
||||
- oertl
|
||||
- PeterF778
|
||||
samplers:
|
||||
- iNikem
|
||||
- trask
|
||||
disk-buffering:
|
||||
- LikeTheSalad
|
||||
- zeitlinger
|
||||
gcp-resources:
|
||||
- jsuereth
|
||||
- psx95
|
||||
gcp-auth-extension:
|
||||
- jsuereth
|
||||
- psx95
|
||||
jfr-connection:
|
||||
- breedx-splk
|
||||
- jeanbisutti
|
||||
- dsgrieve
|
||||
jfr-events:
|
||||
- sfriberg
|
||||
jfr-streaming:
|
||||
- breedx-splk
|
||||
- jack-berg
|
||||
- kittylyst
|
||||
jmx-metrics:
|
||||
- dehaansa
|
||||
- Mrod1598
|
||||
- rmfitzpatrick
|
||||
- breedx-splk
|
||||
- sylvainjuge
|
||||
jmx-scraper:
|
||||
- breedx-splk
|
||||
- robsunday
|
||||
- sylvainjuge
|
||||
maven-extension:
|
||||
- cyrille-leclerc
|
||||
- kenfinnigan
|
||||
|
@ -40,6 +61,10 @@ components:
|
|||
- HaloFour
|
||||
noop-api:
|
||||
- jack-berg
|
||||
processors:
|
||||
- LikeTheSalad
|
||||
- breedx-splk
|
||||
- jack-berg
|
||||
prometheus-collector:
|
||||
- jkwatson
|
||||
resource-providers:
|
||||
|
@ -47,7 +72,21 @@ components:
|
|||
- mateuszrzeszutek
|
||||
- laurit
|
||||
runtime-attach:
|
||||
- iNikem
|
||||
- jeanbisutti
|
||||
static-instrumenter:
|
||||
- anosek-an
|
||||
samplers:
|
||||
- trask
|
||||
- jack-berg
|
||||
kafka-exporter:
|
||||
- spockz
|
||||
- vincentfree
|
||||
span-stacktrace:
|
||||
- jackshirazi
|
||||
- jonaskunz
|
||||
- sylvainjuge
|
||||
inferred-spans:
|
||||
- jackshirazi
|
||||
- jonaskunz
|
||||
- sylvainjuge
|
||||
opamp-client:
|
||||
- LikeTheSalad
|
||||
- jackshirazi
|
||||
|
|
|
@ -1,8 +0,0 @@
|
|||
{
|
||||
"retryOn429" : true,
|
||||
"ignorePatterns" : [
|
||||
{
|
||||
"pattern" : "^https://github\\.com/open-telemetry/opentelemetry-java-contrib/network/updates$"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
version: 2
|
||||
registries:
|
||||
gradle-plugin-portal:
|
||||
type: maven-repository
|
||||
url: https://plugins.gradle.org/m2
|
||||
username: dummy # Required by dependabot
|
||||
password: dummy # Required by dependabot
|
||||
updates:
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
|
||||
- package-ecosystem: "gradle"
|
||||
directory: "/"
|
||||
ignore:
|
||||
- dependency-name: "io.micrometer:micrometer-core"
|
||||
# compileOnly dependency on old micrometer-core version is intentional
|
||||
versions: [ "(1.1.0,)" ]
|
||||
- dependency-name: "org.apache.maven:maven-plugin-api"
|
||||
# static instrumenter maven plugin uses old maven API version for better compatibility
|
||||
versions: [ "(3.5.0,)" ]
|
||||
- dependency-name: "org.apache.maven:maven-core"
|
||||
# compileOnly dependency that matches the maven-plugin-api version in the static instrumenter maven plugin
|
||||
versions: [ "(3.5.0,)" ]
|
||||
- dependency-name: "org.junit-pioneer:junit-pioneer"
|
||||
# junit-pioneer 2.x requires Java 11
|
||||
versions: [ "[1,)" ]
|
||||
registries:
|
||||
- gradle-plugin-portal
|
||||
schedule:
|
||||
interval: "daily"
|
||||
open-pull-requests-limit: 10
|
|
@ -0,0 +1,180 @@
|
|||
{
|
||||
$schema: 'https://docs.renovatebot.com/renovate-schema.json',
|
||||
extends: [
|
||||
'config:best-practices',
|
||||
'helpers:pinGitHubActionDigestsToSemver',
|
||||
],
|
||||
ignorePresets: [
|
||||
':ignoreModulesAndTests', // needed to keep maven-extension test pom files up-to-date
|
||||
],
|
||||
prHourlyLimit: 5, // we have a large number of parallel runners
|
||||
labels: [
|
||||
'dependencies'
|
||||
],
|
||||
packageRules: [
|
||||
{
|
||||
// reduces the number of Renovate PRs
|
||||
// (patch updates are typically non-breaking)
|
||||
"groupName": "all patch versions",
|
||||
"matchUpdateTypes": ["patch"],
|
||||
"schedule": ["before 8am every weekday"]
|
||||
},
|
||||
{
|
||||
// avoids these Renovate PRs from trickling in throughout the week
|
||||
// (consolidating the review process)
|
||||
"matchUpdateTypes": ["minor", "major"],
|
||||
"schedule": ["before 8am on Monday"]
|
||||
},
|
||||
{
|
||||
matchPackageNames: [
|
||||
'io.opentelemetry:**',
|
||||
'io.opentelemetry.instrumentation:**',
|
||||
'io.opentelemetry.semconv:**',
|
||||
'io.opentelemetry.proto:**',
|
||||
],
|
||||
// Renovate's default behavior is only to update from unstable -> unstable if it's for the
|
||||
// major.minor.patch, under the assumption that you would want to update to the stable version
|
||||
// of that release instead of the unstable version for a future release
|
||||
ignoreUnstable: false,
|
||||
},
|
||||
{
|
||||
// prevent 3.0.1u2 -> 3.0.1
|
||||
matchPackageNames: [
|
||||
'com.google.code.findbugs:annotations',
|
||||
],
|
||||
allowedVersions: '!/^3\\.0\\.1$/',
|
||||
},
|
||||
{
|
||||
// disruptor 4+ requires Java 11+
|
||||
matchPackageNames: [
|
||||
'com.lmax:disruptor',
|
||||
],
|
||||
matchUpdateTypes: [
|
||||
'major',
|
||||
],
|
||||
enabled: false,
|
||||
},
|
||||
{
|
||||
// junit-pioneer 2+ requires Java 11+
|
||||
matchPackageNames: [
|
||||
'org.junit-pioneer:junit-pioneer',
|
||||
],
|
||||
matchUpdateTypes: [
|
||||
'major',
|
||||
],
|
||||
enabled: false,
|
||||
},
|
||||
{
|
||||
// mockito 5+ requires Java 11+
|
||||
matchUpdateTypes: [
|
||||
'major',
|
||||
],
|
||||
enabled: false,
|
||||
matchPackageNames: [
|
||||
'org.mockito:{/,}**',
|
||||
],
|
||||
},
|
||||
{
|
||||
// agrona 1.23+ requires Java 17+
|
||||
matchPackageNames: [
|
||||
'org.agrona:agrona',
|
||||
],
|
||||
matchUpdateTypes: [
|
||||
'major',
|
||||
'minor',
|
||||
],
|
||||
enabled: false,
|
||||
},
|
||||
{
|
||||
// system-stubs-jupiter 2.1+ requires Java 11+
|
||||
matchPackageNames: [
|
||||
'uk.org.webcompere:system-stubs-jupiter',
|
||||
],
|
||||
matchUpdateTypes: [
|
||||
'major',
|
||||
'minor',
|
||||
],
|
||||
enabled: false,
|
||||
},
|
||||
{
|
||||
// pinned version for compatibility
|
||||
matchPackageNames: [
|
||||
'io.micrometer:micrometer-core',
|
||||
],
|
||||
matchCurrentVersion: '1.5.0',
|
||||
enabled: false,
|
||||
},
|
||||
{
|
||||
// pinned version for compatibility
|
||||
matchCurrentVersion: '3.5.0',
|
||||
enabled: false,
|
||||
matchPackageNames: [
|
||||
'org.apache.maven:{/,}**',
|
||||
],
|
||||
},
|
||||
{
|
||||
groupName: 'spotless packages',
|
||||
matchPackageNames: [
|
||||
'com.diffplug.spotless{/,}**',
|
||||
],
|
||||
},
|
||||
{
|
||||
groupName: 'hipparchus packages',
|
||||
matchPackageNames: [
|
||||
'org.hipparchus{/,}**',
|
||||
],
|
||||
},
|
||||
{
|
||||
groupName: 'errorprone packages',
|
||||
matchPackageNames: [
|
||||
'com.google.errorprone{/,}**',
|
||||
],
|
||||
},
|
||||
{
|
||||
// pinned version for compatibility with java 8 JFR parsing
|
||||
matchUpdateTypes: [
|
||||
'major',
|
||||
],
|
||||
enabled: false,
|
||||
matchPackageNames: [
|
||||
'org.openjdk.jmc{/,}**',
|
||||
],
|
||||
},
|
||||
{
|
||||
// pinned version for compatibility
|
||||
matchFileNames: [
|
||||
'jmx-scraper/test-webapp/build.gradle.kts',
|
||||
],
|
||||
matchCurrentVersion: '5.0.0',
|
||||
enabled: false,
|
||||
matchPackageNames: [
|
||||
'jakarta.servlet:{/,}**',
|
||||
],
|
||||
},
|
||||
{
|
||||
// intentionally using Spring Boot 2 in gcp-auth-extension in order to test with Java 8+
|
||||
matchFileNames: [
|
||||
'gcp-auth-extension/build.gradle.kts',
|
||||
],
|
||||
matchUpdateTypes: [
|
||||
'major',
|
||||
],
|
||||
enabled: false,
|
||||
matchPackageNames: [
|
||||
'org.springframework.boot{/,}**',
|
||||
],
|
||||
},
|
||||
],
|
||||
customManagers: [
|
||||
{
|
||||
customType: 'regex',
|
||||
datasourceTemplate: 'npm',
|
||||
managerFilePatterns: [
|
||||
'/^.github/workflows//',
|
||||
],
|
||||
matchStrings: [
|
||||
'npx (?<depName>[^@]+)@(?<currentValue>[^\\s]+)',
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
|
@ -1,6 +1,17 @@
|
|||
# Repository settings
|
||||
|
||||
Same
|
||||
as [opentelemetry-java-instrumentation repository settings](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/.github/repository-settings.md#repository-settings)
|
||||
,
|
||||
except that the branch protection rules for `v*` and `gh-pages` are not needed in this repository.
|
||||
This document describes any changes that have been made to the
|
||||
settings in this repository outside the settings tracked in the
|
||||
private admin repo.
|
||||
|
||||
## Merge queue for `main`
|
||||
|
||||
[The admin repo doesn't currently support tracking merge queue settings.]
|
||||
|
||||
- Require merge queue: CHECKED
|
||||
- Build concurrency: 5
|
||||
- Maximum pull requests to build: 5
|
||||
- Minimum pull requests to merge: 1, or after 5 minutes
|
||||
- Maximum pull requests to merge: 5
|
||||
- Only merge non-failing pull requests: CHECKED
|
||||
- Status check timeout: 60 minutes
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
# this file exists so that Renovate can auto-update docker image versions that are then used elsewhere
|
||||
|
||||
FROM lycheeverse/lychee:sha-2aa22f8@sha256:2e3786630482c41f9f2dd081e06d7da1c36d66996e8cf6573409b8bc418d48c4 AS lychee
|
|
@ -27,18 +27,29 @@ declare -A component_names=()
|
|||
component_names["aws-resources/"]="AWS resources"
|
||||
component_names["aws-xray/"]="AWS X-Ray SDK support"
|
||||
component_names["aws-xray-propagator/"]="AWS X-Ray propagator"
|
||||
component_names["azure-resources/"]="Azure resources"
|
||||
component_names["baggage-processor/"]="Baggage processor"
|
||||
component_names["cloudfoundry-resources/"]="CloudFoundry resources"
|
||||
component_names["compressors/"]="Compressors"
|
||||
component_names["consistent-sampling/"]="Consistent sampling"
|
||||
component_names["disk-buffering/"]="Disk buffering"
|
||||
component_names["gcp-resources/"]="GCP resources"
|
||||
component_names["gcp-auth-extension/"]="GCP authentication extension"
|
||||
component_names["inferred-spans/"]="Inferred spans"
|
||||
component_names["jfr-connection/"]="JFR connection"
|
||||
component_names["jfr-events/"]="JFR events"
|
||||
component_names["jfr-streaming/"]="JFR streaming"
|
||||
component_names["jmx-metrics/"]="JMX metrics"
|
||||
component_names["jmx-scraper/"]="JMX scraper"
|
||||
component_names["kafka-exporter/"]="Kafka exporter"
|
||||
component_names["maven-extension/"]="Maven extension"
|
||||
component_names["micrometer-meter-provider/"]="Micrometer MeterProvider"
|
||||
component_names["noop-api/"]="No-op API"
|
||||
component_names["processors/"]="Telemetry processors"
|
||||
component_names["prometheus-client-bridge/"]="Prometheus client bridge"
|
||||
component_names["runtime-attach/"]="Runtime attach"
|
||||
component_names["resource-providers/"]="Resource providers"
|
||||
component_names["samplers/"]="Samplers"
|
||||
component_names["static-instrumenter/"]="Static instrumenter"
|
||||
component_names["span-stacktrace/"]="Span stack traces"
|
||||
|
||||
echo "## Unreleased"
|
||||
echo
|
||||
|
@ -47,7 +58,7 @@ for component in */ ; do
|
|||
component_name=${component_names[$component]:=$component}
|
||||
commits=$(git log --reverse \
|
||||
--perl-regexp \
|
||||
--author='^(?!dependabot\[bot\] )' \
|
||||
--author='^(?!renovate\[bot\] )' \
|
||||
--pretty=format:"- %s" \
|
||||
"$range" \
|
||||
"$component")
|
||||
|
|
|
@ -79,11 +79,12 @@ query($q: String!, $endCursor: String) {
|
|||
| sed 's/^\["//' \
|
||||
| sed 's/".*//')
|
||||
|
||||
# TODO (trask) can remove dependabot line after next release
|
||||
echo $contributors1 $contributors2 \
|
||||
| sed 's/ /\n/g' \
|
||||
| sort -uf \
|
||||
| grep -v linux-foundation-easycla \
|
||||
| grep -v github-actions \
|
||||
| grep -v dependabot \
|
||||
| grep -v opentelemetrybot \
|
||||
| grep -v renovate \
|
||||
| grep -v otelbot \
|
||||
| sed 's/^/@/'
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
#!/bin/bash -e
|
||||
|
||||
grep -Po "val stableVersion = \"\K[0-9]+.[0-9]+.[0-9]+" version.gradle.kts
|
||||
grep "val stableVersion = " version.gradle.kts | grep -Eo "[0-9]+.[0-9]+.[0-9]+"
|
||||
|
|
|
@ -0,0 +1,62 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
export MSYS_NO_PATHCONV=1 # for Git Bash on Windows
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
ROOT_DIR="$SCRIPT_DIR/../.."
|
||||
DEPENDENCIES_DOCKERFILE="$SCRIPT_DIR/dependencies.Dockerfile"
|
||||
|
||||
# Parse command line arguments
|
||||
LOCAL_LINKS_ONLY=false
|
||||
TARGET=""
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--local-links-only)
|
||||
LOCAL_LINKS_ONLY=true
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
# Treat any other arguments as file paths
|
||||
TARGET="$TARGET $1"
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Extract lychee version from dependencies.dockerfile
|
||||
LYCHEE_VERSION=$(grep "FROM lycheeverse/lychee:" "$DEPENDENCIES_DOCKERFILE" | sed 's/.*FROM lycheeverse\/lychee:\([^ ]*\).*/\1/')
|
||||
|
||||
if [[ -z "$TARGET" ]]; then
|
||||
TARGET="."
|
||||
fi
|
||||
|
||||
# Build the lychee command with optional GitHub token
|
||||
CMD="lycheeverse/lychee:$LYCHEE_VERSION --verbose --root-dir /data"
|
||||
|
||||
# Add GitHub token if available
|
||||
if [[ -n "$GITHUB_TOKEN" ]]; then
|
||||
CMD="$CMD --github-token $GITHUB_TOKEN"
|
||||
fi
|
||||
|
||||
if [[ "$LOCAL_LINKS_ONLY" == "true" ]]; then
|
||||
CMD="$CMD --scheme file --include-fragments"
|
||||
else
|
||||
CMD="$CMD --config .github/scripts/lychee-config.toml"
|
||||
fi
|
||||
|
||||
CMD="$CMD $TARGET"
|
||||
|
||||
# Determine if we should allocate a TTY
|
||||
DOCKER_FLAGS="--rm --init"
|
||||
if [[ -t 0 ]]; then
|
||||
DOCKER_FLAGS="$DOCKER_FLAGS -it"
|
||||
else
|
||||
DOCKER_FLAGS="$DOCKER_FLAGS -i"
|
||||
fi
|
||||
|
||||
# Run lychee with proper signal handling
|
||||
# shellcheck disable=SC2086
|
||||
exec docker run $DOCKER_FLAGS -v "$ROOT_DIR":/data -w /data $CMD
|
|
@ -0,0 +1,16 @@
|
|||
timeout = 30
|
||||
retry_wait_time = 5
|
||||
max_retries = 6
|
||||
max_concurrency = 4
|
||||
|
||||
# Check link anchors
|
||||
include_fragments = true
|
||||
|
||||
# excluding links to pull requests and issues is done for performance
|
||||
# sonatype snapshots are currrently unbrowseable
|
||||
exclude = [
|
||||
"^https://github.com/open-telemetry/opentelemetry-java-contrib/(issues|pull)/\\d+$",
|
||||
'^https://central.sonatype.com/service/rest/repository/browse/maven-snapshots/io/opentelemetry/contrib/$',
|
||||
]
|
||||
|
||||
|
|
@ -1,17 +0,0 @@
|
|||
#!/bin/bash -e
|
||||
|
||||
# this script helps to reduce sporadic link check failures by retrying at a file-by-file level
|
||||
|
||||
retry_count=3
|
||||
|
||||
for file in "$@"; do
|
||||
for i in $(seq 1 $retry_count); do
|
||||
if markdown-link-check --config "$(dirname "$0")/../config/markdown-link-check-config.json" \
|
||||
"$file"; then
|
||||
break
|
||||
elif [[ $i -eq $retry_count ]]; then
|
||||
exit 1
|
||||
fi
|
||||
sleep 5
|
||||
done
|
||||
done
|
|
@ -0,0 +1,4 @@
|
|||
#!/bin/bash -e
|
||||
|
||||
git config user.name otelbot
|
||||
git config user.email 197425009+otelbot@users.noreply.github.com
|
|
@ -1,4 +0,0 @@
|
|||
#!/bin/bash -e
|
||||
|
||||
git config user.name opentelemetrybot
|
||||
git config user.email 107717825+opentelemetrybot@users.noreply.github.com
|
|
@ -8,8 +8,16 @@ on:
|
|||
# because repository write permission is needed to assign reviewers
|
||||
pull_request_target:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
assign-reviewers:
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write # for assigning reviewers
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: dyladan/component-owners@main
|
||||
- uses: open-telemetry/assign-reviewers-action@fcd27c5381c10288b23d423ab85473710a33389e # main
|
||||
with:
|
||||
config-file: .github/component_owners.yml
|
||||
|
|
|
@ -0,0 +1,94 @@
|
|||
name: Auto spotless apply
|
||||
on:
|
||||
workflow_run:
|
||||
workflows:
|
||||
- "Auto spotless check"
|
||||
types:
|
||||
- completed
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
apply:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Download patch
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
run-id: ${{ github.event.workflow_run.id }}
|
||||
path: ${{ runner.temp }}
|
||||
merge-multiple: true
|
||||
github-token: ${{ github.token }}
|
||||
|
||||
- id: unzip-patch
|
||||
name: Unzip patch
|
||||
working-directory: ${{ runner.temp }}
|
||||
run: |
|
||||
if [ -f patch ]; then
|
||||
echo "exists=true" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
|
||||
if: steps.unzip-patch.outputs.exists == 'true'
|
||||
id: otelbot-token
|
||||
with:
|
||||
app-id: 1296620
|
||||
private-key: ${{ secrets.OTELBOT_JAVA_CONTRIB_PRIVATE_KEY }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
if: steps.unzip-patch.outputs.exists == 'true'
|
||||
with:
|
||||
repository: "${{ github.event.workflow_run.head_repository.full_name }}"
|
||||
ref: "${{ github.event.workflow_run.head_branch }}"
|
||||
token: ${{ steps.otelbot-token.outputs.token }}
|
||||
|
||||
- name: Use CLA approved github bot
|
||||
if: steps.unzip-patch.outputs.exists == 'true'
|
||||
# IMPORTANT do not call the .github/scripts/use-cla-approved-bot.sh
|
||||
# since that script could have been compromised in the PR branch
|
||||
run: |
|
||||
git config user.name otelbot
|
||||
git config user.email 197425009+otelbot@users.noreply.github.com
|
||||
|
||||
- name: Apply patch and push
|
||||
if: steps.unzip-patch.outputs.exists == 'true'
|
||||
run: |
|
||||
git apply "${{ runner.temp }}/patch"
|
||||
git commit -a -m "./gradlew spotlessApply"
|
||||
git push
|
||||
|
||||
- id: get-pr
|
||||
if: steps.unzip-patch.outputs.exists == 'true'
|
||||
name: Get PR
|
||||
env:
|
||||
GH_REPO: ${{ github.repository }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
PR_BRANCH: |-
|
||||
${{
|
||||
(github.event.workflow_run.head_repository.owner.login != github.event.workflow_run.repository.owner.login)
|
||||
&& format('{0}:{1}', github.event.workflow_run.head_repository.owner.login, github.event.workflow_run.head_branch)
|
||||
|| github.event.workflow_run.head_branch
|
||||
}}
|
||||
run: |
|
||||
number=$(gh pr view "$PR_BRANCH" --json number --jq .number)
|
||||
echo "number=$number" >> $GITHUB_OUTPUT
|
||||
|
||||
- if: steps.unzip-patch.outputs.exists == 'true' && success()
|
||||
env:
|
||||
GH_REPO: ${{ github.repository }}
|
||||
GH_TOKEN: ${{ steps.otelbot-token.outputs.token }}
|
||||
PR_NUMBER: ${{ steps.get-pr.outputs.number }}
|
||||
run: |
|
||||
gh pr comment $PR_NUMBER --body "🔧 The result from spotlessApply was committed to the PR branch."
|
||||
|
||||
- if: steps.unzip-patch.outputs.exists == 'true' && failure()
|
||||
env:
|
||||
GH_REPO: ${{ github.repository }}
|
||||
GH_TOKEN: ${{ steps.otelbot-token.outputs.token }}
|
||||
PR_NUMBER: ${{ steps.get-pr.outputs.number }}
|
||||
run: |
|
||||
gh pr comment $PR_NUMBER --body "❌ The result from spotlessApply could not be committed to the PR branch, see logs: $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID."
|
|
@ -0,0 +1,53 @@
|
|||
name: Auto spotless check
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- synchronize
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set up JDK for running Gradle
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
|
||||
- name: Set up gradle
|
||||
uses: gradle/actions/setup-gradle@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
|
||||
with:
|
||||
cache-read-only: true
|
||||
|
||||
- name: Check out PR branch
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: gh pr checkout ${{ github.event.pull_request.number }}
|
||||
|
||||
- name: Spotless
|
||||
run: ./gradlew spotlessApply
|
||||
|
||||
- id: create-patch
|
||||
name: Create patch file
|
||||
run: |
|
||||
git diff > patch
|
||||
if [ -s patch ]; then
|
||||
echo "exists=true" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Upload patch file
|
||||
if: steps.create-patch.outputs.exists == 'true'
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
path: patch
|
||||
name: patch
|
|
@ -6,8 +6,13 @@ on:
|
|||
description: "The pull request # to backport"
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
backport:
|
||||
permissions:
|
||||
contents: write # for Git to git push
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- run: |
|
||||
|
@ -16,24 +21,30 @@ jobs:
|
|||
exit 1
|
||||
fi
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
# history is needed to run git cherry-pick below
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Use CLA approved github bot
|
||||
run: .github/scripts/use-cla-approved-github-bot.sh
|
||||
- name: Use CLA approved bot
|
||||
run: .github/scripts/use-cla-approved-bot.sh
|
||||
|
||||
- uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
|
||||
id: otelbot-token
|
||||
with:
|
||||
app-id: ${{ vars.OTELBOT_APP_ID }}
|
||||
private-key: ${{ secrets.OTELBOT_PRIVATE_KEY }}
|
||||
|
||||
- name: Create pull request
|
||||
env:
|
||||
NUMBER: ${{ github.event.inputs.number }}
|
||||
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
|
||||
GH_TOKEN: ${{ secrets.BOT_TOKEN }}
|
||||
GH_TOKEN: ${{ steps.otelbot-token.outputs.token }}
|
||||
run: |
|
||||
commit=$(gh pr view $NUMBER --json mergeCommit --jq .mergeCommit.oid)
|
||||
title=$(gh pr view $NUMBER --json title --jq .title)
|
||||
|
||||
branch="opentelemetrybot/backport-${NUMBER}-to-${GITHUB_REF_NAME//\//-}"
|
||||
branch="otelbot/backport-${NUMBER}-to-${GITHUB_REF_NAME//\//-}"
|
||||
|
||||
git checkout -b $branch
|
||||
git cherry-pick $commit
|
||||
|
|
|
@ -6,7 +6,14 @@ on:
|
|||
- main
|
||||
- release/*
|
||||
pull_request:
|
||||
merge_group:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
# Run daily at 7:30 AM UTC
|
||||
- cron: '30 7 * * *'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
|
||||
|
@ -16,53 +23,113 @@ jobs:
|
|||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set up JDK for running Gradle
|
||||
uses: actions/setup-java@v3
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
|
||||
- name: Build
|
||||
uses: gradle/gradle-build-action@v2
|
||||
- name: Set up gradle
|
||||
uses: gradle/actions/setup-gradle@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
|
||||
with:
|
||||
arguments: build
|
||||
cache-read-only: ${{ github.event_name == 'pull_request' }}
|
||||
- name: Gradle build and test
|
||||
run: ./gradlew build -x test
|
||||
|
||||
- name: Save unit test results
|
||||
uses: actions/upload-artifact@v3
|
||||
if: always()
|
||||
test:
|
||||
name: Test
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os:
|
||||
- macos-latest
|
||||
- macos-13
|
||||
- ubuntu-latest
|
||||
- windows-latest
|
||||
test-java-version:
|
||||
- 8
|
||||
- 11
|
||||
- 17
|
||||
- 21
|
||||
- 23
|
||||
# macos-latest drops support for java 8 temurin. Run java 8 on macos-13. Run java 11, 17, 21 on macos-latest.
|
||||
exclude:
|
||||
- os: macos-latest
|
||||
test-java-version: 8
|
||||
- os: macos-13
|
||||
test-java-version: 11
|
||||
- os: macos-13
|
||||
test-java-version: 17
|
||||
- os: macos-13
|
||||
test-java-version: 21
|
||||
- os: macos-13
|
||||
test-java-version: 23
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- id: setup-java-test
|
||||
name: Set up Java ${{ matrix.test-java-version }} for tests
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
name: test-results
|
||||
path: jmx-metrics/build/reports/tests/test
|
||||
distribution: temurin
|
||||
java-version: ${{ matrix.test-java-version }}
|
||||
|
||||
- id: setup-java
|
||||
name: Set up Java for build
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
|
||||
- name: Set up gradle
|
||||
uses: gradle/actions/setup-gradle@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
|
||||
with:
|
||||
cache-read-only: ${{ github.event_name == 'pull_request' }}
|
||||
- name: Gradle test
|
||||
run: >
|
||||
./gradlew test
|
||||
"-PtestJavaVersion=${{ matrix.test-java-version }}"
|
||||
"-Porg.gradle.java.installations.paths=${{ steps.setup-java-test.outputs.path }}"
|
||||
"-Porg.gradle.java.installations.auto-download=false"
|
||||
|
||||
integration-test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set up JDK for running Gradle
|
||||
uses: actions/setup-java@v3
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
|
||||
- name: Integration test
|
||||
uses: gradle/gradle-build-action@v2
|
||||
- name: Set up gradle
|
||||
uses: gradle/actions/setup-gradle@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
|
||||
with:
|
||||
arguments: integrationTest
|
||||
cache-read-only: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
- name: Integration test
|
||||
run: ./gradlew integrationTest
|
||||
|
||||
- name: Save integration test results
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: always()
|
||||
with:
|
||||
name: integration-test-results
|
||||
path: jmx-metrics/build/reports/tests/integrationTest
|
||||
|
||||
markdown-link-check:
|
||||
uses: ./.github/workflows/reusable-markdown-link-check.yml
|
||||
link-check:
|
||||
# merge group and push events are excluded to avoid unnecessary CI failures
|
||||
# (these failures will instead be captured by the daily scheduled run)
|
||||
#
|
||||
# release branches are excluded to avoid unnecessary maintenance if external links break
|
||||
# (and also because the README.md might need update on release branches before the release
|
||||
# download has been published)
|
||||
if: github.event_name != 'merge_group' && github.event_name != 'push' && !startsWith(github.ref_name, 'release/')
|
||||
uses: ./.github/workflows/reusable-link-check.yml
|
||||
|
||||
markdown-lint-check:
|
||||
uses: ./.github/workflows/reusable-markdown-lint.yml
|
||||
|
@ -85,26 +152,27 @@ jobs:
|
|||
# and so would not short-circuit if used in the second-last position
|
||||
name: publish-snapshots${{ (github.ref_name != 'main' || github.repository != 'open-telemetry/opentelemetry-java-contrib') && ' (skipped)' || '' }}
|
||||
needs:
|
||||
# intentionally not blocking snapshot publishing on markdown-link-check or misspell-check
|
||||
# intentionally not blocking snapshot publishing on link-check or misspell-check
|
||||
- build
|
||||
- integration-test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set up JDK for running Gradle
|
||||
uses: actions/setup-java@v3
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
|
||||
- name: Build and publish snapshots
|
||||
uses: gradle/gradle-build-action@v2
|
||||
- name: Set up gradle
|
||||
uses: gradle/actions/setup-gradle@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
|
||||
# skipping release branches because the versions in those branches are not snapshots
|
||||
# (also this skips pull requests)
|
||||
if: ${{ github.ref_name == 'main' && github.repository == 'open-telemetry/opentelemetry-java-contrib' }}
|
||||
with:
|
||||
arguments: assemble publishToSonatype
|
||||
- name: Build and publish snapshots
|
||||
if: ${{ github.ref_name == 'main' && github.repository == 'open-telemetry/opentelemetry-java-contrib' }}
|
||||
run: ./gradlew assemble publishToSonatype
|
||||
env:
|
||||
SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
|
||||
SONATYPE_KEY: ${{ secrets.SONATYPE_KEY }}
|
||||
|
@ -112,19 +180,21 @@ jobs:
|
|||
GPG_PASSWORD: ${{ secrets.GPG_PASSWORD }}
|
||||
|
||||
required-status-check:
|
||||
if: github.event_name == 'pull_request'
|
||||
if: (github.event_name == 'pull_request' || github.event_name == 'merge_group') && always()
|
||||
needs:
|
||||
- build
|
||||
- test
|
||||
- integration-test
|
||||
- markdown-lint-check
|
||||
- misspell-check
|
||||
- shell-script-check
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# only the "build" and "integration-test" checks are required for release branch PRs in order
|
||||
# only the build and test checks are required for release branch PRs in order
|
||||
# to avoid any unnecessary release branch maintenance (especially for patches)
|
||||
- if: |
|
||||
needs.build.result != 'success' ||
|
||||
needs.test.result != 'success' ||
|
||||
needs.integration-test.result != 'success' ||
|
||||
(
|
||||
!startsWith(github.base_ref, 'release/') &&
|
||||
|
@ -135,3 +205,31 @@ jobs:
|
|||
)
|
||||
)
|
||||
run: exit 1 # fail
|
||||
|
||||
workflow-notification:
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
if: (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') && always()
|
||||
needs:
|
||||
- build
|
||||
- test
|
||||
- integration-test
|
||||
- link-check
|
||||
- markdown-lint-check
|
||||
- misspell-check
|
||||
- shell-script-check
|
||||
- publish-snapshots
|
||||
uses: ./.github/workflows/reusable-workflow-notification.yml
|
||||
with:
|
||||
success: >-
|
||||
${{
|
||||
needs.build.result == 'success' &&
|
||||
needs.test.result == 'success' &&
|
||||
needs.integration-test.result == 'success' &&
|
||||
needs.link-check.result == 'success' &&
|
||||
needs.markdown-lint-check.result == 'success' &&
|
||||
needs.misspell-check.result == 'success' &&
|
||||
needs.shell-script-check.result == 'success' &&
|
||||
needs.publish-snapshots.result == 'success'
|
||||
}}
|
||||
|
|
|
@ -1,43 +0,0 @@
|
|||
name: CodeQL (daily)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Daily at 01:30 (UTC)
|
||||
- cron: '30 1 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Java 17
|
||||
uses: actions/setup-java@v3
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v2
|
||||
with:
|
||||
languages: java
|
||||
# using "latest" helps to keep up with the latest Kotlin support
|
||||
# see https://github.com/github/codeql-action/issues/1555#issuecomment-1452228433
|
||||
tools: latest
|
||||
|
||||
- uses: gradle/gradle-build-action@v2
|
||||
with:
|
||||
# skipping build cache is needed so that all modules will be analyzed
|
||||
arguments: assemble --no-build-cache
|
||||
|
||||
- name: Perform CodeQL analysis
|
||||
uses: github/codeql-action/analyze@v2
|
||||
|
||||
open-issue-on-failure:
|
||||
# open an issue on failure because it can be easy to miss CI failure notifications
|
||||
needs:
|
||||
- analyze
|
||||
if: failure() && github.run_attempt == 1
|
||||
uses: ./.github/workflows/reusable-open-issue-on-failure.yml
|
|
@ -0,0 +1,70 @@
|
|||
name: CodeQL
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- release/*
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- release/*
|
||||
# TODO (trask) adding this to the merge queue causes the merge queue to fail
|
||||
# see related issues
|
||||
# - https://github.com/github/codeql-action/issues/1572
|
||||
# - https://github.com/github/codeql-action/issues/1537
|
||||
# - https://github.com/github/codeql-action/issues/2691
|
||||
# merge_group:
|
||||
schedule:
|
||||
- cron: "29 13 * * 2" # weekly at 13:29 UTC on Tuesday
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze (${{ matrix.language }})
|
||||
permissions:
|
||||
contents: read
|
||||
actions: read # for github/codeql-action/init to get workflow details
|
||||
security-events: write # for github/codeql-action/analyze to upload SARIF results
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- language: actions
|
||||
- language: java
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set up Java 17
|
||||
if: matrix.language == 'java'
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
|
||||
- name: Set up gradle
|
||||
if: matrix.language == 'java'
|
||||
uses: gradle/actions/setup-gradle@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
# using "latest" helps to keep up with the latest Kotlin support
|
||||
# see https://github.com/github/codeql-action/issues/1555#issuecomment-1452228433
|
||||
tools: latest
|
||||
|
||||
- name: Assemble
|
||||
if: matrix.language == 'java'
|
||||
# --no-build-cache is required for codeql to analyze all modules
|
||||
# --no-daemon is required for codeql to observe the compilation
|
||||
# (see https://docs.github.com/en/code-security/codeql-cli/getting-started-with-the-codeql-cli/preparing-your-code-for-codeql-analysis#specifying-build-commands)
|
||||
run: ./gradlew assemble --no-build-cache --no-daemon
|
||||
|
||||
- name: Perform CodeQL analysis
|
||||
uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
|
@ -0,0 +1,20 @@
|
|||
name: FOSSA
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
fossa:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0
|
||||
with:
|
||||
api-key: ${{secrets.FOSSA_API_KEY}}
|
||||
team: OpenTelemetry
|
|
@ -1,16 +1,20 @@
|
|||
name: Gradle wrapper validation
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- '**/gradle/wrapper/**'
|
||||
push:
|
||||
paths:
|
||||
- '**/gradle/wrapper/**'
|
||||
branches:
|
||||
- main
|
||||
- release/*
|
||||
pull_request:
|
||||
merge_group:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
validation:
|
||||
gradle-wrapper-validation:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- uses: gradle/wrapper-validation-action@v1.0.6
|
||||
- uses: gradle/actions/wrapper-validation@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
|
||||
|
|
|
@ -0,0 +1,29 @@
|
|||
name: Issue management - remove labels as needed
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
issue_comment:
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
pull-requests: write
|
||||
if: >
|
||||
contains(github.event.issue.labels.*.name, 'needs author feedback') &&
|
||||
github.event.comment.user.login == github.event.issue.user.login
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Remove labels
|
||||
env:
|
||||
ISSUE_NUMBER: ${{ github.event.issue.number }}
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh issue edit --remove-label "needs author feedback" $ISSUE_NUMBER
|
||||
gh issue edit --remove-label "stale" $ISSUE_NUMBER
|
|
@ -0,0 +1,36 @@
|
|||
name: Issue management - run stale action
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# hourly at minute 23
|
||||
- cron: "23 * * * *"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write # for actions/stale to close stale issues
|
||||
pull-requests: write # for actions/stale to close stale PRs
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
days-before-stale: 7
|
||||
days-before-close: 7
|
||||
only-labels: "needs author feedback"
|
||||
stale-issue-label: stale
|
||||
stale-issue-message: >
|
||||
This has been automatically marked as stale because it has been marked
|
||||
as needing author feedback and has not had any activity for 7 days.
|
||||
It will be closed automatically if there is no response from the author
|
||||
within 7 additional days from this comment.
|
||||
stale-pr-label: stale
|
||||
stale-pr-message: >
|
||||
This has been automatically marked as stale because it has been marked
|
||||
as needing author feedback and has not had any activity for 7 days.
|
||||
It will be closed automatically if there is no response from the author
|
||||
within 7 additional days from this comment.
|
|
@ -0,0 +1,57 @@
|
|||
name: OSSF Scorecard
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
schedule:
|
||||
- cron: "43 6 * * 5" # weekly at 06:43 (UTC) on Friday
|
||||
workflow_dispatch:
|
||||
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
analysis:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
# Needed for Code scanning upload
|
||||
security-events: write
|
||||
# Needed for GitHub OIDC token if publish_results is true
|
||||
id-token: write
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
|
||||
id: create-token
|
||||
with:
|
||||
# analyzing classic branch protections requires a token with admin read permissions
|
||||
# see https://github.com/ossf/scorecard-action/blob/main/docs/authentication/fine-grained-auth-token.md
|
||||
# and https://github.com/open-telemetry/community/issues/2769
|
||||
app-id: ${{ vars.OSSF_SCORECARD_APP_ID }}
|
||||
private-key: ${{ secrets.OSSF_SCORECARD_PRIVATE_KEY }}
|
||||
|
||||
- uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
|
||||
with:
|
||||
repo_token: ${{ steps.create-token.outputs.token }}
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
publish_results: true
|
||||
|
||||
# Upload the results as artifacts (optional). Commenting out will disable
|
||||
# uploads of run results in SARIF format to the repository Actions tab.
|
||||
# https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
retention-days: 5
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard (optional).
|
||||
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
with:
|
||||
sarif_file: results.sarif
|
|
@ -0,0 +1,51 @@
|
|||
# the benefit of this over renovate is that this also analyzes transitive dependencies
|
||||
# while renovate (at least currently) only analyzes top-level dependencies
|
||||
name: OWASP dependency check (daily)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# daily at 1:30 UTC
|
||||
- cron: "30 1 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set up JDK for running Gradle
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
|
||||
- name: Increase gradle daemon heap size
|
||||
run: |
|
||||
sed -i "s/org.gradle.jvmargs=/org.gradle.jvmargs=-Xmx3g /" gradle.properties
|
||||
|
||||
- uses: gradle/actions/setup-gradle@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
|
||||
|
||||
- run: ./gradlew dependencyCheckAnalyze
|
||||
env:
|
||||
NVD_API_KEY: ${{ secrets.NVD_API_KEY }}
|
||||
|
||||
- name: Upload report
|
||||
if: always()
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
path: "**/build/reports"
|
||||
|
||||
workflow-notification:
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
needs:
|
||||
- analyze
|
||||
if: always()
|
||||
uses: ./.github/workflows/reusable-workflow-notification.yml
|
||||
with:
|
||||
success: ${{ needs.analyze.result == 'success' }}
|
|
@ -2,11 +2,16 @@ name: Prepare patch release
|
|||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
prepare-patch-release:
|
||||
permissions:
|
||||
contents: write # for Git to git push
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- run: |
|
||||
if [[ ! $GITHUB_REF_NAME =~ ^release/v[0-9]+\.[0-9]+\.x$ ]]; then
|
||||
|
@ -39,16 +44,22 @@ jobs:
|
|||
date=$(date "+%Y-%m-%d")
|
||||
sed -Ei "s/^## Unreleased$/## Version $VERSION ($date)/" CHANGELOG.md
|
||||
|
||||
- name: Use CLA approved github bot
|
||||
run: .github/scripts/use-cla-approved-github-bot.sh
|
||||
- name: Use CLA approved bot
|
||||
run: .github/scripts/use-cla-approved-bot.sh
|
||||
|
||||
- uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
|
||||
id: otelbot-token
|
||||
with:
|
||||
app-id: ${{ vars.OTELBOT_APP_ID }}
|
||||
private-key: ${{ secrets.OTELBOT_PRIVATE_KEY }}
|
||||
|
||||
- name: Create pull request
|
||||
env:
|
||||
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
|
||||
GH_TOKEN: ${{ secrets.BOT_TOKEN }}
|
||||
GH_TOKEN: ${{ steps.otelbot-token.outputs.token }}
|
||||
run: |
|
||||
message="Prepare release $VERSION"
|
||||
branch="opentelemetrybot/prepare-release-${VERSION}"
|
||||
branch="otelbot/prepare-release-${VERSION}"
|
||||
|
||||
git checkout -b $branch
|
||||
git commit -a -m "$message"
|
||||
|
|
|
@ -2,11 +2,14 @@ name: Prepare release branch
|
|||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
prereqs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Verify prerequisites
|
||||
run: |
|
||||
|
@ -21,11 +24,13 @@ jobs:
|
|||
fi
|
||||
|
||||
create-pull-request-against-release-branch:
|
||||
permissions:
|
||||
contents: write # for Git to git push
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- prereqs
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Create release branch
|
||||
run: |
|
||||
|
@ -51,16 +56,22 @@ jobs:
|
|||
date=$(date "+%Y-%m-%d")
|
||||
sed -Ei "s/^## Unreleased$/## Version $VERSION ($date)/" CHANGELOG.md
|
||||
|
||||
- name: Use CLA approved github bot
|
||||
run: .github/scripts/use-cla-approved-github-bot.sh
|
||||
- name: Use CLA approved bot
|
||||
run: .github/scripts/use-cla-approved-bot.sh
|
||||
|
||||
- uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
|
||||
id: otelbot-token
|
||||
with:
|
||||
app-id: ${{ vars.OTELBOT_APP_ID }}
|
||||
private-key: ${{ secrets.OTELBOT_PRIVATE_KEY }}
|
||||
|
||||
- name: Create pull request against the release branch
|
||||
env:
|
||||
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
|
||||
GH_TOKEN: ${{ secrets.BOT_TOKEN }}
|
||||
GH_TOKEN: ${{ steps.otelbot-token.outputs.token }}
|
||||
run: |
|
||||
message="Prepare release $VERSION"
|
||||
branch="opentelemetrybot/prepare-release-${VERSION}"
|
||||
branch="otelbot/prepare-release-${VERSION}"
|
||||
|
||||
git checkout -b $branch
|
||||
git commit -a -m "$message"
|
||||
|
@ -70,11 +81,13 @@ jobs:
|
|||
--base $RELEASE_BRANCH_NAME
|
||||
|
||||
create-pull-request-against-main:
|
||||
permissions:
|
||||
contents: write # for Git to git push
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- prereqs
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set environment variables
|
||||
run: |
|
||||
|
@ -100,17 +113,23 @@ jobs:
|
|||
date=$(date "+%Y-%m-%d")
|
||||
sed -Ei "s/^## Unreleased$/## Unreleased\n\n## Version $VERSION ($date)/" CHANGELOG.md
|
||||
|
||||
- name: Use CLA approved github bot
|
||||
run: .github/scripts/use-cla-approved-github-bot.sh
|
||||
- name: Use CLA approved bot
|
||||
run: .github/scripts/use-cla-approved-bot.sh
|
||||
|
||||
- uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
|
||||
id: otelbot-token
|
||||
with:
|
||||
app-id: ${{ vars.OTELBOT_APP_ID }}
|
||||
private-key: ${{ secrets.OTELBOT_PRIVATE_KEY }}
|
||||
|
||||
- name: Create pull request against main
|
||||
env:
|
||||
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
|
||||
GH_TOKEN: ${{ secrets.BOT_TOKEN }}
|
||||
GH_TOKEN: ${{ steps.otelbot-token.outputs.token }}
|
||||
run: |
|
||||
message="Update version to $NEXT_VERSION"
|
||||
body="Update version to \`$NEXT_VERSION\`."
|
||||
branch="opentelemetrybot/update-version-to-${NEXT_VERSION}"
|
||||
branch="otelbot/update-version-to-${NEXT_VERSION}"
|
||||
|
||||
git checkout -b $branch
|
||||
git commit -a -m "$message"
|
||||
|
|
|
@ -2,24 +2,27 @@ name: Release
|
|||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set up JDK for running Gradle
|
||||
uses: actions/setup-java@v3
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
|
||||
- uses: gradle/gradle-build-action@v2
|
||||
name: Build
|
||||
with:
|
||||
arguments: build
|
||||
- name: Set up gradle
|
||||
uses: gradle/actions/setup-gradle@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
|
||||
- name: Gradle build
|
||||
run: ./gradlew build
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
name: Save unit test results
|
||||
if: always()
|
||||
with:
|
||||
|
@ -29,20 +32,20 @@ jobs:
|
|||
integration-test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set up JDK for running Gradle
|
||||
uses: actions/setup-java@v3
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
|
||||
- uses: gradle/gradle-build-action@v2
|
||||
name: Integration test
|
||||
with:
|
||||
arguments: integrationTest
|
||||
- name: Set up gradle
|
||||
uses: gradle/actions/setup-gradle@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
|
||||
- name: Integration test
|
||||
run: ./gradlew integrationTest
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
name: Save integration test results
|
||||
if: always()
|
||||
with:
|
||||
|
@ -50,13 +53,14 @@ jobs:
|
|||
path: jmx-metrics/build/reports/tests/integrationTest
|
||||
|
||||
release:
|
||||
permissions:
|
||||
contents: write # for creating the release
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- build
|
||||
- integration-test
|
||||
outputs:
|
||||
version: ${{ steps.create-github-release.outputs.version }}
|
||||
jmx-metrics-version: ${{ steps.create-github-release.outputs.jmx-metrics-version }}
|
||||
steps:
|
||||
- run: |
|
||||
if [[ $GITHUB_REF_NAME != release/* ]]; then
|
||||
|
@ -64,7 +68,7 @@ jobs:
|
|||
exit 1
|
||||
fi
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Set environment variables
|
||||
run: |
|
||||
|
@ -93,7 +97,7 @@ jobs:
|
|||
|
||||
# check out main branch to verify there won't be problems with merging the change log
|
||||
# at the end of this workflow
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: main
|
||||
|
||||
|
@ -108,21 +112,21 @@ jobs:
|
|||
fi
|
||||
|
||||
# back to the release branch
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
# tags are needed for the generate-release-contributors.sh script
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up JDK for running Gradle
|
||||
uses: actions/setup-java@v3
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
|
||||
- name: Set up gradle
|
||||
uses: gradle/actions/setup-gradle@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
|
||||
- name: Build and publish artifacts
|
||||
uses: gradle/gradle-build-action@v2
|
||||
with:
|
||||
arguments: assemble publishToSonatype closeAndReleaseSonatypeStagingRepository
|
||||
run: ./gradlew assemble publishToSonatype closeAndReleaseSonatypeStagingRepository
|
||||
env:
|
||||
SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
|
||||
SONATYPE_KEY: ${{ secrets.SONATYPE_KEY }}
|
||||
|
@ -133,10 +137,12 @@ jobs:
|
|||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
instrumentation_version=$(grep -Po "val otelInstrumentationVersion = \"\K[0-9]+.[0-9]+.[0-9]+" dependencyManagement/build.gradle.kts)
|
||||
|
||||
# conditional blocks not indented because of the heredoc
|
||||
if [[ $VERSION == *.0 ]]; then
|
||||
cat > /tmp/release-notes.txt << EOF
|
||||
This release targets the OpenTelemetry SDK $VERSION.
|
||||
This release targets the OpenTelemetry Java Instrumentation [$instrumentation_version](https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/tag/v$instrumentation_version).
|
||||
|
||||
EOF
|
||||
else
|
||||
|
@ -173,24 +179,25 @@ jobs:
|
|||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
jmx_metrics_version=$VERSION-alpha
|
||||
cp jmx-metrics/build/libs/opentelemetry-jmx-metrics-$jmx_metrics_version.jar opentelemetry-jmx-metrics.jar
|
||||
cp jmx-metrics/build/libs/opentelemetry-jmx-metrics-$VERSION-alpha.jar opentelemetry-jmx-metrics.jar
|
||||
cp jmx-metrics/build/libs/opentelemetry-jmx-metrics-$VERSION-alpha.jar.asc opentelemetry-jmx-metrics.jar.asc
|
||||
gh release create --target $GITHUB_REF_NAME \
|
||||
--title "Version $VERSION" \
|
||||
--notes-file /tmp/release-notes.txt \
|
||||
--discussion-category announcements \
|
||||
v$VERSION \
|
||||
opentelemetry-jmx-metrics.jar
|
||||
opentelemetry-jmx-metrics.jar \
|
||||
opentelemetry-jmx-metrics.jar.asc
|
||||
|
||||
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||
echo "jmx-metrics-version=$jmx_metrics_version" >> $GITHUB_OUTPUT
|
||||
|
||||
merge-change-log-to-main:
|
||||
permissions:
|
||||
contents: write # for git push to PR branch
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- release
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Copy change log section from release branch
|
||||
env:
|
||||
|
@ -199,7 +206,7 @@ jobs:
|
|||
sed -n "0,/^## Version $VERSION /d;/^## Version /q;p" CHANGELOG.md \
|
||||
> /tmp/changelog-section.md
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: main
|
||||
|
||||
|
@ -211,14 +218,20 @@ jobs:
|
|||
release_date=$(gh release view v$VERSION --json publishedAt --jq .publishedAt | sed 's/T.*//')
|
||||
RELEASE_DATE=$release_date .github/scripts/merge-change-log-after-release.sh
|
||||
|
||||
- name: Use CLA approved github bot
|
||||
run: .github/scripts/use-cla-approved-github-bot.sh
|
||||
- name: Use CLA approved bot
|
||||
run: .github/scripts/use-cla-approved-bot.sh
|
||||
|
||||
- uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
|
||||
id: otelbot-token
|
||||
with:
|
||||
app-id: ${{ vars.OTELBOT_APP_ID }}
|
||||
private-key: ${{ secrets.OTELBOT_PRIVATE_KEY }}
|
||||
|
||||
- name: Create pull request against main
|
||||
env:
|
||||
VERSION: ${{ needs.release.outputs.version }}
|
||||
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
|
||||
GH_TOKEN: ${{ secrets.BOT_TOKEN }}
|
||||
GH_TOKEN: ${{ steps.otelbot-token.outputs.token }}
|
||||
run: |
|
||||
if git diff --quiet; then
|
||||
if [[ $VERSION == *.0 ]]; then
|
||||
|
@ -232,7 +245,7 @@ jobs:
|
|||
|
||||
message="Merge change log updates from $GITHUB_REF_NAME"
|
||||
body="Merge log updates from \`$GITHUB_REF_NAME\`."
|
||||
branch="opentelemetrybot/merge-change-log-updates-from-${GITHUB_REF_NAME//\//-}"
|
||||
branch="otelbot/merge-change-log-updates-from-${GITHUB_REF_NAME//\//-}"
|
||||
|
||||
git checkout -b $branch
|
||||
git commit -a -m "$message"
|
||||
|
@ -240,12 +253,3 @@ jobs:
|
|||
gh pr create --title "$message" \
|
||||
--body "$body" \
|
||||
--base main
|
||||
|
||||
create-collector-contrib-pull-request:
|
||||
needs:
|
||||
- release
|
||||
uses: ./.github/workflows/reusable-create-collector-contrib-pull-request.yml
|
||||
with:
|
||||
jmx-metrics-version: ${{ needs.release.outputs.jmx-metrics-version }}
|
||||
secrets:
|
||||
BOT_TOKEN: ${{ secrets.BOT_TOKEN }}
|
||||
|
|
|
@ -1,107 +0,0 @@
|
|||
name: Reusable - Create collector contrib pull request
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
jmx-metrics-version:
|
||||
type: string
|
||||
required: true
|
||||
secrets:
|
||||
BOT_TOKEN:
|
||||
required: true
|
||||
# to help with partial release build failures
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
jmx-metrics-version:
|
||||
description: "JMX metrics version"
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
create-collector-contrib-pull-request:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Sync opentelemetry-collector-contrib fork
|
||||
env:
|
||||
# this is the personal access token used for "gh repo sync" below
|
||||
GH_TOKEN: ${{ secrets.BOT_TOKEN }}
|
||||
run: |
|
||||
# synchronizing the fork is fast, and avoids the need to fetch the full upstream repo
|
||||
# (fetching the upstream repo with "--depth 1" would lead to "shallow update not allowed"
|
||||
# error when pushing back to the origin repo)
|
||||
gh repo sync opentelemetrybot/opentelemetry-collector-contrib \
|
||||
--source open-telemetry/opentelemetry-collector-contrib \
|
||||
--force
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: opentelemetrybot/opentelemetry-collector-contrib
|
||||
# this is the personal access token used for "git push" below
|
||||
token: ${{ secrets.BOT_TOKEN }}
|
||||
|
||||
- name: Update version
|
||||
env:
|
||||
JMX_METRICS_VERSION: ${{ inputs.jmx-metrics-version }}
|
||||
run: |
|
||||
if [[ ! $JMX_METRICS_VERSION =~ -alpha$ ]]; then
|
||||
echo currently expecting jmx metrics version to end with "-alpha"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
version=${JMX_METRICS_VERSION//-alpha/}
|
||||
hash=$(curl -L https://github.com/open-telemetry/opentelemetry-java-contrib/releases/download/v$version/opentelemetry-jmx-metrics.jar \
|
||||
| sha256sum \
|
||||
| cut -d ' ' -f 1)
|
||||
|
||||
# NOTE there are intentional tab characters in the line below
|
||||
sed -i "/^var jmxMetricsGathererVersions/a \ \"$hash\": {\n version: \"$JMX_METRICS_VERSION\",\n jar: \"JMX metrics gatherer\",\n }," receiver/jmxreceiver/supported_jars.go
|
||||
git diff
|
||||
|
||||
- name: Use CLA approved github bot
|
||||
run: |
|
||||
# cannot run the use-cla-approved-github-bot.sh script here since in a different repo
|
||||
git config user.name opentelemetrybot
|
||||
git config user.email 107717825+opentelemetrybot@users.noreply.github.com
|
||||
|
||||
- name: Create pull request against opentelemetry-collector-contrib
|
||||
env:
|
||||
JMX_METRICS_VERSION: ${{ inputs.jmx-metrics-version }}
|
||||
# this is the personal access token used for "gh pr create" below
|
||||
GH_TOKEN: ${{ secrets.BOT_TOKEN }}
|
||||
run: |
|
||||
message="Update the jmx-metrics version to $JMX_METRICS_VERSION"
|
||||
# note that @open-telemetry/java-contrib-approvers cannot be used below
|
||||
# because opentelemetrybot is not a member of the OpenTelemetry org,
|
||||
# and so it cannot @ mention OpenTelemetry org groups
|
||||
body="Update the jmx-metrics version to \`$JMX_METRICS_VERSION\`.
|
||||
|
||||
cc @jack-berg @mateuszrzeszutek @rmfitzpatrick @trask
|
||||
"
|
||||
branch="update-opentelemetry-jmx-metrics-to-${JMX_METRICS_VERSION}"
|
||||
|
||||
# gh pr create doesn't have a way to explicitly specify different head and base
|
||||
# repositories currently, but it will implicitly pick up the head from a different
|
||||
# repository if you set up a tracking branch
|
||||
|
||||
git checkout -b $branch
|
||||
git commit -a -m "$message"
|
||||
git push --set-upstream origin $branch
|
||||
url=$(gh pr create --title "$message" \
|
||||
--body "$body" \
|
||||
--repo open-telemetry/opentelemetry-collector-contrib \
|
||||
--base main)
|
||||
|
||||
pull_request_number=${url//*\//}
|
||||
|
||||
# see the template for change log entry file at
|
||||
# https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/.chloggen/TEMPLATE.yaml
|
||||
cat > .chloggen/add-jmx-metrics-gatherer-$JMX_METRICS_VERSION.yaml << EOF
|
||||
change_type: enhancement
|
||||
component: jmxreceiver
|
||||
note: Add the JMX metrics gatherer version $JMX_METRICS_VERSION to the supported jars hash list
|
||||
issues: [ $pull_request_number ]
|
||||
EOF
|
||||
|
||||
git add .chloggen/add-jmx-metrics-gatherer-$JMX_METRICS_VERSION.yaml
|
||||
|
||||
git commit -m "Add change log entry"
|
||||
git push
|
|
@ -0,0 +1,45 @@
|
|||
name: Reusable - Link check
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
link-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0 # needed for merge-base below
|
||||
|
||||
- name: Link check - relative links (all files)
|
||||
if: github.event_name == 'pull_request'
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
run: ./.github/scripts/link-check.sh --local-links-only
|
||||
|
||||
- name: Get modified files
|
||||
if: github.event_name == 'pull_request'
|
||||
id: modified-files
|
||||
run: |
|
||||
merge_base=$(git merge-base origin/${{ github.base_ref }} HEAD)
|
||||
# Using lychee's default extension filter here to match when it runs against all files
|
||||
modified_files=$(git diff --name-only $merge_base...${{ github.event.pull_request.head.sha }} \
|
||||
| grep -E '\.(md|mkd|mdx|mdown|mdwn|mkdn|mkdown|markdown|html|htm|txt)$' \
|
||||
| tr '\n' ' ' || true)
|
||||
echo "files=$modified_files" >> $GITHUB_OUTPUT
|
||||
echo "Modified files: $modified_files"
|
||||
|
||||
- name: Link check - all links (modified files only)
|
||||
if: github.event_name == 'pull_request' && steps.modified-files.outputs.files != ''
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
run: ./.github/scripts/link-check.sh ${{ steps.modified-files.outputs.files }}
|
||||
|
||||
- name: Link check - all links (all files)
|
||||
if: github.event_name != 'pull_request'
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
run: ./.github/scripts/link-check.sh
|
|
@ -1,21 +0,0 @@
|
|||
name: Reusable - Markdown link check
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
markdown-link-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install markdown-link-check
|
||||
# TODO(jack-berg): use latest when config file reading bug is fixed: https://github.com/tcort/markdown-link-check/issues/246
|
||||
run: npm install -g markdown-link-check@3.10.3
|
||||
|
||||
- name: Run markdown-link-check
|
||||
run: |
|
||||
find . -type f \
|
||||
-name '*.md' \
|
||||
-not -path './CHANGELOG.md' \
|
||||
| xargs .github/scripts/markdown-link-check-with-retry.sh
|
|
@ -3,15 +3,15 @@ name: Reusable - Markdown lint check
|
|||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
markdown-lint-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install markdownlint
|
||||
run: npm install -g markdownlint-cli
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Run markdownlint
|
||||
run: |
|
||||
markdownlint -c .github/config/markdown-lint-config.yml **/*.md
|
||||
npx markdownlint-cli@0.45.0 -c .github/config/markdownlint.yml **/*.md
|
||||
|
|
|
@ -3,11 +3,14 @@ name: Reusable - Misspell check
|
|||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
misspell-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Install misspell
|
||||
run: |
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
name: Reusable - Open issue on workflow failure
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
open-issue:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Open issue
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh issue create --title "$GITHUB_WORKFLOW #$GITHUB_RUN_NUMBER failed" \
|
||||
--label bug \
|
||||
--body "See [$GITHUB_WORKFLOW #$GITHUB_RUN_NUMBER](https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID)."
|
|
@ -3,11 +3,14 @@ name: Reusable - Shell script check
|
|||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
shell-script-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Install shell check
|
||||
run: wget -qO- "https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz" | tar -xJv
|
||||
|
|
|
@ -0,0 +1,44 @@
|
|||
# this is useful because notifications for scheduled workflows are only sent to the user who
|
||||
# initially created the given workflow
|
||||
name: Reusable - Workflow notification
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
success:
|
||||
type: boolean
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
workflow-notification:
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Open issue or add comment if issue already open
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
# TODO (trask) search doesn't support exact phrases, so it's possible that this could grab the wrong issue
|
||||
number=$(gh issue list --search "in:title Workflow failed: $GITHUB_WORKFLOW" --limit 1 --json number -q .[].number)
|
||||
|
||||
echo $number
|
||||
echo ${{ inputs.success }}
|
||||
|
||||
if [[ $number ]]; then
|
||||
if [[ "${{ inputs.success }}" == "true" ]]; then
|
||||
gh issue close $number
|
||||
else
|
||||
gh issue comment $number \
|
||||
--body "See [$GITHUB_WORKFLOW #$GITHUB_RUN_NUMBER](https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID)."
|
||||
fi
|
||||
elif [[ "${{ inputs.success }}" == "false" ]]; then
|
||||
gh issue create --title "Workflow failed: $GITHUB_WORKFLOW (#$GITHUB_RUN_NUMBER)" \
|
||||
--body "See [$GITHUB_WORKFLOW #$GITHUB_RUN_NUMBER](https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID)."
|
||||
fi
|
570
CHANGELOG.md
570
CHANGELOG.md
|
@ -2,6 +2,576 @@
|
|||
|
||||
## Unreleased
|
||||
|
||||
## Version 1.47.0 (2025-07-04)
|
||||
|
||||
### Disk buffering
|
||||
|
||||
- Shared storage
|
||||
([#1912](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1912))
|
||||
- Implementing ExtendedLogRecordData
|
||||
([#1918](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1918))
|
||||
- Add missing EventName to disk-buffering LogRecordDataMapper
|
||||
([#1950](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1950))
|
||||
|
||||
### GCP authentication extension
|
||||
|
||||
- Update the internal implementation such that the required headers are retrieved
|
||||
from the Google Auth Library instead of manually constructing and passing them.
|
||||
([#1860](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1860))
|
||||
- Add metrics support to auth extension
|
||||
([#1891](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1891))
|
||||
- Update ConfigurableOptions to read from ConfigProperties
|
||||
([#1904](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1904))
|
||||
|
||||
### Inferred spans
|
||||
|
||||
- Upgrade async-profiler to 4.0
|
||||
([#1872](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1872))
|
||||
|
||||
### Kafka exporter
|
||||
|
||||
- Upgrade kafka-clients to 4.0 (and so now requires Java 11+)
|
||||
([#1802](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1802))
|
||||
|
||||
### Maven extension
|
||||
|
||||
- Add option to record transferred artifacts
|
||||
([#1875](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1875))
|
||||
|
||||
## Version 1.46.0 (2025-04-11)
|
||||
|
||||
### Baggage processor
|
||||
|
||||
- Remove the deprecated and unused bare Predicate
|
||||
([#1828](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1828))
|
||||
|
||||
### Telemetry processors
|
||||
|
||||
- Add logs filtering
|
||||
([#1823](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1823))
|
||||
|
||||
## Version 1.45.0 (2025-03-14)
|
||||
|
||||
### Disk buffering
|
||||
|
||||
- Make configuration package public
|
||||
([#1781](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1781))
|
||||
|
||||
### JMX scraper
|
||||
|
||||
- Reuse instrumentation metrics by default
|
||||
([#1782](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1782))
|
||||
|
||||
## Version 1.44.0 (2025-02-21)
|
||||
|
||||
### AWS resources
|
||||
|
||||
- Changed resource attribute `container.image.tag` to `container.image.tags`
|
||||
([#1736](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1736))
|
||||
|
||||
### AWS X-Ray propagator
|
||||
|
||||
- Make `xray-lambda` propagator available via SPI
|
||||
([#1669](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1669))
|
||||
- Support Lineage in XRay trace header and remove additional baggage from being added
|
||||
([#1671](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1671))
|
||||
|
||||
### CloudFoundry resources - New 🌟
|
||||
|
||||
CloudFoundry resource detector.
|
||||
|
||||
### Disk buffering
|
||||
|
||||
- Use delegate's temporality
|
||||
([#1672](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1672))
|
||||
|
||||
### GCP authentication extension
|
||||
|
||||
- Publish both shaded and unshaded variants
|
||||
([#1688](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1688))
|
||||
|
||||
### JMX metrics
|
||||
|
||||
- Updated Hadoop metric unit definitions to align with semantic conventions
|
||||
([#1675](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1675))
|
||||
- Updated Kafka metric unit definitions to align with semantic conventions
|
||||
([#1670](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1670))
|
||||
|
||||
### JMX scraper
|
||||
|
||||
- Use SDK autoconfigure module
|
||||
([#1651](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1651))
|
||||
- Rename `otel.jmx.custom.scraping.config` to `otel.jmx.config` in order to align
|
||||
with `io.opentelemetry.instrumentation:opentelemetry-jmx-metrics`
|
||||
([#1678](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1678))
|
||||
- Hadoop metrics added
|
||||
([#1675](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1675))
|
||||
- Add a CLI option to test the connection
|
||||
([#1684](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1684))
|
||||
- Kafka server, producer, and consumer metrics added
|
||||
([#1670](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1670))
|
||||
- Add custom YAML support
|
||||
([#1741](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1741))
|
||||
- Add SSL support
|
||||
([#1710](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1710))
|
||||
- Replicate JMXMP/SASL config from the JMX metrics module
|
||||
([#1749](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1749))
|
||||
|
||||
### Maven extension
|
||||
|
||||
- Support Maven 4.0
|
||||
([#1679](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1679))
|
||||
|
||||
### Processors
|
||||
|
||||
- Changed `EventToSpanEventBridge` from reading `event.name` to reading the new LogRecord
|
||||
[EventName](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-eventname)
|
||||
field.
|
||||
([#1736](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1736))
|
||||
|
||||
### Static instrumenter
|
||||
|
||||
- Module has been removed
|
||||
([#1755](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1755))
|
||||
|
||||
## Version 1.43.0 (2025-01-17)
|
||||
|
||||
### Azure resources - New 🌟
|
||||
|
||||
Azure resource detectors.
|
||||
|
||||
### Consistent sampling
|
||||
|
||||
- Improve interop with legacy samplers
|
||||
([#1629](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1629))
|
||||
|
||||
### GCP authentication extension - New 🌟
|
||||
|
||||
Allows users to export telemetry from their applications to Google Cloud using the built-in OTLP exporters.
|
||||
The extension takes care of the necessary configuration required to authenticate to GCP to successfully export telemetry.
|
||||
|
||||
### JMX scraper
|
||||
|
||||
- Add support for Solr
|
||||
([#1595](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1595))
|
||||
|
||||
## Version 1.42.0 (2024-12-13)
|
||||
|
||||
### AWS X-Ray SDK support
|
||||
|
||||
- Update semconv dependency version
|
||||
([#1585](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1585))
|
||||
|
||||
### Baggage processor
|
||||
|
||||
- [baggage-processor] Add BaggageLogRecordProcessor
|
||||
([#1576](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1576))
|
||||
|
||||
### Disk buffering
|
||||
|
||||
- Deserialization validation
|
||||
([#1571](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1571))
|
||||
|
||||
### JMX metrics
|
||||
|
||||
- Align HBase metric units to semconv
|
||||
([#1538](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1538))
|
||||
- Align Cassandra metric units to semconv
|
||||
([#1591](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1591))
|
||||
- Align Tomcat metric units to semconv
|
||||
([#1589](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1589))
|
||||
- Align JVM units to semconv
|
||||
([#1593](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1593))
|
||||
|
||||
### JMX scraper - New 🌟
|
||||
|
||||
The future of the [JMX metrics](./jmx-metrics/README.md) component,
|
||||
built on top of the
|
||||
[JMX metrics](https://github.com/open-telemetry/opentelemetry-java-instrumentation/tree/main/instrumentation/jmx-metrics/javaagent#jmx-metric-insight)
|
||||
component from the opentelemetry-java-instrumentation repository.
|
||||
|
||||
### Maven extension
|
||||
|
||||
- Load OTel SDK config from environment variables and system properties
|
||||
([#1434](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1434))
|
||||
- Workaround `NoClassDefFoundError` in `@PreDestroy` waiting for MNG-7056
|
||||
([#1431](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1431))
|
||||
|
||||
## Version 1.41.0 (2024-11-21)
|
||||
|
||||
### JMX metrics
|
||||
|
||||
- Align ActiveMQ metric units to semconv
|
||||
([#1553](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1553))
|
||||
- Align Jetty metric units to semconv
|
||||
([#1517](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1517))
|
||||
|
||||
### Inferred spans
|
||||
|
||||
- Allow customization of parent-override behavior
|
||||
([#1533](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1533))
|
||||
|
||||
### Telemetry processors
|
||||
|
||||
- Add LogRecordProcessor to record event log records as span events
|
||||
([#1551](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1551))
|
||||
|
||||
## Version 1.40.0 (2024-10-18)
|
||||
|
||||
### AWS X-Ray SDK support
|
||||
|
||||
- Ensure all XRay Sampler functionality is under ParentBased logic
|
||||
([#1488](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1488))
|
||||
|
||||
### GCP Resources
|
||||
|
||||
- Add gcr job support
|
||||
([#1462](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1462))
|
||||
|
||||
### Inferred spans
|
||||
|
||||
- Rename param and description to proper value
|
||||
([#1486](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1486))
|
||||
|
||||
### JFR connection
|
||||
|
||||
- Fix wrong parameter sent to JFR DiagnosticCommand
|
||||
([#1492](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1492))
|
||||
|
||||
### Span stack traces
|
||||
|
||||
- Support autoconfigure
|
||||
([#1499](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1499))
|
||||
|
||||
## Version 1.39.0 (2024-09-17)
|
||||
|
||||
### AWS X-Ray propagator
|
||||
|
||||
- Handle too short `X-Amzn-Trace-Id` header
|
||||
([#1036](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1036))
|
||||
- Add declarative config support for aws xray propagators
|
||||
([#1442](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1442))
|
||||
|
||||
### AWS X-Ray SDK support
|
||||
|
||||
- Fix native mode error caused by static init of random
|
||||
([#862](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/862))
|
||||
|
||||
### Consistent sampling
|
||||
|
||||
- Composite Samplers prototype
|
||||
([#1443](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1443))
|
||||
|
||||
### Disk buffering
|
||||
|
||||
- Add debug mode for verbose logging
|
||||
([#1455](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1455))
|
||||
|
||||
### GCP Resources
|
||||
|
||||
- Fix incorrect `cloud.platform` value for GCF
|
||||
([#1454](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1454))
|
||||
|
||||
### JMX metrics
|
||||
|
||||
- Add option to aggregate across multiple MBeans
|
||||
([#1366](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1366))
|
||||
|
||||
### Samplers
|
||||
|
||||
- Add declarative config support for `RuleBasedRoutingSampler`
|
||||
([#1440](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1440))
|
||||
|
||||
### Span stack traces
|
||||
|
||||
- Add config option `otel.java.experimental.span-stacktrace.min.duration`
|
||||
([#1414](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1414))
|
||||
|
||||
## Version 1.38.0 (2024-08-19)
|
||||
|
||||
### JFR connection
|
||||
|
||||
- Recording close should not throw exception
|
||||
([#1412](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1412))
|
||||
|
||||
## Version 1.37.0 (2024-07-18)
|
||||
|
||||
### AWS resources
|
||||
|
||||
- Add ECS cluster detection
|
||||
([#1354](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1354))
|
||||
|
||||
### Baggage processor
|
||||
|
||||
- Add config support
|
||||
([#1330](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1330))
|
||||
|
||||
### Inferred spans - New 🌟
|
||||
|
||||
An OpenTelemetry extension for generating spans via profiling instead of instrumentation.
|
||||
This extension enhances traces by
|
||||
running [async-profiler](https://github.com/async-profiler/async-profiler) in wall-clock profiling
|
||||
mode
|
||||
whenever there is an active sampled OpenTelemetry span.
|
||||
|
||||
The resulting profiling data is analyzed afterward and spans are "inferred".
|
||||
This means there is a delay between the regular and the inferred spans being visible
|
||||
in your OpenTelemetry backend/UI.
|
||||
|
||||
### JFR connection
|
||||
|
||||
- Fix for using diagnostic command to start a recording
|
||||
([#1352](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1352))
|
||||
|
||||
### JMX metrics
|
||||
|
||||
- Support both a script and target systems
|
||||
([#1339](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1339))
|
||||
|
||||
## Version 1.36.0 (2024-05-29)
|
||||
|
||||
### AWS resources
|
||||
|
||||
- Optimization: don't attempt detection if a cloud provider has already been detected
|
||||
([#1225](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1225))
|
||||
|
||||
### Baggage processor - New 🌟
|
||||
|
||||
This module provides a SpanProcessor that stamps baggage onto spans as attributes on start.
|
||||
|
||||
### Consistent sampling
|
||||
|
||||
- Assume random trace ID and set th-field only for sampled spans
|
||||
([#1278](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1278))
|
||||
|
||||
### GCP Resources
|
||||
|
||||
- Optimization: don't attempt detection if a cloud provider has already been detected
|
||||
([#1225](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1225))
|
||||
- Update guidance for manual instrumentation usage
|
||||
([#1250](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1250))
|
||||
|
||||
### JMX metrics
|
||||
|
||||
- Remove `slf4j-simple` dependency
|
||||
([#1283](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1283))
|
||||
|
||||
### Maven extension
|
||||
|
||||
- Disable metrics and logs by default
|
||||
([#1276](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1276))
|
||||
- Migrate to current semconv
|
||||
([#1299](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1299))
|
||||
- Migrate from Plexus to JSR 330 dependency injection APIs
|
||||
([#1320](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1320))
|
||||
|
||||
### Span stack trace
|
||||
|
||||
- Enable publishing to maven central
|
||||
([#1297](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1297))
|
||||
|
||||
## Version 1.35.0 (2024-04-16)
|
||||
|
||||
### JMX metrics
|
||||
|
||||
- Add support for newly named Tomcat MBean with Spring
|
||||
([#1269](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1269))
|
||||
|
||||
### Span stack traces - New 🌟
|
||||
|
||||
This module provides a SpanProcessor that captures stack traces on spans that meet
|
||||
a certain criteria such as exceeding a certain duration threshold.
|
||||
|
||||
## Version 1.34.0 (2024-03-27)
|
||||
|
||||
### AWS resources
|
||||
|
||||
- Add support for `cloud.account.id`, `cloud.availability_zone`, `cloud.region`,
|
||||
and `cloud.resource_id`
|
||||
([#1171](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1171))
|
||||
|
||||
### AWS X-Ray propagator
|
||||
|
||||
- Add xray propagators that prioritize the xray environment variable
|
||||
([#1032](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1032))
|
||||
|
||||
### GCP Resources
|
||||
|
||||
- Update docs on how to use with Java agent v2.2.0 and later
|
||||
([#1237](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1237))
|
||||
|
||||
### Micrometer MeterProvider
|
||||
|
||||
- Implement Metrics incubator APIs to accept advice
|
||||
([#1190](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1190))
|
||||
|
||||
## Version 1.33.0 (2024-02-21)
|
||||
|
||||
### Compressors
|
||||
|
||||
- Add zstd compressor implementation for OTLP exporters
|
||||
([#1108](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1108))
|
||||
|
||||
### Consistent sampling
|
||||
|
||||
- Switch from acceptance to rejection threshold
|
||||
([#1130](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1130))
|
||||
|
||||
### Disk buffering
|
||||
|
||||
- Shadowing generated proto java sources
|
||||
([#1146](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1146))
|
||||
- Single responsibility for disk exporters
|
||||
([#1161](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1161))
|
||||
- Split serializer
|
||||
([#1167](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1167))
|
||||
- Disk buffering config and README updates
|
||||
([#1169](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1169))
|
||||
- Ensure no sign propagation for flags byte
|
||||
([#1166](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1166))
|
||||
|
||||
### GCP Resources - New 🌟
|
||||
|
||||
This module provides GCP resource detectors for OpenTelemetry.
|
||||
|
||||
### JMX metrics
|
||||
|
||||
- Add Error handling for closure parameters
|
||||
([#1102](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1102))
|
||||
- Add `kafka.request.time.avg`
|
||||
([#1135](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1135))
|
||||
|
||||
### Kafka exporter - New 🌟
|
||||
|
||||
This module contains `KafkaSpanExporter`, which is an implementation of the
|
||||
`io.opentelemetry.sdk.trace.export.SpanExporter` interface.
|
||||
|
||||
`KafkaSpanExporter` can be used for sending `SpanData` to a Kafka topic.
|
||||
|
||||
## Version 1.32.0 (2023-11-27)
|
||||
|
||||
### Disk buffering
|
||||
|
||||
- Using Android 21 as minimum supported for disk-buffering
|
||||
([#1096](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1096))
|
||||
|
||||
## Version 1.31.0 (2023-10-18)
|
||||
|
||||
### Consistent sampling
|
||||
|
||||
- Explicitly pass invalid p-value to root sampler
|
||||
([#1053](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1053))
|
||||
- Consistent sampler prototypes using 56 bits of randomness
|
||||
([#1063](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1063))
|
||||
|
||||
### Runtime attach
|
||||
|
||||
- Rename runtime attach method from `attachJavaagentToCurrentJVM`
|
||||
to `attachJavaagentToCurrentJvm`
|
||||
([#1077](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1077))
|
||||
|
||||
### Samplers
|
||||
|
||||
- Support `thread.name` attributes in RuleBasedRoutingSampler
|
||||
([#1030](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1030))
|
||||
|
||||
## Version 1.30.0 (2023-09-18)
|
||||
|
||||
### Disk buffering
|
||||
|
||||
- Remove protobuf dependency
|
||||
([#1008](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1008))
|
||||
|
||||
### Maven extension
|
||||
|
||||
- Disable OTel SDK shutdown hook registration
|
||||
([#1022](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1022))
|
||||
|
||||
### Telemetry processors - New 🌟
|
||||
|
||||
This module contains tools for globally processing telemetry, including modifying and filtering
|
||||
telemetry.
|
||||
|
||||
## Version 1.29.0 (2023-08-23)
|
||||
|
||||
### Consistent sampling
|
||||
|
||||
- Add a provider for consistent parent based probability sampler
|
||||
([#1005](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1005))
|
||||
|
||||
### Disk buffering
|
||||
|
||||
- Migrate StorageFile to FileOperations
|
||||
([#986](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/986))
|
||||
|
||||
### JMX metrics
|
||||
|
||||
- [jmx-metrics] Collect in callback
|
||||
([#949](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/949))
|
||||
- Added transformation closure to MBeanHelper
|
||||
([#960](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/960))
|
||||
|
||||
## Version 1.28.0 (2023-07-14)
|
||||
|
||||
### AWS X-Ray SDK support
|
||||
|
||||
- Generate error/fault metrics by AWS SDK status code
|
||||
([#924](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/924))
|
||||
|
||||
### Disk buffering - New 🌟
|
||||
|
||||
This module provides signal exporter wrappers that intercept and store telemetry signals in files
|
||||
which can be sent later on demand.
|
||||
|
||||
## Version 1.27.0 (2023-06-16)
|
||||
|
||||
### AWS X-Ray SDK support
|
||||
|
||||
- Enhance AWS APM metrics mapping implementation
|
||||
([#906](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/906))
|
||||
|
||||
### Samplers
|
||||
|
||||
- Links based sampler
|
||||
([#813](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/813),
|
||||
[#903](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/903))
|
||||
|
||||
## Version 1.26.0 (2023-05-17)
|
||||
|
||||
### AWS X-Ray SDK support
|
||||
|
||||
- Add AttributePropagatingSpanProcessor component to AWS X-Ray
|
||||
([#856](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/856))
|
||||
- Add new components to allow for generating metrics from 100% of spans without impacting sampling
|
||||
([#802](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/802))
|
||||
|
||||
### JMX metrics
|
||||
|
||||
- Adding support for scenarios where the RMI registry has SSL enabled
|
||||
([#835](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/835))
|
||||
|
||||
## Version 1.25.1 (2023-04-21)
|
||||
|
||||
### 🛠️ Bug fixes
|
||||
|
||||
- Previously targeted OpenTelemetry SDK and Instrumentation versions had never been updated to
|
||||
target OpenTelemetry SDK 1.25
|
||||
|
||||
## Version 1.25.0 (2023-04-18)
|
||||
|
||||
### AWS X-Ray SDK support
|
||||
|
||||
- Breakout ResourceHolder from AwsXrayRemoteSamplerProvider
|
||||
([#801](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/801))
|
||||
|
||||
### JFR connection - New 🌟
|
||||
|
||||
- JFR connection is a library to allow configuration and control of JFR
|
||||
without depending on jdk.jfr.
|
||||
This is a contribution of https://github.com/microsoft/jfr-streaming.
|
||||
|
||||
## Version 1.24.0 (2023-04-03)
|
||||
|
||||
### Maven extension
|
||||
|
||||
- [maven-extension] Emit a warning rather than failing the build with an exception on illegal state
|
||||
|
|
|
@ -1,12 +1,38 @@
|
|||
## Contributing
|
||||
# Contributing
|
||||
|
||||
Pull requests for bug fixes are always welcome!
|
||||
Welcome to the OpenTelemetry Java Contrib Repository!
|
||||
|
||||
## Introduction
|
||||
|
||||
This repository focuses on providing tools and utilities for Java-based observability, such as remote JMX metric gathering and reporting. We’re excited to have you here! Whether you’re fixing a bug, adding a feature, or suggesting an idea, your contributions are invaluable.
|
||||
|
||||
Before submitting new features or changes to current functionality, it is recommended to first
|
||||
[open an issue](https://github.com/open-telemetry/opentelemetry-java-contrib/issues/new)
|
||||
and discuss your ideas or propose the changes you wish to make.
|
||||
|
||||
### Building
|
||||
Questions? Ask in the OpenTelemetry [java channel](https://cloud-native.slack.com/archives/C014L2KCTE3)
|
||||
|
||||
Pull requests for bug fixes are always welcome!
|
||||
|
||||
## Pre-requisites
|
||||
|
||||
To work with this repository, ensure you have:
|
||||
|
||||
### Tools:
|
||||
|
||||
Java 17 or higher
|
||||
|
||||
### Platform Notes:
|
||||
|
||||
macOS/Linux: Ensure JAVA_HOME is set correctly.
|
||||
|
||||
## Workflow
|
||||
|
||||
1. Fork the repository
|
||||
2. Clone locally
|
||||
3. Create a branch before working on an issue
|
||||
|
||||
## Local Run/Build
|
||||
|
||||
In order to build and test this whole repository you need JDK 11+.
|
||||
|
||||
|
@ -14,22 +40,43 @@ In order to build and test this whole repository you need JDK 11+.
|
|||
|
||||
For developers testing code changes before a release is complete, there are
|
||||
snapshot builds of the `main` branch. They are available from
|
||||
the Sonatype OSS snapshots repository at `https://oss.sonatype.org/content/repositories/snapshots/`
|
||||
([browse](https://oss.sonatype.org/content/repositories/snapshots/io/opentelemetry/contrib/))
|
||||
the Sonatype snapshot repository at `https://central.sonatype.com/repository/maven-snapshots/`
|
||||
([browse](https://central.sonatype.com/service/rest/repository/browse/maven-snapshots/io/opentelemetry/contrib/)).
|
||||
|
||||
#### Building from source
|
||||
|
||||
Building using Java 11+:
|
||||
|
||||
```bash
|
||||
java -version
|
||||
$ java -version
|
||||
```
|
||||
|
||||
```bash
|
||||
./gradlew assemble
|
||||
$ ./gradlew assemble
|
||||
```
|
||||
|
||||
### Style guide
|
||||
## Testing
|
||||
|
||||
See the [Style guide](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/docs/contributing/style-guideline.md)
|
||||
from the opentelemetry-java-instrumentation repository.
|
||||
```bash
|
||||
$ ./gradlew test
|
||||
```
|
||||
|
||||
### Some modules have integration tests
|
||||
|
||||
```
|
||||
$ ./gradlew integrationTest
|
||||
```
|
||||
|
||||
Follow the Java Instrumentation [Style Guide](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/docs/contributing/style-guideline.md) from the opentelemetry-java-instrumentation repository.
|
||||
|
||||
Failure? Check logs for errors or mismatched dependencies.
|
||||
|
||||
## Gradle conventions
|
||||
|
||||
- Use kotlin instead of groovy
|
||||
- Plugin versions should be specified in `settings.gradle.kts`, not in individual modules
|
||||
- All modules use `plugins { id("otel.java-conventions") }`
|
||||
|
||||
## Further Help
|
||||
|
||||
Join [#otel-java](https://cloud-native.slack.com/archives/C014L2KCTE3) on OpenTelemetry Slack
|
||||
|
|
89
README.md
89
README.md
|
@ -1,5 +1,10 @@
|
|||
# OpenTelemetry Java Contrib
|
||||
[](https://github.com/open-telemetry/opentelemetry-java-contrib/actions/workflows/build.yml)
|
||||
|
||||
[](https://github.com/open-telemetry/opentelemetry-java-contrib/releases/)
|
||||
[](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-java-contrib?ref=badge_shield&issueType=license)
|
||||
[](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-java-contrib?ref=badge_shield&issueType=security)
|
||||
[](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-java-contrib)
|
||||
[](https://cloud-native.slack.com/archives/C014L2KCTE3)
|
||||
|
||||
This project is intended to provide helpful libraries and standalone OpenTelemetry-based utilities that don't fit
|
||||
the express scope of the [OpenTelemetry Java](https://github.com/open-telemetry/opentelemetry-java) or
|
||||
|
@ -9,15 +14,38 @@ feature or via instrumentation, this project is hopefully for you.
|
|||
|
||||
## Provided Libraries
|
||||
|
||||
* [AWS Resources](./aws-resources/README.md)
|
||||
* [AWS X-Ray SDK Support](./aws-xray/README.md)
|
||||
* [AWS X-Ray Propagator](./aws-xray-propagator/README.md)
|
||||
* [Consistent sampling](./consistent-sampling/README.md)
|
||||
* [JFR Streaming](./jfr-streaming/README.md)
|
||||
* [JMX Metric Gatherer](./jmx-metrics/README.md)
|
||||
* [OpenTelemetry Maven Extension](./maven-extension/README.md)
|
||||
* [Runtime Attach](./runtime-attach/README.md)
|
||||
* [Samplers](./samplers/README.md)
|
||||
| Status* | Library |
|
||||
|---------|-------------------------------------------------------------------|
|
||||
| beta | [AWS Resources](./aws-resources/README.md) |
|
||||
| stable | [AWS X-Ray SDK Support](./aws-xray/README.md) |
|
||||
| alpha | [AWS X-Ray Propagator](./aws-xray-propagator/README.md) |
|
||||
| alpha | [Baggage Processors](./baggage-processor/README.md) |
|
||||
| alpha | [zstd Compressor](./compressors/compressor-zstd/README.md) |
|
||||
| alpha | [Consistent Sampling](./consistent-sampling/README.md) |
|
||||
| alpha | [Disk Buffering](./disk-buffering/README.md) |
|
||||
| alpha | [GCP Authentication Extension](./gcp-auth-extension/README.md) |
|
||||
| beta | [GCP Resources](./gcp-resources/README.md) |
|
||||
| beta | [Inferred Spans](./inferred-spans/README.md) |
|
||||
| alpha | [JFR Connection](./jfr-connection/README.md) |
|
||||
| alpha | [JFR Events](./jfr-events/README.md) |
|
||||
| alpha | [JMX Metric Gatherer](./jmx-metrics/README.md) |
|
||||
| alpha | [JMX Metric Scraper](./jmx-scraper/README.md) |
|
||||
| alpha | [Kafka Support](./kafka-exporter/README.md) |
|
||||
| alpha | [OpenTelemetry Maven Extension](./maven-extension/README.md) |
|
||||
| alpha | [Micrometer MeterProvider](./micrometer-meter-provider/README.md) |
|
||||
| alpha | [No-Op API](./noop-api/README.md) |
|
||||
| alpha | [Intercept and Process Signals Globally](./processors/README.md) |
|
||||
| alpha | [Prometheus Client Bridge](./prometheus-client-bridge/README.md) |
|
||||
| alpha | [Resource Providers](./resource-providers/README.md) |
|
||||
| alpha | [Runtime Attach](./runtime-attach/README.md) |
|
||||
| alpha | [Samplers](./samplers/README.md) |
|
||||
| beta | [Span Stacktrace Capture](./span-stacktrace/README.md) |
|
||||
|
||||
\* `alpha`, `beta` and `stable` are currently used to denote library status per [otep 0232](https://github.com/open-telemetry/oteps/blob/main/text/0232-maturity-of-otel.md).
|
||||
To reach stable status, the library needs to have stable APIs, stable semantic conventions, and be production ready.
|
||||
On reaching stable status, the `otel.stable` value in `gradle.properties` should be set to `true`.
|
||||
Note that currently all the libraries are released together with the version of this repo, so breaking changes (after stable
|
||||
status is reached) would bump the major version of all libraries together. This could get complicated so `stable` has a high bar.
|
||||
|
||||
## Getting Started
|
||||
|
||||
|
@ -44,30 +72,37 @@ domain we would be very interested in supporting it. Please
|
|||
suggestion. PRs are always welcome and greatly appreciated, but for larger functional changes a pre-coding introduction
|
||||
can be helpful to ensure this is the correct place and that active or conflicting efforts don't exist.
|
||||
|
||||
Triagers ([@open-telemetry/java-contrib-triagers](https://github.com/orgs/open-telemetry/teams/java-contrib-triagers)):
|
||||
### Maintainers
|
||||
|
||||
- [Jack Berg](https://github.com/jack-berg), New Relic
|
||||
- [Jason Plumb](https://github.com/breedx-splk), Splunk
|
||||
- [Lauri Tulmin](https://github.com/laurit), Splunk
|
||||
- [Trask Stalnaker](https://github.com/trask), Microsoft
|
||||
|
||||
For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
|
||||
|
||||
### Approvers
|
||||
|
||||
- [John Watson](https://github.com/jkwatson), Verta.ai
|
||||
|
||||
For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
|
||||
|
||||
### Triagers
|
||||
|
||||
- All [component owners](https://github.com/open-telemetry/opentelemetry-java-contrib/blob/main/.github/component_owners.yml) are given Triager permissions to this repository.
|
||||
|
||||
Approvers ([@open-telemetry/java-contrib-approvers](https://github.com/orgs/open-telemetry/teams/java-contrib-approvers)):
|
||||
For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager).
|
||||
|
||||
- [John Watson](https://github.com/jkwatson), Verta.ai
|
||||
- [Lauri Tulmin](https://github.com/laurit), Splunk
|
||||
### Emeritus maintainers
|
||||
|
||||
Maintainers ([@open-telemetry/java-contrib-maintainers](https://github.com/orgs/open-telemetry/teams/java-contrib-maintainers)):
|
||||
- [Mateusz Rzeszutek](https://github.com/mateuszrzeszutek)
|
||||
- [Nikita Salnikov-Tarnovski](https://github.com/iNikem)
|
||||
- [Ryan Fitzpatrick](https://github.com/rmfitzpatrick)
|
||||
|
||||
- [Jack Berg](https://github.com/jack-berg), New Relic
|
||||
- [Mateusz Rzeszutek](https://github.com/mateuszrzeszutek), Splunk
|
||||
- [Nikita Salnikov-Tarnovski](https://github.com/iNikem), Splunk
|
||||
- [Trask Stalnaker](https://github.com/trask), Microsoft
|
||||
For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
|
||||
|
||||
Emeritus maintainers:
|
||||
|
||||
- [Ryan Fitzpatrick](https://github.com/rmfitzpatrick), Splunk
|
||||
|
||||
Learn more about roles in the [community repository](https://github.com/open-telemetry/community/blob/master/community-membership.md).
|
||||
|
||||
Thanks to all the people who already contributed!
|
||||
### Thanks to all of our contributors!
|
||||
|
||||
<a href="https://github.com/open-telemetry/opentelemetry-java-contrib/graphs/contributors">
|
||||
<img src="https://contributors-img.web.app/image?repo=open-telemetry/opentelemetry-java-contrib" />
|
||||
<img alt="Repo contributors" src="https://contrib.rocks/image?repo=open-telemetry/opentelemetry-java-contrib" />
|
||||
</a>
|
||||
|
|
10
RELEASING.md
10
RELEASING.md
|
@ -7,8 +7,8 @@ The version is specified in [version.gradle.kts](version.gradle.kts).
|
|||
## Snapshot builds
|
||||
|
||||
Every successful CI build of the main branch automatically executes `./gradlew publishToSonatype`
|
||||
as the last step, which publishes a snapshot build to
|
||||
[Sonatype OSS snapshots repository](https://oss.sonatype.org/content/repositories/snapshots/io/opentelemetry/contrib/).
|
||||
as the last step, which publishes a snapshot build to the
|
||||
[Sonatype snapshot repository](https://central.sonatype.com/service/rest/repository/browse/maven-snapshots/io/opentelemetry/contrib/).
|
||||
|
||||
## Release cadence
|
||||
|
||||
|
@ -18,8 +18,10 @@ the second Monday of the month (roughly a couple of days after the monthly minor
|
|||
|
||||
## Preparing a new major or minor release
|
||||
|
||||
* Check that [dependabot has run](https://github.com/open-telemetry/opentelemetry-java-contrib/network/updates)
|
||||
sometime in the past day.
|
||||
* Check that [renovate has run](https://developer.mend.io/github/open-telemetry/opentelemetry-java-contrib)
|
||||
sometime in the past day and that all
|
||||
[renovate PRs](https://github.com/open-telemetry/opentelemetry-java-contrib/pulls/app%2Frenovate)
|
||||
have been merged.
|
||||
* Check that the OpenTelemetry SDK and Instrumentation versions have been updated to the latest release.
|
||||
* Close the [release milestone](https://github.com/open-telemetry/opentelemetry-java-contrib/milestones)
|
||||
if there is one.
|
||||
|
|
|
@ -58,3 +58,7 @@ afterEvaluate {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
dependencyCheck {
|
||||
skip = true
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ This module contains AWS resource detectors including Beanstalk, EC2, ECS, EKS,
|
|||
|
||||
## Component owners
|
||||
|
||||
- [William Armiros](https://github.com/willarmiros), AWS
|
||||
- [Lei Wang](https://github.com/wangzlei), AWS
|
||||
- [Prashant Srivastava](https://github.com/srprash), AWS
|
||||
|
||||
Learn more about component owners in [component_owners.yml](../.github/component_owners.yml).
|
||||
|
|
|
@ -11,7 +11,8 @@ dependencies {
|
|||
api("io.opentelemetry:opentelemetry-api")
|
||||
api("io.opentelemetry:opentelemetry-sdk")
|
||||
|
||||
implementation("io.opentelemetry:opentelemetry-semconv")
|
||||
implementation("io.opentelemetry.semconv:opentelemetry-semconv")
|
||||
testImplementation("io.opentelemetry.semconv:opentelemetry-semconv-incubating")
|
||||
|
||||
compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure")
|
||||
|
||||
|
|
|
@ -5,13 +5,21 @@
|
|||
|
||||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_PLATFORM;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_PROVIDER;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudPlatformIncubatingValues.AWS_ELASTIC_BEANSTALK;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudProviderIncubatingValues.AWS;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.SERVICE_INSTANCE_ID;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.SERVICE_NAMESPACE;
|
||||
import static io.opentelemetry.semconv.ServiceAttributes.SERVICE_VERSION;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonFactory;
|
||||
import com.fasterxml.jackson.core.JsonParser;
|
||||
import com.fasterxml.jackson.core.JsonToken;
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.api.common.AttributesBuilder;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.semconv.resource.attributes.ResourceAttributes;
|
||||
import io.opentelemetry.semconv.SchemaUrls;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.logging.Level;
|
||||
|
@ -58,21 +66,21 @@ public final class BeanstalkResource {
|
|||
|
||||
if (!parser.isExpectedStartObjectToken()) {
|
||||
logger.log(Level.WARNING, "Invalid Beanstalk config: ", configPath);
|
||||
return Resource.create(attrBuilders.build(), ResourceAttributes.SCHEMA_URL);
|
||||
return Resource.create(attrBuilders.build(), SchemaUrls.V1_25_0);
|
||||
}
|
||||
|
||||
while (parser.nextToken() != JsonToken.END_OBJECT) {
|
||||
parser.nextValue();
|
||||
String value = parser.getText();
|
||||
switch (parser.getCurrentName()) {
|
||||
switch (parser.currentName()) {
|
||||
case DEVELOPMENT_ID:
|
||||
attrBuilders.put(ResourceAttributes.SERVICE_INSTANCE_ID, value);
|
||||
attrBuilders.put(SERVICE_INSTANCE_ID, value);
|
||||
break;
|
||||
case VERSION_LABEL:
|
||||
attrBuilders.put(ResourceAttributes.SERVICE_VERSION, value);
|
||||
attrBuilders.put(SERVICE_VERSION, value);
|
||||
break;
|
||||
case ENVIRONMENT_NAME:
|
||||
attrBuilders.put(ResourceAttributes.SERVICE_NAMESPACE, value);
|
||||
attrBuilders.put(SERVICE_NAMESPACE, value);
|
||||
break;
|
||||
default:
|
||||
parser.skipChildren();
|
||||
|
@ -83,12 +91,10 @@ public final class BeanstalkResource {
|
|||
return Resource.empty();
|
||||
}
|
||||
|
||||
attrBuilders.put(ResourceAttributes.CLOUD_PROVIDER, ResourceAttributes.CloudProviderValues.AWS);
|
||||
attrBuilders.put(
|
||||
ResourceAttributes.CLOUD_PLATFORM,
|
||||
ResourceAttributes.CloudPlatformValues.AWS_ELASTIC_BEANSTALK);
|
||||
attrBuilders.put(CLOUD_PROVIDER, AWS);
|
||||
attrBuilders.put(CLOUD_PLATFORM, AWS_ELASTIC_BEANSTALK);
|
||||
|
||||
return Resource.create(attrBuilders.build(), ResourceAttributes.SCHEMA_URL);
|
||||
return Resource.create(attrBuilders.build(), SchemaUrls.V1_25_0);
|
||||
}
|
||||
|
||||
private BeanstalkResource() {}
|
||||
|
|
|
@ -10,7 +10,7 @@ import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider;
|
|||
import io.opentelemetry.sdk.resources.Resource;
|
||||
|
||||
/** {@link ResourceProvider} for automatically configuring {@link BeanstalkResource}. */
|
||||
public final class BeanstalkResourceProvider implements ResourceProvider {
|
||||
public final class BeanstalkResourceProvider extends CloudResourceProvider {
|
||||
@Override
|
||||
public Resource createResource(ConfigProperties config) {
|
||||
return BeanstalkResource.get();
|
||||
|
|
|
@ -0,0 +1,19 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_PROVIDER;
|
||||
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.internal.ConditionalResourceProvider;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
|
||||
abstract class CloudResourceProvider implements ConditionalResourceProvider {
|
||||
@Override
|
||||
public final boolean shouldApply(ConfigProperties config, Resource existing) {
|
||||
return existing.getAttribute(CLOUD_PROVIDER) == null;
|
||||
}
|
||||
}
|
|
@ -5,13 +5,24 @@
|
|||
|
||||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_ACCOUNT_ID;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_AVAILABILITY_ZONE;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_PLATFORM;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_PROVIDER;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_REGION;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudPlatformIncubatingValues.AWS_EC2;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.HOST_ID;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.HOST_IMAGE_ID;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.HOST_NAME;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.HOST_TYPE;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonFactory;
|
||||
import com.fasterxml.jackson.core.JsonParser;
|
||||
import com.fasterxml.jackson.core.JsonToken;
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.api.common.AttributesBuilder;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.semconv.resource.attributes.ResourceAttributes;
|
||||
import io.opentelemetry.semconv.SchemaUrls;
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
|
@ -78,9 +89,8 @@ public final class Ec2Resource {
|
|||
String hostname = fetchHostname(hostnameUrl, token);
|
||||
|
||||
AttributesBuilder attrBuilders = Attributes.builder();
|
||||
attrBuilders.put(ResourceAttributes.CLOUD_PROVIDER, ResourceAttributes.CloudProviderValues.AWS);
|
||||
attrBuilders.put(
|
||||
ResourceAttributes.CLOUD_PLATFORM, ResourceAttributes.CloudPlatformValues.AWS_EC2);
|
||||
attrBuilders.put(CLOUD_PROVIDER, IncubatingAttributes.CloudProviderIncubatingValues.AWS);
|
||||
attrBuilders.put(CLOUD_PLATFORM, AWS_EC2);
|
||||
|
||||
try (JsonParser parser = JSON_FACTORY.createParser(identity)) {
|
||||
parser.nextToken();
|
||||
|
@ -91,24 +101,24 @@ public final class Ec2Resource {
|
|||
|
||||
while (parser.nextToken() != JsonToken.END_OBJECT) {
|
||||
String value = parser.nextTextValue();
|
||||
switch (parser.getCurrentName()) {
|
||||
switch (parser.currentName()) {
|
||||
case "instanceId":
|
||||
attrBuilders.put(ResourceAttributes.HOST_ID, value);
|
||||
attrBuilders.put(HOST_ID, value);
|
||||
break;
|
||||
case "availabilityZone":
|
||||
attrBuilders.put(ResourceAttributes.CLOUD_AVAILABILITY_ZONE, value);
|
||||
attrBuilders.put(CLOUD_AVAILABILITY_ZONE, value);
|
||||
break;
|
||||
case "instanceType":
|
||||
attrBuilders.put(ResourceAttributes.HOST_TYPE, value);
|
||||
attrBuilders.put(HOST_TYPE, value);
|
||||
break;
|
||||
case "imageId":
|
||||
attrBuilders.put(ResourceAttributes.HOST_IMAGE_ID, value);
|
||||
attrBuilders.put(HOST_IMAGE_ID, value);
|
||||
break;
|
||||
case "accountId":
|
||||
attrBuilders.put(ResourceAttributes.CLOUD_ACCOUNT_ID, value);
|
||||
attrBuilders.put(CLOUD_ACCOUNT_ID, value);
|
||||
break;
|
||||
case "region":
|
||||
attrBuilders.put(ResourceAttributes.CLOUD_REGION, value);
|
||||
attrBuilders.put(CLOUD_REGION, value);
|
||||
break;
|
||||
default:
|
||||
parser.skipChildren();
|
||||
|
@ -119,9 +129,9 @@ public final class Ec2Resource {
|
|||
return Resource.empty();
|
||||
}
|
||||
|
||||
attrBuilders.put(ResourceAttributes.HOST_NAME, hostname);
|
||||
attrBuilders.put(HOST_NAME, hostname);
|
||||
|
||||
return Resource.create(attrBuilders.build(), ResourceAttributes.SCHEMA_URL);
|
||||
return Resource.create(attrBuilders.build(), SchemaUrls.V1_25_0);
|
||||
}
|
||||
|
||||
private static String fetchToken(URL tokenUrl) {
|
||||
|
|
|
@ -10,7 +10,7 @@ import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider;
|
|||
import io.opentelemetry.sdk.resources.Resource;
|
||||
|
||||
/** {@link ResourceProvider} for automatically configuring {@link Ec2Resource}. */
|
||||
public final class Ec2ResourceProvider implements ResourceProvider {
|
||||
public final class Ec2ResourceProvider extends CloudResourceProvider {
|
||||
@Override
|
||||
public Resource createResource(ConfigProperties config) {
|
||||
return Ec2Resource.get();
|
||||
|
|
|
@ -5,15 +5,38 @@
|
|||
|
||||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.AWS_ECS_CLUSTER_ARN;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.AWS_ECS_CONTAINER_ARN;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.AWS_ECS_LAUNCHTYPE;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.AWS_ECS_TASK_ARN;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.AWS_ECS_TASK_FAMILY;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.AWS_ECS_TASK_REVISION;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.AWS_LOG_GROUP_ARNS;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.AWS_LOG_GROUP_NAMES;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.AWS_LOG_STREAM_ARNS;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.AWS_LOG_STREAM_NAMES;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_ACCOUNT_ID;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_AVAILABILITY_ZONE;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_PLATFORM;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_PROVIDER;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_REGION;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_RESOURCE_ID;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CONTAINER_ID;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CONTAINER_IMAGE_NAME;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CONTAINER_NAME;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudPlatformIncubatingValues.AWS_ECS;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudProviderIncubatingValues.AWS;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonFactory;
|
||||
import com.fasterxml.jackson.core.JsonParser;
|
||||
import com.fasterxml.jackson.core.JsonToken;
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.api.common.AttributesBuilder;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.semconv.resource.attributes.ResourceAttributes;
|
||||
import io.opentelemetry.semconv.SchemaUrls;
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.logging.Level;
|
||||
|
@ -57,7 +80,7 @@ public final class EcsResource {
|
|||
// For TaskARN, Family, Revision.
|
||||
// May put the same attribute twice but that shouldn't matter.
|
||||
fetchMetadata(httpClient, ecsMetadataUrl + "/task", attrBuilders);
|
||||
return Resource.create(attrBuilders.build(), ResourceAttributes.SCHEMA_URL);
|
||||
return Resource.create(attrBuilders.build(), SchemaUrls.V1_25_0);
|
||||
}
|
||||
// Not running on ECS
|
||||
return Resource.empty();
|
||||
|
@ -69,9 +92,8 @@ public final class EcsResource {
|
|||
if (json.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
attrBuilders.put(ResourceAttributes.CLOUD_PROVIDER, ResourceAttributes.CloudProviderValues.AWS);
|
||||
attrBuilders.put(
|
||||
ResourceAttributes.CLOUD_PLATFORM, ResourceAttributes.CloudPlatformValues.AWS_ECS);
|
||||
attrBuilders.put(CLOUD_PROVIDER, AWS);
|
||||
attrBuilders.put(CLOUD_PLATFORM, AWS_ECS);
|
||||
try (JsonParser parser = JSON_FACTORY.createParser(json)) {
|
||||
parser.nextToken();
|
||||
LogArnBuilder logArnBuilder = new LogArnBuilder();
|
||||
|
@ -81,23 +103,55 @@ public final class EcsResource {
|
|||
.getLogGroupArn()
|
||||
.ifPresent(
|
||||
logGroupArn -> {
|
||||
attrBuilders.put(
|
||||
ResourceAttributes.AWS_LOG_GROUP_ARNS, Collections.singletonList(logGroupArn));
|
||||
attrBuilders.put(AWS_LOG_GROUP_ARNS, Collections.singletonList(logGroupArn));
|
||||
});
|
||||
|
||||
logArnBuilder
|
||||
.getLogStreamArn()
|
||||
.ifPresent(
|
||||
logStreamArn -> {
|
||||
attrBuilders.put(
|
||||
ResourceAttributes.AWS_LOG_STREAM_ARNS,
|
||||
Collections.singletonList(logStreamArn));
|
||||
attrBuilders.put(AWS_LOG_STREAM_ARNS, Collections.singletonList(logStreamArn));
|
||||
});
|
||||
} catch (IOException e) {
|
||||
logger.log(Level.WARNING, "Can't get ECS metadata", e);
|
||||
}
|
||||
}
|
||||
|
||||
private static Optional<String> getAccountId(@Nullable String arn) {
|
||||
return getArnPart(arn, ArnPart.ACCOUNT);
|
||||
}
|
||||
|
||||
private static Optional<String> getRegion(@Nullable String arn) {
|
||||
return getArnPart(arn, ArnPart.REGION);
|
||||
}
|
||||
|
||||
private static enum ArnPart {
|
||||
REGION(3),
|
||||
ACCOUNT(4);
|
||||
|
||||
final int partIndex;
|
||||
|
||||
private ArnPart(int partIndex) {
|
||||
this.partIndex = partIndex;
|
||||
}
|
||||
}
|
||||
|
||||
private static Optional<String> getArnPart(@Nullable String arn, ArnPart arnPart) {
|
||||
if (arn == null) {
|
||||
return Optional.empty();
|
||||
}
|
||||
|
||||
String[] arnParts = arn.split(":");
|
||||
|
||||
if (arnPart.partIndex >= arnParts.length) {
|
||||
return Optional.empty();
|
||||
}
|
||||
|
||||
return Optional.of(arnParts[arnPart.partIndex]);
|
||||
}
|
||||
|
||||
// Suppression is required for CONTAINER_IMAGE_TAG until we are ready to upgrade.
|
||||
@SuppressWarnings("deprecation")
|
||||
static void parseResponse(
|
||||
JsonParser parser, AttributesBuilder attrBuilders, LogArnBuilder logArnBuilder)
|
||||
throws IOException {
|
||||
|
@ -106,24 +160,41 @@ public final class EcsResource {
|
|||
return;
|
||||
}
|
||||
|
||||
// Either the container ARN or the task ARN, they both contain the
|
||||
// account id and region tokens we need later for the cloud.account.id
|
||||
// and cloud.region attributes.
|
||||
String arn = null;
|
||||
// Cluster can either be ARN or short name.
|
||||
String cluster = null;
|
||||
|
||||
while (parser.nextToken() != JsonToken.END_OBJECT) {
|
||||
String value = parser.nextTextValue();
|
||||
switch (parser.getCurrentName()) {
|
||||
switch (parser.currentName()) {
|
||||
case "AvailabilityZone":
|
||||
attrBuilders.put(CLOUD_AVAILABILITY_ZONE, value);
|
||||
break;
|
||||
case "DockerId":
|
||||
attrBuilders.put(ResourceAttributes.CONTAINER_ID, value);
|
||||
attrBuilders.put(CONTAINER_ID, value);
|
||||
break;
|
||||
case "DockerName":
|
||||
attrBuilders.put(ResourceAttributes.CONTAINER_NAME, value);
|
||||
attrBuilders.put(CONTAINER_NAME, value);
|
||||
break;
|
||||
case "Cluster":
|
||||
cluster = value;
|
||||
break;
|
||||
case "ContainerARN":
|
||||
attrBuilders.put(ResourceAttributes.AWS_ECS_CONTAINER_ARN, value);
|
||||
arn = value;
|
||||
attrBuilders.put(AWS_ECS_CONTAINER_ARN, value);
|
||||
attrBuilders.put(CLOUD_RESOURCE_ID, value);
|
||||
logArnBuilder.setContainerArn(value);
|
||||
break;
|
||||
case "Image":
|
||||
DockerImage parsedImage = DockerImage.parse(value);
|
||||
if (parsedImage != null) {
|
||||
attrBuilders.put(ResourceAttributes.CONTAINER_IMAGE_NAME, parsedImage.getRepository());
|
||||
attrBuilders.put(ResourceAttributes.CONTAINER_IMAGE_TAG, parsedImage.getTag());
|
||||
attrBuilders.put(CONTAINER_IMAGE_NAME, parsedImage.getRepository());
|
||||
attrBuilders.put(
|
||||
io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CONTAINER_IMAGE_TAGS,
|
||||
parsedImage.getTag());
|
||||
}
|
||||
break;
|
||||
case "ImageID":
|
||||
|
@ -134,33 +205,51 @@ public final class EcsResource {
|
|||
parseResponse(parser, attrBuilders, logArnBuilder);
|
||||
break;
|
||||
case "awslogs-group":
|
||||
attrBuilders.put(ResourceAttributes.AWS_LOG_GROUP_NAMES, value);
|
||||
attrBuilders.put(AWS_LOG_GROUP_NAMES, value);
|
||||
logArnBuilder.setLogGroupName(value);
|
||||
break;
|
||||
case "awslogs-stream":
|
||||
attrBuilders.put(ResourceAttributes.AWS_LOG_STREAM_NAMES, value);
|
||||
attrBuilders.put(AWS_LOG_STREAM_NAMES, value);
|
||||
logArnBuilder.setLogStreamName(value);
|
||||
break;
|
||||
case "awslogs-region":
|
||||
logArnBuilder.setRegion(value);
|
||||
break;
|
||||
case "TaskARN":
|
||||
attrBuilders.put(ResourceAttributes.AWS_ECS_TASK_ARN, value);
|
||||
arn = value;
|
||||
attrBuilders.put(AWS_ECS_TASK_ARN, value);
|
||||
break;
|
||||
case "LaunchType":
|
||||
attrBuilders.put(ResourceAttributes.AWS_ECS_LAUNCHTYPE, value.toLowerCase());
|
||||
attrBuilders.put(AWS_ECS_LAUNCHTYPE, value.toLowerCase(Locale.ROOT));
|
||||
break;
|
||||
case "Family":
|
||||
attrBuilders.put(ResourceAttributes.AWS_ECS_TASK_FAMILY, value);
|
||||
attrBuilders.put(AWS_ECS_TASK_FAMILY, value);
|
||||
break;
|
||||
case "Revision":
|
||||
attrBuilders.put(ResourceAttributes.AWS_ECS_TASK_REVISION, value);
|
||||
attrBuilders.put(AWS_ECS_TASK_REVISION, value);
|
||||
break;
|
||||
default:
|
||||
parser.skipChildren();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
String region = getRegion(arn).orElse(null);
|
||||
String account = getAccountId(arn).orElse(null);
|
||||
if (region != null) {
|
||||
attrBuilders.put(CLOUD_REGION, region);
|
||||
}
|
||||
if (account != null) {
|
||||
attrBuilders.put(CLOUD_ACCOUNT_ID, account);
|
||||
}
|
||||
if (cluster != null) {
|
||||
if (cluster.contains(":")) {
|
||||
attrBuilders.put(AWS_ECS_CLUSTER_ARN, cluster);
|
||||
} else {
|
||||
String clusterArn = String.format("arn:aws:ecs:%s:%s:cluster/%s", region, account, cluster);
|
||||
attrBuilders.put(AWS_ECS_CLUSTER_ARN, clusterArn);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private EcsResource() {}
|
||||
|
@ -192,9 +281,7 @@ public final class EcsResource {
|
|||
}
|
||||
|
||||
void setContainerArn(@Nullable String containerArn) {
|
||||
if (containerArn != null) {
|
||||
account = containerArn.split(":")[4];
|
||||
}
|
||||
account = getAccountId(containerArn).orElse(null);
|
||||
}
|
||||
|
||||
Optional<String> getLogGroupArn() {
|
||||
|
@ -227,7 +314,7 @@ public final class EcsResource {
|
|||
|
||||
private static final Pattern imagePattern =
|
||||
Pattern.compile(
|
||||
"^(?<repository>([^/\\s]+/)?([^:\\s]+))(:(?<tag>[^@\\s]+))?(@sha256:(?<sha256>\\d+))?$");
|
||||
"^(?<repository>([^/\\s]+/)?([^:\\s]+))(:(?<tag>[^@\\s]+))?(@sha256:(?<sha256>[\\da-fA-F]+))?$");
|
||||
|
||||
final String repository;
|
||||
final String tag;
|
||||
|
|
|
@ -10,7 +10,7 @@ import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider;
|
|||
import io.opentelemetry.sdk.resources.Resource;
|
||||
|
||||
/** {@link ResourceProvider} for automatically configuring {@link EcsResource}. */
|
||||
public final class EcsResourceProvider implements ResourceProvider {
|
||||
public final class EcsResourceProvider extends CloudResourceProvider {
|
||||
@Override
|
||||
public Resource createResource(ConfigProperties config) {
|
||||
return EcsResource.get();
|
||||
|
|
|
@ -5,13 +5,20 @@
|
|||
|
||||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_PLATFORM;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_PROVIDER;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CONTAINER_ID;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudPlatformIncubatingValues.AWS_EKS;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudProviderIncubatingValues.AWS;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.K8S_CLUSTER_NAME;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonFactory;
|
||||
import com.fasterxml.jackson.core.JsonParser;
|
||||
import com.fasterxml.jackson.core.JsonToken;
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.api.common.AttributesBuilder;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.semconv.resource.attributes.ResourceAttributes;
|
||||
import io.opentelemetry.semconv.SchemaUrls;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
@ -65,21 +72,20 @@ public final class EksResource {
|
|||
}
|
||||
|
||||
AttributesBuilder attrBuilders = Attributes.builder();
|
||||
attrBuilders.put(ResourceAttributes.CLOUD_PROVIDER, ResourceAttributes.CloudProviderValues.AWS);
|
||||
attrBuilders.put(
|
||||
ResourceAttributes.CLOUD_PLATFORM, ResourceAttributes.CloudPlatformValues.AWS_EKS);
|
||||
attrBuilders.put(CLOUD_PROVIDER, AWS);
|
||||
attrBuilders.put(CLOUD_PLATFORM, AWS_EKS);
|
||||
|
||||
String clusterName = getClusterName(httpClient);
|
||||
if (clusterName != null && !clusterName.isEmpty()) {
|
||||
attrBuilders.put(ResourceAttributes.K8S_CLUSTER_NAME, clusterName);
|
||||
attrBuilders.put(K8S_CLUSTER_NAME, clusterName);
|
||||
}
|
||||
|
||||
String containerId = dockerHelper.getContainerId();
|
||||
if (containerId != null && !containerId.isEmpty()) {
|
||||
attrBuilders.put(ResourceAttributes.CONTAINER_ID, containerId);
|
||||
attrBuilders.put(CONTAINER_ID, containerId);
|
||||
}
|
||||
|
||||
return Resource.create(attrBuilders.build(), ResourceAttributes.SCHEMA_URL);
|
||||
return Resource.create(attrBuilders.build(), SchemaUrls.V1_25_0);
|
||||
}
|
||||
|
||||
private static boolean isEks(
|
||||
|
@ -120,7 +126,7 @@ public final class EksResource {
|
|||
|
||||
while (parser.nextToken() != JsonToken.END_OBJECT) {
|
||||
parser.nextToken();
|
||||
if (!parser.getCurrentName().equals("data")) {
|
||||
if (!parser.currentName().equals("data")) {
|
||||
parser.skipChildren();
|
||||
continue;
|
||||
}
|
||||
|
@ -131,7 +137,7 @@ public final class EksResource {
|
|||
|
||||
while (parser.nextToken() != JsonToken.END_OBJECT) {
|
||||
String value = parser.nextTextValue();
|
||||
if (!parser.getCurrentName().equals("cluster.name")) {
|
||||
if (!parser.currentName().equals("cluster.name")) {
|
||||
parser.skipChildren();
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider;
|
|||
import io.opentelemetry.sdk.resources.Resource;
|
||||
|
||||
/** {@link ResourceProvider} for automatically configuring {@link EksResource}. */
|
||||
public final class EksResourceProvider implements ResourceProvider {
|
||||
public final class EksResourceProvider extends CloudResourceProvider {
|
||||
@Override
|
||||
public Resource createResource(ConfigProperties config) {
|
||||
return EksResource.get();
|
||||
|
|
|
@ -0,0 +1,90 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import io.opentelemetry.api.common.AttributeKey;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Inlines incubating attributes until they are stable, doing this prevents having a direct
|
||||
* dependency on incubating artifact which can conflict with another incubating version.
|
||||
*/
|
||||
class IncubatingAttributes {
|
||||
private IncubatingAttributes() {}
|
||||
|
||||
public static final AttributeKey<String> CLOUD_ACCOUNT_ID =
|
||||
AttributeKey.stringKey("cloud.account.id");
|
||||
public static final AttributeKey<String> CLOUD_AVAILABILITY_ZONE =
|
||||
AttributeKey.stringKey("cloud.availability_zone");
|
||||
public static final AttributeKey<String> CLOUD_PLATFORM =
|
||||
AttributeKey.stringKey("cloud.platform");
|
||||
public static final AttributeKey<String> CLOUD_PROVIDER =
|
||||
AttributeKey.stringKey("cloud.provider");
|
||||
public static final AttributeKey<String> CLOUD_REGION = AttributeKey.stringKey("cloud.region");
|
||||
public static final AttributeKey<String> CLOUD_RESOURCE_ID =
|
||||
AttributeKey.stringKey("cloud.resource_id");
|
||||
public static final AttributeKey<List<String>> CONTAINER_IMAGE_TAGS =
|
||||
AttributeKey.stringArrayKey("container.image.tags");
|
||||
|
||||
public static final class CloudPlatformIncubatingValues {
|
||||
public static final String AWS_EC2 = "aws_ec2";
|
||||
public static final String AWS_ECS = "aws_ecs";
|
||||
public static final String AWS_EKS = "aws_eks";
|
||||
public static final String AWS_LAMBDA = "aws_lambda";
|
||||
public static final String AWS_ELASTIC_BEANSTALK = "aws_elastic_beanstalk";
|
||||
|
||||
private CloudPlatformIncubatingValues() {}
|
||||
}
|
||||
|
||||
public static final class CloudProviderIncubatingValues {
|
||||
public static final String AWS = "aws";
|
||||
|
||||
private CloudProviderIncubatingValues() {}
|
||||
}
|
||||
|
||||
public static final AttributeKey<String> SERVICE_INSTANCE_ID =
|
||||
AttributeKey.stringKey("service.instance.id");
|
||||
public static final AttributeKey<String> SERVICE_NAMESPACE =
|
||||
AttributeKey.stringKey("service.namespace");
|
||||
|
||||
public static final AttributeKey<String> HOST_ID = AttributeKey.stringKey("host.id");
|
||||
public static final AttributeKey<String> HOST_IMAGE_ID = AttributeKey.stringKey("host.image.id");
|
||||
public static final AttributeKey<String> HOST_NAME = AttributeKey.stringKey("host.name");
|
||||
public static final AttributeKey<String> HOST_TYPE = AttributeKey.stringKey("host.type");
|
||||
|
||||
public static final AttributeKey<String> CONTAINER_ID = AttributeKey.stringKey("container.id");
|
||||
public static final AttributeKey<String> CONTAINER_IMAGE_NAME =
|
||||
AttributeKey.stringKey("container.image.name");
|
||||
public static final AttributeKey<String> CONTAINER_NAME =
|
||||
AttributeKey.stringKey("container.name");
|
||||
|
||||
public static final AttributeKey<String> K8S_CLUSTER_NAME =
|
||||
AttributeKey.stringKey("k8s.cluster.name");
|
||||
|
||||
public static final AttributeKey<String> AWS_ECS_CLUSTER_ARN =
|
||||
AttributeKey.stringKey("aws.ecs.cluster.arn");
|
||||
public static final AttributeKey<String> AWS_ECS_CONTAINER_ARN =
|
||||
AttributeKey.stringKey("aws.ecs.container.arn");
|
||||
public static final AttributeKey<String> AWS_ECS_LAUNCHTYPE =
|
||||
AttributeKey.stringKey("aws.ecs.launchtype");
|
||||
public static final AttributeKey<String> AWS_ECS_TASK_ARN =
|
||||
AttributeKey.stringKey("aws.ecs.task.arn");
|
||||
public static final AttributeKey<String> AWS_ECS_TASK_FAMILY =
|
||||
AttributeKey.stringKey("aws.ecs.task.family");
|
||||
public static final AttributeKey<String> AWS_ECS_TASK_REVISION =
|
||||
AttributeKey.stringKey("aws.ecs.task.revision");
|
||||
public static final AttributeKey<List<String>> AWS_LOG_GROUP_ARNS =
|
||||
AttributeKey.stringArrayKey("aws.log.group.arns");
|
||||
public static final AttributeKey<List<String>> AWS_LOG_GROUP_NAMES =
|
||||
AttributeKey.stringArrayKey("aws.log.group.names");
|
||||
public static final AttributeKey<List<String>> AWS_LOG_STREAM_ARNS =
|
||||
AttributeKey.stringArrayKey("aws.log.stream.arns");
|
||||
public static final AttributeKey<List<String>> AWS_LOG_STREAM_NAMES =
|
||||
AttributeKey.stringArrayKey("aws.log.stream.names");
|
||||
|
||||
public static final AttributeKey<String> FAAS_NAME = AttributeKey.stringKey("faas.name");
|
||||
public static final AttributeKey<String> FAAS_VERSION = AttributeKey.stringKey("faas.version");
|
||||
}
|
|
@ -5,10 +5,18 @@
|
|||
|
||||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_PLATFORM;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_PROVIDER;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CLOUD_REGION;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudPlatformIncubatingValues.AWS_LAMBDA;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudProviderIncubatingValues.AWS;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.FAAS_NAME;
|
||||
import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.FAAS_VERSION;
|
||||
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.api.common.AttributesBuilder;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.semconv.resource.attributes.ResourceAttributes;
|
||||
import io.opentelemetry.semconv.SchemaUrls;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
|
@ -39,23 +47,20 @@ public final class LambdaResource {
|
|||
return Resource.empty();
|
||||
}
|
||||
|
||||
AttributesBuilder builder =
|
||||
Attributes.builder()
|
||||
.put(ResourceAttributes.CLOUD_PROVIDER, ResourceAttributes.CloudProviderValues.AWS);
|
||||
builder.put(
|
||||
ResourceAttributes.CLOUD_PLATFORM, ResourceAttributes.CloudPlatformValues.AWS_LAMBDA);
|
||||
AttributesBuilder builder = Attributes.builder().put(CLOUD_PROVIDER, AWS);
|
||||
builder.put(CLOUD_PLATFORM, AWS_LAMBDA);
|
||||
|
||||
if (!region.isEmpty()) {
|
||||
builder.put(ResourceAttributes.CLOUD_REGION, region);
|
||||
builder.put(CLOUD_REGION, region);
|
||||
}
|
||||
if (!functionName.isEmpty()) {
|
||||
builder.put(ResourceAttributes.FAAS_NAME, functionName);
|
||||
builder.put(FAAS_NAME, functionName);
|
||||
}
|
||||
if (!functionVersion.isEmpty()) {
|
||||
builder.put(ResourceAttributes.FAAS_VERSION, functionVersion);
|
||||
builder.put(FAAS_VERSION, functionVersion);
|
||||
}
|
||||
|
||||
return Resource.create(builder.build(), ResourceAttributes.SCHEMA_URL);
|
||||
return Resource.create(builder.build(), SchemaUrls.V1_25_0);
|
||||
}
|
||||
|
||||
private static boolean isLambda(String... envVariables) {
|
||||
|
|
|
@ -10,7 +10,7 @@ import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider;
|
|||
import io.opentelemetry.sdk.resources.Resource;
|
||||
|
||||
/** {@link ResourceProvider} for automatically configuring {@link LambdaResource}. */
|
||||
public final class LambdaResourceProvider implements ResourceProvider {
|
||||
public final class LambdaResourceProvider extends CloudResourceProvider {
|
||||
@Override
|
||||
public Resource createResource(ConfigProperties config) {
|
||||
return LambdaResource.get();
|
||||
|
|
|
@ -6,14 +6,19 @@
|
|||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat;
|
||||
import static io.opentelemetry.semconv.ServiceAttributes.SERVICE_VERSION;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PLATFORM;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PROVIDER;
|
||||
import static io.opentelemetry.semconv.incubating.ServiceIncubatingAttributes.SERVICE_INSTANCE_ID;
|
||||
import static io.opentelemetry.semconv.incubating.ServiceIncubatingAttributes.SERVICE_NAMESPACE;
|
||||
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||
import static org.assertj.core.api.Assertions.entry;
|
||||
|
||||
import com.google.common.base.Charsets;
|
||||
import com.google.common.io.Files;
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.semconv.resource.attributes.ResourceAttributes;
|
||||
import io.opentelemetry.semconv.SchemaUrls;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.ServiceLoader;
|
||||
|
@ -28,17 +33,17 @@ class BeanstalkResourceTest {
|
|||
String content =
|
||||
"{\"noise\": \"noise\", \"deployment_id\":4,\""
|
||||
+ "version_label\":\"2\",\"environment_name\":\"HttpSubscriber-env\"}";
|
||||
Files.write(content.getBytes(Charsets.UTF_8), file);
|
||||
Files.write(content.getBytes(UTF_8), file);
|
||||
Resource resource = BeanstalkResource.buildResource(file.getPath());
|
||||
Attributes attributes = resource.getAttributes();
|
||||
assertThat(attributes)
|
||||
.containsOnly(
|
||||
entry(ResourceAttributes.CLOUD_PROVIDER, "aws"),
|
||||
entry(ResourceAttributes.CLOUD_PLATFORM, "aws_elastic_beanstalk"),
|
||||
entry(ResourceAttributes.SERVICE_INSTANCE_ID, "4"),
|
||||
entry(ResourceAttributes.SERVICE_VERSION, "2"),
|
||||
entry(ResourceAttributes.SERVICE_NAMESPACE, "HttpSubscriber-env"));
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(ResourceAttributes.SCHEMA_URL);
|
||||
entry(CLOUD_PROVIDER, "aws"),
|
||||
entry(CLOUD_PLATFORM, "aws_elastic_beanstalk"),
|
||||
entry(SERVICE_INSTANCE_ID, "4"),
|
||||
entry(SERVICE_VERSION, "2"),
|
||||
entry(SERVICE_NAMESPACE, "HttpSubscriber-env"));
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(SchemaUrls.V1_25_0);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -54,7 +59,7 @@ class BeanstalkResourceTest {
|
|||
String content =
|
||||
"\"deployment_id\":4,\"version_label\":\"2\",\""
|
||||
+ "environment_name\":\"HttpSubscriber-env\"}";
|
||||
Files.write(content.getBytes(Charsets.UTF_8), file);
|
||||
Files.write(content.getBytes(UTF_8), file);
|
||||
Attributes attributes = BeanstalkResource.buildResource(file.getPath()).getAttributes();
|
||||
assertThat(attributes).isEmpty();
|
||||
}
|
||||
|
|
|
@ -5,9 +5,9 @@
|
|||
|
||||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import com.google.common.base.Charsets;
|
||||
import com.google.common.io.Files;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
|
@ -26,7 +26,7 @@ class DockerHelperTest {
|
|||
void testContainerIdMissing(@TempDir File tempFolder) throws IOException {
|
||||
File file = new File(tempFolder, "no_container_id");
|
||||
String content = "13:pids:/\n" + "12:hugetlb:/\n" + "11:net_prio:/";
|
||||
Files.write(content.getBytes(Charsets.UTF_8), file);
|
||||
Files.write(content.getBytes(UTF_8), file);
|
||||
|
||||
DockerHelper dockerHelper = new DockerHelper(file.getPath());
|
||||
assertThat(dockerHelper.getContainerId()).isEmpty();
|
||||
|
@ -37,7 +37,7 @@ class DockerHelperTest {
|
|||
File file = new File(tempFolder, "cgroup");
|
||||
String expected = "386a1920640799b5bf5a39bd94e489e5159a88677d96ca822ce7c433ff350163";
|
||||
String content = "dummy\n11:devices:/ecs/bbc36dd0-5ee0-4007-ba96-c590e0b278d2/" + expected;
|
||||
Files.write(content.getBytes(Charsets.UTF_8), file);
|
||||
Files.write(content.getBytes(UTF_8), file);
|
||||
|
||||
DockerHelper dockerHelper = new DockerHelper(file.getPath());
|
||||
assertThat(dockerHelper.getContainerId()).isEqualTo(expected);
|
||||
|
|
|
@ -6,6 +6,15 @@
|
|||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_ACCOUNT_ID;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_AVAILABILITY_ZONE;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PLATFORM;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PROVIDER;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_REGION;
|
||||
import static io.opentelemetry.semconv.incubating.HostIncubatingAttributes.HOST_ID;
|
||||
import static io.opentelemetry.semconv.incubating.HostIncubatingAttributes.HOST_IMAGE_ID;
|
||||
import static io.opentelemetry.semconv.incubating.HostIncubatingAttributes.HOST_NAME;
|
||||
import static io.opentelemetry.semconv.incubating.HostIncubatingAttributes.HOST_TYPE;
|
||||
import static org.assertj.core.api.Assertions.entry;
|
||||
|
||||
import com.linecorp.armeria.common.AggregatedHttpRequest;
|
||||
|
@ -16,7 +25,7 @@ import com.linecorp.armeria.testing.junit5.server.mock.MockWebServerExtension;
|
|||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.semconv.resource.attributes.ResourceAttributes;
|
||||
import io.opentelemetry.semconv.SchemaUrls;
|
||||
import java.util.ServiceLoader;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.RegisterExtension;
|
||||
|
@ -43,7 +52,8 @@ class Ec2ResourceTest {
|
|||
+ " \"region\" : \"us-west-2\"\n"
|
||||
+ "}";
|
||||
|
||||
@RegisterExtension public static MockWebServerExtension server = new MockWebServerExtension();
|
||||
@RegisterExtension
|
||||
public static final MockWebServerExtension server = new MockWebServerExtension();
|
||||
|
||||
@Test
|
||||
void imdsv2() {
|
||||
|
@ -52,20 +62,20 @@ class Ec2ResourceTest {
|
|||
server.enqueue(HttpResponse.of("ec2-1-2-3-4"));
|
||||
|
||||
Resource resource = Ec2Resource.buildResource("localhost:" + server.httpPort());
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(ResourceAttributes.SCHEMA_URL);
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(SchemaUrls.V1_25_0);
|
||||
Attributes attributes = resource.getAttributes();
|
||||
|
||||
assertThat(attributes)
|
||||
.containsOnly(
|
||||
entry(ResourceAttributes.CLOUD_PROVIDER, "aws"),
|
||||
entry(ResourceAttributes.CLOUD_PLATFORM, "aws_ec2"),
|
||||
entry(ResourceAttributes.HOST_ID, "i-1234567890abcdef0"),
|
||||
entry(ResourceAttributes.CLOUD_AVAILABILITY_ZONE, "us-west-2b"),
|
||||
entry(ResourceAttributes.HOST_TYPE, "t2.micro"),
|
||||
entry(ResourceAttributes.HOST_IMAGE_ID, "ami-5fb8c835"),
|
||||
entry(ResourceAttributes.CLOUD_ACCOUNT_ID, "123456789012"),
|
||||
entry(ResourceAttributes.CLOUD_REGION, "us-west-2"),
|
||||
entry(ResourceAttributes.HOST_NAME, "ec2-1-2-3-4"));
|
||||
entry(CLOUD_PROVIDER, "aws"),
|
||||
entry(CLOUD_PLATFORM, "aws_ec2"),
|
||||
entry(HOST_ID, "i-1234567890abcdef0"),
|
||||
entry(CLOUD_AVAILABILITY_ZONE, "us-west-2b"),
|
||||
entry(HOST_TYPE, "t2.micro"),
|
||||
entry(HOST_IMAGE_ID, "ami-5fb8c835"),
|
||||
entry(CLOUD_ACCOUNT_ID, "123456789012"),
|
||||
entry(CLOUD_REGION, "us-west-2"),
|
||||
entry(HOST_NAME, "ec2-1-2-3-4"));
|
||||
|
||||
AggregatedHttpRequest request1 = server.takeRequest().request();
|
||||
assertThat(request1.path()).isEqualTo("/latest/api/token");
|
||||
|
@ -87,20 +97,20 @@ class Ec2ResourceTest {
|
|||
server.enqueue(HttpResponse.of("ec2-1-2-3-4"));
|
||||
|
||||
Resource resource = Ec2Resource.buildResource("localhost:" + server.httpPort());
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(ResourceAttributes.SCHEMA_URL);
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(SchemaUrls.V1_25_0);
|
||||
Attributes attributes = resource.getAttributes();
|
||||
|
||||
assertThat(attributes)
|
||||
.containsOnly(
|
||||
entry(ResourceAttributes.CLOUD_PROVIDER, "aws"),
|
||||
entry(ResourceAttributes.CLOUD_PLATFORM, "aws_ec2"),
|
||||
entry(ResourceAttributes.HOST_ID, "i-1234567890abcdef0"),
|
||||
entry(ResourceAttributes.CLOUD_AVAILABILITY_ZONE, "us-west-2b"),
|
||||
entry(ResourceAttributes.HOST_TYPE, "t2.micro"),
|
||||
entry(ResourceAttributes.HOST_IMAGE_ID, "ami-5fb8c835"),
|
||||
entry(ResourceAttributes.CLOUD_ACCOUNT_ID, "123456789012"),
|
||||
entry(ResourceAttributes.CLOUD_REGION, "us-west-2"),
|
||||
entry(ResourceAttributes.HOST_NAME, "ec2-1-2-3-4"));
|
||||
entry(CLOUD_PROVIDER, "aws"),
|
||||
entry(CLOUD_PLATFORM, "aws_ec2"),
|
||||
entry(HOST_ID, "i-1234567890abcdef0"),
|
||||
entry(CLOUD_AVAILABILITY_ZONE, "us-west-2b"),
|
||||
entry(HOST_TYPE, "t2.micro"),
|
||||
entry(HOST_IMAGE_ID, "ami-5fb8c835"),
|
||||
entry(CLOUD_ACCOUNT_ID, "123456789012"),
|
||||
entry(CLOUD_REGION, "us-west-2"),
|
||||
entry(HOST_NAME, "ec2-1-2-3-4"));
|
||||
|
||||
AggregatedHttpRequest request1 = server.takeRequest().request();
|
||||
assertThat(request1.path()).isEqualTo("/latest/api/token");
|
||||
|
|
|
@ -6,6 +6,27 @@
|
|||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat;
|
||||
import static io.opentelemetry.semconv.incubating.AwsIncubatingAttributes.AWS_ECS_CLUSTER_ARN;
|
||||
import static io.opentelemetry.semconv.incubating.AwsIncubatingAttributes.AWS_ECS_CONTAINER_ARN;
|
||||
import static io.opentelemetry.semconv.incubating.AwsIncubatingAttributes.AWS_ECS_LAUNCHTYPE;
|
||||
import static io.opentelemetry.semconv.incubating.AwsIncubatingAttributes.AWS_ECS_TASK_ARN;
|
||||
import static io.opentelemetry.semconv.incubating.AwsIncubatingAttributes.AWS_ECS_TASK_FAMILY;
|
||||
import static io.opentelemetry.semconv.incubating.AwsIncubatingAttributes.AWS_ECS_TASK_REVISION;
|
||||
import static io.opentelemetry.semconv.incubating.AwsIncubatingAttributes.AWS_LOG_GROUP_ARNS;
|
||||
import static io.opentelemetry.semconv.incubating.AwsIncubatingAttributes.AWS_LOG_GROUP_NAMES;
|
||||
import static io.opentelemetry.semconv.incubating.AwsIncubatingAttributes.AWS_LOG_STREAM_ARNS;
|
||||
import static io.opentelemetry.semconv.incubating.AwsIncubatingAttributes.AWS_LOG_STREAM_NAMES;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_ACCOUNT_ID;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_AVAILABILITY_ZONE;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PLATFORM;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PROVIDER;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_REGION;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_RESOURCE_ID;
|
||||
import static io.opentelemetry.semconv.incubating.ContainerIncubatingAttributes.CONTAINER_ID;
|
||||
import static io.opentelemetry.semconv.incubating.ContainerIncubatingAttributes.CONTAINER_IMAGE_NAME;
|
||||
import static io.opentelemetry.semconv.incubating.ContainerIncubatingAttributes.CONTAINER_IMAGE_TAGS;
|
||||
import static io.opentelemetry.semconv.incubating.ContainerIncubatingAttributes.CONTAINER_NAME;
|
||||
import static java.util.Collections.singletonList;
|
||||
import static org.assertj.core.api.Assertions.entry;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
|
@ -14,7 +35,7 @@ import io.opentelemetry.api.common.AttributeKey;
|
|||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.semconv.resource.attributes.ResourceAttributes;
|
||||
import io.opentelemetry.semconv.SchemaUrls;
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Collections;
|
||||
|
@ -46,25 +67,27 @@ class EcsResourceTest {
|
|||
Resource resource = EcsResource.buildResource(mockSysEnv, mockHttpClient);
|
||||
Attributes attributes = resource.getAttributes();
|
||||
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(ResourceAttributes.SCHEMA_URL);
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(SchemaUrls.V1_25_0);
|
||||
assertThat(attributes)
|
||||
.containsOnly(
|
||||
entry(ResourceAttributes.CLOUD_PROVIDER, "aws"),
|
||||
entry(ResourceAttributes.CLOUD_PLATFORM, "aws_ecs"),
|
||||
entry(ResourceAttributes.CONTAINER_NAME, "ecs-nginx-5-nginx-curl-ccccb9f49db0dfe0d901"),
|
||||
entry(
|
||||
ResourceAttributes.CONTAINER_ID,
|
||||
"43481a6ce4842eec8fe72fc28500c6b52edcc0917f105b83379f88cac1ff3946"),
|
||||
entry(ResourceAttributes.CONTAINER_IMAGE_NAME, "nrdlngr/nginx-curl"),
|
||||
entry(ResourceAttributes.CONTAINER_IMAGE_TAG, "latest"),
|
||||
entry(CLOUD_PROVIDER, "aws"),
|
||||
entry(CLOUD_PLATFORM, "aws_ecs"),
|
||||
entry(CLOUD_ACCOUNT_ID, "012345678910"),
|
||||
entry(CLOUD_REGION, "us-east-2"),
|
||||
entry(CLOUD_AVAILABILITY_ZONE, "us-east-2b"),
|
||||
entry(CONTAINER_NAME, "ecs-nginx-5-nginx-curl-ccccb9f49db0dfe0d901"),
|
||||
entry(CONTAINER_ID, "43481a6ce4842eec8fe72fc28500c6b52edcc0917f105b83379f88cac1ff3946"),
|
||||
entry(CONTAINER_IMAGE_NAME, "nrdlngr/nginx-curl"),
|
||||
entry(CONTAINER_IMAGE_TAGS, singletonList("latest")),
|
||||
entry(AWS_ECS_CLUSTER_ARN, "arn:aws:ecs:us-east-2:012345678910:cluster/default"),
|
||||
entry(
|
||||
AttributeKey.stringKey("aws.ecs.container.image.id"),
|
||||
"sha256:2e00ae64383cfc865ba0a2ba37f61b50a120d2d9378559dcd458dc0de47bc165"),
|
||||
entry(
|
||||
ResourceAttributes.AWS_ECS_TASK_ARN,
|
||||
AWS_ECS_TASK_ARN,
|
||||
"arn:aws:ecs:us-east-2:012345678910:task/9781c248-0edd-4cdb-9a93-f63cb662a5d3"),
|
||||
entry(ResourceAttributes.AWS_ECS_TASK_FAMILY, "nginx"),
|
||||
entry(ResourceAttributes.AWS_ECS_TASK_REVISION, "5"));
|
||||
entry(AWS_ECS_TASK_FAMILY, "nginx"),
|
||||
entry(AWS_ECS_TASK_REVISION, "5"));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -80,44 +103,43 @@ class EcsResourceTest {
|
|||
Resource resource = EcsResource.buildResource(mockSysEnv, mockHttpClient);
|
||||
Attributes attributes = resource.getAttributes();
|
||||
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(ResourceAttributes.SCHEMA_URL);
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(SchemaUrls.V1_25_0);
|
||||
assertThat(attributes)
|
||||
.containsOnly(
|
||||
entry(ResourceAttributes.CLOUD_PROVIDER, "aws"),
|
||||
entry(ResourceAttributes.CLOUD_PLATFORM, "aws_ecs"),
|
||||
entry(ResourceAttributes.CONTAINER_NAME, "ecs-curltest-26-curl-cca48e8dcadd97805600"),
|
||||
entry(CLOUD_PROVIDER, "aws"),
|
||||
entry(CLOUD_PLATFORM, "aws_ecs"),
|
||||
entry(CLOUD_ACCOUNT_ID, "111122223333"),
|
||||
entry(CLOUD_REGION, "us-west-2"),
|
||||
entry(
|
||||
ResourceAttributes.CONTAINER_ID,
|
||||
"ea32192c8553fbff06c9340478a2ff089b2bb5646fb718b4ee206641c9086d66"),
|
||||
entry(
|
||||
ResourceAttributes.CONTAINER_IMAGE_NAME,
|
||||
"111122223333.dkr.ecr.us-west-2.amazonaws.com/curltest"),
|
||||
entry(ResourceAttributes.CONTAINER_IMAGE_TAG, "latest"),
|
||||
CLOUD_RESOURCE_ID,
|
||||
"arn:aws:ecs:us-west-2:111122223333:container/0206b271-b33f-47ab-86c6-a0ba208a70a9"),
|
||||
entry(CLOUD_AVAILABILITY_ZONE, "us-west-2d"),
|
||||
entry(CONTAINER_NAME, "ecs-curltest-26-curl-cca48e8dcadd97805600"),
|
||||
entry(CONTAINER_ID, "ea32192c8553fbff06c9340478a2ff089b2bb5646fb718b4ee206641c9086d66"),
|
||||
entry(CONTAINER_IMAGE_NAME, "111122223333.dkr.ecr.us-west-2.amazonaws.com/curltest"),
|
||||
entry(CONTAINER_IMAGE_TAGS, singletonList("latest")),
|
||||
entry(
|
||||
AttributeKey.stringKey("aws.ecs.container.image.id"),
|
||||
"sha256:d691691e9652791a60114e67b365688d20d19940dde7c4736ea30e660d8d3553"),
|
||||
entry(AWS_ECS_CLUSTER_ARN, "arn:aws:ecs:us-west-2:111122223333:cluster/default"),
|
||||
entry(
|
||||
ResourceAttributes.AWS_ECS_CONTAINER_ARN,
|
||||
AWS_ECS_CONTAINER_ARN,
|
||||
"arn:aws:ecs:us-west-2:111122223333:container/0206b271-b33f-47ab-86c6-a0ba208a70a9"),
|
||||
entry(AWS_LOG_GROUP_NAMES, singletonList("/ecs/metadata")),
|
||||
entry(
|
||||
ResourceAttributes.AWS_LOG_GROUP_NAMES, Collections.singletonList("/ecs/metadata")),
|
||||
AWS_LOG_GROUP_ARNS,
|
||||
singletonList("arn:aws:logs:us-west-2:111122223333:log-group:/ecs/metadata")),
|
||||
entry(AWS_LOG_STREAM_NAMES, singletonList("ecs/curl/8f03e41243824aea923aca126495f665")),
|
||||
entry(
|
||||
ResourceAttributes.AWS_LOG_GROUP_ARNS,
|
||||
Collections.singletonList(
|
||||
"arn:aws:logs:us-west-2:111122223333:log-group:/ecs/metadata")),
|
||||
entry(
|
||||
ResourceAttributes.AWS_LOG_STREAM_NAMES,
|
||||
Collections.singletonList("ecs/curl/8f03e41243824aea923aca126495f665")),
|
||||
entry(
|
||||
ResourceAttributes.AWS_LOG_STREAM_ARNS,
|
||||
Collections.singletonList(
|
||||
AWS_LOG_STREAM_ARNS,
|
||||
singletonList(
|
||||
"arn:aws:logs:us-west-2:111122223333:log-group:/ecs/metadata:log-stream:ecs/curl/8f03e41243824aea923aca126495f665")),
|
||||
entry(
|
||||
ResourceAttributes.AWS_ECS_TASK_ARN,
|
||||
AWS_ECS_TASK_ARN,
|
||||
"arn:aws:ecs:us-west-2:111122223333:task/default/158d1c8083dd49d6b527399fd6414f5c"),
|
||||
entry(ResourceAttributes.AWS_ECS_LAUNCHTYPE, "ec2"),
|
||||
entry(ResourceAttributes.AWS_ECS_TASK_FAMILY, "curltest"),
|
||||
entry(ResourceAttributes.AWS_ECS_TASK_REVISION, "26"));
|
||||
entry(AWS_ECS_LAUNCHTYPE, "ec2"),
|
||||
entry(AWS_ECS_TASK_FAMILY, "curltest"),
|
||||
entry(AWS_ECS_TASK_REVISION, "26"));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -9,16 +9,20 @@ import static io.opentelemetry.contrib.aws.resource.EksResource.AUTH_CONFIGMAP_P
|
|||
import static io.opentelemetry.contrib.aws.resource.EksResource.CW_CONFIGMAP_PATH;
|
||||
import static io.opentelemetry.contrib.aws.resource.EksResource.K8S_SVC_URL;
|
||||
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PLATFORM;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PROVIDER;
|
||||
import static io.opentelemetry.semconv.incubating.ContainerIncubatingAttributes.CONTAINER_ID;
|
||||
import static io.opentelemetry.semconv.incubating.K8sIncubatingAttributes.K8S_CLUSTER_NAME;
|
||||
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||
import static org.assertj.core.api.Assertions.entry;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import com.google.common.base.Charsets;
|
||||
import com.google.common.io.Files;
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.semconv.resource.attributes.ResourceAttributes;
|
||||
import io.opentelemetry.semconv.SchemaUrls;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.ServiceLoader;
|
||||
|
@ -40,10 +44,10 @@ public class EksResourceTest {
|
|||
void testEks(@TempDir File tempFolder) throws IOException {
|
||||
File mockK8sTokenFile = new File(tempFolder, "k8sToken");
|
||||
String token = "token123";
|
||||
Files.write(token.getBytes(Charsets.UTF_8), mockK8sTokenFile);
|
||||
Files.write(token.getBytes(UTF_8), mockK8sTokenFile);
|
||||
File mockK8sKeystoreFile = new File(tempFolder, "k8sCert");
|
||||
String truststore = "truststore123";
|
||||
Files.write(truststore.getBytes(Charsets.UTF_8), mockK8sKeystoreFile);
|
||||
Files.write(truststore.getBytes(UTF_8), mockK8sKeystoreFile);
|
||||
|
||||
when(httpClient.fetchString(any(), Mockito.eq(K8S_SVC_URL + AUTH_CONFIGMAP_PATH), any(), any()))
|
||||
.thenReturn("not empty");
|
||||
|
@ -59,13 +63,13 @@ public class EksResourceTest {
|
|||
mockK8sKeystoreFile.getPath());
|
||||
Attributes attributes = eksResource.getAttributes();
|
||||
|
||||
assertThat(eksResource.getSchemaUrl()).isEqualTo(ResourceAttributes.SCHEMA_URL);
|
||||
assertThat(eksResource.getSchemaUrl()).isEqualTo(SchemaUrls.V1_25_0);
|
||||
assertThat(attributes)
|
||||
.containsOnly(
|
||||
entry(ResourceAttributes.CLOUD_PROVIDER, "aws"),
|
||||
entry(ResourceAttributes.CLOUD_PLATFORM, "aws_eks"),
|
||||
entry(ResourceAttributes.K8S_CLUSTER_NAME, "my-cluster"),
|
||||
entry(ResourceAttributes.CONTAINER_ID, "0123456789A"));
|
||||
entry(CLOUD_PROVIDER, "aws"),
|
||||
entry(CLOUD_PLATFORM, "aws_eks"),
|
||||
entry(K8S_CLUSTER_NAME, "my-cluster"),
|
||||
entry(CONTAINER_ID, "0123456789A"));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -6,6 +6,11 @@
|
|||
package io.opentelemetry.contrib.aws.resource;
|
||||
|
||||
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PLATFORM;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PROVIDER;
|
||||
import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_REGION;
|
||||
import static io.opentelemetry.semconv.incubating.FaasIncubatingAttributes.FAAS_NAME;
|
||||
import static io.opentelemetry.semconv.incubating.FaasIncubatingAttributes.FAAS_VERSION;
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.assertj.core.api.Assertions.entry;
|
||||
|
@ -13,7 +18,7 @@ import static org.assertj.core.api.Assertions.entry;
|
|||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.semconv.resource.attributes.ResourceAttributes;
|
||||
import io.opentelemetry.semconv.SchemaUrls;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.ServiceLoader;
|
||||
|
@ -32,12 +37,12 @@ class LambdaResourceTest {
|
|||
LambdaResource.buildResource(singletonMap("AWS_LAMBDA_FUNCTION_NAME", "my-function"));
|
||||
Attributes attributes = resource.getAttributes();
|
||||
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(ResourceAttributes.SCHEMA_URL);
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(SchemaUrls.V1_25_0);
|
||||
assertThat(attributes)
|
||||
.containsOnly(
|
||||
entry(ResourceAttributes.CLOUD_PROVIDER, "aws"),
|
||||
entry(ResourceAttributes.CLOUD_PLATFORM, "aws_lambda"),
|
||||
entry(ResourceAttributes.FAAS_NAME, "my-function"));
|
||||
entry(CLOUD_PROVIDER, "aws"),
|
||||
entry(CLOUD_PLATFORM, "aws_lambda"),
|
||||
entry(FAAS_NAME, "my-function"));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -50,14 +55,14 @@ class LambdaResourceTest {
|
|||
Resource resource = LambdaResource.buildResource(envVars);
|
||||
Attributes attributes = resource.getAttributes();
|
||||
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(ResourceAttributes.SCHEMA_URL);
|
||||
assertThat(resource.getSchemaUrl()).isEqualTo(SchemaUrls.V1_25_0);
|
||||
assertThat(attributes)
|
||||
.containsOnly(
|
||||
entry(ResourceAttributes.CLOUD_PROVIDER, "aws"),
|
||||
entry(ResourceAttributes.CLOUD_PLATFORM, "aws_lambda"),
|
||||
entry(ResourceAttributes.CLOUD_REGION, "us-east-1"),
|
||||
entry(ResourceAttributes.FAAS_NAME, "my-function"),
|
||||
entry(ResourceAttributes.FAAS_VERSION, "1.2.3"));
|
||||
entry(CLOUD_PROVIDER, "aws"),
|
||||
entry(CLOUD_PLATFORM, "aws_lambda"),
|
||||
entry(CLOUD_REGION, "us-east-1"),
|
||||
entry(FAAS_NAME, "my-function"),
|
||||
entry(FAAS_VERSION, "1.2.3"));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -10,6 +10,7 @@ import static org.assertj.core.api.Assertions.assertThat;
|
|||
import com.google.common.collect.ImmutableMap;
|
||||
import com.linecorp.armeria.common.AggregatedHttpRequest;
|
||||
import com.linecorp.armeria.common.HttpResponse;
|
||||
import com.linecorp.armeria.common.TlsKeyPair;
|
||||
import com.linecorp.armeria.server.ServerBuilder;
|
||||
import com.linecorp.armeria.testing.junit5.server.SelfSignedCertificateExtension;
|
||||
import com.linecorp.armeria.testing.junit5.server.ServerExtension;
|
||||
|
@ -25,7 +26,8 @@ import org.junit.jupiter.api.io.TempDir;
|
|||
|
||||
class SimpleHttpClientTest {
|
||||
|
||||
@RegisterExtension public static MockWebServerExtension server = new MockWebServerExtension();
|
||||
@RegisterExtension
|
||||
public static final MockWebServerExtension server = new MockWebServerExtension();
|
||||
|
||||
@Test
|
||||
void testFetchString() {
|
||||
|
@ -58,15 +60,16 @@ class SimpleHttpClientTest {
|
|||
static class HttpsServerTest {
|
||||
@RegisterExtension
|
||||
@Order(1)
|
||||
public static SelfSignedCertificateExtension certificate = new SelfSignedCertificateExtension();
|
||||
public static final SelfSignedCertificateExtension certificate =
|
||||
new SelfSignedCertificateExtension();
|
||||
|
||||
@RegisterExtension
|
||||
@Order(2)
|
||||
public static ServerExtension server =
|
||||
public static final ServerExtension server =
|
||||
new ServerExtension() {
|
||||
@Override
|
||||
protected void configure(ServerBuilder sb) {
|
||||
sb.tls(certificate.certificateFile(), certificate.privateKeyFile());
|
||||
sb.tls(TlsKeyPair.of(certificate.privateKeyFile(), certificate.certificateFile()));
|
||||
|
||||
sb.service("/", (ctx, req) -> HttpResponse.of("Thanks for trusting me"));
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
"DockerId": "43481a6ce4842eec8fe72fc28500c6b52edcc0917f105b83379f88cac1ff3946",
|
||||
"Name": "nginx-curl",
|
||||
"DockerName": "ecs-nginx-5-nginx-curl-ccccb9f49db0dfe0d901",
|
||||
"Image": "nrdlngr/nginx-curl",
|
||||
"Image": "nrdlngr/nginx-curl:latest@sha256:8dc35e9386b5d280d285ae7a78d271a5d4a82106cb254fbed5fde4923faa8deb",
|
||||
"ImageID": "sha256:2e00ae64383cfc865ba0a2ba37f61b50a120d2d9378559dcd458dc0de47bc165",
|
||||
"Labels": {
|
||||
"com.amazonaws.ecs.cluster": "default",
|
||||
|
@ -28,4 +28,4 @@
|
|||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
{
|
||||
"Cluster": "default",
|
||||
"Cluster": "arn:aws:ecs:us-west-2:111122223333:cluster/default",
|
||||
"TaskARN": "arn:aws:ecs:us-west-2:111122223333:task/default/158d1c8083dd49d6b527399fd6414f5c",
|
||||
"Family": "curltest",
|
||||
"Revision": "26",
|
||||
|
|
|
@ -5,6 +5,7 @@ the [AWS X-Ray Trace Header propagation protocol](https://docs.aws.amazon.com/xr
|
|||
|
||||
## Component owners
|
||||
|
||||
- [William Armiros](https://github.com/willarmiros), AWS
|
||||
- [Lei Wang](https://github.com/wangzlei), AWS
|
||||
- [Prashant Srivastava](https://github.com/srprash), AWS
|
||||
|
||||
Learn more about component owners in [component_owners.yml](../.github/component_owners.yml).
|
||||
|
|
|
@ -10,4 +10,11 @@ otelJava.moduleName.set("io.opentelemetry.contrib.awsxray.propagator")
|
|||
dependencies {
|
||||
api("io.opentelemetry:opentelemetry-api")
|
||||
compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi")
|
||||
compileOnly("io.opentelemetry:opentelemetry-api-incubator")
|
||||
testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure")
|
||||
testImplementation("io.opentelemetry:opentelemetry-sdk-trace")
|
||||
testImplementation("io.opentelemetry:opentelemetry-sdk-testing")
|
||||
|
||||
testImplementation("io.opentelemetry:opentelemetry-sdk-extension-incubator")
|
||||
testImplementation("uk.org.webcompere:system-stubs-jupiter:2.0.3")
|
||||
}
|
||||
|
|
|
@ -0,0 +1,105 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray.propagator;
|
||||
|
||||
import io.opentelemetry.api.trace.Span;
|
||||
import io.opentelemetry.context.Context;
|
||||
import io.opentelemetry.context.propagation.TextMapGetter;
|
||||
import io.opentelemetry.context.propagation.TextMapPropagator;
|
||||
import io.opentelemetry.context.propagation.TextMapSetter;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import javax.annotation.Nullable;
|
||||
|
||||
/**
|
||||
* Implementation of the AWS X-Ray Trace Header propagation protocol but with special handling for
|
||||
* Lambda's {@code _X_AMZN_TRACE_ID} environment variable and {@code com.amazonaws.xray.traceHeader}
|
||||
* system property.
|
||||
*
|
||||
* <p>To register the X-Ray propagator together with default propagator when using the SDK:
|
||||
*
|
||||
* <pre>{@code
|
||||
* OpenTelemetrySdk.builder()
|
||||
* .setPropagators(
|
||||
* ContextPropagators.create(
|
||||
* TextMapPropagator.composite(
|
||||
* W3CTraceContextPropagator.getInstance(),
|
||||
* AwsXrayLambdaPropagator.getInstance())))
|
||||
* .build();
|
||||
* }</pre>
|
||||
*/
|
||||
public final class AwsXrayLambdaPropagator implements TextMapPropagator {
|
||||
|
||||
private static final String AWS_TRACE_HEADER_ENV_KEY = "_X_AMZN_TRACE_ID";
|
||||
private static final String AWS_TRACE_HEADER_PROP = "com.amazonaws.xray.traceHeader";
|
||||
private final AwsXrayPropagator xrayPropagator = AwsXrayPropagator.getInstance();
|
||||
private static final AwsXrayLambdaPropagator INSTANCE = new AwsXrayLambdaPropagator();
|
||||
|
||||
private AwsXrayLambdaPropagator() {
|
||||
// singleton
|
||||
}
|
||||
|
||||
public static AwsXrayLambdaPropagator getInstance() {
|
||||
return INSTANCE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> fields() {
|
||||
return xrayPropagator.fields();
|
||||
}
|
||||
|
||||
@Override
|
||||
public <C> void inject(Context context, @Nullable C carrier, TextMapSetter<C> setter) {
|
||||
xrayPropagator.inject(context, carrier, setter);
|
||||
}
|
||||
|
||||
@Override
|
||||
public <C> Context extract(Context context, @Nullable C carrier, TextMapGetter<C> getter) {
|
||||
Context xrayContext = xrayPropagator.extract(context, carrier, getter);
|
||||
|
||||
if (Span.fromContext(context).getSpanContext().isValid()) {
|
||||
return xrayContext;
|
||||
}
|
||||
|
||||
String traceHeader = System.getProperty(AWS_TRACE_HEADER_PROP);
|
||||
if (isEmptyOrNull(traceHeader)) {
|
||||
traceHeader = System.getenv(AWS_TRACE_HEADER_ENV_KEY);
|
||||
}
|
||||
if (isEmptyOrNull(traceHeader)) {
|
||||
return xrayContext;
|
||||
}
|
||||
return xrayPropagator.extract(
|
||||
xrayContext,
|
||||
Collections.singletonMap(AwsXrayPropagator.TRACE_HEADER_KEY, traceHeader),
|
||||
MapGetter.INSTANCE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AwsXrayLambdaPropagator";
|
||||
}
|
||||
|
||||
private static boolean isEmptyOrNull(@Nullable String value) {
|
||||
return value == null || value.isEmpty();
|
||||
}
|
||||
|
||||
private enum MapGetter implements TextMapGetter<Map<String, String>> {
|
||||
INSTANCE;
|
||||
|
||||
@Override
|
||||
public Set<String> keys(Map<String, String> map) {
|
||||
return map.keySet();
|
||||
}
|
||||
|
||||
@Override
|
||||
@Nullable
|
||||
public String get(@Nullable Map<String, String> map, String s) {
|
||||
return map == null ? null : map.get(s);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -5,9 +5,10 @@
|
|||
|
||||
package io.opentelemetry.contrib.awsxray.propagator;
|
||||
|
||||
import static io.opentelemetry.api.internal.OtelEncodingUtils.isValidBase16String;
|
||||
|
||||
import io.opentelemetry.api.baggage.Baggage;
|
||||
import io.opentelemetry.api.baggage.BaggageBuilder;
|
||||
import io.opentelemetry.api.baggage.BaggageEntry;
|
||||
import io.opentelemetry.api.internal.StringUtils;
|
||||
import io.opentelemetry.api.trace.Span;
|
||||
import io.opentelemetry.api.trace.SpanContext;
|
||||
|
@ -21,7 +22,7 @@ import io.opentelemetry.context.propagation.TextMapPropagator;
|
|||
import io.opentelemetry.context.propagation.TextMapSetter;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.Set;
|
||||
import java.util.logging.Logger;
|
||||
import javax.annotation.Nullable;
|
||||
|
||||
|
@ -38,7 +39,7 @@ import javax.annotation.Nullable;
|
|||
* ContextPropagators.create(
|
||||
* TextMapPropagator.composite(
|
||||
* W3CTraceContextPropagator.getInstance(),
|
||||
* AWSXrayPropagator.getInstance())))
|
||||
* AwsXrayPropagator.getInstance())))
|
||||
* .build();
|
||||
* }</pre>
|
||||
*/
|
||||
|
@ -68,6 +69,17 @@ public final class AwsXrayPropagator implements TextMapPropagator {
|
|||
private static final char IS_SAMPLED = '1';
|
||||
private static final char NOT_SAMPLED = '0';
|
||||
|
||||
private static final String LINEAGE_KEY = "Lineage";
|
||||
private static final char LINEAGE_DELIMITER = ':';
|
||||
private static final int LINEAGE_MAX_LENGTH = 18;
|
||||
private static final int LINEAGE_MIN_LENGTH = 12;
|
||||
private static final int LINEAGE_HASH_LENGTH = 8;
|
||||
private static final int LINEAGE_MAX_COUNTER1 = 32767;
|
||||
private static final int LINEAGE_MAX_COUNTER2 = 255;
|
||||
private static final int LINEAGE_MIN_COUNTER = 0;
|
||||
private static final String INVALID_LINEAGE = "-1:11111111:0";
|
||||
private static final int NUM_OF_LINEAGE_DELIMITERS = 2;
|
||||
|
||||
private static final List<String> FIELDS = Collections.singletonList(TRACE_HEADER_KEY);
|
||||
|
||||
private static final AwsXrayPropagator INSTANCE = new AwsXrayPropagator();
|
||||
|
@ -127,34 +139,19 @@ public final class AwsXrayPropagator implements TextMapPropagator {
|
|||
.append(samplingFlag);
|
||||
|
||||
Baggage baggage = Baggage.fromContext(context);
|
||||
// Truncate baggage to 256 chars per X-Ray spec.
|
||||
baggage.forEach(
|
||||
new BiConsumer<String, BaggageEntry>() {
|
||||
String lineageHeader = baggage.getEntryValue(LINEAGE_KEY);
|
||||
|
||||
private int baggageWrittenBytes;
|
||||
if (lineageHeader != null) {
|
||||
traceHeader
|
||||
.append(TRACE_HEADER_DELIMITER)
|
||||
.append(LINEAGE_KEY)
|
||||
.append(KV_DELIMITER)
|
||||
.append(lineageHeader);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(String key, BaggageEntry entry) {
|
||||
if (key.equals(TRACE_ID_KEY)
|
||||
|| key.equals(PARENT_ID_KEY)
|
||||
|| key.equals(SAMPLED_FLAG_KEY)) {
|
||||
return;
|
||||
}
|
||||
// Size is key/value pair, excludes delimiter.
|
||||
int size = key.length() + entry.getValue().length() + 1;
|
||||
if (baggageWrittenBytes + size > 256) {
|
||||
return;
|
||||
}
|
||||
traceHeader
|
||||
.append(TRACE_HEADER_DELIMITER)
|
||||
.append(key)
|
||||
.append(KV_DELIMITER)
|
||||
.append(entry.getValue());
|
||||
baggageWrittenBytes += size;
|
||||
}
|
||||
});
|
||||
|
||||
setter.set(carrier, TRACE_HEADER_KEY, traceHeader.toString());
|
||||
// add 256 character truncation
|
||||
String truncatedTraceHeader = traceHeader.substring(0, Math.min(traceHeader.length(), 256));
|
||||
setter.set(carrier, TRACE_HEADER_KEY, truncatedTraceHeader);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -169,6 +166,11 @@ public final class AwsXrayPropagator implements TextMapPropagator {
|
|||
return getContextFromHeader(context, carrier, getter);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AwsXrayPropagator";
|
||||
}
|
||||
|
||||
private static <C> Context getContextFromHeader(
|
||||
Context context, @Nullable C carrier, TextMapGetter<C> getter) {
|
||||
String traceHeader = getter.get(carrier, TRACE_HEADER_KEY);
|
||||
|
@ -178,10 +180,20 @@ public final class AwsXrayPropagator implements TextMapPropagator {
|
|||
|
||||
String traceId = TraceId.getInvalid();
|
||||
String spanId = SpanId.getInvalid();
|
||||
String lineageHeader;
|
||||
Boolean isSampled = false;
|
||||
|
||||
BaggageBuilder baggage = null;
|
||||
int baggageReadBytes = 0;
|
||||
Baggage contextBaggage = Baggage.fromContext(context);
|
||||
BaggageBuilder baggageBuilder = Baggage.builder();
|
||||
Set<String> baggageMap = contextBaggage.asMap().keySet();
|
||||
|
||||
// Copying baggage over to new Baggage object to add Lineage key
|
||||
for (String baggageKey : baggageMap) {
|
||||
String baggageValue = contextBaggage.getEntryValue(baggageKey);
|
||||
if (baggageValue != null) {
|
||||
baggageBuilder.put(baggageKey, baggageValue);
|
||||
}
|
||||
}
|
||||
|
||||
int pos = 0;
|
||||
while (pos < traceHeader.length()) {
|
||||
|
@ -210,12 +222,13 @@ public final class AwsXrayPropagator implements TextMapPropagator {
|
|||
spanId = parseSpanId(value);
|
||||
} else if (trimmedPart.startsWith(SAMPLED_FLAG_KEY)) {
|
||||
isSampled = parseTraceFlag(value);
|
||||
} else if (baggageReadBytes + trimmedPart.length() <= 256) {
|
||||
if (baggage == null) {
|
||||
baggage = Baggage.builder();
|
||||
} else if (trimmedPart.startsWith(LINEAGE_KEY)) {
|
||||
lineageHeader = parseLineageHeader(value);
|
||||
if (isValidLineage(lineageHeader)) {
|
||||
baggageBuilder.put(LINEAGE_KEY, lineageHeader);
|
||||
} else {
|
||||
logger.fine("Invalid Lineage header: " + value);
|
||||
}
|
||||
baggage.put(trimmedPart.substring(0, equalsIndex), value);
|
||||
baggageReadBytes += trimmedPart.length();
|
||||
}
|
||||
}
|
||||
if (isSampled == null) {
|
||||
|
@ -238,12 +251,17 @@ public final class AwsXrayPropagator implements TextMapPropagator {
|
|||
spanId,
|
||||
isSampled ? TraceFlags.getSampled() : TraceFlags.getDefault(),
|
||||
TraceState.getDefault());
|
||||
|
||||
if (spanContext.isValid()) {
|
||||
context = context.with(Span.wrap(spanContext));
|
||||
}
|
||||
if (baggage != null) {
|
||||
context = context.with(baggage.build());
|
||||
|
||||
Baggage baggage = baggageBuilder.build();
|
||||
|
||||
if (!baggage.isEmpty()) {
|
||||
context = context.with(baggage);
|
||||
}
|
||||
|
||||
return context;
|
||||
}
|
||||
|
||||
|
@ -290,7 +308,8 @@ public final class AwsXrayPropagator implements TextMapPropagator {
|
|||
int secondDelimiter = xrayTraceId.indexOf(TRACE_ID_DELIMITER, firstDelimiter + 2);
|
||||
if (firstDelimiter != TRACE_ID_DELIMITER_INDEX_1
|
||||
|| secondDelimiter == -1
|
||||
|| secondDelimiter > TRACE_ID_DELIMITER_INDEX_2) {
|
||||
|| secondDelimiter > TRACE_ID_DELIMITER_INDEX_2
|
||||
|| xrayTraceId.length() < secondDelimiter + 25) {
|
||||
return TraceId.getInvalid();
|
||||
}
|
||||
|
||||
|
@ -310,6 +329,31 @@ public final class AwsXrayPropagator implements TextMapPropagator {
|
|||
return xrayParentId;
|
||||
}
|
||||
|
||||
private static String parseLineageHeader(String xrayLineageHeader) {
|
||||
long numOfDelimiters = xrayLineageHeader.chars().filter(ch -> ch == LINEAGE_DELIMITER).count();
|
||||
|
||||
if (xrayLineageHeader.length() < LINEAGE_MIN_LENGTH
|
||||
|| xrayLineageHeader.length() > LINEAGE_MAX_LENGTH
|
||||
|| numOfDelimiters != NUM_OF_LINEAGE_DELIMITERS) {
|
||||
return INVALID_LINEAGE;
|
||||
}
|
||||
|
||||
return xrayLineageHeader;
|
||||
}
|
||||
|
||||
private static boolean isValidLineage(String key) {
|
||||
String[] split = key.split(String.valueOf(LINEAGE_DELIMITER));
|
||||
String hash = split[1];
|
||||
int counter1 = parseIntOrReturnNegative(split[0]);
|
||||
int counter2 = parseIntOrReturnNegative(split[2]);
|
||||
|
||||
boolean isHashValid = hash.length() == LINEAGE_HASH_LENGTH && isValidBase16String(hash);
|
||||
boolean isValidCounter2 = counter2 <= LINEAGE_MAX_COUNTER2 && counter2 >= LINEAGE_MIN_COUNTER;
|
||||
boolean isValidCounter1 = counter1 <= LINEAGE_MAX_COUNTER1 && counter1 >= LINEAGE_MIN_COUNTER;
|
||||
|
||||
return isHashValid && isValidCounter2 && isValidCounter1;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
private static Boolean parseTraceFlag(String xraySampledFlag) {
|
||||
if (xraySampledFlag.length() != SAMPLED_FLAG_LENGTH) {
|
||||
|
@ -326,4 +370,12 @@ public final class AwsXrayPropagator implements TextMapPropagator {
|
|||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private static int parseIntOrReturnNegative(String num) {
|
||||
try {
|
||||
return Integer.parseInt(num);
|
||||
} catch (NumberFormatException e) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,9 +3,10 @@
|
|||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray.propagator;
|
||||
package io.opentelemetry.contrib.awsxray.propagator.internal;
|
||||
|
||||
import io.opentelemetry.context.propagation.TextMapPropagator;
|
||||
import io.opentelemetry.contrib.awsxray.propagator.AwsXrayPropagator;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.ConfigurablePropagatorProvider;
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray.propagator.internal;
|
||||
|
||||
import io.opentelemetry.api.incubator.config.DeclarativeConfigProperties;
|
||||
import io.opentelemetry.context.propagation.TextMapPropagator;
|
||||
import io.opentelemetry.contrib.awsxray.propagator.AwsXrayPropagator;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider;
|
||||
|
||||
public class AwsXrayComponentProvider implements ComponentProvider<TextMapPropagator> {
|
||||
@Override
|
||||
public Class<TextMapPropagator> getType() {
|
||||
return TextMapPropagator.class;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return "xray";
|
||||
}
|
||||
|
||||
@Override
|
||||
public TextMapPropagator create(DeclarativeConfigProperties config) {
|
||||
return AwsXrayPropagator.getInstance();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray.propagator.internal;
|
||||
|
||||
import io.opentelemetry.api.incubator.config.DeclarativeConfigProperties;
|
||||
import io.opentelemetry.context.propagation.TextMapPropagator;
|
||||
import io.opentelemetry.contrib.awsxray.propagator.AwsXrayLambdaPropagator;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider;
|
||||
|
||||
public class AwsXrayLambdaComponentProvider implements ComponentProvider<TextMapPropagator> {
|
||||
@Override
|
||||
public Class<TextMapPropagator> getType() {
|
||||
return TextMapPropagator.class;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return "xray-lambda";
|
||||
}
|
||||
|
||||
@Override
|
||||
public TextMapPropagator create(DeclarativeConfigProperties config) {
|
||||
return AwsXrayLambdaPropagator.getInstance();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray.propagator.internal;
|
||||
|
||||
import io.opentelemetry.context.propagation.TextMapPropagator;
|
||||
import io.opentelemetry.contrib.awsxray.propagator.AwsXrayLambdaPropagator;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties;
|
||||
import io.opentelemetry.sdk.autoconfigure.spi.ConfigurablePropagatorProvider;
|
||||
|
||||
/**
|
||||
* A {@link ConfigurablePropagatorProvider} which allows enabling the {@link
|
||||
* AwsXrayLambdaPropagator} with the propagator name {@code xray-lambda}.
|
||||
*/
|
||||
public final class AwsXrayLambdaConfigurablePropagator implements ConfigurablePropagatorProvider {
|
||||
@Override
|
||||
public TextMapPropagator getPropagator(ConfigProperties config) {
|
||||
return AwsXrayLambdaPropagator.getInstance();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return "xray-lambda";
|
||||
}
|
||||
}
|
|
@ -1 +1,2 @@
|
|||
io.opentelemetry.contrib.awsxray.propagator.AwsConfigurablePropagator
|
||||
io.opentelemetry.contrib.awsxray.propagator.internal.AwsConfigurablePropagator
|
||||
io.opentelemetry.contrib.awsxray.propagator.internal.AwsXrayLambdaConfigurablePropagator
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
io.opentelemetry.contrib.awsxray.propagator.internal.AwsXrayComponentProvider
|
||||
io.opentelemetry.contrib.awsxray.propagator.internal.AwsXrayLambdaComponentProvider
|
|
@ -0,0 +1,78 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray.propagator;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import io.opentelemetry.api.baggage.propagation.W3CBaggagePropagator;
|
||||
import io.opentelemetry.api.trace.SpanContext;
|
||||
import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator;
|
||||
import io.opentelemetry.context.Context;
|
||||
import io.opentelemetry.context.propagation.TextMapPropagator;
|
||||
import java.util.LinkedHashMap;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
public class AwsXrayCompositePropagatorTest extends AwsXrayPropagatorTest {
|
||||
|
||||
@Override
|
||||
TextMapPropagator propagator() {
|
||||
return TextMapPropagator.composite(
|
||||
W3CBaggagePropagator.getInstance(),
|
||||
AwsXrayPropagator.getInstance(),
|
||||
W3CTraceContextPropagator.getInstance());
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_traceContextOverridesXray() {
|
||||
LinkedHashMap<String, String> carrier = new LinkedHashMap<>();
|
||||
String w3cTraceContextTraceId = "4bf92f3577b34da6a3ce929d0e0e4736";
|
||||
String w3cTraceContextSpanId = "00f067aa0ba902b7";
|
||||
String traceParent =
|
||||
String.format("00-%s-%s-01", w3cTraceContextTraceId, w3cTraceContextSpanId);
|
||||
String traceState = "rojo=00f067aa0ba902b7";
|
||||
String xrayTrace = String.format("Root=1-%s;Parent=%s;Sampled=0", TRACE_ID, SPAN_ID);
|
||||
|
||||
carrier.put("traceparent", traceParent);
|
||||
carrier.put("tracestate", traceState);
|
||||
carrier.put("X-Amzn-Trace-Id", xrayTrace);
|
||||
|
||||
SpanContext actualContext = getSpanContext(subject.extract(Context.current(), carrier, GETTER));
|
||||
|
||||
assertThat(actualContext.getTraceId()).isEqualTo(w3cTraceContextTraceId);
|
||||
assertThat(actualContext.getSpanId()).isEqualTo(w3cTraceContextSpanId);
|
||||
assertThat(actualContext.isSampled()).isEqualTo(true);
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_xrayOverridesTraceContext() {
|
||||
TextMapPropagator propagator =
|
||||
TextMapPropagator.composite(
|
||||
W3CBaggagePropagator.getInstance(),
|
||||
W3CTraceContextPropagator.getInstance(),
|
||||
AwsXrayPropagator.getInstance());
|
||||
|
||||
LinkedHashMap<String, String> carrier = new LinkedHashMap<>();
|
||||
String w3cTraceContextTraceId = "4bf92f3577b34da6a3ce929d0e0e4736";
|
||||
String w3cTraceContextSpanId = "00f067aa0ba902b7";
|
||||
String traceParent =
|
||||
String.format("00-%s-%s-01", w3cTraceContextTraceId, w3cTraceContextSpanId);
|
||||
String traceState = "rojo=00f067aa0ba902b7";
|
||||
String xrayTrace =
|
||||
String.format(
|
||||
"Root=1-%s;Parent=%s;Sampled=0", "8a3c60f7-d188f8fa79d48a391a778fa6", SPAN_ID);
|
||||
|
||||
carrier.put("traceparent", traceParent);
|
||||
carrier.put("tracestate", traceState);
|
||||
carrier.put("X-Amzn-Trace-Id", xrayTrace);
|
||||
|
||||
SpanContext actualContext =
|
||||
getSpanContext(propagator.extract(Context.current(), carrier, GETTER));
|
||||
|
||||
assertThat(actualContext.getTraceId()).isEqualTo(TRACE_ID);
|
||||
assertThat(actualContext.getSpanId()).isEqualTo(SPAN_ID);
|
||||
assertThat(actualContext.isSampled()).isEqualTo(false);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,141 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray.propagator;
|
||||
|
||||
import static io.opentelemetry.contrib.awsxray.propagator.AwsXrayPropagator.TRACE_HEADER_KEY;
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import io.opentelemetry.api.trace.Span;
|
||||
import io.opentelemetry.api.trace.SpanContext;
|
||||
import io.opentelemetry.api.trace.TraceFlags;
|
||||
import io.opentelemetry.api.trace.TraceState;
|
||||
import io.opentelemetry.api.trace.Tracer;
|
||||
import io.opentelemetry.context.Context;
|
||||
import io.opentelemetry.context.propagation.TextMapPropagator;
|
||||
import io.opentelemetry.sdk.trace.ReadableSpan;
|
||||
import io.opentelemetry.sdk.trace.SdkTracerProvider;
|
||||
import io.opentelemetry.sdk.trace.data.LinkData;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import uk.org.webcompere.systemstubs.environment.EnvironmentVariables;
|
||||
import uk.org.webcompere.systemstubs.jupiter.SystemStub;
|
||||
import uk.org.webcompere.systemstubs.jupiter.SystemStubsExtension;
|
||||
import uk.org.webcompere.systemstubs.properties.SystemProperties;
|
||||
|
||||
@ExtendWith(SystemStubsExtension.class)
|
||||
class AwsXrayLambdaPropagatorTest extends AwsXrayPropagatorTest {
|
||||
|
||||
@SystemStub final EnvironmentVariables environmentVariables = new EnvironmentVariables();
|
||||
@SystemStub final SystemProperties systemProperties = new SystemProperties();
|
||||
|
||||
private Tracer tracer;
|
||||
|
||||
@Override
|
||||
TextMapPropagator propagator() {
|
||||
return AwsXrayLambdaPropagator.getInstance();
|
||||
}
|
||||
|
||||
@BeforeEach
|
||||
public void setup() {
|
||||
tracer = SdkTracerProvider.builder().build().get("awsxray");
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_fromEnvironmentVariable() {
|
||||
environmentVariables.set(
|
||||
"_X_AMZN_TRACE_ID",
|
||||
"Root=1-00000001-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Foo=Bar");
|
||||
|
||||
assertThat(
|
||||
getSpanContext(propagator().extract(Context.current(), Collections.emptyMap(), GETTER)))
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
"00000001d188f8fa79d48a391a778fa6",
|
||||
SPAN_ID,
|
||||
TraceFlags.getSampled(),
|
||||
TraceState.getDefault()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_fromSystemProperty() {
|
||||
systemProperties.set(
|
||||
"com.amazonaws.xray.traceHeader",
|
||||
"Root=1-00000001-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Foo=Bar");
|
||||
|
||||
assertThat(
|
||||
getSpanContext(propagator().extract(Context.current(), Collections.emptyMap(), GETTER)))
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
"00000001d188f8fa79d48a391a778fa6",
|
||||
SPAN_ID,
|
||||
TraceFlags.getSampled(),
|
||||
TraceState.getDefault()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_systemPropertyBeforeEnvironmentVariable() {
|
||||
environmentVariables.set(
|
||||
"_X_AMZN_TRACE_ID",
|
||||
"Root=1-00000001-240000000000000000000001;Parent=1600000000000001;Sampled=1;Foo=Bar");
|
||||
systemProperties.set(
|
||||
"com.amazonaws.xray.traceHeader",
|
||||
"Root=1-00000002-240000000000000000000002;Parent=1600000000000002;Sampled=1;Foo=Baz");
|
||||
|
||||
assertThat(
|
||||
getSpanContext(propagator().extract(Context.current(), Collections.emptyMap(), GETTER)))
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
"00000002240000000000000000000002",
|
||||
"1600000000000002",
|
||||
TraceFlags.getSampled(),
|
||||
TraceState.getDefault()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void addLink_SystemProperty() {
|
||||
Map<String, String> carrier =
|
||||
Collections.singletonMap(
|
||||
TRACE_HEADER_KEY,
|
||||
"Root=1-00000001-240000000000000000000001;Parent=1600000000000001;Sampled=1");
|
||||
environmentVariables.set(
|
||||
"_X_AMZN_TRACE_ID",
|
||||
"Root=1-00000002-240000000000000000000002;Parent=1600000000000002;Sampled=1;Foo=Bar");
|
||||
systemProperties.set(
|
||||
"com.amazonaws.xray.traceHeader",
|
||||
"Root=1-00000003-240000000000000000000003;Parent=1600000000000003;Sampled=1;Foo=Baz");
|
||||
|
||||
Context extract = propagator().extract(Context.current(), carrier, GETTER);
|
||||
ReadableSpan span =
|
||||
(ReadableSpan)
|
||||
tracer
|
||||
.spanBuilder("test")
|
||||
.setParent(extract)
|
||||
.addLink(
|
||||
Span.fromContext(propagator().extract(extract, carrier, GETTER))
|
||||
.getSpanContext())
|
||||
.startSpan();
|
||||
assertThat(span.getParentSpanContext())
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
"00000003240000000000000000000003",
|
||||
"1600000000000003",
|
||||
TraceFlags.getSampled(),
|
||||
TraceState.getDefault()));
|
||||
|
||||
assertThat(span.toSpanData().getLinks())
|
||||
.isEqualTo(
|
||||
Collections.singletonList(
|
||||
LinkData.create(
|
||||
SpanContext.createFromRemoteParent(
|
||||
"00000001240000000000000000000001",
|
||||
"1600000000000001",
|
||||
TraceFlags.getSampled(),
|
||||
TraceState.getDefault()))));
|
||||
}
|
||||
}
|
|
@ -15,23 +15,26 @@ import io.opentelemetry.api.trace.TraceFlags;
|
|||
import io.opentelemetry.api.trace.TraceState;
|
||||
import io.opentelemetry.context.Context;
|
||||
import io.opentelemetry.context.propagation.TextMapGetter;
|
||||
import io.opentelemetry.context.propagation.TextMapPropagator;
|
||||
import io.opentelemetry.context.propagation.TextMapSetter;
|
||||
import java.util.Collections;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
import javax.annotation.Nullable;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.params.ParameterizedTest;
|
||||
import org.junit.jupiter.params.provider.Arguments;
|
||||
import org.junit.jupiter.params.provider.MethodSource;
|
||||
|
||||
class AwsXrayPropagatorTest {
|
||||
|
||||
private static final String TRACE_ID = "8a3c60f7d188f8fa79d48a391a778fa6";
|
||||
private static final String SPAN_ID = "53995c3f42cd8ad8";
|
||||
static final String TRACE_ID = "8a3c60f7d188f8fa79d48a391a778fa6";
|
||||
static final String SPAN_ID = "53995c3f42cd8ad8";
|
||||
|
||||
private static final TextMapSetter<Map<String, String>> setter = Map::put;
|
||||
private static final TextMapGetter<Map<String, String>> getter =
|
||||
static final TextMapSetter<Map<String, String>> SETTER = Map::put;
|
||||
static final TextMapGetter<Map<String, String>> GETTER =
|
||||
new TextMapGetter<Map<String, String>>() {
|
||||
@Override
|
||||
public Set<String> keys(Map<String, String> carrier) {
|
||||
|
@ -44,17 +47,27 @@ class AwsXrayPropagatorTest {
|
|||
return carrier.get(key);
|
||||
}
|
||||
};
|
||||
private final AwsXrayPropagator xrayPropagator = AwsXrayPropagator.getInstance();
|
||||
protected static final AwsXrayPropagator X_RAY = AwsXrayPropagator.getInstance();
|
||||
protected final TextMapPropagator subject = propagator();
|
||||
|
||||
TextMapPropagator propagator() {
|
||||
return AwsXrayPropagator.getInstance();
|
||||
}
|
||||
|
||||
@Test
|
||||
void fields_valid() {
|
||||
assertThat(subject.fields()).contains("X-Amzn-Trace-Id");
|
||||
}
|
||||
|
||||
@Test
|
||||
void inject_SampledContext() {
|
||||
Map<String, String> carrier = new LinkedHashMap<>();
|
||||
xrayPropagator.inject(
|
||||
subject.inject(
|
||||
withSpanContext(
|
||||
SpanContext.create(TRACE_ID, SPAN_ID, TraceFlags.getSampled(), TraceState.getDefault()),
|
||||
Context.current()),
|
||||
carrier,
|
||||
setter);
|
||||
SETTER);
|
||||
|
||||
assertThat(carrier)
|
||||
.containsEntry(
|
||||
|
@ -65,12 +78,12 @@ class AwsXrayPropagatorTest {
|
|||
@Test
|
||||
void inject_NotSampledContext() {
|
||||
Map<String, String> carrier = new LinkedHashMap<>();
|
||||
xrayPropagator.inject(
|
||||
subject.inject(
|
||||
withSpanContext(
|
||||
SpanContext.create(TRACE_ID, SPAN_ID, TraceFlags.getDefault(), TraceState.getDefault()),
|
||||
Context.current()),
|
||||
carrier,
|
||||
setter);
|
||||
SETTER);
|
||||
|
||||
assertThat(carrier)
|
||||
.containsEntry(
|
||||
|
@ -81,64 +94,44 @@ class AwsXrayPropagatorTest {
|
|||
@Test
|
||||
void inject_WithBaggage() {
|
||||
Map<String, String> carrier = new LinkedHashMap<>();
|
||||
xrayPropagator.inject(
|
||||
subject.inject(
|
||||
withSpanContext(
|
||||
SpanContext.create(
|
||||
TRACE_ID, SPAN_ID, TraceFlags.getDefault(), TraceState.getDefault()),
|
||||
Context.current())
|
||||
.with(
|
||||
Baggage.builder()
|
||||
.put("cat", "meow")
|
||||
.put("dog", "bark")
|
||||
.put("Root", "ignored")
|
||||
.put("Parent", "ignored")
|
||||
.put("Sampled", "ignored")
|
||||
.build()),
|
||||
.with(Baggage.builder().put("cat", "ignored").put("dog", "ignored").build()),
|
||||
carrier,
|
||||
setter);
|
||||
SETTER);
|
||||
|
||||
// all non-lineage baggage is dropped from trace header
|
||||
assertThat(carrier)
|
||||
.containsEntry(
|
||||
TRACE_HEADER_KEY,
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=0;"
|
||||
+ "cat=meow;dog=bark");
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=0");
|
||||
}
|
||||
|
||||
@Test
|
||||
void inject_WithBaggage_LimitTruncates() {
|
||||
void inject_WithLineage() {
|
||||
Map<String, String> carrier = new LinkedHashMap<>();
|
||||
// Limit is 256 characters for all baggage. We add a 254-character key/value pair and a
|
||||
// 3 character key value pair.
|
||||
String key1 = Stream.generate(() -> "a").limit(252).collect(Collectors.joining());
|
||||
String value1 = "a"; // 252 + 1 (=) + 1 = 254
|
||||
|
||||
String key2 = "b";
|
||||
String value2 = "b"; // 1 + 1 (=) + 1 = 3
|
||||
|
||||
Baggage baggage = Baggage.builder().put(key1, value1).put(key2, value2).build();
|
||||
|
||||
xrayPropagator.inject(
|
||||
subject.inject(
|
||||
withSpanContext(
|
||||
SpanContext.create(
|
||||
TRACE_ID, SPAN_ID, TraceFlags.getDefault(), TraceState.getDefault()),
|
||||
Context.current())
|
||||
.with(baggage),
|
||||
.with(Baggage.builder().put("Lineage", "32767:e65a2c4d:255").build()),
|
||||
carrier,
|
||||
setter);
|
||||
SETTER);
|
||||
|
||||
assertThat(carrier)
|
||||
.containsEntry(
|
||||
TRACE_HEADER_KEY,
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=0;"
|
||||
+ key1
|
||||
+ '='
|
||||
+ value1);
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=0;Lineage=32767:e65a2c4d:255");
|
||||
}
|
||||
|
||||
@Test
|
||||
void inject_WithTraceState() {
|
||||
Map<String, String> carrier = new LinkedHashMap<>();
|
||||
xrayPropagator.inject(
|
||||
subject.inject(
|
||||
withSpanContext(
|
||||
SpanContext.create(
|
||||
TRACE_ID,
|
||||
|
@ -147,7 +140,7 @@ class AwsXrayPropagatorTest {
|
|||
TraceState.builder().put("foo", "bar").build()),
|
||||
Context.current()),
|
||||
carrier,
|
||||
setter);
|
||||
SETTER);
|
||||
|
||||
// TODO: assert trace state when the propagator supports it, for general key/value pairs we are
|
||||
// mapping with baggage.
|
||||
|
@ -160,7 +153,7 @@ class AwsXrayPropagatorTest {
|
|||
@Test
|
||||
void inject_nullContext() {
|
||||
Map<String, String> carrier = new LinkedHashMap<>();
|
||||
xrayPropagator.inject(null, carrier, setter);
|
||||
subject.inject(null, carrier, SETTER);
|
||||
assertThat(carrier).isEmpty();
|
||||
}
|
||||
|
||||
|
@ -171,16 +164,14 @@ class AwsXrayPropagatorTest {
|
|||
withSpanContext(
|
||||
SpanContext.create(TRACE_ID, SPAN_ID, TraceFlags.getDefault(), TraceState.getDefault()),
|
||||
Context.current());
|
||||
xrayPropagator.inject(context, carrier, null);
|
||||
subject.inject(context, carrier, null);
|
||||
assertThat(carrier).isEmpty();
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_Nothing() {
|
||||
// Context remains untouched.
|
||||
assertThat(
|
||||
xrayPropagator.extract(
|
||||
Context.current(), Collections.<String, String>emptyMap(), getter))
|
||||
assertThat(subject.extract(Context.current(), Collections.<String, String>emptyMap(), GETTER))
|
||||
.isSameAs(Context.current());
|
||||
}
|
||||
|
||||
|
@ -191,7 +182,7 @@ class AwsXrayPropagatorTest {
|
|||
TRACE_HEADER_KEY,
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1");
|
||||
|
||||
assertThat(getSpanContext(xrayPropagator.extract(Context.current(), carrier, getter)))
|
||||
assertThat(getSpanContext(subject.extract(Context.current(), carrier, GETTER)))
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
TRACE_ID, SPAN_ID, TraceFlags.getSampled(), TraceState.getDefault()));
|
||||
|
@ -204,7 +195,7 @@ class AwsXrayPropagatorTest {
|
|||
TRACE_HEADER_KEY,
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=0");
|
||||
|
||||
assertThat(getSpanContext(xrayPropagator.extract(Context.current(), carrier, getter)))
|
||||
assertThat(getSpanContext(subject.extract(Context.current(), carrier, GETTER)))
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
TRACE_ID, SPAN_ID, TraceFlags.getDefault(), TraceState.getDefault()));
|
||||
|
@ -217,56 +208,90 @@ class AwsXrayPropagatorTest {
|
|||
TRACE_HEADER_KEY,
|
||||
"Parent=53995c3f42cd8ad8;Sampled=1;Root=1-8a3c60f7-d188f8fa79d48a391a778fa6");
|
||||
|
||||
assertThat(getSpanContext(xrayPropagator.extract(Context.current(), carrier, getter)))
|
||||
assertThat(getSpanContext(subject.extract(Context.current(), carrier, GETTER)))
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
TRACE_ID, SPAN_ID, TraceFlags.getSampled(), TraceState.getDefault()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_AdditionalFields() {
|
||||
void extract_WithLineage() {
|
||||
Map<String, String> carrier = new LinkedHashMap<>();
|
||||
carrier.put(
|
||||
TRACE_HEADER_KEY,
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Foo=Bar");
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Lineage=32767:e65a2c4d:255");
|
||||
|
||||
Context context = xrayPropagator.extract(Context.current(), carrier, getter);
|
||||
Context context = subject.extract(Context.current(), carrier, GETTER);
|
||||
assertThat(getSpanContext(context))
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
TRACE_ID, SPAN_ID, TraceFlags.getSampled(), TraceState.getDefault()));
|
||||
assertThat(Baggage.fromContext(context).getEntryValue("Foo")).isEqualTo("Bar");
|
||||
assertThat(Baggage.fromContext(context).getEntryValue("Lineage"))
|
||||
.isEqualTo("32767:e65a2c4d:255");
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_Baggage_LimitTruncates() {
|
||||
// Limit is 256 characters for all baggage. We add a 254-character key/value pair and a
|
||||
// 3 character key value pair.
|
||||
String key1 = Stream.generate(() -> "a").limit(252).collect(Collectors.joining());
|
||||
String value1 = "a"; // 252 + 1 (=) + 1 = 254
|
||||
|
||||
String key2 = "b";
|
||||
String value2 = "b"; // 1 + 1 (=) + 1 = 3
|
||||
|
||||
void extract_AddedLineagePreservesExistingBaggage() {
|
||||
Baggage expectedBaggage =
|
||||
Baggage.builder()
|
||||
.put("cat", "meow")
|
||||
.put("dog", "bark")
|
||||
.put("Lineage", "32767:e65a2c4d:255")
|
||||
.build();
|
||||
Map<String, String> carrier = new LinkedHashMap<>();
|
||||
carrier.put(
|
||||
TRACE_HEADER_KEY,
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;"
|
||||
+ key1
|
||||
+ '='
|
||||
+ value1
|
||||
+ ';'
|
||||
+ key2
|
||||
+ '='
|
||||
+ value2);
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Lineage=32767:e65a2c4d:255");
|
||||
|
||||
Context context = xrayPropagator.extract(Context.current(), carrier, getter);
|
||||
Context context =
|
||||
subject.extract(
|
||||
Context.current().with(Baggage.builder().put("cat", "meow").put("dog", "bark").build()),
|
||||
carrier,
|
||||
GETTER);
|
||||
assertThat(getSpanContext(context))
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
TRACE_ID, SPAN_ID, TraceFlags.getSampled(), TraceState.getDefault()));
|
||||
assertThat(Baggage.fromContext(context).getEntryValue(key1)).isEqualTo(value1);
|
||||
assertThat(Baggage.fromContext(context).getEntryValue(key2)).isNull();
|
||||
|
||||
assertThat(Baggage.fromContext(context).asMap()).isEqualTo(expectedBaggage.asMap());
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_inject_ValidTraceHeader() {
|
||||
Map<String, String> carrier1 = new LinkedHashMap<>();
|
||||
carrier1.put(
|
||||
TRACE_HEADER_KEY,
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Lineage=32767:e65a2c4d:255");
|
||||
|
||||
Context context = subject.extract(Context.current(), carrier1, GETTER);
|
||||
|
||||
// inject extracted trace context into new trace header
|
||||
Map<String, String> carrier2 = new LinkedHashMap<>();
|
||||
subject.inject(context, carrier2, SETTER);
|
||||
|
||||
assertThat(carrier2)
|
||||
.containsEntry(
|
||||
TRACE_HEADER_KEY,
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Lineage=32767:e65a2c4d:255");
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_inject_InvalidLineage() {
|
||||
Map<String, String> carrier1 = new LinkedHashMap<>();
|
||||
carrier1.put(
|
||||
TRACE_HEADER_KEY,
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Lineage=1:badc0de:13");
|
||||
|
||||
Context context = subject.extract(Context.current(), carrier1, GETTER);
|
||||
|
||||
// inject extracted trace context into new trace header
|
||||
Map<String, String> carrier2 = new LinkedHashMap<>();
|
||||
subject.inject(context, carrier2, SETTER);
|
||||
|
||||
assertThat(carrier2)
|
||||
.containsEntry(
|
||||
TRACE_HEADER_KEY,
|
||||
"Root=1-8a3c60f7-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1");
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -288,7 +313,7 @@ class AwsXrayPropagatorTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
void extract_InvalidTraceId_Size() {
|
||||
void extract_InvalidTraceId_Size_TooBig() {
|
||||
Map<String, String> invalidHeaders = new LinkedHashMap<>();
|
||||
invalidHeaders.put(
|
||||
TRACE_HEADER_KEY,
|
||||
|
@ -297,6 +322,16 @@ class AwsXrayPropagatorTest {
|
|||
verifyInvalidBehavior(invalidHeaders);
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_InvalidTraceId_Size_TooShort() {
|
||||
Map<String, String> invalidHeaders = new LinkedHashMap<>();
|
||||
invalidHeaders.put(
|
||||
TRACE_HEADER_KEY,
|
||||
"Root=1-64fbd5a9-2202432c9dfed25ae1e6996;Parent=53995c3f42cd8ad8;Sampled=0");
|
||||
|
||||
verifyInvalidBehavior(invalidHeaders);
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_InvalidSpanId() {
|
||||
Map<String, String> invalidHeaders = new LinkedHashMap<>();
|
||||
|
@ -357,15 +392,43 @@ class AwsXrayPropagatorTest {
|
|||
|
||||
private void verifyInvalidBehavior(Map<String, String> invalidHeaders) {
|
||||
Context input = Context.current();
|
||||
Context result = xrayPropagator.extract(input, invalidHeaders, getter);
|
||||
Context result = subject.extract(input, invalidHeaders, GETTER);
|
||||
assertThat(result).isSameAs(input);
|
||||
assertThat(getSpanContext(result)).isSameAs(SpanContext.getInvalid());
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource("providesBadLineages")
|
||||
void extract_invalidLineage(String lineage) {
|
||||
Map<String, String> carrier = new LinkedHashMap<>();
|
||||
carrier.put(
|
||||
TRACE_HEADER_KEY,
|
||||
String.format(
|
||||
"Root=2-1a2a3a4a-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Lineage=%s",
|
||||
lineage));
|
||||
Context context = subject.extract(Context.current(), carrier, GETTER);
|
||||
assertThat(Baggage.fromContext(context).getEntryValue("Lineage")).isNull();
|
||||
}
|
||||
|
||||
static Stream<Arguments> providesBadLineages() {
|
||||
return Stream.of(
|
||||
Arguments.of("1::"),
|
||||
Arguments.of("1"),
|
||||
Arguments.of(""),
|
||||
Arguments.of(":"),
|
||||
Arguments.of("::"),
|
||||
Arguments.of("1:badc0de:13"),
|
||||
Arguments.of(":fbadc0de:13"),
|
||||
Arguments.of("1:fbadc0de:"),
|
||||
Arguments.of("1::1"),
|
||||
Arguments.of("65535:fbadc0de:255"),
|
||||
Arguments.of("-213:e65a2c4d:255"),
|
||||
Arguments.of("213:e65a2c4d:-22"));
|
||||
}
|
||||
|
||||
@Test
|
||||
void extract_nullContext() {
|
||||
assertThat(xrayPropagator.extract(null, Collections.emptyMap(), getter))
|
||||
.isSameAs(Context.root());
|
||||
assertThat(subject.extract(null, Collections.emptyMap(), GETTER)).isSameAs(Context.root());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -374,7 +437,7 @@ class AwsXrayPropagatorTest {
|
|||
withSpanContext(
|
||||
SpanContext.create(TRACE_ID, SPAN_ID, TraceFlags.getDefault(), TraceState.getDefault()),
|
||||
Context.current());
|
||||
assertThat(xrayPropagator.extract(context, Collections.emptyMap(), null)).isSameAs(context);
|
||||
assertThat(subject.extract(context, Collections.emptyMap(), null)).isSameAs(context);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -384,7 +447,7 @@ class AwsXrayPropagatorTest {
|
|||
TRACE_HEADER_KEY,
|
||||
"Root=1-0-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Foo=Bar");
|
||||
|
||||
assertThat(getSpanContext(xrayPropagator.extract(Context.current(), carrier, getter)))
|
||||
assertThat(getSpanContext(subject.extract(Context.current(), carrier, GETTER)))
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
"00000000d188f8fa79d48a391a778fa6",
|
||||
|
@ -400,7 +463,7 @@ class AwsXrayPropagatorTest {
|
|||
TRACE_HEADER_KEY,
|
||||
"Root=1-1a-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Foo=Bar");
|
||||
|
||||
assertThat(getSpanContext(xrayPropagator.extract(Context.current(), carrier, getter)))
|
||||
assertThat(getSpanContext(subject.extract(Context.current(), carrier, GETTER)))
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
"0000001ad188f8fa79d48a391a778fa6",
|
||||
|
@ -416,7 +479,7 @@ class AwsXrayPropagatorTest {
|
|||
TRACE_HEADER_KEY,
|
||||
"Root=1-00000000-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Foo=Bar");
|
||||
|
||||
assertThat(getSpanContext(xrayPropagator.extract(Context.current(), carrier, getter)))
|
||||
assertThat(getSpanContext(subject.extract(Context.current(), carrier, GETTER)))
|
||||
.isEqualTo(
|
||||
SpanContext.createFromRemoteParent(
|
||||
"00000000d188f8fa79d48a391a778fa6",
|
||||
|
@ -432,7 +495,7 @@ class AwsXrayPropagatorTest {
|
|||
TRACE_HEADER_KEY,
|
||||
"Root=1-8a3c60f711-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=0");
|
||||
|
||||
assertThat(getSpanContext(xrayPropagator.extract(Context.current(), invalidHeaders, getter)))
|
||||
assertThat(getSpanContext(subject.extract(Context.current(), invalidHeaders, GETTER)))
|
||||
.isSameAs(SpanContext.getInvalid());
|
||||
}
|
||||
|
||||
|
@ -442,7 +505,7 @@ class AwsXrayPropagatorTest {
|
|||
invalidHeaders.put(
|
||||
TRACE_HEADER_KEY, "Root=1--d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=0");
|
||||
|
||||
assertThat(getSpanContext(xrayPropagator.extract(Context.current(), invalidHeaders, getter)))
|
||||
assertThat(getSpanContext(subject.extract(Context.current(), invalidHeaders, GETTER)))
|
||||
.isSameAs(SpanContext.getInvalid());
|
||||
}
|
||||
|
||||
|
@ -452,7 +515,7 @@ class AwsXrayPropagatorTest {
|
|||
invalidHeaders.put(
|
||||
TRACE_HEADER_KEY, "Root=1-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=0");
|
||||
|
||||
assertThat(getSpanContext(xrayPropagator.extract(Context.current(), invalidHeaders, getter)))
|
||||
assertThat(getSpanContext(subject.extract(Context.current(), invalidHeaders, GETTER)))
|
||||
.isSameAs(SpanContext.getInvalid());
|
||||
}
|
||||
|
||||
|
@ -463,7 +526,7 @@ class AwsXrayPropagatorTest {
|
|||
TRACE_HEADER_KEY,
|
||||
"Root=2-1a2a3a4a-d188f8fa79d48a391a778fa6;Parent=53995c3f42cd8ad8;Sampled=1;Foo=Bar");
|
||||
|
||||
assertThat(getSpanContext(xrayPropagator.extract(Context.current(), carrier, getter)))
|
||||
assertThat(getSpanContext(subject.extract(Context.current(), carrier, GETTER)))
|
||||
.isSameAs(SpanContext.getInvalid());
|
||||
}
|
||||
|
||||
|
@ -471,7 +534,7 @@ class AwsXrayPropagatorTest {
|
|||
return context.with(Span.wrap(spanContext));
|
||||
}
|
||||
|
||||
private static SpanContext getSpanContext(Context context) {
|
||||
SpanContext getSpanContext(Context context) {
|
||||
return Span.fromContext(context).getSpanContext();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray.propagator.internal;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import io.opentelemetry.context.propagation.TextMapPropagator;
|
||||
import io.opentelemetry.contrib.awsxray.propagator.AwsXrayLambdaPropagator;
|
||||
import io.opentelemetry.contrib.awsxray.propagator.AwsXrayPropagator;
|
||||
import io.opentelemetry.sdk.OpenTelemetrySdk;
|
||||
import io.opentelemetry.sdk.extension.incubator.fileconfig.DeclarativeConfiguration;
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
class AwsComponentProviderTest {
|
||||
|
||||
@Test
|
||||
void endToEnd() {
|
||||
String yaml =
|
||||
"file_format: 0.4\n"
|
||||
+ "propagator:\n"
|
||||
+ " composite:\n"
|
||||
+ " - xray:\n"
|
||||
+ " - xray-lambda:\n";
|
||||
|
||||
OpenTelemetrySdk openTelemetrySdk =
|
||||
DeclarativeConfiguration.parseAndCreate(
|
||||
new ByteArrayInputStream(yaml.getBytes(StandardCharsets.UTF_8)));
|
||||
TextMapPropagator expectedPropagator =
|
||||
TextMapPropagator.composite(
|
||||
AwsXrayPropagator.getInstance(), AwsXrayLambdaPropagator.getInstance());
|
||||
assertThat(openTelemetrySdk.getPropagators().getTextMapPropagator().toString())
|
||||
.isEqualTo(expectedPropagator.toString());
|
||||
}
|
||||
}
|
|
@ -4,6 +4,7 @@ This module contains a custom `IdGenerator` and `Sampler` for use with AWS X-Ray
|
|||
|
||||
## Component owners
|
||||
|
||||
- [William Armiros](https://github.com/willarmiros), AWS
|
||||
- [Lei Wang](https://github.com/wangzlei), AWS
|
||||
- [Prashant Srivastava](https://github.com/srprash), AWS
|
||||
|
||||
Learn more about component owners in [component_owners.yml](../.github/component_owners.yml).
|
||||
|
|
|
@ -13,9 +13,11 @@ dependencies {
|
|||
compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure")
|
||||
|
||||
implementation("com.squareup.okhttp3:okhttp")
|
||||
implementation("io.opentelemetry:opentelemetry-semconv")
|
||||
implementation("io.opentelemetry.semconv:opentelemetry-semconv")
|
||||
testImplementation("io.opentelemetry.semconv:opentelemetry-semconv-incubating")
|
||||
|
||||
annotationProcessor("com.google.auto.service:auto-service")
|
||||
testImplementation("com.google.auto.service:auto-service")
|
||||
compileOnly("com.google.auto.service:auto-service-annotations")
|
||||
|
||||
annotationProcessor("com.google.auto.value:auto-value")
|
||||
|
|
|
@ -5,11 +5,11 @@ extensions:
|
|||
health_check:
|
||||
|
||||
exporters:
|
||||
logging:
|
||||
debug:
|
||||
|
||||
service:
|
||||
extensions: [health_check]
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [awsxray]
|
||||
exporters: [logging]
|
||||
exporters: [debug]
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray;
|
||||
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.api.trace.SpanKind;
|
||||
import io.opentelemetry.api.trace.TraceState;
|
||||
import io.opentelemetry.context.Context;
|
||||
import io.opentelemetry.sdk.trace.data.LinkData;
|
||||
import io.opentelemetry.sdk.trace.samplers.Sampler;
|
||||
import io.opentelemetry.sdk.trace.samplers.SamplingDecision;
|
||||
import io.opentelemetry.sdk.trace.samplers.SamplingResult;
|
||||
import java.util.List;
|
||||
import javax.annotation.concurrent.Immutable;
|
||||
|
||||
/**
|
||||
* This sampler will return the sampling result of the provided {@link #rootSampler}, unless the
|
||||
* sampling result contains the sampling decision {@link SamplingDecision#DROP}, in which case, a
|
||||
* new sampling result will be returned that is functionally equivalent to the original, except that
|
||||
* it contains the sampling decision {@link SamplingDecision#RECORD_ONLY}. This ensures that all
|
||||
* spans are recorded, with no change to sampling.
|
||||
*
|
||||
* <p>The intended use case of this sampler is to provide a means of sending all spans to a
|
||||
* processor without having an impact on the sampling rate. This may be desirable if a user wishes
|
||||
* to count or otherwise measure all spans produced in a service, without incurring the cost of 100%
|
||||
* sampling.
|
||||
*/
|
||||
@Immutable
|
||||
public final class AlwaysRecordSampler implements Sampler {
|
||||
|
||||
private final Sampler rootSampler;
|
||||
|
||||
public static AlwaysRecordSampler create(Sampler rootSampler) {
|
||||
return new AlwaysRecordSampler(rootSampler);
|
||||
}
|
||||
|
||||
private AlwaysRecordSampler(Sampler rootSampler) {
|
||||
this.rootSampler = rootSampler;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SamplingResult shouldSample(
|
||||
Context parentContext,
|
||||
String traceId,
|
||||
String name,
|
||||
SpanKind spanKind,
|
||||
Attributes attributes,
|
||||
List<LinkData> parentLinks) {
|
||||
SamplingResult result =
|
||||
rootSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks);
|
||||
if (result.getDecision() == SamplingDecision.DROP) {
|
||||
result = wrapResultWithRecordOnlyResult(result);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDescription() {
|
||||
return "AlwaysRecordSampler{" + rootSampler.getDescription() + "}";
|
||||
}
|
||||
|
||||
private static SamplingResult wrapResultWithRecordOnlyResult(SamplingResult result) {
|
||||
return new SamplingResult() {
|
||||
@Override
|
||||
public SamplingDecision getDecision() {
|
||||
return SamplingDecision.RECORD_ONLY;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Attributes getAttributes() {
|
||||
return result.getAttributes();
|
||||
}
|
||||
|
||||
@Override
|
||||
public TraceState getUpdatedTraceState(TraceState parentTraceState) {
|
||||
return result.getUpdatedTraceState(parentTraceState);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
|
@ -0,0 +1,93 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray;
|
||||
|
||||
import io.opentelemetry.api.common.AttributeKey;
|
||||
import io.opentelemetry.api.trace.Span;
|
||||
import io.opentelemetry.api.trace.SpanContext;
|
||||
import io.opentelemetry.api.trace.SpanKind;
|
||||
import io.opentelemetry.context.Context;
|
||||
import io.opentelemetry.sdk.trace.ReadWriteSpan;
|
||||
import io.opentelemetry.sdk.trace.ReadableSpan;
|
||||
import io.opentelemetry.sdk.trace.SpanProcessor;
|
||||
import java.util.List;
|
||||
import javax.annotation.concurrent.Immutable;
|
||||
|
||||
/**
|
||||
* AttributePropagatingSpanProcessor handles the propagation of attributes from parent spans to
|
||||
* child spans, specified in {@link #attributesKeysToPropagate}. AttributePropagatingSpanProcessor
|
||||
* also propagates the parent span name to child spans, as a new attribute specified by {@link
|
||||
* #spanNamePropagationKey}. Span name propagation only starts from local root server/consumer
|
||||
* spans, but from there will be propagated to any descendant spans.
|
||||
*/
|
||||
@Immutable
|
||||
public final class AttributePropagatingSpanProcessor implements SpanProcessor {
|
||||
|
||||
private final AttributeKey<String> spanNamePropagationKey;
|
||||
private final List<AttributeKey<String>> attributesKeysToPropagate;
|
||||
|
||||
public static AttributePropagatingSpanProcessor create(
|
||||
AttributeKey<String> spanNamePropagationKey,
|
||||
List<AttributeKey<String>> attributesKeysToPropagate) {
|
||||
return new AttributePropagatingSpanProcessor(spanNamePropagationKey, attributesKeysToPropagate);
|
||||
}
|
||||
|
||||
private AttributePropagatingSpanProcessor(
|
||||
AttributeKey<String> spanNamePropagationKey,
|
||||
List<AttributeKey<String>> attributesKeysToPropagate) {
|
||||
this.spanNamePropagationKey = spanNamePropagationKey;
|
||||
this.attributesKeysToPropagate = attributesKeysToPropagate;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onStart(Context parentContext, ReadWriteSpan span) {
|
||||
Span parentSpan = Span.fromContextOrNull(parentContext);
|
||||
if (!(parentSpan instanceof ReadableSpan)) {
|
||||
return;
|
||||
}
|
||||
ReadableSpan parentReadableSpan = (ReadableSpan) parentSpan;
|
||||
|
||||
String spanNameToPropagate;
|
||||
if (isLocalRoot(parentReadableSpan.getParentSpanContext())
|
||||
&& isServerOrConsumer(parentReadableSpan)) {
|
||||
spanNameToPropagate = parentReadableSpan.getName();
|
||||
} else {
|
||||
spanNameToPropagate = parentReadableSpan.getAttribute(spanNamePropagationKey);
|
||||
}
|
||||
|
||||
if (spanNameToPropagate != null) {
|
||||
span.setAttribute(spanNamePropagationKey, spanNameToPropagate);
|
||||
}
|
||||
|
||||
for (AttributeKey<String> keyToPropagate : attributesKeysToPropagate) {
|
||||
String valueToPropagate = parentReadableSpan.getAttribute(keyToPropagate);
|
||||
if (valueToPropagate != null) {
|
||||
span.setAttribute(keyToPropagate, valueToPropagate);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static boolean isLocalRoot(SpanContext parentSpanContext) {
|
||||
return !parentSpanContext.isValid() || parentSpanContext.isRemote();
|
||||
}
|
||||
|
||||
private static boolean isServerOrConsumer(ReadableSpan span) {
|
||||
return span.getKind() == SpanKind.SERVER || span.getKind() == SpanKind.CONSUMER;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isStartRequired() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onEnd(ReadableSpan span) {}
|
||||
|
||||
@Override
|
||||
public boolean isEndRequired() {
|
||||
return false;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray;
|
||||
|
||||
import static java.util.Objects.requireNonNull;
|
||||
|
||||
import com.google.errorprone.annotations.CanIgnoreReturnValue;
|
||||
import io.opentelemetry.api.common.AttributeKey;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* AttributePropagatingSpanProcessorBuilder is used to construct a {@link
|
||||
* AttributePropagatingSpanProcessor}. If {@link #setSpanNamePropagationKey} or {@link
|
||||
* #setAttributesKeysToPropagate} are not invoked, the builder defaults to using specific {@link
|
||||
* AwsAttributeKeys} as propagation targets.
|
||||
*/
|
||||
public class AttributePropagatingSpanProcessorBuilder {
|
||||
|
||||
private AttributeKey<String> spanNamePropagationKey = AwsAttributeKeys.AWS_LOCAL_OPERATION;
|
||||
private List<AttributeKey<String>> attributesKeysToPropagate =
|
||||
Arrays.asList(AwsAttributeKeys.AWS_REMOTE_SERVICE, AwsAttributeKeys.AWS_REMOTE_OPERATION);
|
||||
|
||||
public static AttributePropagatingSpanProcessorBuilder create() {
|
||||
return new AttributePropagatingSpanProcessorBuilder();
|
||||
}
|
||||
|
||||
private AttributePropagatingSpanProcessorBuilder() {}
|
||||
|
||||
@CanIgnoreReturnValue
|
||||
public AttributePropagatingSpanProcessorBuilder setSpanNamePropagationKey(
|
||||
AttributeKey<String> spanNamePropagationKey) {
|
||||
requireNonNull(spanNamePropagationKey, "spanNamePropagationKey");
|
||||
this.spanNamePropagationKey = spanNamePropagationKey;
|
||||
return this;
|
||||
}
|
||||
|
||||
@CanIgnoreReturnValue
|
||||
public AttributePropagatingSpanProcessorBuilder setAttributesKeysToPropagate(
|
||||
List<AttributeKey<String>> attributesKeysToPropagate) {
|
||||
requireNonNull(attributesKeysToPropagate, "attributesKeysToPropagate");
|
||||
this.attributesKeysToPropagate = Collections.unmodifiableList(attributesKeysToPropagate);
|
||||
return this;
|
||||
}
|
||||
|
||||
public AttributePropagatingSpanProcessor build() {
|
||||
return AttributePropagatingSpanProcessor.create(
|
||||
spanNamePropagationKey, attributesKeysToPropagate);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray;
|
||||
|
||||
import io.opentelemetry.api.common.AttributeKey;
|
||||
|
||||
/** Utility class holding attribute keys with special meaning to AWS components */
|
||||
final class AwsAttributeKeys {
|
||||
|
||||
private AwsAttributeKeys() {}
|
||||
|
||||
static final AttributeKey<String> AWS_SPAN_KIND = AttributeKey.stringKey("aws.span.kind");
|
||||
|
||||
static final AttributeKey<String> AWS_LOCAL_SERVICE = AttributeKey.stringKey("aws.local.service");
|
||||
|
||||
static final AttributeKey<String> AWS_LOCAL_OPERATION =
|
||||
AttributeKey.stringKey("aws.local.operation");
|
||||
|
||||
static final AttributeKey<String> AWS_REMOTE_SERVICE =
|
||||
AttributeKey.stringKey("aws.remote.service");
|
||||
|
||||
static final AttributeKey<String> AWS_REMOTE_OPERATION =
|
||||
AttributeKey.stringKey("aws.remote.operation");
|
||||
|
||||
static final AttributeKey<String> AWS_REMOTE_TARGET = AttributeKey.stringKey("aws.remote.target");
|
||||
|
||||
// use the same AWS Resource attribute name defined by OTel java auto-instr for aws_sdk_v_1_1
|
||||
// TODO: all AWS specific attributes should be defined in semconv package and reused cross all
|
||||
// otel packages. Related sim -
|
||||
// https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/8710
|
||||
|
||||
static final AttributeKey<String> AWS_BUCKET_NAME = AttributeKey.stringKey("aws.bucket.name");
|
||||
static final AttributeKey<String> AWS_QUEUE_NAME = AttributeKey.stringKey("aws.queue.name");
|
||||
static final AttributeKey<String> AWS_STREAM_NAME = AttributeKey.stringKey("aws.stream.name");
|
||||
static final AttributeKey<String> AWS_TABLE_NAME = AttributeKey.stringKey("aws.table.name");
|
||||
}
|
|
@ -0,0 +1,392 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray;
|
||||
|
||||
import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_BUCKET_NAME;
|
||||
import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_LOCAL_OPERATION;
|
||||
import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_LOCAL_SERVICE;
|
||||
import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_QUEUE_NAME;
|
||||
import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_REMOTE_OPERATION;
|
||||
import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_REMOTE_SERVICE;
|
||||
import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_REMOTE_TARGET;
|
||||
import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_SPAN_KIND;
|
||||
import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_STREAM_NAME;
|
||||
import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_TABLE_NAME;
|
||||
import static io.opentelemetry.semconv.ServiceAttributes.SERVICE_NAME;
|
||||
|
||||
import io.opentelemetry.api.common.AttributeKey;
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.api.common.AttributesBuilder;
|
||||
import io.opentelemetry.api.trace.SpanKind;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.sdk.trace.data.SpanData;
|
||||
import io.opentelemetry.semconv.ServiceAttributes;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.Optional;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
|
||||
/**
|
||||
* AwsMetricAttributeGenerator generates very specific metric attributes based on low-cardinality
|
||||
* span and resource attributes. If such attributes are not present, we fallback to default values.
|
||||
*
|
||||
* <p>The goal of these particular metric attributes is to get metrics for incoming and outgoing
|
||||
* traffic for a service. Namely, {@link SpanKind#SERVER} and {@link SpanKind#CONSUMER} spans
|
||||
* represent "incoming" traffic, {@link SpanKind#CLIENT} and {@link SpanKind#PRODUCER} spans
|
||||
* represent "outgoing" traffic, and {@link SpanKind#INTERNAL} spans are ignored.
|
||||
*/
|
||||
final class AwsMetricAttributeGenerator implements MetricAttributeGenerator {
|
||||
|
||||
private static final Logger logger =
|
||||
Logger.getLogger(AwsMetricAttributeGenerator.class.getName());
|
||||
|
||||
// Special SERVICE attribute value if GRAPHQL_OPERATION_TYPE attribute key is present.
|
||||
private static final String GRAPHQL = "graphql";
|
||||
|
||||
// Default attribute values if no valid span attribute value is identified
|
||||
private static final String UNKNOWN_SERVICE = "UnknownService";
|
||||
private static final String UNKNOWN_OPERATION = "UnknownOperation";
|
||||
private static final String UNKNOWN_REMOTE_SERVICE = "UnknownRemoteService";
|
||||
private static final String UNKNOWN_REMOTE_OPERATION = "UnknownRemoteOperation";
|
||||
|
||||
// copied from DbIncubatingAttributes
|
||||
private static final AttributeKey<String> DB_OPERATION = AttributeKey.stringKey("db.operation");
|
||||
private static final AttributeKey<String> DB_SYSTEM = AttributeKey.stringKey("db.system");
|
||||
// copied from FaasIncubatingAttributes
|
||||
private static final AttributeKey<String> FAAS_INVOKED_NAME =
|
||||
AttributeKey.stringKey("faas.invoked_name");
|
||||
private static final AttributeKey<String> FAAS_TRIGGER = AttributeKey.stringKey("faas.trigger");
|
||||
// copied from GraphqlIncubatingAttributes
|
||||
private static final AttributeKey<String> GRAPHQL_OPERATION_TYPE =
|
||||
AttributeKey.stringKey("graphql.operation.type");
|
||||
// copied from HttpIncubatingAttributes
|
||||
private static final AttributeKey<String> HTTP_METHOD = AttributeKey.stringKey("http.method");
|
||||
private static final AttributeKey<String> HTTP_TARGET = AttributeKey.stringKey("http.target");
|
||||
private static final AttributeKey<String> HTTP_URL = AttributeKey.stringKey("http.url");
|
||||
// copied from MessagingIncubatingAttributes
|
||||
private static final AttributeKey<String> MESSAGING_OPERATION =
|
||||
AttributeKey.stringKey("messaging.operation");
|
||||
private static final AttributeKey<String> MESSAGING_SYSTEM =
|
||||
AttributeKey.stringKey("messaging.system");
|
||||
// copied from NetIncubatingAttributes
|
||||
private static final AttributeKey<String> NET_PEER_NAME = AttributeKey.stringKey("net.peer.name");
|
||||
private static final AttributeKey<Long> NET_PEER_PORT = AttributeKey.longKey("net.peer.port");
|
||||
private static final AttributeKey<String> NET_SOCK_PEER_ADDR =
|
||||
AttributeKey.stringKey("net.sock.peer.addr");
|
||||
private static final AttributeKey<Long> NET_SOCK_PEER_PORT =
|
||||
AttributeKey.longKey("net.sock.peer.port");
|
||||
// copied from PeerIncubatingAttributes
|
||||
private static final AttributeKey<String> PEER_SERVICE = AttributeKey.stringKey("peer.service");
|
||||
// copied from RpcIncubatingAttributes
|
||||
private static final AttributeKey<String> RPC_METHOD = AttributeKey.stringKey("rpc.method");
|
||||
private static final AttributeKey<String> RPC_SERVICE = AttributeKey.stringKey("rpc.service");
|
||||
|
||||
@Override
|
||||
public Attributes generateMetricAttributesFromSpan(SpanData span, Resource resource) {
|
||||
AttributesBuilder builder = Attributes.builder();
|
||||
switch (span.getKind()) {
|
||||
case CONSUMER:
|
||||
case SERVER:
|
||||
setService(resource, span, builder);
|
||||
setIngressOperation(span, builder);
|
||||
setSpanKind(span, builder);
|
||||
break;
|
||||
case PRODUCER:
|
||||
case CLIENT:
|
||||
setService(resource, span, builder);
|
||||
setEgressOperation(span, builder);
|
||||
setRemoteServiceAndOperation(span, builder);
|
||||
setRemoteTarget(span, builder);
|
||||
setSpanKind(span, builder);
|
||||
break;
|
||||
default:
|
||||
// Add no attributes, signalling no metrics should be emitted.
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
private static void setRemoteTarget(SpanData span, AttributesBuilder builder) {
|
||||
Optional<String> remoteTarget = getRemoteTarget(span);
|
||||
remoteTarget.ifPresent(s -> builder.put(AWS_REMOTE_TARGET, s));
|
||||
}
|
||||
|
||||
/**
|
||||
* RemoteTarget attribute {@link AwsAttributeKeys#AWS_REMOTE_TARGET} is used to store the resource
|
||||
* name of the remote invokes, such as S3 bucket name, mysql table name, etc. TODO: currently only
|
||||
* support AWS resource name, will be extended to support the general remote targets, such as
|
||||
* ActiveMQ name, etc.
|
||||
*/
|
||||
private static Optional<String> getRemoteTarget(SpanData span) {
|
||||
if (isKeyPresent(span, AWS_BUCKET_NAME)) {
|
||||
return Optional.ofNullable(span.getAttributes().get(AWS_BUCKET_NAME));
|
||||
} else if (isKeyPresent(span, AWS_QUEUE_NAME)) {
|
||||
return Optional.ofNullable(span.getAttributes().get(AWS_QUEUE_NAME));
|
||||
} else if (isKeyPresent(span, AWS_STREAM_NAME)) {
|
||||
return Optional.ofNullable(span.getAttributes().get(AWS_STREAM_NAME));
|
||||
} else if (isKeyPresent(span, AWS_TABLE_NAME)) {
|
||||
return Optional.ofNullable(span.getAttributes().get(AWS_TABLE_NAME));
|
||||
}
|
||||
return Optional.empty();
|
||||
}
|
||||
|
||||
/** Service is always derived from {@link ServiceAttributes#SERVICE_NAME} */
|
||||
private static void setService(Resource resource, SpanData span, AttributesBuilder builder) {
|
||||
String service = resource.getAttribute(SERVICE_NAME);
|
||||
if (service == null) {
|
||||
logUnknownAttribute(AWS_LOCAL_SERVICE, span);
|
||||
service = UNKNOWN_SERVICE;
|
||||
}
|
||||
builder.put(AWS_LOCAL_SERVICE, service);
|
||||
}
|
||||
|
||||
/**
|
||||
* Ingress operation (i.e. operation for Server and Consumer spans) will be generated from
|
||||
* "http.method + http.target/with the first API path parameter" if the default span name equals
|
||||
* null, UnknownOperation or http.method value.
|
||||
*/
|
||||
private static void setIngressOperation(SpanData span, AttributesBuilder builder) {
|
||||
String operation;
|
||||
if (!isValidOperation(span)) {
|
||||
operation = generateIngressOperation(span);
|
||||
} else {
|
||||
operation = span.getName();
|
||||
}
|
||||
if (operation.equals(UNKNOWN_OPERATION)) {
|
||||
logUnknownAttribute(AWS_LOCAL_OPERATION, span);
|
||||
}
|
||||
builder.put(AWS_LOCAL_OPERATION, operation);
|
||||
}
|
||||
|
||||
/**
|
||||
* When Span name is null, UnknownOperation or HttpMethod value, it will be treated as invalid
|
||||
* local operation value that needs to be further processed
|
||||
*/
|
||||
private static boolean isValidOperation(SpanData span) {
|
||||
String operation = span.getName();
|
||||
if (operation == null || operation.equals(UNKNOWN_OPERATION)) {
|
||||
return false;
|
||||
}
|
||||
if (isKeyPresent(span, HTTP_METHOD)) {
|
||||
String httpMethod = span.getAttributes().get(HTTP_METHOD);
|
||||
return !operation.equals(httpMethod);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Egress operation (i.e. operation for Client and Producer spans) is always derived from a
|
||||
* special span attribute, {@link AwsAttributeKeys#AWS_LOCAL_OPERATION}. This attribute is
|
||||
* generated with a separate SpanProcessor, {@link AttributePropagatingSpanProcessor}
|
||||
*/
|
||||
private static void setEgressOperation(SpanData span, AttributesBuilder builder) {
|
||||
String operation = span.getAttributes().get(AWS_LOCAL_OPERATION);
|
||||
if (operation == null) {
|
||||
logUnknownAttribute(AWS_LOCAL_OPERATION, span);
|
||||
operation = UNKNOWN_OPERATION;
|
||||
}
|
||||
builder.put(AWS_LOCAL_OPERATION, operation);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remote attributes (only for Client and Producer spans) are generated based on low-cardinality
|
||||
* span attributes, in priority order.
|
||||
*
|
||||
* <p>The first priority is the AWS Remote attributes, which are generated from manually
|
||||
* instrumented span attributes, and are clear indications of customer intent. If AWS Remote
|
||||
* attributes are not present, the next highest priority span attribute is Peer Service, which is
|
||||
* also a reliable indicator of customer intent. If this is set, it will override
|
||||
* AWS_REMOTE_SERVICE identified from any other span attribute, other than AWS Remote attributes.
|
||||
*
|
||||
* <p>After this, we look for the following low-cardinality span attributes that can be used to
|
||||
* determine the remote metric attributes:
|
||||
*
|
||||
* <ul>
|
||||
* <li>RPC
|
||||
* <li>DB
|
||||
* <li>FAAS
|
||||
* <li>Messaging
|
||||
* <li>GraphQL - Special case, if {@link #GRAPHQL_OPERATION_TYPE} is present, we use it for
|
||||
* RemoteOperation and set RemoteService to {@link #GRAPHQL}.
|
||||
* </ul>
|
||||
*
|
||||
* <p>In each case, these span attributes were selected from the OpenTelemetry trace semantic
|
||||
* convention specifications as they adhere to the three following criteria:
|
||||
*
|
||||
* <ul>
|
||||
* <li>Attributes are meaningfully indicative of remote service/operation names.
|
||||
* <li>Attributes are defined in the specification to be low cardinality, usually with a low-
|
||||
* cardinality list of values.
|
||||
* <li>Attributes are confirmed to have low-cardinality values, based on code analysis.
|
||||
* </ul>
|
||||
*
|
||||
* if the selected attributes are still producing the UnknownRemoteService or
|
||||
* UnknownRemoteOperation, `net.peer.name`, `net.peer.port`, `net.peer.sock.addr` and
|
||||
* `net.peer.sock.port` will be used to derive the RemoteService. And `http.method` and `http.url`
|
||||
* will be used to derive the RemoteOperation.
|
||||
*/
|
||||
private static void setRemoteServiceAndOperation(SpanData span, AttributesBuilder builder) {
|
||||
String remoteService = UNKNOWN_REMOTE_SERVICE;
|
||||
String remoteOperation = UNKNOWN_REMOTE_OPERATION;
|
||||
if (isKeyPresent(span, AWS_REMOTE_SERVICE) || isKeyPresent(span, AWS_REMOTE_OPERATION)) {
|
||||
remoteService = getRemoteService(span, AWS_REMOTE_SERVICE);
|
||||
remoteOperation = getRemoteOperation(span, AWS_REMOTE_OPERATION);
|
||||
} else if (isKeyPresent(span, RPC_SERVICE) || isKeyPresent(span, RPC_METHOD)) {
|
||||
remoteService = getRemoteService(span, RPC_SERVICE);
|
||||
remoteOperation = getRemoteOperation(span, RPC_METHOD);
|
||||
} else if (isKeyPresent(span, DB_SYSTEM) || isKeyPresent(span, DB_OPERATION)) {
|
||||
remoteService = getRemoteService(span, DB_SYSTEM);
|
||||
remoteOperation = getRemoteOperation(span, DB_OPERATION);
|
||||
} else if (isKeyPresent(span, FAAS_INVOKED_NAME) || isKeyPresent(span, FAAS_TRIGGER)) {
|
||||
remoteService = getRemoteService(span, FAAS_INVOKED_NAME);
|
||||
remoteOperation = getRemoteOperation(span, FAAS_TRIGGER);
|
||||
} else if (isKeyPresent(span, MESSAGING_SYSTEM) || isKeyPresent(span, MESSAGING_OPERATION)) {
|
||||
remoteService = getRemoteService(span, MESSAGING_SYSTEM);
|
||||
remoteOperation = getRemoteOperation(span, MESSAGING_OPERATION);
|
||||
} else if (isKeyPresent(span, GRAPHQL_OPERATION_TYPE)) {
|
||||
remoteService = GRAPHQL;
|
||||
remoteOperation = getRemoteOperation(span, GRAPHQL_OPERATION_TYPE);
|
||||
}
|
||||
|
||||
// Peer service takes priority as RemoteService over everything but AWS Remote.
|
||||
if (isKeyPresent(span, PEER_SERVICE) && !isKeyPresent(span, AWS_REMOTE_SERVICE)) {
|
||||
remoteService = getRemoteService(span, PEER_SERVICE);
|
||||
}
|
||||
|
||||
// try to derive RemoteService and RemoteOperation from the other related attributes
|
||||
if (remoteService.equals(UNKNOWN_REMOTE_SERVICE)) {
|
||||
remoteService = generateRemoteService(span);
|
||||
}
|
||||
if (remoteOperation.equals(UNKNOWN_REMOTE_OPERATION)) {
|
||||
remoteOperation = generateRemoteOperation(span);
|
||||
}
|
||||
|
||||
builder.put(AWS_REMOTE_SERVICE, remoteService);
|
||||
builder.put(AWS_REMOTE_OPERATION, remoteOperation);
|
||||
}
|
||||
|
||||
/**
|
||||
* When span name is not meaningful(null, unknown or http_method value) as operation name for http
|
||||
* use cases. Will try to extract the operation name from http target string
|
||||
*/
|
||||
private static String generateIngressOperation(SpanData span) {
|
||||
String operation = UNKNOWN_OPERATION;
|
||||
if (isKeyPresent(span, HTTP_TARGET)) {
|
||||
String httpTarget = span.getAttributes().get(HTTP_TARGET);
|
||||
// get the first part from API path string as operation value
|
||||
// the more levels/parts we get from API path the higher chance for getting high cardinality
|
||||
// data
|
||||
if (httpTarget != null) {
|
||||
operation = extractApiPathValue(httpTarget);
|
||||
if (isKeyPresent(span, HTTP_METHOD)) {
|
||||
String httpMethod = span.getAttributes().get(HTTP_METHOD);
|
||||
if (httpMethod != null) {
|
||||
operation = httpMethod + " " + operation;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return operation;
|
||||
}
|
||||
|
||||
/**
|
||||
* When the remote call operation is undetermined for http use cases, will try to extract the
|
||||
* remote operation name from http url string
|
||||
*/
|
||||
private static String generateRemoteOperation(SpanData span) {
|
||||
String remoteOperation = UNKNOWN_REMOTE_OPERATION;
|
||||
if (isKeyPresent(span, HTTP_URL)) {
|
||||
String httpUrl = span.getAttributes().get(HTTP_URL);
|
||||
try {
|
||||
URL url;
|
||||
if (httpUrl != null) {
|
||||
url = new URL(httpUrl);
|
||||
remoteOperation = extractApiPathValue(url.getPath());
|
||||
}
|
||||
} catch (MalformedURLException e) {
|
||||
logger.log(Level.FINEST, "invalid http.url attribute: ", httpUrl);
|
||||
}
|
||||
}
|
||||
if (isKeyPresent(span, HTTP_METHOD)) {
|
||||
String httpMethod = span.getAttributes().get(HTTP_METHOD);
|
||||
remoteOperation = httpMethod + " " + remoteOperation;
|
||||
}
|
||||
if (remoteOperation.equals(UNKNOWN_REMOTE_OPERATION)) {
|
||||
logUnknownAttribute(AWS_REMOTE_OPERATION, span);
|
||||
}
|
||||
return remoteOperation;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the first part from API http target if it exists
|
||||
*
|
||||
* @param httpTarget http request target string value. Eg, /payment/1234
|
||||
* @return the first part from the http target. Eg, /payment
|
||||
*/
|
||||
private static String extractApiPathValue(String httpTarget) {
|
||||
if (httpTarget == null || httpTarget.isEmpty()) {
|
||||
return "/";
|
||||
}
|
||||
String[] paths = httpTarget.split("/");
|
||||
if (paths.length > 1) {
|
||||
return "/" + paths[1];
|
||||
}
|
||||
return "/";
|
||||
}
|
||||
|
||||
private static String generateRemoteService(SpanData span) {
|
||||
String remoteService = UNKNOWN_REMOTE_SERVICE;
|
||||
if (isKeyPresent(span, NET_PEER_NAME)) {
|
||||
remoteService = getRemoteService(span, NET_PEER_NAME);
|
||||
if (isKeyPresent(span, NET_PEER_PORT)) {
|
||||
Long port = span.getAttributes().get(NET_PEER_PORT);
|
||||
remoteService += ":" + port;
|
||||
}
|
||||
} else if (isKeyPresent(span, NET_SOCK_PEER_ADDR)) {
|
||||
remoteService = getRemoteService(span, NET_SOCK_PEER_ADDR);
|
||||
if (isKeyPresent(span, NET_SOCK_PEER_PORT)) {
|
||||
Long port = span.getAttributes().get(NET_SOCK_PEER_PORT);
|
||||
remoteService += ":" + port;
|
||||
}
|
||||
} else {
|
||||
logUnknownAttribute(AWS_REMOTE_SERVICE, span);
|
||||
}
|
||||
return remoteService;
|
||||
}
|
||||
|
||||
/** Span kind is needed for differentiating metrics in the EMF exporter */
|
||||
private static void setSpanKind(SpanData span, AttributesBuilder builder) {
|
||||
String spanKind = span.getKind().name();
|
||||
builder.put(AWS_SPAN_KIND, spanKind);
|
||||
}
|
||||
|
||||
private static boolean isKeyPresent(SpanData span, AttributeKey<?> key) {
|
||||
return span.getAttributes().get(key) != null;
|
||||
}
|
||||
|
||||
private static String getRemoteService(SpanData span, AttributeKey<String> remoteServiceKey) {
|
||||
String remoteService = span.getAttributes().get(remoteServiceKey);
|
||||
if (remoteService == null) {
|
||||
remoteService = UNKNOWN_REMOTE_SERVICE;
|
||||
}
|
||||
return remoteService;
|
||||
}
|
||||
|
||||
private static String getRemoteOperation(SpanData span, AttributeKey<String> remoteOperationKey) {
|
||||
String remoteOperation = span.getAttributes().get(remoteOperationKey);
|
||||
if (remoteOperation == null) {
|
||||
remoteOperation = UNKNOWN_REMOTE_OPERATION;
|
||||
}
|
||||
return remoteOperation;
|
||||
}
|
||||
|
||||
private static void logUnknownAttribute(AttributeKey<String> attributeKey, SpanData span) {
|
||||
String[] params = {
|
||||
attributeKey.getKey(), span.getKind().name(), span.getSpanContext().getSpanId()
|
||||
};
|
||||
logger.log(Level.FINEST, "No valid {0} value found for {1} span {2}", params);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,122 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray;
|
||||
|
||||
import io.opentelemetry.api.common.AttributeKey;
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.sdk.common.CompletableResultCode;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.sdk.trace.data.DelegatingSpanData;
|
||||
import io.opentelemetry.sdk.trace.data.SpanData;
|
||||
import io.opentelemetry.sdk.trace.export.SpanExporter;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map.Entry;
|
||||
import javax.annotation.concurrent.Immutable;
|
||||
|
||||
/**
|
||||
* This exporter will update a span with metric attributes before exporting. It depends on a {@link
|
||||
* SpanExporter} being provided on instantiation, which the AwsSpanMetricsExporter will delegate
|
||||
* export to. Also, a {@link MetricAttributeGenerator} must be provided, which will provide a means
|
||||
* to determine attributes which should be applied to the span. Finally, a {@link Resource} must be
|
||||
* provided, which is used to generate metric attributes.
|
||||
*
|
||||
* <p>This exporter should be coupled with the {@link AwsSpanMetricsProcessor} using the same {@link
|
||||
* MetricAttributeGenerator}. This will result in metrics and spans being produced with common
|
||||
* attributes.
|
||||
*/
|
||||
@Immutable
|
||||
public class AwsMetricAttributesSpanExporter implements SpanExporter {
|
||||
|
||||
private final SpanExporter delegate;
|
||||
private final MetricAttributeGenerator generator;
|
||||
private final Resource resource;
|
||||
|
||||
/** Use {@link AwsMetricAttributesSpanExporterBuilder} to construct this exporter. */
|
||||
static AwsMetricAttributesSpanExporter create(
|
||||
SpanExporter delegate, MetricAttributeGenerator generator, Resource resource) {
|
||||
return new AwsMetricAttributesSpanExporter(delegate, generator, resource);
|
||||
}
|
||||
|
||||
private AwsMetricAttributesSpanExporter(
|
||||
SpanExporter delegate, MetricAttributeGenerator generator, Resource resource) {
|
||||
this.delegate = delegate;
|
||||
this.generator = generator;
|
||||
this.resource = resource;
|
||||
}
|
||||
|
||||
@Override
|
||||
public CompletableResultCode export(Collection<SpanData> spans) {
|
||||
List<SpanData> modifiedSpans = addMetricAttributes(spans);
|
||||
return delegate.export(modifiedSpans);
|
||||
}
|
||||
|
||||
@Override
|
||||
public CompletableResultCode flush() {
|
||||
return delegate.flush();
|
||||
}
|
||||
|
||||
@Override
|
||||
public CompletableResultCode shutdown() {
|
||||
return delegate.shutdown();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
delegate.close();
|
||||
}
|
||||
|
||||
private List<SpanData> addMetricAttributes(Collection<SpanData> spans) {
|
||||
List<SpanData> modifiedSpans = new ArrayList<>();
|
||||
|
||||
for (SpanData span : spans) {
|
||||
Attributes attributes = generator.generateMetricAttributesFromSpan(span, resource);
|
||||
if (!attributes.isEmpty()) {
|
||||
span = wrapSpanWithAttributes(span, attributes);
|
||||
}
|
||||
modifiedSpans.add(span);
|
||||
}
|
||||
|
||||
return modifiedSpans;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link #export} works with a {@link SpanData}, which does not permit modification. However, we
|
||||
* need to add derived metric attributes to the span. To work around this, we will wrap the
|
||||
* SpanData with a {@link DelegatingSpanData} that simply passes through all API calls, except for
|
||||
* those pertaining to Attributes, i.e. {@link SpanData#getAttributes()} and {@link
|
||||
* SpanData#getTotalAttributeCount} APIs.
|
||||
*
|
||||
* <p>See https://github.com/open-telemetry/opentelemetry-specification/issues/1089 for more
|
||||
* context on this approach.
|
||||
*/
|
||||
private static SpanData wrapSpanWithAttributes(SpanData span, Attributes attributes) {
|
||||
Attributes originalAttributes = span.getAttributes();
|
||||
Attributes replacementAttributes = originalAttributes.toBuilder().putAll(attributes).build();
|
||||
|
||||
int newAttributeKeyCount = 0;
|
||||
for (Entry<AttributeKey<?>, Object> entry : attributes.asMap().entrySet()) {
|
||||
if (originalAttributes.get(entry.getKey()) == null) {
|
||||
newAttributeKeyCount++;
|
||||
}
|
||||
}
|
||||
int originalTotalAttributeCount = span.getTotalAttributeCount();
|
||||
int replacementTotalAttributeCount = originalTotalAttributeCount + newAttributeKeyCount;
|
||||
|
||||
return new DelegatingSpanData(span) {
|
||||
@Override
|
||||
public Attributes getAttributes() {
|
||||
return replacementAttributes;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getTotalAttributeCount() {
|
||||
return replacementTotalAttributeCount;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray;
|
||||
|
||||
import static java.util.Objects.requireNonNull;
|
||||
|
||||
import com.google.errorprone.annotations.CanIgnoreReturnValue;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.sdk.trace.export.SpanExporter;
|
||||
|
||||
public class AwsMetricAttributesSpanExporterBuilder {
|
||||
|
||||
// Defaults
|
||||
private static final MetricAttributeGenerator DEFAULT_GENERATOR =
|
||||
new AwsMetricAttributeGenerator();
|
||||
|
||||
// Required builder elements
|
||||
private final SpanExporter delegate;
|
||||
private final Resource resource;
|
||||
|
||||
// Optional builder elements
|
||||
private MetricAttributeGenerator generator = DEFAULT_GENERATOR;
|
||||
|
||||
public static AwsMetricAttributesSpanExporterBuilder create(
|
||||
SpanExporter delegate, Resource resource) {
|
||||
return new AwsMetricAttributesSpanExporterBuilder(delegate, resource);
|
||||
}
|
||||
|
||||
private AwsMetricAttributesSpanExporterBuilder(SpanExporter delegate, Resource resource) {
|
||||
this.delegate = delegate;
|
||||
this.resource = resource;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the generator used to generate attributes used spancs exported by the exporter. If unset,
|
||||
* defaults to {@link #DEFAULT_GENERATOR}. Must not be null.
|
||||
*/
|
||||
@CanIgnoreReturnValue
|
||||
public AwsMetricAttributesSpanExporterBuilder setGenerator(MetricAttributeGenerator generator) {
|
||||
requireNonNull(generator, "generator");
|
||||
this.generator = generator;
|
||||
return this;
|
||||
}
|
||||
|
||||
public AwsMetricAttributesSpanExporter build() {
|
||||
return AwsMetricAttributesSpanExporter.create(delegate, generator, resource);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,180 @@
|
|||
/*
|
||||
* Copyright The OpenTelemetry Authors
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package io.opentelemetry.contrib.awsxray;
|
||||
|
||||
import io.opentelemetry.api.common.AttributeKey;
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.api.metrics.DoubleHistogram;
|
||||
import io.opentelemetry.api.metrics.LongCounter;
|
||||
import io.opentelemetry.context.Context;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
import io.opentelemetry.sdk.trace.ReadWriteSpan;
|
||||
import io.opentelemetry.sdk.trace.ReadableSpan;
|
||||
import io.opentelemetry.sdk.trace.SpanProcessor;
|
||||
import io.opentelemetry.sdk.trace.data.EventData;
|
||||
import io.opentelemetry.sdk.trace.data.ExceptionEventData;
|
||||
import io.opentelemetry.sdk.trace.data.SpanData;
|
||||
import java.lang.reflect.Method;
|
||||
import javax.annotation.Nullable;
|
||||
import javax.annotation.concurrent.Immutable;
|
||||
|
||||
/**
|
||||
* This processor will generate metrics based on span data. It depends on a {@link
|
||||
* MetricAttributeGenerator} being provided on instantiation, which will provide a means to
|
||||
* determine attributes which should be used to create metrics. A {@link Resource} must also be
|
||||
* provided, which is used to generate metrics. Finally, two {@link LongCounter}'s and a {@link
|
||||
* DoubleHistogram} must be provided, which will be used to actually create desired metrics (see
|
||||
* below)
|
||||
*
|
||||
* <p>AwsSpanMetricsProcessor produces metrics for errors (e.g. HTTP 4XX status codes), faults (e.g.
|
||||
* HTTP 5XX status codes), and latency (in Milliseconds). Errors and faults are counted, while
|
||||
* latency is measured with a histogram. Metrics are emitted with attributes derived from span
|
||||
* attributes.
|
||||
*
|
||||
* <p>For highest fidelity metrics, this processor should be coupled with the {@link
|
||||
* AlwaysRecordSampler}, which will result in 100% of spans being sent to the processor.
|
||||
*/
|
||||
@Immutable
|
||||
public final class AwsSpanMetricsProcessor implements SpanProcessor {
|
||||
|
||||
private static final AttributeKey<Long> HTTP_STATUS_CODE =
|
||||
AttributeKey.longKey("http.status_code");
|
||||
|
||||
private static final double NANOS_TO_MILLIS = 1_000_000.0;
|
||||
|
||||
// Constants for deriving error and fault metrics
|
||||
private static final int ERROR_CODE_LOWER_BOUND = 400;
|
||||
private static final int ERROR_CODE_UPPER_BOUND = 499;
|
||||
private static final int FAULT_CODE_LOWER_BOUND = 500;
|
||||
private static final int FAULT_CODE_UPPER_BOUND = 599;
|
||||
|
||||
// Metric instruments
|
||||
private final LongCounter errorCounter;
|
||||
private final LongCounter faultCounter;
|
||||
private final DoubleHistogram latencyHistogram;
|
||||
|
||||
private final MetricAttributeGenerator generator;
|
||||
private final Resource resource;
|
||||
|
||||
/** Use {@link AwsSpanMetricsProcessorBuilder} to construct this processor. */
|
||||
static AwsSpanMetricsProcessor create(
|
||||
LongCounter errorCounter,
|
||||
LongCounter faultCounter,
|
||||
DoubleHistogram latencyHistogram,
|
||||
MetricAttributeGenerator generator,
|
||||
Resource resource) {
|
||||
return new AwsSpanMetricsProcessor(
|
||||
errorCounter, faultCounter, latencyHistogram, generator, resource);
|
||||
}
|
||||
|
||||
private AwsSpanMetricsProcessor(
|
||||
LongCounter errorCounter,
|
||||
LongCounter faultCounter,
|
||||
DoubleHistogram latencyHistogram,
|
||||
MetricAttributeGenerator generator,
|
||||
Resource resource) {
|
||||
this.errorCounter = errorCounter;
|
||||
this.faultCounter = faultCounter;
|
||||
this.latencyHistogram = latencyHistogram;
|
||||
this.generator = generator;
|
||||
this.resource = resource;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onStart(Context parentContext, ReadWriteSpan span) {}
|
||||
|
||||
@Override
|
||||
public boolean isStartRequired() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onEnd(ReadableSpan span) {
|
||||
SpanData spanData = span.toSpanData();
|
||||
Attributes attributes = generator.generateMetricAttributesFromSpan(spanData, resource);
|
||||
|
||||
// Only record metrics if non-empty attributes are returned.
|
||||
if (!attributes.isEmpty()) {
|
||||
recordErrorOrFault(spanData, attributes);
|
||||
recordLatency(span, attributes);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isEndRequired() {
|
||||
return true;
|
||||
}
|
||||
|
||||
private void recordErrorOrFault(SpanData spanData, Attributes attributes) {
|
||||
Long httpStatusCode = spanData.getAttributes().get(HTTP_STATUS_CODE);
|
||||
if (httpStatusCode == null) {
|
||||
httpStatusCode = getAwsStatusCode(spanData);
|
||||
|
||||
if (httpStatusCode == null || httpStatusCode < 100L || httpStatusCode > 599L) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (httpStatusCode >= ERROR_CODE_LOWER_BOUND && httpStatusCode <= ERROR_CODE_UPPER_BOUND) {
|
||||
errorCounter.add(1, attributes);
|
||||
} else if (httpStatusCode >= FAULT_CODE_LOWER_BOUND
|
||||
&& httpStatusCode <= FAULT_CODE_UPPER_BOUND) {
|
||||
faultCounter.add(1, attributes);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to pull status code from spans produced by AWS SDK instrumentation (both v1 and v2).
|
||||
* AWS SDK instrumentation does not populate http.status_code when non-200 status codes are
|
||||
* returned, as the AWS SDK throws exceptions rather than returning responses with status codes.
|
||||
* To work around this, we are attempting to get the exception out of the events, then calling
|
||||
* getStatusCode (for AWS SDK V1) and statusCode (for AWS SDK V2) to get the status code fromt the
|
||||
* exception. We rely on reflection here because we cannot cast the throwable to
|
||||
* AmazonServiceExceptions (V1) or AwsServiceExceptions (V2) because the throwable comes from a
|
||||
* separate class loader and attempts to cast will fail with ClassCastException.
|
||||
*
|
||||
* <p>TODO: Short term workaround. This can be completely removed once
|
||||
* https://github.com/open-telemetry/opentelemetry-java-contrib/issues/919 is resolved.
|
||||
*/
|
||||
@Nullable
|
||||
private static Long getAwsStatusCode(SpanData spanData) {
|
||||
String scopeName = spanData.getInstrumentationScopeInfo().getName();
|
||||
if (!scopeName.contains("aws-sdk")) {
|
||||
return null;
|
||||
}
|
||||
|
||||
for (EventData event : spanData.getEvents()) {
|
||||
if (event instanceof ExceptionEventData) {
|
||||
ExceptionEventData exceptionEvent = (ExceptionEventData) event;
|
||||
Throwable throwable = exceptionEvent.getException();
|
||||
|
||||
try {
|
||||
Method method = throwable.getClass().getMethod("getStatusCode", new Class<?>[] {});
|
||||
Object code = method.invoke(throwable, new Object[] {});
|
||||
return Long.valueOf((Integer) code);
|
||||
} catch (Exception e) {
|
||||
// Take no action
|
||||
}
|
||||
|
||||
try {
|
||||
Method method = throwable.getClass().getMethod("statusCode", new Class<?>[] {});
|
||||
Object code = method.invoke(throwable, new Object[] {});
|
||||
return Long.valueOf((Integer) code);
|
||||
} catch (Exception e) {
|
||||
// Take no action
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
private void recordLatency(ReadableSpan span, Attributes attributes) {
|
||||
long nanos = span.getLatencyNanos();
|
||||
double millis = nanos / NANOS_TO_MILLIS;
|
||||
latencyHistogram.record(millis, attributes);
|
||||
}
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue