Compare commits
243 Commits
openteleme
...
main
Author | SHA1 | Date |
---|---|---|
|
109173fec7 | |
|
b63ca133be | |
|
1f78c8acff | |
|
49fa53131d | |
|
333fc5dcb4 | |
|
80c357bb16 | |
|
3c4d18cc13 | |
|
b74633a552 | |
|
b1c2c7941b | |
|
b69ebb7224 | |
|
6977da3893 | |
|
ca079cbc56 | |
|
40d8942bf5 | |
|
78300e9642 | |
|
c4347e027c | |
|
0a03c9abf2 | |
|
85dbfe520a | |
|
b27225273b | |
|
591051f8bb | |
|
50cdeeee12 | |
|
04f8899252 | |
|
59cc34e9f3 | |
|
b7301823a0 | |
|
4a21b3974b | |
|
85ea8f382d | |
|
8f7bab5337 | |
|
ccf9cabeee | |
|
701d65b022 | |
|
b9a78e7475 | |
|
77325aa89a | |
|
6d8becf9ad | |
|
6c89a56da5 | |
|
3e0632cb31 | |
|
7bd0895d01 | |
|
a912c9e57c | |
|
75c73d1e29 | |
|
a164d37a3c | |
|
e2ba6d43c0 | |
|
4e42ed674a | |
|
72f437c456 | |
|
93353660f6 | |
|
1953d97958 | |
|
df275921a9 | |
|
c1a689507b | |
|
b3f98ab936 | |
|
dc35754dbd | |
|
fa6d972444 | |
|
5e4b55812a | |
|
680f197515 | |
|
1909c913b2 | |
|
f9d9f19aa5 | |
|
0db4d0bb8f | |
|
71bfc9550e | |
|
38f7413836 | |
|
4d6893e8fa | |
|
dbdff31220 | |
|
ef2b5468d5 | |
|
f21182890a | |
|
ec731581af | |
|
ccdf522626 | |
|
9e1284687a | |
|
1034b746af | |
|
3efd161cb6 | |
|
99049a9652 | |
|
4b9b949dde | |
|
dc37067b93 | |
|
517257cbef | |
|
4a1e0ce941 | |
|
9bc7764139 | |
|
abafc2131f | |
|
2c0033f842 | |
|
cc7169cf2c | |
|
45797ec3a1 | |
|
5c76d04a35 | |
|
9d41f9bf10 | |
|
02dc87ef73 | |
|
e6869cdc75 | |
|
dec311ff0e | |
|
da661f6547 | |
|
afc0d5531d | |
|
8a83770d7b | |
|
c6cdbeb51f | |
|
7f347e54df | |
|
369a9f2e00 | |
|
c54292fa08 | |
|
d6f6c60c16 | |
|
dd151673d1 | |
|
3431a4e8b0 | |
|
c47e341ab8 | |
|
8390db35ae | |
|
b8018c5262 | |
|
ce90639428 | |
|
fce17db166 | |
|
9c969f363e | |
|
7562ff08f3 | |
|
4b832859cc | |
|
6587485d05 | |
|
bb85f983a3 | |
|
04f9e8dd7f | |
|
6d5a5149d0 | |
|
6bde73ce34 | |
|
3a585b4b58 | |
|
e50cb3271f | |
|
5a2cfb3d65 | |
|
fdcd80d89f | |
|
ff18e7c18b | |
|
78373353f5 | |
|
fa499f5ca8 | |
|
642d8c4081 | |
|
27d5d93a6a | |
|
3d37106115 | |
|
dde065b139 | |
|
8b2558f22e | |
|
0ea9998c4c | |
|
50ab047143 | |
|
139d787168 | |
|
af179659a2 | |
|
76e614fac4 | |
|
46cf5b5257 | |
|
164259e149 | |
|
db617eb3fd | |
|
3c60b62ad1 | |
|
8d14f0bb2a | |
|
a5474c3b29 | |
|
e43e8c91cd | |
|
ad29af3996 | |
|
3c88163c99 | |
|
f96d14cc62 | |
|
6189be647c | |
|
6daf581f46 | |
|
db52193b3c | |
|
3708604bb5 | |
|
d5dce5de99 | |
|
ad2fe813ab | |
|
fde1ef84c7 | |
|
9811782358 | |
|
4f9ee01b5f | |
|
2d5a21a3da | |
|
3dd42960e0 | |
|
c0132c6ab0 | |
|
65720812f7 | |
|
fa8a6995c7 | |
|
23cadea66a | |
|
2371adf3f8 | |
|
a83c8d9a04 | |
|
a01564cf08 | |
|
9e4de00420 | |
|
0ff1032bdd | |
|
81eaea57f9 | |
|
c6c0162cef | |
|
f98f5688ae | |
|
6114b60506 | |
|
f17a1bd65a | |
|
e0189e25dc | |
|
2f4d4c56fc | |
|
c09a299010 | |
|
139f3e52d9 | |
|
b76119bb8f | |
|
c4eb3714c5 | |
|
dc01beda66 | |
|
38006e86c4 | |
|
b1f714ee0f | |
|
3c2599c761 | |
|
f8bb30ef67 | |
|
72490eafcd | |
|
638c57f7a4 | |
|
e4a5b54135 | |
|
b5a0ee526c | |
|
17a57bf6d3 | |
|
2f5b0bf1fc | |
|
6245fb833c | |
|
8644630e7f | |
|
52bbcd6001 | |
|
8e3cd65f28 | |
|
0cb9ba55e8 | |
|
96f4a039c5 | |
|
789bf866e3 | |
|
1623dc0e71 | |
|
63e43d5222 | |
|
95873604ab | |
|
34368612f4 | |
|
bf3c0be507 | |
|
42e8b0a451 | |
|
6e61ff0c32 | |
|
231d26c4be | |
|
c0bc2c9797 | |
|
64f28ca279 | |
|
d7bc137c60 | |
|
cca571ab72 | |
|
6b3a11beb2 | |
|
9d8a6c6420 | |
|
85e21a9e22 | |
|
d18c5fe19c | |
|
65a2713d9f | |
|
748c92592d | |
|
7af1918b89 | |
|
44754e2a50 | |
|
731054f736 | |
|
dd68241907 | |
|
eabab7d0ad | |
|
93e6fcfa26 | |
|
5478a0b77a | |
|
0bb1c42a78 | |
|
2756c1edff | |
|
ec3c51dcd1 | |
|
3f50c08580 | |
|
37f85bf8cc | |
|
20413ef7d7 | |
|
9460773e5c | |
|
86a7f6bab3 | |
|
9b217bb4ff | |
|
9d9353d4c6 | |
|
a716949d1c | |
|
07c97eac38 | |
|
52871b82b6 | |
|
e54256ddb7 | |
|
c59b514cda | |
|
5219242eaf | |
|
b7e7d0cbe5 | |
|
406707b2bd | |
|
8406e2e789 | |
|
29ef6a9455 | |
|
3ebdb6344b | |
|
a606fab849 | |
|
0ad779a5b3 | |
|
962a3aecdb | |
|
41e670aeee | |
|
9af3136e7f | |
|
cf6d45e96c | |
|
26bcc9347b | |
|
908437db5d | |
|
147e3f754e | |
|
3d5935f4f6 | |
|
95f14cd8df | |
|
e5eb524e89 | |
|
3e394a4814 | |
|
16c041e22b | |
|
54882871b9 | |
|
c084ca8fa5 | |
|
8db1479e0d | |
|
72dc1cf1f6 | |
|
16eaec8d03 | |
|
d116ae39d6 |
|
@ -18,7 +18,7 @@ body:
|
|||
Please describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main.
|
||||
value: |
|
||||
OS: (e.g, Ubuntu)
|
||||
Python version: (e.g., Python 3.8.10)
|
||||
Python version: (e.g., Python 3.9.10)
|
||||
Package version: (e.g., 0.46.0)
|
||||
|
||||
- type: textarea
|
||||
|
|
|
@ -11,6 +11,9 @@ components:
|
|||
- oxeye-nikolay
|
||||
- nikosokolik
|
||||
|
||||
instrumentation/opentelemetry-instrumentation-asyncclick:
|
||||
- jomcgi
|
||||
|
||||
instrumentation/opentelemetry-instrumentation-kafka-python:
|
||||
- nozik
|
||||
|
||||
|
@ -61,6 +64,9 @@ components:
|
|||
instrumentation/opentelemetry-instrumentation-psycopg:
|
||||
- federicobond
|
||||
|
||||
instrumentation/opentelemetry-instrumentation-pymssql:
|
||||
- guillaumep
|
||||
|
||||
instrumentation/opentelemetry-instrumentation-aiokafka:
|
||||
- dimastbk
|
||||
|
||||
|
|
|
@ -10,6 +10,10 @@ jobs:
|
|||
run_self:
|
||||
runs-on: ubuntu-latest
|
||||
name: Auto Assign Owners
|
||||
permissions:
|
||||
contents: read # to read changed files
|
||||
issues: write # to read/write issue assignees
|
||||
pull-requests: write # to read/write PR reviewers
|
||||
# Don't fail tests if this workflow fails. Some pending issues:
|
||||
# - https://github.com/dyladan/component-owners/issues/8
|
||||
continue-on-error: true
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,20 @@
|
|||
name: FOSSA scanning
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
fossa:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- uses: fossas/fossa-action@93a52ecf7c3ac7eb40f5de77fd69b1a19524de94 # v1.5.0
|
||||
with:
|
||||
api-key: ${{secrets.FOSSA_API_KEY}}
|
||||
team: OpenTelemetry
|
|
@ -7,7 +7,7 @@ name = "generate-workflows-lib"
|
|||
dynamic = ["version"]
|
||||
description = "A library to generate workflows"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -17,11 +17,11 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
"Programming Language :: Python :: 3.13",
|
||||
"Typing :: Typed",
|
||||
]
|
||||
dependencies = ["Jinja2", "tox"]
|
||||
|
|
|
@ -14,7 +14,7 @@ _tox_test_env_regex = re_compile(
|
|||
)
|
||||
_tox_lint_env_regex = re_compile(r"lint-(?P<name>[-\w]+)")
|
||||
_tox_contrib_env_regex = re_compile(
|
||||
r"py38-test-(?P<name>[-\w]+\w)-?(?P<contrib_requirements>\d+)?"
|
||||
r"py39-test-(?P<name>[-\w]+\w)-?(?P<contrib_requirements>\d+)?"
|
||||
)
|
||||
|
||||
|
||||
|
@ -47,12 +47,13 @@ def get_test_job_datas(tox_envs: list, operating_systems: list) -> list:
|
|||
os_alias = {"ubuntu-latest": "Ubuntu", "windows-latest": "Windows"}
|
||||
|
||||
python_version_alias = {
|
||||
"pypy3": "pypy-3.8",
|
||||
"py38": "3.8",
|
||||
"pypy3": "pypy-3.9",
|
||||
"pypy310": "pypy-3.10",
|
||||
"py39": "3.9",
|
||||
"py310": "3.10",
|
||||
"py311": "3.11",
|
||||
"py312": "3.12",
|
||||
"py313": "3.13",
|
||||
}
|
||||
|
||||
test_job_datas = []
|
||||
|
|
|
@ -12,6 +12,7 @@ on:
|
|||
CONTRIB_REPO_SHA:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: ${% raw %}{{ inputs.CORE_REPO_SHA }}{% endraw %}
|
||||
CONTRIB_REPO_SHA: ${% raw %}{{ inputs.CONTRIB_REPO_SHA }}{% endraw %}
|
||||
|
@ -23,6 +24,7 @@ jobs:
|
|||
{{ job_data.tox_env }}:
|
||||
name: {{ job_data.ui_name }}
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout contrib repo @ SHA - ${% raw %}{{ env.CONTRIB_REPO_SHA }}{% endraw %}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -30,14 +32,14 @@ jobs:
|
|||
repository: open-telemetry/opentelemetry-python-contrib
|
||||
ref: ${% raw %}{{ env.CONTRIB_REPO_SHA }}{% endraw %}
|
||||
|
||||
- name: Set up Python 3.8
|
||||
- name: Set up Python 3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.8"
|
||||
python-version: "3.9"
|
||||
architecture: "x64"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e {{ job_data.tox_env }} -- -ra
|
||||
|
|
|
@ -9,8 +9,20 @@ on:
|
|||
- 'release/*'
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: main
|
||||
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
|
||||
# For PRs you can change the inner fallback ('main')
|
||||
# For pushes you change the outer fallback ('main')
|
||||
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
|
||||
CORE_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && (
|
||||
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
|
||||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
|
||||
'main'
|
||||
) || 'main' }}{% endraw %}
|
||||
CONTRIB_REPO_SHA: main
|
||||
PIP_EXISTS_ACTION: w
|
||||
|
||||
|
@ -20,17 +32,18 @@ jobs:
|
|||
{{ job_data.name }}:
|
||||
name: {{ job_data.ui_name }}
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${% raw %}{{ github.sha }}{% endraw %}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.12
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e {{ job_data.tox_env }}
|
||||
|
|
|
@ -9,8 +9,20 @@ on:
|
|||
- 'release/*'
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: main
|
||||
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
|
||||
# For PRs you can change the inner fallback ('main')
|
||||
# For pushes you change the outer fallback ('main')
|
||||
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
|
||||
CORE_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && (
|
||||
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
|
||||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
|
||||
'main'
|
||||
) || 'main' }}{% endraw %}
|
||||
CONTRIB_REPO_SHA: main
|
||||
PIP_EXISTS_ACTION: w
|
||||
|
||||
|
@ -20,6 +32,7 @@ jobs:
|
|||
{{ job_data }}:
|
||||
name: {{ job_data }}
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
{%- if job_data == "generate-workflows" %}
|
||||
if: |
|
||||
!contains(github.event.pull_request.labels.*.name, 'Skip generate-workflows')
|
||||
|
@ -57,7 +70,7 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e {{ job_data }}
|
||||
|
|
|
@ -9,8 +9,20 @@ on:
|
|||
- 'release/*'
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: main
|
||||
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
|
||||
# For PRs you can change the inner fallback ('main')
|
||||
# For pushes you change the outer fallback ('main')
|
||||
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
|
||||
CORE_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && (
|
||||
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
|
||||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
|
||||
'main'
|
||||
) || 'main' }}{% endraw %}
|
||||
CONTRIB_REPO_SHA: main
|
||||
PIP_EXISTS_ACTION: w
|
||||
|
||||
|
@ -20,6 +32,7 @@ jobs:
|
|||
{{ job_data.name }}:
|
||||
name: {{ job_data.ui_name }}
|
||||
runs-on: {{ job_data.os }}
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${% raw %}{{ github.sha }}{% endraw %}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -30,7 +43,7 @@ jobs:
|
|||
python-version: "{{ job_data.python_version }}"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
{%- if job_data.os == "windows-latest" %}
|
||||
|
||||
- name: Configure git to support long filenames
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -9,8 +9,20 @@ on:
|
|||
- 'release/*'
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: main
|
||||
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
|
||||
# For PRs you can change the inner fallback ('main')
|
||||
# For pushes you change the outer fallback ('main')
|
||||
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
|
||||
CORE_REPO_SHA: ${{ github.event_name == 'pull_request' && (
|
||||
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
|
||||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
|
||||
'main'
|
||||
) || 'main' }}
|
||||
CONTRIB_REPO_SHA: main
|
||||
PIP_EXISTS_ACTION: w
|
||||
|
||||
|
@ -19,6 +31,7 @@ jobs:
|
|||
spellcheck:
|
||||
name: spellcheck
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -29,7 +42,7 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e spellcheck
|
||||
|
@ -37,6 +50,7 @@ jobs:
|
|||
docker-tests:
|
||||
name: docker-tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -47,7 +61,7 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e docker-tests
|
||||
|
@ -55,6 +69,7 @@ jobs:
|
|||
docs:
|
||||
name: docs
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
if: |
|
||||
github.event.pull_request.user.login != 'opentelemetrybot' && github.event_name == 'pull_request'
|
||||
steps:
|
||||
|
@ -67,7 +82,7 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e docs
|
||||
|
@ -75,6 +90,7 @@ jobs:
|
|||
generate:
|
||||
name: generate
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -85,7 +101,7 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e generate
|
||||
|
@ -96,6 +112,7 @@ jobs:
|
|||
generate-workflows:
|
||||
name: generate-workflows
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
if: |
|
||||
!contains(github.event.pull_request.labels.*.name, 'Skip generate-workflows')
|
||||
&& github.event.pull_request.user.login != 'opentelemetrybot' && github.event_name == 'pull_request'
|
||||
|
@ -109,7 +126,7 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e generate-workflows
|
||||
|
@ -120,6 +137,7 @@ jobs:
|
|||
shellcheck:
|
||||
name: shellcheck
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -130,7 +148,7 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e shellcheck
|
||||
|
@ -138,6 +156,7 @@ jobs:
|
|||
ruff:
|
||||
name: ruff
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -148,7 +167,26 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e ruff
|
||||
|
||||
typecheck:
|
||||
name: typecheck
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.11
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e typecheck
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
name: OSSF Scorecard
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
schedule:
|
||||
- cron: "10 6 * * 1" # once a week
|
||||
workflow_dispatch:
|
||||
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
analysis:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
# Needed for Code scanning upload
|
||||
security-events: write
|
||||
# Needed for GitHub OIDC token if publish_results is true
|
||||
id-token: write
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
publish_results: true
|
||||
|
||||
# Upload the results as artifacts (optional). Commenting out will disable
|
||||
# uploads of run results in SARIF format to the repository Actions tab.
|
||||
# https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
retention-days: 5
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard (optional).
|
||||
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@5f8171a638ada777af81d42b55959a643bb29017 # v3.28.12
|
||||
with:
|
||||
sarif_file: results.sarif
|
|
@ -9,8 +9,12 @@ on:
|
|||
- opentelemetry-resource-detector-azure
|
||||
- opentelemetry-sdk-extension-aws
|
||||
- opentelemetry-instrumentation-openai-v2
|
||||
- opentelemetry-instrumentation-vertexai
|
||||
- opentelemetry-instrumentation-google-genai
|
||||
description: 'Package to be released'
|
||||
required: true
|
||||
run-name: "[Package][${{ inputs.package }}] Prepare patch release"
|
||||
|
||||
jobs:
|
||||
prepare-patch-release:
|
||||
runs-on: ubuntu-latest
|
||||
|
|
|
@ -9,9 +9,12 @@ on:
|
|||
- opentelemetry-resource-detector-azure
|
||||
- opentelemetry-sdk-extension-aws
|
||||
- opentelemetry-instrumentation-openai-v2
|
||||
- opentelemetry-instrumentation-vertexai
|
||||
- opentelemetry-instrumentation-google-genai
|
||||
description: 'Package to be released'
|
||||
required: true
|
||||
|
||||
run-name: "[Package][${{ inputs.package }}] Prepare release"
|
||||
jobs:
|
||||
prereqs:
|
||||
runs-on: ubuntu-latest
|
||||
|
|
|
@ -9,8 +9,11 @@ on:
|
|||
- opentelemetry-resource-detector-azure
|
||||
- opentelemetry-sdk-extension-aws
|
||||
- opentelemetry-instrumentation-openai-v2
|
||||
- opentelemetry-instrumentation-vertexai
|
||||
- opentelemetry-instrumentation-google-genai
|
||||
description: 'Package to be released'
|
||||
required: true
|
||||
run-name: "[Package][${{ inputs.package }}] Release"
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
|
@ -75,7 +78,7 @@ jobs:
|
|||
# next few steps publish to pypi
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.8'
|
||||
python-version: '3.9'
|
||||
|
||||
- name: Build wheels
|
||||
run: ./scripts/build_a_package.sh
|
||||
|
|
|
@ -71,6 +71,7 @@ jobs:
|
|||
run: .github/scripts/use-cla-approved-github-bot.sh
|
||||
|
||||
- name: Create pull request
|
||||
id: create_pr
|
||||
env:
|
||||
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
|
||||
GITHUB_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }}
|
||||
|
@ -80,7 +81,15 @@ jobs:
|
|||
|
||||
git commit -a -m "$message"
|
||||
git push origin HEAD:$branch
|
||||
gh pr create --title "[$GITHUB_REF_NAME] $message" \
|
||||
pr_url=$(gh pr create --title "[$GITHUB_REF_NAME] $message" \
|
||||
--body "$message." \
|
||||
--head $branch \
|
||||
--base $GITHUB_REF_NAME
|
||||
--base $GITHUB_REF_NAME)
|
||||
echo "pr_url=$pr_url" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Add prepare-release label to PR
|
||||
if: steps.create_pr.outputs.pr_url != ''
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh pr edit ${{ steps.create_pr.outputs.pr_url }} --add-label "prepare-release"
|
||||
|
|
|
@ -94,6 +94,7 @@ jobs:
|
|||
run: .github/scripts/use-cla-approved-github-bot.sh
|
||||
|
||||
- name: Create pull request against the release branch
|
||||
id: create_release_branch_pr
|
||||
env:
|
||||
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
|
||||
GITHUB_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }}
|
||||
|
@ -103,10 +104,18 @@ jobs:
|
|||
|
||||
git commit -a -m "$message"
|
||||
git push origin HEAD:$branch
|
||||
gh pr create --title "[$RELEASE_BRANCH_NAME] $message" \
|
||||
pr_url=$(gh pr create --title "[$RELEASE_BRANCH_NAME] $message" \
|
||||
--body "$message." \
|
||||
--head $branch \
|
||||
--base $RELEASE_BRANCH_NAME
|
||||
--base $RELEASE_BRANCH_NAME)
|
||||
echo "pr_url=$pr_url" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Add prepare-release label to PR
|
||||
if: steps.create_release_branch_pr.outputs.pr_url != ''
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh pr edit ${{ steps.create_release_branch_pr.outputs.pr_url }} --add-label "prepare-release"
|
||||
|
||||
create-pull-request-against-main:
|
||||
runs-on: ubuntu-latest
|
||||
|
@ -179,6 +188,7 @@ jobs:
|
|||
run: .github/scripts/use-cla-approved-github-bot.sh
|
||||
|
||||
- name: Create pull request against main
|
||||
id: create_main_pr
|
||||
env:
|
||||
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
|
||||
GITHUB_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }}
|
||||
|
@ -189,7 +199,15 @@ jobs:
|
|||
|
||||
git commit -a -m "$message"
|
||||
git push origin HEAD:$branch
|
||||
gh pr create --title "$message" \
|
||||
pr_url=$(gh pr create --title "$message" \
|
||||
--body "$body" \
|
||||
--head $branch \
|
||||
--base main
|
||||
--base main)
|
||||
echo "pr_url=$pr_url" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Add prepare-release label to PR
|
||||
if: steps.create_main_pr.outputs.pr_url != ''
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh pr edit ${{ steps.create_main_pr.outputs.pr_url }} --add-label "prepare-release"
|
||||
|
|
|
@ -66,7 +66,7 @@ jobs:
|
|||
# next few steps publish to pypi
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.8'
|
||||
python-version: '3.9'
|
||||
|
||||
- name: Build wheels
|
||||
run: ./scripts/build.sh
|
||||
|
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -9,34 +9,238 @@ on:
|
|||
- 'release/*'
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: main
|
||||
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
|
||||
# For PRs you can change the inner fallback ('main')
|
||||
# For pushes you change the outer fallback ('main')
|
||||
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
|
||||
CORE_REPO_SHA: ${{ github.event_name == 'pull_request' && (
|
||||
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
|
||||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
|
||||
'main'
|
||||
) || 'main' }}
|
||||
CONTRIB_REPO_SHA: main
|
||||
PIP_EXISTS_ACTION: w
|
||||
|
||||
jobs:
|
||||
|
||||
py38-test-instrumentation-confluent-kafka_ubuntu-latest:
|
||||
name: instrumentation-confluent-kafka 3.8 Ubuntu
|
||||
py312-test-instrumentation-aio-pika-1_ubuntu-latest:
|
||||
name: instrumentation-aio-pika-1 3.12 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.8
|
||||
- name: Set up Python 3.12
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.8"
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py38-test-instrumentation-confluent-kafka -- -ra
|
||||
run: tox -e py312-test-instrumentation-aio-pika-1 -- -ra
|
||||
|
||||
py39-test-instrumentation-confluent-kafka_ubuntu-latest:
|
||||
name: instrumentation-confluent-kafka 3.9 Ubuntu
|
||||
py312-test-instrumentation-aio-pika-2_ubuntu-latest:
|
||||
name: instrumentation-aio-pika-2 3.12 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.12
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py312-test-instrumentation-aio-pika-2 -- -ra
|
||||
|
||||
py312-test-instrumentation-aio-pika-3_ubuntu-latest:
|
||||
name: instrumentation-aio-pika-3 3.12 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.12
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py312-test-instrumentation-aio-pika-3 -- -ra
|
||||
|
||||
py313-test-instrumentation-aio-pika-0_ubuntu-latest:
|
||||
name: instrumentation-aio-pika-0 3.13 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py313-test-instrumentation-aio-pika-0 -- -ra
|
||||
|
||||
py313-test-instrumentation-aio-pika-1_ubuntu-latest:
|
||||
name: instrumentation-aio-pika-1 3.13 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py313-test-instrumentation-aio-pika-1 -- -ra
|
||||
|
||||
py313-test-instrumentation-aio-pika-2_ubuntu-latest:
|
||||
name: instrumentation-aio-pika-2 3.13 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py313-test-instrumentation-aio-pika-2 -- -ra
|
||||
|
||||
py313-test-instrumentation-aio-pika-3_ubuntu-latest:
|
||||
name: instrumentation-aio-pika-3 3.13 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py313-test-instrumentation-aio-pika-3 -- -ra
|
||||
|
||||
pypy3-test-instrumentation-aio-pika-0_ubuntu-latest:
|
||||
name: instrumentation-aio-pika-0 pypy-3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python pypy-3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "pypy-3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e pypy3-test-instrumentation-aio-pika-0 -- -ra
|
||||
|
||||
pypy3-test-instrumentation-aio-pika-1_ubuntu-latest:
|
||||
name: instrumentation-aio-pika-1 pypy-3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python pypy-3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "pypy-3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e pypy3-test-instrumentation-aio-pika-1 -- -ra
|
||||
|
||||
pypy3-test-instrumentation-aio-pika-2_ubuntu-latest:
|
||||
name: instrumentation-aio-pika-2 pypy-3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python pypy-3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "pypy-3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e pypy3-test-instrumentation-aio-pika-2 -- -ra
|
||||
|
||||
pypy3-test-instrumentation-aio-pika-3_ubuntu-latest:
|
||||
name: instrumentation-aio-pika-3 pypy-3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python pypy-3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "pypy-3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e pypy3-test-instrumentation-aio-pika-3 -- -ra
|
||||
|
||||
py39-test-instrumentation-aiokafka_ubuntu-latest:
|
||||
name: instrumentation-aiokafka 3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -47,7 +251,311 @@ jobs:
|
|||
python-version: "3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py39-test-instrumentation-aiokafka -- -ra
|
||||
|
||||
py310-test-instrumentation-aiokafka_ubuntu-latest:
|
||||
name: instrumentation-aiokafka 3.10 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.10
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.10"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py310-test-instrumentation-aiokafka -- -ra
|
||||
|
||||
py311-test-instrumentation-aiokafka_ubuntu-latest:
|
||||
name: instrumentation-aiokafka 3.11 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.11
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py311-test-instrumentation-aiokafka -- -ra
|
||||
|
||||
py312-test-instrumentation-aiokafka_ubuntu-latest:
|
||||
name: instrumentation-aiokafka 3.12 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.12
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py312-test-instrumentation-aiokafka -- -ra
|
||||
|
||||
py313-test-instrumentation-aiokafka_ubuntu-latest:
|
||||
name: instrumentation-aiokafka 3.13 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py313-test-instrumentation-aiokafka -- -ra
|
||||
|
||||
pypy3-test-instrumentation-aiokafka_ubuntu-latest:
|
||||
name: instrumentation-aiokafka pypy-3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python pypy-3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "pypy-3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e pypy3-test-instrumentation-aiokafka -- -ra
|
||||
|
||||
py39-test-instrumentation-kafka-python_ubuntu-latest:
|
||||
name: instrumentation-kafka-python 3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py39-test-instrumentation-kafka-python -- -ra
|
||||
|
||||
py310-test-instrumentation-kafka-python_ubuntu-latest:
|
||||
name: instrumentation-kafka-python 3.10 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.10
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.10"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py310-test-instrumentation-kafka-python -- -ra
|
||||
|
||||
py311-test-instrumentation-kafka-python_ubuntu-latest:
|
||||
name: instrumentation-kafka-python 3.11 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.11
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py311-test-instrumentation-kafka-python -- -ra
|
||||
|
||||
py39-test-instrumentation-kafka-pythonng_ubuntu-latest:
|
||||
name: instrumentation-kafka-pythonng 3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py39-test-instrumentation-kafka-pythonng -- -ra
|
||||
|
||||
py310-test-instrumentation-kafka-pythonng_ubuntu-latest:
|
||||
name: instrumentation-kafka-pythonng 3.10 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.10
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.10"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py310-test-instrumentation-kafka-pythonng -- -ra
|
||||
|
||||
py311-test-instrumentation-kafka-pythonng_ubuntu-latest:
|
||||
name: instrumentation-kafka-pythonng 3.11 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.11
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py311-test-instrumentation-kafka-pythonng -- -ra
|
||||
|
||||
py312-test-instrumentation-kafka-pythonng_ubuntu-latest:
|
||||
name: instrumentation-kafka-pythonng 3.12 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.12
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py312-test-instrumentation-kafka-pythonng -- -ra
|
||||
|
||||
py313-test-instrumentation-kafka-pythonng_ubuntu-latest:
|
||||
name: instrumentation-kafka-pythonng 3.13 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py313-test-instrumentation-kafka-pythonng -- -ra
|
||||
|
||||
pypy3-test-instrumentation-kafka-python_ubuntu-latest:
|
||||
name: instrumentation-kafka-python pypy-3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python pypy-3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "pypy-3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e pypy3-test-instrumentation-kafka-python -- -ra
|
||||
|
||||
pypy3-test-instrumentation-kafka-pythonng_ubuntu-latest:
|
||||
name: instrumentation-kafka-pythonng pypy-3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python pypy-3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "pypy-3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e pypy3-test-instrumentation-kafka-pythonng -- -ra
|
||||
|
||||
py39-test-instrumentation-confluent-kafka_ubuntu-latest:
|
||||
name: instrumentation-confluent-kafka 3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py39-test-instrumentation-confluent-kafka -- -ra
|
||||
|
@ -55,6 +563,7 @@ jobs:
|
|||
py310-test-instrumentation-confluent-kafka_ubuntu-latest:
|
||||
name: instrumentation-confluent-kafka 3.10 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -65,7 +574,7 @@ jobs:
|
|||
python-version: "3.10"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py310-test-instrumentation-confluent-kafka -- -ra
|
||||
|
@ -73,6 +582,7 @@ jobs:
|
|||
py311-test-instrumentation-confluent-kafka_ubuntu-latest:
|
||||
name: instrumentation-confluent-kafka 3.11 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -83,7 +593,7 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py311-test-instrumentation-confluent-kafka -- -ra
|
||||
|
@ -91,6 +601,7 @@ jobs:
|
|||
py312-test-instrumentation-confluent-kafka_ubuntu-latest:
|
||||
name: instrumentation-confluent-kafka 3.12 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -101,32 +612,34 @@ jobs:
|
|||
python-version: "3.12"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py312-test-instrumentation-confluent-kafka -- -ra
|
||||
|
||||
py38-test-instrumentation-asyncio_ubuntu-latest:
|
||||
name: instrumentation-asyncio 3.8 Ubuntu
|
||||
py313-test-instrumentation-confluent-kafka_ubuntu-latest:
|
||||
name: instrumentation-confluent-kafka 3.13 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.8
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.8"
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py38-test-instrumentation-asyncio -- -ra
|
||||
run: tox -e py313-test-instrumentation-confluent-kafka -- -ra
|
||||
|
||||
py39-test-instrumentation-asyncio_ubuntu-latest:
|
||||
name: instrumentation-asyncio 3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -137,7 +650,7 @@ jobs:
|
|||
python-version: "3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py39-test-instrumentation-asyncio -- -ra
|
||||
|
@ -145,6 +658,7 @@ jobs:
|
|||
py310-test-instrumentation-asyncio_ubuntu-latest:
|
||||
name: instrumentation-asyncio 3.10 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -155,7 +669,7 @@ jobs:
|
|||
python-version: "3.10"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py310-test-instrumentation-asyncio -- -ra
|
||||
|
@ -163,6 +677,7 @@ jobs:
|
|||
py311-test-instrumentation-asyncio_ubuntu-latest:
|
||||
name: instrumentation-asyncio 3.11 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -173,7 +688,7 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py311-test-instrumentation-asyncio -- -ra
|
||||
|
@ -181,6 +696,7 @@ jobs:
|
|||
py312-test-instrumentation-asyncio_ubuntu-latest:
|
||||
name: instrumentation-asyncio 3.12 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -191,32 +707,34 @@ jobs:
|
|||
python-version: "3.12"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py312-test-instrumentation-asyncio -- -ra
|
||||
|
||||
py38-test-instrumentation-cassandra_ubuntu-latest:
|
||||
name: instrumentation-cassandra 3.8 Ubuntu
|
||||
py313-test-instrumentation-asyncio_ubuntu-latest:
|
||||
name: instrumentation-asyncio 3.13 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.8
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.8"
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py38-test-instrumentation-cassandra -- -ra
|
||||
run: tox -e py313-test-instrumentation-asyncio -- -ra
|
||||
|
||||
py39-test-instrumentation-cassandra_ubuntu-latest:
|
||||
name: instrumentation-cassandra 3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -227,7 +745,7 @@ jobs:
|
|||
python-version: "3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py39-test-instrumentation-cassandra -- -ra
|
||||
|
@ -235,6 +753,7 @@ jobs:
|
|||
py310-test-instrumentation-cassandra_ubuntu-latest:
|
||||
name: instrumentation-cassandra 3.10 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -245,7 +764,7 @@ jobs:
|
|||
python-version: "3.10"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py310-test-instrumentation-cassandra -- -ra
|
||||
|
@ -253,6 +772,7 @@ jobs:
|
|||
py311-test-instrumentation-cassandra_ubuntu-latest:
|
||||
name: instrumentation-cassandra 3.11 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -263,7 +783,7 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py311-test-instrumentation-cassandra -- -ra
|
||||
|
@ -271,6 +791,7 @@ jobs:
|
|||
py312-test-instrumentation-cassandra_ubuntu-latest:
|
||||
name: instrumentation-cassandra 3.12 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -281,50 +802,53 @@ jobs:
|
|||
python-version: "3.12"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py312-test-instrumentation-cassandra -- -ra
|
||||
|
||||
pypy3-test-instrumentation-cassandra_ubuntu-latest:
|
||||
name: instrumentation-cassandra pypy-3.8 Ubuntu
|
||||
py313-test-instrumentation-cassandra_ubuntu-latest:
|
||||
name: instrumentation-cassandra 3.13 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python pypy-3.8
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "pypy-3.8"
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py313-test-instrumentation-cassandra -- -ra
|
||||
|
||||
pypy3-test-instrumentation-cassandra_ubuntu-latest:
|
||||
name: instrumentation-cassandra pypy-3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python pypy-3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "pypy-3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e pypy3-test-instrumentation-cassandra -- -ra
|
||||
|
||||
py38-test-processor-baggage_ubuntu-latest:
|
||||
name: processor-baggage 3.8 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.8
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.8"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py38-test-processor-baggage -- -ra
|
||||
|
||||
py39-test-processor-baggage_ubuntu-latest:
|
||||
name: processor-baggage 3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -335,7 +859,7 @@ jobs:
|
|||
python-version: "3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py39-test-processor-baggage -- -ra
|
||||
|
@ -343,6 +867,7 @@ jobs:
|
|||
py310-test-processor-baggage_ubuntu-latest:
|
||||
name: processor-baggage 3.10 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -353,7 +878,7 @@ jobs:
|
|||
python-version: "3.10"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py310-test-processor-baggage -- -ra
|
||||
|
@ -361,6 +886,7 @@ jobs:
|
|||
py311-test-processor-baggage_ubuntu-latest:
|
||||
name: processor-baggage 3.11 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -371,7 +897,7 @@ jobs:
|
|||
python-version: "3.11"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py311-test-processor-baggage -- -ra
|
||||
|
@ -379,6 +905,7 @@ jobs:
|
|||
py312-test-processor-baggage_ubuntu-latest:
|
||||
name: processor-baggage 3.12 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -389,25 +916,45 @@ jobs:
|
|||
python-version: "3.12"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py312-test-processor-baggage -- -ra
|
||||
|
||||
pypy3-test-processor-baggage_ubuntu-latest:
|
||||
name: processor-baggage pypy-3.8 Ubuntu
|
||||
py313-test-processor-baggage_ubuntu-latest:
|
||||
name: processor-baggage 3.13 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python pypy-3.8
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "pypy-3.8"
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e py313-test-processor-baggage -- -ra
|
||||
|
||||
pypy3-test-processor-baggage_ubuntu-latest:
|
||||
name: processor-baggage pypy-3.9 Ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python pypy-3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "pypy-3.9"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e pypy3-test-processor-baggage -- -ra
|
||||
|
|
|
@ -1,10 +1,15 @@
|
|||
repos:
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
# Ruff version.
|
||||
rev: v0.6.9
|
||||
hooks:
|
||||
# Run the linter.
|
||||
- id: ruff
|
||||
args: ["--fix", "--show-fixes"]
|
||||
# Run the formatter.
|
||||
- id: ruff-format
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
# Ruff version.
|
||||
rev: v0.6.9
|
||||
hooks:
|
||||
# Run the linter.
|
||||
- id: ruff
|
||||
args: ["--fix", "--show-fixes"]
|
||||
# Run the formatter.
|
||||
- id: ruff-format
|
||||
- repo: https://github.com/astral-sh/uv-pre-commit
|
||||
# uv version.
|
||||
rev: 0.6.0
|
||||
hooks:
|
||||
- id: uv-lock
|
||||
|
|
|
@ -7,7 +7,7 @@ extension-pkg-whitelist=cassandra
|
|||
|
||||
# Add list of files or directories to be excluded. They should be base names, not
|
||||
# paths.
|
||||
ignore=CVS,gen,Dockerfile,docker-compose.yml,README.md,requirements.txt,docs,.venv
|
||||
ignore=CVS,gen,Dockerfile,docker-compose.yml,README.md,requirements.txt,docs,.venv,site-packages,.tox
|
||||
|
||||
# Add files or directories matching the regex patterns to be excluded. The
|
||||
# regex matches against base names, not paths.
|
||||
|
@ -46,7 +46,7 @@ suggestion-mode=yes
|
|||
unsafe-load-any-extension=no
|
||||
|
||||
# Run python dependant checks considering the baseline version
|
||||
py-version=3.8
|
||||
py-version=3.9
|
||||
|
||||
|
||||
[MESSAGES CONTROL]
|
||||
|
|
|
@ -6,9 +6,9 @@ sphinx:
|
|||
configuration: docs/conf.py
|
||||
|
||||
build:
|
||||
os: "ubuntu-22.04"
|
||||
os: "ubuntu-24.04"
|
||||
tools:
|
||||
python: "3.8"
|
||||
python: "3.11"
|
||||
|
||||
python:
|
||||
install:
|
||||
|
|
231
CHANGELOG.md
231
CHANGELOG.md
|
@ -11,21 +11,250 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
|
||||
## Unreleased
|
||||
|
||||
## Version 1.35.0/0.56b0 (2025-07-11)
|
||||
|
||||
### Added
|
||||
|
||||
- `opentelemetry-instrumentation-pika` Added instrumentation for All `SelectConnection` adapters
|
||||
([#3584](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3584))
|
||||
- `opentelemetry-instrumentation-tornado` Add support for `WebSocketHandler` instrumentation
|
||||
([#3498](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3498))
|
||||
- `opentelemetry-util-http` Added support for redacting specific url query string values and url credentials in instrumentations
|
||||
([#3508](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3508))
|
||||
- `opentelemetry-instrumentation-pymongo` `aggregate` and `getMore` capture statements support
|
||||
([#3601](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3601))
|
||||
|
||||
### Fixed
|
||||
|
||||
- `opentelemetry-instrumentation-asgi`: fix excluded_urls in instrumentation-asgi
|
||||
([#3567](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3567))
|
||||
- `opentelemetry-resource-detector-containerid`: make it more quiet on platforms without cgroups
|
||||
([#3579](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3579))
|
||||
|
||||
## Version 1.34.0/0.55b0 (2025-06-04)
|
||||
|
||||
### Fixed
|
||||
|
||||
- `opentelemetry-instrumentation-system-metrics`: fix loading on Google Cloud Run
|
||||
([#3533](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3533))
|
||||
- `opentelemetry-instrumentation-fastapi`: fix wrapping of middlewares
|
||||
([#3012](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3012))
|
||||
- `opentelemetry-instrumentation-starlette` Remove max version constraint on starlette
|
||||
([#3456](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3456))
|
||||
- `opentelemetry-instrumentation-starlette` Fix memory leak and double middleware
|
||||
([#3529](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3529))
|
||||
- `opentelemetry-instrumentation-urllib3`: proper bucket boundaries in stable semconv http duration metrics
|
||||
([#3518](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3518))
|
||||
- `opentelemetry-instrumentation-urllib`: proper bucket boundaries in stable semconv http duration metrics
|
||||
([#3519](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3519))
|
||||
- `opentelemetry-instrumentation-falcon`: proper bucket boundaries in stable semconv http duration
|
||||
([#3525](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3525))
|
||||
- `opentelemetry-instrumentation-wsgi`: add explicit http duration buckets for stable semconv
|
||||
([#3527](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3527))
|
||||
- `opentelemetry-instrumentation-asgi`: add explicit http duration buckets for stable semconv
|
||||
([#3526](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3526))
|
||||
- `opentelemetry-instrumentation-flask`: proper bucket boundaries in stable semconv http duration
|
||||
([#3523](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3523))
|
||||
- `opentelemetry-instrumentation-django`: proper bucket boundaries in stable semconv http duration
|
||||
([#3524](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3524))
|
||||
- `opentelemetry-instrumentation-grpc`: support non-list interceptors
|
||||
([#3520](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3520))
|
||||
- `opentelemetry-instrumentation-botocore` Ensure spans end on early stream closure for Bedrock Streaming APIs
|
||||
([#3481](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3481))
|
||||
- `opentelemetry-instrumentation-sqlalchemy` Respect suppress_instrumentation functionality
|
||||
([#3477](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3477))
|
||||
- `opentelemetry-instrumentation-botocore`: fix handling of tool input in Bedrock ConverseStream
|
||||
([#3544](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3544))
|
||||
- `opentelemetry-instrumentation-botocore` Add type check when extracting tool use from Bedrock request message content
|
||||
([#3548](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3548))
|
||||
- `opentelemetry-instrumentation-dbapi` Respect suppress_instrumentation functionality ([#3460](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3460))
|
||||
- `opentelemetry-resource-detector-container` Correctly parse container id when using systemd and cgroupsv1
|
||||
([#3429](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3429))
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- `opentelemetry-instrumentation-botocore` Use `cloud.region` instead of `aws.region` span attribute as per semantic conventions.
|
||||
([#3474](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3474))
|
||||
- `opentelemetry-instrumentation-fastapi`: Drop support for FastAPI versions earlier than `0.92`
|
||||
([#3012](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3012))
|
||||
- `opentelemetry-resource-detector-container`: rename package name to `opentelemetry-resource-detector-containerid`
|
||||
([#3536](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3536))
|
||||
|
||||
### Added
|
||||
|
||||
- `opentelemetry-instrumentation-aiohttp-client` Add support for HTTP metrics
|
||||
([#3517](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3517))
|
||||
- `opentelemetry-instrumentation-httpx` Add support for HTTP metrics
|
||||
([#3513](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3513))
|
||||
- `opentelemetry-instrumentation` Allow re-raising exception when instrumentation fails
|
||||
([#3545](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3545))
|
||||
- `opentelemetry-instrumentation-aiokafka` Add instrumentation of `consumer.getmany` (batch)
|
||||
([#3257](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3257))
|
||||
|
||||
### Deprecated
|
||||
|
||||
- Drop support for Python 3.8, bump baseline to Python 3.9.
|
||||
([#3399](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3399))
|
||||
|
||||
## Version 1.33.0/0.54b0 (2025-05-09)
|
||||
|
||||
### Added
|
||||
|
||||
- `opentelemetry-instrumentation-requests` Support explicit_bucket_boundaries_advisory in duration metrics
|
||||
([#3464](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3464))
|
||||
- `opentelemetry-instrumentation-redis` Add support for redis client-specific instrumentation.
|
||||
([#3143](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3143))
|
||||
|
||||
### Fixed
|
||||
|
||||
- `opentelemetry-instrumentation` Catch `ModuleNotFoundError` when the library is not installed
|
||||
and log as debug instead of exception
|
||||
([#3423](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3423))
|
||||
- `opentelemetry-instrumentation-asyncio` Fix duplicate instrumentation
|
||||
([#3383](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3383))
|
||||
- `opentelemetry-instrumentation-botocore` Add GenAI instrumentation for additional Bedrock models for InvokeModel API
|
||||
([#3419](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3419))
|
||||
- `opentelemetry-instrumentation` don't print duplicated conflict log error message
|
||||
([#3432](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3432))
|
||||
- `opentelemetry-instrumentation-grpc` Check for None result in gRPC
|
||||
([#3380](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3381))
|
||||
- `opentelemetry-instrumentation-[asynclick/click]` Add missing opentelemetry-instrumentation dep
|
||||
([#3447](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3447))
|
||||
- `opentelemetry-instrumentation-botocore` Capture server attributes for botocore API calls
|
||||
([#3448](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3448))
|
||||
|
||||
## Version 1.32.0/0.53b0 (2025-04-10)
|
||||
|
||||
### Added
|
||||
|
||||
- `opentelemetry-instrumentation-asyncclick`: new instrumentation to trace asyncclick commands
|
||||
([#3319](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3319))
|
||||
- `opentelemetry-instrumentation-botocore` Add support for GenAI tool events using Amazon Nova models and `InvokeModel*` APIs
|
||||
([#3385](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3385))
|
||||
- `opentelemetry-instrumentation` Make auto instrumentation use the same dependency resolver as manual instrumentation does
|
||||
([#3202](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3202))
|
||||
|
||||
### Fixed
|
||||
|
||||
- `opentelemetry-instrumentation` Fix client address is set to server address in new semconv
|
||||
([#3354](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3354))
|
||||
- `opentelemetry-instrumentation-dbapi`, `opentelemetry-instrumentation-django`,
|
||||
`opentelemetry-instrumentation-sqlalchemy`: Fix sqlcomment for non string query and composable object.
|
||||
([#3113](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3113))
|
||||
- `opentelemetry-instrumentation-grpc` Fix error when using grpc versions <= 1.50.0 with unix sockets.
|
||||
([#3393](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3393))
|
||||
- `opentelemetry-instrumentation-asyncio` Fix duplicate instrumentation.
|
||||
([#3383](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3383))
|
||||
- `opentelemetry-instrumentation-aiokafka` Fix send_and_wait method no headers kwargs error.
|
||||
([#3332](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3332))
|
||||
|
||||
## Version 1.31.0/0.52b0 (2025-03-12)
|
||||
|
||||
### Added
|
||||
|
||||
- `opentelemetry-instrumentation-openai-v2` Update doc for OpenAI Instrumentation to support OpenAI Compatible Platforms
|
||||
([#3279](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3279))
|
||||
- `opentelemetry-instrumentation-system-metrics` Add `process` metrics and deprecated `process.runtime` prefixed ones
|
||||
([#3250](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3250))
|
||||
- `opentelemetry-instrumentation-botocore` Add support for GenAI user events and lazy initialize tracer
|
||||
([#3258](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3258))
|
||||
- `opentelemetry-instrumentation-botocore` Add support for GenAI system events
|
||||
([#3266](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3266))
|
||||
- `opentelemetry-instrumentation-botocore` Add support for GenAI choice events
|
||||
([#3275](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3275))
|
||||
- `opentelemetry-instrumentation-botocore` Add support for GenAI tool events
|
||||
([#3302](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3302))
|
||||
- `opentelemetry-instrumentation-botocore` Add support for GenAI metrics
|
||||
([#3326](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3326))
|
||||
- `opentelemetry-instrumentation` make it simpler to initialize auto-instrumentation programmatically
|
||||
([#3273](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3273))
|
||||
- Add `opentelemetry-instrumentation-vertexai>=2.0b0` to `opentelemetry-bootstrap`
|
||||
([#3307](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3307))
|
||||
- Loosen `opentelemetry-instrumentation-starlette[instruments]` specifier
|
||||
([#3304](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3304))
|
||||
|
||||
### Fixed
|
||||
|
||||
- `opentelemetry-instrumentation-redis` Add missing entry in doc string for `def _instrument`
|
||||
([#3247](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3247))
|
||||
- `opentelemetry-instrumentation-botocore` sns-extension: Change destination name attribute
|
||||
to match topic ARN and redact phone number from attributes
|
||||
([#3249](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3249))
|
||||
- `opentelemetry-instrumentation-asyncpg` Fix fallback for empty queries.
|
||||
([#3253](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3253))
|
||||
- `opentelemetry-instrumentation` Fix a traceback in sqlcommenter when psycopg connection pooling is enabled.
|
||||
([#3309](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3309))
|
||||
- `opentelemetry-instrumentation-threading` Fix broken context typehints
|
||||
([#3322](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3322))
|
||||
- `opentelemetry-instrumentation-requests` always record span status code in duration metric
|
||||
([#3323](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3323))
|
||||
|
||||
## Version 1.30.0/0.51b0 (2025-02-03)
|
||||
|
||||
### Added
|
||||
|
||||
- `opentelemetry-instrumentation-confluent-kafka` Add support for confluent-kafka <=2.7.0
|
||||
([#3100](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3100))
|
||||
- Add support to database stability opt-in in `_semconv` utilities and add tests
|
||||
([#3111](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3111))
|
||||
- `opentelemetry-instrumentation-urllib` Add `py.typed` file to enable PEP 561
|
||||
([#3131](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3131))
|
||||
- `opentelemetry-opentelemetry-pymongo` Add `py.typed` file to enable PEP 561
|
||||
([#3136](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3136))
|
||||
- `opentelemetry-opentelemetry-requests` Add `py.typed` file to enable PEP 561
|
||||
([#3135](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3135))
|
||||
- `opentelemetry-instrumentation-system-metrics` Add `py.typed` file to enable PEP 561
|
||||
([#3132](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3132))
|
||||
- `opentelemetry-opentelemetry-sqlite3` Add `py.typed` file to enable PEP 561
|
||||
([#3133](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3133))
|
||||
- `opentelemetry-instrumentation-falcon` add support version to v4
|
||||
([#3086](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3086))
|
||||
|
||||
- `opentelemetry-instrumentation-falcon` Implement new HTTP semantic convention opt-in for Falcon
|
||||
([#2790](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2790))
|
||||
- `opentelemetry-instrumentation-wsgi` always record span status code to have it available in metrics
|
||||
([#3148](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3148))
|
||||
- add support to Python 3.13
|
||||
([#3134](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3134))
|
||||
- `opentelemetry-opentelemetry-wsgi` Add `py.typed` file to enable PEP 561
|
||||
([#3129](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3129))
|
||||
- `opentelemetry-util-http` Add `py.typed` file to enable PEP 561
|
||||
([#3127](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3127))
|
||||
- `opentelemetry-instrumentation-psycopg2` Add support for psycopg2-binary
|
||||
([#3186](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3186))
|
||||
- `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock Converse API
|
||||
([#3161](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3161))
|
||||
- `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock InvokeModel API
|
||||
([#3200](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3200))
|
||||
- `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock ConverseStream API
|
||||
([#3204](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3204))
|
||||
- `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock InvokeModelWithStreamResponse API
|
||||
([#3206](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3206))
|
||||
- `opentelemetry-instrumentation-pymssql` Add pymssql instrumentation
|
||||
([#394](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/394))
|
||||
- `opentelemetry-instrumentation-mysql` Add sqlcommenter support
|
||||
([#3163](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3163))
|
||||
|
||||
### Fixed
|
||||
|
||||
- `opentelemetry-instrumentation-httpx` Fix `RequestInfo`/`ResponseInfo` type hints
|
||||
([#3105](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3105))
|
||||
- `opentelemetry-instrumentation-dbapi` Move `TracedCursorProxy` and `TracedConnectionProxy` to the module level
|
||||
([#3068](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3068))
|
||||
- `opentelemetry-instrumentation-click` Disable tracing of well-known server click commands
|
||||
([#3174](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3174))
|
||||
- `opentelemetry-instrumentation` Fix `get_dist_dependency_conflicts` if no distribution requires
|
||||
([#3168](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3168))
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- `opentelemetry-exporter-prometheus-remote-write` updated protobuf required version from 4.21 to 5.26 and regenerated protobufs
|
||||
([#3219](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3219))
|
||||
- `opentelemetry-instrumentation-sqlalchemy` including sqlcomment in `db.statement` span attribute value is now opt-in
|
||||
([#3112](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3112))
|
||||
- `opentelemetry-instrumentation-dbapi` including sqlcomment in `db.statement` span attribute value is now opt-in
|
||||
([#3115](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3115))
|
||||
- `opentelemetry-instrumentation-psycopg2`, `opentelemetry-instrumentation-psycopg`, `opentelemetry-instrumentation-mysqlclient`, `opentelemetry-instrumentation-pymysql`: including sqlcomment in `db.statement` span attribute value is now opt-in
|
||||
([#3121](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3121))
|
||||
|
||||
## Version 1.29.0/0.50b0 (2024-12-11)
|
||||
|
||||
|
|
|
@ -19,23 +19,31 @@ Please also read the [OpenTelemetry Contributor Guide](https://github.com/open-t
|
|||
|
||||
## Index
|
||||
|
||||
* [Find a Buddy and get Started Quickly](#find-a-buddy-and-get-started-quickly)
|
||||
* [Development](#development)
|
||||
* [Troubleshooting](#troubleshooting)
|
||||
* [Benchmarks](#benchmarks)
|
||||
* [Pull requests](#pull-requests)
|
||||
* [How to Send Pull Requests](#how-to-send-pull-requests)
|
||||
* [How to Receive Comments](#how-to-receive-comments)
|
||||
* [How to Get PRs Reviewed](#how-to-get-prs-reviewed)
|
||||
* [How to Get PRs Merged](#how-to-get-prs-merged)
|
||||
* [Design Choices](#design-choices)
|
||||
* [Focus on Capabilities, Not Structure Compliance](#focus-on-capabilities-not-structure-compliance)
|
||||
* [Running Tests Locally](#running-tests-locally)
|
||||
* [Testing against a different Core repo branch/commit](#testing-against-a-different-core-repo-branchcommit)
|
||||
* [Style Guide](#style-guide)
|
||||
* [Guideline for instrumentations](#guideline-for-instrumentations)
|
||||
* [Guideline for GenAI instrumentations](#guideline-for-genai-instrumentations)
|
||||
* [Expectations from contributors](#expectations-from-contributors)
|
||||
- [Contributing to opentelemetry-python-contrib](#contributing-to-opentelemetry-python-contrib)
|
||||
- [Index](#index)
|
||||
- [Find a Buddy and get Started Quickly](#find-a-buddy-and-get-started-quickly)
|
||||
- [Development](#development)
|
||||
- [Virtual Environment](#virtual-environment)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
- [Benchmarks](#benchmarks)
|
||||
- [Pull Requests](#pull-requests)
|
||||
- [How to Send Pull Requests](#how-to-send-pull-requests)
|
||||
- [How to Receive Comments](#how-to-receive-comments)
|
||||
- [How to Get PRs Reviewed](#how-to-get-prs-reviewed)
|
||||
- [How to Get PRs Merged](#how-to-get-prs-merged)
|
||||
- [Design Choices](#design-choices)
|
||||
- [Focus on Capabilities, Not Structure Compliance](#focus-on-capabilities-not-structure-compliance)
|
||||
- [Running Tests Locally](#running-tests-locally)
|
||||
- [Testing against a different Core repo branch/commit](#testing-against-a-different-core-repo-branchcommit)
|
||||
- [Style Guide](#style-guide)
|
||||
- [Guideline for instrumentations](#guideline-for-instrumentations)
|
||||
- [Update supported instrumentation package versions](#update-supported-instrumentation-package-versions)
|
||||
- [Guideline for GenAI instrumentations](#guideline-for-genai-instrumentations)
|
||||
- [Get Involved](#get-involved)
|
||||
- [Expectations from contributors](#expectations-from-contributors)
|
||||
- [Updating supported Python versions](#updating-supported-python-versions)
|
||||
- [Bumping the Python baseline](#bumping-the-python-baseline)
|
||||
- [Adding support for a new Python release](#adding-support-for-a-new-python-release)
|
||||
|
||||
## Find a Buddy and get Started Quickly
|
||||
|
||||
|
@ -59,6 +67,12 @@ To install `tox`, run:
|
|||
pip install tox
|
||||
```
|
||||
|
||||
You can also run tox with `uv` support. By default [tox.ini](./tox.ini) will automatically create a provisioned tox environment with `tox-uv`, but you can install it at host level:
|
||||
|
||||
```sh
|
||||
pip install tox-uv
|
||||
```
|
||||
|
||||
You can run `tox` with the following arguments:
|
||||
|
||||
* `tox` to run all existing tox commands, including unit tests for all packages
|
||||
|
@ -87,9 +101,23 @@ See
|
|||
[`tox.ini`](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/tox.ini)
|
||||
for more detail on available tox commands.
|
||||
|
||||
### Virtual Environment
|
||||
|
||||
You can also create a single virtual environment to make it easier to run local tests.
|
||||
|
||||
For that, you'll need to install [`uv`](https://docs.astral.sh/uv/getting-started/installation/).
|
||||
|
||||
After installing `uv`, you can run the following command:
|
||||
|
||||
```sh
|
||||
uv sync
|
||||
```
|
||||
|
||||
This will create a virtual environment in the `.venv` directory and install all the necessary dependencies.
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
Some packages may require additional system-wide dependencies to be installed. For example, you may need to install `libpq-dev` to run the postgresql client libraries instrumentation tests or `libsnappy-dev` to run the prometheus exporter tests. If you encounter a build error, please check the installation instructions for the package you are trying to run tests for.
|
||||
Some packages may require additional system-wide dependencies to be installed. For example, you may need to install `libpq-dev` to run the postgresql client libraries instrumentation tests or `libsnappy-dev` to run the prometheus exporter tests. If you encounter a build error, please check the installation instructions for the package you are trying to run tests for.
|
||||
|
||||
For `docs` building, you may need to install `mysql-client` and other required dependencies as necessary. Ensure the Python version used in your local setup matches the version used in the [CI](./.github/workflows/) to maintain compatibility when building the documentation.
|
||||
|
||||
|
@ -139,7 +167,7 @@ git remote add fork https://github.com/YOUR_GITHUB_USERNAME/opentelemetry-python
|
|||
make sure you have all supported versions of Python installed, install `tox` only for the first time:
|
||||
|
||||
```sh
|
||||
pip install tox
|
||||
pip install tox tox-uv
|
||||
```
|
||||
|
||||
Run tests in the root of the repository (this will run all tox environments and may take some time):
|
||||
|
|
55
README.md
55
README.md
|
@ -33,7 +33,7 @@
|
|||
<strong>
|
||||
<a href="CONTRIBUTING.md">Contributing</a>
|
||||
•
|
||||
<a href="https://opentelemetry-python-contrib.readthedocs.io/en/stable/#examples">Examples</a>
|
||||
<a href="https://opentelemetry-python-contrib.readthedocs.io/en/latest/#instrumentations">Instrumentations</a>
|
||||
</strong>
|
||||
</p>
|
||||
|
||||
|
@ -48,6 +48,7 @@ The Python auto-instrumentation libraries for [OpenTelemetry](https://openteleme
|
|||
* [Installation](#installation)
|
||||
* [Releasing](#releasing)
|
||||
* [Releasing a package as `1.0` stable](#releasing-a-package-as-10-stable)
|
||||
* [Semantic Convention status of instrumentations](#semantic-convention-status-of-instrumentations)
|
||||
* [Contributing](#contributing)
|
||||
* [Thanks to all the people who already contributed](#thanks-to-all-the-people-who-already-contributed)
|
||||
|
||||
|
@ -100,7 +101,7 @@ To release a package as `1.0` stable, the package:
|
|||
|
||||
## Semantic Convention status of instrumentations
|
||||
|
||||
In our efforts to maintain optimal user experience and prevent breaking changes for transitioning into stable semantic conventions, OpenTelemetry Python is adopting the [semantic convention migration plan](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/http/migration-guide.md) for several instrumentations. Currently this plan is only being adopted for HTTP-related instrumentations, but will eventually cover all types. Please refer to the `semconv status` column of the [instrumentation README](instrumentation/README.md) of the current status of instrumentations' semantic conventions. The possible values are `experimental`, `stable` and `migration` referring to [status](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.31.0/specification/document-status.md#lifecycle-status) of that particular semantic convention. `Migration` refers to an instrumentation that currently supports the migration plan.
|
||||
In our efforts to maintain optimal user experience and prevent breaking changes for transitioning into stable semantic conventions, OpenTelemetry Python is adopting the semantic convention migration plan for several instrumentations. Currently this plan is only being adopted for [HTTP-related instrumentations](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/non-normative/http-migration.md), but will eventually cover all types. Please refer to the `semconv status` column of the [instrumentation README](instrumentation/README.md) for the current status of instrumentations' semantic conventions. The possible values are `development`, `stable` and `migration` referring to [status](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.31.0/specification/document-status.md#lifecycle-status) of that particular semantic convention. `Migration` refers to an instrumentation that currently supports the migration plan.
|
||||
|
||||
## Contributing
|
||||
|
||||
|
@ -110,9 +111,18 @@ We meet weekly on Thursday at 9AM PT. The meeting is subject to change depending
|
|||
|
||||
Meeting notes are available as a public [Google doc](https://docs.google.com/document/d/1CIMGoIOZ-c3-igzbd6_Pnxx1SjAkjwqoYSUWxPY8XIs/edit). For edit access, get in touch on [GitHub Discussions](https://github.com/open-telemetry/opentelemetry-python/discussions).
|
||||
|
||||
Approvers ([@open-telemetry/python-approvers](https://github.com/orgs/open-telemetry/teams/python-approvers)):
|
||||
### Maintainers
|
||||
|
||||
- [Emídio Neto](https://github.com/emdneto), Zenvia
|
||||
- [Aaron Abbott](https://github.com/aabmass), Google
|
||||
- [Leighton Chen](https://github.com/lzchen), Microsoft
|
||||
- [Riccardo Magliocchetti](https://github.com/xrmx), Elastic
|
||||
- [Shalev Roda](https://github.com/shalevr), Cisco
|
||||
|
||||
For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
|
||||
|
||||
### Approvers
|
||||
|
||||
- [Emídio Neto](https://github.com/emdneto), PicPay
|
||||
- [Jeremy Voss](https://github.com/jeremydvoss), Microsoft
|
||||
- [Owais Lone](https://github.com/owais), Splunk
|
||||
- [Pablo Collins](https://github.com/pmcollins), Splunk
|
||||
|
@ -120,34 +130,29 @@ Approvers ([@open-telemetry/python-approvers](https://github.com/orgs/open-telem
|
|||
- [Srikanth Chekuri](https://github.com/srikanthccv), signoz.io
|
||||
- [Tammy Baylis](https://github.com/tammy-baylis-swi), SolarWinds
|
||||
|
||||
Emeritus Approvers:
|
||||
For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
|
||||
|
||||
- [Ashutosh Goel](https://github.com/ashu658), Cisco
|
||||
- [Héctor Hernández](https://github.com/hectorhdzg), Microsoft
|
||||
- [Nikolay Sokolik](https://github.com/oxeye-nikolay), Oxeye
|
||||
- [Nikolay Sokolik](https://github.com/nikosokolik), Oxeye
|
||||
- [Nathaniel Ruiz Nowell](https://github.com/NathanielRN), AWS
|
||||
### Emeritus Maintainers
|
||||
|
||||
*Find more about the approver role in [community repository](https://github.com/open-telemetry/community/blob/main/community-membership.md#approver).*
|
||||
- [Alex Boten](https://github.com/codeboten)
|
||||
- [Diego Hurtado](https://github.com/ocelotl)
|
||||
- [Owais Lone](https://github.com/owais)
|
||||
- [Yusuke Tsutsumi](https://github.com/toumorokoshi)
|
||||
|
||||
Maintainers ([@open-telemetry/python-maintainers](https://github.com/orgs/open-telemetry/teams/python-maintainers)):
|
||||
For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
|
||||
|
||||
- [Aaron Abbott](https://github.com/aabmass), Google
|
||||
- [Leighton Chen](https://github.com/lzchen), Microsoft
|
||||
- [Riccardo Magliocchetti](https://github.com/xrmx), Elastic
|
||||
- [Shalev Roda](https://github.com/shalevr), Cisco
|
||||
### Emeritus Approvers
|
||||
|
||||
Emeritus Maintainers:
|
||||
- [Ashutosh Goel](https://github.com/ashu658)
|
||||
- [Héctor Hernández](https://github.com/hectorhdzg)
|
||||
- [Nathaniel Ruiz Nowell](https://github.com/NathanielRN)
|
||||
- [Nikolay Sokolik](https://github.com/nikosokolik)
|
||||
- [Nikolay Sokolik](https://github.com/oxeye-nikolay)
|
||||
|
||||
- [Alex Boten](https://github.com/codeboten), Lightstep
|
||||
- [Diego Hurtado](https://github.com/ocelotl), Lightstep
|
||||
- [Owais Lone](https://github.com/owais), Splunk
|
||||
- [Yusuke Tsutsumi](https://github.com/toumorokoshi), Google
|
||||
For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
|
||||
|
||||
*Find more about the maintainer role in [community repository](https://github.com/open-telemetry/community/blob/main/community-membership.md#maintainer).*
|
||||
|
||||
### Thanks to all the people who already contributed
|
||||
### Thanks to all of our contributors!
|
||||
|
||||
<a href="https://github.com/open-telemetry/opentelemetry-python-contrib/graphs/contributors">
|
||||
<img src="https://contributors-img.web.app/image?repo=open-telemetry/opentelemetry-python-contrib" />
|
||||
<img alt="Repo contributors" src="https://contrib.rocks/image?repo=open-telemetry/opentelemetry-python-contrib" />
|
||||
</a>
|
||||
|
|
15
RELEASING.md
15
RELEASING.md
|
@ -9,7 +9,8 @@
|
|||
(otherwise the workflow will pick up the version from `main` and just remove the `.dev` suffix).
|
||||
* Review the two pull requests that it creates.
|
||||
(one is targeted to the release branch and one is targeted to `main`).
|
||||
* The builds will fail for both the `main` and release pr because of validation rules. Follow the [release workflow](https://github.com/open-telemetry/opentelemetry-python/blob/main/RELEASING.md) for the core repo up until this same point. Change the SHAs of each PR to point at each other to get the `main` and release builds to pass.
|
||||
* The builds will fail for the release PR because of validation rules. Follow the [release workflow](https://github.com/open-telemetry/opentelemetry-python/blob/main/RELEASING.md) for the core repo up until this same point.
|
||||
* Close and reopen the PR so that the workflow will take into account the label automation we have in place
|
||||
* Merge the release PR.
|
||||
* Merge the PR to main (this can be done separately from [making the release](#making-the-release))
|
||||
|
||||
|
@ -21,6 +22,9 @@
|
|||
> - opentelemetry-resource-detector-azure
|
||||
> - opentelemetry-sdk-extension-aws
|
||||
> - opentelemetry-instrumentation-openai-v2
|
||||
> - opentelemetry-instrumentation-vertexai
|
||||
> - opentelemetry-instrumentation-google-genai
|
||||
>
|
||||
> These libraries are also excluded from the general release.
|
||||
|
||||
Package release preparation is handled by the [`[Package] Prepare release`](./.github/workflows/package-prepare-release.yml) workflow that allows
|
||||
|
@ -28,6 +32,8 @@ to pick a specific package to release. It follows the same versioning strategy a
|
|||
|
||||
Long-term package release branch follows `package-release/{package-name}/v{major}.{minor}.x` (or `package-release/{package-name}/v{major}.{minor}bx`) naming pattern.
|
||||
|
||||
The workflow will create two pull requests, one against the `main` and one against the `package-release/` branch; both should be merged in order to proceed with the release.
|
||||
|
||||
## Preparing a new patch release
|
||||
|
||||
* Backport pull request(s) to the release branch.
|
||||
|
@ -35,6 +41,8 @@ Long-term package release branch follows `package-release/{package-name}/v{major
|
|||
* Press the "Run workflow" button, then select the release branch from the dropdown list,
|
||||
e.g. `release/v1.9.x`, then enter the pull request number that you want to backport,
|
||||
then click the "Run workflow" button below that.
|
||||
* Add the label `backport` to the generated pull request.
|
||||
* In case label automation doesn't work, just close and reopen the PR so that the workflow will take into account the label automation we have in place.
|
||||
* Review and merge the backport pull request that it generates.
|
||||
* Merge a pull request to the release branch updating the `CHANGELOG.md`.
|
||||
* The heading for the unreleased entries should be `## Unreleased`.
|
||||
|
@ -42,6 +50,7 @@ Long-term package release branch follows `package-release/{package-name}/v{major
|
|||
* Press the "Run workflow" button, then select the release branch from the dropdown list,
|
||||
e.g. `release/v1.9.x`, and click the "Run workflow" button below that.
|
||||
* Review and merge the pull request that it creates for updating the version.
|
||||
* Note: If you are doing a patch release in `-contrib` repo, you should also do an equivalent patch release in `-core` repo (even if there's no fix to release), otherwise tests in CI will fail.
|
||||
|
||||
### Preparing a patch release for individual package
|
||||
|
||||
|
@ -54,6 +63,8 @@ to pick a specific package to release.
|
|||
|
||||
The workflow can only be run against long-term release branch such as `package-release/{package-name}/v{major}.{minor}.x` or `package-release/{package-name}/v{major}.{minor}bx`.
|
||||
|
||||
The workflow will create a pull request that should be merged in order to proceed with the release.
|
||||
|
||||
## Making the release
|
||||
|
||||
* Run the [Release workflow](https://github.com/open-telemetry/opentelemetry-python-contrib/actions/workflows/release.yml).
|
||||
|
@ -73,6 +84,8 @@ The workflow can only be run against long-term release branch such as `package-r
|
|||
> - opentelemetry-resource-detector-azure
|
||||
> - opentelemetry-sdk-extension-aws
|
||||
> - opentelemetry-instrumentation-openai-v2
|
||||
> - opentelemetry-instrumentation-vertexai
|
||||
> - opentelemetry-instrumentation-google-genai
|
||||
>
|
||||
> These libraries are also excluded from the general patch release.
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ dynamic = ["version"]
|
|||
description = "<REPLACE ME>"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -22,11 +22,11 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
"Programming Language :: Python :: 3.13",
|
||||
]
|
||||
dependencies = [
|
||||
"opentelemetry-api ~= 1.12",
|
||||
|
|
|
@ -12,4 +12,4 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__version__ = "0.51b0.dev"
|
||||
__version__ = "0.57b0.dev"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
pylint==3.0.2
|
||||
httpretty==1.1.4
|
||||
mypy==0.931
|
||||
pyright==v1.1.396
|
||||
sphinx==7.1.2
|
||||
sphinx-rtd-theme==2.0.0rc4
|
||||
sphinx-autodoc-typehints==1.25.2
|
||||
|
|
|
@ -15,6 +15,7 @@ aiohttp~=3.0
|
|||
aiokafka~=0.11.0
|
||||
aiopg>=0.13.0,<1.3.0
|
||||
asyncpg>=0.12.0
|
||||
asyncclick~=8.0
|
||||
boto~=2.0
|
||||
botocore~=1.0
|
||||
boto3~=1.0
|
||||
|
@ -35,6 +36,7 @@ psycopg~=3.1.17
|
|||
pika>=0.12.0
|
||||
pymongo~=4.6.3
|
||||
PyMySQL~=1.1.1
|
||||
pymssql~=2.3.2
|
||||
pyramid>=1.7
|
||||
redis>=2.6
|
||||
remoulade>=0.50
|
||||
|
|
|
@ -122,6 +122,7 @@ intersphinx_mapping = {
|
|||
"https://opentelemetry-python.readthedocs.io/en/latest/",
|
||||
None,
|
||||
),
|
||||
"redis": ("https://redis.readthedocs.io/en/latest/", None),
|
||||
}
|
||||
|
||||
# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky
|
||||
|
|
|
@ -52,7 +52,7 @@ install <https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs>
|
|||
pip install -e ./instrumentation/opentelemetry-instrumentation-botocore
|
||||
pip install -e ./instrumentation-genai/opentelemetry-instrumentation-openai-v2
|
||||
pip install -e ./sdk-extension/opentelemetry-sdk-extension-aws
|
||||
pip install -e ./resource/opentelemetry-resource-detector-container
|
||||
pip install -e ./resource/opentelemetry-resource-detector-containerid
|
||||
|
||||
|
||||
.. toctree::
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
.. include:: ../../../instrumentation/opentelemetry-instrumentation-asyncclick/README.rst
|
||||
:end-before: References
|
||||
|
||||
.. automodule:: opentelemetry.instrumentation.asyncclick
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
|
@ -0,0 +1,7 @@
|
|||
OpenTelemetry pymssql Instrumentation
|
||||
=====================================
|
||||
|
||||
.. automodule:: opentelemetry.instrumentation.pymssql
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
|
@ -1,7 +1,10 @@
|
|||
OpenTelemetry Redis Instrumentation
|
||||
===================================
|
||||
.. include:: ../../../instrumentation/opentelemetry-instrumentation-redis/README.rst
|
||||
:end-before: References
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
.. automodule:: opentelemetry.instrumentation.redis
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
:show-inheritance:
|
|
@ -41,6 +41,10 @@ py-class=
|
|||
callable
|
||||
Consumer
|
||||
confluent_kafka.Message
|
||||
psycopg.Connection
|
||||
psycopg.AsyncConnection
|
||||
ObjectProxy
|
||||
fastapi.applications.FastAPI
|
||||
|
||||
any=
|
||||
; API
|
||||
|
@ -68,6 +72,8 @@ any=
|
|||
|
||||
py-obj=
|
||||
opentelemetry.propagators.textmap.CarrierT
|
||||
opentelemetry.instrumentation.dbapi.ConnectionT
|
||||
opentelemetry.instrumentation.dbapi.CursorT
|
||||
|
||||
py-func=
|
||||
poll
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
OpenTelemetry Python - Resource Detector for Containers
|
||||
=======================================================
|
||||
|
||||
.. automodule:: opentelemetry.resource.detector.container
|
||||
.. automodule:: opentelemetry.resource.detector.containerid
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
|
|
@ -16,7 +16,7 @@ sortfirst=
|
|||
ext/*
|
||||
|
||||
[stable]
|
||||
version=1.30.0.dev
|
||||
version=1.36.0.dev
|
||||
|
||||
packages=
|
||||
opentelemetry-sdk
|
||||
|
@ -34,7 +34,7 @@ packages=
|
|||
opentelemetry-api
|
||||
|
||||
[prerelease]
|
||||
version=0.51b0.dev
|
||||
version=0.57b0.dev
|
||||
|
||||
packages=
|
||||
all
|
||||
|
@ -43,13 +43,15 @@ packages=
|
|||
opentelemetry-instrumentation
|
||||
opentelemetry-contrib-instrumentations
|
||||
opentelemetry-distro
|
||||
opentelemetry-resource-detector-container
|
||||
opentelemetry-resource-detector-containerid
|
||||
|
||||
[exclude_release]
|
||||
packages=
|
||||
opentelemetry-resource-detector-azure
|
||||
opentelemetry-sdk-extension-aws
|
||||
opentelemetry-propagator-aws-xray
|
||||
opentelemetry-instrumentation-google-genai
|
||||
opentelemetry-instrumentation-vertexai
|
||||
opentelemetry-instrumentation-openai-v2
|
||||
opentelemetry-instrumentation-test
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
FROM python:3.8
|
||||
FROM python:3.9
|
||||
|
||||
RUN apt-get update -y && apt-get install libsnappy-dev -y
|
||||
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
## Instructions
|
||||
1. Install protobuf tools. Can use your package manager or download from [GitHub](https://github.com/protocolbuffers/protobuf/releases/tag/v21.7)
|
||||
2. Run `generate-proto-py.sh` from inside the `proto/` directory
|
||||
1. Install protobuf tools. Can use your package manager or download from [GitHub](https://github.com/protocolbuffers/protobuf/releases/tag/v26.0)
|
||||
2. Run `generate-proto-py.sh` from inside the `proto/` directory
|
|
@ -49,7 +49,7 @@ sed -i 's/import "gogoproto\/gogo.proto";/import "opentelemetry\/exporter\/prome
|
|||
echo "Removing clones..."
|
||||
rm -rf protobuf prometheus
|
||||
|
||||
# Used libprotoc 3.21.1 & protoc 21.7
|
||||
# Used libprotoc 26
|
||||
echo "Compiling proto files to Python"
|
||||
protoc -I . --python_out=../src ${SRC_DIR}/gogoproto/gogo.proto ${SRC_DIR}/remote.proto ${SRC_DIR}/types.proto
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ dynamic = ["version"]
|
|||
description = "Prometheus Remote Write Metrics Exporter for OpenTelemetry"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -19,14 +19,14 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
"Programming Language :: Python :: 3.13",
|
||||
]
|
||||
dependencies = [
|
||||
"protobuf ~= 4.21",
|
||||
"protobuf ~= 5.26",
|
||||
"requests ~= 2.28",
|
||||
"opentelemetry-api ~= 1.12",
|
||||
"opentelemetry-sdk ~= 1.12",
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -1,58 +1,45 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: opentelemetry/exporter/prometheus_remote_write/gen/remote.proto
|
||||
# Protobuf Python Version: 5.26.0
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf.internal import builder as _builder
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
|
||||
from google.protobuf.internal import builder as _builder
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
from opentelemetry.exporter.prometheus_remote_write.gen import (
|
||||
types_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_types__pb2,
|
||||
)
|
||||
from opentelemetry.exporter.prometheus_remote_write.gen.gogoproto import (
|
||||
gogo_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_gogoproto_dot_gogo__pb2,
|
||||
)
|
||||
from opentelemetry.exporter.prometheus_remote_write.gen import types_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_types__pb2
|
||||
from opentelemetry.exporter.prometheus_remote_write.gen.gogoproto import gogo_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_gogoproto_dot_gogo__pb2
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
|
||||
b'\n?opentelemetry/exporter/prometheus_remote_write/gen/remote.proto\x12\nprometheus\x1a>opentelemetry/exporter/prometheus_remote_write/gen/types.proto\x1aGopentelemetry/exporter/prometheus_remote_write/gen/gogoproto/gogo.proto"z\n\x0cWriteRequest\x12\x30\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeriesB\x04\xc8\xde\x1f\x00\x12\x32\n\x08metadata\x18\x03 \x03(\x0b\x32\x1a.prometheus.MetricMetadataB\x04\xc8\xde\x1f\x00J\x04\x08\x02\x10\x03"\xae\x01\n\x0bReadRequest\x12"\n\x07queries\x18\x01 \x03(\x0b\x32\x11.prometheus.Query\x12\x45\n\x17\x61\x63\x63\x65pted_response_types\x18\x02 \x03(\x0e\x32$.prometheus.ReadRequest.ResponseType"4\n\x0cResponseType\x12\x0b\n\x07SAMPLES\x10\x00\x12\x17\n\x13STREAMED_XOR_CHUNKS\x10\x01"8\n\x0cReadResponse\x12(\n\x07results\x18\x01 \x03(\x0b\x32\x17.prometheus.QueryResult"\x8f\x01\n\x05Query\x12\x1a\n\x12start_timestamp_ms\x18\x01 \x01(\x03\x12\x18\n\x10\x65nd_timestamp_ms\x18\x02 \x01(\x03\x12*\n\x08matchers\x18\x03 \x03(\x0b\x32\x18.prometheus.LabelMatcher\x12$\n\x05hints\x18\x04 \x01(\x0b\x32\x15.prometheus.ReadHints"9\n\x0bQueryResult\x12*\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeries"]\n\x13\x43hunkedReadResponse\x12\x31\n\x0e\x63hunked_series\x18\x01 \x03(\x0b\x32\x19.prometheus.ChunkedSeries\x12\x13\n\x0bquery_index\x18\x02 \x01(\x03\x42\x08Z\x06prompbb\x06proto3'
|
||||
)
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n?opentelemetry/exporter/prometheus_remote_write/gen/remote.proto\x12\nprometheus\x1a>opentelemetry/exporter/prometheus_remote_write/gen/types.proto\x1aGopentelemetry/exporter/prometheus_remote_write/gen/gogoproto/gogo.proto\"z\n\x0cWriteRequest\x12\x30\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeriesB\x04\xc8\xde\x1f\x00\x12\x32\n\x08metadata\x18\x03 \x03(\x0b\x32\x1a.prometheus.MetricMetadataB\x04\xc8\xde\x1f\x00J\x04\x08\x02\x10\x03\"\xae\x01\n\x0bReadRequest\x12\"\n\x07queries\x18\x01 \x03(\x0b\x32\x11.prometheus.Query\x12\x45\n\x17\x61\x63\x63\x65pted_response_types\x18\x02 \x03(\x0e\x32$.prometheus.ReadRequest.ResponseType\"4\n\x0cResponseType\x12\x0b\n\x07SAMPLES\x10\x00\x12\x17\n\x13STREAMED_XOR_CHUNKS\x10\x01\"8\n\x0cReadResponse\x12(\n\x07results\x18\x01 \x03(\x0b\x32\x17.prometheus.QueryResult\"\x8f\x01\n\x05Query\x12\x1a\n\x12start_timestamp_ms\x18\x01 \x01(\x03\x12\x18\n\x10\x65nd_timestamp_ms\x18\x02 \x01(\x03\x12*\n\x08matchers\x18\x03 \x03(\x0b\x32\x18.prometheus.LabelMatcher\x12$\n\x05hints\x18\x04 \x01(\x0b\x32\x15.prometheus.ReadHints\"9\n\x0bQueryResult\x12*\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeries\"]\n\x13\x43hunkedReadResponse\x12\x31\n\x0e\x63hunked_series\x18\x01 \x03(\x0b\x32\x19.prometheus.ChunkedSeries\x12\x13\n\x0bquery_index\x18\x02 \x01(\x03\x42\x08Z\x06prompbb\x06proto3')
|
||||
|
||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
||||
_builder.BuildTopDescriptorsAndMessages(
|
||||
DESCRIPTOR,
|
||||
"opentelemetry.exporter.prometheus_remote_write.gen.remote_pb2",
|
||||
globals(),
|
||||
)
|
||||
if _descriptor._USE_C_DESCRIPTORS == False:
|
||||
DESCRIPTOR._options = None
|
||||
DESCRIPTOR._serialized_options = b"Z\006prompb"
|
||||
_WRITEREQUEST.fields_by_name["timeseries"]._options = None
|
||||
_WRITEREQUEST.fields_by_name["timeseries"]._serialized_options = (
|
||||
b"\310\336\037\000"
|
||||
)
|
||||
_WRITEREQUEST.fields_by_name["metadata"]._options = None
|
||||
_WRITEREQUEST.fields_by_name["metadata"]._serialized_options = (
|
||||
b"\310\336\037\000"
|
||||
)
|
||||
_WRITEREQUEST._serialized_start = 216
|
||||
_WRITEREQUEST._serialized_end = 338
|
||||
_READREQUEST._serialized_start = 341
|
||||
_READREQUEST._serialized_end = 515
|
||||
_READREQUEST_RESPONSETYPE._serialized_start = 463
|
||||
_READREQUEST_RESPONSETYPE._serialized_end = 515
|
||||
_READRESPONSE._serialized_start = 517
|
||||
_READRESPONSE._serialized_end = 573
|
||||
_QUERY._serialized_start = 576
|
||||
_QUERY._serialized_end = 719
|
||||
_QUERYRESULT._serialized_start = 721
|
||||
_QUERYRESULT._serialized_end = 778
|
||||
_CHUNKEDREADRESPONSE._serialized_start = 780
|
||||
_CHUNKEDREADRESPONSE._serialized_end = 873
|
||||
_globals = globals()
|
||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
|
||||
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.exporter.prometheus_remote_write.gen.remote_pb2', _globals)
|
||||
if not _descriptor._USE_C_DESCRIPTORS:
|
||||
_globals['DESCRIPTOR']._loaded_options = None
|
||||
_globals['DESCRIPTOR']._serialized_options = b'Z\006prompb'
|
||||
_globals['_WRITEREQUEST'].fields_by_name['timeseries']._loaded_options = None
|
||||
_globals['_WRITEREQUEST'].fields_by_name['timeseries']._serialized_options = b'\310\336\037\000'
|
||||
_globals['_WRITEREQUEST'].fields_by_name['metadata']._loaded_options = None
|
||||
_globals['_WRITEREQUEST'].fields_by_name['metadata']._serialized_options = b'\310\336\037\000'
|
||||
_globals['_WRITEREQUEST']._serialized_start=216
|
||||
_globals['_WRITEREQUEST']._serialized_end=338
|
||||
_globals['_READREQUEST']._serialized_start=341
|
||||
_globals['_READREQUEST']._serialized_end=515
|
||||
_globals['_READREQUEST_RESPONSETYPE']._serialized_start=463
|
||||
_globals['_READREQUEST_RESPONSETYPE']._serialized_end=515
|
||||
_globals['_READRESPONSE']._serialized_start=517
|
||||
_globals['_READRESPONSE']._serialized_end=573
|
||||
_globals['_QUERY']._serialized_start=576
|
||||
_globals['_QUERY']._serialized_end=719
|
||||
_globals['_QUERYRESULT']._serialized_start=721
|
||||
_globals['_QUERYRESULT']._serialized_end=778
|
||||
_globals['_CHUNKEDREADRESPONSE']._serialized_start=780
|
||||
_globals['_CHUNKEDREADRESPONSE']._serialized_end=873
|
||||
# @@protoc_insertion_point(module_scope)
|
||||
|
|
|
@ -1,85 +1,66 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: opentelemetry/exporter/prometheus_remote_write/gen/types.proto
|
||||
# Protobuf Python Version: 5.26.0
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf.internal import builder as _builder
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
|
||||
from google.protobuf.internal import builder as _builder
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
from opentelemetry.exporter.prometheus_remote_write.gen.gogoproto import (
|
||||
gogo_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_gogoproto_dot_gogo__pb2,
|
||||
)
|
||||
from opentelemetry.exporter.prometheus_remote_write.gen.gogoproto import gogo_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_gogoproto_dot_gogo__pb2
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
|
||||
b'\n>opentelemetry/exporter/prometheus_remote_write/gen/types.proto\x12\nprometheus\x1aGopentelemetry/exporter/prometheus_remote_write/gen/gogoproto/gogo.proto"\xf8\x01\n\x0eMetricMetadata\x12\x33\n\x04type\x18\x01 \x01(\x0e\x32%.prometheus.MetricMetadata.MetricType\x12\x1a\n\x12metric_family_name\x18\x02 \x01(\t\x12\x0c\n\x04help\x18\x04 \x01(\t\x12\x0c\n\x04unit\x18\x05 \x01(\t"y\n\nMetricType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07\x43OUNTER\x10\x01\x12\t\n\x05GAUGE\x10\x02\x12\r\n\tHISTOGRAM\x10\x03\x12\x12\n\x0eGAUGEHISTOGRAM\x10\x04\x12\x0b\n\x07SUMMARY\x10\x05\x12\x08\n\x04INFO\x10\x06\x12\x0c\n\x08STATESET\x10\x07"*\n\x06Sample\x12\r\n\x05value\x18\x01 \x01(\x01\x12\x11\n\ttimestamp\x18\x02 \x01(\x03"U\n\x08\x45xemplar\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12\r\n\x05value\x18\x02 \x01(\x01\x12\x11\n\ttimestamp\x18\x03 \x01(\x03"\x8f\x01\n\nTimeSeries\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12)\n\x07samples\x18\x02 \x03(\x0b\x32\x12.prometheus.SampleB\x04\xc8\xde\x1f\x00\x12-\n\texemplars\x18\x03 \x03(\x0b\x32\x14.prometheus.ExemplarB\x04\xc8\xde\x1f\x00"$\n\x05Label\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t"1\n\x06Labels\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00"\x82\x01\n\x0cLabelMatcher\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.prometheus.LabelMatcher.Type\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t"(\n\x04Type\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03NEQ\x10\x01\x12\x06\n\x02RE\x10\x02\x12\x07\n\x03NRE\x10\x03"|\n\tReadHints\x12\x0f\n\x07step_ms\x18\x01 \x01(\x03\x12\x0c\n\x04\x66unc\x18\x02 \x01(\t\x12\x10\n\x08start_ms\x18\x03 \x01(\x03\x12\x0e\n\x06\x65nd_ms\x18\x04 \x01(\x03\x12\x10\n\x08grouping\x18\x05 \x03(\t\x12\n\n\x02\x62y\x18\x06 \x01(\x08\x12\x10\n\x08range_ms\x18\x07 \x01(\x03"\x8b\x01\n\x05\x43hunk\x12\x13\n\x0bmin_time_ms\x18\x01 
\x01(\x03\x12\x13\n\x0bmax_time_ms\x18\x02 \x01(\x03\x12(\n\x04type\x18\x03 \x01(\x0e\x32\x1a.prometheus.Chunk.Encoding\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c" \n\x08\x45ncoding\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03XOR\x10\x01"a\n\rChunkedSeries\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12\'\n\x06\x63hunks\x18\x02 \x03(\x0b\x32\x11.prometheus.ChunkB\x04\xc8\xde\x1f\x00\x42\x08Z\x06prompbb\x06proto3'
|
||||
)
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n>opentelemetry/exporter/prometheus_remote_write/gen/types.proto\x12\nprometheus\x1aGopentelemetry/exporter/prometheus_remote_write/gen/gogoproto/gogo.proto\"\xf8\x01\n\x0eMetricMetadata\x12\x33\n\x04type\x18\x01 \x01(\x0e\x32%.prometheus.MetricMetadata.MetricType\x12\x1a\n\x12metric_family_name\x18\x02 \x01(\t\x12\x0c\n\x04help\x18\x04 \x01(\t\x12\x0c\n\x04unit\x18\x05 \x01(\t\"y\n\nMetricType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07\x43OUNTER\x10\x01\x12\t\n\x05GAUGE\x10\x02\x12\r\n\tHISTOGRAM\x10\x03\x12\x12\n\x0eGAUGEHISTOGRAM\x10\x04\x12\x0b\n\x07SUMMARY\x10\x05\x12\x08\n\x04INFO\x10\x06\x12\x0c\n\x08STATESET\x10\x07\"*\n\x06Sample\x12\r\n\x05value\x18\x01 \x01(\x01\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\"U\n\x08\x45xemplar\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12\r\n\x05value\x18\x02 \x01(\x01\x12\x11\n\ttimestamp\x18\x03 \x01(\x03\"\x8f\x01\n\nTimeSeries\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12)\n\x07samples\x18\x02 \x03(\x0b\x32\x12.prometheus.SampleB\x04\xc8\xde\x1f\x00\x12-\n\texemplars\x18\x03 \x03(\x0b\x32\x14.prometheus.ExemplarB\x04\xc8\xde\x1f\x00\"$\n\x05Label\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"1\n\x06Labels\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\"\x82\x01\n\x0cLabelMatcher\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.prometheus.LabelMatcher.Type\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"(\n\x04Type\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03NEQ\x10\x01\x12\x06\n\x02RE\x10\x02\x12\x07\n\x03NRE\x10\x03\"|\n\tReadHints\x12\x0f\n\x07step_ms\x18\x01 \x01(\x03\x12\x0c\n\x04\x66unc\x18\x02 \x01(\t\x12\x10\n\x08start_ms\x18\x03 \x01(\x03\x12\x0e\n\x06\x65nd_ms\x18\x04 \x01(\x03\x12\x10\n\x08grouping\x18\x05 \x03(\t\x12\n\n\x02\x62y\x18\x06 \x01(\x08\x12\x10\n\x08range_ms\x18\x07 
\x01(\x03\"\x8b\x01\n\x05\x43hunk\x12\x13\n\x0bmin_time_ms\x18\x01 \x01(\x03\x12\x13\n\x0bmax_time_ms\x18\x02 \x01(\x03\x12(\n\x04type\x18\x03 \x01(\x0e\x32\x1a.prometheus.Chunk.Encoding\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\" \n\x08\x45ncoding\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03XOR\x10\x01\"a\n\rChunkedSeries\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12\'\n\x06\x63hunks\x18\x02 \x03(\x0b\x32\x11.prometheus.ChunkB\x04\xc8\xde\x1f\x00\x42\x08Z\x06prompbb\x06proto3')
|
||||
|
||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
||||
_builder.BuildTopDescriptorsAndMessages(
|
||||
DESCRIPTOR,
|
||||
"opentelemetry.exporter.prometheus_remote_write.gen.types_pb2",
|
||||
globals(),
|
||||
)
|
||||
if _descriptor._USE_C_DESCRIPTORS == False:
|
||||
DESCRIPTOR._options = None
|
||||
DESCRIPTOR._serialized_options = b"Z\006prompb"
|
||||
_EXEMPLAR.fields_by_name["labels"]._options = None
|
||||
_EXEMPLAR.fields_by_name["labels"]._serialized_options = (
|
||||
b"\310\336\037\000"
|
||||
)
|
||||
_TIMESERIES.fields_by_name["labels"]._options = None
|
||||
_TIMESERIES.fields_by_name["labels"]._serialized_options = (
|
||||
b"\310\336\037\000"
|
||||
)
|
||||
_TIMESERIES.fields_by_name["samples"]._options = None
|
||||
_TIMESERIES.fields_by_name["samples"]._serialized_options = (
|
||||
b"\310\336\037\000"
|
||||
)
|
||||
_TIMESERIES.fields_by_name["exemplars"]._options = None
|
||||
_TIMESERIES.fields_by_name["exemplars"]._serialized_options = (
|
||||
b"\310\336\037\000"
|
||||
)
|
||||
_LABELS.fields_by_name["labels"]._options = None
|
||||
_LABELS.fields_by_name["labels"]._serialized_options = b"\310\336\037\000"
|
||||
_CHUNKEDSERIES.fields_by_name["labels"]._options = None
|
||||
_CHUNKEDSERIES.fields_by_name["labels"]._serialized_options = (
|
||||
b"\310\336\037\000"
|
||||
)
|
||||
_CHUNKEDSERIES.fields_by_name["chunks"]._options = None
|
||||
_CHUNKEDSERIES.fields_by_name["chunks"]._serialized_options = (
|
||||
b"\310\336\037\000"
|
||||
)
|
||||
_METRICMETADATA._serialized_start = 152
|
||||
_METRICMETADATA._serialized_end = 400
|
||||
_METRICMETADATA_METRICTYPE._serialized_start = 279
|
||||
_METRICMETADATA_METRICTYPE._serialized_end = 400
|
||||
_SAMPLE._serialized_start = 402
|
||||
_SAMPLE._serialized_end = 444
|
||||
_EXEMPLAR._serialized_start = 446
|
||||
_EXEMPLAR._serialized_end = 531
|
||||
_TIMESERIES._serialized_start = 534
|
||||
_TIMESERIES._serialized_end = 677
|
||||
_LABEL._serialized_start = 679
|
||||
_LABEL._serialized_end = 715
|
||||
_LABELS._serialized_start = 717
|
||||
_LABELS._serialized_end = 766
|
||||
_LABELMATCHER._serialized_start = 769
|
||||
_LABELMATCHER._serialized_end = 899
|
||||
_LABELMATCHER_TYPE._serialized_start = 859
|
||||
_LABELMATCHER_TYPE._serialized_end = 899
|
||||
_READHINTS._serialized_start = 901
|
||||
_READHINTS._serialized_end = 1025
|
||||
_CHUNK._serialized_start = 1028
|
||||
_CHUNK._serialized_end = 1167
|
||||
_CHUNK_ENCODING._serialized_start = 1135
|
||||
_CHUNK_ENCODING._serialized_end = 1167
|
||||
_CHUNKEDSERIES._serialized_start = 1169
|
||||
_CHUNKEDSERIES._serialized_end = 1266
|
||||
_globals = globals()
|
||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
|
||||
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.exporter.prometheus_remote_write.gen.types_pb2', _globals)
|
||||
if not _descriptor._USE_C_DESCRIPTORS:
|
||||
_globals['DESCRIPTOR']._loaded_options = None
|
||||
_globals['DESCRIPTOR']._serialized_options = b'Z\006prompb'
|
||||
_globals['_EXEMPLAR'].fields_by_name['labels']._loaded_options = None
|
||||
_globals['_EXEMPLAR'].fields_by_name['labels']._serialized_options = b'\310\336\037\000'
|
||||
_globals['_TIMESERIES'].fields_by_name['labels']._loaded_options = None
|
||||
_globals['_TIMESERIES'].fields_by_name['labels']._serialized_options = b'\310\336\037\000'
|
||||
_globals['_TIMESERIES'].fields_by_name['samples']._loaded_options = None
|
||||
_globals['_TIMESERIES'].fields_by_name['samples']._serialized_options = b'\310\336\037\000'
|
||||
_globals['_TIMESERIES'].fields_by_name['exemplars']._loaded_options = None
|
||||
_globals['_TIMESERIES'].fields_by_name['exemplars']._serialized_options = b'\310\336\037\000'
|
||||
_globals['_LABELS'].fields_by_name['labels']._loaded_options = None
|
||||
_globals['_LABELS'].fields_by_name['labels']._serialized_options = b'\310\336\037\000'
|
||||
_globals['_CHUNKEDSERIES'].fields_by_name['labels']._loaded_options = None
|
||||
_globals['_CHUNKEDSERIES'].fields_by_name['labels']._serialized_options = b'\310\336\037\000'
|
||||
_globals['_CHUNKEDSERIES'].fields_by_name['chunks']._loaded_options = None
|
||||
_globals['_CHUNKEDSERIES'].fields_by_name['chunks']._serialized_options = b'\310\336\037\000'
|
||||
_globals['_METRICMETADATA']._serialized_start=152
|
||||
_globals['_METRICMETADATA']._serialized_end=400
|
||||
_globals['_METRICMETADATA_METRICTYPE']._serialized_start=279
|
||||
_globals['_METRICMETADATA_METRICTYPE']._serialized_end=400
|
||||
_globals['_SAMPLE']._serialized_start=402
|
||||
_globals['_SAMPLE']._serialized_end=444
|
||||
_globals['_EXEMPLAR']._serialized_start=446
|
||||
_globals['_EXEMPLAR']._serialized_end=531
|
||||
_globals['_TIMESERIES']._serialized_start=534
|
||||
_globals['_TIMESERIES']._serialized_end=677
|
||||
_globals['_LABEL']._serialized_start=679
|
||||
_globals['_LABEL']._serialized_end=715
|
||||
_globals['_LABELS']._serialized_start=717
|
||||
_globals['_LABELS']._serialized_end=766
|
||||
_globals['_LABELMATCHER']._serialized_start=769
|
||||
_globals['_LABELMATCHER']._serialized_end=899
|
||||
_globals['_LABELMATCHER_TYPE']._serialized_start=859
|
||||
_globals['_LABELMATCHER_TYPE']._serialized_end=899
|
||||
_globals['_READHINTS']._serialized_start=901
|
||||
_globals['_READHINTS']._serialized_end=1025
|
||||
_globals['_CHUNK']._serialized_start=1028
|
||||
_globals['_CHUNK']._serialized_end=1167
|
||||
_globals['_CHUNK_ENCODING']._serialized_start=1135
|
||||
_globals['_CHUNK_ENCODING']._serialized_end=1167
|
||||
_globals['_CHUNKEDSERIES']._serialized_start=1169
|
||||
_globals['_CHUNKEDSERIES']._serialized_end=1266
|
||||
# @@protoc_insertion_point(module_scope)
|
||||
|
|
|
@ -12,4 +12,4 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__version__ = "0.51b0.dev"
|
||||
__version__ = "0.57b0.dev"
|
||||
|
|
|
@ -1,15 +1,13 @@
|
|||
asgiref==3.8.1
|
||||
certifi==2024.7.4
|
||||
charset-normalizer==3.3.2
|
||||
# We can drop this after bumping baseline to pypy-39
|
||||
cramjam==2.1.0; platform_python_implementation == "PyPy"
|
||||
cramjam==2.8.1; platform_python_implementation != "PyPy"
|
||||
cramjam==2.8.4
|
||||
Deprecated==1.2.14
|
||||
idna==3.7
|
||||
iniconfig==2.0.0
|
||||
packaging==24.0
|
||||
pluggy==1.5.0
|
||||
protobuf==4.25.3
|
||||
protobuf==5.26
|
||||
py-cpuinfo==9.0.0
|
||||
pytest==7.4.4
|
||||
python-snappy==0.7.1
|
||||
|
|
|
@ -8,7 +8,7 @@ dynamic = ["version"]
|
|||
description = "Rich Console Exporter for OpenTelemetry"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -18,16 +18,16 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
"Programming Language :: Python :: 3.13",
|
||||
]
|
||||
dependencies = [
|
||||
"opentelemetry-api ~= 1.12",
|
||||
"opentelemetry-sdk ~= 1.12",
|
||||
"opentelemetry-semantic-conventions == 0.51b0.dev",
|
||||
"opentelemetry-semantic-conventions == 0.57b0.dev",
|
||||
"rich>=10.0.0",
|
||||
]
|
||||
|
||||
|
|
|
@ -64,11 +64,15 @@ from rich.tree import Tree
|
|||
import opentelemetry.trace
|
||||
from opentelemetry.sdk.trace import ReadableSpan
|
||||
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
|
||||
from opentelemetry.semconv.trace import SpanAttributes
|
||||
from opentelemetry.semconv._incubating.attributes.db_attributes import (
|
||||
DB_STATEMENT,
|
||||
)
|
||||
|
||||
|
||||
def _ns_to_time(nanoseconds):
|
||||
ts = datetime.datetime.utcfromtimestamp(nanoseconds / 1e9)
|
||||
ts = datetime.datetime.fromtimestamp(
|
||||
nanoseconds / 1e9, datetime.timezone.utc
|
||||
)
|
||||
return ts.strftime("%H:%M:%S.%f")
|
||||
|
||||
|
||||
|
@ -118,7 +122,7 @@ def _child_add_optional_attributes(child: Tree, span: ReadableSpan):
|
|||
label=Text.from_markup("[bold cyan]Attributes :[/bold cyan] ")
|
||||
)
|
||||
for attribute in span.attributes:
|
||||
if attribute == SpanAttributes.DB_STATEMENT:
|
||||
if attribute == DB_STATEMENT:
|
||||
attributes.add(
|
||||
Text.from_markup(f"[bold cyan]{attribute} :[/bold cyan] ")
|
||||
)
|
||||
|
|
|
@ -12,4 +12,4 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__version__ = "0.51b0.dev"
|
||||
__version__ = "0.57b0.dev"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
-c dev-requirements.txt
|
||||
astor==0.8.1
|
||||
jinja2==3.1.4
|
||||
jinja2==3.1.6
|
||||
markupsafe==2.0.1
|
||||
ruff==0.6.9
|
||||
requests
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
|
||||
| Instrumentation | Supported Packages | Metrics support | Semconv status |
|
||||
| --------------- | ------------------ | --------------- | -------------- |
|
||||
| [opentelemetry-instrumentation-google-genai](./opentelemetry-instrumentation-google-genai) | google-genai >= 1.0.0 | No | development
|
||||
| [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development
|
||||
| [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development
|
|
@ -0,0 +1,4 @@
|
|||
.build
|
||||
.test
|
||||
dist
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## Unreleased
|
||||
|
||||
## Version 0.3b0 (2025-07-08)
|
||||
|
||||
- Add automatic instrumentation to tool call functions ([#3446](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3446))
|
||||
|
||||
## Version 0.2b0 (2025-04-28)
|
||||
|
||||
- Add more request configuration options to the span attributes ([#3374](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3374))
|
||||
- Restructure tests to keep in line with repository conventions ([#3344](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3344))
|
||||
|
||||
- Fix [bug](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3416) where
|
||||
span attribute `gen_ai.response.finish_reasons` is empty ([#3417](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3417))
|
||||
|
||||
## Version 0.1b0 (2025-03-05)
|
||||
|
||||
- Add support for async and streaming.
|
||||
([#3298](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3298))
|
||||
|
||||
Create an initial version of Open Telemetry instrumentation for github.com/googleapis/python-genai.
|
||||
([#3256](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3256))
|
|
@ -0,0 +1,203 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@ -0,0 +1,92 @@
|
|||
OpenTelemetry Google GenAI SDK Instrumentation
|
||||
==============================================
|
||||
|
||||
|pypi|
|
||||
|
||||
.. |pypi| image:: https://badge.fury.io/py/opentelemetry-instrumentation-google-genai.svg
|
||||
:target: https://pypi.org/project/opentelemetry-instrumentation-google-genai/
|
||||
|
||||
This library adds instrumentation to the `Google GenAI SDK library <https://pypi.org/project/google-genai/>`_
|
||||
to emit telemetry data following `Semantic Conventions for GenAI systems <https://opentelemetry.io/docs/specs/semconv/gen-ai/>`_.
|
||||
It adds trace spans for GenAI operations, events/logs for recording prompts/responses, and emits metrics that describe the
|
||||
GenAI operations in aggregate.
|
||||
|
||||
|
||||
Experimental
|
||||
------------
|
||||
|
||||
This package is still experimental. The instrumentation may not be complete or correct just yet.
|
||||
|
||||
Please see "TODOS.md" for a list of known defects/TODOs that are blockers to package stability.
|
||||
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
If your application is already instrumented with OpenTelemetry, add this
|
||||
package to your requirements.
|
||||
::
|
||||
|
||||
pip install opentelemetry-instrumentation-google-genai
|
||||
|
||||
If you don't have a Google GenAI SDK application, yet, try our `examples <examples>`_.
|
||||
|
||||
Check out `zero-code example <examples/zero-code>`_ for a quick start.
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
This section describes how to set up Google GenAI SDK instrumentation if you're setting OpenTelemetry up manually.
|
||||
Check out the `manual example <examples/manual>`_ for more details.
|
||||
|
||||
Instrumenting all clients
|
||||
*************************
|
||||
|
||||
When using the instrumentor, all clients will automatically trace GenAI `generate_content` operations.
|
||||
You can also optionally capture prompts and responses as log events.
|
||||
|
||||
Make sure to configure OpenTelemetry tracing, logging, metrics, and events to capture all telemetry emitted by the instrumentation.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
|
||||
from google.genai import Client
|
||||
|
||||
GoogleGenAiSdkInstrumentor().instrument()
|
||||
|
||||
|
||||
client = Client()
|
||||
response = client.models.generate_content(
|
||||
model="gemini-1.5-flash-002",
|
||||
contents="Write a short poem on OpenTelemetry.")
|
||||
|
||||
Enabling message content
|
||||
*************************
|
||||
|
||||
Message content such as the contents of the prompt and response
|
||||
are not captured by default. To capture message content as log events, set the environment variable
|
||||
`OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` to `true`.
|
||||
|
||||
Uninstrument
|
||||
************
|
||||
|
||||
To uninstrument clients, call the uninstrument method:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
|
||||
|
||||
GoogleGenAiSdkInstrumentor().instrument()
|
||||
# ...
|
||||
|
||||
# Uninstrument all clients
|
||||
GoogleGenAiSdkInstrumentor().uninstrument()
|
||||
|
||||
References
|
||||
----------
|
||||
* `Google Gen AI SDK Documentation <https://ai.google.dev/gemini-api/docs/sdks>`_
|
||||
* `Google Gen AI SDK on GitHub <https://github.com/googleapis/python-genai>`_
|
||||
* `Using Vertex AI with Google Gen AI SDK <https://cloud.google.com/vertex-ai/generative-ai/docs/sdks/overview>`_
|
||||
* `OpenTelemetry Project <https://opentelemetry.io/>`_
|
||||
* `OpenTelemetry Python Examples <https://github.com/open-telemetry/opentelemetry-python/tree/main/docs/examples>`_
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
# TODOs
|
||||
|
||||
## Fundamentals
|
||||
|
||||
Here are some TODO items required to achieve stability for this package:
|
||||
|
||||
- Add more span-level attributes for response information
|
||||
- Verify and correct formatting of events:
|
||||
- Including the 'role' field for message events
|
||||
- Including tool invocation information
|
||||
- Emit events for safety ratings when they block responses
|
||||
- Additional cleanup/improvement tasks such as:
|
||||
- Adoption of 'wrapt' instead of 'functools.wraps'
|
||||
- Bolstering test coverage
|
||||
|
||||
## Future
|
||||
|
||||
Beyond the above TODOs, it would also be desirable to extend the
|
||||
instrumentation beyond `generate_content` to other API surfaces.
|
|
@ -0,0 +1,28 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Uncomment and change to your OTLP endpoint
|
||||
# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
|
||||
# OTEL_EXPORTER_OTLP_PROTOCOL=grpc
|
||||
|
||||
# Uncomment to change parameters used to configure 'google.genai'
|
||||
# GOOGLE_GENAI_USE_VERTEXAI=1
|
||||
# GOOGLE_API_KEY=<your api key>
|
||||
# GOOGLE_CLOUD_PROJECT=<your cloud project>
|
||||
# GOOGLE_CLOUD_LOCATION=<your cloud location>
|
||||
|
||||
OTEL_SERVICE_NAME=opentelemetry-python-google-genai
|
||||
|
||||
# Change to 'false' to hide prompt and completion content
|
||||
OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true
|
|
@ -0,0 +1,44 @@
|
|||
OpenTelemetry Google GenAI SDK Manual Instrumentation Example
|
||||
============================================
|
||||
|
||||
This is an example of how to instrument Google GenAI SDK calls when configuring
|
||||
OpenTelemetry SDK and Instrumentations manually.
|
||||
|
||||
When `main.py <main.py>`_ is run, it exports traces, logs, and metrics to an OTLP
|
||||
compatible endpoint. Traces include details such as the model used and the
|
||||
duration of the chat request. Logs capture the chat request and the generated
|
||||
response, providing a comprehensive view of the performance and behavior of
|
||||
your GenAI SDK requests. Metrics include aggregate statistics such as the aggregate
|
||||
token usage as well as the latency distribution of the GenAI operations.
|
||||
|
||||
Note: `.env <.env>`_ file configures additional environment variables:
|
||||
|
||||
- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true`
|
||||
|
||||
... configures Google GenAI SDK instrumentation to capture prompt/response content.
|
||||
|
||||
Setup
|
||||
-----
|
||||
|
||||
An OTLP compatible endpoint should be listening for traces, logs, and metrics on
|
||||
http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
|
||||
|
||||
Next, set up a virtual environment like this:
|
||||
|
||||
::
|
||||
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install "python-dotenv[cli]"
|
||||
pip install -r requirements.txt
|
||||
|
||||
Run
|
||||
---
|
||||
|
||||
Run the example like this:
|
||||
|
||||
::
|
||||
|
||||
export PROMPT="Your prompt here"
|
||||
dotenv run -- python main.py
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# We skip linting this file with pylint, because the linter is not
|
||||
# configured with the "requirements.txt" dependencies and therefore
|
||||
# will give multiple "no-name-in-module" errors for the imports.
|
||||
#
|
||||
# pylint: skip-file
|
||||
|
||||
import os
|
||||
|
||||
import google.genai
|
||||
|
||||
# NOTE: OpenTelemetry Python Logs and Events APIs are in beta
|
||||
from opentelemetry import _events as otel_events
|
||||
from opentelemetry import _logs as otel_logs
|
||||
from opentelemetry import metrics as otel_metrics
|
||||
from opentelemetry import trace as otel_trace
|
||||
from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
|
||||
OTLPLogExporter,
|
||||
)
|
||||
from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import (
|
||||
OTLPMetricExporter,
|
||||
)
|
||||
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
|
||||
OTLPSpanExporter,
|
||||
)
|
||||
from opentelemetry.instrumentation.google_genai import (
|
||||
GoogleGenAiSdkInstrumentor,
|
||||
)
|
||||
from opentelemetry.instrumentation.requests import RequestsInstrumentor
|
||||
from opentelemetry.sdk._events import EventLoggerProvider
|
||||
from opentelemetry.sdk._logs import LoggerProvider
|
||||
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
|
||||
from opentelemetry.sdk.metrics import MeterProvider
|
||||
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
|
||||
from opentelemetry.sdk.trace import TracerProvider
|
||||
from opentelemetry.sdk.trace.export import BatchSpanProcessor
|
||||
|
||||
|
||||
def setup_otel_tracing():
|
||||
otel_trace.set_tracer_provider(TracerProvider())
|
||||
otel_trace.get_tracer_provider().add_span_processor(
|
||||
BatchSpanProcessor(OTLPSpanExporter())
|
||||
)
|
||||
|
||||
|
||||
def setup_otel_logs_and_events():
|
||||
otel_logs.set_logger_provider(LoggerProvider())
|
||||
otel_logs.get_logger_provider().add_log_record_processor(
|
||||
BatchLogRecordProcessor(OTLPLogExporter())
|
||||
)
|
||||
otel_events.set_event_logger_provider(EventLoggerProvider())
|
||||
|
||||
|
||||
def setup_otel_metrics():
|
||||
meter_provider = MeterProvider(
|
||||
metric_readers=[
|
||||
PeriodicExportingMetricReader(
|
||||
OTLPMetricExporter(),
|
||||
),
|
||||
]
|
||||
)
|
||||
otel_metrics.set_meter_provider(meter_provider)
|
||||
|
||||
|
||||
def setup_opentelemetry():
|
||||
setup_otel_tracing()
|
||||
setup_otel_logs_and_events()
|
||||
setup_otel_metrics()
|
||||
|
||||
|
||||
def instrument_google_genai():
|
||||
GoogleGenAiSdkInstrumentor().instrument()
|
||||
RequestsInstrumentor().instrument()
|
||||
|
||||
|
||||
def main():
|
||||
setup_opentelemetry()
|
||||
instrument_google_genai()
|
||||
client = google.genai.Client()
|
||||
response = client.models.generate_content(
|
||||
model=os.getenv("MODEL", "gemini-2.0-flash-001"),
|
||||
contents=os.getenv("PROMPT", "Why is the sky blue?"),
|
||||
)
|
||||
print(response.text)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -0,0 +1,20 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
google-genai ~= 1.0.0
|
||||
opentelemetry-api ~= 1.30.0
|
||||
opentelemetry-sdk ~= 1.30.0
|
||||
opentelemetry-exporter-otlp-proto-grpc ~= 1.30.0
|
||||
opentelemetry-instrumentation-requests ~= 0.51b0
|
||||
opentelemetry-instrumentation-google-genai ~= 0.0.1.dev
|
|
@ -0,0 +1,30 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# Update to change exporter configuration as desired.
|
||||
# See: https://opentelemetry.io/docs/zero-code/python/configuration/
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
|
||||
OTEL_EXPORTER_OTLP_PROTOCOL=grpc
|
||||
|
||||
# Uncomment to change parameters used to configure 'google.genai'
|
||||
# GOOGLE_GENAI_USE_VERTEXAI=1
|
||||
# GOOGLE_API_KEY=<your api key>
|
||||
# GOOGLE_CLOUD_PROJECT=<your cloud project>
|
||||
# GOOGLE_CLOUD_LOCATION=<your cloud location>
|
||||
|
||||
OTEL_SERVICE_NAME=opentelemetry-python-google-genai
|
||||
|
||||
# Change to 'false' to hide prompt and completion content
|
||||
OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true
|
|
@ -0,0 +1,46 @@
|
|||
OpenTelemetry Google GenAI SDK Manual Instrumentation Example
|
||||
============================================
|
||||
|
||||
This is an example of how to instrument Google GenAI SDK calls with zero code changes,
|
||||
using `opentelemetryh-instrument`.
|
||||
|
||||
When `main.py <main.py>`_ is run, it exports traces, logs, and metrics to an OTLP
|
||||
compatible endpoint. Traces include details such as the model used and the
|
||||
duration of the chat request. Logs capture the chat request and the generated
|
||||
response, providing a comprehensive view of the performance and behavior of
|
||||
your GenAI SDK requests. Metrics include aggregate statistics such as the aggregate
|
||||
token usage as well as the latency distribution of the GenAI operations.
|
||||
|
||||
Note: `.env <.env>`_ file configures additional environment variables:
|
||||
|
||||
- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true`
|
||||
|
||||
... configures Google GenAI SDK instrumentation to capture prompt/response content.
|
||||
|
||||
Setup
|
||||
-----
|
||||
|
||||
An OTLP compatible endpoint should be listening for traces, logs, and metrics on
|
||||
http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
|
||||
|
||||
Next, set up a virtual environment like this:
|
||||
|
||||
::
|
||||
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install "python-dotenv[cli]"
|
||||
pip install -r requirements.txt
|
||||
opentelemetry-bootstrap -a install
|
||||
|
||||
|
||||
Run
|
||||
---
|
||||
|
||||
Run the example like this:
|
||||
|
||||
::
|
||||
|
||||
export PROMPT="Your prompt here"
|
||||
dotenv run -- opentelemetry-instrument python main.py
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
|
||||
import google.genai
|
||||
|
||||
|
||||
def main():
|
||||
client = google.genai.Client()
|
||||
response = client.models.generate_content(
|
||||
model=os.getenv("MODEL", "gemini-2.0-flash-001"),
|
||||
contents=os.getenv("PROMPT", "Why is the sky blue?"),
|
||||
)
|
||||
print(response.text)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -0,0 +1,23 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
google-genai ~= 1.0.0
|
||||
opentelemetry-api ~= 1.30.0
|
||||
opentelemetry-sdk ~= 1.30.0
|
||||
opentelemetry-exporter-otlp-proto-grpc ~= 1.30.0
|
||||
opentelemetry-instrumentation ~= 0.51b0
|
||||
opentelemetry-instrumentation-requests ~= 0.51b0
|
||||
opentelemetry-instrumentation-google-genai ~= 0.0.1.dev
|
||||
opentelemetry-contrib-instrumentations ~= 0.51b0
|
||||
opentelemetry-distro[otlp] ~= 0.51b0
|
|
@ -0,0 +1,79 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
[project]
|
||||
name = "opentelemetry-instrumentation-google-genai"
|
||||
dynamic = ["version"]
|
||||
description = "OpenTelemetry Google GenAI SDK instrumentation"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
classifiers = [
|
||||
"Development Status :: 4 - Beta",
|
||||
"Intended Audience :: Developers",
|
||||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12"
|
||||
]
|
||||
dependencies = [
|
||||
"opentelemetry-api >=1.31.1, <2",
|
||||
"opentelemetry-instrumentation >=0.52b1, <2",
|
||||
"opentelemetry-semantic-conventions >=0.52b1, <2"
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
instruments = [
|
||||
"google-genai >= 1.0.0"
|
||||
]
|
||||
|
||||
[project.entry-points.opentelemetry_instrumentor]
|
||||
google-genai = "opentelemetry.instrumentation.google_genai:GoogleGenAiSdkInstrumentor"
|
||||
|
||||
[project.urls]
|
||||
Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-google-genai"
|
||||
Repository = "https://github.com/open-telemetry/opentelemetry-python-contrib"
|
||||
|
||||
[tool.hatch.version]
|
||||
path = "src/opentelemetry/instrumentation/google_genai/version.py"
|
||||
|
||||
[tool.hatch.build.targets.sdist]
|
||||
include = [
|
||||
"/src",
|
||||
"/tests",
|
||||
]
|
||||
|
||||
[tool.hatch.build.targets.wheel]
|
||||
packages = ["src/opentelemetry"]
|
||||
|
||||
[tool.pyright]
|
||||
include = [
|
||||
"src",
|
||||
]
|
||||
exclude = [
|
||||
"**/__pycache__",
|
||||
]
|
||||
stubPath = "types"
|
||||
reportMissingImports = "error"
|
||||
reportMissingTypeStubs = false
|
||||
pythonVersion = "3.9"
|
|
@ -0,0 +1,47 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Google Gen AI SDK client instrumentation supporting the `google-genai` package.
|
||||
|
||||
It can be enabled using ``GoogleGenAiSdkInstrumentor``.
|
||||
|
||||
.. _google-genai: https://pypi.org/project/google-genai/
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
.. code:: python
|
||||
|
||||
import os
|
||||
import google.genai
|
||||
from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
|
||||
|
||||
GoogleGenAiSdkInstrumentor().instrument()
|
||||
model = os.getenv('MODEL', 'gemini-2.0-flash-001')
|
||||
client = google.genai.Client()
|
||||
response = client.models.generate_content(
|
||||
model=model,
|
||||
contents='why is the sky blue?'
|
||||
)
|
||||
print(response.text)
|
||||
|
||||
API
|
||||
---
|
||||
"""
|
||||
|
||||
from .instrumentor import GoogleGenAiSdkInstrumentor
|
||||
from .version import __version__
|
||||
|
||||
__all__ = ["GoogleGenAiSdkInstrumentor", "__version__"]
|
|
@ -0,0 +1,97 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import re
|
||||
from typing import Iterable, Optional, Set
|
||||
|
||||
ALLOWED = True
|
||||
DENIED = False
|
||||
|
||||
|
||||
def _parse_env_list(s: str) -> Set[str]:
|
||||
result = set()
|
||||
for entry in s.split(","):
|
||||
stripped_entry = entry.strip()
|
||||
if not stripped_entry:
|
||||
continue
|
||||
result.add(stripped_entry)
|
||||
return result
|
||||
|
||||
|
||||
class _CompoundMatcher:
|
||||
def __init__(self, entries: Set[str]):
|
||||
self._match_all = "*" in entries
|
||||
self._entries = entries
|
||||
self._regex_matcher = None
|
||||
regex_entries = []
|
||||
for entry in entries:
|
||||
if "*" not in entry:
|
||||
continue
|
||||
if entry == "*":
|
||||
continue
|
||||
entry = entry.replace("[", "\\[")
|
||||
entry = entry.replace("]", "\\]")
|
||||
entry = entry.replace(".", "\\.")
|
||||
entry = entry.replace("*", ".*")
|
||||
regex_entries.append(f"({entry})")
|
||||
if regex_entries:
|
||||
joined_regex = "|".join(regex_entries)
|
||||
regex_str = f"^({joined_regex})$"
|
||||
self._regex_matcher = re.compile(regex_str)
|
||||
|
||||
@property
|
||||
def match_all(self):
|
||||
return self._match_all
|
||||
|
||||
def matches(self, x):
|
||||
if self._match_all:
|
||||
return True
|
||||
if x in self._entries:
|
||||
return True
|
||||
if (self._regex_matcher is not None) and (
|
||||
self._regex_matcher.fullmatch(x)
|
||||
):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class AllowList:
|
||||
def __init__(
|
||||
self,
|
||||
includes: Optional[Iterable[str]] = None,
|
||||
excludes: Optional[Iterable[str]] = None,
|
||||
):
|
||||
self._includes = _CompoundMatcher(set(includes or []))
|
||||
self._excludes = _CompoundMatcher(set(excludes or []))
|
||||
assert (not self._includes.match_all) or (
|
||||
not self._excludes.match_all
|
||||
), "Can't have '*' in both includes and excludes."
|
||||
|
||||
def allowed(self, x: str):
|
||||
if self._excludes.match_all:
|
||||
return self._includes.matches(x)
|
||||
if self._includes.match_all:
|
||||
return not self._excludes.matches(x)
|
||||
return self._includes.matches(x) and not self._excludes.matches(x)
|
||||
|
||||
@staticmethod
|
||||
def from_env(
|
||||
includes_env_var: str, excludes_env_var: Optional[str] = None
|
||||
):
|
||||
includes = _parse_env_list(os.getenv(includes_env_var) or "")
|
||||
excludes = set()
|
||||
if excludes_env_var:
|
||||
excludes = _parse_env_list(os.getenv(excludes_env_var) or "")
|
||||
return AllowList(includes=includes, excludes=excludes)
|
|
@ -0,0 +1,18 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# Semantic Convention still being defined in:
|
||||
# https://github.com/open-telemetry/semantic-conventions/pull/2125
|
||||
GCP_GENAI_OPERATION_CONFIG = "gcp.gen_ai.operation.config"
|
|
@ -0,0 +1,301 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
Optional,
|
||||
Protocol,
|
||||
Sequence,
|
||||
Set,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
|
||||
Primitive = Union[bool, str, int, float]
|
||||
BoolList = list[bool]
|
||||
StringList = list[str]
|
||||
IntList = list[int]
|
||||
FloatList = list[float]
|
||||
HomogenousPrimitiveList = Union[BoolList, StringList, IntList, FloatList]
|
||||
FlattenedValue = Union[Primitive, HomogenousPrimitiveList]
|
||||
FlattenedDict = Dict[str, FlattenedValue]
|
||||
|
||||
|
||||
class FlattenFunc(Protocol):
|
||||
def __call__(
|
||||
self,
|
||||
key: str,
|
||||
value: Any,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, "FlattenFunc"],
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
return None
|
||||
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _concat_key(prefix: Optional[str], suffix: str):
|
||||
if not prefix:
|
||||
return suffix
|
||||
return f"{prefix}.{suffix}"
|
||||
|
||||
|
||||
def _is_primitive(v):
|
||||
for t in [str, bool, int, float]:
|
||||
if isinstance(v, t):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _is_homogenous_primitive_list(v):
|
||||
if not isinstance(v, list):
|
||||
return False
|
||||
if len(v) == 0:
|
||||
return True
|
||||
if not _is_primitive(v[0]):
|
||||
return False
|
||||
first_entry_value_type = type(v[0])
|
||||
for entry in v[1:]:
|
||||
if not isinstance(entry, first_entry_value_type):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def _get_flatten_func(
|
||||
flatten_functions: Dict[str, FlattenFunc], key_names: set[str]
|
||||
) -> Optional[FlattenFunc]:
|
||||
for key in key_names:
|
||||
flatten_func = flatten_functions.get(key)
|
||||
if flatten_func is not None:
|
||||
return flatten_func
|
||||
return None
|
||||
|
||||
|
||||
def _flatten_with_flatten_func(
|
||||
key: str,
|
||||
value: Any,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
key_names: Set[str],
|
||||
) -> Tuple[bool, Any]:
|
||||
flatten_func = _get_flatten_func(flatten_functions, key_names)
|
||||
if flatten_func is None:
|
||||
return False, value
|
||||
func_output = flatten_func(
|
||||
key,
|
||||
value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
if func_output is None:
|
||||
return True, {}
|
||||
if _is_primitive(func_output) or _is_homogenous_primitive_list(
|
||||
func_output
|
||||
):
|
||||
return True, {key: func_output}
|
||||
return False, func_output
|
||||
|
||||
|
||||
def _flatten_compound_value_using_json(
|
||||
key: str,
|
||||
value: Any,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
_from_json=False,
|
||||
) -> FlattenedDict:
|
||||
if _from_json:
|
||||
_logger.debug(
|
||||
"Cannot flatten value with key %s; value: %s", key, value
|
||||
)
|
||||
return {}
|
||||
try:
|
||||
json_string = json.dumps(value)
|
||||
except TypeError:
|
||||
_logger.debug(
|
||||
"Cannot flatten value with key %s; value: %s. Not JSON serializable.",
|
||||
key,
|
||||
value,
|
||||
)
|
||||
return {}
|
||||
json_value = json.loads(json_string)
|
||||
return _flatten_value(
|
||||
key,
|
||||
json_value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
# Ensure that we don't recurse indefinitely if "json.loads()" somehow returns
|
||||
# a complex, compound object that does not get handled by the "primitive", "list",
|
||||
# or "dict" cases. Prevents falling back on the JSON serialization fallback path.
|
||||
_from_json=True,
|
||||
)
|
||||
|
||||
|
||||
def _flatten_compound_value(
|
||||
key: str,
|
||||
value: Any,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
key_names: Set[str],
|
||||
_from_json=False,
|
||||
) -> FlattenedDict:
|
||||
fully_flattened_with_flatten_func, value = _flatten_with_flatten_func(
|
||||
key=key,
|
||||
value=value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
key_names=key_names,
|
||||
)
|
||||
if fully_flattened_with_flatten_func:
|
||||
return value
|
||||
if isinstance(value, dict):
|
||||
return _flatten_dict(
|
||||
value,
|
||||
key_prefix=key,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
if isinstance(value, list):
|
||||
if _is_homogenous_primitive_list(value):
|
||||
return {key: value}
|
||||
return _flatten_list(
|
||||
value,
|
||||
key_prefix=key,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
if hasattr(value, "model_dump"):
|
||||
return _flatten_dict(
|
||||
value.model_dump(),
|
||||
key_prefix=key,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
return _flatten_compound_value_using_json(
|
||||
key,
|
||||
value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
_from_json=_from_json,
|
||||
)
|
||||
|
||||
|
||||
def _flatten_value(
|
||||
key: str,
|
||||
value: Any,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
_from_json=False,
|
||||
) -> FlattenedDict:
|
||||
if value is None:
|
||||
return {}
|
||||
key_names = set([key])
|
||||
renamed_key = rename_keys.get(key)
|
||||
if renamed_key is not None:
|
||||
key_names.add(renamed_key)
|
||||
key = renamed_key
|
||||
if key_names & exclude_keys:
|
||||
return {}
|
||||
if _is_primitive(value):
|
||||
return {key: value}
|
||||
return _flatten_compound_value(
|
||||
key=key,
|
||||
value=value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
key_names=key_names,
|
||||
_from_json=_from_json,
|
||||
)
|
||||
|
||||
|
||||
def _flatten_dict(
|
||||
d: Dict[str, Any],
|
||||
key_prefix: str,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
) -> FlattenedDict:
|
||||
result = {}
|
||||
for key, value in d.items():
|
||||
if key in exclude_keys:
|
||||
continue
|
||||
full_key = _concat_key(key_prefix, key)
|
||||
flattened = _flatten_value(
|
||||
full_key,
|
||||
value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
result.update(flattened)
|
||||
return result
|
||||
|
||||
|
||||
def _flatten_list(
|
||||
lst: list[Any],
|
||||
key_prefix: str,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
) -> FlattenedDict:
|
||||
result = {}
|
||||
result[_concat_key(key_prefix, "length")] = len(lst)
|
||||
for index, value in enumerate(lst):
|
||||
full_key = f"{key_prefix}[{index}]"
|
||||
flattened = _flatten_value(
|
||||
full_key,
|
||||
value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
result.update(flattened)
|
||||
return result
|
||||
|
||||
|
||||
def flatten_dict(
|
||||
d: Dict[str, Any],
|
||||
key_prefix: Optional[str] = None,
|
||||
exclude_keys: Optional[Sequence[str]] = None,
|
||||
rename_keys: Optional[Dict[str, str]] = None,
|
||||
flatten_functions: Optional[Dict[str, FlattenFunc]] = None,
|
||||
):
|
||||
key_prefix = key_prefix or ""
|
||||
exclude_keys = set(exclude_keys or [])
|
||||
rename_keys = rename_keys or {}
|
||||
flatten_functions = flatten_functions or {}
|
||||
return _flatten_dict(
|
||||
d,
|
||||
key_prefix=key_prefix,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
|
@ -0,0 +1,23 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
|
||||
_CONTENT_RECORDING_ENV_VAR = (
|
||||
"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"
|
||||
)
|
||||
|
||||
|
||||
def is_content_recording_enabled():
|
||||
return os.getenv(_CONTENT_RECORDING_ENV_VAR, "false").lower() == "true"
|
|
@ -0,0 +1,790 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import copy
|
||||
import functools
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from typing import Any, AsyncIterator, Awaitable, Iterator, Optional, Union
|
||||
|
||||
from google.genai.models import AsyncModels, Models
|
||||
from google.genai.types import (
|
||||
BlockedReason,
|
||||
Candidate,
|
||||
Content,
|
||||
ContentListUnion,
|
||||
ContentListUnionDict,
|
||||
ContentUnion,
|
||||
ContentUnionDict,
|
||||
GenerateContentConfig,
|
||||
GenerateContentConfigOrDict,
|
||||
GenerateContentResponse,
|
||||
)
|
||||
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.semconv._incubating.attributes import (
|
||||
code_attributes,
|
||||
gen_ai_attributes,
|
||||
)
|
||||
from opentelemetry.semconv.attributes import error_attributes
|
||||
|
||||
from .allowlist_util import AllowList
|
||||
from .custom_semconv import GCP_GENAI_OPERATION_CONFIG
|
||||
from .dict_util import flatten_dict
|
||||
from .flags import is_content_recording_enabled
|
||||
from .otel_wrapper import OTelWrapper
|
||||
from .tool_call_wrapper import wrapped as wrapped_tool
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Constant used to make the absence of content more understandable.
|
||||
_CONTENT_ELIDED = "<elided>"
|
||||
|
||||
# Constant used for the value of 'gen_ai.operation.name'.
|
||||
_GENERATE_CONTENT_OP_NAME = "generate_content"
|
||||
|
||||
|
||||
class _MethodsSnapshot:
|
||||
def __init__(self):
|
||||
self._original_generate_content = Models.generate_content
|
||||
self._original_generate_content_stream = Models.generate_content_stream
|
||||
self._original_async_generate_content = AsyncModels.generate_content
|
||||
self._original_async_generate_content_stream = (
|
||||
AsyncModels.generate_content_stream
|
||||
)
|
||||
|
||||
@property
|
||||
def generate_content(self):
|
||||
return self._original_generate_content
|
||||
|
||||
@property
|
||||
def generate_content_stream(self):
|
||||
return self._original_generate_content_stream
|
||||
|
||||
@property
|
||||
def async_generate_content(self):
|
||||
return self._original_async_generate_content
|
||||
|
||||
@property
|
||||
def async_generate_content_stream(self):
|
||||
return self._original_async_generate_content_stream
|
||||
|
||||
def restore(self):
|
||||
Models.generate_content = self._original_generate_content
|
||||
Models.generate_content_stream = self._original_generate_content_stream
|
||||
AsyncModels.generate_content = self._original_async_generate_content
|
||||
AsyncModels.generate_content_stream = (
|
||||
self._original_async_generate_content_stream
|
||||
)
|
||||
|
||||
|
||||
def _get_vertexai_system_name():
|
||||
return gen_ai_attributes.GenAiSystemValues.VERTEX_AI.name.lower()
|
||||
|
||||
|
||||
def _get_gemini_system_name():
|
||||
return gen_ai_attributes.GenAiSystemValues.GEMINI.name.lower()
|
||||
|
||||
|
||||
def _guess_genai_system_from_env():
|
||||
if os.environ.get("GOOGLE_GENAI_USE_VERTEXAI", "0").lower() in [
|
||||
"true",
|
||||
"1",
|
||||
]:
|
||||
return _get_vertexai_system_name()
|
||||
return _get_gemini_system_name()
|
||||
|
||||
|
||||
def _get_is_vertexai(models_object: Union[Models, AsyncModels]):
|
||||
# Since commit 8e561de04965bb8766db87ad8eea7c57c1040442 of "googleapis/python-genai",
|
||||
# it is possible to obtain the information using a documented property.
|
||||
if hasattr(models_object, "vertexai"):
|
||||
vertexai_attr = getattr(models_object, "vertexai")
|
||||
if vertexai_attr is not None:
|
||||
return vertexai_attr
|
||||
# For earlier revisions, it is necessary to deeply inspect the internals.
|
||||
if hasattr(models_object, "_api_client"):
|
||||
client = getattr(models_object, "_api_client")
|
||||
if not client:
|
||||
return None
|
||||
if hasattr(client, "vertexai"):
|
||||
return getattr(client, "vertexai")
|
||||
return None
|
||||
|
||||
|
||||
def _determine_genai_system(models_object: Union[Models, AsyncModels]):
|
||||
vertexai_attr = _get_is_vertexai(models_object)
|
||||
if vertexai_attr is None:
|
||||
return _guess_genai_system_from_env()
|
||||
if vertexai_attr:
|
||||
return _get_vertexai_system_name()
|
||||
return _get_gemini_system_name()
|
||||
|
||||
|
||||
def _to_dict(value: object):
|
||||
if isinstance(value, dict):
|
||||
return value
|
||||
if hasattr(value, "model_dump"):
|
||||
return value.model_dump()
|
||||
return json.loads(json.dumps(value))
|
||||
|
||||
|
||||
def _add_request_options_to_span(
|
||||
span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList
|
||||
):
|
||||
if config is None:
|
||||
return
|
||||
span_context = span.get_span_context()
|
||||
if not span_context.trace_flags.sampled:
|
||||
# Avoid potentially costly traversal of config
|
||||
# options if the span will be dropped, anyway.
|
||||
return
|
||||
# Automatically derive attributes from the contents of the
|
||||
# config object. This ensures that all relevant parameters
|
||||
# are captured in the telemetry data (except for those
|
||||
# that are excluded via "exclude_keys"). Dynamic attributes (those
|
||||
# starting with "gcp.gen_ai." instead of simply "gen_ai.request.")
|
||||
# are filtered with the "allow_list" before inclusion in the span.
|
||||
attributes = flatten_dict(
|
||||
_to_dict(config),
|
||||
# A custom prefix is used, because the names/structure of the
|
||||
# configuration is likely to be specific to Google Gen AI SDK.
|
||||
key_prefix=GCP_GENAI_OPERATION_CONFIG,
|
||||
exclude_keys=[
|
||||
# System instruction can be overly long for a span attribute.
|
||||
# Additionally, it is recorded as an event (log), instead.
|
||||
"gcp.gen_ai.operation.config.system_instruction",
|
||||
],
|
||||
# Although a custom prefix is used by default, some of the attributes
|
||||
# are captured in common, standard, Semantic Conventions. For the
|
||||
# well-known properties whose values align with Semantic Conventions,
|
||||
# we ensure that the key name matches the standard SemConv name.
|
||||
rename_keys={
|
||||
# TODO: add more entries here as more semantic conventions are
|
||||
# generalized to cover more of the available config options.
|
||||
"gcp.gen_ai.operation.config.temperature": gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE,
|
||||
"gcp.gen_ai.operation.config.top_k": gen_ai_attributes.GEN_AI_REQUEST_TOP_K,
|
||||
"gcp.gen_ai.operation.config.top_p": gen_ai_attributes.GEN_AI_REQUEST_TOP_P,
|
||||
"gcp.gen_ai.operation.config.candidate_count": gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT,
|
||||
"gcp.gen_ai.operation.config.max_output_tokens": gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS,
|
||||
"gcp.gen_ai.operation.config.stop_sequences": gen_ai_attributes.GEN_AI_REQUEST_STOP_SEQUENCES,
|
||||
"gcp.gen_ai.operation.config.frequency_penalty": gen_ai_attributes.GEN_AI_REQUEST_FREQUENCY_PENALTY,
|
||||
"gcp.gen_ai.operation.config.presence_penalty": gen_ai_attributes.GEN_AI_REQUEST_PRESENCE_PENALTY,
|
||||
"gcp.gen_ai.operation.config.seed": gen_ai_attributes.GEN_AI_REQUEST_SEED,
|
||||
},
|
||||
)
|
||||
for key, value in attributes.items():
|
||||
if key.startswith(
|
||||
GCP_GENAI_OPERATION_CONFIG
|
||||
) and not allow_list.allowed(key):
|
||||
# The allowlist is used to control inclusion of the dynamic keys.
|
||||
continue
|
||||
span.set_attribute(key, value)
|
||||
|
||||
|
||||
def _get_response_property(response: GenerateContentResponse, path: str):
|
||||
path_segments = path.split(".")
|
||||
current_context = response
|
||||
for path_segment in path_segments:
|
||||
if current_context is None:
|
||||
return None
|
||||
if isinstance(current_context, dict):
|
||||
current_context = current_context.get(path_segment)
|
||||
else:
|
||||
current_context = getattr(current_context, path_segment)
|
||||
return current_context
|
||||
|
||||
|
||||
def _coerce_config_to_object(
|
||||
config: GenerateContentConfigOrDict,
|
||||
) -> GenerateContentConfig:
|
||||
if isinstance(config, GenerateContentConfig):
|
||||
return config
|
||||
# Input must be a dictionary; convert by invoking the constructor.
|
||||
return GenerateContentConfig(**config)
|
||||
|
||||
|
||||
def _wrapped_config_with_tools(
|
||||
otel_wrapper: OTelWrapper,
|
||||
config: GenerateContentConfig,
|
||||
**kwargs,
|
||||
):
|
||||
if not config.tools:
|
||||
return config
|
||||
result = copy.copy(config)
|
||||
result.tools = [
|
||||
wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools
|
||||
]
|
||||
return result
|
||||
|
||||
|
||||
class _GenerateContentInstrumentationHelper:
|
||||
def __init__(
|
||||
self,
|
||||
models_object: Union[Models, AsyncModels],
|
||||
otel_wrapper: OTelWrapper,
|
||||
model: str,
|
||||
generate_content_config_key_allowlist: Optional[AllowList] = None,
|
||||
):
|
||||
self._start_time = time.time_ns()
|
||||
self._otel_wrapper = otel_wrapper
|
||||
self._genai_system = _determine_genai_system(models_object)
|
||||
self._genai_request_model = model
|
||||
self._finish_reasons_set = set()
|
||||
self._error_type = None
|
||||
self._input_tokens = 0
|
||||
self._output_tokens = 0
|
||||
self._content_recording_enabled = is_content_recording_enabled()
|
||||
self._response_index = 0
|
||||
self._candidate_index = 0
|
||||
self._generate_content_config_key_allowlist = (
|
||||
generate_content_config_key_allowlist or AllowList()
|
||||
)
|
||||
|
||||
def wrapped_config(
|
||||
self, config: Optional[GenerateContentConfigOrDict]
|
||||
) -> Optional[GenerateContentConfig]:
|
||||
if config is None:
|
||||
return None
|
||||
return _wrapped_config_with_tools(
|
||||
self._otel_wrapper,
|
||||
_coerce_config_to_object(config),
|
||||
extra_span_attributes={"gen_ai.system": self._genai_system},
|
||||
)
|
||||
|
||||
def start_span_as_current_span(
|
||||
self, model_name, function_name, end_on_exit=True
|
||||
):
|
||||
return self._otel_wrapper.start_as_current_span(
|
||||
f"{_GENERATE_CONTENT_OP_NAME} {model_name}",
|
||||
start_time=self._start_time,
|
||||
attributes={
|
||||
code_attributes.CODE_FUNCTION_NAME: function_name,
|
||||
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
|
||||
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
|
||||
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
|
||||
},
|
||||
end_on_exit=end_on_exit,
|
||||
)
|
||||
|
||||
def process_request(
|
||||
self,
|
||||
contents: Union[ContentListUnion, ContentListUnionDict],
|
||||
config: Optional[GenerateContentConfigOrDict],
|
||||
):
|
||||
span = trace.get_current_span()
|
||||
_add_request_options_to_span(
|
||||
span, config, self._generate_content_config_key_allowlist
|
||||
)
|
||||
self._maybe_log_system_instruction(config=config)
|
||||
self._maybe_log_user_prompt(contents)
|
||||
|
||||
    def process_response(self, response: GenerateContentResponse):
        """Accumulate telemetry from one (possibly streamed) response chunk.

        Updates finish reasons, running token totals, and error state, emits
        per-candidate log events, then advances the response counter used to
        index streamed chunks.
        """
        # TODO: Determine if there are other response properties that
        # need to be reflected back into the span attributes.
        #
        # See also: TODOS.md.
        self._update_finish_reasons(response)
        self._maybe_update_token_counts(response)
        self._maybe_update_error_type(response)
        self._maybe_log_response(response)
        self._response_index += 1
|
||||
|
||||
def process_error(self, e: Exception):
|
||||
self._error_type = str(e.__class__.__name__)
|
||||
|
||||
def finalize_processing(self):
|
||||
span = trace.get_current_span()
|
||||
span.set_attribute(
|
||||
gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS, self._input_tokens
|
||||
)
|
||||
span.set_attribute(
|
||||
gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS, self._output_tokens
|
||||
)
|
||||
span.set_attribute(
|
||||
gen_ai_attributes.GEN_AI_RESPONSE_FINISH_REASONS,
|
||||
sorted(self._finish_reasons_set),
|
||||
)
|
||||
self._record_token_usage_metric()
|
||||
self._record_duration_metric()
|
||||
|
||||
def _update_finish_reasons(self, response):
|
||||
if not response.candidates:
|
||||
return
|
||||
for candidate in response.candidates:
|
||||
finish_reason = candidate.finish_reason
|
||||
if finish_reason is None:
|
||||
continue
|
||||
finish_reason_str = finish_reason.name.lower().removeprefix(
|
||||
"finish_reason_"
|
||||
)
|
||||
self._finish_reasons_set.add(finish_reason_str)
|
||||
|
||||
def _maybe_update_token_counts(self, response: GenerateContentResponse):
|
||||
input_tokens = _get_response_property(
|
||||
response, "usage_metadata.prompt_token_count"
|
||||
)
|
||||
output_tokens = _get_response_property(
|
||||
response, "usage_metadata.candidates_token_count"
|
||||
)
|
||||
if input_tokens and isinstance(input_tokens, int):
|
||||
self._input_tokens += input_tokens
|
||||
if output_tokens and isinstance(output_tokens, int):
|
||||
self._output_tokens += output_tokens
|
||||
|
||||
def _maybe_update_error_type(self, response: GenerateContentResponse):
|
||||
if response.candidates:
|
||||
return
|
||||
if (
|
||||
(not response.prompt_feedback)
|
||||
or (not response.prompt_feedback.block_reason)
|
||||
or (
|
||||
response.prompt_feedback.block_reason
|
||||
== BlockedReason.BLOCKED_REASON_UNSPECIFIED
|
||||
)
|
||||
):
|
||||
self._error_type = "NO_CANDIDATES"
|
||||
return
|
||||
# TODO: in the case where there are no candidate responses due to
|
||||
# safety settings like this, it might make sense to emit an event
|
||||
# that contains more details regarding the safety settings, their
|
||||
# thresholds, etc. However, this requires defining an associated
|
||||
# semantic convention to capture this. Follow up with SemConv to
|
||||
# establish appropriate data modelling to capture these details,
|
||||
# and then emit those details accordingly. (For the time being,
|
||||
# we use the defined 'error.type' semantic convention to relay
|
||||
# just the minimum amount of error information here).
|
||||
#
|
||||
# See also: "TODOS.md"
|
||||
block_reason = response.prompt_feedback.block_reason.name.upper()
|
||||
self._error_type = f"BLOCKED_{block_reason}"
|
||||
|
||||
def _maybe_log_system_instruction(
|
||||
self, config: Optional[GenerateContentConfigOrDict] = None
|
||||
):
|
||||
system_instruction = None
|
||||
if config is not None:
|
||||
if isinstance(config, dict):
|
||||
system_instruction = config.get("system_instruction")
|
||||
else:
|
||||
system_instruction = config.system_instruction
|
||||
if not system_instruction:
|
||||
return
|
||||
attributes = {
|
||||
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
|
||||
}
|
||||
# TODO: determine if "role" should be reported here or not. It is unclear
|
||||
# since the caller does not supply a "role" and since this comes through
|
||||
# a property named "system_instruction" which would seem to align with
|
||||
# the default "role" that is allowed to be omitted by default.
|
||||
#
|
||||
# See also: "TODOS.md"
|
||||
body = {}
|
||||
if self._content_recording_enabled:
|
||||
body["content"] = _to_dict(system_instruction)
|
||||
else:
|
||||
body["content"] = _CONTENT_ELIDED
|
||||
self._otel_wrapper.log_system_prompt(
|
||||
attributes=attributes,
|
||||
body=body,
|
||||
)
|
||||
|
||||
def _maybe_log_user_prompt(
|
||||
self, contents: Union[ContentListUnion, ContentListUnionDict]
|
||||
):
|
||||
if isinstance(contents, list):
|
||||
total = len(contents)
|
||||
index = 0
|
||||
for entry in contents:
|
||||
self._maybe_log_single_user_prompt(
|
||||
entry, index=index, total=total
|
||||
)
|
||||
index += 1
|
||||
else:
|
||||
self._maybe_log_single_user_prompt(contents)
|
||||
|
||||
def _maybe_log_single_user_prompt(
|
||||
self, contents: Union[ContentUnion, ContentUnionDict], index=0, total=1
|
||||
):
|
||||
# TODO: figure out how to report the index in a manner that is
|
||||
# aligned with the OTel semantic conventions.
|
||||
attributes = {
|
||||
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
|
||||
}
|
||||
|
||||
# TODO: determine if "role" should be reported here or not and, if so,
|
||||
# what the value ought to be. It is not clear whether there is always
|
||||
# a role supplied (and it looks like there could be cases where there
|
||||
# is more than one role present in the supplied contents)?
|
||||
#
|
||||
# See also: "TODOS.md"
|
||||
body = {}
|
||||
if self._content_recording_enabled:
|
||||
logged_contents = contents
|
||||
if isinstance(contents, list):
|
||||
logged_contents = Content(parts=contents)
|
||||
body["content"] = _to_dict(logged_contents)
|
||||
else:
|
||||
body["content"] = _CONTENT_ELIDED
|
||||
self._otel_wrapper.log_user_prompt(
|
||||
attributes=attributes,
|
||||
body=body,
|
||||
)
|
||||
|
||||
    def _maybe_log_response_stats(self, response: GenerateContentResponse):
        """Placeholder: response summary-stat logging is not implemented yet."""
        # TODO: Determine if there is a way that we can log a summary
        # of the overall response in a manner that is aligned with
        # Semantic Conventions. For example, it would be natural
        # to report an event that looks something like:
        #
        # gen_ai.response.stats {
        #   response_index: 0,
        #   candidate_count: 3,
        #   parts_per_candidate: [
        #     3,
        #     1,
        #     5
        #   ]
        # }
        #
        pass
|
||||
|
||||
    def _maybe_log_response_safety_ratings(
        self, response: GenerateContentResponse
    ):
        """Placeholder: prompt-feedback/safety-rating logging is not implemented yet."""
        # TODO: Determine if there is a way that we can log
        # the "prompt_feedback". This would be especially useful
        # in the case where the response is blocked.
        pass
|
||||
|
||||
def _maybe_log_response(self, response: GenerateContentResponse):
|
||||
self._maybe_log_response_stats(response)
|
||||
self._maybe_log_response_safety_ratings(response)
|
||||
if not response.candidates:
|
||||
return
|
||||
candidate_in_response_index = 0
|
||||
for candidate in response.candidates:
|
||||
self._maybe_log_response_candidate(
|
||||
candidate,
|
||||
flat_candidate_index=self._candidate_index,
|
||||
candidate_in_response_index=candidate_in_response_index,
|
||||
response_index=self._response_index,
|
||||
)
|
||||
self._candidate_index += 1
|
||||
candidate_in_response_index += 1
|
||||
|
||||
def _maybe_log_response_candidate(
|
||||
self,
|
||||
candidate: Candidate,
|
||||
flat_candidate_index: int,
|
||||
candidate_in_response_index: int,
|
||||
response_index: int,
|
||||
):
|
||||
# TODO: Determine if there might be a way to report the
|
||||
# response index and candidate response index.
|
||||
attributes = {
|
||||
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
|
||||
}
|
||||
# TODO: determine if "role" should be reported here or not and, if so,
|
||||
# what the value ought to be.
|
||||
#
|
||||
# TODO: extract tool information into a separate tool message.
|
||||
#
|
||||
# TODO: determine if/when we need to emit a 'gen_ai.assistant.message' event.
|
||||
#
|
||||
# TODO: determine how to report other relevant details in the candidate that
|
||||
# are not presently captured by Semantic Conventions. For example, the
|
||||
# "citation_metadata", "grounding_metadata", "logprobs_result", etc.
|
||||
#
|
||||
# See also: "TODOS.md"
|
||||
body = {
|
||||
"index": flat_candidate_index,
|
||||
}
|
||||
if self._content_recording_enabled:
|
||||
if candidate.content:
|
||||
body["content"] = _to_dict(candidate.content)
|
||||
else:
|
||||
body["content"] = _CONTENT_ELIDED
|
||||
if candidate.finish_reason is not None:
|
||||
body["finish_reason"] = candidate.finish_reason.name
|
||||
self._otel_wrapper.log_response_content(
|
||||
attributes=attributes,
|
||||
body=body,
|
||||
)
|
||||
|
||||
def _record_token_usage_metric(self):
|
||||
self._otel_wrapper.token_usage_metric.record(
|
||||
self._input_tokens,
|
||||
attributes={
|
||||
gen_ai_attributes.GEN_AI_TOKEN_TYPE: "input",
|
||||
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
|
||||
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
|
||||
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
|
||||
},
|
||||
)
|
||||
self._otel_wrapper.token_usage_metric.record(
|
||||
self._output_tokens,
|
||||
attributes={
|
||||
gen_ai_attributes.GEN_AI_TOKEN_TYPE: "output",
|
||||
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
|
||||
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
|
||||
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
|
||||
},
|
||||
)
|
||||
|
||||
def _record_duration_metric(self):
|
||||
attributes = {
|
||||
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
|
||||
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
|
||||
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
|
||||
}
|
||||
if self._error_type is not None:
|
||||
attributes[error_attributes.ERROR_TYPE] = self._error_type
|
||||
duration_nanos = time.time_ns() - self._start_time
|
||||
duration_seconds = duration_nanos / 1e9
|
||||
self._otel_wrapper.operation_duration_metric.record(
|
||||
duration_seconds,
|
||||
attributes=attributes,
|
||||
)
|
||||
|
||||
|
||||
def _create_instrumented_generate_content(
    snapshot: _MethodsSnapshot,
    otel_wrapper: OTelWrapper,
    generate_content_config_key_allowlist: Optional[AllowList] = None,
):
    """Build the instrumented replacement for ``Models.generate_content``.

    The returned function preserves the original signature and delegates to
    the saved implementation, wrapping the call in a span plus request,
    response, and error telemetry.
    """
    wrapped_func = snapshot.generate_content

    @functools.wraps(wrapped_func)
    def instrumented_generate_content(
        self: Models,
        *,
        model: str,
        contents: Union[ContentListUnion, ContentListUnionDict],
        config: Optional[GenerateContentConfigOrDict] = None,
        **kwargs: Any,
    ) -> GenerateContentResponse:
        helper = _GenerateContentInstrumentationHelper(
            self,
            otel_wrapper,
            model,
            generate_content_config_key_allowlist=generate_content_config_key_allowlist,
        )
        with helper.start_span_as_current_span(
            model, "google.genai.Models.generate_content"
        ):
            helper.process_request(contents, config)
            try:
                result = wrapped_func(
                    self,
                    model=model,
                    contents=contents,
                    config=helper.wrapped_config(config),
                    **kwargs,
                )
                helper.process_response(result)
                return result
            except Exception as error:
                helper.process_error(error)
                raise
            finally:
                # Runs on success and failure alike.
                helper.finalize_processing()

    return instrumented_generate_content
|
||||
|
||||
|
||||
def _create_instrumented_generate_content_stream(
    snapshot: _MethodsSnapshot,
    otel_wrapper: OTelWrapper,
    generate_content_config_key_allowlist: Optional[AllowList] = None,
):
    """Build the instrumented replacement for ``Models.generate_content_stream``.

    The returned generator function keeps the span open across the whole
    stream, records each yielded chunk, and finalizes span/metric state when
    the generator is exhausted, closed, or fails.
    """
    wrapped_func = snapshot.generate_content_stream

    @functools.wraps(wrapped_func)
    def instrumented_generate_content_stream(
        self: Models,
        *,
        model: str,
        contents: Union[ContentListUnion, ContentListUnionDict],
        config: Optional[GenerateContentConfigOrDict] = None,
        **kwargs: Any,
    ) -> Iterator[GenerateContentResponse]:
        helper = _GenerateContentInstrumentationHelper(
            self,
            otel_wrapper,
            model,
            generate_content_config_key_allowlist=generate_content_config_key_allowlist,
        )
        with helper.start_span_as_current_span(
            model, "google.genai.Models.generate_content_stream"
        ):
            helper.process_request(contents, config)
            try:
                # Record each streamed chunk before handing it to the caller.
                for response in wrapped_func(
                    self,
                    model=model,
                    contents=contents,
                    config=helper.wrapped_config(config),
                    **kwargs,
                ):
                    helper.process_response(response)
                    yield response
            except Exception as error:
                helper.process_error(error)
                raise
            finally:
                # Runs on exhaustion, error, or generator close.
                helper.finalize_processing()

    return instrumented_generate_content_stream
|
||||
|
||||
|
||||
def _create_instrumented_async_generate_content(
    snapshot: _MethodsSnapshot,
    otel_wrapper: OTelWrapper,
    generate_content_config_key_allowlist: Optional[AllowList] = None,
):
    """Build the instrumented replacement for ``AsyncModels.generate_content``.

    Mirrors the synchronous wrapper: same signature, same span and request/
    response/error telemetry, but awaits the saved coroutine implementation.
    """
    wrapped_func = snapshot.async_generate_content

    @functools.wraps(wrapped_func)
    async def instrumented_generate_content(
        self: AsyncModels,
        *,
        model: str,
        contents: Union[ContentListUnion, ContentListUnionDict],
        config: Optional[GenerateContentConfigOrDict] = None,
        **kwargs: Any,
    ) -> GenerateContentResponse:
        helper = _GenerateContentInstrumentationHelper(
            self,
            otel_wrapper,
            model,
            generate_content_config_key_allowlist=generate_content_config_key_allowlist,
        )
        with helper.start_span_as_current_span(
            model, "google.genai.AsyncModels.generate_content"
        ):
            helper.process_request(contents, config)
            try:
                response = await wrapped_func(
                    self,
                    model=model,
                    contents=contents,
                    config=helper.wrapped_config(config),
                    **kwargs,
                )
                helper.process_response(response)
                return response
            except Exception as error:
                helper.process_error(error)
                raise
            finally:
                # Runs on success and failure alike.
                helper.finalize_processing()

    return instrumented_generate_content
|
||||
|
||||
|
||||
# Disabling type checking because this is not yet implemented and tested fully.
def _create_instrumented_async_generate_content_stream(  # type: ignore
    snapshot: _MethodsSnapshot,
    otel_wrapper: OTelWrapper,
    generate_content_config_key_allowlist: Optional[AllowList] = None,
):
    """Build the instrumented replacement for ``AsyncModels.generate_content_stream``.

    Unlike the other wrappers, the span must outlive the wrapped call itself:
    the chunks arrive later through the returned async generator. The span is
    therefore opened with ``end_on_exit=False`` and re-entered (and ended) via
    ``trace.use_span`` inside the generator wrapper.
    """
    wrapped_func = snapshot.async_generate_content_stream

    @functools.wraps(wrapped_func)
    async def instrumented_generate_content_stream(
        self: AsyncModels,
        *,
        model: str,
        contents: Union[ContentListUnion, ContentListUnionDict],
        config: Optional[GenerateContentConfigOrDict] = None,
        **kwargs: Any,
    ) -> Awaitable[AsyncIterator[GenerateContentResponse]]:  # type: ignore
        helper = _GenerateContentInstrumentationHelper(
            self,
            otel_wrapper,
            model,
            generate_content_config_key_allowlist=generate_content_config_key_allowlist,
        )
        with helper.start_span_as_current_span(
            model,
            "google.genai.AsyncModels.generate_content_stream",
            end_on_exit=False,
        ) as span:
            helper.process_request(contents, config)
            try:
                response_async_generator = await wrapped_func(
                    self,
                    model=model,
                    contents=contents,
                    config=helper.wrapped_config(config),
                    **kwargs,
                )
            except Exception as error:  # pylint: disable=broad-exception-caught
                # The initial call failed: record and finalize now, and make
                # sure the manually-managed span is ended while re-raising.
                helper.process_error(error)
                helper.finalize_processing()
                with trace.use_span(span, end_on_exit=True):
                    raise

        async def _response_async_generator_wrapper():
            # Re-enter the span for the lifetime of the stream; it is ended
            # when the generator finishes (end_on_exit=True).
            with trace.use_span(span, end_on_exit=True):
                try:
                    async for response in response_async_generator:
                        helper.process_response(response)
                        yield response
                except Exception as error:
                    helper.process_error(error)
                    raise
                finally:
                    helper.finalize_processing()

        return _response_async_generator_wrapper()

    return instrumented_generate_content_stream
|
||||
|
||||
|
||||
def uninstrument_generate_content(snapshot: object):
    """Restore the original generate_content methods captured in *snapshot*.

    ``snapshot`` is the opaque object returned by
    ``instrument_generate_content``; the ``assert`` is an internal invariant
    check, not input validation for external callers.
    """
    assert isinstance(snapshot, _MethodsSnapshot)
    snapshot.restore()
|
||||
|
||||
|
||||
def instrument_generate_content(
    otel_wrapper: OTelWrapper,
    generate_content_config_key_allowlist: Optional[AllowList] = None,
) -> object:
    """Patch the sync/async generate_content(_stream) methods with wrappers.

    Returns an opaque snapshot of the original methods; pass it to
    ``uninstrument_generate_content`` to restore them.
    """
    snapshot = _MethodsSnapshot()
    patches = (
        (Models, "generate_content", _create_instrumented_generate_content),
        (
            Models,
            "generate_content_stream",
            _create_instrumented_generate_content_stream,
        ),
        (
            AsyncModels,
            "generate_content",
            _create_instrumented_async_generate_content,
        ),
        (
            AsyncModels,
            "generate_content_stream",
            _create_instrumented_async_generate_content_stream,
        ),
    )
    for klass, method_name, factory in patches:
        setattr(
            klass,
            method_name,
            factory(
                snapshot,
                otel_wrapper,
                generate_content_config_key_allowlist=generate_content_config_key_allowlist,
            ),
        )
    return snapshot
|
|
@ -0,0 +1,67 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from typing import Any, Collection, Optional
|
||||
|
||||
from opentelemetry._events import get_event_logger_provider
|
||||
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
||||
from opentelemetry.metrics import get_meter_provider
|
||||
from opentelemetry.trace import get_tracer_provider
|
||||
|
||||
from .allowlist_util import AllowList
|
||||
from .generate_content import (
|
||||
instrument_generate_content,
|
||||
uninstrument_generate_content,
|
||||
)
|
||||
from .otel_wrapper import OTelWrapper
|
||||
|
||||
|
||||
class GoogleGenAiSdkInstrumentor(BaseInstrumentor):
    """Instrumentor that adds tracing, logging, and metrics to google-genai.

    Patches ``Models``/``AsyncModels`` generate_content methods on
    ``_instrument`` and restores the originals on ``_uninstrument``.
    """

    def __init__(
        self, generate_content_config_key_allowlist: Optional[AllowList] = None
    ):
        # Fix: cooperate with BaseInstrumentor's initialization chain; the
        # original skipped super().__init__().
        super().__init__()
        self._generate_content_snapshot = None
        # Explicit allowlist wins; otherwise fall back to env-var config.
        self._generate_content_config_key_allowlist = (
            generate_content_config_key_allowlist
            or AllowList.from_env(
                "OTEL_GOOGLE_GENAI_GENERATE_CONTENT_CONFIG_INCLUDES",
                excludes_env_var="OTEL_GOOGLE_GENAI_GENERATE_CONTENT_CONFIG_EXCLUDES",
            )
        )

    # Inherited, abstract function from 'BaseInstrumentor'. Even though 'self' is
    # not used in the definition, a method is required per the API contract.
    def instrumentation_dependencies(self) -> Collection[str]:  # pylint: disable=no-self-use
        """Return the instrumented package requirement(s)."""
        return ["google-genai>=1.0.0,<2"]

    def _instrument(self, **kwargs: Any):
        """Install the generate_content patches using supplied or global providers."""
        tracer_provider = (
            kwargs.get("tracer_provider") or get_tracer_provider()
        )
        event_logger_provider = (
            kwargs.get("event_logger_provider") or get_event_logger_provider()
        )
        meter_provider = kwargs.get("meter_provider") or get_meter_provider()
        otel_wrapper = OTelWrapper.from_providers(
            tracer_provider=tracer_provider,
            event_logger_provider=event_logger_provider,
            meter_provider=meter_provider,
        )
        self._generate_content_snapshot = instrument_generate_content(
            otel_wrapper,
            generate_content_config_key_allowlist=self._generate_content_config_key_allowlist,
        )

    def _uninstrument(self, **kwargs: Any):
        """Restore the original google-genai methods."""
        uninstrument_generate_content(self._generate_content_snapshot)
|
|
@ -0,0 +1,92 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
|
||||
import google.genai
|
||||
|
||||
from opentelemetry._events import Event
|
||||
from opentelemetry.semconv._incubating.metrics import gen_ai_metrics
|
||||
from opentelemetry.semconv.schemas import Schemas
|
||||
|
||||
from .version import __version__ as _LIBRARY_VERSION
|
||||
|
||||
# Module-level logger for diagnostic messages from this wrapper.
_logger = logging.getLogger(__name__)

# Instrumentation-scope identity reported on all emitted telemetry.
_SCOPE_NAME = "opentelemetry.instrumentation.google_genai"
_PYPI_PACKAGE_NAME = "opentelemetry-instrumentation-google-genai"
# Semantic-convention schema version the emitted telemetry conforms to.
_SCHEMA_URL = Schemas.V1_30_0.value
# Extra scope attributes identifying the instrumented client library.
_SCOPE_ATTRIBUTES = {
    "gcp.client.name": "google.genai",
    "gcp.client.repo": "googleapis/python-genai",
    "gcp.client.version": google.genai.__version__,
    "pypi.package.name": _PYPI_PACKAGE_NAME,
}
|
||||
|
||||
|
||||
class OTelWrapper:
    """Bundles the tracer, event logger, and metrics used by this instrumentation."""

    def __init__(self, tracer, event_logger, meter):
        self._tracer = tracer
        self._event_logger = event_logger
        self._meter = meter
        self._operation_duration_metric = (
            gen_ai_metrics.create_gen_ai_client_operation_duration(meter)
        )
        self._token_usage_metric = (
            gen_ai_metrics.create_gen_ai_client_token_usage(meter)
        )

    @staticmethod
    def from_providers(tracer_provider, event_logger_provider, meter_provider):
        """Construct a wrapper from OTel providers using this package's scope."""
        scope_args = (
            _SCOPE_NAME,
            _LIBRARY_VERSION,
            _SCHEMA_URL,
            _SCOPE_ATTRIBUTES,
        )
        return OTelWrapper(
            tracer_provider.get_tracer(*scope_args),
            event_logger_provider.get_event_logger(*scope_args),
            meter=meter_provider.get_meter(*scope_args),
        )

    def start_as_current_span(self, *args, **kwargs):
        """Delegate directly to the underlying tracer."""
        return self._tracer.start_as_current_span(*args, **kwargs)

    @property
    def operation_duration_metric(self):
        """Histogram for gen_ai client operation duration."""
        return self._operation_duration_metric

    @property
    def token_usage_metric(self):
        """Histogram for gen_ai client token usage."""
        return self._token_usage_metric

    def log_system_prompt(self, attributes, body):
        """Emit a 'gen_ai.system.message' event."""
        _logger.debug("Recording system prompt.")
        self._log_event("gen_ai.system.message", attributes, body)

    def log_user_prompt(self, attributes, body):
        """Emit a 'gen_ai.user.message' event."""
        _logger.debug("Recording user prompt.")
        self._log_event("gen_ai.user.message", attributes, body)

    def log_response_content(self, attributes, body):
        """Emit a 'gen_ai.choice' event."""
        _logger.debug("Recording response.")
        self._log_event("gen_ai.choice", attributes, body)

    def _log_event(self, event_name, attributes, body):
        """Build and emit one event through the event logger."""
        event = Event(event_name, body=body, attributes=attributes)
        self._event_logger.emit(event)
|
|
@ -0,0 +1,15 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Package requirement(s) whose presence gates auto-instrumentation
# (consumed by BaseInstrumentor / the opentelemetry-instrument tooling).
_instruments = ("google-genai >= 1.0.0",)
|
|
@ -0,0 +1,220 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import functools
|
||||
import inspect
|
||||
import json
|
||||
from typing import Any, Callable, Optional, Union
|
||||
|
||||
from google.genai.types import (
|
||||
ToolListUnion,
|
||||
ToolListUnionDict,
|
||||
ToolOrDict,
|
||||
)
|
||||
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.semconv._incubating.attributes import (
|
||||
code_attributes,
|
||||
)
|
||||
|
||||
from .flags import is_content_recording_enabled
|
||||
from .otel_wrapper import OTelWrapper
|
||||
|
||||
# Signature of a user-supplied Python callable that may be passed as a tool.
ToolFunction = Callable[..., Any]
|
||||
|
||||
|
||||
def _is_primitive(value):
|
||||
return isinstance(value, (str, int, bool, float))
|
||||
|
||||
|
||||
def _to_otel_value(python_value):
    """Coerces parameters to something representable with Open Telemetry."""
    if python_value is None or _is_primitive(python_value):
        return python_value
    if isinstance(python_value, list):
        return [_to_otel_value(item) for item in python_value]
    if isinstance(python_value, dict):
        return {k: _to_otel_value(v) for k, v in python_value.items()}
    # Pydantic-style models know how to serialize themselves.
    if hasattr(python_value, "model_dump"):
        return python_value.model_dump()
    # Plain objects: recurse over their attribute dict.
    if hasattr(python_value, "__dict__"):
        return _to_otel_value(vars(python_value))
    # Last resort: an opaque but deterministic textual representation.
    return repr(python_value)
|
||||
|
||||
|
||||
def _is_homogenous_primitive_list(value):
    """True for a (possibly empty) list whose entries share one primitive type."""
    if not isinstance(value, list):
        return False
    if not value:
        return True
    head = value[0]
    if not _is_primitive(head):
        return False
    head_type = type(head)
    return all(isinstance(entry, head_type) for entry in value[1:])
|
||||
|
||||
|
||||
def _to_otel_attribute(python_value):
    """Coerce *python_value* to a valid OTel attribute; JSON-encode otherwise."""
    coerced = _to_otel_value(python_value)
    if _is_primitive(coerced) or _is_homogenous_primitive_list(coerced):
        return coerced
    return json.dumps(coerced)
|
||||
|
||||
|
||||
def _create_function_span_name(wrapped_function):
|
||||
"""Constructs the span name for a given local function tool call."""
|
||||
function_name = wrapped_function.__name__
|
||||
return f"execute_tool {function_name}"
|
||||
|
||||
|
||||
def _create_function_span_attributes(
    wrapped_function, function_args, function_kwargs, extra_span_attributes
):
    """Creates the attributes for a tool call function span."""
    attributes = dict(extra_span_attributes) if extra_span_attributes else {}
    attributes["gen_ai.operation.name"] = "execute_tool"
    attributes["gen_ai.tool.name"] = wrapped_function.__name__
    if wrapped_function.__doc__:
        attributes["gen_ai.tool.description"] = wrapped_function.__doc__
    attributes[code_attributes.CODE_FUNCTION_NAME] = wrapped_function.__name__
    attributes["code.module"] = wrapped_function.__module__
    attributes["code.args.positional.count"] = len(function_args)
    attributes["code.args.keyword.count"] = len(function_kwargs)
    return attributes
|
||||
|
||||
|
||||
def _record_function_call_argument(
    span, param_name, param_value, include_values
):
    """Record one call argument's type (and optionally its value) on *span*."""
    prefix = f"code.function.parameters.{param_name}"
    span.set_attribute(f"{prefix}.type", type(param_value).__name__)
    if include_values:
        span.set_attribute(f"{prefix}.value", _to_otel_attribute(param_value))
|
||||
|
||||
|
||||
def _record_function_call_arguments(
    otel_wrapper, wrapped_function, function_args, function_kwargs
):
    """Records the details about a function invocation as span attributes."""
    include_values = is_content_recording_enabled()
    span = trace.get_current_span()
    declared = list(inspect.signature(wrapped_function).parameters.values())
    for position, arg_value in enumerate(function_args):
        # Prefer the declared parameter name; fall back to a positional label
        # for surplus *args entries.
        if position < len(declared):
            name = declared[position].name
        else:
            name = f"args[{position}]"
        _record_function_call_argument(span, name, arg_value, include_values)
    for kw_name, kw_value in function_kwargs.items():
        _record_function_call_argument(span, kw_name, kw_value, include_values)
|
||||
|
||||
|
||||
def _record_function_call_result(otel_wrapper, wrapped_function, result):
    """Records the details about a function result as span attributes."""
    span = trace.get_current_span()
    span.set_attribute("code.function.return.type", type(result).__name__)
    if is_content_recording_enabled():
        span.set_attribute(
            "code.function.return.value", _to_otel_attribute(result)
        )
|
||||
|
||||
|
||||
def _wrap_sync_tool_function(
    tool_function: ToolFunction,
    otel_wrapper: OTelWrapper,
    extra_span_attributes: Optional[dict[str, str]] = None,
    **unused_kwargs,
):
    """Wrap a synchronous tool callable with an 'execute_tool' span."""

    @functools.wraps(tool_function)
    def wrapped_function(*args, **kwargs):
        name = _create_function_span_name(tool_function)
        span_attributes = _create_function_span_attributes(
            tool_function, args, kwargs, extra_span_attributes
        )
        with otel_wrapper.start_as_current_span(
            name, attributes=span_attributes
        ):
            _record_function_call_arguments(
                otel_wrapper, tool_function, args, kwargs
            )
            result = tool_function(*args, **kwargs)
            _record_function_call_result(otel_wrapper, tool_function, result)
            return result

    return wrapped_function
|
||||
|
||||
|
||||
def _wrap_async_tool_function(
    tool_function: ToolFunction,
    otel_wrapper: OTelWrapper,
    extra_span_attributes: Optional[dict[str, str]] = None,
    **unused_kwargs,
):
    """Wrap an asynchronous tool callable with an 'execute_tool' span."""

    @functools.wraps(tool_function)
    async def wrapped_function(*args, **kwargs):
        name = _create_function_span_name(tool_function)
        span_attributes = _create_function_span_attributes(
            tool_function, args, kwargs, extra_span_attributes
        )
        with otel_wrapper.start_as_current_span(
            name, attributes=span_attributes
        ):
            _record_function_call_arguments(
                otel_wrapper, tool_function, args, kwargs
            )
            result = await tool_function(*args, **kwargs)
            _record_function_call_result(otel_wrapper, tool_function, result)
            return result

    return wrapped_function
|
||||
|
||||
|
||||
def _wrap_tool_function(
    tool_function: ToolFunction, otel_wrapper: OTelWrapper, **kwargs
):
    """Wrap a tool function with tracing, picking the sync or async variant."""
    # Coroutine functions need an async wrapper so the call can be awaited.
    wrapper_factory = (
        _wrap_async_tool_function
        if inspect.iscoroutinefunction(tool_function)
        else _wrap_sync_tool_function
    )
    return wrapper_factory(tool_function, otel_wrapper, **kwargs)
|
||||
|
||||
|
||||
def wrapped(
    tool_or_tools: Optional[
        Union[ToolFunction, ToolOrDict, ToolListUnion, ToolListUnionDict]
    ],
    otel_wrapper: OTelWrapper,
    **kwargs,
):
    """Recursively wrap tool callables (and containers of them) for tracing.

    Lists and dicts are walked recursively; bare callables are wrapped;
    ``None`` and any other value pass through unchanged.
    """
    if tool_or_tools is None:
        return None
    if isinstance(tool_or_tools, list):
        return [
            wrapped(entry, otel_wrapper, **kwargs) for entry in tool_or_tools
        ]
    if isinstance(tool_or_tools, dict):
        return {
            name: wrapped(entry, otel_wrapper, **kwargs)
            for name, entry in tool_or_tools.items()
        }
    if callable(tool_or_tools):
        return _wrap_tool_function(tool_or_tools, otel_wrapper, **kwargs)
    # Non-callable leaves (e.g. pre-built tool configs) are left untouched.
    return tool_or_tools
|
|
@ -0,0 +1,20 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# **IMPORTANT**:
#
# This version should stay below "1.0" until the fundamentals
# in "TODOS.md" have been addressed. Please revisit the TODOs
# listed there before bumping to a stable version.
#
# The "b" marks a beta pre-release and ".dev" a development build
# (PEP 440 version-identifier format).
__version__ = "0.4b0.dev"
|
|
@ -12,9 +12,9 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# TODO: adapt tests from OpenLLMetry here along with tests from
|
||||
# instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py
|
||||
import google.auth.credentials
|
||||
|
||||
|
||||
def test_placeholder():
    """Trivial smoke test that keeps the module collectible by pytest."""
    assert True
|
||||
class FakeCredentials(google.auth.credentials.AnonymousCredentials):
    """Anonymous credentials whose refresh is a no-op, for offline tests."""

    def refresh(self, request):
        # Real credentials would fetch a token here; tests never need one.
        pass
|
|
@ -0,0 +1,84 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import unittest
|
||||
|
||||
import google.genai
|
||||
|
||||
from .auth import FakeCredentials
|
||||
from .instrumentation_context import InstrumentationContext
|
||||
from .otel_mocker import OTelMocker
|
||||
|
||||
|
||||
class TestCase(unittest.TestCase):
    """Shared test base that installs in-memory OTel providers and lazily
    builds an (instrumented) ``google.genai`` client.

    Instrumentation and the client are created on first access of the
    ``client`` property so subclasses can tweak configuration (vertex
    mode, instrumentor kwargs) beforehand.
    """

    def setUp(self):
        # Swap the global OTel providers for in-memory ones first so any
        # telemetry produced during the test is captured.
        self._otel = OTelMocker()
        self._otel.install()
        self._instrumentation_context = None
        self._api_key = "test-api-key"
        self._project = "test-project"
        self._location = "test-location"
        self._client = None
        self._uses_vertex = False
        self._credentials = FakeCredentials()
        # Extra kwargs forwarded to the instrumentor constructor.
        self._instrumentor_args = {}

    def _lazy_init(self):
        # Install instrumentation on demand (see the "client" property).
        self._instrumentation_context = InstrumentationContext(
            **self._instrumentor_args
        )
        self._instrumentation_context.install()

    def set_instrumentor_constructor_kwarg(self, key, value):
        """Record a kwarg for the instrumentor; call before touching ``client``."""
        self._instrumentor_args[key] = value

    @property
    def client(self):
        """Lazily created google.genai client (instruments on first use)."""
        if self._client is None:
            self._client = self._create_client()
        return self._client

    @property
    def otel(self):
        """The in-memory OTel mocker capturing spans/logs/metrics."""
        return self._otel

    def set_use_vertex(self, use_vertex):
        """Select Vertex AI mode for the next client created."""
        self._uses_vertex = use_vertex

    def reset_client(self):
        """Drop the cached client so the next access builds a fresh one."""
        self._client = None

    def reset_instrumentation(self):
        """Uninstall instrumentation (it is re-installed on next client use)."""
        if self._instrumentation_context is None:
            return
        self._instrumentation_context.uninstall()
        self._instrumentation_context = None

    def _create_client(self):
        self._lazy_init()
        if self._uses_vertex:
            # NOTE(review): sets a process-wide env var that is not
            # restored in tearDown — presumably needed by the SDK's
            # vertex code path; confirm whether cleanup is required.
            os.environ["GOOGLE_API_KEY"] = self._api_key
            return google.genai.Client(
                vertexai=True,
                project=self._project,
                location=self._location,
                credentials=self._credentials,
            )
        return google.genai.Client(vertexai=False, api_key=self._api_key)

    def tearDown(self):
        # Uninstall instrumentation before restoring the OTel globals.
        if self._instrumentation_context is not None:
            self._instrumentation_context.uninstall()
        self._otel.uninstall()
|
|
@ -0,0 +1,28 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from opentelemetry.instrumentation.google_genai import (
|
||||
GoogleGenAiSdkInstrumentor,
|
||||
)
|
||||
|
||||
|
||||
class InstrumentationContext:
    """Owns a GoogleGenAiSdkInstrumentor and its install/uninstall lifecycle."""

    def __init__(self, **kwargs):
        # kwargs are forwarded verbatim to the instrumentor constructor.
        self._instrumentor = GoogleGenAiSdkInstrumentor(**kwargs)

    def install(self):
        """Activate instrumentation of the google.genai SDK."""
        self._instrumentor.instrument()

    def uninstall(self):
        """Deactivate instrumentation, restoring the SDK's original methods."""
        self._instrumentor.uninstrument()
|
|
@ -0,0 +1,232 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import opentelemetry._events
|
||||
import opentelemetry._logs._internal
|
||||
import opentelemetry.metrics._internal
|
||||
import opentelemetry.trace
|
||||
from opentelemetry._events import (
|
||||
get_event_logger_provider,
|
||||
set_event_logger_provider,
|
||||
)
|
||||
from opentelemetry._logs import get_logger_provider, set_logger_provider
|
||||
from opentelemetry.metrics import get_meter_provider, set_meter_provider
|
||||
from opentelemetry.sdk._events import EventLoggerProvider
|
||||
from opentelemetry.sdk._logs import LoggerProvider
|
||||
from opentelemetry.sdk._logs.export import (
|
||||
InMemoryLogExporter,
|
||||
SimpleLogRecordProcessor,
|
||||
)
|
||||
from opentelemetry.sdk.metrics import MeterProvider
|
||||
from opentelemetry.sdk.metrics._internal.export import InMemoryMetricReader
|
||||
from opentelemetry.sdk.trace import TracerProvider
|
||||
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
|
||||
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
|
||||
InMemorySpanExporter,
|
||||
)
|
||||
from opentelemetry.trace import get_tracer_provider, set_tracer_provider
|
||||
from opentelemetry.util._once import Once
|
||||
|
||||
|
||||
def _bypass_otel_once():
    """Reset the OTel "set once" guards so providers can be swapped again.

    The OpenTelemetry API normally allows each global provider (tracer,
    logger, event logger, meter) to be set only once per process.
    Replacing the internal ``Once`` guards lets tests install and restore
    providers repeatedly.  Relies on private SDK internals by design.
    """
    opentelemetry.trace._TRACER_PROVIDER_SET_ONCE = Once()
    opentelemetry._logs._internal._LOGGER_PROVIDER_SET_ONCE = Once()
    opentelemetry._events._EVENT_LOGGER_PROVIDER_SET_ONCE = Once()
    opentelemetry.metrics._internal._METER_PROVIDER_SET_ONCE = Once()
|
||||
|
||||
|
||||
class OTelProviderSnapshot:
    """Captures the global OTel providers so they can be restored later."""

    def __init__(self):
        # Pair each provider's current value with the setter that restores it.
        self._restorers = [
            (set_tracer_provider, get_tracer_provider()),
            (set_logger_provider, get_logger_provider()),
            (set_event_logger_provider, get_event_logger_provider()),
            (set_meter_provider, get_meter_provider()),
        ]

    def restore(self):
        """Reinstall the captured providers as the process-wide globals."""
        # The "set once" guards must be cleared first or the setters no-op.
        _bypass_otel_once()
        for setter, provider in self._restorers:
            setter(provider)
|
||||
|
||||
|
||||
class _LogWrapper:
|
||||
def __init__(self, log_data):
|
||||
self._log_data = log_data
|
||||
|
||||
@property
|
||||
def scope(self):
|
||||
return self._log_data.instrumentation_scope
|
||||
|
||||
@property
|
||||
def resource(self):
|
||||
return self._log_data.log_record.resource
|
||||
|
||||
@property
|
||||
def attributes(self):
|
||||
return self._log_data.log_record.attributes
|
||||
|
||||
@property
|
||||
def body(self):
|
||||
return self._log_data.log_record.body
|
||||
|
||||
def __str__(self):
|
||||
return self._log_data.log_record.to_json()
|
||||
|
||||
|
||||
class _MetricDataPointWrapper:
|
||||
def __init__(self, resource, scope, metric):
|
||||
self._resource = resource
|
||||
self._scope = scope
|
||||
self._metric = metric
|
||||
|
||||
@property
|
||||
def resource(self):
|
||||
return self._resource
|
||||
|
||||
@property
|
||||
def scope(self):
|
||||
return self._scope
|
||||
|
||||
@property
|
||||
def metric(self):
|
||||
return self._metric
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self._metric.name
|
||||
|
||||
@property
|
||||
def data(self):
|
||||
return self._metric.data
|
||||
|
||||
|
||||
class OTelMocker:
    """Swaps the global OTel providers for in-memory ones during a test.

    ``install()`` snapshots the current global tracer/logger/event/meter
    providers and replaces them with providers backed by in-memory
    exporters; ``uninstall()`` restores the snapshot.  The ``get_*`` and
    ``assert_*`` helpers inspect whatever telemetry the code under test
    produced.
    """

    def __init__(self):
        self._snapshot = None
        self._logs = InMemoryLogExporter()
        self._traces = InMemorySpanExporter()
        self._metrics = InMemoryMetricReader()
        self._spans = []
        self._finished_logs = []
        self._metrics_data = []

    def install(self):
        """Capture the current global providers and install in-memory ones."""
        self._snapshot = OTelProviderSnapshot()
        _bypass_otel_once()
        self._install_logs()
        self._install_metrics()
        self._install_traces()

    def uninstall(self):
        """Restore the providers captured by install()."""
        self._snapshot.restore()

    def get_finished_logs(self):
        """Return wrappers for every finished log record, duplicate-free.

        The in-memory exporter reports *all* finished logs on every call
        and does not clear them, so the cache is rebuilt rather than
        appended to; appending duplicated entries whenever this method
        was called more than once.
        """
        self._finished_logs = [
            _LogWrapper(log_data)
            for log_data in self._logs.get_finished_logs()
        ]
        return self._finished_logs

    def get_finished_spans(self):
        """Return every finished span, duplicate-free."""
        # Same rationale as get_finished_logs: the exporter re-reports all
        # spans on each read, so replace rather than extend the cache.
        self._spans = list(self._traces.get_finished_spans())
        return self._spans

    def get_metrics_data(self):
        """Collect newly produced metric data points and return all so far."""
        # Unlike the span/log exporters, the in-memory metric reader drains
        # its data on each read (it may also return None when nothing new
        # was collected), so accumulating across calls is correct here.
        data = self._metrics.get_metrics_data()
        if data is not None:
            for resource_metric in data.resource_metrics:
                resource = resource_metric.resource
                for scope_metrics in resource_metric.scope_metrics:
                    scope = scope_metrics.scope
                    for metric in scope_metrics.metrics:
                        self._metrics_data.append(
                            _MetricDataPointWrapper(resource, scope, metric)
                        )
        return self._metrics_data

    def get_span_named(self, name):
        """Return the first finished span with the given name, or None."""
        for span in self.get_finished_spans():
            if span.name == name:
                return span
        return None

    def assert_has_span_named(self, name):
        """Fail the test unless a finished span with this name exists."""
        span = self.get_span_named(name)
        finished_spans = [span.name for span in self.get_finished_spans()]
        assert (
            span is not None
        ), f'Could not find span named "{name}"; finished spans: {finished_spans}'

    def assert_does_not_have_span_named(self, name):
        """Fail the test if a finished span with this name exists."""
        span = self.get_span_named(name)
        assert span is None, f"Found unexpected span named {name}"

    def get_event_named(self, event_name):
        """Return the first log whose "event.name" attribute matches, or None."""
        for event in self.get_finished_logs():
            event_name_attr = event.attributes.get("event.name")
            if event_name_attr is None:
                continue
            if event_name_attr == event_name:
                return event
        return None

    def get_events_named(self, event_name):
        """Return every log whose "event.name" attribute matches."""
        result = []
        for event in self.get_finished_logs():
            event_name_attr = event.attributes.get("event.name")
            if event_name_attr is None:
                continue
            if event_name_attr == event_name:
                result.append(event)
        return result

    def assert_has_event_named(self, name):
        """Fail the test unless an event log with this name exists."""
        event = self.get_event_named(name)
        finished_logs = self.get_finished_logs()
        assert (
            event is not None
        ), f'Could not find event named "{name}"; finished logs: {finished_logs}'

    def assert_does_not_have_event_named(self, name):
        """Fail the test if an event log with this name exists."""
        event = self.get_event_named(name)
        assert event is None, f"Unexpected event: {event}"

    def get_metrics_data_named(self, name):
        """Return every collected metric data point with the given name."""
        results = []
        for entry in self.get_metrics_data():
            if entry.name == name:
                results.append(entry)
        return results

    def assert_has_metrics_data_named(self, name):
        """Fail the test unless at least one metric with this name exists."""
        data = self.get_metrics_data_named(name)
        assert len(data) > 0

    def _install_logs(self):
        # A simple (synchronous) processor so tests see logs immediately;
        # events are built on top of the same logger provider.
        provider = LoggerProvider()
        provider.add_log_record_processor(SimpleLogRecordProcessor(self._logs))
        set_logger_provider(provider)
        event_provider = EventLoggerProvider(logger_provider=provider)
        set_event_logger_provider(event_provider)

    def _install_metrics(self):
        provider = MeterProvider(metric_readers=[self._metrics])
        set_meter_provider(provider)

    def _install_traces(self):
        # SimpleSpanProcessor exports each span as it ends (no batching).
        provider = TracerProvider()
        provider.add_span_processor(SimpleSpanProcessor(self._traces))
        set_tracer_provider(provider)
|
|
@ -0,0 +1,163 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
import unittest.mock
|
||||
|
||||
from google.genai.models import AsyncModels, Models
|
||||
|
||||
from ..common.base import TestCase as CommonTestCaseBase
|
||||
from .util import convert_to_response, create_response
|
||||
|
||||
|
||||
# Helper used in "_install_mocks" below.
|
||||
# Helper used in "_install_mocks" below.
def _wrap_output(mock_generate_content):
    """Adapt a mock that returns plain data into one returning a response."""

    def _wrapped(*args, **kwargs):
        raw = mock_generate_content(*args, **kwargs)
        return convert_to_response(raw)

    return _wrapped
|
||||
|
||||
|
||||
# Helper used in "_install_mocks" below.
|
||||
# Helper used in "_install_mocks" below.
def _wrap_output_stream(mock_generate_content_stream):
    """Adapt a streaming mock so each yielded item becomes a response."""

    def _wrapped(*args, **kwargs):
        raw_stream = mock_generate_content_stream(*args, **kwargs)
        yield from map(convert_to_response, raw_stream)

    return _wrapped
|
||||
|
||||
|
||||
# Helper used in "_install_mocks" below.
|
||||
def _async_wrapper(mock_generate_content):
|
||||
async def _wrapped(*args, **kwargs):
|
||||
return mock_generate_content(*args, **kwargs)
|
||||
|
||||
return _wrapped
|
||||
|
||||
|
||||
# Helper used in "_install_mocks" below.
|
||||
def _async_stream_wrapper(mock_generate_content_stream):
|
||||
async def _wrapped(*args, **kwargs):
|
||||
async def _internal_generator():
|
||||
for result in mock_generate_content_stream(*args, **kwargs):
|
||||
yield result
|
||||
|
||||
return _internal_generator()
|
||||
|
||||
return _wrapped
|
||||
|
||||
|
||||
class TestCase(CommonTestCaseBase):
    """Test base that replaces the google.genai generate-content methods
    with mocks.

    The mocks are created lazily (on first use of a ``mock_*`` property
    or ``configure_valid_response``); tearDown always restores the
    original SDK methods.  Subclasses must be used directly — this base
    skips itself.
    """

    # The "setUp" function is defined by "unittest.TestCase" and thus
    # this name must be used. Uncertain why pylint doesn't seem to
    # recognize that this is a unit test class for which this is inherited.
    def setUp(self):  # pylint: disable=invalid-name
        super().setUp()
        if self.__class__ == TestCase:
            raise unittest.SkipTest("Skipping testcase base.")
        self._generate_content_mock = None
        self._generate_content_stream_mock = None
        # Keep the original (class-level) SDK methods so tearDown can
        # restore them after the mocks are patched in.
        self._original_generate_content = Models.generate_content
        self._original_generate_content_stream = Models.generate_content_stream
        self._original_async_generate_content = AsyncModels.generate_content
        self._original_async_generate_content_stream = (
            AsyncModels.generate_content_stream
        )
        # Canned responses cycled through by the non-stream mock.
        self._responses = []
        self._response_index = 0

    @property
    def mock_generate_content(self):
        """The non-streaming mock (created and installed on first access)."""
        if self._generate_content_mock is None:
            self._create_and_install_mocks()
        return self._generate_content_mock

    @property
    def mock_generate_content_stream(self):
        """The streaming mock (created and installed on first access)."""
        if self._generate_content_stream_mock is None:
            self._create_and_install_mocks()
        return self._generate_content_stream_mock

    def configure_valid_response(self, **kwargs):
        """Install the mocks (if needed) and queue one canned response."""
        self._create_and_install_mocks()
        response = create_response(**kwargs)
        self._responses.append(response)

    def _create_and_install_mocks(self):
        # Idempotent: only the first call patches the SDK.
        if self._generate_content_mock is not None:
            return
        # Reset client/instrumentation so they are rebuilt against the
        # patched SDK methods.
        self.reset_client()
        self.reset_instrumentation()
        self._generate_content_mock = self._create_nonstream_mock()
        self._generate_content_stream_mock = self._create_stream_mock()
        self._install_mocks()

    def _create_nonstream_mock(self):
        """Build a MagicMock that cycles through the queued responses."""
        mock = unittest.mock.MagicMock()

        def _default_impl(*args, **kwargs):
            # Fall back to a generic response when none were queued.
            if not self._responses:
                return create_response(text="Some response")
            index = self._response_index % len(self._responses)
            result = self._responses[index]
            self._response_index += 1
            return result

        mock.side_effect = _default_impl
        return mock

    def _create_stream_mock(self):
        """Build a MagicMock that streams every queued response in order."""
        mock = unittest.mock.MagicMock()

        def _default_impl(*args, **kwargs):
            for response in self._responses:
                yield response

        mock.side_effect = _default_impl
        return mock

    def _install_mocks(self):
        # Patch the SDK class methods; the async variants reuse the
        # wrapped sync callables through the async facade helpers above.
        output_wrapped = _wrap_output(self._generate_content_mock)
        output_wrapped_stream = _wrap_output_stream(
            self._generate_content_stream_mock
        )
        Models.generate_content = output_wrapped
        Models.generate_content_stream = output_wrapped_stream
        AsyncModels.generate_content = _async_wrapper(output_wrapped)
        AsyncModels.generate_content_stream = _async_stream_wrapper(
            output_wrapped_stream
        )

    def tearDown(self):
        super().tearDown()
        # If the mocks were never installed, the SDK methods must still
        # be the originals — catch accidental patching elsewhere.
        if self._generate_content_mock is None:
            assert Models.generate_content == self._original_generate_content
            assert (
                Models.generate_content_stream
                == self._original_generate_content_stream
            )
            assert (
                AsyncModels.generate_content
                == self._original_async_generate_content
            )
            assert (
                AsyncModels.generate_content_stream
                == self._original_async_generate_content_stream
            )
        # Unconditionally restore the original SDK methods.
        Models.generate_content = self._original_generate_content
        Models.generate_content_stream = self._original_generate_content_stream
        AsyncModels.generate_content = self._original_async_generate_content
        AsyncModels.generate_content_stream = (
            self._original_async_generate_content_stream
        )
|
|
@ -0,0 +1,94 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
|
||||
response:
|
||||
body:
|
||||
string: |-
|
||||
{
|
||||
"candidates": [
|
||||
{
|
||||
"content": {
|
||||
"role": "model",
|
||||
"parts": [
|
||||
{
|
||||
"text": "No more dark, inscrutable ways,\nTo trace a request through hazy days.\nOpen Telemetry, a beacon bright,\nIlluminates the path, both day and night.\n\nFrom metrics gathered, a clear display,\nOf latency's dance, and errors' sway.\nTraces unwind, a silken thread,\nShowing the journey, from start to head.\n\nLogs interweave, a richer hue,\nContextual clues, for me and you.\nNo vendor lock-in, a freedom's call,\nTo choose your tools, to stand up tall.\n\nExporters aplenty, a varied choice,\nTo send your data, amplify your voice.\nJaeger, Zipkin, Prometheus' might,\nAll integrate, a glorious sight.\n\nWith spans and attributes, a detailed scene,\nOf how your system works, both sleek and keen.\nPerformance bottlenecks, now laid bare,\nOpen Telemetry, beyond compare.\n\nSo embrace the light, let darkness flee,\nWith Open Telemetry, set your systems free.\nObserve, and learn, and optimize with grace,\nA brighter future, in this digital space.\n"
|
||||
}
|
||||
]
|
||||
},
|
||||
"finishReason": "STOP",
|
||||
"avgLogprobs": -0.3303731600443522
|
||||
}
|
||||
],
|
||||
"usageMetadata": {
|
||||
"promptTokenCount": 8,
|
||||
"candidatesTokenCount": 240,
|
||||
"totalTokenCount": 248,
|
||||
"promptTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 8
|
||||
}
|
||||
],
|
||||
"candidatesTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 240
|
||||
}
|
||||
]
|
||||
},
|
||||
"modelVersion": "gemini-1.5-flash-002",
|
||||
"createTime": "2025-03-07T22:19:18.083091Z",
|
||||
"responseId": "5nDLZ5OJBdyY3NoPiZGx0Ag"
|
||||
}
|
||||
headers:
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,94 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
|
||||
response:
|
||||
body:
|
||||
string: |-
|
||||
{
|
||||
"candidates": [
|
||||
{
|
||||
"content": {
|
||||
"role": "model",
|
||||
"parts": [
|
||||
{
|
||||
"text": "No more dark logs, a cryptic, hidden trace,\nOf failing systems, lost in time and space.\nOpenTelemetry, a beacon shining bright,\nIlluminating paths, both dark and light.\n\nFrom microservices, a sprawling, tangled mesh,\nTo monolithic beasts, put to the test,\nIt gathers traces, spans, and metrics too,\nA holistic view, for me and you.\n\nWith signals clear, from every single node,\nPerformance bottlenecks, instantly bestowed.\nDistributed tracing, paints a vivid scene,\nWhere latency lurks, and slowdowns intervene.\n\nExporters rise, to send the data forth,\nTo dashboards grand, of proven, measured worth.\nPrometheus, Grafana, Jaeger, fluent streams,\nVisualizing insights, fulfilling data dreams.\n\nFrom Jaeger's diagrams, a branching, flowing art,\nTo Grafana's charts, that play a vital part,\nThe mysteries unravel, hidden deep inside,\nWhere errors slumber, and slow responses hide.\n\nSo hail OpenTelemetry, a gift to all who code,\nA brighter future, on a well-lit road.\nNo more guesswork, no more fruitless chase,\nJust clear observability, in time and space.\n"
|
||||
}
|
||||
]
|
||||
},
|
||||
"finishReason": "STOP",
|
||||
"avgLogprobs": -0.45532724261283875
|
||||
}
|
||||
],
|
||||
"usageMetadata": {
|
||||
"promptTokenCount": 8,
|
||||
"candidatesTokenCount": 256,
|
||||
"totalTokenCount": 264,
|
||||
"promptTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 8
|
||||
}
|
||||
],
|
||||
"candidatesTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 256
|
||||
}
|
||||
]
|
||||
},
|
||||
"modelVersion": "gemini-1.5-flash-002",
|
||||
"createTime": "2025-03-07T22:19:15.268428Z",
|
||||
"responseId": "43DLZ4yxEM6F3NoPzaTkiQU"
|
||||
}
|
||||
headers:
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,94 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
|
||||
response:
|
||||
body:
|
||||
string: |-
|
||||
{
|
||||
"candidates": [
|
||||
{
|
||||
"content": {
|
||||
"role": "model",
|
||||
"parts": [
|
||||
{
|
||||
"text": "No more dark, mysterious traces,\nNo more guessing, in empty spaces.\nOpenTelemetry's light now shines,\nIlluminating all our designs.\n\nFrom microservices, small and fleet,\nTo monolithic beasts, hard to beat,\nIt weaves a net, both fine and strong,\nWhere metrics flow, where logs belong.\n\nTraces dance, a vibrant hue,\nShowing journeys, old and new.\nSpans unfold, a story told,\nOf requests handled, brave and bold.\n\nMetrics hum, a steady beat,\nLatency, errors, can't be beat.\nDistribution charts, a clear display,\nGuiding us along the way.\n\nLogs provide a detailed view,\nOf what happened, me and you.\nContext rich, with helpful clues,\nDebugging woes, it quickly subdues.\n\nWith exporters wise, a thoughtful choice,\nTo Prometheus, Jaeger, or Zipkin's voice,\nOur data flows, a precious stream,\nReal-time insights, a waking dream.\n\nSo hail to OpenTelemetry's might,\nBringing clarity to our darkest night.\nObservability's champion, bold and true,\nA brighter future, for me and you.\n"
|
||||
}
|
||||
]
|
||||
},
|
||||
"finishReason": "STOP",
|
||||
"avgLogprobs": -0.4071464086238575
|
||||
}
|
||||
],
|
||||
"usageMetadata": {
|
||||
"promptTokenCount": 8,
|
||||
"candidatesTokenCount": 253,
|
||||
"totalTokenCount": 261,
|
||||
"promptTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 8
|
||||
}
|
||||
],
|
||||
"candidatesTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 253
|
||||
}
|
||||
]
|
||||
},
|
||||
"modelVersion": "gemini-1.5-flash-002",
|
||||
"createTime": "2025-03-07T22:19:12.443989Z",
|
||||
"responseId": "4HDLZ9WMG6SK698Pr5uZ2Qw"
|
||||
}
|
||||
headers:
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,94 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
|
||||
response:
|
||||
body:
|
||||
string: |-
|
||||
{
|
||||
"candidates": [
|
||||
{
|
||||
"content": {
|
||||
"role": "model",
|
||||
"parts": [
|
||||
{
|
||||
"text": "No more dark, mysterious traces,\nOf failing systems, hidden spaces.\nOpen Telemetry's light shines bright,\nGuiding us through the darkest night.\n\nFrom metrics gathered, finely spun,\nTo logs that tell of tasks undone,\nAnd traces linking every call,\nIt answers questions, standing tall.\n\nDistributed systems, complex and vast,\nTheir hidden flaws, no longer cast\nIn shadows deep, beyond our view,\nOpen Telemetry sees them through.\n\nWith spans and attributes, it weaves a tale,\nOf requests flowing, never frail.\nIt pinpoints bottlenecks, slow and grim,\nAnd helps us optimize, system trim.\n\nAcross languages, a common ground,\nWhere data's shared, and insights found.\nExporters whisper, collectors hum,\nA symphony of data, overcome.\n\nSo raise a glass, to this open source,\nA shining beacon, a powerful force.\nOpen Telemetry, a guiding star,\nRevealing secrets, near and far.\n"
|
||||
}
|
||||
]
|
||||
},
|
||||
"finishReason": "STOP",
|
||||
"avgLogprobs": -0.3586180628193498
|
||||
}
|
||||
],
|
||||
"usageMetadata": {
|
||||
"promptTokenCount": 8,
|
||||
"candidatesTokenCount": 211,
|
||||
"totalTokenCount": 219,
|
||||
"promptTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 8
|
||||
}
|
||||
],
|
||||
"candidatesTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 211
|
||||
}
|
||||
]
|
||||
},
|
||||
"modelVersion": "gemini-1.5-flash-002",
|
||||
"createTime": "2025-03-07T22:19:09.936326Z",
|
||||
"responseId": "3XDLZ4aTOZSpnvgPn-e0qQk"
|
||||
}
|
||||
headers:
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,97 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
|
||||
response:
|
||||
body:
|
||||
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" longer dark, the tracing's light,\\nOpen Telemetry, shining\
|
||||
\ bright\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \".\\nA beacon in the coding night,\\nRevealing paths, both\
|
||||
\ dark\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\
|
||||
2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" and bright.\\n\\nFrom microservice to sprawling beast,\\\
|
||||
nIts watchful eye, a silent priest.\\nObserving calls, both small and vast,\\\
|
||||
nPerformance\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" flaws, revealed at last.\\n\\nWith metrics gleaned and logs\
|
||||
\ aligned,\\nA clearer picture, you will find.\\nOf latency, and errors dire,\\\
|
||||
n\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\
|
||||
,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"And bottlenecks\
|
||||
\ that set afire.\\n\\nIt spans the clouds, a network wide,\\nWhere data streams,\
|
||||
\ a surging tide.\\nCollecting traces, rich and deep,\\nWhile slumbering apps\
|
||||
\ their secrets keep.\\n\\nJaeger, Zip\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"kin, the tools it holds,\\nA tapestry of stories told.\\nOf\
|
||||
\ requests flowing, swift and free,\\nOr tangled knots, for all to see.\\\
|
||||
n\\nSo embrace the power, understand,\\nThe vital role, across the\"}]}}],\"\
|
||||
modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\
|
||||
,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" land.\\nOpen\
|
||||
\ Telemetry, a guiding star,\\nTo navigate the digital afar.\\n\"}]},\"finishReason\"\
|
||||
: \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\
|
||||
: 212,\"totalTokenCount\": 220,\"promptTokensDetails\": [{\"modality\": \"\
|
||||
TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\
|
||||
TEXT\",\"tokenCount\": 212}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\
|
||||
createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\n"
|
||||
headers:
|
||||
Content-Disposition:
|
||||
- attachment
|
||||
Content-Type:
|
||||
- text/event-stream
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,102 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
|
||||
response:
|
||||
body:
|
||||
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"The\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" black box whispers, secrets deep,\\nOf failing systems, promises\
|
||||
\ to keep.\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"\\nBut tracing's light, a guiding hand,\\nReveals the path\"\
|
||||
}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
|
||||
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", across the\
|
||||
\ land.\\n\\nOpen Telemetry, a beacon bright,\\nIlluminating pathways, day\
|
||||
\ and night.\\nFrom spans and traces, stories told,\"}]}}],\"modelVersion\"\
|
||||
: \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
|
||||
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nOf requests\
|
||||
\ flowing, brave and bold.\\n\\nThe metrics rise, a vibrant chart,\\nDisplaying\
|
||||
\ latency, a work of art.\\nEach request'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"s journey, clearly shown,\\nWhere bottlenecks slumber, seeds\
|
||||
\ are sown.\\n\\nWith logs appended, context clear,\\nThe root of problems,\
|
||||
\ drawing near.\\nObservability's embrace, so wide,\\nUnraveling mysteries,\"\
|
||||
}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
|
||||
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" deep inside.\\\
|
||||
n\\nFrom simple apps to complex weaves,\\nOpen Telemetry's power achieves,\\\
|
||||
nA unified vision, strong and true,\\nMonitoring systems, old and new.\\n\\\
|
||||
nNo vendor lock-in, free to roam,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"\\nAcross the clouds, and find your home.\\nA standard rising,\
|
||||
\ strong and bold,\\nA future brighter, to behold.\\n\\nSo let the traces\
|
||||
\ flow and gleam,\\nOpen Telemetry, a vibrant dream.\\nOf healthy systems,\
|
||||
\ running free,\\nFor all to see, for all to be.\"}]}}],\"modelVersion\":\
|
||||
\ \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
|
||||
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\n\"}]},\"\
|
||||
finishReason\": \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"\
|
||||
candidatesTokenCount\": 258,\"totalTokenCount\": 266,\"promptTokensDetails\"\
|
||||
: [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\
|
||||
: [{\"modality\": \"TEXT\",\"tokenCount\": 258}]},\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\n"
|
||||
headers:
|
||||
Content-Disposition:
|
||||
- attachment
|
||||
Content-Type:
|
||||
- text/event-stream
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,99 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
|
||||
response:
|
||||
body:
|
||||
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" more dark logs, a cryptic, silent scream,\\nNo more the hunt\
|
||||
\ for\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\
|
||||
2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" errors, a lost, fading dream.\\nOpen Telemetry, a beacon\
|
||||
\ in\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\
|
||||
2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" the night,\\nShining forth its data, clear and burning bright.\\\
|
||||
n\\nFrom traces spanning systems, a flowing, silver thread,\\nMetrics pulse\
|
||||
\ and measure,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" insights finely spread.\\nLogs enriched with context, a story\
|
||||
\ they unfold,\\nOf requests and responses, both brave and bold.\\n\\nObservability's\
|
||||
\ promise\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \", a future now at hand,\\nWith vendors interoperable, a collaborative\
|
||||
\ band.\\nNo longer vendor lock-in, a restrictive, iron cage,\\nBut freedom\
|
||||
\ of selection, turning a new page.\\n\\nFrom microservices humming,\"}]}}],\"\
|
||||
modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\
|
||||
,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" a symphony\
|
||||
\ of calls,\\nTo monolithic giants, answering their thralls,\\nOpen Telemetry\
|
||||
\ watches, with keen and watchful eye,\\nDetecting the anomalies, before they\
|
||||
\ rise and fly.\\n\\nSo let the data flow freely, a\"}]}}],\"modelVersion\"\
|
||||
: \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\
|
||||
,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" river strong\
|
||||
\ and deep,\\nIts secrets it will whisper, while the systems sleep.\\nOpen\
|
||||
\ Telemetry's power, a force that we can wield,\\nTo build more stable systems,\
|
||||
\ in the digital field.\\n\"}]},\"finishReason\": \"STOP\"}],\"usageMetadata\"\
|
||||
: {\"promptTokenCount\": 8,\"candidatesTokenCount\": 238,\"totalTokenCount\"\
|
||||
: 246,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"\
|
||||
candidatesTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 238}]},\"\
|
||||
modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\
|
||||
,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\n"
|
||||
headers:
|
||||
Content-Disposition:
|
||||
- attachment
|
||||
Content-Type:
|
||||
- text/event-stream
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,99 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
|
||||
response:
|
||||
body:
|
||||
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" more dark, mysterious traces,\\nNo more guessing, in time\
|
||||
\ and spaces.\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"\\nOpen Telemetry's light shines bright,\\nIlluminating the\
|
||||
\ code'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\":\
|
||||
\ \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"s dark night.\\n\\nFrom spans and metrics, a story told,\\\
|
||||
nOf requests flowing, both brave and bold.\\nTraces weaving, a tapestry grand,\"\
|
||||
}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\
|
||||
,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nShowing\
|
||||
\ performance, across the land.\\n\\nLogs and metrics, a perfect blend,\\\
|
||||
nInformation's flow, without end.\\nObservability's promise\"}]}}],\"modelVersion\"\
|
||||
: \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\
|
||||
,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", clear and\
|
||||
\ true,\\nInsights revealed, for me and you.\\n\\nJaeger, Zipkin, a chorus\
|
||||
\ sings,\\nWith exporters ready, for all the things.\\nFrom simple apps to\
|
||||
\ systems vast,\\nOpen Telemetry'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"s power will last.\\n\\nNo vendor lock-in, a freedom sweet,\\\
|
||||
nOpen source glory, can't be beat.\\nSo let us embrace, this modern way,\\\
|
||||
nTo monitor systems, come what may.\\n\\nFrom\"}]}}],\"modelVersion\": \"\
|
||||
gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"\
|
||||
responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\": [{\"\
|
||||
content\": {\"role\": \"model\",\"parts\": [{\"text\": \" microservices, small\
|
||||
\ and slight,\\nTo monolithic giants, shining bright,\\nOpen Telemetry shows\
|
||||
\ the path,\\nTo understand, and fix the wrath,\\nOf latency demons, lurking\
|
||||
\ near,\\nBringing clarity, year after year.\\n\"}]},\"finishReason\": \"\
|
||||
STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\
|
||||
: 242,\"totalTokenCount\": 250,\"promptTokensDetails\": [{\"modality\": \"\
|
||||
TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\
|
||||
TEXT\",\"tokenCount\": 242}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\
|
||||
createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
|
||||
}\r\n\r\n"
|
||||
headers:
|
||||
Content-Disposition:
|
||||
- attachment
|
||||
Content-Type:
|
||||
- text/event-stream
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,204 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from .base import TestCase
|
||||
|
||||
|
||||
class NonStreamingTestCase(TestCase):
    """Shared tests for non-streaming ``generate_content`` instrumentation.

    Subclasses provide the concrete call path (e.g. sync vs. async) by
    implementing ``generate_content`` and ``expected_function_name``;
    running this base class directly is skipped in ``setUp``.
    """

    # The "setUp" function is defined by "unittest.TestCase" and thus
    # this name must be used. Uncertain why pylint doesn't seem to
    # recognize that this is a unit test class for which this is inherited.
    def setUp(self):  # pylint: disable=invalid-name
        super().setUp()
        if self.__class__ == NonStreamingTestCase:
            raise unittest.SkipTest("Skipping testcase base.")

    def generate_content(self, *args, **kwargs):
        """Invokes the instrumented API; must be implemented by subclasses."""
        raise NotImplementedError("Must implement 'generate_content'.")

    @property
    def expected_function_name(self):
        """Value expected in "code.function.name"; set by subclasses."""
        raise NotImplementedError("Must implement 'expected_function_name'.")

    def _set_capture_content(self, enabled):
        """Sets the content-capture env var and restores it after the test.

        Assigning ``os.environ`` directly (as these tests previously did)
        leaks the setting into later tests and makes results depend on
        test execution order; ``addCleanup`` restores the prior value.
        """
        key = "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"
        previous = os.environ.get(key)

        def _restore():
            if previous is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = previous

        self.addCleanup(_restore)
        os.environ[key] = "true" if enabled else "false"

    def _assert_and_get_span(self, name="generate_content gemini-2.0-flash"):
        """Asserts that a span with the given name exists and returns it."""
        self.otel.assert_has_span_named(name)
        return self.otel.get_span_named(name)

    def _generate_and_get_span(self, config):
        """Generates content with the given config and returns its span."""
        self.generate_content(
            model="gemini-2.0-flash",
            contents="Some input prompt",
            config=config,
        )
        return self._assert_and_get_span()

    def test_instrumentation_does_not_break_core_functionality(self):
        self.configure_valid_response(text="Yep, it works!")
        response = self.generate_content(
            model="gemini-2.0-flash", contents="Does this work?"
        )
        self.assertEqual(response.text, "Yep, it works!")

    def test_generates_span(self):
        self.configure_valid_response(text="Yep, it works!")
        response = self.generate_content(
            model="gemini-2.0-flash", contents="Does this work?"
        )
        self.assertEqual(response.text, "Yep, it works!")
        self.otel.assert_has_span_named("generate_content gemini-2.0-flash")

    def test_model_reflected_into_span_name(self):
        self.configure_valid_response(text="Yep, it works!")
        response = self.generate_content(
            model="gemini-1.5-flash", contents="Does this work?"
        )
        self.assertEqual(response.text, "Yep, it works!")
        self.otel.assert_has_span_named("generate_content gemini-1.5-flash")

    def test_generated_span_has_minimal_genai_attributes(self):
        self.configure_valid_response(text="Yep, it works!")
        self.generate_content(
            model="gemini-2.0-flash", contents="Does this work?"
        )
        span = self._assert_and_get_span()
        self.assertEqual(span.attributes["gen_ai.system"], "gemini")
        self.assertEqual(
            span.attributes["gen_ai.operation.name"], "generate_content"
        )

    def test_generated_span_has_correct_function_name(self):
        self.configure_valid_response(text="Yep, it works!")
        self.generate_content(
            model="gemini-2.0-flash", contents="Does this work?"
        )
        span = self._assert_and_get_span()
        self.assertEqual(
            span.attributes["code.function.name"], self.expected_function_name
        )

    def test_generated_span_has_vertex_ai_system_when_configured(self):
        self.set_use_vertex(True)
        self.configure_valid_response(text="Yep, it works!")
        self.generate_content(
            model="gemini-2.0-flash", contents="Does this work?"
        )
        span = self._assert_and_get_span()
        self.assertEqual(span.attributes["gen_ai.system"], "vertex_ai")
        self.assertEqual(
            span.attributes["gen_ai.operation.name"], "generate_content"
        )

    def test_generated_span_counts_tokens(self):
        self.configure_valid_response(input_tokens=123, output_tokens=456)
        self.generate_content(model="gemini-2.0-flash", contents="Some input")
        span = self._assert_and_get_span()
        self.assertEqual(span.attributes["gen_ai.usage.input_tokens"], 123)
        self.assertEqual(span.attributes["gen_ai.usage.output_tokens"], 456)

    def test_records_system_prompt_as_log(self):
        self._set_capture_content(True)
        config = {"system_instruction": "foo"}
        self.configure_valid_response()
        self.generate_content(
            model="gemini-2.0-flash", contents="Some input", config=config
        )
        self.otel.assert_has_event_named("gen_ai.system.message")
        event_record = self.otel.get_event_named("gen_ai.system.message")
        self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
        self.assertEqual(event_record.body["content"], "foo")

    def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self):
        self._set_capture_content(False)
        config = {"system_instruction": "foo"}
        self.configure_valid_response()
        self.generate_content(
            model="gemini-2.0-flash", contents="Some input", config=config
        )
        # The event is still emitted when capture is disabled,
        # but its content is elided.
        self.otel.assert_has_event_named("gen_ai.system.message")
        event_record = self.otel.get_event_named("gen_ai.system.message")
        self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
        self.assertEqual(event_record.body["content"], "<elided>")

    def test_does_not_record_system_prompt_as_log_if_no_system_prompt_present(
        self,
    ):
        self._set_capture_content(True)
        self.configure_valid_response()
        self.generate_content(model="gemini-2.0-flash", contents="Some input")
        self.otel.assert_does_not_have_event_named("gen_ai.system.message")

    def test_records_user_prompt_as_log(self):
        self._set_capture_content(True)
        self.configure_valid_response()
        self.generate_content(model="gemini-2.0-flash", contents="Some input")
        self.otel.assert_has_event_named("gen_ai.user.message")
        event_record = self.otel.get_event_named("gen_ai.user.message")
        self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
        self.assertEqual(event_record.body["content"], "Some input")

    def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self):
        self._set_capture_content(False)
        self.configure_valid_response()
        self.generate_content(model="gemini-2.0-flash", contents="Some input")
        self.otel.assert_has_event_named("gen_ai.user.message")
        event_record = self.otel.get_event_named("gen_ai.user.message")
        self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
        self.assertEqual(event_record.body["content"], "<elided>")

    def test_records_response_as_log(self):
        self._set_capture_content(True)
        self.configure_valid_response(text="Some response content")
        self.generate_content(model="gemini-2.0-flash", contents="Some input")
        self.otel.assert_has_event_named("gen_ai.choice")
        event_record = self.otel.get_event_named("gen_ai.choice")
        self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
        # The body content's exact structure is not pinned down here;
        # serializing it and searching is sufficient for this check.
        self.assertIn(
            "Some response content", json.dumps(event_record.body["content"])
        )

    def test_does_not_record_response_as_log_if_disabled_by_env(self):
        self._set_capture_content(False)
        self.configure_valid_response(text="Some response content")
        self.generate_content(model="gemini-2.0-flash", contents="Some input")
        self.otel.assert_has_event_named("gen_ai.choice")
        event_record = self.otel.get_event_named("gen_ai.choice")
        self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
        self.assertEqual(event_record.body["content"], "<elided>")

    def test_records_metrics_data(self):
        self.configure_valid_response()
        self.generate_content(model="gemini-2.0-flash", contents="Some input")
        self.otel.assert_has_metrics_data_named("gen_ai.client.token.usage")
        self.otel.assert_has_metrics_data_named(
            "gen_ai.client.operation.duration"
        )
|
|
@ -0,0 +1,72 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
|
||||
from .base import TestCase
|
||||
|
||||
|
||||
class StreamingTestCase(TestCase):
    """Shared tests for streaming ``generate_content`` instrumentation.

    Subclasses provide the concrete call path by implementing
    ``generate_content`` and ``expected_function_name``; running this
    base class directly is skipped in ``setUp``.
    """

    # The "setUp" function is defined by "unittest.TestCase" and thus
    # this name must be used. Uncertain why pylint doesn't seem to
    # recognize that this is a unit test class for which this is inherited.
    def setUp(self):  # pylint: disable=invalid-name
        super().setUp()
        if self.__class__ == StreamingTestCase:
            raise unittest.SkipTest("Skipping testcase base.")

    def generate_content(self, *args, **kwargs):
        """Invokes the instrumented API; must be implemented by subclasses."""
        raise NotImplementedError("Must implement 'generate_content'.")

    @property
    def expected_function_name(self):
        """Value expected in "code.function.name"; set by subclasses."""
        raise NotImplementedError("Must implement 'expected_function_name'.")

    def test_instrumentation_does_not_break_core_functionality(self):
        self.configure_valid_response(text="Yep, it works!")
        responses = self.generate_content(
            model="gemini-2.0-flash", contents="Does this work?"
        )
        self.assertEqual(len(responses), 1)
        response = responses[0]
        self.assertEqual(response.text, "Yep, it works!")

    # Renamed from "test_handles_multiple_ressponses" to fix the typo;
    # unittest discovers test methods by the "test_" prefix either way.
    def test_handles_multiple_responses(self):
        self.configure_valid_response(text="First response")
        self.configure_valid_response(text="Second response")
        responses = self.generate_content(
            model="gemini-2.0-flash", contents="Does this work?"
        )
        self.assertEqual(len(responses), 2)
        self.assertEqual(responses[0].text, "First response")
        self.assertEqual(responses[1].text, "Second response")
        # One choice event should be emitted per streamed response.
        choice_events = self.otel.get_events_named("gen_ai.choice")
        self.assertEqual(len(choice_events), 2)

    def test_includes_token_counts_in_span_aggregated_from_responses(self):
        # Configure multiple responses whose input/output tokens should be
        # accumulated together when summarizing the end-to-end request.
        #
        # Input tokens:  1 + 3 + 5 = 9
        # Output tokens: 2 + 4 + 6 = 12
        self.configure_valid_response(input_tokens=1, output_tokens=2)
        self.configure_valid_response(input_tokens=3, output_tokens=4)
        self.configure_valid_response(input_tokens=5, output_tokens=6)

        self.generate_content(model="gemini-2.0-flash", contents="Some input")

        self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
        span = self.otel.get_span_named("generate_content gemini-2.0-flash")
        self.assertEqual(span.attributes["gen_ai.usage.input_tokens"], 9)
        self.assertEqual(span.attributes["gen_ai.usage.output_tokens"], 12)
|
|
@ -0,0 +1,28 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import asyncio
|
||||
|
||||
from .nonstreaming_base import NonStreamingTestCase
|
||||
|
||||
|
||||
class TestGenerateContentAsyncNonstreaming(NonStreamingTestCase):
    """Runs the shared non-streaming suite against the async client API."""

    def generate_content(self, *args, **kwargs):
        # Build the coroutine first, then drive it to completion on a fresh
        # event loop so the synchronous test harness can use the result.
        coroutine = self.client.aio.models.generate_content(  # pylint: disable=missing-kwoa
            *args, **kwargs
        )
        return asyncio.run(coroutine)

    @property
    def expected_function_name(self):
        return "google.genai.AsyncModels.generate_content"
|
|
@ -0,0 +1,55 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import asyncio
|
||||
|
||||
from .nonstreaming_base import NonStreamingTestCase
|
||||
from .streaming_base import StreamingTestCase
|
||||
|
||||
|
||||
class AsyncStreamingMixin:
    """Adapts the async streaming API so synchronous tests can consume it."""

    @property
    def expected_function_name(self):
        return "google.genai.AsyncModels.generate_content_stream"

    async def _generate_content_stream_helper(self, *args, **kwargs):
        # The API call returns an awaitable resolving to an async iterator;
        # drain it into a plain list for the synchronous assertions.
        stream = await self.client.aio.models.generate_content_stream(  # pylint: disable=missing-kwoa
            *args, **kwargs
        )
        return [response async for response in stream]

    def generate_content_stream(self, *args, **kwargs):
        return asyncio.run(
            self._generate_content_stream_helper(*args, **kwargs)
        )
|
||||
|
||||
|
||||
class TestGenerateContentAsyncStreamingWithSingleResult(
    AsyncStreamingMixin, NonStreamingTestCase
):
    """Streams exactly one chunk and exposes it as a non-streaming result."""

    def generate_content(self, *args, **kwargs):
        streamed = self.generate_content_stream(*args, **kwargs)
        # The non-streaming suite expects a single response object.
        self.assertEqual(len(streamed), 1)
        return streamed[0]
|
||||
|
||||
|
||||
class TestGenerateContentAsyncStreamingWithStreamedResults(
    AsyncStreamingMixin, StreamingTestCase
):
    """Runs the shared streaming suite over the async streaming API."""

    def generate_content(self, *args, **kwargs):
        # Delegate straight to the mixin; the streaming suite consumes the
        # full list of streamed responses.
        return self.generate_content_stream(*args, **kwargs)
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue