Compare commits
129 Commits
Author | SHA1 | Date |
---|---|---|
|
109173fec7 | |
|
b63ca133be | |
|
1f78c8acff | |
|
49fa53131d | |
|
333fc5dcb4 | |
|
80c357bb16 | |
|
3c4d18cc13 | |
|
b74633a552 | |
|
b1c2c7941b | |
|
b69ebb7224 | |
|
6977da3893 | |
|
ca079cbc56 | |
|
40d8942bf5 | |
|
78300e9642 | |
|
c4347e027c | |
|
0a03c9abf2 | |
|
85dbfe520a | |
|
b27225273b | |
|
591051f8bb | |
|
50cdeeee12 | |
|
04f8899252 | |
|
59cc34e9f3 | |
|
b7301823a0 | |
|
4a21b3974b | |
|
85ea8f382d | |
|
8f7bab5337 | |
|
ccf9cabeee | |
|
701d65b022 | |
|
b9a78e7475 | |
|
77325aa89a | |
|
6d8becf9ad | |
|
6c89a56da5 | |
|
3e0632cb31 | |
|
7bd0895d01 | |
|
a912c9e57c | |
|
75c73d1e29 | |
|
a164d37a3c | |
|
e2ba6d43c0 | |
|
4e42ed674a | |
|
72f437c456 | |
|
93353660f6 | |
|
1953d97958 | |
|
df275921a9 | |
|
c1a689507b | |
|
b3f98ab936 | |
|
dc35754dbd | |
|
fa6d972444 | |
|
5e4b55812a | |
|
680f197515 | |
|
1909c913b2 | |
|
f9d9f19aa5 | |
|
0db4d0bb8f | |
|
71bfc9550e | |
|
38f7413836 | |
|
4d6893e8fa | |
|
dbdff31220 | |
|
ef2b5468d5 | |
|
f21182890a | |
|
ec731581af | |
|
ccdf522626 | |
|
9e1284687a | |
|
1034b746af | |
|
3efd161cb6 | |
|
99049a9652 | |
|
4b9b949dde | |
|
dc37067b93 | |
|
517257cbef | |
|
4a1e0ce941 | |
|
9bc7764139 | |
|
abafc2131f | |
|
2c0033f842 | |
|
cc7169cf2c | |
|
45797ec3a1 | |
|
5c76d04a35 | |
|
9d41f9bf10 | |
|
02dc87ef73 | |
|
e6869cdc75 | |
|
dec311ff0e | |
|
da661f6547 | |
|
afc0d5531d | |
|
8a83770d7b | |
|
c6cdbeb51f | |
|
7f347e54df | |
|
369a9f2e00 | |
|
c54292fa08 | |
|
d6f6c60c16 | |
|
dd151673d1 | |
|
3431a4e8b0 | |
|
c47e341ab8 | |
|
8390db35ae | |
|
b8018c5262 | |
|
ce90639428 | |
|
fce17db166 | |
|
9c969f363e | |
|
7562ff08f3 | |
|
4b832859cc | |
|
6587485d05 | |
|
bb85f983a3 | |
|
04f9e8dd7f | |
|
6d5a5149d0 | |
|
6bde73ce34 | |
|
3a585b4b58 | |
|
e50cb3271f | |
|
5a2cfb3d65 | |
|
fdcd80d89f | |
|
ff18e7c18b | |
|
78373353f5 | |
|
fa499f5ca8 | |
|
642d8c4081 | |
|
27d5d93a6a | |
|
3d37106115 | |
|
dde065b139 | |
|
8b2558f22e | |
|
0ea9998c4c | |
|
50ab047143 | |
|
139d787168 | |
|
af179659a2 | |
|
76e614fac4 | |
|
46cf5b5257 | |
|
164259e149 | |
|
db617eb3fd | |
|
3c60b62ad1 | |
|
8d14f0bb2a | |
|
a5474c3b29 | |
|
e43e8c91cd | |
|
ad29af3996 | |
|
3c88163c99 | |
|
f96d14cc62 | |
|
6189be647c |
|
@ -18,7 +18,7 @@ body:
|
|||
Please describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main.
|
||||
value: |
|
||||
OS: (e.g, Ubuntu)
|
||||
Python version: (e.g., Python 3.8.10)
|
||||
Python version: (e.g., Python 3.9.10)
|
||||
Package version: (e.g., 0.46.0)
|
||||
|
||||
- type: textarea
|
||||
|
|
|
@ -11,6 +11,9 @@ components:
|
|||
- oxeye-nikolay
|
||||
- nikosokolik
|
||||
|
||||
instrumentation/opentelemetry-instrumentation-asyncclick:
|
||||
- jomcgi
|
||||
|
||||
instrumentation/opentelemetry-instrumentation-kafka-python:
|
||||
- nozik
|
||||
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -7,7 +7,7 @@ name = "generate-workflows-lib"
|
|||
dynamic = ["version"]
|
||||
description = "A library to generate workflows"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -17,7 +17,6 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
|
|
|
@ -14,7 +14,7 @@ _tox_test_env_regex = re_compile(
|
|||
)
|
||||
_tox_lint_env_regex = re_compile(r"lint-(?P<name>[-\w]+)")
|
||||
_tox_contrib_env_regex = re_compile(
|
||||
r"py38-test-(?P<name>[-\w]+\w)-?(?P<contrib_requirements>\d+)?"
|
||||
r"py39-test-(?P<name>[-\w]+\w)-?(?P<contrib_requirements>\d+)?"
|
||||
)
|
||||
|
||||
|
||||
|
@ -47,8 +47,8 @@ def get_test_job_datas(tox_envs: list, operating_systems: list) -> list:
|
|||
os_alias = {"ubuntu-latest": "Ubuntu", "windows-latest": "Windows"}
|
||||
|
||||
python_version_alias = {
|
||||
"pypy3": "pypy-3.8",
|
||||
"py38": "3.8",
|
||||
"pypy3": "pypy-3.9",
|
||||
"pypy310": "pypy-3.10",
|
||||
"py39": "3.9",
|
||||
"py310": "3.10",
|
||||
"py311": "3.11",
|
||||
|
|
|
@ -12,6 +12,7 @@ on:
|
|||
CONTRIB_REPO_SHA:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: ${% raw %}{{ inputs.CORE_REPO_SHA }}{% endraw %}
|
||||
CONTRIB_REPO_SHA: ${% raw %}{{ inputs.CONTRIB_REPO_SHA }}{% endraw %}
|
||||
|
@ -23,6 +24,7 @@ jobs:
|
|||
{{ job_data.tox_env }}:
|
||||
name: {{ job_data.ui_name }}
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout contrib repo @ SHA - ${% raw %}{{ env.CONTRIB_REPO_SHA }}{% endraw %}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -30,10 +32,10 @@ jobs:
|
|||
repository: open-telemetry/opentelemetry-python-contrib
|
||||
ref: ${% raw %}{{ env.CONTRIB_REPO_SHA }}{% endraw %}
|
||||
|
||||
- name: Set up Python 3.8
|
||||
- name: Set up Python 3.9
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.8"
|
||||
python-version: "3.9"
|
||||
architecture: "x64"
|
||||
|
||||
- name: Install tox
|
||||
|
|
|
@ -9,8 +9,20 @@ on:
|
|||
- 'release/*'
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: main
|
||||
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
|
||||
# For PRs you can change the inner fallback ('main')
|
||||
# For pushes you change the outer fallback ('main')
|
||||
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
|
||||
CORE_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && (
|
||||
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
|
||||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
|
||||
'main'
|
||||
) || 'main' }}{% endraw %}
|
||||
CONTRIB_REPO_SHA: main
|
||||
PIP_EXISTS_ACTION: w
|
||||
|
||||
|
@ -20,6 +32,7 @@ jobs:
|
|||
{{ job_data.name }}:
|
||||
name: {{ job_data.ui_name }}
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${% raw %}{{ github.sha }}{% endraw %}
|
||||
uses: actions/checkout@v4
|
||||
|
|
|
@ -9,8 +9,20 @@ on:
|
|||
- 'release/*'
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: main
|
||||
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
|
||||
# For PRs you can change the inner fallback ('main')
|
||||
# For pushes you change the outer fallback ('main')
|
||||
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
|
||||
CORE_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && (
|
||||
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
|
||||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
|
||||
'main'
|
||||
) || 'main' }}{% endraw %}
|
||||
CONTRIB_REPO_SHA: main
|
||||
PIP_EXISTS_ACTION: w
|
||||
|
||||
|
@ -20,6 +32,7 @@ jobs:
|
|||
{{ job_data }}:
|
||||
name: {{ job_data }}
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
{%- if job_data == "generate-workflows" %}
|
||||
if: |
|
||||
!contains(github.event.pull_request.labels.*.name, 'Skip generate-workflows')
|
||||
|
|
|
@ -9,8 +9,20 @@ on:
|
|||
- 'release/*'
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: main
|
||||
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
|
||||
# For PRs you can change the inner fallback ('main')
|
||||
# For pushes you change the outer fallback ('main')
|
||||
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
|
||||
CORE_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && (
|
||||
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
|
||||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
|
||||
'main'
|
||||
) || 'main' }}{% endraw %}
|
||||
CONTRIB_REPO_SHA: main
|
||||
PIP_EXISTS_ACTION: w
|
||||
|
||||
|
@ -20,6 +32,7 @@ jobs:
|
|||
{{ job_data.name }}:
|
||||
name: {{ job_data.ui_name }}
|
||||
runs-on: {{ job_data.os }}
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${% raw %}{{ github.sha }}{% endraw %}
|
||||
uses: actions/checkout@v4
|
||||
|
|
|
@ -9,8 +9,20 @@ on:
|
|||
- 'release/*'
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: main
|
||||
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
|
||||
# For PRs you can change the inner fallback ('main')
|
||||
# For pushes you change the outer fallback ('main')
|
||||
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
|
||||
CORE_REPO_SHA: ${{ github.event_name == 'pull_request' && (
|
||||
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
|
||||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
|
||||
'main'
|
||||
) || 'main' }}
|
||||
CONTRIB_REPO_SHA: main
|
||||
PIP_EXISTS_ACTION: w
|
||||
|
||||
|
@ -19,6 +31,7 @@ jobs:
|
|||
lint-instrumentation-openai-v2:
|
||||
name: instrumentation-openai-v2
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -37,6 +50,7 @@ jobs:
|
|||
lint-instrumentation-vertexai:
|
||||
name: instrumentation-vertexai
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -55,6 +69,7 @@ jobs:
|
|||
lint-instrumentation-google-genai:
|
||||
name: instrumentation-google-genai
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -70,9 +85,10 @@ jobs:
|
|||
- name: Run tests
|
||||
run: tox -e lint-instrumentation-google-genai
|
||||
|
||||
lint-resource-detector-container:
|
||||
name: resource-detector-container
|
||||
lint-resource-detector-containerid:
|
||||
name: resource-detector-containerid
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -86,11 +102,12 @@ jobs:
|
|||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e lint-resource-detector-container
|
||||
run: tox -e lint-resource-detector-containerid
|
||||
|
||||
lint-resource-detector-azure:
|
||||
name: resource-detector-azure
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -109,6 +126,7 @@ jobs:
|
|||
lint-sdk-extension-aws:
|
||||
name: sdk-extension-aws
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -127,6 +145,7 @@ jobs:
|
|||
lint-distro:
|
||||
name: distro
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -145,6 +164,7 @@ jobs:
|
|||
lint-opentelemetry-instrumentation:
|
||||
name: opentelemetry-instrumentation
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -163,6 +183,7 @@ jobs:
|
|||
lint-instrumentation-aiohttp-client:
|
||||
name: instrumentation-aiohttp-client
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -181,6 +202,7 @@ jobs:
|
|||
lint-instrumentation-aiohttp-server:
|
||||
name: instrumentation-aiohttp-server
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -199,6 +221,7 @@ jobs:
|
|||
lint-instrumentation-aiopg:
|
||||
name: instrumentation-aiopg
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -217,6 +240,7 @@ jobs:
|
|||
lint-instrumentation-aws-lambda:
|
||||
name: instrumentation-aws-lambda
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -235,6 +259,7 @@ jobs:
|
|||
lint-instrumentation-botocore:
|
||||
name: instrumentation-botocore
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -253,6 +278,7 @@ jobs:
|
|||
lint-instrumentation-boto3sqs:
|
||||
name: instrumentation-boto3sqs
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -271,6 +297,7 @@ jobs:
|
|||
lint-instrumentation-django:
|
||||
name: instrumentation-django
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -289,6 +316,7 @@ jobs:
|
|||
lint-instrumentation-dbapi:
|
||||
name: instrumentation-dbapi
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -307,6 +335,7 @@ jobs:
|
|||
lint-instrumentation-boto:
|
||||
name: instrumentation-boto
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -322,9 +351,29 @@ jobs:
|
|||
- name: Run tests
|
||||
run: tox -e lint-instrumentation-boto
|
||||
|
||||
lint-instrumentation-asyncclick:
|
||||
name: instrumentation-asyncclick
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python 3.13
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.13"
|
||||
|
||||
- name: Install tox
|
||||
run: pip install tox-uv
|
||||
|
||||
- name: Run tests
|
||||
run: tox -e lint-instrumentation-asyncclick
|
||||
|
||||
lint-instrumentation-click:
|
||||
name: instrumentation-click
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -343,6 +392,7 @@ jobs:
|
|||
lint-instrumentation-elasticsearch:
|
||||
name: instrumentation-elasticsearch
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -361,6 +411,7 @@ jobs:
|
|||
lint-instrumentation-falcon:
|
||||
name: instrumentation-falcon
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -379,6 +430,7 @@ jobs:
|
|||
lint-instrumentation-fastapi:
|
||||
name: instrumentation-fastapi
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -397,6 +449,7 @@ jobs:
|
|||
lint-instrumentation-flask:
|
||||
name: instrumentation-flask
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -415,6 +468,7 @@ jobs:
|
|||
lint-instrumentation-urllib:
|
||||
name: instrumentation-urllib
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -433,6 +487,7 @@ jobs:
|
|||
lint-instrumentation-urllib3:
|
||||
name: instrumentation-urllib3
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -451,6 +506,7 @@ jobs:
|
|||
lint-instrumentation-requests:
|
||||
name: instrumentation-requests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -469,6 +525,7 @@ jobs:
|
|||
lint-instrumentation-starlette:
|
||||
name: instrumentation-starlette
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -487,6 +544,7 @@ jobs:
|
|||
lint-instrumentation-jinja2:
|
||||
name: instrumentation-jinja2
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -505,6 +563,7 @@ jobs:
|
|||
lint-instrumentation-logging:
|
||||
name: instrumentation-logging
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -523,6 +582,7 @@ jobs:
|
|||
lint-exporter-richconsole:
|
||||
name: exporter-richconsole
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -541,6 +601,7 @@ jobs:
|
|||
lint-exporter-prometheus-remote-write:
|
||||
name: exporter-prometheus-remote-write
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -559,6 +620,7 @@ jobs:
|
|||
lint-instrumentation-mysql:
|
||||
name: instrumentation-mysql
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -577,6 +639,7 @@ jobs:
|
|||
lint-instrumentation-mysqlclient:
|
||||
name: instrumentation-mysqlclient
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -595,6 +658,7 @@ jobs:
|
|||
lint-instrumentation-psycopg2:
|
||||
name: instrumentation-psycopg2
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -613,6 +677,7 @@ jobs:
|
|||
lint-instrumentation-psycopg:
|
||||
name: instrumentation-psycopg
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -631,6 +696,7 @@ jobs:
|
|||
lint-instrumentation-pymemcache:
|
||||
name: instrumentation-pymemcache
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -649,6 +715,7 @@ jobs:
|
|||
lint-instrumentation-pymongo:
|
||||
name: instrumentation-pymongo
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -667,6 +734,7 @@ jobs:
|
|||
lint-instrumentation-pymysql:
|
||||
name: instrumentation-pymysql
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -685,6 +753,7 @@ jobs:
|
|||
lint-instrumentation-pymssql:
|
||||
name: instrumentation-pymssql
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -703,6 +772,7 @@ jobs:
|
|||
lint-instrumentation-pyramid:
|
||||
name: instrumentation-pyramid
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -721,6 +791,7 @@ jobs:
|
|||
lint-instrumentation-asgi:
|
||||
name: instrumentation-asgi
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -739,6 +810,7 @@ jobs:
|
|||
lint-instrumentation-asyncpg:
|
||||
name: instrumentation-asyncpg
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -757,6 +829,7 @@ jobs:
|
|||
lint-instrumentation-sqlite3:
|
||||
name: instrumentation-sqlite3
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -775,6 +848,7 @@ jobs:
|
|||
lint-instrumentation-wsgi:
|
||||
name: instrumentation-wsgi
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -793,6 +867,7 @@ jobs:
|
|||
lint-instrumentation-grpc:
|
||||
name: instrumentation-grpc
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -811,6 +886,7 @@ jobs:
|
|||
lint-instrumentation-sqlalchemy:
|
||||
name: instrumentation-sqlalchemy
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -829,6 +905,7 @@ jobs:
|
|||
lint-instrumentation-redis:
|
||||
name: instrumentation-redis
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -847,6 +924,7 @@ jobs:
|
|||
lint-instrumentation-remoulade:
|
||||
name: instrumentation-remoulade
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -865,6 +943,7 @@ jobs:
|
|||
lint-instrumentation-celery:
|
||||
name: instrumentation-celery
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -883,6 +962,7 @@ jobs:
|
|||
lint-instrumentation-system-metrics:
|
||||
name: instrumentation-system-metrics
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -901,6 +981,7 @@ jobs:
|
|||
lint-instrumentation-threading:
|
||||
name: instrumentation-threading
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -919,6 +1000,7 @@ jobs:
|
|||
lint-instrumentation-tornado:
|
||||
name: instrumentation-tornado
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -937,6 +1019,7 @@ jobs:
|
|||
lint-instrumentation-tortoiseorm:
|
||||
name: instrumentation-tortoiseorm
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -955,6 +1038,7 @@ jobs:
|
|||
lint-instrumentation-httpx:
|
||||
name: instrumentation-httpx
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -973,6 +1057,7 @@ jobs:
|
|||
lint-util-http:
|
||||
name: util-http
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -991,6 +1076,7 @@ jobs:
|
|||
lint-propagator-aws-xray:
|
||||
name: propagator-aws-xray
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -1009,6 +1095,7 @@ jobs:
|
|||
lint-propagator-ot-trace:
|
||||
name: propagator-ot-trace
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -1027,6 +1114,7 @@ jobs:
|
|||
lint-instrumentation-sio-pika:
|
||||
name: instrumentation-sio-pika
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -1045,6 +1133,7 @@ jobs:
|
|||
lint-instrumentation-aio-pika:
|
||||
name: instrumentation-aio-pika
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -1063,6 +1152,7 @@ jobs:
|
|||
lint-instrumentation-aiokafka:
|
||||
name: instrumentation-aiokafka
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -1081,6 +1171,7 @@ jobs:
|
|||
lint-instrumentation-kafka-python:
|
||||
name: instrumentation-kafka-python
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -1099,6 +1190,7 @@ jobs:
|
|||
lint-instrumentation-confluent-kafka:
|
||||
name: instrumentation-confluent-kafka
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -1117,6 +1209,7 @@ jobs:
|
|||
lint-instrumentation-asyncio:
|
||||
name: instrumentation-asyncio
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -1135,6 +1228,7 @@ jobs:
|
|||
lint-instrumentation-cassandra:
|
||||
name: instrumentation-cassandra
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -1153,6 +1247,7 @@ jobs:
|
|||
lint-processor-baggage:
|
||||
name: processor-baggage
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
|
|
@ -9,8 +9,20 @@ on:
|
|||
- 'release/*'
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CORE_REPO_SHA: main
|
||||
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
|
||||
# For PRs you can change the inner fallback ('main')
|
||||
# For pushes you change the outer fallback ('main')
|
||||
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
|
||||
CORE_REPO_SHA: ${{ github.event_name == 'pull_request' && (
|
||||
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
|
||||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
|
||||
'main'
|
||||
) || 'main' }}
|
||||
CONTRIB_REPO_SHA: main
|
||||
PIP_EXISTS_ACTION: w
|
||||
|
||||
|
@ -19,6 +31,7 @@ jobs:
|
|||
spellcheck:
|
||||
name: spellcheck
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -37,6 +50,7 @@ jobs:
|
|||
docker-tests:
|
||||
name: docker-tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -55,6 +69,7 @@ jobs:
|
|||
docs:
|
||||
name: docs
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
if: |
|
||||
github.event.pull_request.user.login != 'opentelemetrybot' && github.event_name == 'pull_request'
|
||||
steps:
|
||||
|
@ -75,6 +90,7 @@ jobs:
|
|||
generate:
|
||||
name: generate
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -96,6 +112,7 @@ jobs:
|
|||
generate-workflows:
|
||||
name: generate-workflows
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
if: |
|
||||
!contains(github.event.pull_request.labels.*.name, 'Skip generate-workflows')
|
||||
&& github.event.pull_request.user.login != 'opentelemetrybot' && github.event_name == 'pull_request'
|
||||
|
@ -120,6 +137,7 @@ jobs:
|
|||
shellcheck:
|
||||
name: shellcheck
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -138,6 +156,7 @@ jobs:
|
|||
ruff:
|
||||
name: ruff
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
@ -156,6 +175,7 @@ jobs:
|
|||
typecheck:
|
||||
name: typecheck
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repo @ SHA - ${{ github.sha }}
|
||||
uses: actions/checkout@v4
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
name: OSSF Scorecard
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
schedule:
|
||||
- cron: "10 6 * * 1" # once a week
|
||||
workflow_dispatch:
|
||||
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
analysis:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
# Needed for Code scanning upload
|
||||
security-events: write
|
||||
# Needed for GitHub OIDC token if publish_results is true
|
||||
id-token: write
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
publish_results: true
|
||||
|
||||
# Upload the results as artifacts (optional). Commenting out will disable
|
||||
# uploads of run results in SARIF format to the repository Actions tab.
|
||||
# https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
retention-days: 5
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard (optional).
|
||||
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@5f8171a638ada777af81d42b55959a643bb29017 # v3.28.12
|
||||
with:
|
||||
sarif_file: results.sarif
|
|
@ -78,7 +78,7 @@ jobs:
|
|||
# next few steps publish to pypi
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.8'
|
||||
python-version: '3.9'
|
||||
|
||||
- name: Build wheels
|
||||
run: ./scripts/build_a_package.sh
|
||||
|
|
|
@ -71,6 +71,7 @@ jobs:
|
|||
run: .github/scripts/use-cla-approved-github-bot.sh
|
||||
|
||||
- name: Create pull request
|
||||
id: create_pr
|
||||
env:
|
||||
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
|
||||
GITHUB_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }}
|
||||
|
@ -80,7 +81,15 @@ jobs:
|
|||
|
||||
git commit -a -m "$message"
|
||||
git push origin HEAD:$branch
|
||||
gh pr create --title "[$GITHUB_REF_NAME] $message" \
|
||||
pr_url=$(gh pr create --title "[$GITHUB_REF_NAME] $message" \
|
||||
--body "$message." \
|
||||
--head $branch \
|
||||
--base $GITHUB_REF_NAME
|
||||
--base $GITHUB_REF_NAME)
|
||||
echo "pr_url=$pr_url" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Add prepare-release label to PR
|
||||
if: steps.create_pr.outputs.pr_url != ''
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh pr edit ${{ steps.create_pr.outputs.pr_url }} --add-label "prepare-release"
|
||||
|
|
|
@ -94,6 +94,7 @@ jobs:
|
|||
run: .github/scripts/use-cla-approved-github-bot.sh
|
||||
|
||||
- name: Create pull request against the release branch
|
||||
id: create_release_branch_pr
|
||||
env:
|
||||
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
|
||||
GITHUB_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }}
|
||||
|
@ -103,10 +104,18 @@ jobs:
|
|||
|
||||
git commit -a -m "$message"
|
||||
git push origin HEAD:$branch
|
||||
gh pr create --title "[$RELEASE_BRANCH_NAME] $message" \
|
||||
pr_url=$(gh pr create --title "[$RELEASE_BRANCH_NAME] $message" \
|
||||
--body "$message." \
|
||||
--head $branch \
|
||||
--base $RELEASE_BRANCH_NAME
|
||||
--base $RELEASE_BRANCH_NAME)
|
||||
echo "pr_url=$pr_url" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Add prepare-release label to PR
|
||||
if: steps.create_release_branch_pr.outputs.pr_url != ''
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh pr edit ${{ steps.create_release_branch_pr.outputs.pr_url }} --add-label "prepare-release"
|
||||
|
||||
create-pull-request-against-main:
|
||||
runs-on: ubuntu-latest
|
||||
|
@ -179,6 +188,7 @@ jobs:
|
|||
run: .github/scripts/use-cla-approved-github-bot.sh
|
||||
|
||||
- name: Create pull request against main
|
||||
id: create_main_pr
|
||||
env:
|
||||
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
|
||||
GITHUB_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }}
|
||||
|
@ -189,7 +199,15 @@ jobs:
|
|||
|
||||
git commit -a -m "$message"
|
||||
git push origin HEAD:$branch
|
||||
gh pr create --title "$message" \
|
||||
pr_url=$(gh pr create --title "$message" \
|
||||
--body "$body" \
|
||||
--head $branch \
|
||||
--base main
|
||||
--base main)
|
||||
echo "pr_url=$pr_url" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Add prepare-release label to PR
|
||||
if: steps.create_main_pr.outputs.pr_url != ''
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh pr edit ${{ steps.create_main_pr.outputs.pr_url }} --add-label "prepare-release"
|
||||
|
|
|
@ -66,7 +66,7 @@ jobs:
|
|||
# next few steps publish to pypi
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.8'
|
||||
python-version: '3.9'
|
||||
|
||||
- name: Build wheels
|
||||
run: ./scripts/build.sh
|
||||
|
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -46,7 +46,7 @@ suggestion-mode=yes
|
|||
unsafe-load-any-extension=no
|
||||
|
||||
# Run python dependant checks considering the baseline version
|
||||
py-version=3.8
|
||||
py-version=3.9
|
||||
|
||||
|
||||
[MESSAGES CONTROL]
|
||||
|
|
144
CHANGELOG.md
144
CHANGELOG.md
|
@ -11,6 +11,145 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
|
||||
## Unreleased
|
||||
|
||||
## Version 1.35.0/0.56b0 (2025-07-11)
|
||||
|
||||
### Added
|
||||
|
||||
- `opentelemetry-instrumentation-pika` Added instrumentation for All `SelectConnection` adapters
|
||||
([#3584](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3584))
|
||||
- `opentelemetry-instrumentation-tornado` Add support for `WebSocketHandler` instrumentation
|
||||
([#3498](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3498))
|
||||
- `opentelemetry-util-http` Added support for redacting specific url query string values and url credentials in instrumentations
|
||||
([#3508](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3508))
|
||||
- `opentelemetry-instrumentation-pymongo` `aggregate` and `getMore` capture statements support
|
||||
([#3601](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3601))
|
||||
|
||||
### Fixed
|
||||
|
||||
- `opentelemetry-instrumentation-asgi`: fix excluded_urls in instrumentation-asgi
|
||||
([#3567](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3567))
|
||||
- `opentelemetry-resource-detector-containerid`: make it more quiet on platforms without cgroups
|
||||
([#3579](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3579))
|
||||
|
||||
## Version 1.34.0/0.55b0 (2025-06-04)
|
||||
|
||||
### Fixed
|
||||
|
||||
- `opentelemetry-instrumentation-system-metrics`: fix loading on Google Cloud Run
|
||||
([#3533](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3533))
|
||||
- `opentelemetry-instrumentation-fastapi`: fix wrapping of middlewares
|
||||
([#3012](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3012))
|
||||
- `opentelemetry-instrumentation-starlette` Remove max version constraint on starlette
|
||||
([#3456](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3456))
|
||||
- `opentelemetry-instrumentation-starlette` Fix memory leak and double middleware
|
||||
([#3529](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3529))
|
||||
- `opentelemetry-instrumentation-urllib3`: proper bucket boundaries in stable semconv http duration metrics
|
||||
([#3518](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3518))
|
||||
- `opentelemetry-instrumentation-urllib`: proper bucket boundaries in stable semconv http duration metrics
|
||||
([#3519](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3519))
|
||||
- `opentelemetry-instrumentation-falcon`: proper bucket boundaries in stable semconv http duration
|
||||
([#3525](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3525))
|
||||
- `opentelemetry-instrumentation-wsgi`: add explicit http duration buckets for stable semconv
|
||||
([#3527](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3527))
|
||||
- `opentelemetry-instrumentation-asgi`: add explicit http duration buckets for stable semconv
|
||||
([#3526](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3526))
|
||||
- `opentelemetry-instrumentation-flask`: proper bucket boundaries in stable semconv http duration
|
||||
([#3523](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3523))
|
||||
- `opentelemetry-instrumentation-django`: proper bucket boundaries in stable semconv http duration
|
||||
([#3524](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3524))
|
||||
- `opentelemetry-instrumentation-grpc`: support non-list interceptors
|
||||
([#3520](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3520))
|
||||
- `opentelemetry-instrumentation-botocore` Ensure spans end on early stream closure for Bedrock Streaming APIs
|
||||
([#3481](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3481))
|
||||
- `opentelemetry-instrumentation-sqlalchemy` Respect suppress_instrumentation functionality
|
||||
([#3477](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3477))
|
||||
- `opentelemetry-instrumentation-botocore`: fix handling of tool input in Bedrock ConverseStream
|
||||
([#3544](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3544))
|
||||
- `opentelemetry-instrumentation-botocore` Add type check when extracting tool use from Bedrock request message content
|
||||
([#3548](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3548))
|
||||
- `opentelemetry-instrumentation-dbapi` Respect suppress_instrumentation functionality ([#3460](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3460))
|
||||
- `opentelemetry-resource-detector-container` Correctly parse container id when using systemd and cgroupsv1
|
||||
([#3429](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3429))
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- `opentelemetry-instrumentation-botocore` Use `cloud.region` instead of `aws.region` span attribute as per semantic conventions.
|
||||
([#3474](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3474))
|
||||
- `opentelemetry-instrumentation-fastapi`: Drop support for FastAPI versions earlier than `0.92`
|
||||
([#3012](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3012))
|
||||
- `opentelemetry-resource-detector-container`: rename package name to `opentelemetry-resource-detector-containerid`
|
||||
([#3536](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3536))
|
||||
|
||||
### Added
|
||||
|
||||
- `opentelemetry-instrumentation-aiohttp-client` Add support for HTTP metrics
|
||||
([#3517](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3517))
|
||||
- `opentelemetry-instrumentation-httpx` Add support for HTTP metrics
|
||||
([#3513](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3513))
|
||||
- `opentelemetry-instrumentation` Allow re-raising exception when instrumentation fails
|
||||
([#3545](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3545))
|
||||
- `opentelemetry-instrumentation-aiokafka` Add instrumentation of `consumer.getmany` (batch)
|
||||
([#3257](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3257))
|
||||
|
||||
### Deprecated
|
||||
|
||||
- Drop support for Python 3.8, bump baseline to Python 3.9.
|
||||
([#3399](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3399))
|
||||
|
||||
## Version 1.33.0/0.54b0 (2025-05-09)
|
||||
|
||||
### Added
|
||||
|
||||
- `opentelemetry-instrumentation-requests` Support explicit_bucket_boundaries_advisory in duration metrics
|
||||
([#3464](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3464))
|
||||
- `opentelemetry-instrumentation-redis` Add support for redis client-specific instrumentation.
|
||||
([#3143](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3143))
|
||||
|
||||
### Fixed
|
||||
|
||||
- `opentelemetry-instrumentation` Catch `ModuleNotFoundError` when the library is not installed
|
||||
and log as debug instead of exception
|
||||
([#3423](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3423))
|
||||
- `opentelemetry-instrumentation-asyncio` Fix duplicate instrumentation
|
||||
([#3383](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3383))
|
||||
- `opentelemetry-instrumentation-botocore` Add GenAI instrumentation for additional Bedrock models for InvokeModel API
|
||||
([#3419](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3419))
|
||||
- `opentelemetry-instrumentation` don't print duplicated conflict log error message
|
||||
([#3432](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3432))
|
||||
- `opentelemetry-instrumentation-grpc` Check for None result in gRPC
|
||||
([#3380](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3381))
|
||||
- `opentelemetry-instrumentation-[asynclick/click]` Add missing opentelemetry-instrumentation dep
|
||||
([#3447](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3447))
|
||||
- `opentelemetry-instrumentation-botocore` Capture server attributes for botocore API calls
|
||||
([#3448](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3448))
|
||||
|
||||
## Version 1.32.0/0.53b0 (2025-04-10)
|
||||
|
||||
### Added
|
||||
|
||||
- `opentelemetry-instrumentation-asyncclick`: new instrumentation to trace asyncclick commands
|
||||
([#3319](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3319))
|
||||
- `opentelemetry-instrumentation-botocore` Add support for GenAI tool events using Amazon Nova models and `InvokeModel*` APIs
|
||||
([#3385](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3385))
|
||||
- `opentelemetry-instrumentation` Make auto instrumentation use the same dependency resolver as manual instrumentation does
|
||||
([#3202](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3202))
|
||||
|
||||
### Fixed
|
||||
|
||||
- `opentelemetry-instrumentation` Fix client address is set to server address in new semconv
|
||||
([#3354](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3354))
|
||||
- `opentelemetry-instrumentation-dbapi`, `opentelemetry-instrumentation-django`,
|
||||
`opentelemetry-instrumentation-sqlalchemy`: Fix sqlcomment for non string query and composable object.
|
||||
([#3113](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3113))
|
||||
- `opentelemetry-instrumentation-grpc` Fix error when using gprc versions <= 1.50.0 with unix sockets.
|
||||
([[#3393](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3393)])
|
||||
- `opentelemetry-instrumentation-asyncio` Fix duplicate instrumentation.
|
||||
([[#3383](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3383)])
|
||||
- `opentelemetry-instrumentation-aiokafka` Fix send_and_wait method no headers kwargs error.
|
||||
([[#3332](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3332)])
|
||||
|
||||
## Version 1.31.0/0.52b0 (2025-03-12)
|
||||
|
||||
### Added
|
||||
|
||||
- `opentelemetry-instrumentation-openai-v2` Update doc for OpenAI Instrumentation to support OpenAI Compatible Platforms
|
||||
|
@ -34,7 +173,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
- Loosen `opentelemetry-instrumentation-starlette[instruments]` specifier
|
||||
([#3304](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3304))
|
||||
|
||||
|
||||
### Fixed
|
||||
|
||||
- `opentelemetry-instrumentation-redis` Add missing entry in doc string for `def _instrument`
|
||||
|
@ -44,6 +182,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
([#3249](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3249))
|
||||
- `opentelemetry-instrumentation-asyncpg` Fix fallback for empty queries.
|
||||
([#3253](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3253))
|
||||
- `opentelemetry-instrumentation` Fix a traceback in sqlcommenter when psycopg connection pooling is enabled.
|
||||
([#3309](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3309))
|
||||
- `opentelemetry-instrumentation-threading` Fix broken context typehints
|
||||
([#3322](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3322))
|
||||
- `opentelemetry-instrumentation-requests` always record span status code in duration metric
|
||||
|
@ -108,7 +248,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
### Breaking changes
|
||||
|
||||
- `opentelemetry-exporter-prometheus-remote-write` updated protobuf required version from 4.21 to 5.26 and regenerated protobufs
|
||||
([#3219](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3219))
|
||||
([#3219](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3219))
|
||||
- `opentelemetry-instrumentation-sqlalchemy` including sqlcomment in `db.statement` span attribute value is now opt-in
|
||||
([#3112](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3112))
|
||||
- `opentelemetry-instrumentation-dbapi` including sqlcomment in `db.statement` span attribute value is now opt-in
|
||||
|
|
48
README.md
48
README.md
|
@ -111,7 +111,16 @@ We meet weekly on Thursday at 9AM PT. The meeting is subject to change depending
|
|||
|
||||
Meeting notes are available as a public [Google doc](https://docs.google.com/document/d/1CIMGoIOZ-c3-igzbd6_Pnxx1SjAkjwqoYSUWxPY8XIs/edit). For edit access, get in touch on [GitHub Discussions](https://github.com/open-telemetry/opentelemetry-python/discussions).
|
||||
|
||||
Approvers ([@open-telemetry/python-approvers](https://github.com/orgs/open-telemetry/teams/python-approvers)):
|
||||
### Maintainers
|
||||
|
||||
- [Aaron Abbott](https://github.com/aabmass), Google
|
||||
- [Leighton Chen](https://github.com/lzchen), Microsoft
|
||||
- [Riccardo Magliocchetti](https://github.com/xrmx), Elastic
|
||||
- [Shalev Roda](https://github.com/shalevr), Cisco
|
||||
|
||||
For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
|
||||
|
||||
### Approvers
|
||||
|
||||
- [Emídio Neto](https://github.com/emdneto), PicPay
|
||||
- [Jeremy Voss](https://github.com/jeremydvoss), Microsoft
|
||||
|
@ -121,34 +130,29 @@ Approvers ([@open-telemetry/python-approvers](https://github.com/orgs/open-telem
|
|||
- [Srikanth Chekuri](https://github.com/srikanthccv), signoz.io
|
||||
- [Tammy Baylis](https://github.com/tammy-baylis-swi), SolarWinds
|
||||
|
||||
Emeritus Approvers:
|
||||
For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
|
||||
|
||||
- [Ashutosh Goel](https://github.com/ashu658), Cisco
|
||||
- [Héctor Hernández](https://github.com/hectorhdzg), Microsoft
|
||||
- [Nikolay Sokolik](https://github.com/oxeye-nikolay), Oxeye
|
||||
- [Nikolay Sokolik](https://github.com/nikosokolik), Oxeye
|
||||
- [Nathaniel Ruiz Nowell](https://github.com/NathanielRN), AWS
|
||||
### Emeritus Maintainers
|
||||
|
||||
*Find more about the approver role in [community repository](https://github.com/open-telemetry/community/blob/main/community-membership.md#approver).*
|
||||
- [Alex Boten](https://github.com/codeboten)
|
||||
- [Diego Hurtado](https://github.com/ocelotl)
|
||||
- [Owais Lone](https://github.com/owais)
|
||||
- [Yusuke Tsutsumi](https://github.com/toumorokoshi)
|
||||
|
||||
Maintainers ([@open-telemetry/python-maintainers](https://github.com/orgs/open-telemetry/teams/python-maintainers)):
|
||||
For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
|
||||
|
||||
- [Aaron Abbott](https://github.com/aabmass), Google
|
||||
- [Leighton Chen](https://github.com/lzchen), Microsoft
|
||||
- [Riccardo Magliocchetti](https://github.com/xrmx), Elastic
|
||||
- [Shalev Roda](https://github.com/shalevr), Cisco
|
||||
### Emeritus Approvers
|
||||
|
||||
Emeritus Maintainers:
|
||||
- [Ashutosh Goel](https://github.com/ashu658)
|
||||
- [Héctor Hernández](https://github.com/hectorhdzg)
|
||||
- [Nathaniel Ruiz Nowell](https://github.com/NathanielRN)
|
||||
- [Nikolay Sokolik](https://github.com/nikosokolik)
|
||||
- [Nikolay Sokolik](https://github.com/oxeye-nikolay)
|
||||
|
||||
- [Alex Boten](https://github.com/codeboten), Lightstep
|
||||
- [Diego Hurtado](https://github.com/ocelotl), Lightstep
|
||||
- [Owais Lone](https://github.com/owais), Splunk
|
||||
- [Yusuke Tsutsumi](https://github.com/toumorokoshi), Google
|
||||
For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
|
||||
|
||||
*Find more about the maintainer role in [community repository](https://github.com/open-telemetry/community/blob/main/community-membership.md#maintainer).*
|
||||
|
||||
### Thanks to all the people who already contributed
|
||||
### Thanks to all of our contributors!
|
||||
|
||||
<a href="https://github.com/open-telemetry/opentelemetry-python-contrib/graphs/contributors">
|
||||
<img src="https://contributors-img.web.app/image?repo=open-telemetry/opentelemetry-python-contrib" />
|
||||
<img alt="Repo contributors" src="https://contrib.rocks/image?repo=open-telemetry/opentelemetry-python-contrib" />
|
||||
</a>
|
||||
|
|
|
@ -9,7 +9,8 @@
|
|||
(otherwise the workflow will pick up the version from `main` and just remove the `.dev` suffix).
|
||||
* Review the two pull requests that it creates.
|
||||
(one is targeted to the release branch and one is targeted to `main`).
|
||||
* The builds will fail for both the `main` and release pr because of validation rules. Follow the [release workflow](https://github.com/open-telemetry/opentelemetry-python/blob/main/RELEASING.md) for the core repo up until this same point. Change the SHAs of each PR to point at each other to get the `main` and release builds to pass.
|
||||
* The builds will fail for the release PR because of validation rules. Follow the [release workflow](https://github.com/open-telemetry/opentelemetry-python/blob/main/RELEASING.md) for the core repo up until this same point.
|
||||
* Close and reopen the PR so that the workflow will take into account the label automation we have in place
|
||||
* Merge the release PR.
|
||||
* Merge the PR to main (this can be done separately from [making the release](#making-the-release))
|
||||
|
||||
|
@ -40,6 +41,8 @@ The workflow will create two pull requests, one against the `main` and one again
|
|||
* Press the "Run workflow" button, then select the release branch from the dropdown list,
|
||||
e.g. `release/v1.9.x`, then enter the pull request number that you want to backport,
|
||||
then click the "Run workflow" button below that.
|
||||
* Add the label `backport` to the generated pull request.
|
||||
* In case label automation doesn't work, just close and reopen the PR so that the workflow will take into account the label automation we have in place.
|
||||
* Review and merge the backport pull request that it generates.
|
||||
* Merge a pull request to the release branch updating the `CHANGELOG.md`.
|
||||
* The heading for the unreleased entries should be `## Unreleased`.
|
||||
|
@ -47,6 +50,7 @@ The workflow will create two pull requests, one against the `main` and one again
|
|||
* Press the "Run workflow" button, then select the release branch from the dropdown list,
|
||||
e.g. `release/v1.9.x`, and click the "Run workflow" button below that.
|
||||
* Review and merge the pull request that it creates for updating the version.
|
||||
* Note: If you are doing a patch release in `-contrib` repo, you should also do an equivalent patch release in `-core` repo (even if there's no fix to release), otherwise tests in CI will fail.
|
||||
|
||||
### Preparing a patch release for individual package
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ dynamic = ["version"]
|
|||
description = "<REPLACE ME>"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -22,7 +22,6 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
|
|
|
@ -12,4 +12,4 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__version__ = "0.52b0.dev"
|
||||
__version__ = "0.57b0.dev"
|
||||
|
|
|
@ -15,6 +15,7 @@ aiohttp~=3.0
|
|||
aiokafka~=0.11.0
|
||||
aiopg>=0.13.0,<1.3.0
|
||||
asyncpg>=0.12.0
|
||||
asyncclick~=8.0
|
||||
boto~=2.0
|
||||
botocore~=1.0
|
||||
boto3~=1.0
|
||||
|
|
|
@ -122,6 +122,7 @@ intersphinx_mapping = {
|
|||
"https://opentelemetry-python.readthedocs.io/en/latest/",
|
||||
None,
|
||||
),
|
||||
"redis": ("https://redis.readthedocs.io/en/latest/", None),
|
||||
}
|
||||
|
||||
# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky
|
||||
|
|
|
@ -52,7 +52,7 @@ install <https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs>
|
|||
pip install -e ./instrumentation/opentelemetry-instrumentation-botocore
|
||||
pip install -e ./instrumentation-genai/opentelemetry-instrumentation-openai-v2
|
||||
pip install -e ./sdk-extension/opentelemetry-sdk-extension-aws
|
||||
pip install -e ./resource/opentelemetry-resource-detector-container
|
||||
pip install -e ./resource/opentelemetry-resource-detector-containerid
|
||||
|
||||
|
||||
.. toctree::
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
.. include:: ../../../instrumentation/opentelemetry-instrumentation-asyncclick/README.rst
|
||||
:end-before: References
|
||||
|
||||
.. automodule:: opentelemetry.instrumentation.asyncclick
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
|
@ -1,7 +1,10 @@
|
|||
OpenTelemetry Redis Instrumentation
|
||||
===================================
|
||||
.. include:: ../../../instrumentation/opentelemetry-instrumentation-redis/README.rst
|
||||
:end-before: References
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
.. automodule:: opentelemetry.instrumentation.redis
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
:show-inheritance:
|
|
@ -44,6 +44,7 @@ py-class=
|
|||
psycopg.Connection
|
||||
psycopg.AsyncConnection
|
||||
ObjectProxy
|
||||
fastapi.applications.FastAPI
|
||||
|
||||
any=
|
||||
; API
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
OpenTelemetry Python - Resource Detector for Containers
|
||||
=======================================================
|
||||
|
||||
.. automodule:: opentelemetry.resource.detector.container
|
||||
.. automodule:: opentelemetry.resource.detector.containerid
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
|
|
@ -16,7 +16,7 @@ sortfirst=
|
|||
ext/*
|
||||
|
||||
[stable]
|
||||
version=1.31.0.dev
|
||||
version=1.36.0.dev
|
||||
|
||||
packages=
|
||||
opentelemetry-sdk
|
||||
|
@ -34,7 +34,7 @@ packages=
|
|||
opentelemetry-api
|
||||
|
||||
[prerelease]
|
||||
version=0.52b0.dev
|
||||
version=0.57b0.dev
|
||||
|
||||
packages=
|
||||
all
|
||||
|
@ -43,7 +43,7 @@ packages=
|
|||
opentelemetry-instrumentation
|
||||
opentelemetry-contrib-instrumentations
|
||||
opentelemetry-distro
|
||||
opentelemetry-resource-detector-container
|
||||
opentelemetry-resource-detector-containerid
|
||||
|
||||
[exclude_release]
|
||||
packages=
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
FROM python:3.8
|
||||
FROM python:3.9
|
||||
|
||||
RUN apt-get update -y && apt-get install libsnappy-dev -y
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ dynamic = ["version"]
|
|||
description = "Prometheus Remote Write Metrics Exporter for OpenTelemetry"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -19,7 +19,6 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
|
|
|
@ -12,4 +12,4 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__version__ = "0.52b0.dev"
|
||||
__version__ = "0.57b0.dev"
|
||||
|
|
|
@ -1,9 +1,7 @@
|
|||
asgiref==3.8.1
|
||||
certifi==2024.7.4
|
||||
charset-normalizer==3.3.2
|
||||
# We can drop this after bumping baseline to pypy-39
|
||||
cramjam==2.1.0; platform_python_implementation == "PyPy"
|
||||
cramjam==2.8.4; platform_python_implementation != "PyPy"
|
||||
cramjam==2.8.4
|
||||
Deprecated==1.2.14
|
||||
idna==3.7
|
||||
iniconfig==2.0.0
|
||||
|
|
|
@ -8,7 +8,7 @@ dynamic = ["version"]
|
|||
description = "Rich Console Exporter for OpenTelemetry"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -18,7 +18,6 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
|
@ -28,7 +27,7 @@ classifiers = [
|
|||
dependencies = [
|
||||
"opentelemetry-api ~= 1.12",
|
||||
"opentelemetry-sdk ~= 1.12",
|
||||
"opentelemetry-semantic-conventions == 0.52b0.dev",
|
||||
"opentelemetry-semantic-conventions == 0.57b0.dev",
|
||||
"rich>=10.0.0",
|
||||
]
|
||||
|
||||
|
|
|
@ -64,11 +64,15 @@ from rich.tree import Tree
|
|||
import opentelemetry.trace
|
||||
from opentelemetry.sdk.trace import ReadableSpan
|
||||
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
|
||||
from opentelemetry.semconv.trace import SpanAttributes
|
||||
from opentelemetry.semconv._incubating.attributes.db_attributes import (
|
||||
DB_STATEMENT,
|
||||
)
|
||||
|
||||
|
||||
def _ns_to_time(nanoseconds):
|
||||
ts = datetime.datetime.utcfromtimestamp(nanoseconds / 1e9)
|
||||
ts = datetime.datetime.fromtimestamp(
|
||||
nanoseconds / 1e9, datetime.timezone.utc
|
||||
)
|
||||
return ts.strftime("%H:%M:%S.%f")
|
||||
|
||||
|
||||
|
@ -118,7 +122,7 @@ def _child_add_optional_attributes(child: Tree, span: ReadableSpan):
|
|||
label=Text.from_markup("[bold cyan]Attributes :[/bold cyan] ")
|
||||
)
|
||||
for attribute in span.attributes:
|
||||
if attribute == SpanAttributes.DB_STATEMENT:
|
||||
if attribute == DB_STATEMENT:
|
||||
attributes.add(
|
||||
Text.from_markup(f"[bold cyan]{attribute} :[/bold cyan] ")
|
||||
)
|
||||
|
|
|
@ -12,4 +12,4 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__version__ = "0.52b0.dev"
|
||||
__version__ = "0.57b0.dev"
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
|
||||
| Instrumentation | Supported Packages | Metrics support | Semconv status |
|
||||
| --------------- | ------------------ | --------------- | -------------- |
|
||||
| [opentelemetry-instrumentation-google-genai](./opentelemetry-instrumentation-google-genai) | google-genai >= 1.0.0 | No | development
|
||||
| [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development
|
||||
| [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development
|
|
@ -7,6 +7,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
|
||||
## Unreleased
|
||||
|
||||
## Version 0.3b0 (2025-07-08)
|
||||
|
||||
- Add automatic instrumentation to tool call functions ([#3446](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3446))
|
||||
|
||||
## Version 0.2b0 (2025-04-28)
|
||||
|
||||
- Add more request configuration options to the span attributes ([#3374](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3374))
|
||||
- Restructure tests to keep in line with repository conventions ([#3344](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3344))
|
||||
|
||||
- Fix [bug](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3416) where
|
||||
span attribute `gen_ai.response.finish_reasons` is empty ([#3417](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3417))
|
||||
|
||||
## Version 0.1b0 (2025-03-05)
|
||||
|
||||
- Add support for async and streaming.
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
|
||||
Here are some TODO items required to achieve stability for this package:
|
||||
|
||||
- Add more span-level attributes for request configuration
|
||||
- Add more span-level attributes for response information
|
||||
- Verify and correct formatting of events:
|
||||
- Including the 'role' field for message events
|
||||
|
@ -13,7 +12,6 @@ Here are some TODO items required to achieve stability for this package:
|
|||
- Additional cleanup/improvement tasks such as:
|
||||
- Adoption of 'wrapt' instead of 'functools.wraps'
|
||||
- Bolstering test coverage
|
||||
- Migrate tests to use VCR.py
|
||||
|
||||
## Future
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ dynamic = ["version"]
|
|||
description = "OpenTelemetry"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -37,9 +37,9 @@ classifiers = [
|
|||
"Programming Language :: Python :: 3.12"
|
||||
]
|
||||
dependencies = [
|
||||
"opentelemetry-api >=1.30.0, <2",
|
||||
"opentelemetry-instrumentation >=0.51b0, <2",
|
||||
"opentelemetry-semantic-conventions >=0.51b0, <2"
|
||||
"opentelemetry-api >=1.31.1, <2",
|
||||
"opentelemetry-instrumentation >=0.52b1, <2",
|
||||
"opentelemetry-semantic-conventions >=0.52b1, <2"
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
|
|
|
@ -0,0 +1,97 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import re
|
||||
from typing import Iterable, Optional, Set
|
||||
|
||||
ALLOWED = True
|
||||
DENIED = False
|
||||
|
||||
|
||||
def _parse_env_list(s: str) -> Set[str]:
|
||||
result = set()
|
||||
for entry in s.split(","):
|
||||
stripped_entry = entry.strip()
|
||||
if not stripped_entry:
|
||||
continue
|
||||
result.add(stripped_entry)
|
||||
return result
|
||||
|
||||
|
||||
class _CompoundMatcher:
|
||||
def __init__(self, entries: Set[str]):
|
||||
self._match_all = "*" in entries
|
||||
self._entries = entries
|
||||
self._regex_matcher = None
|
||||
regex_entries = []
|
||||
for entry in entries:
|
||||
if "*" not in entry:
|
||||
continue
|
||||
if entry == "*":
|
||||
continue
|
||||
entry = entry.replace("[", "\\[")
|
||||
entry = entry.replace("]", "\\]")
|
||||
entry = entry.replace(".", "\\.")
|
||||
entry = entry.replace("*", ".*")
|
||||
regex_entries.append(f"({entry})")
|
||||
if regex_entries:
|
||||
joined_regex = "|".join(regex_entries)
|
||||
regex_str = f"^({joined_regex})$"
|
||||
self._regex_matcher = re.compile(regex_str)
|
||||
|
||||
@property
|
||||
def match_all(self):
|
||||
return self._match_all
|
||||
|
||||
def matches(self, x):
|
||||
if self._match_all:
|
||||
return True
|
||||
if x in self._entries:
|
||||
return True
|
||||
if (self._regex_matcher is not None) and (
|
||||
self._regex_matcher.fullmatch(x)
|
||||
):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class AllowList:
|
||||
def __init__(
|
||||
self,
|
||||
includes: Optional[Iterable[str]] = None,
|
||||
excludes: Optional[Iterable[str]] = None,
|
||||
):
|
||||
self._includes = _CompoundMatcher(set(includes or []))
|
||||
self._excludes = _CompoundMatcher(set(excludes or []))
|
||||
assert (not self._includes.match_all) or (
|
||||
not self._excludes.match_all
|
||||
), "Can't have '*' in both includes and excludes."
|
||||
|
||||
def allowed(self, x: str):
|
||||
if self._excludes.match_all:
|
||||
return self._includes.matches(x)
|
||||
if self._includes.match_all:
|
||||
return not self._excludes.matches(x)
|
||||
return self._includes.matches(x) and not self._excludes.matches(x)
|
||||
|
||||
@staticmethod
|
||||
def from_env(
|
||||
includes_env_var: str, excludes_env_var: Optional[str] = None
|
||||
):
|
||||
includes = _parse_env_list(os.getenv(includes_env_var) or "")
|
||||
excludes = set()
|
||||
if excludes_env_var:
|
||||
excludes = _parse_env_list(os.getenv(excludes_env_var) or "")
|
||||
return AllowList(includes=includes, excludes=excludes)
|
|
@ -0,0 +1,18 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# Semantic Convention still being defined in:
|
||||
# https://github.com/open-telemetry/semantic-conventions/pull/2125
|
||||
GCP_GENAI_OPERATION_CONFIG = "gcp.gen_ai.operation.config"
|
|
@ -0,0 +1,301 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
Optional,
|
||||
Protocol,
|
||||
Sequence,
|
||||
Set,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
|
||||
Primitive = Union[bool, str, int, float]
|
||||
BoolList = list[bool]
|
||||
StringList = list[str]
|
||||
IntList = list[int]
|
||||
FloatList = list[float]
|
||||
HomogenousPrimitiveList = Union[BoolList, StringList, IntList, FloatList]
|
||||
FlattenedValue = Union[Primitive, HomogenousPrimitiveList]
|
||||
FlattenedDict = Dict[str, FlattenedValue]
|
||||
|
||||
|
||||
class FlattenFunc(Protocol):
|
||||
def __call__(
|
||||
self,
|
||||
key: str,
|
||||
value: Any,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, "FlattenFunc"],
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
return None
|
||||
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _concat_key(prefix: Optional[str], suffix: str):
|
||||
if not prefix:
|
||||
return suffix
|
||||
return f"{prefix}.{suffix}"
|
||||
|
||||
|
||||
def _is_primitive(v):
|
||||
for t in [str, bool, int, float]:
|
||||
if isinstance(v, t):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _is_homogenous_primitive_list(v):
|
||||
if not isinstance(v, list):
|
||||
return False
|
||||
if len(v) == 0:
|
||||
return True
|
||||
if not _is_primitive(v[0]):
|
||||
return False
|
||||
first_entry_value_type = type(v[0])
|
||||
for entry in v[1:]:
|
||||
if not isinstance(entry, first_entry_value_type):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def _get_flatten_func(
|
||||
flatten_functions: Dict[str, FlattenFunc], key_names: set[str]
|
||||
) -> Optional[FlattenFunc]:
|
||||
for key in key_names:
|
||||
flatten_func = flatten_functions.get(key)
|
||||
if flatten_func is not None:
|
||||
return flatten_func
|
||||
return None
|
||||
|
||||
|
||||
def _flatten_with_flatten_func(
|
||||
key: str,
|
||||
value: Any,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
key_names: Set[str],
|
||||
) -> Tuple[bool, Any]:
|
||||
flatten_func = _get_flatten_func(flatten_functions, key_names)
|
||||
if flatten_func is None:
|
||||
return False, value
|
||||
func_output = flatten_func(
|
||||
key,
|
||||
value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
if func_output is None:
|
||||
return True, {}
|
||||
if _is_primitive(func_output) or _is_homogenous_primitive_list(
|
||||
func_output
|
||||
):
|
||||
return True, {key: func_output}
|
||||
return False, func_output
|
||||
|
||||
|
||||
def _flatten_compound_value_using_json(
|
||||
key: str,
|
||||
value: Any,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
_from_json=False,
|
||||
) -> FlattenedDict:
|
||||
if _from_json:
|
||||
_logger.debug(
|
||||
"Cannot flatten value with key %s; value: %s", key, value
|
||||
)
|
||||
return {}
|
||||
try:
|
||||
json_string = json.dumps(value)
|
||||
except TypeError:
|
||||
_logger.debug(
|
||||
"Cannot flatten value with key %s; value: %s. Not JSON serializable.",
|
||||
key,
|
||||
value,
|
||||
)
|
||||
return {}
|
||||
json_value = json.loads(json_string)
|
||||
return _flatten_value(
|
||||
key,
|
||||
json_value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
# Ensure that we don't recurse indefinitely if "json.loads()" somehow returns
|
||||
# a complex, compound object that does not get handled by the "primitive", "list",
|
||||
# or "dict" cases. Prevents falling back on the JSON serialization fallback path.
|
||||
_from_json=True,
|
||||
)
|
||||
|
||||
|
||||
def _flatten_compound_value(
|
||||
key: str,
|
||||
value: Any,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
key_names: Set[str],
|
||||
_from_json=False,
|
||||
) -> FlattenedDict:
|
||||
fully_flattened_with_flatten_func, value = _flatten_with_flatten_func(
|
||||
key=key,
|
||||
value=value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
key_names=key_names,
|
||||
)
|
||||
if fully_flattened_with_flatten_func:
|
||||
return value
|
||||
if isinstance(value, dict):
|
||||
return _flatten_dict(
|
||||
value,
|
||||
key_prefix=key,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
if isinstance(value, list):
|
||||
if _is_homogenous_primitive_list(value):
|
||||
return {key: value}
|
||||
return _flatten_list(
|
||||
value,
|
||||
key_prefix=key,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
if hasattr(value, "model_dump"):
|
||||
return _flatten_dict(
|
||||
value.model_dump(),
|
||||
key_prefix=key,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
return _flatten_compound_value_using_json(
|
||||
key,
|
||||
value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
_from_json=_from_json,
|
||||
)
|
||||
|
||||
|
||||
def _flatten_value(
|
||||
key: str,
|
||||
value: Any,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
_from_json=False,
|
||||
) -> FlattenedDict:
|
||||
if value is None:
|
||||
return {}
|
||||
key_names = set([key])
|
||||
renamed_key = rename_keys.get(key)
|
||||
if renamed_key is not None:
|
||||
key_names.add(renamed_key)
|
||||
key = renamed_key
|
||||
if key_names & exclude_keys:
|
||||
return {}
|
||||
if _is_primitive(value):
|
||||
return {key: value}
|
||||
return _flatten_compound_value(
|
||||
key=key,
|
||||
value=value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
key_names=key_names,
|
||||
_from_json=_from_json,
|
||||
)
|
||||
|
||||
|
||||
def _flatten_dict(
|
||||
d: Dict[str, Any],
|
||||
key_prefix: str,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
) -> FlattenedDict:
|
||||
result = {}
|
||||
for key, value in d.items():
|
||||
if key in exclude_keys:
|
||||
continue
|
||||
full_key = _concat_key(key_prefix, key)
|
||||
flattened = _flatten_value(
|
||||
full_key,
|
||||
value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
result.update(flattened)
|
||||
return result
|
||||
|
||||
|
||||
def _flatten_list(
|
||||
lst: list[Any],
|
||||
key_prefix: str,
|
||||
exclude_keys: Set[str],
|
||||
rename_keys: Dict[str, str],
|
||||
flatten_functions: Dict[str, FlattenFunc],
|
||||
) -> FlattenedDict:
|
||||
result = {}
|
||||
result[_concat_key(key_prefix, "length")] = len(lst)
|
||||
for index, value in enumerate(lst):
|
||||
full_key = f"{key_prefix}[{index}]"
|
||||
flattened = _flatten_value(
|
||||
full_key,
|
||||
value,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
||||
result.update(flattened)
|
||||
return result
|
||||
|
||||
|
||||
def flatten_dict(
|
||||
d: Dict[str, Any],
|
||||
key_prefix: Optional[str] = None,
|
||||
exclude_keys: Optional[Sequence[str]] = None,
|
||||
rename_keys: Optional[Dict[str, str]] = None,
|
||||
flatten_functions: Optional[Dict[str, FlattenFunc]] = None,
|
||||
):
|
||||
key_prefix = key_prefix or ""
|
||||
exclude_keys = set(exclude_keys or [])
|
||||
rename_keys = rename_keys or {}
|
||||
flatten_functions = flatten_functions or {}
|
||||
return _flatten_dict(
|
||||
d,
|
||||
key_prefix=key_prefix,
|
||||
exclude_keys=exclude_keys,
|
||||
rename_keys=rename_keys,
|
||||
flatten_functions=flatten_functions,
|
||||
)
|
|
@ -12,6 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import copy
|
||||
import functools
|
||||
import json
|
||||
import logging
|
||||
|
@ -28,6 +29,7 @@ from google.genai.types import (
|
|||
ContentListUnionDict,
|
||||
ContentUnion,
|
||||
ContentUnionDict,
|
||||
GenerateContentConfig,
|
||||
GenerateContentConfigOrDict,
|
||||
GenerateContentResponse,
|
||||
)
|
||||
|
@ -39,8 +41,12 @@ from opentelemetry.semconv._incubating.attributes import (
|
|||
)
|
||||
from opentelemetry.semconv.attributes import error_attributes
|
||||
|
||||
from .allowlist_util import AllowList
|
||||
from .custom_semconv import GCP_GENAI_OPERATION_CONFIG
|
||||
from .dict_util import flatten_dict
|
||||
from .flags import is_content_recording_enabled
|
||||
from .otel_wrapper import OTelWrapper
|
||||
from .tool_call_wrapper import wrapped as wrapped_tool
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -129,21 +135,65 @@ def _determine_genai_system(models_object: Union[Models, AsyncModels]):
|
|||
return _get_gemini_system_name()
|
||||
|
||||
|
||||
def _get_config_property(
|
||||
config: Optional[GenerateContentConfigOrDict], path: str
|
||||
) -> Any:
|
||||
def _to_dict(value: object):
|
||||
if isinstance(value, dict):
|
||||
return value
|
||||
if hasattr(value, "model_dump"):
|
||||
return value.model_dump()
|
||||
return json.loads(json.dumps(value))
|
||||
|
||||
|
||||
def _add_request_options_to_span(
|
||||
span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList
|
||||
):
|
||||
if config is None:
|
||||
return None
|
||||
path_segments = path.split(".")
|
||||
current_context: Any = config
|
||||
for path_segment in path_segments:
|
||||
if current_context is None:
|
||||
return None
|
||||
if isinstance(current_context, dict):
|
||||
current_context = current_context.get(path_segment)
|
||||
else:
|
||||
current_context = getattr(current_context, path_segment)
|
||||
return current_context
|
||||
return
|
||||
span_context = span.get_span_context()
|
||||
if not span_context.trace_flags.sampled:
|
||||
# Avoid potentially costly traversal of config
|
||||
# options if the span will be dropped, anyway.
|
||||
return
|
||||
# Automatically derive attributes from the contents of the
|
||||
# config object. This ensures that all relevant parameters
|
||||
# are captured in the telemetry data (except for those
|
||||
# that are excluded via "exclude_keys"). Dynamic attributes (those
|
||||
# starting with "gcp.gen_ai." instead of simply "gen_ai.request.")
|
||||
# are filtered with the "allow_list" before inclusion in the span.
|
||||
attributes = flatten_dict(
|
||||
_to_dict(config),
|
||||
# A custom prefix is used, because the names/structure of the
|
||||
# configuration is likely to be specific to Google Gen AI SDK.
|
||||
key_prefix=GCP_GENAI_OPERATION_CONFIG,
|
||||
exclude_keys=[
|
||||
# System instruction can be overly long for a span attribute.
|
||||
# Additionally, it is recorded as an event (log), instead.
|
||||
"gcp.gen_ai.operation.config.system_instruction",
|
||||
],
|
||||
# Although a custom prefix is used by default, some of the attributes
|
||||
# are captured in common, standard, Semantic Conventions. For the
|
||||
# well-known properties whose values align with Semantic Conventions,
|
||||
# we ensure that the key name matches the standard SemConv name.
|
||||
rename_keys={
|
||||
# TODO: add more entries here as more semantic conventions are
|
||||
# generalized to cover more of the available config options.
|
||||
"gcp.gen_ai.operation.config.temperature": gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE,
|
||||
"gcp.gen_ai.operation.config.top_k": gen_ai_attributes.GEN_AI_REQUEST_TOP_K,
|
||||
"gcp.gen_ai.operation.config.top_p": gen_ai_attributes.GEN_AI_REQUEST_TOP_P,
|
||||
"gcp.gen_ai.operation.config.candidate_count": gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT,
|
||||
"gcp.gen_ai.operation.config.max_output_tokens": gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS,
|
||||
"gcp.gen_ai.operation.config.stop_sequences": gen_ai_attributes.GEN_AI_REQUEST_STOP_SEQUENCES,
|
||||
"gcp.gen_ai.operation.config.frequency_penalty": gen_ai_attributes.GEN_AI_REQUEST_FREQUENCY_PENALTY,
|
||||
"gcp.gen_ai.operation.config.presence_penalty": gen_ai_attributes.GEN_AI_REQUEST_PRESENCE_PENALTY,
|
||||
"gcp.gen_ai.operation.config.seed": gen_ai_attributes.GEN_AI_REQUEST_SEED,
|
||||
},
|
||||
)
|
||||
for key, value in attributes.items():
|
||||
if key.startswith(
|
||||
GCP_GENAI_OPERATION_CONFIG
|
||||
) and not allow_list.allowed(key):
|
||||
# The allowlist is used to control inclusion of the dynamic keys.
|
||||
continue
|
||||
span.set_attribute(key, value)
|
||||
|
||||
|
||||
def _get_response_property(response: GenerateContentResponse, path: str):
|
||||
|
@ -159,42 +209,27 @@ def _get_response_property(response: GenerateContentResponse, path: str):
|
|||
return current_context
|
||||
|
||||
|
||||
def _get_temperature(config: Optional[GenerateContentConfigOrDict]):
|
||||
return _get_config_property(config, "temperature")
|
||||
def _coerce_config_to_object(
|
||||
config: GenerateContentConfigOrDict,
|
||||
) -> GenerateContentConfig:
|
||||
if isinstance(config, GenerateContentConfig):
|
||||
return config
|
||||
# Input must be a dictionary; convert by invoking the constructor.
|
||||
return GenerateContentConfig(**config)
|
||||
|
||||
|
||||
def _get_top_k(config: Optional[GenerateContentConfigOrDict]):
|
||||
return _get_config_property(config, "top_k")
|
||||
|
||||
|
||||
def _get_top_p(config: Optional[GenerateContentConfigOrDict]):
|
||||
return _get_config_property(config, "top_p")
|
||||
|
||||
|
||||
# A map from define attributes to the function that can obtain
|
||||
# the relevant information from the request object.
|
||||
#
|
||||
# TODO: expand this to cover a larger set of the available
|
||||
# span attributes from GenAI semantic conventions.
|
||||
#
|
||||
# TODO: define semantic conventions for attributes that
|
||||
# are relevant for the Google GenAI SDK which are not
|
||||
# currently covered by the existing semantic conventions.
|
||||
#
|
||||
# See also: TODOS.md
|
||||
_SPAN_ATTRIBUTE_TO_CONFIG_EXTRACTOR = {
|
||||
gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE: _get_temperature,
|
||||
gen_ai_attributes.GEN_AI_REQUEST_TOP_K: _get_top_k,
|
||||
gen_ai_attributes.GEN_AI_REQUEST_TOP_P: _get_top_p,
|
||||
}
|
||||
|
||||
|
||||
def _to_dict(value: object):
|
||||
if isinstance(value, dict):
|
||||
return value
|
||||
if hasattr(value, "model_dump"):
|
||||
return value.model_dump()
|
||||
return json.loads(json.dumps(value))
|
||||
def _wrapped_config_with_tools(
|
||||
otel_wrapper: OTelWrapper,
|
||||
config: GenerateContentConfig,
|
||||
**kwargs,
|
||||
):
|
||||
if not config.tools:
|
||||
return config
|
||||
result = copy.copy(config)
|
||||
result.tools = [
|
||||
wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools
|
||||
]
|
||||
return result
|
||||
|
||||
|
||||
class _GenerateContentInstrumentationHelper:
|
||||
|
@ -203,6 +238,7 @@ class _GenerateContentInstrumentationHelper:
|
|||
models_object: Union[Models, AsyncModels],
|
||||
otel_wrapper: OTelWrapper,
|
||||
model: str,
|
||||
generate_content_config_key_allowlist: Optional[AllowList] = None,
|
||||
):
|
||||
self._start_time = time.time_ns()
|
||||
self._otel_wrapper = otel_wrapper
|
||||
|
@ -215,6 +251,20 @@ class _GenerateContentInstrumentationHelper:
|
|||
self._content_recording_enabled = is_content_recording_enabled()
|
||||
self._response_index = 0
|
||||
self._candidate_index = 0
|
||||
self._generate_content_config_key_allowlist = (
|
||||
generate_content_config_key_allowlist or AllowList()
|
||||
)
|
||||
|
||||
def wrapped_config(
|
||||
self, config: Optional[GenerateContentConfigOrDict]
|
||||
) -> Optional[GenerateContentConfig]:
|
||||
if config is None:
|
||||
return None
|
||||
return _wrapped_config_with_tools(
|
||||
self._otel_wrapper,
|
||||
_coerce_config_to_object(config),
|
||||
extra_span_attributes={"gen_ai.system": self._genai_system},
|
||||
)
|
||||
|
||||
def start_span_as_current_span(
|
||||
self, model_name, function_name, end_on_exit=True
|
||||
|
@ -237,13 +287,9 @@ class _GenerateContentInstrumentationHelper:
|
|||
config: Optional[GenerateContentConfigOrDict],
|
||||
):
|
||||
span = trace.get_current_span()
|
||||
for (
|
||||
attribute_key,
|
||||
extractor,
|
||||
) in _SPAN_ATTRIBUTE_TO_CONFIG_EXTRACTOR.items():
|
||||
attribute_value = extractor(config)
|
||||
if attribute_value is not None:
|
||||
span.set_attribute(attribute_key, attribute_value)
|
||||
_add_request_options_to_span(
|
||||
span, config, self._generate_content_config_key_allowlist
|
||||
)
|
||||
self._maybe_log_system_instruction(config=config)
|
||||
self._maybe_log_user_prompt(contents)
|
||||
|
||||
|
@ -252,6 +298,7 @@ class _GenerateContentInstrumentationHelper:
|
|||
# need to be reflected back into the span attributes.
|
||||
#
|
||||
# See also: TODOS.md.
|
||||
self._update_finish_reasons(response)
|
||||
self._maybe_update_token_counts(response)
|
||||
self._maybe_update_error_type(response)
|
||||
self._maybe_log_response(response)
|
||||
|
@ -275,6 +322,18 @@ class _GenerateContentInstrumentationHelper:
|
|||
self._record_token_usage_metric()
|
||||
self._record_duration_metric()
|
||||
|
||||
def _update_finish_reasons(self, response):
|
||||
if not response.candidates:
|
||||
return
|
||||
for candidate in response.candidates:
|
||||
finish_reason = candidate.finish_reason
|
||||
if finish_reason is None:
|
||||
continue
|
||||
finish_reason_str = finish_reason.name.lower().removeprefix(
|
||||
"finish_reason_"
|
||||
)
|
||||
self._finish_reasons_set.add(finish_reason_str)
|
||||
|
||||
def _maybe_update_token_counts(self, response: GenerateContentResponse):
|
||||
input_tokens = _get_response_property(
|
||||
response, "usage_metadata.prompt_token_count"
|
||||
|
@ -317,7 +376,12 @@ class _GenerateContentInstrumentationHelper:
|
|||
def _maybe_log_system_instruction(
|
||||
self, config: Optional[GenerateContentConfigOrDict] = None
|
||||
):
|
||||
system_instruction = _get_config_property(config, "system_instruction")
|
||||
system_instruction = None
|
||||
if config is not None:
|
||||
if isinstance(config, dict):
|
||||
system_instruction = config.get("system_instruction")
|
||||
else:
|
||||
system_instruction = config.system_instruction
|
||||
if not system_instruction:
|
||||
return
|
||||
attributes = {
|
||||
|
@ -499,7 +563,9 @@ class _GenerateContentInstrumentationHelper:
|
|||
|
||||
|
||||
def _create_instrumented_generate_content(
|
||||
snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
|
||||
snapshot: _MethodsSnapshot,
|
||||
otel_wrapper: OTelWrapper,
|
||||
generate_content_config_key_allowlist: Optional[AllowList] = None,
|
||||
):
|
||||
wrapped_func = snapshot.generate_content
|
||||
|
||||
|
@ -513,7 +579,10 @@ def _create_instrumented_generate_content(
|
|||
**kwargs: Any,
|
||||
) -> GenerateContentResponse:
|
||||
helper = _GenerateContentInstrumentationHelper(
|
||||
self, otel_wrapper, model
|
||||
self,
|
||||
otel_wrapper,
|
||||
model,
|
||||
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
|
||||
)
|
||||
with helper.start_span_as_current_span(
|
||||
model, "google.genai.Models.generate_content"
|
||||
|
@ -524,7 +593,7 @@ def _create_instrumented_generate_content(
|
|||
self,
|
||||
model=model,
|
||||
contents=contents,
|
||||
config=config,
|
||||
config=helper.wrapped_config(config),
|
||||
**kwargs,
|
||||
)
|
||||
helper.process_response(response)
|
||||
|
@ -539,7 +608,9 @@ def _create_instrumented_generate_content(
|
|||
|
||||
|
||||
def _create_instrumented_generate_content_stream(
|
||||
snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
|
||||
snapshot: _MethodsSnapshot,
|
||||
otel_wrapper: OTelWrapper,
|
||||
generate_content_config_key_allowlist: Optional[AllowList] = None,
|
||||
):
|
||||
wrapped_func = snapshot.generate_content_stream
|
||||
|
||||
|
@ -553,7 +624,10 @@ def _create_instrumented_generate_content_stream(
|
|||
**kwargs: Any,
|
||||
) -> Iterator[GenerateContentResponse]:
|
||||
helper = _GenerateContentInstrumentationHelper(
|
||||
self, otel_wrapper, model
|
||||
self,
|
||||
otel_wrapper,
|
||||
model,
|
||||
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
|
||||
)
|
||||
with helper.start_span_as_current_span(
|
||||
model, "google.genai.Models.generate_content_stream"
|
||||
|
@ -564,7 +638,7 @@ def _create_instrumented_generate_content_stream(
|
|||
self,
|
||||
model=model,
|
||||
contents=contents,
|
||||
config=config,
|
||||
config=helper.wrapped_config(config),
|
||||
**kwargs,
|
||||
):
|
||||
helper.process_response(response)
|
||||
|
@ -579,7 +653,9 @@ def _create_instrumented_generate_content_stream(
|
|||
|
||||
|
||||
def _create_instrumented_async_generate_content(
|
||||
snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
|
||||
snapshot: _MethodsSnapshot,
|
||||
otel_wrapper: OTelWrapper,
|
||||
generate_content_config_key_allowlist: Optional[AllowList] = None,
|
||||
):
|
||||
wrapped_func = snapshot.async_generate_content
|
||||
|
||||
|
@ -593,7 +669,10 @@ def _create_instrumented_async_generate_content(
|
|||
**kwargs: Any,
|
||||
) -> GenerateContentResponse:
|
||||
helper = _GenerateContentInstrumentationHelper(
|
||||
self, otel_wrapper, model
|
||||
self,
|
||||
otel_wrapper,
|
||||
model,
|
||||
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
|
||||
)
|
||||
with helper.start_span_as_current_span(
|
||||
model, "google.genai.AsyncModels.generate_content"
|
||||
|
@ -604,7 +683,7 @@ def _create_instrumented_async_generate_content(
|
|||
self,
|
||||
model=model,
|
||||
contents=contents,
|
||||
config=config,
|
||||
config=helper.wrapped_config(config),
|
||||
**kwargs,
|
||||
)
|
||||
helper.process_response(response)
|
||||
|
@ -619,8 +698,10 @@ def _create_instrumented_async_generate_content(
|
|||
|
||||
|
||||
# Disabling type checking because this is not yet implemented and tested fully.
|
||||
def _create_instrumented_async_generate_content_stream( # pyright: ignore
|
||||
snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
|
||||
def _create_instrumented_async_generate_content_stream( # type: ignore
|
||||
snapshot: _MethodsSnapshot,
|
||||
otel_wrapper: OTelWrapper,
|
||||
generate_content_config_key_allowlist: Optional[AllowList] = None,
|
||||
):
|
||||
wrapped_func = snapshot.async_generate_content_stream
|
||||
|
||||
|
@ -632,9 +713,12 @@ def _create_instrumented_async_generate_content_stream( # pyright: ignore
|
|||
contents: Union[ContentListUnion, ContentListUnionDict],
|
||||
config: Optional[GenerateContentConfigOrDict] = None,
|
||||
**kwargs: Any,
|
||||
) -> Awaitable[AsyncIterator[GenerateContentResponse]]: # pyright: ignore
|
||||
) -> Awaitable[AsyncIterator[GenerateContentResponse]]: # type: ignore
|
||||
helper = _GenerateContentInstrumentationHelper(
|
||||
self, otel_wrapper, model
|
||||
self,
|
||||
otel_wrapper,
|
||||
model,
|
||||
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
|
||||
)
|
||||
with helper.start_span_as_current_span(
|
||||
model,
|
||||
|
@ -647,7 +731,7 @@ def _create_instrumented_async_generate_content_stream( # pyright: ignore
|
|||
self,
|
||||
model=model,
|
||||
contents=contents,
|
||||
config=config,
|
||||
config=helper.wrapped_config(config),
|
||||
**kwargs,
|
||||
)
|
||||
except Exception as error: # pylint: disable=broad-exception-caught
|
||||
|
@ -678,20 +762,29 @@ def uninstrument_generate_content(snapshot: object):
|
|||
snapshot.restore()
|
||||
|
||||
|
||||
def instrument_generate_content(otel_wrapper: OTelWrapper) -> object:
|
||||
def instrument_generate_content(
|
||||
otel_wrapper: OTelWrapper,
|
||||
generate_content_config_key_allowlist: Optional[AllowList] = None,
|
||||
) -> object:
|
||||
snapshot = _MethodsSnapshot()
|
||||
Models.generate_content = _create_instrumented_generate_content(
|
||||
snapshot, otel_wrapper
|
||||
snapshot,
|
||||
otel_wrapper,
|
||||
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
|
||||
)
|
||||
Models.generate_content_stream = (
|
||||
_create_instrumented_generate_content_stream(snapshot, otel_wrapper)
|
||||
Models.generate_content_stream = _create_instrumented_generate_content_stream(
|
||||
snapshot,
|
||||
otel_wrapper,
|
||||
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
|
||||
)
|
||||
AsyncModels.generate_content = _create_instrumented_async_generate_content(
|
||||
snapshot, otel_wrapper
|
||||
snapshot,
|
||||
otel_wrapper,
|
||||
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
|
||||
)
|
||||
AsyncModels.generate_content_stream = (
|
||||
_create_instrumented_async_generate_content_stream(
|
||||
snapshot, otel_wrapper
|
||||
)
|
||||
AsyncModels.generate_content_stream = _create_instrumented_async_generate_content_stream(
|
||||
snapshot,
|
||||
otel_wrapper,
|
||||
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
|
||||
)
|
||||
return snapshot
|
||||
|
|
|
@ -12,13 +12,14 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from typing import Any, Collection
|
||||
from typing import Any, Collection, Optional
|
||||
|
||||
from opentelemetry._events import get_event_logger_provider
|
||||
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
||||
from opentelemetry.metrics import get_meter_provider
|
||||
from opentelemetry.trace import get_tracer_provider
|
||||
|
||||
from .allowlist_util import AllowList
|
||||
from .generate_content import (
|
||||
instrument_generate_content,
|
||||
uninstrument_generate_content,
|
||||
|
@ -27,8 +28,17 @@ from .otel_wrapper import OTelWrapper
|
|||
|
||||
|
||||
class GoogleGenAiSdkInstrumentor(BaseInstrumentor):
|
||||
def __init__(self):
|
||||
def __init__(
|
||||
self, generate_content_config_key_allowlist: Optional[AllowList] = None
|
||||
):
|
||||
self._generate_content_snapshot = None
|
||||
self._generate_content_config_key_allowlist = (
|
||||
generate_content_config_key_allowlist
|
||||
or AllowList.from_env(
|
||||
"OTEL_GOOGLE_GENAI_GENERATE_CONTENT_CONFIG_INCLUDES",
|
||||
excludes_env_var="OTEL_GOOGLE_GENAI_GENERATE_CONTENT_CONFIG_EXCLUDES",
|
||||
)
|
||||
)
|
||||
|
||||
# Inherited, abstract function from 'BaseInstrumentor'. Even though 'self' is
|
||||
# not used in the definition, a method is required per the API contract.
|
||||
|
@ -49,7 +59,8 @@ class GoogleGenAiSdkInstrumentor(BaseInstrumentor):
|
|||
meter_provider=meter_provider,
|
||||
)
|
||||
self._generate_content_snapshot = instrument_generate_content(
|
||||
otel_wrapper
|
||||
otel_wrapper,
|
||||
generate_content_config_key_allowlist=self._generate_content_config_key_allowlist,
|
||||
)
|
||||
|
||||
def _uninstrument(self, **kwargs: Any):
|
||||
|
|
|
@ -0,0 +1,15 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
_instruments = ("google-genai >= 1.0.0",)
|
|
@ -0,0 +1,220 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import functools
|
||||
import inspect
|
||||
import json
|
||||
from typing import Any, Callable, Optional, Union
|
||||
|
||||
from google.genai.types import (
|
||||
ToolListUnion,
|
||||
ToolListUnionDict,
|
||||
ToolOrDict,
|
||||
)
|
||||
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.semconv._incubating.attributes import (
|
||||
code_attributes,
|
||||
)
|
||||
|
||||
from .flags import is_content_recording_enabled
|
||||
from .otel_wrapper import OTelWrapper
|
||||
|
||||
ToolFunction = Callable[..., Any]
|
||||
|
||||
|
||||
def _is_primitive(value):
|
||||
return isinstance(value, (str, int, bool, float))
|
||||
|
||||
|
||||
def _to_otel_value(python_value):
|
||||
"""Coerces parameters to something representable with Open Telemetry."""
|
||||
if python_value is None or _is_primitive(python_value):
|
||||
return python_value
|
||||
if isinstance(python_value, list):
|
||||
return [_to_otel_value(x) for x in python_value]
|
||||
if isinstance(python_value, dict):
|
||||
return {
|
||||
key: _to_otel_value(val) for (key, val) in python_value.items()
|
||||
}
|
||||
if hasattr(python_value, "model_dump"):
|
||||
return python_value.model_dump()
|
||||
if hasattr(python_value, "__dict__"):
|
||||
return _to_otel_value(python_value.__dict__)
|
||||
return repr(python_value)
|
||||
|
||||
|
||||
def _is_homogenous_primitive_list(value):
|
||||
if not isinstance(value, list):
|
||||
return False
|
||||
if not value:
|
||||
return True
|
||||
if not _is_primitive(value[0]):
|
||||
return False
|
||||
first_type = type(value[0])
|
||||
for entry in value[1:]:
|
||||
if not isinstance(entry, first_type):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def _to_otel_attribute(python_value):
|
||||
otel_value = _to_otel_value(python_value)
|
||||
if _is_primitive(otel_value) or _is_homogenous_primitive_list(otel_value):
|
||||
return otel_value
|
||||
return json.dumps(otel_value)
|
||||
|
||||
|
||||
def _create_function_span_name(wrapped_function):
|
||||
"""Constructs the span name for a given local function tool call."""
|
||||
function_name = wrapped_function.__name__
|
||||
return f"execute_tool {function_name}"
|
||||
|
||||
|
||||
def _create_function_span_attributes(
|
||||
wrapped_function, function_args, function_kwargs, extra_span_attributes
|
||||
):
|
||||
"""Creates the attributes for a tool call function span."""
|
||||
result = {}
|
||||
if extra_span_attributes:
|
||||
result.update(extra_span_attributes)
|
||||
result["gen_ai.operation.name"] = "execute_tool"
|
||||
result["gen_ai.tool.name"] = wrapped_function.__name__
|
||||
if wrapped_function.__doc__:
|
||||
result["gen_ai.tool.description"] = wrapped_function.__doc__
|
||||
result[code_attributes.CODE_FUNCTION_NAME] = wrapped_function.__name__
|
||||
result["code.module"] = wrapped_function.__module__
|
||||
result["code.args.positional.count"] = len(function_args)
|
||||
result["code.args.keyword.count"] = len(function_kwargs)
|
||||
return result
|
||||
|
||||
|
||||
def _record_function_call_argument(
|
||||
span, param_name, param_value, include_values
|
||||
):
|
||||
attribute_prefix = f"code.function.parameters.{param_name}"
|
||||
type_attribute = f"{attribute_prefix}.type"
|
||||
span.set_attribute(type_attribute, type(param_value).__name__)
|
||||
if include_values:
|
||||
value_attribute = f"{attribute_prefix}.value"
|
||||
span.set_attribute(value_attribute, _to_otel_attribute(param_value))
|
||||
|
||||
|
||||
def _record_function_call_arguments(
|
||||
otel_wrapper, wrapped_function, function_args, function_kwargs
|
||||
):
|
||||
"""Records the details about a function invocation as span attributes."""
|
||||
include_values = is_content_recording_enabled()
|
||||
span = trace.get_current_span()
|
||||
signature = inspect.signature(wrapped_function)
|
||||
params = list(signature.parameters.values())
|
||||
for index, entry in enumerate(function_args):
|
||||
param_name = f"args[{index}]"
|
||||
if index < len(params):
|
||||
param_name = params[index].name
|
||||
_record_function_call_argument(span, param_name, entry, include_values)
|
||||
for key, value in function_kwargs.items():
|
||||
_record_function_call_argument(span, key, value, include_values)
|
||||
|
||||
|
||||
def _record_function_call_result(otel_wrapper, wrapped_function, result):
|
||||
"""Records the details about a function result as span attributes."""
|
||||
include_values = is_content_recording_enabled()
|
||||
span = trace.get_current_span()
|
||||
span.set_attribute("code.function.return.type", type(result).__name__)
|
||||
if include_values:
|
||||
span.set_attribute(
|
||||
"code.function.return.value", _to_otel_attribute(result)
|
||||
)
|
||||
|
||||
|
||||
def _wrap_sync_tool_function(
|
||||
tool_function: ToolFunction,
|
||||
otel_wrapper: OTelWrapper,
|
||||
extra_span_attributes: Optional[dict[str, str]] = None,
|
||||
**unused_kwargs,
|
||||
):
|
||||
@functools.wraps(tool_function)
|
||||
def wrapped_function(*args, **kwargs):
|
||||
span_name = _create_function_span_name(tool_function)
|
||||
attributes = _create_function_span_attributes(
|
||||
tool_function, args, kwargs, extra_span_attributes
|
||||
)
|
||||
with otel_wrapper.start_as_current_span(
|
||||
span_name, attributes=attributes
|
||||
):
|
||||
_record_function_call_arguments(
|
||||
otel_wrapper, tool_function, args, kwargs
|
||||
)
|
||||
result = tool_function(*args, **kwargs)
|
||||
_record_function_call_result(otel_wrapper, tool_function, result)
|
||||
return result
|
||||
|
||||
return wrapped_function
|
||||
|
||||
|
||||
def _wrap_async_tool_function(
|
||||
tool_function: ToolFunction,
|
||||
otel_wrapper: OTelWrapper,
|
||||
extra_span_attributes: Optional[dict[str, str]] = None,
|
||||
**unused_kwargs,
|
||||
):
|
||||
@functools.wraps(tool_function)
|
||||
async def wrapped_function(*args, **kwargs):
|
||||
span_name = _create_function_span_name(tool_function)
|
||||
attributes = _create_function_span_attributes(
|
||||
tool_function, args, kwargs, extra_span_attributes
|
||||
)
|
||||
with otel_wrapper.start_as_current_span(
|
||||
span_name, attributes=attributes
|
||||
):
|
||||
_record_function_call_arguments(
|
||||
otel_wrapper, tool_function, args, kwargs
|
||||
)
|
||||
result = await tool_function(*args, **kwargs)
|
||||
_record_function_call_result(otel_wrapper, tool_function, result)
|
||||
return result
|
||||
|
||||
return wrapped_function
|
||||
|
||||
|
||||
def _wrap_tool_function(
|
||||
tool_function: ToolFunction, otel_wrapper: OTelWrapper, **kwargs
|
||||
):
|
||||
if inspect.iscoroutinefunction(tool_function):
|
||||
return _wrap_async_tool_function(tool_function, otel_wrapper, **kwargs)
|
||||
return _wrap_sync_tool_function(tool_function, otel_wrapper, **kwargs)
|
||||
|
||||
|
||||
def wrapped(
|
||||
tool_or_tools: Optional[
|
||||
Union[ToolFunction, ToolOrDict, ToolListUnion, ToolListUnionDict]
|
||||
],
|
||||
otel_wrapper: OTelWrapper,
|
||||
**kwargs,
|
||||
):
|
||||
if tool_or_tools is None:
|
||||
return None
|
||||
if isinstance(tool_or_tools, list):
|
||||
return [
|
||||
wrapped(item, otel_wrapper, **kwargs) for item in tool_or_tools
|
||||
]
|
||||
if isinstance(tool_or_tools, dict):
|
||||
return {
|
||||
key: wrapped(value, otel_wrapper, **kwargs)
|
||||
for (key, value) in tool_or_tools.items()
|
||||
}
|
||||
if callable(tool_or_tools):
|
||||
return _wrap_tool_function(tool_or_tools, otel_wrapper, **kwargs)
|
||||
return tool_or_tools
|
|
@ -17,4 +17,4 @@
|
|||
# This version should stay below "1.0" until the fundamentals
|
||||
# in "TODOS.md" have been addressed. Please revisit the TODOs
|
||||
# listed there before bumping to a stable version.
|
||||
__version__ = "0.2b0.dev"
|
||||
__version__ = "0.4b0.dev"
|
||||
|
|
|
@ -0,0 +1,20 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import google.auth.credentials
|
||||
|
||||
|
||||
class FakeCredentials(google.auth.credentials.AnonymousCredentials):
|
||||
def refresh(self, request):
|
||||
pass
|
|
@ -17,44 +17,39 @@ import unittest
|
|||
|
||||
import google.genai
|
||||
|
||||
from .auth import FakeCredentials
|
||||
from .instrumentation_context import InstrumentationContext
|
||||
from .otel_mocker import OTelMocker
|
||||
from .requests_mocker import RequestsMocker
|
||||
|
||||
|
||||
class _FakeCredentials(google.auth.credentials.AnonymousCredentials):
|
||||
def refresh(self, request):
|
||||
pass
|
||||
|
||||
|
||||
class TestCase(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self._otel = OTelMocker()
|
||||
self._otel.install()
|
||||
self._requests = RequestsMocker()
|
||||
self._requests.install()
|
||||
self._instrumentation_context = None
|
||||
self._api_key = "test-api-key"
|
||||
self._project = "test-project"
|
||||
self._location = "test-location"
|
||||
self._client = None
|
||||
self._uses_vertex = False
|
||||
self._credentials = _FakeCredentials()
|
||||
self._credentials = FakeCredentials()
|
||||
self._instrumentor_args = {}
|
||||
|
||||
def _lazy_init(self):
|
||||
self._instrumentation_context = InstrumentationContext()
|
||||
self._instrumentation_context = InstrumentationContext(
|
||||
**self._instrumentor_args
|
||||
)
|
||||
self._instrumentation_context.install()
|
||||
|
||||
def set_instrumentor_constructor_kwarg(self, key, value):
|
||||
self._instrumentor_args[key] = value
|
||||
|
||||
@property
|
||||
def client(self):
|
||||
if self._client is None:
|
||||
self._client = self._create_client()
|
||||
return self._client
|
||||
|
||||
@property
|
||||
def requests(self):
|
||||
return self._requests
|
||||
|
||||
@property
|
||||
def otel(self):
|
||||
return self._otel
|
||||
|
@ -62,6 +57,15 @@ class TestCase(unittest.TestCase):
|
|||
def set_use_vertex(self, use_vertex):
|
||||
self._uses_vertex = use_vertex
|
||||
|
||||
def reset_client(self):
|
||||
self._client = None
|
||||
|
||||
def reset_instrumentation(self):
|
||||
if self._instrumentation_context is None:
|
||||
return
|
||||
self._instrumentation_context.uninstall()
|
||||
self._instrumentation_context = None
|
||||
|
||||
def _create_client(self):
|
||||
self._lazy_init()
|
||||
if self._uses_vertex:
|
||||
|
@ -72,10 +76,9 @@ class TestCase(unittest.TestCase):
|
|||
location=self._location,
|
||||
credentials=self._credentials,
|
||||
)
|
||||
return google.genai.Client(api_key=self._api_key)
|
||||
return google.genai.Client(vertexai=False, api_key=self._api_key)
|
||||
|
||||
def tearDown(self):
|
||||
if self._instrumentation_context is not None:
|
||||
self._instrumentation_context.uninstall()
|
||||
self._requests.uninstall()
|
||||
self._otel.uninstall()
|
||||
|
|
|
@ -18,8 +18,8 @@ from opentelemetry.instrumentation.google_genai import (
|
|||
|
||||
|
||||
class InstrumentationContext:
|
||||
def __init__(self):
|
||||
self._instrumentor = GoogleGenAiSdkInstrumentor()
|
||||
def __init__(self, **kwargs):
|
||||
self._instrumentor = GoogleGenAiSdkInstrumentor(**kwargs)
|
||||
|
||||
def install(self):
|
||||
self._instrumentor.instrument()
|
||||
|
|
|
@ -170,6 +170,10 @@ class OTelMocker:
|
|||
span is not None
|
||||
), f'Could not find span named "{name}"; finished spans: {finished_spans}'
|
||||
|
||||
def assert_does_not_have_span_named(self, name):
|
||||
span = self.get_span_named(name)
|
||||
assert span is None, f"Found unexpected span named {name}"
|
||||
|
||||
def get_event_named(self, event_name):
|
||||
for event in self.get_finished_logs():
|
||||
event_name_attr = event.attributes.get("event.name")
|
||||
|
|
|
@ -1,238 +0,0 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This file defines a "RequestMocker" that facilities mocking the "requests"
|
||||
# API. There are a few reasons that we use this approach to testing:
|
||||
#
|
||||
# 1. Security - although "vcrpy" provides a means of filtering data,
|
||||
# it can be error-prone; use of this solution risks exposing API keys,
|
||||
# auth tokens, etc. It can also inadvertently record fields that are
|
||||
# visibility-restricted (such as fields that are returned and available
|
||||
# when recording using privileged API keys where such fields would not
|
||||
# ordinarily be returned to users with non-privileged API keys).
|
||||
#
|
||||
# 2. Reproducibility - although the tests may be reproducible once the
|
||||
# recording is present, updating the recording often has external
|
||||
# dependencies that may be difficult to reproduce.
|
||||
#
|
||||
# 3. Costs - there are both time costs and monetary costs to the external
|
||||
# dependencies required for a record/replay solution.
|
||||
#
|
||||
# Because they APIs that need to be mocked are simple enough and well documented
|
||||
# enough, it seems approachable to mock the requests library, instead.
|
||||
|
||||
import copy
|
||||
import functools
|
||||
import http.client
|
||||
import io
|
||||
import json
|
||||
from typing import Optional
|
||||
|
||||
import requests
|
||||
import requests.sessions
|
||||
|
||||
|
||||
class RequestsCallArgs:
|
||||
def __init__(
|
||||
self,
|
||||
session: requests.sessions.Session,
|
||||
request: requests.PreparedRequest,
|
||||
**kwargs,
|
||||
):
|
||||
self._session = session
|
||||
self._request = request
|
||||
self._kwargs = kwargs
|
||||
|
||||
@property
|
||||
def session(self):
|
||||
return self._session
|
||||
|
||||
@property
|
||||
def request(self):
|
||||
return self._request
|
||||
|
||||
@property
|
||||
def kwargs(self):
|
||||
return self._kwargs
|
||||
|
||||
|
||||
class RequestsCall:
|
||||
def __init__(self, args: RequestsCallArgs, response_generator):
|
||||
self._args = args
|
||||
self._response_generator = response_generator
|
||||
|
||||
@property
|
||||
def args(self):
|
||||
return self._args
|
||||
|
||||
@property
|
||||
def response(self):
|
||||
return self._response_generator(self._args)
|
||||
|
||||
|
||||
def _return_error_status(
|
||||
args: RequestsCallArgs, status_code: int, reason: Optional[str] = None
|
||||
):
|
||||
result = requests.Response()
|
||||
result.url = args.request.url
|
||||
result.status_code = status_code
|
||||
result.reason = reason or http.client.responses.get(status_code)
|
||||
result.request = args.request
|
||||
return result
|
||||
|
||||
|
||||
def _return_404(args: RequestsCallArgs):
|
||||
return _return_error_status(args, 404, "Not Found")
|
||||
|
||||
|
||||
def _to_response_generator(response):
|
||||
if response is None:
|
||||
raise ValueError("response must not be None")
|
||||
if isinstance(response, int):
|
||||
return lambda args: _return_error_status(args, response)
|
||||
if isinstance(response, requests.Response):
|
||||
|
||||
def generate_response_from_response(args):
|
||||
new_response = copy.deepcopy(response)
|
||||
new_response.request = args.request
|
||||
new_response.url = args.request.url
|
||||
return new_response
|
||||
|
||||
return generate_response_from_response
|
||||
if isinstance(response, dict):
|
||||
|
||||
def generate_response_from_dict(args):
|
||||
result = requests.Response()
|
||||
result.status_code = 200
|
||||
result.headers["content-type"] = "application/json"
|
||||
result.encoding = "utf-8"
|
||||
result.raw = io.BytesIO(json.dumps(response).encode())
|
||||
return result
|
||||
|
||||
return generate_response_from_dict
|
||||
raise ValueError(f"Unsupported response type: {type(response)}")
|
||||
|
||||
|
||||
def _to_stream_response_generator(response_generators):
|
||||
if len(response_generators) == 1:
|
||||
return response_generators[0]
|
||||
|
||||
def combined_generator(args):
|
||||
first_response = response_generators[0](args)
|
||||
if first_response.status_code != 200:
|
||||
return first_response
|
||||
result = requests.Response()
|
||||
result.status_code = 200
|
||||
result.headers["content-type"] = "application/json"
|
||||
result.encoding = "utf-8"
|
||||
result.headers["transfer-encoding"] = "chunked"
|
||||
contents = []
|
||||
for generator in response_generators:
|
||||
response = generator(args)
|
||||
if response.status_code != 200:
|
||||
continue
|
||||
response_json = response.json()
|
||||
response_json_str = json.dumps(response_json)
|
||||
contents.append(f"data: {response_json_str}")
|
||||
contents_str = "\r\n".join(contents)
|
||||
full_contents = f"{contents_str}\r\n\r\n"
|
||||
result.raw = io.BytesIO(full_contents.encode())
|
||||
return result
|
||||
|
||||
return combined_generator
|
||||
|
||||
|
||||
class RequestsMocker:
|
||||
def __init__(self):
|
||||
self._original_send = requests.sessions.Session.send
|
||||
self._calls = []
|
||||
self._handlers = []
|
||||
|
||||
def install(self):
|
||||
@functools.wraps(requests.sessions.Session.send)
|
||||
def replacement_send(
|
||||
s: requests.sessions.Session,
|
||||
request: requests.PreparedRequest,
|
||||
**kwargs,
|
||||
):
|
||||
return self._do_send(s, request, **kwargs)
|
||||
|
||||
requests.sessions.Session.send = replacement_send
|
||||
|
||||
def uninstall(self):
|
||||
requests.sessions.Session.send = self._original_send
|
||||
|
||||
def reset(self):
|
||||
self._calls = []
|
||||
self._handlers = []
|
||||
|
||||
def add_response(self, response, if_matches=None):
|
||||
self._handlers.append((if_matches, _to_response_generator(response)))
|
||||
|
||||
@property
|
||||
def calls(self):
|
||||
return self._calls
|
||||
|
||||
def _do_send(
|
||||
self,
|
||||
session: requests.sessions.Session,
|
||||
request: requests.PreparedRequest,
|
||||
**kwargs,
|
||||
):
|
||||
stream = kwargs.get("stream", False)
|
||||
if not stream:
|
||||
return self._do_send_non_streaming(session, request, **kwargs)
|
||||
return self._do_send_streaming(session, request, **kwargs)
|
||||
|
||||
def _do_send_streaming(
|
||||
self,
|
||||
session: requests.sessions.Session,
|
||||
request: requests.PreparedRequest,
|
||||
**kwargs,
|
||||
):
|
||||
args = RequestsCallArgs(session, request, **kwargs)
|
||||
response_generators = []
|
||||
for matcher, response_generator in self._handlers:
|
||||
if matcher is None:
|
||||
response_generators.append(response_generator)
|
||||
elif matcher(args):
|
||||
response_generators.append(response_generator)
|
||||
if not response_generators:
|
||||
response_generators.append(_return_404)
|
||||
response_generator = _to_stream_response_generator(response_generators)
|
||||
call = RequestsCall(args, response_generator)
|
||||
result = call.response
|
||||
self._calls.append(call)
|
||||
return result
|
||||
|
||||
def _do_send_non_streaming(
|
||||
self,
|
||||
session: requests.sessions.Session,
|
||||
request: requests.PreparedRequest,
|
||||
**kwargs,
|
||||
):
|
||||
args = RequestsCallArgs(session, request, **kwargs)
|
||||
response_generator = self._lookup_response_generator(args)
|
||||
call = RequestsCall(args, response_generator)
|
||||
result = call.response
|
||||
self._calls.append(call)
|
||||
return result
|
||||
|
||||
def _lookup_response_generator(self, args: RequestsCallArgs):
|
||||
for matcher, response_generator in self._handlers:
|
||||
if matcher is None:
|
||||
return response_generator
|
||||
if matcher(args):
|
||||
return response_generator
|
||||
return _return_404
|
|
@ -0,0 +1,163 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
import unittest.mock
|
||||
|
||||
from google.genai.models import AsyncModels, Models
|
||||
|
||||
from ..common.base import TestCase as CommonTestCaseBase
|
||||
from .util import convert_to_response, create_response
|
||||
|
||||
|
||||
# Helper used in "_install_mocks" below.
|
||||
def _wrap_output(mock_generate_content):
|
||||
def _wrapped(*args, **kwargs):
|
||||
return convert_to_response(mock_generate_content(*args, **kwargs))
|
||||
|
||||
return _wrapped
|
||||
|
||||
|
||||
# Helper used in "_install_mocks" below.
|
||||
def _wrap_output_stream(mock_generate_content_stream):
|
||||
def _wrapped(*args, **kwargs):
|
||||
for output in mock_generate_content_stream(*args, **kwargs):
|
||||
yield convert_to_response(output)
|
||||
|
||||
return _wrapped
|
||||
|
||||
|
||||
# Helper used in "_install_mocks" below.
|
||||
def _async_wrapper(mock_generate_content):
|
||||
async def _wrapped(*args, **kwargs):
|
||||
return mock_generate_content(*args, **kwargs)
|
||||
|
||||
return _wrapped
|
||||
|
||||
|
||||
# Helper used in "_install_mocks" below.
|
||||
def _async_stream_wrapper(mock_generate_content_stream):
|
||||
async def _wrapped(*args, **kwargs):
|
||||
async def _internal_generator():
|
||||
for result in mock_generate_content_stream(*args, **kwargs):
|
||||
yield result
|
||||
|
||||
return _internal_generator()
|
||||
|
||||
return _wrapped
|
||||
|
||||
|
||||
class TestCase(CommonTestCaseBase):
|
||||
# The "setUp" function is defined by "unittest.TestCase" and thus
|
||||
# this name must be used. Uncertain why pylint doesn't seem to
|
||||
# recognize that this is a unit test class for which this is inherited.
|
||||
def setUp(self): # pylint: disable=invalid-name
|
||||
super().setUp()
|
||||
if self.__class__ == TestCase:
|
||||
raise unittest.SkipTest("Skipping testcase base.")
|
||||
self._generate_content_mock = None
|
||||
self._generate_content_stream_mock = None
|
||||
self._original_generate_content = Models.generate_content
|
||||
self._original_generate_content_stream = Models.generate_content_stream
|
||||
self._original_async_generate_content = AsyncModels.generate_content
|
||||
self._original_async_generate_content_stream = (
|
||||
AsyncModels.generate_content_stream
|
||||
)
|
||||
self._responses = []
|
||||
self._response_index = 0
|
||||
|
||||
@property
|
||||
def mock_generate_content(self):
|
||||
if self._generate_content_mock is None:
|
||||
self._create_and_install_mocks()
|
||||
return self._generate_content_mock
|
||||
|
||||
@property
|
||||
def mock_generate_content_stream(self):
|
||||
if self._generate_content_stream_mock is None:
|
||||
self._create_and_install_mocks()
|
||||
return self._generate_content_stream_mock
|
||||
|
||||
def configure_valid_response(self, **kwargs):
|
||||
self._create_and_install_mocks()
|
||||
response = create_response(**kwargs)
|
||||
self._responses.append(response)
|
||||
|
||||
def _create_and_install_mocks(self):
|
||||
if self._generate_content_mock is not None:
|
||||
return
|
||||
self.reset_client()
|
||||
self.reset_instrumentation()
|
||||
self._generate_content_mock = self._create_nonstream_mock()
|
||||
self._generate_content_stream_mock = self._create_stream_mock()
|
||||
self._install_mocks()
|
||||
|
||||
def _create_nonstream_mock(self):
|
||||
mock = unittest.mock.MagicMock()
|
||||
|
||||
def _default_impl(*args, **kwargs):
|
||||
if not self._responses:
|
||||
return create_response(text="Some response")
|
||||
index = self._response_index % len(self._responses)
|
||||
result = self._responses[index]
|
||||
self._response_index += 1
|
||||
return result
|
||||
|
||||
mock.side_effect = _default_impl
|
||||
return mock
|
||||
|
||||
def _create_stream_mock(self):
|
||||
mock = unittest.mock.MagicMock()
|
||||
|
||||
def _default_impl(*args, **kwargs):
|
||||
for response in self._responses:
|
||||
yield response
|
||||
|
||||
mock.side_effect = _default_impl
|
||||
return mock
|
||||
|
||||
def _install_mocks(self):
|
||||
output_wrapped = _wrap_output(self._generate_content_mock)
|
||||
output_wrapped_stream = _wrap_output_stream(
|
||||
self._generate_content_stream_mock
|
||||
)
|
||||
Models.generate_content = output_wrapped
|
||||
Models.generate_content_stream = output_wrapped_stream
|
||||
AsyncModels.generate_content = _async_wrapper(output_wrapped)
|
||||
AsyncModels.generate_content_stream = _async_stream_wrapper(
|
||||
output_wrapped_stream
|
||||
)
|
||||
|
||||
def tearDown(self):
|
||||
super().tearDown()
|
||||
if self._generate_content_mock is None:
|
||||
assert Models.generate_content == self._original_generate_content
|
||||
assert (
|
||||
Models.generate_content_stream
|
||||
== self._original_generate_content_stream
|
||||
)
|
||||
assert (
|
||||
AsyncModels.generate_content
|
||||
== self._original_async_generate_content
|
||||
)
|
||||
assert (
|
||||
AsyncModels.generate_content_stream
|
||||
== self._original_async_generate_content_stream
|
||||
)
|
||||
Models.generate_content = self._original_generate_content
|
||||
Models.generate_content_stream = self._original_generate_content_stream
|
||||
AsyncModels.generate_content = self._original_async_generate_content
|
||||
AsyncModels.generate_content_stream = (
|
||||
self._original_async_generate_content_stream
|
||||
)
|
|
@ -0,0 +1,94 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
|
||||
response:
|
||||
body:
|
||||
string: |-
|
||||
{
|
||||
"candidates": [
|
||||
{
|
||||
"content": {
|
||||
"role": "model",
|
||||
"parts": [
|
||||
{
|
||||
"text": "No more dark, inscrutable ways,\nTo trace a request through hazy days.\nOpen Telemetry, a beacon bright,\nIlluminates the path, both day and night.\n\nFrom metrics gathered, a clear display,\nOf latency's dance, and errors' sway.\nTraces unwind, a silken thread,\nShowing the journey, from start to head.\n\nLogs interweave, a richer hue,\nContextual clues, for me and you.\nNo vendor lock-in, a freedom's call,\nTo choose your tools, to stand up tall.\n\nExporters aplenty, a varied choice,\nTo send your data, amplify your voice.\nJaeger, Zipkin, Prometheus' might,\nAll integrate, a glorious sight.\n\nWith spans and attributes, a detailed scene,\nOf how your system works, both sleek and keen.\nPerformance bottlenecks, now laid bare,\nOpen Telemetry, beyond compare.\n\nSo embrace the light, let darkness flee,\nWith Open Telemetry, set your systems free.\nObserve, and learn, and optimize with grace,\nA brighter future, in this digital space.\n"
|
||||
}
|
||||
]
|
||||
},
|
||||
"finishReason": "STOP",
|
||||
"avgLogprobs": -0.3303731600443522
|
||||
}
|
||||
],
|
||||
"usageMetadata": {
|
||||
"promptTokenCount": 8,
|
||||
"candidatesTokenCount": 240,
|
||||
"totalTokenCount": 248,
|
||||
"promptTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 8
|
||||
}
|
||||
],
|
||||
"candidatesTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 240
|
||||
}
|
||||
]
|
||||
},
|
||||
"modelVersion": "gemini-1.5-flash-002",
|
||||
"createTime": "2025-03-07T22:19:18.083091Z",
|
||||
"responseId": "5nDLZ5OJBdyY3NoPiZGx0Ag"
|
||||
}
|
||||
headers:
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,94 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
|
||||
response:
|
||||
body:
|
||||
string: |-
|
||||
{
|
||||
"candidates": [
|
||||
{
|
||||
"content": {
|
||||
"role": "model",
|
||||
"parts": [
|
||||
{
|
||||
"text": "No more dark logs, a cryptic, hidden trace,\nOf failing systems, lost in time and space.\nOpenTelemetry, a beacon shining bright,\nIlluminating paths, both dark and light.\n\nFrom microservices, a sprawling, tangled mesh,\nTo monolithic beasts, put to the test,\nIt gathers traces, spans, and metrics too,\nA holistic view, for me and you.\n\nWith signals clear, from every single node,\nPerformance bottlenecks, instantly bestowed.\nDistributed tracing, paints a vivid scene,\nWhere latency lurks, and slowdowns intervene.\n\nExporters rise, to send the data forth,\nTo dashboards grand, of proven, measured worth.\nPrometheus, Grafana, Jaeger, fluent streams,\nVisualizing insights, fulfilling data dreams.\n\nFrom Jaeger's diagrams, a branching, flowing art,\nTo Grafana's charts, that play a vital part,\nThe mysteries unravel, hidden deep inside,\nWhere errors slumber, and slow responses hide.\n\nSo hail OpenTelemetry, a gift to all who code,\nA brighter future, on a well-lit road.\nNo more guesswork, no more fruitless chase,\nJust clear observability, in time and space.\n"
|
||||
}
|
||||
]
|
||||
},
|
||||
"finishReason": "STOP",
|
||||
"avgLogprobs": -0.45532724261283875
|
||||
}
|
||||
],
|
||||
"usageMetadata": {
|
||||
"promptTokenCount": 8,
|
||||
"candidatesTokenCount": 256,
|
||||
"totalTokenCount": 264,
|
||||
"promptTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 8
|
||||
}
|
||||
],
|
||||
"candidatesTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 256
|
||||
}
|
||||
]
|
||||
},
|
||||
"modelVersion": "gemini-1.5-flash-002",
|
||||
"createTime": "2025-03-07T22:19:15.268428Z",
|
||||
"responseId": "43DLZ4yxEM6F3NoPzaTkiQU"
|
||||
}
|
||||
headers:
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,94 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
|
||||
response:
|
||||
body:
|
||||
string: |-
|
||||
{
|
||||
"candidates": [
|
||||
{
|
||||
"content": {
|
||||
"role": "model",
|
||||
"parts": [
|
||||
{
|
||||
"text": "No more dark, mysterious traces,\nNo more guessing, in empty spaces.\nOpenTelemetry's light now shines,\nIlluminating all our designs.\n\nFrom microservices, small and fleet,\nTo monolithic beasts, hard to beat,\nIt weaves a net, both fine and strong,\nWhere metrics flow, where logs belong.\n\nTraces dance, a vibrant hue,\nShowing journeys, old and new.\nSpans unfold, a story told,\nOf requests handled, brave and bold.\n\nMetrics hum, a steady beat,\nLatency, errors, can't be beat.\nDistribution charts, a clear display,\nGuiding us along the way.\n\nLogs provide a detailed view,\nOf what happened, me and you.\nContext rich, with helpful clues,\nDebugging woes, it quickly subdues.\n\nWith exporters wise, a thoughtful choice,\nTo Prometheus, Jaeger, or Zipkin's voice,\nOur data flows, a precious stream,\nReal-time insights, a waking dream.\n\nSo hail to OpenTelemetry's might,\nBringing clarity to our darkest night.\nObservability's champion, bold and true,\nA brighter future, for me and you.\n"
|
||||
}
|
||||
]
|
||||
},
|
||||
"finishReason": "STOP",
|
||||
"avgLogprobs": -0.4071464086238575
|
||||
}
|
||||
],
|
||||
"usageMetadata": {
|
||||
"promptTokenCount": 8,
|
||||
"candidatesTokenCount": 253,
|
||||
"totalTokenCount": 261,
|
||||
"promptTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 8
|
||||
}
|
||||
],
|
||||
"candidatesTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 253
|
||||
}
|
||||
]
|
||||
},
|
||||
"modelVersion": "gemini-1.5-flash-002",
|
||||
"createTime": "2025-03-07T22:19:12.443989Z",
|
||||
"responseId": "4HDLZ9WMG6SK698Pr5uZ2Qw"
|
||||
}
|
||||
headers:
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,94 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
|
||||
response:
|
||||
body:
|
||||
string: |-
|
||||
{
|
||||
"candidates": [
|
||||
{
|
||||
"content": {
|
||||
"role": "model",
|
||||
"parts": [
|
||||
{
|
||||
"text": "No more dark, mysterious traces,\nOf failing systems, hidden spaces.\nOpen Telemetry's light shines bright,\nGuiding us through the darkest night.\n\nFrom metrics gathered, finely spun,\nTo logs that tell of tasks undone,\nAnd traces linking every call,\nIt answers questions, standing tall.\n\nDistributed systems, complex and vast,\nTheir hidden flaws, no longer cast\nIn shadows deep, beyond our view,\nOpen Telemetry sees them through.\n\nWith spans and attributes, it weaves a tale,\nOf requests flowing, never frail.\nIt pinpoints bottlenecks, slow and grim,\nAnd helps us optimize, system trim.\n\nAcross languages, a common ground,\nWhere data's shared, and insights found.\nExporters whisper, collectors hum,\nA symphony of data, overcome.\n\nSo raise a glass, to this open source,\nA shining beacon, a powerful force.\nOpen Telemetry, a guiding star,\nRevealing secrets, near and far.\n"
|
||||
}
|
||||
]
|
||||
},
|
||||
"finishReason": "STOP",
|
||||
"avgLogprobs": -0.3586180628193498
|
||||
}
|
||||
],
|
||||
"usageMetadata": {
|
||||
"promptTokenCount": 8,
|
||||
"candidatesTokenCount": 211,
|
||||
"totalTokenCount": 219,
|
||||
"promptTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 8
|
||||
}
|
||||
],
|
||||
"candidatesTokensDetails": [
|
||||
{
|
||||
"modality": "TEXT",
|
||||
"tokenCount": 211
|
||||
}
|
||||
]
|
||||
},
|
||||
"modelVersion": "gemini-1.5-flash-002",
|
||||
"createTime": "2025-03-07T22:19:09.936326Z",
|
||||
"responseId": "3XDLZ4aTOZSpnvgPn-e0qQk"
|
||||
}
|
||||
headers:
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,97 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
|
||||
response:
|
||||
body:
|
||||
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" longer dark, the tracing's light,\\nOpen Telemetry, shining\
|
||||
\ bright\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \".\\nA beacon in the coding night,\\nRevealing paths, both\
|
||||
\ dark\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\
|
||||
2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" and bright.\\n\\nFrom microservice to sprawling beast,\\\
|
||||
nIts watchful eye, a silent priest.\\nObserving calls, both small and vast,\\\
|
||||
nPerformance\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" flaws, revealed at last.\\n\\nWith metrics gleaned and logs\
|
||||
\ aligned,\\nA clearer picture, you will find.\\nOf latency, and errors dire,\\\
|
||||
n\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\
|
||||
,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"And bottlenecks\
|
||||
\ that set afire.\\n\\nIt spans the clouds, a network wide,\\nWhere data streams,\
|
||||
\ a surging tide.\\nCollecting traces, rich and deep,\\nWhile slumbering apps\
|
||||
\ their secrets keep.\\n\\nJaeger, Zip\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"kin, the tools it holds,\\nA tapestry of stories told.\\nOf\
|
||||
\ requests flowing, swift and free,\\nOr tangled knots, for all to see.\\\
|
||||
n\\nSo embrace the power, understand,\\nThe vital role, across the\"}]}}],\"\
|
||||
modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\
|
||||
,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" land.\\nOpen\
|
||||
\ Telemetry, a guiding star,\\nTo navigate the digital afar.\\n\"}]},\"finishReason\"\
|
||||
: \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\
|
||||
: 212,\"totalTokenCount\": 220,\"promptTokensDetails\": [{\"modality\": \"\
|
||||
TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\
|
||||
TEXT\",\"tokenCount\": 212}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\
|
||||
createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
|
||||
}\r\n\r\n"
|
||||
headers:
|
||||
Content-Disposition:
|
||||
- attachment
|
||||
Content-Type:
|
||||
- text/event-stream
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,102 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
|
||||
response:
|
||||
body:
|
||||
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"The\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" black box whispers, secrets deep,\\nOf failing systems, promises\
|
||||
\ to keep.\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"\\nBut tracing's light, a guiding hand,\\nReveals the path\"\
|
||||
}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
|
||||
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", across the\
|
||||
\ land.\\n\\nOpen Telemetry, a beacon bright,\\nIlluminating pathways, day\
|
||||
\ and night.\\nFrom spans and traces, stories told,\"}]}}],\"modelVersion\"\
|
||||
: \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
|
||||
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nOf requests\
|
||||
\ flowing, brave and bold.\\n\\nThe metrics rise, a vibrant chart,\\nDisplaying\
|
||||
\ latency, a work of art.\\nEach request'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"s journey, clearly shown,\\nWhere bottlenecks slumber, seeds\
|
||||
\ are sown.\\n\\nWith logs appended, context clear,\\nThe root of problems,\
|
||||
\ drawing near.\\nObservability's embrace, so wide,\\nUnraveling mysteries,\"\
|
||||
}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
|
||||
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" deep inside.\\\
|
||||
n\\nFrom simple apps to complex weaves,\\nOpen Telemetry's power achieves,\\\
|
||||
nA unified vision, strong and true,\\nMonitoring systems, old and new.\\n\\\
|
||||
nNo vendor lock-in, free to roam,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"\\nAcross the clouds, and find your home.\\nA standard rising,\
|
||||
\ strong and bold,\\nA future brighter, to behold.\\n\\nSo let the traces\
|
||||
\ flow and gleam,\\nOpen Telemetry, a vibrant dream.\\nOf healthy systems,\
|
||||
\ running free,\\nFor all to see, for all to be.\"}]}}],\"modelVersion\":\
|
||||
\ \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
|
||||
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\n\"}]},\"\
|
||||
finishReason\": \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"\
|
||||
candidatesTokenCount\": 258,\"totalTokenCount\": 266,\"promptTokensDetails\"\
|
||||
: [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\
|
||||
: [{\"modality\": \"TEXT\",\"tokenCount\": 258}]},\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\n"
|
||||
headers:
|
||||
Content-Disposition:
|
||||
- attachment
|
||||
Content-Type:
|
||||
- text/event-stream
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,99 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
|
||||
response:
|
||||
body:
|
||||
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" more dark logs, a cryptic, silent scream,\\nNo more the hunt\
|
||||
\ for\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\
|
||||
2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" errors, a lost, fading dream.\\nOpen Telemetry, a beacon\
|
||||
\ in\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\
|
||||
2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" the night,\\nShining forth its data, clear and burning bright.\\\
|
||||
n\\nFrom traces spanning systems, a flowing, silver thread,\\nMetrics pulse\
|
||||
\ and measure,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" insights finely spread.\\nLogs enriched with context, a story\
|
||||
\ they unfold,\\nOf requests and responses, both brave and bold.\\n\\nObservability's\
|
||||
\ promise\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \", a future now at hand,\\nWith vendors interoperable, a collaborative\
|
||||
\ band.\\nNo longer vendor lock-in, a restrictive, iron cage,\\nBut freedom\
|
||||
\ of selection, turning a new page.\\n\\nFrom microservices humming,\"}]}}],\"\
|
||||
modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\
|
||||
,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" a symphony\
|
||||
\ of calls,\\nTo monolithic giants, answering their thralls,\\nOpen Telemetry\
|
||||
\ watches, with keen and watchful eye,\\nDetecting the anomalies, before they\
|
||||
\ rise and fly.\\n\\nSo let the data flow freely, a\"}]}}],\"modelVersion\"\
|
||||
: \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\
|
||||
,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" river strong\
|
||||
\ and deep,\\nIts secrets it will whisper, while the systems sleep.\\nOpen\
|
||||
\ Telemetry's power, a force that we can wield,\\nTo build more stable systems,\
|
||||
\ in the digital field.\\n\"}]},\"finishReason\": \"STOP\"}],\"usageMetadata\"\
|
||||
: {\"promptTokenCount\": 8,\"candidatesTokenCount\": 238,\"totalTokenCount\"\
|
||||
: 246,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"\
|
||||
candidatesTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 238}]},\"\
|
||||
modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\
|
||||
,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\n"
|
||||
headers:
|
||||
Content-Disposition:
|
||||
- attachment
|
||||
Content-Type:
|
||||
- text/event-stream
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -0,0 +1,99 @@
|
|||
interactions:
|
||||
- request:
|
||||
body: |-
|
||||
{
|
||||
"contents": [
|
||||
{
|
||||
"parts": [
|
||||
{
|
||||
"text": "Create a poem about Open Telemetry."
|
||||
}
|
||||
],
|
||||
"role": "user"
|
||||
}
|
||||
]
|
||||
}
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '92'
|
||||
Content-Type:
|
||||
- application/json
|
||||
user-agent:
|
||||
- google-genai-sdk/1.0.0 gl-python/3.12.8
|
||||
x-goog-api-client:
|
||||
- <REDACTED>
|
||||
x-goog-user-project:
|
||||
- <REDACTED>
|
||||
method: POST
|
||||
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
|
||||
response:
|
||||
body:
|
||||
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \" more dark, mysterious traces,\\nNo more guessing, in time\
|
||||
\ and spaces.\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
|
||||
: \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"\\nOpen Telemetry's light shines bright,\\nIlluminating the\
|
||||
\ code'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\":\
|
||||
\ \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"s dark night.\\n\\nFrom spans and metrics, a story told,\\\
|
||||
nOf requests flowing, both brave and bold.\\nTraces weaving, a tapestry grand,\"\
|
||||
}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\
|
||||
,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nShowing\
|
||||
\ performance, across the land.\\n\\nLogs and metrics, a perfect blend,\\\
|
||||
nInformation's flow, without end.\\nObservability's promise\"}]}}],\"modelVersion\"\
|
||||
: \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\
|
||||
,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\
|
||||
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", clear and\
|
||||
\ true,\\nInsights revealed, for me and you.\\n\\nJaeger, Zipkin, a chorus\
|
||||
\ sings,\\nWith exporters ready, for all the things.\\nFrom simple apps to\
|
||||
\ systems vast,\\nOpen Telemetry'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
|
||||
,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
|
||||
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
|
||||
: [{\"text\": \"s power will last.\\n\\nNo vendor lock-in, a freedom sweet,\\\
|
||||
nOpen source glory, can't be beat.\\nSo let us embrace, this modern way,\\\
|
||||
nTo monitor systems, come what may.\\n\\nFrom\"}]}}],\"modelVersion\": \"\
|
||||
gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"\
|
||||
responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\": [{\"\
|
||||
content\": {\"role\": \"model\",\"parts\": [{\"text\": \" microservices, small\
|
||||
\ and slight,\\nTo monolithic giants, shining bright,\\nOpen Telemetry shows\
|
||||
\ the path,\\nTo understand, and fix the wrath,\\nOf latency demons, lurking\
|
||||
\ near,\\nBringing clarity, year after year.\\n\"}]},\"finishReason\": \"\
|
||||
STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\
|
||||
: 242,\"totalTokenCount\": 250,\"promptTokensDetails\": [{\"modality\": \"\
|
||||
TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\
|
||||
TEXT\",\"tokenCount\": 242}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\
|
||||
createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
|
||||
}\r\n\r\n"
|
||||
headers:
|
||||
Content-Disposition:
|
||||
- attachment
|
||||
Content-Type:
|
||||
- text/event-stream
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Frame-Options:
|
||||
- SAMEORIGIN
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
|
@ -16,8 +16,7 @@ import json
|
|||
import os
|
||||
import unittest
|
||||
|
||||
from ..common.base import TestCase
|
||||
from .util import create_valid_response
|
||||
from .base import TestCase
|
||||
|
||||
|
||||
class NonStreamingTestCase(TestCase):
|
||||
|
@ -36,18 +35,24 @@ class NonStreamingTestCase(TestCase):
|
|||
def expected_function_name(self):
|
||||
raise NotImplementedError("Must implement 'expected_function_name'.")
|
||||
|
||||
def configure_valid_response(self, *args, **kwargs):
|
||||
self.requests.add_response(create_valid_response(*args, **kwargs))
|
||||
def _generate_and_get_span(self, config):
|
||||
self.generate_content(
|
||||
model="gemini-2.0-flash",
|
||||
contents="Some input prompt",
|
||||
config=config,
|
||||
)
|
||||
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
|
||||
return self.otel.get_span_named("generate_content gemini-2.0-flash")
|
||||
|
||||
def test_instrumentation_does_not_break_core_functionality(self):
|
||||
self.configure_valid_response(response_text="Yep, it works!")
|
||||
self.configure_valid_response(text="Yep, it works!")
|
||||
response = self.generate_content(
|
||||
model="gemini-2.0-flash", contents="Does this work?"
|
||||
)
|
||||
self.assertEqual(response.text, "Yep, it works!")
|
||||
|
||||
def test_generates_span(self):
|
||||
self.configure_valid_response(response_text="Yep, it works!")
|
||||
self.configure_valid_response(text="Yep, it works!")
|
||||
response = self.generate_content(
|
||||
model="gemini-2.0-flash", contents="Does this work?"
|
||||
)
|
||||
|
@ -55,7 +60,7 @@ class NonStreamingTestCase(TestCase):
|
|||
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
|
||||
|
||||
def test_model_reflected_into_span_name(self):
|
||||
self.configure_valid_response(response_text="Yep, it works!")
|
||||
self.configure_valid_response(text="Yep, it works!")
|
||||
response = self.generate_content(
|
||||
model="gemini-1.5-flash", contents="Does this work?"
|
||||
)
|
||||
|
@ -63,7 +68,7 @@ class NonStreamingTestCase(TestCase):
|
|||
self.otel.assert_has_span_named("generate_content gemini-1.5-flash")
|
||||
|
||||
def test_generated_span_has_minimal_genai_attributes(self):
|
||||
self.configure_valid_response(response_text="Yep, it works!")
|
||||
self.configure_valid_response(text="Yep, it works!")
|
||||
self.generate_content(
|
||||
model="gemini-2.0-flash", contents="Does this work?"
|
||||
)
|
||||
|
@ -75,7 +80,7 @@ class NonStreamingTestCase(TestCase):
|
|||
)
|
||||
|
||||
def test_generated_span_has_correct_function_name(self):
|
||||
self.configure_valid_response(response_text="Yep, it works!")
|
||||
self.configure_valid_response(text="Yep, it works!")
|
||||
self.generate_content(
|
||||
model="gemini-2.0-flash", contents="Does this work?"
|
||||
)
|
||||
|
@ -87,7 +92,7 @@ class NonStreamingTestCase(TestCase):
|
|||
|
||||
def test_generated_span_has_vertex_ai_system_when_configured(self):
|
||||
self.set_use_vertex(True)
|
||||
self.configure_valid_response(response_text="Yep, it works!")
|
||||
self.configure_valid_response(text="Yep, it works!")
|
||||
self.generate_content(
|
||||
model="gemini-2.0-flash", contents="Does this work?"
|
||||
)
|
||||
|
@ -170,7 +175,7 @@ class NonStreamingTestCase(TestCase):
|
|||
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
|
||||
"true"
|
||||
)
|
||||
self.configure_valid_response(response_text="Some response content")
|
||||
self.configure_valid_response(text="Some response content")
|
||||
self.generate_content(model="gemini-2.0-flash", contents="Some input")
|
||||
self.otel.assert_has_event_named("gen_ai.choice")
|
||||
event_record = self.otel.get_event_named("gen_ai.choice")
|
||||
|
@ -183,7 +188,7 @@ class NonStreamingTestCase(TestCase):
|
|||
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
|
||||
"false"
|
||||
)
|
||||
self.configure_valid_response(response_text="Some response content")
|
||||
self.configure_valid_response(text="Some response content")
|
||||
self.generate_content(model="gemini-2.0-flash", contents="Some input")
|
||||
self.otel.assert_has_event_named("gen_ai.choice")
|
||||
event_record = self.otel.get_event_named("gen_ai.choice")
|
||||
|
|
|
@ -14,8 +14,7 @@
|
|||
|
||||
import unittest
|
||||
|
||||
from ..common.base import TestCase
|
||||
from .util import create_valid_response
|
||||
from .base import TestCase
|
||||
|
||||
|
||||
class StreamingTestCase(TestCase):
|
||||
|
@ -34,11 +33,8 @@ class StreamingTestCase(TestCase):
|
|||
def expected_function_name(self):
|
||||
raise NotImplementedError("Must implement 'expected_function_name'.")
|
||||
|
||||
def configure_valid_response(self, *args, **kwargs):
|
||||
self.requests.add_response(create_valid_response(*args, **kwargs))
|
||||
|
||||
def test_instrumentation_does_not_break_core_functionality(self):
|
||||
self.configure_valid_response(response_text="Yep, it works!")
|
||||
self.configure_valid_response(text="Yep, it works!")
|
||||
responses = self.generate_content(
|
||||
model="gemini-2.0-flash", contents="Does this work?"
|
||||
)
|
||||
|
@ -47,8 +43,8 @@ class StreamingTestCase(TestCase):
|
|||
self.assertEqual(response.text, "Yep, it works!")
|
||||
|
||||
def test_handles_multiple_ressponses(self):
|
||||
self.configure_valid_response(response_text="First response")
|
||||
self.configure_valid_response(response_text="Second response")
|
||||
self.configure_valid_response(text="First response")
|
||||
self.configure_valid_response(text="Second response")
|
||||
responses = self.generate_content(
|
||||
model="gemini-2.0-flash", contents="Does this work?"
|
||||
)
|
||||
|
|
|
@ -0,0 +1,162 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
from unittest import mock
|
||||
|
||||
from google.genai.types import GenerateContentConfig
|
||||
|
||||
from opentelemetry.instrumentation.google_genai.allowlist_util import AllowList
|
||||
|
||||
from .base import TestCase
|
||||
|
||||
|
||||
class ConfigSpanAttributesTestCase(TestCase):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
self.configure_valid_response(text="Some response")
|
||||
|
||||
def generate_content(self, *args, **kwargs):
|
||||
return self.client.models.generate_content(*args, **kwargs)
|
||||
|
||||
def generate_and_get_span(self, config):
|
||||
self.client.models.generate_content(
|
||||
model="gemini-2.0-flash",
|
||||
contents="Some input prompt",
|
||||
config=config,
|
||||
)
|
||||
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
|
||||
return self.otel.get_span_named("generate_content gemini-2.0-flash")
|
||||
|
||||
def test_option_reflected_to_span_attribute_choice_count_config_dict(self):
|
||||
span = self.generate_and_get_span(config={"candidate_count": 2})
|
||||
self.assertEqual(span.attributes["gen_ai.request.choice.count"], 2)
|
||||
|
||||
def test_option_reflected_to_span_attribute_choice_count_config_obj(self):
|
||||
span = self.generate_and_get_span(
|
||||
config=GenerateContentConfig(candidate_count=2)
|
||||
)
|
||||
self.assertEqual(span.attributes["gen_ai.request.choice.count"], 2)
|
||||
|
||||
def test_option_reflected_to_span_attribute_seed_config_dict(self):
|
||||
span = self.generate_and_get_span(config={"seed": 12345})
|
||||
self.assertEqual(span.attributes["gen_ai.request.seed"], 12345)
|
||||
|
||||
def test_option_reflected_to_span_attribute_seed_config_obj(self):
|
||||
span = self.generate_and_get_span(
|
||||
config=GenerateContentConfig(seed=12345)
|
||||
)
|
||||
self.assertEqual(span.attributes["gen_ai.request.seed"], 12345)
|
||||
|
||||
def test_option_reflected_to_span_attribute_frequency_penalty(self):
|
||||
span = self.generate_and_get_span(config={"frequency_penalty": 1.0})
|
||||
self.assertEqual(
|
||||
span.attributes["gen_ai.request.frequency_penalty"], 1.0
|
||||
)
|
||||
|
||||
def test_option_reflected_to_span_attribute_max_tokens(self):
|
||||
span = self.generate_and_get_span(
|
||||
config=GenerateContentConfig(max_output_tokens=5000)
|
||||
)
|
||||
self.assertEqual(span.attributes["gen_ai.request.max_tokens"], 5000)
|
||||
|
||||
def test_option_reflected_to_span_attribute_presence_penalty(self):
|
||||
span = self.generate_and_get_span(
|
||||
config=GenerateContentConfig(presence_penalty=0.5)
|
||||
)
|
||||
self.assertEqual(
|
||||
span.attributes["gen_ai.request.presence_penalty"], 0.5
|
||||
)
|
||||
|
||||
def test_option_reflected_to_span_attribute_stop_sequences(self):
|
||||
span = self.generate_and_get_span(
|
||||
config={"stop_sequences": ["foo", "bar"]}
|
||||
)
|
||||
stop_sequences = span.attributes["gen_ai.request.stop_sequences"]
|
||||
self.assertEqual(len(stop_sequences), 2)
|
||||
self.assertEqual(stop_sequences[0], "foo")
|
||||
self.assertEqual(stop_sequences[1], "bar")
|
||||
|
||||
def test_option_reflected_to_span_attribute_top_k(self):
|
||||
span = self.generate_and_get_span(
|
||||
config=GenerateContentConfig(top_k=20)
|
||||
)
|
||||
self.assertEqual(span.attributes["gen_ai.request.top_k"], 20)
|
||||
|
||||
def test_option_reflected_to_span_attribute_top_p(self):
|
||||
span = self.generate_and_get_span(config={"top_p": 10})
|
||||
self.assertEqual(span.attributes["gen_ai.request.top_p"], 10)
|
||||
|
||||
@mock.patch.dict(
|
||||
os.environ, {"OTEL_GOOGLE_GENAI_GENERATE_CONTENT_CONFIG_INCLUDES": "*"}
|
||||
)
|
||||
def test_option_not_reflected_to_span_attribute_system_instruction(self):
|
||||
span = self.generate_and_get_span(
|
||||
config={"system_instruction": "Yadda yadda yadda"}
|
||||
)
|
||||
self.assertNotIn(
|
||||
"gcp.gen_ai.operation.config.system_instruction", span.attributes
|
||||
)
|
||||
self.assertNotIn("gen_ai.request.system_instruction", span.attributes)
|
||||
for key in span.attributes:
|
||||
value = span.attributes[key]
|
||||
if isinstance(value, str):
|
||||
self.assertNotIn("Yadda yadda yadda", value)
|
||||
|
||||
@mock.patch.dict(
|
||||
os.environ, {"OTEL_GOOGLE_GENAI_GENERATE_CONTENT_CONFIG_INCLUDES": "*"}
|
||||
)
|
||||
def test_option_reflected_to_span_attribute_automatic_func_calling(self):
|
||||
span = self.generate_and_get_span(
|
||||
config={
|
||||
"automatic_function_calling": {
|
||||
"ignore_call_history": True,
|
||||
}
|
||||
}
|
||||
)
|
||||
self.assertTrue(
|
||||
span.attributes[
|
||||
"gcp.gen_ai.operation.config.automatic_function_calling.ignore_call_history"
|
||||
]
|
||||
)
|
||||
|
||||
def test_dynamic_config_options_not_included_without_allow_list(self):
|
||||
span = self.generate_and_get_span(
|
||||
config={
|
||||
"automatic_function_calling": {
|
||||
"ignore_call_history": True,
|
||||
}
|
||||
}
|
||||
)
|
||||
self.assertNotIn(
|
||||
"gcp.gen_ai.operation.config.automatic_function_calling.ignore_call_history",
|
||||
span.attributes,
|
||||
)
|
||||
|
||||
def test_can_supply_allow_list_via_instrumentor_constructor(self):
|
||||
self.set_instrumentor_constructor_kwarg(
|
||||
"generate_content_config_key_allowlist", AllowList(includes=["*"])
|
||||
)
|
||||
span = self.generate_and_get_span(
|
||||
config={
|
||||
"automatic_function_calling": {
|
||||
"ignore_call_history": True,
|
||||
}
|
||||
}
|
||||
)
|
||||
self.assertTrue(
|
||||
span.attributes[
|
||||
"gcp.gen_ai.operation.config.automatic_function_calling.ignore_call_history"
|
||||
]
|
||||
)
|
|
@ -0,0 +1,504 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""High level end-to-end test of the generate content instrumentation.
|
||||
|
||||
The primary purpose of this test is to verify that the instrumentation
|
||||
package does not break the underlying GenAI SDK that it instruments.
|
||||
|
||||
This test suite also has some minimal validation of the instrumentation
|
||||
outputs; however, validating the instrumentation output (other than
|
||||
verifying that instrumentation does not break the GenAI SDK) is a
|
||||
secondary goal of this test. Detailed testing of the instrumentation
|
||||
output is the purview of the other tests in this directory."""
|
||||
|
||||
import asyncio
|
||||
import gzip
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
import google.auth
|
||||
import google.auth.credentials
|
||||
import google.genai
|
||||
import pytest
|
||||
import yaml
|
||||
from vcr.record_mode import RecordMode
|
||||
|
||||
from opentelemetry.instrumentation.google_genai import (
|
||||
GoogleGenAiSdkInstrumentor,
|
||||
)
|
||||
|
||||
from ..common.auth import FakeCredentials
|
||||
from ..common.otel_mocker import OTelMocker
|
||||
|
||||
_FAKE_PROJECT = "test-project"
|
||||
_FAKE_LOCATION = "test-location"
|
||||
_FAKE_API_KEY = "test-api-key"
|
||||
_DEFAULT_REAL_LOCATION = "us-central1"
|
||||
|
||||
|
||||
def _get_project_from_env():
|
||||
return (
|
||||
os.getenv("GCLOUD_PROJECT") or os.getenv("GOOGLE_CLOUD_PROJECT") or ""
|
||||
)
|
||||
|
||||
|
||||
def _get_project_from_gcloud_cli():
|
||||
try:
|
||||
gcloud_call_result = subprocess.run(
|
||||
"gcloud config get project",
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
check=True,
|
||||
)
|
||||
except subprocess.CalledProcessError:
|
||||
return None
|
||||
gcloud_output = gcloud_call_result.stdout.decode()
|
||||
return gcloud_output.strip()
|
||||
|
||||
|
||||
def _get_project_from_credentials():
|
||||
_, from_creds = google.auth.default()
|
||||
return from_creds
|
||||
|
||||
|
||||
def _get_real_project():
|
||||
from_env = _get_project_from_env()
|
||||
if from_env:
|
||||
return from_env
|
||||
from_cli = _get_project_from_gcloud_cli()
|
||||
if from_cli:
|
||||
return from_cli
|
||||
return _get_project_from_credentials()
|
||||
|
||||
|
||||
def _get_location_from_env():
|
||||
return (
|
||||
os.getenv("GCLOUD_LOCATION")
|
||||
or os.getenv("GOOGLE_CLOUD_LOCATION")
|
||||
or ""
|
||||
)
|
||||
|
||||
|
||||
def _get_real_location():
|
||||
return _get_location_from_env() or _DEFAULT_REAL_LOCATION
|
||||
|
||||
|
||||
def _get_vertex_api_key_from_env():
|
||||
return os.getenv("GOOGLE_API_KEY")
|
||||
|
||||
|
||||
def _get_gemini_api_key_from_env():
|
||||
return os.getenv("GEMINI_API_KEY")
|
||||
|
||||
|
||||
def _should_redact_header(header_key):
|
||||
if header_key.startswith("x-goog"):
|
||||
return True
|
||||
if header_key.startswith("sec-goog"):
|
||||
return True
|
||||
if header_key in ["server", "server-timing"]:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _redact_headers(headers):
|
||||
for header_key in headers:
|
||||
if _should_redact_header(header_key.lower()):
|
||||
headers[header_key] = "<REDACTED>"
|
||||
|
||||
|
||||
def _before_record_request(request):
|
||||
if request.headers:
|
||||
_redact_headers(request.headers)
|
||||
uri = request.uri
|
||||
project = _get_project_from_env()
|
||||
if project:
|
||||
uri = uri.replace(f"projects/{project}", f"projects/{_FAKE_PROJECT}")
|
||||
location = _get_real_location()
|
||||
if location:
|
||||
uri = uri.replace(
|
||||
f"locations/{location}", f"locations/{_FAKE_LOCATION}"
|
||||
)
|
||||
uri = uri.replace(
|
||||
f"//{location}-aiplatform.googleapis.com",
|
||||
f"//{_FAKE_LOCATION}-aiplatform.googleapis.com",
|
||||
)
|
||||
request.uri = uri
|
||||
return request
|
||||
|
||||
|
||||
def _before_record_response(response):
|
||||
if hasattr(response, "headers") and response.headers:
|
||||
_redact_headers(response.headers)
|
||||
return response
|
||||
|
||||
|
||||
@pytest.fixture(name="vcr_config", scope="module")
|
||||
def fixture_vcr_config():
|
||||
return {
|
||||
"filter_query_parameters": [
|
||||
"key",
|
||||
"apiKey",
|
||||
"quotaUser",
|
||||
"userProject",
|
||||
"token",
|
||||
"access_token",
|
||||
"accessToken",
|
||||
"refesh_token",
|
||||
"refreshToken",
|
||||
"authuser",
|
||||
"bearer",
|
||||
"bearer_token",
|
||||
"bearerToken",
|
||||
"userIp",
|
||||
],
|
||||
"filter_post_data_parameters": ["apikey", "api_key", "key"],
|
||||
"filter_headers": [
|
||||
"x-goog-api-key",
|
||||
"authorization",
|
||||
"server",
|
||||
"Server",
|
||||
"Server-Timing",
|
||||
"Date",
|
||||
],
|
||||
"before_record_request": _before_record_request,
|
||||
"before_record_response": _before_record_response,
|
||||
"ignore_hosts": [
|
||||
"oauth2.googleapis.com",
|
||||
"iam.googleapis.com",
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
class _LiteralBlockScalar(str):
|
||||
"""Formats the string as a literal block scalar, preserving whitespace and
|
||||
without interpreting escape characters"""
|
||||
|
||||
|
||||
def _literal_block_scalar_presenter(dumper, data):
|
||||
"""Represents a scalar string as a literal block, via '|' syntax"""
|
||||
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
name="internal_setup_yaml_pretty_formatting", scope="module", autouse=True
|
||||
)
|
||||
def fixture_setup_yaml_pretty_formatting():
|
||||
yaml.add_representer(_LiteralBlockScalar, _literal_block_scalar_presenter)
|
||||
|
||||
|
||||
def _process_string_value(string_value):
|
||||
"""Pretty-prints JSON or returns long strings as a LiteralBlockScalar"""
|
||||
try:
|
||||
json_data = json.loads(string_value)
|
||||
return _LiteralBlockScalar(json.dumps(json_data, indent=2))
|
||||
except (ValueError, TypeError):
|
||||
if len(string_value) > 80:
|
||||
return _LiteralBlockScalar(string_value)
|
||||
return string_value
|
||||
|
||||
|
||||
def _convert_body_to_literal(data):
|
||||
"""Searches the data for body strings, attempting to pretty-print JSON"""
|
||||
if isinstance(data, dict):
|
||||
for key, value in data.items():
|
||||
# Handle response body case (e.g., response.body.string)
|
||||
if key == "body" and isinstance(value, dict) and "string" in value:
|
||||
value["string"] = _process_string_value(value["string"])
|
||||
|
||||
# Handle request body case (e.g., request.body)
|
||||
elif key == "body" and isinstance(value, str):
|
||||
data[key] = _process_string_value(value)
|
||||
|
||||
else:
|
||||
_convert_body_to_literal(value)
|
||||
|
||||
elif isinstance(data, list):
|
||||
for idx, choice in enumerate(data):
|
||||
data[idx] = _convert_body_to_literal(choice)
|
||||
|
||||
return data
|
||||
|
||||
|
||||
# Helper for enforcing GZIP compression where it was originally.
|
||||
def _ensure_gzip_single_response(data: bytes):
|
||||
try:
|
||||
# Attempt to decompress, first, to avoid double compression.
|
||||
gzip.decompress(data)
|
||||
return data
|
||||
except gzip.BadGzipFile:
|
||||
# It must not have been compressed in the first place.
|
||||
return gzip.compress(data)
|
||||
|
||||
|
||||
# VCRPy automatically decompresses responses before saving them, but it may forget to
|
||||
# re-encode them when the data is loaded. This can create issues with decompression.
|
||||
# This is why we re-encode on load; to accurately replay what was originally sent.
|
||||
#
|
||||
# https://vcrpy.readthedocs.io/en/latest/advanced.html#decode-compressed-response
|
||||
def _ensure_casette_gzip(loaded_casette):
|
||||
for interaction in loaded_casette["interactions"]:
|
||||
response = interaction["response"]
|
||||
headers = response["headers"]
|
||||
if (
|
||||
"content-encoding" not in headers
|
||||
and "Content-Encoding" not in headers
|
||||
):
|
||||
continue
|
||||
if (
|
||||
"content-encoding" in headers
|
||||
and "gzip" not in headers["content-encoding"]
|
||||
):
|
||||
continue
|
||||
if (
|
||||
"Content-Encoding" in headers
|
||||
and "gzip" not in headers["Content-Encoding"]
|
||||
):
|
||||
continue
|
||||
response["body"]["string"] = _ensure_gzip_single_response(
|
||||
response["body"]["string"].encode()
|
||||
)
|
||||
|
||||
|
||||
def _maybe_ensure_casette_gzip(result):
|
||||
if sys.version_info[0] == 3 and sys.version_info[1] == 9:
|
||||
_ensure_casette_gzip(result)
|
||||
|
||||
|
||||
class _PrettyPrintJSONBody:
|
||||
"""This makes request and response body recordings more readable."""
|
||||
|
||||
@staticmethod
|
||||
def serialize(cassette_dict):
|
||||
cassette_dict = _convert_body_to_literal(cassette_dict)
|
||||
return yaml.dump(
|
||||
cassette_dict, default_flow_style=False, allow_unicode=True
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def deserialize(cassette_string):
|
||||
result = yaml.load(cassette_string, Loader=yaml.Loader)
|
||||
_maybe_ensure_casette_gzip(result)
|
||||
return result
|
||||
|
||||
|
||||
@pytest.fixture(name="fully_initialized_vcr", scope="module", autouse=True)
|
||||
def setup_vcr(vcr):
|
||||
vcr.register_serializer("yaml", _PrettyPrintJSONBody)
|
||||
vcr.serializer = "yaml"
|
||||
return vcr
|
||||
|
||||
|
||||
@pytest.fixture(name="instrumentor")
|
||||
def fixture_instrumentor():
|
||||
return GoogleGenAiSdkInstrumentor()
|
||||
|
||||
|
||||
@pytest.fixture(name="internal_instrumentation_setup", autouse=True)
|
||||
def fixture_setup_instrumentation(instrumentor):
|
||||
instrumentor.instrument()
|
||||
yield
|
||||
instrumentor.uninstrument()
|
||||
|
||||
|
||||
@pytest.fixture(name="otel_mocker", autouse=True)
|
||||
def fixture_otel_mocker():
|
||||
result = OTelMocker()
|
||||
result.install()
|
||||
yield result
|
||||
result.uninstall()
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
name="setup_content_recording",
|
||||
autouse=True,
|
||||
params=["logcontent", "excludecontent"],
|
||||
)
|
||||
def fixture_setup_content_recording(request):
|
||||
enabled = request.param == "logcontent"
|
||||
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = str(
|
||||
enabled
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="vcr_record_mode")
|
||||
def fixture_vcr_record_mode(vcr):
|
||||
return vcr.record_mode
|
||||
|
||||
|
||||
@pytest.fixture(name="in_replay_mode")
|
||||
def fixture_in_replay_mode(vcr_record_mode):
|
||||
return vcr_record_mode == RecordMode.NONE
|
||||
|
||||
|
||||
@pytest.fixture(name="gcloud_project", autouse=True)
|
||||
def fixture_gcloud_project(in_replay_mode):
|
||||
if in_replay_mode:
|
||||
return _FAKE_PROJECT
|
||||
result = _get_real_project()
|
||||
for env_var in ["GCLOUD_PROJECT", "GOOGLE_CLOUD_PROJECT"]:
|
||||
os.environ[env_var] = result
|
||||
return result
|
||||
|
||||
|
||||
@pytest.fixture(name="gcloud_location")
|
||||
def fixture_gcloud_location(in_replay_mode):
|
||||
if in_replay_mode:
|
||||
return _FAKE_LOCATION
|
||||
return _get_real_location()
|
||||
|
||||
|
||||
@pytest.fixture(name="gcloud_credentials")
|
||||
def fixture_gcloud_credentials(in_replay_mode):
|
||||
if in_replay_mode:
|
||||
return FakeCredentials()
|
||||
creds, _ = google.auth.default()
|
||||
return google.auth.credentials.with_scopes_if_required(
|
||||
creds, ["https://www.googleapis.com/auth/cloud-platform"]
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="gemini_api_key")
|
||||
def fixture_gemini_api_key(in_replay_mode):
|
||||
if in_replay_mode:
|
||||
return _FAKE_API_KEY
|
||||
return os.getenv("GEMINI_API_KEY")
|
||||
|
||||
|
||||
@pytest.fixture(name="gcloud_api_key", autouse=True)
|
||||
def fixture_gcloud_api_key(gemini_api_key):
|
||||
if "GOOGLE_API_KEY" not in os.environ:
|
||||
os.environ["GOOGLE_API_KEY"] = gemini_api_key
|
||||
return os.getenv("GOOGLE_API_KEY")
|
||||
|
||||
|
||||
@pytest.fixture(name="nonvertex_client_factory")
|
||||
def fixture_nonvertex_client_factory(gemini_api_key):
|
||||
def _factory():
|
||||
return google.genai.Client(api_key=gemini_api_key, vertexai=False)
|
||||
|
||||
return _factory
|
||||
|
||||
|
||||
@pytest.fixture(name="vertex_client_factory")
|
||||
def fixture_vertex_client_factory(
|
||||
gcloud_project, gcloud_location, gcloud_credentials
|
||||
):
|
||||
def _factory():
|
||||
return google.genai.Client(
|
||||
vertexai=True,
|
||||
project=gcloud_project,
|
||||
location=gcloud_location,
|
||||
credentials=gcloud_credentials,
|
||||
)
|
||||
|
||||
return _factory
|
||||
|
||||
|
||||
@pytest.fixture(name="genai_sdk_backend", params=["vertexaiapi"])
|
||||
def fixture_genai_sdk_backend(request):
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(name="use_vertex", autouse=True)
|
||||
def fixture_use_vertex(genai_sdk_backend):
|
||||
result = bool(genai_sdk_backend == "vertexaiapi")
|
||||
os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "1" if result else "0"
|
||||
return result
|
||||
|
||||
|
||||
@pytest.fixture(name="client")
|
||||
def fixture_client(
|
||||
vertex_client_factory, nonvertex_client_factory, use_vertex
|
||||
):
|
||||
if use_vertex:
|
||||
return vertex_client_factory()
|
||||
return nonvertex_client_factory()
|
||||
|
||||
|
||||
@pytest.fixture(name="is_async", params=["sync", "async"])
|
||||
def fixture_is_async(request):
|
||||
return request.param == "async"
|
||||
|
||||
|
||||
@pytest.fixture(name="model", params=["gemini-1.5-flash-002"])
|
||||
def fixture_model(request):
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(name="generate_content")
|
||||
def fixture_generate_content(client, is_async):
|
||||
def _sync_impl(*args, **kwargs):
|
||||
return client.models.generate_content(*args, **kwargs)
|
||||
|
||||
def _async_impl(*args, **kwargs):
|
||||
return asyncio.run(client.aio.models.generate_content(*args, **kwargs))
|
||||
|
||||
if is_async:
|
||||
return _async_impl
|
||||
return _sync_impl
|
||||
|
||||
|
||||
@pytest.fixture(name="generate_content_stream")
|
||||
def fixture_generate_content_stream(client, is_async):
|
||||
def _sync_impl(*args, **kwargs):
|
||||
results = []
|
||||
for result in client.models.generate_content_stream(*args, **kwargs):
|
||||
results.append(result)
|
||||
return results
|
||||
|
||||
def _async_impl(*args, **kwargs):
|
||||
async def _gather_all():
|
||||
results = []
|
||||
async for (
|
||||
result
|
||||
) in await client.aio.models.generate_content_stream(
|
||||
*args, **kwargs
|
||||
):
|
||||
results.append(result)
|
||||
return results
|
||||
|
||||
return asyncio.run(_gather_all())
|
||||
|
||||
if is_async:
|
||||
return _async_impl
|
||||
return _sync_impl
|
||||
|
||||
|
||||
@pytest.mark.vcr
|
||||
def test_non_streaming(generate_content, model, otel_mocker):
|
||||
response = generate_content(
|
||||
model=model, contents="Create a poem about Open Telemetry."
|
||||
)
|
||||
assert response is not None
|
||||
assert response.text is not None
|
||||
assert len(response.text) > 0
|
||||
otel_mocker.assert_has_span_named(f"generate_content {model}")
|
||||
|
||||
|
||||
@pytest.mark.vcr
|
||||
def test_streaming(generate_content_stream, model, otel_mocker):
|
||||
count = 0
|
||||
for response in generate_content_stream(
|
||||
model=model, contents="Create a poem about Open Telemetry."
|
||||
):
|
||||
assert response is not None
|
||||
assert response.text is not None
|
||||
assert len(response.text) > 0
|
||||
count += 1
|
||||
assert count > 0
|
||||
otel_mocker.assert_has_span_named(f"generate_content {model}")
|
|
@ -0,0 +1,143 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
from google.genai import types as genai_types
|
||||
|
||||
from .base import TestCase
|
||||
|
||||
|
||||
class FinishReasonsTestCase(TestCase):
|
||||
def generate_and_get_span_finish_reasons(self):
|
||||
self.client.models.generate_content(
|
||||
model="gemini-2.5-flash-001", contents="Some prompt"
|
||||
)
|
||||
span = self.otel.get_span_named(
|
||||
"generate_content gemini-2.5-flash-001"
|
||||
)
|
||||
assert span is not None
|
||||
assert "gen_ai.response.finish_reasons" in span.attributes
|
||||
return list(span.attributes["gen_ai.response.finish_reasons"])
|
||||
|
||||
def test_single_candidate_with_valid_reason(self):
|
||||
self.configure_valid_response(
|
||||
candidate=genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.STOP
|
||||
)
|
||||
)
|
||||
self.assertEqual(self.generate_and_get_span_finish_reasons(), ["stop"])
|
||||
|
||||
def test_single_candidate_with_safety_reason(self):
|
||||
self.configure_valid_response(
|
||||
candidate=genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.SAFETY
|
||||
)
|
||||
)
|
||||
self.assertEqual(
|
||||
self.generate_and_get_span_finish_reasons(), ["safety"]
|
||||
)
|
||||
|
||||
def test_single_candidate_with_max_tokens_reason(self):
|
||||
self.configure_valid_response(
|
||||
candidate=genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.MAX_TOKENS
|
||||
)
|
||||
)
|
||||
self.assertEqual(
|
||||
self.generate_and_get_span_finish_reasons(), ["max_tokens"]
|
||||
)
|
||||
|
||||
def test_single_candidate_with_no_reason(self):
|
||||
self.configure_valid_response(
|
||||
candidate=genai_types.Candidate(finish_reason=None)
|
||||
)
|
||||
self.assertEqual(self.generate_and_get_span_finish_reasons(), [])
|
||||
|
||||
def test_single_candidate_with_unspecified_reason(self):
|
||||
self.configure_valid_response(
|
||||
candidate=genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.FINISH_REASON_UNSPECIFIED
|
||||
)
|
||||
)
|
||||
self.assertEqual(
|
||||
self.generate_and_get_span_finish_reasons(), ["unspecified"]
|
||||
)
|
||||
|
||||
def test_multiple_candidates_with_valid_reasons(self):
|
||||
self.configure_valid_response(
|
||||
candidates=[
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.MAX_TOKENS
|
||||
),
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.STOP
|
||||
),
|
||||
]
|
||||
)
|
||||
self.assertEqual(
|
||||
self.generate_and_get_span_finish_reasons(), ["max_tokens", "stop"]
|
||||
)
|
||||
|
||||
def test_sorts_finish_reasons(self):
|
||||
self.configure_valid_response(
|
||||
candidates=[
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.STOP
|
||||
),
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.MAX_TOKENS
|
||||
),
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.SAFETY
|
||||
),
|
||||
]
|
||||
)
|
||||
self.assertEqual(
|
||||
self.generate_and_get_span_finish_reasons(),
|
||||
["max_tokens", "safety", "stop"],
|
||||
)
|
||||
|
||||
def test_deduplicates_finish_reasons(self):
|
||||
self.configure_valid_response(
|
||||
candidates=[
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.STOP
|
||||
),
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.MAX_TOKENS
|
||||
),
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.STOP
|
||||
),
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.STOP
|
||||
),
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.SAFETY
|
||||
),
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.STOP
|
||||
),
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.STOP
|
||||
),
|
||||
genai_types.Candidate(
|
||||
finish_reason=genai_types.FinishReason.STOP
|
||||
),
|
||||
]
|
||||
)
|
||||
self.assertEqual(
|
||||
self.generate_and_get_span_finish_reasons(),
|
||||
["max_tokens", "safety", "stop"],
|
||||
)
|
|
@ -0,0 +1,277 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
import google.genai.types as genai_types
|
||||
|
||||
from .base import TestCase
|
||||
|
||||
|
||||
class ToolCallInstrumentationTestCase(TestCase):
    """Verifies that tool (function) callables passed via 'config' get wrapped
    with instrumentation that emits 'execute_tool' spans when invoked."""

    def _instrumented_tool(self, tool_function, config):
        """Run generate_content with `config` (which references
        `tool_function` as a tool) and return the wrapped callable that the
        underlying mocked SDK method actually received."""
        recorded = []

        def fake_generate_content(*args, **kwargs):
            recorded.append((args, kwargs))
            return "some result"

        self.mock_generate_content.side_effect = fake_generate_content
        self.client.models.generate_content(
            model="some-model-name",
            contents="Some content",
            config=config,
        )
        self.assertEqual(len(recorded), 1)
        return recorded[0][1]["config"].tools[0]

    def test_tool_calls_with_config_dict_outputs_spans(self):
        def somefunction(somearg):
            print("somearg=%s", somearg)

        wrapped_somefunction = self._instrumented_tool(
            somefunction, {"tools": [somefunction]}
        )
        # No span exists until the wrapped tool is actually invoked.
        self.assertIsNone(
            self.otel.get_span_named("execute_tool somefunction")
        )
        wrapped_somefunction("someparam")
        self.otel.assert_has_span_named("execute_tool somefunction")
        generated_span = self.otel.get_span_named("execute_tool somefunction")
        self.assertIn("gen_ai.system", generated_span.attributes)
        self.assertEqual(
            generated_span.attributes["gen_ai.tool.name"], "somefunction"
        )
        self.assertEqual(
            generated_span.attributes["code.args.positional.count"], 1
        )
        self.assertEqual(
            generated_span.attributes["code.args.keyword.count"], 0
        )

    def test_tool_calls_with_config_object_outputs_spans(self):
        def somefunction(somearg):
            print("somearg=%s", somearg)

        wrapped_somefunction = self._instrumented_tool(
            somefunction,
            genai_types.GenerateContentConfig(tools=[somefunction]),
        )
        self.assertIsNone(
            self.otel.get_span_named("execute_tool somefunction")
        )
        wrapped_somefunction("someparam")
        self.otel.assert_has_span_named("execute_tool somefunction")
        generated_span = self.otel.get_span_named("execute_tool somefunction")
        self.assertIn("gen_ai.system", generated_span.attributes)
        self.assertEqual(
            generated_span.attributes["gen_ai.tool.name"], "somefunction"
        )
        self.assertEqual(
            generated_span.attributes["code.args.positional.count"], 1
        )
        self.assertEqual(
            generated_span.attributes["code.args.keyword.count"], 0
        )

    @patch.dict(
        "os.environ",
        {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"},
    )
    def test_tool_calls_record_parameter_values_on_span_if_enabled(self):
        def somefunction(someparam, otherparam=2):
            print("someparam=%s, otherparam=%s", someparam, otherparam)

        wrapped_somefunction = self._instrumented_tool(
            somefunction, {"tools": [somefunction]}
        )
        wrapped_somefunction(123, otherparam="abc")
        self.otel.assert_has_span_named("execute_tool somefunction")
        generated_span = self.otel.get_span_named("execute_tool somefunction")
        attributes = generated_span.attributes
        self.assertEqual(
            attributes["code.function.parameters.someparam.type"], "int"
        )
        self.assertEqual(
            attributes["code.function.parameters.otherparam.type"], "str"
        )
        self.assertEqual(
            attributes["code.function.parameters.someparam.value"], 123
        )
        self.assertEqual(
            attributes["code.function.parameters.otherparam.value"], "abc"
        )

    @patch.dict(
        "os.environ",
        {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"},
    )
    def test_tool_calls_do_not_record_parameter_values_if_not_enabled(self):
        def somefunction(someparam, otherparam=2):
            print("someparam=%s, otherparam=%s", someparam, otherparam)

        wrapped_somefunction = self._instrumented_tool(
            somefunction, {"tools": [somefunction]}
        )
        wrapped_somefunction(123, otherparam="abc")
        self.otel.assert_has_span_named("execute_tool somefunction")
        generated_span = self.otel.get_span_named("execute_tool somefunction")
        attributes = generated_span.attributes
        # Parameter types are always recorded; values only when message
        # content capture is enabled.
        self.assertEqual(
            attributes["code.function.parameters.someparam.type"], "int"
        )
        self.assertEqual(
            attributes["code.function.parameters.otherparam.type"], "str"
        )
        self.assertNotIn(
            "code.function.parameters.someparam.value", attributes
        )
        self.assertNotIn(
            "code.function.parameters.otherparam.value", attributes
        )

    @patch.dict(
        "os.environ",
        {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"},
    )
    def test_tool_calls_record_return_values_on_span_if_enabled(self):
        def somefunction(x, y=2):
            return x + y

        wrapped_somefunction = self._instrumented_tool(
            somefunction, {"tools": [somefunction]}
        )
        wrapped_somefunction(123)
        self.otel.assert_has_span_named("execute_tool somefunction")
        generated_span = self.otel.get_span_named("execute_tool somefunction")
        self.assertEqual(
            generated_span.attributes["code.function.return.type"], "int"
        )
        self.assertEqual(
            generated_span.attributes["code.function.return.value"], 125
        )

    @patch.dict(
        "os.environ",
        {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"},
    )
    def test_tool_calls_do_not_record_return_values_if_not_enabled(self):
        def somefunction(x, y=2):
            return x + y

        wrapped_somefunction = self._instrumented_tool(
            somefunction, {"tools": [somefunction]}
        )
        wrapped_somefunction(123)
        self.otel.assert_has_span_named("execute_tool somefunction")
        generated_span = self.otel.get_span_named("execute_tool somefunction")
        # Return type is always recorded; the value only when content
        # capture is enabled.
        self.assertEqual(
            generated_span.attributes["code.function.return.type"], "int"
        )
        self.assertNotIn(
            "code.function.return.value", generated_span.attributes
        )
|
|
@ -12,27 +12,64 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from typing import Optional, Union
|
||||
|
||||
def create_valid_response(
    response_text="The model response", input_tokens=10, output_tokens=20
):
    """Build a minimal, well-formed generate_content response payload.

    Returns a plain dict (camelCase REST shape) with one model candidate
    whose single text part is `response_text`, plus consistent token usage
    metadata derived from `input_tokens` and `output_tokens`.
    """
    usage = {
        "promptTokenCount": input_tokens,
        "candidatesTokenCount": output_tokens,
        "totalTokenCount": input_tokens + output_tokens,
    }
    candidate = {
        "content": {
            "role": "model",
            "parts": [{"text": response_text}],
        }
    }
    return {
        "modelVersion": "gemini-2.0-flash-test123",
        "usageMetadata": usage,
        "candidates": [candidate],
    }
|
||||
import google.genai.types as genai_types
|
||||
|
||||
|
||||
def create_response(
    part: Optional[genai_types.Part] = None,
    parts: Optional[list[genai_types.Part]] = None,
    content: Optional[genai_types.Content] = None,
    candidate: Optional[genai_types.Candidate] = None,
    candidates: Optional[list[genai_types.Candidate]] = None,
    text: Optional[str] = None,
    input_tokens: Optional[int] = None,
    output_tokens: Optional[int] = None,
    model_version: Optional[str] = None,
    usage_metadata: Optional[
        genai_types.GenerateContentResponseUsageMetadata
    ] = None,
    **kwargs,
) -> genai_types.GenerateContentResponse:
    """Assemble a GenerateContentResponse, defaulting every missing level.

    The most specific argument supplied wins: an explicit `candidates` list
    makes `candidate`/`content`/`parts`/`part`/`text` irrelevant, and so on
    down the chain. Extra keyword arguments are forwarded to the response
    constructor unchanged.
    """
    # Build the "candidates" subfield from the most specific piece given.
    if candidates is None:
        if candidate is None:
            if content is None:
                if parts is None:
                    if part is None:
                        part = genai_types.Part(
                            text="Some response text" if text is None else text
                        )
                    parts = [part]
                content = genai_types.Content(parts=parts, role="model")
            candidate = genai_types.Candidate(content=content)
        candidates = [candidate]

    # Build the "usage_metadata" subfield.
    if usage_metadata is None:
        usage_metadata = genai_types.GenerateContentResponseUsageMetadata()
    # NOTE(review): a caller-supplied usage_metadata is mutated in place when
    # input_tokens/output_tokens are also passed — confirm callers expect it.
    if input_tokens is not None:
        usage_metadata.prompt_token_count = input_tokens
    if output_tokens is not None:
        usage_metadata.candidates_token_count = output_tokens

    return genai_types.GenerateContentResponse(
        candidates=candidates,
        usage_metadata=usage_metadata,
        model_version=model_version,
        **kwargs,
    )
|
||||
|
||||
|
||||
def convert_to_response(
    arg: Union[str, genai_types.GenerateContentResponse, dict],
) -> genai_types.GenerateContentResponse:
    """Coerce `arg` into a GenerateContentResponse.

    Responses pass through unchanged; strings become the response text;
    dicts are forwarded as keyword arguments to `create_response`.
    Anything else raises ValueError.
    """
    if isinstance(arg, genai_types.GenerateContentResponse):
        return arg
    if isinstance(arg, str):
        return create_response(text=arg)
    if isinstance(arg, dict):
        return create_response(**arg)
    raise ValueError(
        f"Unsure how to convert {arg} of type {arg.__class__.__name__} to response."
    )
|
||||
|
|
|
@ -21,10 +21,10 @@ pytest-vcr==1.0.2
|
|||
|
||||
google-auth==2.15.0
|
||||
google-genai==1.0.0
|
||||
opentelemetry-api==1.30.0
|
||||
opentelemetry-sdk==1.30.0
|
||||
opentelemetry-semantic-conventions==0.51b0
|
||||
opentelemetry-instrumentation==0.51b0
|
||||
opentelemetry-api==1.31.1
|
||||
opentelemetry-sdk==1.31.1
|
||||
opentelemetry-semantic-conventions==0.52b1
|
||||
opentelemetry-instrumentation==0.52b1
|
||||
|
||||
# Install locally from the folder. This path is relative to the
|
||||
# root directory, given invocation from "tox" at root level.
|
||||
|
|
|
@ -0,0 +1,162 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
from unittest import mock
|
||||
|
||||
from opentelemetry.instrumentation.google_genai.allowlist_util import AllowList
|
||||
|
||||
|
||||
def test_empty_allowlist_allows_nothing():
    """With no includes configured, every lookup is denied."""
    allow_list = AllowList()
    for key in ("", "foo", "bar", "baz", "anything at all"):
        assert not allow_list.allowed(key)


def test_simple_include_allow_list():
    """Exact entries in `includes` match; nothing else does."""
    allow_list = AllowList(includes=["abc", "xyz"])
    for key in ("abc", "xyz"):
        assert allow_list.allowed(key)
    for key in ("abc.xyz", "blah", "other value not in includes"):
        assert not allow_list.allowed(key)


def test_allow_list_with_prefix_matching():
    """A trailing '.*' matches children of a key but not the key itself."""
    allow_list = AllowList(includes=["abc.*", "xyz"])
    for key in ("abc.foo", "abc.bar", "xyz"):
        assert allow_list.allowed(key)
    for key in ("abc", "blah", "other value not in includes"):
        assert not allow_list.allowed(key)


def test_allow_list_with_array_wildcard_matching():
    """'[*]' matches any index; a '.*' tail matches any trailing field."""
    allow_list = AllowList(includes=["abc[*].foo", "xyz[*].*"])
    for key in ("abc[0].foo", "abc[1].foo", "xyz[0].blah", "xyz[1].yadayada"):
        assert allow_list.allowed(key)
    for key in ("abc", "abc[0].bar", "blah", "other value not in includes"):
        assert not allow_list.allowed(key)


def test_includes_and_excludes():
    """An exclude entry overrides a matching include entry."""
    allow_list = AllowList(includes=["abc", "xyz"], excludes=["xyz"])
    assert allow_list.allowed("abc")
    for key in ("xyz", "blah", "other value not in includes"):
        assert not allow_list.allowed(key)


def test_includes_and_excludes_with_wildcards():
    """Wildcard excludes carve whole subtrees out of a wildcard include."""
    allow_list = AllowList(
        includes=["abc", "xyz", "xyz.*"], excludes=["xyz.foo", "xyz.foo.*"]
    )
    for key in ("abc", "xyz", "xyz.not_foo", "xyz.blah"):
        assert allow_list.allowed(key)
    for key in (
        "xyz.foo",
        "xyz.foo.bar",
        "xyz.foo.baz",
        "blah",
        "other value not in includes",
    ):
        assert not allow_list.allowed(key)


def test_default_include_with_excludes():
    """A bare '*' include admits everything not explicitly excluded."""
    allow_list = AllowList(includes=["*"], excludes=["foo", "bar"])
    for key in ("abc", "xyz", "blah", "other value not in includes"):
        assert allow_list.allowed(key)
    for key in ("foo", "bar"):
        assert not allow_list.allowed(key)


def test_default_exclude_with_includes():
    """A bare '*' exclude denies everything not explicitly included."""
    allow_list = AllowList(includes=["foo", "bar"], excludes=["*"])
    for key in ("foo", "bar"):
        assert allow_list.allowed(key)
    for key in ("abc", "xyz", "blah", "other value not in includes"):
        assert not allow_list.allowed(key)
|
||||
|
||||
|
||||
@mock.patch.dict(os.environ, {"TEST_ALLOW_LIST_INCLUDE_KEYS": "abc,xyz"})
def test_can_load_from_env_with_just_include_list():
    """from_env with only an include variable parses a comma-separated list."""
    allow_list = AllowList.from_env("TEST_ALLOW_LIST_INCLUDE_KEYS")
    for key in ("abc", "xyz"):
        assert allow_list.allowed(key)
    for key in ("blah", "other value not in includes"):
        assert not allow_list.allowed(key)


@mock.patch.dict(
    os.environ, {"TEST_ALLOW_LIST_INCLUDE_KEYS": " abc , , xyz ,"}
)
def test_can_handle_spaces_and_empty_entries():
    """Whitespace is trimmed and empty entries are dropped while parsing."""
    allow_list = AllowList.from_env("TEST_ALLOW_LIST_INCLUDE_KEYS")
    for key in ("abc", "xyz"):
        assert allow_list.allowed(key)
    for key in ("", ",", "blah", "other value not in includes"):
        assert not allow_list.allowed(key)


@mock.patch.dict(
    os.environ,
    {
        "TEST_ALLOW_LIST_INCLUDE_KEYS": "abc,xyz",
        "TEST_ALLOW_LIST_EXCLUDE_KEYS": "xyz, foo, bar",
    },
)
def test_can_load_from_env_with_includes_and_excludes():
    """Both environment variables are honored; excludes win over includes."""
    allow_list = AllowList.from_env(
        "TEST_ALLOW_LIST_INCLUDE_KEYS",
        excludes_env_var="TEST_ALLOW_LIST_EXCLUDE_KEYS",
    )
    assert allow_list.allowed("abc")
    for key in ("xyz", "foo", "bar", "not in the list"):
        assert not allow_list.allowed(key)


@mock.patch.dict(
    os.environ,
    {
        "TEST_ALLOW_LIST_INCLUDE_KEYS": "*",
        "TEST_ALLOW_LIST_EXCLUDE_KEYS": "xyz, foo, bar",
    },
)
def test_supports_wildcards_in_loading_from_env():
    """A '*' read from the environment behaves like a wildcard include."""
    allow_list = AllowList.from_env(
        "TEST_ALLOW_LIST_INCLUDE_KEYS",
        excludes_env_var="TEST_ALLOW_LIST_EXCLUDE_KEYS",
    )
    for key in ("abc", "blah", "not in the list"):
        assert allow_list.allowed(key)
    for key in ("xyz", "foo", "bar"):
        assert not allow_list.allowed(key)
|
|
@ -0,0 +1,357 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from opentelemetry.instrumentation.google_genai import dict_util
|
||||
|
||||
|
||||
class PydanticModel(BaseModel):
    """Minimal pydantic model exercising the flattener's pydantic path."""

    str_value: str = ""
    int_value: int = 0


class ModelDumpableNotPydantic:
    """Non-pydantic object exposing `model_dump`, to verify duck-typing."""

    def __init__(self, dump_output):
        # Whatever `model_dump` should hand back to the flattener.
        self._dump_output = dump_output

    def model_dump(self):
        return self._dump_output


class NotJsonSerializable:
    """Opaque object with no serialization hooks at all."""

    def __init__(self):
        pass
|
||||
|
||||
|
||||
def test_flatten_empty_dict():
    """Flattening an empty dict yields an empty dict."""
    output_dict = dict_util.flatten_dict({})
    assert output_dict is not None
    assert isinstance(output_dict, dict)
    assert not output_dict


def test_flatten_simple_dict():
    """A dict of primitives passes through unchanged."""
    flat_input = {
        "int_key": 1,
        "string_key": "somevalue",
        "float_key": 3.14,
        "bool_key": True,
    }
    assert dict_util.flatten_dict(flat_input) == flat_input


def test_flatten_nested_dict():
    """Nested dicts collapse into dotted keys."""
    nested_input = {
        "int_key": 1,
        "string_key": "somevalue",
        "float_key": 3.14,
        "bool_key": True,
        "object_key": {
            "nested": {"foo": 1, "bar": "baz"},
            "qux": 54321,
        },
    }
    expected = {
        "int_key": 1,
        "string_key": "somevalue",
        "float_key": 3.14,
        "bool_key": True,
        "object_key.nested.foo": 1,
        "object_key.nested.bar": "baz",
        "object_key.qux": 54321,
    }
    assert dict_util.flatten_dict(nested_input) == expected


def test_flatten_with_key_exclusion():
    """`exclude_keys` drops the named keys from the output."""
    output = dict_util.flatten_dict(
        {
            "int_key": 1,
            "string_key": "somevalue",
            "float_key": 3.14,
            "bool_key": True,
        },
        exclude_keys=["int_key"],
    )
    assert "int_key" not in output
    assert output == {
        "string_key": "somevalue",
        "float_key": 3.14,
        "bool_key": True,
    }


def test_flatten_with_renaming():
    """`rename_keys` maps an input key to a new output key."""
    output = dict_util.flatten_dict(
        {
            "int_key": 1,
            "string_key": "somevalue",
            "float_key": 3.14,
            "bool_key": True,
        },
        rename_keys={"float_key": "math_key"},
    )
    assert "float_key" not in output
    assert "math_key" in output
    assert output == {
        "int_key": 1,
        "string_key": "somevalue",
        "math_key": 3.14,
        "bool_key": True,
    }


def test_flatten_with_prefixing():
    """`key_prefix` is prepended (dot-separated) to every output key."""
    output = dict_util.flatten_dict(
        {
            "int_key": 1,
            "string_key": "somevalue",
            "float_key": 3.14,
            "bool_key": True,
        },
        key_prefix="someprefix",
    )
    assert output == {
        "someprefix.int_key": 1,
        "someprefix.string_key": "somevalue",
        "someprefix.float_key": 3.14,
        "someprefix.bool_key": True,
    }


def test_flatten_with_custom_flatten_func():
    """A flatten function registered for a full key path replaces its value."""

    def summarize_int_list(key, value, **kwargs):
        total = sum(value)
        avg = total / len(value)
        return f"{len(value)} items (total: {total}, average: {avg})"

    output = dict_util.flatten_dict(
        {
            "some": {
                "deeply": {
                    "nested": {"key": [1, 2, 3, 4, 5, 6, 7, 8, 9]},
                },
            },
            "other": [1, 2, 3, 4, 5, 6, 7, 8, 9],
        },
        flatten_functions={"some.deeply.nested.key": summarize_int_list},
    )
    assert output == {
        "some.deeply.nested.key": "9 items (total: 45, average: 5.0)",
        "other": [1, 2, 3, 4, 5, 6, 7, 8, 9],
    }
|
||||
|
||||
|
||||
def test_flatten_with_pydantic_model_value():
    """Pydantic models are expanded via their fields."""
    output = dict_util.flatten_dict(
        {"foo": PydanticModel(str_value="bar", int_value=123)}
    )
    assert output == {"foo.str_value": "bar", "foo.int_value": 123}


def test_flatten_with_model_dumpable_value():
    """Any object with a `model_dump` method is expanded via its dump."""
    dumpable = ModelDumpableNotPydantic(
        {"str_value": "bar", "int_value": 123}
    )
    output = dict_util.flatten_dict({"foo": dumpable})
    assert output == {"foo.str_value": "bar", "foo.int_value": 123}


def test_flatten_with_mixed_structures():
    """model_dump output containing pydantic models flattens recursively."""
    dumpable = ModelDumpableNotPydantic(
        {"pydantic": PydanticModel(str_value="bar", int_value=123)}
    )
    output = dict_util.flatten_dict({"foo": dumpable})
    assert output == {
        "foo.pydantic.str_value": "bar",
        "foo.pydantic.int_value": 123,
    }


def test_converts_tuple_with_json_fallback():
    """Heterogeneous tuples flatten to indexed entries plus a length."""
    output = dict_util.flatten_dict({"foo": ("abc", 123)})
    assert output == {"foo.length": 2, "foo[0]": "abc", "foo[1]": 123}


def test_json_conversion_handles_unicode():
    """Non-ASCII strings survive the fallback conversion intact."""
    output = dict_util.flatten_dict({"foo": ("❤️", 123)})
    assert output == {"foo.length": 2, "foo[0]": "❤️", "foo[1]": 123}


def test_flatten_with_complex_object_not_json_serializable():
    """An unserializable value is silently dropped from the output."""
    result = dict_util.flatten_dict(
        {"cannot_serialize_directly": NotJsonSerializable()}
    )
    assert result is not None
    assert isinstance(result, dict)
    assert len(result) == 0


def test_flatten_good_with_non_serializable_complex_object():
    """Unserializable values are dropped without disturbing siblings."""
    result = dict_util.flatten_dict(
        {
            "foo": {"bar": "blah", "baz": 5},
            "cannot_serialize_directly": NotJsonSerializable(),
        }
    )
    assert result == {"foo.bar": "blah", "foo.baz": 5}


def test_flatten_with_complex_object_not_json_serializable_and_custom_flatten_func():
    """A flatten function can rescue an otherwise unserializable value."""

    def flatten_not_json_serializable(key, value, **kwargs):
        assert isinstance(value, NotJsonSerializable)
        return "blah"

    output = dict_util.flatten_dict(
        {"cannot_serialize_directly": NotJsonSerializable()},
        flatten_functions={
            "cannot_serialize_directly": flatten_not_json_serializable,
        },
    )
    assert output == {"cannot_serialize_directly": "blah"}
|
||||
|
||||
|
||||
def test_flatten_simple_homogenous_primitive_string_list():
    """A list of only strings passes through unchanged."""
    str_list_input = {"list_value": ["abc", "def"]}
    assert dict_util.flatten_dict(str_list_input) == str_list_input


def test_flatten_simple_homogenous_primitive_int_list():
    """A list of only ints passes through unchanged."""
    int_list_input = {"list_value": [123, 456]}
    assert dict_util.flatten_dict(int_list_input) == int_list_input


def test_flatten_simple_homogenous_primitive_bool_list():
    """A list of only bools passes through unchanged."""
    bool_list_input = {"list_value": [True, False]}
    assert dict_util.flatten_dict(bool_list_input) == bool_list_input


def test_flatten_simple_heterogenous_primitive_list():
    """A mixed-type list expands to indexed entries plus a length."""
    expected = {
        "list_value.length": 2,
        "list_value[0]": "abc",
        "list_value[1]": 123,
    }
    assert dict_util.flatten_dict({"list_value": ["abc", 123]}) == expected


def test_flatten_list_of_compound_types():
    """Dicts and nested lists inside a list flatten with indexed prefixes."""
    compound_input = {
        "list_value": [
            {"a": 1, "b": 2},
            {"x": 100, "y": 123, "z": 321},
            "blah",
            ["abc", 123],
        ]
    }
    assert dict_util.flatten_dict(compound_input) == {
        "list_value.length": 4,
        "list_value[0].a": 1,
        "list_value[0].b": 2,
        "list_value[1].x": 100,
        "list_value[1].y": 123,
        "list_value[1].z": 321,
        "list_value[2]": "blah",
        "list_value[3].length": 2,
        "list_value[3][0]": "abc",
        "list_value[3][1]": 123,
    }


def test_handles_simple_output_from_flatten_func():
    """A scalar returned by a flatten function is stored under the key."""

    def produce_scalar(*args, **kwargs):
        return "baz"

    output = dict_util.flatten_dict(
        {"foo": PydanticModel()},
        flatten_functions={"foo": produce_scalar},
    )
    assert output == {"foo": "baz"}


def test_handles_compound_output_from_flatten_func():
    """A dict returned by a flatten function is itself flattened."""

    def produce_dict(*args, **kwargs):
        return {"baz": 123, "qux": 456}

    output = dict_util.flatten_dict(
        {"foo": PydanticModel()},
        flatten_functions={"foo": produce_dict},
    )
    assert output == {"foo.baz": 123, "foo.qux": 456}
|
|
@ -0,0 +1,280 @@
|
|||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import asyncio
|
||||
import unittest
|
||||
from unittest.mock import patch
|
||||
|
||||
from google.genai import types as genai_types
|
||||
|
||||
from opentelemetry._events import get_event_logger_provider
|
||||
from opentelemetry.instrumentation.google_genai import (
|
||||
otel_wrapper,
|
||||
tool_call_wrapper,
|
||||
)
|
||||
from opentelemetry.metrics import get_meter_provider
|
||||
from opentelemetry.trace import get_tracer_provider
|
||||
|
||||
from ..common import otel_mocker
|
||||
|
||||
|
||||
class TestCase(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self._otel = otel_mocker.OTelMocker()
|
||||
self._otel.install()
|
||||
self._otel_wrapper = otel_wrapper.OTelWrapper.from_providers(
|
||||
get_tracer_provider(),
|
||||
get_event_logger_provider(),
|
||||
get_meter_provider(),
|
||||
)
|
||||
|
||||
@property
|
||||
def otel(self):
|
||||
return self._otel
|
||||
|
||||
@property
|
||||
def otel_wrapper(self):
|
||||
return self._otel_wrapper
|
||||
|
||||
def wrap(self, tool_or_tools, **kwargs):
|
||||
return tool_call_wrapper.wrapped(
|
||||
tool_or_tools, self.otel_wrapper, **kwargs
|
||||
)
|
||||
|
||||
def test_wraps_none(self):
|
||||
result = self.wrap(None)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_wraps_single_tool_function(self):
|
||||
def somefunction():
|
||||
pass
|
||||
|
||||
wrapped_somefunction = self.wrap(somefunction)
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
somefunction()
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
wrapped_somefunction()
|
||||
self.otel.assert_has_span_named("execute_tool somefunction")
|
||||
span = self.otel.get_span_named("execute_tool somefunction")
|
||||
self.assertEqual(
|
||||
span.attributes["gen_ai.operation.name"], "execute_tool"
|
||||
)
|
||||
self.assertEqual(span.attributes["gen_ai.tool.name"], "somefunction")
|
||||
|
||||
def test_wraps_multiple_tool_functions_as_list(self):
|
||||
def somefunction():
|
||||
pass
|
||||
|
||||
def otherfunction():
|
||||
pass
|
||||
|
||||
wrapped_functions = self.wrap([somefunction, otherfunction])
|
||||
wrapped_somefunction = wrapped_functions[0]
|
||||
wrapped_otherfunction = wrapped_functions[1]
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
self.otel.assert_does_not_have_span_named("execute_tool otherfunction")
|
||||
somefunction()
|
||||
otherfunction()
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
self.otel.assert_does_not_have_span_named("execute_tool otherfunction")
|
||||
wrapped_somefunction()
|
||||
self.otel.assert_has_span_named("execute_tool somefunction")
|
||||
self.otel.assert_does_not_have_span_named("execute_tool otherfunction")
|
||||
wrapped_otherfunction()
|
||||
self.otel.assert_has_span_named("execute_tool otherfunction")
|
||||
|
||||
def test_wraps_multiple_tool_functions_as_dict(self):
|
||||
def somefunction():
|
||||
pass
|
||||
|
||||
def otherfunction():
|
||||
pass
|
||||
|
||||
wrapped_functions = self.wrap(
|
||||
{"somefunction": somefunction, "otherfunction": otherfunction}
|
||||
)
|
||||
wrapped_somefunction = wrapped_functions["somefunction"]
|
||||
wrapped_otherfunction = wrapped_functions["otherfunction"]
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
self.otel.assert_does_not_have_span_named("execute_tool otherfunction")
|
||||
somefunction()
|
||||
otherfunction()
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
self.otel.assert_does_not_have_span_named("execute_tool otherfunction")
|
||||
wrapped_somefunction()
|
||||
self.otel.assert_has_span_named("execute_tool somefunction")
|
||||
self.otel.assert_does_not_have_span_named("execute_tool otherfunction")
|
||||
wrapped_otherfunction()
|
||||
self.otel.assert_has_span_named("execute_tool otherfunction")
|
||||
|
||||
def test_wraps_async_tool_function(self):
|
||||
async def somefunction():
|
||||
pass
|
||||
|
||||
wrapped_somefunction = self.wrap(somefunction)
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
asyncio.run(somefunction())
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
asyncio.run(wrapped_somefunction())
|
||||
self.otel.assert_has_span_named("execute_tool somefunction")
|
||||
|
||||
def test_preserves_tool_dict(self):
|
||||
tool_dict = genai_types.ToolDict()
|
||||
wrapped_tool_dict = self.wrap(tool_dict)
|
||||
self.assertEqual(tool_dict, wrapped_tool_dict)
|
||||
|
||||
def test_does_not_have_description_if_no_doc_string(self):
|
||||
def somefunction():
|
||||
pass
|
||||
|
||||
wrapped_somefunction = self.wrap(somefunction)
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
somefunction()
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
wrapped_somefunction()
|
||||
self.otel.assert_has_span_named("execute_tool somefunction")
|
||||
span = self.otel.get_span_named("execute_tool somefunction")
|
||||
self.assertNotIn("gen_ai.tool.description", span.attributes)
|
||||
|
||||
def test_has_description_if_doc_string_present(self):
|
||||
def somefunction():
|
||||
"""An example tool call function."""
|
||||
|
||||
wrapped_somefunction = self.wrap(somefunction)
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
somefunction()
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
wrapped_somefunction()
|
||||
self.otel.assert_has_span_named("execute_tool somefunction")
|
||||
span = self.otel.get_span_named("execute_tool somefunction")
|
||||
self.assertEqual(
|
||||
span.attributes["gen_ai.tool.description"],
|
||||
"An example tool call function.",
|
||||
)
|
||||
|
||||
@patch.dict(
|
||||
"os.environ",
|
||||
{"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"},
|
||||
)
|
||||
def test_handles_primitive_int_arg(self):
|
||||
def somefunction(arg=None):
|
||||
pass
|
||||
|
||||
wrapped_somefunction = self.wrap(somefunction)
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
somefunction(12345)
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
wrapped_somefunction(12345)
|
||||
self.otel.assert_has_span_named("execute_tool somefunction")
|
||||
span = self.otel.get_span_named("execute_tool somefunction")
|
||||
self.assertEqual(
|
||||
span.attributes["code.function.parameters.arg.type"], "int"
|
||||
)
|
||||
self.assertEqual(
|
||||
span.attributes["code.function.parameters.arg.value"], 12345
|
||||
)
|
||||
|
||||
@patch.dict(
|
||||
"os.environ",
|
||||
{"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"},
|
||||
)
|
||||
def test_handles_primitive_string_arg(self):
|
||||
def somefunction(arg=None):
|
||||
pass
|
||||
|
||||
wrapped_somefunction = self.wrap(somefunction)
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
somefunction("a string value")
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
wrapped_somefunction("a string value")
|
||||
self.otel.assert_has_span_named("execute_tool somefunction")
|
||||
span = self.otel.get_span_named("execute_tool somefunction")
|
||||
self.assertEqual(
|
||||
span.attributes["code.function.parameters.arg.type"], "str"
|
||||
)
|
||||
self.assertEqual(
|
||||
span.attributes["code.function.parameters.arg.value"],
|
||||
"a string value",
|
||||
)
|
||||
|
||||
@patch.dict(
|
||||
"os.environ",
|
||||
{"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"},
|
||||
)
|
||||
def test_handles_dict_arg(self):
|
||||
def somefunction(arg=None):
|
||||
pass
|
||||
|
||||
wrapped_somefunction = self.wrap(somefunction)
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
somefunction({"key": "value"})
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
wrapped_somefunction({"key": "value"})
|
||||
self.otel.assert_has_span_named("execute_tool somefunction")
|
||||
span = self.otel.get_span_named("execute_tool somefunction")
|
||||
self.assertEqual(
|
||||
span.attributes["code.function.parameters.arg.type"], "dict"
|
||||
)
|
||||
self.assertEqual(
|
||||
span.attributes["code.function.parameters.arg.value"],
|
||||
'{"key": "value"}',
|
||||
)
|
||||
|
||||
@patch.dict(
|
||||
"os.environ",
|
||||
{"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"},
|
||||
)
|
||||
def test_handles_primitive_list_arg(self):
|
||||
def somefunction(arg=None):
|
||||
pass
|
||||
|
||||
wrapped_somefunction = self.wrap(somefunction)
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
somefunction([1, 2, 3])
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
wrapped_somefunction([1, 2, 3])
|
||||
self.otel.assert_has_span_named("execute_tool somefunction")
|
||||
span = self.otel.get_span_named("execute_tool somefunction")
|
||||
self.assertEqual(
|
||||
span.attributes["code.function.parameters.arg.type"], "list"
|
||||
)
|
||||
# A conversion is required here, because the Open Telemetry code converts the
|
||||
# list into a tuple. (But this conversion isn't happening in "tool_call_wrapper.py").
|
||||
self.assertEqual(
|
||||
list(span.attributes["code.function.parameters.arg.value"]),
|
||||
[1, 2, 3],
|
||||
)
|
||||
|
||||
@patch.dict(
|
||||
"os.environ",
|
||||
{"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"},
|
||||
)
|
||||
def test_handles_heterogenous_list_arg(self):
|
||||
def somefunction(arg=None):
|
||||
pass
|
||||
|
||||
wrapped_somefunction = self.wrap(somefunction)
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
somefunction([123, "abc"])
|
||||
self.otel.assert_does_not_have_span_named("execute_tool somefunction")
|
||||
wrapped_somefunction([123, "abc"])
|
||||
self.otel.assert_has_span_named("execute_tool somefunction")
|
||||
span = self.otel.get_span_named("execute_tool somefunction")
|
||||
self.assertEqual(
|
||||
span.attributes["code.function.parameters.arg.type"], "list"
|
||||
)
|
||||
self.assertEqual(
|
||||
span.attributes["code.function.parameters.arg.value"],
|
||||
'[123, "abc"]',
|
||||
)
|
|
@ -2,4 +2,4 @@ openai~=1.57.3
|
|||
|
||||
opentelemetry-sdk~=1.30.0
|
||||
opentelemetry-exporter-otlp-proto-grpc~=1.30.0
|
||||
opentelemetry-instrumentation-openai-v2~=2.2b0
|
||||
opentelemetry-instrumentation-openai-v2~=2.1b0
|
||||
|
|
|
@ -3,4 +3,4 @@ openai~=1.57.3
|
|||
opentelemetry-sdk~=1.30.0
|
||||
opentelemetry-exporter-otlp-proto-grpc~=1.30.0
|
||||
opentelemetry-distro~=0.51b0
|
||||
opentelemetry-instrumentation-openai-v2~=2.2b0
|
||||
opentelemetry-instrumentation-openai-v2~=2.1b0
|
||||
|
|
|
@ -8,7 +8,7 @@ dynamic = ["version"]
|
|||
description = "OpenTelemetry Official OpenAI instrumentation"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -18,7 +18,6 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
|
|
|
@ -14,3 +14,5 @@
|
|||
|
||||
|
||||
_instruments = ("openai >= 1.26.0",)
|
||||
|
||||
_supports_metrics = True
|
||||
|
|
|
@ -9,6 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
|
||||
- Implement uninstrument for `opentelemetry-instrumentation-vertexai`
|
||||
([#3328](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3328))
|
||||
- VertexAI support for async calling
|
||||
([#3386](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3386))
|
||||
|
||||
## Version 2.0b0 (2025-02-24)
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@ dynamic = ["version"]
|
|||
description = "OpenTelemetry Official VertexAI instrumentation"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -18,7 +18,6 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
|
|
|
@ -39,6 +39,8 @@ API
|
|||
---
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Collection
|
||||
|
||||
from wrapt import (
|
||||
|
@ -49,32 +51,54 @@ from opentelemetry._events import get_event_logger
|
|||
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
||||
from opentelemetry.instrumentation.utils import unwrap
|
||||
from opentelemetry.instrumentation.vertexai.package import _instruments
|
||||
from opentelemetry.instrumentation.vertexai.patch import (
|
||||
generate_content_create,
|
||||
)
|
||||
from opentelemetry.instrumentation.vertexai.patch import MethodWrappers
|
||||
from opentelemetry.instrumentation.vertexai.utils import is_content_enabled
|
||||
from opentelemetry.semconv.schemas import Schemas
|
||||
from opentelemetry.trace import get_tracer
|
||||
|
||||
|
||||
def _client_classes():
|
||||
def _methods_to_wrap(
|
||||
method_wrappers: MethodWrappers,
|
||||
):
|
||||
# This import is very slow, do it lazily in case instrument() is not called
|
||||
|
||||
# pylint: disable=import-outside-toplevel
|
||||
from google.cloud.aiplatform_v1.services.prediction_service import (
|
||||
async_client,
|
||||
client,
|
||||
)
|
||||
from google.cloud.aiplatform_v1beta1.services.prediction_service import (
|
||||
async_client as async_client_v1beta1,
|
||||
)
|
||||
from google.cloud.aiplatform_v1beta1.services.prediction_service import (
|
||||
client as client_v1beta1,
|
||||
)
|
||||
|
||||
return (
|
||||
for client_class in (
|
||||
client.PredictionServiceClient,
|
||||
client_v1beta1.PredictionServiceClient,
|
||||
)
|
||||
):
|
||||
yield (
|
||||
client_class,
|
||||
client_class.generate_content.__name__, # type: ignore[reportUnknownMemberType]
|
||||
method_wrappers.generate_content,
|
||||
)
|
||||
|
||||
for client_class in (
|
||||
async_client.PredictionServiceAsyncClient,
|
||||
async_client_v1beta1.PredictionServiceAsyncClient,
|
||||
):
|
||||
yield (
|
||||
client_class,
|
||||
client_class.generate_content.__name__, # type: ignore[reportUnknownMemberType]
|
||||
method_wrappers.agenerate_content,
|
||||
)
|
||||
|
||||
|
||||
class VertexAIInstrumentor(BaseInstrumentor):
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self._methods_to_unwrap: list[tuple[Any, str]] = []
|
||||
|
||||
def instrumentation_dependencies(self) -> Collection[str]:
|
||||
return _instruments
|
||||
|
||||
|
@ -95,15 +119,19 @@ class VertexAIInstrumentor(BaseInstrumentor):
|
|||
event_logger_provider=event_logger_provider,
|
||||
)
|
||||
|
||||
for client_class in _client_classes():
|
||||
method_wrappers = MethodWrappers(
|
||||
tracer, event_logger, is_content_enabled()
|
||||
)
|
||||
for client_class, method_name, wrapper in _methods_to_wrap(
|
||||
method_wrappers
|
||||
):
|
||||
wrap_function_wrapper(
|
||||
client_class,
|
||||
name="generate_content",
|
||||
wrapper=generate_content_create(
|
||||
tracer, event_logger, is_content_enabled()
|
||||
),
|
||||
name=method_name,
|
||||
wrapper=wrapper,
|
||||
)
|
||||
self._methods_to_unwrap.append((client_class, method_name))
|
||||
|
||||
def _uninstrument(self, **kwargs: Any) -> None:
|
||||
for client_class in _client_classes():
|
||||
unwrap(client_class, "generate_content")
|
||||
for client_class, method_name in self._methods_to_unwrap:
|
||||
unwrap(client_class, method_name)
|
||||
|
|
|
@ -14,9 +14,11 @@
|
|||
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import contextmanager
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Awaitable,
|
||||
Callable,
|
||||
MutableSequence,
|
||||
)
|
||||
|
@ -87,17 +89,17 @@ def _extract_params(
|
|||
)
|
||||
|
||||
|
||||
def generate_content_create(
|
||||
tracer: Tracer, event_logger: EventLogger, capture_content: bool
|
||||
):
|
||||
"""Wrap the `generate_content` method of the `GenerativeModel` class to trace it."""
|
||||
class MethodWrappers:
|
||||
def __init__(
|
||||
self, tracer: Tracer, event_logger: EventLogger, capture_content: bool
|
||||
) -> None:
|
||||
self.tracer = tracer
|
||||
self.event_logger = event_logger
|
||||
self.capture_content = capture_content
|
||||
|
||||
def traced_method(
|
||||
wrapped: Callable[
|
||||
...,
|
||||
prediction_service.GenerateContentResponse
|
||||
| prediction_service_v1beta1.GenerateContentResponse,
|
||||
],
|
||||
@contextmanager
|
||||
def _with_instrumentation(
|
||||
self,
|
||||
instance: client.PredictionServiceClient
|
||||
| client_v1beta1.PredictionServiceClient,
|
||||
args: Any,
|
||||
|
@ -111,32 +113,82 @@ def generate_content_create(
|
|||
}
|
||||
|
||||
span_name = get_span_name(span_attributes)
|
||||
with tracer.start_as_current_span(
|
||||
|
||||
with self.tracer.start_as_current_span(
|
||||
name=span_name,
|
||||
kind=SpanKind.CLIENT,
|
||||
attributes=span_attributes,
|
||||
) as span:
|
||||
for event in request_to_events(
|
||||
params=params, capture_content=capture_content
|
||||
params=params, capture_content=self.capture_content
|
||||
):
|
||||
event_logger.emit(event)
|
||||
self.event_logger.emit(event)
|
||||
|
||||
# TODO: set error.type attribute
|
||||
# https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-spans.md
|
||||
|
||||
def handle_response(
|
||||
response: prediction_service.GenerateContentResponse
|
||||
| prediction_service_v1beta1.GenerateContentResponse,
|
||||
) -> None:
|
||||
if span.is_recording():
|
||||
# When streaming, this is called multiple times so attributes would be
|
||||
# overwritten. In practice, it looks the API only returns the interesting
|
||||
# attributes on the last streamed response. However, I couldn't find
|
||||
# documentation for this and setting attributes shouldn't be too expensive.
|
||||
span.set_attributes(
|
||||
get_genai_response_attributes(response)
|
||||
)
|
||||
|
||||
for event in response_to_events(
|
||||
response=response, capture_content=self.capture_content
|
||||
):
|
||||
self.event_logger.emit(event)
|
||||
|
||||
yield handle_response
|
||||
|
||||
def generate_content(
|
||||
self,
|
||||
wrapped: Callable[
|
||||
...,
|
||||
prediction_service.GenerateContentResponse
|
||||
| prediction_service_v1beta1.GenerateContentResponse,
|
||||
],
|
||||
instance: client.PredictionServiceClient
|
||||
| client_v1beta1.PredictionServiceClient,
|
||||
args: Any,
|
||||
kwargs: Any,
|
||||
) -> (
|
||||
prediction_service.GenerateContentResponse
|
||||
| prediction_service_v1beta1.GenerateContentResponse
|
||||
):
|
||||
with self._with_instrumentation(
|
||||
instance, args, kwargs
|
||||
) as handle_response:
|
||||
response = wrapped(*args, **kwargs)
|
||||
# TODO: handle streaming
|
||||
# if is_streaming(kwargs):
|
||||
# return StreamWrapper(
|
||||
# result, span, event_logger, capture_content
|
||||
# )
|
||||
|
||||
if span.is_recording():
|
||||
span.set_attributes(get_genai_response_attributes(response))
|
||||
for event in response_to_events(
|
||||
response=response, capture_content=capture_content
|
||||
):
|
||||
event_logger.emit(event)
|
||||
|
||||
handle_response(response)
|
||||
return response
|
||||
|
||||
return traced_method
|
||||
async def agenerate_content(
|
||||
self,
|
||||
wrapped: Callable[
|
||||
...,
|
||||
Awaitable[
|
||||
prediction_service.GenerateContentResponse
|
||||
| prediction_service_v1beta1.GenerateContentResponse
|
||||
],
|
||||
],
|
||||
instance: client.PredictionServiceClient
|
||||
| client_v1beta1.PredictionServiceClient,
|
||||
args: Any,
|
||||
kwargs: Any,
|
||||
) -> (
|
||||
prediction_service.GenerateContentResponse
|
||||
| prediction_service_v1beta1.GenerateContentResponse
|
||||
):
|
||||
with self._with_instrumentation(
|
||||
instance, args, kwargs
|
||||
) as handle_response:
|
||||
response = await wrapped(*args, **kwargs)
|
||||
handle_response(response)
|
||||
return response
|
||||
|
|
|
@ -44,8 +44,8 @@ charset-normalizer==3.4.0
|
|||
Deprecated==1.2.15
|
||||
docstring_parser==0.16
|
||||
exceptiongroup==1.2.2
|
||||
google-api-core==2.23.0
|
||||
google-auth==2.36.0
|
||||
google-api-core[grpc, async_rest]==2.23.0
|
||||
google-auth[aiohttp]==2.36.0
|
||||
google-cloud-aiplatform==1.79.0
|
||||
google-cloud-bigquery==3.27.0
|
||||
google-cloud-core==2.4.1
|
||||
|
|
|
@ -22,8 +22,8 @@ charset-normalizer==3.4.0
|
|||
Deprecated==1.2.14
|
||||
docstring_parser==0.16
|
||||
exceptiongroup==1.2.2
|
||||
google-api-core==2.23.0
|
||||
google-auth==2.36.0
|
||||
google-api-core[grpc, async_rest]==2.23.0
|
||||
google-auth[aiohttp]==2.36.0
|
||||
google-cloud-aiplatform==1.79.0
|
||||
google-cloud-bigquery==3.27.0
|
||||
google-cloud-core==2.4.1
|
||||
|
|
|
@ -1,7 +1,22 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Generator,
|
||||
Protocol,
|
||||
TypeVar,
|
||||
)
|
||||
|
||||
import pytest
|
||||
from google.api_core.exceptions import BadRequest, NotFound
|
||||
from google.auth.aio.credentials import (
|
||||
AnonymousCredentials as AsyncAnonymousCredentials,
|
||||
)
|
||||
from google.cloud.aiplatform.initializer import _set_async_rest_credentials
|
||||
from typing_extensions import Concatenate, ParamSpec
|
||||
from vcr import VCR
|
||||
from vertexai.generative_models import (
|
||||
Content,
|
||||
GenerationConfig,
|
||||
|
@ -27,13 +42,15 @@ from opentelemetry.trace import StatusCode
|
|||
def test_generate_content(
|
||||
span_exporter: InMemorySpanExporter,
|
||||
log_exporter: InMemoryLogExporter,
|
||||
generate_content: GenerateContentFixture,
|
||||
instrument_with_content: VertexAIInstrumentor,
|
||||
):
|
||||
model = GenerativeModel("gemini-1.5-flash-002")
|
||||
model.generate_content(
|
||||
generate_content(
|
||||
model,
|
||||
[
|
||||
Content(role="user", parts=[Part.from_text("Say this is a test")]),
|
||||
]
|
||||
],
|
||||
)
|
||||
|
||||
# Emits span
|
||||
|
@ -95,13 +112,15 @@ def test_generate_content(
|
|||
def test_generate_content_without_events(
|
||||
span_exporter: InMemorySpanExporter,
|
||||
log_exporter: InMemoryLogExporter,
|
||||
generate_content: GenerateContentFixture,
|
||||
instrument_no_content: VertexAIInstrumentor,
|
||||
):
|
||||
model = GenerativeModel("gemini-1.5-flash-002")
|
||||
model.generate_content(
|
||||
generate_content(
|
||||
model,
|
||||
[
|
||||
Content(role="user", parts=[Part.from_text("Say this is a test")]),
|
||||
]
|
||||
],
|
||||
)
|
||||
|
||||
# Emits span
|
||||
|
@ -144,11 +163,13 @@ def test_generate_content_without_events(
|
|||
@pytest.mark.vcr
|
||||
def test_generate_content_empty_model(
|
||||
span_exporter: InMemorySpanExporter,
|
||||
generate_content: GenerateContentFixture,
|
||||
instrument_with_content: VertexAIInstrumentor,
|
||||
):
|
||||
model = GenerativeModel("")
|
||||
try:
|
||||
model.generate_content(
|
||||
generate_content(
|
||||
model,
|
||||
[
|
||||
Content(
|
||||
role="user", parts=[Part.from_text("Say this is a test")]
|
||||
|
@ -175,11 +196,13 @@ def test_generate_content_empty_model(
|
|||
@pytest.mark.vcr
|
||||
def test_generate_content_missing_model(
|
||||
span_exporter: InMemorySpanExporter,
|
||||
generate_content: GenerateContentFixture,
|
||||
instrument_with_content: VertexAIInstrumentor,
|
||||
):
|
||||
model = GenerativeModel("gemini-does-not-exist")
|
||||
try:
|
||||
model.generate_content(
|
||||
generate_content(
|
||||
model,
|
||||
[
|
||||
Content(
|
||||
role="user", parts=[Part.from_text("Say this is a test")]
|
||||
|
@ -206,12 +229,14 @@ def test_generate_content_missing_model(
|
|||
@pytest.mark.vcr
|
||||
def test_generate_content_invalid_temperature(
|
||||
span_exporter: InMemorySpanExporter,
|
||||
generate_content: GenerateContentFixture,
|
||||
instrument_with_content: VertexAIInstrumentor,
|
||||
):
|
||||
model = GenerativeModel("gemini-1.5-flash-002")
|
||||
try:
|
||||
# Temperature out of range causes error
|
||||
model.generate_content(
|
||||
generate_content(
|
||||
model,
|
||||
[
|
||||
Content(
|
||||
role="user", parts=[Part.from_text("Say this is a test")]
|
||||
|
@ -239,18 +264,20 @@ def test_generate_content_invalid_temperature(
|
|||
@pytest.mark.vcr
|
||||
def test_generate_content_invalid_role(
|
||||
log_exporter: InMemoryLogExporter,
|
||||
generate_content: GenerateContentFixture,
|
||||
instrument_with_content: VertexAIInstrumentor,
|
||||
):
|
||||
model = GenerativeModel("gemini-1.5-flash-002")
|
||||
try:
|
||||
# Fails because role must be "user" or "model"
|
||||
model.generate_content(
|
||||
generate_content(
|
||||
model,
|
||||
[
|
||||
Content(
|
||||
role="invalid_role",
|
||||
parts=[Part.from_text("Say this is a test")],
|
||||
)
|
||||
]
|
||||
],
|
||||
)
|
||||
except BadRequest:
|
||||
pass
|
||||
|
@ -269,7 +296,11 @@ def test_generate_content_invalid_role(
|
|||
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_generate_content_extra_params(span_exporter, instrument_no_content):
|
||||
def test_generate_content_extra_params(
|
||||
span_exporter,
|
||||
instrument_no_content,
|
||||
generate_content: GenerateContentFixture,
|
||||
):
|
||||
generation_config = GenerationConfig(
|
||||
top_k=2,
|
||||
top_p=0.95,
|
||||
|
@ -281,7 +312,8 @@ def test_generate_content_extra_params(span_exporter, instrument_no_content):
|
|||
seed=12345,
|
||||
)
|
||||
model = GenerativeModel("gemini-1.5-flash-002")
|
||||
model.generate_content(
|
||||
generate_content(
|
||||
model,
|
||||
[
|
||||
Content(role="user", parts=[Part.from_text("Say this is a test")]),
|
||||
],
|
||||
|
@ -324,6 +356,7 @@ def assert_span_error(span: ReadableSpan) -> None:
|
|||
@pytest.mark.vcr
|
||||
def test_generate_content_all_events(
|
||||
log_exporter: InMemoryLogExporter,
|
||||
generate_content: GenerateContentFixture,
|
||||
instrument_with_content: VertexAIInstrumentor,
|
||||
):
|
||||
generate_content_all_input_events(
|
||||
|
@ -333,6 +366,7 @@ def test_generate_content_all_events(
|
|||
"You are a clever language model"
|
||||
),
|
||||
),
|
||||
generate_content,
|
||||
log_exporter,
|
||||
)
|
||||
|
||||
|
@ -340,6 +374,7 @@ def test_generate_content_all_events(
|
|||
@pytest.mark.vcr
|
||||
def test_preview_generate_content_all_input_events(
|
||||
log_exporter: InMemoryLogExporter,
|
||||
generate_content: GenerateContentFixture,
|
||||
instrument_with_content: VertexAIInstrumentor,
|
||||
):
|
||||
generate_content_all_input_events(
|
||||
|
@ -349,12 +384,14 @@ def test_preview_generate_content_all_input_events(
|
|||
"You are a clever language model"
|
||||
),
|
||||
),
|
||||
generate_content,
|
||||
log_exporter,
|
||||
)
|
||||
|
||||
|
||||
def generate_content_all_input_events(
|
||||
model: GenerativeModel | PreviewGenerativeModel,
|
||||
generate_content: GenerateContentFixture,
|
||||
log_exporter: InMemoryLogExporter,
|
||||
):
|
||||
model.generate_content(
|
||||
|
@ -430,3 +467,53 @@ def generate_content_all_input_events(
|
|||
"role": "model",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
_P = ParamSpec("_P")
|
||||
_R = TypeVar("_R")
|
||||
|
||||
|
||||
def _copy_signature(
|
||||
func_type: Callable[_P, _R],
|
||||
) -> Callable[
|
||||
[Callable[..., Any]], Callable[Concatenate[GenerativeModel, _P], _R]
|
||||
]:
|
||||
return lambda func: func
|
||||
|
||||
|
||||
# Type annotation for fixture to make LSP work properly
|
||||
class GenerateContentFixture(Protocol):
|
||||
@_copy_signature(GenerativeModel.generate_content)
|
||||
def __call__(self): ...
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
name="generate_content",
|
||||
params=(
|
||||
pytest.param(False, id="sync"),
|
||||
pytest.param(True, id="async"),
|
||||
),
|
||||
)
|
||||
def fixture_generate_content(
|
||||
request: pytest.FixtureRequest,
|
||||
vcr: VCR,
|
||||
) -> Generator[GenerateContentFixture, None, None]:
|
||||
"""This fixture parameterizes tests that use it to test calling both
|
||||
GenerativeModel.generate_content() and GenerativeModel.generate_content_async().
|
||||
"""
|
||||
is_async: bool = request.param
|
||||
|
||||
if is_async:
|
||||
# See
|
||||
# https://github.com/googleapis/python-aiplatform/blob/cb0e5fedbf45cb0531c0b8611fb7fabdd1f57e56/google/cloud/aiplatform/initializer.py#L717-L729
|
||||
_set_async_rest_credentials(credentials=AsyncAnonymousCredentials())
|
||||
|
||||
def wrapper(model: GenerativeModel, *args, **kwargs) -> None:
|
||||
if is_async:
|
||||
return asyncio.run(model.generate_content_async(*args, **kwargs))
|
||||
return model.generate_content(*args, **kwargs)
|
||||
|
||||
with vcr.use_cassette(
|
||||
request.node.originalname, allow_playback_repeats=True
|
||||
):
|
||||
yield wrapper
|
||||
|
|
|
@ -2,11 +2,12 @@
|
|||
| Instrumentation | Supported Packages | Metrics support | Semconv status |
|
||||
| --------------- | ------------------ | --------------- | -------------- |
|
||||
| [opentelemetry-instrumentation-aio-pika](./opentelemetry-instrumentation-aio-pika) | aio_pika >= 7.2.0, < 10.0.0 | No | development
|
||||
| [opentelemetry-instrumentation-aiohttp-client](./opentelemetry-instrumentation-aiohttp-client) | aiohttp ~= 3.0 | No | migration
|
||||
| [opentelemetry-instrumentation-aiohttp-server](./opentelemetry-instrumentation-aiohttp-server) | aiohttp ~= 3.0 | No | development
|
||||
| [opentelemetry-instrumentation-aiohttp-client](./opentelemetry-instrumentation-aiohttp-client) | aiohttp ~= 3.0 | Yes | migration
|
||||
| [opentelemetry-instrumentation-aiohttp-server](./opentelemetry-instrumentation-aiohttp-server) | aiohttp ~= 3.0 | Yes | development
|
||||
| [opentelemetry-instrumentation-aiokafka](./opentelemetry-instrumentation-aiokafka) | aiokafka >= 0.8, < 1.0 | No | development
|
||||
| [opentelemetry-instrumentation-aiopg](./opentelemetry-instrumentation-aiopg) | aiopg >= 0.13.0, < 2.0.0 | No | development
|
||||
| [opentelemetry-instrumentation-asgi](./opentelemetry-instrumentation-asgi) | asgiref ~= 3.0 | Yes | migration
|
||||
| [opentelemetry-instrumentation-asyncclick](./opentelemetry-instrumentation-asyncclick) | asyncclick ~= 8.0 | No | development
|
||||
| [opentelemetry-instrumentation-asyncio](./opentelemetry-instrumentation-asyncio) | asyncio | No | development
|
||||
| [opentelemetry-instrumentation-asyncpg](./opentelemetry-instrumentation-asyncpg) | asyncpg >= 0.12.0 | No | development
|
||||
| [opentelemetry-instrumentation-aws-lambda](./opentelemetry-instrumentation-aws-lambda) | aws_lambda | No | development
|
||||
|
@ -21,10 +22,10 @@
|
|||
| [opentelemetry-instrumentation-django](./opentelemetry-instrumentation-django) | django >= 1.10 | Yes | development
|
||||
| [opentelemetry-instrumentation-elasticsearch](./opentelemetry-instrumentation-elasticsearch) | elasticsearch >= 6.0 | No | development
|
||||
| [opentelemetry-instrumentation-falcon](./opentelemetry-instrumentation-falcon) | falcon >= 1.4.1, < 5.0.0 | Yes | migration
|
||||
| [opentelemetry-instrumentation-fastapi](./opentelemetry-instrumentation-fastapi) | fastapi ~= 0.58 | Yes | migration
|
||||
| [opentelemetry-instrumentation-fastapi](./opentelemetry-instrumentation-fastapi) | fastapi ~= 0.92 | Yes | migration
|
||||
| [opentelemetry-instrumentation-flask](./opentelemetry-instrumentation-flask) | flask >= 1.0 | Yes | migration
|
||||
| [opentelemetry-instrumentation-grpc](./opentelemetry-instrumentation-grpc) | grpcio >= 1.42.0 | No | development
|
||||
| [opentelemetry-instrumentation-httpx](./opentelemetry-instrumentation-httpx) | httpx >= 0.18.0 | No | migration
|
||||
| [opentelemetry-instrumentation-httpx](./opentelemetry-instrumentation-httpx) | httpx >= 0.18.0 | Yes | migration
|
||||
| [opentelemetry-instrumentation-jinja2](./opentelemetry-instrumentation-jinja2) | jinja2 >= 2.7, < 4.0 | No | development
|
||||
| [opentelemetry-instrumentation-kafka-python](./opentelemetry-instrumentation-kafka-python) | kafka-python >= 2.0, < 3.0,kafka-python-ng >= 2.0, < 3.0 | No | development
|
||||
| [opentelemetry-instrumentation-logging](./opentelemetry-instrumentation-logging) | logging | No | development
|
||||
|
@ -43,7 +44,7 @@
|
|||
| [opentelemetry-instrumentation-requests](./opentelemetry-instrumentation-requests) | requests ~= 2.0 | Yes | migration
|
||||
| [opentelemetry-instrumentation-sqlalchemy](./opentelemetry-instrumentation-sqlalchemy) | sqlalchemy >= 1.0.0, < 2.1.0 | Yes | development
|
||||
| [opentelemetry-instrumentation-sqlite3](./opentelemetry-instrumentation-sqlite3) | sqlite3 | No | development
|
||||
| [opentelemetry-instrumentation-starlette](./opentelemetry-instrumentation-starlette) | starlette >= 0.13, <0.15 | Yes | development
|
||||
| [opentelemetry-instrumentation-starlette](./opentelemetry-instrumentation-starlette) | starlette >= 0.13 | Yes | development
|
||||
| [opentelemetry-instrumentation-system-metrics](./opentelemetry-instrumentation-system-metrics) | psutil >= 5 | No | development
|
||||
| [opentelemetry-instrumentation-threading](./opentelemetry-instrumentation-threading) | threading | No | development
|
||||
| [opentelemetry-instrumentation-tornado](./opentelemetry-instrumentation-tornado) | tornado >= 5.1.1 | Yes | development
|
||||
|
|
|
@ -8,7 +8,7 @@ dynamic = ["version"]
|
|||
description = "OpenTelemetry Aio-pika instrumentation"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -18,7 +18,6 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
|
@ -27,7 +26,7 @@ classifiers = [
|
|||
]
|
||||
dependencies = [
|
||||
"opentelemetry-api ~= 1.5",
|
||||
"opentelemetry-instrumentation == 0.52b0.dev",
|
||||
"opentelemetry-instrumentation == 0.57b0.dev",
|
||||
"wrapt >= 1.0.0, < 2.0.0",
|
||||
]
|
||||
|
||||
|
|
|
@ -12,4 +12,4 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__version__ = "0.52b0.dev"
|
||||
__version__ = "0.57b0.dev"
|
||||
|
|
|
@ -8,7 +8,7 @@ dynamic = ["version"]
|
|||
description = "OpenTelemetry aiohttp client instrumentation"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
|
||||
]
|
||||
|
@ -18,7 +18,6 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
|
@ -27,9 +26,9 @@ classifiers = [
|
|||
]
|
||||
dependencies = [
|
||||
"opentelemetry-api ~= 1.12",
|
||||
"opentelemetry-instrumentation == 0.52b0.dev",
|
||||
"opentelemetry-semantic-conventions == 0.52b0.dev",
|
||||
"opentelemetry-util-http == 0.52b0.dev",
|
||||
"opentelemetry-instrumentation == 0.57b0.dev",
|
||||
"opentelemetry-semantic-conventions == 0.57b0.dev",
|
||||
"opentelemetry-util-http == 0.57b0.dev",
|
||||
"wrapt >= 1.0.0, < 2.0.0",
|
||||
]
|
||||
|
||||
|
|
|
@ -90,7 +90,9 @@ API
|
|||
|
||||
import types
|
||||
import typing
|
||||
from timeit import default_timer
|
||||
from typing import Collection
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import aiohttp
|
||||
import wrapt
|
||||
|
@ -99,11 +101,20 @@ import yarl
|
|||
from opentelemetry import context as context_api
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.instrumentation._semconv import (
|
||||
HTTP_DURATION_HISTOGRAM_BUCKETS_NEW,
|
||||
HTTP_DURATION_HISTOGRAM_BUCKETS_OLD,
|
||||
_client_duration_attrs_new,
|
||||
_client_duration_attrs_old,
|
||||
_filter_semconv_duration_attrs,
|
||||
_get_schema_url,
|
||||
_OpenTelemetrySemanticConventionStability,
|
||||
_OpenTelemetryStabilitySignalType,
|
||||
_report_new,
|
||||
_report_old,
|
||||
_set_http_host_client,
|
||||
_set_http_method,
|
||||
_set_http_net_peer_name_client,
|
||||
_set_http_peer_port_client,
|
||||
_set_http_url,
|
||||
_set_status,
|
||||
_StabilityMode,
|
||||
|
@ -115,11 +126,16 @@ from opentelemetry.instrumentation.utils import (
|
|||
is_instrumentation_enabled,
|
||||
unwrap,
|
||||
)
|
||||
from opentelemetry.metrics import MeterProvider, get_meter
|
||||
from opentelemetry.propagate import inject
|
||||
from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
|
||||
from opentelemetry.semconv.metrics import MetricInstruments
|
||||
from opentelemetry.semconv.metrics.http_metrics import (
|
||||
HTTP_CLIENT_REQUEST_DURATION,
|
||||
)
|
||||
from opentelemetry.trace import Span, SpanKind, TracerProvider, get_tracer
|
||||
from opentelemetry.trace.status import Status, StatusCode
|
||||
from opentelemetry.util.http import remove_url_credentials, sanitize_method
|
||||
from opentelemetry.util.http import redact_url, sanitize_method
|
||||
|
||||
_UrlFilterT = typing.Optional[typing.Callable[[yarl.URL], str]]
|
||||
_RequestHookT = typing.Optional[
|
||||
|
@ -172,11 +188,14 @@ def _set_http_status_code_attribute(
|
|||
)
|
||||
|
||||
|
||||
# pylint: disable=too-many-locals
|
||||
# pylint: disable=too-many-statements
|
||||
def create_trace_config(
|
||||
url_filter: _UrlFilterT = None,
|
||||
request_hook: _RequestHookT = None,
|
||||
response_hook: _ResponseHookT = None,
|
||||
tracer_provider: TracerProvider = None,
|
||||
meter_provider: MeterProvider = None,
|
||||
sem_conv_opt_in_mode: _StabilityMode = _StabilityMode.DEFAULT,
|
||||
) -> aiohttp.TraceConfig:
|
||||
"""Create an aiohttp-compatible trace configuration.
|
||||
|
@ -205,6 +224,7 @@ def create_trace_config(
|
|||
:param Callable request_hook: Optional callback that can modify span name and request params.
|
||||
:param Callable response_hook: Optional callback that can modify span name and response params.
|
||||
:param tracer_provider: optional TracerProvider from which to get a Tracer
|
||||
:param meter_provider: optional Meter provider to use
|
||||
|
||||
:return: An object suitable for use with :py:class:`aiohttp.ClientSession`.
|
||||
:rtype: :py:class:`aiohttp.TraceConfig`
|
||||
|
@ -214,20 +234,70 @@ def create_trace_config(
|
|||
# Explicitly specify the type for the `request_hook` and `response_hook` param and rtype to work
|
||||
# around this issue.
|
||||
|
||||
schema_url = _get_schema_url(sem_conv_opt_in_mode)
|
||||
|
||||
tracer = get_tracer(
|
||||
__name__,
|
||||
__version__,
|
||||
tracer_provider,
|
||||
schema_url=_get_schema_url(sem_conv_opt_in_mode),
|
||||
schema_url=schema_url,
|
||||
)
|
||||
|
||||
# TODO: Use this when we have durations for aiohttp-client
|
||||
meter = get_meter(
|
||||
__name__,
|
||||
__version__,
|
||||
meter_provider,
|
||||
schema_url,
|
||||
)
|
||||
|
||||
start_time = 0
|
||||
|
||||
duration_histogram_old = None
|
||||
if _report_old(sem_conv_opt_in_mode):
|
||||
duration_histogram_old = meter.create_histogram(
|
||||
name=MetricInstruments.HTTP_CLIENT_DURATION,
|
||||
unit="ms",
|
||||
description="measures the duration of the outbound HTTP request",
|
||||
explicit_bucket_boundaries_advisory=HTTP_DURATION_HISTOGRAM_BUCKETS_OLD,
|
||||
)
|
||||
duration_histogram_new = None
|
||||
if _report_new(sem_conv_opt_in_mode):
|
||||
duration_histogram_new = meter.create_histogram(
|
||||
name=HTTP_CLIENT_REQUEST_DURATION,
|
||||
unit="s",
|
||||
description="Duration of HTTP client requests.",
|
||||
explicit_bucket_boundaries_advisory=HTTP_DURATION_HISTOGRAM_BUCKETS_NEW,
|
||||
)
|
||||
|
||||
metric_attributes = {}
|
||||
|
||||
def _end_trace(trace_config_ctx: types.SimpleNamespace):
|
||||
elapsed_time = max(default_timer() - trace_config_ctx.start_time, 0)
|
||||
context_api.detach(trace_config_ctx.token)
|
||||
trace_config_ctx.span.end()
|
||||
|
||||
if trace_config_ctx.duration_histogram_old is not None:
|
||||
duration_attrs_old = _filter_semconv_duration_attrs(
|
||||
metric_attributes,
|
||||
_client_duration_attrs_old,
|
||||
_client_duration_attrs_new,
|
||||
_StabilityMode.DEFAULT,
|
||||
)
|
||||
trace_config_ctx.duration_histogram_old.record(
|
||||
max(round(elapsed_time * 1000), 0),
|
||||
attributes=duration_attrs_old,
|
||||
)
|
||||
if trace_config_ctx.duration_histogram_new is not None:
|
||||
duration_attrs_new = _filter_semconv_duration_attrs(
|
||||
metric_attributes,
|
||||
_client_duration_attrs_old,
|
||||
_client_duration_attrs_new,
|
||||
_StabilityMode.HTTP,
|
||||
)
|
||||
trace_config_ctx.duration_histogram_new.record(
|
||||
elapsed_time, attributes=duration_attrs_new
|
||||
)
|
||||
|
||||
async def on_request_start(
|
||||
unused_session: aiohttp.ClientSession,
|
||||
trace_config_ctx: types.SimpleNamespace,
|
||||
|
@ -237,12 +307,13 @@ def create_trace_config(
|
|||
trace_config_ctx.span = None
|
||||
return
|
||||
|
||||
trace_config_ctx.start_time = default_timer()
|
||||
method = params.method
|
||||
request_span_name = _get_span_name(method)
|
||||
request_url = (
|
||||
remove_url_credentials(trace_config_ctx.url_filter(params.url))
|
||||
redact_url(trace_config_ctx.url_filter(params.url))
|
||||
if callable(trace_config_ctx.url_filter)
|
||||
else remove_url_credentials(str(params.url))
|
||||
else redact_url(str(params.url))
|
||||
)
|
||||
|
||||
span_attributes = {}
|
||||
|
@ -252,8 +323,44 @@ def create_trace_config(
|
|||
sanitize_method(method),
|
||||
sem_conv_opt_in_mode,
|
||||
)
|
||||
_set_http_method(
|
||||
metric_attributes,
|
||||
method,
|
||||
sanitize_method(method),
|
||||
sem_conv_opt_in_mode,
|
||||
)
|
||||
_set_http_url(span_attributes, request_url, sem_conv_opt_in_mode)
|
||||
|
||||
try:
|
||||
parsed_url = urlparse(request_url)
|
||||
if parsed_url.hostname:
|
||||
_set_http_host_client(
|
||||
metric_attributes,
|
||||
parsed_url.hostname,
|
||||
sem_conv_opt_in_mode,
|
||||
)
|
||||
_set_http_net_peer_name_client(
|
||||
metric_attributes,
|
||||
parsed_url.hostname,
|
||||
sem_conv_opt_in_mode,
|
||||
)
|
||||
if _report_new(sem_conv_opt_in_mode):
|
||||
_set_http_host_client(
|
||||
span_attributes,
|
||||
parsed_url.hostname,
|
||||
sem_conv_opt_in_mode,
|
||||
)
|
||||
if parsed_url.port:
|
||||
_set_http_peer_port_client(
|
||||
metric_attributes, parsed_url.port, sem_conv_opt_in_mode
|
||||
)
|
||||
if _report_new(sem_conv_opt_in_mode):
|
||||
_set_http_peer_port_client(
|
||||
span_attributes, parsed_url.port, sem_conv_opt_in_mode
|
||||
)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
trace_config_ctx.span = trace_config_ctx.tracer.start_span(
|
||||
request_span_name, kind=SpanKind.CLIENT, attributes=span_attributes
|
||||
)
|
||||
|
@ -298,6 +405,7 @@ def create_trace_config(
|
|||
exc_type = type(params.exception).__qualname__
|
||||
if _report_new(sem_conv_opt_in_mode):
|
||||
trace_config_ctx.span.set_attribute(ERROR_TYPE, exc_type)
|
||||
metric_attributes[ERROR_TYPE] = exc_type
|
||||
|
||||
trace_config_ctx.span.set_status(
|
||||
Status(StatusCode.ERROR, exc_type)
|
||||
|
@ -312,7 +420,12 @@ def create_trace_config(
|
|||
def _trace_config_ctx_factory(**kwargs):
|
||||
kwargs.setdefault("trace_request_ctx", {})
|
||||
return types.SimpleNamespace(
|
||||
tracer=tracer, url_filter=url_filter, **kwargs
|
||||
tracer=tracer,
|
||||
url_filter=url_filter,
|
||||
start_time=start_time,
|
||||
duration_histogram_old=duration_histogram_old,
|
||||
duration_histogram_new=duration_histogram_new,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
trace_config = aiohttp.TraceConfig(
|
||||
|
@ -328,6 +441,7 @@ def create_trace_config(
|
|||
|
||||
def _instrument(
|
||||
tracer_provider: TracerProvider = None,
|
||||
meter_provider: MeterProvider = None,
|
||||
url_filter: _UrlFilterT = None,
|
||||
request_hook: _RequestHookT = None,
|
||||
response_hook: _ResponseHookT = None,
|
||||
|
@ -357,6 +471,7 @@ def _instrument(
|
|||
request_hook=request_hook,
|
||||
response_hook=response_hook,
|
||||
tracer_provider=tracer_provider,
|
||||
meter_provider=meter_provider,
|
||||
sem_conv_opt_in_mode=sem_conv_opt_in_mode,
|
||||
)
|
||||
trace_config._is_instrumented_by_opentelemetry = True
|
||||
|
@ -401,6 +516,7 @@ class AioHttpClientInstrumentor(BaseInstrumentor):
|
|||
Args:
|
||||
**kwargs: Optional arguments
|
||||
``tracer_provider``: a TracerProvider, defaults to global
|
||||
``meter_provider``: a MeterProvider, defaults to global
|
||||
``url_filter``: A callback to process the requested URL prior to adding
|
||||
it as a span attribute. This can be useful to remove sensitive data
|
||||
such as API keys or user personal information.
|
||||
|
@ -415,6 +531,7 @@ class AioHttpClientInstrumentor(BaseInstrumentor):
|
|||
)
|
||||
_instrument(
|
||||
tracer_provider=kwargs.get("tracer_provider"),
|
||||
meter_provider=kwargs.get("meter_provider"),
|
||||
url_filter=kwargs.get("url_filter"),
|
||||
request_hook=kwargs.get("request_hook"),
|
||||
response_hook=kwargs.get("response_hook"),
|
||||
|
|
|
@ -15,6 +15,6 @@
|
|||
|
||||
_instruments = ("aiohttp ~= 3.0",)
|
||||
|
||||
_supports_metrics = False
|
||||
_supports_metrics = True
|
||||
|
||||
_semconv_status = "migration"
|
||||
|
|
|
@ -12,4 +12,4 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__version__ = "0.52b0.dev"
|
||||
__version__ = "0.57b0.dev"
|
||||
|
|
|
@ -13,7 +13,7 @@ http_server_mock==1.7
|
|||
idna==3.7
|
||||
iniconfig==2.0.0
|
||||
itsdangerous==2.1.2
|
||||
Jinja2==3.1.4
|
||||
Jinja2==3.1.6
|
||||
MarkupSafe==2.1.5
|
||||
multidict==6.0.5
|
||||
packaging==24.0
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# pylint: disable=too-many-lines
|
||||
|
||||
import asyncio
|
||||
import contextlib
|
||||
import typing
|
||||
|
@ -28,6 +30,8 @@ from http_server_mock import HttpServerMock
|
|||
from opentelemetry import trace as trace_api
|
||||
from opentelemetry.instrumentation import aiohttp_client
|
||||
from opentelemetry.instrumentation._semconv import (
|
||||
HTTP_DURATION_HISTOGRAM_BUCKETS_NEW,
|
||||
HTTP_DURATION_HISTOGRAM_BUCKETS_OLD,
|
||||
OTEL_SEMCONV_STABILITY_OPT_IN,
|
||||
_OpenTelemetrySemanticConventionStability,
|
||||
_StabilityMode,
|
||||
|
@ -36,6 +40,20 @@ from opentelemetry.instrumentation.aiohttp_client import (
|
|||
AioHttpClientInstrumentor,
|
||||
)
|
||||
from opentelemetry.instrumentation.utils import suppress_instrumentation
|
||||
from opentelemetry.semconv._incubating.attributes.http_attributes import (
|
||||
HTTP_HOST,
|
||||
HTTP_METHOD,
|
||||
HTTP_STATUS_CODE,
|
||||
HTTP_URL,
|
||||
)
|
||||
from opentelemetry.semconv._incubating.attributes.net_attributes import (
|
||||
NET_PEER_NAME,
|
||||
NET_PEER_PORT,
|
||||
)
|
||||
from opentelemetry.semconv._incubating.attributes.server_attributes import (
|
||||
SERVER_ADDRESS,
|
||||
SERVER_PORT,
|
||||
)
|
||||
from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
|
||||
from opentelemetry.semconv.attributes.http_attributes import (
|
||||
HTTP_REQUEST_METHOD,
|
||||
|
@ -43,7 +61,6 @@ from opentelemetry.semconv.attributes.http_attributes import (
|
|||
HTTP_RESPONSE_STATUS_CODE,
|
||||
)
|
||||
from opentelemetry.semconv.attributes.url_attributes import URL_FULL
|
||||
from opentelemetry.semconv.trace import SpanAttributes
|
||||
from opentelemetry.test.test_base import TestBase
|
||||
from opentelemetry.trace import Span, StatusCode
|
||||
from opentelemetry.util._importlib_metadata import entry_points
|
||||
|
@ -84,7 +101,7 @@ class TestAioHttpIntegration(TestBase):
|
|||
super().setUp()
|
||||
_OpenTelemetrySemanticConventionStability._initialized = False
|
||||
|
||||
def assert_spans(self, spans, num_spans=1):
|
||||
def _assert_spans(self, spans, num_spans=1):
|
||||
finished_spans = self.memory_exporter.get_finished_spans()
|
||||
self.assertEqual(num_spans, len(finished_spans))
|
||||
self.assertEqual(
|
||||
|
@ -99,6 +116,11 @@ class TestAioHttpIntegration(TestBase):
|
|||
spans,
|
||||
)
|
||||
|
||||
def _assert_metrics(self, num_metrics: int = 1):
|
||||
metrics = self.get_sorted_metrics()
|
||||
self.assertEqual(len(metrics), num_metrics)
|
||||
return metrics
|
||||
|
||||
@staticmethod
|
||||
def _http_request(
|
||||
trace_config,
|
||||
|
@ -126,6 +148,7 @@ class TestAioHttpIntegration(TestBase):
|
|||
return run_with_test_server(client_request, url, handler)
|
||||
|
||||
def test_status_codes(self):
|
||||
index = 0
|
||||
for status_code, span_status in self._test_status_codes:
|
||||
with self.subTest(status_code=status_code):
|
||||
path = "test-path?query=param#foobar"
|
||||
|
@ -136,15 +159,34 @@ class TestAioHttpIntegration(TestBase):
|
|||
)
|
||||
url = f"http://{host}:{port}/{path}"
|
||||
attributes = {
|
||||
SpanAttributes.HTTP_METHOD: "GET",
|
||||
SpanAttributes.HTTP_URL: url,
|
||||
SpanAttributes.HTTP_STATUS_CODE: status_code,
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_URL: url,
|
||||
HTTP_STATUS_CODE: status_code,
|
||||
}
|
||||
|
||||
spans = [("GET", (span_status, None), attributes)]
|
||||
self.assert_spans(spans)
|
||||
self._assert_spans(spans)
|
||||
self.memory_exporter.clear()
|
||||
metrics = self._assert_metrics(1)
|
||||
duration_data_point = metrics[0].data.data_points[index]
|
||||
self.assertEqual(
|
||||
dict(duration_data_point.attributes),
|
||||
{
|
||||
HTTP_STATUS_CODE: status_code,
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_HOST: host,
|
||||
NET_PEER_NAME: host,
|
||||
NET_PEER_PORT: port,
|
||||
},
|
||||
)
|
||||
self.assertEqual(
|
||||
duration_data_point.explicit_bounds,
|
||||
HTTP_DURATION_HISTOGRAM_BUCKETS_OLD,
|
||||
)
|
||||
index += 1
|
||||
|
||||
def test_status_codes_new_semconv(self):
|
||||
index = 0
|
||||
for status_code, span_status in self._test_status_codes:
|
||||
with self.subTest(status_code=status_code):
|
||||
path = "test-path?query=param#foobar"
|
||||
|
@ -160,14 +202,39 @@ class TestAioHttpIntegration(TestBase):
|
|||
HTTP_REQUEST_METHOD: "GET",
|
||||
URL_FULL: url,
|
||||
HTTP_RESPONSE_STATUS_CODE: status_code,
|
||||
SERVER_ADDRESS: host,
|
||||
SERVER_PORT: port,
|
||||
}
|
||||
if status_code >= 400:
|
||||
attributes[ERROR_TYPE] = str(status_code.value)
|
||||
spans = [("GET", (span_status, None), attributes)]
|
||||
self.assert_spans(spans)
|
||||
self._assert_spans(spans)
|
||||
self.memory_exporter.clear()
|
||||
metrics = self._assert_metrics(1)
|
||||
duration_data_point = metrics[0].data.data_points[index]
|
||||
self.assertEqual(
|
||||
duration_data_point.attributes.get(
|
||||
HTTP_RESPONSE_STATUS_CODE
|
||||
),
|
||||
status_code,
|
||||
)
|
||||
self.assertEqual(
|
||||
duration_data_point.attributes.get(HTTP_REQUEST_METHOD),
|
||||
"GET",
|
||||
)
|
||||
if status_code >= 400:
|
||||
self.assertEqual(
|
||||
duration_data_point.attributes.get(ERROR_TYPE),
|
||||
str(status_code.value),
|
||||
)
|
||||
self.assertEqual(
|
||||
duration_data_point.explicit_bounds,
|
||||
HTTP_DURATION_HISTOGRAM_BUCKETS_NEW,
|
||||
)
|
||||
index += 1
|
||||
|
||||
def test_status_codes_both_semconv(self):
|
||||
index = 0
|
||||
for status_code, span_status in self._test_status_codes:
|
||||
with self.subTest(status_code=status_code):
|
||||
path = "test-path?query=param#foobar"
|
||||
|
@ -181,18 +248,79 @@ class TestAioHttpIntegration(TestBase):
|
|||
url = f"http://{host}:{port}/{path}"
|
||||
attributes = {
|
||||
HTTP_REQUEST_METHOD: "GET",
|
||||
SpanAttributes.HTTP_METHOD: "GET",
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_HOST: host,
|
||||
URL_FULL: url,
|
||||
SpanAttributes.HTTP_URL: url,
|
||||
HTTP_URL: url,
|
||||
HTTP_RESPONSE_STATUS_CODE: status_code,
|
||||
SpanAttributes.HTTP_STATUS_CODE: status_code,
|
||||
HTTP_STATUS_CODE: status_code,
|
||||
SERVER_ADDRESS: host,
|
||||
SERVER_PORT: port,
|
||||
NET_PEER_PORT: port,
|
||||
}
|
||||
|
||||
if status_code >= 400:
|
||||
attributes[ERROR_TYPE] = str(status_code.value)
|
||||
|
||||
spans = [("GET", (span_status, None), attributes)]
|
||||
self.assert_spans(spans, 1)
|
||||
self._assert_spans(spans, 1)
|
||||
self.memory_exporter.clear()
|
||||
metrics = self._assert_metrics(2)
|
||||
duration_data_point = metrics[0].data.data_points[index]
|
||||
self.assertEqual(
|
||||
duration_data_point.attributes.get(HTTP_STATUS_CODE),
|
||||
status_code,
|
||||
)
|
||||
self.assertEqual(
|
||||
duration_data_point.attributes.get(HTTP_METHOD),
|
||||
"GET",
|
||||
)
|
||||
self.assertEqual(
|
||||
duration_data_point.attributes.get(ERROR_TYPE),
|
||||
None,
|
||||
)
|
||||
duration_data_point = metrics[1].data.data_points[index]
|
||||
self.assertEqual(
|
||||
duration_data_point.attributes.get(
|
||||
HTTP_RESPONSE_STATUS_CODE
|
||||
),
|
||||
status_code,
|
||||
)
|
||||
self.assertEqual(
|
||||
duration_data_point.attributes.get(HTTP_REQUEST_METHOD),
|
||||
"GET",
|
||||
)
|
||||
if status_code >= 400:
|
||||
self.assertEqual(
|
||||
duration_data_point.attributes.get(ERROR_TYPE),
|
||||
str(status_code.value),
|
||||
)
|
||||
index += 1
|
||||
|
||||
def test_metrics(self):
|
||||
with self.subTest(status_code=200):
|
||||
host, port = self._http_request(
|
||||
trace_config=aiohttp_client.create_trace_config(),
|
||||
url="/test-path?query=param#foobar",
|
||||
status_code=200,
|
||||
)
|
||||
metrics = self._assert_metrics(1)
|
||||
self.assertEqual(len(metrics[0].data.data_points), 1)
|
||||
duration_data_point = metrics[0].data.data_points[0]
|
||||
self.assertEqual(
|
||||
dict(metrics[0].data.data_points[0].attributes),
|
||||
{
|
||||
HTTP_STATUS_CODE: 200,
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_HOST: host,
|
||||
NET_PEER_NAME: host,
|
||||
NET_PEER_PORT: port,
|
||||
},
|
||||
)
|
||||
self.assertEqual(duration_data_point.count, 1)
|
||||
self.assertTrue(duration_data_point.min > 0)
|
||||
self.assertTrue(duration_data_point.max > 0)
|
||||
self.assertTrue(duration_data_point.sum > 0)
|
||||
|
||||
def test_schema_url(self):
|
||||
with self.subTest(status_code=200):
|
||||
|
@ -292,16 +420,12 @@ class TestAioHttpIntegration(TestBase):
|
|||
(span.status.status_code, span.status.description),
|
||||
(StatusCode.UNSET, None),
|
||||
)
|
||||
self.assertEqual(span.attributes[HTTP_METHOD], method)
|
||||
self.assertEqual(
|
||||
span.attributes[SpanAttributes.HTTP_METHOD], method
|
||||
)
|
||||
self.assertEqual(
|
||||
span.attributes[SpanAttributes.HTTP_URL],
|
||||
span.attributes[HTTP_URL],
|
||||
f"http://{host}:{port}{path}",
|
||||
)
|
||||
self.assertEqual(
|
||||
span.attributes[SpanAttributes.HTTP_STATUS_CODE], HTTPStatus.OK
|
||||
)
|
||||
self.assertEqual(span.attributes[HTTP_STATUS_CODE], HTTPStatus.OK)
|
||||
self.assertIn("response_hook_attr", span.attributes)
|
||||
self.assertEqual(span.attributes["response_hook_attr"], "value")
|
||||
self.memory_exporter.clear()
|
||||
|
@ -319,15 +443,15 @@ class TestAioHttpIntegration(TestBase):
|
|||
status_code=HTTPStatus.OK,
|
||||
)
|
||||
|
||||
self.assert_spans(
|
||||
self._assert_spans(
|
||||
[
|
||||
(
|
||||
"GET",
|
||||
(StatusCode.UNSET, None),
|
||||
{
|
||||
SpanAttributes.HTTP_METHOD: "GET",
|
||||
SpanAttributes.HTTP_URL: f"http://{host}:{port}/some/path",
|
||||
SpanAttributes.HTTP_STATUS_CODE: int(HTTPStatus.OK),
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_URL: f"http://{host}:{port}/some/path",
|
||||
HTTP_STATUS_CODE: int(HTTPStatus.OK),
|
||||
},
|
||||
)
|
||||
]
|
||||
|
@ -353,14 +477,14 @@ class TestAioHttpIntegration(TestBase):
|
|||
with self.assertRaises(aiohttp.ClientConnectorError):
|
||||
loop.run_until_complete(do_request(url))
|
||||
|
||||
self.assert_spans(
|
||||
self._assert_spans(
|
||||
[
|
||||
(
|
||||
"GET",
|
||||
(expected_status, "ClientConnectorError"),
|
||||
{
|
||||
SpanAttributes.HTTP_METHOD: "GET",
|
||||
SpanAttributes.HTTP_URL: url,
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_URL: url,
|
||||
},
|
||||
)
|
||||
]
|
||||
|
@ -379,18 +503,29 @@ class TestAioHttpIntegration(TestBase):
|
|||
span = self.memory_exporter.get_finished_spans()[0]
|
||||
self.assertEqual(len(span.events), 1)
|
||||
self.assertEqual(span.events[0].name, "exception")
|
||||
self.assert_spans(
|
||||
self._assert_spans(
|
||||
[
|
||||
(
|
||||
"GET",
|
||||
(StatusCode.ERROR, "ServerDisconnectedError"),
|
||||
{
|
||||
SpanAttributes.HTTP_METHOD: "GET",
|
||||
SpanAttributes.HTTP_URL: f"http://{host}:{port}/test",
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_URL: f"http://{host}:{port}/test",
|
||||
},
|
||||
)
|
||||
]
|
||||
)
|
||||
metrics = self._assert_metrics(1)
|
||||
duration_data_point = metrics[0].data.data_points[0]
|
||||
self.assertEqual(
|
||||
dict(duration_data_point.attributes),
|
||||
{
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_HOST: host,
|
||||
NET_PEER_NAME: host,
|
||||
NET_PEER_PORT: port,
|
||||
},
|
||||
)
|
||||
|
||||
def test_basic_exception_new_semconv(self):
|
||||
async def request_handler(request):
|
||||
|
@ -406,7 +541,7 @@ class TestAioHttpIntegration(TestBase):
|
|||
span = self.memory_exporter.get_finished_spans()[0]
|
||||
self.assertEqual(len(span.events), 1)
|
||||
self.assertEqual(span.events[0].name, "exception")
|
||||
self.assert_spans(
|
||||
self._assert_spans(
|
||||
[
|
||||
(
|
||||
"GET",
|
||||
|
@ -415,10 +550,23 @@ class TestAioHttpIntegration(TestBase):
|
|||
HTTP_REQUEST_METHOD: "GET",
|
||||
URL_FULL: f"http://{host}:{port}/test",
|
||||
ERROR_TYPE: "ServerDisconnectedError",
|
||||
SERVER_ADDRESS: host,
|
||||
SERVER_PORT: port,
|
||||
},
|
||||
)
|
||||
]
|
||||
)
|
||||
metrics = self._assert_metrics(1)
|
||||
duration_data_point = metrics[0].data.data_points[0]
|
||||
self.assertEqual(
|
||||
dict(duration_data_point.attributes),
|
||||
{
|
||||
HTTP_REQUEST_METHOD: "GET",
|
||||
ERROR_TYPE: "ServerDisconnectedError",
|
||||
SERVER_ADDRESS: host,
|
||||
SERVER_PORT: port,
|
||||
},
|
||||
)
|
||||
|
||||
def test_basic_exception_both_semconv(self):
|
||||
async def request_handler(request):
|
||||
|
@ -434,7 +582,7 @@ class TestAioHttpIntegration(TestBase):
|
|||
span = self.memory_exporter.get_finished_spans()[0]
|
||||
self.assertEqual(len(span.events), 1)
|
||||
self.assertEqual(span.events[0].name, "exception")
|
||||
self.assert_spans(
|
||||
self._assert_spans(
|
||||
[
|
||||
(
|
||||
"GET",
|
||||
|
@ -443,12 +591,37 @@ class TestAioHttpIntegration(TestBase):
|
|||
HTTP_REQUEST_METHOD: "GET",
|
||||
URL_FULL: f"http://{host}:{port}/test",
|
||||
ERROR_TYPE: "ServerDisconnectedError",
|
||||
SpanAttributes.HTTP_METHOD: "GET",
|
||||
SpanAttributes.HTTP_URL: f"http://{host}:{port}/test",
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_URL: f"http://{host}:{port}/test",
|
||||
HTTP_HOST: host,
|
||||
SERVER_ADDRESS: host,
|
||||
SERVER_PORT: port,
|
||||
NET_PEER_PORT: port,
|
||||
},
|
||||
)
|
||||
]
|
||||
)
|
||||
metrics = self._assert_metrics(2)
|
||||
duration_data_point = metrics[0].data.data_points[0]
|
||||
self.assertEqual(
|
||||
dict(duration_data_point.attributes),
|
||||
{
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_HOST: host,
|
||||
NET_PEER_NAME: host,
|
||||
NET_PEER_PORT: port,
|
||||
},
|
||||
)
|
||||
duration_data_point = metrics[1].data.data_points[0]
|
||||
self.assertEqual(
|
||||
dict(duration_data_point.attributes),
|
||||
{
|
||||
HTTP_REQUEST_METHOD: "GET",
|
||||
ERROR_TYPE: "ServerDisconnectedError",
|
||||
SERVER_ADDRESS: host,
|
||||
SERVER_PORT: port,
|
||||
},
|
||||
)
|
||||
|
||||
def test_timeout(self):
|
||||
async def request_handler(request):
|
||||
|
@ -463,14 +636,14 @@ class TestAioHttpIntegration(TestBase):
|
|||
timeout=aiohttp.ClientTimeout(sock_read=0.01),
|
||||
)
|
||||
|
||||
self.assert_spans(
|
||||
self._assert_spans(
|
||||
[
|
||||
(
|
||||
"GET",
|
||||
(StatusCode.ERROR, "SocketTimeoutError"),
|
||||
{
|
||||
SpanAttributes.HTTP_METHOD: "GET",
|
||||
SpanAttributes.HTTP_URL: f"http://{host}:{port}/test_timeout",
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_URL: f"http://{host}:{port}/test_timeout",
|
||||
},
|
||||
)
|
||||
]
|
||||
|
@ -490,14 +663,14 @@ class TestAioHttpIntegration(TestBase):
|
|||
max_redirects=2,
|
||||
)
|
||||
|
||||
self.assert_spans(
|
||||
self._assert_spans(
|
||||
[
|
||||
(
|
||||
"GET",
|
||||
(StatusCode.ERROR, "TooManyRedirects"),
|
||||
{
|
||||
SpanAttributes.HTTP_METHOD: "GET",
|
||||
SpanAttributes.HTTP_URL: f"http://{host}:{port}/test_too_many_redirects",
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_URL: f"http://{host}:{port}/test_too_many_redirects",
|
||||
},
|
||||
)
|
||||
]
|
||||
|
@ -526,17 +699,15 @@ class TestAioHttpIntegration(TestBase):
|
|||
loop = asyncio.get_event_loop()
|
||||
loop.run_until_complete(do_request(url))
|
||||
|
||||
self.assert_spans(
|
||||
self._assert_spans(
|
||||
[
|
||||
(
|
||||
"HTTP",
|
||||
(StatusCode.ERROR, None),
|
||||
{
|
||||
SpanAttributes.HTTP_METHOD: "_OTHER",
|
||||
SpanAttributes.HTTP_URL: url,
|
||||
SpanAttributes.HTTP_STATUS_CODE: int(
|
||||
HTTPStatus.METHOD_NOT_ALLOWED
|
||||
),
|
||||
HTTP_METHOD: "_OTHER",
|
||||
HTTP_URL: url,
|
||||
HTTP_STATUS_CODE: int(HTTPStatus.METHOD_NOT_ALLOWED),
|
||||
},
|
||||
)
|
||||
]
|
||||
|
@ -570,7 +741,7 @@ class TestAioHttpIntegration(TestBase):
|
|||
loop = asyncio.get_event_loop()
|
||||
loop.run_until_complete(do_request(url))
|
||||
|
||||
self.assert_spans(
|
||||
self._assert_spans(
|
||||
[
|
||||
(
|
||||
"HTTP",
|
||||
|
@ -583,22 +754,24 @@ class TestAioHttpIntegration(TestBase):
|
|||
),
|
||||
HTTP_REQUEST_METHOD_ORIGINAL: "NONSTANDARD",
|
||||
ERROR_TYPE: "405",
|
||||
SERVER_ADDRESS: "localhost",
|
||||
SERVER_PORT: 5000,
|
||||
},
|
||||
)
|
||||
]
|
||||
)
|
||||
self.memory_exporter.clear()
|
||||
|
||||
def test_credential_removal(self):
|
||||
def test_remove_sensitive_params(self):
|
||||
trace_configs = [aiohttp_client.create_trace_config()]
|
||||
|
||||
app = HttpServerMock("test_credential_removal")
|
||||
app = HttpServerMock("test_remove_sensitive_params")
|
||||
|
||||
@app.route("/status/200")
|
||||
def index():
|
||||
return "hello"
|
||||
|
||||
url = "http://username:password@localhost:5000/status/200"
|
||||
url = "http://username:password@localhost:5000/status/200?Signature=secret"
|
||||
|
||||
with app.run("localhost", 5000):
|
||||
with self.subTest(url=url):
|
||||
|
@ -613,17 +786,17 @@ class TestAioHttpIntegration(TestBase):
|
|||
loop = asyncio.get_event_loop()
|
||||
loop.run_until_complete(do_request(url))
|
||||
|
||||
self.assert_spans(
|
||||
self._assert_spans(
|
||||
[
|
||||
(
|
||||
"GET",
|
||||
(StatusCode.UNSET, None),
|
||||
{
|
||||
SpanAttributes.HTTP_METHOD: "GET",
|
||||
SpanAttributes.HTTP_URL: (
|
||||
"http://localhost:5000/status/200"
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_URL: (
|
||||
"http://REDACTED:REDACTED@localhost:5000/status/200?Signature=REDACTED"
|
||||
),
|
||||
SpanAttributes.HTTP_STATUS_CODE: int(HTTPStatus.OK),
|
||||
HTTP_STATUS_CODE: int(HTTPStatus.OK),
|
||||
},
|
||||
)
|
||||
]
|
||||
|
@ -656,7 +829,7 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
|
||||
return default_request
|
||||
|
||||
def assert_spans(self, num_spans: int):
|
||||
def _assert_spans(self, num_spans: int):
|
||||
finished_spans = self.memory_exporter.get_finished_spans()
|
||||
self.assertEqual(num_spans, len(finished_spans))
|
||||
if num_spans == 0:
|
||||
|
@ -665,18 +838,36 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
return finished_spans[0]
|
||||
return finished_spans
|
||||
|
||||
def _assert_metrics(self, num_metrics: int = 1):
|
||||
metrics = self.get_sorted_metrics()
|
||||
self.assertEqual(len(metrics), num_metrics)
|
||||
return metrics
|
||||
|
||||
def test_instrument(self):
|
||||
host, port = run_with_test_server(
|
||||
self.get_default_request(), self.URL, self.default_handler
|
||||
)
|
||||
span = self.assert_spans(1)
|
||||
span = self._assert_spans(1)
|
||||
self.assertEqual("GET", span.name)
|
||||
self.assertEqual("GET", span.attributes[SpanAttributes.HTTP_METHOD])
|
||||
self.assertEqual("GET", span.attributes[HTTP_METHOD])
|
||||
self.assertEqual(
|
||||
f"http://{host}:{port}/test-path",
|
||||
span.attributes[SpanAttributes.HTTP_URL],
|
||||
span.attributes[HTTP_URL],
|
||||
)
|
||||
self.assertEqual(200, span.attributes[HTTP_STATUS_CODE])
|
||||
metrics = self._assert_metrics(1)
|
||||
duration_data_point = metrics[0].data.data_points[0]
|
||||
self.assertEqual(duration_data_point.count, 1)
|
||||
self.assertEqual(
|
||||
dict(duration_data_point.attributes),
|
||||
{
|
||||
HTTP_HOST: host,
|
||||
HTTP_STATUS_CODE: 200,
|
||||
HTTP_METHOD: "GET",
|
||||
NET_PEER_NAME: host,
|
||||
NET_PEER_PORT: port,
|
||||
},
|
||||
)
|
||||
self.assertEqual(200, span.attributes[SpanAttributes.HTTP_STATUS_CODE])
|
||||
|
||||
def test_instrument_new_semconv(self):
|
||||
AioHttpClientInstrumentor().uninstrument()
|
||||
|
@ -687,7 +878,7 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
host, port = run_with_test_server(
|
||||
self.get_default_request(), self.URL, self.default_handler
|
||||
)
|
||||
span = self.assert_spans(1)
|
||||
span = self._assert_spans(1)
|
||||
self.assertEqual("GET", span.name)
|
||||
self.assertEqual("GET", span.attributes[HTTP_REQUEST_METHOD])
|
||||
self.assertEqual(
|
||||
|
@ -695,6 +886,18 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
span.attributes[URL_FULL],
|
||||
)
|
||||
self.assertEqual(200, span.attributes[HTTP_RESPONSE_STATUS_CODE])
|
||||
metrics = self._assert_metrics(1)
|
||||
duration_data_point = metrics[0].data.data_points[0]
|
||||
self.assertEqual(duration_data_point.count, 1)
|
||||
self.assertEqual(
|
||||
dict(duration_data_point.attributes),
|
||||
{
|
||||
HTTP_RESPONSE_STATUS_CODE: 200,
|
||||
HTTP_REQUEST_METHOD: "GET",
|
||||
SERVER_ADDRESS: host,
|
||||
SERVER_PORT: port,
|
||||
},
|
||||
)
|
||||
|
||||
def test_instrument_both_semconv(self):
|
||||
AioHttpClientInstrumentor().uninstrument()
|
||||
|
@ -706,17 +909,47 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
self.get_default_request(), self.URL, self.default_handler
|
||||
)
|
||||
url = f"http://{host}:{port}/test-path"
|
||||
attributes = {
|
||||
HTTP_REQUEST_METHOD: "GET",
|
||||
SpanAttributes.HTTP_METHOD: "GET",
|
||||
URL_FULL: url,
|
||||
SpanAttributes.HTTP_URL: url,
|
||||
HTTP_RESPONSE_STATUS_CODE: 200,
|
||||
SpanAttributes.HTTP_STATUS_CODE: 200,
|
||||
}
|
||||
span = self.assert_spans(1)
|
||||
span = self._assert_spans(1)
|
||||
self.assertEqual("GET", span.name)
|
||||
self.assertEqual(span.attributes, attributes)
|
||||
self.assertEqual(
|
||||
dict(span.attributes),
|
||||
{
|
||||
HTTP_REQUEST_METHOD: "GET",
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_HOST: host,
|
||||
URL_FULL: url,
|
||||
HTTP_URL: url,
|
||||
HTTP_RESPONSE_STATUS_CODE: 200,
|
||||
HTTP_STATUS_CODE: 200,
|
||||
SERVER_ADDRESS: host,
|
||||
SERVER_PORT: port,
|
||||
NET_PEER_PORT: port,
|
||||
},
|
||||
)
|
||||
metrics = self._assert_metrics(2)
|
||||
duration_data_point = metrics[0].data.data_points[0]
|
||||
self.assertEqual(duration_data_point.count, 1)
|
||||
self.assertEqual(
|
||||
dict(duration_data_point.attributes),
|
||||
{
|
||||
HTTP_STATUS_CODE: 200,
|
||||
HTTP_METHOD: "GET",
|
||||
HTTP_HOST: host,
|
||||
NET_PEER_NAME: host,
|
||||
NET_PEER_PORT: port,
|
||||
},
|
||||
)
|
||||
duration_data_point = metrics[1].data.data_points[0]
|
||||
self.assertEqual(duration_data_point.count, 1)
|
||||
self.assertEqual(
|
||||
dict(duration_data_point.attributes),
|
||||
{
|
||||
HTTP_RESPONSE_STATUS_CODE: 200,
|
||||
HTTP_REQUEST_METHOD: "GET",
|
||||
SERVER_ADDRESS: host,
|
||||
SERVER_PORT: port,
|
||||
},
|
||||
)
|
||||
|
||||
def test_instrument_with_custom_trace_config(self):
|
||||
trace_config = aiohttp.TraceConfig()
|
||||
|
@ -733,7 +966,7 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
await session.get(TestAioHttpClientInstrumentor.URL)
|
||||
|
||||
run_with_test_server(make_request, self.URL, self.default_handler)
|
||||
self.assert_spans(1)
|
||||
self._assert_spans(1)
|
||||
|
||||
def test_every_request_by_new_session_creates_one_span(self):
|
||||
async def make_request(server: aiohttp.test_utils.TestServer):
|
||||
|
@ -747,7 +980,7 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
run_with_test_server(
|
||||
make_request, self.URL, self.default_handler
|
||||
)
|
||||
self.assert_spans(1)
|
||||
self._assert_spans(1)
|
||||
|
||||
def test_instrument_with_existing_trace_config(self):
|
||||
trace_config = aiohttp.TraceConfig()
|
||||
|
@ -764,7 +997,7 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
await session.get(TestAioHttpClientInstrumentor.URL)
|
||||
|
||||
run_with_test_server(create_session, self.URL, self.default_handler)
|
||||
self.assert_spans(1)
|
||||
self._assert_spans(1)
|
||||
|
||||
def test_no_op_tracer_provider(self):
|
||||
AioHttpClientInstrumentor().uninstrument()
|
||||
|
@ -784,13 +1017,13 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
self.get_default_request(), self.URL, self.default_handler
|
||||
)
|
||||
|
||||
self.assert_spans(0)
|
||||
self._assert_spans(0)
|
||||
|
||||
AioHttpClientInstrumentor().instrument()
|
||||
run_with_test_server(
|
||||
self.get_default_request(), self.URL, self.default_handler
|
||||
)
|
||||
self.assert_spans(1)
|
||||
self._assert_spans(1)
|
||||
|
||||
def test_uninstrument_session(self):
|
||||
async def uninstrument_request(server: aiohttp.test_utils.TestServer):
|
||||
|
@ -802,19 +1035,19 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
run_with_test_server(
|
||||
uninstrument_request, self.URL, self.default_handler
|
||||
)
|
||||
self.assert_spans(0)
|
||||
self._assert_spans(0)
|
||||
|
||||
run_with_test_server(
|
||||
self.get_default_request(), self.URL, self.default_handler
|
||||
)
|
||||
self.assert_spans(1)
|
||||
self._assert_spans(1)
|
||||
|
||||
def test_suppress_instrumentation(self):
|
||||
with suppress_instrumentation():
|
||||
run_with_test_server(
|
||||
self.get_default_request(), self.URL, self.default_handler
|
||||
)
|
||||
self.assert_spans(0)
|
||||
self._assert_spans(0)
|
||||
|
||||
@staticmethod
|
||||
async def suppressed_request(server: aiohttp.test_utils.TestServer):
|
||||
|
@ -826,7 +1059,7 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
run_with_test_server(
|
||||
self.suppressed_request, self.URL, self.default_handler
|
||||
)
|
||||
self.assert_spans(0)
|
||||
self._assert_spans(0)
|
||||
|
||||
def test_suppress_instrumentation_with_server_exception(self):
|
||||
# pylint:disable=unused-argument
|
||||
|
@ -836,7 +1069,7 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
run_with_test_server(
|
||||
self.suppressed_request, self.URL, raising_handler
|
||||
)
|
||||
self.assert_spans(0)
|
||||
self._assert_spans(0)
|
||||
|
||||
def test_url_filter(self):
|
||||
def strip_query_params(url: yarl.URL) -> str:
|
||||
|
@ -849,10 +1082,10 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
host, port = run_with_test_server(
|
||||
self.get_default_request(url), url, self.default_handler
|
||||
)
|
||||
span = self.assert_spans(1)
|
||||
span = self._assert_spans(1)
|
||||
self.assertEqual(
|
||||
f"http://{host}:{port}/test-path",
|
||||
span.attributes[SpanAttributes.HTTP_URL],
|
||||
span.attributes[HTTP_URL],
|
||||
)
|
||||
|
||||
def test_hooks(self):
|
||||
|
@ -877,7 +1110,7 @@ class TestAioHttpClientInstrumentor(TestBase):
|
|||
run_with_test_server(
|
||||
self.get_default_request(url), url, self.default_handler
|
||||
)
|
||||
span = self.assert_spans(1)
|
||||
span = self._assert_spans(1)
|
||||
self.assertEqual("GET - /test-path", span.name)
|
||||
self.assertIn("response_hook_attr", span.attributes)
|
||||
self.assertEqual(span.attributes["response_hook_attr"], "value")
|
||||
|
|
|
@ -8,7 +8,7 @@ dynamic = ["version"]
|
|||
description = "Aiohttp server instrumentation for OpenTelemetry"
|
||||
readme = "README.rst"
|
||||
license = "Apache-2.0"
|
||||
requires-python = ">=3.8"
|
||||
requires-python = ">=3.9"
|
||||
authors = [
|
||||
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io"}
|
||||
]
|
||||
|
@ -18,7 +18,6 @@ classifiers = [
|
|||
"License :: OSI Approved :: Apache Software License",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
|
@ -27,9 +26,9 @@ classifiers = [
|
|||
]
|
||||
dependencies = [
|
||||
"opentelemetry-api ~= 1.12",
|
||||
"opentelemetry-instrumentation == 0.52b0.dev",
|
||||
"opentelemetry-semantic-conventions == 0.52b0.dev",
|
||||
"opentelemetry-util-http == 0.52b0.dev",
|
||||
"opentelemetry-instrumentation == 0.57b0.dev",
|
||||
"opentelemetry-semantic-conventions == 0.57b0.dev",
|
||||
"opentelemetry-util-http == 0.57b0.dev",
|
||||
"wrapt >= 1.0.0, < 2.0.0",
|
||||
]
|
||||
|
||||
|
|
|
@ -54,29 +54,44 @@ from opentelemetry.instrumentation.utils import (
|
|||
)
|
||||
from opentelemetry.propagate import extract
|
||||
from opentelemetry.propagators.textmap import Getter
|
||||
from opentelemetry.semconv._incubating.attributes.http_attributes import (
|
||||
HTTP_FLAVOR,
|
||||
HTTP_HOST,
|
||||
HTTP_METHOD,
|
||||
HTTP_ROUTE,
|
||||
HTTP_SCHEME,
|
||||
HTTP_SERVER_NAME,
|
||||
HTTP_STATUS_CODE,
|
||||
HTTP_TARGET,
|
||||
HTTP_URL,
|
||||
HTTP_USER_AGENT,
|
||||
)
|
||||
from opentelemetry.semconv._incubating.attributes.net_attributes import (
|
||||
NET_HOST_NAME,
|
||||
NET_HOST_PORT,
|
||||
)
|
||||
from opentelemetry.semconv.metrics import MetricInstruments
|
||||
from opentelemetry.semconv.trace import SpanAttributes
|
||||
from opentelemetry.trace.status import Status, StatusCode
|
||||
from opentelemetry.util.http import get_excluded_urls, remove_url_credentials
|
||||
from opentelemetry.util.http import get_excluded_urls, redact_url
|
||||
|
||||
_duration_attrs = [
|
||||
SpanAttributes.HTTP_METHOD,
|
||||
SpanAttributes.HTTP_HOST,
|
||||
SpanAttributes.HTTP_SCHEME,
|
||||
SpanAttributes.HTTP_STATUS_CODE,
|
||||
SpanAttributes.HTTP_FLAVOR,
|
||||
SpanAttributes.HTTP_SERVER_NAME,
|
||||
SpanAttributes.NET_HOST_NAME,
|
||||
SpanAttributes.NET_HOST_PORT,
|
||||
SpanAttributes.HTTP_ROUTE,
|
||||
HTTP_METHOD,
|
||||
HTTP_HOST,
|
||||
HTTP_SCHEME,
|
||||
HTTP_STATUS_CODE,
|
||||
HTTP_FLAVOR,
|
||||
HTTP_SERVER_NAME,
|
||||
NET_HOST_NAME,
|
||||
NET_HOST_PORT,
|
||||
HTTP_ROUTE,
|
||||
]
|
||||
|
||||
_active_requests_count_attrs = [
|
||||
SpanAttributes.HTTP_METHOD,
|
||||
SpanAttributes.HTTP_HOST,
|
||||
SpanAttributes.HTTP_SCHEME,
|
||||
SpanAttributes.HTTP_FLAVOR,
|
||||
SpanAttributes.HTTP_SERVER_NAME,
|
||||
HTTP_METHOD,
|
||||
HTTP_HOST,
|
||||
HTTP_SCHEME,
|
||||
HTTP_FLAVOR,
|
||||
HTTP_SERVER_NAME,
|
||||
]
|
||||
|
||||
tracer = trace.get_tracer(__name__)
|
||||
|
@ -133,6 +148,7 @@ def collect_request_attributes(request: web.Request) -> Dict:
|
|||
request.url.port,
|
||||
str(request.url),
|
||||
)
|
||||
|
||||
query_string = request.query_string
|
||||
if query_string and http_url:
|
||||
if isinstance(query_string, bytes):
|
||||
|
@ -140,29 +156,27 @@ def collect_request_attributes(request: web.Request) -> Dict:
|
|||
http_url += "?" + urllib.parse.unquote(query_string)
|
||||
|
||||
result = {
|
||||
SpanAttributes.HTTP_SCHEME: request.scheme,
|
||||
SpanAttributes.HTTP_HOST: server_host,
|
||||
SpanAttributes.NET_HOST_PORT: port,
|
||||
SpanAttributes.HTTP_ROUTE: _get_view_func(request),
|
||||
SpanAttributes.HTTP_FLAVOR: f"{request.version.major}.{request.version.minor}",
|
||||
SpanAttributes.HTTP_TARGET: request.path,
|
||||
SpanAttributes.HTTP_URL: remove_url_credentials(http_url),
|
||||
HTTP_SCHEME: request.scheme,
|
||||
HTTP_HOST: server_host,
|
||||
NET_HOST_PORT: port,
|
||||
HTTP_ROUTE: _get_view_func(request),
|
||||
HTTP_FLAVOR: f"{request.version.major}.{request.version.minor}",
|
||||
HTTP_TARGET: request.path,
|
||||
HTTP_URL: redact_url(http_url),
|
||||
}
|
||||
|
||||
http_method = request.method
|
||||
if http_method:
|
||||
result[SpanAttributes.HTTP_METHOD] = http_method
|
||||
result[HTTP_METHOD] = http_method
|
||||
|
||||
http_host_value_list = (
|
||||
[request.host] if not isinstance(request.host, list) else request.host
|
||||
)
|
||||
if http_host_value_list:
|
||||
result[SpanAttributes.HTTP_SERVER_NAME] = ",".join(
|
||||
http_host_value_list
|
||||
)
|
||||
result[HTTP_SERVER_NAME] = ",".join(http_host_value_list)
|
||||
http_user_agent = request.headers.get("user-agent")
|
||||
if http_user_agent:
|
||||
result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent
|
||||
result[HTTP_USER_AGENT] = http_user_agent
|
||||
|
||||
# remove None values
|
||||
result = {k: v for k, v in result.items() if v is not None}
|
||||
|
@ -183,7 +197,7 @@ def set_status_code(span, status_code: int) -> None:
|
|||
)
|
||||
)
|
||||
else:
|
||||
span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)
|
||||
span.set_attribute(HTTP_STATUS_CODE, status_code)
|
||||
span.set_status(
|
||||
Status(http_status_to_status_code(status_code, server_span=True))
|
||||
)
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue