Compare commits

..

1 Commit

Author: OpenTelemetry Bot
SHA1: a8c12a490b
Message: Prepare release for opentelemetry-sdk-extension-aws v2.1.0 (#3150)
Date: 2024-12-24 15:51:40 +01:00
576 changed files with 7559 additions and 49741 deletions

View File

@ -18,7 +18,7 @@ body:
Please describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main.
value: |
OS: (e.g., Ubuntu)
Python version: (e.g., Python 3.9.10)
Python version: (e.g., Python 3.8.10)
Package version: (e.g., 0.46.0)
- type: textarea
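As a quick illustration of the environment details requested above, the following minimal Python sketch (not part of the issue template or this repo) is one way a reporter could gather them; the package name "opentelemetry-sdk-extension-aws" is only an example.

# Minimal sketch: collect the environment info requested by the bug template.
# Assumes Python 3.8+ for importlib.metadata; the package name is an example.
import platform
import sys
from importlib.metadata import PackageNotFoundError, version

print("OS:", platform.platform())
print("Python version:", sys.version.split()[0])
try:
    # Replace with the package you are reporting against.
    print("Package version:", version("opentelemetry-sdk-extension-aws"))
except PackageNotFoundError:
    print("Package version: not installed")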

View File

@ -11,9 +11,6 @@ components:
- oxeye-nikolay
- nikosokolik
instrumentation/opentelemetry-instrumentation-asyncclick:
- jomcgi
instrumentation/opentelemetry-instrumentation-kafka-python:
- nozik
@ -64,9 +61,6 @@ components:
instrumentation/opentelemetry-instrumentation-psycopg:
- federicobond
instrumentation/opentelemetry-instrumentation-pymssql:
- guillaumep
instrumentation/opentelemetry-instrumentation-aiokafka:
- dimastbk

View File

@ -10,10 +10,6 @@ jobs:
run_self:
runs-on: ubuntu-latest
name: Auto Assign Owners
permissions:
contents: read # to read changed files
issues: write # to read/write issue assignees
pull-requests: write # to read/write PR reviewers
# Don't fail tests if this workflow fails. Some pending issues:
# - https://github.com/dyladan/component-owners/issues/8
continue-on-error: true

File diff suppressed because it is too large.

View File

@ -1,20 +0,0 @@
name: FOSSA scanning
on:
push:
branches:
- main
permissions:
contents: read
jobs:
fossa:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: fossas/fossa-action@93a52ecf7c3ac7eb40f5de77fd69b1a19524de94 # v1.5.0
with:
api-key: ${{secrets.FOSSA_API_KEY}}
team: OpenTelemetry

View File

@ -7,7 +7,7 @@ name = "generate-workflows-lib"
dynamic = ["version"]
description = "A library to generate workflows"
license = "Apache-2.0"
requires-python = ">=3.9"
requires-python = ">=3.8"
authors = [
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
]
@ -17,11 +17,11 @@ classifiers = [
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Typing :: Typed",
]
dependencies = ["Jinja2", "tox"]

View File

@ -14,7 +14,7 @@ _tox_test_env_regex = re_compile(
)
_tox_lint_env_regex = re_compile(r"lint-(?P<name>[-\w]+)")
_tox_contrib_env_regex = re_compile(
r"py39-test-(?P<name>[-\w]+\w)-?(?P<contrib_requirements>\d+)?"
r"py38-test-(?P<name>[-\w]+\w)-?(?P<contrib_requirements>\d+)?"
)
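For reference, here is a minimal standalone sketch (not taken from this diff) of how the contrib-env pattern above can be applied to a tox environment name; the py38 form shown above is assumed, and the sample environment name is hypothetical.

# Standalone sketch exercising the contrib-env regex (py38 form assumed).
from re import compile as re_compile

_tox_contrib_env_regex = re_compile(
    r"py38-test-(?P<name>[-\w]+\w)-?(?P<contrib_requirements>\d+)?"
)

# Hypothetical tox environment name.
match = _tox_contrib_env_regex.match("py38-test-instrumentation-confluent-kafka")
if match:
    print(match.group("name"))                  # -> instrumentation-confluent-kafka
    print(match.group("contrib_requirements"))  # -> None (no numbered requirements suffix)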
@ -47,13 +47,12 @@ def get_test_job_datas(tox_envs: list, operating_systems: list) -> list:
os_alias = {"ubuntu-latest": "Ubuntu", "windows-latest": "Windows"}
python_version_alias = {
"pypy3": "pypy-3.9",
"pypy310": "pypy-3.10",
"pypy3": "pypy-3.8",
"py38": "3.8",
"py39": "3.9",
"py310": "3.10",
"py311": "3.11",
"py312": "3.12",
"py313": "3.13",
}
test_job_datas = []

View File

@ -12,7 +12,6 @@ on:
CONTRIB_REPO_SHA:
required: true
type: string
env:
CORE_REPO_SHA: ${% raw %}{{ inputs.CORE_REPO_SHA }}{% endraw %}
CONTRIB_REPO_SHA: ${% raw %}{{ inputs.CONTRIB_REPO_SHA }}{% endraw %}
@ -24,7 +23,6 @@ jobs:
{{ job_data.tox_env }}:
name: {{ job_data.ui_name }}
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout contrib repo @ SHA - ${% raw %}{{ env.CONTRIB_REPO_SHA }}{% endraw %}
uses: actions/checkout@v4
@ -32,14 +30,14 @@ jobs:
repository: open-telemetry/opentelemetry-python-contrib
ref: ${% raw %}{{ env.CONTRIB_REPO_SHA }}{% endraw %}
- name: Set up Python 3.9
- name: Set up Python 3.8
uses: actions/setup-python@v5
with:
python-version: "3.9"
python-version: "3.8"
architecture: "x64"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e {{ job_data.tox_env }} -- -ra

View File

@ -9,20 +9,8 @@ on:
- 'release/*'
pull_request:
concurrency:
group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %}
cancel-in-progress: true
env:
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
# For PRs you can change the inner fallback ('main')
# For pushes you change the outer fallback ('main')
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
CORE_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && (
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
'main'
) || 'main' }}{% endraw %}
CORE_REPO_SHA: main
CONTRIB_REPO_SHA: main
PIP_EXISTS_ACTION: w
@ -32,18 +20,17 @@ jobs:
{{ job_data.name }}:
name: {{ job_data.ui_name }}
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${% raw %}{{ github.sha }}{% endraw %}
uses: actions/checkout@v4
- name: Set up Python 3.13
- name: Set up Python 3.12
uses: actions/setup-python@v5
with:
python-version: "3.13"
python-version: "3.12"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e {{ job_data.tox_env }}

View File

@ -9,20 +9,8 @@ on:
- 'release/*'
pull_request:
concurrency:
group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %}
cancel-in-progress: true
env:
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
# For PRs you can change the inner fallback ('main')
# For pushes you change the outer fallback ('main')
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
CORE_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && (
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
'main'
) || 'main' }}{% endraw %}
CORE_REPO_SHA: main
CONTRIB_REPO_SHA: main
PIP_EXISTS_ACTION: w
@ -32,7 +20,6 @@ jobs:
{{ job_data }}:
name: {{ job_data }}
runs-on: ubuntu-latest
timeout-minutes: 30
{%- if job_data == "generate-workflows" %}
if: |
!contains(github.event.pull_request.labels.*.name, 'Skip generate-workflows')
@ -70,7 +57,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e {{ job_data }}

View File

@ -9,20 +9,8 @@ on:
- 'release/*'
pull_request:
concurrency:
group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %}
cancel-in-progress: true
env:
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
# For PRs you can change the inner fallback ('main')
# For pushes you change the outer fallback ('main')
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
CORE_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && (
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
'main'
) || 'main' }}{% endraw %}
CORE_REPO_SHA: main
CONTRIB_REPO_SHA: main
PIP_EXISTS_ACTION: w
@ -32,7 +20,6 @@ jobs:
{{ job_data.name }}:
name: {{ job_data.ui_name }}
runs-on: {{ job_data.os }}
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${% raw %}{{ github.sha }}{% endraw %}
uses: actions/checkout@v4
@ -43,7 +30,7 @@ jobs:
python-version: "{{ job_data.python_version }}"
- name: Install tox
run: pip install tox-uv
run: pip install tox
{%- if job_data.os == "windows-latest" %}
- name: Configure git to support long filenames

File diff suppressed because it is too large.

View File

@ -9,20 +9,8 @@ on:
- 'release/*'
pull_request:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
# For PRs you can change the inner fallback ('main')
# For pushes you change the outer fallback ('main')
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
CORE_REPO_SHA: ${{ github.event_name == 'pull_request' && (
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
'main'
) || 'main' }}
CORE_REPO_SHA: main
CONTRIB_REPO_SHA: main
PIP_EXISTS_ACTION: w
@ -31,7 +19,6 @@ jobs:
spellcheck:
name: spellcheck
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -42,7 +29,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e spellcheck
@ -50,7 +37,6 @@ jobs:
docker-tests:
name: docker-tests
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -61,7 +47,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e docker-tests
@ -69,7 +55,6 @@ jobs:
docs:
name: docs
runs-on: ubuntu-latest
timeout-minutes: 30
if: |
github.event.pull_request.user.login != 'opentelemetrybot' && github.event_name == 'pull_request'
steps:
@ -82,7 +67,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e docs
@ -90,7 +75,6 @@ jobs:
generate:
name: generate
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -101,7 +85,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e generate
@ -112,7 +96,6 @@ jobs:
generate-workflows:
name: generate-workflows
runs-on: ubuntu-latest
timeout-minutes: 30
if: |
!contains(github.event.pull_request.labels.*.name, 'Skip generate-workflows')
&& github.event.pull_request.user.login != 'opentelemetrybot' && github.event_name == 'pull_request'
@ -126,7 +109,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e generate-workflows
@ -137,7 +120,6 @@ jobs:
shellcheck:
name: shellcheck
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -148,7 +130,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e shellcheck
@ -156,7 +138,6 @@ jobs:
ruff:
name: ruff
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -167,26 +148,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e ruff
typecheck:
name: typecheck
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.11
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e typecheck

View File

@ -1,47 +0,0 @@
name: OSSF Scorecard
on:
push:
branches:
- main
schedule:
- cron: "10 6 * * 1" # once a week
workflow_dispatch:
permissions: read-all
jobs:
analysis:
runs-on: ubuntu-latest
permissions:
# Needed for Code scanning upload
security-events: write
# Needed for GitHub OIDC token if publish_results is true
id-token: write
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
with:
results_file: results.sarif
results_format: sarif
publish_results: true
# Upload the results as artifacts (optional). Commenting out will disable
# uploads of run results in SARIF format to the repository Actions tab.
# https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts
- name: "Upload artifact"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: SARIF file
path: results.sarif
retention-days: 5
# Upload the results to GitHub's code scanning dashboard (optional).
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@5f8171a638ada777af81d42b55959a643bb29017 # v3.28.12
with:
sarif_file: results.sarif

View File

@ -9,12 +9,8 @@ on:
- opentelemetry-resource-detector-azure
- opentelemetry-sdk-extension-aws
- opentelemetry-instrumentation-openai-v2
- opentelemetry-instrumentation-vertexai
- opentelemetry-instrumentation-google-genai
description: 'Package to be released'
required: true
run-name: "[Package][${{ inputs.package }}] Prepare patch release"
jobs:
prepare-patch-release:
runs-on: ubuntu-latest

View File

@ -9,12 +9,9 @@ on:
- opentelemetry-resource-detector-azure
- opentelemetry-sdk-extension-aws
- opentelemetry-instrumentation-openai-v2
- opentelemetry-instrumentation-vertexai
- opentelemetry-instrumentation-google-genai
description: 'Package to be released'
required: true
run-name: "[Package][${{ inputs.package }}] Prepare release"
jobs:
prereqs:
runs-on: ubuntu-latest

View File

@ -9,11 +9,8 @@ on:
- opentelemetry-resource-detector-azure
- opentelemetry-sdk-extension-aws
- opentelemetry-instrumentation-openai-v2
- opentelemetry-instrumentation-vertexai
- opentelemetry-instrumentation-google-genai
description: 'Package to be released'
required: true
run-name: "[Package][${{ inputs.package }}] Release"
jobs:
release:
runs-on: ubuntu-latest
@ -78,7 +75,7 @@ jobs:
# next few steps publish to pypi
- uses: actions/setup-python@v5
with:
python-version: '3.9'
python-version: '3.8'
- name: Build wheels
run: ./scripts/build_a_package.sh

View File

@ -71,7 +71,6 @@ jobs:
run: .github/scripts/use-cla-approved-github-bot.sh
- name: Create pull request
id: create_pr
env:
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
GITHUB_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }}
@ -81,15 +80,7 @@ jobs:
git commit -a -m "$message"
git push origin HEAD:$branch
pr_url=$(gh pr create --title "[$GITHUB_REF_NAME] $message" \
gh pr create --title "[$GITHUB_REF_NAME] $message" \
--body "$message." \
--head $branch \
--base $GITHUB_REF_NAME)
echo "pr_url=$pr_url" >> $GITHUB_OUTPUT
- name: Add prepare-release label to PR
if: steps.create_pr.outputs.pr_url != ''
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh pr edit ${{ steps.create_pr.outputs.pr_url }} --add-label "prepare-release"
--base $GITHUB_REF_NAME

View File

@ -94,7 +94,6 @@ jobs:
run: .github/scripts/use-cla-approved-github-bot.sh
- name: Create pull request against the release branch
id: create_release_branch_pr
env:
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
GITHUB_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }}
@ -104,18 +103,10 @@ jobs:
git commit -a -m "$message"
git push origin HEAD:$branch
pr_url=$(gh pr create --title "[$RELEASE_BRANCH_NAME] $message" \
gh pr create --title "[$RELEASE_BRANCH_NAME] $message" \
--body "$message." \
--head $branch \
--base $RELEASE_BRANCH_NAME)
echo "pr_url=$pr_url" >> $GITHUB_OUTPUT
- name: Add prepare-release label to PR
if: steps.create_release_branch_pr.outputs.pr_url != ''
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh pr edit ${{ steps.create_release_branch_pr.outputs.pr_url }} --add-label "prepare-release"
--base $RELEASE_BRANCH_NAME
create-pull-request-against-main:
runs-on: ubuntu-latest
@ -188,7 +179,6 @@ jobs:
run: .github/scripts/use-cla-approved-github-bot.sh
- name: Create pull request against main
id: create_main_pr
env:
# not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows
GITHUB_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }}
@ -199,15 +189,7 @@ jobs:
git commit -a -m "$message"
git push origin HEAD:$branch
pr_url=$(gh pr create --title "$message" \
gh pr create --title "$message" \
--body "$body" \
--head $branch \
--base main)
echo "pr_url=$pr_url" >> $GITHUB_OUTPUT
- name: Add prepare-release label to PR
if: steps.create_main_pr.outputs.pr_url != ''
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh pr edit ${{ steps.create_main_pr.outputs.pr_url }} --add-label "prepare-release"
--base main

View File

@ -66,7 +66,7 @@ jobs:
# next few steps publish to pypi
- uses: actions/setup-python@v5
with:
python-version: '3.9'
python-version: '3.8'
- name: Build wheels
run: ./scripts/build.sh

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@ -9,542 +9,34 @@ on:
- 'release/*'
pull_request:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
# Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main'
# For PRs you can change the inner fallback ('main')
# For pushes you change the outer fallback ('main')
# The logic below is used during releases and depends on having an equivalent branch name in the core repo.
CORE_REPO_SHA: ${{ github.event_name == 'pull_request' && (
contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref ||
contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref ||
'main'
) || 'main' }}
CORE_REPO_SHA: main
CONTRIB_REPO_SHA: main
PIP_EXISTS_ACTION: w
jobs:
py312-test-instrumentation-aio-pika-1_ubuntu-latest:
name: instrumentation-aio-pika-1 3.12 Ubuntu
py38-test-instrumentation-confluent-kafka_ubuntu-latest:
name: instrumentation-confluent-kafka 3.8 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.12
- name: Set up Python 3.8
uses: actions/setup-python@v5
with:
python-version: "3.12"
python-version: "3.8"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py312-test-instrumentation-aio-pika-1 -- -ra
py312-test-instrumentation-aio-pika-2_ubuntu-latest:
name: instrumentation-aio-pika-2 3.12 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.12
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py312-test-instrumentation-aio-pika-2 -- -ra
py312-test-instrumentation-aio-pika-3_ubuntu-latest:
name: instrumentation-aio-pika-3 3.12 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.12
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py312-test-instrumentation-aio-pika-3 -- -ra
py313-test-instrumentation-aio-pika-0_ubuntu-latest:
name: instrumentation-aio-pika-0 3.13 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.13
uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py313-test-instrumentation-aio-pika-0 -- -ra
py313-test-instrumentation-aio-pika-1_ubuntu-latest:
name: instrumentation-aio-pika-1 3.13 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.13
uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py313-test-instrumentation-aio-pika-1 -- -ra
py313-test-instrumentation-aio-pika-2_ubuntu-latest:
name: instrumentation-aio-pika-2 3.13 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.13
uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py313-test-instrumentation-aio-pika-2 -- -ra
py313-test-instrumentation-aio-pika-3_ubuntu-latest:
name: instrumentation-aio-pika-3 3.13 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.13
uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py313-test-instrumentation-aio-pika-3 -- -ra
pypy3-test-instrumentation-aio-pika-0_ubuntu-latest:
name: instrumentation-aio-pika-0 pypy-3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python pypy-3.9
uses: actions/setup-python@v5
with:
python-version: "pypy-3.9"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e pypy3-test-instrumentation-aio-pika-0 -- -ra
pypy3-test-instrumentation-aio-pika-1_ubuntu-latest:
name: instrumentation-aio-pika-1 pypy-3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python pypy-3.9
uses: actions/setup-python@v5
with:
python-version: "pypy-3.9"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e pypy3-test-instrumentation-aio-pika-1 -- -ra
pypy3-test-instrumentation-aio-pika-2_ubuntu-latest:
name: instrumentation-aio-pika-2 pypy-3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python pypy-3.9
uses: actions/setup-python@v5
with:
python-version: "pypy-3.9"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e pypy3-test-instrumentation-aio-pika-2 -- -ra
pypy3-test-instrumentation-aio-pika-3_ubuntu-latest:
name: instrumentation-aio-pika-3 pypy-3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python pypy-3.9
uses: actions/setup-python@v5
with:
python-version: "pypy-3.9"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e pypy3-test-instrumentation-aio-pika-3 -- -ra
py39-test-instrumentation-aiokafka_ubuntu-latest:
name: instrumentation-aiokafka 3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.9
uses: actions/setup-python@v5
with:
python-version: "3.9"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py39-test-instrumentation-aiokafka -- -ra
py310-test-instrumentation-aiokafka_ubuntu-latest:
name: instrumentation-aiokafka 3.10 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py310-test-instrumentation-aiokafka -- -ra
py311-test-instrumentation-aiokafka_ubuntu-latest:
name: instrumentation-aiokafka 3.11 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.11
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py311-test-instrumentation-aiokafka -- -ra
py312-test-instrumentation-aiokafka_ubuntu-latest:
name: instrumentation-aiokafka 3.12 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.12
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py312-test-instrumentation-aiokafka -- -ra
py313-test-instrumentation-aiokafka_ubuntu-latest:
name: instrumentation-aiokafka 3.13 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.13
uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py313-test-instrumentation-aiokafka -- -ra
pypy3-test-instrumentation-aiokafka_ubuntu-latest:
name: instrumentation-aiokafka pypy-3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python pypy-3.9
uses: actions/setup-python@v5
with:
python-version: "pypy-3.9"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e pypy3-test-instrumentation-aiokafka -- -ra
py39-test-instrumentation-kafka-python_ubuntu-latest:
name: instrumentation-kafka-python 3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.9
uses: actions/setup-python@v5
with:
python-version: "3.9"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py39-test-instrumentation-kafka-python -- -ra
py310-test-instrumentation-kafka-python_ubuntu-latest:
name: instrumentation-kafka-python 3.10 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py310-test-instrumentation-kafka-python -- -ra
py311-test-instrumentation-kafka-python_ubuntu-latest:
name: instrumentation-kafka-python 3.11 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.11
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py311-test-instrumentation-kafka-python -- -ra
py39-test-instrumentation-kafka-pythonng_ubuntu-latest:
name: instrumentation-kafka-pythonng 3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.9
uses: actions/setup-python@v5
with:
python-version: "3.9"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py39-test-instrumentation-kafka-pythonng -- -ra
py310-test-instrumentation-kafka-pythonng_ubuntu-latest:
name: instrumentation-kafka-pythonng 3.10 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py310-test-instrumentation-kafka-pythonng -- -ra
py311-test-instrumentation-kafka-pythonng_ubuntu-latest:
name: instrumentation-kafka-pythonng 3.11 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.11
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py311-test-instrumentation-kafka-pythonng -- -ra
py312-test-instrumentation-kafka-pythonng_ubuntu-latest:
name: instrumentation-kafka-pythonng 3.12 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.12
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py312-test-instrumentation-kafka-pythonng -- -ra
py313-test-instrumentation-kafka-pythonng_ubuntu-latest:
name: instrumentation-kafka-pythonng 3.13 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.13
uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py313-test-instrumentation-kafka-pythonng -- -ra
pypy3-test-instrumentation-kafka-python_ubuntu-latest:
name: instrumentation-kafka-python pypy-3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python pypy-3.9
uses: actions/setup-python@v5
with:
python-version: "pypy-3.9"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e pypy3-test-instrumentation-kafka-python -- -ra
pypy3-test-instrumentation-kafka-pythonng_ubuntu-latest:
name: instrumentation-kafka-pythonng pypy-3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python pypy-3.9
uses: actions/setup-python@v5
with:
python-version: "pypy-3.9"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e pypy3-test-instrumentation-kafka-pythonng -- -ra
run: tox -e py38-test-instrumentation-confluent-kafka -- -ra
py39-test-instrumentation-confluent-kafka_ubuntu-latest:
name: instrumentation-confluent-kafka 3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -555,7 +47,7 @@ jobs:
python-version: "3.9"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py39-test-instrumentation-confluent-kafka -- -ra
@ -563,7 +55,6 @@ jobs:
py310-test-instrumentation-confluent-kafka_ubuntu-latest:
name: instrumentation-confluent-kafka 3.10 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -574,7 +65,7 @@ jobs:
python-version: "3.10"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py310-test-instrumentation-confluent-kafka -- -ra
@ -582,7 +73,6 @@ jobs:
py311-test-instrumentation-confluent-kafka_ubuntu-latest:
name: instrumentation-confluent-kafka 3.11 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -593,7 +83,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py311-test-instrumentation-confluent-kafka -- -ra
@ -601,7 +91,6 @@ jobs:
py312-test-instrumentation-confluent-kafka_ubuntu-latest:
name: instrumentation-confluent-kafka 3.12 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -612,34 +101,32 @@ jobs:
python-version: "3.12"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py312-test-instrumentation-confluent-kafka -- -ra
py313-test-instrumentation-confluent-kafka_ubuntu-latest:
name: instrumentation-confluent-kafka 3.13 Ubuntu
py38-test-instrumentation-asyncio_ubuntu-latest:
name: instrumentation-asyncio 3.8 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.13
- name: Set up Python 3.8
uses: actions/setup-python@v5
with:
python-version: "3.13"
python-version: "3.8"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py313-test-instrumentation-confluent-kafka -- -ra
run: tox -e py38-test-instrumentation-asyncio -- -ra
py39-test-instrumentation-asyncio_ubuntu-latest:
name: instrumentation-asyncio 3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -650,7 +137,7 @@ jobs:
python-version: "3.9"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py39-test-instrumentation-asyncio -- -ra
@ -658,7 +145,6 @@ jobs:
py310-test-instrumentation-asyncio_ubuntu-latest:
name: instrumentation-asyncio 3.10 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -669,7 +155,7 @@ jobs:
python-version: "3.10"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py310-test-instrumentation-asyncio -- -ra
@ -677,7 +163,6 @@ jobs:
py311-test-instrumentation-asyncio_ubuntu-latest:
name: instrumentation-asyncio 3.11 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -688,7 +173,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py311-test-instrumentation-asyncio -- -ra
@ -696,7 +181,6 @@ jobs:
py312-test-instrumentation-asyncio_ubuntu-latest:
name: instrumentation-asyncio 3.12 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -707,34 +191,32 @@ jobs:
python-version: "3.12"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py312-test-instrumentation-asyncio -- -ra
py313-test-instrumentation-asyncio_ubuntu-latest:
name: instrumentation-asyncio 3.13 Ubuntu
py38-test-instrumentation-cassandra_ubuntu-latest:
name: instrumentation-cassandra 3.8 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.13
- name: Set up Python 3.8
uses: actions/setup-python@v5
with:
python-version: "3.13"
python-version: "3.8"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py313-test-instrumentation-asyncio -- -ra
run: tox -e py38-test-instrumentation-cassandra -- -ra
py39-test-instrumentation-cassandra_ubuntu-latest:
name: instrumentation-cassandra 3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -745,7 +227,7 @@ jobs:
python-version: "3.9"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py39-test-instrumentation-cassandra -- -ra
@ -753,7 +235,6 @@ jobs:
py310-test-instrumentation-cassandra_ubuntu-latest:
name: instrumentation-cassandra 3.10 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -764,7 +245,7 @@ jobs:
python-version: "3.10"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py310-test-instrumentation-cassandra -- -ra
@ -772,7 +253,6 @@ jobs:
py311-test-instrumentation-cassandra_ubuntu-latest:
name: instrumentation-cassandra 3.11 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -783,7 +263,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py311-test-instrumentation-cassandra -- -ra
@ -791,7 +271,6 @@ jobs:
py312-test-instrumentation-cassandra_ubuntu-latest:
name: instrumentation-cassandra 3.12 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -802,53 +281,50 @@ jobs:
python-version: "3.12"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py312-test-instrumentation-cassandra -- -ra
py313-test-instrumentation-cassandra_ubuntu-latest:
name: instrumentation-cassandra 3.13 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.13
uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py313-test-instrumentation-cassandra -- -ra
pypy3-test-instrumentation-cassandra_ubuntu-latest:
name: instrumentation-cassandra pypy-3.9 Ubuntu
name: instrumentation-cassandra pypy-3.8 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python pypy-3.9
- name: Set up Python pypy-3.8
uses: actions/setup-python@v5
with:
python-version: "pypy-3.9"
python-version: "pypy-3.8"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e pypy3-test-instrumentation-cassandra -- -ra
py38-test-processor-baggage_ubuntu-latest:
name: processor-baggage 3.8 Ubuntu
runs-on: ubuntu-latest
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.8
uses: actions/setup-python@v5
with:
python-version: "3.8"
- name: Install tox
run: pip install tox
- name: Run tests
run: tox -e py38-test-processor-baggage -- -ra
py39-test-processor-baggage_ubuntu-latest:
name: processor-baggage 3.9 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -859,7 +335,7 @@ jobs:
python-version: "3.9"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py39-test-processor-baggage -- -ra
@ -867,7 +343,6 @@ jobs:
py310-test-processor-baggage_ubuntu-latest:
name: processor-baggage 3.10 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -878,7 +353,7 @@ jobs:
python-version: "3.10"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py310-test-processor-baggage -- -ra
@ -886,7 +361,6 @@ jobs:
py311-test-processor-baggage_ubuntu-latest:
name: processor-baggage 3.11 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -897,7 +371,7 @@ jobs:
python-version: "3.11"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py311-test-processor-baggage -- -ra
@ -905,7 +379,6 @@ jobs:
py312-test-processor-baggage_ubuntu-latest:
name: processor-baggage 3.12 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
@ -916,45 +389,25 @@ jobs:
python-version: "3.12"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e py312-test-processor-baggage -- -ra
py313-test-processor-baggage_ubuntu-latest:
name: processor-baggage 3.13 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python 3.13
uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install tox
run: pip install tox-uv
- name: Run tests
run: tox -e py313-test-processor-baggage -- -ra
pypy3-test-processor-baggage_ubuntu-latest:
name: processor-baggage pypy-3.9 Ubuntu
name: processor-baggage pypy-3.8 Ubuntu
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout repo @ SHA - ${{ github.sha }}
uses: actions/checkout@v4
- name: Set up Python pypy-3.9
- name: Set up Python pypy-3.8
uses: actions/setup-python@v5
with:
python-version: "pypy-3.9"
python-version: "pypy-3.8"
- name: Install tox
run: pip install tox-uv
run: pip install tox
- name: Run tests
run: tox -e pypy3-test-processor-baggage -- -ra

View File

@ -1,15 +1,10 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.9
hooks:
# Run the linter.
- id: ruff
args: ["--fix", "--show-fixes"]
# Run the formatter.
- id: ruff-format
- repo: https://github.com/astral-sh/uv-pre-commit
# uv version.
rev: 0.6.0
hooks:
- id: uv-lock
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.9
hooks:
# Run the linter.
- id: ruff
args: ["--fix", "--show-fixes"]
# Run the formatter.
- id: ruff-format

View File

@ -7,7 +7,7 @@ extension-pkg-whitelist=cassandra
# Add list of files or directories to be excluded. They should be base names, not
# paths.
ignore=CVS,gen,Dockerfile,docker-compose.yml,README.md,requirements.txt,docs,.venv,site-packages,.tox
ignore=CVS,gen,Dockerfile,docker-compose.yml,README.md,requirements.txt,docs,.venv
# Add files or directories matching the regex patterns to be excluded. The
# regex matches against base names, not paths.
@ -46,7 +46,7 @@ suggestion-mode=yes
unsafe-load-any-extension=no
# Run python dependant checks considering the baseline version
py-version=3.9
py-version=3.8
[MESSAGES CONTROL]

View File

@ -6,9 +6,9 @@ sphinx:
configuration: docs/conf.py
build:
os: "ubuntu-24.04"
os: "ubuntu-22.04"
tools:
python: "3.11"
python: "3.8"
python:
install:

View File

@ -11,250 +11,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## Unreleased
## Version 1.35.0/0.56b0 (2025-07-11)
### Added
- `opentelemetry-instrumentation-pika` Added instrumentation for All `SelectConnection` adapters
([#3584](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3584))
- `opentelemetry-instrumentation-tornado` Add support for `WebSocketHandler` instrumentation
([#3498](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3498))
- `opentelemetry-util-http` Added support for redacting specific url query string values and url credentials in instrumentations
([#3508](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3508))
- `opentelemetry-instrumentation-pymongo` `aggregate` and `getMore` capture statements support
([#3601](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3601))
### Fixed
- `opentelemetry-instrumentation-asgi`: fix excluded_urls in instrumentation-asgi
([#3567](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3567))
- `opentelemetry-resource-detector-containerid`: make it more quiet on platforms without cgroups
([#3579](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3579))
## Version 1.34.0/0.55b0 (2025-06-04)
### Fixed
- `opentelemetry-instrumentation-system-metrics`: fix loading on Google Cloud Run
([#3533](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3533))
- `opentelemetry-instrumentation-fastapi`: fix wrapping of middlewares
([#3012](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3012))
- `opentelemetry-instrumentation-starlette` Remove max version constraint on starlette
([#3456](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3456))
- `opentelemetry-instrumentation-starlette` Fix memory leak and double middleware
([#3529](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3529))
- `opentelemetry-instrumentation-urllib3`: proper bucket boundaries in stable semconv http duration metrics
([#3518](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3518))
- `opentelemetry-instrumentation-urllib`: proper bucket boundaries in stable semconv http duration metrics
([#3519](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3519))
- `opentelemetry-instrumentation-falcon`: proper bucket boundaries in stable semconv http duration
([#3525](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3525))
- `opentelemetry-instrumentation-wsgi`: add explicit http duration buckets for stable semconv
([#3527](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3527))
- `opentelemetry-instrumentation-asgi`: add explicit http duration buckets for stable semconv
([#3526](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3526))
- `opentelemetry-instrumentation-flask`: proper bucket boundaries in stable semconv http duration
([#3523](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3523))
- `opentelemetry-instrumentation-django`: proper bucket boundaries in stable semconv http duration
([#3524](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3524))
- `opentelemetry-instrumentation-grpc`: support non-list interceptors
([#3520](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3520))
- `opentelemetry-instrumentation-botocore` Ensure spans end on early stream closure for Bedrock Streaming APIs
([#3481](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3481))
- `opentelemetry-instrumentation-sqlalchemy` Respect suppress_instrumentation functionality
([#3477](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3477))
- `opentelemetry-instrumentation-botocore`: fix handling of tool input in Bedrock ConverseStream
([#3544](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3544))
- `opentelemetry-instrumentation-botocore` Add type check when extracting tool use from Bedrock request message content
([#3548](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3548))
- `opentelemetry-instrumentation-dbapi` Respect suppress_instrumentation functionality ([#3460](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3460))
- `opentelemetry-resource-detector-container` Correctly parse container id when using systemd and cgroupsv1
([#3429](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3429))
### Breaking changes
- `opentelemetry-instrumentation-botocore` Use `cloud.region` instead of `aws.region` span attribute as per semantic conventions.
([#3474](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3474))
- `opentelemetry-instrumentation-fastapi`: Drop support for FastAPI versions earlier than `0.92`
([#3012](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3012))
- `opentelemetry-resource-detector-container`: rename package name to `opentelemetry-resource-detector-containerid`
([#3536](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3536))
### Added
- `opentelemetry-instrumentation-aiohttp-client` Add support for HTTP metrics
([#3517](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3517))
- `opentelemetry-instrumentation-httpx` Add support for HTTP metrics
([#3513](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3513))
- `opentelemetry-instrumentation` Allow re-raising exception when instrumentation fails
([#3545](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3545))
- `opentelemetry-instrumentation-aiokafka` Add instrumentation of `consumer.getmany` (batch)
([#3257](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3257))
### Deprecated
- Drop support for Python 3.8, bump baseline to Python 3.9.
([#3399](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3399))
## Version 1.33.0/0.54b0 (2025-05-09)
### Added
- `opentelemetry-instrumentation-requests` Support explicit_bucket_boundaries_advisory in duration metrics
([#3464](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3464))
- `opentelemetry-instrumentation-redis` Add support for redis client-specific instrumentation.
([#3143](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3143))
### Fixed
- `opentelemetry-instrumentation` Catch `ModuleNotFoundError` when the library is not installed
and log as debug instead of exception
([#3423](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3423))
- `opentelemetry-instrumentation-asyncio` Fix duplicate instrumentation
([#3383](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3383))
- `opentelemetry-instrumentation-botocore` Add GenAI instrumentation for additional Bedrock models for InvokeModel API
([#3419](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3419))
- `opentelemetry-instrumentation` don't print duplicated conflict log error message
([#3432](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3432))
- `opentelemetry-instrumentation-grpc` Check for None result in gRPC
([#3380](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3381))
- `opentelemetry-instrumentation-[asynclick/click]` Add missing opentelemetry-instrumentation dep
([#3447](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3447))
- `opentelemetry-instrumentation-botocore` Capture server attributes for botocore API calls
([#3448](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3448))
## Version 1.32.0/0.53b0 (2025-04-10)
### Added
- `opentelemetry-instrumentation-asyncclick`: new instrumentation to trace asyncclick commands
([#3319](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3319))
- `opentelemetry-instrumentation-botocore` Add support for GenAI tool events using Amazon Nova models and `InvokeModel*` APIs
([#3385](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3385))
- `opentelemetry-instrumentation` Make auto instrumentation use the same dependency resolver as manual instrumentation does
([#3202](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3202))
### Fixed
- `opentelemetry-instrumentation` Fix client address is set to server address in new semconv
([#3354](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3354))
- `opentelemetry-instrumentation-dbapi`, `opentelemetry-instrumentation-django`,
`opentelemetry-instrumentation-sqlalchemy`: Fix sqlcomment for non string query and composable object.
([#3113](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3113))
- `opentelemetry-instrumentation-grpc` Fix error when using grpc versions <= 1.50.0 with unix sockets.
([[#3393](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3393)])
- `opentelemetry-instrumentation-asyncio` Fix duplicate instrumentation.
([[#3383](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3383)])
- `opentelemetry-instrumentation-aiokafka` Fix send_and_wait method no headers kwargs error.
([[#3332](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3332)])
## Version 1.31.0/0.52b0 (2025-03-12)
### Added
- `opentelemetry-instrumentation-openai-v2` Update doc for OpenAI Instrumentation to support OpenAI Compatible Platforms
([#3279](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3279))
- `opentelemetry-instrumentation-system-metrics` Add `process` metrics and deprecated `process.runtime` prefixed ones
([#3250](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3250))
- `opentelemetry-instrumentation-botocore` Add support for GenAI user events and lazy initialize tracer
([#3258](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3258))
- `opentelemetry-instrumentation-botocore` Add support for GenAI system events
([#3266](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3266))
- `opentelemetry-instrumentation-botocore` Add support for GenAI choice events
([#3275](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3275))
- `opentelemetry-instrumentation-botocore` Add support for GenAI tool events
([#3302](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3302))
- `opentelemetry-instrumentation-botocore` Add support for GenAI metrics
([#3326](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3326))
- `opentelemetry-instrumentation` make it simpler to initialize auto-instrumentation programmatically
([#3273](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3273))
- Add `opentelemetry-instrumentation-vertexai>=2.0b0` to `opentelemetry-bootstrap`
([#3307](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3307))
- Loosen `opentelemetry-instrumentation-starlette[instruments]` specifier
([#3304](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3304))
### Fixed
- `opentelemetry-instrumentation-redis` Add missing entry in doc string for `def _instrument`
([#3247](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3247))
- `opentelemetry-instrumentation-botocore` sns-extension: Change destination name attribute
to match topic ARN and redact phone number from attributes
([#3249](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3249))
- `opentelemetry-instrumentation-asyncpg` Fix fallback for empty queries.
([#3253](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3253))
- `opentelemetry-instrumentation` Fix a traceback in sqlcommenter when psycopg connection pooling is enabled.
([#3309](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3309))
- `opentelemetry-instrumentation-threading` Fix broken context typehints
([#3322](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3322))
- `opentelemetry-instrumentation-requests` always record span status code in duration metric
([#3323](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3323))
## Version 1.30.0/0.51b0 (2025-02-03)
### Added
- `opentelemetry-instrumentation-confluent-kafka` Add support for confluent-kafka <=2.7.0
([#3100](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3100))
- Add support to database stability opt-in in `_semconv` utilities and add tests
([#3111](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3111))
- `opentelemetry-instrumentation-urllib` Add `py.typed` file to enable PEP 561
([#3131](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3131))
- `opentelemetry-instrumentation-pymongo` Add `py.typed` file to enable PEP 561
([#3136](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3136))
- `opentelemetry-instrumentation-requests` Add `py.typed` file to enable PEP 561
([#3135](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3135))
- `opentelemetry-instrumentation-system-metrics` Add `py.typed` file to enable PEP 561
([#3132](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3132))
- `opentelemetry-instrumentation-sqlite3` Add `py.typed` file to enable PEP 561
([#3133](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3133))
- `opentelemetry-instrumentation-falcon` add support version to v4
([#3086](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3086))
- `opentelemetry-instrumentation-falcon` Implement new HTTP semantic convention opt-in for Falcon
([#2790](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2790))
- `opentelemetry-instrumentation-wsgi` always record span status code to have it available in metrics
([#3148](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3148))
- add support to Python 3.13
([#3134](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3134))
- `opentelemetry-instrumentation-wsgi` Add `py.typed` file to enable PEP 561
([#3129](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3129))
- `opentelemetry-util-http` Add `py.typed` file to enable PEP 561
([#3127](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3127))
- `opentelemetry-instrumentation-psycopg2` Add support for psycopg2-binary
([#3186](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3186))
- `opentelemetry-instrumentation-botocore` Add basic support for GenAI attributes for AWS Bedrock Converse API
([#3161](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3161))
- `opentelemetry-instrumentation-botocore` Add basic support for GenAI attributes for AWS Bedrock InvokeModel API
([#3200](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3200))
- `opentelemetry-instrumentation-botocore` Add basic support for GenAI attributes for AWS Bedrock ConverseStream API
([#3204](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3204))
- `opentelemetry-instrumentation-botocore` Add basic support for GenAI attributes for AWS Bedrock InvokeModelWithStreamResponse API
([#3206](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3206))
- `opentelemetry-instrumentation-pymssql` Add pymssql instrumentation
([#394](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/394))
- `opentelemetry-instrumentation-mysql` Add sqlcommenter support
([#3163](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3163))
### Fixed
- `opentelemetry-instrumentation-httpx` Fix `RequestInfo`/`ResponseInfo` type hints
([#3105](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3105))
- `opentelemetry-instrumentation-dbapi` Move `TracedCursorProxy` and `TracedConnectionProxy` to the module level
([#3068](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3068))
- `opentelemetry-instrumentation-click` Disable tracing of well-known server click commands
([#3174](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3174))
- `opentelemetry-instrumentation` Fix `get_dist_dependency_conflicts` when a distribution has no requirements
([#3168](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3168))
### Breaking changes
- `opentelemetry-exporter-prometheus-remote-write` updated protobuf required version from 4.21 to 5.26 and regenerated protobufs
([#3219](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3219))
- `opentelemetry-instrumentation-sqlalchemy` including sqlcomment in `db.statement` span attribute value is now opt-in
([#3112](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3112))
- `opentelemetry-instrumentation-dbapi` including sqlcomment in `db.statement` span attribute value is now opt-in
([#3115](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3115))
- `opentelemetry-instrumentation-psycopg2`, `opentelemetry-instrumentation-psycopg`, `opentelemetry-instrumentation-mysqlclient`, `opentelemetry-instrumentation-pymysql`: including sqlcomment in `db.statement` span attribute value is now opt-in
([#3121](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3121))
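
For reference, a minimal sketch of opting back in (assuming SQLAlchemy; `enable_commenter` is the existing sqlcommenter switch, while `enable_attribute_commenter` is assumed here to be the new opt-in flag introduced by the PRs above):

```python
# Hedged sketch, not the authoritative API: re-enable sqlcomment both in the SQL
# sent to the database and in the db.statement span attribute.
from sqlalchemy import create_engine

from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor

engine = create_engine("sqlite:///:memory:")
SQLAlchemyInstrumentor().instrument(
    engine=engine,
    enable_commenter=True,  # inject sqlcomment into emitted SQL (existing switch)
    enable_attribute_commenter=True,  # assumed flag: also include the comment in db.statement
)
```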
## Version 1.29.0/0.50b0 (2024-12-11)

View File

@ -19,31 +19,23 @@ Please also read the [OpenTelemetry Contributor Guide](https://github.com/open-t
## Index
- [Contributing to opentelemetry-python-contrib](#contributing-to-opentelemetry-python-contrib)
- [Index](#index)
- [Find a Buddy and get Started Quickly](#find-a-buddy-and-get-started-quickly)
- [Development](#development)
- [Virtual Environment](#virtual-environment)
- [Troubleshooting](#troubleshooting)
- [Benchmarks](#benchmarks)
- [Pull Requests](#pull-requests)
- [How to Send Pull Requests](#how-to-send-pull-requests)
- [How to Receive Comments](#how-to-receive-comments)
- [How to Get PRs Reviewed](#how-to-get-prs-reviewed)
- [How to Get PRs Merged](#how-to-get-prs-merged)
- [Design Choices](#design-choices)
- [Focus on Capabilities, Not Structure Compliance](#focus-on-capabilities-not-structure-compliance)
- [Running Tests Locally](#running-tests-locally)
- [Testing against a different Core repo branch/commit](#testing-against-a-different-core-repo-branchcommit)
- [Style Guide](#style-guide)
- [Guideline for instrumentations](#guideline-for-instrumentations)
- [Update supported instrumentation package versions](#update-supported-instrumentation-package-versions)
- [Guideline for GenAI instrumentations](#guideline-for-genai-instrumentations)
- [Get Involved](#get-involved)
- [Expectations from contributors](#expectations-from-contributors)
- [Updating supported Python versions](#updating-supported-python-versions)
- [Bumping the Python baseline](#bumping-the-python-baseline)
- [Adding support for a new Python release](#adding-support-for-a-new-python-release)
* [Find a Buddy and get Started Quickly](#find-a-buddy-and-get-started-quickly)
* [Development](#development)
* [Troubleshooting](#troubleshooting)
* [Benchmarks](#benchmarks)
* [Pull requests](#pull-requests)
* [How to Send Pull Requests](#how-to-send-pull-requests)
* [How to Receive Comments](#how-to-receive-comments)
* [How to Get PRs Reviewed](#how-to-get-prs-reviewed)
* [How to Get PRs Merged](#how-to-get-prs-merged)
* [Design Choices](#design-choices)
* [Focus on Capabilities, Not Structure Compliance](#focus-on-capabilities-not-structure-compliance)
* [Running Tests Locally](#running-tests-locally)
* [Testing against a different Core repo branch/commit](#testing-against-a-different-core-repo-branchcommit)
* [Style Guide](#style-guide)
* [Guideline for instrumentations](#guideline-for-instrumentations)
* [Guideline for GenAI instrumentations](#guideline-for-genai-instrumentations)
* [Expectations from contributors](#expectations-from-contributors)
## Find a Buddy and get Started Quickly
@ -67,12 +59,6 @@ To install `tox`, run:
pip install tox
```
You can also run tox with `uv` support. By default [tox.ini](./tox.ini) will automatically create a provisioned tox environment with `tox-uv`, but you can install it at host level:
```sh
pip install tox-uv
```
You can run `tox` with the following arguments:
* `tox` to run all existing tox commands, including unit tests for all packages
@ -101,23 +87,9 @@ See
[`tox.ini`](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/tox.ini)
for more detail on available tox commands.
### Virtual Environment
You can also create a single virtual environment to make it easier to run local tests.
For that, you'll need to install [`uv`](https://docs.astral.sh/uv/getting-started/installation/).
After installing `uv`, you can run the following command:
```sh
uv sync
```
This will create a virtual environment in the `.venv` directory and install all the necessary dependencies.
### Troubleshooting
Some packages may require additional system-wide dependencies to be installed. For example, you may need to install `libpq-dev` to run the postgresql client libraries instrumentation tests or `libsnappy-dev` to run the prometheus exporter tests. If you encounter a build error, please check the installation instructions for the package you are trying to run tests for.
Some packages may require additional system-wide dependencies to be installed. For example, you may need to install `libpq-dev` to run the postgresql client libraries instrumentation tests or `libsnappy-dev` to run the prometheus exporter tests. If you encounter a build error, please check the installation instructions for the package you are trying to run tests for.
For `docs` building, you may need to install `mysql-client` and other required dependencies as necessary. Ensure the Python version used in your local setup matches the version used in the [CI](./.github/workflows/) to maintain compatibility when building the documentation.
@ -167,7 +139,7 @@ git remote add fork https://github.com/YOUR_GITHUB_USERNAME/opentelemetry-python
make sure you have all supported versions of Python installed, install `tox` only for the first time:
```sh
pip install tox tox-uv
pip install tox
```
Run tests in the root of the repository (this will run all tox environments and may take some time):

View File

@ -33,7 +33,7 @@
<strong>
<a href="CONTRIBUTING.md">Contributing<a/>
&nbsp;&nbsp;&bull;&nbsp;&nbsp;
<a href="https://opentelemetry-python-contrib.readthedocs.io/en/latest/#instrumentations">Instrumentations<a/>
<a href="https://opentelemetry-python-contrib.readthedocs.io/en/stable/#examples">Examples<a/>
</strong>
</p>
@ -48,7 +48,6 @@ The Python auto-instrumentation libraries for [OpenTelemetry](https://openteleme
* [Installation](#installation)
* [Releasing](#releasing)
* [Releasing a package as `1.0` stable](#releasing-a-package-as-10-stable)
* [Semantic Convention status of instrumentations](#semantic-convention-status-of-instrumentations)
* [Contributing](#contributing)
* [Thanks to all the people who already contributed](#thanks-to-all-the-people-who-already-contributed)
@ -101,7 +100,7 @@ To release a package as `1.0` stable, the package:
## Semantic Convention status of instrumentations
In our efforts to maintain optimal user experience and prevent breaking changes for transitioning into stable semantic conventions, OpenTelemetry Python is adopting the semantic convention migration plan for several instrumentations. Currently this plan is only being adopted for [HTTP-related instrumentations](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/non-normative/http-migration.md), but will eventually cover all types. Please refer to the `semconv status` column of the [instrumentation README](instrumentation/README.md) for the current status of instrumentations' semantic conventions. The possible values are `development`, `stable` and `migration`, referring to the [status](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.31.0/specification/document-status.md#lifecycle-status) of that particular semantic convention. `Migration` refers to an instrumentation that currently supports the migration plan.
In our efforts to maintain optimal user experience and prevent breaking changes for transitioning into stable semantic conventions, OpenTelemetry Python is adopting the [semantic convention migration plan](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/http/migration-guide.md) for several instrumentations. Currently this plan is only being adopted for HTTP-related instrumentations, but will eventually cover all types. Please refer to the `semconv status` column of the [instrumentation README](instrumentation/README.md) for the current status of instrumentations' semantic conventions. The possible values are `experimental`, `stable` and `migration`, referring to the [status](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.31.0/specification/document-status.md#lifecycle-status) of that particular semantic convention. `Migration` refers to an instrumentation that currently supports the migration plan.
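
In practice the HTTP migration is driven by the `OTEL_SEMCONV_STABILITY_OPT_IN` environment variable; a minimal sketch (assuming the variable is read when the instrumentation initializes):

```python
# Minimal sketch: choose which HTTP semantic conventions the instrumentations emit.
# "http" emits only the stable conventions; "http/dup" emits both old and new
# during the migration. Set this before instrumentations are initialized.
import os

os.environ.setdefault("OTEL_SEMCONV_STABILITY_OPT_IN", "http/dup")
```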
## Contributing
@ -111,18 +110,9 @@ We meet weekly on Thursday at 9AM PT. The meeting is subject to change depending
Meeting notes are available as a public [Google doc](https://docs.google.com/document/d/1CIMGoIOZ-c3-igzbd6_Pnxx1SjAkjwqoYSUWxPY8XIs/edit). For edit access, get in touch on [GitHub Discussions](https://github.com/open-telemetry/opentelemetry-python/discussions).
### Maintainers
Approvers ([@open-telemetry/python-approvers](https://github.com/orgs/open-telemetry/teams/python-approvers)):
- [Aaron Abbott](https://github.com/aabmass), Google
- [Leighton Chen](https://github.com/lzchen), Microsoft
- [Riccardo Magliocchetti](https://github.com/xrmx), Elastic
- [Shalev Roda](https://github.com/shalevr), Cisco
For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
### Approvers
- [Emídio Neto](https://github.com/emdneto), PicPay
- [Emídio Neto](https://github.com/emdneto), Zenvia
- [Jeremy Voss](https://github.com/jeremydvoss), Microsoft
- [Owais Lone](https://github.com/owais), Splunk
- [Pablo Collins](https://github.com/pmcollins), Splunk
@ -130,29 +120,34 @@ For more information about the maintainer role, see the [community repository](h
- [Srikanth Chekuri](https://github.com/srikanthccv), signoz.io
- [Tammy Baylis](https://github.com/tammy-baylis-swi), SolarWinds
For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
Emeritus Approvers:
### Emeritus Maintainers
- [Ashutosh Goel](https://github.com/ashu658), Cisco
- [Héctor Hernández](https://github.com/hectorhdzg), Microsoft
- [Nikolay Sokolik](https://github.com/oxeye-nikolay), Oxeye
- [Nikolay Sokolik](https://github.com/nikosokolik), Oxeye
- [Nathaniel Ruiz Nowell](https://github.com/NathanielRN), AWS
- [Alex Boten](https://github.com/codeboten)
- [Diego Hurtado](https://github.com/ocelotl)
- [Owais Lone](https://github.com/owais)
- [Yusuke Tsutsumi](https://github.com/toumorokoshi)
*Find more about the approver role in [community repository](https://github.com/open-telemetry/community/blob/main/community-membership.md#approver).*
For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
Maintainers ([@open-telemetry/python-maintainers](https://github.com/orgs/open-telemetry/teams/python-maintainers)):
### Emeritus Approvers
- [Aaron Abbott](https://github.com/aabmass), Google
- [Leighton Chen](https://github.com/lzchen), Microsoft
- [Riccardo Magliocchetti](https://github.com/xrmx), Elastic
- [Shalev Roda](https://github.com/shalevr), Cisco
- [Ashutosh Goel](https://github.com/ashu658)
- [Héctor Hernández](https://github.com/hectorhdzg)
- [Nathaniel Ruiz Nowell](https://github.com/NathanielRN)
- [Nikolay Sokolik](https://github.com/nikosokolik)
- [Nikolay Sokolik](https://github.com/oxeye-nikolay)
Emeritus Maintainers:
For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
- [Alex Boten](https://github.com/codeboten), Lightstep
- [Diego Hurtado](https://github.com/ocelotl), Lightstep
- [Owais Lone](https://github.com/owais), Splunk
- [Yusuke Tsutsumi](https://github.com/toumorokoshi), Google
### Thanks to all of our contributors!
*Find more about the maintainer role in [community repository](https://github.com/open-telemetry/community/blob/main/community-membership.md#maintainer).*
### Thanks to all the people who already contributed
<a href="https://github.com/open-telemetry/opentelemetry-python-contrib/graphs/contributors">
<img alt="Repo contributors" src="https://contrib.rocks/image?repo=open-telemetry/opentelemetry-python-contrib" />
<img src="https://contributors-img.web.app/image?repo=open-telemetry/opentelemetry-python-contrib" />
</a>

View File

@ -9,8 +9,7 @@
(otherwise the workflow will pick up the version from `main` and just remove the `.dev` suffix).
* Review the two pull requests that it creates.
(one is targeted to the release branch and one is targeted to `main`).
* The builds will fail for the release PR because of validation rules. Follow the [release workflow](https://github.com/open-telemetry/opentelemetry-python/blob/main/RELEASING.md) for the core repo up until this same point.
* Close and reopen the PR so that the workflow will take into account the label automation we have in place
* The builds will fail for both the `main` and release pr because of validation rules. Follow the [release workflow](https://github.com/open-telemetry/opentelemetry-python/blob/main/RELEASING.md) for the core repo up until this same point. Change the SHAs of each PR to point at each other to get the `main` and release builds to pass.
* Merge the release PR.
* Merge the PR to main (this can be done separately from [making the release](#making-the-release))
@ -22,9 +21,6 @@
> - opentelemetry-resource-detector-azure
> - opentelemetry-sdk-extension-aws
> - opentelemetry-instrumentation-openai-v2
> - opentelemetry-instrumentation-vertexai
> - opentelemetry-instrumentation-google-genai
>
> These libraries are also excluded from the general release.
Package release preparation is handled by the [`[Package] Prepare release`](./.github/workflows/package-prepare-release.yml) workflow that allows
@ -32,8 +28,6 @@ to pick a specific package to release. It follows the same versioning strategy a
Long-term package release branch follows `package-release/{package-name}/v{major}.{minor}.x` (or `package-release/{package-name}/v{major}.{minor}bx`) naming pattern.
The workflow will create two pull requests, one against the `main` and one against the `package-release/` branch; both should be merged in order to proceed with the release.
## Preparing a new patch release
* Backport pull request(s) to the release branch.
@ -41,8 +35,6 @@ The workflow will create two pull requests, one against the `main` and one again
* Press the "Run workflow" button, then select the release branch from the dropdown list,
e.g. `release/v1.9.x`, then enter the pull request number that you want to backport,
then click the "Run workflow" button below that.
* Add the label `backport` to the generated pull request.
* In case label automation doesn't work, just close and reopen the PR so that the workflow will take into account the label automation we have in place.
* Review and merge the backport pull request that it generates.
* Merge a pull request to the release branch updating the `CHANGELOG.md`.
* The heading for the unreleased entries should be `## Unreleased`.
@ -50,7 +42,6 @@ The workflow will create two pull requests, one against the `main` and one again
* Press the "Run workflow" button, then select the release branch from the dropdown list,
e.g. `release/v1.9.x`, and click the "Run workflow" button below that.
* Review and merge the pull request that it creates for updating the version.
* Note: If you are doing a patch release in `-contrib` repo, you should also do an equivalent patch release in `-core` repo (even if there's no fix to release), otherwise tests in CI will fail.
### Preparing a patch release for individual package
@ -63,8 +54,6 @@ to pick a specific package to release.
The workflow can only be run against long-term release branch such as `package-release/{package-name}/v{major}.{minor}.x` or `package-release/{package-name}/v{major}.{minor}bx`.
The workflow will create a pull request that should be merged in order to proceed with the release.
## Making the release
* Run the [Release workflow](https://github.com/open-telemetry/opentelemetry-python-contrib/actions/workflows/release.yml).
@ -84,8 +73,6 @@ The workflow will create a pull request that should be merged in order to procee
> - opentelemetry-resource-detector-azure
> - opentelemetry-sdk-extension-aws
> - opentelemetry-instrumentation-openai-v2
> - opentelemetry-instrumentation-vertexai
> - opentelemetry-instrumentation-google-genai
>
> These libraries are also excluded from the general patch release.

View File

@ -12,7 +12,7 @@ dynamic = ["version"]
description = "<REPLACE ME>"
readme = "README.rst"
license = "Apache-2.0"
requires-python = ">=3.9"
requires-python = ">=3.8"
authors = [
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
]
@ -22,11 +22,11 @@ classifiers = [
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
]
dependencies = [
"opentelemetry-api ~= 1.12",

View File

@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.57b0.dev"
__version__ = "0.51b0.dev"

View File

@ -1,6 +1,6 @@
pylint==3.0.2
httpretty==1.1.4
pyright==v1.1.396
mypy==0.931
sphinx==7.1.2
sphinx-rtd-theme==2.0.0rc4
sphinx-autodoc-typehints==1.25.2

View File

@ -15,7 +15,6 @@ aiohttp~=3.0
aiokafka~=0.11.0
aiopg>=0.13.0,<1.3.0
asyncpg>=0.12.0
asyncclick~=8.0
boto~=2.0
botocore~=1.0
boto3~=1.0
@ -36,7 +35,6 @@ psycopg~=3.1.17
pika>=0.12.0
pymongo~=4.6.3
PyMySQL~=1.1.1
pymssql~=2.3.2
pyramid>=1.7
redis>=2.6
remoulade>=0.50

View File

@ -122,7 +122,6 @@ intersphinx_mapping = {
"https://opentelemetry-python.readthedocs.io/en/latest/",
None,
),
"redis": ("https://redis.readthedocs.io/en/latest/", None),
}
# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky

View File

@ -52,7 +52,7 @@ install <https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs>
pip install -e ./instrumentation/opentelemetry-instrumentation-botocore
pip install -e ./instrumentation-genai/opentelemetry-instrumentation-openai-v2
pip install -e ./sdk-extension/opentelemetry-sdk-extension-aws
pip install -e ./resource/opentelemetry-resource-detector-containerid
pip install -e ./resource/opentelemetry-resource-detector-container
.. toctree::

View File

@ -1,7 +0,0 @@
.. include:: ../../../instrumentation/opentelemetry-instrumentation-asyncclick/README.rst
:end-before: References
.. automodule:: opentelemetry.instrumentation.asyncclick
:members:
:undoc-members:
:show-inheritance:

View File

@ -1,7 +0,0 @@
OpenTelemetry pymssql Instrumentation
=====================================
.. automodule:: opentelemetry.instrumentation.pymssql
:members:
:undoc-members:
:show-inheritance:

View File

@ -1,10 +1,7 @@
.. include:: ../../../instrumentation/opentelemetry-instrumentation-redis/README.rst
:end-before: References
Usage
-----
OpenTelemetry Redis Instrumentation
===================================
.. automodule:: opentelemetry.instrumentation.redis
:members:
:undoc-members:
:show-inheritance:
:show-inheritance:

View File

@ -41,10 +41,6 @@ py-class=
callable
Consumer
confluent_kafka.Message
psycopg.Connection
psycopg.AsyncConnection
ObjectProxy
fastapi.applications.FastAPI
any=
; API
@ -72,8 +68,6 @@ any=
py-obj=
opentelemetry.propagators.textmap.CarrierT
opentelemetry.instrumentation.dbapi.ConnectionT
opentelemetry.instrumentation.dbapi.CursorT
py-func=
poll

View File

@ -1,7 +1,7 @@
OpenTelemetry Python - Resource Detector for Containers
=======================================================
.. automodule:: opentelemetry.resource.detector.containerid
.. automodule:: opentelemetry.resource.detector.container
:members:
:undoc-members:
:show-inheritance:

View File

@ -16,7 +16,7 @@ sortfirst=
ext/*
[stable]
version=1.36.0.dev
version=1.30.0.dev
packages=
opentelemetry-sdk
@ -34,7 +34,7 @@ packages=
opentelemetry-api
[prerelease]
version=0.57b0.dev
version=0.51b0.dev
packages=
all
@ -43,15 +43,13 @@ packages=
opentelemetry-instrumentation
opentelemetry-contrib-instrumentations
opentelemetry-distro
opentelemetry-resource-detector-containerid
opentelemetry-resource-detector-container
[exclude_release]
packages=
opentelemetry-resource-detector-azure
opentelemetry-sdk-extension-aws
opentelemetry-propagator-aws-xray
opentelemetry-instrumentation-google-genai
opentelemetry-instrumentation-vertexai
opentelemetry-instrumentation-openai-v2
opentelemetry-instrumentation-test

View File

@ -1,4 +1,4 @@
FROM python:3.9
FROM python:3.8
RUN apt-get update -y && apt-get install libsnappy-dev -y

View File

@ -1,3 +1,3 @@
## Instructions
1. Install protobuf tools. Can use your package manager or download from [GitHub](https://github.com/protocolbuffers/protobuf/releases/tag/v26.0)
2. Run `generate-proto-py.sh` from inside the `proto/` directory
1. Install protobuf tools. Can use your package manager or download from [GitHub](https://github.com/protocolbuffers/protobuf/releases/tag/v21.7)
2. Run `generate-proto-py.sh` from inside the `proto/` directory

View File

@ -49,7 +49,7 @@ sed -i 's/import "gogoproto\/gogo.proto";/import "opentelemetry\/exporter\/prome
echo "Removing clones..."
rm -rf protobuf prometheus
# Used libprotoc 26
# Used libprotoc 3.21.1 & protoc 21.7
echo "Compiling proto files to Python"
protoc -I . --python_out=../src ${SRC_DIR}/gogoproto/gogo.proto ${SRC_DIR}/remote.proto ${SRC_DIR}/types.proto

View File

@ -9,7 +9,7 @@ dynamic = ["version"]
description = "Prometheus Remote Write Metrics Exporter for OpenTelemetry"
readme = "README.rst"
license = "Apache-2.0"
requires-python = ">=3.9"
requires-python = ">=3.8"
authors = [
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
]
@ -19,14 +19,14 @@ classifiers = [
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
]
dependencies = [
"protobuf ~= 5.26",
"protobuf ~= 4.21",
"requests ~= 2.28",
"opentelemetry-api ~= 1.12",
"opentelemetry-sdk ~= 1.12",

View File

@ -1,45 +1,58 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: opentelemetry/exporter/prometheus_remote_write/gen/remote.proto
# Protobuf Python Version: 5.26.0
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from opentelemetry.exporter.prometheus_remote_write.gen import types_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_types__pb2
from opentelemetry.exporter.prometheus_remote_write.gen.gogoproto import gogo_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_gogoproto_dot_gogo__pb2
from opentelemetry.exporter.prometheus_remote_write.gen import (
types_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_types__pb2,
)
from opentelemetry.exporter.prometheus_remote_write.gen.gogoproto import (
gogo_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_gogoproto_dot_gogo__pb2,
)
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n?opentelemetry/exporter/prometheus_remote_write/gen/remote.proto\x12\nprometheus\x1a>opentelemetry/exporter/prometheus_remote_write/gen/types.proto\x1aGopentelemetry/exporter/prometheus_remote_write/gen/gogoproto/gogo.proto\"z\n\x0cWriteRequest\x12\x30\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeriesB\x04\xc8\xde\x1f\x00\x12\x32\n\x08metadata\x18\x03 \x03(\x0b\x32\x1a.prometheus.MetricMetadataB\x04\xc8\xde\x1f\x00J\x04\x08\x02\x10\x03\"\xae\x01\n\x0bReadRequest\x12\"\n\x07queries\x18\x01 \x03(\x0b\x32\x11.prometheus.Query\x12\x45\n\x17\x61\x63\x63\x65pted_response_types\x18\x02 \x03(\x0e\x32$.prometheus.ReadRequest.ResponseType\"4\n\x0cResponseType\x12\x0b\n\x07SAMPLES\x10\x00\x12\x17\n\x13STREAMED_XOR_CHUNKS\x10\x01\"8\n\x0cReadResponse\x12(\n\x07results\x18\x01 \x03(\x0b\x32\x17.prometheus.QueryResult\"\x8f\x01\n\x05Query\x12\x1a\n\x12start_timestamp_ms\x18\x01 \x01(\x03\x12\x18\n\x10\x65nd_timestamp_ms\x18\x02 \x01(\x03\x12*\n\x08matchers\x18\x03 \x03(\x0b\x32\x18.prometheus.LabelMatcher\x12$\n\x05hints\x18\x04 \x01(\x0b\x32\x15.prometheus.ReadHints\"9\n\x0bQueryResult\x12*\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeries\"]\n\x13\x43hunkedReadResponse\x12\x31\n\x0e\x63hunked_series\x18\x01 \x03(\x0b\x32\x19.prometheus.ChunkedSeries\x12\x13\n\x0bquery_index\x18\x02 \x01(\x03\x42\x08Z\x06prompbb\x06proto3')
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n?opentelemetry/exporter/prometheus_remote_write/gen/remote.proto\x12\nprometheus\x1a>opentelemetry/exporter/prometheus_remote_write/gen/types.proto\x1aGopentelemetry/exporter/prometheus_remote_write/gen/gogoproto/gogo.proto"z\n\x0cWriteRequest\x12\x30\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeriesB\x04\xc8\xde\x1f\x00\x12\x32\n\x08metadata\x18\x03 \x03(\x0b\x32\x1a.prometheus.MetricMetadataB\x04\xc8\xde\x1f\x00J\x04\x08\x02\x10\x03"\xae\x01\n\x0bReadRequest\x12"\n\x07queries\x18\x01 \x03(\x0b\x32\x11.prometheus.Query\x12\x45\n\x17\x61\x63\x63\x65pted_response_types\x18\x02 \x03(\x0e\x32$.prometheus.ReadRequest.ResponseType"4\n\x0cResponseType\x12\x0b\n\x07SAMPLES\x10\x00\x12\x17\n\x13STREAMED_XOR_CHUNKS\x10\x01"8\n\x0cReadResponse\x12(\n\x07results\x18\x01 \x03(\x0b\x32\x17.prometheus.QueryResult"\x8f\x01\n\x05Query\x12\x1a\n\x12start_timestamp_ms\x18\x01 \x01(\x03\x12\x18\n\x10\x65nd_timestamp_ms\x18\x02 \x01(\x03\x12*\n\x08matchers\x18\x03 \x03(\x0b\x32\x18.prometheus.LabelMatcher\x12$\n\x05hints\x18\x04 \x01(\x0b\x32\x15.prometheus.ReadHints"9\n\x0bQueryResult\x12*\n\ntimeseries\x18\x01 \x03(\x0b\x32\x16.prometheus.TimeSeries"]\n\x13\x43hunkedReadResponse\x12\x31\n\x0e\x63hunked_series\x18\x01 \x03(\x0b\x32\x19.prometheus.ChunkedSeries\x12\x13\n\x0bquery_index\x18\x02 \x01(\x03\x42\x08Z\x06prompbb\x06proto3'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.exporter.prometheus_remote_write.gen.remote_pb2', _globals)
if not _descriptor._USE_C_DESCRIPTORS:
_globals['DESCRIPTOR']._loaded_options = None
_globals['DESCRIPTOR']._serialized_options = b'Z\006prompb'
_globals['_WRITEREQUEST'].fields_by_name['timeseries']._loaded_options = None
_globals['_WRITEREQUEST'].fields_by_name['timeseries']._serialized_options = b'\310\336\037\000'
_globals['_WRITEREQUEST'].fields_by_name['metadata']._loaded_options = None
_globals['_WRITEREQUEST'].fields_by_name['metadata']._serialized_options = b'\310\336\037\000'
_globals['_WRITEREQUEST']._serialized_start=216
_globals['_WRITEREQUEST']._serialized_end=338
_globals['_READREQUEST']._serialized_start=341
_globals['_READREQUEST']._serialized_end=515
_globals['_READREQUEST_RESPONSETYPE']._serialized_start=463
_globals['_READREQUEST_RESPONSETYPE']._serialized_end=515
_globals['_READRESPONSE']._serialized_start=517
_globals['_READRESPONSE']._serialized_end=573
_globals['_QUERY']._serialized_start=576
_globals['_QUERY']._serialized_end=719
_globals['_QUERYRESULT']._serialized_start=721
_globals['_QUERYRESULT']._serialized_end=778
_globals['_CHUNKEDREADRESPONSE']._serialized_start=780
_globals['_CHUNKEDREADRESPONSE']._serialized_end=873
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(
DESCRIPTOR,
"opentelemetry.exporter.prometheus_remote_write.gen.remote_pb2",
globals(),
)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b"Z\006prompb"
_WRITEREQUEST.fields_by_name["timeseries"]._options = None
_WRITEREQUEST.fields_by_name["timeseries"]._serialized_options = (
b"\310\336\037\000"
)
_WRITEREQUEST.fields_by_name["metadata"]._options = None
_WRITEREQUEST.fields_by_name["metadata"]._serialized_options = (
b"\310\336\037\000"
)
_WRITEREQUEST._serialized_start = 216
_WRITEREQUEST._serialized_end = 338
_READREQUEST._serialized_start = 341
_READREQUEST._serialized_end = 515
_READREQUEST_RESPONSETYPE._serialized_start = 463
_READREQUEST_RESPONSETYPE._serialized_end = 515
_READRESPONSE._serialized_start = 517
_READRESPONSE._serialized_end = 573
_QUERY._serialized_start = 576
_QUERY._serialized_end = 719
_QUERYRESULT._serialized_start = 721
_QUERYRESULT._serialized_end = 778
_CHUNKEDREADRESPONSE._serialized_start = 780
_CHUNKEDREADRESPONSE._serialized_end = 873
# @@protoc_insertion_point(module_scope)

View File

@ -1,66 +1,85 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: opentelemetry/exporter/prometheus_remote_write/gen/types.proto
# Protobuf Python Version: 5.26.0
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from opentelemetry.exporter.prometheus_remote_write.gen.gogoproto import gogo_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_gogoproto_dot_gogo__pb2
from opentelemetry.exporter.prometheus_remote_write.gen.gogoproto import (
gogo_pb2 as opentelemetry_dot_exporter_dot_prometheus__remote__write_dot_gen_dot_gogoproto_dot_gogo__pb2,
)
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n>opentelemetry/exporter/prometheus_remote_write/gen/types.proto\x12\nprometheus\x1aGopentelemetry/exporter/prometheus_remote_write/gen/gogoproto/gogo.proto\"\xf8\x01\n\x0eMetricMetadata\x12\x33\n\x04type\x18\x01 \x01(\x0e\x32%.prometheus.MetricMetadata.MetricType\x12\x1a\n\x12metric_family_name\x18\x02 \x01(\t\x12\x0c\n\x04help\x18\x04 \x01(\t\x12\x0c\n\x04unit\x18\x05 \x01(\t\"y\n\nMetricType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07\x43OUNTER\x10\x01\x12\t\n\x05GAUGE\x10\x02\x12\r\n\tHISTOGRAM\x10\x03\x12\x12\n\x0eGAUGEHISTOGRAM\x10\x04\x12\x0b\n\x07SUMMARY\x10\x05\x12\x08\n\x04INFO\x10\x06\x12\x0c\n\x08STATESET\x10\x07\"*\n\x06Sample\x12\r\n\x05value\x18\x01 \x01(\x01\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\"U\n\x08\x45xemplar\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12\r\n\x05value\x18\x02 \x01(\x01\x12\x11\n\ttimestamp\x18\x03 \x01(\x03\"\x8f\x01\n\nTimeSeries\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12)\n\x07samples\x18\x02 \x03(\x0b\x32\x12.prometheus.SampleB\x04\xc8\xde\x1f\x00\x12-\n\texemplars\x18\x03 \x03(\x0b\x32\x14.prometheus.ExemplarB\x04\xc8\xde\x1f\x00\"$\n\x05Label\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"1\n\x06Labels\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\"\x82\x01\n\x0cLabelMatcher\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.prometheus.LabelMatcher.Type\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"(\n\x04Type\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03NEQ\x10\x01\x12\x06\n\x02RE\x10\x02\x12\x07\n\x03NRE\x10\x03\"|\n\tReadHints\x12\x0f\n\x07step_ms\x18\x01 \x01(\x03\x12\x0c\n\x04\x66unc\x18\x02 \x01(\t\x12\x10\n\x08start_ms\x18\x03 \x01(\x03\x12\x0e\n\x06\x65nd_ms\x18\x04 \x01(\x03\x12\x10\n\x08grouping\x18\x05 \x03(\t\x12\n\n\x02\x62y\x18\x06 \x01(\x08\x12\x10\n\x08range_ms\x18\x07 \x01(\x03\"\x8b\x01\n\x05\x43hunk\x12\x13\n\x0bmin_time_ms\x18\x01 \x01(\x03\x12\x13\n\x0bmax_time_ms\x18\x02 \x01(\x03\x12(\n\x04type\x18\x03 \x01(\x0e\x32\x1a.prometheus.Chunk.Encoding\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\" \n\x08\x45ncoding\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03XOR\x10\x01\"a\n\rChunkedSeries\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12\'\n\x06\x63hunks\x18\x02 \x03(\x0b\x32\x11.prometheus.ChunkB\x04\xc8\xde\x1f\x00\x42\x08Z\x06prompbb\x06proto3')
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n>opentelemetry/exporter/prometheus_remote_write/gen/types.proto\x12\nprometheus\x1aGopentelemetry/exporter/prometheus_remote_write/gen/gogoproto/gogo.proto"\xf8\x01\n\x0eMetricMetadata\x12\x33\n\x04type\x18\x01 \x01(\x0e\x32%.prometheus.MetricMetadata.MetricType\x12\x1a\n\x12metric_family_name\x18\x02 \x01(\t\x12\x0c\n\x04help\x18\x04 \x01(\t\x12\x0c\n\x04unit\x18\x05 \x01(\t"y\n\nMetricType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07\x43OUNTER\x10\x01\x12\t\n\x05GAUGE\x10\x02\x12\r\n\tHISTOGRAM\x10\x03\x12\x12\n\x0eGAUGEHISTOGRAM\x10\x04\x12\x0b\n\x07SUMMARY\x10\x05\x12\x08\n\x04INFO\x10\x06\x12\x0c\n\x08STATESET\x10\x07"*\n\x06Sample\x12\r\n\x05value\x18\x01 \x01(\x01\x12\x11\n\ttimestamp\x18\x02 \x01(\x03"U\n\x08\x45xemplar\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12\r\n\x05value\x18\x02 \x01(\x01\x12\x11\n\ttimestamp\x18\x03 \x01(\x03"\x8f\x01\n\nTimeSeries\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12)\n\x07samples\x18\x02 \x03(\x0b\x32\x12.prometheus.SampleB\x04\xc8\xde\x1f\x00\x12-\n\texemplars\x18\x03 \x03(\x0b\x32\x14.prometheus.ExemplarB\x04\xc8\xde\x1f\x00"$\n\x05Label\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t"1\n\x06Labels\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00"\x82\x01\n\x0cLabelMatcher\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.prometheus.LabelMatcher.Type\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t"(\n\x04Type\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03NEQ\x10\x01\x12\x06\n\x02RE\x10\x02\x12\x07\n\x03NRE\x10\x03"|\n\tReadHints\x12\x0f\n\x07step_ms\x18\x01 \x01(\x03\x12\x0c\n\x04\x66unc\x18\x02 \x01(\t\x12\x10\n\x08start_ms\x18\x03 \x01(\x03\x12\x0e\n\x06\x65nd_ms\x18\x04 \x01(\x03\x12\x10\n\x08grouping\x18\x05 \x03(\t\x12\n\n\x02\x62y\x18\x06 \x01(\x08\x12\x10\n\x08range_ms\x18\x07 \x01(\x03"\x8b\x01\n\x05\x43hunk\x12\x13\n\x0bmin_time_ms\x18\x01 \x01(\x03\x12\x13\n\x0bmax_time_ms\x18\x02 \x01(\x03\x12(\n\x04type\x18\x03 \x01(\x0e\x32\x1a.prometheus.Chunk.Encoding\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c" \n\x08\x45ncoding\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03XOR\x10\x01"a\n\rChunkedSeries\x12\'\n\x06labels\x18\x01 \x03(\x0b\x32\x11.prometheus.LabelB\x04\xc8\xde\x1f\x00\x12\'\n\x06\x63hunks\x18\x02 \x03(\x0b\x32\x11.prometheus.ChunkB\x04\xc8\xde\x1f\x00\x42\x08Z\x06prompbb\x06proto3'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.exporter.prometheus_remote_write.gen.types_pb2', _globals)
if not _descriptor._USE_C_DESCRIPTORS:
_globals['DESCRIPTOR']._loaded_options = None
_globals['DESCRIPTOR']._serialized_options = b'Z\006prompb'
_globals['_EXEMPLAR'].fields_by_name['labels']._loaded_options = None
_globals['_EXEMPLAR'].fields_by_name['labels']._serialized_options = b'\310\336\037\000'
_globals['_TIMESERIES'].fields_by_name['labels']._loaded_options = None
_globals['_TIMESERIES'].fields_by_name['labels']._serialized_options = b'\310\336\037\000'
_globals['_TIMESERIES'].fields_by_name['samples']._loaded_options = None
_globals['_TIMESERIES'].fields_by_name['samples']._serialized_options = b'\310\336\037\000'
_globals['_TIMESERIES'].fields_by_name['exemplars']._loaded_options = None
_globals['_TIMESERIES'].fields_by_name['exemplars']._serialized_options = b'\310\336\037\000'
_globals['_LABELS'].fields_by_name['labels']._loaded_options = None
_globals['_LABELS'].fields_by_name['labels']._serialized_options = b'\310\336\037\000'
_globals['_CHUNKEDSERIES'].fields_by_name['labels']._loaded_options = None
_globals['_CHUNKEDSERIES'].fields_by_name['labels']._serialized_options = b'\310\336\037\000'
_globals['_CHUNKEDSERIES'].fields_by_name['chunks']._loaded_options = None
_globals['_CHUNKEDSERIES'].fields_by_name['chunks']._serialized_options = b'\310\336\037\000'
_globals['_METRICMETADATA']._serialized_start=152
_globals['_METRICMETADATA']._serialized_end=400
_globals['_METRICMETADATA_METRICTYPE']._serialized_start=279
_globals['_METRICMETADATA_METRICTYPE']._serialized_end=400
_globals['_SAMPLE']._serialized_start=402
_globals['_SAMPLE']._serialized_end=444
_globals['_EXEMPLAR']._serialized_start=446
_globals['_EXEMPLAR']._serialized_end=531
_globals['_TIMESERIES']._serialized_start=534
_globals['_TIMESERIES']._serialized_end=677
_globals['_LABEL']._serialized_start=679
_globals['_LABEL']._serialized_end=715
_globals['_LABELS']._serialized_start=717
_globals['_LABELS']._serialized_end=766
_globals['_LABELMATCHER']._serialized_start=769
_globals['_LABELMATCHER']._serialized_end=899
_globals['_LABELMATCHER_TYPE']._serialized_start=859
_globals['_LABELMATCHER_TYPE']._serialized_end=899
_globals['_READHINTS']._serialized_start=901
_globals['_READHINTS']._serialized_end=1025
_globals['_CHUNK']._serialized_start=1028
_globals['_CHUNK']._serialized_end=1167
_globals['_CHUNK_ENCODING']._serialized_start=1135
_globals['_CHUNK_ENCODING']._serialized_end=1167
_globals['_CHUNKEDSERIES']._serialized_start=1169
_globals['_CHUNKEDSERIES']._serialized_end=1266
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(
DESCRIPTOR,
"opentelemetry.exporter.prometheus_remote_write.gen.types_pb2",
globals(),
)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b"Z\006prompb"
_EXEMPLAR.fields_by_name["labels"]._options = None
_EXEMPLAR.fields_by_name["labels"]._serialized_options = (
b"\310\336\037\000"
)
_TIMESERIES.fields_by_name["labels"]._options = None
_TIMESERIES.fields_by_name["labels"]._serialized_options = (
b"\310\336\037\000"
)
_TIMESERIES.fields_by_name["samples"]._options = None
_TIMESERIES.fields_by_name["samples"]._serialized_options = (
b"\310\336\037\000"
)
_TIMESERIES.fields_by_name["exemplars"]._options = None
_TIMESERIES.fields_by_name["exemplars"]._serialized_options = (
b"\310\336\037\000"
)
_LABELS.fields_by_name["labels"]._options = None
_LABELS.fields_by_name["labels"]._serialized_options = b"\310\336\037\000"
_CHUNKEDSERIES.fields_by_name["labels"]._options = None
_CHUNKEDSERIES.fields_by_name["labels"]._serialized_options = (
b"\310\336\037\000"
)
_CHUNKEDSERIES.fields_by_name["chunks"]._options = None
_CHUNKEDSERIES.fields_by_name["chunks"]._serialized_options = (
b"\310\336\037\000"
)
_METRICMETADATA._serialized_start = 152
_METRICMETADATA._serialized_end = 400
_METRICMETADATA_METRICTYPE._serialized_start = 279
_METRICMETADATA_METRICTYPE._serialized_end = 400
_SAMPLE._serialized_start = 402
_SAMPLE._serialized_end = 444
_EXEMPLAR._serialized_start = 446
_EXEMPLAR._serialized_end = 531
_TIMESERIES._serialized_start = 534
_TIMESERIES._serialized_end = 677
_LABEL._serialized_start = 679
_LABEL._serialized_end = 715
_LABELS._serialized_start = 717
_LABELS._serialized_end = 766
_LABELMATCHER._serialized_start = 769
_LABELMATCHER._serialized_end = 899
_LABELMATCHER_TYPE._serialized_start = 859
_LABELMATCHER_TYPE._serialized_end = 899
_READHINTS._serialized_start = 901
_READHINTS._serialized_end = 1025
_CHUNK._serialized_start = 1028
_CHUNK._serialized_end = 1167
_CHUNK_ENCODING._serialized_start = 1135
_CHUNK_ENCODING._serialized_end = 1167
_CHUNKEDSERIES._serialized_start = 1169
_CHUNKEDSERIES._serialized_end = 1266
# @@protoc_insertion_point(module_scope)

View File

@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.57b0.dev"
__version__ = "0.51b0.dev"

View File

@ -1,13 +1,15 @@
asgiref==3.8.1
certifi==2024.7.4
charset-normalizer==3.3.2
cramjam==2.8.4
# We can drop this after bumping baseline to pypy-39
cramjam==2.1.0; platform_python_implementation == "PyPy"
cramjam==2.8.1; platform_python_implementation != "PyPy"
Deprecated==1.2.14
idna==3.7
iniconfig==2.0.0
packaging==24.0
pluggy==1.5.0
protobuf==5.26
protobuf==4.25.3
py-cpuinfo==9.0.0
pytest==7.4.4
python-snappy==0.7.1

View File

@ -8,7 +8,7 @@ dynamic = ["version"]
description = "Rich Console Exporter for OpenTelemetry"
readme = "README.rst"
license = "Apache-2.0"
requires-python = ">=3.9"
requires-python = ">=3.8"
authors = [
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
]
@ -18,16 +18,16 @@ classifiers = [
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
]
dependencies = [
"opentelemetry-api ~= 1.12",
"opentelemetry-sdk ~= 1.12",
"opentelemetry-semantic-conventions == 0.57b0.dev",
"opentelemetry-semantic-conventions == 0.51b0.dev",
"rich>=10.0.0",
]

View File

@ -64,15 +64,11 @@ from rich.tree import Tree
import opentelemetry.trace
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
from opentelemetry.semconv._incubating.attributes.db_attributes import (
DB_STATEMENT,
)
from opentelemetry.semconv.trace import SpanAttributes
def _ns_to_time(nanoseconds):
ts = datetime.datetime.fromtimestamp(
nanoseconds / 1e9, datetime.timezone.utc
)
ts = datetime.datetime.utcfromtimestamp(nanoseconds / 1e9)
return ts.strftime("%H:%M:%S.%f")
@ -122,7 +118,7 @@ def _child_add_optional_attributes(child: Tree, span: ReadableSpan):
label=Text.from_markup("[bold cyan]Attributes :[/bold cyan] ")
)
for attribute in span.attributes:
if attribute == DB_STATEMENT:
if attribute == SpanAttributes.DB_STATEMENT:
attributes.add(
Text.from_markup(f"[bold cyan]{attribute} :[/bold cyan] ")
)

View File

@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.57b0.dev"
__version__ = "0.51b0.dev"

View File

@ -1,6 +1,6 @@
-c dev-requirements.txt
astor==0.8.1
jinja2==3.1.6
jinja2==3.1.4
markupsafe==2.0.1
ruff==0.6.9
requests

View File

@ -1,6 +0,0 @@
| Instrumentation | Supported Packages | Metrics support | Semconv status |
| --------------- | ------------------ | --------------- | -------------- |
| [opentelemetry-instrumentation-google-genai](./opentelemetry-instrumentation-google-genai) | google-genai >= 1.0.0 | No | development
| [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development
| [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development

View File

@ -1,4 +0,0 @@
.build
.test
dist

View File

@ -1,28 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Unreleased
## Version 0.3b0 (2025-07-08)
- Add automatic instrumentation to tool call functions ([#3446](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3446))
## Version 0.2b0 (2025-04-28)
- Add more request configuration options to the span attributes ([#3374](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3374))
- Restructure tests to keep in line with repository conventions ([#3344](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3344))
- Fix [bug](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3416) where
span attribute `gen_ai.response.finish_reasons` is empty ([#3417](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3417))
## Version 0.1b0 (2025-03-05)
- Add support for async and streaming.
([#3298](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3298))
Create an initial version of Open Telemetry instrumentation for github.com/googleapis/python-genai.
([#3256](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3256))

View File

@ -1,203 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,92 +0,0 @@
OpenTelemetry Google GenAI SDK Instrumentation
==============================================
|pypi|
.. |pypi| image:: https://badge.fury.io/py/opentelemetry-instrumentation-google-genai.svg
:target: https://pypi.org/project/opentelemetry-instrumentation-google-genai/
This library adds instrumentation to the `Google GenAI SDK library <https://pypi.org/project/google-genai/>`_
to emit telemetry data following `Semantic Conventions for GenAI systems <https://opentelemetry.io/docs/specs/semconv/gen-ai/>`_.
It adds trace spans for GenAI operations, events/logs for recording prompts/responses, and emits metrics that describe the
GenAI operations in aggregate.
Experimental
------------
This package is still experimental. The instrumentation may not be complete or correct just yet.
Please see "TODOS.md" for a list of known defects/TODOs that are blockers to package stability.
Installation
------------
If your application is already instrumented with OpenTelemetry, add this
package to your requirements.
::
pip install opentelemetry-instrumentation-google-genai
If you don't yet have a Google GenAI SDK application, try our `examples <examples>`_.
Check out `zero-code example <examples/zero-code>`_ for a quick start.
Usage
-----
This section describes how to set up Google GenAI SDK instrumentation if you're setting OpenTelemetry up manually.
Check out the `manual example <examples/manual>`_ for more details.
Instrumenting all clients
*************************
When using the instrumentor, all clients will automatically trace GenAI `generate_content` operations.
You can also optionally capture prompts and responses as log events.
Make sure to configure OpenTelemetry tracing, logging, metrics, and events to capture all telemetry emitted by the instrumentation.
.. code-block:: python
from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
from google.genai import Client
GoogleGenAiSdkInstrumentor().instrument()
client = Client()
response = client.models.generate_content(
model="gemini-1.5-flash-002",
contents="Write a short poem on OpenTelemetry.")
Enabling message content
*************************
Message content, such as the contents of the prompt and response,
is not captured by default. To capture message content as log events, set the environment variable
`OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` to `true`.
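For example, in a POSIX shell you could export the variable before starting your application (any mechanism that sets the process environment works equally well):
::
    export OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true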
Uninstrument
************
To uninstrument clients, call the uninstrument method:
.. code-block:: python
from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
GoogleGenAiSdkInstrumentor().instrument()
# ...
# Uninstrument all clients
GoogleGenAiSdkInstrumentor().uninstrument()
References
----------
* `Google Gen AI SDK Documentation <https://ai.google.dev/gemini-api/docs/sdks>`_
* `Google Gen AI SDK on GitHub <https://github.com/googleapis/python-genai>`_
* `Using Vertex AI with Google Gen AI SDK <https://cloud.google.com/vertex-ai/generative-ai/docs/sdks/overview>`_
* `OpenTelemetry Project <https://opentelemetry.io/>`_
* `OpenTelemetry Python Examples <https://github.com/open-telemetry/opentelemetry-python/tree/main/docs/examples>`_

View File

@ -1,19 +0,0 @@
# TODOs
## Fundamentals
Here are some TODO items required to achieve stability for this package:
- Add more span-level attributes for response information
- Verify and correct formatting of events:
- Including the 'role' field for message events
- Including tool invocation information
- Emit events for safety ratings when they block responses
- Additional cleanup/improvement tasks such as:
- Adoption of 'wrapt' instead of 'functools.wraps'
- Bolstering test coverage
## Future
Beyond the above TODOs, it would also be desirable to extend the
instrumentation beyond `generate_content` to other API surfaces.

View File

@ -1,28 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Uncomment and change to your OTLP endpoint
# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
# OTEL_EXPORTER_OTLP_PROTOCOL=grpc
# Uncomment to change parameters used to configure 'google.genai'
# GOOGLE_GENAI_USE_VERTEXAI=1
# GOOGLE_API_KEY=<your api key>
# GOOGLE_CLOUD_PROJECT=<your cloud project>
# GOOGLE_CLOUD_LOCATION=<your cloud location>
OTEL_SERVICE_NAME=opentelemetry-python-google-genai
# Change to 'false' to hide prompt and completion content
OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true

View File

@ -1,44 +0,0 @@
OpenTelemetry Google GenAI SDK Manual Instrumentation Example
==============================================================
This is an example of how to instrument Google GenAI SDK calls when configuring
the OpenTelemetry SDK and instrumentation manually.
When `main.py <main.py>`_ is run, it exports traces, logs, and metrics to an OTLP
compatible endpoint. Traces include details such as the model used and the
duration of the chat request. Logs capture the chat request and the generated
response, providing a comprehensive view of the performance and behavior of
your GenAI SDK requests. Metrics include aggregate statistics such as total
token usage as well as the latency distribution of the GenAI operations.
Note: the `.env <.env>`_ file configures additional environment variables:
- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true`
... configures Google GenAI SDK instrumentation to capture prompt/response content.
Setup
-----
An OTLP-compatible endpoint should be listening for traces, logs, and metrics on
http://localhost:4317. If your endpoint differs, update "OTEL_EXPORTER_OTLP_ENDPOINT" accordingly.
Next, set up a virtual environment like this:
::
python3 -m venv .venv
source .venv/bin/activate
pip install "python-dotenv[cli]"
pip install -r requirements.txt
Run
---
Run the example like this:
::
export PROMPT="Your prompt here"
dotenv run -- python main.py

View File

@ -1,101 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# We skip linting this file with pylint, because the linter is not
# configured with the "requirements.txt" dependencies and therefore
# will give multiple "no-name-in-module" errors for the imports.
#
# pylint: skip-file
import os
import google.genai
# NOTE: OpenTelemetry Python Logs and Events APIs are in beta
from opentelemetry import _events as otel_events
from opentelemetry import _logs as otel_logs
from opentelemetry import metrics as otel_metrics
from opentelemetry import trace as otel_trace
from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
OTLPLogExporter,
)
from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import (
OTLPMetricExporter,
)
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
OTLPSpanExporter,
)
from opentelemetry.instrumentation.google_genai import (
GoogleGenAiSdkInstrumentor,
)
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.sdk._events import EventLoggerProvider
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
def setup_otel_tracing():
otel_trace.set_tracer_provider(TracerProvider())
otel_trace.get_tracer_provider().add_span_processor(
BatchSpanProcessor(OTLPSpanExporter())
)
def setup_otel_logs_and_events():
otel_logs.set_logger_provider(LoggerProvider())
otel_logs.get_logger_provider().add_log_record_processor(
BatchLogRecordProcessor(OTLPLogExporter())
)
otel_events.set_event_logger_provider(EventLoggerProvider())
def setup_otel_metrics():
meter_provider = MeterProvider(
metric_readers=[
PeriodicExportingMetricReader(
OTLPMetricExporter(),
),
]
)
otel_metrics.set_meter_provider(meter_provider)
def setup_opentelemetry():
setup_otel_tracing()
setup_otel_logs_and_events()
setup_otel_metrics()
def instrument_google_genai():
GoogleGenAiSdkInstrumentor().instrument()
RequestsInstrumentor().instrument()
def main():
setup_opentelemetry()
instrument_google_genai()
client = google.genai.Client()
response = client.models.generate_content(
model=os.getenv("MODEL", "gemini-2.0-flash-001"),
contents=os.getenv("PROMPT", "Why is the sky blue?"),
)
print(response.text)
if __name__ == "__main__":
main()

View File

@ -1,20 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
google-genai ~= 1.0.0
opentelemetry-api ~= 1.30.0
opentelemetry-sdk ~= 1.30.0
opentelemetry-exporter-otlp-proto-grpc ~= 1.30.0
opentelemetry-instrumentation-requests ~= 0.51b0
opentelemetry-instrumentation-google-genai ~= 0.0.1.dev

View File

@ -1,30 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Update to change exporter configuration as desired.
# See: https://opentelemetry.io/docs/zero-code/python/configuration/
OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
OTEL_EXPORTER_OTLP_PROTOCOL=grpc
# Uncomment to change parameters used to configure 'google.genai'
# GOOGLE_GENAI_USE_VERTEXAI=1
# GOOGLE_API_KEY=<your api key>
# GOOGLE_CLOUD_PROJECT=<your cloud project>
# GOOGLE_CLOUD_LOCATION=<your cloud location>
OTEL_SERVICE_NAME=opentelemetry-python-google-genai
# Change to 'false' to hide prompt and completion content
OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true

View File

@ -1,46 +0,0 @@
OpenTelemetry Google GenAI SDK Zero-Code Instrumentation Example
=================================================================
This is an example of how to instrument Google GenAI SDK calls with zero code changes,
using `opentelemetry-instrument`.
When `main.py <main.py>`_ is run, it exports traces, logs, and metrics to an OTLP
compatible endpoint. Traces include details such as the model used and the
duration of the chat request. Logs capture the chat request and the generated
response, providing a comprehensive view of the performance and behavior of
your GenAI SDK requests. Metrics include aggregate statistics such as total
token usage as well as the latency distribution of the GenAI operations.
Note: the `.env <.env>`_ file configures additional environment variables:
- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true`
... configures Google GenAI SDK instrumentation to capture prompt/response content.
Setup
-----
An OTLP-compatible endpoint should be listening for traces, logs, and metrics on
http://localhost:4317. If your endpoint differs, update "OTEL_EXPORTER_OTLP_ENDPOINT" accordingly.
Next, set up a virtual environment like this:
::
python3 -m venv .venv
source .venv/bin/activate
pip install "python-dotenv[cli]"
pip install -r requirements.txt
opentelemetry-bootstrap -a install
Run
---
Run the example like this:
::
export PROMPT="Your prompt here"
dotenv run -- opentelemetry-instrument python main.py

View File

@ -1,30 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import google.genai
def main():
client = google.genai.Client()
response = client.models.generate_content(
model=os.getenv("MODEL", "gemini-2.0-flash-001"),
contents=os.getenv("PROMPT", "Why is the sky blue?"),
)
print(response.text)
if __name__ == "__main__":
main()

View File

@ -1,23 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
google-genai ~= 1.0.0
opentelemetry-api ~= 1.30.0
opentelemetry-sdk ~= 1.30.0
opentelemetry-exporter-otlp-proto-grpc ~= 1.30.0
opentelemetry-instrumentation ~= 0.51b0
opentelemetry-instrumentation-requests ~= 0.51b0
opentelemetry-instrumentation-google-genai ~= 0.0.1.dev
opentelemetry-contrib-instrumentations ~= 0.51b0
opentelemetry-distro[otlp] ~= 0.51b0

View File

@ -1,79 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "opentelemetry-instrumentation-google-genai"
dynamic = ["version"]
description = "OpenTelemetry"
readme = "README.rst"
license = "Apache-2.0"
requires-python = ">=3.9"
authors = [
{ name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12"
]
dependencies = [
"opentelemetry-api >=1.31.1, <2",
"opentelemetry-instrumentation >=0.52b1, <2",
"opentelemetry-semantic-conventions >=0.52b1, <2"
]
[project.optional-dependencies]
instruments = [
"google-genai >= 1.0.0"
]
[project.entry-points.opentelemetry_instrumentor]
google-genai = "opentelemetry.instrumentation.google_genai:GoogleGenAiSdkInstrumentor"
[project.urls]
Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-google-genai"
Repository = "https://github.com/open-telemetry/opentelemetry-python-contrib"
[tool.hatch.version]
path = "src/opentelemetry/instrumentation/google_genai/version.py"
[tool.hatch.build.targets.sdist]
include = [
"/src",
"/tests",
]
[tool.hatch.build.targets.wheel]
packages = ["src/opentelemetry"]
[tool.pyright]
include = [
"src",
]
exclude = [
"**/__pycache__",
]
stubPath = "types"
reportMissingImports = "error"
reportMissingTypeStubs = false
pythonVersion = "3.9"

View File

@ -1,47 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Google Gen AI SDK client instrumentation supporting the `google-genai` package.
It can be enabled using ``GoogleGenAiSdkInstrumentor``.
.. _google-genai: https://pypi.org/project/google-genai/
Usage
-----
.. code:: python
import os
import google.genai
from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
GoogleGenAiSdkInstrumentor().instrument()
model = os.getenv('MODEL', 'gemini-2.0-flash-001')
client = google.genai.Client()
response = client.models.generate_content(
model=model,
contents='why is the sky blue?'
)
print(response.text)
API
---
"""
from .instrumentor import GoogleGenAiSdkInstrumentor
from .version import __version__
__all__ = ["GoogleGenAiSdkInstrumentor", "__version__"]

View File

@ -1,97 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from typing import Iterable, Optional, Set
ALLOWED = True
DENIED = False
def _parse_env_list(s: str) -> Set[str]:
result = set()
for entry in s.split(","):
stripped_entry = entry.strip()
if not stripped_entry:
continue
result.add(stripped_entry)
return result
class _CompoundMatcher:
def __init__(self, entries: Set[str]):
self._match_all = "*" in entries
self._entries = entries
self._regex_matcher = None
regex_entries = []
for entry in entries:
if "*" not in entry:
continue
if entry == "*":
continue
entry = entry.replace("[", "\\[")
entry = entry.replace("]", "\\]")
entry = entry.replace(".", "\\.")
entry = entry.replace("*", ".*")
regex_entries.append(f"({entry})")
if regex_entries:
joined_regex = "|".join(regex_entries)
regex_str = f"^({joined_regex})$"
self._regex_matcher = re.compile(regex_str)
@property
def match_all(self):
return self._match_all
def matches(self, x):
if self._match_all:
return True
if x in self._entries:
return True
if (self._regex_matcher is not None) and (
self._regex_matcher.fullmatch(x)
):
return True
return False
class AllowList:
def __init__(
self,
includes: Optional[Iterable[str]] = None,
excludes: Optional[Iterable[str]] = None,
):
self._includes = _CompoundMatcher(set(includes or []))
self._excludes = _CompoundMatcher(set(excludes or []))
assert (not self._includes.match_all) or (
not self._excludes.match_all
), "Can't have '*' in both includes and excludes."
def allowed(self, x: str):
if self._excludes.match_all:
return self._includes.matches(x)
if self._includes.match_all:
return not self._excludes.matches(x)
return self._includes.matches(x) and not self._excludes.matches(x)
@staticmethod
def from_env(
includes_env_var: str, excludes_env_var: Optional[str] = None
):
includes = _parse_env_list(os.getenv(includes_env_var) or "")
excludes = set()
if excludes_env_var:
excludes = _parse_env_list(os.getenv(excludes_env_var) or "")
return AllowList(includes=includes, excludes=excludes)
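# Illustrative usage (added here for documentation; not part of the original module):
#   allow_list = AllowList(
#       includes=["gcp.gen_ai.*"],
#       excludes=["gcp.gen_ai.operation.config.seed"],
#   )
#   allow_list.allowed("gcp.gen_ai.operation.config.top_k")  # -> True
#   allow_list.allowed("gcp.gen_ai.operation.config.seed")   # -> False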

View File

@ -1,18 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Semantic Convention still being defined in:
# https://github.com/open-telemetry/semantic-conventions/pull/2125
GCP_GENAI_OPERATION_CONFIG = "gcp.gen_ai.operation.config"

View File

@ -1,301 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from typing import (
Any,
Dict,
Optional,
Protocol,
Sequence,
Set,
Tuple,
Union,
)
Primitive = Union[bool, str, int, float]
BoolList = list[bool]
StringList = list[str]
IntList = list[int]
FloatList = list[float]
HomogenousPrimitiveList = Union[BoolList, StringList, IntList, FloatList]
FlattenedValue = Union[Primitive, HomogenousPrimitiveList]
FlattenedDict = Dict[str, FlattenedValue]
class FlattenFunc(Protocol):
def __call__(
self,
key: str,
value: Any,
exclude_keys: Set[str],
rename_keys: Dict[str, str],
flatten_functions: Dict[str, "FlattenFunc"],
**kwargs: Any,
) -> Any:
return None
_logger = logging.getLogger(__name__)
def _concat_key(prefix: Optional[str], suffix: str):
if not prefix:
return suffix
return f"{prefix}.{suffix}"
def _is_primitive(v):
for t in [str, bool, int, float]:
if isinstance(v, t):
return True
return False
def _is_homogenous_primitive_list(v):
if not isinstance(v, list):
return False
if len(v) == 0:
return True
if not _is_primitive(v[0]):
return False
first_entry_value_type = type(v[0])
for entry in v[1:]:
if not isinstance(entry, first_entry_value_type):
return False
return True
def _get_flatten_func(
flatten_functions: Dict[str, FlattenFunc], key_names: set[str]
) -> Optional[FlattenFunc]:
for key in key_names:
flatten_func = flatten_functions.get(key)
if flatten_func is not None:
return flatten_func
return None
def _flatten_with_flatten_func(
key: str,
value: Any,
exclude_keys: Set[str],
rename_keys: Dict[str, str],
flatten_functions: Dict[str, FlattenFunc],
key_names: Set[str],
) -> Tuple[bool, Any]:
flatten_func = _get_flatten_func(flatten_functions, key_names)
if flatten_func is None:
return False, value
func_output = flatten_func(
key,
value,
exclude_keys=exclude_keys,
rename_keys=rename_keys,
flatten_functions=flatten_functions,
)
if func_output is None:
return True, {}
if _is_primitive(func_output) or _is_homogenous_primitive_list(
func_output
):
return True, {key: func_output}
return False, func_output
def _flatten_compound_value_using_json(
key: str,
value: Any,
exclude_keys: Set[str],
rename_keys: Dict[str, str],
flatten_functions: Dict[str, FlattenFunc],
_from_json=False,
) -> FlattenedDict:
if _from_json:
_logger.debug(
"Cannot flatten value with key %s; value: %s", key, value
)
return {}
try:
json_string = json.dumps(value)
except TypeError:
_logger.debug(
"Cannot flatten value with key %s; value: %s. Not JSON serializable.",
key,
value,
)
return {}
json_value = json.loads(json_string)
return _flatten_value(
key,
json_value,
exclude_keys=exclude_keys,
rename_keys=rename_keys,
flatten_functions=flatten_functions,
# Ensure that we don't recurse indefinitely if "json.loads()" somehow returns
# a complex, compound object that does not get handled by the "primitive", "list",
# or "dict" cases. Prevents falling back on the JSON serialization fallback path.
_from_json=True,
)
def _flatten_compound_value(
key: str,
value: Any,
exclude_keys: Set[str],
rename_keys: Dict[str, str],
flatten_functions: Dict[str, FlattenFunc],
key_names: Set[str],
_from_json=False,
) -> FlattenedDict:
fully_flattened_with_flatten_func, value = _flatten_with_flatten_func(
key=key,
value=value,
exclude_keys=exclude_keys,
rename_keys=rename_keys,
flatten_functions=flatten_functions,
key_names=key_names,
)
if fully_flattened_with_flatten_func:
return value
if isinstance(value, dict):
return _flatten_dict(
value,
key_prefix=key,
exclude_keys=exclude_keys,
rename_keys=rename_keys,
flatten_functions=flatten_functions,
)
if isinstance(value, list):
if _is_homogenous_primitive_list(value):
return {key: value}
return _flatten_list(
value,
key_prefix=key,
exclude_keys=exclude_keys,
rename_keys=rename_keys,
flatten_functions=flatten_functions,
)
if hasattr(value, "model_dump"):
return _flatten_dict(
value.model_dump(),
key_prefix=key,
exclude_keys=exclude_keys,
rename_keys=rename_keys,
flatten_functions=flatten_functions,
)
return _flatten_compound_value_using_json(
key,
value,
exclude_keys=exclude_keys,
rename_keys=rename_keys,
flatten_functions=flatten_functions,
_from_json=_from_json,
)
def _flatten_value(
key: str,
value: Any,
exclude_keys: Set[str],
rename_keys: Dict[str, str],
flatten_functions: Dict[str, FlattenFunc],
_from_json=False,
) -> FlattenedDict:
if value is None:
return {}
key_names = set([key])
renamed_key = rename_keys.get(key)
if renamed_key is not None:
key_names.add(renamed_key)
key = renamed_key
if key_names & exclude_keys:
return {}
if _is_primitive(value):
return {key: value}
return _flatten_compound_value(
key=key,
value=value,
exclude_keys=exclude_keys,
rename_keys=rename_keys,
flatten_functions=flatten_functions,
key_names=key_names,
_from_json=_from_json,
)
def _flatten_dict(
d: Dict[str, Any],
key_prefix: str,
exclude_keys: Set[str],
rename_keys: Dict[str, str],
flatten_functions: Dict[str, FlattenFunc],
) -> FlattenedDict:
result = {}
for key, value in d.items():
if key in exclude_keys:
continue
full_key = _concat_key(key_prefix, key)
flattened = _flatten_value(
full_key,
value,
exclude_keys=exclude_keys,
rename_keys=rename_keys,
flatten_functions=flatten_functions,
)
result.update(flattened)
return result
def _flatten_list(
lst: list[Any],
key_prefix: str,
exclude_keys: Set[str],
rename_keys: Dict[str, str],
flatten_functions: Dict[str, FlattenFunc],
) -> FlattenedDict:
result = {}
result[_concat_key(key_prefix, "length")] = len(lst)
for index, value in enumerate(lst):
full_key = f"{key_prefix}[{index}]"
flattened = _flatten_value(
full_key,
value,
exclude_keys=exclude_keys,
rename_keys=rename_keys,
flatten_functions=flatten_functions,
)
result.update(flattened)
return result
def flatten_dict(
d: Dict[str, Any],
key_prefix: Optional[str] = None,
exclude_keys: Optional[Sequence[str]] = None,
rename_keys: Optional[Dict[str, str]] = None,
flatten_functions: Optional[Dict[str, FlattenFunc]] = None,
):
key_prefix = key_prefix or ""
exclude_keys = set(exclude_keys or [])
rename_keys = rename_keys or {}
flatten_functions = flatten_functions or {}
return _flatten_dict(
d,
key_prefix=key_prefix,
exclude_keys=exclude_keys,
rename_keys=rename_keys,
flatten_functions=flatten_functions,
)
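# Illustrative example (added here for documentation; not part of the original module):
#   flatten_dict({"a": {"b": 1}, "c": [True, False]})
#   # -> {"a.b": 1, "c": [True, False]}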

View File

@ -1,23 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
_CONTENT_RECORDING_ENV_VAR = (
"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"
)
def is_content_recording_enabled():
return os.getenv(_CONTENT_RECORDING_ENV_VAR, "false").lower() == "true"

View File

@ -1,790 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import json
import logging
import os
import time
from typing import Any, AsyncIterator, Awaitable, Iterator, Optional, Union
from google.genai.models import AsyncModels, Models
from google.genai.types import (
BlockedReason,
Candidate,
Content,
ContentListUnion,
ContentListUnionDict,
ContentUnion,
ContentUnionDict,
GenerateContentConfig,
GenerateContentConfigOrDict,
GenerateContentResponse,
)
from opentelemetry import trace
from opentelemetry.semconv._incubating.attributes import (
code_attributes,
gen_ai_attributes,
)
from opentelemetry.semconv.attributes import error_attributes
from .allowlist_util import AllowList
from .custom_semconv import GCP_GENAI_OPERATION_CONFIG
from .dict_util import flatten_dict
from .flags import is_content_recording_enabled
from .otel_wrapper import OTelWrapper
from .tool_call_wrapper import wrapped as wrapped_tool
_logger = logging.getLogger(__name__)
# Constant used to make the absence of content more understandable.
_CONTENT_ELIDED = "<elided>"
# Constant used for the value of 'gen_ai.operation.name'.
_GENERATE_CONTENT_OP_NAME = "generate_content"
class _MethodsSnapshot:
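    # Captures the original (un-instrumented) Models/AsyncModels methods so that
    # uninstrument_generate_content() can restore them later via restore().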
def __init__(self):
self._original_generate_content = Models.generate_content
self._original_generate_content_stream = Models.generate_content_stream
self._original_async_generate_content = AsyncModels.generate_content
self._original_async_generate_content_stream = (
AsyncModels.generate_content_stream
)
@property
def generate_content(self):
return self._original_generate_content
@property
def generate_content_stream(self):
return self._original_generate_content_stream
@property
def async_generate_content(self):
return self._original_async_generate_content
@property
def async_generate_content_stream(self):
return self._original_async_generate_content_stream
def restore(self):
Models.generate_content = self._original_generate_content
Models.generate_content_stream = self._original_generate_content_stream
AsyncModels.generate_content = self._original_async_generate_content
AsyncModels.generate_content_stream = (
self._original_async_generate_content_stream
)
def _get_vertexai_system_name():
return gen_ai_attributes.GenAiSystemValues.VERTEX_AI.name.lower()
def _get_gemini_system_name():
return gen_ai_attributes.GenAiSystemValues.GEMINI.name.lower()
def _guess_genai_system_from_env():
if os.environ.get("GOOGLE_GENAI_USE_VERTEXAI", "0").lower() in [
"true",
"1",
]:
return _get_vertexai_system_name()
return _get_gemini_system_name()
def _get_is_vertexai(models_object: Union[Models, AsyncModels]):
# Since commit 8e561de04965bb8766db87ad8eea7c57c1040442 of "googleapis/python-genai",
# it is possible to obtain the information using a documented property.
if hasattr(models_object, "vertexai"):
vertexai_attr = getattr(models_object, "vertexai")
if vertexai_attr is not None:
return vertexai_attr
# For earlier revisions, it is necessary to deeply inspect the internals.
if hasattr(models_object, "_api_client"):
client = getattr(models_object, "_api_client")
if not client:
return None
if hasattr(client, "vertexai"):
return getattr(client, "vertexai")
return None
def _determine_genai_system(models_object: Union[Models, AsyncModels]):
vertexai_attr = _get_is_vertexai(models_object)
if vertexai_attr is None:
return _guess_genai_system_from_env()
if vertexai_attr:
return _get_vertexai_system_name()
return _get_gemini_system_name()
def _to_dict(value: object):
if isinstance(value, dict):
return value
if hasattr(value, "model_dump"):
return value.model_dump()
return json.loads(json.dumps(value))
def _add_request_options_to_span(
span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList
):
if config is None:
return
span_context = span.get_span_context()
if not span_context.trace_flags.sampled:
# Avoid potentially costly traversal of config
# options if the span will be dropped, anyway.
return
# Automatically derive attributes from the contents of the
# config object. This ensures that all relevant parameters
# are captured in the telemetry data (except for those
# that are excluded via "exclude_keys"). Dynamic attributes (those
# starting with "gcp.gen_ai." instead of simply "gen_ai.request.")
# are filtered with the "allow_list" before inclusion in the span.
attributes = flatten_dict(
_to_dict(config),
# A custom prefix is used, because the names/structure of the
# configuration is likely to be specific to Google Gen AI SDK.
key_prefix=GCP_GENAI_OPERATION_CONFIG,
exclude_keys=[
# System instruction can be overly long for a span attribute.
# Additionally, it is recorded as an event (log), instead.
"gcp.gen_ai.operation.config.system_instruction",
],
# Although a custom prefix is used by default, some of the attributes
# are captured in common, standard, Semantic Conventions. For the
# well-known properties whose values align with Semantic Conventions,
# we ensure that the key name matches the standard SemConv name.
rename_keys={
# TODO: add more entries here as more semantic conventions are
# generalized to cover more of the available config options.
"gcp.gen_ai.operation.config.temperature": gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE,
"gcp.gen_ai.operation.config.top_k": gen_ai_attributes.GEN_AI_REQUEST_TOP_K,
"gcp.gen_ai.operation.config.top_p": gen_ai_attributes.GEN_AI_REQUEST_TOP_P,
"gcp.gen_ai.operation.config.candidate_count": gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT,
"gcp.gen_ai.operation.config.max_output_tokens": gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS,
"gcp.gen_ai.operation.config.stop_sequences": gen_ai_attributes.GEN_AI_REQUEST_STOP_SEQUENCES,
"gcp.gen_ai.operation.config.frequency_penalty": gen_ai_attributes.GEN_AI_REQUEST_FREQUENCY_PENALTY,
"gcp.gen_ai.operation.config.presence_penalty": gen_ai_attributes.GEN_AI_REQUEST_PRESENCE_PENALTY,
"gcp.gen_ai.operation.config.seed": gen_ai_attributes.GEN_AI_REQUEST_SEED,
},
)
for key, value in attributes.items():
if key.startswith(
GCP_GENAI_OPERATION_CONFIG
) and not allow_list.allowed(key):
# The allowlist is used to control inclusion of the dynamic keys.
continue
span.set_attribute(key, value)
def _get_response_property(response: GenerateContentResponse, path: str):
path_segments = path.split(".")
current_context = response
for path_segment in path_segments:
if current_context is None:
return None
if isinstance(current_context, dict):
current_context = current_context.get(path_segment)
else:
current_context = getattr(current_context, path_segment)
return current_context
def _coerce_config_to_object(
config: GenerateContentConfigOrDict,
) -> GenerateContentConfig:
if isinstance(config, GenerateContentConfig):
return config
# Input must be a dictionary; convert by invoking the constructor.
return GenerateContentConfig(**config)
def _wrapped_config_with_tools(
otel_wrapper: OTelWrapper,
config: GenerateContentConfig,
**kwargs,
):
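    # Wraps each user-supplied tool callable (see tool_call_wrapper) so that tool
    # invocations are themselves instrumented; the original config is left
    # untouched by shallow-copying it before replacing the tools list.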
if not config.tools:
return config
result = copy.copy(config)
result.tools = [
wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools
]
return result
class _GenerateContentInstrumentationHelper:
def __init__(
self,
models_object: Union[Models, AsyncModels],
otel_wrapper: OTelWrapper,
model: str,
generate_content_config_key_allowlist: Optional[AllowList] = None,
):
self._start_time = time.time_ns()
self._otel_wrapper = otel_wrapper
self._genai_system = _determine_genai_system(models_object)
self._genai_request_model = model
self._finish_reasons_set = set()
self._error_type = None
self._input_tokens = 0
self._output_tokens = 0
self._content_recording_enabled = is_content_recording_enabled()
self._response_index = 0
self._candidate_index = 0
self._generate_content_config_key_allowlist = (
generate_content_config_key_allowlist or AllowList()
)
def wrapped_config(
self, config: Optional[GenerateContentConfigOrDict]
) -> Optional[GenerateContentConfig]:
if config is None:
return None
return _wrapped_config_with_tools(
self._otel_wrapper,
_coerce_config_to_object(config),
extra_span_attributes={"gen_ai.system": self._genai_system},
)
def start_span_as_current_span(
self, model_name, function_name, end_on_exit=True
):
return self._otel_wrapper.start_as_current_span(
f"{_GENERATE_CONTENT_OP_NAME} {model_name}",
start_time=self._start_time,
attributes={
code_attributes.CODE_FUNCTION_NAME: function_name,
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
},
end_on_exit=end_on_exit,
)
def process_request(
self,
contents: Union[ContentListUnion, ContentListUnionDict],
config: Optional[GenerateContentConfigOrDict],
):
span = trace.get_current_span()
_add_request_options_to_span(
span, config, self._generate_content_config_key_allowlist
)
self._maybe_log_system_instruction(config=config)
self._maybe_log_user_prompt(contents)
def process_response(self, response: GenerateContentResponse):
# TODO: Determine if there are other response properties that
# need to be reflected back into the span attributes.
#
# See also: TODOS.md.
self._update_finish_reasons(response)
self._maybe_update_token_counts(response)
self._maybe_update_error_type(response)
self._maybe_log_response(response)
self._response_index += 1
def process_error(self, e: Exception):
self._error_type = str(e.__class__.__name__)
def finalize_processing(self):
span = trace.get_current_span()
span.set_attribute(
gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS, self._input_tokens
)
span.set_attribute(
gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS, self._output_tokens
)
span.set_attribute(
gen_ai_attributes.GEN_AI_RESPONSE_FINISH_REASONS,
sorted(self._finish_reasons_set),
)
self._record_token_usage_metric()
self._record_duration_metric()
def _update_finish_reasons(self, response):
if not response.candidates:
return
for candidate in response.candidates:
finish_reason = candidate.finish_reason
if finish_reason is None:
continue
finish_reason_str = finish_reason.name.lower().removeprefix(
"finish_reason_"
)
self._finish_reasons_set.add(finish_reason_str)
def _maybe_update_token_counts(self, response: GenerateContentResponse):
input_tokens = _get_response_property(
response, "usage_metadata.prompt_token_count"
)
output_tokens = _get_response_property(
response, "usage_metadata.candidates_token_count"
)
if input_tokens and isinstance(input_tokens, int):
self._input_tokens += input_tokens
if output_tokens and isinstance(output_tokens, int):
self._output_tokens += output_tokens
def _maybe_update_error_type(self, response: GenerateContentResponse):
if response.candidates:
return
if (
(not response.prompt_feedback)
or (not response.prompt_feedback.block_reason)
or (
response.prompt_feedback.block_reason
== BlockedReason.BLOCKED_REASON_UNSPECIFIED
)
):
self._error_type = "NO_CANDIDATES"
return
# TODO: in the case where there are no candidate responses due to
# safety settings like this, it might make sense to emit an event
# that contains more details regarding the safety settings, their
# thresholds, etc. However, this requires defining an associated
# semantic convention to capture this. Follow up with SemConv to
# establish appropriate data modelling to capture these details,
# and then emit those details accordingly. (For the time being,
# we use the defined 'error.type' semantic convention to relay
# just the minimum amount of error information here).
#
# See also: "TODOS.md"
block_reason = response.prompt_feedback.block_reason.name.upper()
self._error_type = f"BLOCKED_{block_reason}"
def _maybe_log_system_instruction(
self, config: Optional[GenerateContentConfigOrDict] = None
):
system_instruction = None
if config is not None:
if isinstance(config, dict):
system_instruction = config.get("system_instruction")
else:
system_instruction = config.system_instruction
if not system_instruction:
return
attributes = {
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
}
# TODO: determine if "role" should be reported here or not. It is unclear
# since the caller does not supply a "role" and since this comes through
# a property named "system_instruction" which would seem to align with
# the default "role" that is allowed to be omitted by default.
#
# See also: "TODOS.md"
body = {}
if self._content_recording_enabled:
body["content"] = _to_dict(system_instruction)
else:
body["content"] = _CONTENT_ELIDED
self._otel_wrapper.log_system_prompt(
attributes=attributes,
body=body,
)
def _maybe_log_user_prompt(
self, contents: Union[ContentListUnion, ContentListUnionDict]
):
if isinstance(contents, list):
total = len(contents)
index = 0
for entry in contents:
self._maybe_log_single_user_prompt(
entry, index=index, total=total
)
index += 1
else:
self._maybe_log_single_user_prompt(contents)
def _maybe_log_single_user_prompt(
self, contents: Union[ContentUnion, ContentUnionDict], index=0, total=1
):
# TODO: figure out how to report the index in a manner that is
# aligned with the OTel semantic conventions.
attributes = {
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
}
# TODO: determine if "role" should be reported here or not and, if so,
# what the value ought to be. It is not clear whether there is always
# a role supplied (and it looks like there could be cases where there
# is more than one role present in the supplied contents)?
#
# See also: "TODOS.md"
body = {}
if self._content_recording_enabled:
logged_contents = contents
if isinstance(contents, list):
logged_contents = Content(parts=contents)
body["content"] = _to_dict(logged_contents)
else:
body["content"] = _CONTENT_ELIDED
self._otel_wrapper.log_user_prompt(
attributes=attributes,
body=body,
)
def _maybe_log_response_stats(self, response: GenerateContentResponse):
# TODO: Determine if there is a way that we can log a summary
# of the overall response in a manner that is aligned with
# Semantic Conventions. For example, it would be natural
# to report an event that looks something like:
#
# gen_ai.response.stats {
# response_index: 0,
# candidate_count: 3,
# parts_per_candidate: [
# 3,
# 1,
# 5
# ]
# }
#
pass
def _maybe_log_response_safety_ratings(
self, response: GenerateContentResponse
):
# TODO: Determine if there is a way that we can log
# the "prompt_feedback". This would be especially useful
# in the case where the response is blocked.
pass
def _maybe_log_response(self, response: GenerateContentResponse):
self._maybe_log_response_stats(response)
self._maybe_log_response_safety_ratings(response)
if not response.candidates:
return
candidate_in_response_index = 0
for candidate in response.candidates:
self._maybe_log_response_candidate(
candidate,
flat_candidate_index=self._candidate_index,
candidate_in_response_index=candidate_in_response_index,
response_index=self._response_index,
)
self._candidate_index += 1
candidate_in_response_index += 1
def _maybe_log_response_candidate(
self,
candidate: Candidate,
flat_candidate_index: int,
candidate_in_response_index: int,
response_index: int,
):
# TODO: Determine if there might be a way to report the
# response index and candidate response index.
attributes = {
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
}
# TODO: determine if "role" should be reported here or not and, if so,
# what the value ought to be.
#
# TODO: extract tool information into a separate tool message.
#
# TODO: determine if/when we need to emit a 'gen_ai.assistant.message' event.
#
# TODO: determine how to report other relevant details in the candidate that
# are not presently captured by Semantic Conventions. For example, the
# "citation_metadata", "grounding_metadata", "logprobs_result", etc.
#
# See also: "TODOS.md"
body = {
"index": flat_candidate_index,
}
if self._content_recording_enabled:
if candidate.content:
body["content"] = _to_dict(candidate.content)
else:
body["content"] = _CONTENT_ELIDED
if candidate.finish_reason is not None:
body["finish_reason"] = candidate.finish_reason.name
self._otel_wrapper.log_response_content(
attributes=attributes,
body=body,
)
def _record_token_usage_metric(self):
self._otel_wrapper.token_usage_metric.record(
self._input_tokens,
attributes={
gen_ai_attributes.GEN_AI_TOKEN_TYPE: "input",
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
},
)
self._otel_wrapper.token_usage_metric.record(
self._output_tokens,
attributes={
gen_ai_attributes.GEN_AI_TOKEN_TYPE: "output",
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
},
)
def _record_duration_metric(self):
attributes = {
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
}
if self._error_type is not None:
attributes[error_attributes.ERROR_TYPE] = self._error_type
duration_nanos = time.time_ns() - self._start_time
duration_seconds = duration_nanos / 1e9
self._otel_wrapper.operation_duration_metric.record(
duration_seconds,
attributes=attributes,
)
def _create_instrumented_generate_content(
snapshot: _MethodsSnapshot,
otel_wrapper: OTelWrapper,
generate_content_config_key_allowlist: Optional[AllowList] = None,
):
wrapped_func = snapshot.generate_content
@functools.wraps(wrapped_func)
def instrumented_generate_content(
self: Models,
*,
model: str,
contents: Union[ContentListUnion, ContentListUnionDict],
config: Optional[GenerateContentConfigOrDict] = None,
**kwargs: Any,
) -> GenerateContentResponse:
helper = _GenerateContentInstrumentationHelper(
self,
otel_wrapper,
model,
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
)
with helper.start_span_as_current_span(
model, "google.genai.Models.generate_content"
):
helper.process_request(contents, config)
try:
response = wrapped_func(
self,
model=model,
contents=contents,
config=helper.wrapped_config(config),
**kwargs,
)
helper.process_response(response)
return response
except Exception as error:
helper.process_error(error)
raise
finally:
helper.finalize_processing()
return instrumented_generate_content
def _create_instrumented_generate_content_stream(
snapshot: _MethodsSnapshot,
otel_wrapper: OTelWrapper,
generate_content_config_key_allowlist: Optional[AllowList] = None,
):
wrapped_func = snapshot.generate_content_stream
@functools.wraps(wrapped_func)
def instrumented_generate_content_stream(
self: Models,
*,
model: str,
contents: Union[ContentListUnion, ContentListUnionDict],
config: Optional[GenerateContentConfigOrDict] = None,
**kwargs: Any,
) -> Iterator[GenerateContentResponse]:
helper = _GenerateContentInstrumentationHelper(
self,
otel_wrapper,
model,
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
)
with helper.start_span_as_current_span(
model, "google.genai.Models.generate_content_stream"
):
helper.process_request(contents, config)
try:
for response in wrapped_func(
self,
model=model,
contents=contents,
config=helper.wrapped_config(config),
**kwargs,
):
helper.process_response(response)
yield response
except Exception as error:
helper.process_error(error)
raise
finally:
helper.finalize_processing()
return instrumented_generate_content_stream
def _create_instrumented_async_generate_content(
snapshot: _MethodsSnapshot,
otel_wrapper: OTelWrapper,
generate_content_config_key_allowlist: Optional[AllowList] = None,
):
wrapped_func = snapshot.async_generate_content
@functools.wraps(wrapped_func)
async def instrumented_generate_content(
self: AsyncModels,
*,
model: str,
contents: Union[ContentListUnion, ContentListUnionDict],
config: Optional[GenerateContentConfigOrDict] = None,
**kwargs: Any,
) -> GenerateContentResponse:
helper = _GenerateContentInstrumentationHelper(
self,
otel_wrapper,
model,
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
)
with helper.start_span_as_current_span(
model, "google.genai.AsyncModels.generate_content"
):
helper.process_request(contents, config)
try:
response = await wrapped_func(
self,
model=model,
contents=contents,
config=helper.wrapped_config(config),
**kwargs,
)
helper.process_response(response)
return response
except Exception as error:
helper.process_error(error)
raise
finally:
helper.finalize_processing()
return instrumented_generate_content
# Disabling type checking because this is not yet implemented and tested fully.
def _create_instrumented_async_generate_content_stream( # type: ignore
snapshot: _MethodsSnapshot,
otel_wrapper: OTelWrapper,
generate_content_config_key_allowlist: Optional[AllowList] = None,
):
wrapped_func = snapshot.async_generate_content_stream
@functools.wraps(wrapped_func)
async def instrumented_generate_content_stream(
self: AsyncModels,
*,
model: str,
contents: Union[ContentListUnion, ContentListUnionDict],
config: Optional[GenerateContentConfigOrDict] = None,
**kwargs: Any,
) -> Awaitable[AsyncIterator[GenerateContentResponse]]: # type: ignore
helper = _GenerateContentInstrumentationHelper(
self,
otel_wrapper,
model,
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
)
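        # end_on_exit=False keeps the span open beyond this coroutine so that it
        # can wrap the async generator consumed by the caller; the span is ended
        # later via trace.use_span(..., end_on_exit=True) in the wrapper below.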
with helper.start_span_as_current_span(
model,
"google.genai.AsyncModels.generate_content_stream",
end_on_exit=False,
) as span:
helper.process_request(contents, config)
try:
response_async_generator = await wrapped_func(
self,
model=model,
contents=contents,
config=helper.wrapped_config(config),
**kwargs,
)
except Exception as error: # pylint: disable=broad-exception-caught
helper.process_error(error)
helper.finalize_processing()
with trace.use_span(span, end_on_exit=True):
raise
async def _response_async_generator_wrapper():
with trace.use_span(span, end_on_exit=True):
try:
async for response in response_async_generator:
helper.process_response(response)
yield response
except Exception as error:
helper.process_error(error)
raise
finally:
helper.finalize_processing()
return _response_async_generator_wrapper()
return instrumented_generate_content_stream
def uninstrument_generate_content(snapshot: object):
assert isinstance(snapshot, _MethodsSnapshot)
snapshot.restore()
def instrument_generate_content(
otel_wrapper: OTelWrapper,
generate_content_config_key_allowlist: Optional[AllowList] = None,
) -> object:
snapshot = _MethodsSnapshot()
Models.generate_content = _create_instrumented_generate_content(
snapshot,
otel_wrapper,
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
)
Models.generate_content_stream = _create_instrumented_generate_content_stream(
snapshot,
otel_wrapper,
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
)
AsyncModels.generate_content = _create_instrumented_async_generate_content(
snapshot,
otel_wrapper,
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
)
AsyncModels.generate_content_stream = _create_instrumented_async_generate_content_stream(
snapshot,
otel_wrapper,
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
)
return snapshot

View File

@ -1,67 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Collection, Optional
from opentelemetry._events import get_event_logger_provider
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.metrics import get_meter_provider
from opentelemetry.trace import get_tracer_provider
from .allowlist_util import AllowList
from .generate_content import (
instrument_generate_content,
uninstrument_generate_content,
)
from .otel_wrapper import OTelWrapper
class GoogleGenAiSdkInstrumentor(BaseInstrumentor):
def __init__(
self, generate_content_config_key_allowlist: Optional[AllowList] = None
):
self._generate_content_snapshot = None
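        # The allowlist controls which dynamic 'gcp.gen_ai.operation.config.*'
        # span attributes are recorded; it may be supplied directly or configured
        # via the environment variables referenced below.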
self._generate_content_config_key_allowlist = (
generate_content_config_key_allowlist
or AllowList.from_env(
"OTEL_GOOGLE_GENAI_GENERATE_CONTENT_CONFIG_INCLUDES",
excludes_env_var="OTEL_GOOGLE_GENAI_GENERATE_CONTENT_CONFIG_EXCLUDES",
)
)
# Inherited, abstract function from 'BaseInstrumentor'. Even though 'self' is
# not used in the definition, a method is required per the API contract.
def instrumentation_dependencies(self) -> Collection[str]: # pylint: disable=no-self-use
return ["google-genai>=1.0.0,<2"]
def _instrument(self, **kwargs: Any):
tracer_provider = (
kwargs.get("tracer_provider") or get_tracer_provider()
)
event_logger_provider = (
kwargs.get("event_logger_provider") or get_event_logger_provider()
)
meter_provider = kwargs.get("meter_provider") or get_meter_provider()
otel_wrapper = OTelWrapper.from_providers(
tracer_provider=tracer_provider,
event_logger_provider=event_logger_provider,
meter_provider=meter_provider,
)
self._generate_content_snapshot = instrument_generate_content(
otel_wrapper,
generate_content_config_key_allowlist=self._generate_content_config_key_allowlist,
)
def _uninstrument(self, **kwargs: Any):
uninstrument_generate_content(self._generate_content_snapshot)

View File

@ -1,92 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import google.genai
from opentelemetry._events import Event
from opentelemetry.semconv._incubating.metrics import gen_ai_metrics
from opentelemetry.semconv.schemas import Schemas
from .version import __version__ as _LIBRARY_VERSION
_logger = logging.getLogger(__name__)
_SCOPE_NAME = "opentelemetry.instrumentation.google_genai"
_PYPI_PACKAGE_NAME = "opentelemetry-instrumentation-google-genai"
_SCHEMA_URL = Schemas.V1_30_0.value
_SCOPE_ATTRIBUTES = {
"gcp.client.name": "google.genai",
"gcp.client.repo": "googleapis/python-genai",
"gcp.client.version": google.genai.__version__,
"pypi.package.name": _PYPI_PACKAGE_NAME,
}
class OTelWrapper:
def __init__(self, tracer, event_logger, meter):
self._tracer = tracer
self._event_logger = event_logger
self._meter = meter
self._operation_duration_metric = (
gen_ai_metrics.create_gen_ai_client_operation_duration(meter)
)
self._token_usage_metric = (
gen_ai_metrics.create_gen_ai_client_token_usage(meter)
)
@staticmethod
def from_providers(tracer_provider, event_logger_provider, meter_provider):
return OTelWrapper(
tracer_provider.get_tracer(
_SCOPE_NAME, _LIBRARY_VERSION, _SCHEMA_URL, _SCOPE_ATTRIBUTES
),
event_logger_provider.get_event_logger(
_SCOPE_NAME, _LIBRARY_VERSION, _SCHEMA_URL, _SCOPE_ATTRIBUTES
),
meter=meter_provider.get_meter(
_SCOPE_NAME, _LIBRARY_VERSION, _SCHEMA_URL, _SCOPE_ATTRIBUTES
),
)
def start_as_current_span(self, *args, **kwargs):
return self._tracer.start_as_current_span(*args, **kwargs)
@property
def operation_duration_metric(self):
return self._operation_duration_metric
@property
def token_usage_metric(self):
return self._token_usage_metric
def log_system_prompt(self, attributes, body):
_logger.debug("Recording system prompt.")
event_name = "gen_ai.system.message"
self._log_event(event_name, attributes, body)
def log_user_prompt(self, attributes, body):
_logger.debug("Recording user prompt.")
event_name = "gen_ai.user.message"
self._log_event(event_name, attributes, body)
def log_response_content(self, attributes, body):
_logger.debug("Recording response.")
event_name = "gen_ai.choice"
self._log_event(event_name, attributes, body)
def _log_event(self, event_name, attributes, body):
event = Event(event_name, body=body, attributes=attributes)
self._event_logger.emit(event)
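A short sketch of constructing the wrapper above from the globally registered providers, mirroring what the instrumentor's _instrument method does; the otel_wrapper module path is internal and inferred here, so treat it as an assumption:

from opentelemetry._events import get_event_logger_provider
from opentelemetry.instrumentation.google_genai.otel_wrapper import OTelWrapper
from opentelemetry.metrics import get_meter_provider
from opentelemetry.trace import get_tracer_provider

wrapper = OTelWrapper.from_providers(
    tracer_provider=get_tracer_provider(),
    event_logger_provider=get_event_logger_provider(),
    meter_provider=get_meter_provider(),
)
with wrapper.start_as_current_span("example-span"):
    # Histogram created by gen_ai_metrics.create_gen_ai_client_operation_duration.
    wrapper.operation_duration_metric.record(0.25)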

View File

@ -1,15 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_instruments = ("google-genai >= 1.0.0",)

View File

@ -1,220 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import json
from typing import Any, Callable, Optional, Union
from google.genai.types import (
ToolListUnion,
ToolListUnionDict,
ToolOrDict,
)
from opentelemetry import trace
from opentelemetry.semconv._incubating.attributes import (
code_attributes,
)
from .flags import is_content_recording_enabled
from .otel_wrapper import OTelWrapper
ToolFunction = Callable[..., Any]
def _is_primitive(value):
return isinstance(value, (str, int, bool, float))
def _to_otel_value(python_value):
"""Coerces parameters to something representable with Open Telemetry."""
if python_value is None or _is_primitive(python_value):
return python_value
if isinstance(python_value, list):
return [_to_otel_value(x) for x in python_value]
if isinstance(python_value, dict):
return {
key: _to_otel_value(val) for (key, val) in python_value.items()
}
if hasattr(python_value, "model_dump"):
return python_value.model_dump()
if hasattr(python_value, "__dict__"):
return _to_otel_value(python_value.__dict__)
return repr(python_value)
def _is_homogenous_primitive_list(value):
if not isinstance(value, list):
return False
if not value:
return True
if not _is_primitive(value[0]):
return False
first_type = type(value[0])
for entry in value[1:]:
if not isinstance(entry, first_type):
return False
return True
def _to_otel_attribute(python_value):
otel_value = _to_otel_value(python_value)
if _is_primitive(otel_value) or _is_homogenous_primitive_list(otel_value):
return otel_value
return json.dumps(otel_value)
def _create_function_span_name(wrapped_function):
"""Constructs the span name for a given local function tool call."""
function_name = wrapped_function.__name__
return f"execute_tool {function_name}"
def _create_function_span_attributes(
wrapped_function, function_args, function_kwargs, extra_span_attributes
):
"""Creates the attributes for a tool call function span."""
result = {}
if extra_span_attributes:
result.update(extra_span_attributes)
result["gen_ai.operation.name"] = "execute_tool"
result["gen_ai.tool.name"] = wrapped_function.__name__
if wrapped_function.__doc__:
result["gen_ai.tool.description"] = wrapped_function.__doc__
result[code_attributes.CODE_FUNCTION_NAME] = wrapped_function.__name__
result["code.module"] = wrapped_function.__module__
result["code.args.positional.count"] = len(function_args)
result["code.args.keyword.count"] = len(function_kwargs)
return result
def _record_function_call_argument(
span, param_name, param_value, include_values
):
attribute_prefix = f"code.function.parameters.{param_name}"
type_attribute = f"{attribute_prefix}.type"
span.set_attribute(type_attribute, type(param_value).__name__)
if include_values:
value_attribute = f"{attribute_prefix}.value"
span.set_attribute(value_attribute, _to_otel_attribute(param_value))
def _record_function_call_arguments(
otel_wrapper, wrapped_function, function_args, function_kwargs
):
"""Records the details about a function invocation as span attributes."""
include_values = is_content_recording_enabled()
span = trace.get_current_span()
signature = inspect.signature(wrapped_function)
params = list(signature.parameters.values())
for index, entry in enumerate(function_args):
param_name = f"args[{index}]"
if index < len(params):
param_name = params[index].name
_record_function_call_argument(span, param_name, entry, include_values)
for key, value in function_kwargs.items():
_record_function_call_argument(span, key, value, include_values)
def _record_function_call_result(otel_wrapper, wrapped_function, result):
"""Records the details about a function result as span attributes."""
include_values = is_content_recording_enabled()
span = trace.get_current_span()
span.set_attribute("code.function.return.type", type(result).__name__)
if include_values:
span.set_attribute(
"code.function.return.value", _to_otel_attribute(result)
)
def _wrap_sync_tool_function(
tool_function: ToolFunction,
otel_wrapper: OTelWrapper,
extra_span_attributes: Optional[dict[str, str]] = None,
**unused_kwargs,
):
@functools.wraps(tool_function)
def wrapped_function(*args, **kwargs):
span_name = _create_function_span_name(tool_function)
attributes = _create_function_span_attributes(
tool_function, args, kwargs, extra_span_attributes
)
with otel_wrapper.start_as_current_span(
span_name, attributes=attributes
):
_record_function_call_arguments(
otel_wrapper, tool_function, args, kwargs
)
result = tool_function(*args, **kwargs)
_record_function_call_result(otel_wrapper, tool_function, result)
return result
return wrapped_function
def _wrap_async_tool_function(
tool_function: ToolFunction,
otel_wrapper: OTelWrapper,
extra_span_attributes: Optional[dict[str, str]] = None,
**unused_kwargs,
):
@functools.wraps(tool_function)
async def wrapped_function(*args, **kwargs):
span_name = _create_function_span_name(tool_function)
attributes = _create_function_span_attributes(
tool_function, args, kwargs, extra_span_attributes
)
with otel_wrapper.start_as_current_span(
span_name, attributes=attributes
):
_record_function_call_arguments(
otel_wrapper, tool_function, args, kwargs
)
result = await tool_function(*args, **kwargs)
_record_function_call_result(otel_wrapper, tool_function, result)
return result
return wrapped_function
def _wrap_tool_function(
tool_function: ToolFunction, otel_wrapper: OTelWrapper, **kwargs
):
if inspect.iscoroutinefunction(tool_function):
return _wrap_async_tool_function(tool_function, otel_wrapper, **kwargs)
return _wrap_sync_tool_function(tool_function, otel_wrapper, **kwargs)
def wrapped(
tool_or_tools: Optional[
Union[ToolFunction, ToolOrDict, ToolListUnion, ToolListUnionDict]
],
otel_wrapper: OTelWrapper,
**kwargs,
):
if tool_or_tools is None:
return None
if isinstance(tool_or_tools, list):
return [
wrapped(item, otel_wrapper, **kwargs) for item in tool_or_tools
]
if isinstance(tool_or_tools, dict):
return {
key: wrapped(value, otel_wrapper, **kwargs)
for (key, value) in tool_or_tools.items()
}
if callable(tool_or_tools):
return _wrap_tool_function(tool_or_tools, otel_wrapper, **kwargs)
return tool_or_tools
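To illustrate the behaviour above, a sketch of wrapping a plain tool function; the module names in the imports are inferred from the package layout and should be treated as assumptions:

from opentelemetry._events import get_event_logger_provider
from opentelemetry.instrumentation.google_genai.otel_wrapper import OTelWrapper
from opentelemetry.instrumentation.google_genai.tool_call_wrapper import wrapped
from opentelemetry.metrics import get_meter_provider
from opentelemetry.trace import get_tracer_provider

def get_weather(city: str) -> str:
    """Returns a canned weather report for the given city."""
    return f"It is always sunny in {city}."

otel = OTelWrapper.from_providers(
    tracer_provider=get_tracer_provider(),
    event_logger_provider=get_event_logger_provider(),
    meter_provider=get_meter_provider(),
)

# The returned callable records an "execute_tool get_weather" span with the
# gen_ai.tool.* and code.* attributes populated by the helpers above.
traced_get_weather = wrapped(get_weather, otel)
print(traced_get_weather("Berlin"))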

View File

@ -1,20 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **IMPORTANT**:
#
# This version should stay below "1.0" until the fundamentals
# in "TODOS.md" have been addressed. Please revisit the TODOs
# listed there before bumping to a stable version.
__version__ = "0.4b0.dev"

View File

@ -1,84 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import google.genai
from .auth import FakeCredentials
from .instrumentation_context import InstrumentationContext
from .otel_mocker import OTelMocker
class TestCase(unittest.TestCase):
def setUp(self):
self._otel = OTelMocker()
self._otel.install()
self._instrumentation_context = None
self._api_key = "test-api-key"
self._project = "test-project"
self._location = "test-location"
self._client = None
self._uses_vertex = False
self._credentials = FakeCredentials()
self._instrumentor_args = {}
def _lazy_init(self):
self._instrumentation_context = InstrumentationContext(
**self._instrumentor_args
)
self._instrumentation_context.install()
def set_instrumentor_constructor_kwarg(self, key, value):
self._instrumentor_args[key] = value
@property
def client(self):
if self._client is None:
self._client = self._create_client()
return self._client
@property
def otel(self):
return self._otel
def set_use_vertex(self, use_vertex):
self._uses_vertex = use_vertex
def reset_client(self):
self._client = None
def reset_instrumentation(self):
if self._instrumentation_context is None:
return
self._instrumentation_context.uninstall()
self._instrumentation_context = None
def _create_client(self):
self._lazy_init()
if self._uses_vertex:
os.environ["GOOGLE_API_KEY"] = self._api_key
return google.genai.Client(
vertexai=True,
project=self._project,
location=self._location,
credentials=self._credentials,
)
return google.genai.Client(vertexai=False, api_key=self._api_key)
def tearDown(self):
if self._instrumentation_context is not None:
self._instrumentation_context.uninstall()
self._otel.uninstall()

View File

@ -1,28 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opentelemetry.instrumentation.google_genai import (
GoogleGenAiSdkInstrumentor,
)
class InstrumentationContext:
def __init__(self, **kwargs):
self._instrumentor = GoogleGenAiSdkInstrumentor(**kwargs)
def install(self):
self._instrumentor.instrument()
def uninstall(self):
self._instrumentor.uninstrument()

View File

@ -1,232 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import opentelemetry._events
import opentelemetry._logs._internal
import opentelemetry.metrics._internal
import opentelemetry.trace
from opentelemetry._events import (
get_event_logger_provider,
set_event_logger_provider,
)
from opentelemetry._logs import get_logger_provider, set_logger_provider
from opentelemetry.metrics import get_meter_provider, set_meter_provider
from opentelemetry.sdk._events import EventLoggerProvider
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry.sdk._logs.export import (
InMemoryLogExporter,
SimpleLogRecordProcessor,
)
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics._internal.export import InMemoryMetricReader
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
InMemorySpanExporter,
)
from opentelemetry.trace import get_tracer_provider, set_tracer_provider
from opentelemetry.util._once import Once
def _bypass_otel_once():
opentelemetry.trace._TRACER_PROVIDER_SET_ONCE = Once()
opentelemetry._logs._internal._LOGGER_PROVIDER_SET_ONCE = Once()
opentelemetry._events._EVENT_LOGGER_PROVIDER_SET_ONCE = Once()
opentelemetry.metrics._internal._METER_PROVIDER_SET_ONCE = Once()
class OTelProviderSnapshot:
def __init__(self):
self._tracer_provider = get_tracer_provider()
self._logger_provider = get_logger_provider()
self._event_logger_provider = get_event_logger_provider()
self._meter_provider = get_meter_provider()
def restore(self):
_bypass_otel_once()
set_tracer_provider(self._tracer_provider)
set_logger_provider(self._logger_provider)
set_event_logger_provider(self._event_logger_provider)
set_meter_provider(self._meter_provider)
class _LogWrapper:
def __init__(self, log_data):
self._log_data = log_data
@property
def scope(self):
return self._log_data.instrumentation_scope
@property
def resource(self):
return self._log_data.log_record.resource
@property
def attributes(self):
return self._log_data.log_record.attributes
@property
def body(self):
return self._log_data.log_record.body
def __str__(self):
return self._log_data.log_record.to_json()
class _MetricDataPointWrapper:
def __init__(self, resource, scope, metric):
self._resource = resource
self._scope = scope
self._metric = metric
@property
def resource(self):
return self._resource
@property
def scope(self):
return self._scope
@property
def metric(self):
return self._metric
@property
def name(self):
return self._metric.name
@property
def data(self):
return self._metric.data
class OTelMocker:
def __init__(self):
self._snapshot = None
self._logs = InMemoryLogExporter()
self._traces = InMemorySpanExporter()
self._metrics = InMemoryMetricReader()
self._spans = []
self._finished_logs = []
self._metrics_data = []
def install(self):
self._snapshot = OTelProviderSnapshot()
_bypass_otel_once()
self._install_logs()
self._install_metrics()
self._install_traces()
def uninstall(self):
self._snapshot.restore()
def get_finished_logs(self):
for log_data in self._logs.get_finished_logs():
self._finished_logs.append(_LogWrapper(log_data))
return self._finished_logs
def get_finished_spans(self):
for span in self._traces.get_finished_spans():
self._spans.append(span)
return self._spans
def get_metrics_data(self):
data = self._metrics.get_metrics_data()
if data is not None:
for resource_metric in data.resource_metrics:
resource = resource_metric.resource
for scope_metrics in resource_metric.scope_metrics:
scope = scope_metrics.scope
for metric in scope_metrics.metrics:
wrapper = _MetricDataPointWrapper(
resource, scope, metric
)
self._metrics_data.append(wrapper)
return self._metrics_data
def get_span_named(self, name):
for span in self.get_finished_spans():
if span.name == name:
return span
return None
def assert_has_span_named(self, name):
span = self.get_span_named(name)
finished_spans = [span.name for span in self.get_finished_spans()]
assert (
span is not None
), f'Could not find span named "{name}"; finished spans: {finished_spans}'
def assert_does_not_have_span_named(self, name):
span = self.get_span_named(name)
assert span is None, f"Found unexpected span named {name}"
def get_event_named(self, event_name):
for event in self.get_finished_logs():
event_name_attr = event.attributes.get("event.name")
if event_name_attr is None:
continue
if event_name_attr == event_name:
return event
return None
def get_events_named(self, event_name):
result = []
for event in self.get_finished_logs():
event_name_attr = event.attributes.get("event.name")
if event_name_attr is None:
continue
if event_name_attr == event_name:
result.append(event)
return result
def assert_has_event_named(self, name):
event = self.get_event_named(name)
finished_logs = self.get_finished_logs()
assert (
event is not None
), f'Could not find event named "{name}"; finished logs: {finished_logs}'
def assert_does_not_have_event_named(self, name):
event = self.get_event_named(name)
assert event is None, f"Unexpected event: {event}"
def get_metrics_data_named(self, name):
results = []
for entry in self.get_metrics_data():
if entry.name == name:
results.append(entry)
return results
def assert_has_metrics_data_named(self, name):
data = self.get_metrics_data_named(name)
assert len(data) > 0
def _install_logs(self):
provider = LoggerProvider()
provider.add_log_record_processor(SimpleLogRecordProcessor(self._logs))
set_logger_provider(provider)
event_provider = EventLoggerProvider(logger_provider=provider)
set_event_logger_provider(event_provider)
def _install_metrics(self):
provider = MeterProvider(metric_readers=[self._metrics])
set_meter_provider(provider)
def _install_traces(self):
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(self._traces))
set_tracer_provider(provider)
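A minimal sketch of using the mocker above inside a test, assuming OTelMocker from this module is in scope; the span name is arbitrary:

from opentelemetry import trace

mocker = OTelMocker()
mocker.install()  # swaps in in-memory exporters for traces, logs and metrics
try:
    tracer = trace.get_tracer("example-scope")
    with tracer.start_as_current_span("example-span"):
        pass
    mocker.assert_has_span_named("example-span")
finally:
    mocker.uninstall()  # restores the previously registered global providers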

View File

@ -1,163 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import unittest.mock
from google.genai.models import AsyncModels, Models
from ..common.base import TestCase as CommonTestCaseBase
from .util import convert_to_response, create_response
# Helper used in "_install_mocks" below.
def _wrap_output(mock_generate_content):
def _wrapped(*args, **kwargs):
return convert_to_response(mock_generate_content(*args, **kwargs))
return _wrapped
# Helper used in "_install_mocks" below.
def _wrap_output_stream(mock_generate_content_stream):
def _wrapped(*args, **kwargs):
for output in mock_generate_content_stream(*args, **kwargs):
yield convert_to_response(output)
return _wrapped
# Helper used in "_install_mocks" below.
def _async_wrapper(mock_generate_content):
async def _wrapped(*args, **kwargs):
return mock_generate_content(*args, **kwargs)
return _wrapped
# Helper used in "_install_mocks" below.
def _async_stream_wrapper(mock_generate_content_stream):
async def _wrapped(*args, **kwargs):
async def _internal_generator():
for result in mock_generate_content_stream(*args, **kwargs):
yield result
return _internal_generator()
return _wrapped
class TestCase(CommonTestCaseBase):
# The "setUp" function is defined by "unittest.TestCase" and thus
# this name must be used. Uncertain why pylint doesn't seem to
# recognize that this is a unit test class for which this is inherited.
def setUp(self): # pylint: disable=invalid-name
super().setUp()
if self.__class__ == TestCase:
raise unittest.SkipTest("Skipping testcase base.")
self._generate_content_mock = None
self._generate_content_stream_mock = None
self._original_generate_content = Models.generate_content
self._original_generate_content_stream = Models.generate_content_stream
self._original_async_generate_content = AsyncModels.generate_content
self._original_async_generate_content_stream = (
AsyncModels.generate_content_stream
)
self._responses = []
self._response_index = 0
@property
def mock_generate_content(self):
if self._generate_content_mock is None:
self._create_and_install_mocks()
return self._generate_content_mock
@property
def mock_generate_content_stream(self):
if self._generate_content_stream_mock is None:
self._create_and_install_mocks()
return self._generate_content_stream_mock
def configure_valid_response(self, **kwargs):
self._create_and_install_mocks()
response = create_response(**kwargs)
self._responses.append(response)
def _create_and_install_mocks(self):
if self._generate_content_mock is not None:
return
self.reset_client()
self.reset_instrumentation()
self._generate_content_mock = self._create_nonstream_mock()
self._generate_content_stream_mock = self._create_stream_mock()
self._install_mocks()
def _create_nonstream_mock(self):
mock = unittest.mock.MagicMock()
def _default_impl(*args, **kwargs):
if not self._responses:
return create_response(text="Some response")
index = self._response_index % len(self._responses)
result = self._responses[index]
self._response_index += 1
return result
mock.side_effect = _default_impl
return mock
def _create_stream_mock(self):
mock = unittest.mock.MagicMock()
def _default_impl(*args, **kwargs):
for response in self._responses:
yield response
mock.side_effect = _default_impl
return mock
def _install_mocks(self):
output_wrapped = _wrap_output(self._generate_content_mock)
output_wrapped_stream = _wrap_output_stream(
self._generate_content_stream_mock
)
Models.generate_content = output_wrapped
Models.generate_content_stream = output_wrapped_stream
AsyncModels.generate_content = _async_wrapper(output_wrapped)
AsyncModels.generate_content_stream = _async_stream_wrapper(
output_wrapped_stream
)
def tearDown(self):
super().tearDown()
if self._generate_content_mock is None:
assert Models.generate_content == self._original_generate_content
assert (
Models.generate_content_stream
== self._original_generate_content_stream
)
assert (
AsyncModels.generate_content
== self._original_async_generate_content
)
assert (
AsyncModels.generate_content_stream
== self._original_async_generate_content_stream
)
Models.generate_content = self._original_generate_content
Models.generate_content_stream = self._original_generate_content_stream
AsyncModels.generate_content = self._original_async_generate_content
AsyncModels.generate_content_stream = (
self._original_async_generate_content_stream
)

View File

@ -1,94 +0,0 @@
interactions:
- request:
body: |-
{
"contents": [
{
"parts": [
{
"text": "Create a poem about Open Telemetry."
}
],
"role": "user"
}
]
}
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '92'
Content-Type:
- application/json
user-agent:
- google-genai-sdk/1.0.0 gl-python/3.12.8
x-goog-api-client:
- <REDACTED>
x-goog-user-project:
- <REDACTED>
method: POST
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
response:
body:
string: |-
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "No more dark, inscrutable ways,\nTo trace a request through hazy days.\nOpen Telemetry, a beacon bright,\nIlluminates the path, both day and night.\n\nFrom metrics gathered, a clear display,\nOf latency's dance, and errors' sway.\nTraces unwind, a silken thread,\nShowing the journey, from start to head.\n\nLogs interweave, a richer hue,\nContextual clues, for me and you.\nNo vendor lock-in, a freedom's call,\nTo choose your tools, to stand up tall.\n\nExporters aplenty, a varied choice,\nTo send your data, amplify your voice.\nJaeger, Zipkin, Prometheus' might,\nAll integrate, a glorious sight.\n\nWith spans and attributes, a detailed scene,\nOf how your system works, both sleek and keen.\nPerformance bottlenecks, now laid bare,\nOpen Telemetry, beyond compare.\n\nSo embrace the light, let darkness flee,\nWith Open Telemetry, set your systems free.\nObserve, and learn, and optimize with grace,\nA brighter future, in this digital space.\n"
}
]
},
"finishReason": "STOP",
"avgLogprobs": -0.3303731600443522
}
],
"usageMetadata": {
"promptTokenCount": 8,
"candidatesTokenCount": 240,
"totalTokenCount": 248,
"promptTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 8
}
],
"candidatesTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 240
}
]
},
"modelVersion": "gemini-1.5-flash-002",
"createTime": "2025-03-07T22:19:18.083091Z",
"responseId": "5nDLZ5OJBdyY3NoPiZGx0Ag"
}
headers:
Content-Encoding:
- gzip
Content-Type:
- application/json; charset=UTF-8
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- nosniff
X-Frame-Options:
- SAMEORIGIN
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1

View File

@ -1,94 +0,0 @@
interactions:
- request:
body: |-
{
"contents": [
{
"parts": [
{
"text": "Create a poem about Open Telemetry."
}
],
"role": "user"
}
]
}
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '92'
Content-Type:
- application/json
user-agent:
- google-genai-sdk/1.0.0 gl-python/3.12.8
x-goog-api-client:
- <REDACTED>
x-goog-user-project:
- <REDACTED>
method: POST
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
response:
body:
string: |-
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "No more dark logs, a cryptic, hidden trace,\nOf failing systems, lost in time and space.\nOpenTelemetry, a beacon shining bright,\nIlluminating paths, both dark and light.\n\nFrom microservices, a sprawling, tangled mesh,\nTo monolithic beasts, put to the test,\nIt gathers traces, spans, and metrics too,\nA holistic view, for me and you.\n\nWith signals clear, from every single node,\nPerformance bottlenecks, instantly bestowed.\nDistributed tracing, paints a vivid scene,\nWhere latency lurks, and slowdowns intervene.\n\nExporters rise, to send the data forth,\nTo dashboards grand, of proven, measured worth.\nPrometheus, Grafana, Jaeger, fluent streams,\nVisualizing insights, fulfilling data dreams.\n\nFrom Jaeger's diagrams, a branching, flowing art,\nTo Grafana's charts, that play a vital part,\nThe mysteries unravel, hidden deep inside,\nWhere errors slumber, and slow responses hide.\n\nSo hail OpenTelemetry, a gift to all who code,\nA brighter future, on a well-lit road.\nNo more guesswork, no more fruitless chase,\nJust clear observability, in time and space.\n"
}
]
},
"finishReason": "STOP",
"avgLogprobs": -0.45532724261283875
}
],
"usageMetadata": {
"promptTokenCount": 8,
"candidatesTokenCount": 256,
"totalTokenCount": 264,
"promptTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 8
}
],
"candidatesTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 256
}
]
},
"modelVersion": "gemini-1.5-flash-002",
"createTime": "2025-03-07T22:19:15.268428Z",
"responseId": "43DLZ4yxEM6F3NoPzaTkiQU"
}
headers:
Content-Encoding:
- gzip
Content-Type:
- application/json; charset=UTF-8
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- nosniff
X-Frame-Options:
- SAMEORIGIN
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1

View File

@ -1,94 +0,0 @@
interactions:
- request:
body: |-
{
"contents": [
{
"parts": [
{
"text": "Create a poem about Open Telemetry."
}
],
"role": "user"
}
]
}
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '92'
Content-Type:
- application/json
user-agent:
- google-genai-sdk/1.0.0 gl-python/3.12.8
x-goog-api-client:
- <REDACTED>
x-goog-user-project:
- <REDACTED>
method: POST
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
response:
body:
string: |-
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "No more dark, mysterious traces,\nNo more guessing, in empty spaces.\nOpenTelemetry's light now shines,\nIlluminating all our designs.\n\nFrom microservices, small and fleet,\nTo monolithic beasts, hard to beat,\nIt weaves a net, both fine and strong,\nWhere metrics flow, where logs belong.\n\nTraces dance, a vibrant hue,\nShowing journeys, old and new.\nSpans unfold, a story told,\nOf requests handled, brave and bold.\n\nMetrics hum, a steady beat,\nLatency, errors, can't be beat.\nDistribution charts, a clear display,\nGuiding us along the way.\n\nLogs provide a detailed view,\nOf what happened, me and you.\nContext rich, with helpful clues,\nDebugging woes, it quickly subdues.\n\nWith exporters wise, a thoughtful choice,\nTo Prometheus, Jaeger, or Zipkin's voice,\nOur data flows, a precious stream,\nReal-time insights, a waking dream.\n\nSo hail to OpenTelemetry's might,\nBringing clarity to our darkest night.\nObservability's champion, bold and true,\nA brighter future, for me and you.\n"
}
]
},
"finishReason": "STOP",
"avgLogprobs": -0.4071464086238575
}
],
"usageMetadata": {
"promptTokenCount": 8,
"candidatesTokenCount": 253,
"totalTokenCount": 261,
"promptTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 8
}
],
"candidatesTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 253
}
]
},
"modelVersion": "gemini-1.5-flash-002",
"createTime": "2025-03-07T22:19:12.443989Z",
"responseId": "4HDLZ9WMG6SK698Pr5uZ2Qw"
}
headers:
Content-Encoding:
- gzip
Content-Type:
- application/json; charset=UTF-8
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- nosniff
X-Frame-Options:
- SAMEORIGIN
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1

View File

@ -1,94 +0,0 @@
interactions:
- request:
body: |-
{
"contents": [
{
"parts": [
{
"text": "Create a poem about Open Telemetry."
}
],
"role": "user"
}
]
}
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '92'
Content-Type:
- application/json
user-agent:
- google-genai-sdk/1.0.0 gl-python/3.12.8
x-goog-api-client:
- <REDACTED>
x-goog-user-project:
- <REDACTED>
method: POST
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent
response:
body:
string: |-
{
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "No more dark, mysterious traces,\nOf failing systems, hidden spaces.\nOpen Telemetry's light shines bright,\nGuiding us through the darkest night.\n\nFrom metrics gathered, finely spun,\nTo logs that tell of tasks undone,\nAnd traces linking every call,\nIt answers questions, standing tall.\n\nDistributed systems, complex and vast,\nTheir hidden flaws, no longer cast\nIn shadows deep, beyond our view,\nOpen Telemetry sees them through.\n\nWith spans and attributes, it weaves a tale,\nOf requests flowing, never frail.\nIt pinpoints bottlenecks, slow and grim,\nAnd helps us optimize, system trim.\n\nAcross languages, a common ground,\nWhere data's shared, and insights found.\nExporters whisper, collectors hum,\nA symphony of data, overcome.\n\nSo raise a glass, to this open source,\nA shining beacon, a powerful force.\nOpen Telemetry, a guiding star,\nRevealing secrets, near and far.\n"
}
]
},
"finishReason": "STOP",
"avgLogprobs": -0.3586180628193498
}
],
"usageMetadata": {
"promptTokenCount": 8,
"candidatesTokenCount": 211,
"totalTokenCount": 219,
"promptTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 8
}
],
"candidatesTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 211
}
]
},
"modelVersion": "gemini-1.5-flash-002",
"createTime": "2025-03-07T22:19:09.936326Z",
"responseId": "3XDLZ4aTOZSpnvgPn-e0qQk"
}
headers:
Content-Encoding:
- gzip
Content-Type:
- application/json; charset=UTF-8
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- nosniff
X-Frame-Options:
- SAMEORIGIN
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1

View File

@ -1,97 +0,0 @@
interactions:
- request:
body: |-
{
"contents": [
{
"parts": [
{
"text": "Create a poem about Open Telemetry."
}
],
"role": "user"
}
]
}
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '92'
Content-Type:
- application/json
user-agent:
- google-genai-sdk/1.0.0 gl-python/3.12.8
x-goog-api-client:
- <REDACTED>
x-goog-user-project:
- <REDACTED>
method: POST
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
response:
body:
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \" longer dark, the tracing's light,\\nOpen Telemetry, shining\
\ bright\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
: \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \".\\nA beacon in the coding night,\\nRevealing paths, both\
\ dark\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\
2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \" and bright.\\n\\nFrom microservice to sprawling beast,\\\
nIts watchful eye, a silent priest.\\nObserving calls, both small and vast,\\\
nPerformance\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
: \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \" flaws, revealed at last.\\n\\nWith metrics gleaned and logs\
\ aligned,\\nA clearer picture, you will find.\\nOf latency, and errors dire,\\\
n\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\
,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"And bottlenecks\
\ that set afire.\\n\\nIt spans the clouds, a network wide,\\nWhere data streams,\
\ a surging tide.\\nCollecting traces, rich and deep,\\nWhile slumbering apps\
\ their secrets keep.\\n\\nJaeger, Zip\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \"kin, the tools it holds,\\nA tapestry of stories told.\\nOf\
\ requests flowing, swift and free,\\nOr tangled knots, for all to see.\\\
n\\nSo embrace the power, understand,\\nThe vital role, across the\"}]}}],\"\
modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\
,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" land.\\nOpen\
\ Telemetry, a guiding star,\\nTo navigate the digital afar.\\n\"}]},\"finishReason\"\
: \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\
: 212,\"totalTokenCount\": 220,\"promptTokensDetails\": [{\"modality\": \"\
TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\
TEXT\",\"tokenCount\": 212}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\
createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\
}\r\n\r\n"
headers:
Content-Disposition:
- attachment
Content-Type:
- text/event-stream
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- nosniff
X-Frame-Options:
- SAMEORIGIN
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1

View File

@ -1,102 +0,0 @@
interactions:
- request:
body: |-
{
"contents": [
{
"parts": [
{
"text": "Create a poem about Open Telemetry."
}
],
"role": "user"
}
]
}
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '92'
Content-Type:
- application/json
user-agent:
- google-genai-sdk/1.0.0 gl-python/3.12.8
x-goog-api-client:
- <REDACTED>
x-goog-user-project:
- <REDACTED>
method: POST
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
response:
body:
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \"The\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \" black box whispers, secrets deep,\\nOf failing systems, promises\
\ to keep.\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
: \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \"\\nBut tracing's light, a guiding hand,\\nReveals the path\"\
}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", across the\
\ land.\\n\\nOpen Telemetry, a beacon bright,\\nIlluminating pathways, day\
\ and night.\\nFrom spans and traces, stories told,\"}]}}],\"modelVersion\"\
: \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nOf requests\
\ flowing, brave and bold.\\n\\nThe metrics rise, a vibrant chart,\\nDisplaying\
\ latency, a work of art.\\nEach request'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \"s journey, clearly shown,\\nWhere bottlenecks slumber, seeds\
\ are sown.\\n\\nWith logs appended, context clear,\\nThe root of problems,\
\ drawing near.\\nObservability's embrace, so wide,\\nUnraveling mysteries,\"\
}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" deep inside.\\\
n\\nFrom simple apps to complex weaves,\\nOpen Telemetry's power achieves,\\\
nA unified vision, strong and true,\\nMonitoring systems, old and new.\\n\\\
nNo vendor lock-in, free to roam,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \"\\nAcross the clouds, and find your home.\\nA standard rising,\
\ strong and bold,\\nA future brighter, to behold.\\n\\nSo let the traces\
\ flow and gleam,\\nOpen Telemetry, a vibrant dream.\\nOf healthy systems,\
\ running free,\\nFor all to see, for all to be.\"}]}}],\"modelVersion\":\
\ \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\
,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\n\"}]},\"\
finishReason\": \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"\
candidatesTokenCount\": 258,\"totalTokenCount\": 266,\"promptTokensDetails\"\
: [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\
: [{\"modality\": \"TEXT\",\"tokenCount\": 258}]},\"modelVersion\": \"gemini-1.5-flash-002\"\
,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\
}\r\n\r\n"
headers:
Content-Disposition:
- attachment
Content-Type:
- text/event-stream
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- nosniff
X-Frame-Options:
- SAMEORIGIN
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1

View File

@ -1,99 +0,0 @@
interactions:
- request:
body: |-
{
"contents": [
{
"parts": [
{
"text": "Create a poem about Open Telemetry."
}
],
"role": "user"
}
]
}
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '92'
Content-Type:
- application/json
user-agent:
- google-genai-sdk/1.0.0 gl-python/3.12.8
x-goog-api-client:
- <REDACTED>
x-goog-user-project:
- <REDACTED>
method: POST
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
response:
body:
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
,\"createTime\": \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \" more dark logs, a cryptic, silent scream,\\nNo more the hunt\
\ for\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\
2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \" errors, a lost, fading dream.\\nOpen Telemetry, a beacon\
\ in\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\
2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \" the night,\\nShining forth its data, clear and burning bright.\\\
n\\nFrom traces spanning systems, a flowing, silver thread,\\nMetrics pulse\
\ and measure,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
: \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \" insights finely spread.\\nLogs enriched with context, a story\
\ they unfold,\\nOf requests and responses, both brave and bold.\\n\\nObservability's\
\ promise\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
: \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \", a future now at hand,\\nWith vendors interoperable, a collaborative\
\ band.\\nNo longer vendor lock-in, a restrictive, iron cage,\\nBut freedom\
\ of selection, turning a new page.\\n\\nFrom microservices humming,\"}]}}],\"\
modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\
,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" a symphony\
\ of calls,\\nTo monolithic giants, answering their thralls,\\nOpen Telemetry\
\ watches, with keen and watchful eye,\\nDetecting the anomalies, before they\
\ rise and fly.\\n\\nSo let the data flow freely, a\"}]}}],\"modelVersion\"\
: \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\
,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" river strong\
\ and deep,\\nIts secrets it will whisper, while the systems sleep.\\nOpen\
\ Telemetry's power, a force that we can wield,\\nTo build more stable systems,\
\ in the digital field.\\n\"}]},\"finishReason\": \"STOP\"}],\"usageMetadata\"\
: {\"promptTokenCount\": 8,\"candidatesTokenCount\": 238,\"totalTokenCount\"\
: 246,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"\
candidatesTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 238}]},\"\
modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\
,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\n"
headers:
Content-Disposition:
- attachment
Content-Type:
- text/event-stream
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- nosniff
X-Frame-Options:
- SAMEORIGIN
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1

View File

@ -1,99 +0,0 @@
interactions:
- request:
body: |-
{
"contents": [
{
"parts": [
{
"text": "Create a poem about Open Telemetry."
}
],
"role": "user"
}
]
}
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '92'
Content-Type:
- application/json
user-agent:
- google-genai-sdk/1.0.0 gl-python/3.12.8
x-goog-api-client:
- <REDACTED>
x-goog-user-project:
- <REDACTED>
method: POST
uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse
response:
body:
string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\
,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \" more dark, mysterious traces,\\nNo more guessing, in time\
\ and spaces.\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\
: \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \"\\nOpen Telemetry's light shines bright,\\nIlluminating the\
\ code'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\":\
\ \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \"s dark night.\\n\\nFrom spans and metrics, a story told,\\\
nOf requests flowing, both brave and bold.\\nTraces weaving, a tapestry grand,\"\
}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\
,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nShowing\
\ performance, across the land.\\n\\nLogs and metrics, a perfect blend,\\\
nInformation's flow, without end.\\nObservability's promise\"}]}}],\"modelVersion\"\
: \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\
,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\
: [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", clear and\
\ true,\\nInsights revealed, for me and you.\\n\\nJaeger, Zipkin, a chorus\
\ sings,\\nWith exporters ready, for all the things.\\nFrom simple apps to\
\ systems vast,\\nOpen Telemetry'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\
,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\
: [{\"text\": \"s power will last.\\n\\nNo vendor lock-in, a freedom sweet,\\\
nOpen source glory, can't be beat.\\nSo let us embrace, this modern way,\\\
nTo monitor systems, come what may.\\n\\nFrom\"}]}}],\"modelVersion\": \"\
gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"\
responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\": [{\"\
content\": {\"role\": \"model\",\"parts\": [{\"text\": \" microservices, small\
\ and slight,\\nTo monolithic giants, shining bright,\\nOpen Telemetry shows\
\ the path,\\nTo understand, and fix the wrath,\\nOf latency demons, lurking\
\ near,\\nBringing clarity, year after year.\\n\"}]},\"finishReason\": \"\
STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\
: 242,\"totalTokenCount\": 250,\"promptTokensDetails\": [{\"modality\": \"\
TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\
TEXT\",\"tokenCount\": 242}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\
createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\
}\r\n\r\n"
headers:
Content-Disposition:
- attachment
Content-Type:
- text/event-stream
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- nosniff
X-Frame-Options:
- SAMEORIGIN
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1

View File

@ -1,204 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from .base import TestCase
class NonStreamingTestCase(TestCase):
# The "setUp" function is defined by "unittest.TestCase" and thus
# this name must be used. Uncertain why pylint doesn't seem to
# recognize that this is a unit test class for which this is inherited.
def setUp(self): # pylint: disable=invalid-name
super().setUp()
if self.__class__ == NonStreamingTestCase:
raise unittest.SkipTest("Skipping testcase base.")
def generate_content(self, *args, **kwargs):
raise NotImplementedError("Must implement 'generate_content'.")
@property
def expected_function_name(self):
raise NotImplementedError("Must implement 'expected_function_name'.")
def _generate_and_get_span(self, config):
self.generate_content(
model="gemini-2.0-flash",
contents="Some input prompt",
config=config,
)
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
return self.otel.get_span_named("generate_content gemini-2.0-flash")
def test_instrumentation_does_not_break_core_functionality(self):
self.configure_valid_response(text="Yep, it works!")
response = self.generate_content(
model="gemini-2.0-flash", contents="Does this work?"
)
self.assertEqual(response.text, "Yep, it works!")
def test_generates_span(self):
self.configure_valid_response(text="Yep, it works!")
response = self.generate_content(
model="gemini-2.0-flash", contents="Does this work?"
)
self.assertEqual(response.text, "Yep, it works!")
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
def test_model_reflected_into_span_name(self):
self.configure_valid_response(text="Yep, it works!")
response = self.generate_content(
model="gemini-1.5-flash", contents="Does this work?"
)
self.assertEqual(response.text, "Yep, it works!")
self.otel.assert_has_span_named("generate_content gemini-1.5-flash")
def test_generated_span_has_minimal_genai_attributes(self):
self.configure_valid_response(text="Yep, it works!")
self.generate_content(
model="gemini-2.0-flash", contents="Does this work?"
)
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
span = self.otel.get_span_named("generate_content gemini-2.0-flash")
self.assertEqual(span.attributes["gen_ai.system"], "gemini")
self.assertEqual(
span.attributes["gen_ai.operation.name"], "generate_content"
)
def test_generated_span_has_correct_function_name(self):
self.configure_valid_response(text="Yep, it works!")
self.generate_content(
model="gemini-2.0-flash", contents="Does this work?"
)
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
span = self.otel.get_span_named("generate_content gemini-2.0-flash")
self.assertEqual(
span.attributes["code.function.name"], self.expected_function_name
)
def test_generated_span_has_vertex_ai_system_when_configured(self):
self.set_use_vertex(True)
self.configure_valid_response(text="Yep, it works!")
self.generate_content(
model="gemini-2.0-flash", contents="Does this work?"
)
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
span = self.otel.get_span_named("generate_content gemini-2.0-flash")
self.assertEqual(span.attributes["gen_ai.system"], "vertex_ai")
self.assertEqual(
span.attributes["gen_ai.operation.name"], "generate_content"
)
def test_generated_span_counts_tokens(self):
self.configure_valid_response(input_tokens=123, output_tokens=456)
self.generate_content(model="gemini-2.0-flash", contents="Some input")
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
span = self.otel.get_span_named("generate_content gemini-2.0-flash")
self.assertEqual(span.attributes["gen_ai.usage.input_tokens"], 123)
self.assertEqual(span.attributes["gen_ai.usage.output_tokens"], 456)
def test_records_system_prompt_as_log(self):
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
"true"
)
config = {"system_instruction": "foo"}
self.configure_valid_response()
self.generate_content(
model="gemini-2.0-flash", contents="Some input", config=config
)
self.otel.assert_has_event_named("gen_ai.system.message")
event_record = self.otel.get_event_named("gen_ai.system.message")
self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
self.assertEqual(event_record.body["content"], "foo")
def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self):
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
"false"
)
config = {"system_instruction": "foo"}
self.configure_valid_response()
self.generate_content(
model="gemini-2.0-flash", contents="Some input", config=config
)
self.otel.assert_has_event_named("gen_ai.system.message")
event_record = self.otel.get_event_named("gen_ai.system.message")
self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
self.assertEqual(event_record.body["content"], "<elided>")
def test_does_not_record_system_prompt_as_log_if_no_system_prompt_present(
self,
):
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
"true"
)
self.configure_valid_response()
self.generate_content(model="gemini-2.0-flash", contents="Some input")
self.otel.assert_does_not_have_event_named("gen_ai.system.message")
def test_records_user_prompt_as_log(self):
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
"true"
)
self.configure_valid_response()
self.generate_content(model="gemini-2.0-flash", contents="Some input")
self.otel.assert_has_event_named("gen_ai.user.message")
event_record = self.otel.get_event_named("gen_ai.user.message")
self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
self.assertEqual(event_record.body["content"], "Some input")
def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self):
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
"false"
)
self.configure_valid_response()
self.generate_content(model="gemini-2.0-flash", contents="Some input")
self.otel.assert_has_event_named("gen_ai.user.message")
event_record = self.otel.get_event_named("gen_ai.user.message")
self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
self.assertEqual(event_record.body["content"], "<elided>")
def test_records_response_as_log(self):
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
"true"
)
self.configure_valid_response(text="Some response content")
self.generate_content(model="gemini-2.0-flash", contents="Some input")
self.otel.assert_has_event_named("gen_ai.choice")
event_record = self.otel.get_event_named("gen_ai.choice")
self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
self.assertIn(
"Some response content", json.dumps(event_record.body["content"])
)
def test_does_not_record_response_as_log_if_disabled_by_env(self):
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
"false"
)
self.configure_valid_response(text="Some response content")
self.generate_content(model="gemini-2.0-flash", contents="Some input")
self.otel.assert_has_event_named("gen_ai.choice")
event_record = self.otel.get_event_named("gen_ai.choice")
self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
self.assertEqual(event_record.body["content"], "<elided>")
def test_records_metrics_data(self):
self.configure_valid_response()
self.generate_content(model="gemini-2.0-flash", contents="Some input")
self.otel.assert_has_metrics_data_named("gen_ai.client.token.usage")
self.otel.assert_has_metrics_data_named(
"gen_ai.client.operation.duration"
)

View File

@ -1,72 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from .base import TestCase
class StreamingTestCase(TestCase):
# The "setUp" function is defined by "unittest.TestCase" and thus
# this name must be used. Uncertain why pylint doesn't seem to
# recognize that this is a unit test class for which this is inherited.
def setUp(self): # pylint: disable=invalid-name
super().setUp()
if self.__class__ == StreamingTestCase:
raise unittest.SkipTest("Skipping testcase base.")
def generate_content(self, *args, **kwargs):
raise NotImplementedError("Must implement 'generate_content'.")
@property
def expected_function_name(self):
raise NotImplementedError("Must implement 'expected_function_name'.")
def test_instrumentation_does_not_break_core_functionality(self):
self.configure_valid_response(text="Yep, it works!")
responses = self.generate_content(
model="gemini-2.0-flash", contents="Does this work?"
)
self.assertEqual(len(responses), 1)
response = responses[0]
self.assertEqual(response.text, "Yep, it works!")
def test_handles_multiple_responses(self):
self.configure_valid_response(text="First response")
self.configure_valid_response(text="Second response")
responses = self.generate_content(
model="gemini-2.0-flash", contents="Does this work?"
)
self.assertEqual(len(responses), 2)
self.assertEqual(responses[0].text, "First response")
self.assertEqual(responses[1].text, "Second response")
choice_events = self.otel.get_events_named("gen_ai.choice")
self.assertEqual(len(choice_events), 2)
def test_includes_token_counts_in_span_aggregated_from_responses(self):
# Configure multiple responses whose input/output tokens should be
# accumulated together when summarizing the end-to-end request.
#
# Input tokens:  1 + 3 + 5 = 9  (running total: 1, 4, 9)
# Output tokens: 2 + 4 + 6 = 12 (running total: 2, 6, 12)
self.configure_valid_response(input_tokens=1, output_tokens=2)
self.configure_valid_response(input_tokens=3, output_tokens=4)
self.configure_valid_response(input_tokens=5, output_tokens=6)
self.generate_content(model="gemini-2.0-flash", contents="Some input")
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
span = self.otel.get_span_named("generate_content gemini-2.0-flash")
self.assertEqual(span.attributes["gen_ai.usage.input_tokens"], 9)
self.assertEqual(span.attributes["gen_ai.usage.output_tokens"], 12)

View File

@ -1,28 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from .nonstreaming_base import NonStreamingTestCase
class TestGenerateContentAsyncNonstreaming(NonStreamingTestCase):
def generate_content(self, *args, **kwargs):
return asyncio.run(
self.client.aio.models.generate_content(*args, **kwargs) # pylint: disable=missing-kwoa
)
@property
def expected_function_name(self):
return "google.genai.AsyncModels.generate_content"

View File

@ -1,55 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from .nonstreaming_base import NonStreamingTestCase
from .streaming_base import StreamingTestCase
class AsyncStreamingMixin:
@property
def expected_function_name(self):
return "google.genai.AsyncModels.generate_content_stream"
async def _generate_content_stream_helper(self, *args, **kwargs):
result = []
async for (
response
) in await self.client.aio.models.generate_content_stream( # pylint: disable=missing-kwoa
*args, **kwargs
):
result.append(response)
return result
def generate_content_stream(self, *args, **kwargs):
return asyncio.run(
self._generate_content_stream_helper(*args, **kwargs)
)
class TestGenerateContentAsyncStreamingWithSingleResult(
AsyncStreamingMixin, NonStreamingTestCase
):
def generate_content(self, *args, **kwargs):
responses = self.generate_content_stream(*args, **kwargs)
self.assertEqual(len(responses), 1)
return responses[0]
class TestGenerateContentAsyncStreamingWithStreamedResults(
AsyncStreamingMixin, StreamingTestCase
):
def generate_content(self, *args, **kwargs):
return self.generate_content_stream(*args, **kwargs)

View File

@ -1,162 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from google.genai.types import GenerateContentConfig
from opentelemetry.instrumentation.google_genai.allowlist_util import AllowList
from .base import TestCase
class ConfigSpanAttributesTestCase(TestCase):
def setUp(self):
super().setUp()
self.configure_valid_response(text="Some response")
def generate_content(self, *args, **kwargs):
return self.client.models.generate_content(*args, **kwargs)
def generate_and_get_span(self, config):
self.client.models.generate_content(
model="gemini-2.0-flash",
contents="Some input prompt",
config=config,
)
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
return self.otel.get_span_named("generate_content gemini-2.0-flash")
def test_option_reflected_to_span_attribute_choice_count_config_dict(self):
span = self.generate_and_get_span(config={"candidate_count": 2})
self.assertEqual(span.attributes["gen_ai.request.choice.count"], 2)
def test_option_reflected_to_span_attribute_choice_count_config_obj(self):
span = self.generate_and_get_span(
config=GenerateContentConfig(candidate_count=2)
)
self.assertEqual(span.attributes["gen_ai.request.choice.count"], 2)
def test_option_reflected_to_span_attribute_seed_config_dict(self):
span = self.generate_and_get_span(config={"seed": 12345})
self.assertEqual(span.attributes["gen_ai.request.seed"], 12345)
def test_option_reflected_to_span_attribute_seed_config_obj(self):
span = self.generate_and_get_span(
config=GenerateContentConfig(seed=12345)
)
self.assertEqual(span.attributes["gen_ai.request.seed"], 12345)
def test_option_reflected_to_span_attribute_frequency_penalty(self):
span = self.generate_and_get_span(config={"frequency_penalty": 1.0})
self.assertEqual(
span.attributes["gen_ai.request.frequency_penalty"], 1.0
)
def test_option_reflected_to_span_attribute_max_tokens(self):
span = self.generate_and_get_span(
config=GenerateContentConfig(max_output_tokens=5000)
)
self.assertEqual(span.attributes["gen_ai.request.max_tokens"], 5000)
def test_option_reflected_to_span_attribute_presence_penalty(self):
span = self.generate_and_get_span(
config=GenerateContentConfig(presence_penalty=0.5)
)
self.assertEqual(
span.attributes["gen_ai.request.presence_penalty"], 0.5
)
def test_option_reflected_to_span_attribute_stop_sequences(self):
span = self.generate_and_get_span(
config={"stop_sequences": ["foo", "bar"]}
)
stop_sequences = span.attributes["gen_ai.request.stop_sequences"]
self.assertEqual(len(stop_sequences), 2)
self.assertEqual(stop_sequences[0], "foo")
self.assertEqual(stop_sequences[1], "bar")
def test_option_reflected_to_span_attribute_top_k(self):
span = self.generate_and_get_span(
config=GenerateContentConfig(top_k=20)
)
self.assertEqual(span.attributes["gen_ai.request.top_k"], 20)
def test_option_reflected_to_span_attribute_top_p(self):
span = self.generate_and_get_span(config={"top_p": 10})
self.assertEqual(span.attributes["gen_ai.request.top_p"], 10)
@mock.patch.dict(
os.environ, {"OTEL_GOOGLE_GENAI_GENERATE_CONTENT_CONFIG_INCLUDES": "*"}
)
def test_option_not_reflected_to_span_attribute_system_instruction(self):
span = self.generate_and_get_span(
config={"system_instruction": "Yadda yadda yadda"}
)
self.assertNotIn(
"gcp.gen_ai.operation.config.system_instruction", span.attributes
)
self.assertNotIn("gen_ai.request.system_instruction", span.attributes)
for key in span.attributes:
value = span.attributes[key]
if isinstance(value, str):
self.assertNotIn("Yadda yadda yadda", value)
@mock.patch.dict(
os.environ, {"OTEL_GOOGLE_GENAI_GENERATE_CONTENT_CONFIG_INCLUDES": "*"}
)
def test_option_reflected_to_span_attribute_automatic_func_calling(self):
span = self.generate_and_get_span(
config={
"automatic_function_calling": {
"ignore_call_history": True,
}
}
)
self.assertTrue(
span.attributes[
"gcp.gen_ai.operation.config.automatic_function_calling.ignore_call_history"
]
)
def test_dynamic_config_options_not_included_without_allow_list(self):
span = self.generate_and_get_span(
config={
"automatic_function_calling": {
"ignore_call_history": True,
}
}
)
self.assertNotIn(
"gcp.gen_ai.operation.config.automatic_function_calling.ignore_call_history",
span.attributes,
)
def test_can_supply_allow_list_via_instrumentor_constructor(self):
self.set_instrumentor_constructor_kwarg(
"generate_content_config_key_allowlist", AllowList(includes=["*"])
)
span = self.generate_and_get_span(
config={
"automatic_function_calling": {
"ignore_call_history": True,
}
}
)
self.assertTrue(
span.attributes[
"gcp.gen_ai.operation.config.automatic_function_calling.ignore_call_history"
]
)
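# Usage sketch, not part of the original file: the dynamic config attributes
# exercised above can be enabled either through the environment variable used
# in the mock.patch.dict decorators,
#
#   OTEL_GOOGLE_GENAI_GENERATE_CONTENT_CONFIG_INCLUDES="*"
#
# or programmatically, using the constructor kwarg shown in the last test:
#
#   GoogleGenAiSdkInstrumentor(
#       generate_content_config_key_allowlist=AllowList(includes=["*"])
#   ).instrument()
#
# Both names are taken from this test file; consult the instrumentation
# package itself for the authoritative configuration surface.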

View File

@ -1,504 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""High level end-to-end test of the generate content instrumentation.
The primary purpose of this test is to verify that the instrumentation
package does not break the underlying GenAI SDK that it instruments.
This test suite also has some minimal validation of the instrumentation
outputs; however, validating the instrumentation output (other than
verifying that instrumentation does not break the GenAI SDK) is a
secondary goal of this test. Detailed testing of the instrumentation
output is the purview of the other tests in this directory."""
import asyncio
import gzip
import json
import os
import subprocess
import sys
import google.auth
import google.auth.credentials
import google.genai
import pytest
import yaml
from vcr.record_mode import RecordMode
from opentelemetry.instrumentation.google_genai import (
GoogleGenAiSdkInstrumentor,
)
from ..common.auth import FakeCredentials
from ..common.otel_mocker import OTelMocker
_FAKE_PROJECT = "test-project"
_FAKE_LOCATION = "test-location"
_FAKE_API_KEY = "test-api-key"
_DEFAULT_REAL_LOCATION = "us-central1"
def _get_project_from_env():
return (
os.getenv("GCLOUD_PROJECT") or os.getenv("GOOGLE_CLOUD_PROJECT") or ""
)
def _get_project_from_gcloud_cli():
try:
gcloud_call_result = subprocess.run(
"gcloud config get project",
shell=True,
capture_output=True,
check=True,
)
except subprocess.CalledProcessError:
return None
gcloud_output = gcloud_call_result.stdout.decode()
return gcloud_output.strip()
def _get_project_from_credentials():
_, from_creds = google.auth.default()
return from_creds
def _get_real_project():
from_env = _get_project_from_env()
if from_env:
return from_env
from_cli = _get_project_from_gcloud_cli()
if from_cli:
return from_cli
return _get_project_from_credentials()
def _get_location_from_env():
return (
os.getenv("GCLOUD_LOCATION")
or os.getenv("GOOGLE_CLOUD_LOCATION")
or ""
)
def _get_real_location():
return _get_location_from_env() or _DEFAULT_REAL_LOCATION
def _get_vertex_api_key_from_env():
return os.getenv("GOOGLE_API_KEY")
def _get_gemini_api_key_from_env():
return os.getenv("GEMINI_API_KEY")
def _should_redact_header(header_key):
if header_key.startswith("x-goog"):
return True
if header_key.startswith("sec-goog"):
return True
if header_key in ["server", "server-timing"]:
return True
return False
def _redact_headers(headers):
for header_key in headers:
if _should_redact_header(header_key.lower()):
headers[header_key] = "<REDACTED>"
def _before_record_request(request):
if request.headers:
_redact_headers(request.headers)
uri = request.uri
project = _get_project_from_env()
if project:
uri = uri.replace(f"projects/{project}", f"projects/{_FAKE_PROJECT}")
location = _get_real_location()
if location:
uri = uri.replace(
f"locations/{location}", f"locations/{_FAKE_LOCATION}"
)
uri = uri.replace(
f"//{location}-aiplatform.googleapis.com",
f"//{_FAKE_LOCATION}-aiplatform.googleapis.com",
)
request.uri = uri
return request
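# Illustrative example with hypothetical values: given a recording made against
# project "my-proj" in location "us-central1", _before_record_request rewrites
#
#   https://us-central1-aiplatform.googleapis.com/v1/projects/my-proj/locations/us-central1/...
#
# into
#
#   https://test-location-aiplatform.googleapis.com/v1/projects/test-project/locations/test-location/...
#
# so that no real project or location identifiers are persisted in cassettes.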
def _before_record_response(response):
if hasattr(response, "headers") and response.headers:
_redact_headers(response.headers)
return response
@pytest.fixture(name="vcr_config", scope="module")
def fixture_vcr_config():
return {
"filter_query_parameters": [
"key",
"apiKey",
"quotaUser",
"userProject",
"token",
"access_token",
"accessToken",
"refesh_token",
"refreshToken",
"authuser",
"bearer",
"bearer_token",
"bearerToken",
"userIp",
],
"filter_post_data_parameters": ["apikey", "api_key", "key"],
"filter_headers": [
"x-goog-api-key",
"authorization",
"server",
"Server",
"Server-Timing",
"Date",
],
"before_record_request": _before_record_request,
"before_record_response": _before_record_response,
"ignore_hosts": [
"oauth2.googleapis.com",
"iam.googleapis.com",
],
}
class _LiteralBlockScalar(str):
"""Formats the string as a literal block scalar, preserving whitespace and
without interpreting escape characters"""
def _literal_block_scalar_presenter(dumper, data):
"""Represents a scalar string as a literal block, via '|' syntax"""
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
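# Illustrative sketch, not part of the original file: once the presenter above
# is registered via yaml.add_representer (see the autouse fixture below),
# dumping a _LiteralBlockScalar renders as a '|' literal block rather than a
# quoted, escape-laden string, e.g.:
#
#   yaml.add_representer(_LiteralBlockScalar, _literal_block_scalar_presenter)
#   yaml.dump({"body": _LiteralBlockScalar('{\n  "a": 1\n}\n')})
#   # body: |
#   #   {
#   #     "a": 1
#   #   }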
@pytest.fixture(
name="internal_setup_yaml_pretty_formatting", scope="module", autouse=True
)
def fixture_setup_yaml_pretty_formatting():
yaml.add_representer(_LiteralBlockScalar, _literal_block_scalar_presenter)
def _process_string_value(string_value):
"""Pretty-prints JSON or returns long strings as a LiteralBlockScalar"""
try:
json_data = json.loads(string_value)
return _LiteralBlockScalar(json.dumps(json_data, indent=2))
except (ValueError, TypeError):
if len(string_value) > 80:
return _LiteralBlockScalar(string_value)
return string_value
def _convert_body_to_literal(data):
"""Searches the data for body strings, attempting to pretty-print JSON"""
if isinstance(data, dict):
for key, value in data.items():
# Handle response body case (e.g., response.body.string)
if key == "body" and isinstance(value, dict) and "string" in value:
value["string"] = _process_string_value(value["string"])
# Handle request body case (e.g., request.body)
elif key == "body" and isinstance(value, str):
data[key] = _process_string_value(value)
else:
_convert_body_to_literal(value)
elif isinstance(data, list):
for idx, choice in enumerate(data):
data[idx] = _convert_body_to_literal(choice)
return data
# Helper for re-applying GZIP compression where it was originally present.
def _ensure_gzip_single_response(data: bytes):
try:
# Attempt to decompress first, to avoid double compression.
gzip.decompress(data)
return data
except gzip.BadGzipFile:
# It must not have been compressed in the first place.
return gzip.compress(data)
# VCRPy automatically decompresses responses before saving them, but it may not
# re-encode them when the cassette is loaded, which can cause decompression errors
# during replay. We therefore re-encode on load so that the replayed response
# matches what was originally sent.
#
# https://vcrpy.readthedocs.io/en/latest/advanced.html#decode-compressed-response
def _ensure_casette_gzip(loaded_casette):
for interaction in loaded_casette["interactions"]:
response = interaction["response"]
headers = response["headers"]
if (
"content-encoding" not in headers
and "Content-Encoding" not in headers
):
continue
if (
"content-encoding" in headers
and "gzip" not in headers["content-encoding"]
):
continue
if (
"Content-Encoding" in headers
and "gzip" not in headers["Content-Encoding"]
):
continue
response["body"]["string"] = _ensure_gzip_single_response(
response["body"]["string"].encode()
)
def _maybe_ensure_casette_gzip(result):
if sys.version_info[0] == 3 and sys.version_info[1] == 9:
_ensure_casette_gzip(result)
class _PrettyPrintJSONBody:
"""This makes request and response body recordings more readable."""
@staticmethod
def serialize(cassette_dict):
cassette_dict = _convert_body_to_literal(cassette_dict)
return yaml.dump(
cassette_dict, default_flow_style=False, allow_unicode=True
)
@staticmethod
def deserialize(cassette_string):
result = yaml.load(cassette_string, Loader=yaml.Loader)
_maybe_ensure_casette_gzip(result)
return result
@pytest.fixture(name="fully_initialized_vcr", scope="module", autouse=True)
def setup_vcr(vcr):
vcr.register_serializer("yaml", _PrettyPrintJSONBody)
vcr.serializer = "yaml"
return vcr
@pytest.fixture(name="instrumentor")
def fixture_instrumentor():
return GoogleGenAiSdkInstrumentor()
@pytest.fixture(name="internal_instrumentation_setup", autouse=True)
def fixture_setup_instrumentation(instrumentor):
instrumentor.instrument()
yield
instrumentor.uninstrument()
@pytest.fixture(name="otel_mocker", autouse=True)
def fixture_otel_mocker():
result = OTelMocker()
result.install()
yield result
result.uninstall()
@pytest.fixture(
name="setup_content_recording",
autouse=True,
params=["logcontent", "excludecontent"],
)
def fixture_setup_content_recording(request):
enabled = request.param == "logcontent"
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = str(
enabled
)
@pytest.fixture(name="vcr_record_mode")
def fixture_vcr_record_mode(vcr):
return vcr.record_mode
@pytest.fixture(name="in_replay_mode")
def fixture_in_replay_mode(vcr_record_mode):
return vcr_record_mode == RecordMode.NONE
@pytest.fixture(name="gcloud_project", autouse=True)
def fixture_gcloud_project(in_replay_mode):
if in_replay_mode:
return _FAKE_PROJECT
result = _get_real_project()
for env_var in ["GCLOUD_PROJECT", "GOOGLE_CLOUD_PROJECT"]:
os.environ[env_var] = result
return result
@pytest.fixture(name="gcloud_location")
def fixture_gcloud_location(in_replay_mode):
if in_replay_mode:
return _FAKE_LOCATION
return _get_real_location()
@pytest.fixture(name="gcloud_credentials")
def fixture_gcloud_credentials(in_replay_mode):
if in_replay_mode:
return FakeCredentials()
creds, _ = google.auth.default()
return google.auth.credentials.with_scopes_if_required(
creds, ["https://www.googleapis.com/auth/cloud-platform"]
)
@pytest.fixture(name="gemini_api_key")
def fixture_gemini_api_key(in_replay_mode):
if in_replay_mode:
return _FAKE_API_KEY
return os.getenv("GEMINI_API_KEY")
@pytest.fixture(name="gcloud_api_key", autouse=True)
def fixture_gcloud_api_key(gemini_api_key):
if "GOOGLE_API_KEY" not in os.environ:
os.environ["GOOGLE_API_KEY"] = gemini_api_key
return os.getenv("GOOGLE_API_KEY")
@pytest.fixture(name="nonvertex_client_factory")
def fixture_nonvertex_client_factory(gemini_api_key):
def _factory():
return google.genai.Client(api_key=gemini_api_key, vertexai=False)
return _factory
@pytest.fixture(name="vertex_client_factory")
def fixture_vertex_client_factory(
gcloud_project, gcloud_location, gcloud_credentials
):
def _factory():
return google.genai.Client(
vertexai=True,
project=gcloud_project,
location=gcloud_location,
credentials=gcloud_credentials,
)
return _factory
@pytest.fixture(name="genai_sdk_backend", params=["vertexaiapi"])
def fixture_genai_sdk_backend(request):
return request.param
@pytest.fixture(name="use_vertex", autouse=True)
def fixture_use_vertex(genai_sdk_backend):
result = bool(genai_sdk_backend == "vertexaiapi")
os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "1" if result else "0"
return result
@pytest.fixture(name="client")
def fixture_client(
vertex_client_factory, nonvertex_client_factory, use_vertex
):
if use_vertex:
return vertex_client_factory()
return nonvertex_client_factory()
@pytest.fixture(name="is_async", params=["sync", "async"])
def fixture_is_async(request):
return request.param == "async"
@pytest.fixture(name="model", params=["gemini-1.5-flash-002"])
def fixture_model(request):
return request.param
@pytest.fixture(name="generate_content")
def fixture_generate_content(client, is_async):
def _sync_impl(*args, **kwargs):
return client.models.generate_content(*args, **kwargs)
def _async_impl(*args, **kwargs):
return asyncio.run(client.aio.models.generate_content(*args, **kwargs))
if is_async:
return _async_impl
return _sync_impl
@pytest.fixture(name="generate_content_stream")
def fixture_generate_content_stream(client, is_async):
def _sync_impl(*args, **kwargs):
results = []
for result in client.models.generate_content_stream(*args, **kwargs):
results.append(result)
return results
def _async_impl(*args, **kwargs):
async def _gather_all():
results = []
async for (
result
) in await client.aio.models.generate_content_stream(
*args, **kwargs
):
results.append(result)
return results
return asyncio.run(_gather_all())
if is_async:
return _async_impl
return _sync_impl
@pytest.mark.vcr
def test_non_streaming(generate_content, model, otel_mocker):
response = generate_content(
model=model, contents="Create a poem about Open Telemetry."
)
assert response is not None
assert response.text is not None
assert len(response.text) > 0
otel_mocker.assert_has_span_named(f"generate_content {model}")
@pytest.mark.vcr
def test_streaming(generate_content_stream, model, otel_mocker):
count = 0
for response in generate_content_stream(
model=model, contents="Create a poem about Open Telemetry."
):
assert response is not None
assert response.text is not None
assert len(response.text) > 0
count += 1
assert count > 0
otel_mocker.assert_has_span_named(f"generate_content {model}")

Some files were not shown because too many files have changed in this diff.