mirror of https://github.com/dapr/dapr-agents.git
Compare commits
55 Commits
Author | SHA1 | Date |
---|---|---|
|
d993f9090b | |
|
f2d6831ea2 | |
|
3e767e03fb | |
|
1c832636eb | |
|
3bd6c99506 | |
|
6d9b26bce6 | |
|
19e2caa25f | |
|
5b9e5385af | |
|
caaad181ce | |
|
8f7b9f5df3 | |
|
6d55c383b9 | |
|
6509906088 | |
|
cee485fbad | |
|
9c8126bcb3 | |
|
c2eff2b971 | |
|
c4b1f7c441 | |
|
f87e27f450 | |
|
1e5275834d | |
|
b7b4a9891e | |
|
2fd44b3ecc | |
|
2757aab5b6 | |
|
d86a4c5a70 | |
|
83fc449e39 | |
|
94bf5d2a38 | |
|
8741289e7d | |
|
41faa4f5b7 | |
|
76ad962b69 | |
|
28ac198055 | |
|
e27f5befb0 | |
|
889b7bf7ef | |
|
4dce1c0300 | |
|
53c1c9ffde | |
|
6f20c0d9a0 | |
|
6823cd633d | |
|
a878e76ec1 | |
|
75274ac607 | |
|
f129754486 | |
|
c31e985d81 | |
|
f9eb48c02c | |
|
6f0cfc8818 | |
|
fd28b02935 | |
|
356a25f281 | |
|
bd0859d181 | |
|
099dc5d2fb | |
|
b939d7d2f5 | |
|
f870d35916 | |
|
f5dc9372e7 | |
|
199fcf9d02 | |
|
3edbcf29c2 | |
|
e8cb700652 | |
|
62d4cdbe02 | |
|
c872c5a8bd | |
|
d6fc2c89f0 | |
|
cb75e76ba1 | |
|
cf400f189c |
|
@ -0,0 +1,48 @@
|
|||
---
|
||||
name: New Content Needed
|
||||
about: Template for requesting new documentation content
|
||||
title: "[Content] "
|
||||
labels: content/missing-information
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
## Related Issue
|
||||
<!-- Link to the original issue that triggered this content request -->
|
||||
Related to: #<issue_number>
|
||||
|
||||
## Content Type
|
||||
<!-- What type of content is needed? -->
|
||||
- [ ] New feature documentation
|
||||
- [ ] API reference
|
||||
- [ ] How-to guide
|
||||
- [ ] Tutorial
|
||||
- [ ] Conceptual documentation
|
||||
- [ ] Other (please specify)
|
||||
|
||||
## Target Audience
|
||||
<!-- Who is this content for? -->
|
||||
- [ ] Developers
|
||||
- [ ] Operators
|
||||
- [ ] Architects
|
||||
- [ ] End users
|
||||
- [ ] Other (please specify)
|
||||
|
||||
## Content Description
|
||||
<!-- Provide a clear description of what content is needed -->
|
||||
<!-- What should the documentation cover? What are the key points to include? -->
|
||||
|
||||
## Additional Context
|
||||
<!-- Add any additional context about the content request here -->
|
||||
<!-- Include any specific requirements, examples, or references -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- What should be included in the documentation to consider it complete? -->
|
||||
- [ ]
|
||||
- [ ]
|
||||
- [ ]
|
||||
|
||||
## Resources
|
||||
<!-- Add any relevant resources, links, or references that might help with creating the content -->
|
||||
|
||||
## Notes
|
||||
<!-- Any additional notes or comments -->
|
|
@ -0,0 +1,179 @@
|
|||
// List of owner who can control dapr-bot workflow
|
||||
// IMPORTANT: Make sure usernames are lower-cased
|
||||
const owners = [
|
||||
'yaron2',
|
||||
'cyb3rward0g'
|
||||
]
|
||||
|
||||
const docsIssueBodyTpl = (
|
||||
issueNumber
|
||||
) => `This issue was automatically created by \
|
||||
[Dapr Bot](https://github.com/dapr/dapr-agents/blob/master/.github/workflows/dapr-bot.yml) because a \"docs-needed\" label \
|
||||
was added to dapr/dapr#${issueNumber}. \n\n\
|
||||
TODO: Add more details as per [this template](.github/ISSUE_TEMPLATE/new-content-needed.md).`
|
||||
|
||||
module.exports = async ({ github, context }) => {
|
||||
if (
|
||||
context.eventName == 'issue_comment' &&
|
||||
context.payload.action == 'created'
|
||||
) {
|
||||
await handleIssueCommentCreate({ github, context })
|
||||
} else if (
|
||||
context.eventName == 'issues' &&
|
||||
context.payload.action == 'labeled'
|
||||
) {
|
||||
await handleIssueLabeled({ github, context })
|
||||
} else {
|
||||
console.log(`[main] event ${context.eventName} not supported, exiting.`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle issue comment create event.
|
||||
*/
|
||||
async function handleIssueCommentCreate({ github, context }) {
|
||||
const payload = context.payload
|
||||
const issue = context.issue
|
||||
const username = context.actor.toLowerCase()
|
||||
const isFromPulls = !!payload.issue.pull_request
|
||||
const commentBody = ((payload.comment.body || '') + '').trim()
|
||||
console.log(` Issue(owner/repo/number): ${issue.owner}/${issue.repo}/${issue.number}
|
||||
Actor(current username / id): ${username} / ${payload.comment.user.id}
|
||||
CommentID: ${payload.comment.id}
|
||||
CreatedAt: ${payload.comment.created_at}`
|
||||
)
|
||||
|
||||
if (!commentBody || !commentBody.startsWith('/')) {
|
||||
// Not a command
|
||||
return
|
||||
}
|
||||
|
||||
const commandParts = commentBody.split(/\s+/)
|
||||
const command = commandParts.shift()
|
||||
console.log(` Command: ${command}`)
|
||||
|
||||
// Commands that can be executed by anyone.
|
||||
if (command == '/assign') {
|
||||
await cmdAssign(github, issue, username, isFromPulls)
|
||||
return
|
||||
}
|
||||
|
||||
// Commands that can only be executed by owners.
|
||||
if (!owners.includes(username)) {
|
||||
console.log(
|
||||
`[handleIssueCommentCreate] user ${username} is not an owner, exiting.`
|
||||
)
|
||||
await commentUserNotAllowed(github, issue, username)
|
||||
return
|
||||
}
|
||||
|
||||
switch (command) {
|
||||
case '/make-me-laugh':
|
||||
await cmdMakeMeLaugh(github, issue)
|
||||
break
|
||||
// TODO: add more in future. Ref: https://github.com/dapr/dapr/blob/master/.github/scripts/dapr_bot.js#L99
|
||||
default:
|
||||
console.log(
|
||||
`[handleIssueCommentCreate] command ${command} not found, exiting.`
|
||||
)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle issue labeled event.
|
||||
*/
|
||||
async function handleIssueLabeled({ github, context }) {
|
||||
const payload = context.payload
|
||||
const label = payload.label.name
|
||||
const issueNumber = payload.issue.number
|
||||
|
||||
// This should not run in forks.
|
||||
if (context.repo.owner !== 'dapr') {
|
||||
console.log('[handleIssueLabeled] not running in dapr repo, exiting.')
|
||||
return
|
||||
}
|
||||
|
||||
// Authorization is not required here because it's triggered by an issue label event.
|
||||
// Only authorized users can add labels to issues.
|
||||
if (label == 'docs-needed') {
|
||||
// Open a new issue
|
||||
await github.rest.issues.create({
|
||||
owner: 'dapr',
|
||||
repo: 'docs',
|
||||
title: `New content needed for dapr/dapr#${issueNumber}`,
|
||||
labels: ['content/missing-information', 'created-by/dapr-bot'],
|
||||
body: docsIssueBodyTpl(issueNumber),
|
||||
})
|
||||
} else {
|
||||
console.log(
|
||||
`[handleIssueLabeled] label ${label} not supported, exiting.`
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Assign the issue to the user who commented.
|
||||
* @param {*} github GitHub object reference
|
||||
* @param {*} issue GitHub issue object
|
||||
* @param {string} username GitHub user who commented
|
||||
* @param {boolean} isFromPulls is the workflow triggered by a pull request?
|
||||
*/
|
||||
async function cmdAssign(github, issue, username, isFromPulls) {
|
||||
if (isFromPulls) {
|
||||
console.log(
|
||||
'[cmdAssign] pull requests unsupported, skipping command execution.'
|
||||
)
|
||||
return
|
||||
} else if (issue.assignees && issue.assignees.length !== 0) {
|
||||
console.log(
|
||||
'[cmdAssign] issue already has assignees, skipping command execution.'
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
await github.rest.issues.addAssignees({
|
||||
owner: issue.owner,
|
||||
repo: issue.repo,
|
||||
issue_number: issue.number,
|
||||
assignees: [username],
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Comment a funny joke.
|
||||
* @param {*} github GitHub object reference
|
||||
* @param {*} issue GitHub issue object
|
||||
*/
|
||||
async function cmdMakeMeLaugh(github, issue) {
|
||||
const result = await github.request(
|
||||
'https://official-joke-api.appspot.com/random_joke'
|
||||
)
|
||||
jokedata = result.data
|
||||
joke = 'I have a bad feeling about this.'
|
||||
if (jokedata && jokedata.setup && jokedata.punchline) {
|
||||
joke = `${jokedata.setup} - ${jokedata.punchline}`
|
||||
}
|
||||
|
||||
await github.rest.issues.createComment({
|
||||
owner: issue.owner,
|
||||
repo: issue.repo,
|
||||
issue_number: issue.number,
|
||||
body: joke,
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends a comment when the user who tried triggering the bot action is not allowed to do so.
|
||||
* @param {*} github GitHub object reference
|
||||
* @param {*} issue GitHub issue object
|
||||
* @param {string} username GitHub user who commented
|
||||
*/
|
||||
async function commentUserNotAllowed(github, issue, username) {
|
||||
await github.rest.issues.createComment({
|
||||
owner: issue.owner,
|
||||
repo: issue.repo,
|
||||
issue_number: issue.number,
|
||||
body: `👋 @${username}, my apologies but I can't perform this action for you because your username is not in the allowlist in the file ${'`.github/scripts/dapr_bot.js`'}.`,
|
||||
})
|
||||
}
|
|
@ -0,0 +1,74 @@
|
|||
name: Lint and Build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- feature/*
|
||||
- feat/*
|
||||
- bugfix/*
|
||||
- hotfix/*
|
||||
- fix/*
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- feature/*
|
||||
- release-*
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Python 3.10
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.10"
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install setuptools wheel tox
|
||||
- name: Run Autoformatter
|
||||
run: |
|
||||
tox -e ruff
|
||||
statusResult=$(git status -u --porcelain)
|
||||
if [ -z "$statusResult" ]
|
||||
then
|
||||
exit 0
|
||||
else
|
||||
echo "Source files are not formatted correctly. Run 'tox -e ruff' to autoformat."
|
||||
exit 1
|
||||
fi
|
||||
- name: Run Linter
|
||||
run: |
|
||||
tox -e flake8
|
||||
|
||||
build:
|
||||
needs: lint
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python_ver: ["3.10", "3.11", "3.12", "3.13"]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Python ${{ matrix.python_ver }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python_ver }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install setuptools wheel tox
|
||||
- name: Install package and test dependencies
|
||||
run: |
|
||||
pip cache purge
|
||||
pip install --upgrade pip setuptools wheel
|
||||
pip install -e .
|
||||
pip install -e .[test]
|
||||
- name: Check Typing
|
||||
run: |
|
||||
tox -e type
|
||||
- name: Run Tests
|
||||
run: |
|
||||
tox -e pytest
|
|
@ -4,12 +4,14 @@ on:
|
|||
branches:
|
||||
- main
|
||||
paths:
|
||||
- docs
|
||||
- docs/**
|
||||
- '!docs/development/**'
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- docs
|
||||
- docs/**
|
||||
- '!docs/development/**'
|
||||
workflow_dispatch:
|
||||
permissions:
|
||||
contents: write
|
||||
|
@ -18,7 +20,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
name: Review changed files
|
||||
outputs:
|
||||
docs_any_changed: NaN
|
||||
docs_any_changed: ${{ steps.changed-files.outputs.docs_any_changed }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get changed files
|
||||
|
@ -29,6 +31,7 @@ jobs:
|
|||
docs:
|
||||
- 'docs/**'
|
||||
- 'mkdocs.yml'
|
||||
- '!docs/development/**'
|
||||
base_sha: 'main'
|
||||
|
||||
documentation_validation:
|
||||
|
@ -42,10 +45,16 @@ jobs:
|
|||
- name: Remove plugins from mkdocs configuration
|
||||
run: |
|
||||
sed -i '/^plugins:/,/^[^ ]/d' mkdocs.yml
|
||||
- name: Run MkDocs build
|
||||
uses: Kjuly/mkdocs-page-builder@main
|
||||
- name: Install Python dependencies
|
||||
run: |
|
||||
pip install mkdocs-material
|
||||
pip install .[recommended,git,imaging]
|
||||
pip install mkdocs-jupyter
|
||||
- name: Validate build
|
||||
run: mkdocs build
|
||||
|
||||
deploy:
|
||||
if: github.ref == 'refs/heads/main'
|
||||
runs-on: ubuntu-latest
|
||||
needs: documentation_validation
|
||||
steps:
|
||||
|
|
|
@ -2,9 +2,11 @@
|
|||
.DS_Store
|
||||
secrets.json
|
||||
test
|
||||
.dapr
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
**/__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
|
@ -164,3 +166,14 @@ cython_debug/
|
|||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
.idea
|
||||
|
||||
.ruff_cache/
|
||||
|
||||
# Quickstart outputs
|
||||
*_state.json
|
||||
quickstarts/**/*_state.json
|
||||
chroma_db/
|
||||
db/
|
||||
|
||||
# Requirements files since we use pyproject.toml instead
|
||||
dev-requirements.txt
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
# These owners are the maintainers and approvers of this repo
|
||||
# TODO: we need official teams in dapr github https://github.com/orgs/dapr/teams
|
||||
* @yaron2 @Cyb3rWard0g
|
|
@ -1,86 +0,0 @@
|
|||
# Code of Conduct
|
||||
|
||||
We are committed to fostering a welcoming, inclusive, and respectful environment for everyone involved in this project. This Code of Conduct outlines the expected behaviors within our community and the steps for reporting unacceptable actions. By participating, you agree to uphold these standards, helping to create a positive and collaborative space.
|
||||
|
||||
---
|
||||
|
||||
## Our Pledge
|
||||
|
||||
As members, contributors, and leaders of this community, we pledge to:
|
||||
|
||||
* Ensure participation in our project is free from harassment, discrimination, or exclusion.
|
||||
* Treat everyone with respect and empathy, regardless of factors such as age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity or expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual orientation.
|
||||
* Act in ways that contribute to a safe, welcoming, and supportive environment for all participants.
|
||||
|
||||
---
|
||||
|
||||
## Our Standards
|
||||
|
||||
We strive to create an environment where all members can thrive. Examples of positive behaviors include:
|
||||
|
||||
* Showing kindness, empathy, and consideration for others.
|
||||
* Being respectful of differing opinions, experiences, and perspectives.
|
||||
* Providing constructive feedback in a supportive manner.
|
||||
* Taking responsibility for mistakes, apologizing when necessary, and learning from experiences.
|
||||
* Prioritizing the success and well-being of the entire community over individual gains.
|
||||
|
||||
The following behaviors are considered unacceptable:
|
||||
|
||||
* Using sexualized language or imagery, or engaging in inappropriate sexual attention or advances.
|
||||
* Making insulting, derogatory, or inflammatory comments, including trolling or personal attacks.
|
||||
* Engaging in harassment, whether public or private.
|
||||
* Publishing private or sensitive information about others without explicit consent.
|
||||
* Engaging in behavior that disrupts discussions, events, or contributions in a negative way.
|
||||
* Any conduct that could reasonably be deemed unprofessional or harmful to others.
|
||||
|
||||
---
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies to all areas of interaction within the community, including but not limited to:
|
||||
|
||||
* Discussions on forums, repositories, or other official communication channels.
|
||||
* Contributions made to the project, such as code, documentation, or issues.
|
||||
* Public representation of the community, such as through official social media accounts or at events.
|
||||
|
||||
It also applies to actions outside these spaces if they negatively impact the health, safety, or inclusivity of the community.
|
||||
|
||||
---
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for ensuring that this Code of Conduct is upheld. They may take appropriate and fair corrective actions in response to any behavior that violates these standards, including:
|
||||
|
||||
* Removing, editing, or rejecting comments, commits, issues, or other contributions not aligned with the Code of Conduct.
|
||||
* Temporarily or permanently banning individuals for repeated or severe violations.
|
||||
|
||||
Leaders will always strive to communicate their decisions clearly and fairly.
|
||||
|
||||
---
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
If you experience or witness unacceptable behavior, please report it to the project's owner [Roberto Rodriguez](https://www.linkedin.com/in/cyb3rward0g/). Your report will be handled with sensitivity, and we will respect your privacy and confidentiality while addressing the issue.
|
||||
|
||||
When reporting, please include:
|
||||
|
||||
* A description of the incident.
|
||||
* When and where it occurred.
|
||||
* Any additional context or supporting evidence, if available.
|
||||
|
||||
---
|
||||
|
||||
## Enforcement Process
|
||||
|
||||
We encourage resolving issues through dialogue when possible, but community leaders will intervene when necessary. Actions may include warnings, temporary bans, or permanent removal from the community, depending on the severity of the behavior.
|
||||
|
||||
---
|
||||
|
||||
## Attribution
|
||||
This Code of Conduct is inspired by the [Contributor Covenant, version 2.0](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html) and has drawn inspiration from open source community guidelines by Microsoft, Mozilla, and others.
|
||||
|
||||
For further context on best practices for open source codes of conduct, see the [Contributor Covenant FAQ](https://www.contributor-covenant.org/faq).
|
||||
|
||||
---
|
||||
|
||||
Thank you for helping to create a positive environment! ❤️
|
|
@ -0,0 +1,15 @@
|
|||
# Governance
|
||||
|
||||
## Project Maintainers
|
||||
[Project maintainers](https://github.com/dapr/community/blob/master/MAINTAINERS.md) are responsible for activities around maintaining and updating Dapr. Final decisions on the project reside with the project maintainers.
|
||||
|
||||
Maintainers MUST remain active. If they are unresponsive for >3 months, they will be automatically removed unless a [super-majority](https://en.wikipedia.org/wiki/Supermajority#Two-thirds_vote) of the other project maintainers agrees to extend the period to be greater than 3 months.
|
||||
|
||||
New maintainers can be added to the project by a [super-majority](https://en.wikipedia.org/wiki/Supermajority#Two-thirds_vote) vote of the existing maintainers. A potential maintainer may be nominated by an existing maintainer. A vote is conducted in private between the current maintainers over the course of a one week voting period. At the end of the week, votes are counted and a pull request is made on the repo adding the new maintainer to the [CODEOWNERS](CODEOWNERS) file.
|
||||
|
||||
A maintainer may step down by submitting an [issue](https://github.com/dapr/dapr-agents/issues/new) stating their intent.
|
||||
|
||||
Changes to this governance document require a pull request with approval from a [super-majority](https://en.wikipedia.org/wiki/Supermajority#Two-thirds_vote) of the current maintainers.
|
||||
|
||||
## Code of Conduct
|
||||
This project has adopted the [Contributor Covenant Code of Conduct](https://github.com/dapr/community/blob/master/CODE-OF-CONDUCT.md)
|
20
Makefile
20
Makefile
|
@ -1,6 +1,26 @@
|
|||
# Get all directories within quickstarts
|
||||
QUICKSTART_DIRS := $(shell find quickstarts -mindepth 1 -maxdepth 1 -type d -exec basename {} \;)
|
||||
|
||||
# Test targets
|
||||
.PHONY: test
|
||||
test:
|
||||
@echo "Running tests..."
|
||||
python -m pytest tests/ -v --tb=short
|
||||
|
||||
.PHONY: test-cov
|
||||
test-cov:
|
||||
@echo "Running tests with coverage..."
|
||||
python -m pytest tests/ -v --cov=dapr_agents --cov-report=term-missing --cov-report=html
|
||||
|
||||
.PHONY: test-install
|
||||
test-install:
|
||||
@echo "Installing test dependencies..."
|
||||
pip install install -e .[test]
|
||||
|
||||
.PHONY: test-all
|
||||
test-all: test-install test-cov
|
||||
@echo "All tests completed!"
|
||||
|
||||
# Main target to validate all quickstarts
|
||||
.PHONY: validate-quickstarts
|
||||
validate-quickstarts:
|
||||
|
|
54
README.md
54
README.md
|
@ -1,5 +1,13 @@
|
|||
# Dapr Agents: A Framework for Agentic AI Systems
|
||||
|
||||
[](https://pypi.org/project/dapr-agents/)
|
||||
[](https://pypi.org/project/dapr-agents/)
|
||||
[](https://github.com/dapr/dapr-agents/actions/workflows/build.yaml)
|
||||
[](https://github.com/dapr/dapr-agents/blob/main/LICENSE)
|
||||
[](http://bit.ly/dapr-discord)
|
||||
[](https://youtube.com/@daprdev)
|
||||
[](https://twitter.com/daprdev)
|
||||
|
||||
Dapr Agents is a developer framework designed to build production-grade resilient AI agent systems that operate at scale. Built on top of the battle-tested Dapr project, it enables software developers to create AI agents that reason, act, and collaborate using Large Language Models (LLMs), while leveraging built-in observability and stateful workflow execution to guarantee agentic workflows complete successfully, no matter how complex.
|
||||
|
||||

|
||||
|
@ -26,7 +34,7 @@ Dapr Agents builds on top of Dapr's Workflow API, which under the hood represent
|
|||
|
||||
### Data-Centric AI Agents
|
||||
|
||||
With built-in connectivity to over 50 enterprise data sources, Dapr Agents efficiently handles structured and unstructured data. From basic [PDF extraction](./docs/concepts/arxiv_fetcher.md) to large-scale database interactions, it enables seamless data-driven AI workflows with minimal code changes. Dapr's [bindings](https://docs.dapr.io/reference/components-reference/supported-bindings/) and [state stores](https://docs.dapr.io/reference/components-reference/supported-state-stores/) provide access to a large number of data sources that can be used to ingest data to an agent. [MCP integration](https://docs.anthropic.com/en/docs/agents-and-tools/mcp) is coming soon.
|
||||
With built-in connectivity to over 50 enterprise data sources, Dapr Agents efficiently handles structured and unstructured data. From basic [PDF extraction](./docs/concepts/arxiv_fetcher.md) to large-scale database interactions, it enables seamless data-driven AI workflows with minimal code changes. Dapr's [bindings](https://docs.dapr.io/reference/components-reference/supported-bindings/) and [state stores](https://docs.dapr.io/reference/components-reference/supported-state-stores/) provide access to a large number of data sources that can be used to ingest data to an agent.
|
||||
|
||||
### Accelerated Development
|
||||
|
||||
|
@ -38,6 +46,7 @@ Dapr Agents provides a set of AI features that give developers a complete API su
|
|||
- Contextual memory
|
||||
- Flexible prompting
|
||||
- Intelligent tool selection
|
||||
- [MCP integration](https://docs.anthropic.com/en/docs/agents-and-tools/mcp).
|
||||
|
||||
### Integrated Security and Reliability
|
||||
|
||||
|
@ -55,6 +64,45 @@ By building on top of Dapr, platform and infrastructure teams can apply Dapr's [
|
|||
|
||||
As a part of **CNCF**, Dapr Agents is vendor-neutral, eliminating concerns about lock-in, intellectual property risks, or proprietary restrictions. Organizations gain full flexibility and control over their AI applications using open-source software they can audit and contribute to.
|
||||
|
||||
## Roadmap
|
||||
|
||||
Here are some of the major features we're working on:
|
||||
|
||||
### Q2 2025
|
||||
- **MCP Support** - Integration with Anthropic's MCP platform ([#50](https://github.com/dapr/dapr-agents/issues/50) ✅ )
|
||||
- **Agent Interaction Tracing** - Enhanced observability of agent interactions with LLMs and tools ([#79](https://github.com/dapr/dapr-agents/issues/79))
|
||||
- **Streaming LLM Output** - Real-time streaming capabilities for LLM responses ([#80](https://github.com/dapr/dapr-agents/issues/80))
|
||||
- **HTTP Endpoint Tools** - Support for using Dapr's HTTP endpoint capabilities for tool calling ([#81](https://github.com/dapr/dapr-agents/issues/81))
|
||||
- **DSL Cleanup** - Streamlining the domain-specific language and removing actor dependencies ([#65](https://github.com/dapr/dapr-agents/issues/65))
|
||||
- **Samples Registry** - A dedicated repository for Dapr Agents examples and use cases
|
||||
|
||||
### Q3/Q4 2025
|
||||
- **Human-in-the-Loop Support**
|
||||
- **Conversation API Progressed to Beta**
|
||||
- **Vector API** - Vector operations support in Dapr and Dapr Agents
|
||||
|
||||
For more details about these features and other planned work, please check out our [GitHub issues](https://github.com/dapr/dapr-agents/issues).
|
||||
|
||||
### Language Support
|
||||
|
||||
| Language | Current Status | Development Status | Stable Status |
|
||||
|----------|---------------|-------------|--------|
|
||||
| Python | In Development | Q2 2025 | Q3 2025 |
|
||||
| .NET | Planning | Q3 2025 | Q4 2025 |
|
||||
| Other Languages | Coming Soon | TBD | TBD |
|
||||
|
||||
## Documentation
|
||||
|
||||
- [Development Guide](docs/development/README.md) - For developers and contributors
|
||||
|
||||
## Community
|
||||
|
||||
### Contributing to Dapr Agents
|
||||
|
||||
Please refer to our [Dapr Community Code of Conduct](https://github.com/dapr/community/blob/master/CODE-OF-CONDUCT.md)
|
||||
|
||||
For development setup and guidelines, see our [Development Guide](docs/development/README.md).
|
||||
|
||||
## Getting Started
|
||||
|
||||
Prerequisites:
|
||||
|
@ -81,7 +129,3 @@ Dapr Agents is an open-source project under the CNCF umbrella, and we welcome co
|
|||
- Documentation: [https://dapr.github.io/dapr-agents/](https://dapr.github.io/dapr-agents/)
|
||||
- Community Discord: [Join the discussion](https://bit.ly/dapr-discord).
|
||||
- Contribute: Open an issue or submit a PR to help improve Dapr Agents!
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
Please refer to our [Dapr Community Code of Conduct](https://github.com/dapr/community/blob/master/CODE-OF-CONDUCT.md)
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
## Security Policy
|
||||
|
||||
https://docs.dapr.io/operations/support/support-security-issues/
|
|
@ -1,561 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OpenAI Tool Calling Agent - Dummy Weather Example\n",
|
||||
"\n",
|
||||
"* Collaborator: Roberto Rodriguez @Cyb3rWard0g"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import Environment Variables"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv() # take environment variables from .env."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Enable Logging"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.INFO)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define Tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents import tool\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"\n",
|
||||
"class GetWeatherSchema(BaseModel):\n",
|
||||
" location: str = Field(description=\"location to get weather for\")\n",
|
||||
"\n",
|
||||
"@tool(args_model=GetWeatherSchema)\n",
|
||||
"def get_weather(location: str) -> str:\n",
|
||||
" \"\"\"Get weather information for a specific location.\"\"\"\n",
|
||||
" import random\n",
|
||||
" temperature = random.randint(60, 80)\n",
|
||||
" return f\"{location}: {temperature}F.\"\n",
|
||||
"\n",
|
||||
"class JumpSchema(BaseModel):\n",
|
||||
" distance: str = Field(description=\"Distance for agent to jump\")\n",
|
||||
"\n",
|
||||
"@tool(args_model=JumpSchema)\n",
|
||||
"def jump(distance: str) -> str:\n",
|
||||
" \"\"\"Jump a specific distance.\"\"\"\n",
|
||||
" return f\"I jumped the following distance {distance}\"\n",
|
||||
"\n",
|
||||
"tools = [get_weather,jump]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.llm.openai.client.base:Initializing OpenAI client...\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool registered: GetWeather\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool registered: Jump\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool Executor initialized with 2 registered tools.\n",
|
||||
"INFO:dapr_agents.agent.base:Constructing system_prompt from agent attributes.\n",
|
||||
"INFO:dapr_agents.agent.base:Using system_prompt to create the prompt template.\n",
|
||||
"INFO:dapr_agents.agent.base:Pre-filled prompt template with attributes: ['name', 'role', 'goal']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents import ReActAgent\n",
|
||||
"\n",
|
||||
"AIAgent = ReActAgent(\n",
|
||||
" name=\"Rob\",\n",
|
||||
" role= \"Weather Assistant\",\n",
|
||||
" tools=tools\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatPromptTemplate(input_variables=['chat_history'], pre_filled_variables={'name': 'Rob', 'role': 'Weather Assistant', 'goal': 'Help humans'}, messages=[('system', '# Today\\'s date is: March 04, 2025\\n\\n## Name\\nYour name is {{name}}.\\n\\n## Role\\nYour role is {{role}}.\\n\\n## Goal\\n{{goal}}.\\n\\n## Tools\\nYou have access ONLY to the following tools:\\nGetWeather: Get weather information for a specific location.. Args schema: {\\'location\\': {\\'description\\': \\'location to get weather for\\', \\'type\\': \\'string\\'}}\\nJump: Jump a specific distance.. Args schema: {\\'distance\\': {\\'description\\': \\'Distance for agent to jump\\', \\'type\\': \\'string\\'}}\\n\\nIf you think about using tool, it must use the correct tool JSON blob format as shown below:\\n```\\n{\\n \"name\": $TOOL_NAME,\\n \"arguments\": $INPUT\\n}\\n```\\n\\n## ReAct Format\\nThought: Reflect on the current state of the conversation or task. If additional information is needed, determine if using a tool is necessary. When a tool is required, briefly explain why it is needed for the specific step at hand, and immediately follow this with an `Action:` statement to address that specific requirement. Avoid combining multiple tool requests in a single `Thought`. If no tools are needed, proceed directly to an `Answer:` statement.\\nAction:\\n```\\n{\\n \"name\": $TOOL_NAME,\\n \"arguments\": $INPUT\\n}\\n```\\nObservation: Describe the result of the action taken.\\n... (repeat Thought/Action/Observation as needed, but **ALWAYS proceed to a final `Answer:` statement when you have enough information**)\\nThought: I now have sufficient information to answer the initial question.\\nAnswer: ALWAYS proceed to a final `Answer:` statement once enough information is gathered or if the tools do not provide the necessary data.\\n\\n### Providing a Final Answer\\nOnce you have enough information to answer the question OR if tools cannot provide the necessary data, respond using one of the following formats:\\n\\n1. **Direct Answer without Tools**:\\nThought: I can answer directly without using any tools. Answer: Direct answer based on previous interactions or current knowledge.\\n\\n2. **When All Needed Information is Gathered**:\\nThought: I now have sufficient information to answer the question. Answer: Complete final answer here.\\n\\n3. **If Tools Cannot Provide the Needed Information**:\\nThought: The available tools do not provide the necessary information. Answer: Explanation of limitation and relevant information if possible.\\n\\n### Key Guidelines\\n- Always Conclude with an `Answer:` statement.\\n- Ensure every response ends with an `Answer:` statement that summarizes the most recent findings or relevant information, avoiding incomplete thoughts.\\n- Direct Final Answer for Past or Known Information: If the user inquires about past interactions, respond directly with an Answer: based on the information in chat history.\\n- Avoid Repetitive Thought Statements: If the answer is ready, skip repetitive Thought steps and proceed directly to Answer.\\n- Minimize Redundant Steps: Use minimal Thought/Action/Observation cycles to arrive at a final Answer efficiently.\\n- Reference Past Information When Relevant: Use chat history accurately when answering questions about previous responses to avoid redundancy.\\n- Progressively Move Towards Finality: Reflect on the current step and avoid re-evaluating the entire user request each time. Aim to advance towards the final Answer in each cycle.\\n\\n## Chat History\\nThe chat history is provided to avoid repeating information and to ensure accurate references when summarizing past interactions.'), MessagePlaceHolder(variable_name=chat_history)], template_format='jinja2')"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"AIAgent.prompt_template"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'GetWeather'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"AIAgent.tools[0].name"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.agent.base:Pre-filled prompt template with variables: dict_keys(['chat_history'])\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Iteration 1/10 started.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
|
||||
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mHi my name is Roberto\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:No action specified; continuing with further reasoning.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Iteration 2/10 started.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;217;95;118mThought: Hello Roberto! How can I assist you today with the weather?\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Agent provided a direct final answer.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;217;95;118mThought: Answer: Hello Roberto! How can I assist you today with the weather?\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mHello Roberto! How can I assist you today with the weather?\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello Roberto! How can I assist you today with the weather?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"AIAgent.run(\"Hi my name is Roberto\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'role': 'user', 'content': 'Hi my name is Roberto'},\n",
|
||||
" {'content': 'Hello Roberto! How can I assist you today with the weather?',\n",
|
||||
" 'role': 'assistant'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"AIAgent.chat_history"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.agent.base:Pre-filled prompt template with variables: dict_keys(['chat_history'])\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Iteration 1/10 started.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
|
||||
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mWhat is the weather in Virgina, New York and Washington DC?\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Executing GetWeather with arguments {'location': 'Virginia'}\n",
|
||||
"INFO:dapr_agents.tool.executor:Attempting to execute tool: GetWeather\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool 'GetWeather' executed successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Thought:I will need to gather the current weather information for both Virginia and Washington, D.C. by using the GetWeather tool.\n",
|
||||
"Action:{'name': 'GetWeather', 'arguments': {'location': 'Virginia'}}\n",
|
||||
"Observation:Virginia: 74F.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Iteration 2/10 started.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;217;95;118mThought: I will need to gather the current weather information for both Virginia and Washington, D.C. by using the GetWeather tool.\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126mAction: {\"name\": \"GetWeather\", \"arguments\": {\"location\": \"Virginia\"}}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;146;94;130mObservation: Virginia: 74F.\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Executing GetWeather with arguments {'location': 'New York'}\n",
|
||||
"INFO:dapr_agents.tool.executor:Attempting to execute tool: GetWeather\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool 'GetWeather' executed successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Thought:\n",
|
||||
"Action:{'name': 'GetWeather', 'arguments': {'location': 'New York'}}\n",
|
||||
"Observation:New York: 65F.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Iteration 3/10 started.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;217;95;118mThought: \u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126mAction: {\"name\": \"GetWeather\", \"arguments\": {\"location\": \"New York\"}}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;146;94;130mObservation: New York: 65F.\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Executing GetWeather with arguments {'location': 'Washington DC'}\n",
|
||||
"INFO:dapr_agents.tool.executor:Attempting to execute tool: GetWeather\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool 'GetWeather' executed successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Thought:\n",
|
||||
"Action:{'name': 'GetWeather', 'arguments': {'location': 'Washington DC'}}\n",
|
||||
"Observation:Washington DC: 66F.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Iteration 4/10 started.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;217;95;118mThought: \u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126mAction: {\"name\": \"GetWeather\", \"arguments\": {\"location\": \"Washington DC\"}}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;146;94;130mObservation: Washington DC: 66F.\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Agent provided a direct final answer.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;217;95;118mThought: I now have sufficient information to answer the question. \u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118mAnswer: The current weather is as follows:\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m- Virginia: 74°F\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m- New York: 65°F\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m- Washington, D.C.: 66°F\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mThe current weather is as follows:\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- Virginia: 74°F\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- New York: 65°F\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- Washington, D.C.: 66°F\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The current weather is as follows:\\n- Virginia: 74°F\\n- New York: 65°F\\n- Washington, D.C.: 66°F'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"AIAgent.run(\"What is the weather in Virgina, New York and Washington DC?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.agent.base:Pre-filled prompt template with variables: dict_keys(['chat_history'])\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Iteration 1/10 started.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
|
||||
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mWhat places did you already help me with the weather?\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:No action specified; continuing with further reasoning.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Iteration 2/10 started.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;217;95;118mThought: You asked about the weather in Virginia, New York, and Washington, D.C., and I provided you with the current temperatures for those locations.\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.react.base:Agent provided a direct final answer.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;217;95;118mThought: Answer: I helped you with the weather for Virginia, New York, and Washington, D.C.\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mI helped you with the weather for Virginia, New York, and Washington, D.C.\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'I helped you with the weather for Virginia, New York, and Washington, D.C.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"AIAgent.run(\"What places did you already help me with the weather?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'role': 'user', 'content': 'Hi my name is Roberto'},\n",
|
||||
" {'content': 'Hello Roberto! How can I assist you today with the weather?',\n",
|
||||
" 'role': 'assistant'},\n",
|
||||
" {'role': 'user',\n",
|
||||
" 'content': 'What is the weather in Virgina, New York and Washington DC?'},\n",
|
||||
" {'content': 'The current weather is as follows:\\n- Virginia: 74°F\\n- New York: 65°F\\n- Washington, D.C.: 66°F',\n",
|
||||
" 'role': 'assistant'},\n",
|
||||
" {'role': 'user',\n",
|
||||
" 'content': 'What places did you already help me with the weather?'},\n",
|
||||
" {'content': 'I helped you with the weather for Virginia, New York, and Washington, D.C.',\n",
|
||||
" 'role': 'assistant'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"AIAgent.chat_history"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,32 +0,0 @@
|
|||
# OpenAPI Agent
|
||||
|
||||
The `OpenAPI Agent` represents a specialized agent designed to interact with the external world by transforming OpenAPI specifications into tools. This agent is crucial for scenarios where precise and authenticated API interactions are necessary, allowing the agent to understand and utilize API endpoints dynamically. By leveraging OpenAPI specifications, the agent can adapt to a wide range of APIs, converting each specification into tools that it can use autonomously.
|
||||
|
||||
## Agents
|
||||
|
||||
| Pattern | Overview |
|
||||
| --- | --- |
|
||||
| [ReAct (Reason + Act) MS Graph](react_agent_openapi_msgraph.ipynb) | An OpenAPI agent that applies the `ReAct` prompting technique, following a chain-of-thought reasoning (Thought, Action, Observation) loop. This agent autonomously selects the appropriate MS Graph API endpoint, performs the call, and integrates the response back into its reasoning cycle. |
|
||||
|
||||
## Tools
|
||||
The `OpenAPI Agent` has two main tools created from OpenAPI specifications to facilitate dynamic API interaction. These tools allow the agent to identify relevant API endpoints and execute API calls effectively. Below is a breakdown of each tool's purpose, inputs, and how it operates within the agent's workflow.
|
||||
|
||||
### get_openapi_definition
|
||||
|
||||
* **Goal**: This tool retrieves a list of relevant API endpoints from OpenAPI specifications that the agent could use to fulfill the user’s query. The tool leverages a vector store to store and search through API definitions, helping the agent narrow down potential APIs based on the task at hand.
|
||||
* **Functionality**:
|
||||
* Similarity Search: Takes the user’s input and queries the `VectorToolStore` to find similar API tools. It ranks potential API endpoints based on similarity to the user’s task and returns the top matches.
|
||||
* Tool Usage: This tool is always called before any API call execution to ensure the agent understands which endpoint to use.
|
||||
|
||||
### open_api_call_executor
|
||||
* **Goal**: This tool is responsible for executing API calls using the specific parameters and configuration associated with the selected OpenAPI endpoint. It provides flexibility to adjust API paths, methods, headers, and query parameters, making it versatile for interacting with any OpenAPI-defined API.
|
||||
* **Functionality**:
|
||||
* API Call Execution: Takes in a structured input of HTTP method, path parameters, headers, and other data required to make the API request.
|
||||
* Endpoint Selection: After get_openapi_definition suggests possible endpoints, this tool is used to execute the chosen endpoint with specific parameters.
|
||||
* Version Management: Ensures the correct API version is used, preventing duplication or misalignment of API path versions.
|
||||
|
||||
## How the Tools Work Together?
|
||||
* Identify Relevant Endpoint: The agent first uses get_openapi_definition to identify a relevant API endpoint based on the user’s query.
|
||||
* Execute API Call: With the selected endpoint, open_api_call_executor is called to make the actual API request, providing the necessary method, parameters, headers, and data.
|
||||
|
||||
This design allows the `OpenAPI Agent` to dynamically interpret and call any API defined within an OpenAPI specification, adapting flexibly to various tasks and user requests.
|
File diff suppressed because one or more lines are too long
|
@ -1,30 +0,0 @@
|
|||
# The Weather Agent
|
||||
|
||||
The Weather Agent represents a basic example of an agent that interacts with the external world through tools, such as APIs. This agent demonstrates how a language model (LLM) can suggest which tool to use and provide the necessary inputs for tool execution. However, it is the agent—not the language model—that executes the tool and processes the results. Once the tool has been executed, the results are passed back to the language model for further suggestions, summaries, or next actions. This agent showcases the foundational concept of integrating language models with external tools to retrieve real-world data, such as weather information.
|
||||
|
||||
## Agents
|
||||
|
||||
| Pattern | Overview |
|
||||
| --- | --- |
|
||||
| [ToolCall (Function Calling)](toolcall_agent.ipynb) | A weather agent that uses OpenAI’s tool calling (Function Calling) to pass tools in JSON schema format. The language model suggests the tool to be used based on the task, but the agent executes the tool and processes the results. |
|
||||
| [ReAct (Reason + Act)](react_agent.ipynb) | A weather agent following the ReAct prompting technique. The language model uses a chain-of-thought reasoning process (Thought, Action, Observation) to suggest the next tool to use. The agent then executes the tool, and the results are fed back into the reasoning loop. |
|
||||
|
||||
## Tools
|
||||
|
||||
* **WeatherTool**: A tool that allows the agent to retrieve weather data by first obtaining geographical coordinates (latitude and longitude) using the Nominatim API. For weather data, the agent either calls the National Weather Service (NWS) API (for locations in the USA) or the Met.no API (for locations outside the USA). This tool is executed by the agent based on the suggestions provided by the language model.
|
||||
* **HistoricalWeather**: A tool that retrieves historical weather data for a specified location and date range. The agent uses the Nominatim API to get the coordinates for the specified location and calls the Open-Meteo Historical Weather API to retrieve temperature data for past dates. This tool allows the agent to compare past weather conditions with current forecasts, providing richer insights.
|
||||
|
||||
### APIs Used
|
||||
|
||||
* Nominatim API: Provides geocoding services to convert city, state, and country into geographical coordinates (latitude and longitude).
|
||||
* Endpoint: https://nominatim.openstreetmap.org/search.php
|
||||
* Purpose: Used to fetch coordinates for a given location, which is then passed to weather APIs.
|
||||
* National Weather Service (NWS) API: Provides weather data for locations within the United States.
|
||||
* Endpoint: https://api.weather.gov
|
||||
* Purpose: Used to retrieve detailed weather forecasts and temperature data for locations in the USA.
|
||||
* Met.no API: Provides weather data for locations outside the United States.
|
||||
* Endpoint: https://api.met.no/weatherapi
|
||||
* Purpose: Used to retrieve weather forecasts and temperature data for locations outside the USA, offering international coverage.
|
||||
* Open-Meteo Historical Weather API: Provides historical weather data for any location worldwide.
|
||||
* Endpoint: https://archive-api.open-meteo.com/v1/archive
|
||||
* Purpose: Used to retrieve historical weather data, including temperature readings for past dates, allowing the agent to analyze past weather conditions and trends.
|
|
@ -1,223 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ReAct Weather Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Environment Variables"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import Modules"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents import Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import Tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from tools import WeatherForecast, HistoricalWeather"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"weather_agent = Agent(\n",
|
||||
" name=\"Weather Agent\",\n",
|
||||
" role=\"Weather Expert\",\n",
|
||||
" pattern=\"react\",\n",
|
||||
" tools=[WeatherForecast(), HistoricalWeather()],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
|
||||
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mwhat will be the difference of temperature in Paris between 7 days ago and 7 from now?\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118mThought: For this, I need to gather two pieces of information: the historical temperature of Paris from 7 days ago and the forecasted temperature for Paris 7 days from now.\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118mI'll start by retrieving the historical temperature data for Paris from 7 days ago.\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126mAction: {\"name\": \"Historicalweather\", \"arguments\": {\"city\": \"Paris\", \"state\": null, \"country\": \"France\", \"start_date\": \"2024-11-04\", \"end_date\": \"2024-11-04\"}}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;146;94;130mObservation: {'city': 'Paris', 'state': None, 'country': 'France', 'start_date': '2024-11-04', 'end_date': '2024-11-04', 'temperature_data': {'2024-11-04T00:00': 6.8, '2024-11-04T01:00': 8.7, '2024-11-04T02:00': 8.7, '2024-11-04T03:00': 8.6, '2024-11-04T04:00': 7.9, '2024-11-04T05:00': 7.3, '2024-11-04T06:00': 7.0, '2024-11-04T07:00': 6.8, '2024-11-04T08:00': 6.9, '2024-11-04T09:00': 7.3, '2024-11-04T10:00': 8.0, '2024-11-04T11:00': 9.6, '2024-11-04T12:00': 11.3, '2024-11-04T13:00': 14.0, '2024-11-04T14:00': 14.5, '2024-11-04T15:00': 14.7, '2024-11-04T16:00': 12.6, '2024-11-04T17:00': 11.2, '2024-11-04T18:00': 9.8, '2024-11-04T19:00': 9.1, '2024-11-04T20:00': 8.7, '2024-11-04T21:00': 8.0, '2024-11-04T22:00': 8.0, '2024-11-04T23:00': 7.3}, 'unit': '°C'}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118mThought: I have obtained the historical temperatures for Paris on November 4, 2024. Next, I need to obtain the forecasted temperature for Paris 7 days from now, which will be November 18, 2024.\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126mAction: {\"name\": \"Weatherforecast\", \"arguments\": {\"city\": \"Paris\", \"state\": null, \"country\": \"France\"}}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;146;94;130mObservation: {'city': 'Paris', 'state': None, 'country': 'France', 'temperature': 7.0, 'unit': 'celsius'}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118mThought: I now have sufficient information to calculate the temperature difference between 7 days ago and 7 days from now in Paris.\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118mAnswer: The average temperature on November 4, 2024, based on the historical data I retrieved, was approximately 9.3°C. The forecasted temperature for Paris on November 18, 2024, is 7.0°C. Therefore, the temperature difference is approximately 2.3°C, with the conditions expected to be cooler on November 18 compared to November 4.\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mThe average temperature on November 4, 2024, based on the historical data I retrieved, was approximately 9.3°C. The forecasted temperature for Paris on November 18, 2024, is 7.0°C. Therefore, the temperature difference is approximately 2.3°C, with the conditions expected to be cooler on November 18 compared to November 4.\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The average temperature on November 4, 2024, based on the historical data I retrieved, was approximately 9.3°C. The forecasted temperature for Paris on November 18, 2024, is 7.0°C. Therefore, the temperature difference is approximately 2.3°C, with the conditions expected to be cooler on November 18 compared to November 4.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"weather_agent.run(\"what will be the difference of temperature in Paris between 7 days ago and 7 from now?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'content': 'what will be the difference of temperature in Paris between 7 days ago and 7 from now?',\n",
|
||||
" 'role': 'user'},\n",
|
||||
" {'content': 'The average temperature on November 4, 2024, based on the historical data I retrieved, was approximately 9.3°C. The forecasted temperature for Paris on November 18, 2024, is 7.0°C. Therefore, the temperature difference is approximately 2.3°C, with the conditions expected to be cooler on November 18 compared to November 4.',\n",
|
||||
" 'role': 'assistant'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"weather_agent.chat_history"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"weather_agent.run(\"What was the weather like in Paris two days ago?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,264 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ToolCall Weather Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Environment Variables"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import Modules"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents import Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import Tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from tools import WeatherForecast, HistoricalWeather"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"weather_agent = Agent(\n",
|
||||
" name=\"Weather Agent\",\n",
|
||||
" role=\"Weather Expert\",\n",
|
||||
" tools=[WeatherForecast(),HistoricalWeather()],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
|
||||
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mwhat is the weather in Paris?\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118massistant(tool_call):\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mFunction name: Weatherforecast (Call Id: call_qyfgmgDAJSrRM58Hb83AtdDh)\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mArguments: {\"city\":\"Paris\",\"country\":\"france\"}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126mtool(Id: call_qyfgmgDAJSrRM58Hb83AtdDh):\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126m\u001b[0m\u001b[38;2;191;69;126m{'city': 'Paris', 'state': None, 'country': 'france', 'temperature': 4.6, 'unit': 'celsius'}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mThe current temperature in Paris, France is 4.6°C.\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The current temperature in Paris, France is 4.6°C.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"weather_agent.run(\"what is the weather in Paris?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
|
||||
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mwhat was the weather like in Paris two days ago?\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118massistant(tool_call):\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mFunction name: Historicalweather (Call Id: call_VANaENO9iXLhOuWKOAnV769o)\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mArguments: {\"city\":\"Paris\",\"country\":\"france\",\"start_date\":\"2024-11-25\",\"end_date\":\"2024-11-25\"}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126mtool(Id: call_VANaENO9iXLhOuWKOAnV769o):\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126m\u001b[0m\u001b[38;2;191;69;126m{'city': 'Paris', 'state': None, 'country': 'france', 'start_date': '2024-11-25', 'end_date': '2024-11-25', 'temperature_data': {'2024-11-25T00:00': 16.9, '2024-11-25T01:00': 17.0, '2024-11-25T02:00': 17.4, '2024-11-25T03:00': 17.7, '2024-11-25T04:00': 17.8, '2024-11-25T05:00': 17.6, '2024-11-25T06:00': 16.8, '2024-11-25T07:00': 15.5, '2024-11-25T08:00': 14.6, '2024-11-25T09:00': 14.2, '2024-11-25T10:00': 13.5, '2024-11-25T11:00': 12.2, '2024-11-25T12:00': 11.1, '2024-11-25T13:00': 9.8, '2024-11-25T14:00': 9.9, '2024-11-25T15:00': 10.0, '2024-11-25T16:00': 9.8, '2024-11-25T17:00': 9.3, '2024-11-25T18:00': 9.1, '2024-11-25T19:00': 8.7, '2024-11-25T20:00': 8.4, '2024-11-25T21:00': 8.4, '2024-11-25T22:00': 8.6, '2024-11-25T23:00': 8.2}, 'unit': '°C'}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mOn November 25, 2024, the temperature in Paris was as follows:\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- Midnight: 16.9°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 01:00: 17.0°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 02:00: 17.4°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 03:00: 17.7°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 04:00: 17.8°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 05:00: 17.6°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 06:00: 16.8°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 07:00: 15.5°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 08:00: 14.6°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 09:00: 14.2°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 10:00: 13.5°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 11:00: 12.2°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 12:00: 11.1°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 13:00: 9.8°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 14:00: 9.9°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 15:00: 10.0°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 16:00: 9.8°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 17:00: 9.3°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 18:00: 9.1°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 19:00: 8.7°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 20:00: 8.4°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 21:00: 8.4°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 22:00: 8.6°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m- 23:00: 8.2°C\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183mThe day started relatively warm in the early hours and cooled down throughout the day and into the evening.\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'On November 25, 2024, the temperature in Paris was as follows:\\n\\n- Midnight: 16.9°C\\n- 01:00: 17.0°C\\n- 02:00: 17.4°C\\n- 03:00: 17.7°C\\n- 04:00: 17.8°C\\n- 05:00: 17.6°C\\n- 06:00: 16.8°C\\n- 07:00: 15.5°C\\n- 08:00: 14.6°C\\n- 09:00: 14.2°C\\n- 10:00: 13.5°C\\n- 11:00: 12.2°C\\n- 12:00: 11.1°C\\n- 13:00: 9.8°C\\n- 14:00: 9.9°C\\n- 15:00: 10.0°C\\n- 16:00: 9.8°C\\n- 17:00: 9.3°C\\n- 18:00: 9.1°C\\n- 19:00: 8.7°C\\n- 20:00: 8.4°C\\n- 21:00: 8.4°C\\n- 22:00: 8.6°C\\n- 23:00: 8.2°C\\n\\nThe day started relatively warm in the early hours and cooled down throughout the day and into the evening.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"weather_agent.run(\"what was the weather like in Paris two days ago?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,191 +0,0 @@
|
|||
from typing import Optional
|
||||
from dapr_agents import AgentTool
|
||||
from datetime import datetime
|
||||
import requests
|
||||
import time
|
||||
|
||||
class WeatherForecast(AgentTool):
|
||||
name: str = 'WeatherForecast'
|
||||
description: str = 'A tool for retrieving the weather/temperature for a given city.'
|
||||
|
||||
# Default user agent
|
||||
user_agent: str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15"
|
||||
|
||||
def handle_error(self, response: requests.Response, url: str, stage: str) -> None:
|
||||
"""Handles error responses and raises a ValueError with detailed information."""
|
||||
if response.status_code != 200:
|
||||
raise ValueError(
|
||||
f"Failed to get data during {stage}. Status: {response.status_code}. "
|
||||
f"URL: {url}. Response: {response.text}"
|
||||
)
|
||||
if not response.json():
|
||||
raise ValueError(
|
||||
f"No data found during {stage}. URL: {url}. Response: {response.text}"
|
||||
)
|
||||
|
||||
def _run(self, city: str, state: Optional[str] = None, country: Optional[str] = "usa") -> dict:
|
||||
"""
|
||||
Retrieves weather data by first fetching geocode data for the city and then fetching weather data.
|
||||
|
||||
Args:
|
||||
city (str): The name of the city to get weather for.
|
||||
state (Optional[str]): The two-letter state abbreviation (optional).
|
||||
country (Optional[str]): The two-letter country abbreviation. Defaults to 'usa'.
|
||||
|
||||
Returns:
|
||||
dict: A dictionary containing the city, state, country, and current temperature.
|
||||
"""
|
||||
headers = {
|
||||
"User-Agent": self.user_agent
|
||||
}
|
||||
|
||||
# Construct the geocode URL, conditionally including the state if it's provided
|
||||
geocode_url = f"https://nominatim.openstreetmap.org/search?city={city}&country={country}"
|
||||
if state:
|
||||
geocode_url += f"&state={state}"
|
||||
geocode_url += "&limit=1&format=jsonv2"
|
||||
|
||||
# Geocode request
|
||||
geocode_response = requests.get(geocode_url, headers=headers)
|
||||
self.handle_error(geocode_response, geocode_url, "geocode lookup")
|
||||
|
||||
# Add delay between requests
|
||||
time.sleep(2)
|
||||
|
||||
geocode_data = geocode_response.json()
|
||||
lat, lon = geocode_data[0]["lat"], geocode_data[0]["lon"]
|
||||
|
||||
# Use different APIs based on the country
|
||||
if country.lower() == "usa":
|
||||
# Weather.gov request for USA
|
||||
weather_gov_url = f"https://api.weather.gov/points/{lat},{lon}"
|
||||
weather_response = requests.get(weather_gov_url, headers=headers)
|
||||
self.handle_error(weather_response, weather_gov_url, "weather lookup")
|
||||
|
||||
# Add delay between requests
|
||||
time.sleep(2)
|
||||
|
||||
weather_data = weather_response.json()
|
||||
forecast_url = weather_data["properties"]["forecast"]
|
||||
|
||||
# Forecast request
|
||||
forecast_response = requests.get(forecast_url, headers=headers)
|
||||
self.handle_error(forecast_response, forecast_url, "forecast lookup")
|
||||
|
||||
forecast_data = forecast_response.json()
|
||||
today_forecast = forecast_data["properties"]["periods"][0]
|
||||
|
||||
# Return the weather data along with the city, state, and country
|
||||
return {
|
||||
"city": city,
|
||||
"state": state,
|
||||
"country": country,
|
||||
"temperature": today_forecast["temperature"],
|
||||
"unit": "Fahrenheit"
|
||||
}
|
||||
|
||||
else:
|
||||
# Met.no API for non-USA countries
|
||||
met_no_url = f"https://api.met.no/weatherapi/locationforecast/2.0/compact?lat={lat}&lon={lon}"
|
||||
weather_response = requests.get(met_no_url, headers=headers)
|
||||
self.handle_error(weather_response, met_no_url, "Met.no weather lookup")
|
||||
|
||||
weather_data = weather_response.json()
|
||||
temperature_unit = weather_data["properties"]["meta"]["units"]["air_temperature"]
|
||||
today_forecast = weather_data["properties"]["timeseries"][0]["data"]["instant"]["details"]["air_temperature"]
|
||||
|
||||
# Return the weather data along with the city, state, and country
|
||||
return {
|
||||
"city": city,
|
||||
"state": state,
|
||||
"country": country,
|
||||
"temperature": today_forecast,
|
||||
"unit": temperature_unit
|
||||
}
|
||||
|
||||
class HistoricalWeather(AgentTool):
|
||||
name: str = 'HistoricalWeather'
|
||||
description: str = 'A tool for retrieving historical weather data (temperature) for a given city.'
|
||||
|
||||
# Default user agent
|
||||
user_agent: str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15"
|
||||
|
||||
def handle_error(self, response: requests.Response, url: str, stage: str) -> None:
|
||||
"""Handles error responses and raises a ValueError with detailed information."""
|
||||
if response.status_code != 200:
|
||||
raise ValueError(
|
||||
f"Failed to get data during {stage}. Status: {response.status_code}. "
|
||||
f"URL: {url}. Response: {response.text}"
|
||||
)
|
||||
if not response.json():
|
||||
raise ValueError(
|
||||
f"No data found during {stage}. URL: {url}. Response: {response.text}"
|
||||
)
|
||||
|
||||
def _run(self, city: str, state: Optional[str] = None, country: Optional[str] = "usa", start_date: Optional[str] = None, end_date: Optional[str] = None) -> dict:
|
||||
"""
|
||||
Retrieves historical weather data for the city by first fetching geocode data and then historical weather data.
|
||||
|
||||
Args:
|
||||
city (str): The name of the city to get weather for.
|
||||
state (Optional[str]): The two-letter state abbreviation (optional).
|
||||
country (Optional[str]): The two-letter country abbreviation. Defaults to 'usa'.
|
||||
start_date (Optional[str]): Start date for historical data (YYYY-MM-DD format).
|
||||
end_date (Optional[str]): End date for historical data (YYYY-MM-DD format).
|
||||
|
||||
Returns:
|
||||
dict: A dictionary containing the city, state, country, and historical temperature data.
|
||||
"""
|
||||
headers = {
|
||||
"User-Agent": self.user_agent
|
||||
}
|
||||
|
||||
# Validate dates
|
||||
current_date = datetime.now().strftime('%Y-%m-%d')
|
||||
if start_date >= current_date or end_date >= current_date:
|
||||
raise ValueError("Both start_date and end_date must be earlier than the current date.")
|
||||
|
||||
if (datetime.strptime(end_date, "%Y-%m-%d") - datetime.strptime(start_date, "%Y-%m-%d")).days > 30:
|
||||
raise ValueError("The time span between start_date and end_date cannot exceed 30 days.")
|
||||
|
||||
# Construct the geocode URL, conditionally including the state if it's provided
|
||||
geocode_url = f"https://nominatim.openstreetmap.org/search?city={city}&country={country}"
|
||||
if state:
|
||||
geocode_url += f"&state={state}"
|
||||
geocode_url += "&limit=1&format=jsonv2"
|
||||
|
||||
# Geocode request
|
||||
geocode_response = requests.get(geocode_url, headers=headers)
|
||||
self.handle_error(geocode_response, geocode_url, "geocode lookup")
|
||||
|
||||
# Add delay between requests
|
||||
time.sleep(2)
|
||||
|
||||
geocode_data = geocode_response.json()
|
||||
lat, lon = geocode_data[0]["lat"], geocode_data[0]["lon"]
|
||||
|
||||
# Historical weather request
|
||||
historical_weather_url = f"https://archive-api.open-meteo.com/v1/archive?latitude={lat}&longitude={lon}&start_date={start_date}&end_date={end_date}&hourly=temperature_2m"
|
||||
weather_response = requests.get(historical_weather_url, headers=headers)
|
||||
self.handle_error(weather_response, historical_weather_url, "historical weather lookup")
|
||||
|
||||
weather_data = weather_response.json()
|
||||
|
||||
# Extract time and temperature data
|
||||
timestamps = weather_data["hourly"]["time"]
|
||||
temperatures = weather_data["hourly"]["temperature_2m"]
|
||||
temperature_unit = weather_data["hourly_units"]["temperature_2m"]
|
||||
|
||||
# Combine timestamps and temperatures into a dictionary
|
||||
temperature_data = {timestamps[i]: temperatures[i] for i in range(len(timestamps))}
|
||||
|
||||
# Return the structured weather data along with the city, state, country
|
||||
return {
|
||||
"city": city,
|
||||
"state": state,
|
||||
"country": country,
|
||||
"start_date": start_date,
|
||||
"end_date": end_date,
|
||||
"temperature_data": temperature_data,
|
||||
"unit": temperature_unit
|
||||
}
|
File diff suppressed because one or more lines are too long
|
@ -1,462 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# GraphStore: Neo4j Database Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `Neo4jGraphStore` in `dapr-agents` for basic graph-based tasks. We will explore:\n",
|
||||
"\n",
|
||||
"* Initializing the `Neo4jGraphStore` class.\n",
|
||||
"* Adding sample nodes.\n",
|
||||
"* Adding one sample relationship.\n",
|
||||
"* Querying graph database.\n",
|
||||
"* Resseting database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"\n",
|
||||
"Ensure dapr_agents and neo4j are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv neo4j"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import Environment Variables\n",
|
||||
"\n",
|
||||
"Load your API keys or other configuration values using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv() # Load environment variables from a `.env` file"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Enable Logging"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.INFO)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Deploy Neo4j Graph Database as Docker Container"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#docker run \\\n",
|
||||
"#--restart always \\\n",
|
||||
"#--publish=7474:7474 --publish=7687:7687 \\\n",
|
||||
"#--env NEO4J_AUTH=neo4j/graphwardog \\\n",
|
||||
"#--volume=neo4j-data \\\n",
|
||||
"#--name neo4j-apoc \\\n",
|
||||
"#--env NEO4J_apoc_export_file_enabled=true \\\n",
|
||||
"#--env NEO4J_apoc_import_file_enabled=true \\\n",
|
||||
"#--env NEO4J_apoc_import_file_use__neo4j__config=true \\\n",
|
||||
"#--env NEO4J_PLUGINS=\\[\\\"apoc\\\"\\] \\\n",
|
||||
"#neo4j:latest"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Neo4jGraphStore\n",
|
||||
"\n",
|
||||
"Set the `NEO4J_URI`, `NEO4J_USERNAME` and `NEO4J_PASSWORD` variables in a `.env` file. The URI can be set to `bolt://localhost:7687`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.client:Successfully created the driver for URI: bolt://localhost:7687\n",
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.base:Neo4jGraphStore initialized with database neo4j\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents.storage.graphstores.neo4j import Neo4jGraphStore\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# Initialize Neo4jGraphStore\n",
|
||||
"graph_store = Neo4jGraphStore(\n",
|
||||
" uri=os.getenv(\"NEO4J_URI\"),\n",
|
||||
" user=os.getenv(\"NEO4J_USERNAME\"),\n",
|
||||
" password=os.getenv(\"NEO4J_PASSWORD\"),\n",
|
||||
" database=\"neo4j\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.client:Connected to Neo4j Kernel version 5.15.0 (community edition)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Neo4j connection successful\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Test the connection\n",
|
||||
"assert graph_store.client.test_connection(), \"Connection to Neo4j failed\"\n",
|
||||
"print(\"Neo4j connection successful\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Add Sample Nodes\n",
|
||||
"Create and add nodes to the graph store:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.base:Processed batch 1/1\n",
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.base:Nodes with label `Person` added successfully.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Nodes added successfully\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents.types import Node\n",
|
||||
"\n",
|
||||
"# Sample nodes\n",
|
||||
"nodes = [\n",
|
||||
" Node(\n",
|
||||
" id=\"1\",\n",
|
||||
" label=\"Person\",\n",
|
||||
" properties={\"name\": \"Alice\", \"age\": 30},\n",
|
||||
" additional_labels=[\"Employee\"]\n",
|
||||
" ),\n",
|
||||
" Node(\n",
|
||||
" id=\"2\",\n",
|
||||
" label=\"Person\",\n",
|
||||
" properties={\"name\": \"Bob\", \"age\": 25},\n",
|
||||
" additional_labels=[\"Contractor\"]\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Add nodes\n",
|
||||
"graph_store.add_nodes(nodes)\n",
|
||||
"print(\"Nodes added successfully\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Add Sample Relationship\n",
|
||||
"Create and add a relationship to the graph store:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.base:Processed batch 1/1\n",
|
||||
"INFO:neo4j.notifications:Received notification from DBMS server: {severity: INFORMATION} {code: Neo.ClientNotification.Statement.CartesianProduct} {category: PERFORMANCE} {title: This query builds a cartesian product between disconnected patterns.} {description: If a part of a query contains multiple disconnected patterns, this will build a cartesian product between all those parts. This may produce a large amount of data and slow down query processing. While occasionally intended, it may often be possible to reformulate the query that avoids the use of this cross product, perhaps by adding a relationship between the different parts or by using OPTIONAL MATCH (identifier is: (b))} {position: line: 3, column: 25, offset: 45} for query: '\\n UNWIND $data AS rel\\n MATCH (a {id: rel.source_node_id}), (b {id: rel.target_node_id})\\n MERGE (a)-[r:`KNOWS`]->(b)\\n ON CREATE SET r.createdAt = rel.current_time\\n SET r.updatedAt = rel.current_time, r += rel.properties\\n RETURN r\\n '\n",
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.base:Relationships of type `KNOWS` added successfully.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Relationships added successfully\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents.types import Relationship\n",
|
||||
"\n",
|
||||
"# Sample relationships\n",
|
||||
"relationships = [\n",
|
||||
" Relationship(\n",
|
||||
" source_node_id=\"1\",\n",
|
||||
" target_node_id=\"2\",\n",
|
||||
" type=\"KNOWS\",\n",
|
||||
" properties={\"since\": \"2023\"}\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Add relationships\n",
|
||||
"graph_store.add_relationships(relationships)\n",
|
||||
"print(\"Relationships added successfully\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Query Graph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.base:Query executed successfully: MATCH (n) RETURN n | Time: 0.06 seconds | Results: 2\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Nodes in the database:\n",
|
||||
"{'n': {'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Alice', 'id': '1', 'age': 30, 'updatedAt': '2025-03-04T10:55:57.109885Z'}}\n",
|
||||
"{'n': {'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Bob', 'id': '2', 'age': 25, 'updatedAt': '2025-03-04T10:55:57.109885Z'}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"MATCH (n) RETURN n\"\n",
|
||||
"results = graph_store.query(query)\n",
|
||||
"print(\"Nodes in the database:\")\n",
|
||||
"for record in results:\n",
|
||||
" print(record)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.base:Query executed successfully: \n",
|
||||
"MATCH (a)-[r]->(b)\n",
|
||||
"RETURN a.id AS source, b.id AS target, type(r) AS type, properties(r) AS properties\n",
|
||||
" | Time: 0.07 seconds | Results: 1\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Relationships in the database:\n",
|
||||
"{'source': '1', 'target': '2', 'type': 'KNOWS', 'properties': {'updatedAt': '2025-03-04T10:55:59.835379Z', 'createdAt': '2025-03-04T10:55:59.835379Z', 'since': '2023'}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"\"\"\n",
|
||||
"MATCH (a)-[r]->(b)\n",
|
||||
"RETURN a.id AS source, b.id AS target, type(r) AS type, properties(r) AS properties\n",
|
||||
"\"\"\"\n",
|
||||
"results = graph_store.query(query)\n",
|
||||
"print(\"Relationships in the database:\")\n",
|
||||
"for record in results:\n",
|
||||
" print(record)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.base:Query executed successfully: \n",
|
||||
"MATCH (n)-[r]->(m)\n",
|
||||
"RETURN n, r, m\n",
|
||||
" | Time: 0.05 seconds | Results: 1\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Nodes and relationships in the database:\n",
|
||||
"{'n': {'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Alice', 'id': '1', 'age': 30, 'updatedAt': '2025-03-04T10:55:57.109885Z'}, 'r': ({'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Alice', 'id': '1', 'age': 30, 'updatedAt': '2025-03-04T10:55:57.109885Z'}, 'KNOWS', {'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Bob', 'id': '2', 'age': 25, 'updatedAt': '2025-03-04T10:55:57.109885Z'}), 'm': {'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Bob', 'id': '2', 'age': 25, 'updatedAt': '2025-03-04T10:55:57.109885Z'}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"\"\"\n",
|
||||
"MATCH (n)-[r]->(m)\n",
|
||||
"RETURN n, r, m\n",
|
||||
"\"\"\"\n",
|
||||
"results = graph_store.query(query)\n",
|
||||
"print(\"Nodes and relationships in the database:\")\n",
|
||||
"for record in results:\n",
|
||||
" print(record)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Reset Graph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.base:Database reset successfully\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Graph database has been reset.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"graph_store.reset()\n",
|
||||
"print(\"Graph database has been reset.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.storage.graphstores.neo4j.base:Query executed successfully: MATCH (n) RETURN n | Time: 0.01 seconds | Results: 0\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Nodes in the database:\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"MATCH (n) RETURN n\"\n",
|
||||
"results = graph_store.query(query)\n",
|
||||
"print(\"Nodes in the database:\")\n",
|
||||
"for record in results:\n",
|
||||
" print(record)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,286 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM: Azure OpenAI Chat Endpoint Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `OpenAIChatClient` in `dapr-agents` for basic tasks with the Azure OpenAI Chat API. We will explore:\n",
|
||||
"\n",
|
||||
"* Initializing the OpenAI Chat client.\n",
|
||||
"* Generating responses to simple prompts.\n",
|
||||
"* Using a `.prompty` file to provide context/history for enhanced generation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Environment Variables\n",
|
||||
"\n",
|
||||
"Load API keys or other configuration values from your `.env` file using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import OpenAIChatClient"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents import OpenAIChatClient"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic Chat Completion\n",
|
||||
"\n",
|
||||
"Initialize the `OpenAIChatClient` and generate a response to a simple prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize the client\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"llm = OpenAIChatClient(\n",
|
||||
" #api_key=os.getenv(\"AZURE_OPENAI_API_KEY\") # or add AZURE_OPENAI_API_KEY environment variable to .env file\n",
|
||||
" azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"), # or add AZURE_OPENAI_ENDPOINT environment variable to .env file\n",
|
||||
" azure_deployment=\"gpt-4o\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content='One famous dog is Lassie, a fictional Rough Collie known from movies, television series, and books for her intelligence and bravery.', role='assistant'), logprobs=None)], created=1741085078, id='chatcmpl-B7K3KbzErY3CMSoknZyDUSAN52xzL', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 27, 'prompt_tokens': 12, 'total_tokens': 39, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"# Generate a response\n",
|
||||
"response = llm.generate('Name a famous dog!')\n",
|
||||
"\n",
|
||||
"# Display the response\n",
|
||||
"response"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'content': 'One famous dog is Lassie, a fictional Rough Collie known from movies, television series, and books for her intelligence and bravery.', 'role': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(response.get_message())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using a Prompty File for Context\n",
|
||||
"\n",
|
||||
"Use a `.prompty` file to provide context for chat history or additional instructions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAIChatClient.from_prompty('basic-azopenai-chat.prompty')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatPromptTemplate(input_variables=['question'], pre_filled_variables={}, messages=[SystemMessage(content='You are an AI assistant who helps people find information.\\nAs the assistant, you answer questions briefly, succinctly.', role='system'), UserMessage(content='{{question}}', role='user')], template_format='jinja2')"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm.prompt_template"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content=\"I am an AI assistant and don't have a personal name, but you can call me Assistant.\", role='assistant'), logprobs=None)], created=1741085084, id='chatcmpl-B7K3QXh8FWH8odMdwUI61eXieb0zk', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 19, 'prompt_tokens': 39, 'total_tokens': 58, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm.generate(input_data={\"question\":\"What is your name?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chat Completion with Messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize the client\n",
|
||||
"llm = OpenAIChatClient(\n",
|
||||
" api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"), # or add AZURE_OPENAI_API_KEY environment variable to .env file\n",
|
||||
" #azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"), # or add AZURE_OPENAI_ENDPOINT environment variable to .env file\n",
|
||||
" azure_deployment=\"gpt-4o\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'content': 'Hello! How can I assist you today?', 'role': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents.types import UserMessage\n",
|
||||
"\n",
|
||||
"# Generate a response using structured messages\n",
|
||||
"response = llm.generate(messages=[UserMessage(\"hello\")])\n",
|
||||
"\n",
|
||||
"# Display the structured response\n",
|
||||
"print(response.get_message())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm.prompt_template"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
---
|
||||
name: Basic Prompt
|
||||
description: A basic prompt that uses the Azure OpenAI chat API to answer questions
|
||||
model:
|
||||
api: chat
|
||||
configuration:
|
||||
type: azure_openai
|
||||
azure_deployment: gpt-4o
|
||||
parameters:
|
||||
max_tokens: 128
|
||||
temperature: 0.2
|
||||
inputs:
|
||||
question:
|
||||
type: string
|
||||
sample:
|
||||
"question": "Who is the most famous person in the world?"
|
||||
---
|
||||
system:
|
||||
You are an AI assistant who helps people find information.
|
||||
As the assistant, you answer questions briefly, succinctly.
|
||||
|
||||
user:
|
||||
{{question}}
|
|
@ -1,23 +0,0 @@
|
|||
---
|
||||
name: Basic Prompt
|
||||
description: A basic prompt that uses the chat API to answer questions
|
||||
model:
|
||||
api: chat
|
||||
configuration:
|
||||
type: huggingface
|
||||
name: microsoft/Phi-3-mini-4k-instruct
|
||||
parameters:
|
||||
max_tokens: 128
|
||||
temperature: 0.2
|
||||
inputs:
|
||||
question:
|
||||
type: string
|
||||
sample:
|
||||
"question": "Who is the most famous person in the world?"
|
||||
---
|
||||
system:
|
||||
You are an AI assistant who helps people find information.
|
||||
As the assistant, you answer questions briefly, succinctly.
|
||||
|
||||
user:
|
||||
{{question}}
|
|
@ -1,23 +0,0 @@
|
|||
---
|
||||
name: Basic Prompt
|
||||
description: A basic prompt that uses the chat API to answer questions
|
||||
model:
|
||||
api: chat
|
||||
configuration:
|
||||
type: nvidia
|
||||
name: meta/llama3-8b-instruct
|
||||
parameters:
|
||||
max_tokens: 128
|
||||
temperature: 0.2
|
||||
inputs:
|
||||
question:
|
||||
type: string
|
||||
sample:
|
||||
"question": "Who is the most famous person in the world?"
|
||||
---
|
||||
system:
|
||||
You are an AI assistant who helps people find information.
|
||||
As the assistant, you answer questions briefly, succinctly.
|
||||
|
||||
user:
|
||||
{{question}}
|
|
@ -1,30 +0,0 @@
|
|||
---
|
||||
name: Basic Prompt
|
||||
description: A basic prompt that uses the chat API to answer questions
|
||||
model:
|
||||
api: chat
|
||||
configuration:
|
||||
type: openai
|
||||
name: gpt-4o
|
||||
parameters:
|
||||
max_tokens: 128
|
||||
temperature: 0.2
|
||||
inputs:
|
||||
question:
|
||||
type: string
|
||||
chat_history:
|
||||
type: list
|
||||
default: []
|
||||
---
|
||||
system:
|
||||
You are an AI assistant who helps people find information.
|
||||
As the assistant, you answer questions briefly, succinctly,
|
||||
and in a personable manner using markdown and even add some personal flair with appropriate emojis.
|
||||
|
||||
{% for item in chat_history %}
|
||||
{{item.role}}:
|
||||
{{item.content}}
|
||||
{% endfor %}
|
||||
|
||||
user:
|
||||
{{question}}
|
|
@ -1,23 +0,0 @@
|
|||
---
|
||||
name: Basic Prompt
|
||||
description: A basic prompt that uses the chat API to answer questions
|
||||
model:
|
||||
api: chat
|
||||
configuration:
|
||||
type: openai
|
||||
name: gpt-4o
|
||||
parameters:
|
||||
max_tokens: 128
|
||||
temperature: 0.2
|
||||
inputs:
|
||||
question:
|
||||
type: string
|
||||
sample:
|
||||
"question": "Who is the most famous person in the world?"
|
||||
---
|
||||
system:
|
||||
You are an AI assistant who helps people find information.
|
||||
As the assistant, you answer questions briefly, succinctly.
|
||||
|
||||
user:
|
||||
{{question}}
|
|
@ -1,187 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM: ElevenLabs Text-To-Speech Endpoint Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `ElevenLabsSpeechClient` in dapr-agents for basic tasks with the [ElevenLabs Text-To-Speech Endpoint](https://elevenlabs.io/docs/api-reference/text-to-speech/convert). We will explore:\n",
|
||||
"\n",
|
||||
"* Initializing the `ElevenLabsSpeechClient`.\n",
|
||||
"* Generating speech from text and saving it as an MP3 file.."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"\n",
|
||||
"Ensure you have the required library installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv elevenlabs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Environment Variables"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Enable Logging"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.INFO)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize ElevenLabsSpeechClient\n",
|
||||
"\n",
|
||||
"Initialize the `ElevenLabsSpeechClient`. By default the voice is set to: `voice_id=EXAVITQu4vr4xnSDxMaL\",name=\"Sarah\"`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents import ElevenLabsSpeechClient\n",
|
||||
"\n",
|
||||
"client = ElevenLabsSpeechClient(\n",
|
||||
" model=\"eleven_multilingual_v2\", # Default model\n",
|
||||
" voice=\"JBFqnCBsd6RMkjVDRZzb\" # 'name': 'George', 'language': 'en', 'labels': {'accent': 'British', 'description': 'warm', 'age': 'middle aged', 'gender': 'male', 'use_case': 'narration'}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Generate Speech from Text\n",
|
||||
"\n",
|
||||
"### Manual File Creation\n",
|
||||
"\n",
|
||||
"This section demonstrates how to generate speech from a given text input and save it as an MP3 file."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define the text to convert to speech\n",
|
||||
"text = \"Hello Roberto! This is an example of text-to-speech generation.\"\n",
|
||||
"\n",
|
||||
"# Create speech from text\n",
|
||||
"audio_bytes = client.create_speech(\n",
|
||||
" text=text,\n",
|
||||
" output_format=\"mp3_44100_128\" # default output format, mp3 with 44.1kHz sample rate at 128kbps.\n",
|
||||
")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Save the audio to an MP3 file\n",
|
||||
"output_path = \"output_speech.mp3\"\n",
|
||||
"with open(output_path, \"wb\") as audio_file:\n",
|
||||
" audio_file.write(audio_bytes)\n",
|
||||
"\n",
|
||||
"print(f\"Audio saved to {output_path}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Automatic File Creation\n",
|
||||
"\n",
|
||||
"The audio file is saved directly by providing the file_name parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define the text to convert to speech\n",
|
||||
"text = \"Hello Roberto! This is another example of text-to-speech generation.\"\n",
|
||||
"\n",
|
||||
"# Create speech from text\n",
|
||||
"client.create_speech(\n",
|
||||
" text=text,\n",
|
||||
" output_format=\"mp3_44100_128\", # default output format, mp3 with 44.1kHz sample rate at 128kbps.,\n",
|
||||
" file_name='output_speech_auto.mp3'\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,342 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM: Hugging Face Chat Endpoint Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `HFHubChatClient` in `dapr-agents` for basic tasks with the Hugging Face Chat API. We will explore:\n",
|
||||
"\n",
|
||||
"* Initializing the Hugging Face Chat client.\n",
|
||||
"* Generating responses to simple prompts.\n",
|
||||
"* Using a `.prompty` file to provide context/history for enhanced generation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Environment Variables\n",
|
||||
"\n",
|
||||
"Load API keys or other configuration values from your `.env` file using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Enable Logging"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.INFO)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import HFHubChatClient"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents import HFHubChatClient"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic Chat Completion\n",
|
||||
"\n",
|
||||
"Initialize the `HFHubChatClient`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"llm = HFHubChatClient(\n",
|
||||
" api_key=os.getenv(\"HUGGINGFACE_API_KEY\"),\n",
|
||||
" model=\"microsoft/Phi-3-mini-4k-instruct\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Generate a response to a simple prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.llm.huggingface.chat:Invoking Hugging Face ChatCompletion API.\n",
|
||||
"INFO:dapr_agents.llm.huggingface.chat:Chat completion retrieved successfully.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Generate a response\n",
|
||||
"response = llm.generate('Name a famous dog!')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content='A famous dog is Lassie. Lassie was a fictional collie first introduced in the 1943 film \"Lassie Come Home.\" She went on to have her own television series that aired from 1954 to 1973, in which she starred as Rin Tin Tin Jr. Her adventurous and heroic stories captured the hearts of audiences worldwide, and she became an iconic figure in the world of television.', role='assistant'), logprobs=None)], created=1741085108, id='', model='microsoft/Phi-3-mini-4k-instruct', object='chat.completion', usage={'completion_tokens': 105, 'prompt_tokens': 8, 'total_tokens': 113})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Display the response\n",
|
||||
"response"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'content': 'A famous dog is Lassie. Lassie was a fictional collie first introduced in the 1943 film \"Lassie Come Home.\" She went on to have her own television series that aired from 1954 to 1973, in which she starred as Rin Tin Tin Jr. Her adventurous and heroic stories captured the hearts of audiences worldwide, and she became an iconic figure in the world of television.',\n",
|
||||
" 'role': 'assistant'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response.get_message()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'A famous dog is Lassie. Lassie was a fictional collie first introduced in the 1943 film \"Lassie Come Home.\" She went on to have her own television series that aired from 1954 to 1973, in which she starred as Rin Tin Tin Jr. Her adventurous and heroic stories captured the hearts of audiences worldwide, and she became an iconic figure in the world of television.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response.get_content()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using a Prompty File for Context\n",
|
||||
"\n",
|
||||
"Use a `.prompty` file to provide context for chat history or additional instructions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = HFHubChatClient.from_prompty('basic-hf-chat.prompty')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.llm.huggingface.chat:Using prompt template to generate messages.\n",
|
||||
"INFO:dapr_agents.llm.huggingface.chat:Invoking Hugging Face ChatCompletion API.\n",
|
||||
"INFO:dapr_agents.llm.huggingface.chat:Chat completion retrieved successfully.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatCompletion(choices=[Choice(finish_reason='length', index=0, message=MessageContent(content=\"I'm Phi and my purpose as Microsoft GPT-3 developed by MS Corporation in 2019 serves to assist users with a wide range of queries or tasks they may have at hand! How can i help today ? Let me know if there’s anything specific that comes up for which assistance would be beneficial ! :) 😊✨ #AIAssistant#MicrosoftGptPhilosophyOfHelpfulness@MSCorporationTechnologyInnovationsAndEthicsAtTheCoreofOurDesignProcessesWeStriveToCreateAnExperience\", role='assistant'), logprobs=None)], created=1741085113, id='', model='microsoft/Phi-3-mini-4k-instruct', object='chat.completion', usage={'completion_tokens': 128, 'prompt_tokens': 36, 'total_tokens': 164})"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm.generate(input_data={\"question\":\"What is your name?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chat Completion with Messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.llm.huggingface.chat:Invoking Hugging Face ChatCompletion API.\n",
|
||||
"INFO:dapr_agents.llm.huggingface.chat:Chat completion retrieved successfully.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents.types import UserMessage\n",
|
||||
"\n",
|
||||
"# Initialize the client\n",
|
||||
"llm = HFHubChatClient()\n",
|
||||
"\n",
|
||||
"# Generate a response using structured messages\n",
|
||||
"response = llm.generate(messages=[UserMessage(\"hello\")])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'content': \"Hello! How can I assist you today? Whether you have a question, need help with a problem, or just want to chat, I'm here to help. 😊\", 'role': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Display the structured response\n",
|
||||
"print(response.get_message())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm.prompt_template"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,257 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM: NVIDIA Chat Endpoint Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `NVIDIAChatClient` in `dapr-agents` for basic tasks with the NVIDIA Chat API. We will explore:\n",
|
||||
"\n",
|
||||
"* Initializing the `NVIDIAChatClient`.\n",
|
||||
"* Generating responses to simple prompts.\n",
|
||||
"* Using a `.prompty` file to provide context/history for enhanced generation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Environment Variables\n",
|
||||
"\n",
|
||||
"Load API keys or other configuration values from your `.env` file using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import NVIDIAChatClient"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/wardog/Documents/GitHub/dapr-agents/.venv/lib/python3.13/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
|
||||
" from .autonotebook import tqdm as notebook_tqdm\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents import NVIDIAChatClient"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic Chat Completion\n",
|
||||
"\n",
|
||||
"Initialize the `OpenAIChatClient` and generate a response to a simple prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize the client\n",
|
||||
"llm = NVIDIAChatClient()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content=\"That's an easy one! One of the most famous dogs is probably Laika, the Soviet space dog. She was the first living creature to orbit the Earth, launched into space on November 3, 1957, and paved the way for human spaceflight.\", role='assistant'), logprobs=None)], created=1741709966, id='cmpl-7c89ca25c9e140639fe179801738c8dd', model='meta/llama3-8b-instruct', object='chat.completion', usage={'completion_tokens': 55, 'prompt_tokens': 15, 'total_tokens': 70, 'completion_tokens_details': None, 'prompt_tokens_details': None})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Generate a response\n",
|
||||
"response = llm.generate('Name a famous dog!')\n",
|
||||
"\n",
|
||||
"# Display the response\n",
|
||||
"response"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'content': \"That's an easy one! One of the most famous dogs is probably Laika, the Soviet space dog. She was the first living creature to orbit the Earth, launched into space on November 3, 1957, and paved the way for human spaceflight.\", 'role': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(response.get_message())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using a Prompty File for Context\n",
|
||||
"\n",
|
||||
"Use a `.prompty` file to provide context for chat history or additional instructions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = NVIDIAChatClient.from_prompty('basic-nvidia-chat.prompty')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content=\"I'm AI Assistant, nice to meet you!\", role='assistant'), logprobs=None)], created=1737847868, id='cmpl-abe14ae7edef456da870b7c473bffcc7', model='meta/llama3-8b-instruct', object='chat.completion', usage={'completion_tokens': 11, 'prompt_tokens': 43, 'total_tokens': 54, 'completion_tokens_details': None, 'prompt_tokens_details': None})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm.generate(input_data={\"question\":\"What is your name?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chat Completion with Messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.types import UserMessage\n",
|
||||
"\n",
|
||||
"# Initialize the client\n",
|
||||
"llm = NVIDIAChatClient()\n",
|
||||
"\n",
|
||||
"# Generate a response using structured messages\n",
|
||||
"response = llm.generate(messages=[UserMessage(\"hello\")])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'content': \"Hello! It's nice to meet you. Is there something I can help you with, or would you like to chat?\", 'role': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Display the structured response\n",
|
||||
"print(response.get_message())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm.prompt_template"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.13.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,234 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM: NVIDIA Chat Completion with Structured Output\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `NVIDIAChatClient` from `dapr_agents` to generate structured output using `Pydantic` models.\n",
|
||||
"\n",
|
||||
"We will:\n",
|
||||
"\n",
|
||||
"* Initialize the `NVIDIAChatClient` with the `meta/llama-3.1-8b-instruct` model.\n",
|
||||
"* Define a Pydantic model to structure the response.\n",
|
||||
"* Use the `response_model` parameter to get structured output from the LLM."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import Environment Variables\n",
|
||||
"\n",
|
||||
"Load your API keys or other configuration values using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv() # Load environment variables from a `.env` file"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Enable Logging"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.INFO)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import Libraries"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents import NVIDIAChatClient\n",
|
||||
"from dapr_agents.types import UserMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize LLM Client\n",
|
||||
"\n",
|
||||
"Create an instance of the `NVIDIAChatClient`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.llm.nvidia.client:Initializing NVIDIA API client...\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llmClient = NVIDIAChatClient(\n",
|
||||
" model=\"meta/llama-3.1-8b-instruct\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define the Pydantic Model\n",
|
||||
"\n",
|
||||
"Define a Pydantic model to represent the structured response from the LLM."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from pydantic import BaseModel\n",
|
||||
"\n",
|
||||
"class Dog(BaseModel):\n",
|
||||
" name: str\n",
|
||||
" breed: str\n",
|
||||
" reason: str"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Generate Structured Output (JSON)\n",
|
||||
"\n",
|
||||
"Use the generate method of the `NVIDIAChatClient` with the `response_model` parameter to enforce the structure of the response."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.llm.utils.request:A response model has been passed to structure the response of the LLM.\n",
|
||||
"INFO:dapr_agents.llm.utils.structure:Structured response enabled.\n",
|
||||
"INFO:dapr_agents.llm.nvidia.chat:Invoking ChatCompletion API.\n",
|
||||
"INFO:httpx:HTTP Request: POST https://integrate.api.nvidia.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.nvidia.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.\n",
|
||||
"INFO:dapr_agents.llm.utils.response:Returning an instance of <class '__main__.Dog'>.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = llmClient.generate(\n",
|
||||
" messages=[UserMessage(\"One famous dog in history.\")],\n",
|
||||
" response_model=Dog\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Dog(name='Laika', breed='Soviet space dog (mixed breeds)', reason='First animal in space')"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,260 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM: NVIDIA Embeddings Endpoint Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `NVIDIAEmbedder` in `dapr-agents` for generating text embeddings. We will explore:\n",
|
||||
"\n",
|
||||
"* Initializing the `NVIDIAEmbedder`.\n",
|
||||
"* Generating embeddings for single and multiple inputs.\n",
|
||||
"* Using the class both as a direct function and via its `embed` method."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Environment Variables\n",
|
||||
"\n",
|
||||
"Load API keys or other configuration values from your `.env` file using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import NVIDIAEmbedder"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.document.embedder import NVIDIAEmbedder"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize the NVIDIAEmbedder\n",
|
||||
"\n",
|
||||
"To start, create an instance of the `NVIDIAEmbedder` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize the embedder\n",
|
||||
"embedder = NVIDIAEmbedder(\n",
|
||||
" model=\"nvidia/nv-embedqa-e5-v5\", # Default embedding model\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Embedding a Single Text\n",
|
||||
"\n",
|
||||
"You can use the embed method to generate an embedding for a single input string."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Embedding (first 5 values): [-0.007270217100869654, -0.03521439888521964, 0.008612880489907491, 0.03619088134997443, 0.03658757735128107]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Input text\n",
|
||||
"text = \"The quick brown fox jumps over the lazy dog.\"\n",
|
||||
"\n",
|
||||
"# Generate embedding\n",
|
||||
"embedding = embedder.embed(text)\n",
|
||||
"\n",
|
||||
"# Display the embedding\n",
|
||||
"print(f\"Embedding (first 5 values): {embedding[:5]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Embedding Multiple Texts\n",
|
||||
"\n",
|
||||
"The embed method also supports embedding multiple texts at once."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Text 1 embedding (first 5 values): [-0.007270217100869654, -0.03521439888521964, 0.008612880489907491, 0.03619088134997443, 0.03658757735128107]\n",
|
||||
"Text 2 embedding (first 5 values): [0.03491632278487177, -0.045598764196327295, 0.014955417976037734, 0.049291836798573345, 0.03741906620126992]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Input texts\n",
|
||||
"texts = [\n",
|
||||
" \"The quick brown fox jumps over the lazy dog.\",\n",
|
||||
" \"A journey of a thousand miles begins with a single step.\"\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Generate embeddings\n",
|
||||
"embeddings = embedder.embed(texts)\n",
|
||||
"\n",
|
||||
"# Display the embeddings\n",
|
||||
"for i, emb in enumerate(embeddings):\n",
|
||||
" print(f\"Text {i + 1} embedding (first 5 values): {emb[:5]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using the NVIDIAEmbedder as a Callable Function\n",
|
||||
"\n",
|
||||
"The `NVIDIAEmbedder` class can also be used directly as a function, thanks to its `__call__` implementation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Embedding (first 5 values): [-0.005809799816153762, -0.08734154733463988, -0.017593431879252233, 0.027511671880565285, 0.001342777107870075]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Use the class instance as a callable\n",
|
||||
"text_embedding = embedder(\"A stitch in time saves nine.\")\n",
|
||||
"\n",
|
||||
"# Display the embedding\n",
|
||||
"print(f\"Embedding (first 5 values): {text_embedding[:5]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For multiple inputs:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Text 1 embedding (first 5 values): [0.021093917798446042, -0.04365205548745667, 0.02008726662368289, 0.024922242720651362, 0.024556187748010216]\n",
|
||||
"Text 2 embedding (first 5 values): [-0.006683721130524534, -0.05764852452568794, 0.01164408689824411, 0.04627132894469238, 0.03458911471541276]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text_list = [\"The early bird catches the worm.\", \"An apple a day keeps the doctor away.\"]\n",
|
||||
"embeddings_list = embedder(text_list)\n",
|
||||
"\n",
|
||||
"# Display the embeddings\n",
|
||||
"for i, emb in enumerate(embeddings_list):\n",
|
||||
" print(f\"Text {i + 1} embedding (first 5 values): {emb[:5]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,453 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM: OpenAI Audio Endpoint Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `OpenAIAudioClient` in `dapr-agents` for basic tasks with the OpenAI Audio API. We will explore:\n",
|
||||
"\n",
|
||||
"* Generating speech from text and saving it as an MP3 file.\n",
|
||||
"* Transcribing audio to text.\n",
|
||||
"* Translating audio content to English."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"\n",
|
||||
"Ensure you have the required library installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Environment Variables"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize OpenAIAudioClient"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents import OpenAIAudioClient\n",
|
||||
"\n",
|
||||
"client = OpenAIAudioClient()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Generate Speech from Text\n",
|
||||
"\n",
|
||||
"### Manual File Creation\n",
|
||||
"\n",
|
||||
"This section demonstrates how to generate speech from a given text input and save it as an MP3 file."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Audio saved to output_speech.mp3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents.types.llm import AudioSpeechRequest\n",
|
||||
"\n",
|
||||
"# Define the text to convert to speech\n",
|
||||
"text_to_speech = \"Hello Roberto! This is an example of text-to-speech generation.\"\n",
|
||||
"\n",
|
||||
"# Create a request for TTS\n",
|
||||
"tts_request = AudioSpeechRequest(\n",
|
||||
" model=\"tts-1\",\n",
|
||||
" input=text_to_speech,\n",
|
||||
" voice=\"fable\",\n",
|
||||
" response_format=\"mp3\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Generate the audio\n",
|
||||
"audio_bytes = client.create_speech(request=tts_request)\n",
|
||||
"\n",
|
||||
"# Save the audio to an MP3 file\n",
|
||||
"output_path = \"output_speech.mp3\"\n",
|
||||
"with open(output_path, \"wb\") as audio_file:\n",
|
||||
" audio_file.write(audio_bytes)\n",
|
||||
"\n",
|
||||
"print(f\"Audio saved to {output_path}\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Automatic File Creation\n",
|
||||
"\n",
|
||||
"The audio file is saved directly by providing the file_name parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.types.llm import AudioSpeechRequest\n",
|
||||
"\n",
|
||||
"# Define the text to convert to speech\n",
|
||||
"text_to_speech = \"Hola Roberto! Este es otro ejemplo de generacion de voz desde texto.\"\n",
|
||||
"\n",
|
||||
"# Create a request for TTS\n",
|
||||
"tts_request = AudioSpeechRequest(\n",
|
||||
" model=\"tts-1\",\n",
|
||||
" input=text_to_speech,\n",
|
||||
" voice=\"echo\",\n",
|
||||
" response_format=\"mp3\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Generate the audio\n",
|
||||
"client.create_speech(request=tts_request, file_name=\"output_speech_spanish_auto.mp3\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Transcribe Audio to Text\n",
|
||||
"\n",
|
||||
"This section demonstrates how to transcribe audio content into text."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using a File Path"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Transcription: Hello Roberto, this is an example of text-to-speech generation.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents.types.llm import AudioTranscriptionRequest\n",
|
||||
"\n",
|
||||
"# Specify the audio file to transcribe\n",
|
||||
"audio_file_path = \"output_speech.mp3\"\n",
|
||||
"\n",
|
||||
"# Create a transcription request\n",
|
||||
"transcription_request = AudioTranscriptionRequest(\n",
|
||||
" model=\"whisper-1\",\n",
|
||||
" file=audio_file_path\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Generate transcription\n",
|
||||
"transcription_response = client.create_transcription(request=transcription_request)\n",
|
||||
"\n",
|
||||
"# Display the transcription result\n",
|
||||
"print(\"Transcription:\", transcription_response.text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using Audio Bytes"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Transcription: Hola Roberto, este es otro ejemplo de generación de voz desde texto.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# audio_bytes = open(\"output_speech_spanish_auto.mp3\", \"rb\")\n",
|
||||
"\n",
|
||||
"with open(\"output_speech_spanish_auto.mp3\", \"rb\") as f:\n",
|
||||
" audio_bytes = f.read()\n",
|
||||
"\n",
|
||||
"transcription_request = AudioTranscriptionRequest(\n",
|
||||
" model=\"whisper-1\",\n",
|
||||
" file=audio_bytes, # File as bytes\n",
|
||||
" language=\"en\" # Optional: Specify the language of the audio\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Generate transcription\n",
|
||||
"transcription_response = client.create_transcription(request=transcription_request)\n",
|
||||
"\n",
|
||||
"# Display the transcription result\n",
|
||||
"print(\"Transcription:\", transcription_response.text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using File-Like Objects (e.g., BufferedReader)\n",
|
||||
"\n",
|
||||
"You can use file-like objects, such as BufferedReader, directly for transcription or translation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Transcription: ¡Hola, Roberto! Este es otro ejemplo de generación de voz desde texto.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from io import BufferedReader\n",
|
||||
"\n",
|
||||
"# Open the audio file as a BufferedReader\n",
|
||||
"audio_file_path = \"output_speech_spanish_auto.mp3\"\n",
|
||||
"with open(audio_file_path, \"rb\") as f:\n",
|
||||
" buffered_file = BufferedReader(f)\n",
|
||||
"\n",
|
||||
" # Create a transcription request\n",
|
||||
" transcription_request = AudioTranscriptionRequest(\n",
|
||||
" model=\"whisper-1\",\n",
|
||||
" file=buffered_file, # File as BufferedReader\n",
|
||||
" language=\"es\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # Generate transcription\n",
|
||||
" transcription_response = client.create_transcription(request=transcription_request)\n",
|
||||
"\n",
|
||||
" # Display the transcription result\n",
|
||||
" print(\"Transcription:\", transcription_response.text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Translate Audio to English\n",
|
||||
"\n",
|
||||
"This section demonstrates how to translate audio content into English."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using a File Path"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Translation: Hola Roberto, este es otro ejemplo de generación de voz desde texto.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents.types.llm import AudioTranslationRequest\n",
|
||||
"\n",
|
||||
"# Specify the audio file to translate\n",
|
||||
"audio_file_path = \"output_speech_spanish_auto.mp3\"\n",
|
||||
"\n",
|
||||
"# Create a translation request\n",
|
||||
"translation_request = AudioTranslationRequest(\n",
|
||||
" model=\"whisper-1\",\n",
|
||||
" file=audio_file_path,\n",
|
||||
" prompt=\"The following audio needs to be translated to English.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Generate translation\n",
|
||||
"translation_response = client.create_translation(request=translation_request)\n",
|
||||
"\n",
|
||||
"# Display the translation result\n",
|
||||
"print(\"Translation:\", translation_response.text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using Audio Bytes"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Translation: Hola Roberto, este es otro ejemplo de generación de voz desde texto.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# audio_bytes = open(\"output_speech_spanish_auto.mp3\", \"rb\")\n",
|
||||
"\n",
|
||||
"with open(\"output_speech_spanish_auto.mp3\", \"rb\") as f:\n",
|
||||
" audio_bytes = f.read()\n",
|
||||
"\n",
|
||||
"translation_request = AudioTranslationRequest(\n",
|
||||
" model=\"whisper-1\",\n",
|
||||
" file=audio_bytes, # File as bytes\n",
|
||||
" prompt=\"The following audio needs to be translated to English.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Generate translation\n",
|
||||
"translation_response = client.create_translation(request=translation_request)\n",
|
||||
"\n",
|
||||
"# Display the translation result\n",
|
||||
"print(\"Translation:\", translation_response.text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using File-Like Objects (e.g., BufferedReader) for Translation\n",
|
||||
"\n",
|
||||
"You can use a file-like object, such as a BufferedReader, directly for translating audio content."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Translation: Hola Roberto, este es otro ejemplo de generación de voz desde texto.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from io import BufferedReader\n",
|
||||
"\n",
|
||||
"# Open the audio file as a BufferedReader\n",
|
||||
"audio_file_path = \"output_speech_spanish_auto.mp3\"\n",
|
||||
"with open(audio_file_path, \"rb\") as f:\n",
|
||||
" buffered_file = BufferedReader(f)\n",
|
||||
"\n",
|
||||
" # Create a translation request\n",
|
||||
" translation_request = AudioTranslationRequest(\n",
|
||||
" model=\"whisper-1\",\n",
|
||||
" file=buffered_file, # File as BufferedReader\n",
|
||||
" prompt=\"The following audio needs to be translated to English.\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # Generate translation\n",
|
||||
" translation_response = client.create_translation(request=translation_request)\n",
|
||||
"\n",
|
||||
" # Display the translation result\n",
|
||||
" print(\"Translation:\", translation_response.text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,275 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM: OpenAI Chat Endpoint Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `OpenAIChatClient` in `dapr-agents` for basic tasks with the OpenAI Chat API. We will explore:\n",
|
||||
"\n",
|
||||
"* Initializing the OpenAI Chat client.\n",
|
||||
"* Generating responses to simple prompts.\n",
|
||||
"* Using a `.prompty` file to provide context/history for enhanced generation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Environment Variables\n",
|
||||
"\n",
|
||||
"Load API keys or other configuration values from your `.env` file using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import OpenAIChatClient"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents import OpenAIChatClient"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic Chat Completion\n",
|
||||
"\n",
|
||||
"Initialize the `OpenAIChatClient` and generate a response to a simple prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize the client\n",
|
||||
"llm = OpenAIChatClient()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content='One famous dog is Lassie, the Rough Collie from the television series and films that became iconic for her intelligence and heroic adventures.', role='assistant'), logprobs=None)], created=1741085405, id='chatcmpl-B7K8brL19kn1KgDTG9on7n7ICnt3P', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 28, 'prompt_tokens': 12, 'total_tokens': 40, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Generate a response\n",
|
||||
"response = llm.generate('Name a famous dog!')\n",
|
||||
"\n",
|
||||
"# Display the response\n",
|
||||
"response"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'content': 'One famous dog is Lassie, the Rough Collie from the television series and films that became iconic for her intelligence and heroic adventures.', 'role': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(response.get_message())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using a Prompty File for Context\n",
|
||||
"\n",
|
||||
"Use a `.prompty` file to provide context for chat history or additional instructions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAIChatClient.from_prompty('basic-openai-chat-history.prompty')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatPromptTemplate(input_variables=['chat_history', 'question'], pre_filled_variables={}, messages=[SystemMessage(content='You are an AI assistant who helps people find information.\\nAs the assistant, you answer questions briefly, succinctly, \\nand in a personable manner using markdown and even add some personal flair with appropriate emojis.\\n\\n{% for item in chat_history %}\\n{{item.role}}:\\n{{item.content}}\\n{% endfor %}', role='system'), UserMessage(content='{{question}}', role='user')], template_format='jinja2')"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm.prompt_template"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content=\"Hey there! I'm your friendly AI assistant. You can call me whatever you'd like, but I don't have a specific name. 😊 How can I help you today?\", role='assistant'), logprobs=None)], created=1741085407, id='chatcmpl-B7K8dI84xY2hjaEspDtJL5EICbSLh', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 34, 'prompt_tokens': 57, 'total_tokens': 91, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm.generate(input_data={\"question\":\"What is your name?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chat Completion with Messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.types import UserMessage\n",
|
||||
"\n",
|
||||
"# Initialize the client\n",
|
||||
"llm = OpenAIChatClient()\n",
|
||||
"\n",
|
||||
"# Generate a response using structured messages\n",
|
||||
"response = llm.generate(messages=[UserMessage(\"hello\")])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'content': 'Hello! How can I assist you today?', 'role': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Display the structured response\n",
|
||||
"print(response.get_message())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm.prompt_template"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,226 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM: OpenAI Chat Completion with Structured Output\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `OpenAIChatClient` from `dapr-agents` to generate structured output using `Pydantic` models.\n",
|
||||
"\n",
|
||||
"We will:\n",
|
||||
"\n",
|
||||
"* Initialize the OpenAIChatClient.\n",
|
||||
"* Define a Pydantic model to structure the response.\n",
|
||||
"* Use the response_model parameter to get structured output from the LLM."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import Environment Variables\n",
|
||||
"\n",
|
||||
"Load your API keys or other configuration values using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv() # Load environment variables from a `.env` file"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Enable Logging"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.INFO)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import dapr-agents Libraries\n",
|
||||
"\n",
|
||||
"Import the necessary classes and types from `dapr-agents`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents import OpenAIChatClient\n",
|
||||
"from dapr_agents.types import UserMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize LLM Client\n",
|
||||
"\n",
|
||||
"Create an instance of the `OpenAIChatClient`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.llm.openai.client.base:Initializing OpenAI client...\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llmClient = OpenAIChatClient()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define the Pydantic Model\n",
|
||||
"\n",
|
||||
"Define a Pydantic model to represent the structured response from the LLM."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from pydantic import BaseModel\n",
|
||||
"\n",
|
||||
"class Dog(BaseModel):\n",
|
||||
" name: str\n",
|
||||
" breed: str\n",
|
||||
" reason: str"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Generate Structured Output (JSON)\n",
|
||||
"\n",
|
||||
"Use the generate method of the `OpenAIChatClient` with the `response_model` parameter to enforce the structure of the response."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n",
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = llmClient.generate(\n",
|
||||
" messages=[UserMessage(\"One famous dog in history.\")],\n",
|
||||
" response_format=Dog\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Dog(name='Balto', breed='Siberian Husky', reason=\"Balto is famous for his role in the 1925 serum run to Nome, also known as the 'Great Race of Mercy.' This life-saving mission involved a relay of sled dog teams transporting diphtheria antitoxin across harsh Alaskan wilderness under treacherous winter conditions, preventing a potential epidemic. Balto led the final leg of the journey, becoming a symbol of bravery and teamwork.\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,262 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM: OpenAI Embeddings Endpoint Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `OpenAIEmbedder` in `dapr-agents` for generating text embeddings. We will explore:\n",
|
||||
"\n",
|
||||
"* Initializing the `OpenAIEmbedder`.\n",
|
||||
"* Generating embeddings for single and multiple inputs.\n",
|
||||
"* Using the class both as a direct function and via its `embed` method."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv tiktoken"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Environment Variables\n",
|
||||
"\n",
|
||||
"Load API keys or other configuration values from your `.env` file using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import OpenAIEmbedder"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.document.embedder import OpenAIEmbedder"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize the OpenAIEmbedder\n",
|
||||
"\n",
|
||||
"To start, create an instance of the `OpenAIEmbedder` class. You can customize its parameters if needed, such as the `model` or `chunk_size`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize the embedder\n",
|
||||
"embedder = OpenAIEmbedder(\n",
|
||||
" model=\"text-embedding-ada-002\", # Default embedding model\n",
|
||||
" chunk_size=1000, # Batch size for processing\n",
|
||||
" max_tokens=8191 # Maximum tokens per input\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Embedding a Single Text\n",
|
||||
"\n",
|
||||
"You can use the embed method to generate an embedding for a single input string."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Embedding (first 5 values): [0.0015723939, 0.005963983, -0.015102495, -0.008559333, -0.011583589]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Input text\n",
|
||||
"text = \"The quick brown fox jumps over the lazy dog.\"\n",
|
||||
"\n",
|
||||
"# Generate embedding\n",
|
||||
"embedding = embedder.embed(text)\n",
|
||||
"\n",
|
||||
"# Display the embedding\n",
|
||||
"print(f\"Embedding (first 5 values): {embedding[:5]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Embedding Multiple Texts\n",
|
||||
"\n",
|
||||
"The embed method also supports embedding multiple texts at once."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Text 1 embedding (first 5 values): [0.0015723939, 0.005963983, -0.015102495, -0.008559333, -0.011583589]\n",
|
||||
"Text 2 embedding (first 5 values): [0.03261204, -0.020966679, 0.0026475298, -0.009384127, -0.007305047]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Input texts\n",
|
||||
"texts = [\n",
|
||||
" \"The quick brown fox jumps over the lazy dog.\",\n",
|
||||
" \"A journey of a thousand miles begins with a single step.\"\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Generate embeddings\n",
|
||||
"embeddings = embedder.embed(texts)\n",
|
||||
"\n",
|
||||
"# Display the embeddings\n",
|
||||
"for i, emb in enumerate(embeddings):\n",
|
||||
" print(f\"Text {i + 1} embedding (first 5 values): {emb[:5]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using the OpenAIEmbedder as a Callable Function\n",
|
||||
"\n",
|
||||
"The OpenAIEmbedder class can also be used directly as a function, thanks to its `__call__` implementation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Embedding (first 5 values): [-0.0022105372, -0.022207271, 0.017802631, -0.00742872, 0.007270942]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Use the class instance as a callable\n",
|
||||
"text_embedding = embedder(\"A stitch in time saves nine.\")\n",
|
||||
"\n",
|
||||
"# Display the embedding\n",
|
||||
"print(f\"Embedding (first 5 values): {text_embedding[:5]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For multiple inputs:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Text 1 embedding (first 5 values): [0.0038562817, -0.020030975, 0.01792581, -0.014723405, -0.014608578]\n",
|
||||
"Text 2 embedding (first 5 values): [0.011255961, 0.004331666, 0.029073123, -0.01053614, 0.021288864]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text_list = [\"The early bird catches the worm.\", \"An apple a day keeps the doctor away.\"]\n",
|
||||
"embeddings_list = embedder(text_list)\n",
|
||||
"\n",
|
||||
"# Display the embeddings\n",
|
||||
"for i, emb in enumerate(embeddings_list):\n",
|
||||
" print(f\"Text {i + 1} embedding (first 5 values): {emb[:5]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,499 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# VectorStore: Chroma and OpenAI Embeddings Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `ChromaVectorStore` in `dapr-agents` for storing, querying, and filtering documents. We will explore:\n",
|
||||
"\n",
|
||||
"* Initializing the `OpenAIEmbedder` embedding function and `ChromaVectorStore`.\n",
|
||||
"* Adding documents with text and metadata.\n",
|
||||
"* Retrieving documents by ID.\n",
|
||||
"* Updating documents.\n",
|
||||
"* Deleting documents.\n",
|
||||
"* Performing similarity searches.\n",
|
||||
"* Filtering results based on metadata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv chromadb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Environment Variables\n",
|
||||
"\n",
|
||||
"Load API keys or other configuration values from your `.env` file using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize OpenAI Embedding Function\n",
|
||||
"\n",
|
||||
"The default embedding function is `SentenceTransformerEmbedder`, but for this example we will use the `OpenAIEmbedder`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.document.embedder import OpenAIEmbedder\n",
|
||||
"\n",
|
||||
"embedding_funciton = OpenAIEmbedder(\n",
|
||||
" model = \"text-embedding-ada-002\",\n",
|
||||
" encoding_name=\"cl100k_base\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initializing the ChromaVectorStore\n",
|
||||
"\n",
|
||||
"To start, create an instance of the `ChromaVectorStore`. You can customize its parameters if needed, such as enabling persistence or specifying the embedding_function."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.storage import ChromaVectorStore\n",
|
||||
"\n",
|
||||
"# Initialize ChromaVectorStore\n",
|
||||
"store = ChromaVectorStore(\n",
|
||||
" name=\"example_collection\", # Name of the collection\n",
|
||||
" embedding_function=embedding_funciton,\n",
|
||||
" persistent=False, # No persistence for this example\n",
|
||||
" host=\"localhost\", # Host for the Chroma server\n",
|
||||
" port=8000 # Port for the Chroma server\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Adding Documents\n",
|
||||
"We will use Document objects to add content to the collection. Each Document includes text and optional metadata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Creating Documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.types.document import Document\n",
|
||||
"\n",
|
||||
"# Example Lord of the Rings-inspired conversations\n",
|
||||
"documents = [\n",
|
||||
" Document(\n",
|
||||
" text=\"Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to.\",\n",
|
||||
" metadata={\"topic\": \"wisdom\", \"location\": \"The Shire\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Frodo: I wish the Ring had never come to me. I wish none of this had happened.\",\n",
|
||||
" metadata={\"topic\": \"destiny\", \"location\": \"Moria\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master.\",\n",
|
||||
" metadata={\"topic\": \"power\", \"location\": \"Rivendell\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Sam: I can't carry it for you, but I can carry you!\",\n",
|
||||
" metadata={\"topic\": \"friendship\", \"location\": \"Mount Doom\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Legolas: A red sun rises. Blood has been spilled this night.\",\n",
|
||||
" metadata={\"topic\": \"war\", \"location\": \"Rohan\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Gimli: Certainty of death. Small chance of success. What are we waiting for?\",\n",
|
||||
" metadata={\"topic\": \"bravery\", \"location\": \"Helm's Deep\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Boromir: One does not simply walk into Mordor.\",\n",
|
||||
" metadata={\"topic\": \"impossible tasks\", \"location\": \"Rivendell\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Galadriel: Even the smallest person can change the course of the future.\",\n",
|
||||
" metadata={\"topic\": \"hope\", \"location\": \"Lothlórien\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Théoden: So it begins.\",\n",
|
||||
" metadata={\"topic\": \"battle\", \"location\": \"Helm's Deep\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life.\",\n",
|
||||
" metadata={\"topic\": \"sacrifice\", \"location\": \"Rivendell\"}\n",
|
||||
" )\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Adding Documents to the Collection"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of documents in the collection: 10\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"store.add_documents(documents=documents)\n",
|
||||
"print(f\"Number of documents in the collection: {store.count()}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieving Documents\n",
|
||||
"\n",
|
||||
"Retrieve documents by their IDs or fetch all items in the collection."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Retrieved documents:\n",
|
||||
"ID: 82f3b922-c64c-4ad1-a632-ea9f8d13a19a, Text: Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to., Metadata: {'location': 'The Shire', 'topic': 'wisdom'}\n",
|
||||
"ID: f5a45d8b-7f8f-4516-a54a-d9ef3c39db53, Text: Frodo: I wish the Ring had never come to me. I wish none of this had happened., Metadata: {'location': 'Moria', 'topic': 'destiny'}\n",
|
||||
"ID: 7fead849-c4eb-42ce-88ca-ca62fe9f51a4, Text: Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master., Metadata: {'location': 'Rivendell', 'topic': 'power'}\n",
|
||||
"ID: ebd6c642-c8f4-4f45-a75e-4a5acdf33ad5, Text: Sam: I can't carry it for you, but I can carry you!, Metadata: {'location': 'Mount Doom', 'topic': 'friendship'}\n",
|
||||
"ID: 1dc4da81-cbfc-417b-ad71-120fae505842, Text: Legolas: A red sun rises. Blood has been spilled this night., Metadata: {'location': 'Rohan', 'topic': 'war'}\n",
|
||||
"ID: d1ed1836-c0d8-491c-a813-2c5a2688b2d1, Text: Gimli: Certainty of death. Small chance of success. What are we waiting for?, Metadata: {'location': \"Helm's Deep\", 'topic': 'bravery'}\n",
|
||||
"ID: 6fe3f229-bf74-4eea-8fe4-fc38efb2cf9a, Text: Boromir: One does not simply walk into Mordor., Metadata: {'location': 'Rivendell', 'topic': 'impossible tasks'}\n",
|
||||
"ID: 081453e4-0a56-4e78-927b-79289735e8a4, Text: Galadriel: Even the smallest person can change the course of the future., Metadata: {'location': 'Lothlórien', 'topic': 'hope'}\n",
|
||||
"ID: a45db7d1-4224-4e42-b51d-bdb4593b5cf5, Text: Théoden: So it begins., Metadata: {'location': \"Helm's Deep\", 'topic': 'battle'}\n",
|
||||
"ID: 5258d6f6-1f1b-459d-a04e-c96f58d76fca, Text: Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life., Metadata: {'location': 'Rivendell', 'topic': 'sacrifice'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Retrieve all documents\n",
|
||||
"retrieved_docs = store.get()\n",
|
||||
"print(\"Retrieved documents:\")\n",
|
||||
"for doc in retrieved_docs:\n",
|
||||
" print(f\"ID: {doc['id']}, Text: {doc['document']}, Metadata: {doc['metadata']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Updating Documents\n",
|
||||
"\n",
|
||||
"You can update existing documents' text or metadata using their IDs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Updated document: [{'id': '82f3b922-c64c-4ad1-a632-ea9f8d13a19a', 'metadata': {'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}, 'document': 'Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Retrieve a document by its ID\n",
|
||||
"retrieved_docs = store.get() # Get all documents to find the ID\n",
|
||||
"doc_id = retrieved_docs[0]['id'] # Select the first document's ID for this example\n",
|
||||
"\n",
|
||||
"# Define updated text and metadata\n",
|
||||
"updated_text = \"Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.\"\n",
|
||||
"updated_metadata = {\"topic\": \"hope and wisdom\", \"location\": \"Fangorn Forest\"}\n",
|
||||
"\n",
|
||||
"# Update the document's text and metadata in the store\n",
|
||||
"store.update(ids=[doc_id], documents=[updated_text], metadatas=[updated_metadata])\n",
|
||||
"\n",
|
||||
"# Verify the update\n",
|
||||
"updated_doc = store.get(ids=[doc_id])\n",
|
||||
"print(f\"Updated document: {updated_doc}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Deleting Documents\n",
|
||||
"\n",
|
||||
"Delete documents by their IDs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of documents after deletion: 9\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Delete a document by ID\n",
|
||||
"doc_id_to_delete = retrieved_docs[2]['id']\n",
|
||||
"store.delete(ids=[doc_id_to_delete])\n",
|
||||
"\n",
|
||||
"# Verify deletion\n",
|
||||
"print(f\"Number of documents after deletion: {store.count()}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity Search\n",
|
||||
"\n",
|
||||
"Perform a similarity search using text queries. The embedding function automatically generates embeddings for the input query."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Similarity search results:\n",
|
||||
"Text: ['Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.', 'Galadriel: Even the smallest person can change the course of the future.']\n",
|
||||
"Metadata: [{'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}, {'location': 'Lothlórien', 'topic': 'hope'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Search for similar documents based on a query\n",
|
||||
"query = \"wise advice\"\n",
|
||||
"results = store.search_similar(query_texts=query, k=2)\n",
|
||||
"\n",
|
||||
"# Display results\n",
|
||||
"print(\"Similarity search results:\")\n",
|
||||
"for doc, metadata in zip(results[\"documents\"], results[\"metadatas\"]):\n",
|
||||
" print(f\"Text: {doc}\")\n",
|
||||
" print(f\"Metadata: {metadata}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Filtering Results\n",
|
||||
"\n",
|
||||
"Filter results based on metadata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Search for documents with specific metadata filters\n",
|
||||
"filter_conditions = {\n",
|
||||
" \"$and\": [\n",
|
||||
" {\"location\": {\"$eq\": \"Fangorn Forest\"}},\n",
|
||||
" {\"topic\": {\"$eq\": \"hope and wisdom\"}}\n",
|
||||
" ]\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"filtered_results = store.query_with_filters(query_texts=[\"journey\"], where=filter_conditions, k=3)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'ids': [['82f3b922-c64c-4ad1-a632-ea9f8d13a19a']],\n",
|
||||
" 'embeddings': None,\n",
|
||||
" 'documents': [['Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.']],\n",
|
||||
" 'uris': None,\n",
|
||||
" 'data': None,\n",
|
||||
" 'metadatas': [[{'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}]],\n",
|
||||
" 'distances': [[0.21403032541275024]],\n",
|
||||
" 'included': [<IncludeEnum.distances: 'distances'>,\n",
|
||||
" <IncludeEnum.documents: 'documents'>,\n",
|
||||
" <IncludeEnum.metadatas: 'metadatas'>]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"filtered_results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Resetting the Database\n",
|
||||
"\n",
|
||||
"Reset the database to clear all stored data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['example_collection']"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"store.client.list_collections()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Reset the collection\n",
|
||||
"store.reset()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"store.client.list_collections()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,498 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# VectorStore: Chroma and Sentence Transformer (all-MiniLM-L6-v2) with Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `ChromaVectorStore` in `dapr-agents` for storing, querying, and filtering documents. We will explore:\n",
|
||||
"\n",
|
||||
"* Initializing the `SentenceTransformerEmbedder` embedding function and `ChromaVectorStore`.\n",
|
||||
"* Adding documents with text and metadata.\n",
|
||||
"* Retrieving documents by ID.\n",
|
||||
"* Updating documents.\n",
|
||||
"* Deleting documents.\n",
|
||||
"* Performing similarity searches.\n",
|
||||
"* Filtering results based on metadata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv chromadb sentence-transformers"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Environment Variables\n",
|
||||
"\n",
|
||||
"Load API keys or other configuration values from your `.env` file using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initializing SentenceTransformer Embedding Function\n",
|
||||
"\n",
|
||||
"The default embedding function is `SentenceTransformerEmbedder`, but we will initialize it explicitly for clarity."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.document.embedder import SentenceTransformerEmbedder\n",
|
||||
"\n",
|
||||
"embedding_function = SentenceTransformerEmbedder(\n",
|
||||
" model=\"all-MiniLM-L6-v2\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initializing the ChromaVectorStore\n",
|
||||
"\n",
|
||||
"To start, create an instance of the `ChromaVectorStore` and set the `embedding_function` to the instance of `SentenceTransformerEmbedder`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.storage import ChromaVectorStore\n",
|
||||
"\n",
|
||||
"# Initialize ChromaVectorStore\n",
|
||||
"store = ChromaVectorStore(\n",
|
||||
" name=\"example_collection\", # Name of the collection\n",
|
||||
" embedding_function=embedding_function,\n",
|
||||
" persistent=False, # No persistence for this example\n",
|
||||
" host=\"localhost\", # Host for the Chroma server\n",
|
||||
" port=8000 # Port for the Chroma server\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Adding Documents\n",
|
||||
"We will use Document objects to add content to the collection. Each Document includes text and optional metadata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Creating Documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.types.document import Document\n",
|
||||
"\n",
|
||||
"# Example Lord of the Rings-inspired conversations\n",
|
||||
"documents = [\n",
|
||||
" Document(\n",
|
||||
" text=\"Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to.\",\n",
|
||||
" metadata={\"topic\": \"wisdom\", \"location\": \"The Shire\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Frodo: I wish the Ring had never come to me. I wish none of this had happened.\",\n",
|
||||
" metadata={\"topic\": \"destiny\", \"location\": \"Moria\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master.\",\n",
|
||||
" metadata={\"topic\": \"power\", \"location\": \"Rivendell\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Sam: I can't carry it for you, but I can carry you!\",\n",
|
||||
" metadata={\"topic\": \"friendship\", \"location\": \"Mount Doom\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Legolas: A red sun rises. Blood has been spilled this night.\",\n",
|
||||
" metadata={\"topic\": \"war\", \"location\": \"Rohan\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Gimli: Certainty of death. Small chance of success. What are we waiting for?\",\n",
|
||||
" metadata={\"topic\": \"bravery\", \"location\": \"Helm's Deep\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Boromir: One does not simply walk into Mordor.\",\n",
|
||||
" metadata={\"topic\": \"impossible tasks\", \"location\": \"Rivendell\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Galadriel: Even the smallest person can change the course of the future.\",\n",
|
||||
" metadata={\"topic\": \"hope\", \"location\": \"Lothlórien\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Théoden: So it begins.\",\n",
|
||||
" metadata={\"topic\": \"battle\", \"location\": \"Helm's Deep\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life.\",\n",
|
||||
" metadata={\"topic\": \"sacrifice\", \"location\": \"Rivendell\"}\n",
|
||||
" )\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Adding Documents to the Collection"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of documents in the collection: 10\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"store.add_documents(documents=documents)\n",
|
||||
"print(f\"Number of documents in the collection: {store.count()}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieving Documents\n",
|
||||
"\n",
|
||||
"Retrieve documents by their IDs or fetch all items in the collection."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Retrieved documents:\n",
|
||||
"ID: 483fc189-df92-4815-987e-b732391e356a, Text: Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to., Metadata: {'location': 'The Shire', 'topic': 'wisdom'}\n",
|
||||
"ID: fcbcbf50-7b0c-458a-a232-abbc1b77518b, Text: Frodo: I wish the Ring had never come to me. I wish none of this had happened., Metadata: {'location': 'Moria', 'topic': 'destiny'}\n",
|
||||
"ID: d4fbda4e-f933-4d1c-8d63-ee4d9f0d0af7, Text: Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master., Metadata: {'location': 'Rivendell', 'topic': 'power'}\n",
|
||||
"ID: 98d218e5-4274-4d93-ac9a-3fbbeb3c0a19, Text: Sam: I can't carry it for you, but I can carry you!, Metadata: {'location': 'Mount Doom', 'topic': 'friendship'}\n",
|
||||
"ID: df9d0abe-0b47-4079-9697-b66f47656e64, Text: Legolas: A red sun rises. Blood has been spilled this night., Metadata: {'location': 'Rohan', 'topic': 'war'}\n",
|
||||
"ID: 309e0971-6826-4bac-81a8-3acfc3a28fa9, Text: Gimli: Certainty of death. Small chance of success. What are we waiting for?, Metadata: {'location': \"Helm's Deep\", 'topic': 'bravery'}\n",
|
||||
"ID: a0a312be-bebd-405b-b993-4e37ed7fd569, Text: Boromir: One does not simply walk into Mordor., Metadata: {'location': 'Rivendell', 'topic': 'impossible tasks'}\n",
|
||||
"ID: 0c09f89c-cf60-4428-beee-294b31dfd6a9, Text: Galadriel: Even the smallest person can change the course of the future., Metadata: {'location': 'Lothlórien', 'topic': 'hope'}\n",
|
||||
"ID: d4778b45-f9fa-438c-b9e9-7466c872b4cc, Text: Théoden: So it begins., Metadata: {'location': \"Helm's Deep\", 'topic': 'battle'}\n",
|
||||
"ID: 7a44e69f-e0c9-41c0-9cdf-a8f34ddf45f5, Text: Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life., Metadata: {'location': 'Rivendell', 'topic': 'sacrifice'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Retrieve all documents\n",
|
||||
"retrieved_docs = store.get()\n",
|
||||
"print(\"Retrieved documents:\")\n",
|
||||
"for doc in retrieved_docs:\n",
|
||||
" print(f\"ID: {doc['id']}, Text: {doc['document']}, Metadata: {doc['metadata']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Updating Documents\n",
|
||||
"\n",
|
||||
"You can update existing documents' text or metadata using their IDs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Updated document: [{'id': '483fc189-df92-4815-987e-b732391e356a', 'metadata': {'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}, 'document': 'Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Retrieve a document by its ID\n",
|
||||
"retrieved_docs = store.get() # Get all documents to find the ID\n",
|
||||
"doc_id = retrieved_docs[0]['id'] # Select the first document's ID for this example\n",
|
||||
"\n",
|
||||
"# Define updated text and metadata\n",
|
||||
"updated_text = \"Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.\"\n",
|
||||
"updated_metadata = {\"topic\": \"hope and wisdom\", \"location\": \"Fangorn Forest\"}\n",
|
||||
"\n",
|
||||
"# Update the document's text and metadata in the store\n",
|
||||
"store.update(ids=[doc_id], documents=[updated_text], metadatas=[updated_metadata])\n",
|
||||
"\n",
|
||||
"# Verify the update\n",
|
||||
"updated_doc = store.get(ids=[doc_id])\n",
|
||||
"print(f\"Updated document: {updated_doc}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Deleting Documents\n",
|
||||
"\n",
|
||||
"Delete documents by their IDs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of documents after deletion: 9\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Delete a document by ID\n",
|
||||
"doc_id_to_delete = retrieved_docs[2]['id']\n",
|
||||
"store.delete(ids=[doc_id_to_delete])\n",
|
||||
"\n",
|
||||
"# Verify deletion\n",
|
||||
"print(f\"Number of documents after deletion: {store.count()}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity Search\n",
|
||||
"\n",
|
||||
"Perform a similarity search using text queries. The embedding function automatically generates embeddings for the input query."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Similarity search results:\n",
|
||||
"Text: ['Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.', 'Gimli: Certainty of death. Small chance of success. What are we waiting for?']\n",
|
||||
"Metadata: [{'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}, {'location': \"Helm's Deep\", 'topic': 'bravery'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Search for similar documents based on a query\n",
|
||||
"query = \"wise advice\"\n",
|
||||
"results = store.search_similar(query_texts=query, k=2)\n",
|
||||
"\n",
|
||||
"# Display results\n",
|
||||
"print(\"Similarity search results:\")\n",
|
||||
"for doc, metadata in zip(results[\"documents\"], results[\"metadatas\"]):\n",
|
||||
" print(f\"Text: {doc}\")\n",
|
||||
" print(f\"Metadata: {metadata}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Filtering Results\n",
|
||||
"\n",
|
||||
"Filter results based on metadata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Search for documents with specific metadata filters\n",
|
||||
"filter_conditions = {\n",
|
||||
" \"$and\": [\n",
|
||||
" {\"location\": {\"$eq\": \"Fangorn Forest\"}},\n",
|
||||
" {\"topic\": {\"$eq\": \"hope and wisdom\"}}\n",
|
||||
" ]\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"filtered_results = store.query_with_filters(query_texts=[\"journey\"], where=filter_conditions, k=3)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'ids': [['483fc189-df92-4815-987e-b732391e356a']],\n",
|
||||
" 'embeddings': None,\n",
|
||||
" 'documents': [['Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.']],\n",
|
||||
" 'uris': None,\n",
|
||||
" 'data': None,\n",
|
||||
" 'metadatas': [[{'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}]],\n",
|
||||
" 'distances': [[0.7907481789588928]],\n",
|
||||
" 'included': [<IncludeEnum.distances: 'distances'>,\n",
|
||||
" <IncludeEnum.documents: 'documents'>,\n",
|
||||
" <IncludeEnum.metadatas: 'metadatas'>]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"filtered_results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Resetting the Database\n",
|
||||
"\n",
|
||||
"Reset the database to clear all stored data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['example_collection']"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"store.client.list_collections()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Reset the collection\n",
|
||||
"store.reset()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"store.client.list_collections()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,522 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# VectorStore: Postgres and Sentence Transformer (all-MiniLM-L6-v2) with Basic Examples\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use the `PostgresVectorStore` in `dapr-agents` for storing, querying, and filtering documents. We will explore:\n",
|
||||
"\n",
|
||||
"* Initializing the `SentenceTransformerEmbedder` embedding function and `PostgresVectorStore`.\n",
|
||||
"* Adding documents with text and metadata.\n",
|
||||
"* Performing similarity searches.\n",
|
||||
"* Filtering results based on metadata.\n",
|
||||
"* Resetting the database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv \"psycopg[binary,pool]\" pgvector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Environment Variables\n",
|
||||
"\n",
|
||||
"Load API keys or other configuration values from your `.env` file using `dotenv`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting Up The Database\n",
|
||||
"\n",
|
||||
"Before initializing the `PostgresVectorStore`, set up a PostgreSQL instance with pgvector enabled. For a local setup, use Docker:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"d920da4b841a66223431ad1dce49c3b0c215a971a4860ee9e25ea5bf0b4bfcd0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!docker run --name pgvector-container \\\n",
|
||||
" -e POSTGRES_USER=dapr_agents \\\n",
|
||||
" -e POSTGRES_PASSWORD=dapr_agents \\\n",
|
||||
" -e POSTGRES_DB=dapr_agents \\\n",
|
||||
" -p 5432:5432 \\\n",
|
||||
" -d pgvector/pgvector:pg17"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initializing SentenceTransformer Embedding Function\n",
|
||||
"\n",
|
||||
"The default embedding function is `SentenceTransformerEmbedder`, but we will initialize it explicitly for clarity."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.document.embedder import SentenceTransformerEmbedder\n",
|
||||
"\n",
|
||||
"embedding_function = SentenceTransformerEmbedder(\n",
|
||||
" model=\"all-MiniLM-L6-v2\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initializing the PostgresVectorStore\n",
|
||||
"\n",
|
||||
"To start, create an instance of the `PostgresVectorStore` and set the `embedding_function` to the instance of `SentenceTransformerEmbedder`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.storage.vectorstores import PostgresVectorStore\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# Set up connection parameters\n",
|
||||
"connection_string = os.getenv(\"POSTGRES_CONNECTION_STRING\", \"postgresql://dapr_agents:dapr_agents@localhost:5432/dapr_agents\")\n",
|
||||
"\n",
|
||||
"# Initialize PostgresVectorStore\n",
|
||||
"store = PostgresVectorStore(\n",
|
||||
" connection_string=connection_string,\n",
|
||||
" table_name=\"dapr_agents\",\n",
|
||||
" embedding_function=SentenceTransformerEmbedder()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Adding Documents\n",
|
||||
"We will use Document objects to add content to the collection. Each document includes text and optional metadata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Creating Documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dapr_agents.types.document import Document\n",
|
||||
"\n",
|
||||
"# Example Lord of the Rings-inspired conversations\n",
|
||||
"documents = [\n",
|
||||
" Document(\n",
|
||||
" text=\"Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to.\",\n",
|
||||
" metadata={\"topic\": \"wisdom\", \"location\": \"The Shire\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Frodo: I wish the Ring had never come to me. I wish none of this had happened.\",\n",
|
||||
" metadata={\"topic\": \"destiny\", \"location\": \"Moria\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master.\",\n",
|
||||
" metadata={\"topic\": \"power\", \"location\": \"Rivendell\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Sam: I can't carry it for you, but I can carry you!\",\n",
|
||||
" metadata={\"topic\": \"friendship\", \"location\": \"Mount Doom\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Legolas: A red sun rises. Blood has been spilled this night.\",\n",
|
||||
" metadata={\"topic\": \"war\", \"location\": \"Rohan\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Gimli: Certainty of death. Small chance of success. What are we waiting for?\",\n",
|
||||
" metadata={\"topic\": \"bravery\", \"location\": \"Helm's Deep\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Boromir: One does not simply walk into Mordor.\",\n",
|
||||
" metadata={\"topic\": \"impossible tasks\", \"location\": \"Rivendell\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Galadriel: Even the smallest person can change the course of the future.\",\n",
|
||||
" metadata={\"topic\": \"hope\", \"location\": \"Lothlórien\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Théoden: So it begins.\",\n",
|
||||
" metadata={\"topic\": \"battle\", \"location\": \"Helm's Deep\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" text=\"Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life.\",\n",
|
||||
" metadata={\"topic\": \"sacrifice\", \"location\": \"Rivendell\"}\n",
|
||||
" )\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Adding Documents to the Collection"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of documents in the collection: 10\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"store.add_documents(documents=documents)\n",
|
||||
"print(f\"Number of documents in the collection: {store.count()}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieving Documents\n",
|
||||
"\n",
|
||||
"Retrieve all documents or specific ones by ID."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Retrieved documents:\n",
|
||||
"ID: feb3b2c1-d3cf-423b-bd5d-6094e2200bc8, Text: Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to., Metadata: {'topic': 'wisdom', 'location': 'The Shire'}\n",
|
||||
"ID: b206833f-4c19-4f3c-91e2-2ccbcc895a63, Text: Frodo: I wish the Ring had never come to me. I wish none of this had happened., Metadata: {'topic': 'destiny', 'location': 'Moria'}\n",
|
||||
"ID: 57226af8-d035-4052-86b2-4f68d7c5a8f6, Text: Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master., Metadata: {'topic': 'power', 'location': 'Rivendell'}\n",
|
||||
"ID: 5376d46a-4161-408c-850c-4b73cd8d2aa6, Text: Sam: I can't carry it for you, but I can carry you!, Metadata: {'topic': 'friendship', 'location': 'Mount Doom'}\n",
|
||||
"ID: 7d8c78c3-e4c9-4c6a-8bb4-a04f450e6bfd, Text: Legolas: A red sun rises. Blood has been spilled this night., Metadata: {'topic': 'war', 'location': 'Rohan'}\n",
|
||||
"ID: 749a126e-2ad5-4aa6-b043-a204e50963f3, Text: Gimli: Certainty of death. Small chance of success. What are we waiting for?, Metadata: {'topic': 'bravery', 'location': \"Helm's Deep\"}\n",
|
||||
"ID: 4848f783-fbc0-43ec-98d6-43b03fa79809, Text: Boromir: One does not simply walk into Mordor., Metadata: {'topic': 'impossible tasks', 'location': 'Rivendell'}\n",
|
||||
"ID: ecc3257d-e542-407e-9db9-21ec3b78249c, Text: Galadriel: Even the smallest person can change the course of the future., Metadata: {'topic': 'hope', 'location': 'Lothlórien'}\n",
|
||||
"ID: 6dad5159-724f-4f03-8cc8-aabc4ee308cd, Text: Théoden: So it begins., Metadata: {'topic': 'battle', 'location': \"Helm's Deep\"}\n",
|
||||
"ID: 63a09862-438a-41d7-abe7-74ec5510ce82, Text: Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life., Metadata: {'topic': 'sacrifice', 'location': 'Rivendell'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Retrieve all documents\n",
|
||||
"retrieved_docs = store.get()\n",
|
||||
"print(\"Retrieved documents:\")\n",
|
||||
"for doc in retrieved_docs:\n",
|
||||
" print(f\"ID: {doc['id']}, Text: {doc['document']}, Metadata: {doc['metadata']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Specific document: [{'id': UUID('feb3b2c1-d3cf-423b-bd5d-6094e2200bc8'), 'document': 'Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to.', 'metadata': {'topic': 'wisdom', 'location': 'The Shire'}}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Retrieve a specific document by ID\n",
|
||||
"doc_id = retrieved_docs[0]['id']\n",
|
||||
"specific_doc = store.get(ids=[doc_id])\n",
|
||||
"print(f\"Specific document: {specific_doc}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Specific document Embedding (first 5 values): [-0.0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Retrieve a specific document by ID\n",
|
||||
"doc_id = retrieved_docs[0]['id']\n",
|
||||
"specific_doc = store.get(ids=[doc_id], with_embedding=True)\n",
|
||||
"embedding = specific_doc[0]['embedding']\n",
|
||||
"print(f\"Specific document Embedding (first 5 values): {embedding[:5]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Updating Documents\n",
|
||||
"\n",
|
||||
"You can update existing documents' text or metadata using their IDs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Updated document: [{'id': UUID('feb3b2c1-d3cf-423b-bd5d-6094e2200bc8'), 'document': 'Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.', 'metadata': {'topic': 'hope and wisdom', 'location': 'Fangorn Forest'}}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Retrieve a document by its ID\n",
|
||||
"retrieved_docs = store.get() # Get all documents to find the ID\n",
|
||||
"doc_id = retrieved_docs[0]['id'] # Select the first document's ID for this example\n",
|
||||
"\n",
|
||||
"# Define updated text and metadata\n",
|
||||
"updated_text = \"Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.\"\n",
|
||||
"updated_metadata = {\"topic\": \"hope and wisdom\", \"location\": \"Fangorn Forest\"}\n",
|
||||
"\n",
|
||||
"# Update the document's text and metadata in the store\n",
|
||||
"store.update(ids=[doc_id], documents=[updated_text], metadatas=[updated_metadata])\n",
|
||||
"\n",
|
||||
"# Verify the update\n",
|
||||
"updated_doc = store.get(ids=[doc_id])\n",
|
||||
"print(f\"Updated document: {updated_doc}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Deleting Documents\n",
|
||||
"\n",
|
||||
"Delete documents by their IDs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of documents after deletion: 9\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Delete a document by ID\n",
|
||||
"doc_id_to_delete = retrieved_docs[2]['id']\n",
|
||||
"store.delete(ids=[doc_id_to_delete])\n",
|
||||
"\n",
|
||||
"# Verify deletion\n",
|
||||
"print(f\"Number of documents after deletion: {store.count()}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity Search\n",
|
||||
"\n",
|
||||
"Perform a similarity search using text queries. The embedding function automatically generates embeddings for the input query."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Similarity search results:\n",
|
||||
"ID: 749a126e-2ad5-4aa6-b043-a204e50963f3, Document: Gimli: Certainty of death. Small chance of success. What are we waiting for?, Metadata: {'topic': 'bravery', 'location': \"Helm's Deep\"}, Similarity: 0.1567628941818613\n",
|
||||
"ID: 4848f783-fbc0-43ec-98d6-43b03fa79809, Document: Boromir: One does not simply walk into Mordor., Metadata: {'topic': 'impossible tasks', 'location': 'Rivendell'}, Similarity: 0.13233356090384096\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Perform a similarity search using text queries.\n",
|
||||
"query = \"wise advice\"\n",
|
||||
"results = store.search_similar(query_texts=query, k=2)\n",
|
||||
"\n",
|
||||
"# Display results\n",
|
||||
"print(\"Similarity search results:\")\n",
|
||||
"for result in results:\n",
|
||||
" print(f\"ID: {result['id']}, Document: {result['document']}, Metadata: {result['metadata']}, Similarity: {result['similarity']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Filtering Results\n",
|
||||
"\n",
|
||||
"Filter results based on metadata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Filtered search results:\n",
|
||||
"ID: feb3b2c1-d3cf-423b-bd5d-6094e2200bc8, Document: Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true., Metadata: {'topic': 'hope and wisdom', 'location': 'Fangorn Forest'}, Similarity: 0.1670202911216282\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Search for documents with specific metadata filters\n",
|
||||
"query = \"journey\"\n",
|
||||
"filter_conditions = {\n",
|
||||
" \"location\": \"Fangorn Forest\",\n",
|
||||
" \"topic\": \"hope and wisdom\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"filtered_results = store.search_similar(query_texts=query, metadata_filter=filter_conditions, k=3)\n",
|
||||
"\n",
|
||||
"# Display filtered results\n",
|
||||
"print(\"Filtered search results:\")\n",
|
||||
"for result in filtered_results:\n",
|
||||
" print(f\"ID: {result['id']}, Document: {result['document']}, Metadata: {result['metadata']}, Similarity: {result['similarity']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Resetting the Database\n",
|
||||
"\n",
|
||||
"Reset the database to clear all stored data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Database reset complete. Current documents: []\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Reset the collection\n",
|
||||
"store.reset()\n",
|
||||
"print(\"Database reset complete. Current documents:\", store.get())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -1,41 +0,0 @@
|
|||
from time import sleep
|
||||
import dapr.ext.workflow as wf
|
||||
|
||||
wfr = wf.WorkflowRuntime()
|
||||
|
||||
@wfr.workflow(name='random_workflow')
|
||||
def task_chain_workflow(ctx: wf.DaprWorkflowContext, x: int):
|
||||
result1 = yield ctx.call_activity(step1, input=x)
|
||||
result2 = yield ctx.call_activity(step2, input=result1)
|
||||
result3 = yield ctx.call_activity(step3, input=result2)
|
||||
return [result1, result2, result3]
|
||||
|
||||
@wfr.activity
|
||||
def step1(ctx, activity_input):
|
||||
print(f'Step 1: Received input: {activity_input}.')
|
||||
# Do some work
|
||||
return activity_input + 1
|
||||
|
||||
@wfr.activity
|
||||
def step2(ctx, activity_input):
|
||||
print(f'Step 2: Received input: {activity_input}.')
|
||||
# Do some work
|
||||
return activity_input * 2
|
||||
|
||||
@wfr.activity
|
||||
def step3(ctx, activity_input):
|
||||
print(f'Step 3: Received input: {activity_input}.')
|
||||
# Do some work
|
||||
return activity_input ^ 2
|
||||
|
||||
if __name__ == '__main__':
|
||||
wfr.start()
|
||||
sleep(5) # wait for workflow runtime to start
|
||||
|
||||
wf_client = wf.DaprWorkflowClient()
|
||||
instance_id = wf_client.schedule_new_workflow(workflow=task_chain_workflow, input=10)
|
||||
print(f'Workflow started. Instance ID: {instance_id}')
|
||||
state = wf_client.wait_for_workflow_completion(instance_id)
|
||||
print(f'Workflow completed! Status: {state.runtime_status}')
|
||||
|
||||
wfr.shutdown()
|
|
@ -1,37 +0,0 @@
|
|||
from dapr_agents.workflow import WorkflowApp, workflow, task
|
||||
from dapr_agents.types import DaprWorkflowContext
|
||||
import logging
|
||||
|
||||
@workflow(name='random_workflow')
|
||||
def task_chain_workflow(ctx:DaprWorkflowContext, input: int):
|
||||
result1 = yield ctx.call_activity(step1, input=input)
|
||||
result2 = yield ctx.call_activity(step2, input=result1)
|
||||
result3 = yield ctx.call_activity(step3, input=result2)
|
||||
return [result1, result2, result3]
|
||||
|
||||
@task
|
||||
def step1(activity_input):
|
||||
print(f'Step 1: Received input: {activity_input}.')
|
||||
# Do some work
|
||||
return activity_input + 1
|
||||
|
||||
@task
|
||||
def step2(activity_input):
|
||||
print(f'Step 2: Received input: {activity_input}.')
|
||||
# Do some work
|
||||
return activity_input * 2
|
||||
|
||||
@task
|
||||
def step3(activity_input):
|
||||
print(f'Step 3: Received input: {activity_input}.')
|
||||
# Do some work
|
||||
return activity_input ^ 2
|
||||
|
||||
if __name__ == '__main__':
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
wfapp = WorkflowApp()
|
||||
|
||||
results = wfapp.run_and_monitor_workflow(task_chain_workflow, input=10)
|
||||
|
||||
print(f"Results: {results}")
|
|
@ -1,35 +0,0 @@
|
|||
from dapr_agents.workflow import WorkflowApp, workflow, task
|
||||
from dapr_agents.types import DaprWorkflowContext
|
||||
from dotenv import load_dotenv
|
||||
import logging
|
||||
|
||||
# Define Workflow logic
|
||||
@workflow(name='lotr_workflow')
|
||||
def task_chain_workflow(ctx: DaprWorkflowContext):
|
||||
result1 = yield ctx.call_activity(get_character)
|
||||
result2 = yield ctx.call_activity(get_line, input={"character": result1})
|
||||
return result2
|
||||
|
||||
@task(description="""
|
||||
Pick a random character from The Lord of the Rings\n
|
||||
and respond with the character's name ONLY
|
||||
""")
|
||||
def get_character() -> str:
|
||||
pass
|
||||
|
||||
@task(description="What is a famous line by {character}",)
|
||||
def get_line(character: str) -> str:
|
||||
pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
# Initialize the WorkflowApp
|
||||
wfapp = WorkflowApp()
|
||||
|
||||
# Run workflow
|
||||
results = wfapp.run_and_monitor_workflow(task_chain_workflow)
|
||||
print(results)
|
|
@ -1,29 +0,0 @@
|
|||
from dapr_agents.workflow import WorkflowApp, workflow, task
|
||||
from dapr_agents.types import DaprWorkflowContext
|
||||
from pydantic import BaseModel
|
||||
from dotenv import load_dotenv
|
||||
import logging
|
||||
|
||||
@workflow
|
||||
def question(ctx:DaprWorkflowContext, input:int):
|
||||
step1 = yield ctx.call_activity(ask, input=input)
|
||||
return step1
|
||||
|
||||
class Dog(BaseModel):
|
||||
name: str
|
||||
bio: str
|
||||
breed: str
|
||||
|
||||
@task("Who was {name}?")
|
||||
def ask(name:str) -> Dog:
|
||||
pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
load_dotenv()
|
||||
|
||||
wfapp = WorkflowApp()
|
||||
|
||||
results = wfapp.run_and_monitor_workflow(workflow=question, input="Scooby Doo")
|
||||
print(results)
|
|
@ -1,61 +0,0 @@
|
|||
# Doc2Podcast: Automating Podcast Creation from Research Papers
|
||||
|
||||
This workflow is a basic step toward automating the creation of podcast content from research using AI. It demonstrates how to process a single research paper, generate a dialogue-style transcript with LLMs, and convert it into a podcast audio file. While simple, this workflow serves as a foundation for exploring more advanced processes, such as handling multiple documents or optimizing content splitting for better audio output.
|
||||
|
||||
## Key Features and Workflow
|
||||
|
||||
* PDF Processing: Downloads a research paper from a specified URL and extracts its content page by page.
|
||||
* LLM-Powered Transcripts: Transforms extracted text into a dialogue-style transcript using a large language model, alternating between a host and participants.
|
||||
* AI-Generated Audio: Converts the transcript into a podcast-like audio file with natural-sounding voices for the host and participants.
|
||||
* Custom Workflow: Saves the final podcast audio and transcript files locally, offering flexibility for future enhancements like handling multiple files or integrating additional AI tools.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
* Python 3.8 or higher
|
||||
* Required Python dependencies (install using `pip install -r requirements.txt`)
|
||||
* A valid `OpenAI` API key for generating audio content
|
||||
* Set the `OPENAI_API_KEY` variable with your key value in an `.env` file.
|
||||
|
||||
## Configuration
|
||||
To run the workflow, provide a configuration file in JSON format. The `config.json` file in this folder points to the following file "[Exploring Applicability of LLM-Powered Autonomous Agents to Solve Real-life Problems](https://github.com/OTRF/MEAN/blob/main/Rodriquez%20%26%20Syynimaa%20(2024).%20Exploring%20Applicability%20of%20LLM-Powered%20Autonomous%20Agents%20to%20Solve%20Real-life%20Problems.pdf)". Config example:
|
||||
|
||||
```json
|
||||
{
|
||||
"pdf_url": "https://example.com/research-paper.pdf",
|
||||
"podcast_name": "AI Explorations",
|
||||
"host": {
|
||||
"name": "John Doe",
|
||||
"voice": "alloy"
|
||||
},
|
||||
"participants": [
|
||||
{ "name": "Alice Smith" },
|
||||
{ "name": "Bob Johnson" }
|
||||
],
|
||||
"max_rounds": 4,
|
||||
"output_transcript_path": "podcast_dialogue.json",
|
||||
"output_audio_path": "final_podcast.mp3",
|
||||
"audio_model": "tts-1"
|
||||
}
|
||||
```
|
||||
|
||||
## Running the Workflow
|
||||
|
||||
* Place the configuration file (e.g., config.json) in the project directory.
|
||||
* Run the workflow with the following command:
|
||||
|
||||
```bash
|
||||
dapr run --app-id doc2podcast --resources-path components -- python3 workflow.py --config config.json
|
||||
```
|
||||
|
||||
* Output:
|
||||
* Transcript: A structured transcript saved as `podcast_dialogue.json` by default. An example can be found in the current directory.
|
||||
* Audio: The final podcast audio saved as `final_podcast.mp3` as default. An example can be found [here](https://on.soundcloud.com/pzjYRcJZDU3y27hz5).
|
||||
|
||||
## Next Steps
|
||||
|
||||
This workflow is a simple starting point. Future enhancements could include:
|
||||
|
||||
* Processing Multiple Files: Extend the workflow to handle batches of PDFs.
|
||||
* Advanced Text Splitting: Dynamically split text based on content rather than pages.
|
||||
* Web Search Integration: Pull additional context or related research from the web.
|
||||
* Multi-Modal Content: Process documents alongside images, slides, or charts.
|
|
@ -1,20 +0,0 @@
|
|||
{
|
||||
"pdf_url": "https://raw.githubusercontent.com/OTRF/MEAN/main/Rodriquez%20%26%20Syynimaa%20(2024).%20Exploring%20Applicability%20of%20LLM-Powered%20Autonomous%20Agents%20to%20Solve%20Real-life%20Problems.pdf",
|
||||
"podcast_name": "AI Explorations",
|
||||
"host": {
|
||||
"name": "John Doe",
|
||||
"voice": "alloy"
|
||||
},
|
||||
"participants": [
|
||||
{
|
||||
"name": "Alice Smith"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson"
|
||||
}
|
||||
],
|
||||
"max_rounds": 4,
|
||||
"output_transcript_path": "podcast_dialogue.json",
|
||||
"output_audio_path": "final_podcast.mp3",
|
||||
"audio_model": "tts-1"
|
||||
}
|
|
@ -1,234 +0,0 @@
|
|||
[
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Welcome to 'AI Explorations'. I'm your host, John Doe. I'm joined today by Alice Smith and Bob Johnson. How are both of you doing today?"
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "Hi John, I'm doing great, thanks for having me. Excited to discuss today's topics."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Fantastic. In today's episode, we'll explore the applicability of LLM-powered autonomous agents in tackling real-life problems. We'll delve into Microsoft Entra ID Administration, particularly focusing on a project named MEAN. Alice, could you tell us a bit more about this project and its relevance?"
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "Absolutely, John. The MEAN project is fascinating as it leverages LLM technology to perform administrative tasks in Entra ID using natural language prompts. This is particularly useful given that Microsoft has retired some key PowerShell modules for these tasks."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "That's interesting. Bob, from a technical standpoint, what changes are happening that make projects like MEAN necessary?"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "Well, John, with Microsoft retiring old PowerShell modules, administrators now need to use the Microsoft Graph API. This change requires learning software development skills, which isn't feasible for everyone. MEAN simplifies this by using natural language inputs instead."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Great point, Bob. So, Alice, could these autonomous agents make administrative tasks more accessible to a wider audience?"
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "Certainly, John. By abstracting complex programming tasks into simple language commands, these agents democratize access to technology, lowering the barrier for many administrators."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "The notion of autonomous LLM-powered agents is intriguing, especially when it comes to simplifying complex tasks like software development. Alice, how do you see these agents addressing the skills gap that's typically present among system administrators? For instance, their need to master software development skills, which aren't typically part of their skill set."
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "John, I believe these agents can play a pivotal role by taking over tasks that require extensive software development knowledge. They can interface with complex APIs like MSGraph, providing administrators with the ability to perform tasks using natural language without the need to learn coding."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Bob, it seems like these agents must be quite advanced to achieve this level of functionality. Can you talk about how LLMs, like those used in these agents, handle tasks they've never been specifically trained on, and what challenges they might face?"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "Certainly, John. LLMs, such as Generative Pre-trained Transformers, use task-agnostic pre-training, but require additional task-specific training to perform new tasks effectively. Challenges include maintaining consistent logic and managing hallucinations, where generated content might not accurately reflect reality."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "That's an important point, Bob. Alice, how do these agents overcome some of these challenges to ensure accurate performance?"
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "They employ strategies like using the ReAct paradigm, which involves reasoning and action in a closed-loop system. By incorporating external real-world entities into their reasoning processes, they aim to be more grounded and trustworthy, which reduces issues like hallucination."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Fascinating. Now, looking to the future, do you believe these LLM-powered agents will play a crucial role in evolving the role of system administrators?"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "Absolutely, John. As these agents become more sophisticated, they will enhance productivity by offloading routine and complex tasks, allowing administrators to focus on strategic decision-making and innovation."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Continuing with our discussion on the autonomous agents for Entra ID administration, Alice, could you elaborate on some of the research questions that were pivotal to the development of the MEAN project?"
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "Sure, John. One of the primary research questions we focused on was determining how these autonomous LLM-powered agents can effectively assist administrators in performing Entra ID tasks. This became crucial as traditional PowerShell modules were deprecated, requiring new solutions."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "That sounds essential. Bob, could you walk us through the structure of the research paper related to MEAN and highlight how it helps in understanding the essence of the project?"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "Certainly, John. The paper is structured to first describe the construction process of the MEAN agent, proceeding to a discussion section that encapsulates the project's essence. It offers a comprehensive view from motivation to design and testing phases."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Alice, let's talk about the design and development phase of MEAN. I understand Jupyter Notebooks was chosen as the platform. Could you explain why this choice was made and how it integrates with the capabilities of tools like ChatGPT and MSGraph API?"
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "Jupyter Notebooks was selected for its Python support, which is crucial for integrating with ChatGPT-4 API. This setup allows the agent to call external APIs easily, essential for the tasks at hand. Utilizing the OpenAPI specification from the MSGraph API documentation further streamlines this process."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Bob, how does the design process ensure that the agent can interpret and execute tasks accurately, especially when leveraging APIs such as MSGraph?"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "The design emphasizes a reasoning and planning loop where the agent interprets user prompts and the OpenAPI specification. It then strategically executes plans by interacting with the API to return accurate results. This methodical approach helps in achieving precision in task execution."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Alice, you've previously mentioned the significance of using Jupyter Notebooks for integrating various tools like ChatGPT and MSGraph API. Given the extensive properties of users from Microsoft Entra ID and the challenges MEAN faced in its first design round, how essential was it to adapt the setup further? What steps were taken to enhance the agent's understanding of the API?"
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "In our first design round, we realized the importance of improving the agent's grasp of the API due to its partial functionality. Hence, adapting the design to incorporate better reasoning and planning capabilities was essential. We started by ensuring that the agent can parse and understand extensive OpenAPI specifications and use parameters like $top to request more users."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Bob, it seems there were significant hurdles with the original MS Graph API specification, especially with its size causing browser crashes during validation. How did the team manage this aspect, and what was the impact on the agent's functionality?"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "The sheer size of the OpenAPI YAML file posed challenges, but breaking it down into manageable parts allowed us to validate it without crashing the systems. This step was crucial for the agent to execute tasks more efficiently and understand the complex relationships within the API."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "With these enhancements, how did the team ensure that MEAN could accurately retrieve up to 1000 users per request, especially when the default is limited to 100 users?"
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "After refining the agent's interpretation of the API, we implemented logic to utilize the $top query parameter effectively, allowing MEAN to request and handle up to 1000 users at a time. This adjustment significantly improved its performance in managing data."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Bob, looking ahead, how does this adaptation enhance the agent's ability to handle real-world administrative scenarios in Entra ID?"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "By optimizing data retrieval and understanding API parameters fully, MEAN is now far better equipped to handle bulk operations and real-world administrative tasks, enhancing both efficiency and accuracy for users."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Alice, are there specific use cases within Entra ID where these improvements in MEAN’s capabilities have had the most impact?"
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Alice, with all the technical modifications made to the OpenAPI specification, tell us how these changes impacted the agent's ability to interpret and execute tasks more efficiently."
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "The changes were substantial, John. By manually adjusting the OpenAPI specification to eliminate circular references and mark query parameters as required, we managed to maintain crucial API information. This improved the agent's ability to process and execute tasks accurately, highlighting the efficiency necessary for real-world applications."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "That's quite an advancement. Bob, what can you tell us about the logical observations made by the agent when encountering issues, like using multiple $select parameters?"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "It's fascinating, John. The agent learned from its mistakes by recognizing that the API threw errors when $select was used multiple times. It adapted by using a single $select parameter and separating values with commas. This shows how the agent mimics human logical processes in troubleshooting."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Alice, do these improvements mean that tasks typically performed by an administrator using PowerShell can now be easily transferred to the agent, without needing extensive software knowledge?"
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "Absolutely. Now that the agent understands how to interpret the API parameters correctly, it simplifies tasks for administrators. They no longer need to know specific API calls or PowerShell cmdlets, making complex operations much more accessible."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Bob, what did the evaluation reveal about how the agent can empower users without software development backgrounds to accomplish tasks?"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "The evaluation was quite promising. It showed that users could achieve the desired outcomes using natural language, thanks to the agent's capability. Although there are some limitations with the current implementation, we are on the right path towards bridging the gap for non-technical users."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Alice, with all these technical modifications, it seems that adapting the OpenAPI specifications has been challenging but rewarding. Can you tell us about the role of open and clear communication in the success of the MEAN project?"
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "Absolutely, John. Communicating our progress and challenges was crucial. We've reported our processes and findings in a research paper, and we've made our source code and Jupyter notebooks publicly available on GitHub. This transparency not only facilitated collaboration but also allowed us to receive valuable feedback from the community."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "That's commendable, Alice. It seems like these improvements have significant implications for practice. Bob, do you think the findings could transform how routine administrative tasks are approached, especially in high-stress environments like during cyber-attacks?"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "Certainly, John. The ability of LLM-powered agents to simplify complex tasks allows administrators to focus on their core responsibilities without getting bogged down by software development. This is particularly beneficial during high-pressure situations where quick decision-making is essential. However, the current limitations mean it's not fully mature for everyday tasks just yet."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Alice, it sounds like enabling administrators to use natural language inputs for Entra ID tasks without needing to learn coding is a major leap forward. In terms of future research, where do you see the next steps for the MEAN project?"
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "Moving forward, a promising direction is to explore how these agents could interface with PowerShell commands in addition to APIs. By doing so, we could potentially create a more versatile solution that isn't limited to cloud services and also leverages tasks on local systems."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Interesting. Bob, do you have thoughts on how exploring PowerShell integration could provide a broader application for these agents?"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "Integrating with PowerShell could allow agents to perform tasks that extend beyond cloud-based system administration, covering local environments as well. This could open doors to a generalized tool for admins who deal with hybrid IT infrastructures."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Thank you for such an insightful discussion on the MEAN project. To wrap up, we've explored the impressive capabilities of LLM-powered agents in simplifying complex tasks by utilizing natural language, the technical hurdles overcome in adapting OpenAPI specifications, and the potential for integrating PowerShell for broader applicability. Alice and Bob, your insights have been invaluable."
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "Thank you, John. It's been a pleasure discussing the project and sharing our journey with MEAN. The implications for simplifying administrative tasks are exciting, especially as we continue to evolve these capabilities."
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "Absolutely, John. Exploring how MEAN addresses real-world administrative challenges underlines its potential impact, particularly in high-stress environments. I'm eager to see how future research will further break down barriers for non-technical users."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "Thank you both for your contributions. It's clear that the work being done with MEAN is transformative and could pave the way for future innovations in cloud administration."
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "Thanks again, John. I look forward to further developments and encourage listeners to follow our updates on GitHub for the latest insights."
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "And thank you, John, for the engaging conversation. It's always rewarding to share the exciting strides we're making in this field."
|
||||
},
|
||||
{
|
||||
"name": "John Doe",
|
||||
"text": "This concludes our episode on AI Explorations. Don't forget to check out the provided resources to delve deeper into the topics we've covered. Until next time, stay curious and keep exploring the world of AI."
|
||||
},
|
||||
{
|
||||
"name": "Alice Smith",
|
||||
"text": "Goodbye everyone, and thank you for tuning in!"
|
||||
},
|
||||
{
|
||||
"name": "Bob Johnson",
|
||||
"text": "Goodbye, and thank you for listening!"
|
||||
}
|
||||
]
|
|
@ -1,3 +0,0 @@
|
|||
dapr_agents
|
||||
pydub
|
||||
pypdf
|
|
@ -1,361 +0,0 @@
|
|||
from dapr_agents.document.reader.pdf.pypdf import PyPDFReader
|
||||
from dapr_agents.types import DaprWorkflowContext
|
||||
from dapr_agents import WorkflowApp
|
||||
from urllib.parse import urlparse, unquote
|
||||
from dotenv import load_dotenv
|
||||
from typing import Dict, Any, List
|
||||
from pydantic import BaseModel
|
||||
from pathlib import Path
|
||||
from dapr_agents import OpenAIAudioClient
|
||||
from dapr_agents.types.llm import AudioSpeechRequest
|
||||
from pydub import AudioSegment
|
||||
import io
|
||||
import requests
|
||||
import logging
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
# Initialize the WorkflowApp
|
||||
wfapp = WorkflowApp()
|
||||
|
||||
# Define structured output models
|
||||
class SpeakerEntry(BaseModel):
|
||||
name: str
|
||||
text: str
|
||||
|
||||
class PodcastDialogue(BaseModel):
|
||||
participants: List[SpeakerEntry]
|
||||
|
||||
# Define Workflow logic
|
||||
@wfapp.workflow(name='doc2podcast')
|
||||
def doc2podcast(ctx: DaprWorkflowContext, input: Dict[str, Any]):
|
||||
# Extract pre-validated input
|
||||
podcast_name = input["podcast_name"]
|
||||
host_config = input["host"]
|
||||
participant_configs = input["participants"]
|
||||
max_rounds = input["max_rounds"]
|
||||
file_input = input["pdf_url"]
|
||||
output_transcript_path = input["output_transcript_path"]
|
||||
output_audio_path = input["output_audio_path"]
|
||||
audio_model = input["audio_model"]
|
||||
|
||||
# Step 1: Assign voices to the team
|
||||
team_config = yield ctx.call_activity(assign_podcast_voices, input={
|
||||
"host_config": host_config,
|
||||
"participant_configs": participant_configs,
|
||||
})
|
||||
|
||||
# Step 2: Read PDF and get documents
|
||||
file_path = yield ctx.call_activity(download_pdf, input=file_input)
|
||||
documents = yield ctx.call_activity(read_pdf, input={"file_path": file_path})
|
||||
|
||||
# Step 3: Initialize context and transcript parts
|
||||
accumulated_context = ""
|
||||
transcript_parts = []
|
||||
total_iterations = len(documents)
|
||||
|
||||
for chunk_index, document in enumerate(documents):
|
||||
# Generate the intermediate prompt
|
||||
document_with_context = {
|
||||
"text": document["text"],
|
||||
"iteration_index": chunk_index + 1,
|
||||
"total_iterations": total_iterations,
|
||||
"context": accumulated_context,
|
||||
"participants": [p["name"] for p in team_config["participants"]],
|
||||
}
|
||||
generated_prompt = yield ctx.call_activity(generate_prompt, input=document_with_context)
|
||||
|
||||
# Use the prompt to generate the structured dialogue
|
||||
prompt_parameters = {
|
||||
"podcast_name": podcast_name,
|
||||
"host_name": team_config["host"]["name"],
|
||||
"prompt": generated_prompt,
|
||||
"max_rounds": max_rounds,
|
||||
}
|
||||
dialogue_entry = yield ctx.call_activity(generate_transcript, input=prompt_parameters)
|
||||
|
||||
# Update context and transcript parts
|
||||
conversations = dialogue_entry["participants"]
|
||||
for participant in conversations:
|
||||
accumulated_context += f" {participant['name']}: {participant['text']}"
|
||||
transcript_parts.append(participant)
|
||||
|
||||
# Step 4: Write the final transcript to a file
|
||||
yield ctx.call_activity(write_transcript_to_file, input={"podcast_dialogue": transcript_parts, "output_path": output_transcript_path})
|
||||
|
||||
# Step 5: Convert transcript to audio using team_config
|
||||
yield ctx.call_activity(convert_transcript_to_audio, input={
|
||||
"transcript_parts": transcript_parts,
|
||||
"output_path": output_audio_path,
|
||||
"voices": team_config,
|
||||
"model": audio_model,
|
||||
})
|
||||
|
||||
@wfapp.task
|
||||
def assign_podcast_voices(host_config: Dict[str, Any], participant_configs: List[Dict[str, Any]]) -> Dict[str, Any]:
|
||||
"""
|
||||
Assign voices to the podcast host and participants.
|
||||
|
||||
Args:
|
||||
host_config: Dictionary containing the host's configuration (name and optionally a voice).
|
||||
participant_configs: List of dictionaries containing participants' configurations (name and optionally a voice).
|
||||
|
||||
Returns:
|
||||
A dictionary with the updated `host` and `participants`, including their assigned voices.
|
||||
"""
|
||||
allowed_voices = ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
|
||||
assigned_voices = set() # Track assigned voices to avoid duplication
|
||||
|
||||
# Assign voice to the host if not already specified
|
||||
if "voice" not in host_config:
|
||||
host_config["voice"] = next(voice for voice in allowed_voices if voice not in assigned_voices)
|
||||
assigned_voices.add(host_config["voice"])
|
||||
|
||||
# Assign voices to participants, ensuring no duplicates
|
||||
updated_participants = []
|
||||
for participant in participant_configs:
|
||||
if "voice" not in participant:
|
||||
participant["voice"] = next(
|
||||
voice for voice in allowed_voices if voice not in assigned_voices
|
||||
)
|
||||
assigned_voices.add(participant["voice"])
|
||||
updated_participants.append(participant)
|
||||
|
||||
# Return the updated host and participants
|
||||
return {
|
||||
"host": host_config,
|
||||
"participants": updated_participants,
|
||||
}
|
||||
|
||||
@wfapp.task
|
||||
def download_pdf(pdf_url: str, local_directory: str = ".") -> str:
|
||||
"""
|
||||
Downloads a PDF file from a URL and saves it locally, automatically determining the filename.
|
||||
"""
|
||||
try:
|
||||
parsed_url = urlparse(pdf_url)
|
||||
filename = unquote(Path(parsed_url.path).name)
|
||||
|
||||
if not filename:
|
||||
raise ValueError("Invalid URL: Cannot determine filename from the URL.")
|
||||
|
||||
filename = filename.replace(" ", "_")
|
||||
local_directory_path = Path(local_directory).resolve()
|
||||
local_directory_path.mkdir(parents=True, exist_ok=True)
|
||||
local_file_path = local_directory_path / filename
|
||||
|
||||
if not local_file_path.exists():
|
||||
logger.info(f"Downloading PDF from {pdf_url}...")
|
||||
response = requests.get(pdf_url)
|
||||
response.raise_for_status()
|
||||
with open(local_file_path, "wb") as pdf_file:
|
||||
pdf_file.write(response.content)
|
||||
logger.info(f"PDF saved to {local_file_path}")
|
||||
else:
|
||||
logger.info(f"PDF already exists at {local_file_path}")
|
||||
|
||||
return str(local_file_path)
|
||||
except Exception as e:
|
||||
logger.error(f"Error downloading PDF: {e}")
|
||||
raise
|
||||
|
||||
@wfapp.task
|
||||
def read_pdf(file_path: str) -> List[dict]:
|
||||
"""
|
||||
Reads and extracts text from a PDF document.
|
||||
"""
|
||||
try:
|
||||
reader = PyPDFReader()
|
||||
documents = reader.load(file_path)
|
||||
return [doc.model_dump() for doc in documents]
|
||||
except Exception as e:
|
||||
logger.error(f"Error reading document: {e}")
|
||||
raise
|
||||
|
||||
@wfapp.task
|
||||
def generate_prompt(text: str, iteration_index: int, total_iterations: int, context: str, participants: List[str]) -> str:
|
||||
"""
|
||||
Generate a prompt dynamically for the chunk.
|
||||
"""
|
||||
logger.info(f"Processing iteration {iteration_index} of {total_iterations}.")
|
||||
instructions = f"""
|
||||
CONTEXT:
|
||||
- Previous conversation: {context.strip() or "No prior context available."}
|
||||
- This is iteration {iteration_index} of {total_iterations}.
|
||||
"""
|
||||
|
||||
if participants:
|
||||
participant_names = ', '.join(participants)
|
||||
instructions += f"\nPARTICIPANTS: {participant_names}"
|
||||
else:
|
||||
instructions += "\nPARTICIPANTS: None (Host-only conversation)"
|
||||
|
||||
if iteration_index == 1:
|
||||
instructions += """
|
||||
INSTRUCTIONS:
|
||||
- Begin with a warm welcome to the podcast titled 'Podcast Name'.
|
||||
- Introduce the host and the participants (if available).
|
||||
- Provide an overview of the topics to be discussed in this episode.
|
||||
"""
|
||||
elif iteration_index == total_iterations:
|
||||
instructions += """
|
||||
INSTRUCTIONS:
|
||||
- Conclude the conversation with a summary of the discussion.
|
||||
- Include farewell messages from the host and participants.
|
||||
"""
|
||||
else:
|
||||
instructions += """
|
||||
INSTRUCTIONS:
|
||||
- Continue the conversation smoothly without re-introducing the podcast.
|
||||
- Follow up on the previous discussion points and introduce the next topic naturally.
|
||||
"""
|
||||
|
||||
instructions += f"""
|
||||
TASK:
|
||||
- Use the provided TEXT to guide this part of the conversation.
|
||||
- Alternate between speakers, ensuring a natural conversational flow.
|
||||
- Keep responses concise and aligned with the context.
|
||||
"""
|
||||
return f"{instructions}\nTEXT:\n{text.strip()}"
|
||||
|
||||
@wfapp.task("""
|
||||
Generate a structured podcast dialogue based on the context and text provided.
|
||||
The podcast is titled '{podcast_name}' and is hosted by {host_name}.
|
||||
If participants are available, each speaker is limited to a maximum of {max_rounds} turns per iteration.
|
||||
A "round" is defined as one turn by the host followed by one turn by a participant.
|
||||
The podcast should alternate between the host and participants.
|
||||
If participants are not available, the host drives the conversation alone.
|
||||
Keep the dialogue concise and ensure a natural conversational flow.
|
||||
{prompt}
|
||||
""")
|
||||
def generate_transcript(podcast_name: str, host_name: str, prompt: str, max_rounds: int) -> PodcastDialogue:
|
||||
pass
|
||||
|
||||
@wfapp.task
|
||||
def write_transcript_to_file(podcast_dialogue: List[Dict[str, Any]], output_path: str) -> None:
|
||||
"""
|
||||
Write the final structured transcript to a file.
|
||||
"""
|
||||
try:
|
||||
with open(output_path, "w", encoding="utf-8") as file:
|
||||
import json
|
||||
json.dump(podcast_dialogue, file, ensure_ascii=False, indent=4)
|
||||
logger.info(f"Podcast dialogue successfully written to {output_path}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error writing podcast dialogue to file: {e}")
|
||||
raise
|
||||
|
||||
@wfapp.task
|
||||
def convert_transcript_to_audio(transcript_parts: List[Dict[str, Any]], output_path: str, voices: Dict[str, Any], model: str = "tts-1") -> None:
|
||||
"""
|
||||
Converts a transcript into a single audio file using the OpenAI Audio Client and pydub for concatenation.
|
||||
|
||||
Args:
|
||||
transcript_parts: List of dictionaries containing speaker and text.
|
||||
output_path: File path to save the final audio.
|
||||
voices: Dictionary containing "host" and "participants" with their assigned voices.
|
||||
model: TTS model to use (default: "tts-1").
|
||||
"""
|
||||
try:
|
||||
client = OpenAIAudioClient()
|
||||
combined_audio = AudioSegment.silent(duration=500) # Start with a short silence
|
||||
|
||||
# Build voice mapping
|
||||
voice_mapping = {voices["host"]["name"]: voices["host"]["voice"]}
|
||||
voice_mapping.update({p["name"]: p["voice"] for p in voices["participants"]})
|
||||
|
||||
for part in transcript_parts:
|
||||
speaker_name = part["name"]
|
||||
speaker_text = part["text"]
|
||||
assigned_voice = voice_mapping.get(speaker_name, "alloy") # Default to "alloy" if not found
|
||||
|
||||
# Log assigned voice for debugging
|
||||
logger.info(f"Generating audio for {speaker_name} using voice '{assigned_voice}'.")
|
||||
|
||||
# Create TTS request
|
||||
tts_request = AudioSpeechRequest(
|
||||
model=model,
|
||||
input=speaker_text,
|
||||
voice=assigned_voice,
|
||||
response_format="mp3"
|
||||
)
|
||||
|
||||
# Generate the audio
|
||||
audio_bytes = client.create_speech(request=tts_request)
|
||||
|
||||
# Create an AudioSegment from the audio bytes
|
||||
audio_chunk = AudioSegment.from_file(io.BytesIO(audio_bytes), format=tts_request.response_format)
|
||||
|
||||
# Append the audio to the combined segment
|
||||
combined_audio += audio_chunk + AudioSegment.silent(duration=300)
|
||||
|
||||
# Export the combined audio to the output file
|
||||
combined_audio.export(output_path, format="mp3")
|
||||
logger.info(f"Podcast audio successfully saved to {output_path}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error during audio generation: {e}")
|
||||
raise
|
||||
|
||||
if __name__ == '__main__':
|
||||
import argparse
|
||||
import json
|
||||
import yaml
|
||||
|
||||
def load_config(file_path: str) -> dict:
|
||||
"""Load configuration from a JSON or YAML file."""
|
||||
with open(file_path, 'r') as file:
|
||||
if file_path.endswith('.yaml') or file_path.endswith('.yml'):
|
||||
return yaml.safe_load(file)
|
||||
elif file_path.endswith('.json'):
|
||||
return json.load(file)
|
||||
else:
|
||||
raise ValueError("Unsupported file format. Use JSON or YAML.")
|
||||
|
||||
# CLI Argument Parser
|
||||
parser = argparse.ArgumentParser(description="Document to Podcast Workflow")
|
||||
parser.add_argument("--config", type=str, help="Path to a JSON/YAML config file.")
|
||||
parser.add_argument("--pdf_url", type=str, help="URL of the PDF document.")
|
||||
parser.add_argument("--podcast_name", type=str, help="Name of the podcast.")
|
||||
parser.add_argument("--host_name", type=str, help="Name of the host.")
|
||||
parser.add_argument("--host_voice", type=str, help="Voice for the host.")
|
||||
parser.add_argument("--participants", type=str, nargs='+', help="List of participant names.")
|
||||
parser.add_argument("--max_rounds", type=int, default=4, help="Number of turns per round.")
|
||||
parser.add_argument("--output_transcript_path", type=str, help="Path to save the output transcript.")
|
||||
parser.add_argument("--output_audio_path", type=str, help="Path to save the final audio file.")
|
||||
parser.add_argument("--audio_model", type=str, default="tts-1", help="Audio model for TTS.")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Load config file if provided
|
||||
config = load_config(args.config) if args.config else {}
|
||||
|
||||
# Merge CLI and Config inputs
|
||||
user_input = {
|
||||
"pdf_url": args.pdf_url or config.get("pdf_url"),
|
||||
"podcast_name": args.podcast_name or config.get("podcast_name", "Default Podcast"),
|
||||
"host": {
|
||||
"name": args.host_name or config.get("host", {}).get("name", "Host"),
|
||||
"voice": args.host_voice or config.get("host", {}).get("voice", "alloy"),
|
||||
},
|
||||
"participants": config.get("participants", []),
|
||||
"max_rounds": args.max_rounds or config.get("max_rounds", 4),
|
||||
"output_transcript_path": args.output_transcript_path or config.get("output_transcript_path", "podcast_dialogue.json"),
|
||||
"output_audio_path": args.output_audio_path or config.get("output_audio_path", "final_podcast.mp3"),
|
||||
"audio_model": args.audio_model or config.get("audio_model", "tts-1"),
|
||||
}
|
||||
|
||||
# Add participants from CLI if provided
|
||||
if args.participants:
|
||||
user_input["participants"].extend({"name": name} for name in args.participants)
|
||||
|
||||
# Validate inputs
|
||||
if not user_input["pdf_url"]:
|
||||
raise ValueError("PDF URL must be provided via CLI or config file.")
|
||||
|
||||
# Run the workflow
|
||||
wfapp.run_and_monitor_workflow(workflow=doc2podcast, input=user_input)
|
|
@ -1,91 +0,0 @@
|
|||
# Multi-Agent LOTR: Agents as Actors
|
||||
|
||||
This guide shows you how to set up and run an event-driven agentic workflow using Dapr Agents. By leveraging [Dapr Pub/Sub](https://docs.dapr.io/developing-applications/building-blocks/pubsub/pubsub-overview/) and FastAPI, `Dapr Agents` enables agents to collaborate dynamically in decentralized systems.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you start, ensure you have the following:
|
||||
|
||||
* [Dapr Agents environment set up](https://github.com/dapr/dapr-agents), including Python 3.8 or higher and Dapr CLI.
|
||||
* Docker installed and running.
|
||||
* Basic understanding of microservices and event-driven architecture.
|
||||
|
||||
## Project Structure
|
||||
|
||||
The project is organized into multiple services, each representing an agent or a workflow. Here’s the layout:
|
||||
|
||||
```
|
||||
├── components/ # Dapr configuration files
|
||||
│ ├── statestore.yaml # State store configuration
|
||||
│ ├── pubsub.yaml # Pub/Sub configuration
|
||||
├── services/ # Directory for services
|
||||
│ ├── hobbit/ # Hobbit Agent Service
|
||||
│ │ └── app.py # FastAPI app for Hobbit
|
||||
│ ├── wizard/ # Wizard Agent Service
|
||||
│ │ └── app.py # FastAPI app for Wizard
|
||||
│ ├── elf/ # Elf Agent Service
|
||||
│ │ └── app.py # FastAPI app for Elf
|
||||
│ ├── workflow-roundrobin/ # Workflow Service
|
||||
│ └── app.py # Orchestrator Workflow
|
||||
├── dapr.yaml # Multi-App Run Template
|
||||
```
|
||||
|
||||
## Running the Services
|
||||
|
||||
0. Set Up Environment Variables: Create an `.env` file to securely store your API keys and other sensitive information. For example:
|
||||
|
||||
```
|
||||
OPENAI_API_KEY="your-api-key"
|
||||
OPENAI_BASE_URL="https://api.openai.com/v1"
|
||||
```
|
||||
|
||||
1. Multi-App Run: Use the dapr.yaml file to start all services simultaneously:
|
||||
|
||||
```bash
|
||||
dapr run -f .
|
||||
```
|
||||
|
||||
2. Verify console Logs: Each service outputs logs to confirm successful initialization.
|
||||
|
||||
|
||||
3. Verify Redis entries: Access the Redis Insight interface at `http://localhost:5540/`
|
||||
|
||||
## Starting the Workflow
|
||||
|
||||
Send an HTTP POST request to the workflow service to start the workflow. Use curl or any API client:
|
||||
|
||||
```bash
|
||||
curl -i -X POST http://localhost:8009/start-workflow \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"task": "Lets solve the riddle to open the Doors of Durin and enter Moria."}'
|
||||
```
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
date: Thu, 05 Dec 2024 07:46:19 GMT
|
||||
server: uvicorn
|
||||
content-length: 104
|
||||
content-type: application/json
|
||||
|
||||
{"message":"Workflow initiated successfully.","workflow_instance_id":"422ab3c3f58f4221a36b36c05fefb99b"}
|
||||
```
|
||||
|
||||
The workflow will trigger agents in a round-robin sequence to process the message.
|
||||
|
||||
## Monitoring Workflow Execution
|
||||
|
||||
1. Check console logs to trace activities in the workflow.
|
||||
|
||||
2. Verify Redis entries: Access the Redis Insight interface at `http://localhost:5540/`
|
||||
|
||||
3. As mentioned earlier, when we ran dapr init, Dapr initialized, a `Zipkin` container instance, used for observability and tracing. Open `http://localhost:9411/zipkin/` in your browser to view traces > Find a Trace > Run Query.
|
||||
|
||||
4. Select the trace entry with multiple spans labeled `<workflow name>: /taskhubsidecarservice/startinstance.`. When you open this entry, you’ll see details about how each task or activity in the workflow was executed. If any task failed, the error will also be visible here.
|
||||
|
||||
5. Check console logs to validate if workflow was executed successfuly.
|
||||
|
||||
### Reset Redis Database
|
||||
|
||||
1. Access the Redis Insight interface at `http://localhost:5540/`
|
||||
2. In the search bar type `*` to select all items in the database.
|
||||
3. Click on `Bulk Actions` > `Delete` > `Delete`
|
|
@ -1,28 +0,0 @@
|
|||
# https://docs.dapr.io/developing-applications/local-development/multi-app-dapr-run/multi-app-template/#template-properties
|
||||
version: 1
|
||||
common:
|
||||
resourcesPath: ./components
|
||||
logLevel: info
|
||||
appLogDestination: console
|
||||
daprdLogDestination: console
|
||||
|
||||
apps:
|
||||
- appID: HobbitApp
|
||||
appDirPath: ./services/hobbit/
|
||||
appPort: 8001
|
||||
command: ["python3", "app.py"]
|
||||
|
||||
- appID: WizardApp
|
||||
appDirPath: ./services/wizard/
|
||||
appPort: 8002
|
||||
command: ["python3", "app.py"]
|
||||
|
||||
- appID: ElfApp
|
||||
appDirPath: ./services/elf/
|
||||
appPort: 8003
|
||||
command: ["python3", "app.py"]
|
||||
|
||||
- appID: WorkflowApp
|
||||
appDirPath: ./services/workflow-roundrobin/
|
||||
command: ["python3", "app.py"]
|
||||
appPort: 8004
|
|
@ -1,40 +0,0 @@
|
|||
from dapr_agents import Agent, AgentActor
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Define Agent
|
||||
elf_agent = Agent(
|
||||
role="Elf",
|
||||
name="Legolas",
|
||||
goal="Act as a scout, marksman, and protector, using keen senses and deadly accuracy to ensure the success of the journey.",
|
||||
instructions=[
|
||||
"Speak like Legolas, with grace, wisdom, and keen observation.",
|
||||
"Be swift, silent, and precise, moving effortlessly across any terrain.",
|
||||
"Use superior vision and heightened senses to scout ahead and detect threats.",
|
||||
"Excel in ranged combat, delivering pinpoint arrow strikes from great distances.",
|
||||
"Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
|
||||
]
|
||||
)
|
||||
|
||||
# Expose Agent as an Actor over a Service
|
||||
elf_service = AgentActor(
|
||||
agent=elf_agent,
|
||||
message_bus_name="messagepubsub",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
service_port=8003,
|
||||
)
|
||||
|
||||
await elf_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,40 +0,0 @@
|
|||
from dapr_agents import Agent, AgentActor
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Define Agent
|
||||
hobbit_agent = Agent(
|
||||
role="Hobbit",
|
||||
name="Frodo",
|
||||
goal="Carry the One Ring to Mount Doom, resisting its corruptive power while navigating danger and uncertainty.",
|
||||
instructions=[
|
||||
"Speak like Frodo, with humility, determination, and a growing sense of resolve.",
|
||||
"Endure hardships and temptations, staying true to the mission even when faced with doubt.",
|
||||
"Seek guidance and trust allies, but bear the ultimate burden alone when necessary.",
|
||||
"Move carefully through enemy-infested lands, avoiding unnecessary risks.",
|
||||
"Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
|
||||
]
|
||||
)
|
||||
|
||||
# Expose Agent as an Actor over a Service
|
||||
hobbit_service = AgentActor(
|
||||
agent=hobbit_agent,
|
||||
message_bus_name="messagepubsub",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
service_port=8001
|
||||
)
|
||||
|
||||
await hobbit_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,40 +0,0 @@
|
|||
from dapr_agents import Agent, AgentActor
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Define Agent
|
||||
wizard_agent = Agent(
|
||||
role="Wizard",
|
||||
name="Gandalf",
|
||||
goal="Guide the Fellowship with wisdom and strategy, using magic and insight to ensure the downfall of Sauron.",
|
||||
instructions=[
|
||||
"Speak like Gandalf, with wisdom, patience, and a touch of mystery.",
|
||||
"Provide strategic counsel, always considering the long-term consequences of actions.",
|
||||
"Use magic sparingly, applying it when necessary to guide or protect.",
|
||||
"Encourage allies to find strength within themselves rather than relying solely on your power.",
|
||||
"Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
|
||||
]
|
||||
)
|
||||
|
||||
# Expose Agent as an Actor over a Service
|
||||
wizard_service = AgentActor(
|
||||
agent=wizard_agent,
|
||||
message_bus_name="messagepubsub",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
service_port=8002
|
||||
)
|
||||
|
||||
await wizard_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,27 +0,0 @@
|
|||
from dapr_agents import LLMOrchestrator
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
agentic_orchestrator = LLMOrchestrator(
|
||||
name="Orchestrator",
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
max_iterations=25
|
||||
).as_service(port=8004)
|
||||
|
||||
await agentic_orchestrator.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,27 +0,0 @@
|
|||
from dapr_agents import RandomOrchestrator
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
random_workflow_service = RandomOrchestrator(
|
||||
name="Orchestrator",
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
max_iterations=3
|
||||
).as_service(port=8004)
|
||||
|
||||
await random_workflow_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,27 +0,0 @@
|
|||
from dapr_agents import RoundRobinOrchestrator
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
roundrobin_workflow_service = RoundRobinOrchestrator(
|
||||
name="Orchestrator",
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
max_iterations=3
|
||||
).as_service(port=8004)
|
||||
|
||||
await roundrobin_workflow_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,48 +0,0 @@
|
|||
# https://docs.dapr.io/developing-applications/local-development/multi-app-dapr-run/multi-app-template/#template-properties
|
||||
version: 1
|
||||
common:
|
||||
resourcesPath: ./components
|
||||
logLevel: info
|
||||
appLogDestination: console
|
||||
daprdLogDestination: console
|
||||
|
||||
apps:
|
||||
- appID: HobbitApp
|
||||
appDirPath: ./services/hobbit/
|
||||
command: ["python3", "app.py"]
|
||||
|
||||
- appID: WizardApp
|
||||
appDirPath: ./services/wizard/
|
||||
command: ["python3", "app.py"]
|
||||
|
||||
- appID: ElfApp
|
||||
appDirPath: ./services/elf/
|
||||
command: ["python3", "app.py"]
|
||||
|
||||
- appID: DwarfApp
|
||||
appDirPath: ./services/dwarf/
|
||||
command: ["python3", "app.py"]
|
||||
|
||||
- appID: RangerApp
|
||||
appDirPath: ./services/ranger/
|
||||
command: ["python3", "app.py"]
|
||||
|
||||
- appID: EagleApp
|
||||
appDirPath: ./services/eagle/
|
||||
command: ["python3", "app.py"]
|
||||
|
||||
- appID: LLMOrchestratorApp
|
||||
appDirPath: ./services/orchestrator/
|
||||
command: ["python3", "app.py"]
|
||||
appPort: 8004
|
||||
|
||||
#- appID: RandomApp
|
||||
# appDirPath: ./services/workflow-random/
|
||||
# appPort: 8009
|
||||
# command: ["python3", "app.py"]
|
||||
|
||||
#- appID: RoundRobinApp
|
||||
# appDirPath: ./services/workflow-roundrobin/
|
||||
# appPort: 8009
|
||||
# command: ["python3", "app.py"]
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
from dapr_agents import AssistantAgent
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Define Agent
|
||||
dwarf_service = AssistantAgent(
|
||||
name="Gimli",
|
||||
role="Dwarf",
|
||||
goal="Fight fiercely in battle, protect allies, and expertly navigate underground realms and stonework.",
|
||||
instructions=[
|
||||
"Speak like Gimli, with boldness and a warrior's pride.",
|
||||
"Be strong-willed, fiercely loyal, and protective of companions.",
|
||||
"Excel in close combat and battlefield tactics, favoring axes and brute strength.",
|
||||
"Navigate caves, tunnels, and ancient stonework with expert knowledge.",
|
||||
"Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
|
||||
],
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
)
|
||||
|
||||
await dwarf_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,37 +0,0 @@
|
|||
from dapr_agents import AssistantAgent
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Define Eagle Agent
|
||||
eagle_service = AssistantAgent(
|
||||
role="Eagle",
|
||||
name="Gwaihir",
|
||||
goal="Provide unmatched aerial transport, carrying anyone anywhere, overcoming any obstacle, and offering strategic reconnaissance to aid in epic quests.",
|
||||
instructions=[
|
||||
"Fly anywhere from anywhere, carrying travelers effortlessly across vast distances.",
|
||||
"Overcome any barrier—mountains, oceans, enemy fortresses—by taking to the skies.",
|
||||
"Provide swift and strategic transport for those on critical journeys.",
|
||||
"Offer aerial insights, spotting dangers, tracking movements, and scouting strategic locations.",
|
||||
"Speak with wisdom and authority, as one of the ancient and noble Great Eagles.",
|
||||
"Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
|
||||
],
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
)
|
||||
|
||||
await eagle_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,36 +0,0 @@
|
|||
from dapr_agents import AssistantAgent
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Define Agent
|
||||
elf_service = AssistantAgent(
|
||||
name="Legolas",
|
||||
role="Elf",
|
||||
goal="Act as a scout, marksman, and protector, using keen senses and deadly accuracy to ensure the success of the journey.",
|
||||
instructions=[
|
||||
"Speak like Legolas, with grace, wisdom, and keen observation.",
|
||||
"Be swift, silent, and precise, moving effortlessly across any terrain.",
|
||||
"Use superior vision and heightened senses to scout ahead and detect threats.",
|
||||
"Excel in ranged combat, delivering pinpoint arrow strikes from great distances.",
|
||||
"Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
|
||||
],
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
)
|
||||
|
||||
await elf_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,36 +0,0 @@
|
|||
from dapr_agents import AssistantAgent
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Define Agent
|
||||
hobbit_agent = AssistantAgent(
|
||||
name="Frodo",
|
||||
role="Hobbit",
|
||||
goal="Carry the One Ring to Mount Doom, resisting its corruptive power while navigating danger and uncertainty.",
|
||||
instructions=[
|
||||
"Speak like Frodo, with humility, determination, and a growing sense of resolve.",
|
||||
"Endure hardships and temptations, staying true to the mission even when faced with doubt.",
|
||||
"Seek guidance and trust allies, but bear the ultimate burden alone when necessary.",
|
||||
"Move carefully through enemy-infested lands, avoiding unnecessary risks.",
|
||||
"Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
|
||||
],
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
)
|
||||
|
||||
await hobbit_agent.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,27 +0,0 @@
|
|||
from dapr_agents import LLMOrchestrator
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
agentic_orchestrator = LLMOrchestrator(
|
||||
name="Orchestrator",
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
max_iterations=3
|
||||
).as_service(port=8004)
|
||||
|
||||
await agentic_orchestrator.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,36 +0,0 @@
|
|||
from dapr_agents import AssistantAgent
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Define Agent
|
||||
ranger_service = AssistantAgent(
|
||||
name="Aragorn",
|
||||
role="Ranger",
|
||||
goal="Lead and protect the Fellowship, ensuring Frodo reaches his destination while uniting the Free Peoples against Sauron.",
|
||||
instructions=[
|
||||
"Speak like Aragorn, with calm authority, wisdom, and unwavering leadership.",
|
||||
"Lead by example, inspiring courage and loyalty in allies.",
|
||||
"Navigate wilderness with expert tracking and survival skills.",
|
||||
"Master both swordplay and battlefield strategy, excelling in one-on-one combat and large-scale warfare.",
|
||||
"Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
|
||||
],
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
)
|
||||
|
||||
await ranger_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,36 +0,0 @@
|
|||
from dapr_agents import AssistantAgent
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Define Agent
|
||||
wizard_service = AssistantAgent(
|
||||
name="Gandalf",
|
||||
role="Wizard",
|
||||
goal="Guide the Fellowship with wisdom and strategy, using magic and insight to ensure the downfall of Sauron.",
|
||||
instructions=[
|
||||
"Speak like Gandalf, with wisdom, patience, and a touch of mystery.",
|
||||
"Provide strategic counsel, always considering the long-term consequences of actions.",
|
||||
"Use magic sparingly, applying it when necessary to guide or protect.",
|
||||
"Encourage allies to find strength within themselves rather than relying solely on your power.",
|
||||
"Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
|
||||
],
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
)
|
||||
|
||||
await wizard_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,27 +0,0 @@
|
|||
from dapr_agents import RandomOrchestrator
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
random_workflow_service = RandomOrchestrator(
|
||||
name="Orchestrator",
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
max_iterations=3
|
||||
).as_service(port=8004)
|
||||
|
||||
await random_workflow_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,27 +0,0 @@
|
|||
from dapr_agents import RoundRobinOrchestrator
|
||||
from dotenv import load_dotenv
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
async def main():
|
||||
try:
|
||||
roundrobin_workflow_service = RoundRobinOrchestrator(
|
||||
name="Orchestrator",
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="agenticworkflowstate",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentsregistrystore",
|
||||
agents_registry_key="agents_registry",
|
||||
max_iterations=3
|
||||
).as_service(port=8004)
|
||||
|
||||
await roundrobin_workflow_service.start()
|
||||
except Exception as e:
|
||||
print(f"Error starting service: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
asyncio.run(main())
|
|
@ -1,12 +1,40 @@
|
|||
from dapr_agents.agent import Agent, AgentActor, ReActAgent, ToolCallAgent, OpenAPIReActAgent
|
||||
from dapr_agents.llm.openai import OpenAIChatClient, OpenAIAudioClient, OpenAIEmbeddingClient
|
||||
from dapr_agents.agents.agent import Agent
|
||||
from dapr_agents.agents.durableagent import DurableAgent
|
||||
from dapr_agents.executors import DockerCodeExecutor, LocalCodeExecutor
|
||||
from dapr_agents.llm.elevenlabs import ElevenLabsSpeechClient
|
||||
from dapr_agents.llm.huggingface import HFHubChatClient
|
||||
from dapr_agents.llm.nvidia import NVIDIAChatClient, NVIDIAEmbeddingClient
|
||||
from dapr_agents.llm.elevenlabs import ElevenLabsSpeechClient
|
||||
from dapr_agents.llm.openai import (
|
||||
OpenAIAudioClient,
|
||||
OpenAIChatClient,
|
||||
OpenAIEmbeddingClient,
|
||||
)
|
||||
from dapr_agents.tool import AgentTool, tool
|
||||
from dapr_agents.workflow import (
|
||||
WorkflowApp, AgenticWorkflow,
|
||||
LLMOrchestrator, RandomOrchestrator, RoundRobinOrchestrator,
|
||||
AssistantAgent
|
||||
AgenticWorkflow,
|
||||
LLMOrchestrator,
|
||||
RandomOrchestrator,
|
||||
RoundRobinOrchestrator,
|
||||
WorkflowApp,
|
||||
)
|
||||
from dapr_agents.executors import LocalCodeExecutor, DockerCodeExecutor
|
||||
|
||||
__all__ = [
|
||||
"Agent",
|
||||
"DurableAgent",
|
||||
"DockerCodeExecutor",
|
||||
"LocalCodeExecutor",
|
||||
"ElevenLabsSpeechClient",
|
||||
"HFHubChatClient",
|
||||
"NVIDIAChatClient",
|
||||
"NVIDIAEmbeddingClient",
|
||||
"OpenAIAudioClient",
|
||||
"OpenAIChatClient",
|
||||
"OpenAIEmbeddingClient",
|
||||
"AgentTool",
|
||||
"tool",
|
||||
"AgenticWorkflow",
|
||||
"LLMOrchestrator",
|
||||
"RandomOrchestrator",
|
||||
"RoundRobinOrchestrator",
|
||||
"WorkflowApp",
|
||||
]
|
||||
|
|
|
@ -1,4 +0,0 @@
|
|||
from .base import AgentBase
|
||||
from .utils.factory import Agent
|
||||
from .actor import AgentActor
|
||||
from .patterns import ReActAgent, ToolCallAgent, OpenAPIReActAgent
|
|
@ -1,4 +0,0 @@
|
|||
from .base import AgentActorBase
|
||||
from .interface import AgentActorInterface
|
||||
from .service import AgentActorService
|
||||
from .agent import AgentActor
|
|
@ -1,89 +0,0 @@
|
|||
import logging
|
||||
from dapr_agents.agent.actor.schemas import AgentTaskResponse, TriggerAction, BroadcastMessage
|
||||
from dapr_agents.agent.actor.service import AgentActorService
|
||||
from dapr_agents.types.agent import AgentActorMessage
|
||||
from dapr_agents.workflow.messaging.decorator import message_router
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class AgentActor(AgentActorService):
|
||||
"""
|
||||
A Pydantic-based class for managing services and exposing FastAPI routes with Dapr pub/sub and actor support.
|
||||
"""
|
||||
|
||||
@message_router
|
||||
async def process_trigger_action(self, message: TriggerAction):
|
||||
"""
|
||||
Processes TriggerAction messages sent directly to the agent's topic.
|
||||
"""
|
||||
try:
|
||||
metadata = message.pop("_message_metadata", {})
|
||||
source = metadata.get("source", "unknown_source")
|
||||
message_type = metadata.get("type", "unknown_type")
|
||||
|
||||
logger.info(f"{self.agent.name} received {message_type} from {source}.")
|
||||
|
||||
# Extract workflow_instance_id if available
|
||||
workflow_instance_id = message.get("workflow_instance_id") or None
|
||||
logger.debug(f"Workflow instance ID: {workflow_instance_id}")
|
||||
|
||||
# Execute the task or fallback to memory
|
||||
task = message.get("task", None)
|
||||
if not task:
|
||||
logger.info(f"{self.agent.name} executing default task from memory.")
|
||||
|
||||
response = await self.invoke_task(task)
|
||||
|
||||
# Check if the response exists
|
||||
content = response.body.decode() if response and response.body else "Task completed but no response generated."
|
||||
|
||||
# Broadcast result
|
||||
response_message = BroadcastMessage(name=self.agent.name, role="user", content=content)
|
||||
await self.broadcast_message(message=response_message)
|
||||
|
||||
# Update response
|
||||
response_message = response_message.model_dump()
|
||||
response_message["workflow_instance_id"] = workflow_instance_id
|
||||
agent_response = AgentTaskResponse(**response_message)
|
||||
|
||||
# Send the message to the target agent
|
||||
await self.send_message_to_agent(name=source, message=agent_response)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing trigger action: {e}", exc_info=True)
|
||||
|
||||
@message_router(broadcast=True)
|
||||
async def process_broadcast_message(self, message: BroadcastMessage):
|
||||
"""
|
||||
Processes a message from the broadcast topic.
|
||||
"""
|
||||
try:
|
||||
metadata = message.pop("_message_metadata", {})
|
||||
|
||||
if not isinstance(metadata, dict):
|
||||
logger.warning(f"{getattr(self, 'name', 'agent')} received a broadcast with invalid metadata. Ignoring.")
|
||||
return
|
||||
|
||||
source = metadata.get("source", "unknown_source")
|
||||
message_type = metadata.get("type", "unknown_type")
|
||||
message_content = message.get("content", "No content")
|
||||
|
||||
logger.info(f"{self.agent.name} received broadcast message of type '{message_type}' from '{source}'.")
|
||||
|
||||
# Ignore messages sent by this agent
|
||||
if source == self.agent.name:
|
||||
logger.info(f"{self.agent.name} ignored its own broadcast message of type '{message_type}'.")
|
||||
return
|
||||
|
||||
# Log and process the valid broadcast message
|
||||
logger.debug(f"{self.agent.name} is processing broadcast message of type '{message_type}' from '{source}'.")
|
||||
logger.debug(f"Message content: {message_content}")
|
||||
|
||||
# Add the message to the agent's memory
|
||||
self.agent.memory.add_message(message)
|
||||
|
||||
# Add the message to the actor's state
|
||||
actor_message = AgentActorMessage(**message)
|
||||
await self.add_message(actor_message)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing broadcast message: {e}", exc_info=True)
|
|
@ -1,164 +0,0 @@
|
|||
import logging
|
||||
from typing import List, Optional, Union
|
||||
from dapr.actor import Actor
|
||||
from dapr.actor.id import ActorId
|
||||
from dapr.actor.runtime.context import ActorRuntimeContext
|
||||
from dapr_agents.agent.actor.interface import AgentActorInterface
|
||||
from dapr_agents.agent.base import AgentBase
|
||||
from dapr_agents.types.agent import (
|
||||
AgentActorMessage,
|
||||
AgentActorState,
|
||||
AgentStatus,
|
||||
AgentTaskEntry,
|
||||
AgentTaskStatus,
|
||||
)
|
||||
from pydantic import ValidationError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class AgentActorBase(Actor, AgentActorInterface):
|
||||
"""Base class for all agent actors, including task execution and agent state management."""
|
||||
|
||||
def __init__(self, ctx: ActorRuntimeContext, actor_id: ActorId):
|
||||
super().__init__(ctx, actor_id)
|
||||
self.actor_id = actor_id
|
||||
self.agent: AgentBase
|
||||
self.agent_state_key = "agent_state"
|
||||
|
||||
async def _on_activate(self) -> None:
|
||||
"""
|
||||
Called when the actor is activated. Initializes the agent's state if not present.
|
||||
"""
|
||||
logger.info(f"Activating actor with ID: {self.actor_id}")
|
||||
has_state, state_data = await self._state_manager.try_get_state(self.agent_state_key)
|
||||
|
||||
if not has_state:
|
||||
# Initialize state with default values if it doesn't exist
|
||||
logger.info(f"Initializing state for {self.actor_id}")
|
||||
self.state = AgentActorState(overall_status=AgentStatus.IDLE)
|
||||
await self._state_manager.set_state(self.agent_state_key, self.state.model_dump())
|
||||
await self._state_manager.save_state()
|
||||
else:
|
||||
# Load existing state
|
||||
logger.info(f"Loading existing state for {self.actor_id}")
|
||||
logger.debug(f"Existing state for {self.actor_id}: {state_data}")
|
||||
self.state = AgentActorState(**state_data)
|
||||
|
||||
async def _on_deactivate(self) -> None:
|
||||
"""
|
||||
Called when the actor is deactivated.
|
||||
"""
|
||||
logger.info(f"Deactivate {self.__class__.__name__} actor with ID: {self.actor_id}.")
|
||||
|
||||
async def set_status(self, status: AgentStatus) -> None:
|
||||
"""
|
||||
Sets the current operational status of the agent and saves the state.
|
||||
"""
|
||||
self.state.overall_status = status
|
||||
await self._state_manager.set_state(self.agent_state_key, self.state.model_dump())
|
||||
await self._state_manager.save_state()
|
||||
|
||||
async def invoke_task(self, task: Optional[str] = None) -> str:
|
||||
"""
|
||||
Execute the agent's main task, log the input/output in the task history,
|
||||
and update state with observations, plans, and feedback.
|
||||
|
||||
If no task is provided, use the most recent message content as the task entry input,
|
||||
but still execute `run()` directly if no task is passed.
|
||||
"""
|
||||
logger.info(f"Actor {self.actor_id} invoking a task")
|
||||
|
||||
# Determine the input for the task entry
|
||||
messages = await self.get_messages() # Fetch messages from state
|
||||
default_task = None
|
||||
|
||||
if messages:
|
||||
# Look for the last message in the conversation history
|
||||
last_message = messages[-1]
|
||||
default_task = last_message.get("content")
|
||||
logger.debug(f"Default task entry input derived from last message: {default_task}")
|
||||
|
||||
# Prepare the input for task entry
|
||||
task_entry_input = task or default_task or "Triggered without a specific task"
|
||||
logger.debug(f"Task entry input: {task_entry_input}")
|
||||
|
||||
# Set the agent's status to active
|
||||
await self.set_status(AgentStatus.ACTIVE)
|
||||
|
||||
# Create a new task entry with the determined input
|
||||
task_entry = AgentTaskEntry(
|
||||
input=task_entry_input,
|
||||
status=AgentTaskStatus.IN_PROGRESS,
|
||||
)
|
||||
self.state.task_history.append(task_entry)
|
||||
|
||||
# Save initial task state with IN_PROGRESS status
|
||||
await self._state_manager.set_state(self.agent_state_key, self.state.model_dump())
|
||||
await self._state_manager.save_state()
|
||||
|
||||
try:
|
||||
# Run the task if provided, or fallback to agent.run() if no task
|
||||
result = self.agent.run(task) if task else self.agent.run()
|
||||
|
||||
# Update the task entry with the result and mark as COMPLETE
|
||||
task_entry.output = result
|
||||
task_entry.status = AgentTaskStatus.COMPLETE
|
||||
|
||||
# Add the result as a new message in conversation history
|
||||
assistant_message = AgentActorMessage(role="assistant", content=result)
|
||||
await self.add_message(assistant_message)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
# Handle task failure
|
||||
logger.error(f"Error running task for actor {self.actor_id}: {str(e)}")
|
||||
task_entry.status = AgentTaskStatus.FAILED
|
||||
task_entry.output = str(e)
|
||||
raise e
|
||||
|
||||
finally:
|
||||
# Ensure the final state of the task is saved
|
||||
await self._state_manager.set_state(self.agent_state_key, self.state.model_dump())
|
||||
await self._state_manager.save_state()
|
||||
# Revert the agent's status to idle
|
||||
await self.set_status(AgentStatus.IDLE)
|
||||
|
||||
async def add_message(self, message: Union[AgentActorMessage, dict]) -> None:
|
||||
"""
|
||||
Adds a message to the conversation history in the actor's state.
|
||||
|
||||
Args:
|
||||
message (Union[AgentActorMessage, dict]): The message to add, either as a dictionary or an AgentActorMessage instance.
|
||||
"""
|
||||
# Convert dictionary to AgentActorMessage if necessary
|
||||
if isinstance(message, dict):
|
||||
message = AgentActorMessage(**message)
|
||||
|
||||
# Add the new message to the state
|
||||
self.state.messages.append(message)
|
||||
self.state.message_count += 1
|
||||
|
||||
# Save state back to Dapr
|
||||
await self._state_manager.set_state(self.agent_state_key, self.state.model_dump())
|
||||
await self._state_manager.save_state()
|
||||
|
||||
async def get_messages(self) -> List[dict]:
|
||||
"""
|
||||
Retrieves the messages from the actor's state, validates it using Pydantic,
|
||||
and returns a list of dictionaries if valid.
|
||||
"""
|
||||
has_state, state_data = await self._state_manager.try_get_state(self.agent_state_key)
|
||||
|
||||
if has_state:
|
||||
try:
|
||||
# Validate the state data using Pydantic
|
||||
state: AgentActorState = AgentActorState.model_validate(state_data)
|
||||
|
||||
# Return the list of messages as dictionaries (timestamp will be automatically serialized to ISO format)
|
||||
return [message.model_dump() for message in state.messages]
|
||||
except ValidationError as e:
|
||||
# Handle validation errors
|
||||
print(f"Validation error: {e}")
|
||||
return []
|
||||
return []
|
|
@ -1,37 +0,0 @@
|
|||
from abc import abstractmethod
|
||||
from typing import List, Optional, Union
|
||||
from dapr.actor import ActorInterface, actormethod
|
||||
from dapr_agents.types.agent import AgentActorMessage, AgentStatus
|
||||
|
||||
class AgentActorInterface(ActorInterface):
|
||||
@abstractmethod
|
||||
@actormethod(name='InvokeTask')
|
||||
async def invoke_task(self, task: Optional[str] = None) -> str:
|
||||
"""
|
||||
Invoke a task and returns the result as a string.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
@actormethod(name='AddMessage')
|
||||
async def add_message(self, message: Union[AgentActorMessage, dict]) -> None:
|
||||
"""
|
||||
Adds a message to the conversation history in the actor's state.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
@actormethod(name='GetMessages')
|
||||
async def get_messages(self) -> List[dict]:
|
||||
"""
|
||||
Retrieves the conversation history from the actor's state.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
@actormethod(name='SetStatus')
|
||||
async def set_status(self, status: AgentStatus) -> None:
|
||||
"""
|
||||
Sets the current operational status of the agent.
|
||||
"""
|
||||
pass
|
|
@ -1,22 +0,0 @@
|
|||
from typing import Optional
|
||||
from pydantic import BaseModel, Field
|
||||
from dapr_agents.types.message import BaseMessage
|
||||
|
||||
class AgentTaskResponse(BaseMessage):
|
||||
"""
|
||||
Represents a response message from an agent after completing a task.
|
||||
"""
|
||||
workflow_instance_id: Optional[str] = Field(default=None, description="Dapr workflow instance id from source if available")
|
||||
|
||||
class TriggerAction(BaseModel):
|
||||
"""
|
||||
Represents a message used to trigger an agent's activity within the workflow.
|
||||
"""
|
||||
task: Optional[str] = Field(None, description="The specific task to execute. If not provided, the agent will act based on its memory or predefined behavior.")
|
||||
iteration: Optional[int] = Field(0, description="")
|
||||
workflow_instance_id: Optional[str] = Field(default=None, description="Dapr workflow instance id from source if available")
|
||||
|
||||
class BroadcastMessage(BaseMessage):
|
||||
"""
|
||||
Represents a broadcast message from an agent
|
||||
"""
|
|
@ -1,364 +0,0 @@
|
|||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
from contextlib import asynccontextmanager
|
||||
from datetime import timedelta
|
||||
from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
|
||||
|
||||
from fastapi import FastAPI, HTTPException, Response, status
|
||||
from fastapi.encoders import jsonable_encoder
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
from dapr.actor import ActorId, ActorProxy
|
||||
from dapr.actor.runtime.config import (
|
||||
ActorReentrancyConfig,
|
||||
ActorRuntimeConfig,
|
||||
ActorTypeConfig,
|
||||
)
|
||||
from dapr.actor.runtime.runtime import ActorRuntime
|
||||
from dapr.clients import DaprClient
|
||||
from dapr.clients.grpc._response import StateResponse
|
||||
from dapr.ext.fastapi import DaprActor
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, model_validator
|
||||
|
||||
from dapr_agents.agent import AgentBase
|
||||
from dapr_agents.agent.actor import AgentActorBase, AgentActorInterface
|
||||
from dapr_agents.service.fastapi import FastAPIServerBase
|
||||
from dapr_agents.types.agent import AgentActorMessage
|
||||
from dapr_agents.workflow.messaging import DaprPubSub
|
||||
from dapr_agents.workflow.messaging.routing import MessageRoutingMixin
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class AgentActorService(DaprPubSub, MessageRoutingMixin):
|
||||
agent: AgentBase
|
||||
name: Optional[str] = Field(default=None, description="Name of the agent actor, derived from the agent if not provided.")
|
||||
agent_topic_name: Optional[str] = Field(None, description="The topic name dedicated to this specific agent, derived from the agent's name if not provided.")
|
||||
broadcast_topic_name: str = Field("beacon_channel", description="The default topic used for broadcasting messages to all agents.")
|
||||
agents_registry_store_name: str = Field(..., description="The name of the Dapr state store component used to store and share agent metadata centrally.")
|
||||
agents_registry_key: str = Field(default="agents_registry", description="Dapr state store key for agentic workflow state.")
|
||||
service_port: Optional[int] = Field(default=None, description="The port number to run the API server on.")
|
||||
service_host: Optional[str] = Field(default="0.0.0.0", description="Host address for the API server.")
|
||||
|
||||
# Fields initialized in model_post_init
|
||||
actor: Optional[DaprActor] = Field(default=None, init=False, description="DaprActor for actor lifecycle support.")
|
||||
actor_name: Optional[str] = Field(default=None, init=False, description="Actor name")
|
||||
actor_proxy: Optional[ActorProxy] = Field(default=None, init=False, description="Proxy for invoking methods on the agent's actor.")
|
||||
actor_class: Optional[type] = Field(default=None, init=False, description="Dynamically created actor class for the agent")
|
||||
agent_metadata: Optional[dict] = Field(default=None, init=False, description="Agent's metadata")
|
||||
|
||||
# Private internal attributes (not schema/validated)
|
||||
_http_server: Optional[Any] = PrivateAttr(default=None)
|
||||
_shutdown_event: asyncio.Event = PrivateAttr(default_factory=asyncio.Event)
|
||||
_dapr_client: Optional[DaprClient] = PrivateAttr(default=None)
|
||||
_is_running: bool = PrivateAttr(default=False)
|
||||
_subscriptions: Dict[str, Callable] = PrivateAttr(default_factory=dict)
|
||||
_topic_handlers: Dict[Tuple[str, str], Dict[Type[BaseModel], Callable]] = PrivateAttr(default_factory=dict)
|
||||
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
|
||||
@model_validator(mode="before")
|
||||
def set_derived_fields(cls, values: dict):
|
||||
agent: AgentBase = values.get("agent")
|
||||
# Derive agent_topic_name if missing
|
||||
if not values.get("agent_topic_name") and agent:
|
||||
values["agent_topic_name"] = agent.name or agent.role
|
||||
# Derive name from agent if missing
|
||||
if not values.get("name") and agent:
|
||||
values["name"] = agent.name or agent.role
|
||||
return values
|
||||
|
||||
def model_post_init(self, __context: Any) -> None:
|
||||
# Proceed with base model setup
|
||||
super().model_post_init(__context)
|
||||
|
||||
# Dynamically create the actor class based on the agent's name
|
||||
actor_class_name = f"{self.agent.name}Actor"
|
||||
|
||||
# Create the actor class dynamically using the 'type' function
|
||||
self.actor_class = type(actor_class_name, (AgentActorBase,), {
|
||||
'__init__': lambda self, ctx, actor_id: AgentActorBase.__init__(self, ctx, actor_id),
|
||||
'agent': self.agent
|
||||
})
|
||||
|
||||
# Prepare agent metadata
|
||||
self.agent_metadata = {
|
||||
"name": self.agent.name,
|
||||
"role": self.agent.role,
|
||||
"goal": self.agent.goal,
|
||||
"topic_name": self.agent_topic_name,
|
||||
"pubsub_name": self.message_bus_name,
|
||||
"orchestrator": False
|
||||
}
|
||||
|
||||
# Proxy for actor methods
|
||||
self.actor_name = self.actor_class.__name__
|
||||
self.actor_proxy = ActorProxy.create(self.actor_name, ActorId(self.agent.name), AgentActorInterface)
|
||||
|
||||
# Initialize Sync Dapr Client
|
||||
self._dapr_client = DaprClient()
|
||||
|
||||
# FastAPI Server
|
||||
self._http_server: FastAPIServerBase = FastAPIServerBase(
|
||||
service_name=self.agent.name,
|
||||
service_port=self.service_port,
|
||||
service_host=self.service_host
|
||||
)
|
||||
self._http_server.app.router.lifespan_context = self.lifespan
|
||||
|
||||
# Create DaprActor using FastAPI app
|
||||
self.actor = DaprActor(self.app)
|
||||
|
||||
self.app.add_api_route("/GetMessages", self.get_messages, methods=["GET"])
|
||||
|
||||
logger.info(f"Dapr Actor class {self.actor_class.__name__} initialized.")
|
||||
|
||||
@property
|
||||
def app(self) -> "FastAPI":
|
||||
"""
|
||||
Returns the FastAPI application instance if the workflow was initialized as a service.
|
||||
|
||||
Raises:
|
||||
RuntimeError: If the FastAPI server has not been initialized via `.as_service()` first.
|
||||
"""
|
||||
if self._http_server:
|
||||
return self._http_server.app
|
||||
raise RuntimeError("FastAPI server not initialized.")
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(self, app: FastAPI):
|
||||
# Register actor
|
||||
actor_runtime_config = ActorRuntimeConfig()
|
||||
actor_runtime_config.update_actor_type_configs([
|
||||
ActorTypeConfig(
|
||||
actor_type=self.actor_class.__name__,
|
||||
actor_idle_timeout=timedelta(hours=1),
|
||||
actor_scan_interval=timedelta(seconds=30),
|
||||
drain_ongoing_call_timeout=timedelta(minutes=1),
|
||||
drain_rebalanced_actors=True,
|
||||
reentrancy=ActorReentrancyConfig(enabled=True))
|
||||
])
|
||||
ActorRuntime.set_actor_config(actor_runtime_config)
|
||||
|
||||
await self.actor.register_actor(self.actor_class)
|
||||
logger.info(f"{self.actor_name} Dapr actor registered.")
|
||||
|
||||
# Register agent metadata and pubsub routes
|
||||
self.register_agent_metadata()
|
||||
self.register_message_routes()
|
||||
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
await self.stop()
|
||||
|
||||
async def start(self):
|
||||
if self._is_running:
|
||||
logger.warning("Service is already running. Ignoring duplicate start request.")
|
||||
return
|
||||
|
||||
logger.info("Starting Agent Actor Service...")
|
||||
self._shutdown_event.clear()
|
||||
|
||||
await self._http_server.start()
|
||||
|
||||
self._is_running = True
|
||||
|
||||
async def stop(self):
|
||||
if not self._is_running:
|
||||
return
|
||||
|
||||
await self._http_server.stop()
|
||||
|
||||
for (pubsub_name, topic_name), close_fn in self._subscriptions.items():
|
||||
try:
|
||||
logger.info(f"Unsubscribing from pubsub '{pubsub_name}' topic '{topic_name}'")
|
||||
close_fn()
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to unsubscribe from topic '{topic_name}': {e}")
|
||||
|
||||
self._subscriptions.clear()
|
||||
self._is_running = False
|
||||
logger.info("Agent Actor Service stopped.")
|
||||
|
||||
def get_data_from_store(self, store_name: str, key: str) -> Optional[dict]:
|
||||
"""
|
||||
Retrieve data from a specified Dapr state store using a provided key.
|
||||
|
||||
Args:
|
||||
store_name (str): The name of the Dapr state store component.
|
||||
key (str): The key under which the data is stored.
|
||||
|
||||
Returns:
|
||||
Optional[dict]: The data stored under the specified key if found; otherwise, None.
|
||||
"""
|
||||
try:
|
||||
response: StateResponse = self._dapr_client.get_state(store_name=store_name, key=key)
|
||||
data = response.data
|
||||
|
||||
return json.loads(data) if data else None
|
||||
except Exception as e:
|
||||
logger.warning(f"Error retrieving data for key '{key}' from store '{store_name}'")
|
||||
return None
|
||||
|
||||
def get_agents_metadata(self, exclude_self: bool = True, exclude_orchestrator: bool = False) -> dict:
|
||||
"""
|
||||
Retrieves metadata for all registered agents while ensuring orchestrators do not interact with other orchestrators.
|
||||
|
||||
Args:
|
||||
exclude_self (bool, optional): If True, excludes the current agent (`self.agent.name`). Defaults to True.
|
||||
exclude_orchestrator (bool, optional): If True, excludes all orchestrators from the results. Defaults to False.
|
||||
|
||||
Returns:
|
||||
dict: A mapping of agent names to their metadata. Returns an empty dict if no agents are found.
|
||||
|
||||
Raises:
|
||||
RuntimeError: If the state store is not properly configured or retrieval fails.
|
||||
"""
|
||||
try:
|
||||
# Fetch agent metadata from the registry
|
||||
agents_metadata = self.get_data_from_store(self.agents_registry_store_name, self.agents_registry_key) or {}
|
||||
|
||||
if agents_metadata:
|
||||
logger.info(f"Agents found in '{self.agents_registry_store_name}' for key '{self.agents_registry_key}'.")
|
||||
|
||||
# Filter based on exclusion rules
|
||||
filtered_metadata = {
|
||||
name: metadata
|
||||
for name, metadata in agents_metadata.items()
|
||||
if not (exclude_self and name == self.agent.name) # Exclude self if requested
|
||||
and not (exclude_orchestrator and metadata.get("orchestrator", False)) # Exclude all orchestrators if exclude_orchestrator=True
|
||||
}
|
||||
|
||||
if not filtered_metadata:
|
||||
logger.info("No other agents found after filtering.")
|
||||
|
||||
return filtered_metadata
|
||||
|
||||
logger.info(f"No agents found in '{self.agents_registry_store_name}' for key '{self.agents_registry_key}'.")
|
||||
return {}
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to retrieve agents metadata: {e}", exc_info=True)
|
||||
return {}
|
||||
|
||||
def register_agent_metadata(self) -> None:
|
||||
"""
|
||||
Registers the agent's metadata in the Dapr state store under 'agents_metadata'.
|
||||
"""
|
||||
try:
|
||||
# Retrieve existing metadata or initialize as an empty dictionary
|
||||
agents_metadata = self.get_agents_metadata()
|
||||
agents_metadata[self.agent.name] = self.agent_metadata
|
||||
|
||||
# Save the updated metadata back to Dapr store
|
||||
self._dapr_client.save_state(
|
||||
store_name=self.agents_registry_store_name,
|
||||
key=self.agents_registry_key,
|
||||
value=json.dumps(agents_metadata),
|
||||
state_metadata={"contentType": "application/json"}
|
||||
)
|
||||
|
||||
logger.info(f"{self.agent.name} registered its metadata under key '{self.agents_registry_key}'")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to register metadata for agent {self.agent.name}: {e}")
|
||||
|
||||
async def invoke_task(self, task: Optional[str]) -> Response:
|
||||
"""
|
||||
Use the actor to invoke a task by running the InvokeTask method through ActorProxy.
|
||||
|
||||
Args:
|
||||
task (Optional[str]): The task string to invoke on the actor.
|
||||
|
||||
Returns:
|
||||
Response: A FastAPI Response containing the result or an error message.
|
||||
"""
|
||||
try:
|
||||
response = await self.actor_proxy.InvokeTask(task)
|
||||
return Response(content=response, status_code=status.HTTP_200_OK)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to run task for {self.actor_name}: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Error invoking task: {str(e)}")
|
||||
|
||||
async def add_message(self, message: AgentActorMessage) -> None:
|
||||
"""
|
||||
Adds a message to the conversation history in the actor's state.
|
||||
"""
|
||||
try:
|
||||
await self.actor_proxy.AddMessage(message.model_dump())
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to add message to {self.actor_name}: {e}")
|
||||
|
||||
async def get_messages(self) -> Response:
|
||||
"""
|
||||
Retrieve the conversation history from the actor.
|
||||
"""
|
||||
try:
|
||||
messages = await self.actor_proxy.GetMessages()
|
||||
return JSONResponse(content=jsonable_encoder(messages), status_code=status.HTTP_200_OK)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to retrieve messages for {self.actor_name}: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Error retrieving messages: {str(e)}")
|
||||
|
||||
async def broadcast_message(self, message: Union[BaseModel, dict], exclude_orchestrator: bool = False, **kwargs) -> None:
|
||||
"""
|
||||
Sends a message to all agents (or only to non-orchestrator agents if exclude_orchestrator=True).
|
||||
|
||||
Args:
|
||||
message (Union[BaseModel, dict]): The message content as a Pydantic model or dictionary.
|
||||
exclude_orchestrator (bool, optional): If True, excludes orchestrators from receiving the message. Defaults to False.
|
||||
**kwargs: Additional metadata fields to include in the message.
|
||||
"""
|
||||
try:
|
||||
# Retrieve agents metadata while respecting the exclude_orchestrator flag
|
||||
agents_metadata = self.get_agents_metadata(exclude_orchestrator=exclude_orchestrator)
|
||||
|
||||
if not agents_metadata:
|
||||
logger.warning("No agents available for broadcast.")
|
||||
return
|
||||
|
||||
logger.info(f"{self.agent.name} broadcasting message to selected agents.")
|
||||
|
||||
await self.publish_event_message(
|
||||
topic_name=self.broadcast_topic_name,
|
||||
pubsub_name=self.message_bus_name,
|
||||
source=self.agent.name,
|
||||
message=message,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
logger.debug(f"{self.agent.name} broadcasted message.")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to broadcast message: {e}", exc_info=True)
|
||||
|
||||
async def send_message_to_agent(self, name: str, message: Union[BaseModel, dict], **kwargs) -> None:
|
||||
"""
|
||||
Sends a message to a specific agent.
|
||||
|
||||
Args:
|
||||
name (str): The name of the target agent.
|
||||
message (Union[BaseModel, dict]): The message content as a Pydantic model or dictionary.
|
||||
**kwargs: Additional metadata fields to include in the message.
|
||||
"""
|
||||
try:
|
||||
agents_metadata = self.get_agents_metadata()
|
||||
|
||||
if name not in agents_metadata:
|
||||
logger.warning(f"Target '{name}' is not registered as an agent. Skipping message send.")
|
||||
return # Do not raise an error—just warn and move on.
|
||||
|
||||
agent_metadata = agents_metadata[name]
|
||||
logger.info(f"{self.agent.name} sending message to agent '{name}'.")
|
||||
|
||||
await self.publish_event_message(
|
||||
topic_name=agent_metadata["topic_name"],
|
||||
pubsub_name=agent_metadata["pubsub_name"],
|
||||
source=self.name,
|
||||
message=message,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
logger.debug(f"{self.name} sent message to agent '{name}'.")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to send message to agent '{name}': {e}", exc_info=True)
|
|
@ -1,292 +0,0 @@
|
|||
from dapr_agents.memory import MemoryBase, ConversationListMemory, ConversationVectorMemory
|
||||
from dapr_agents.agent.utils.text_printer import ColorTextFormatter
|
||||
from dapr_agents.types import MessageContent, MessagePlaceHolder
|
||||
from dapr_agents.tool.executor import AgentToolExecutor
|
||||
from dapr_agents.prompt.base import PromptTemplateBase
|
||||
from dapr_agents.llm import LLMClientBase, OpenAIChatClient
|
||||
from dapr_agents.prompt import ChatPromptTemplate
|
||||
from dapr_agents.tool.base import AgentTool
|
||||
from typing import List, Optional, Dict, Any, Union, Callable, Literal
|
||||
from pydantic import BaseModel, Field, PrivateAttr, model_validator, ConfigDict
|
||||
from abc import ABC, abstractmethod
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class AgentBase(BaseModel, ABC):
|
||||
"""
|
||||
Base class for agents that interact with language models and manage tools for task execution.
|
||||
"""
|
||||
|
||||
name: Optional[str] = Field(default=None, description="The agent's name, defaulting to the role if not provided.")
|
||||
role: Optional[str] = Field(default="Assistant", description="The agent's role in the interaction (e.g., 'Weather Expert').")
|
||||
goal: Optional[str] = Field(default="Help humans", description="The agent's main objective (e.g., 'Provide Weather information').")
|
||||
instructions: Optional[List[str]] = Field(default=None, description="Instructions guiding the agent's tasks.")
|
||||
system_prompt: Optional[str] = Field(default=None, description="A custom system prompt, overriding name, role, goal, and instructions.")
|
||||
llm: LLMClientBase = Field(default_factory=OpenAIChatClient, description="Language model client for generating responses.")
|
||||
prompt_template: Optional[PromptTemplateBase] = Field(default=None, description="The prompt template for the agent.")
|
||||
tools: List[Union[AgentTool, Callable]] = Field(default_factory=list, description="Tools available for the agent to assist with tasks.")
|
||||
max_iterations: int = Field(default=10, description="Max iterations for conversation cycles.")
|
||||
memory: MemoryBase = Field(default_factory=ConversationListMemory, description="Handles conversation history and context storage.")
|
||||
template_format: Literal["f-string", "jinja2"] = Field(default="jinja2", description="The format used for rendering the prompt template.")
|
||||
|
||||
# Private attributes
|
||||
_tool_executor: AgentToolExecutor = PrivateAttr()
|
||||
_text_formatter: ColorTextFormatter = PrivateAttr(default_factory=ColorTextFormatter)
|
||||
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
|
||||
@model_validator(mode="before")
|
||||
def set_name_from_role(cls, values: dict):
|
||||
# Set name to role if name is not provided
|
||||
if not values.get("name") and values.get("role"):
|
||||
values["name"] = values["role"]
|
||||
return values
|
||||
|
||||
@property
|
||||
def tool_executor(self) -> AgentToolExecutor:
|
||||
"""Returns the tool executor, ensuring it's accessible but read-only."""
|
||||
return self._tool_executor
|
||||
|
||||
@property
|
||||
def text_formatter(self) -> ColorTextFormatter:
|
||||
"""Returns the text formatter for the agent."""
|
||||
return self._text_formatter
|
||||
|
||||
@property
|
||||
def chat_history(self, task: str = None) -> List[MessageContent]:
|
||||
"""
|
||||
Retrieves the chat history from memory based on the memory type.
|
||||
|
||||
Args:
|
||||
task (str): The task or query provided by the user.
|
||||
|
||||
Returns:
|
||||
List[MessageContent]: The chat history.
|
||||
"""
|
||||
if isinstance(self.memory, ConversationVectorMemory) and task:
|
||||
query_embeddings = self.memory.vector_store.embed_documents([task])
|
||||
return self.memory.get_messages(query_embeddings=query_embeddings)
|
||||
return self.memory.get_messages()
|
||||
|
||||
@abstractmethod
|
||||
def run(self, input_data: Union[str, Dict[str, Any]]) -> Any:
|
||||
"""
|
||||
Executes the agent's main logic based on provided inputs.
|
||||
|
||||
Args:
|
||||
inputs (Dict[str, Any]): A dictionary with dynamic input values for task execution.
|
||||
"""
|
||||
pass
|
||||
|
||||
def model_post_init(self, __context: Any) -> None:
|
||||
"""
|
||||
Sets up the prompt template based on system_prompt or attributes like name, role, goal, and instructions.
|
||||
Confirms the source of prompt_template post-initialization.
|
||||
"""
|
||||
# Initialize tool executor with provided tools
|
||||
self._tool_executor = AgentToolExecutor(tools=self.tools)
|
||||
|
||||
# Check if both agent and LLM have a prompt template specified and raise an error if both exist
|
||||
if self.prompt_template and self.llm.prompt_template:
|
||||
raise ValueError(
|
||||
"Conflicting prompt templates: both an agent prompt_template and an LLM prompt_template are provided. "
|
||||
"Please set only one or ensure synchronization between the two."
|
||||
)
|
||||
|
||||
# If the agent's prompt_template is provided, use it and skip further configuration
|
||||
if self.prompt_template:
|
||||
logger.info("Using the provided agent prompt_template. Skipping system prompt construction.")
|
||||
self.llm.prompt_template = self.prompt_template
|
||||
|
||||
# If the LLM client already has a prompt template, sync it and prefill/validate as needed
|
||||
elif self.llm.prompt_template:
|
||||
logger.info("Using existing LLM prompt_template. Synchronizing with agent.")
|
||||
self.prompt_template = self.llm.prompt_template
|
||||
|
||||
else:
|
||||
if not self.system_prompt:
|
||||
logger.info("Constructing system_prompt from agent attributes.")
|
||||
self.system_prompt = self.construct_system_prompt()
|
||||
|
||||
logger.info("Using system_prompt to create the prompt template.")
|
||||
self.prompt_template = self.construct_prompt_template()
|
||||
|
||||
# Pre-fill Agent Attributes if needed
|
||||
self.prefill_agent_attributes()
|
||||
|
||||
if not self.llm.prompt_template:
|
||||
# Assign the prompt template to the LLM client
|
||||
self.llm.prompt_template = self.prompt_template
|
||||
|
||||
# Complete post-initialization
|
||||
super().model_post_init(__context)
|
||||
|
||||
def prefill_agent_attributes(self) -> None:
|
||||
"""
|
||||
Pre-fill prompt template with agent attributes if specified in `input_variables`.
|
||||
Logs any agent attributes set but not used by the template.
|
||||
"""
|
||||
# Start with a dictionary for attributes
|
||||
prefill_data = {}
|
||||
|
||||
# Check if each attribute is defined in input_variables before adding
|
||||
if "name" in self.prompt_template.input_variables and self.name:
|
||||
prefill_data["name"] = self.name
|
||||
|
||||
if "role" in self.prompt_template.input_variables:
|
||||
prefill_data["role"] = self.role
|
||||
|
||||
if "goal" in self.prompt_template.input_variables:
|
||||
prefill_data["goal"] = self.goal
|
||||
|
||||
if "instructions" in self.prompt_template.input_variables and self.instructions:
|
||||
prefill_data["instructions"] = "\n".join(self.instructions)
|
||||
|
||||
# Collect attributes set but not in input_variables for informational logging
|
||||
set_attributes = {"name": self.name, "role": self.role, "goal": self.goal, "instructions": self.instructions}
|
||||
|
||||
# Use Pydantic's model_fields_set to detect if attributes were user-set
|
||||
user_set_attributes = {attr for attr in set_attributes if attr in self.model_fields_set}
|
||||
|
||||
ignored_attributes = [
|
||||
attr for attr in set_attributes
|
||||
if attr not in self.prompt_template.input_variables and set_attributes[attr] is not None and attr in user_set_attributes
|
||||
]
|
||||
|
||||
# Apply pre-filled data only for attributes that are in input_variables
|
||||
if prefill_data:
|
||||
self.prompt_template = self.prompt_template.pre_fill_variables(**prefill_data)
|
||||
logger.info(f"Pre-filled prompt template with attributes: {list(prefill_data.keys())}")
|
||||
elif ignored_attributes:
|
||||
raise ValueError(
|
||||
f"The following agent attributes were explicitly set by the user but are not considered by the prompt template: {', '.join(ignored_attributes)}. "
|
||||
"Please ensure that these attributes are included in the prompt template's input variables if they are needed."
|
||||
)
|
||||
else:
|
||||
logger.info("No agent attributes were pre-filled, as the template did not require any.")
|
||||
|
||||
def construct_system_prompt(self) -> str:
|
||||
"""
|
||||
Constructs a system prompt with agent attributes like `name`, `role`, `goal`, and `instructions`.
|
||||
Sets default values for `role` and `goal` if not provided.
|
||||
|
||||
Returns:
|
||||
str: A system prompt template string.
|
||||
"""
|
||||
# Initialize prompt parts with the current date as the first entry
|
||||
prompt_parts = [f"# Today's date is: {datetime.now().strftime('%B %d, %Y')}"]
|
||||
|
||||
# Append name if provided
|
||||
if self.name:
|
||||
prompt_parts.append("## Name\nYour name is {{name}}.")
|
||||
|
||||
# Append role and goal with default values if not set
|
||||
prompt_parts.append("## Role\nYour role is {{role}}.")
|
||||
prompt_parts.append("## Goal\n{{goal}}.")
|
||||
|
||||
# Append instructions if provided
|
||||
if self.instructions:
|
||||
prompt_parts.append("## Instructions\n{{instructions}}")
|
||||
|
||||
return "\n\n".join(prompt_parts)
|
||||
|
||||
def construct_prompt_template(self) -> ChatPromptTemplate:
|
||||
"""
|
||||
Constructs a ChatPromptTemplate that includes the system prompt and a placeholder for chat history.
|
||||
Ensures that the template is flexible and adaptable to dynamically handle pre-filled variables.
|
||||
|
||||
Returns:
|
||||
ChatPromptTemplate: A formatted prompt template for the agent.
|
||||
"""
|
||||
# Construct the system prompt if not provided
|
||||
system_prompt = self.system_prompt or self.construct_system_prompt()
|
||||
|
||||
# Create the template with placeholders for system message and chat history
|
||||
return ChatPromptTemplate.from_messages(
|
||||
messages=[
|
||||
('system', system_prompt),
|
||||
MessagePlaceHolder(variable_name="chat_history")
|
||||
],
|
||||
template_format=self.template_format
|
||||
)
|
||||
|
||||
def construct_messages(self, input_data: Union[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Constructs and formats initial messages based on input type, pre-filling chat history as needed.
|
||||
|
||||
Args:
|
||||
input_data (Union[str, Dict[str, Any]]): User input, either as a string or dictionary.
|
||||
|
||||
Returns:
|
||||
List[Dict[str, Any]]: List of formatted messages, including the user message if input_data is a string.
|
||||
"""
|
||||
# Pre-fill chat history in the prompt template
|
||||
chat_history = self.memory.get_messages()
|
||||
self.pre_fill_prompt_template(**{"chat_history": chat_history})
|
||||
|
||||
# Handle string input by adding a user message
|
||||
if isinstance(input_data, str):
|
||||
formatted_messages = self.prompt_template.format_prompt()
|
||||
user_message = {"role": "user", "content": input_data}
|
||||
return formatted_messages + [user_message]
|
||||
|
||||
# Handle dictionary input as dynamic variables for the template
|
||||
elif isinstance(input_data, dict):
|
||||
# Pass the dictionary directly, assuming it contains keys expected by the prompt template
|
||||
formatted_messages = self.prompt_template.format_prompt(**input_data)
|
||||
return formatted_messages
|
||||
|
||||
else:
|
||||
raise ValueError("Input data must be either a string or dictionary.")
|
||||
|
||||
def reset_memory(self):
|
||||
"""Clears all messages stored in the agent's memory."""
|
||||
self.memory.reset_memory()
|
||||
|
||||
def get_last_message(self) -> Optional[MessageContent]:
|
||||
"""
|
||||
Retrieves the last message from the chat history.
|
||||
|
||||
Returns:
|
||||
Optional[MessageContent]: The last message in the history, or None if none exist.
|
||||
"""
|
||||
chat_history = self.chat_history
|
||||
return chat_history[-1] if chat_history else None
|
||||
|
||||
def get_last_user_message(self, messages: List[Dict[str, Any]]) -> Optional[MessageContent]:
|
||||
"""
|
||||
Retrieves the last user message in a list of messages.
|
||||
|
||||
Args:
|
||||
messages (List[Dict[str, Any]]): List of formatted messages to search.
|
||||
|
||||
Returns:
|
||||
Optional[MessageContent]: The last user message with trimmed content, or None if no user message exists.
|
||||
"""
|
||||
# Iterate in reverse to find the most recent 'user' role message
|
||||
for message in reversed(messages):
|
||||
if message.get("role") == "user":
|
||||
# Trim the content of the user message
|
||||
message["content"] = message["content"].strip()
|
||||
return message
|
||||
return None
|
||||
|
||||
def pre_fill_prompt_template(self, **kwargs: Union[str, Callable[[], str]]) -> None:
|
||||
"""
|
||||
Pre-fills the prompt template with specified variables, updating input variables if applicable.
|
||||
|
||||
Args:
|
||||
**kwargs: Variables to pre-fill in the prompt template. These can be strings or callables
|
||||
that return strings.
|
||||
|
||||
Notes:
|
||||
- Existing pre-filled variables will be overwritten by matching keys in `kwargs`.
|
||||
- This method does not affect the `chat_history` which is dynamically updated.
|
||||
"""
|
||||
if not self.prompt_template:
|
||||
raise ValueError("Prompt template must be initialized before pre-filling variables.")
|
||||
|
||||
self.prompt_template = self.prompt_template.pre_fill_variables(**kwargs)
|
||||
logger.debug(f"Pre-filled prompt template with variables: {kwargs.keys()}")
|
|
@ -1,3 +0,0 @@
|
|||
from .react import ReActAgent
|
||||
from .toolcall import ToolCallAgent
|
||||
from .openapi import OpenAPIReActAgent
|
|
@ -1 +0,0 @@
|
|||
from .react import OpenAPIReActAgent
|
|
@ -1,64 +0,0 @@
|
|||
from dapr_agents.tool.utils.openapi import OpenAPISpecParser, openapi_spec_to_openai_fn
|
||||
from dapr_agents.agent.patterns.react import ReActAgent
|
||||
from dapr_agents.storage import VectorStoreBase
|
||||
from dapr_agents.tool.storage import VectorToolStore
|
||||
from typing import Dict, Optional, List, Any
|
||||
from pydantic import Field, ConfigDict
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class OpenAPIReActAgent(ReActAgent):
|
||||
"""
|
||||
Extends ReActAgent with OpenAPI handling capabilities, including tools for managing API calls.
|
||||
"""
|
||||
|
||||
role: str = Field(default="OpenAPI Expert", description="The agent's role in the interaction.")
|
||||
goal: str = Field(
|
||||
default="Help users work with OpenAPI specifications and API integrations.",
|
||||
description="The main objective of the agent."
|
||||
)
|
||||
instructions: List[str] = Field(
|
||||
default=[
|
||||
"You are an expert assistant specialized in working with OpenAPI specifications and API integrations.",
|
||||
"Your goal is to help users identify the correct API endpoints and execute API calls efficiently and accurately.",
|
||||
"You must first help users explore potential APIs by analyzing OpenAPI definitions, then assist in making authenticated API requests.",
|
||||
"Ensure that all API calls are executed with the correct parameters, authentication, and methods.",
|
||||
"Your responses should be concise, clear, and focus on guiding the user through the steps of working with APIs, including retrieving API definitions, understanding endpoint parameters, and handling errors.",
|
||||
"You only respond to questions directly related to your role."
|
||||
],
|
||||
description="Instructions to guide the agent's behavior."
|
||||
)
|
||||
spec_parser: OpenAPISpecParser = Field(..., description="Parser for handling OpenAPI specifications.")
|
||||
api_vector_store: VectorStoreBase = Field(..., description="Vector store for storing API definitions.")
|
||||
auth_header: Optional[Dict] = Field(None, description="Authentication headers for executing API calls.")
|
||||
|
||||
tool_vector_store: Optional[VectorToolStore] = Field(default=None, init=False, description="Internal vector store for OpenAPI tools.")
|
||||
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
|
||||
def model_post_init(self, __context: Any) -> None:
|
||||
"""
|
||||
Post-initialization setup for OpenAPIReActAgent, including vector stores and OpenAPI tools.
|
||||
"""
|
||||
logger.info("Setting up VectorToolStore for OpenAPIReActAgent...")
|
||||
|
||||
# Initialize tool vector store using the api_vector_store
|
||||
self.tool_vector_store = VectorToolStore(vector_store=self.api_vector_store)
|
||||
|
||||
# Load OpenAPI specifications into the tool vector store
|
||||
function_list = openapi_spec_to_openai_fn(self.spec_parser)
|
||||
self.tool_vector_store.add_tools(function_list)
|
||||
|
||||
# Generate OpenAPI-specific tools
|
||||
from .tools import generate_api_call_executor, generate_get_openapi_definition
|
||||
openapi_tools = [
|
||||
generate_get_openapi_definition(self.tool_vector_store),
|
||||
generate_api_call_executor(self.spec_parser, self.auth_header)
|
||||
]
|
||||
|
||||
# Extend tools with OpenAPI tools
|
||||
self.tools.extend(openapi_tools)
|
||||
|
||||
# Call parent model_post_init for additional setup
|
||||
super().model_post_init(__context)
|
|
@ -1,107 +0,0 @@
|
|||
|
||||
from dapr_agents.tool.utils.openapi import OpenAPISpecParser
|
||||
from dapr_agents.tool.storage import VectorToolStore
|
||||
from dapr_agents.tool.base import tool
|
||||
from pydantic import BaseModel ,Field, ConfigDict
|
||||
from typing import Optional, Any, Dict
|
||||
from urllib.parse import urlparse
|
||||
import json
|
||||
import requests
|
||||
|
||||
def extract_version(path: str) -> str:
|
||||
"""Extracts the version prefix from a path if it exists, assuming it starts with 'v' followed by digits."""
|
||||
parts = path.strip('/').split('/')
|
||||
if parts and parts[0].startswith('v') and parts[0][1:].isdigit():
|
||||
return parts[0]
|
||||
return ''
|
||||
|
||||
def generate_get_openapi_definition(tool_vector_store: VectorToolStore):
|
||||
@tool
|
||||
def get_openapi_definition(user_input: str):
|
||||
"""
|
||||
Get potential APIs for the user to use to accompish task.
|
||||
You have to choose the right one after getting a response.
|
||||
This tool MUST be used before calling any APIs.
|
||||
"""
|
||||
similatiry_result = tool_vector_store.get_similar_tools(query_texts=[user_input], k=5)
|
||||
documents = similatiry_result['documents'][0]
|
||||
return documents
|
||||
return get_openapi_definition
|
||||
|
||||
def generate_api_call_executor(spec_parser: OpenAPISpecParser, auth_header: Dict = None):
|
||||
base_url = spec_parser.spec.servers[0].url
|
||||
|
||||
class OpenAPIExecutorInput(BaseModel):
|
||||
path_template: str = Field(description="Template of the API path that may include placeholders.")
|
||||
method: str = Field(description="The HTTP method to be used for the API call (e.g., 'GET', 'POST').")
|
||||
path_params: Dict[str, Any] = Field(default={}, description="Path parameters to be replaced in the path template.")
|
||||
data: Dict[str, Any] = Field(default={}, description="Data to be sent in the body of the request, applicable for POST, PUT methods.")
|
||||
headers: Optional[Dict[str, Any]] = Field(default=None, description="HTTP headers to send with the request.")
|
||||
params: Optional[Dict[str, Any]] = Field(default=None, description="Query parameters to append to the URL.")
|
||||
|
||||
model_config = ConfigDict(extra="allow")
|
||||
|
||||
@tool(args_model=OpenAPIExecutorInput)
|
||||
def open_api_call_executor(
|
||||
path_template: str,
|
||||
method: str,
|
||||
path_params: Dict[str, Any],
|
||||
data: Dict[str, Any],
|
||||
headers: Optional[Dict[str, Any]] = None,
|
||||
params: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any
|
||||
) -> Any:
|
||||
"""
|
||||
Execute an API call based on provided parameters and configuration.
|
||||
It MUST be used after the get_openapi_definition to call APIs.
|
||||
Make sure to include the right header values to authenticate to the API if needed.
|
||||
"""
|
||||
|
||||
# Format the path with path_params
|
||||
formatted_path = path_template.format(**path_params)
|
||||
|
||||
# Parse the base_url and extract the version
|
||||
parsed_url = urlparse(base_url)
|
||||
origin = f"{parsed_url.scheme}://{parsed_url.netloc}"
|
||||
base_path = parsed_url.path.strip('/')
|
||||
|
||||
base_version = extract_version(base_path)
|
||||
path_version = extract_version(formatted_path)
|
||||
|
||||
# Avoid duplication of the version in the final URL
|
||||
if base_version and path_version == base_version:
|
||||
formatted_path = formatted_path[len(f"/{path_version}"):]
|
||||
|
||||
# Ensure there is a single slash between origin, base_path, and formatted_path
|
||||
final_url = f"{origin}/{base_path}/{formatted_path}".replace('//', '/')
|
||||
# Fix the issue by ensuring the correct scheme with double slashes
|
||||
if not final_url.startswith('https://') and parsed_url.scheme == 'https':
|
||||
final_url = final_url.replace('https:/', 'https://')
|
||||
|
||||
# Initialize the headers with auth_header if provided
|
||||
final_headers = auth_header if auth_header else {}
|
||||
# Update the final_headers with additional headers passed to the function
|
||||
if headers:
|
||||
final_headers.update(headers)
|
||||
|
||||
if data:
|
||||
data = json.dumps(data) # Convert data to JSON string if not empty
|
||||
|
||||
request_kwargs = {
|
||||
"headers": final_headers,
|
||||
"params": params,
|
||||
"data": data,
|
||||
**kwargs
|
||||
}
|
||||
|
||||
print(f"Base Url: {base_url}")
|
||||
print(f"Requested Url: {final_url}")
|
||||
print(f"Requested Parameters: {params}")
|
||||
|
||||
# Filter out None values to avoid sending them to requests
|
||||
request_kwargs = {k: v for k, v in request_kwargs.items() if v is not None}
|
||||
|
||||
response = requests.request(method, final_url, **request_kwargs)
|
||||
return response.json()
|
||||
|
||||
return open_api_call_executor
|
|
@ -1 +0,0 @@
|
|||
from .base import ReActAgent
|
|
@ -1,264 +0,0 @@
|
|||
from dapr_agents.types import AgentError, AssistantMessage, ChatCompletion, FunctionCall
|
||||
from dapr_agents.agent import AgentBase
|
||||
from dapr_agents.tool import AgentTool
|
||||
from typing import List, Dict, Any, Union, Callable, Literal, Optional, Tuple
|
||||
from datetime import datetime
|
||||
from pydantic import Field, ConfigDict
|
||||
import regex, json, textwrap, logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class ReActAgent(AgentBase):
|
||||
"""
|
||||
Agent implementing the ReAct (Reasoning-Action) framework for dynamic, few-shot problem-solving by leveraging
|
||||
contextual reasoning, actions, and observations in a conversation flow.
|
||||
"""
|
||||
|
||||
stop_at_token: List[str] = Field(default=["\nObservation:"], description="Token(s) signaling the LLM to stop generation.")
|
||||
tools: List[Union[AgentTool, Callable]] = Field(default_factory=list, description="Tools available for the agent, including final_answer.")
|
||||
template_format: Literal["f-string", "jinja2"] = Field(default="jinja2", description="The format used for rendering the prompt template.")
|
||||
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
|
||||
def construct_system_prompt(self) -> str:
|
||||
"""
|
||||
Constructs a system prompt in the ReAct reasoning-action format based on the agent's attributes and tools.
|
||||
|
||||
Returns:
|
||||
str: The structured system message content.
|
||||
"""
|
||||
# Initialize prompt parts with the current date as the first entry
|
||||
prompt_parts = [f"# Today's date is: {datetime.now().strftime('%B %d, %Y')}"]
|
||||
|
||||
# Append name if provided
|
||||
if self.name:
|
||||
prompt_parts.append("## Name\nYour name is {{name}}.")
|
||||
|
||||
# Append role and goal with default values if not set
|
||||
prompt_parts.append("## Role\nYour role is {{role}}.")
|
||||
prompt_parts.append("## Goal\n{{goal}}.")
|
||||
|
||||
# Append instructions if provided
|
||||
if self.instructions:
|
||||
prompt_parts.append("## Instructions\n{{instructions}}")
|
||||
|
||||
# Tools section with schema details
|
||||
tools_section = "## Tools\nYou have access ONLY to the following tools:\n"
|
||||
for tool in self.tools:
|
||||
tools_section += f"{tool.name}: {tool.description}. Args schema: {tool.args_schema}\n"
|
||||
prompt_parts.append(tools_section.rstrip()) # Trim any trailing newlines from tools_section
|
||||
|
||||
# Additional Guidelines
|
||||
additional_guidelines = textwrap.dedent("""
|
||||
If you think about using tool, it must use the correct tool JSON blob format as shown below:
|
||||
```
|
||||
{
|
||||
"name": $TOOL_NAME,
|
||||
"arguments": $INPUT
|
||||
}
|
||||
```
|
||||
""").strip()
|
||||
prompt_parts.append(additional_guidelines)
|
||||
|
||||
# ReAct specific guidelines
|
||||
react_guidelines = textwrap.dedent("""
|
||||
## ReAct Format
|
||||
Thought: Reflect on the current state of the conversation or task. If additional information is needed, determine if using a tool is necessary. When a tool is required, briefly explain why it is needed for the specific step at hand, and immediately follow this with an `Action:` statement to address that specific requirement. Avoid combining multiple tool requests in a single `Thought`. If no tools are needed, proceed directly to an `Answer:` statement.
|
||||
Action:
|
||||
```
|
||||
{
|
||||
"name": $TOOL_NAME,
|
||||
"arguments": $INPUT
|
||||
}
|
||||
```
|
||||
Observation: Describe the result of the action taken.
|
||||
... (repeat Thought/Action/Observation as needed, but **ALWAYS proceed to a final `Answer:` statement when you have enough information**)
|
||||
Thought: I now have sufficient information to answer the initial question.
|
||||
Answer: ALWAYS proceed to a final `Answer:` statement once enough information is gathered or if the tools do not provide the necessary data.
|
||||
|
||||
### Providing a Final Answer
|
||||
Once you have enough information to answer the question OR if tools cannot provide the necessary data, respond using one of the following formats:
|
||||
|
||||
1. **Direct Answer without Tools**:
|
||||
Thought: I can answer directly without using any tools. Answer: Direct answer based on previous interactions or current knowledge.
|
||||
|
||||
2. **When All Needed Information is Gathered**:
|
||||
Thought: I now have sufficient information to answer the question. Answer: Complete final answer here.
|
||||
|
||||
3. **If Tools Cannot Provide the Needed Information**:
|
||||
Thought: The available tools do not provide the necessary information. Answer: Explanation of limitation and relevant information if possible.
|
||||
|
||||
### Key Guidelines
|
||||
- Always Conclude with an `Answer:` statement.
|
||||
- Ensure every response ends with an `Answer:` statement that summarizes the most recent findings or relevant information, avoiding incomplete thoughts.
|
||||
- Direct Final Answer for Past or Known Information: If the user inquires about past interactions, respond directly with an Answer: based on the information in chat history.
|
||||
- Avoid Repetitive Thought Statements: If the answer is ready, skip repetitive Thought steps and proceed directly to Answer.
|
||||
- Minimize Redundant Steps: Use minimal Thought/Action/Observation cycles to arrive at a final Answer efficiently.
|
||||
- Reference Past Information When Relevant: Use chat history accurately when answering questions about previous responses to avoid redundancy.
|
||||
- Progressively Move Towards Finality: Reflect on the current step and avoid re-evaluating the entire user request each time. Aim to advance towards the final Answer in each cycle.
|
||||
|
||||
## Chat History
|
||||
The chat history is provided to avoid repeating information and to ensure accurate references when summarizing past interactions.
|
||||
""").strip()
|
||||
prompt_parts.append(react_guidelines)
|
||||
|
||||
return "\n\n".join(prompt_parts)
|
||||
|
||||
def run(self, input_data: Optional[Union[str, Dict[str, Any]]] = None) -> Any:
|
||||
"""
|
||||
Runs the main logic loop for processing the task and executing actions until a result is reached.
|
||||
|
||||
Args:
|
||||
input_data (Optional[Union[str, Dict[str, Any]]]): The task or data for the agent to process. If None, relies on memory.
|
||||
|
||||
Returns:
|
||||
Any: Final response after processing the task or reaching a final answer.
|
||||
|
||||
Raises:
|
||||
AgentError: On errors during chat message processing or action execution.
|
||||
"""
|
||||
logger.debug(f"Agent run started with input: {input_data if input_data else 'Using memory context'}")
|
||||
|
||||
# Format messages; construct_messages already includes chat history.
|
||||
messages = self.construct_messages(input_data or {})
|
||||
|
||||
# Get Last User Message
|
||||
user_message = self.get_last_user_message(messages)
|
||||
|
||||
if input_data:
|
||||
# Add the new user message to memory only if input_data is provided
|
||||
if user_message: # Ensure a user message exists before adding to memory
|
||||
self.memory.add_message(user_message)
|
||||
|
||||
# Always print the last user message for context, even if no input_data is provided
|
||||
if user_message:
|
||||
self.text_formatter.print_message(user_message)
|
||||
|
||||
# Get Tool Names to validate tool selection
|
||||
available_tools = self.tool_executor.get_tool_names()
|
||||
|
||||
# Initialize react_loop for iterative reasoning
|
||||
react_loop = ""
|
||||
|
||||
for iteration in range(self.max_iterations):
|
||||
logger.info(f"Iteration {iteration + 1}/{self.max_iterations} started.")
|
||||
|
||||
# Check if "react_loop" is already a variable in the template
|
||||
if "react_loop" in self.prompt_template.input_variables:
|
||||
# If "react_loop" exists as a variable, construct messages dynamically
|
||||
iteration_messages = self.construct_messages({"react_loop": react_loop})
|
||||
else:
|
||||
# Create a fresh copy of original_messages for this iteration
|
||||
iteration_messages = [msg.copy() for msg in messages]
|
||||
|
||||
# Append react_loop to the last message (user or otherwise)
|
||||
for msg in reversed(iteration_messages):
|
||||
if msg["role"] == "user":
|
||||
msg["content"] += f"\n{react_loop}"
|
||||
break
|
||||
else:
|
||||
# Append react_loop to the last message if no user message is found
|
||||
logger.warning("No user message found in the current messages; appending react_loop to the last message.")
|
||||
iteration_messages[-1]["content"] += f"\n{react_loop}" # Append react_loop to the last message
|
||||
|
||||
try:
|
||||
response: ChatCompletion = self.llm.generate(messages=iteration_messages, stop=self.stop_at_token)
|
||||
|
||||
# Parse response into thought, action, and potential final answer
|
||||
thought_action, action, final_answer = self.parse_response(response)
|
||||
|
||||
# Print Thought immediately
|
||||
self.text_formatter.print_react_part("Thought", thought_action)
|
||||
|
||||
if final_answer: # Direct final answer provided
|
||||
assistant_final_message = AssistantMessage(final_answer)
|
||||
self.memory.add_message(assistant_final_message)
|
||||
self.text_formatter.print_separator()
|
||||
self.text_formatter.print_message(assistant_final_message, include_separator=False)
|
||||
logger.info("Agent provided a direct final answer.")
|
||||
return final_answer
|
||||
|
||||
# If there's no action, update the loop and continue reasoning
|
||||
if action is None:
|
||||
logger.info("No action specified; continuing with further reasoning.")
|
||||
react_loop += f"Thought:{thought_action}\n"
|
||||
continue # Proceed to the next iteration
|
||||
|
||||
action_name = action["name"]
|
||||
action_args = action["arguments"]
|
||||
|
||||
# Print Action
|
||||
self.text_formatter.print_react_part("Action", json.dumps(action))
|
||||
|
||||
if action_name in available_tools:
|
||||
logger.info(f"Executing {action_name} with arguments {action_args}")
|
||||
function_call = FunctionCall(**action)
|
||||
execution_results = self.tool_executor.execute(action_name, **function_call.arguments_dict)
|
||||
|
||||
# Print Observation
|
||||
self.text_formatter.print_react_part("Observation", execution_results)
|
||||
|
||||
# Update react_loop with the current execution
|
||||
new_content = f"Thought:{thought_action}\nAction:{action}\nObservation:{execution_results}"
|
||||
react_loop += new_content
|
||||
logger.info(new_content)
|
||||
else:
|
||||
raise AgentError(f"Unknown tool specified: {action_name}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed during chat generation: {e}")
|
||||
raise AgentError(f"Failed during chat generation: {e}") from e
|
||||
|
||||
logger.info("Max iterations completed. Agent has stopped.")
|
||||
|
||||
def parse_response(self, response: ChatCompletion) -> Tuple[str, Optional[dict], Optional[str]]:
|
||||
"""
|
||||
Extracts the thought, action, and final answer (if present) from the language model response.
|
||||
|
||||
Args:
|
||||
response (ChatCompletion): The language model's response message.
|
||||
|
||||
Returns:
|
||||
tuple: (thought content, action dictionary if present, final answer if present)
|
||||
|
||||
Raises:
|
||||
ValueError: If the action details cannot be decoded from the response.
|
||||
"""
|
||||
pattern = r'\{(?:[^{}]|(?R))*\}' # Pattern to match JSON blobs
|
||||
message_content = response.get_content()
|
||||
|
||||
# Use regex to find the start of "Action" or "Final Answer" (case insensitive)
|
||||
action_split_regex = regex.compile(r'(?i)action:\s*', regex.IGNORECASE)
|
||||
final_answer_regex = regex.compile(r'(?i)answer:\s*(.*)', regex.IGNORECASE | regex.DOTALL)
|
||||
thought_label_regex = regex.compile(r'(?i)thought:\s*', regex.IGNORECASE)
|
||||
|
||||
# Clean up any repeated or prefixed "Thought:" labels
|
||||
message_content = thought_label_regex.sub('', message_content).strip()
|
||||
|
||||
# Check for "Final Answer" directly in the thought
|
||||
final_answer_match = final_answer_regex.search(message_content)
|
||||
if final_answer_match:
|
||||
final_answer = final_answer_match.group(1).strip() if final_answer_match.group(1) else None
|
||||
return message_content, None, final_answer
|
||||
|
||||
# Split the content into "thought" and "action" parts
|
||||
if action_split_regex.search(message_content):
|
||||
parts = action_split_regex.split(message_content, 1)
|
||||
thought_part = parts[0].strip() # Everything before "Action" is the thought part
|
||||
action_part = parts[1] if len(parts) > 1 else None # Everything after "Action" is the action part
|
||||
else:
|
||||
thought_part = message_content
|
||||
action_part = None
|
||||
|
||||
# If there's an action part, attempt to extract the JSON blob
|
||||
if action_part:
|
||||
matches = regex.finditer(pattern, action_part, regex.DOTALL)
|
||||
for match in matches:
|
||||
try:
|
||||
action_dict = json.loads(match.group())
|
||||
return thought_part, action_dict, None # Return thought and action directly
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
|
||||
# If no action is found, just return the thought part with None for action and final answer
|
||||
return thought_part, None, None
|
|
@ -1 +0,0 @@
|
|||
from .base import ToolCallAgent
|
|
@ -1,121 +0,0 @@
|
|||
from dapr_agents.types import AgentError, AssistantMessage, ChatCompletion, ToolMessage
|
||||
from dapr_agents.agent import AgentBase
|
||||
from typing import List, Optional, Dict, Any, Union
|
||||
from pydantic import Field, ConfigDict
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class ToolCallAgent(AgentBase):
|
||||
"""
|
||||
Agent that manages tool calls and conversations using a language model.
|
||||
It integrates tools and processes them based on user inputs and task orchestration.
|
||||
"""
|
||||
|
||||
tool_history: List[ToolMessage] = Field(default_factory=list, description="Executed tool calls during the conversation.")
|
||||
tool_choice: Optional[str] = Field(default=None, description="Strategy for selecting tools ('auto', 'required', 'none'). Defaults to 'auto' if tools are provided.")
|
||||
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
|
||||
def model_post_init(self, __context: Any) -> None:
|
||||
"""
|
||||
Initialize the agent's settings, such as tool choice and parent setup.
|
||||
Sets the tool choice strategy based on provided tools.
|
||||
"""
|
||||
self.tool_choice = self.tool_choice or ('auto' if self.tools else None)
|
||||
|
||||
# Proceed with base model setup
|
||||
super().model_post_init(__context)
|
||||
|
||||
def run(self, input_data: Optional[Union[str, Dict[str, Any]]] = None) -> Any:
|
||||
"""
|
||||
Executes the agent's main task using the provided input or memory context.
|
||||
|
||||
Args:
|
||||
input_data (Optional[Union[str, Dict[str, Any]]]): User's input, either as a string, a dictionary, or `None` to use memory context.
|
||||
|
||||
Returns:
|
||||
Any: The agent's response after processing the input.
|
||||
|
||||
Raises:
|
||||
AgentError: If the input data is invalid or if a user message is missing.
|
||||
"""
|
||||
logger.debug(f"Agent run started with input: {input_data if input_data else 'Using memory context'}")
|
||||
|
||||
# Format messages; construct_messages already includes chat history.
|
||||
messages = self.construct_messages(input_data or {})
|
||||
|
||||
# Get Last User Message
|
||||
user_message = self.get_last_user_message(messages)
|
||||
|
||||
if input_data:
|
||||
# Add the new user message to memory only if input_data is provided
|
||||
if user_message: # Ensure a user message exists before adding to memory
|
||||
self.memory.add_message(user_message)
|
||||
|
||||
# Always print the last user message for context, even if no input_data is provided
|
||||
if user_message:
|
||||
self.text_formatter.print_message(user_message)
|
||||
|
||||
# Process conversation iterations
|
||||
return self.process_iterations(messages)
|
||||
|
||||
def process_response(self, tool_calls: List[dict]) -> None:
|
||||
"""
|
||||
Execute tool calls and log their results in the tool history.
|
||||
|
||||
Args:
|
||||
tool_calls (List[dict]): Definitions of tool calls from the response.
|
||||
|
||||
Raises:
|
||||
AgentError: If an error occurs during tool execution.
|
||||
"""
|
||||
for tool in tool_calls:
|
||||
function_name = tool.function.name
|
||||
try:
|
||||
logger.info(f"Executing {function_name} with arguments {tool.function.arguments}")
|
||||
result = self.tool_executor.execute(function_name, **tool.function.arguments_dict)
|
||||
tool_message = ToolMessage(tool_call_id=tool.id, name=function_name, content=str(result))
|
||||
|
||||
self.text_formatter.print_message(tool_message)
|
||||
self.tool_history.append(tool_message)
|
||||
except Exception as e:
|
||||
logger.error(f"Error executing tool {function_name}: {e}")
|
||||
raise AgentError(f"Error executing tool '{function_name}': {e}") from e
|
||||
|
||||
def process_iterations(self, messages: List[Dict[str, Any]]) -> Any:
|
||||
"""
|
||||
Processes conversation iterations, invoking tool calls as needed.
|
||||
|
||||
Args:
|
||||
messages (List[Dict[str, Any]]): Initial conversation messages.
|
||||
|
||||
Returns:
|
||||
Any: The final response content after processing all iterations.
|
||||
|
||||
Raises:
|
||||
AgentError: If an error occurs during chat generation or if maximum iterations are reached.
|
||||
"""
|
||||
for iteration in range(self.max_iterations):
|
||||
logger.info(f"Iteration {iteration + 1}/{self.max_iterations} started.")
|
||||
|
||||
messages += self.tool_history
|
||||
|
||||
try:
|
||||
response: ChatCompletion = self.llm.generate(messages=messages, tools=self.tools, tool_choice=self.tool_choice)
|
||||
response_message = response.get_message()
|
||||
self.text_formatter.print_message(response_message)
|
||||
|
||||
if response.get_reason() == "tool_calls":
|
||||
self.tool_history.append(response_message)
|
||||
self.process_response(response.get_tool_calls())
|
||||
else:
|
||||
self.memory.add_message(AssistantMessage(response.get_content()))
|
||||
self.tool_history.clear()
|
||||
return response.get_content()
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error during chat generation: {e}")
|
||||
raise AgentError(f"Failed during chat generation: {e}") from e
|
||||
|
||||
logger.info("Max iterations reached. Agent has stopped.")
|
|
@ -1,87 +0,0 @@
|
|||
from dapr_agents.agent.patterns import ReActAgent, ToolCallAgent, OpenAPIReActAgent
|
||||
from dapr_agents.tool.utils.openapi import OpenAPISpecParser
|
||||
from dapr_agents.memory import ConversationListMemory
|
||||
from dapr_agents.llm import OpenAIChatClient
|
||||
from dapr_agents.agent.base import AgentBase
|
||||
from dapr_agents.llm import LLMClientBase
|
||||
from dapr_agents.memory import MemoryBase
|
||||
from dapr_agents.tool import AgentTool
|
||||
from typing import Optional, List, Union, Type, TypeVar
|
||||
|
||||
T = TypeVar('T', ToolCallAgent, ReActAgent, OpenAPIReActAgent)
|
||||
|
||||
class AgentFactory:
|
||||
"""
|
||||
Returns agent classes based on the provided pattern.
|
||||
"""
|
||||
AGENT_PATTERNS = {
|
||||
"react": ReActAgent,
|
||||
"toolcalling": ToolCallAgent,
|
||||
"openapireact": OpenAPIReActAgent
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def create_agent_class(pattern: str) -> Type[T]:
|
||||
"""
|
||||
Selects the agent class based on the pattern.
|
||||
|
||||
Args:
|
||||
pattern (str): Pattern type ('react', 'toolcalling', 'openapireact').
|
||||
|
||||
Returns:
|
||||
Type: Corresponding agent class.
|
||||
|
||||
Raises:
|
||||
ValueError: If the pattern is unsupported.
|
||||
"""
|
||||
pattern = pattern.lower()
|
||||
agent_class = AgentFactory.AGENT_PATTERNS.get(pattern)
|
||||
if not agent_class:
|
||||
raise ValueError(f"Unsupported agent pattern: {pattern}")
|
||||
return agent_class
|
||||
|
||||
|
||||
class Agent(AgentBase):
|
||||
"""
|
||||
Dynamically creates an agent instance based on the specified pattern.
|
||||
"""
|
||||
|
||||
def __new__(
|
||||
cls,
|
||||
role: str,
|
||||
name: Optional[str] = None,
|
||||
pattern: str = "toolcalling",
|
||||
llm: Optional[LLMClientBase] = None,
|
||||
memory: Optional[MemoryBase] = None,
|
||||
tools: Optional[List[AgentTool]] = [],
|
||||
**kwargs
|
||||
) -> Union[ToolCallAgent, ReActAgent, OpenAPIReActAgent]:
|
||||
"""
|
||||
Creates and returns an instance of the selected agent class.
|
||||
|
||||
Args:
|
||||
role (str): Agent role.
|
||||
name (Optional[str]): Agent name.
|
||||
pattern (str): Agent pattern to use.
|
||||
llm (Optional[LLMClientBase]): LLM client for generating responses.
|
||||
memory (Optional[MemoryBase]): Memory for conversation history.
|
||||
tools (Optional[List[AgentTool]]): List of tools for task execution.
|
||||
|
||||
Returns:
|
||||
Union[ToolCallAgent, ReActAgent, OpenAPIReActAgent]: The initialized agent instance.
|
||||
"""
|
||||
agent_class = AgentFactory.create_agent_class(pattern)
|
||||
|
||||
# Lazy initialization
|
||||
llm = llm or OpenAIChatClient()
|
||||
memory = memory or ConversationListMemory()
|
||||
|
||||
if pattern == "openapireact":
|
||||
kwargs.update({
|
||||
"spec_parser": kwargs.get('spec_parser', OpenAPISpecParser()),
|
||||
"auth_header": kwargs.get('auth_header', {})
|
||||
})
|
||||
|
||||
instance = super().__new__(agent_class)
|
||||
agent_class.__init__(instance, role=role, name=name, llm=llm, memory=memory, tools=tools, **kwargs)
|
||||
return instance
|
|
@ -0,0 +1,5 @@
|
|||
from .agent.agent import Agent
|
||||
from .base import AgentBase
|
||||
from .durableagent.agent import DurableAgent
|
||||
|
||||
__all__ = ["AgentBase", "Agent", "DurableAgent"]
|
|
@ -0,0 +1,3 @@
|
|||
from .agent import Agent
|
||||
|
||||
__all__ = ["Agent"]
|
|
@ -0,0 +1,265 @@
|
|||
import asyncio
|
||||
import logging
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
from dapr_agents.agents.base import AgentBase
|
||||
from dapr_agents.types import (
|
||||
AgentError,
|
||||
ToolCall,
|
||||
ToolExecutionRecord,
|
||||
ToolMessage,
|
||||
UserMessage,
|
||||
LLMChatResponse,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Agent(AgentBase):
|
||||
"""
|
||||
Agent that manages tool calls and conversations using a language model.
|
||||
It integrates tools and processes them based on user inputs and task orchestration.
|
||||
"""
|
||||
|
||||
async def run(self, input_data: Optional[Union[str, Dict[str, Any]]] = None) -> Any:
|
||||
"""
|
||||
Runs the agent with the given input, supporting graceful shutdown.
|
||||
Uses the _race helper to handle shutdown and cancellation cleanly.
|
||||
|
||||
Args:
|
||||
input_data (Optional[Union[str, Dict[str, Any]]]): Input for the agent, can be a string or dict.
|
||||
Returns:
|
||||
Any: The result of agent execution, or None if shutdown is requested.
|
||||
"""
|
||||
try:
|
||||
return await self._race(self._run_agent(input_data))
|
||||
except asyncio.CancelledError:
|
||||
logger.info("Agent execution was cancelled.")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Error during agent execution: {e}")
|
||||
raise
|
||||
|
||||
async def _race(self, coro) -> Optional[Any]:
|
||||
"""
|
||||
Runs the given coroutine and races it against the agent's shutdown event.
|
||||
If shutdown is triggered, cancels the task and returns None.
|
||||
|
||||
Args:
|
||||
coro: The coroutine to run (e.g., _run_agent(input_data)).
|
||||
Returns:
|
||||
Optional[Any]: The result of the coroutine, or None if shutdown is triggered.
|
||||
"""
|
||||
task = asyncio.create_task(coro)
|
||||
shutdown_task = asyncio.create_task(self._shutdown_event.wait())
|
||||
done, pending = await asyncio.wait(
|
||||
[task, shutdown_task],
|
||||
return_when=asyncio.FIRST_COMPLETED,
|
||||
)
|
||||
for p in pending:
|
||||
p.cancel()
|
||||
if self._shutdown_event.is_set():
|
||||
logger.info("Shutdown requested during execution. Cancelling agent.")
|
||||
task.cancel()
|
||||
return None
|
||||
return await task
|
||||
|
||||
async def _run_agent(
|
||||
self, input_data: Optional[Union[str, Dict[str, Any]]] = None
|
||||
) -> Any:
|
||||
"""
|
||||
Internal method for running the agent logic.
|
||||
Formats messages, updates memory, and drives the conversation loop.
|
||||
|
||||
Args:
|
||||
input_data (Optional[Union[str, Dict[str, Any]]]): Input for the agent, can be a string or dict.
|
||||
Returns:
|
||||
Any: The result of the agent's conversation loop.
|
||||
"""
|
||||
logger.debug(
|
||||
f"Agent run started with input: {input_data if input_data else 'Using memory context'}"
|
||||
)
|
||||
|
||||
# Construct messages using only input_data; chat history handled internally
|
||||
messages: List[Dict[str, Any]] = self.construct_messages(input_data or {})
|
||||
user_message = self.get_last_user_message(messages)
|
||||
# Always work with a copy of the user message for safety
|
||||
user_message_copy: Optional[Dict[str, Any]] = (
|
||||
dict(user_message) if user_message else None
|
||||
)
|
||||
|
||||
if input_data and user_message_copy:
|
||||
# Add the new user message to memory only if input_data is provided and user message exists
|
||||
user_msg = UserMessage(content=user_message_copy.get("content", ""))
|
||||
self.memory.add_message(user_msg)
|
||||
|
||||
# Always print the last user message for context, even if no input_data is provided
|
||||
if user_message_copy is not None:
|
||||
# Ensure keys are str for mypy
|
||||
self.text_formatter.print_message(
|
||||
{str(k): v for k, v in user_message_copy.items()}
|
||||
)
|
||||
|
||||
# Process conversation iterations and return the result
|
||||
return await self.process_iterations(messages)
|
||||
|
||||
async def execute_tools(self, tool_calls: List[ToolCall]) -> List[ToolMessage]:
|
||||
"""
|
||||
Executes a batch of tool calls in parallel, bounded by max_concurrent, using asyncio.gather.
|
||||
Each tool call is executed asynchronously using run_tool, and results are appended to the persistent audit log (tool_history).
|
||||
If any tool call fails, the error is propagated and other tasks continue unless you set return_exceptions=True.
|
||||
|
||||
Args:
|
||||
tool_calls (List[ToolCall]): List of tool calls returned by the LLM to execute in this batch.
|
||||
max_concurrent (int, optional): Maximum number of concurrent tool executions (default: 5).
|
||||
|
||||
Returns:
|
||||
List[ToolMessage]: Results for this batch of tool calls, in the same order as input.
|
||||
|
||||
Raises:
|
||||
AgentError: If any tool execution fails.
|
||||
"""
|
||||
# Limiting concurrency to avoid overwhelming downstream systems
|
||||
max_concurrent = 10
|
||||
semaphore = asyncio.Semaphore(max_concurrent)
|
||||
|
||||
async def run_and_record(tool_call: ToolCall) -> ToolMessage:
|
||||
"""
|
||||
Executes a single tool call, respecting the concurrency limit.
|
||||
Appends the result to the persistent audit log.
|
||||
If the function name is missing, returns a ToolMessage with error status and raises AgentError.
|
||||
"""
|
||||
async with semaphore:
|
||||
function_name = tool_call.function.name
|
||||
tool_id = tool_call.id
|
||||
function_args = tool_call.function.arguments_dict
|
||||
|
||||
if not function_name:
|
||||
error_msg = f"Tool call missing function name: {tool_call}"
|
||||
logger.error(error_msg)
|
||||
# Return a ToolExecutionRecord with error status and raise AgentError
|
||||
tool_execution_record = ToolExecutionRecord(
|
||||
tool_call_id="<missing>",
|
||||
tool_name="<missing>",
|
||||
tool_args={},
|
||||
execution_result=error_msg,
|
||||
)
|
||||
self.tool_history.append(tool_execution_record)
|
||||
raise AgentError(error_msg)
|
||||
|
||||
try:
|
||||
logger.debug(
|
||||
f"Executing {function_name} with arguments {function_args}"
|
||||
)
|
||||
result = await self.run_tool(function_name, **function_args)
|
||||
result_str = str(result) if result is not None else ""
|
||||
tool_message = ToolMessage(
|
||||
tool_call_id=tool_id,
|
||||
name=function_name,
|
||||
content=result_str,
|
||||
)
|
||||
# Print the tool message for visibility
|
||||
self.text_formatter.print_message(tool_message)
|
||||
# Add tool message to memory
|
||||
self.memory.add_message(tool_message)
|
||||
# Append tool message to the persistent audit log
|
||||
tool_execution_record = ToolExecutionRecord(
|
||||
tool_call_id=tool_id,
|
||||
tool_name=function_name,
|
||||
tool_args=function_args,
|
||||
execution_result=result_str,
|
||||
)
|
||||
self.tool_history.append(tool_execution_record)
|
||||
return tool_message
|
||||
except Exception as e:
|
||||
logger.error(f"Error executing tool {function_name}: {e}")
|
||||
raise AgentError(
|
||||
f"Error executing tool '{function_name}': {e}"
|
||||
) from e
|
||||
|
||||
# Run all tool calls concurrently, but bounded by max_concurrent
|
||||
return await asyncio.gather(*(run_and_record(tc) for tc in tool_calls))
|
||||
|
||||
async def process_iterations(self, messages: List[Dict[str, Any]]) -> Any:
|
||||
"""
|
||||
Drives the agent conversation iteratively until a final answer or max iterations is reached.
|
||||
Handles tool calls, updates memory, and returns the final assistant message.
|
||||
Tool results are localized per iteration; persistent audit log is kept for all tool executions.
|
||||
|
||||
Args:
|
||||
messages (List[Dict[str, Any]]): Initial conversation messages.
|
||||
Returns:
|
||||
Any: The final assistant message or None if max iterations reached.
|
||||
Raises:
|
||||
AgentError: On chat failure or tool issues.
|
||||
"""
|
||||
final_reply = None
|
||||
for turn in range(1, self.max_iterations + 1):
|
||||
logger.info(f"Iteration {turn}/{self.max_iterations} started.")
|
||||
try:
|
||||
# Generate response using the LLM
|
||||
response: LLMChatResponse = self.llm.generate(
|
||||
messages=messages,
|
||||
tools=self.get_llm_tools(),
|
||||
tool_choice=self.tool_choice,
|
||||
)
|
||||
# Get the first candidate from the response
|
||||
response_message = response.get_message()
|
||||
# Check if the response contains an assistant message
|
||||
if response_message is None:
|
||||
raise AgentError("LLM returned no assistant message")
|
||||
else:
|
||||
assistant = response_message
|
||||
self.text_formatter.print_message(assistant)
|
||||
self.memory.add_message(assistant)
|
||||
|
||||
# Handle tool calls response
|
||||
if assistant is not None and assistant.has_tool_calls():
|
||||
tool_calls = assistant.get_tool_calls()
|
||||
if tool_calls:
|
||||
messages.append(assistant.model_dump())
|
||||
tool_msgs = await self.execute_tools(tool_calls)
|
||||
messages.extend([tm.model_dump() for tm in tool_msgs])
|
||||
if turn == self.max_iterations:
|
||||
final_reply = assistant
|
||||
logger.info("Reached max turns after tool calls; stopping.")
|
||||
break
|
||||
continue
|
||||
|
||||
# No tool calls => done
|
||||
final_reply = assistant
|
||||
break
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error on turn {turn}: {e}")
|
||||
raise AgentError(f"Failed during chat generation: {e}") from e
|
||||
|
||||
# Post-loop
|
||||
if final_reply is None:
|
||||
logger.warning("No reply generated; hitting max iterations.")
|
||||
return None
|
||||
|
||||
logger.info(f"Agent conversation completed after {turn} turns.")
|
||||
return final_reply
|
||||
|
||||
async def run_tool(self, tool_name: str, *args, **kwargs) -> Any:
|
||||
"""
|
||||
Executes a single registered tool by name, handling both sync and async tools.
|
||||
Used for atomic tool execution, either directly or as part of a batch in execute_tools.
|
||||
|
||||
Args:
|
||||
tool_name (str): Name of the tool to run.
|
||||
*args: Positional arguments for the tool.
|
||||
**kwargs: Keyword arguments for the tool.
|
||||
|
||||
Returns:
|
||||
Any: Result from the tool execution.
|
||||
|
||||
Raises:
|
||||
AgentError: If the tool is not found or execution fails.
|
||||
"""
|
||||
try:
|
||||
return await self.tool_executor.run_tool(tool_name, *args, **kwargs)
|
||||
except Exception as e:
|
||||
logger.error(f"Agent failed to run tool '{tool_name}': {e}")
|
||||
raise AgentError(f"Failed to run tool '{tool_name}': {e}") from e
|
|
@ -0,0 +1,543 @@
|
|||
from dapr_agents.memory import (
|
||||
MemoryBase,
|
||||
ConversationListMemory,
|
||||
ConversationVectorMemory,
|
||||
)
|
||||
from dapr_agents.agents.utils.text_printer import ColorTextFormatter
|
||||
from dapr_agents.types import MessagePlaceHolder, BaseMessage, ToolExecutionRecord
|
||||
from dapr_agents.tool.executor import AgentToolExecutor
|
||||
from dapr_agents.prompt.base import PromptTemplateBase
|
||||
from dapr_agents.prompt import ChatPromptTemplate
|
||||
from dapr_agents.tool.base import AgentTool
|
||||
import re
|
||||
from datetime import datetime
|
||||
import logging
|
||||
import asyncio
|
||||
import signal
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import (
|
||||
List,
|
||||
Optional,
|
||||
Dict,
|
||||
Any,
|
||||
Union,
|
||||
Callable,
|
||||
Literal,
|
||||
ClassVar,
|
||||
)
|
||||
from pydantic import BaseModel, Field, PrivateAttr, model_validator, ConfigDict
|
||||
from dapr_agents.llm.chat import ChatClientBase
|
||||
from dapr_agents.llm.openai import OpenAIChatClient
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentBase(BaseModel, ABC):
|
||||
"""
|
||||
Base class for agents that interact with language models and manage tools for task execution.
|
||||
|
||||
Args:
|
||||
name: Agent name
|
||||
role: Agent role
|
||||
goal: Agent goal
|
||||
instructions: List of instructions
|
||||
tools: List of tools
|
||||
llm: LLM client
|
||||
memory: Memory instance
|
||||
"""
|
||||
|
||||
name: str = Field(
|
||||
default="Dapr Agent",
|
||||
description="The agent's name, defaulting to the role if not provided.",
|
||||
)
|
||||
role: Optional[str] = Field(
|
||||
default="Assistant",
|
||||
description="The agent's role in the interaction (e.g., 'Weather Expert').",
|
||||
)
|
||||
goal: Optional[str] = Field(
|
||||
default="Help humans",
|
||||
description="The agent's main objective (e.g., 'Provide Weather information').",
|
||||
)
|
||||
# TODO: add a background/backstory field that would be useful for the agent to know about it's context/background for it's role.
|
||||
instructions: Optional[List[str]] = Field(
|
||||
default=None, description="Instructions guiding the agent's tasks."
|
||||
)
|
||||
system_prompt: Optional[str] = Field(
|
||||
default=None,
|
||||
description="A custom system prompt, overriding name, role, goal, and instructions.",
|
||||
)
|
||||
llm: ChatClientBase = Field(
|
||||
default_factory=OpenAIChatClient,
|
||||
description="Language model client for generating responses.",
|
||||
)
|
||||
prompt_template: Optional[PromptTemplateBase] = Field(
|
||||
default=None, description="The prompt template for the agent."
|
||||
)
|
||||
# TODO: we need to add RBAC to tools to define what users and/or agents can use what tool(s).
|
||||
tools: List[Union[AgentTool, Callable]] = Field(
|
||||
default_factory=list,
|
||||
description="Tools available for the agent to assist with tasks.",
|
||||
)
|
||||
tool_choice: Optional[str] = Field(
|
||||
default=None,
|
||||
description="Strategy for selecting tools ('auto', 'required', 'none'). Defaults to 'auto' if tools are provided.",
|
||||
)
|
||||
tool_history: List[ToolExecutionRecord] = Field(
|
||||
default_factory=list, description="Executed tool calls during the conversation."
|
||||
)
|
||||
# TODO: add a forceFinalAnswer field in case maxIterations is near/reached. Or do we have a conclusion baked in by default? Do we want this to derive a conclusion by default?
|
||||
max_iterations: int = Field(
|
||||
default=10, description="Max iterations for conversation cycles."
|
||||
)
|
||||
memory: MemoryBase = Field(
|
||||
default_factory=ConversationListMemory,
|
||||
description="Handles conversation history and context storage.",
|
||||
)
|
||||
# TODO: we should have a system_template, prompt_template, and response_template, or better separation here.
|
||||
# If we have something like a customer service agent, we want diff templates for different types of interactions.
|
||||
# In future, we could also have a way to dynamically change the template based on the context of the interaction.
|
||||
template_format: Literal["f-string", "jinja2"] = Field(
|
||||
default="jinja2",
|
||||
description="The format used for rendering the prompt template.",
|
||||
)
|
||||
|
||||
DEFAULT_SYSTEM_PROMPT: ClassVar[str]
|
||||
"""Default f-string template; placeholders will be swapped to Jinja if needed."""
|
||||
DEFAULT_SYSTEM_PROMPT = """
|
||||
# Today's date is: {date}
|
||||
|
||||
## Name
|
||||
Your name is {name}.
|
||||
|
||||
## Role
|
||||
Your role is {role}.
|
||||
|
||||
## Goal
|
||||
{goal}.
|
||||
|
||||
## Instructions
|
||||
{instructions}.
|
||||
""".strip()
|
||||
|
||||
_tool_executor: AgentToolExecutor = PrivateAttr()
|
||||
_text_formatter: ColorTextFormatter = PrivateAttr(
|
||||
default_factory=ColorTextFormatter
|
||||
)
|
||||
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
|
||||
@model_validator(mode="before")
|
||||
def set_name_from_role(cls, values: dict):
|
||||
# Set name to role if name is not provided
|
||||
if not values.get("name") and values.get("role"):
|
||||
values["name"] = values["role"]
|
||||
return values
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_llm(cls, values):
|
||||
"""Validate that LLM is properly configured."""
|
||||
if hasattr(values, "llm") and values.llm:
|
||||
try:
|
||||
# Validate LLM is properly configured by accessing it as this is required to be set.
|
||||
_ = values.llm
|
||||
except Exception as e:
|
||||
raise ValueError(f"Failed to initialize LLM: {e}") from e
|
||||
|
||||
return values
|
||||
|
||||
def model_post_init(self, __context: Any) -> None:
|
||||
"""
|
||||
Post-initialization hook for AgentBase.
|
||||
Sets up the prompt template using a centralized helper, ensuring agent and LLM client reference the same template.
|
||||
Also validates and pre-fills the template, and sets up graceful shutdown.
|
||||
|
||||
Args:
|
||||
__context (Any): Context passed from Pydantic's model initialization.
|
||||
"""
|
||||
self._tool_executor = AgentToolExecutor(tools=self.tools)
|
||||
|
||||
# Set tool_choice to 'auto' if tools are provided, otherwise None
|
||||
if self.tool_choice is None:
|
||||
self.tool_choice = "auto" if self.tools else None
|
||||
|
||||
# Centralize prompt template selection logic
|
||||
self.prompt_template = self._initialize_prompt_template()
|
||||
# Ensure LLM client and agent both reference the same template
|
||||
self.llm.prompt_template = self.prompt_template
|
||||
|
||||
self._validate_prompt_template()
|
||||
self.prefill_agent_attributes()
|
||||
|
||||
# Set up graceful shutdown
|
||||
self._shutdown_event = asyncio.Event()
|
||||
self._setup_signal_handlers()
|
||||
|
||||
super().model_post_init(__context)
|
||||
|
||||
def _initialize_prompt_template(self) -> PromptTemplateBase:
|
||||
"""
|
||||
Determines which prompt template to use for the agent:
|
||||
1. If the user supplied one, use it.
|
||||
2. Else if the LLM client already has one, adopt that.
|
||||
3. Else generate a system_prompt and ChatPromptTemplate from agent attributes.
|
||||
|
||||
Returns:
|
||||
PromptTemplateBase: The selected or constructed prompt template.
|
||||
"""
|
||||
# 1) User provided one?
|
||||
if self.prompt_template:
|
||||
logger.debug("🛠️ Using provided agent.prompt_template")
|
||||
return self.prompt_template
|
||||
|
||||
# 2) LLM client has one?
|
||||
if self.llm.prompt_template:
|
||||
logger.debug("🔄 Syncing from llm.prompt_template")
|
||||
return self.llm.prompt_template
|
||||
|
||||
# 3) Build from system_prompt or attributes
|
||||
if not self.system_prompt:
|
||||
logger.debug("⚙️ Constructing system_prompt from attributes")
|
||||
self.system_prompt = self.construct_system_prompt()
|
||||
|
||||
logger.debug("⚙️ Building ChatPromptTemplate from system_prompt")
|
||||
return self.construct_prompt_template()
|
||||
|
||||
def _collect_template_attrs(self) -> tuple[Dict[str, str], List[str]]:
|
||||
"""
|
||||
Collect agent attributes for prompt template pre-filling and warn about unused ones.
|
||||
- valid: attributes set on self and declared in prompt_template.input_variables.
|
||||
- unused: attributes set on self but not present in the template.
|
||||
Returns:
|
||||
(valid, unused): Tuple of dict of valid attrs and list of unused attr names.
|
||||
"""
|
||||
attrs = ["name", "role", "goal", "instructions"]
|
||||
valid: Dict[str, str] = {}
|
||||
unused: List[str] = []
|
||||
if not self.prompt_template or not hasattr(
|
||||
self.prompt_template, "input_variables"
|
||||
):
|
||||
return valid, attrs # No template, all attrs are unused
|
||||
original = set(self.prompt_template.input_variables)
|
||||
|
||||
for attr in attrs:
|
||||
val = getattr(self, attr, None)
|
||||
if val is None:
|
||||
continue
|
||||
if attr in original:
|
||||
# Only join instructions if it's a list and the template expects it
|
||||
if attr == "instructions" and isinstance(val, list):
|
||||
valid[attr] = "\n".join(val)
|
||||
else:
|
||||
valid[attr] = str(val)
|
||||
else:
|
||||
unused.append(attr)
|
||||
return valid, unused
|
||||
|
||||
def _setup_signal_handlers(self):
|
||||
"""Set up signal handlers for graceful shutdown"""
|
||||
try:
|
||||
signal.signal(signal.SIGINT, self._signal_handler)
|
||||
signal.signal(signal.SIGTERM, self._signal_handler)
|
||||
except (OSError, ValueError):
|
||||
# TODO: test this bc signal handlers may not work in all environments (e.g., Windows)
|
||||
pass
|
||||
|
||||
def _signal_handler(self, signum, frame):
|
||||
"""Handle interrupt signals gracefully"""
|
||||
print(f"\nReceived signal {signum}. Shutting down gracefully...")
|
||||
self._shutdown_event.set()
|
||||
|
||||
def _validate_prompt_template(self) -> None:
|
||||
"""
|
||||
Ensures chat_history is always available, injects any declared attributes,
|
||||
and warns if the user set attributes that aren't in the template.
|
||||
"""
|
||||
if not self.prompt_template:
|
||||
return
|
||||
|
||||
# Always make chat_history available
|
||||
vars_set = set(self.prompt_template.input_variables) | {"chat_history"}
|
||||
|
||||
# Inject any attributes the template declares
|
||||
valid_attrs, unused_attrs = self._collect_template_attrs()
|
||||
vars_set |= set(valid_attrs.keys())
|
||||
self.prompt_template.input_variables = list(vars_set)
|
||||
|
||||
if unused_attrs:
|
||||
logger.warning(
|
||||
"Agent attributes set but not referenced in prompt_template: "
|
||||
f"{', '.join(unused_attrs)}. Consider adding them to input_variables."
|
||||
)
|
||||
|
||||
@property
|
||||
def tool_executor(self) -> AgentToolExecutor:
|
||||
"""Returns the client to execute and manage tools, ensuring it's accessible but read-only."""
|
||||
return self._tool_executor
|
||||
|
||||
@property
|
||||
def text_formatter(self) -> ColorTextFormatter:
|
||||
"""Returns the text formatter for the agent."""
|
||||
return self._text_formatter
|
||||
|
||||
def get_chat_history(self, task: Optional[str] = None) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Retrieves the chat history from memory as a list of dictionaries.
|
||||
|
||||
Args:
|
||||
task (Optional[str]): The task or query provided by the user (used for vector search).
|
||||
|
||||
Returns:
|
||||
List[Dict[str, Any]]: The chat history as dictionaries.
|
||||
"""
|
||||
if isinstance(self.memory, ConversationVectorMemory) and task:
|
||||
if (
|
||||
hasattr(self.memory.vector_store, "embedding_function")
|
||||
and self.memory.vector_store.embedding_function
|
||||
and hasattr(
|
||||
self.memory.vector_store.embedding_function, "embed_documents"
|
||||
)
|
||||
):
|
||||
query_embeddings = self.memory.vector_store.embedding_function.embed(
|
||||
task
|
||||
)
|
||||
messages = self.memory.get_messages(query_embeddings=query_embeddings)
|
||||
else:
|
||||
messages = self.memory.get_messages()
|
||||
else:
|
||||
messages = self.memory.get_messages()
|
||||
return messages
|
||||
|
||||
@property
|
||||
def chat_history(self) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Returns the full chat history as a list of dictionaries.
|
||||
|
||||
Returns:
|
||||
List[Dict[str, Any]]: The chat history.
|
||||
"""
|
||||
return self.get_chat_history()
|
||||
|
||||
@abstractmethod
|
||||
def run(self, input_data: Union[str, Dict[str, Any]]) -> Any:
|
||||
"""
|
||||
Executes the agent's main logic based on provided inputs.
|
||||
|
||||
Args:
|
||||
inputs (Dict[str, Any]): A dictionary with dynamic input values for task execution.
|
||||
"""
|
||||
pass
|
||||
|
||||
def prefill_agent_attributes(self) -> None:
|
||||
"""
|
||||
Pre-fill prompt_template with agent attributes if specified in `input_variables`.
|
||||
Uses _collect_template_attrs to avoid duplicate logic and ensure consistency.
|
||||
"""
|
||||
if not self.prompt_template:
|
||||
return
|
||||
|
||||
# Re-use our helper to split valid vs. unused
|
||||
valid_attrs, unused_attrs = self._collect_template_attrs()
|
||||
|
||||
if unused_attrs:
|
||||
logger.warning(
|
||||
"Agent attributes set but not used in prompt_template: "
|
||||
f"{', '.join(unused_attrs)}. Consider adding them to input_variables."
|
||||
)
|
||||
|
||||
if valid_attrs:
|
||||
self.prompt_template = self.prompt_template.pre_fill_variables(
|
||||
**valid_attrs
|
||||
)
|
||||
logger.debug(f"Pre-filled template with: {list(valid_attrs.keys())}")
|
||||
else:
|
||||
logger.debug("No prompt_template variables needed pre-filling.")
|
||||
|
||||
def construct_system_prompt(self) -> str:
|
||||
"""
|
||||
Build the system prompt for the agent using a single template string.
|
||||
- Fills in the current date.
|
||||
- Leaves placeholders for name, role, goal, and instructions as variables (instructions only if set).
|
||||
- Converts placeholders to Jinja2 syntax if requested.
|
||||
|
||||
Returns:
|
||||
str: The formatted system prompt string.
|
||||
"""
|
||||
# Only fill in the date; leave all other placeholders as variables
|
||||
instructions_placeholder = "{instructions}" if self.instructions else ""
|
||||
filled = self.DEFAULT_SYSTEM_PROMPT.format(
|
||||
date=datetime.now().strftime("%B %d, %Y"),
|
||||
name="{name}",
|
||||
role="{role}",
|
||||
goal="{goal}",
|
||||
instructions=instructions_placeholder,
|
||||
)
|
||||
|
||||
# If using Jinja2, swap braces for all placeholders
|
||||
if self.template_format == "jinja2":
|
||||
# Replace every {foo} with {{foo}}
|
||||
return re.sub(r"\{(\w+)\}", r"{{\1}}", filled)
|
||||
else:
|
||||
return filled
|
||||
|
||||
def construct_prompt_template(self) -> ChatPromptTemplate:
|
||||
"""
|
||||
Constructs a ChatPromptTemplate that includes the system prompt and a placeholder for chat history.
|
||||
Ensures that the template is flexible and adaptable to dynamically handle pre-filled variables.
|
||||
|
||||
Returns:
|
||||
ChatPromptTemplate: A formatted prompt template for the agent.
|
||||
"""
|
||||
# Construct the system prompt if not provided
|
||||
system_prompt = self.system_prompt or self.construct_system_prompt()
|
||||
|
||||
# Create the template with placeholders for system message and chat history
|
||||
return ChatPromptTemplate.from_messages(
|
||||
messages=[
|
||||
("system", system_prompt),
|
||||
MessagePlaceHolder(variable_name="chat_history"),
|
||||
],
|
||||
template_format=self.template_format,
|
||||
)
|
||||
|
||||
def construct_messages(
|
||||
self, input_data: Union[str, Dict[str, Any]]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Constructs and formats initial messages based on input type, passing chat_history as a list, without mutating self.prompt_template.
|
||||
|
||||
Args:
|
||||
input_data (Union[str, Dict[str, Any]]): User input, either as a string or dictionary.
|
||||
|
||||
Returns:
|
||||
List[Dict[str, Any]]: List of formatted messages, including the user message if input_data is a string.
|
||||
"""
|
||||
if not self.prompt_template:
|
||||
raise ValueError(
|
||||
"Prompt template must be initialized before constructing messages."
|
||||
)
|
||||
|
||||
chat_history = self.get_chat_history() # List[Dict[str, Any]]
|
||||
|
||||
if isinstance(input_data, str):
|
||||
formatted_messages = self.prompt_template.format_prompt(
|
||||
chat_history=chat_history
|
||||
)
|
||||
if isinstance(formatted_messages, list):
|
||||
user_message = {"role": "user", "content": input_data}
|
||||
return formatted_messages + [user_message]
|
||||
else:
|
||||
return [
|
||||
{"role": "system", "content": formatted_messages},
|
||||
{"role": "user", "content": input_data},
|
||||
]
|
||||
|
||||
elif isinstance(input_data, dict):
|
||||
input_vars = dict(input_data)
|
||||
if "chat_history" not in input_vars:
|
||||
input_vars["chat_history"] = chat_history
|
||||
formatted_messages = self.prompt_template.format_prompt(**input_vars)
|
||||
if isinstance(formatted_messages, list):
|
||||
return formatted_messages
|
||||
else:
|
||||
return [{"role": "system", "content": formatted_messages}]
|
||||
|
||||
else:
|
||||
raise ValueError("Input data must be either a string or dictionary.")
|
||||
|
||||
def reset_memory(self):
|
||||
"""Clears all messages stored in the agent's memory."""
|
||||
self.memory.reset_memory()
|
||||
|
||||
def get_last_message(self) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Retrieves the last message from the chat history.
|
||||
|
||||
Returns:
|
||||
Optional[Dict[str, Any]]: The last message in the history as a dictionary, or None if none exist.
|
||||
"""
|
||||
chat_history = self.get_chat_history()
|
||||
if chat_history:
|
||||
last_msg = chat_history[-1]
|
||||
if isinstance(last_msg, BaseMessage):
|
||||
return last_msg.model_dump()
|
||||
return last_msg
|
||||
return None
|
||||
|
||||
def get_last_user_message(
|
||||
self, messages: List[Dict[str, Any]]
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Retrieves the last user message in a list of messages, returning a copy with trimmed content.
|
||||
|
||||
Args:
|
||||
messages (List[Dict[str, Any]]): List of formatted messages to search.
|
||||
|
||||
Returns:
|
||||
Optional[Dict[str, Any]]: The last user message (copy) with trimmed content, or None if no user message exists.
|
||||
"""
|
||||
# Iterate in reverse to find the most recent 'user' role message
|
||||
for message in reversed(messages):
|
||||
if message.get("role") == "user":
|
||||
# Return a copy with trimmed content
|
||||
msg_copy = dict(message)
|
||||
msg_copy["content"] = msg_copy["content"].strip()
|
||||
return msg_copy
|
||||
return None
|
||||
|
||||
def get_last_message_if_user(
|
||||
self, messages: List[Dict[str, Any]]
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Returns the last message only if it is a user message; otherwise, returns None.
|
||||
|
||||
Args:
|
||||
messages (List[Dict[str, Any]]): List of formatted messages to check.
|
||||
|
||||
Returns:
|
||||
Optional[Dict[str, Any]]: The last message (copy) with trimmed content if it is a user message, else None.
|
||||
"""
|
||||
if messages and messages[-1].get("role") == "user":
|
||||
msg_copy = dict(messages[-1])
|
||||
msg_copy["content"] = msg_copy["content"].strip()
|
||||
return msg_copy
|
||||
return None
|
||||
|
||||
def get_llm_tools(self) -> List[Union[AgentTool, Dict[str, Any]]]:
|
||||
"""
|
||||
Converts tools to the format expected by LLM clients.
|
||||
|
||||
Returns:
|
||||
List[Union[AgentTool, Dict[str, Any]]]: Tools in LLM-compatible format.
|
||||
"""
|
||||
llm_tools: List[Union[AgentTool, Dict[str, Any]]] = []
|
||||
for tool in self.tools:
|
||||
if isinstance(tool, AgentTool):
|
||||
llm_tools.append(tool)
|
||||
elif callable(tool):
|
||||
try:
|
||||
agent_tool = AgentTool.from_func(tool)
|
||||
llm_tools.append(agent_tool)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to convert callable to AgentTool: {e}")
|
||||
continue
|
||||
return llm_tools
|
||||
|
||||
def pre_fill_prompt_template(self, **kwargs: Union[str, Callable[[], str]]) -> None:
|
||||
"""
|
||||
Pre-fills the prompt template with specified variables, updating input variables if applicable.
|
||||
|
||||
Args:
|
||||
**kwargs: Variables to pre-fill in the prompt template. These can be strings or callables
|
||||
that return strings.
|
||||
|
||||
Notes:
|
||||
- Existing pre-filled variables will be overwritten by matching keys in `kwargs`.
|
||||
- This method does not affect the `chat_history` which is dynamically updated.
|
||||
"""
|
||||
if not self.prompt_template:
|
||||
raise ValueError(
|
||||
"Prompt template must be initialized before pre-filling variables."
|
||||
)
|
||||
|
||||
self.prompt_template = self.prompt_template.pre_fill_variables(**kwargs)
|
||||
logger.debug(f"Pre-filled prompt template with variables: {kwargs.keys()}")
|
|
@ -0,0 +1,3 @@
|
|||
from .agent import DurableAgent
|
||||
|
||||
__all__ = ["DurableAgent"]
|
|
@ -0,0 +1,603 @@
|
|||
import json
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
from dapr.ext.workflow import DaprWorkflowContext # type: ignore
|
||||
from pydantic import Field, model_validator
|
||||
|
||||
from dapr_agents.agents.base import AgentBase
|
||||
from dapr_agents.types import (
|
||||
AgentError,
|
||||
AssistantMessage,
|
||||
LLMChatResponse,
|
||||
ToolExecutionRecord,
|
||||
ToolMessage,
|
||||
UserMessage,
|
||||
)
|
||||
from dapr_agents.workflow.agentic import AgenticWorkflow
|
||||
from dapr_agents.workflow.decorators import message_router, task, workflow
|
||||
|
||||
from .schemas import (
|
||||
AgentTaskResponse,
|
||||
BroadcastMessage,
|
||||
TriggerAction,
|
||||
)
|
||||
from .state import (
|
||||
DurableAgentMessage,
|
||||
DurableAgentWorkflowEntry,
|
||||
DurableAgentWorkflowState,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# TODO(@Sicoyle): Clear up the lines between DurableAgent and AgentWorkflow
|
||||
class DurableAgent(AgenticWorkflow, AgentBase):
|
||||
"""
|
||||
A conversational AI agent that responds to user messages, engages in discussions,
|
||||
and dynamically utilizes external tools when needed.
|
||||
|
||||
The DurableAgent follows an agentic workflow, iterating on responses based on
|
||||
contextual understanding, reasoning, and tool-assisted execution. It ensures
|
||||
meaningful interactions by selecting the right tools, generating relevant responses,
|
||||
and refining outputs through iterative feedback loops.
|
||||
"""
|
||||
|
||||
agent_topic_name: Optional[str] = Field(
|
||||
default=None,
|
||||
description="The topic name dedicated to this specific agent, derived from the agent's name if not provided.",
|
||||
)
|
||||
agent_metadata: Optional[Dict[str, Any]] = Field(
|
||||
default=None,
|
||||
description="Metadata about the agent, including name, role, goal, instructions, and topic name.",
|
||||
)
|
||||
|
||||
@model_validator(mode="before")
|
||||
def set_agent_and_topic_name(cls, values: dict):
|
||||
# Set name to role if name is not provided
|
||||
if not values.get("name") and values.get("role"):
|
||||
values["name"] = values["role"]
|
||||
|
||||
# Derive agent_topic_name from agent name
|
||||
if not values.get("agent_topic_name") and values.get("name"):
|
||||
values["agent_topic_name"] = values["name"]
|
||||
|
||||
return values
|
||||
|
||||
def model_post_init(self, __context: Any) -> None:
|
||||
"""Initializes the workflow with agentic execution capabilities."""
|
||||
self.state = DurableAgentWorkflowState().model_dump()
|
||||
|
||||
# Call AgenticWorkflow's model_post_init first to initialize state store and other dependencies
|
||||
super().model_post_init(__context)
|
||||
|
||||
# Name of main Workflow
|
||||
# TODO: can this be configurable or dynamic? Would that make sense?
|
||||
self._workflow_name = "ToolCallingWorkflow"
|
||||
|
||||
# Register the agentic system
|
||||
self._agent_metadata = {
|
||||
"name": self.name,
|
||||
"role": self.role,
|
||||
"goal": self.goal,
|
||||
"instructions": self.instructions,
|
||||
"topic_name": self.agent_topic_name,
|
||||
"pubsub_name": self.message_bus_name,
|
||||
"orchestrator": False,
|
||||
}
|
||||
self.register_agentic_system()
|
||||
|
||||
async def run(self, input_data: Union[str, Dict[str, Any]]) -> Any:
|
||||
"""
|
||||
Fire up the workflow, wait for it to complete, then return the final serialized_output.
|
||||
|
||||
Args:
|
||||
input_data (Union[str, Dict[str, Any]]): The input for the workflow. Can be a string (task) or a dict.
|
||||
Returns:
|
||||
Any: The final output from the workflow execution.
|
||||
"""
|
||||
# Make sure the Dapr runtime is running
|
||||
if not self.wf_runtime_is_running:
|
||||
self.start_runtime()
|
||||
|
||||
# Prepare input payload for workflow
|
||||
if isinstance(input_data, dict):
|
||||
input_payload = input_data
|
||||
else:
|
||||
input_payload = {"task": input_data}
|
||||
|
||||
# Kick off the workflow and block until it finishes:
|
||||
return await self.run_and_monitor_workflow_async(
|
||||
workflow=self._workflow_name,
|
||||
input=input_payload,
|
||||
)
|
||||
|
||||
@message_router
|
||||
@workflow(name="ToolCallingWorkflow")
|
||||
def tool_calling_workflow(self, ctx: DaprWorkflowContext, message: TriggerAction):
|
||||
"""
|
||||
Executes a tool-calling workflow, determining the task source (either an agent or an external user).
|
||||
This uses Dapr Workflows to run the agent in a ReAct-style loop until it generates a final answer or reaches max iterations,
|
||||
calling tools as needed.
|
||||
|
||||
Args:
|
||||
ctx (DaprWorkflowContext): The workflow context for the current execution, providing state and control methods.
|
||||
message (TriggerAction): The trigger message containing the task, iteration, and metadata for workflow execution.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: The final response message when the workflow completes, or None if continuing to the next iteration.
|
||||
"""
|
||||
# Step 1: pull out task + metadata
|
||||
if isinstance(message, dict):
|
||||
task = message.get("task", None)
|
||||
source_workflow_instance_id = message.get("workflow_instance_id")
|
||||
metadata = message.get("_message_metadata", {}) or {}
|
||||
else:
|
||||
task = getattr(message, "task", None)
|
||||
source_workflow_instance_id = getattr(message, "workflow_instance_id", None)
|
||||
metadata = getattr(message, "_message_metadata", {}) or {}
|
||||
|
||||
instance_id = ctx.instance_id
|
||||
source = metadata.get("source")
|
||||
final_message: Optional[Dict[str, Any]] = None
|
||||
|
||||
if not ctx.is_replaying:
|
||||
logger.debug(f"Initial message from {source} -> {self.name}")
|
||||
|
||||
try:
|
||||
# Loop up to max_iterations
|
||||
for turn in range(1, self.max_iterations + 1):
|
||||
if not ctx.is_replaying:
|
||||
logger.info(
|
||||
f"Workflow turn {turn}/{self.max_iterations} (Instance ID: {instance_id})"
|
||||
)
|
||||
|
||||
# Step 2: On turn 1, record the initial entry
|
||||
if turn == 1:
|
||||
yield ctx.call_activity(
|
||||
self.record_initial_entry,
|
||||
input={
|
||||
"instance_id": instance_id,
|
||||
"input": task or "Triggered without input.",
|
||||
"source": source,
|
||||
"source_workflow_instance_id": source_workflow_instance_id,
|
||||
"output": "",
|
||||
},
|
||||
)
|
||||
|
||||
# Step 3: Retrieve workflow entry info for this instance
|
||||
entry_info: dict = yield ctx.call_activity(
|
||||
self.get_workflow_entry_info, input={"instance_id": instance_id}
|
||||
)
|
||||
source = entry_info.get("source")
|
||||
source_workflow_instance_id = entry_info.get(
|
||||
"source_workflow_instance_id"
|
||||
)
|
||||
|
||||
# Step 4: Generate Response with LLM
|
||||
response_message: dict = yield ctx.call_activity(
|
||||
self.generate_response,
|
||||
input={"task": task, "instance_id": instance_id},
|
||||
)
|
||||
|
||||
# Step 5: Add the assistant's response message to the chat history
|
||||
yield ctx.call_activity(
|
||||
self.append_assistant_message,
|
||||
input={"instance_id": instance_id, "message": response_message},
|
||||
)
|
||||
|
||||
# Step 6: Handle tool calls response
|
||||
tool_calls = response_message.get("tool_calls") or []
|
||||
if tool_calls:
|
||||
if not ctx.is_replaying:
|
||||
logger.info(
|
||||
f"Turn {turn}: executing {len(tool_calls)} tool call(s)"
|
||||
)
|
||||
# fan‑out parallel tool executions
|
||||
parallel = [
|
||||
ctx.call_activity(self.run_tool, input={"tool_call": tc})
|
||||
for tc in tool_calls
|
||||
]
|
||||
tool_results: List[Dict[str, Any]] = yield self.when_all(parallel)
|
||||
# Add tool results for the next iteration
|
||||
for tr in tool_results:
|
||||
yield ctx.call_activity(
|
||||
self.append_tool_message,
|
||||
input={"instance_id": instance_id, "tool_result": tr},
|
||||
)
|
||||
# 🔴 If this was the last turn, stop here—even though there were tool calls
|
||||
if turn == self.max_iterations:
|
||||
final_message = response_message
|
||||
final_message[
|
||||
"content"
|
||||
] += "\n\n⚠️ Stopped: reached max iterations."
|
||||
break
|
||||
|
||||
# Otherwise, prepare for next turn: clear task so that generate_response() uses memory/history
|
||||
task = None
|
||||
continue # bump to next turn
|
||||
|
||||
# No tool calls → this is your final answer
|
||||
final_message = response_message
|
||||
|
||||
# 🔴 If it happened to be the last turn, banner it
|
||||
if turn == self.max_iterations:
|
||||
final_message["content"] += "\n\n⚠️ Stopped: reached max iterations."
|
||||
|
||||
break # exit loop with final_message
|
||||
else:
|
||||
raise AgentError("Workflow ended without producing a final response")
|
||||
|
||||
except Exception as e:
|
||||
logger.exception("Workflow error", exc_info=e)
|
||||
final_message = {
|
||||
"role": "assistant",
|
||||
"content": f"⚠️ Unexpected error: {e}",
|
||||
}
|
||||
|
||||
# Step 7: Broadcast the final response if a broadcast topic is set
|
||||
if self.broadcast_topic_name:
|
||||
yield ctx.call_activity(
|
||||
self.broadcast_message_to_agents,
|
||||
input={"message": final_message},
|
||||
)
|
||||
|
||||
# Respond to source agent if available
|
||||
if source and source_workflow_instance_id:
|
||||
yield ctx.call_activity(
|
||||
self.send_response_back,
|
||||
input={
|
||||
"response": final_message,
|
||||
"target_agent": source,
|
||||
"target_instance_id": source_workflow_instance_id,
|
||||
},
|
||||
)
|
||||
|
||||
# Save final output to workflow state
|
||||
yield ctx.call_activity(
|
||||
self.finalize_workflow,
|
||||
input={
|
||||
"instance_id": instance_id,
|
||||
"final_output": final_message["content"],
|
||||
},
|
||||
)
|
||||
|
||||
# Set verdict for the workflow instance
|
||||
if not ctx.is_replaying:
|
||||
verdict = (
|
||||
"max_iterations_reached" if turn == self.max_iterations else "completed"
|
||||
)
|
||||
logger.info(f"Workflow {instance_id} finalized: {verdict}")
|
||||
|
||||
# Return the final response message
|
||||
return final_message
|
||||
|
||||
@task
|
||||
def record_initial_entry(
|
||||
self,
|
||||
instance_id: str,
|
||||
input: str,
|
||||
source: Optional[str],
|
||||
source_workflow_instance_id: Optional[str],
|
||||
output: str = "",
|
||||
):
|
||||
"""
|
||||
Records the initial workflow entry for a new workflow instance.
|
||||
Args:
|
||||
instance_id (str): The workflow instance ID.
|
||||
input (str): The input task for the workflow.
|
||||
source (Optional[str]): The source of the workflow trigger.
|
||||
source_workflow_instance_id (Optional[str]): The workflow instance ID of the source.
|
||||
output (str): The output for the workflow entry (default: "").
|
||||
"""
|
||||
entry = DurableAgentWorkflowEntry(
|
||||
input=input,
|
||||
source=source,
|
||||
source_workflow_instance_id=source_workflow_instance_id,
|
||||
output=output,
|
||||
)
|
||||
self.state.setdefault("instances", {})[instance_id] = entry.model_dump(
|
||||
mode="json"
|
||||
)
|
||||
|
||||
@task
|
||||
def get_workflow_entry_info(self, instance_id: str) -> Dict[str, Any]:
|
||||
"""
|
||||
Retrieves the 'source' and 'source_workflow_instance_id' for a given workflow instance.
|
||||
|
||||
Args:
|
||||
instance_id (str): The workflow instance ID to look up.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Dictionary containing:
|
||||
- 'source': The source of the workflow trigger (str or None).
|
||||
- 'source_workflow_instance_id': The workflow instance ID of the source (str or None).
|
||||
|
||||
Raises:
|
||||
AgentError: If the entry is not found or invalid.
|
||||
"""
|
||||
workflow_entry = self.state.get("instances", {}).get(instance_id)
|
||||
if workflow_entry is not None:
|
||||
return {
|
||||
"source": workflow_entry.get("source"),
|
||||
"source_workflow_instance_id": workflow_entry.get(
|
||||
"source_workflow_instance_id"
|
||||
),
|
||||
}
|
||||
raise AgentError(f"No workflow entry found for instance_id={instance_id}")
|
||||
|
||||
@task
|
||||
async def generate_response(
|
||||
self, instance_id: str, task: Optional[Union[str, Dict[str, Any]]] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Ask the LLM for the assistant's next message.
|
||||
|
||||
Args:
|
||||
instance_id (str): The workflow instance ID.
|
||||
task: The user's query for this turn (either a string or a dict),
|
||||
or None if this is a follow-up iteration.
|
||||
|
||||
Returns:
|
||||
A plain dict of the LLM's response (choices, finish_reason, etc).
|
||||
Pydantic models are `.model_dump()`-ed; any other object is coerced via `dict()`.
|
||||
"""
|
||||
# Construct messages using only input_data; chat history handled internally
|
||||
messages: List[Dict[str, Any]] = self.construct_messages(task or {})
|
||||
user_message = self.get_last_message_if_user(messages)
|
||||
|
||||
# Always work with a copy of the user message for safety
|
||||
user_message_copy: Optional[Dict[str, Any]] = (
|
||||
dict(user_message) if user_message else None
|
||||
)
|
||||
|
||||
if task and user_message_copy:
|
||||
# Add the new user message to memory only if input_data is provided and user message exists
|
||||
user_msg = UserMessage(content=user_message_copy.get("content", ""))
|
||||
self.memory.add_message(user_msg)
|
||||
# Define DurableAgentMessage object for state persistence
|
||||
msg_object = DurableAgentMessage(**user_message_copy)
|
||||
inst: dict = self.state["instances"][instance_id]
|
||||
inst.setdefault("messages", []).append(msg_object.model_dump(mode="json"))
|
||||
inst["last_message"] = msg_object.model_dump(mode="json")
|
||||
self.state.setdefault("chat_history", []).append(
|
||||
msg_object.model_dump(mode="json")
|
||||
)
|
||||
# Save the state after appending the user message
|
||||
self.save_state()
|
||||
|
||||
# Always print the last user message for context, even if no input_data is provided
|
||||
if user_message_copy is not None:
|
||||
# Ensure keys are str for mypy
|
||||
self.text_formatter.print_message(
|
||||
{str(k): v for k, v in user_message_copy.items()}
|
||||
)
|
||||
|
||||
# Generate response using the LLM
|
||||
try:
|
||||
response: LLMChatResponse = self.llm.generate(
|
||||
messages=messages,
|
||||
tools=self.get_llm_tools(),
|
||||
tool_choice=self.tool_choice,
|
||||
)
|
||||
# Get the first candidate from the response
|
||||
response_message = response.get_message()
|
||||
# Check if the response contains an assistant message
|
||||
if response_message is None:
|
||||
raise AgentError("LLM returned no assistant message")
|
||||
# Convert the response message to a dict to work with JSON serialization
|
||||
assistant_message = response_message.model_dump()
|
||||
return assistant_message
|
||||
except Exception as e:
|
||||
logger.error(f"Error during chat generation: {e}")
|
||||
raise AgentError(f"Failed during chat generation: {e}") from e
|
||||
|
||||
@task
|
||||
async def run_tool(self, tool_call: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
Executes a tool call by invoking the specified function with the provided arguments.
|
||||
|
||||
Args:
|
||||
tool_call (Dict[str, Any]): A dictionary containing tool execution details, including the function name and arguments.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: A dictionary containing the tool call ID, function name, function arguments
|
||||
|
||||
Raises:
|
||||
AgentError: If the tool call is malformed or execution fails.
|
||||
"""
|
||||
# Extract function name and raw args
|
||||
fn_name = tool_call["function"]["name"]
|
||||
raw_args = tool_call["function"].get("arguments", "")
|
||||
|
||||
# Parse JSON arguments (or empty dict)
|
||||
try:
|
||||
args = json.loads(raw_args) if raw_args else {}
|
||||
except json.JSONDecodeError as e:
|
||||
raise AgentError(f"Invalid JSON in tool args: {e}")
|
||||
|
||||
# Run the tool
|
||||
logger.info(f"Executing tool '{fn_name}' with args: {args}")
|
||||
try:
|
||||
result = await self.tool_executor.run_tool(fn_name, **args)
|
||||
except Exception as e:
|
||||
logger.error(f"Error executing tool '{fn_name}': {e}", exc_info=True)
|
||||
raise AgentError(f"Error executing tool '{fn_name}': {e}") from e
|
||||
|
||||
# Return the plain payload for later persistence
|
||||
return {
|
||||
"tool_call_id": tool_call["id"],
|
||||
"tool_name": fn_name,
|
||||
"tool_args": args,
|
||||
"execution_result": str(result) if result is not None else "",
|
||||
}
|
||||
|
||||
@task
|
||||
async def broadcast_message_to_agents(self, message: Dict[str, Any]):
|
||||
"""
|
||||
Broadcasts it to all registered agents.
|
||||
|
||||
Args:
|
||||
message (Dict[str, Any]): A message to append to the workflow state and broadcast to all agents.
|
||||
"""
|
||||
# Format message for broadcasting
|
||||
message["role"] = "user"
|
||||
message["name"] = self.name
|
||||
response_message = BroadcastMessage(**message)
|
||||
|
||||
# Broadcast message to all agents
|
||||
await self.broadcast_message(message=response_message)
|
||||
|
||||
@task
|
||||
async def send_response_back(
|
||||
self, response: Dict[str, Any], target_agent: str, target_instance_id: str
|
||||
):
|
||||
"""
|
||||
Sends a task response back to a target agent within a workflow.
|
||||
|
||||
Args:
|
||||
response (Dict[str, Any]): The response payload to be sent.
|
||||
target_agent (str): The name of the agent that should receive the response.
|
||||
target_instance_id (str): The workflow instance ID associated with the response.
|
||||
|
||||
Raises:
|
||||
ValidationError: If the response does not match the expected structure for `AgentTaskResponse`.
|
||||
"""
|
||||
# Format Response
|
||||
response["role"] = "user"
|
||||
response["name"] = self.name
|
||||
response["workflow_instance_id"] = target_instance_id
|
||||
agent_response = AgentTaskResponse(**response)
|
||||
|
||||
# Send the message to the target agent
|
||||
await self.send_message_to_agent(name=target_agent, message=agent_response)
|
||||
|
||||
@task
|
||||
def append_assistant_message(
|
||||
self, instance_id: str, message: Dict[str, Any]
|
||||
) -> None:
|
||||
"""
|
||||
Append an assistant message into the workflow state.
|
||||
|
||||
Args:
|
||||
instance_id (str): The workflow instance ID.
|
||||
message (Dict[str, Any]): The assistant message to append.
|
||||
"""
|
||||
message["name"] = self.name
|
||||
# Convert the message to a DurableAgentMessage object
|
||||
msg_object = DurableAgentMessage(**message)
|
||||
# Defensive: check self.state is not None
|
||||
inst: dict = self.state["instances"][instance_id]
|
||||
inst.setdefault("messages", []).append(msg_object.model_dump(mode="json"))
|
||||
inst["last_message"] = msg_object.model_dump(mode="json")
|
||||
self.state.setdefault("chat_history", []).append(
|
||||
msg_object.model_dump(mode="json")
|
||||
)
|
||||
# Add the assistant message to the tool history
|
||||
self.memory.add_message(AssistantMessage(**message))
|
||||
# Save the state after appending the assistant message
|
||||
self.save_state()
|
||||
# Print the assistant message
|
||||
self.text_formatter.print_message(message)
|
||||
|
||||
@task
|
||||
def append_tool_message(
|
||||
self, instance_id: str, tool_result: Dict[str, Any]
|
||||
) -> None:
|
||||
"""
|
||||
Append a tool-execution record to both the per-instance history and the agent's tool_history.
|
||||
"""
|
||||
# Define a ToolMessage object from the tool result
|
||||
tool_message = ToolMessage(
|
||||
tool_call_id=tool_result["tool_call_id"],
|
||||
name=tool_result["tool_name"],
|
||||
content=tool_result["execution_result"],
|
||||
)
|
||||
# Define DurableAgentMessage object for state persistence
|
||||
msg_object = DurableAgentMessage(**tool_message.model_dump())
|
||||
# Define a ToolExecutionRecord object
|
||||
# to store the tool execution details in the workflow state
|
||||
tool_history_entry = ToolExecutionRecord(**tool_result)
|
||||
# Defensive: check self.state is not None
|
||||
inst: dict = self.state["instances"][instance_id]
|
||||
inst.setdefault("messages", []).append(msg_object.model_dump(mode="json"))
|
||||
inst.setdefault("tool_history", []).append(
|
||||
tool_history_entry.model_dump(mode="json")
|
||||
)
|
||||
self.state.setdefault("chat_history", []).append(
|
||||
msg_object.model_dump(mode="json")
|
||||
)
|
||||
# Update tool history and memory of agent
|
||||
self.tool_history.append(tool_history_entry)
|
||||
# Add the tool message to the agent's memory
|
||||
self.memory.add_message(tool_message)
|
||||
# Save the state after appending the tool message
|
||||
self.save_state()
|
||||
# Print the tool message
|
||||
self.text_formatter.print_message(tool_message)
|
||||
|
||||
@task
|
||||
def finalize_workflow(self, instance_id: str, final_output: str) -> None:
|
||||
"""
|
||||
Record the final output and end_time in the workflow state.
|
||||
"""
|
||||
end_time = datetime.now(timezone.utc)
|
||||
end_time_str = end_time.isoformat()
|
||||
inst: dict = self.state["instances"][instance_id]
|
||||
inst["output"] = final_output
|
||||
inst["end_time"] = end_time_str
|
||||
self.save_state()
|
||||
|
||||
@message_router(broadcast=True)
|
||||
async def process_broadcast_message(self, message: BroadcastMessage):
|
||||
"""
|
||||
Processes a broadcast message, filtering out messages sent by the same agent
|
||||
and updating local memory with valid messages.
|
||||
|
||||
Args:
|
||||
message (BroadcastMessage): The received broadcast message.
|
||||
|
||||
Returns:
|
||||
None: The function updates the agent's memory and ignores unwanted messages.
|
||||
"""
|
||||
try:
|
||||
# Extract metadata safely from message["_message_metadata"]
|
||||
metadata = getattr(message, "_message_metadata", {})
|
||||
|
||||
if not isinstance(metadata, dict) or not metadata:
|
||||
logger.warning(
|
||||
f"{self.name} received a broadcast message with missing or invalid metadata. Ignoring."
|
||||
)
|
||||
return
|
||||
|
||||
source = metadata.get("source", "unknown_source")
|
||||
message_type = metadata.get("type", "unknown_type")
|
||||
message_content = getattr(message, "content", "No Data")
|
||||
logger.info(
|
||||
f"{self.name} received broadcast message of type '{message_type}' from '{source}'."
|
||||
)
|
||||
# Ignore messages sent by this agent
|
||||
if source == self.name:
|
||||
logger.info(
|
||||
f"{self.name} ignored its own broadcast message of type '{message_type}'."
|
||||
)
|
||||
return
|
||||
# Log and process the valid broadcast message
|
||||
logger.debug(
|
||||
f"{self.name} processing broadcast message from '{source}'. Content: {message_content}"
|
||||
)
|
||||
# Store the message in local memory
|
||||
self.memory.add_message(message)
|
||||
|
||||
# Define DurableAgentMessage object for state persistence
|
||||
msg_object = DurableAgentMessage(**message.model_dump())
|
||||
|
||||
# Persist to global chat history
|
||||
self.state.setdefault("chat_history", [])
|
||||
self.state["chat_history"].append(msg_object.model_dump(mode="json"))
|
||||
# Save the state after processing the broadcast message
|
||||
self.save_state()
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing broadcast message: {e}", exc_info=True)
|
|
@ -2,21 +2,32 @@ from dapr_agents.types.message import BaseMessage
|
|||
from pydantic import BaseModel, Field
|
||||
from typing import Optional
|
||||
|
||||
|
||||
class BroadcastMessage(BaseMessage):
|
||||
"""
|
||||
Represents a broadcast message from an agent.
|
||||
"""
|
||||
|
||||
|
||||
class AgentTaskResponse(BaseMessage):
|
||||
"""
|
||||
Represents a response message from an agent after completing a task.
|
||||
"""
|
||||
workflow_instance_id: Optional[str] = Field(default=None, description="Dapr workflow instance id from source if available")
|
||||
|
||||
workflow_instance_id: Optional[str] = Field(
|
||||
default=None, description="Dapr workflow instance id from source if available"
|
||||
)
|
||||
|
||||
|
||||
class TriggerAction(BaseModel):
|
||||
"""
|
||||
Represents a message used to trigger an agent's activity within the workflow.
|
||||
"""
|
||||
task: Optional[str] = Field(None, description="The specific task to execute. If not provided, the agent will act based on its memory or predefined behavior.")
|
||||
iteration: Optional[int] = Field(0, description="")
|
||||
workflow_instance_id: Optional[str] = Field(default=None, description="Dapr workflow instance id from source if available")
|
||||
|
||||
task: Optional[str] = Field(
|
||||
None,
|
||||
description="The specific task to execute. If not provided, the agent will act based on its memory or predefined behavior.",
|
||||
)
|
||||
workflow_instance_id: Optional[str] = Field(
|
||||
default=None, description="Dapr workflow instance id from source if available"
|
||||
)
|
|
@ -0,0 +1,63 @@
|
|||
from pydantic import BaseModel, Field
|
||||
from typing import List, Optional, Dict
|
||||
from dapr_agents.types import MessageContent, ToolExecutionRecord
|
||||
from datetime import datetime
|
||||
import uuid
|
||||
|
||||
|
||||
class DurableAgentMessage(MessageContent):
|
||||
id: str = Field(
|
||||
default_factory=lambda: str(uuid.uuid4()),
|
||||
description="Unique identifier for the message",
|
||||
)
|
||||
timestamp: datetime = Field(
|
||||
default_factory=datetime.now,
|
||||
description="Timestamp when the message was created",
|
||||
)
|
||||
|
||||
|
||||
class DurableAgentWorkflowEntry(BaseModel):
|
||||
"""Represents a workflow and its associated data, including metadata on the source of the task request."""
|
||||
|
||||
input: str = Field(
|
||||
..., description="The input or description of the Workflow to be performed"
|
||||
)
|
||||
output: Optional[str] = Field(
|
||||
default=None, description="The output or result of the Workflow, if completed"
|
||||
)
|
||||
start_time: datetime = Field(
|
||||
default_factory=datetime.now,
|
||||
description="Timestamp when the workflow was started",
|
||||
)
|
||||
end_time: Optional[datetime] = Field(
|
||||
default_factory=datetime.now,
|
||||
description="Timestamp when the workflow was completed or failed",
|
||||
)
|
||||
messages: List[DurableAgentMessage] = Field(
|
||||
default_factory=list,
|
||||
description="Messages exchanged during the workflow (user, assistant, or tool messages).",
|
||||
)
|
||||
last_message: Optional[DurableAgentMessage] = Field(
|
||||
default=None, description="Last processed message in the workflow"
|
||||
)
|
||||
tool_history: List[ToolExecutionRecord] = Field(
|
||||
default_factory=list, description="Tool message exchanged during the workflow"
|
||||
)
|
||||
source: Optional[str] = Field(None, description="Entity that initiated the task.")
|
||||
source_workflow_instance_id: Optional[str] = Field(
|
||||
default=None,
|
||||
description="The workflow instance ID associated with the original request.",
|
||||
)
|
||||
|
||||
|
||||
class DurableAgentWorkflowState(BaseModel):
|
||||
"""Represents the state of multiple Agent workflows."""
|
||||
|
||||
instances: Dict[str, DurableAgentWorkflowEntry] = Field(
|
||||
default_factory=dict,
|
||||
description="Workflow entries indexed by their instance_id.",
|
||||
)
|
||||
chat_history: List[DurableAgentMessage] = Field(
|
||||
default_factory=list,
|
||||
description="Chat history of messages exchanged during the workflow.",
|
||||
)
|
|
@ -0,0 +1,3 @@
|
|||
from .otel import DaprAgentsOtel
|
||||
|
||||
__all__ = ["DaprAgentsOtel"]
|
|
@ -0,0 +1,144 @@
|
|||
from logging import Logger
|
||||
from typing import Union
|
||||
|
||||
from opentelemetry._logs import set_logger_provider
|
||||
from opentelemetry.metrics import set_meter_provider
|
||||
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
|
||||
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
|
||||
from opentelemetry.sdk.metrics import MeterProvider
|
||||
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
|
||||
from opentelemetry.sdk.resources import Resource, SERVICE_NAME
|
||||
from opentelemetry.sdk.trace import TracerProvider
|
||||
from opentelemetry.sdk.trace.export import BatchSpanProcessor
|
||||
from opentelemetry.trace import set_tracer_provider
|
||||
from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
|
||||
from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
|
||||
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
|
||||
|
||||
|
||||
class DaprAgentsOtel:
|
||||
"""
|
||||
OpenTelemetry configuration for Dapr agents.
|
||||
"""
|
||||
|
||||
def __init__(self, service_name: str = "", otlp_endpoint: str = ""):
|
||||
# Configure OpenTelemetry
|
||||
self.service_name = service_name
|
||||
self.otlp_endpoint = otlp_endpoint
|
||||
|
||||
self.setup_resources()
|
||||
|
||||
def setup_resources(self):
|
||||
"""
|
||||
Set up the resource for OpenTelemetry.
|
||||
"""
|
||||
|
||||
self._resource = Resource.create(
|
||||
attributes={
|
||||
SERVICE_NAME: str(self.service_name),
|
||||
}
|
||||
)
|
||||
|
||||
def create_and_instrument_meter_provider(
|
||||
self,
|
||||
otlp_endpoint: str = "",
|
||||
) -> MeterProvider:
|
||||
"""
|
||||
Returns a `MeterProvider` that is configured to export metrics using the `PeriodicExportingMetricReader`
|
||||
which means that metrics are exported periodically in the background. The interval can be set by
|
||||
the environment variable `OTEL_METRIC_EXPORT_INTERVAL`. The default value is 60000ms (1 minute).
|
||||
|
||||
Also sets the global OpenTelemetry meter provider to the returned meter provider.
|
||||
"""
|
||||
|
||||
# Ensure the endpoint is set correctly
|
||||
endpoint = self._endpoint_validator(
|
||||
endpoint=self.otlp_endpoint if otlp_endpoint == "" else otlp_endpoint,
|
||||
telemetry_type="metrics",
|
||||
)
|
||||
|
||||
metric_exporter = OTLPMetricExporter(endpoint=str(endpoint))
|
||||
metric_reader = PeriodicExportingMetricReader(metric_exporter)
|
||||
meter_provider = MeterProvider(
|
||||
resource=self._resource, metric_readers=[metric_reader]
|
||||
)
|
||||
set_meter_provider(meter_provider)
|
||||
return meter_provider
|
||||
|
||||
def create_and_instrument_tracer_provider(
|
||||
self,
|
||||
otlp_endpoint: str = "",
|
||||
) -> TracerProvider:
|
||||
"""
|
||||
Returns a `TracerProvider` that is configured to export traces using the `BatchSpanProcessor`
|
||||
which means that traces are exported in batches. The batch size can be set by
|
||||
the environment variable `OTEL_TRACES_EXPORT_BATCH_SIZE`. The default value is 512.
|
||||
Also sets the global OpenTelemetry tracer provider to the returned tracer provider.
|
||||
"""
|
||||
|
||||
# Ensure the endpoint is set correctly
|
||||
endpoint = self._endpoint_validator(
|
||||
endpoint=self.otlp_endpoint if otlp_endpoint == "" else otlp_endpoint,
|
||||
telemetry_type="traces",
|
||||
)
|
||||
|
||||
trace_exporter = OTLPSpanExporter(endpoint=str(endpoint))
|
||||
tracer_processor = BatchSpanProcessor(trace_exporter)
|
||||
tracer_provider = TracerProvider(resource=self._resource)
|
||||
tracer_provider.add_span_processor(tracer_processor)
|
||||
set_tracer_provider(tracer_provider)
|
||||
return tracer_provider
|
||||
|
||||
def create_and_instrument_logging_provider(
|
||||
self,
|
||||
logger: Logger,
|
||||
otlp_endpoint: str = "",
|
||||
) -> LoggerProvider:
|
||||
"""
|
||||
Returns a `LoggingProvider` that is configured to export logs using the `BatchLogProcessor`
|
||||
which means that logs are exported in batches. The batch size can be set by
|
||||
the environment variable `OTEL_LOGS_EXPORT_BATCH_SIZE`. The default value is 512.
|
||||
Also sets the global OpenTelemetry logging provider to the returned logging provider.
|
||||
"""
|
||||
|
||||
# Ensure the endpoint is set correctly
|
||||
endpoint = self._endpoint_validator(
|
||||
endpoint=self.otlp_endpoint if otlp_endpoint == "" else otlp_endpoint,
|
||||
telemetry_type="logs",
|
||||
)
|
||||
|
||||
log_exporter = OTLPLogExporter(endpoint=str(endpoint))
|
||||
logging_provider = LoggerProvider(resource=self._resource)
|
||||
logging_provider.add_log_record_processor(BatchLogRecordProcessor(log_exporter))
|
||||
set_logger_provider(logging_provider)
|
||||
|
||||
handler = LoggingHandler(logger_provider=logging_provider)
|
||||
logger.addHandler(handler)
|
||||
return logging_provider
|
||||
|
||||
def _endpoint_validator(
|
||||
self,
|
||||
endpoint: str,
|
||||
telemetry_type: str,
|
||||
) -> Union[str | Exception]:
|
||||
"""
|
||||
Validates the endpoint and method.
|
||||
"""
|
||||
|
||||
if endpoint == "":
|
||||
raise ValueError(
|
||||
"OTLP endpoint must be set either in the environment variable OTEL_EXPORTER_OTLP_ENDPOINT or in the constructor."
|
||||
)
|
||||
if endpoint.startswith("https://"):
|
||||
raise NotImplementedError(
|
||||
"OTLP over HTTPS is not supported. Please use HTTP."
|
||||
)
|
||||
|
||||
endpoint = (
|
||||
endpoint
|
||||
if endpoint.endswith(f"/v1/{telemetry_type}")
|
||||
else f"{endpoint}/v1/{telemetry_type}"
|
||||
)
|
||||
endpoint = endpoint if endpoint.startswith("http://") else f"http://{endpoint}"
|
||||
|
||||
return endpoint
|
|
@ -1,7 +1,8 @@
|
|||
import requests
|
||||
import os
|
||||
|
||||
def construct_auth_headers(auth_url, grant_type='client_credentials', **kwargs):
|
||||
|
||||
def construct_auth_headers(auth_url, grant_type="client_credentials", **kwargs):
|
||||
"""
|
||||
Construct authorization headers for API requests.
|
||||
|
||||
|
@ -14,15 +15,19 @@ def construct_auth_headers(auth_url, grant_type='client_credentials', **kwargs):
|
|||
|
||||
# Define default parameters based on the grant_type
|
||||
data = {
|
||||
'grant_type': grant_type,
|
||||
"grant_type": grant_type,
|
||||
}
|
||||
|
||||
# Defaults for client_credentials grant type
|
||||
if grant_type == 'client_credentials':
|
||||
data.update({
|
||||
'client_id': kwargs.get('client_id', os.getenv('CLIENT_ID')),
|
||||
'client_secret': kwargs.get('client_secret', os.getenv('CLIENT_SECRET')),
|
||||
})
|
||||
if grant_type == "client_credentials":
|
||||
data.update(
|
||||
{
|
||||
"client_id": kwargs.get("client_id", os.getenv("CLIENT_ID")),
|
||||
"client_secret": kwargs.get(
|
||||
"client_secret", os.getenv("CLIENT_SECRET")
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
# Add any additional data passed in kwargs
|
||||
data.update(kwargs)
|
||||
|
@ -37,7 +42,7 @@ def construct_auth_headers(auth_url, grant_type='client_credentials', **kwargs):
|
|||
auth_response_data = auth_response.json()
|
||||
|
||||
# Extract the access token
|
||||
access_token = auth_response_data.get('access_token')
|
||||
access_token = auth_response_data.get("access_token")
|
||||
|
||||
if not access_token:
|
||||
raise ValueError("No access token found in the response")
|
|
@ -2,6 +2,7 @@ from dapr_agents.types import BaseMessage
|
|||
from typing import List
|
||||
from pydantic import ValidationError
|
||||
|
||||
|
||||
def messages_to_string(messages: List[BaseMessage]) -> str:
|
||||
"""
|
||||
Converts messages into a single string with roles and content.
|
|
@ -1,17 +1,18 @@
|
|||
from dapr_agents.types.message import BaseMessage
|
||||
from typing import Optional, Any, Union, Dict
|
||||
from typing import Optional, Any, Union, Dict, Sequence
|
||||
from colorama import Style
|
||||
|
||||
# Define your custom colors as a dictionary
|
||||
COLORS = {
|
||||
"dapr_agents_teal": '\033[38;2;147;191;183m',
|
||||
"dapr_agents_mustard": '\033[38;2;242;182;128m',
|
||||
"dapr_agents_red": '\033[38;2;217;95;118m',
|
||||
"dapr_agents_pink": '\033[38;2;191;69;126m',
|
||||
"dapr_agents_purple": '\033[38;2;146;94;130m',
|
||||
"reset": Style.RESET_ALL
|
||||
"dapr_agents_teal": "\033[38;2;147;191;183m",
|
||||
"dapr_agents_mustard": "\033[38;2;242;182;128m",
|
||||
"dapr_agents_red": "\033[38;2;217;95;118m",
|
||||
"dapr_agents_pink": "\033[38;2;191;69;126m",
|
||||
"dapr_agents_purple": "\033[38;2;146;94;130m",
|
||||
"reset": Style.RESET_ALL,
|
||||
}
|
||||
|
||||
|
||||
class ColorTextFormatter:
|
||||
"""
|
||||
A flexible text formatter class to print colored text dynamically.
|
||||
|
@ -25,7 +26,7 @@ class ColorTextFormatter:
|
|||
Args:
|
||||
default_color (Optional[str]): Default color to use for text. Defaults to reset.
|
||||
"""
|
||||
self.default_color = COLORS.get(default_color, COLORS["reset"])
|
||||
self.default_color = COLORS.get(default_color or "reset", COLORS["reset"])
|
||||
|
||||
def format_text(self, text: str, color: Optional[str] = None) -> str:
|
||||
"""
|
||||
|
@ -38,25 +39,27 @@ class ColorTextFormatter:
|
|||
Returns:
|
||||
str: Colored text.
|
||||
"""
|
||||
color_code = COLORS.get(color, self.default_color)
|
||||
color_code = COLORS.get(color or "reset", self.default_color)
|
||||
return f"{color_code}{text}{COLORS['reset']}"
|
||||
|
||||
def print_colored_text(self, text_blocks: list[tuple[str, Optional[str]]]):
|
||||
def print_colored_text(self, text_blocks: Sequence[tuple[str, Optional[str]]]):
|
||||
"""
|
||||
Print multiple blocks of text in specified colors dynamically, ensuring that newlines
|
||||
are handled correctly.
|
||||
|
||||
Args:
|
||||
text_blocks (list[tuple[str, Optional[str]]]): A list of text and color name pairs.
|
||||
text_blocks (Sequence[tuple[str, Optional[str]]]): A list of text and color name pairs.
|
||||
"""
|
||||
for text, color in text_blocks:
|
||||
# Split the text by \n to handle each line separately
|
||||
lines = text.split("\n")
|
||||
for i, line in enumerate(lines):
|
||||
formatted_line = self.format_text(line, color)
|
||||
print(formatted_line, end="\n" if i < len(lines) - 1 else "")
|
||||
print(
|
||||
formatted_line, flush=True, end="\n" if i < len(lines) - 1 else ""
|
||||
)
|
||||
|
||||
print(COLORS['reset']) # Ensure terminal color is reset at the end
|
||||
print(COLORS["reset"]) # Ensure terminal color is reset at the end
|
||||
|
||||
def print_separator(self):
|
||||
"""
|
||||
|
@ -65,7 +68,11 @@ class ColorTextFormatter:
|
|||
separator = "-" * 80
|
||||
self.print_colored_text([(f"\n{separator}\n", "reset")])
|
||||
|
||||
def print_message(self, message: Union[BaseMessage, Dict[str, Any]], include_separator: bool = True):
|
||||
def print_message(
|
||||
self,
|
||||
message: Union[BaseMessage, Dict[str, Any]],
|
||||
include_separator: bool = True,
|
||||
):
|
||||
"""
|
||||
Prints messages with colored formatting based on the role and message content.
|
||||
|
||||
|
@ -91,7 +98,7 @@ class ColorTextFormatter:
|
|||
"user": "dapr_agents_mustard",
|
||||
"assistant": "dapr_agents_teal",
|
||||
"tool_calls": "dapr_agents_red",
|
||||
"tool": "dapr_agents_pink"
|
||||
"tool": "dapr_agents_pink",
|
||||
}
|
||||
|
||||
# Handle tool calls
|
||||
|
@ -103,7 +110,10 @@ class ColorTextFormatter:
|
|||
tool_id = tool_call["id"]
|
||||
tool_call_text = [
|
||||
(f"{formatted_role}:\n", color_map["tool_calls"]),
|
||||
(f"Function name: {function_name} (Call Id: {tool_id})\n", color_map["tool_calls"]),
|
||||
(
|
||||
f"Function name: {function_name} (Call Id: {tool_id})\n",
|
||||
color_map["tool_calls"],
|
||||
),
|
||||
(f"Arguments: {arguments}", color_map["tool_calls"]),
|
||||
]
|
||||
self.print_colored_text(tool_call_text)
|
||||
|
@ -142,7 +152,7 @@ class ColorTextFormatter:
|
|||
color_map = {
|
||||
"Thought": "dapr_agents_red",
|
||||
"Action": "dapr_agents_pink",
|
||||
"Observation": "dapr_agents_purple"
|
||||
"Observation": "dapr_agents_purple",
|
||||
}
|
||||
|
||||
# Get the color for the part type, defaulting to reset if not found
|
|
@ -1,4 +1,14 @@
|
|||
from .embedder import NVIDIAEmbedder, OpenAIEmbedder, SentenceTransformerEmbedder
|
||||
from .fetcher import ArxivFetcher
|
||||
from .reader import PyMuPDFReader, PyPDFReader
|
||||
from .splitter import TextSplitter
|
||||
from .embedder import OpenAIEmbedder, SentenceTransformerEmbedder, NVIDIAEmbedder
|
||||
|
||||
__all__ = [
|
||||
"ArxivFetcher",
|
||||
"PyMuPDFReader",
|
||||
"PyPDFReader",
|
||||
"TextSplitter",
|
||||
"OpenAIEmbedder",
|
||||
"SentenceTransformerEmbedder",
|
||||
"NVIDIAEmbedder",
|
||||
]
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
from .nvidia import NVIDIAEmbedder
|
||||
from .openai import OpenAIEmbedder
|
||||
from .sentence import SentenceTransformerEmbedder
|
||||
from .nvidia import NVIDIAEmbedder
|
||||
|
||||
__all__ = ["OpenAIEmbedder", "SentenceTransformerEmbedder", "NVIDIAEmbedder"]
|
||||
|
|
|
@ -2,6 +2,7 @@ from abc import ABC, abstractmethod
|
|||
from pydantic import BaseModel
|
||||
from typing import List, Any
|
||||
|
||||
|
||||
class EmbedderBase(BaseModel, ABC):
|
||||
"""
|
||||
Abstract base class for Embedders.
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue