Compare commits

..

No commits in common. "main" and "v0.5.0" have entirely different histories.
main ... v0.5.0

469 changed files with 20031 additions and 22531 deletions

View File

@ -1,48 +0,0 @@
---
name: New Content Needed
about: Template for requesting new documentation content
title: "[Content] "
labels: content/missing-information
assignees: ''
---
## Related Issue
<!-- Link to the original issue that triggered this content request -->
Related to: #<issue_number>
## Content Type
<!-- What type of content is needed? -->
- [ ] New feature documentation
- [ ] API reference
- [ ] How-to guide
- [ ] Tutorial
- [ ] Conceptual documentation
- [ ] Other (please specify)
## Target Audience
<!-- Who is this content for? -->
- [ ] Developers
- [ ] Operators
- [ ] Architects
- [ ] End users
- [ ] Other (please specify)
## Content Description
<!-- Provide a clear description of what content is needed -->
<!-- What should the documentation cover? What are the key points to include? -->
## Additional Context
<!-- Add any additional context about the content request here -->
<!-- Include any specific requirements, examples, or references -->
## Acceptance Criteria
<!-- What should be included in the documentation to consider it complete? -->
- [ ]
- [ ]
- [ ]
## Resources
<!-- Add any relevant resources, links, or references that might help with creating the content -->
## Notes
<!-- Any additional notes or comments -->

View File

@ -1,179 +0,0 @@
// List of owners who can control the dapr-bot workflow.
// IMPORTANT: Make sure usernames are lower-cased.
const owners = [
'yaron2',
'cyb3rward0g'
]
// Body of the tracking issue opened in dapr/docs when an issue in
// dapr/dapr is labeled "docs-needed".
const docsIssueBodyTpl = (issueNumber) =>
`This issue was automatically created by [Dapr Bot](https://github.com/dapr/dapr-agents/blob/master/.github/workflows/dapr-bot.yml) because a "docs-needed" label was added to dapr/dapr#${issueNumber}. \n\nTODO: Add more details as per [this template](.github/ISSUE_TEMPLATE/new-content-needed.md).`
module.exports = async ({ github, context }) => {
if (
context.eventName == 'issue_comment' &&
context.payload.action == 'created'
) {
await handleIssueCommentCreate({ github, context })
} else if (
context.eventName == 'issues' &&
context.payload.action == 'labeled'
) {
await handleIssueLabeled({ github, context })
} else {
console.log(`[main] event ${context.eventName} not supported, exiting.`)
}
}
/**
* Handle issue comment create event.
*/
/**
 * Handle issue comment create event.
 * Parses the comment body for a slash command. "/assign" may be run by
 * anyone; all other commands require the actor to be in `owners`, and
 * unauthorized actors get an explanatory comment back.
 * @param {*} github GitHub object reference
 * @param {*} context workflow context (payload, issue, actor)
 */
async function handleIssueCommentCreate({ github, context }) {
  const payload = context.payload
  const issue = context.issue
  const username = context.actor.toLowerCase()
  // context.issue is also populated for PR comments; detect that case.
  const isFromPulls = !!payload.issue.pull_request
  // Normalize to a string and strip surrounding whitespace before parsing.
  const commentBody = ((payload.comment.body || '') + '').trim()
  console.log(` Issue(owner/repo/number): ${issue.owner}/${issue.repo}/${issue.number}
Actor(current username / id): ${username} / ${payload.comment.user.id}
CommentID: ${payload.comment.id}
CreatedAt: ${payload.comment.created_at}`
  )
  // Ignore anything that is not a slash command.
  if (!commentBody || !commentBody.startsWith('/')) {
    return
  }
  // The command is the first whitespace-delimited token.
  const [command] = commentBody.split(/\s+/)
  console.log(` Command: ${command}`)
  // Commands that can be executed by anyone.
  if (command == '/assign') {
    await cmdAssign(github, issue, username, isFromPulls)
    return
  }
  // Commands below this point can only be executed by owners.
  if (!owners.includes(username)) {
    console.log(
      `[handleIssueCommentCreate] user ${username} is not an owner, exiting.`
    )
    await commentUserNotAllowed(github, issue, username)
    return
  }
  switch (command) {
    case '/make-me-laugh':
      await cmdMakeMeLaugh(github, issue)
      break
    // TODO: add more in future. Ref: https://github.com/dapr/dapr/blob/master/.github/scripts/dapr_bot.js#L99
    default:
      console.log(
        `[handleIssueCommentCreate] command ${command} not found, exiting.`
      )
      break
  }
}
/**
* Handle issue labeled event.
*/
/**
 * Handle issue labeled event.
 * When the "docs-needed" label is added to an issue in the dapr org,
 * open a tracking issue in dapr/docs asking for the missing content.
 * @param {*} github GitHub object reference
 * @param {*} context workflow context (repo, payload)
 */
async function handleIssueLabeled({ github, context }) {
  const payload = context.payload
  const label = payload.label.name
  const issueNumber = payload.issue.number
  // Guard: this should not run in forks.
  if (context.repo.owner !== 'dapr') {
    console.log('[handleIssueLabeled] not running in dapr repo, exiting.')
    return
  }
  // Authorization is not required here because it's triggered by an issue
  // label event, and only authorized users can add labels to issues.
  if (label != 'docs-needed') {
    console.log(
      `[handleIssueLabeled] label ${label} not supported, exiting.`
    )
    return
  }
  // Open the docs tracking issue.
  await github.rest.issues.create({
    owner: 'dapr',
    repo: 'docs',
    title: `New content needed for dapr/dapr#${issueNumber}`,
    labels: ['content/missing-information', 'created-by/dapr-bot'],
    body: docsIssueBodyTpl(issueNumber),
  })
}
/**
* Assign the issue to the user who commented.
* @param {*} github GitHub object reference
* @param {*} issue GitHub issue object
* @param {string} username GitHub user who commented
* @param {boolean} isFromPulls is the workflow triggered by a pull request?
*/
/**
 * Assign the issue to the user who commented.
 * Skips pull requests and issues that already have assignees.
 * @param {*} github GitHub object reference
 * @param {*} issue GitHub issue object
 * @param {string} username GitHub user who commented
 * @param {boolean} isFromPulls is the workflow triggered by a pull request?
 */
async function cmdAssign(github, issue, username, isFromPulls) {
  // Pull requests are out of scope for /assign.
  if (isFromPulls) {
    console.log(
      '[cmdAssign] pull requests unsupported, skipping command execution.'
    )
    return
  }
  // Respect existing assignees.
  // NOTE(review): `issue` is context.issue, which normally carries only
  // owner/repo/number — confirm `assignees` is ever populated here.
  if (issue.assignees && issue.assignees.length !== 0) {
    console.log(
      '[cmdAssign] issue already has assignees, skipping command execution.'
    )
    return
  }
  await github.rest.issues.addAssignees({
    owner: issue.owner,
    repo: issue.repo,
    issue_number: issue.number,
    assignees: [username],
  })
}
/**
* Comment a funny joke.
* @param {*} github GitHub object reference
* @param {*} issue GitHub issue object
*/
/**
 * Comment a funny joke.
 * Fetches a random joke from the Official Joke API and posts it as a
 * comment on the issue. Falls back to a canned line when the response
 * lacks a setup/punchline pair.
 * @param {*} github GitHub object reference
 * @param {*} issue GitHub issue object
 */
async function cmdMakeMeLaugh(github, issue) {
  const result = await github.request(
    'https://official-joke-api.appspot.com/random_joke'
  )
  // Fix: `jokedata` and `joke` were implicit globals (a ReferenceError in
  // strict mode / ES modules, and a global leak otherwise).
  const jokedata = result.data
  let joke = 'I have a bad feeling about this.'
  if (jokedata && jokedata.setup && jokedata.punchline) {
    joke = `${jokedata.setup} - ${jokedata.punchline}`
  }
  await github.rest.issues.createComment({
    owner: issue.owner,
    repo: issue.repo,
    issue_number: issue.number,
    body: joke,
  })
}
/**
* Sends a comment when the user who tried triggering the bot action is not allowed to do so.
* @param {*} github GitHub object reference
* @param {*} issue GitHub issue object
* @param {string} username GitHub user who commented
*/
/**
 * Sends a comment when the user who tried triggering the bot action is not
 * allowed to do so.
 * @param {*} github GitHub object reference
 * @param {*} issue GitHub issue object
 * @param {string} username GitHub user who commented
 */
async function commentUserNotAllowed(github, issue, username) {
  // Backticks around the path render it as code in the GitHub comment.
  const allowlistFile = '`.github/scripts/dapr_bot.js`'
  const message = `👋 @${username}, my apologies but I can't perform this action for you because your username is not in the allowlist in the file ${allowlistFile}.`
  await github.rest.issues.createComment({
    owner: issue.owner,
    repo: issue.repo,
    issue_number: issue.number,
    body: message,
  })
}

View File

@ -1,74 +0,0 @@
name: Lint and Build
on:
push:
branches:
- feature/*
- feat/*
- bugfix/*
- hotfix/*
- fix/*
pull_request:
branches:
- main
- feature/*
- release-*
workflow_dispatch:
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install setuptools wheel tox
- name: Run Autoformatter
run: |
tox -e ruff
statusResult=$(git status -u --porcelain)
if [ -z "$statusResult" ]
then
exit 0
else
echo "Source files are not formatted correctly. Run 'tox -e ruff' to autoformat."
exit 1
fi
- name: Run Linter
run: |
tox -e flake8
build:
needs: lint
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python_ver: ["3.10", "3.11", "3.12", "3.13"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python_ver }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python_ver }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install setuptools wheel tox
- name: Install package and test dependencies
run: |
pip cache purge
pip install --upgrade pip setuptools wheel
pip install -e .
pip install -e .[test]
- name: Check Typing
run: |
tox -e type
- name: Run Tests
run: |
tox -e pytest

View File

@ -4,14 +4,12 @@ on:
branches: branches:
- main - main
paths: paths:
- docs/** - docs
- '!docs/development/**'
pull_request: pull_request:
branches: branches:
- main - main
paths: paths:
- docs/** - docs
- '!docs/development/**'
workflow_dispatch: workflow_dispatch:
permissions: permissions:
contents: write contents: write
@ -20,7 +18,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: Review changed files name: Review changed files
outputs: outputs:
docs_any_changed: ${{ steps.changed-files.outputs.docs_any_changed }} docs_any_changed: NaN
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Get changed files - name: Get changed files
@ -31,7 +29,6 @@ jobs:
docs: docs:
- 'docs/**' - 'docs/**'
- 'mkdocs.yml' - 'mkdocs.yml'
- '!docs/development/**'
base_sha: 'main' base_sha: 'main'
documentation_validation: documentation_validation:
@ -45,16 +42,10 @@ jobs:
- name: Remove plugins from mkdocs configuration - name: Remove plugins from mkdocs configuration
run: | run: |
sed -i '/^plugins:/,/^[^ ]/d' mkdocs.yml sed -i '/^plugins:/,/^[^ ]/d' mkdocs.yml
- name: Install Python dependencies - name: Run MkDocs build
run: | uses: Kjuly/mkdocs-page-builder@main
pip install mkdocs-material
pip install .[recommended,git,imaging]
pip install mkdocs-jupyter
- name: Validate build
run: mkdocs build
deploy: deploy:
if: github.ref == 'refs/heads/main'
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: documentation_validation needs: documentation_validation
steps: steps:

12
.gitignore vendored
View File

@ -6,7 +6,6 @@ test
# Byte-compiled / optimized / DLL files # Byte-compiled / optimized / DLL files
__pycache__/ __pycache__/
**/__pycache__/
*.py[cod] *.py[cod]
*$py.class *$py.class
@ -166,14 +165,3 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder. # option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/ #.idea/
.idea .idea
.ruff_cache/
# Quickstart outputs
*_state.json
quickstarts/**/*_state.json
chroma_db/
db/
# Requirements files since we use pyproject.toml instead
dev-requirements.txt

View File

@ -1,3 +0,0 @@
# These owners are the maintainers and approvers of this repo
# TODO: we need official teams in dapr github https://github.com/orgs/dapr/teams
* @yaron2 @Cyb3rWard0g

86
CODE_OF_CONDUCT.md Normal file
View File

@ -0,0 +1,86 @@
# Code of Conduct
We are committed to fostering a welcoming, inclusive, and respectful environment for everyone involved in this project. This Code of Conduct outlines the expected behaviors within our community and the steps for reporting unacceptable actions. By participating, you agree to uphold these standards, helping to create a positive and collaborative space.
---
## Our Pledge
As members, contributors, and leaders of this community, we pledge to:
* Ensure participation in our project is free from harassment, discrimination, or exclusion.
* Treat everyone with respect and empathy, regardless of factors such as age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity or expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual orientation.
* Act in ways that contribute to a safe, welcoming, and supportive environment for all participants.
---
## Our Standards
We strive to create an environment where all members can thrive. Examples of positive behaviors include:
* Showing kindness, empathy, and consideration for others.
* Being respectful of differing opinions, experiences, and perspectives.
* Providing constructive feedback in a supportive manner.
* Taking responsibility for mistakes, apologizing when necessary, and learning from experiences.
* Prioritizing the success and well-being of the entire community over individual gains.
The following behaviors are considered unacceptable:
* Using sexualized language or imagery, or engaging in inappropriate sexual attention or advances.
* Making insulting, derogatory, or inflammatory comments, including trolling or personal attacks.
* Engaging in harassment, whether public or private.
* Publishing private or sensitive information about others without explicit consent.
* Engaging in behavior that disrupts discussions, events, or contributions in a negative way.
* Any conduct that could reasonably be deemed unprofessional or harmful to others.
---
## Scope
This Code of Conduct applies to all areas of interaction within the community, including but not limited to:
* Discussions on forums, repositories, or other official communication channels.
* Contributions made to the project, such as code, documentation, or issues.
* Public representation of the community, such as through official social media accounts or at events.
It also applies to actions outside these spaces if they negatively impact the health, safety, or inclusivity of the community.
---
## Enforcement Responsibilities
Community leaders are responsible for ensuring that this Code of Conduct is upheld. They may take appropriate and fair corrective actions in response to any behavior that violates these standards, including:
* Removing, editing, or rejecting comments, commits, issues, or other contributions not aligned with the Code of Conduct.
* Temporarily or permanently banning individuals for repeated or severe violations.
Leaders will always strive to communicate their decisions clearly and fairly.
---
## Reporting Issues
If you experience or witness unacceptable behavior, please report it to the project's owner [Roberto Rodriguez](https://www.linkedin.com/in/cyb3rward0g/). Your report will be handled with sensitivity, and we will respect your privacy and confidentiality while addressing the issue.
When reporting, please include:
* A description of the incident.
* When and where it occurred.
* Any additional context or supporting evidence, if available.
---
## Enforcement Process
We encourage resolving issues through dialogue when possible, but community leaders will intervene when necessary. Actions may include warnings, temporary bans, or permanent removal from the community, depending on the severity of the behavior.
---
## Attribution
This Code of Conduct is inspired by the [Contributor Covenant, version 2.0](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html) and has drawn inspiration from open source community guidelines by Microsoft, Mozilla, and others.
For further context on best practices for open source codes of conduct, see the [Contributor Covenant FAQ](https://www.contributor-covenant.org/faq).
---
Thank you for helping to create a positive environment! ❤️

View File

@ -1,15 +0,0 @@
# Governance
## Project Maintainers
[Project maintainers](https://github.com/dapr/community/blob/master/MAINTAINERS.md) are responsible for activities around maintaining and updating Dapr. Final decisions on the project reside with the project maintainers.
Maintainers MUST remain active. If they are unresponsive for >3 months, they will be automatically removed unless a [super-majority](https://en.wikipedia.org/wiki/Supermajority#Two-thirds_vote) of the other project maintainers agrees to extend the period to be greater than 3 months.
New maintainers can be added to the project by a [super-majority](https://en.wikipedia.org/wiki/Supermajority#Two-thirds_vote) vote of the existing maintainers. A potential maintainer may be nominated by an existing maintainer. A vote is conducted in private between the current maintainers over the course of a one week voting period. At the end of the week, votes are counted and a pull request is made on the repo adding the new maintainer to the [CODEOWNERS](CODEOWNERS) file.
A maintainer may step down by submitting an [issue](https://github.com/dapr/dapr-agents/issues/new) stating their intent.
Changes to this governance document require a pull request with approval from a [super-majority](https://en.wikipedia.org/wiki/Supermajority#Two-thirds_vote) of the current maintainers.
## Code of Conduct
This project has adopted the [Contributor Covenant Code of Conduct](https://github.com/dapr/community/blob/master/CODE-OF-CONDUCT.md)

View File

@ -1,26 +1,6 @@
# Get all directories within quickstarts # Get all directories within quickstarts
QUICKSTART_DIRS := $(shell find quickstarts -mindepth 1 -maxdepth 1 -type d -exec basename {} \;) QUICKSTART_DIRS := $(shell find quickstarts -mindepth 1 -maxdepth 1 -type d -exec basename {} \;)
# Test targets
.PHONY: test
test:
@echo "Running tests..."
python -m pytest tests/ -v --tb=short
.PHONY: test-cov
test-cov:
@echo "Running tests with coverage..."
python -m pytest tests/ -v --cov=dapr_agents --cov-report=term-missing --cov-report=html
.PHONY: test-install
test-install:
@echo "Installing test dependencies..."
pip install install -e .[test]
.PHONY: test-all
test-all: test-install test-cov
@echo "All tests completed!"
# Main target to validate all quickstarts # Main target to validate all quickstarts
.PHONY: validate-quickstarts .PHONY: validate-quickstarts
validate-quickstarts: validate-quickstarts:

View File

@ -1,13 +1,5 @@
# Dapr Agents: A Framework for Agentic AI Systems # Dapr Agents: A Framework for Agentic AI Systems
[![PyPI - Version](https://img.shields.io/pypi/v/dapr-agents?style=flat&logo=pypi&logoColor=white&label=Latest%20version)](https://pypi.org/project/dapr-agents/)
[![PyPI - Downloads](https://img.shields.io/pypi/dm/dapr-agents?style=flat&logo=pypi&logoColor=white&label=Downloads)](https://pypi.org/project/dapr-agents/)
[![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/dapr/dapr-agents/.github%2Fworkflows%2Fbuild.yaml?branch=main&label=Build&logo=github)](https://github.com/dapr/dapr-agents/actions/workflows/build.yaml)
[![GitHub License](https://img.shields.io/github/license/dapr/dapr-agents?style=flat&label=License&logo=github)](https://github.com/dapr/dapr-agents/blob/main/LICENSE)
[![Discord](https://img.shields.io/discord/778680217417809931?label=Discord&style=flat&logo=discord)](http://bit.ly/dapr-discord)
[![YouTube Channel Views](https://img.shields.io/youtube/channel/views/UCtpSQ9BLB_3EXdWAUQYwnRA?style=flat&label=YouTube%20views&logo=youtube)](https://youtube.com/@daprdev)
[![X (formerly Twitter) Follow](https://img.shields.io/twitter/follow/daprdev?logo=x&style=flat)](https://twitter.com/daprdev)
Dapr Agents is a developer framework designed to build production-grade resilient AI agent systems that operate at scale. Built on top of the battle-tested Dapr project, it enables software developers to create AI agents that reason, act, and collaborate using Large Language Models (LLMs), while leveraging built-in observability and stateful workflow execution to guarantee agentic workflows complete successfully, no matter how complex. Dapr Agents is a developer framework designed to build production-grade resilient AI agent systems that operate at scale. Built on top of the battle-tested Dapr project, it enables software developers to create AI agents that reason, act, and collaborate using Large Language Models (LLMs), while leveraging built-in observability and stateful workflow execution to guarantee agentic workflows complete successfully, no matter how complex.
![](./docs/img/logo-workflows.png) ![](./docs/img/logo-workflows.png)
@ -66,10 +58,10 @@ As a part of **CNCF**, Dapr Agents is vendor-neutral, eliminating concerns about
## Roadmap ## Roadmap
Here are some of the major features we're working on: Here are some of the major features we're working on for the current quarter:
### Q2 2025 ### Q2 2024
- **MCP Support** - Integration with Anthropic's MCP platform ([#50](https://github.com/dapr/dapr-agents/issues/50)) - **MCP Support** - Integration with Anthropic's MCP platform ([#50](https://github.com/dapr/dapr-agents/issues/50))
- **Agent Interaction Tracing** - Enhanced observability of agent interactions with LLMs and tools ([#79](https://github.com/dapr/dapr-agents/issues/79)) - **Agent Interaction Tracing** - Enhanced observability of agent interactions with LLMs and tools ([#79](https://github.com/dapr/dapr-agents/issues/79))
- **Streaming LLM Output** - Real-time streaming capabilities for LLM responses ([#80](https://github.com/dapr/dapr-agents/issues/80)) - **Streaming LLM Output** - Real-time streaming capabilities for LLM responses ([#80](https://github.com/dapr/dapr-agents/issues/80))
- **HTTP Endpoint Tools** - Support for using Dapr's HTTP endpoint capabilities for tool calling ([#81](https://github.com/dapr/dapr-agents/issues/81)) - **HTTP Endpoint Tools** - Support for using Dapr's HTTP endpoint capabilities for tool calling ([#81](https://github.com/dapr/dapr-agents/issues/81))
@ -83,6 +75,7 @@ Here are some of the major features we're working on:
For more details about these features and other planned work, please check out our [GitHub issues](https://github.com/dapr/dapr-agents/issues). For more details about these features and other planned work, please check out our [GitHub issues](https://github.com/dapr/dapr-agents/issues).
### Language Support ### Language Support
| Language | Current Status | Development Status | Stable Status | | Language | Current Status | Development Status | Stable Status |
@ -91,17 +84,6 @@ For more details about these features and other planned work, please check out o
| .NET | Planning | Q3 2025 | Q4 2025 | | .NET | Planning | Q3 2025 | Q4 2025 |
| Other Languages | Coming Soon | TBD | TBD | | Other Languages | Coming Soon | TBD | TBD |
## Documentation
- [Development Guide](docs/development/README.md) - For developers and contributors
## Community
### Contributing to Dapr Agents
Please refer to our [Dapr Community Code of Conduct](https://github.com/dapr/community/blob/master/CODE-OF-CONDUCT.md)
For development setup and guidelines, see our [Development Guide](docs/development/README.md).
## Getting Started ## Getting Started
@ -110,6 +92,8 @@ Prerequisites:
- [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/) - [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/)
- [Python 3.10](https://www.python.org/downloads/release/python-3100/) - [Python 3.10](https://www.python.org/downloads/release/python-3100/)
### Install Dapr Agents ### Install Dapr Agents
```bash ```bash
@ -129,3 +113,7 @@ Dapr Agents is an open-source project under the CNCF umbrella, and we welcome co
- Documentation: [https://dapr.github.io/dapr-agents/](https://dapr.github.io/dapr-agents/) - Documentation: [https://dapr.github.io/dapr-agents/](https://dapr.github.io/dapr-agents/)
- Community Discord: [Join the discussion](https://bit.ly/dapr-discord). - Community Discord: [Join the discussion](https://bit.ly/dapr-discord).
- Contribute: Open an issue or submit a PR to help improve Dapr Agents! - Contribute: Open an issue or submit a PR to help improve Dapr Agents!
## Code of Conduct
Please refer to our [Dapr Community Code of Conduct](https://github.com/dapr/community/blob/master/CODE-OF-CONDUCT.md)

View File

@ -1,3 +0,0 @@
## Security Policy
https://docs.dapr.io/operations/support/support-security-issues/

View File

@ -0,0 +1,431 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# OpenAI Tool Calling Agent - Dummy Weather Example\n",
"\n",
"* Collaborator: Roberto Rodriguez @Cyb3rWard0g"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Environment Variables"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv() # take environment variables from .env."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Enable Logging"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"logging.basicConfig(level=logging.INFO)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Define Tools"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents import tool\n",
"from pydantic import BaseModel, Field\n",
"\n",
"class GetWeatherSchema(BaseModel):\n",
" location: str = Field(description=\"location to get weather for\")\n",
"\n",
"@tool(args_model=GetWeatherSchema)\n",
"def get_weather(location: str) -> str:\n",
" \"\"\"Get weather information for a specific location.\"\"\"\n",
" import random\n",
" temperature = random.randint(60, 80)\n",
" return f\"{location}: {temperature}F.\"\n",
"\n",
"class JumpSchema(BaseModel):\n",
" distance: str = Field(description=\"Distance for agent to jump\")\n",
"\n",
"@tool(args_model=JumpSchema)\n",
"def jump(distance: str) -> str:\n",
" \"\"\"Jump a specific distance.\"\"\"\n",
" return f\"I jumped the following distance {distance}\"\n",
"\n",
"tools = [get_weather,jump]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Agent"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.llm.openai.client.base:Initializing OpenAI client...\n",
"INFO:dapr_agents.tool.executor:Tool registered: GetWeather\n",
"INFO:dapr_agents.tool.executor:Tool registered: Jump\n",
"INFO:dapr_agents.tool.executor:Tool Executor initialized with 2 tool(s).\n",
"INFO:dapr_agents.agent.base:Constructing system_prompt from agent attributes.\n",
"INFO:dapr_agents.agent.base:Using system_prompt to create the prompt template.\n",
"INFO:dapr_agents.agent.base:Pre-filled prompt template with attributes: ['name', 'role', 'goal']\n"
]
}
],
"source": [
"from dapr_agents import ReActAgent\n",
"\n",
"AIAgent = ReActAgent(\n",
" name=\"Rob\",\n",
" role= \"Weather Assistant\",\n",
" tools=tools\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatPromptTemplate(input_variables=['chat_history'], pre_filled_variables={'name': 'Rob', 'role': 'Weather Assistant', 'goal': 'Help humans'}, messages=[('system', '# Today\\'s date is: April 05, 2025\\n\\n## Name\\nYour name is {{name}}.\\n\\n## Role\\nYour role is {{role}}.\\n\\n## Goal\\n{{goal}}.\\n\\n## Tools\\nYou have access ONLY to the following tools:\\nGetWeather: Get weather information for a specific location.. Args schema: {\\'location\\': {\\'description\\': \\'location to get weather for\\', \\'type\\': \\'string\\'}}\\nJump: Jump a specific distance.. Args schema: {\\'distance\\': {\\'description\\': \\'Distance for agent to jump\\', \\'type\\': \\'string\\'}}\\n\\nIf you think about using tool, it must use the correct tool JSON blob format as shown below:\\n```\\n{\\n \"name\": $TOOL_NAME,\\n \"arguments\": $INPUT\\n}\\n```\\n\\n## ReAct Format\\nThought: Reflect on the current state of the conversation or task. If additional information is needed, determine if using a tool is necessary. When a tool is required, briefly explain why it is needed for the specific step at hand, and immediately follow this with an `Action:` statement to address that specific requirement. Avoid combining multiple tool requests in a single `Thought`. If no tools are needed, proceed directly to an `Answer:` statement.\\nAction:\\n```\\n{\\n \"name\": $TOOL_NAME,\\n \"arguments\": $INPUT\\n}\\n```\\nObservation: Describe the result of the action taken.\\n... 
(repeat Thought/Action/Observation as needed, but **ALWAYS proceed to a final `Answer:` statement when you have enough information**)\\nThought: I now have sufficient information to answer the initial question.\\nAnswer: ALWAYS proceed to a final `Answer:` statement once enough information is gathered or if the tools do not provide the necessary data.\\n\\n### Providing a Final Answer\\nOnce you have enough information to answer the question OR if tools cannot provide the necessary data, respond using one of the following formats:\\n\\n1. **Direct Answer without Tools**:\\nThought: I can answer directly without using any tools. Answer: Direct answer based on previous interactions or current knowledge.\\n\\n2. **When All Needed Information is Gathered**:\\nThought: I now have sufficient information to answer the question. Answer: Complete final answer here.\\n\\n3. **If Tools Cannot Provide the Needed Information**:\\nThought: The available tools do not provide the necessary information. Answer: Explanation of limitation and relevant information if possible.\\n\\n### Key Guidelines\\n- Always Conclude with an `Answer:` statement.\\n- Ensure every response ends with an `Answer:` statement that summarizes the most recent findings or relevant information, avoiding incomplete thoughts.\\n- Direct Final Answer for Past or Known Information: If the user inquires about past interactions, respond directly with an Answer: based on the information in chat history.\\n- Avoid Repetitive Thought Statements: If the answer is ready, skip repetitive Thought steps and proceed directly to Answer.\\n- Minimize Redundant Steps: Use minimal Thought/Action/Observation cycles to arrive at a final Answer efficiently.\\n- Reference Past Information When Relevant: Use chat history accurately when answering questions about previous responses to avoid redundancy.\\n- Progressively Move Towards Finality: Reflect on the current step and avoid re-evaluating the entire user request each time. 
Aim to advance towards the final Answer in each cycle.\\n\\n## Chat History\\nThe chat history is provided to avoid repeating information and to ensure accurate references when summarizing past interactions.'), MessagePlaceHolder(variable_name=chat_history)], template_format='jinja2')"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"AIAgent.prompt_template"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'GetWeather'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"AIAgent.tools[0].name"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run Agent"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.agent.patterns.react.base:Iteration 1/10 started.\n",
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mHi my name is Roberto\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
"INFO:dapr_agents.agent.patterns.react.base:No action specified; continuing with further reasoning.\n",
"INFO:dapr_agents.agent.patterns.react.base:Iteration 2/10 started.\n",
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;217;95;118mThought: Hello Roberto! How can I assist you today?\u001b[0m\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
"INFO:dapr_agents.agent.patterns.react.base:Agent provided a direct final answer.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;217;95;118mThought: Answer: Hello Roberto! How can I assist you today?\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n",
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mHello Roberto! How can I assist you today?\u001b[0m\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Hello Roberto! How can I assist you today?'"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await AIAgent.run(\"Hi my name is Roberto\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'role': 'user', 'content': 'Hi my name is Roberto'},\n",
" {'content': 'Hello Roberto! How can I assist you today?',\n",
" 'role': 'assistant'}]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"AIAgent.chat_history"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.agent.patterns.react.base:Iteration 1/10 started.\n",
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mWhat is the weather in Virgina, New York and Washington DC?\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
"INFO:dapr_agents.agent.patterns.react.base:Executing GetWeather with arguments {'location': 'Virginia'}\n",
"INFO:dapr_agents.tool.executor:Running tool (auto): GetWeather\n",
"INFO:dapr_agents.agent.patterns.react.base:Iteration 2/10 started.\n",
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;217;95;118mThought: I need to get the current weather information for Virginia, New York, and Washington DC. I will fetch the data for each location separately. Let's start with Virginia.\u001b[0m\u001b[0m\n",
"\u001b[38;2;191;69;126mAction: {\"name\": \"GetWeather\", \"arguments\": {\"location\": \"Virginia\"}}\u001b[0m\u001b[0m\n",
"\u001b[38;2;146;94;130mObservation: Virginia: 77F.\u001b[0m\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
"INFO:dapr_agents.agent.patterns.react.base:Executing GetWeather with arguments {'location': 'New York'}\n",
"INFO:dapr_agents.tool.executor:Running tool (auto): GetWeather\n",
"INFO:dapr_agents.agent.patterns.react.base:Iteration 3/10 started.\n",
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;217;95;118mThought: I now have the weather information for Virginia. Next, I will get the weather information for New York.\u001b[0m\u001b[0m\n",
"\u001b[38;2;191;69;126mAction: {\"name\": \"GetWeather\", \"arguments\": {\"location\": \"New York\"}}\u001b[0m\u001b[0m\n",
"\u001b[38;2;146;94;130mObservation: New York: 68F.\u001b[0m\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
"INFO:dapr_agents.agent.patterns.react.base:Executing GetWeather with arguments {'location': 'Washington DC'}\n",
"INFO:dapr_agents.tool.executor:Running tool (auto): GetWeather\n",
"INFO:dapr_agents.agent.patterns.react.base:Iteration 4/10 started.\n",
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;217;95;118mThought: I have the weather information for Virginia and New York. Next, I will get the weather information for Washington DC.\u001b[0m\u001b[0m\n",
"\u001b[38;2;191;69;126mAction: {\"name\": \"GetWeather\", \"arguments\": {\"location\": \"Washington DC\"}}\u001b[0m\u001b[0m\n",
"\u001b[38;2;146;94;130mObservation: Washington DC: 69F.\u001b[0m\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
"INFO:dapr_agents.agent.patterns.react.base:Agent provided a direct final answer.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;217;95;118mThought: I now have the weather information for all requested locations. \u001b[0m\n",
"\u001b[38;2;217;95;118m\u001b[0m\n",
"\u001b[38;2;217;95;118mAnswer: The current weather is as follows:\u001b[0m\n",
"\u001b[38;2;217;95;118m- Virginia: 77°F\u001b[0m\n",
"\u001b[38;2;217;95;118m- New York: 68°F\u001b[0m\n",
"\u001b[38;2;217;95;118m- Washington DC: 69°F\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n",
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mThe current weather is as follows:\u001b[0m\n",
"\u001b[38;2;147;191;183m- Virginia: 77°F\u001b[0m\n",
"\u001b[38;2;147;191;183m- New York: 68°F\u001b[0m\n",
"\u001b[38;2;147;191;183m- Washington DC: 69°F\u001b[0m\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'The current weather is as follows:\\n- Virginia: 77°F\\n- New York: 68°F\\n- Washington DC: 69°F'"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await AIAgent.run(\"What is the weather in Virgina, New York and Washington DC?\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,32 @@
# OpenAPI Agent
The `OpenAPI Agent` represents a specialized agent designed to interact with the external world by transforming OpenAPI specifications into tools. This agent is crucial for scenarios where precise and authenticated API interactions are necessary, allowing the agent to understand and utilize API endpoints dynamically. By leveraging OpenAPI specifications, the agent can adapt to a wide range of APIs, converting each specification into tools that it can use autonomously.
## Agents
| Pattern | Overview |
| --- | --- |
| [ReAct (Reason + Act) MS Graph](react_agent_openapi_msgraph.ipynb) | An OpenAPI agent that applies the `ReAct` prompting technique, following a chain-of-thought reasoning (Thought, Action, Observation) loop. This agent autonomously selects the appropriate MS Graph API endpoint, performs the call, and integrates the response back into its reasoning cycle. |
## Tools
The `OpenAPI Agent` has two main tools created from OpenAPI specifications to facilitate dynamic API interaction. These tools allow the agent to identify relevant API endpoints and execute API calls effectively. Below is a breakdown of each tool's purpose, inputs, and how it operates within the agent's workflow.
### get_openapi_definition
* **Goal**: This tool retrieves a list of relevant API endpoints from OpenAPI specifications that the agent could use to fulfill the user's query. The tool leverages a vector store to store and search through API definitions, helping the agent narrow down potential APIs based on the task at hand.
* **Functionality**:
* Similarity Search: Takes the user's input and queries the `VectorToolStore` to find similar API tools. It ranks potential API endpoints based on similarity to the user's task and returns the top matches.
* Tool Usage: This tool is always called before any API call execution to ensure the agent understands which endpoint to use.
### open_api_call_executor
* **Goal**: This tool is responsible for executing API calls using the specific parameters and configuration associated with the selected OpenAPI endpoint. It provides flexibility to adjust API paths, methods, headers, and query parameters, making it versatile for interacting with any OpenAPI-defined API.
* **Functionality**:
* API Call Execution: Takes in a structured input of HTTP method, path parameters, headers, and other data required to make the API request.
* Endpoint Selection: After get_openapi_definition suggests possible endpoints, this tool is used to execute the chosen endpoint with specific parameters.
* Version Management: Ensures the correct API version is used, preventing duplication or misalignment of API path versions.
## How Do the Tools Work Together?
* Identify Relevant Endpoint: The agent first uses get_openapi_definition to identify a relevant API endpoint based on the user's query.
* Execute API Call: With the selected endpoint, open_api_call_executor is called to make the actual API request, providing the necessary method, parameters, headers, and data.
This design allows the `OpenAPI Agent` to dynamically interpret and call any API defined within an OpenAPI specification, adapting flexibly to various tasks and user requests.

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,30 @@
# The Weather Agent
The Weather Agent represents a basic example of an agent that interacts with the external world through tools, such as APIs. This agent demonstrates how a language model (LLM) can suggest which tool to use and provide the necessary inputs for tool execution. However, it is the agent—not the language model—that executes the tool and processes the results. Once the tool has been executed, the results are passed back to the language model for further suggestions, summaries, or next actions. This agent showcases the foundational concept of integrating language models with external tools to retrieve real-world data, such as weather information.
## Agents
| Pattern | Overview |
| --- | --- |
| [ToolCall (Function Calling)](toolcall_agent.ipynb) | A weather agent that uses OpenAI's tool calling (Function Calling) to pass tools in JSON schema format. The language model suggests the tool to be used based on the task, but the agent executes the tool and processes the results. |
| [ReAct (Reason + Act)](react_agent.ipynb) | A weather agent following the ReAct prompting technique. The language model uses a chain-of-thought reasoning process (Thought, Action, Observation) to suggest the next tool to use. The agent then executes the tool, and the results are fed back into the reasoning loop. |
## Tools
* **WeatherTool**: A tool that allows the agent to retrieve weather data by first obtaining geographical coordinates (latitude and longitude) using the Nominatim API. For weather data, the agent either calls the National Weather Service (NWS) API (for locations in the USA) or the Met.no API (for locations outside the USA). This tool is executed by the agent based on the suggestions provided by the language model.
* **HistoricalWeather**: A tool that retrieves historical weather data for a specified location and date range. The agent uses the Nominatim API to get the coordinates for the specified location and calls the Open-Meteo Historical Weather API to retrieve temperature data for past dates. This tool allows the agent to compare past weather conditions with current forecasts, providing richer insights.
### APIs Used
* Nominatim API: Provides geocoding services to convert city, state, and country into geographical coordinates (latitude and longitude).
* Endpoint: https://nominatim.openstreetmap.org/search.php
* Purpose: Used to fetch coordinates for a given location, which is then passed to weather APIs.
* National Weather Service (NWS) API: Provides weather data for locations within the United States.
* Endpoint: https://api.weather.gov
* Purpose: Used to retrieve detailed weather forecasts and temperature data for locations in the USA.
* Met.no API: Provides weather data for locations outside the United States.
* Endpoint: https://api.met.no/weatherapi
* Purpose: Used to retrieve weather forecasts and temperature data for locations outside the USA, offering international coverage.
* Open-Meteo Historical Weather API: Provides historical weather data for any location worldwide.
* Endpoint: https://archive-api.open-meteo.com/v1/archive
* Purpose: Used to retrieve historical weather data, including temperature readings for past dates, allowing the agent to analyze past weather conditions and trends.

View File

@ -0,0 +1,223 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# ReAct Weather Agent"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Environment Variables"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Modules"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents import Agent"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Tools"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from tools import WeatherForecast, HistoricalWeather"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Agent"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"weather_agent = Agent(\n",
" name=\"Weather Agent\",\n",
" role=\"Weather Expert\",\n",
" pattern=\"react\",\n",
" tools=[WeatherForecast(), HistoricalWeather()],\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run Agent"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mwhat will be the difference of temperature in Paris between 7 days ago and 7 from now?\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n",
"\u001b[38;2;217;95;118mThought: For this, I need to gather two pieces of information: the historical temperature of Paris from 7 days ago and the forecasted temperature for Paris 7 days from now.\u001b[0m\n",
"\u001b[38;2;217;95;118m\u001b[0m\n",
"\u001b[38;2;217;95;118mI'll start by retrieving the historical temperature data for Paris from 7 days ago.\u001b[0m\u001b[0m\n",
"\u001b[38;2;191;69;126mAction: {\"name\": \"Historicalweather\", \"arguments\": {\"city\": \"Paris\", \"state\": null, \"country\": \"France\", \"start_date\": \"2024-11-04\", \"end_date\": \"2024-11-04\"}}\u001b[0m\u001b[0m\n",
"\u001b[38;2;146;94;130mObservation: {'city': 'Paris', 'state': None, 'country': 'France', 'start_date': '2024-11-04', 'end_date': '2024-11-04', 'temperature_data': {'2024-11-04T00:00': 6.8, '2024-11-04T01:00': 8.7, '2024-11-04T02:00': 8.7, '2024-11-04T03:00': 8.6, '2024-11-04T04:00': 7.9, '2024-11-04T05:00': 7.3, '2024-11-04T06:00': 7.0, '2024-11-04T07:00': 6.8, '2024-11-04T08:00': 6.9, '2024-11-04T09:00': 7.3, '2024-11-04T10:00': 8.0, '2024-11-04T11:00': 9.6, '2024-11-04T12:00': 11.3, '2024-11-04T13:00': 14.0, '2024-11-04T14:00': 14.5, '2024-11-04T15:00': 14.7, '2024-11-04T16:00': 12.6, '2024-11-04T17:00': 11.2, '2024-11-04T18:00': 9.8, '2024-11-04T19:00': 9.1, '2024-11-04T20:00': 8.7, '2024-11-04T21:00': 8.0, '2024-11-04T22:00': 8.0, '2024-11-04T23:00': 7.3}, 'unit': '°C'}\u001b[0m\u001b[0m\n",
"\u001b[38;2;217;95;118mThought: I have obtained the historical temperatures for Paris on November 4, 2024. Next, I need to obtain the forecasted temperature for Paris 7 days from now, which will be November 18, 2024.\u001b[0m\u001b[0m\n",
"\u001b[38;2;191;69;126mAction: {\"name\": \"Weatherforecast\", \"arguments\": {\"city\": \"Paris\", \"state\": null, \"country\": \"France\"}}\u001b[0m\u001b[0m\n",
"\u001b[38;2;146;94;130mObservation: {'city': 'Paris', 'state': None, 'country': 'France', 'temperature': 7.0, 'unit': 'celsius'}\u001b[0m\u001b[0m\n",
"\u001b[38;2;217;95;118mThought: I now have sufficient information to calculate the temperature difference between 7 days ago and 7 days from now in Paris.\u001b[0m\n",
"\u001b[38;2;217;95;118m\u001b[0m\n",
"\u001b[38;2;217;95;118mAnswer: The average temperature on November 4, 2024, based on the historical data I retrieved, was approximately 9.3°C. The forecasted temperature for Paris on November 18, 2024, is 7.0°C. Therefore, the temperature difference is approximately 2.3°C, with the conditions expected to be cooler on November 18 compared to November 4.\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n",
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mThe average temperature on November 4, 2024, based on the historical data I retrieved, was approximately 9.3°C. The forecasted temperature for Paris on November 18, 2024, is 7.0°C. Therefore, the temperature difference is approximately 2.3°C, with the conditions expected to be cooler on November 18 compared to November 4.\u001b[0m\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'The average temperature on November 4, 2024, based on the historical data I retrieved, was approximately 9.3°C. The forecasted temperature for Paris on November 18, 2024, is 7.0°C. Therefore, the temperature difference is approximately 2.3°C, with the conditions expected to be cooler on November 18 compared to November 4.'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await weather_agent.run(\"what will be the difference of temperature in Paris between 7 days ago and 7 from now?\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'content': 'what will be the difference of temperature in Paris between 7 days ago and 7 from now?',\n",
" 'role': 'user'},\n",
" {'content': 'The average temperature on November 4, 2024, based on the historical data I retrieved, was approximately 9.3°C. The forecasted temperature for Paris on November 18, 2024, is 7.0°C. Therefore, the temperature difference is approximately 2.3°C, with the conditions expected to be cooler on November 18 compared to November 4.',\n",
" 'role': 'assistant'}]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"weather_agent.chat_history"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"await weather_agent.run(\"What was the weather like in Paris two days ago?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.8"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,264 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# ToolCall Weather Agent"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Environment Variables"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Modules"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents import Agent"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Tools"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from tools import WeatherForecast, HistoricalWeather"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Agent"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"weather_agent = Agent(\n",
" name=\"Weather Agent\",\n",
" role=\"Weather Expert\",\n",
" tools=[WeatherForecast(),HistoricalWeather()],\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run Agent"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mwhat is the weather in Paris?\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n",
"\u001b[38;2;217;95;118massistant(tool_call):\u001b[0m\n",
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mFunction name: Weatherforecast (Call Id: call_qyfgmgDAJSrRM58Hb83AtdDh)\u001b[0m\n",
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mArguments: {\"city\":\"Paris\",\"country\":\"france\"}\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n",
"\u001b[38;2;191;69;126mtool(Id: call_qyfgmgDAJSrRM58Hb83AtdDh):\u001b[0m\n",
"\u001b[38;2;191;69;126m\u001b[0m\u001b[38;2;191;69;126m{'city': 'Paris', 'state': None, 'country': 'france', 'temperature': 4.6, 'unit': 'celsius'}\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n",
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mThe current temperature in Paris, France is 4.6°C.\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'The current temperature in Paris, France is 4.6°C.'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await weather_agent.run(\"what is the weather in Paris?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mwhat was the weather like in Paris two days ago?\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n",
"\u001b[38;2;217;95;118massistant(tool_call):\u001b[0m\n",
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mFunction name: Historicalweather (Call Id: call_VANaENO9iXLhOuWKOAnV769o)\u001b[0m\n",
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mArguments: {\"city\":\"Paris\",\"country\":\"france\",\"start_date\":\"2024-11-25\",\"end_date\":\"2024-11-25\"}\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n",
"\u001b[38;2;191;69;126mtool(Id: call_VANaENO9iXLhOuWKOAnV769o):\u001b[0m\n",
"\u001b[38;2;191;69;126m\u001b[0m\u001b[38;2;191;69;126m{'city': 'Paris', 'state': None, 'country': 'france', 'start_date': '2024-11-25', 'end_date': '2024-11-25', 'temperature_data': {'2024-11-25T00:00': 16.9, '2024-11-25T01:00': 17.0, '2024-11-25T02:00': 17.4, '2024-11-25T03:00': 17.7, '2024-11-25T04:00': 17.8, '2024-11-25T05:00': 17.6, '2024-11-25T06:00': 16.8, '2024-11-25T07:00': 15.5, '2024-11-25T08:00': 14.6, '2024-11-25T09:00': 14.2, '2024-11-25T10:00': 13.5, '2024-11-25T11:00': 12.2, '2024-11-25T12:00': 11.1, '2024-11-25T13:00': 9.8, '2024-11-25T14:00': 9.9, '2024-11-25T15:00': 10.0, '2024-11-25T16:00': 9.8, '2024-11-25T17:00': 9.3, '2024-11-25T18:00': 9.1, '2024-11-25T19:00': 8.7, '2024-11-25T20:00': 8.4, '2024-11-25T21:00': 8.4, '2024-11-25T22:00': 8.6, '2024-11-25T23:00': 8.2}, 'unit': '°C'}\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n",
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mOn November 25, 2024, the temperature in Paris was as follows:\u001b[0m\n",
"\u001b[38;2;147;191;183m\u001b[0m\n",
"\u001b[38;2;147;191;183m- Midnight: 16.9°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 01:00: 17.0°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 02:00: 17.4°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 03:00: 17.7°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 04:00: 17.8°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 05:00: 17.6°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 06:00: 16.8°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 07:00: 15.5°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 08:00: 14.6°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 09:00: 14.2°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 10:00: 13.5°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 11:00: 12.2°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 12:00: 11.1°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 13:00: 9.8°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 14:00: 9.9°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 15:00: 10.0°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 16:00: 9.8°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 17:00: 9.3°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 18:00: 9.1°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 19:00: 8.7°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 20:00: 8.4°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 21:00: 8.4°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 22:00: 8.6°C\u001b[0m\n",
"\u001b[38;2;147;191;183m- 23:00: 8.2°C\u001b[0m\n",
"\u001b[38;2;147;191;183m\u001b[0m\n",
"\u001b[38;2;147;191;183mThe day started relatively warm in the early hours and cooled down throughout the day and into the evening.\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'On November 25, 2024, the temperature in Paris was as follows:\\n\\n- Midnight: 16.9°C\\n- 01:00: 17.0°C\\n- 02:00: 17.4°C\\n- 03:00: 17.7°C\\n- 04:00: 17.8°C\\n- 05:00: 17.6°C\\n- 06:00: 16.8°C\\n- 07:00: 15.5°C\\n- 08:00: 14.6°C\\n- 09:00: 14.2°C\\n- 10:00: 13.5°C\\n- 11:00: 12.2°C\\n- 12:00: 11.1°C\\n- 13:00: 9.8°C\\n- 14:00: 9.9°C\\n- 15:00: 10.0°C\\n- 16:00: 9.8°C\\n- 17:00: 9.3°C\\n- 18:00: 9.1°C\\n- 19:00: 8.7°C\\n- 20:00: 8.4°C\\n- 21:00: 8.4°C\\n- 22:00: 8.6°C\\n- 23:00: 8.2°C\\n\\nThe day started relatively warm in the early hours and cooled down throughout the day and into the evening.'"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await weather_agent.run(\"what was the weather like in Paris two days ago?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,191 @@
from typing import Optional
from dapr_agents import AgentTool
from datetime import datetime
import requests
import time
class WeatherForecast(AgentTool):
    """Tool that resolves a city to coordinates and returns its current temperature.

    Geocoding uses the Nominatim API; weather data comes from api.weather.gov
    for USA locations and from Met.no for all other countries.
    """
    name: str = 'WeatherForecast'
    description: str = 'A tool for retrieving the weather/temperature for a given city.'

    # Default user agent. Nominatim and weather.gov reject requests that lack a
    # browser-like User-Agent header.
    user_agent: str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15"

    def handle_error(self, response: requests.Response, url: str, stage: str) -> None:
        """Handles error responses and raises a ValueError with detailed information.

        Args:
            response: The HTTP response to validate.
            url: The URL that was requested (included in error messages).
            stage: Human-readable label for the request being validated.

        Raises:
            ValueError: If the status code is not 200, the body is not valid
                JSON, or the decoded body is empty.
        """
        if response.status_code != 200:
            raise ValueError(
                f"Failed to get data during {stage}. Status: {response.status_code}. "
                f"URL: {url}. Response: {response.text}"
            )
        # Decode once; a non-JSON body previously leaked a raw JSONDecodeError
        # with no context (JSONDecodeError subclasses ValueError, so callers
        # catching ValueError are unaffected).
        try:
            body = response.json()
        except ValueError as exc:
            raise ValueError(
                f"No data found during {stage}. URL: {url}. Response: {response.text}"
            ) from exc
        if not body:
            raise ValueError(
                f"No data found during {stage}. URL: {url}. Response: {response.text}"
            )

    def _run(self, city: str, state: Optional[str] = None, country: Optional[str] = "usa") -> dict:
        """
        Retrieves weather data by first fetching geocode data for the city and then fetching weather data.

        Args:
            city (str): The name of the city to get weather for.
            state (Optional[str]): The two-letter state abbreviation (optional).
            country (Optional[str]): The two-letter country abbreviation. Defaults to 'usa'.

        Returns:
            dict: A dictionary containing the city, state, country, and current temperature.

        Raises:
            ValueError: If any upstream API call fails or returns no data.
        """
        headers = {
            "User-Agent": self.user_agent
        }
        # Guard against an explicit country=None, which would break .lower() below.
        country = country or "usa"

        # Construct the geocode URL, conditionally including the state if it's provided
        geocode_url = f"https://nominatim.openstreetmap.org/search?city={city}&country={country}"
        if state:
            geocode_url += f"&state={state}"
        geocode_url += "&limit=1&format=jsonv2"

        # Geocode request (timeout prevents hanging forever on a stalled server)
        geocode_response = requests.get(geocode_url, headers=headers, timeout=30)
        self.handle_error(geocode_response, geocode_url, "geocode lookup")

        # Be polite to the free Nominatim service between requests
        time.sleep(2)

        geocode_data = geocode_response.json()
        lat, lon = geocode_data[0]["lat"], geocode_data[0]["lon"]

        # Use different APIs based on the country
        if country.lower() == "usa":
            # Weather.gov: resolve the point to a forecast URL, then fetch it
            weather_gov_url = f"https://api.weather.gov/points/{lat},{lon}"
            weather_response = requests.get(weather_gov_url, headers=headers, timeout=30)
            self.handle_error(weather_response, weather_gov_url, "weather lookup")

            # Add delay between requests
            time.sleep(2)

            weather_data = weather_response.json()
            forecast_url = weather_data["properties"]["forecast"]

            # Forecast request
            forecast_response = requests.get(forecast_url, headers=headers, timeout=30)
            self.handle_error(forecast_response, forecast_url, "forecast lookup")

            forecast_data = forecast_response.json()
            today_forecast = forecast_data["properties"]["periods"][0]

            # Return the weather data along with the city, state, and country
            return {
                "city": city,
                "state": state,
                "country": country,
                "temperature": today_forecast["temperature"],
                "unit": "Fahrenheit"
            }
        else:
            # Met.no API for non-USA countries
            met_no_url = f"https://api.met.no/weatherapi/locationforecast/2.0/compact?lat={lat}&lon={lon}"
            weather_response = requests.get(met_no_url, headers=headers, timeout=30)
            self.handle_error(weather_response, met_no_url, "Met.no weather lookup")

            weather_data = weather_response.json()
            temperature_unit = weather_data["properties"]["meta"]["units"]["air_temperature"]
            today_forecast = weather_data["properties"]["timeseries"][0]["data"]["instant"]["details"]["air_temperature"]

            # Return the weather data along with the city, state, and country
            return {
                "city": city,
                "state": state,
                "country": country,
                "temperature": today_forecast,
                "unit": temperature_unit
            }
class HistoricalWeather(AgentTool):
    """Tool for retrieving hourly historical temperature data for a city.

    Geocodes the city with the Nominatim (OpenStreetMap) search API, then
    fetches hourly temperatures from the Open-Meteo archive API.
    """
    name: str = 'HistoricalWeather'
    description: str = 'A tool for retrieving historical weather data (temperature) for a given city.'
    # Default user agent; Nominatim rejects requests without a User-Agent header.
    user_agent: str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15"
    def handle_error(self, response: requests.Response, url: str, stage: str) -> None:
        """Handles error responses and raises a ValueError with detailed information.

        Args:
            response (requests.Response): The HTTP response to validate.
            url (str): The URL that was requested (included in error messages).
            stage (str): Short label for the request stage (included in error messages).

        Raises:
            ValueError: If the status code is not 200 or the JSON body is empty.
        """
        if response.status_code != 200:
            raise ValueError(
                f"Failed to get data during {stage}. Status: {response.status_code}. "
                f"URL: {url}. Response: {response.text}"
            )
        if not response.json():
            raise ValueError(
                f"No data found during {stage}. URL: {url}. Response: {response.text}"
            )
    def _run(self, city: str, state: Optional[str] = None, country: Optional[str] = "usa", start_date: Optional[str] = None, end_date: Optional[str] = None) -> dict:
        """
        Retrieves historical weather data for the city by first fetching geocode data and then historical weather data.
        Args:
            city (str): The name of the city to get weather for.
            state (Optional[str]): The two-letter state abbreviation (optional).
            country (Optional[str]): The two-letter country abbreviation. Defaults to 'usa'.
            start_date (Optional[str]): Start date for historical data (YYYY-MM-DD format). Required in practice.
            end_date (Optional[str]): End date for historical data (YYYY-MM-DD format). Required in practice.
        Returns:
            dict: A dictionary containing the city, state, country, date range, and historical temperature data.
        Raises:
            ValueError: If the dates are missing or malformed, are not earlier than
                today, span more than 30 days, or if any upstream request fails.
        """
        headers = {
            "User-Agent": self.user_agent
        }
        # Validate dates. The previous implementation compared possibly-None values
        # with `>=`, which raised an opaque TypeError when dates were omitted;
        # fail fast with clear errors instead.
        if not start_date or not end_date:
            raise ValueError("Both start_date and end_date are required (YYYY-MM-DD format).")
        try:
            start_dt = datetime.strptime(start_date, "%Y-%m-%d")
            end_dt = datetime.strptime(end_date, "%Y-%m-%d")
        except ValueError as exc:
            raise ValueError("start_date and end_date must use the YYYY-MM-DD format.") from exc
        # Compare at date granularity: today's date itself is rejected, matching
        # the original lexical string comparison against today's YYYY-MM-DD.
        today = datetime.now().date()
        if start_dt.date() >= today or end_dt.date() >= today:
            raise ValueError("Both start_date and end_date must be earlier than the current date.")
        if (end_dt - start_dt).days > 30:
            raise ValueError("The time span between start_date and end_date cannot exceed 30 days.")
        # Construct the geocode URL, conditionally including the state if it's provided
        geocode_url = f"https://nominatim.openstreetmap.org/search?city={city}&country={country}"
        if state:
            geocode_url += f"&state={state}"
        geocode_url += "&limit=1&format=jsonv2"
        # Geocode request
        geocode_response = requests.get(geocode_url, headers=headers)
        self.handle_error(geocode_response, geocode_url, "geocode lookup")
        # Add delay between requests (politeness delay for the public Nominatim API)
        time.sleep(2)
        geocode_data = geocode_response.json()
        lat, lon = geocode_data[0]["lat"], geocode_data[0]["lon"]
        # Historical weather request
        historical_weather_url = f"https://archive-api.open-meteo.com/v1/archive?latitude={lat}&longitude={lon}&start_date={start_date}&end_date={end_date}&hourly=temperature_2m"
        weather_response = requests.get(historical_weather_url, headers=headers)
        self.handle_error(weather_response, historical_weather_url, "historical weather lookup")
        weather_data = weather_response.json()
        # Extract time and temperature data
        timestamps = weather_data["hourly"]["time"]
        temperatures = weather_data["hourly"]["temperature_2m"]
        temperature_unit = weather_data["hourly_units"]["temperature_2m"]
        # Combine timestamps and temperatures into a single mapping: timestamp -> temperature
        temperature_data = dict(zip(timestamps, temperatures))
        # Return the structured weather data along with the city, state, country
        return {
            "city": city,
            "state": state,
            "country": country,
            "start_date": start_date,
            "end_date": end_date,
            "temperature_data": temperature_data,
            "unit": temperature_unit
        }

1174
cookbook/arxiv_search.ipynb Normal file

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,462 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# GraphStore: Neo4j Database Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `Neo4jGraphStore` in `dapr-agents` for basic graph-based tasks. We will explore:\n",
"\n",
"* Initializing the `Neo4jGraphStore` class.\n",
"* Adding sample nodes.\n",
"* Adding one sample relationship.\n",
"* Querying graph database.\n",
    "* Resetting the database."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"\n",
"Ensure dapr_agents and neo4j are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv neo4j"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Environment Variables\n",
"\n",
"Load your API keys or other configuration values using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv() # Load environment variables from a `.env` file"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Enable Logging"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"logging.basicConfig(level=logging.INFO)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deploy Neo4j Graph Database as Docker Container"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#docker run \\\n",
"#--restart always \\\n",
"#--publish=7474:7474 --publish=7687:7687 \\\n",
"#--env NEO4J_AUTH=neo4j/graphwardog \\\n",
"#--volume=neo4j-data \\\n",
"#--name neo4j-apoc \\\n",
"#--env NEO4J_apoc_export_file_enabled=true \\\n",
"#--env NEO4J_apoc_import_file_enabled=true \\\n",
"#--env NEO4J_apoc_import_file_use__neo4j__config=true \\\n",
"#--env NEO4J_PLUGINS=\\[\\\"apoc\\\"\\] \\\n",
"#neo4j:latest"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Neo4jGraphStore\n",
"\n",
"Set the `NEO4J_URI`, `NEO4J_USERNAME` and `NEO4J_PASSWORD` variables in a `.env` file. The URI can be set to `bolt://localhost:7687`."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.storage.graphstores.neo4j.client:Successfully created the driver for URI: bolt://localhost:7687\n",
"INFO:dapr_agents.storage.graphstores.neo4j.base:Neo4jGraphStore initialized with database neo4j\n"
]
}
],
"source": [
"from dapr_agents.storage.graphstores.neo4j import Neo4jGraphStore\n",
"import os\n",
"\n",
"# Initialize Neo4jGraphStore\n",
"graph_store = Neo4jGraphStore(\n",
" uri=os.getenv(\"NEO4J_URI\"),\n",
" user=os.getenv(\"NEO4J_USERNAME\"),\n",
" password=os.getenv(\"NEO4J_PASSWORD\"),\n",
" database=\"neo4j\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.storage.graphstores.neo4j.client:Connected to Neo4j Kernel version 5.15.0 (community edition)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Neo4j connection successful\n"
]
}
],
"source": [
"# Test the connection\n",
"assert graph_store.client.test_connection(), \"Connection to Neo4j failed\"\n",
"print(\"Neo4j connection successful\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Add Sample Nodes\n",
"Create and add nodes to the graph store:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.storage.graphstores.neo4j.base:Processed batch 1/1\n",
"INFO:dapr_agents.storage.graphstores.neo4j.base:Nodes with label `Person` added successfully.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Nodes added successfully\n"
]
}
],
"source": [
"from dapr_agents.types import Node\n",
"\n",
"# Sample nodes\n",
"nodes = [\n",
" Node(\n",
" id=\"1\",\n",
" label=\"Person\",\n",
" properties={\"name\": \"Alice\", \"age\": 30},\n",
" additional_labels=[\"Employee\"]\n",
" ),\n",
" Node(\n",
" id=\"2\",\n",
" label=\"Person\",\n",
" properties={\"name\": \"Bob\", \"age\": 25},\n",
" additional_labels=[\"Contractor\"]\n",
" )\n",
"]\n",
"\n",
"# Add nodes\n",
"graph_store.add_nodes(nodes)\n",
"print(\"Nodes added successfully\")\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Add Sample Relationship\n",
"Create and add a relationship to the graph store:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.storage.graphstores.neo4j.base:Processed batch 1/1\n",
"INFO:neo4j.notifications:Received notification from DBMS server: {severity: INFORMATION} {code: Neo.ClientNotification.Statement.CartesianProduct} {category: PERFORMANCE} {title: This query builds a cartesian product between disconnected patterns.} {description: If a part of a query contains multiple disconnected patterns, this will build a cartesian product between all those parts. This may produce a large amount of data and slow down query processing. While occasionally intended, it may often be possible to reformulate the query that avoids the use of this cross product, perhaps by adding a relationship between the different parts or by using OPTIONAL MATCH (identifier is: (b))} {position: line: 3, column: 25, offset: 45} for query: '\\n UNWIND $data AS rel\\n MATCH (a {id: rel.source_node_id}), (b {id: rel.target_node_id})\\n MERGE (a)-[r:`KNOWS`]->(b)\\n ON CREATE SET r.createdAt = rel.current_time\\n SET r.updatedAt = rel.current_time, r += rel.properties\\n RETURN r\\n '\n",
"INFO:dapr_agents.storage.graphstores.neo4j.base:Relationships of type `KNOWS` added successfully.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Relationships added successfully\n"
]
}
],
"source": [
"from dapr_agents.types import Relationship\n",
"\n",
"# Sample relationships\n",
"relationships = [\n",
" Relationship(\n",
" source_node_id=\"1\",\n",
" target_node_id=\"2\",\n",
" type=\"KNOWS\",\n",
" properties={\"since\": \"2023\"}\n",
" )\n",
"]\n",
"\n",
"# Add relationships\n",
"graph_store.add_relationships(relationships)\n",
"print(\"Relationships added successfully\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Query Graph"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.storage.graphstores.neo4j.base:Query executed successfully: MATCH (n) RETURN n | Time: 0.06 seconds | Results: 2\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Nodes in the database:\n",
"{'n': {'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Alice', 'id': '1', 'age': 30, 'updatedAt': '2025-03-04T10:55:57.109885Z'}}\n",
"{'n': {'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Bob', 'id': '2', 'age': 25, 'updatedAt': '2025-03-04T10:55:57.109885Z'}}\n"
]
}
],
"source": [
"query = \"MATCH (n) RETURN n\"\n",
"results = graph_store.query(query)\n",
"print(\"Nodes in the database:\")\n",
"for record in results:\n",
" print(record)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.storage.graphstores.neo4j.base:Query executed successfully: \n",
"MATCH (a)-[r]->(b)\n",
"RETURN a.id AS source, b.id AS target, type(r) AS type, properties(r) AS properties\n",
" | Time: 0.07 seconds | Results: 1\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Relationships in the database:\n",
"{'source': '1', 'target': '2', 'type': 'KNOWS', 'properties': {'updatedAt': '2025-03-04T10:55:59.835379Z', 'createdAt': '2025-03-04T10:55:59.835379Z', 'since': '2023'}}\n"
]
}
],
"source": [
"query = \"\"\"\n",
"MATCH (a)-[r]->(b)\n",
"RETURN a.id AS source, b.id AS target, type(r) AS type, properties(r) AS properties\n",
"\"\"\"\n",
"results = graph_store.query(query)\n",
"print(\"Relationships in the database:\")\n",
"for record in results:\n",
" print(record)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.storage.graphstores.neo4j.base:Query executed successfully: \n",
"MATCH (n)-[r]->(m)\n",
"RETURN n, r, m\n",
" | Time: 0.05 seconds | Results: 1\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Nodes and relationships in the database:\n",
"{'n': {'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Alice', 'id': '1', 'age': 30, 'updatedAt': '2025-03-04T10:55:57.109885Z'}, 'r': ({'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Alice', 'id': '1', 'age': 30, 'updatedAt': '2025-03-04T10:55:57.109885Z'}, 'KNOWS', {'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Bob', 'id': '2', 'age': 25, 'updatedAt': '2025-03-04T10:55:57.109885Z'}), 'm': {'createdAt': '2025-03-04T10:55:57.109885Z', 'name': 'Bob', 'id': '2', 'age': 25, 'updatedAt': '2025-03-04T10:55:57.109885Z'}}\n"
]
}
],
"source": [
"query = \"\"\"\n",
"MATCH (n)-[r]->(m)\n",
"RETURN n, r, m\n",
"\"\"\"\n",
"results = graph_store.query(query)\n",
"print(\"Nodes and relationships in the database:\")\n",
"for record in results:\n",
" print(record)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Reset Graph"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.storage.graphstores.neo4j.base:Database reset successfully\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Graph database has been reset.\n"
]
}
],
"source": [
"graph_store.reset()\n",
"print(\"Graph database has been reset.\")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.storage.graphstores.neo4j.base:Query executed successfully: MATCH (n) RETURN n | Time: 0.01 seconds | Results: 0\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Nodes in the database:\n"
]
}
],
"source": [
"query = \"MATCH (n) RETURN n\"\n",
"results = graph_store.query(query)\n",
"print(\"Nodes in the database:\")\n",
"for record in results:\n",
" print(record)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,286 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLM: Azure OpenAI Chat Endpoint Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `OpenAIChatClient` in `dapr-agents` for basic tasks with the Azure OpenAI Chat API. We will explore:\n",
"\n",
"* Initializing the OpenAI Chat client.\n",
"* Generating responses to simple prompts.\n",
"* Using a `.prompty` file to provide context/history for enhanced generation."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Environment Variables\n",
"\n",
"Load API keys or other configuration values from your `.env` file using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import OpenAIChatClient"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents import OpenAIChatClient"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Basic Chat Completion\n",
"\n",
"Initialize the `OpenAIChatClient` and generate a response to a simple prompt."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the client\n",
"import os\n",
"\n",
"llm = OpenAIChatClient(\n",
" #api_key=os.getenv(\"AZURE_OPENAI_API_KEY\") # or add AZURE_OPENAI_API_KEY environment variable to .env file\n",
" azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"), # or add AZURE_OPENAI_ENDPOINT environment variable to .env file\n",
" azure_deployment=\"gpt-4o\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content='One famous dog is Lassie, the fictional Rough Collie from the \"Lassie\" television series and movies. Lassie is known for her intelligence, loyalty, and the ability to help her human companions out of tricky situations.', role='assistant'), logprobs=None)], created=1743846818, id='chatcmpl-BIuVWArM8Lzqug16s43O9M8BLaFkZ', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 48, 'prompt_tokens': 12, 'total_tokens': 60, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"\n",
"# Generate a response\n",
"response = llm.generate('Name a famous dog!')\n",
"\n",
"# Display the response\n",
"response"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'content': 'One famous dog is Lassie, the fictional Rough Collie from the \"Lassie\" television series and movies. Lassie is known for her intelligence, loyalty, and the ability to help her human companions out of tricky situations.', 'role': 'assistant'}\n"
]
}
],
"source": [
"print(response.get_message())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using a Prompty File for Context\n",
"\n",
"Use a `.prompty` file to provide context for chat history or additional instructions."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAIChatClient.from_prompty('basic-azopenai-chat.prompty')"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatPromptTemplate(input_variables=['question'], pre_filled_variables={}, messages=[SystemMessage(content='You are an AI assistant who helps people find information.\\nAs the assistant, you answer questions briefly, succinctly.', role='system'), UserMessage(content='{{question}}', role='user')], template_format='jinja2')"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm.prompt_template"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content=\"I am an AI assistant and don't have a personal name, but you can call me Assistant.\", role='assistant'), logprobs=None)], created=1743846828, id='chatcmpl-BIuVgBC6I3w1TFn15pmuCBGu6VZQM', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 20, 'prompt_tokens': 39, 'total_tokens': 59, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm.generate(input_data={\"question\":\"What is your name?\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chat Completion with Messages"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the client\n",
"llm = OpenAIChatClient(\n",
" api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"), # or add AZURE_OPENAI_API_KEY environment variable to .env file\n",
" #azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"), # or add AZURE_OPENAI_ENDPOINT environment variable to .env file\n",
" azure_deployment=\"gpt-4o\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'content': 'Hello! How can I assist you today?', 'role': 'assistant'}\n"
]
}
],
"source": [
"from dapr_agents.types import UserMessage\n",
"\n",
"# Generate a response using structured messages\n",
"response = llm.generate(messages=[UserMessage(\"hello\")])\n",
"\n",
"# Display the structured response\n",
"print(response.get_message())"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"llm.prompt_template"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,23 @@
---
name: Basic Prompt
description: A basic prompt that uses the Azure OpenAI chat API to answer questions
model:
api: chat
configuration:
type: azure_openai
azure_deployment: gpt-4o
parameters:
max_tokens: 128
temperature: 0.2
inputs:
question:
type: string
sample:
"question": "Who is the most famous person in the world?"
---
system:
You are an AI assistant who helps people find information.
As the assistant, you answer questions briefly, succinctly.
user:
{{question}}

View File

@ -0,0 +1,23 @@
---
name: Basic Prompt
description: A basic prompt that uses the chat API to answer questions
model:
api: chat
configuration:
type: huggingface
name: microsoft/Phi-3-mini-4k-instruct
parameters:
max_tokens: 128
temperature: 0.2
inputs:
question:
type: string
sample:
"question": "Who is the most famous person in the world?"
---
system:
You are an AI assistant who helps people find information.
As the assistant, you answer questions briefly, succinctly.
user:
{{question}}

View File

@ -0,0 +1,23 @@
---
name: Basic Prompt
description: A basic prompt that uses the chat API to answer questions
model:
api: chat
configuration:
type: nvidia
name: meta/llama3-8b-instruct
parameters:
max_tokens: 128
temperature: 0.2
inputs:
question:
type: string
sample:
"question": "Who is the most famous person in the world?"
---
system:
You are an AI assistant who helps people find information.
As the assistant, you answer questions briefly, succinctly.
user:
{{question}}

View File

@ -0,0 +1,30 @@
---
name: Basic Prompt
description: A basic prompt that uses the chat API to answer questions
model:
api: chat
configuration:
type: openai
name: gpt-4o
parameters:
max_tokens: 128
temperature: 0.2
inputs:
question:
type: string
chat_history:
type: list
default: []
---
system:
You are an AI assistant who helps people find information.
As the assistant, you answer questions briefly, succinctly,
and in a personable manner using markdown and even add some personal flair with appropriate emojis.
{% for item in chat_history %}
{{item.role}}:
{{item.content}}
{% endfor %}
user:
{{question}}

View File

@ -0,0 +1,23 @@
---
name: Basic Prompt
description: A basic prompt that uses the chat API to answer questions
model:
api: chat
configuration:
type: openai
name: gpt-4o
parameters:
max_tokens: 128
temperature: 0.2
inputs:
question:
type: string
sample:
"question": "Who is the most famous person in the world?"
---
system:
You are an AI assistant who helps people find information.
As the assistant, you answer questions briefly, succinctly.
user:
{{question}}

View File

@ -0,0 +1,187 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLM: ElevenLabs Text-To-Speech Endpoint Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `ElevenLabsSpeechClient` in dapr-agents for basic tasks with the [ElevenLabs Text-To-Speech Endpoint](https://elevenlabs.io/docs/api-reference/text-to-speech/convert). We will explore:\n",
"\n",
"* Initializing the `ElevenLabsSpeechClient`.\n",
    "* Generating speech from text and saving it as an MP3 file."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"\n",
"Ensure you have the required library installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv elevenlabs"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Environment Variables"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Enable Logging"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"logging.basicConfig(level=logging.INFO)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize ElevenLabsSpeechClient\n",
"\n",
    "Initialize the `ElevenLabsSpeechClient`. By default the voice is set to: `voice_id=\"EXAVITQu4vr4xnSDxMaL\", name=\"Sarah\"`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents import ElevenLabsSpeechClient\n",
"\n",
"client = ElevenLabsSpeechClient(\n",
" model=\"eleven_multilingual_v2\", # Default model\n",
" voice=\"JBFqnCBsd6RMkjVDRZzb\" # 'name': 'George', 'language': 'en', 'labels': {'accent': 'British', 'description': 'warm', 'age': 'middle aged', 'gender': 'male', 'use_case': 'narration'}\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Generate Speech from Text\n",
"\n",
"### Manual File Creation\n",
"\n",
"This section demonstrates how to generate speech from a given text input and save it as an MP3 file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Define the text to convert to speech\n",
"text = \"Hello Roberto! This is an example of text-to-speech generation.\"\n",
"\n",
"# Create speech from text\n",
"audio_bytes = client.create_speech(\n",
" text=text,\n",
" output_format=\"mp3_44100_128\" # default output format, mp3 with 44.1kHz sample rate at 128kbps.\n",
")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Save the audio to an MP3 file\n",
"output_path = \"output_speech.mp3\"\n",
"with open(output_path, \"wb\") as audio_file:\n",
" audio_file.write(audio_bytes)\n",
"\n",
"print(f\"Audio saved to {output_path}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Automatic File Creation\n",
"\n",
"The audio file is saved directly by providing the file_name parameter."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Define the text to convert to speech\n",
"text = \"Hello Roberto! This is another example of text-to-speech generation.\"\n",
"\n",
"# Create speech from text\n",
"client.create_speech(\n",
" text=text,\n",
" output_format=\"mp3_44100_128\", # default output format, mp3 with 44.1kHz sample rate at 128kbps.,\n",
" file_name='output_speech_auto.mp3'\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,342 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLM: Hugging Face Chat Endpoint Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `HFHubChatClient` in `dapr-agents` for basic tasks with the Hugging Face Chat API. We will explore:\n",
"\n",
"* Initializing the Hugging Face Chat client.\n",
"* Generating responses to simple prompts.\n",
"* Using a `.prompty` file to provide context/history for enhanced generation."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Environment Variables\n",
"\n",
"Load API keys or other configuration values from your `.env` file using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Enable Logging"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"logging.basicConfig(level=logging.INFO)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import HFHubChatClient"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents import HFHubChatClient"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Basic Chat Completion\n",
"\n",
"Initialize the `HFHubChatClient`."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"llm = HFHubChatClient(\n",
" api_key=os.getenv(\"HUGGINGFACE_API_KEY\"),\n",
" model=\"microsoft/Phi-3-mini-4k-instruct\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Generate a response to a simple prompt"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.llm.huggingface.chat:Invoking Hugging Face ChatCompletion API.\n",
"INFO:dapr_agents.llm.huggingface.chat:Chat completion retrieved successfully.\n"
]
}
],
"source": [
"# Generate a response\n",
"response = llm.generate('Name a famous dog!')"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content='A famous dog is Lassie. Lassie was a fictional collie first introduced in the 1943 film \"Lassie Come Home.\" She went on to have her own television series that aired from 1954 to 1973, in which she starred as Rin Tin Tin Jr. Her adventurous and heroic stories captured the hearts of audiences worldwide, and she became an iconic figure in the world of television.', role='assistant'), logprobs=None)], created=1741085108, id='', model='microsoft/Phi-3-mini-4k-instruct', object='chat.completion', usage={'completion_tokens': 105, 'prompt_tokens': 8, 'total_tokens': 113})"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Display the response\n",
"response"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'content': 'A famous dog is Lassie. Lassie was a fictional collie first introduced in the 1943 film \"Lassie Come Home.\" She went on to have her own television series that aired from 1954 to 1973, in which she starred as Rin Tin Tin Jr. Her adventurous and heroic stories captured the hearts of audiences worldwide, and she became an iconic figure in the world of television.',\n",
" 'role': 'assistant'}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"response.get_message()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'A famous dog is Lassie. Lassie was a fictional collie first introduced in the 1943 film \"Lassie Come Home.\" She went on to have her own television series that aired from 1954 to 1973, in which she starred as Rin Tin Tin Jr. Her adventurous and heroic stories captured the hearts of audiences worldwide, and she became an iconic figure in the world of television.'"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"response.get_content()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using a Prompty File for Context\n",
"\n",
"Use a `.prompty` file to provide context for chat history or additional instructions."
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"llm = HFHubChatClient.from_prompty('basic-hf-chat.prompty')"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.llm.huggingface.chat:Using prompt template to generate messages.\n",
"INFO:dapr_agents.llm.huggingface.chat:Invoking Hugging Face ChatCompletion API.\n",
"INFO:dapr_agents.llm.huggingface.chat:Chat completion retrieved successfully.\n"
]
},
{
"data": {
"text/plain": [
"ChatCompletion(choices=[Choice(finish_reason='length', index=0, message=MessageContent(content=\"I'm Phi and my purpose as Microsoft GPT-3 developed by MS Corporation in 2019 serves to assist users with a wide range of queries or tasks they may have at hand! How can i help today ? Let me know if theres anything specific that comes up for which assistance would be beneficial ! :) 😊✨ #AIAssistant#MicrosoftGptPhilosophyOfHelpfulness@MSCorporationTechnologyInnovationsAndEthicsAtTheCoreofOurDesignProcessesWeStriveToCreateAnExperience\", role='assistant'), logprobs=None)], created=1741085113, id='', model='microsoft/Phi-3-mini-4k-instruct', object='chat.completion', usage={'completion_tokens': 128, 'prompt_tokens': 36, 'total_tokens': 164})"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm.generate(input_data={\"question\":\"What is your name?\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chat Completion with Messages"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.llm.huggingface.chat:Invoking Hugging Face ChatCompletion API.\n",
"INFO:dapr_agents.llm.huggingface.chat:Chat completion retrieved successfully.\n"
]
}
],
"source": [
"from dapr_agents.types import UserMessage\n",
"\n",
"# Initialize the client\n",
"llm = HFHubChatClient()\n",
"\n",
"# Generate a response using structured messages\n",
"response = llm.generate(messages=[UserMessage(\"hello\")])"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'content': \"Hello! How can I assist you today? Whether you have a question, need help with a problem, or just want to chat, I'm here to help. 😊\", 'role': 'assistant'}\n"
]
}
],
"source": [
"# Display the structured response\n",
"print(response.get_message())"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"llm.prompt_template"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,257 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLM: NVIDIA Chat Endpoint Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `NVIDIAChatClient` in `dapr-agents` for basic tasks with the NVIDIA Chat API. We will explore:\n",
"\n",
"* Initializing the `NVIDIAChatClient`.\n",
"* Generating responses to simple prompts.\n",
"* Using a `.prompty` file to provide context/history for enhanced generation."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Environment Variables\n",
"\n",
"Load API keys or other configuration values from your `.env` file using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import NVIDIAChatClient"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/wardog/Documents/GitHub/dapr-agents/.venv/lib/python3.13/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
}
],
"source": [
"from dapr_agents import NVIDIAChatClient"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
    "## Basic Chat Completion\n",
    "\n",
    "Initialize the `NVIDIAChatClient` and generate a response to a simple prompt."
   ]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the client\n",
"llm = NVIDIAChatClient()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content=\"That's an easy one! One of the most famous dogs is probably Laika, the Soviet space dog. She was the first living creature to orbit the Earth, launched into space on November 3, 1957, and paved the way for human spaceflight.\", role='assistant'), logprobs=None)], created=1741709966, id='cmpl-7c89ca25c9e140639fe179801738c8dd', model='meta/llama3-8b-instruct', object='chat.completion', usage={'completion_tokens': 55, 'prompt_tokens': 15, 'total_tokens': 70, 'completion_tokens_details': None, 'prompt_tokens_details': None})"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Generate a response\n",
"response = llm.generate('Name a famous dog!')\n",
"\n",
"# Display the response\n",
"response"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'content': \"That's an easy one! One of the most famous dogs is probably Laika, the Soviet space dog. She was the first living creature to orbit the Earth, launched into space on November 3, 1957, and paved the way for human spaceflight.\", 'role': 'assistant'}\n"
]
}
],
"source": [
"print(response.get_message())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using a Prompty File for Context\n",
"\n",
"Use a `.prompty` file to provide context for chat history or additional instructions."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"llm = NVIDIAChatClient.from_prompty('basic-nvidia-chat.prompty')"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content=\"I'm AI Assistant, nice to meet you!\", role='assistant'), logprobs=None)], created=1737847868, id='cmpl-abe14ae7edef456da870b7c473bffcc7', model='meta/llama3-8b-instruct', object='chat.completion', usage={'completion_tokens': 11, 'prompt_tokens': 43, 'total_tokens': 54, 'completion_tokens_details': None, 'prompt_tokens_details': None})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm.generate(input_data={\"question\":\"What is your name?\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chat Completion with Messages"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.types import UserMessage\n",
"\n",
"# Initialize the client\n",
"llm = NVIDIAChatClient()\n",
"\n",
"# Generate a response using structured messages\n",
"response = llm.generate(messages=[UserMessage(\"hello\")])"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'content': \"Hello! It's nice to meet you. Is there something I can help you with, or would you like to chat?\", 'role': 'assistant'}\n"
]
}
],
"source": [
"# Display the structured response\n",
"print(response.get_message())"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"llm.prompt_template"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,234 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLM: NVIDIA Chat Completion with Structured Output\n",
"\n",
"This notebook demonstrates how to use the `NVIDIAChatClient` from `dapr_agents` to generate structured output using `Pydantic` models.\n",
"\n",
"We will:\n",
"\n",
"* Initialize the `NVIDIAChatClient` with the `meta/llama-3.1-8b-instruct` model.\n",
"* Define a Pydantic model to structure the response.\n",
"* Use the `response_model` parameter to get structured output from the LLM."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Environment Variables\n",
"\n",
"Load your API keys or other configuration values using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv() # Load environment variables from a `.env` file"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Enable Logging"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"logging.basicConfig(level=logging.INFO)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Libraries"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents import NVIDIAChatClient\n",
"from dapr_agents.types import UserMessage"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize LLM Client\n",
"\n",
"Create an instance of the `NVIDIAChatClient`."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.llm.nvidia.client:Initializing NVIDIA API client...\n"
]
}
],
"source": [
"llmClient = NVIDIAChatClient(\n",
" model=\"meta/llama-3.1-8b-instruct\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Define the Pydantic Model\n",
"\n",
"Define a Pydantic model to represent the structured response from the LLM."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from pydantic import BaseModel\n",
"\n",
"class Dog(BaseModel):\n",
" name: str\n",
" breed: str\n",
" reason: str"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Generate Structured Output (JSON)\n",
"\n",
"Use the generate method of the `NVIDIAChatClient` with the `response_model` parameter to enforce the structure of the response."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.llm.utils.request:A response model has been passed to structure the response of the LLM.\n",
"INFO:dapr_agents.llm.utils.structure:Structured response enabled.\n",
"INFO:dapr_agents.llm.nvidia.chat:Invoking ChatCompletion API.\n",
"INFO:httpx:HTTP Request: POST https://integrate.api.nvidia.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.nvidia.chat:Chat completion retrieved successfully.\n",
"INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.\n",
"INFO:dapr_agents.llm.utils.response:Returning an instance of <class '__main__.Dog'>.\n"
]
}
],
"source": [
"response = llmClient.generate(\n",
" messages=[UserMessage(\"One famous dog in history.\")],\n",
" response_model=Dog\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Dog(name='Laika', breed='Soviet space dog (mixed breeds)', reason='First animal in space')"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"response"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,260 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLM: NVIDIA Embeddings Endpoint Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `NVIDIAEmbedder` in `dapr-agents` for generating text embeddings. We will explore:\n",
"\n",
"* Initializing the `NVIDIAEmbedder`.\n",
"* Generating embeddings for single and multiple inputs.\n",
"* Using the class both as a direct function and via its `embed` method."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Environment Variables\n",
"\n",
"Load API keys or other configuration values from your `.env` file using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import NVIDIAEmbedder"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.document.embedder import NVIDIAEmbedder"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize the NVIDIAEmbedder\n",
"\n",
"To start, create an instance of the `NVIDIAEmbedder` class."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the embedder\n",
"embedder = NVIDIAEmbedder(\n",
" model=\"nvidia/nv-embedqa-e5-v5\", # Default embedding model\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Embedding a Single Text\n",
"\n",
"You can use the embed method to generate an embedding for a single input string."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Embedding (first 5 values): [-0.007270217100869654, -0.03521439888521964, 0.008612880489907491, 0.03619088134997443, 0.03658757735128107]\n"
]
}
],
"source": [
"# Input text\n",
"text = \"The quick brown fox jumps over the lazy dog.\"\n",
"\n",
"# Generate embedding\n",
"embedding = embedder.embed(text)\n",
"\n",
"# Display the embedding\n",
"print(f\"Embedding (first 5 values): {embedding[:5]}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Embedding Multiple Texts\n",
"\n",
"The embed method also supports embedding multiple texts at once."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Text 1 embedding (first 5 values): [-0.007270217100869654, -0.03521439888521964, 0.008612880489907491, 0.03619088134997443, 0.03658757735128107]\n",
"Text 2 embedding (first 5 values): [0.03491632278487177, -0.045598764196327295, 0.014955417976037734, 0.049291836798573345, 0.03741906620126992]\n"
]
}
],
"source": [
"# Input texts\n",
"texts = [\n",
" \"The quick brown fox jumps over the lazy dog.\",\n",
" \"A journey of a thousand miles begins with a single step.\"\n",
"]\n",
"\n",
"# Generate embeddings\n",
"embeddings = embedder.embed(texts)\n",
"\n",
"# Display the embeddings\n",
"for i, emb in enumerate(embeddings):\n",
" print(f\"Text {i + 1} embedding (first 5 values): {emb[:5]}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using the NVIDIAEmbedder as a Callable Function\n",
"\n",
"The `NVIDIAEmbedder` class can also be used directly as a function, thanks to its `__call__` implementation."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Embedding (first 5 values): [-0.005809799816153762, -0.08734154733463988, -0.017593431879252233, 0.027511671880565285, 0.001342777107870075]\n"
]
}
],
"source": [
"# Use the class instance as a callable\n",
"text_embedding = embedder(\"A stitch in time saves nine.\")\n",
"\n",
"# Display the embedding\n",
"print(f\"Embedding (first 5 values): {text_embedding[:5]}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For multiple inputs:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Text 1 embedding (first 5 values): [0.021093917798446042, -0.04365205548745667, 0.02008726662368289, 0.024922242720651362, 0.024556187748010216]\n",
"Text 2 embedding (first 5 values): [-0.006683721130524534, -0.05764852452568794, 0.01164408689824411, 0.04627132894469238, 0.03458911471541276]\n"
]
}
],
"source": [
"text_list = [\"The early bird catches the worm.\", \"An apple a day keeps the doctor away.\"]\n",
"embeddings_list = embedder(text_list)\n",
"\n",
"# Display the embeddings\n",
"for i, emb in enumerate(embeddings_list):\n",
" print(f\"Text {i + 1} embedding (first 5 values): {emb[:5]}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,453 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLM: OpenAI Audio Endpoint Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `OpenAIAudioClient` in `dapr-agents` for basic tasks with the OpenAI Audio API. We will explore:\n",
"\n",
"* Generating speech from text and saving it as an MP3 file.\n",
"* Transcribing audio to text.\n",
"* Translating audio content to English."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"\n",
"Ensure you have the required library installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Environment Variables"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize OpenAIAudioClient"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents import OpenAIAudioClient\n",
"\n",
"client = OpenAIAudioClient()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Generate Speech from Text\n",
"\n",
"### Manual File Creation\n",
"\n",
"This section demonstrates how to generate speech from a given text input and save it as an MP3 file."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Audio saved to output_speech.mp3\n"
]
}
],
"source": [
"from dapr_agents.types.llm import AudioSpeechRequest\n",
"\n",
"# Define the text to convert to speech\n",
"text_to_speech = \"Hello Roberto! This is an example of text-to-speech generation.\"\n",
"\n",
"# Create a request for TTS\n",
"tts_request = AudioSpeechRequest(\n",
" model=\"tts-1\",\n",
" input=text_to_speech,\n",
" voice=\"fable\",\n",
" response_format=\"mp3\"\n",
")\n",
"\n",
"# Generate the audio\n",
"audio_bytes = client.create_speech(request=tts_request)\n",
"\n",
"# Save the audio to an MP3 file\n",
"output_path = \"output_speech.mp3\"\n",
"with open(output_path, \"wb\") as audio_file:\n",
" audio_file.write(audio_bytes)\n",
"\n",
"print(f\"Audio saved to {output_path}\")\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Automatic File Creation\n",
"\n",
"The audio file is saved directly by providing the file_name parameter."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.types.llm import AudioSpeechRequest\n",
"\n",
"# Define the text to convert to speech\n",
"text_to_speech = \"Hola Roberto! Este es otro ejemplo de generacion de voz desde texto.\"\n",
"\n",
"# Create a request for TTS\n",
"tts_request = AudioSpeechRequest(\n",
" model=\"tts-1\",\n",
" input=text_to_speech,\n",
" voice=\"echo\",\n",
" response_format=\"mp3\"\n",
")\n",
"\n",
"# Generate the audio\n",
"client.create_speech(request=tts_request, file_name=\"output_speech_spanish_auto.mp3\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Transcribe Audio to Text\n",
"\n",
"This section demonstrates how to transcribe audio content into text."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using a File Path"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Transcription: Hello Roberto, this is an example of text-to-speech generation.\n"
]
}
],
"source": [
"from dapr_agents.types.llm import AudioTranscriptionRequest\n",
"\n",
"# Specify the audio file to transcribe\n",
"audio_file_path = \"output_speech.mp3\"\n",
"\n",
"# Create a transcription request\n",
"transcription_request = AudioTranscriptionRequest(\n",
" model=\"whisper-1\",\n",
" file=audio_file_path\n",
")\n",
"\n",
"# Generate transcription\n",
"transcription_response = client.create_transcription(request=transcription_request)\n",
"\n",
"# Display the transcription result\n",
"print(\"Transcription:\", transcription_response.text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using Audio Bytes"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Transcription: Hola Roberto, este es otro ejemplo de generación de voz desde texto.\n"
]
}
],
"source": [
"# audio_bytes = open(\"output_speech_spanish_auto.mp3\", \"rb\")\n",
"\n",
"with open(\"output_speech_spanish_auto.mp3\", \"rb\") as f:\n",
" audio_bytes = f.read()\n",
"\n",
"transcription_request = AudioTranscriptionRequest(\n",
" model=\"whisper-1\",\n",
" file=audio_bytes, # File as bytes\n",
" language=\"en\" # Optional: Specify the language of the audio\n",
")\n",
"\n",
"# Generate transcription\n",
"transcription_response = client.create_transcription(request=transcription_request)\n",
"\n",
"# Display the transcription result\n",
"print(\"Transcription:\", transcription_response.text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using File-Like Objects (e.g., BufferedReader)\n",
"\n",
"You can use file-like objects, such as BufferedReader, directly for transcription or translation."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Transcription: ¡Hola, Roberto! Este es otro ejemplo de generación de voz desde texto.\n"
]
}
],
"source": [
"from io import BufferedReader\n",
"\n",
"# Open the audio file as a BufferedReader\n",
"audio_file_path = \"output_speech_spanish_auto.mp3\"\n",
"with open(audio_file_path, \"rb\") as f:\n",
" buffered_file = BufferedReader(f)\n",
"\n",
" # Create a transcription request\n",
" transcription_request = AudioTranscriptionRequest(\n",
" model=\"whisper-1\",\n",
" file=buffered_file, # File as BufferedReader\n",
" language=\"es\"\n",
" )\n",
"\n",
" # Generate transcription\n",
" transcription_response = client.create_transcription(request=transcription_request)\n",
"\n",
" # Display the transcription result\n",
" print(\"Transcription:\", transcription_response.text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Translate Audio to English\n",
"\n",
"This section demonstrates how to translate audio content into English."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using a File Path"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Translation: Hola Roberto, este es otro ejemplo de generación de voz desde texto.\n"
]
}
],
"source": [
"from dapr_agents.types.llm import AudioTranslationRequest\n",
"\n",
"# Specify the audio file to translate\n",
"audio_file_path = \"output_speech_spanish_auto.mp3\"\n",
"\n",
"# Create a translation request\n",
"translation_request = AudioTranslationRequest(\n",
" model=\"whisper-1\",\n",
" file=audio_file_path,\n",
" prompt=\"The following audio needs to be translated to English.\"\n",
")\n",
"\n",
"# Generate translation\n",
"translation_response = client.create_translation(request=translation_request)\n",
"\n",
"# Display the translation result\n",
"print(\"Translation:\", translation_response.text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using Audio Bytes"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Translation: Hola Roberto, este es otro ejemplo de generación de voz desde texto.\n"
]
}
],
"source": [
"# audio_bytes = open(\"output_speech_spanish_auto.mp3\", \"rb\")\n",
"\n",
"with open(\"output_speech_spanish_auto.mp3\", \"rb\") as f:\n",
" audio_bytes = f.read()\n",
"\n",
"translation_request = AudioTranslationRequest(\n",
" model=\"whisper-1\",\n",
" file=audio_bytes, # File as bytes\n",
" prompt=\"The following audio needs to be translated to English.\"\n",
")\n",
"\n",
"# Generate translation\n",
"translation_response = client.create_translation(request=translation_request)\n",
"\n",
"# Display the translation result\n",
"print(\"Translation:\", translation_response.text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using File-Like Objects (e.g., BufferedReader) for Translation\n",
"\n",
"You can use a file-like object, such as a BufferedReader, directly for translating audio content."
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Translation: Hola Roberto, este es otro ejemplo de generación de voz desde texto.\n"
]
}
],
"source": [
"from io import BufferedReader\n",
"\n",
"# Open the audio file as a BufferedReader\n",
"audio_file_path = \"output_speech_spanish_auto.mp3\"\n",
"with open(audio_file_path, \"rb\") as f:\n",
" buffered_file = BufferedReader(f)\n",
"\n",
" # Create a translation request\n",
" translation_request = AudioTranslationRequest(\n",
" model=\"whisper-1\",\n",
" file=buffered_file, # File as BufferedReader\n",
" prompt=\"The following audio needs to be translated to English.\"\n",
" )\n",
"\n",
" # Generate translation\n",
" translation_response = client.create_translation(request=translation_request)\n",
"\n",
" # Display the translation result\n",
" print(\"Translation:\", translation_response.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,275 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLM: OpenAI Chat Endpoint Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `OpenAIChatClient` in `dapr-agents` for basic tasks with the OpenAI Chat API. We will explore:\n",
"\n",
"* Initializing the OpenAI Chat client.\n",
"* Generating responses to simple prompts.\n",
"* Using a `.prompty` file to provide context/history for enhanced generation."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Environment Variables\n",
"\n",
"Load API keys or other configuration values from your `.env` file using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import OpenAIChatClient"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents import OpenAIChatClient"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Basic Chat Completion\n",
"\n",
"Initialize the `OpenAIChatClient` and generate a response to a simple prompt."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the client\n",
"llm = OpenAIChatClient()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content='One famous dog is Lassie, the Rough Collie from the television series and films that became iconic for her intelligence and heroic adventures.', role='assistant'), logprobs=None)], created=1741085405, id='chatcmpl-B7K8brL19kn1KgDTG9on7n7ICnt3P', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 28, 'prompt_tokens': 12, 'total_tokens': 40, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Generate a response\n",
"response = llm.generate('Name a famous dog!')\n",
"\n",
"# Display the response\n",
"response"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'content': 'One famous dog is Lassie, the Rough Collie from the television series and films that became iconic for her intelligence and heroic adventures.', 'role': 'assistant'}\n"
]
}
],
"source": [
"print(response.get_message())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using a Prompty File for Context\n",
"\n",
"Use a `.prompty` file to provide context for chat history or additional instructions."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAIChatClient.from_prompty('basic-openai-chat-history.prompty')"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatPromptTemplate(input_variables=['chat_history', 'question'], pre_filled_variables={}, messages=[SystemMessage(content='You are an AI assistant who helps people find information.\\nAs the assistant, you answer questions briefly, succinctly, \\nand in a personable manner using markdown and even add some personal flair with appropriate emojis.\\n\\n{% for item in chat_history %}\\n{{item.role}}:\\n{{item.content}}\\n{% endfor %}', role='system'), UserMessage(content='{{question}}', role='user')], template_format='jinja2')"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm.prompt_template"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content=\"Hey there! I'm your friendly AI assistant. You can call me whatever you'd like, but I don't have a specific name. 😊 How can I help you today?\", role='assistant'), logprobs=None)], created=1741085407, id='chatcmpl-B7K8dI84xY2hjaEspDtJL5EICbSLh', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 34, 'prompt_tokens': 57, 'total_tokens': 91, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm.generate(input_data={\"question\":\"What is your name?\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chat Completion with Messages"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.types import UserMessage\n",
"\n",
"# Initialize the client\n",
"llm = OpenAIChatClient()\n",
"\n",
"# Generate a response using structured messages\n",
"response = llm.generate(messages=[UserMessage(\"hello\")])"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'content': 'Hello! How can I assist you today?', 'role': 'assistant'}\n"
]
}
],
"source": [
"# Display the structured response\n",
"print(response.get_message())"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"llm.prompt_template"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,226 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLM: OpenAI Chat Completion with Structured Output\n",
"\n",
"This notebook demonstrates how to use the `OpenAIChatClient` from `dapr-agents` to generate structured output using `Pydantic` models.\n",
"\n",
"We will:\n",
"\n",
"* Initialize the OpenAIChatClient.\n",
"* Define a Pydantic model to structure the response.\n",
"* Use the response_model parameter to get structured output from the LLM."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Environment Variables\n",
"\n",
"Load your API keys or other configuration values using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv() # Load environment variables from a `.env` file"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Enable Logging"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"logging.basicConfig(level=logging.INFO)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import dapr-agents Libraries\n",
"\n",
"Import the necessary classes and types from `dapr-agents`."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents import OpenAIChatClient\n",
"from dapr_agents.types import UserMessage"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize LLM Client\n",
"\n",
"Create an instance of the `OpenAIChatClient`."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.llm.openai.client.base:Initializing OpenAI client...\n"
]
}
],
"source": [
"llmClient = OpenAIChatClient()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Define the Pydantic Model\n",
"\n",
"Define a Pydantic model to represent the structured response from the LLM."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from pydantic import BaseModel\n",
"\n",
"class Dog(BaseModel):\n",
" name: str\n",
" breed: str\n",
" reason: str"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Generate Structured Output (JSON)\n",
"\n",
"Use the generate method of the `OpenAIChatClient` with the `response_model` parameter to enforce the structure of the response."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.\n",
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n",
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
"INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.\n"
]
}
],
"source": [
"response = llmClient.generate(\n",
" messages=[UserMessage(\"One famous dog in history.\")],\n",
" response_format=Dog\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Dog(name='Balto', breed='Siberian Husky', reason=\"Balto is famous for his role in the 1925 serum run to Nome, also known as the 'Great Race of Mercy.' This life-saving mission involved a relay of sled dog teams transporting diphtheria antitoxin across harsh Alaskan wilderness under treacherous winter conditions, preventing a potential epidemic. Balto led the final leg of the journey, becoming a symbol of bravery and teamwork.\")"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"response"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,262 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLM: OpenAI Embeddings Endpoint Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `OpenAIEmbedder` in `dapr-agents` for generating text embeddings. We will explore:\n",
"\n",
"* Initializing the `OpenAIEmbedder`.\n",
"* Generating embeddings for single and multiple inputs.\n",
"* Using the class both as a direct function and via its `embed` method."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv tiktoken"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Environment Variables\n",
"\n",
"Load API keys or other configuration values from your `.env` file using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import OpenAIEmbedder"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.document.embedder import OpenAIEmbedder"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize the OpenAIEmbedder\n",
"\n",
"To start, create an instance of the `OpenAIEmbedder` class. You can customize its parameters if needed, such as the `model` or `chunk_size`."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the embedder\n",
"embedder = OpenAIEmbedder(\n",
" model=\"text-embedding-ada-002\", # Default embedding model\n",
" chunk_size=1000, # Batch size for processing\n",
" max_tokens=8191 # Maximum tokens per input\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Embedding a Single Text\n",
"\n",
"You can use the embed method to generate an embedding for a single input string."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Embedding (first 5 values): [0.0015723939, 0.005963983, -0.015102495, -0.008559333, -0.011583589]\n"
]
}
],
"source": [
"# Input text\n",
"text = \"The quick brown fox jumps over the lazy dog.\"\n",
"\n",
"# Generate embedding\n",
"embedding = embedder.embed(text)\n",
"\n",
"# Display the embedding\n",
"print(f\"Embedding (first 5 values): {embedding[:5]}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Embedding Multiple Texts\n",
"\n",
"The embed method also supports embedding multiple texts at once."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Text 1 embedding (first 5 values): [0.0015723939, 0.005963983, -0.015102495, -0.008559333, -0.011583589]\n",
"Text 2 embedding (first 5 values): [0.03261204, -0.020966679, 0.0026475298, -0.009384127, -0.007305047]\n"
]
}
],
"source": [
"# Input texts\n",
"texts = [\n",
" \"The quick brown fox jumps over the lazy dog.\",\n",
" \"A journey of a thousand miles begins with a single step.\"\n",
"]\n",
"\n",
"# Generate embeddings\n",
"embeddings = embedder.embed(texts)\n",
"\n",
"# Display the embeddings\n",
"for i, emb in enumerate(embeddings):\n",
" print(f\"Text {i + 1} embedding (first 5 values): {emb[:5]}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using the OpenAIEmbedder as a Callable Function\n",
"\n",
"The OpenAIEmbedder class can also be used directly as a function, thanks to its `__call__` implementation."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Embedding (first 5 values): [-0.0022105372, -0.022207271, 0.017802631, -0.00742872, 0.007270942]\n"
]
}
],
"source": [
"# Use the class instance as a callable\n",
"text_embedding = embedder(\"A stitch in time saves nine.\")\n",
"\n",
"# Display the embedding\n",
"print(f\"Embedding (first 5 values): {text_embedding[:5]}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For multiple inputs:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Text 1 embedding (first 5 values): [0.0038562817, -0.020030975, 0.01792581, -0.014723405, -0.014608578]\n",
"Text 2 embedding (first 5 values): [0.011255961, 0.004331666, 0.029073123, -0.01053614, 0.021288864]\n"
]
}
],
"source": [
"text_list = [\"The early bird catches the worm.\", \"An apple a day keeps the doctor away.\"]\n",
"embeddings_list = embedder(text_list)\n",
"\n",
"# Display the embeddings\n",
"for i, emb in enumerate(embeddings_list):\n",
" print(f\"Text {i + 1} embedding (first 5 values): {emb[:5]}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,88 @@
# 🧪 Basic MCP Agent Playground
This demo shows how to use a **lightweight agent** to call tools served via the [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction). The agent uses a simple pattern from `dapr_agents` — but **without running inside Dapr**.
It's a minimal, Python-based setup for:
- Exploring how MCP tools work
- Testing stdio and SSE transport
- Running tool-calling agents (like ToolCallingAgent or ReActAgent)
- Experimenting **without** durable workflows or Dapr dependencies
> 🧠 Looking for something more robust?
> Check out the full `dapr_agents` repo to see how we run these agents inside Dapr workflows with durable task orchestration and state management.
---
## 🛠️ Project Structure
```text
.
├── tools.py # Registers two tools via FastMCP
├── server.py # Starts the MCP server in stdio or SSE mode
├── stdio.ipynb # Example using ToolCallingAgent over stdio
├── sse.ipynb # Example using ToolCallingAgent over SSE
├── requirements.txt
└── README.md
```
## Installation
Before running anything, make sure to install the dependencies:
```bash
pip install -r requirements.txt
```
## 🚀 Starting the MCP Tool Server
The server exposes two tools via MCP:
* `get_weather(location: str) → str`
* `jump(distance: str) → str`
Defined in `tools.py`, these tools are registered using FastMCP.
You can run the server in two modes:
### ▶️ 1. STDIO Mode
This runs inside the notebook. It's useful for quick tests because the MCP server doesn't need to be running in a separate terminal.
* This is used in `stdio.ipynb`
* The agent communicates with the tool server via stdio transport
### 🌐 2. SSE Mode (Starlette + Uvicorn)
This mode requires running the server outside the notebook (in a terminal).
```bash
python server.py --server_type sse --host 127.0.0.1 --port 8000
```
The server exposes:
* `/sse` for the SSE connection
* `/messages/` to receive tool calls
Used by `sse.ipynb`
📌 You can change the port and host using `--host` and `--port`.
## 📓 Notebooks
There are two notebooks in this repo that show basic agent behavior using MCP tools:
| Notebook | Description | Transport |
| --- | --- | --- |
| stdio.ipynb | Uses ToolCallingAgent via mcp.run("stdio") | STDIO |
| sse.ipynb | Uses ToolCallingAgent with SSE tool server | SSE |
Each notebook runs a basic `ToolCallingAgent`, using tools served via MCP. These agents are not managed via Dapr or durable workflows — it's pure Python execution with async support.
## 🔄 What's Next?
After testing these lightweight agents, you can try:
* Running the full dapr_agents workflow system
* Registering more complex MCP tools
* Using other agent types (e.g., ReActAgent, AssistantAgent)
* Testing stateful, durable workflows using Dapr + MCP tools

View File

@ -0,0 +1,4 @@
dapr-agents
python-dotenv
mcp
starlette

View File

@ -3,7 +3,6 @@ import logging
import uvicorn import uvicorn
from starlette.applications import Starlette from starlette.applications import Starlette
from starlette.requests import Request from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Mount, Route from starlette.routing import Mount, Route
from mcp.server.sse import SseServerTransport from mcp.server.sse import SseServerTransport
@ -13,11 +12,11 @@ from tools import mcp
# Logging Configuration # Logging Configuration
# ───────────────────────────────────────────── # ─────────────────────────────────────────────
logging.basicConfig( logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s"
) )
logger = logging.getLogger("mcp-server") logger = logging.getLogger("mcp-server")
# ───────────────────────────────────────────── # ─────────────────────────────────────────────
# Starlette App Factory # Starlette App Factory
# ───────────────────────────────────────────── # ─────────────────────────────────────────────
@ -30,45 +29,27 @@ def create_starlette_app():
async def handle_sse(request: Request) -> None: async def handle_sse(request: Request) -> None:
logger.info("🔌 SSE connection established") logger.info("🔌 SSE connection established")
async with sse.connect_sse(request.scope, request.receive, request._send) as ( async with sse.connect_sse(request.scope, request.receive, request._send) as (read_stream, write_stream):
read_stream,
write_stream,
):
logger.debug("Starting MCP server run loop over SSE") logger.debug("Starting MCP server run loop over SSE")
await mcp._mcp_server.run( await mcp._mcp_server.run(read_stream, write_stream, mcp._mcp_server.create_initialization_options())
read_stream,
write_stream,
mcp._mcp_server.create_initialization_options(),
)
logger.debug("MCP run loop completed") logger.debug("MCP run loop completed")
return Response(status_code=200)
return Starlette( return Starlette(
debug=False, debug=False,
routes=[ routes=[
Route("/sse", endpoint=handle_sse), Route("/sse", endpoint=handle_sse),
Mount("/messages/", app=sse.handle_post_message), Mount("/messages/", app=sse.handle_post_message)
], ]
) )
# ───────────────────────────────────────────── # ─────────────────────────────────────────────
# CLI Entrypoint # CLI Entrypoint
# ───────────────────────────────────────────── # ─────────────────────────────────────────────
def main(): def main():
parser = argparse.ArgumentParser(description="Run an MCP tool server.") parser = argparse.ArgumentParser(description="Run an MCP tool server.")
parser.add_argument( parser.add_argument("--server_type", choices=["stdio", "sse"], default="stdio", help="Transport to use")
"--server_type", parser.add_argument("--host", default="127.0.0.1", help="Host to bind to (SSE only)")
choices=["stdio", "sse"], parser.add_argument("--port", type=int, default=8000, help="Port to bind to (SSE only)")
default="stdio",
help="Transport to use",
)
parser.add_argument(
"--host", default="127.0.0.1", help="Host to bind to (SSE only)"
)
parser.add_argument(
"--port", type=int, default=8000, help="Port to bind to (SSE only)"
)
args = parser.parse_args() args = parser.parse_args()
logger.info(f"🚀 Starting MCP server in {args.server_type.upper()} mode") logger.info(f"🚀 Starting MCP server in {args.server_type.upper()} mode")
@ -80,6 +61,5 @@ def main():
logger.info(f"🌐 Running SSE server on {args.host}:{args.port}") logger.info(f"🌐 Running SSE server on {args.host}:{args.port}")
uvicorn.run(app, host=args.host, port=args.port) uvicorn.run(app, host=args.host, port=args.port)
if __name__ == "__main__": if __name__ == "__main__":
main() main()

View File

@ -0,0 +1,298 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Basic Weather Agent with MCP Support (SSE Transport)\n",
"\n",
"* Collaborator: Roberto Rodriguez @Cyb3rWard0g"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv mcp starlette"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Environment Variables"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv() # take environment variables from .env."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Enable Logging"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"logging.basicConfig(level=logging.INFO)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Connect to MCP Server and Get Tools"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.tool.mcp.client:Connecting to MCP server 'local' via SSE: http://localhost:8000/sse\n",
"INFO:mcp.client.sse:Connecting to SSE endpoint: http://localhost:8000/sse\n",
"INFO:httpx:HTTP Request: GET http://localhost:8000/sse \"HTTP/1.1 200 OK\"\n",
"INFO:mcp.client.sse:Received endpoint URL: http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc\n",
"INFO:mcp.client.sse:Starting post writer with endpoint URL: http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc\n",
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc \"HTTP/1.1 202 Accepted\"\n",
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc \"HTTP/1.1 202 Accepted\"\n",
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc \"HTTP/1.1 202 Accepted\"\n",
"INFO:dapr_agents.tool.mcp.client:Loaded 2 tools from server 'local'\n",
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc \"HTTP/1.1 202 Accepted\"\n",
"INFO:dapr_agents.tool.mcp.client:Loaded 0 prompts from server 'local': \n",
"INFO:dapr_agents.tool.mcp.client:Successfully connected to MCP server 'local'\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"🔧 Tools: ['LocalGetWeather', 'LocalJump']\n"
]
}
],
"source": [
"from dapr_agents.tool.mcp.client import MCPClient\n",
"\n",
"client = MCPClient()\n",
"\n",
"await client.connect_sse(\n",
" server_name=\"local\", # Unique name you assign to this server\n",
" url=\"http://localhost:8000/sse\", # MCP SSE endpoint\n",
" headers=None # Optional HTTP headers if needed\n",
")\n",
"\n",
"# See what tools were loaded\n",
"tools = client.get_all_tools()\n",
"print(\"🔧 Tools:\", [t.name for t in tools])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Agent"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.llm.openai.client.base:Initializing OpenAI client...\n",
"INFO:dapr_agents.tool.executor:Tool registered: LocalGetWeather\n",
"INFO:dapr_agents.tool.executor:Tool registered: LocalJump\n",
"INFO:dapr_agents.tool.executor:Tool Executor initialized with 2 tool(s).\n",
"INFO:dapr_agents.agent.base:Constructing system_prompt from agent attributes.\n",
"INFO:dapr_agents.agent.base:Using system_prompt to create the prompt template.\n",
"INFO:dapr_agents.agent.base:Pre-filled prompt template with attributes: ['name', 'role', 'goal']\n"
]
}
],
"source": [
"from dapr_agents import Agent\n",
"\n",
"agent = Agent(\n",
" name=\"Rob\",\n",
" role= \"Weather Assistant\",\n",
" tools=tools\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run Agent"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 1/10 started.\n",
"INFO:dapr_agents.llm.utils.request:Tools are available in the request.\n",
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mWhat is the weather in New York?\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
"INFO:dapr_agents.agent.patterns.toolcall.base:Executing LocalGetWeather with arguments {\"location\":\"New York\"}\n",
"INFO:dapr_agents.tool.executor:Running tool (auto): LocalGetWeather\n",
"INFO:dapr_agents.tool.mcp.client:[MCP] Executing tool 'get_weather' with args: {'location': 'New York'}\n",
"INFO:mcp.client.sse:Connecting to SSE endpoint: http://localhost:8000/sse\n",
"INFO:httpx:HTTP Request: GET http://localhost:8000/sse \"HTTP/1.1 200 OK\"\n",
"INFO:mcp.client.sse:Received endpoint URL: http://localhost:8000/messages/?session_id=b47ef10b57dd471aac4c5d7aaeadbf5b\n",
"INFO:mcp.client.sse:Starting post writer with endpoint URL: http://localhost:8000/messages/?session_id=b47ef10b57dd471aac4c5d7aaeadbf5b\n",
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=b47ef10b57dd471aac4c5d7aaeadbf5b \"HTTP/1.1 202 Accepted\"\n",
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=b47ef10b57dd471aac4c5d7aaeadbf5b \"HTTP/1.1 202 Accepted\"\n",
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=b47ef10b57dd471aac4c5d7aaeadbf5b \"HTTP/1.1 202 Accepted\"\n",
"INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 2/10 started.\n",
"INFO:dapr_agents.llm.utils.request:Tools are available in the request.\n",
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;217;95;118massistant:\u001b[0m\n",
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mFunction name: LocalGetWeather (Call Id: call_lBVZIV7seOsWttLnfZaLSwS3)\u001b[0m\n",
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mArguments: {\"location\":\"New York\"}\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n",
"\u001b[38;2;191;69;126mLocalGetWeather(tool) (Id: call_lBVZIV7seOsWttLnfZaLSwS3):\u001b[0m\n",
"\u001b[38;2;191;69;126m\u001b[0m\u001b[38;2;191;69;126mNew York: 65F.\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mThe current weather in New York is 65°F. If you need more information, feel free to ask!\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'The current weather in New York is 65°F. If you need more information, feel free to ask!'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await agent.run(\"What is the weather in New York?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,296 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Basic Weather Agent with MCP Support (Stdio Transport)\n",
"\n",
"* Collaborator: Roberto Rodriguez @Cyb3rWard0g"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv mcp starlette"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Environment Variables"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv() # take environment variables from .env."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Enable Logging"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"logging.basicConfig(level=logging.INFO)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Connect to MCP Server and Get Tools"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.tool.mcp.client:Connecting to MCP server 'local' via stdio: python ['server.py', '--server_type', 'stdio']\n",
"INFO:dapr_agents.tool.mcp.client:Loaded 2 tools from server 'local'\n",
"INFO:dapr_agents.tool.mcp.client:Loaded 0 prompts from server 'local': \n",
"INFO:dapr_agents.tool.mcp.client:Successfully connected to MCP server 'local'\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"🔧 Tools: ['LocalGetWeather', 'LocalJump']\n"
]
}
],
"source": [
"from dapr_agents.tool.mcp.client import MCPClient\n",
"\n",
"client = MCPClient()\n",
"\n",
"# Connect to your test server\n",
"await client.connect_stdio(\n",
" server_name=\"local\",\n",
" command=\"python\",\n",
" args=[\"server.py\", \"--server_type\", \"stdio\"]\n",
")\n",
"\n",
"# Test tools\n",
"tools = client.get_all_tools()\n",
"print(\"🔧 Tools:\", [t.name for t in tools])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize Agent"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.llm.openai.client.base:Initializing OpenAI client...\n",
"INFO:dapr_agents.tool.executor:Tool registered: LocalGetWeather\n",
"INFO:dapr_agents.tool.executor:Tool registered: LocalJump\n",
"INFO:dapr_agents.tool.executor:Tool Executor initialized with 2 tool(s).\n",
"INFO:dapr_agents.agent.base:Constructing system_prompt from agent attributes.\n",
"INFO:dapr_agents.agent.base:Using system_prompt to create the prompt template.\n",
"INFO:dapr_agents.agent.base:Pre-filled prompt template with attributes: ['name', 'role', 'goal']\n"
]
}
],
"source": [
"from dapr_agents import Agent\n",
"\n",
"agent = Agent(\n",
" name=\"Rob\",\n",
" role= \"Weather Assistant\",\n",
" tools=tools\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run Agent"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 1/10 started.\n",
"INFO:dapr_agents.llm.utils.request:Tools are available in the request.\n",
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mWhat is the weather in New York?\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
"INFO:dapr_agents.agent.patterns.toolcall.base:Executing LocalGetWeather with arguments {\"location\":\"New York\"}\n",
"INFO:dapr_agents.tool.executor:Running tool (auto): LocalGetWeather\n",
"INFO:dapr_agents.tool.mcp.client:[MCP] Executing tool 'get_weather' with args: {'location': 'New York'}\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;217;95;118massistant:\u001b[0m\n",
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mFunction name: LocalGetWeather (Call Id: call_l8KuS39PvriksogjGN71rzCm)\u001b[0m\n",
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mArguments: {\"location\":\"New York\"}\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 2/10 started.\n",
"INFO:dapr_agents.llm.utils.request:Tools are available in the request.\n",
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;191;69;126mLocalGetWeather(tool) (Id: call_l8KuS39PvriksogjGN71rzCm):\u001b[0m\n",
"\u001b[38;2;191;69;126m\u001b[0m\u001b[38;2;191;69;126mNew York: 60F.\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mThe current temperature in New York is 60°F.\u001b[0m\u001b[0m\n",
"\u001b[0m\u001b[0m\n",
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
"\u001b[0m\u001b[0m\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'The current temperature in New York is 60°F.'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await agent.run(\"What is the weather in New York?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -3,14 +3,12 @@ import random
mcp = FastMCP("TestServer") mcp = FastMCP("TestServer")
@mcp.tool() @mcp.tool()
async def get_weather(location: str) -> str: async def get_weather(location: str) -> str:
"""Get weather information for a specific location.""" """Get weather information for a specific location."""
temperature = random.randint(60, 80) temperature = random.randint(60, 80)
return f"{location}: {temperature}F." return f"{location}: {temperature}F."
@mcp.tool() @mcp.tool()
async def jump(distance: str) -> str: async def jump(distance: str) -> str:
"""Simulate a jump of a given distance.""" """Simulate a jump of a given distance."""

View File

@ -0,0 +1,146 @@
# MCP Agent with Dapr Workflows
This demo shows how to run an AI agent inside a Dapr Workflow, calling tools exposed via the [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction).
Unlike the lightweight notebook-based examples, this setup runs a full Dapr agent using:
✅ Durable task orchestration with Dapr Workflows
✅ Tools served via MCP (stdio or SSE)
✅ Full integration with the Dapr ecosystem
## 🛠️ Project Structure
```text
.
├── app.py # Main entrypoint: runs a Dapr Agent and workflow on port 8001
├── tools.py # MCP tool definitions (get_weather, jump)
├── server.py # Starlette-based SSE server
├── client.py # Script to send an HTTP request to the Agent over port 8001
├── components/ # Dapr pubsub + state components (Redis, etc.)
├── requirements.txt
└── README.md
```
## 📦 Installation
Install dependencies:
```bash
pip install -r requirements.txt
```
Make sure you have Dapr installed and initialized:
```bash
dapr init
```
## 🧰 MCP Tool Server
Your agent will call tools defined in tools.py, served via FastMCP:
```python
@mcp.tool()
async def get_weather(location: str) -> str:
...
@mcp.tool()
async def jump(distance: str) -> str:
...
```
These tools can be served in one of two modes:
### STDIO Mode (local execution)
No external server needed — the agent runs the MCP server in-process.
✅ Best for internal experiments or testing
🚫 Not supported for agents that rely on external workflows (e.g., Dapr orchestration)
### SSE Mode (recommended for Dapr workflows)
In this demo, we run the MCP server as a separate Starlette + Uvicorn app:
```bash
python server.py --server_type sse --host 127.0.0.1 --port 8000
```
This exposes:
* `/sse` for the SSE stream
* `/messages/` for tool execution
Used by the Dapr agent in this repo.
## 🚀 Running the Dapr Agent
Start the MCP server in SSE mode:
```bash
python server.py --server_type sse --port 8000
```
Then in a separate terminal, run the agent workflow:
```bash
dapr run --app-id weatherappmcp --resources-path components/ -- python app.py
```
Once the agent is ready, run the `client.py` script to send a message to it.
```bash
python3 client.py
```
You will see the state of the agent in a JSON file in the same directory.
```json
{
"instances": {
"e098e5b85d544c84a26250be80316152": {
"input": "What is the weather in New York?",
"output": "The current temperature in New York, USA, is 66\u00b0F.",
"start_time": "2025-04-05T05:37:50.496005",
"end_time": "2025-04-05T05:37:52.501630",
"messages": [
{
"id": "e8ccc9d2-1674-47cc-afd2-8e68b91ff791",
"role": "user",
"content": "What is the weather in New York?",
"timestamp": "2025-04-05T05:37:50.516572",
"name": null
},
{
"id": "47b8db93-558c-46ed-80bb-8cb599c4272b",
"role": "assistant",
"content": "The current temperature in New York, USA, is 66\u00b0F.",
"timestamp": "2025-04-05T05:37:52.499945",
"name": null
}
],
"last_message": {
"id": "47b8db93-558c-46ed-80bb-8cb599c4272b",
"role": "assistant",
"content": "The current temperature in New York, USA, is 66\u00b0F.",
"timestamp": "2025-04-05T05:37:52.499945",
"name": null
},
"tool_history": [
{
"content": "New York, USA: 66F.",
"role": "tool",
"tool_call_id": "call_LTDMHvt05e1tvbWBe0kVvnUM",
"id": "2c1535fe-c43a-42c1-be7e-25c71b43c32e",
"function_name": "LocalGetWeather",
"function_args": "{\"location\":\"New York, USA\"}",
"timestamp": "2025-04-05T05:37:51.609087"
}
],
"source": null,
"source_workflow_instance_id": null
}
}
}
```

View File

@ -2,10 +2,9 @@ import asyncio
import logging import logging
from dotenv import load_dotenv from dotenv import load_dotenv
from dapr_agents import DurableAgent from dapr_agents import AssistantAgent
from dapr_agents.tool.mcp import MCPClient from dapr_agents.tool.mcp import MCPClient
async def main(): async def main():
try: try:
# Load MCP tools from server (stdio or sse) # Load MCP tools from server (stdio or sse)
@ -16,7 +15,7 @@ async def main():
tools = client.get_all_tools() tools = client.get_all_tools()
# Create the Weather Agent using those tools # Create the Weather Agent using those tools
weather_agent = DurableAgent( weather_agent = AssistantAgent(
role="Weather Assistant", role="Weather Assistant",
name="Stevie", name="Stevie",
goal="Help humans get weather and location info using smart tools.", goal="Help humans get weather and location info using smart tools.",
@ -39,7 +38,6 @@ async def main():
except Exception as e: except Exception as e:
logging.exception("Error starting weather agent service", exc_info=e) logging.exception("Error starting weather agent service", exc_info=e)
if __name__ == "__main__": if __name__ == "__main__":
load_dotenv() load_dotenv()
logging.basicConfig(level=logging.INFO) logging.basicConfig(level=logging.INFO)

View File

@ -1,15 +1,12 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
import sys
import requests import requests
import time import time
import sys
def call_trigger_job(task): if __name__ == "__main__":
task_payload = {"task": task} status_url = "http://localhost:8001/status"
base_url = "http://localhost:8004" healthy = False
workflow_url = f"{base_url}/start-workflow"
status_url = f"{base_url}/status"
for attempt in range(1, 11): for attempt in range(1, 11):
try: try:
print(f"Attempt {attempt}...") print(f"Attempt {attempt}...")
@ -17,6 +14,7 @@ def call_trigger_job(task):
if response.status_code == 200: if response.status_code == 200:
print("Workflow app is healthy!") print("Workflow app is healthy!")
healthy = True
break break
else: else:
print(f"Received status code {response.status_code}: {response.text}") print(f"Received status code {response.status_code}: {response.text}")
@ -25,9 +23,16 @@ def call_trigger_job(task):
print(f"Request failed: {e}") print(f"Request failed: {e}")
attempt += 1 attempt += 1
print("Waiting 5s seconds before next health checkattempt...") print(f"Waiting 5s seconds before next health checkattempt...")
time.sleep(5) time.sleep(5)
if not healthy:
print("Workflow app is not healthy!")
sys.exit(1)
workflow_url = "http://localhost:8001/start-workflow"
task_payload = {"task": "What is the weather in New York?"}
for attempt in range(1, 11): for attempt in range(1, 11):
try: try:
print(f"Attempt {attempt}...") print(f"Attempt {attempt}...")
@ -43,15 +48,10 @@ def call_trigger_job(task):
print(f"Request failed: {e}") print(f"Request failed: {e}")
attempt += 1 attempt += 1
print("Waiting 1s seconds before next attempt...") print(f"Waiting 1s seconds before next attempt...")
time.sleep(1) time.sleep(1)
print("Maximum attempts (10) reached without success.") print(f"Maximum attempts (10) reached without success.")
print("Failed to get successful response") print("Failed to get successful response")
sys.exit(1) sys.exit(1)
if __name__ == "__main__":
task = "How to get to Mordor? We all need to help!"
call_trigger_job(task)

View File

@ -0,0 +1,4 @@
dapr-agents
python-dotenv
mcp
starlette

View File

@ -0,0 +1,65 @@
import argparse
import logging
import uvicorn
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.routing import Mount, Route
from mcp.server.sse import SseServerTransport
from tools import mcp
# ─────────────────────────────────────────────
# Logging Configuration
# ─────────────────────────────────────────────
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("mcp-server")
# ─────────────────────────────────────────────
# Starlette App Factory
# ─────────────────────────────────────────────
def create_starlette_app():
    """
    Build the Starlette application that serves the MCP server over SSE.

    Routes:
      * GET  /sse        — long-lived SSE stream (server -> client messages)
      * POST /messages/  — client -> server tool-call messages
    """
    logger.debug("Creating Starlette app with SSE transport")
    transport = SseServerTransport("/messages/")

    async def handle_sse(request: Request) -> None:
        # One MCP run loop per SSE connection; returns when the client disconnects.
        logger.info("🔌 SSE connection established")
        # NOTE(review): request._send is a private Starlette attribute required by
        # the SSE transport — confirm it still exists on Starlette upgrades.
        connection = transport.connect_sse(request.scope, request.receive, request._send)
        async with connection as (read_stream, write_stream):
            logger.debug("Starting MCP server run loop over SSE")
            init_options = mcp._mcp_server.create_initialization_options()
            await mcp._mcp_server.run(read_stream, write_stream, init_options)
            logger.debug("MCP run loop completed")

    routes = [
        Route("/sse", endpoint=handle_sse),
        Mount("/messages/", app=transport.handle_post_message),
    ]
    return Starlette(debug=False, routes=routes)
# ─────────────────────────────────────────────
# CLI Entrypoint
# ─────────────────────────────────────────────
def main():
    """CLI entrypoint: parse transport options, then launch the MCP server."""
    arg_parser = argparse.ArgumentParser(description="Run an MCP tool server.")
    arg_parser.add_argument(
        "--server_type",
        choices=["stdio", "sse"],
        default="stdio",
        help="Transport to use",
    )
    arg_parser.add_argument("--host", default="127.0.0.1", help="Host to bind to (SSE only)")
    arg_parser.add_argument("--port", type=int, default=8000, help="Port to bind to (SSE only)")
    args = arg_parser.parse_args()

    logger.info(f"🚀 Starting MCP server in {args.server_type.upper()} mode")

    if args.server_type != "stdio":
        # SSE mode: serve the Starlette app with uvicorn on the requested address.
        app = create_starlette_app()
        logger.info(f"🌐 Running SSE server on {args.host}:{args.port}")
        uvicorn.run(app, host=args.host, port=args.port)
    else:
        # stdio mode: run the MCP server in-process over stdin/stdout.
        mcp.run("stdio")


if __name__ == "__main__":
    main()

View File

@ -3,14 +3,12 @@ import random
mcp = FastMCP("TestServer") mcp = FastMCP("TestServer")
@mcp.tool() @mcp.tool()
async def get_weather(location: str) -> str: async def get_weather(location: str) -> str:
"""Get weather information for a specific location.""" """Get weather information for a specific location."""
temperature = random.randint(60, 80) temperature = random.randint(60, 80)
return f"{location}: {temperature}F." return f"{location}: {temperature}F."
@mcp.tool() @mcp.tool()
async def jump(distance: str) -> str: async def jump(distance: str) -> str:
"""Simulate a jump of a given distance.""" """Simulate a jump of a given distance."""

View File

@ -0,0 +1,499 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# VectorStore: Chroma and OpenAI Embeddings Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `ChromaVectorStore` in `dapr-agents` for storing, querying, and filtering documents. We will explore:\n",
"\n",
"* Initializing the `OpenAIEmbedder` embedding function and `ChromaVectorStore`.\n",
"* Adding documents with text and metadata.\n",
"* Retrieving documents by ID.\n",
"* Updating documents.\n",
"* Deleting documents.\n",
"* Performing similarity searches.\n",
"* Filtering results based on metadata."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv chromadb"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Environment Variables\n",
"\n",
"Load API keys or other configuration values from your `.env` file using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize OpenAI Embedding Function\n",
"\n",
"The default embedding function is `SentenceTransformerEmbedder`, but for this example we will use the `OpenAIEmbedder`."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.document.embedder import OpenAIEmbedder\n",
"\n",
"embedding_funciton = OpenAIEmbedder(\n",
" model = \"text-embedding-ada-002\",\n",
" encoding_name=\"cl100k_base\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initializing the ChromaVectorStore\n",
"\n",
"To start, create an instance of the `ChromaVectorStore`. You can customize its parameters if needed, such as enabling persistence or specifying the embedding_function."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.storage import ChromaVectorStore\n",
"\n",
"# Initialize ChromaVectorStore\n",
"store = ChromaVectorStore(\n",
" name=\"example_collection\", # Name of the collection\n",
" embedding_function=embedding_funciton,\n",
" persistent=False, # No persistence for this example\n",
" host=\"localhost\", # Host for the Chroma server\n",
" port=8000 # Port for the Chroma server\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Adding Documents\n",
"We will use Document objects to add content to the collection. Each Document includes text and optional metadata."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Creating Documents"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.types.document import Document\n",
"\n",
"# Example Lord of the Rings-inspired conversations\n",
"documents = [\n",
" Document(\n",
" text=\"Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to.\",\n",
" metadata={\"topic\": \"wisdom\", \"location\": \"The Shire\"}\n",
" ),\n",
" Document(\n",
" text=\"Frodo: I wish the Ring had never come to me. I wish none of this had happened.\",\n",
" metadata={\"topic\": \"destiny\", \"location\": \"Moria\"}\n",
" ),\n",
" Document(\n",
" text=\"Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master.\",\n",
" metadata={\"topic\": \"power\", \"location\": \"Rivendell\"}\n",
" ),\n",
" Document(\n",
" text=\"Sam: I can't carry it for you, but I can carry you!\",\n",
" metadata={\"topic\": \"friendship\", \"location\": \"Mount Doom\"}\n",
" ),\n",
" Document(\n",
" text=\"Legolas: A red sun rises. Blood has been spilled this night.\",\n",
" metadata={\"topic\": \"war\", \"location\": \"Rohan\"}\n",
" ),\n",
" Document(\n",
" text=\"Gimli: Certainty of death. Small chance of success. What are we waiting for?\",\n",
" metadata={\"topic\": \"bravery\", \"location\": \"Helm's Deep\"}\n",
" ),\n",
" Document(\n",
" text=\"Boromir: One does not simply walk into Mordor.\",\n",
" metadata={\"topic\": \"impossible tasks\", \"location\": \"Rivendell\"}\n",
" ),\n",
" Document(\n",
" text=\"Galadriel: Even the smallest person can change the course of the future.\",\n",
" metadata={\"topic\": \"hope\", \"location\": \"Lothlórien\"}\n",
" ),\n",
" Document(\n",
" text=\"Théoden: So it begins.\",\n",
" metadata={\"topic\": \"battle\", \"location\": \"Helm's Deep\"}\n",
" ),\n",
" Document(\n",
" text=\"Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life.\",\n",
" metadata={\"topic\": \"sacrifice\", \"location\": \"Rivendell\"}\n",
" )\n",
"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Adding Documents to the Collection"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Number of documents in the collection: 10\n"
]
}
],
"source": [
"store.add_documents(documents=documents)\n",
"print(f\"Number of documents in the collection: {store.count()}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieving Documents\n",
"\n",
"Retrieve documents by their IDs or fetch all items in the collection."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Retrieved documents:\n",
"ID: 82f3b922-c64c-4ad1-a632-ea9f8d13a19a, Text: Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to., Metadata: {'location': 'The Shire', 'topic': 'wisdom'}\n",
"ID: f5a45d8b-7f8f-4516-a54a-d9ef3c39db53, Text: Frodo: I wish the Ring had never come to me. I wish none of this had happened., Metadata: {'location': 'Moria', 'topic': 'destiny'}\n",
"ID: 7fead849-c4eb-42ce-88ca-ca62fe9f51a4, Text: Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master., Metadata: {'location': 'Rivendell', 'topic': 'power'}\n",
"ID: ebd6c642-c8f4-4f45-a75e-4a5acdf33ad5, Text: Sam: I can't carry it for you, but I can carry you!, Metadata: {'location': 'Mount Doom', 'topic': 'friendship'}\n",
"ID: 1dc4da81-cbfc-417b-ad71-120fae505842, Text: Legolas: A red sun rises. Blood has been spilled this night., Metadata: {'location': 'Rohan', 'topic': 'war'}\n",
"ID: d1ed1836-c0d8-491c-a813-2c5a2688b2d1, Text: Gimli: Certainty of death. Small chance of success. What are we waiting for?, Metadata: {'location': \"Helm's Deep\", 'topic': 'bravery'}\n",
"ID: 6fe3f229-bf74-4eea-8fe4-fc38efb2cf9a, Text: Boromir: One does not simply walk into Mordor., Metadata: {'location': 'Rivendell', 'topic': 'impossible tasks'}\n",
"ID: 081453e4-0a56-4e78-927b-79289735e8a4, Text: Galadriel: Even the smallest person can change the course of the future., Metadata: {'location': 'Lothlórien', 'topic': 'hope'}\n",
"ID: a45db7d1-4224-4e42-b51d-bdb4593b5cf5, Text: Théoden: So it begins., Metadata: {'location': \"Helm's Deep\", 'topic': 'battle'}\n",
"ID: 5258d6f6-1f1b-459d-a04e-c96f58d76fca, Text: Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life., Metadata: {'location': 'Rivendell', 'topic': 'sacrifice'}\n"
]
}
],
"source": [
"# Retrieve all documents\n",
"retrieved_docs = store.get()\n",
"print(\"Retrieved documents:\")\n",
"for doc in retrieved_docs:\n",
" print(f\"ID: {doc['id']}, Text: {doc['document']}, Metadata: {doc['metadata']}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Updating Documents\n",
"\n",
"You can update existing documents' text or metadata using their IDs."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Updated document: [{'id': '82f3b922-c64c-4ad1-a632-ea9f8d13a19a', 'metadata': {'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}, 'document': 'Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.'}]\n"
]
}
],
"source": [
"# Retrieve a document by its ID\n",
"retrieved_docs = store.get() # Get all documents to find the ID\n",
"doc_id = retrieved_docs[0]['id'] # Select the first document's ID for this example\n",
"\n",
"# Define updated text and metadata\n",
"updated_text = \"Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.\"\n",
"updated_metadata = {\"topic\": \"hope and wisdom\", \"location\": \"Fangorn Forest\"}\n",
"\n",
"# Update the document's text and metadata in the store\n",
"store.update(ids=[doc_id], documents=[updated_text], metadatas=[updated_metadata])\n",
"\n",
"# Verify the update\n",
"updated_doc = store.get(ids=[doc_id])\n",
"print(f\"Updated document: {updated_doc}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deleting Documents\n",
"\n",
"Delete documents by their IDs."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Number of documents after deletion: 9\n"
]
}
],
"source": [
"# Delete a document by ID\n",
"doc_id_to_delete = retrieved_docs[2]['id']\n",
"store.delete(ids=[doc_id_to_delete])\n",
"\n",
"# Verify deletion\n",
"print(f\"Number of documents after deletion: {store.count()}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Similarity Search\n",
"\n",
"Perform a similarity search using text queries. The embedding function automatically generates embeddings for the input query."
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Similarity search results:\n",
"Text: ['Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.', 'Galadriel: Even the smallest person can change the course of the future.']\n",
"Metadata: [{'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}, {'location': 'Lothlórien', 'topic': 'hope'}]\n"
]
}
],
"source": [
"# Search for similar documents based on a query\n",
"query = \"wise advice\"\n",
"results = store.search_similar(query_texts=query, k=2)\n",
"\n",
"# Display results\n",
"print(\"Similarity search results:\")\n",
"for doc, metadata in zip(results[\"documents\"], results[\"metadatas\"]):\n",
" print(f\"Text: {doc}\")\n",
" print(f\"Metadata: {metadata}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Filtering Results\n",
"\n",
"Filter results based on metadata."
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# Search for documents with specific metadata filters\n",
"filter_conditions = {\n",
" \"$and\": [\n",
" {\"location\": {\"$eq\": \"Fangorn Forest\"}},\n",
" {\"topic\": {\"$eq\": \"hope and wisdom\"}}\n",
" ]\n",
"}\n",
"\n",
"filtered_results = store.query_with_filters(query_texts=[\"journey\"], where=filter_conditions, k=3)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'ids': [['82f3b922-c64c-4ad1-a632-ea9f8d13a19a']],\n",
" 'embeddings': None,\n",
" 'documents': [['Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.']],\n",
" 'uris': None,\n",
" 'data': None,\n",
" 'metadatas': [[{'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}]],\n",
" 'distances': [[0.21403032541275024]],\n",
" 'included': [<IncludeEnum.distances: 'distances'>,\n",
" <IncludeEnum.documents: 'documents'>,\n",
" <IncludeEnum.metadatas: 'metadatas'>]}"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"filtered_results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Resetting the Database\n",
"\n",
"Reset the database to clear all stored data."
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['example_collection']"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"store.client.list_collections()"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"# Reset the collection\n",
"store.reset()"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[]"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"store.client.list_collections()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,498 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# VectorStore: Chroma and Sentence Transformer (all-MiniLM-L6-v2) with Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `ChromaVectorStore` in `dapr-agents` for storing, querying, and filtering documents. We will explore:\n",
"\n",
"* Initializing the `SentenceTransformerEmbedder` embedding function and `ChromaVectorStore`.\n",
"* Adding documents with text and metadata.\n",
"* Retrieving documents by ID.\n",
"* Updating documents.\n",
"* Deleting documents.\n",
"* Performing similarity searches.\n",
"* Filtering results based on metadata."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv chromadb sentence-transformers"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Environment Variables\n",
"\n",
"Load API keys or other configuration values from your `.env` file using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initializing SentenceTransformer Embedding Function\n",
"\n",
"The default embedding function is `SentenceTransformerEmbedder`, but we will initialize it explicitly for clarity."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.document.embedder import SentenceTransformerEmbedder\n",
"\n",
"embedding_function = SentenceTransformerEmbedder(\n",
" model=\"all-MiniLM-L6-v2\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initializing the ChromaVectorStore\n",
"\n",
"To start, create an instance of the `ChromaVectorStore` and set the `embedding_function` to the instance of `SentenceTransformerEmbedder`"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.storage import ChromaVectorStore\n",
"\n",
"# Initialize ChromaVectorStore\n",
"store = ChromaVectorStore(\n",
" name=\"example_collection\", # Name of the collection\n",
" embedding_function=embedding_function,\n",
" persistent=False, # No persistence for this example\n",
" host=\"localhost\", # Host for the Chroma server\n",
" port=8000 # Port for the Chroma server\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Adding Documents\n",
"We will use Document objects to add content to the collection. Each Document includes text and optional metadata."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Creating Documents"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.types.document import Document\n",
"\n",
"# Example Lord of the Rings-inspired conversations\n",
"documents = [\n",
" Document(\n",
" text=\"Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to.\",\n",
" metadata={\"topic\": \"wisdom\", \"location\": \"The Shire\"}\n",
" ),\n",
" Document(\n",
" text=\"Frodo: I wish the Ring had never come to me. I wish none of this had happened.\",\n",
" metadata={\"topic\": \"destiny\", \"location\": \"Moria\"}\n",
" ),\n",
" Document(\n",
" text=\"Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master.\",\n",
" metadata={\"topic\": \"power\", \"location\": \"Rivendell\"}\n",
" ),\n",
" Document(\n",
" text=\"Sam: I can't carry it for you, but I can carry you!\",\n",
" metadata={\"topic\": \"friendship\", \"location\": \"Mount Doom\"}\n",
" ),\n",
" Document(\n",
" text=\"Legolas: A red sun rises. Blood has been spilled this night.\",\n",
" metadata={\"topic\": \"war\", \"location\": \"Rohan\"}\n",
" ),\n",
" Document(\n",
" text=\"Gimli: Certainty of death. Small chance of success. What are we waiting for?\",\n",
" metadata={\"topic\": \"bravery\", \"location\": \"Helm's Deep\"}\n",
" ),\n",
" Document(\n",
" text=\"Boromir: One does not simply walk into Mordor.\",\n",
" metadata={\"topic\": \"impossible tasks\", \"location\": \"Rivendell\"}\n",
" ),\n",
" Document(\n",
" text=\"Galadriel: Even the smallest person can change the course of the future.\",\n",
" metadata={\"topic\": \"hope\", \"location\": \"Lothlórien\"}\n",
" ),\n",
" Document(\n",
" text=\"Théoden: So it begins.\",\n",
" metadata={\"topic\": \"battle\", \"location\": \"Helm's Deep\"}\n",
" ),\n",
" Document(\n",
" text=\"Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life.\",\n",
" metadata={\"topic\": \"sacrifice\", \"location\": \"Rivendell\"}\n",
" )\n",
"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Adding Documents to the Collection"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Number of documents in the collection: 10\n"
]
}
],
"source": [
"store.add_documents(documents=documents)\n",
"print(f\"Number of documents in the collection: {store.count()}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieving Documents\n",
"\n",
"Retrieve documents by their IDs or fetch all items in the collection."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Retrieved documents:\n",
"ID: 483fc189-df92-4815-987e-b732391e356a, Text: Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to., Metadata: {'location': 'The Shire', 'topic': 'wisdom'}\n",
"ID: fcbcbf50-7b0c-458a-a232-abbc1b77518b, Text: Frodo: I wish the Ring had never come to me. I wish none of this had happened., Metadata: {'location': 'Moria', 'topic': 'destiny'}\n",
"ID: d4fbda4e-f933-4d1c-8d63-ee4d9f0d0af7, Text: Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master., Metadata: {'location': 'Rivendell', 'topic': 'power'}\n",
"ID: 98d218e5-4274-4d93-ac9a-3fbbeb3c0a19, Text: Sam: I can't carry it for you, but I can carry you!, Metadata: {'location': 'Mount Doom', 'topic': 'friendship'}\n",
"ID: df9d0abe-0b47-4079-9697-b66f47656e64, Text: Legolas: A red sun rises. Blood has been spilled this night., Metadata: {'location': 'Rohan', 'topic': 'war'}\n",
"ID: 309e0971-6826-4bac-81a8-3acfc3a28fa9, Text: Gimli: Certainty of death. Small chance of success. What are we waiting for?, Metadata: {'location': \"Helm's Deep\", 'topic': 'bravery'}\n",
"ID: a0a312be-bebd-405b-b993-4e37ed7fd569, Text: Boromir: One does not simply walk into Mordor., Metadata: {'location': 'Rivendell', 'topic': 'impossible tasks'}\n",
"ID: 0c09f89c-cf60-4428-beee-294b31dfd6a9, Text: Galadriel: Even the smallest person can change the course of the future., Metadata: {'location': 'Lothlórien', 'topic': 'hope'}\n",
"ID: d4778b45-f9fa-438c-b9e9-7466c872b4cc, Text: Théoden: So it begins., Metadata: {'location': \"Helm's Deep\", 'topic': 'battle'}\n",
"ID: 7a44e69f-e0c9-41c0-9cdf-a8f34ddf45f5, Text: Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life., Metadata: {'location': 'Rivendell', 'topic': 'sacrifice'}\n"
]
}
],
"source": [
"# Retrieve all documents\n",
"retrieved_docs = store.get()\n",
"print(\"Retrieved documents:\")\n",
"for doc in retrieved_docs:\n",
" print(f\"ID: {doc['id']}, Text: {doc['document']}, Metadata: {doc['metadata']}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Updating Documents\n",
"\n",
"You can update existing documents' text or metadata using their IDs."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Updated document: [{'id': '483fc189-df92-4815-987e-b732391e356a', 'metadata': {'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}, 'document': 'Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.'}]\n"
]
}
],
"source": [
"# Retrieve a document by its ID\n",
"retrieved_docs = store.get() # Get all documents to find the ID\n",
"doc_id = retrieved_docs[0]['id'] # Select the first document's ID for this example\n",
"\n",
"# Define updated text and metadata\n",
"updated_text = \"Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.\"\n",
"updated_metadata = {\"topic\": \"hope and wisdom\", \"location\": \"Fangorn Forest\"}\n",
"\n",
"# Update the document's text and metadata in the store\n",
"store.update(ids=[doc_id], documents=[updated_text], metadatas=[updated_metadata])\n",
"\n",
"# Verify the update\n",
"updated_doc = store.get(ids=[doc_id])\n",
"print(f\"Updated document: {updated_doc}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deleting Documents\n",
"\n",
"Delete documents by their IDs."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Number of documents after deletion: 9\n"
]
}
],
"source": [
"# Delete a document by ID\n",
"doc_id_to_delete = retrieved_docs[2]['id']\n",
"store.delete(ids=[doc_id_to_delete])\n",
"\n",
"# Verify deletion\n",
"print(f\"Number of documents after deletion: {store.count()}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Similarity Search\n",
"\n",
"Perform a similarity search using text queries. The embedding function automatically generates embeddings for the input query."
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Similarity search results:\n",
"Text: ['Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.', 'Gimli: Certainty of death. Small chance of success. What are we waiting for?']\n",
"Metadata: [{'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}, {'location': \"Helm's Deep\", 'topic': 'bravery'}]\n"
]
}
],
"source": [
"# Search for similar documents based on a query\n",
"query = \"wise advice\"\n",
"results = store.search_similar(query_texts=query, k=2)\n",
"\n",
"# Display results\n",
"print(\"Similarity search results:\")\n",
"for doc, metadata in zip(results[\"documents\"], results[\"metadatas\"]):\n",
" print(f\"Text: {doc}\")\n",
" print(f\"Metadata: {metadata}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Filtering Results\n",
"\n",
"Filter results based on metadata."
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# Search for documents with specific metadata filters\n",
"filter_conditions = {\n",
" \"$and\": [\n",
" {\"location\": {\"$eq\": \"Fangorn Forest\"}},\n",
" {\"topic\": {\"$eq\": \"hope and wisdom\"}}\n",
" ]\n",
"}\n",
"\n",
"filtered_results = store.query_with_filters(query_texts=[\"journey\"], where=filter_conditions, k=3)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'ids': [['483fc189-df92-4815-987e-b732391e356a']],\n",
" 'embeddings': None,\n",
" 'documents': [['Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.']],\n",
" 'uris': None,\n",
" 'data': None,\n",
" 'metadatas': [[{'location': 'Fangorn Forest', 'topic': 'hope and wisdom'}]],\n",
" 'distances': [[0.7907481789588928]],\n",
" 'included': [<IncludeEnum.distances: 'distances'>,\n",
" <IncludeEnum.documents: 'documents'>,\n",
" <IncludeEnum.metadatas: 'metadatas'>]}"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"filtered_results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Resetting the Database\n",
"\n",
"Reset the database to clear all stored data."
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['example_collection']"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"store.client.list_collections()"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"# Reset the collection\n",
"store.reset()"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[]"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"store.client.list_collections()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,522 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# VectorStore: Postgres and Sentence Transformer (all-MiniLM-L6-v2) with Basic Examples\n",
"\n",
"This notebook demonstrates how to use the `PostgresVectorStore` in `dapr-agents` for storing, querying, and filtering documents. We will explore:\n",
"\n",
"* Initializing the `SentenceTransformerEmbedder` embedding function and `PostgresVectorStore`.\n",
"* Adding documents with text and metadata.\n",
"* Performing similarity searches.\n",
"* Filtering results based on metadata.\n",
"* Resetting the database."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Required Libraries\n",
"Before starting, ensure the required libraries are installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install dapr-agents python-dotenv \"psycopg[binary,pool]\" pgvector"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Environment Variables\n",
"\n",
"Load API keys or other configuration values from your `.env` file using `dotenv`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from dotenv import load_dotenv\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setting Up The Database\n",
"\n",
"Before initializing the `PostgresVectorStore`, set up a PostgreSQL instance with pgvector enabled. For a local setup, use Docker:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"d920da4b841a66223431ad1dce49c3b0c215a971a4860ee9e25ea5bf0b4bfcd0\n"
]
}
],
"source": [
"!docker run --name pgvector-container \\\n",
" -e POSTGRES_USER=dapr_agents \\\n",
" -e POSTGRES_PASSWORD=dapr_agents \\\n",
" -e POSTGRES_DB=dapr_agents \\\n",
" -p 5432:5432 \\\n",
" -d pgvector/pgvector:pg17"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initializing SentenceTransformer Embedding Function\n",
"\n",
"The default embedding function is `SentenceTransformerEmbedder`, but we will initialize it explicitly for clarity."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.document.embedder import SentenceTransformerEmbedder\n",
"\n",
"embedding_function = SentenceTransformerEmbedder(\n",
" model=\"all-MiniLM-L6-v2\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initializing the PostgresVectorStore\n",
"\n",
"To start, create an instance of the `PostgresVectorStore` and set the `embedding_function` to the instance of `SentenceTransformerEmbedder`"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.storage.vectorstores import PostgresVectorStore\n",
"import os\n",
"\n",
"# Set up connection parameters\n",
"connection_string = os.getenv(\"POSTGRES_CONNECTION_STRING\", \"postgresql://dapr_agents:dapr_agents@localhost:5432/dapr_agents\")\n",
"\n",
"# Initialize PostgresVectorStore\n",
"store = PostgresVectorStore(\n",
" connection_string=connection_string,\n",
" table_name=\"dapr_agents\",\n",
" embedding_function=SentenceTransformerEmbedder()\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Adding Documents\n",
"We will use Document objects to add content to the collection. Each document includes text and optional metadata."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Creating Documents"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"from dapr_agents.types.document import Document\n",
"\n",
"# Example Lord of the Rings-inspired conversations\n",
"documents = [\n",
" Document(\n",
" text=\"Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to.\",\n",
" metadata={\"topic\": \"wisdom\", \"location\": \"The Shire\"}\n",
" ),\n",
" Document(\n",
" text=\"Frodo: I wish the Ring had never come to me. I wish none of this had happened.\",\n",
" metadata={\"topic\": \"destiny\", \"location\": \"Moria\"}\n",
" ),\n",
" Document(\n",
" text=\"Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master.\",\n",
" metadata={\"topic\": \"power\", \"location\": \"Rivendell\"}\n",
" ),\n",
" Document(\n",
" text=\"Sam: I can't carry it for you, but I can carry you!\",\n",
" metadata={\"topic\": \"friendship\", \"location\": \"Mount Doom\"}\n",
" ),\n",
" Document(\n",
" text=\"Legolas: A red sun rises. Blood has been spilled this night.\",\n",
" metadata={\"topic\": \"war\", \"location\": \"Rohan\"}\n",
" ),\n",
" Document(\n",
" text=\"Gimli: Certainty of death. Small chance of success. What are we waiting for?\",\n",
" metadata={\"topic\": \"bravery\", \"location\": \"Helm's Deep\"}\n",
" ),\n",
" Document(\n",
" text=\"Boromir: One does not simply walk into Mordor.\",\n",
" metadata={\"topic\": \"impossible tasks\", \"location\": \"Rivendell\"}\n",
" ),\n",
" Document(\n",
" text=\"Galadriel: Even the smallest person can change the course of the future.\",\n",
" metadata={\"topic\": \"hope\", \"location\": \"Lothlórien\"}\n",
" ),\n",
" Document(\n",
" text=\"Théoden: So it begins.\",\n",
" metadata={\"topic\": \"battle\", \"location\": \"Helm's Deep\"}\n",
" ),\n",
" Document(\n",
" text=\"Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life.\",\n",
" metadata={\"topic\": \"sacrifice\", \"location\": \"Rivendell\"}\n",
" )\n",
"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Adding Documents to the Collection"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Number of documents in the collection: 10\n"
]
}
],
"source": [
"store.add_documents(documents=documents)\n",
"print(f\"Number of documents in the collection: {store.count()}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Retrieving Documents\n",
"\n",
"Retrieve all documents or specific ones by ID."
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Retrieved documents:\n",
"ID: feb3b2c1-d3cf-423b-bd5d-6094e2200bc8, Text: Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to., Metadata: {'topic': 'wisdom', 'location': 'The Shire'}\n",
"ID: b206833f-4c19-4f3c-91e2-2ccbcc895a63, Text: Frodo: I wish the Ring had never come to me. I wish none of this had happened., Metadata: {'topic': 'destiny', 'location': 'Moria'}\n",
"ID: 57226af8-d035-4052-86b2-4f68d7c5a8f6, Text: Aragorn: You cannot wield it! None of us can. The One Ring answers to Sauron alone. It has no other master., Metadata: {'topic': 'power', 'location': 'Rivendell'}\n",
"ID: 5376d46a-4161-408c-850c-4b73cd8d2aa6, Text: Sam: I can't carry it for you, but I can carry you!, Metadata: {'topic': 'friendship', 'location': 'Mount Doom'}\n",
"ID: 7d8c78c3-e4c9-4c6a-8bb4-a04f450e6bfd, Text: Legolas: A red sun rises. Blood has been spilled this night., Metadata: {'topic': 'war', 'location': 'Rohan'}\n",
"ID: 749a126e-2ad5-4aa6-b043-a204e50963f3, Text: Gimli: Certainty of death. Small chance of success. What are we waiting for?, Metadata: {'topic': 'bravery', 'location': \"Helm's Deep\"}\n",
"ID: 4848f783-fbc0-43ec-98d6-43b03fa79809, Text: Boromir: One does not simply walk into Mordor., Metadata: {'topic': 'impossible tasks', 'location': 'Rivendell'}\n",
"ID: ecc3257d-e542-407e-9db9-21ec3b78249c, Text: Galadriel: Even the smallest person can change the course of the future., Metadata: {'topic': 'hope', 'location': 'Lothlórien'}\n",
"ID: 6dad5159-724f-4f03-8cc8-aabc4ee308cd, Text: Théoden: So it begins., Metadata: {'topic': 'battle', 'location': \"Helm's Deep\"}\n",
"ID: 63a09862-438a-41d7-abe7-74ec5510ce82, Text: Elrond: The strength of the Ring-bearer is failing. In his heart, Frodo begins to understand. The quest will claim his life., Metadata: {'topic': 'sacrifice', 'location': 'Rivendell'}\n"
]
}
],
"source": [
"# Retrieve all documents\n",
"retrieved_docs = store.get()\n",
"print(\"Retrieved documents:\")\n",
"for doc in retrieved_docs:\n",
" print(f\"ID: {doc['id']}, Text: {doc['document']}, Metadata: {doc['metadata']}\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Specific document: [{'id': UUID('feb3b2c1-d3cf-423b-bd5d-6094e2200bc8'), 'document': 'Gandalf: A wizard is never late, Frodo Baggins. Nor is he early; he arrives precisely when he means to.', 'metadata': {'topic': 'wisdom', 'location': 'The Shire'}}]\n"
]
}
],
"source": [
"# Retrieve a specific document by ID\n",
"doc_id = retrieved_docs[0]['id']\n",
"specific_doc = store.get(ids=[doc_id])\n",
"print(f\"Specific document: {specific_doc}\")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Specific document Embedding (first 5 values): [-0.0\n"
]
}
],
"source": [
"# Retrieve a specific document by ID\n",
"doc_id = retrieved_docs[0]['id']\n",
"specific_doc = store.get(ids=[doc_id], with_embedding=True)\n",
"embedding = specific_doc[0]['embedding']\n",
"print(f\"Specific document Embedding (first 5 values): {embedding[:5]}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Updating Documents\n",
"\n",
"You can update existing documents' text or metadata using their IDs."
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Updated document: [{'id': UUID('feb3b2c1-d3cf-423b-bd5d-6094e2200bc8'), 'document': 'Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.', 'metadata': {'topic': 'hope and wisdom', 'location': 'Fangorn Forest'}}]\n"
]
}
],
"source": [
"# Retrieve a document by its ID\n",
"retrieved_docs = store.get() # Get all documents to find the ID\n",
"doc_id = retrieved_docs[0]['id'] # Select the first document's ID for this example\n",
"\n",
"# Define updated text and metadata\n",
"updated_text = \"Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true.\"\n",
"updated_metadata = {\"topic\": \"hope and wisdom\", \"location\": \"Fangorn Forest\"}\n",
"\n",
"# Update the document's text and metadata in the store\n",
"store.update(ids=[doc_id], documents=[updated_text], metadatas=[updated_metadata])\n",
"\n",
"# Verify the update\n",
"updated_doc = store.get(ids=[doc_id])\n",
"print(f\"Updated document: {updated_doc}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deleting Documents\n",
"\n",
"Delete documents by their IDs."
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Number of documents after deletion: 9\n"
]
}
],
"source": [
"# Delete a document by ID\n",
"doc_id_to_delete = retrieved_docs[2]['id']\n",
"store.delete(ids=[doc_id_to_delete])\n",
"\n",
"# Verify deletion\n",
"print(f\"Number of documents after deletion: {store.count()}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Similarity Search\n",
"\n",
"Perform a similarity search using text queries. The embedding function automatically generates embeddings for the input query."
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Similarity search results:\n",
"ID: 749a126e-2ad5-4aa6-b043-a204e50963f3, Document: Gimli: Certainty of death. Small chance of success. What are we waiting for?, Metadata: {'topic': 'bravery', 'location': \"Helm's Deep\"}, Similarity: 0.1567628941818613\n",
"ID: 4848f783-fbc0-43ec-98d6-43b03fa79809, Document: Boromir: One does not simply walk into Mordor., Metadata: {'topic': 'impossible tasks', 'location': 'Rivendell'}, Similarity: 0.13233356090384096\n"
]
}
],
"source": [
"# Perform a similarity search using text queries.\n",
"query = \"wise advice\"\n",
"results = store.search_similar(query_texts=query, k=2)\n",
"\n",
"# Display results\n",
"print(\"Similarity search results:\")\n",
"for result in results:\n",
" print(f\"ID: {result['id']}, Document: {result['document']}, Metadata: {result['metadata']}, Similarity: {result['similarity']}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Filtering Results\n",
"\n",
"Filter results based on metadata."
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Filtered search results:\n",
"ID: feb3b2c1-d3cf-423b-bd5d-6094e2200bc8, Document: Gandalf: Even the wisest cannot foresee all ends, but hope remains while the Company is true., Metadata: {'topic': 'hope and wisdom', 'location': 'Fangorn Forest'}, Similarity: 0.1670202911216282\n"
]
}
],
"source": [
"# Search for documents with specific metadata filters\n",
"query = \"journey\"\n",
"filter_conditions = {\n",
" \"location\": \"Fangorn Forest\",\n",
" \"topic\": \"hope and wisdom\"\n",
"}\n",
"\n",
"filtered_results = store.search_similar(query_texts=query, metadata_filter=filter_conditions, k=3)\n",
"\n",
"# Display filtered results\n",
"print(\"Filtered search results:\")\n",
"for result in filtered_results:\n",
" print(f\"ID: {result['id']}, Document: {result['document']}, Metadata: {result['metadata']}, Similarity: {result['similarity']}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Resetting the Database\n",
"\n",
"Reset the database to clear all stored data."
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Database reset complete. Current documents: []\n"
]
}
],
"source": [
"# Reset the collection\n",
"store.reset()\n",
"print(\"Database reset complete. Current documents:\", store.get())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,41 @@
from time import sleep
import dapr.ext.workflow as wf
wfr = wf.WorkflowRuntime()


@wfr.workflow(name='random_workflow')
def task_chain_workflow(ctx: wf.DaprWorkflowContext, x: int):
    """Chain step1 -> step2 -> step3, feeding each result into the next.

    Returns the list of all three intermediate results, in order.
    """
    outputs = []
    current = x
    for activity in (step1, step2, step3):
        current = yield ctx.call_activity(activity, input=current)
        outputs.append(current)
    return outputs
@wfr.activity
def step1(ctx, activity_input):
    """Activity 1: log the received value and return it incremented by one."""
    print(f'Step 1: Received input: {activity_input}.')
    incremented = activity_input + 1
    return incremented
@wfr.activity
def step2(ctx, activity_input):
    """Activity 2: log the received value and return it doubled."""
    print(f'Step 2: Received input: {activity_input}.')
    doubled = activity_input * 2
    return doubled
@wfr.activity
def step3(ctx, activity_input):
    # Activity 3: logs the input, then returns `activity_input ^ 2`.
    print(f'Step 3: Received input: {activity_input}.')
    # Do some work
    # NOTE(review): `^` is bitwise XOR, not exponentiation. If "square the
    # input" was intended here, this should be `activity_input ** 2` — confirm.
    return activity_input ^ 2
if __name__ == '__main__':
    # Start the embedded workflow runtime, then schedule and await one run.
    wfr.start()
    sleep(5)  # wait for workflow runtime to start
    client = wf.DaprWorkflowClient()
    instance_id = client.schedule_new_workflow(workflow=task_chain_workflow, input=10)
    print(f'Workflow started. Instance ID: {instance_id}')
    state = client.wait_for_workflow_completion(instance_id)
    print(f'Workflow completed! Status: {state.runtime_status}')
    wfr.shutdown()

View File

@ -0,0 +1,38 @@
import logging
from dapr_agents.workflow import WorkflowApp, workflow, task
from dapr_agents.types import DaprWorkflowContext
@workflow(name='random_workflow')
def task_chain_workflow(ctx: DaprWorkflowContext, input: int):
    """Run step1, step2 and step3 in sequence and return all three results.

    Each task receives the previous task's output as its input.
    """
    results = []
    value = input
    for step in (step1, step2, step3):
        value = yield ctx.call_activity(step, input=value)
        results.append(value)
    return results
@task
def step1(activity_input):
    """Task 1: log the incoming value, then return it plus one."""
    print(f'Step 1: Received input: {activity_input}.')
    result = activity_input + 1
    return result
@task
def step2(activity_input):
    """Task 2: log the incoming value, then return it multiplied by two."""
    print(f'Step 2: Received input: {activity_input}.')
    result = activity_input * 2
    return result
@task
def step3(activity_input):
    # Task 3: logs the input, then returns `activity_input ^ 2`.
    print(f'Step 3: Received input: {activity_input}.')
    # Do some work
    # NOTE(review): `^` is bitwise XOR, not exponentiation. If squaring was
    # intended here, this should be `activity_input ** 2` — confirm.
    return activity_input ^ 2
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Build the workflow app and run the chain synchronously to completion.
    wfapp = WorkflowApp()
    results = wfapp.run_and_monitor_workflow_sync(task_chain_workflow, input=10)
    print(f"Results: {results}")

View File

@ -0,0 +1,40 @@
import asyncio
import logging
from dapr_agents.workflow import WorkflowApp, workflow, task
from dapr_agents.types import DaprWorkflowContext
@workflow(name="random_workflow")
def task_chain_workflow(ctx: DaprWorkflowContext, input: int):
result1 = yield ctx.call_activity(step1, input=input)
result2 = yield ctx.call_activity(step2, input=result1)
result3 = yield ctx.call_activity(step3, input=result2)
return [result1, result2, result3]
@task
def step1(activity_input: int) -> int:
    """Log the incoming value, then return it incremented by one."""
    print(f"Step 1: Received input: {activity_input}.")
    incremented = activity_input + 1
    return incremented
@task
def step2(activity_input: int) -> int:
    """Log the incoming value, then return it doubled."""
    print(f"Step 2: Received input: {activity_input}.")
    doubled = activity_input * 2
    return doubled
@task
def step3(activity_input: int) -> int:
    # Task 3: logs the input, then returns `activity_input ^ 2`.
    # NOTE(review): `^` is bitwise XOR, not exponentiation. If squaring was
    # intended here, this should be `activity_input ** 2` — confirm.
    print(f"Step 3: Received input: {activity_input}.")
    return activity_input ^ 2
async def main():
    """Run the task-chain workflow asynchronously and print its results."""
    logging.basicConfig(level=logging.INFO)
    wfapp = WorkflowApp()
    result = await wfapp.run_and_monitor_workflow_async(task_chain_workflow, input=10)
    print(f"Results: {result}")


if __name__ == "__main__":
    asyncio.run(main())

View File

@ -0,0 +1,35 @@
from dapr_agents.workflow import WorkflowApp, workflow, task
from dapr_agents.types import DaprWorkflowContext
from dotenv import load_dotenv
import logging
# Define Workflow logic
@workflow(name='lotr_workflow')
def task_chain_workflow(ctx: DaprWorkflowContext):
    """Pick a Lord of the Rings character, then fetch a famous line by them."""
    character = yield ctx.call_activity(get_character)
    line = yield ctx.call_activity(get_line, input={"character": character})
    return line
@task(description="""
Pick a random character from The Lord of the Rings\n
and respond with the character's name ONLY
""")
def get_character() -> str:
pass
@task(description="What is a famous line by {character}",)
def get_line(character: str) -> str:
pass
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
# Load environment variables
load_dotenv()
# Initialize the WorkflowApp
wfapp = WorkflowApp()
# Run workflow
results = wfapp.run_and_monitor_workflow_sync(task_chain_workflow)
print(results)

View File

@ -0,0 +1,40 @@
import asyncio
import logging
from dapr_agents.workflow import WorkflowApp, workflow, task
from dapr_agents.types import DaprWorkflowContext
from dotenv import load_dotenv
# Define Workflow logic
@workflow(name='lotr_workflow')
def task_chain_workflow(ctx: DaprWorkflowContext):
    # First pick a character, then ask for one of that character's lines.
    result1 = yield ctx.call_activity(get_character)
    result2 = yield ctx.call_activity(get_line, input={"character": result1})
    return result2

# LLM-backed task: the description string is the prompt sent to the model;
# the empty body is filled in by the framework with the model's response.
@task(description="""
    Pick a random character from The Lord of the Rings\n
    and respond with the character's name ONLY
""")
def get_character() -> str:
    pass

# Template task: {character} is substituted from the call_activity input.
@task(description="What is a famous line by {character}",)
def get_line(character: str) -> str:
    pass

async def main():
    logging.basicConfig(level=logging.INFO)
    # Load environment variables (e.g. the LLM API key) from a .env file
    load_dotenv()
    # Initialize the WorkflowApp
    wfapp = WorkflowApp()
    # Run workflow asynchronously and print the final result
    result = await wfapp.run_and_monitor_workflow_async(task_chain_workflow)
    print(f"Results: {result}")

if __name__ == "__main__":
    asyncio.run(main())

View File

@ -0,0 +1,33 @@
import logging
from dapr_agents.workflow import WorkflowApp, workflow, task
from dapr_agents.types import DaprWorkflowContext
from pydantic import BaseModel
from dotenv import load_dotenv
@workflow
def question(ctx: DaprWorkflowContext, input: str):
    # Single-step workflow: forward the name to the LLM-backed `ask` task.
    # Annotation corrected from `int` to `str`: the workflow is invoked
    # below with the character name "Scooby Doo".
    step1 = yield ctx.call_activity(ask, input=input)
    return step1

class Dog(BaseModel):
    # Structured-output schema the LLM response is parsed into.
    name: str   # the dog's name
    bio: str    # short biography
    breed: str  # breed of the dog

# LLM-backed task: the positional string is the prompt template;
# the empty body is filled by the framework with the parsed Dog response.
@task("Who was {name}?")
def ask(name: str) -> Dog:
    pass

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    load_dotenv()
    wfapp = WorkflowApp()
    # Run the workflow synchronously and print the structured result.
    results = wfapp.run_and_monitor_workflow_sync(
        workflow=question,
        input="Scooby Doo"
    )
    print(results)

View File

@ -0,0 +1,40 @@
import asyncio
import logging
from dapr_agents.workflow import WorkflowApp, workflow, task
from dapr_agents.types import DaprWorkflowContext
from pydantic import BaseModel
from dotenv import load_dotenv
@workflow
def question(ctx: DaprWorkflowContext, input: str):
    # Single-step workflow: forward the name to the LLM-backed `ask` task.
    # Annotation corrected from `int` to `str`: the workflow is invoked
    # below with the character name "Scooby Doo".
    step1 = yield ctx.call_activity(ask, input=input)
    return step1

class Dog(BaseModel):
    # Structured-output schema the LLM response is parsed into.
    name: str   # the dog's name
    bio: str    # short biography
    breed: str  # breed of the dog

# LLM-backed task: the positional string is the prompt template;
# the empty body is filled by the framework with the parsed Dog response.
@task("Who was {name}?")
def ask(name: str) -> Dog:
    pass

async def main():
    logging.basicConfig(level=logging.INFO)
    # Load environment variables
    load_dotenv()
    # Initialize the WorkflowApp
    wfapp = WorkflowApp()
    # Run workflow asynchronously and print the structured result
    result = await wfapp.run_and_monitor_workflow_async(
        workflow=question,
        input="Scooby Doo"
    )
    print(f"Results: {result}")

if __name__ == "__main__":
    asyncio.run(main())

View File

@ -9,54 +9,55 @@ load_dotenv()
# Initialize Workflow Instance # Initialize Workflow Instance
wfr = wf.WorkflowRuntime() wfr = wf.WorkflowRuntime()
# Define Workflow logic # Define Workflow logic
@wfr.workflow(name="task_chain_workflow") @wfr.workflow(name='lotr_workflow')
def task_chain_workflow(ctx: wf.DaprWorkflowContext): def task_chain_workflow(ctx: wf.DaprWorkflowContext):
result1 = yield ctx.call_activity(get_character) result1 = yield ctx.call_activity(get_character)
result2 = yield ctx.call_activity(get_line, input=result1) result2 = yield ctx.call_activity(get_line, input=result1)
return result2 return result2
# Activity 1 # Activity 1
@wfr.activity(name="step1") @wfr.activity(name='step1')
def get_character(ctx): def get_character(ctx):
client = OpenAI() client = OpenAI()
response = client.chat.completions.create( response = client.chat.completions.create(
messages=[ messages = [
{ {
"role": "user", "role": "user",
"content": "Pick a random character from The Lord of the Rings and respond with the character name only", "content": "Pick a random character from The Lord of the Rings and respond with the character name only"
} }
], ],
model="gpt-4o", model = 'gpt-4o'
) )
character = response.choices[0].message.content character = response.choices[0].message.content
print(f"Character: {character}") print(f"Character: {character}")
return character return character
# Activity 2 # Activity 2
@wfr.activity(name="step2") @wfr.activity(name='step2')
def get_line(ctx, character: str): def get_line(ctx, character: str):
client = OpenAI() client = OpenAI()
response = client.chat.completions.create( response = client.chat.completions.create(
messages=[{"role": "user", "content": f"What is a famous line by {character}"}], messages = [
model="gpt-4o", {
"role": "user",
"content": f"What is a famous line by {character}"
}
],
model = 'gpt-4o'
) )
line = response.choices[0].message.content line = response.choices[0].message.content
print(f"Line: {line}") print(f"Line: {line}")
return line return line
if __name__ == '__main__':
if __name__ == "__main__":
wfr.start() wfr.start()
sleep(5) # wait for workflow runtime to start sleep(5) # wait for workflow runtime to start
wf_client = wf.DaprWorkflowClient() wf_client = wf.DaprWorkflowClient()
instance_id = wf_client.schedule_new_workflow(workflow=task_chain_workflow) instance_id = wf_client.schedule_new_workflow(workflow=task_chain_workflow)
print(f"Workflow started. Instance ID: {instance_id}") print(f'Workflow started. Instance ID: {instance_id}')
state = wf_client.wait_for_workflow_completion(instance_id) state = wf_client.wait_for_workflow_completion(instance_id)
print(f"Workflow completed! Status: {state.runtime_status}") print(f'Workflow completed! Status: {state.runtime_status}')
wfr.shutdown() wfr.shutdown()

View File

@ -0,0 +1,61 @@
# Doc2Podcast: Automating Podcast Creation from Research Papers
This workflow is a basic step toward automating the creation of podcast content from research using AI. It demonstrates how to process a single research paper, generate a dialogue-style transcript with LLMs, and convert it into a podcast audio file. While simple, this workflow serves as a foundation for exploring more advanced processes, such as handling multiple documents or optimizing content splitting for better audio output.
## Key Features and Workflow
* PDF Processing: Downloads a research paper from a specified URL and extracts its content page by page.
* LLM-Powered Transcripts: Transforms extracted text into a dialogue-style transcript using a large language model, alternating between a host and participants.
* AI-Generated Audio: Converts the transcript into a podcast-like audio file with natural-sounding voices for the host and participants.
* Custom Workflow: Saves the final podcast audio and transcript files locally, offering flexibility for future enhancements like handling multiple files or integrating additional AI tools.
## Prerequisites
* Python 3.8 or higher
* Required Python dependencies (install using `pip install -r requirements.txt`)
* A valid `OpenAI` API key for generating audio content
* Set the `OPENAI_API_KEY` variable with your key value in an `.env` file.
## Configuration
To run the workflow, provide a configuration file in JSON format. The `config.json` file in this folder points to the following file "[Exploring Applicability of LLM-Powered Autonomous Agents to Solve Real-life Problems](https://github.com/OTRF/MEAN/blob/main/Rodriquez%20%26%20Syynimaa%20(2024).%20Exploring%20Applicability%20of%20LLM-Powered%20Autonomous%20Agents%20to%20Solve%20Real-life%20Problems.pdf)". Config example:
```json
{
"pdf_url": "https://example.com/research-paper.pdf",
"podcast_name": "AI Explorations",
"host": {
"name": "John Doe",
"voice": "alloy"
},
"participants": [
{ "name": "Alice Smith" },
{ "name": "Bob Johnson" }
],
"max_rounds": 4,
"output_transcript_path": "podcast_dialogue.json",
"output_audio_path": "final_podcast.mp3",
"audio_model": "tts-1"
}
```
## Running the Workflow
* Place the configuration file (e.g., config.json) in the project directory.
* Run the workflow with the following command:
```bash
dapr run --app-id doc2podcast --resources-path components -- python3 workflow.py --config config.json
```
* Output:
* Transcript: A structured transcript saved as `podcast_dialogue.json` by default. An example can be found in the current directory.
  * Audio: The final podcast audio saved as `final_podcast.mp3` by default. An example can be found [here](https://on.soundcloud.com/pzjYRcJZDU3y27hz5).
## Next Steps
This workflow is a simple starting point. Future enhancements could include:
* Processing Multiple Files: Extend the workflow to handle batches of PDFs.
* Advanced Text Splitting: Dynamically split text based on content rather than pages.
* Web Search Integration: Pull additional context or related research from the web.
* Multi-Modal Content: Process documents alongside images, slides, or charts.

View File

@ -0,0 +1,20 @@
{
"pdf_url": "https://raw.githubusercontent.com/OTRF/MEAN/main/Rodriquez%20%26%20Syynimaa%20(2024).%20Exploring%20Applicability%20of%20LLM-Powered%20Autonomous%20Agents%20to%20Solve%20Real-life%20Problems.pdf",
"podcast_name": "AI Explorations",
"host": {
"name": "John Doe",
"voice": "alloy"
},
"participants": [
{
"name": "Alice Smith"
},
{
"name": "Bob Johnson"
}
],
"max_rounds": 4,
"output_transcript_path": "podcast_dialogue.json",
"output_audio_path": "final_podcast.mp3",
"audio_model": "tts-1"
}

View File

@ -0,0 +1,234 @@
[
{
"name": "John Doe",
"text": "Welcome to 'AI Explorations'. I'm your host, John Doe. I'm joined today by Alice Smith and Bob Johnson. How are both of you doing today?"
},
{
"name": "Alice Smith",
"text": "Hi John, I'm doing great, thanks for having me. Excited to discuss today's topics."
},
{
"name": "John Doe",
"text": "Fantastic. In today's episode, we'll explore the applicability of LLM-powered autonomous agents in tackling real-life problems. We'll delve into Microsoft Entra ID Administration, particularly focusing on a project named MEAN. Alice, could you tell us a bit more about this project and its relevance?"
},
{
"name": "Alice Smith",
"text": "Absolutely, John. The MEAN project is fascinating as it leverages LLM technology to perform administrative tasks in Entra ID using natural language prompts. This is particularly useful given that Microsoft has retired some key PowerShell modules for these tasks."
},
{
"name": "John Doe",
"text": "That's interesting. Bob, from a technical standpoint, what changes are happening that make projects like MEAN necessary?"
},
{
"name": "Bob Johnson",
"text": "Well, John, with Microsoft retiring old PowerShell modules, administrators now need to use the Microsoft Graph API. This change requires learning software development skills, which isn't feasible for everyone. MEAN simplifies this by using natural language inputs instead."
},
{
"name": "John Doe",
"text": "Great point, Bob. So, Alice, could these autonomous agents make administrative tasks more accessible to a wider audience?"
},
{
"name": "Alice Smith",
"text": "Certainly, John. By abstracting complex programming tasks into simple language commands, these agents democratize access to technology, lowering the barrier for many administrators."
},
{
"name": "John Doe",
"text": "The notion of autonomous LLM-powered agents is intriguing, especially when it comes to simplifying complex tasks like software development. Alice, how do you see these agents addressing the skills gap that's typically present among system administrators? For instance, their need to master software development skills, which aren't typically part of their skill set."
},
{
"name": "Alice Smith",
"text": "John, I believe these agents can play a pivotal role by taking over tasks that require extensive software development knowledge. They can interface with complex APIs like MSGraph, providing administrators with the ability to perform tasks using natural language without the need to learn coding."
},
{
"name": "John Doe",
"text": "Bob, it seems like these agents must be quite advanced to achieve this level of functionality. Can you talk about how LLMs, like those used in these agents, handle tasks they've never been specifically trained on, and what challenges they might face?"
},
{
"name": "Bob Johnson",
"text": "Certainly, John. LLMs, such as Generative Pre-trained Transformers, use task-agnostic pre-training, but require additional task-specific training to perform new tasks effectively. Challenges include maintaining consistent logic and managing hallucinations, where generated content might not accurately reflect reality."
},
{
"name": "John Doe",
"text": "That's an important point, Bob. Alice, how do these agents overcome some of these challenges to ensure accurate performance?"
},
{
"name": "Alice Smith",
"text": "They employ strategies like using the ReAct paradigm, which involves reasoning and action in a closed-loop system. By incorporating external real-world entities into their reasoning processes, they aim to be more grounded and trustworthy, which reduces issues like hallucination."
},
{
"name": "John Doe",
"text": "Fascinating. Now, looking to the future, do you believe these LLM-powered agents will play a crucial role in evolving the role of system administrators?"
},
{
"name": "Bob Johnson",
"text": "Absolutely, John. As these agents become more sophisticated, they will enhance productivity by offloading routine and complex tasks, allowing administrators to focus on strategic decision-making and innovation."
},
{
"name": "John Doe",
"text": "Continuing with our discussion on the autonomous agents for Entra ID administration, Alice, could you elaborate on some of the research questions that were pivotal to the development of the MEAN project?"
},
{
"name": "Alice Smith",
"text": "Sure, John. One of the primary research questions we focused on was determining how these autonomous LLM-powered agents can effectively assist administrators in performing Entra ID tasks. This became crucial as traditional PowerShell modules were deprecated, requiring new solutions."
},
{
"name": "John Doe",
"text": "That sounds essential. Bob, could you walk us through the structure of the research paper related to MEAN and highlight how it helps in understanding the essence of the project?"
},
{
"name": "Bob Johnson",
"text": "Certainly, John. The paper is structured to first describe the construction process of the MEAN agent, proceeding to a discussion section that encapsulates the project's essence. It offers a comprehensive view from motivation to design and testing phases."
},
{
"name": "John Doe",
"text": "Alice, let's talk about the design and development phase of MEAN. I understand Jupyter Notebooks was chosen as the platform. Could you explain why this choice was made and how it integrates with the capabilities of tools like ChatGPT and MSGraph API?"
},
{
"name": "Alice Smith",
"text": "Jupyter Notebooks was selected for its Python support, which is crucial for integrating with ChatGPT-4 API. This setup allows the agent to call external APIs easily, essential for the tasks at hand. Utilizing the OpenAPI specification from the MSGraph API documentation further streamlines this process."
},
{
"name": "John Doe",
"text": "Bob, how does the design process ensure that the agent can interpret and execute tasks accurately, especially when leveraging APIs such as MSGraph?"
},
{
"name": "Bob Johnson",
"text": "The design emphasizes a reasoning and planning loop where the agent interprets user prompts and the OpenAPI specification. It then strategically executes plans by interacting with the API to return accurate results. This methodical approach helps in achieving precision in task execution."
},
{
"name": "John Doe",
"text": "Alice, you've previously mentioned the significance of using Jupyter Notebooks for integrating various tools like ChatGPT and MSGraph API. Given the extensive properties of users from Microsoft Entra ID and the challenges MEAN faced in its first design round, how essential was it to adapt the setup further? What steps were taken to enhance the agent's understanding of the API?"
},
{
"name": "Alice Smith",
"text": "In our first design round, we realized the importance of improving the agent's grasp of the API due to its partial functionality. Hence, adapting the design to incorporate better reasoning and planning capabilities was essential. We started by ensuring that the agent can parse and understand extensive OpenAPI specifications and use parameters like $top to request more users."
},
{
"name": "John Doe",
"text": "Bob, it seems there were significant hurdles with the original MS Graph API specification, especially with its size causing browser crashes during validation. How did the team manage this aspect, and what was the impact on the agent's functionality?"
},
{
"name": "Bob Johnson",
"text": "The sheer size of the OpenAPI YAML file posed challenges, but breaking it down into manageable parts allowed us to validate it without crashing the systems. This step was crucial for the agent to execute tasks more efficiently and understand the complex relationships within the API."
},
{
"name": "John Doe",
"text": "With these enhancements, how did the team ensure that MEAN could accurately retrieve up to 1000 users per request, especially when the default is limited to 100 users?"
},
{
"name": "Alice Smith",
"text": "After refining the agent's interpretation of the API, we implemented logic to utilize the $top query parameter effectively, allowing MEAN to request and handle up to 1000 users at a time. This adjustment significantly improved its performance in managing data."
},
{
"name": "John Doe",
"text": "Bob, looking ahead, how does this adaptation enhance the agent's ability to handle real-world administrative scenarios in Entra ID?"
},
{
"name": "Bob Johnson",
"text": "By optimizing data retrieval and understanding API parameters fully, MEAN is now far better equipped to handle bulk operations and real-world administrative tasks, enhancing both efficiency and accuracy for users."
},
{
"name": "John Doe",
        "text": "Alice, are there specific use cases within Entra ID where these improvements in MEAN's capabilities have had the most impact?"
},
{
"name": "John Doe",
"text": "Alice, with all the technical modifications made to the OpenAPI specification, tell us how these changes impacted the agent's ability to interpret and execute tasks more efficiently."
},
{
"name": "Alice Smith",
"text": "The changes were substantial, John. By manually adjusting the OpenAPI specification to eliminate circular references and mark query parameters as required, we managed to maintain crucial API information. This improved the agent's ability to process and execute tasks accurately, highlighting the efficiency necessary for real-world applications."
},
{
"name": "John Doe",
"text": "That's quite an advancement. Bob, what can you tell us about the logical observations made by the agent when encountering issues, like using multiple $select parameters?"
},
{
"name": "Bob Johnson",
"text": "It's fascinating, John. The agent learned from its mistakes by recognizing that the API threw errors when $select was used multiple times. It adapted by using a single $select parameter and separating values with commas. This shows how the agent mimics human logical processes in troubleshooting."
},
{
"name": "John Doe",
"text": "Alice, do these improvements mean that tasks typically performed by an administrator using PowerShell can now be easily transferred to the agent, without needing extensive software knowledge?"
},
{
"name": "Alice Smith",
"text": "Absolutely. Now that the agent understands how to interpret the API parameters correctly, it simplifies tasks for administrators. They no longer need to know specific API calls or PowerShell cmdlets, making complex operations much more accessible."
},
{
"name": "John Doe",
"text": "Bob, what did the evaluation reveal about how the agent can empower users without software development backgrounds to accomplish tasks?"
},
{
"name": "Bob Johnson",
"text": "The evaluation was quite promising. It showed that users could achieve the desired outcomes using natural language, thanks to the agent's capability. Although there are some limitations with the current implementation, we are on the right path towards bridging the gap for non-technical users."
},
{
"name": "John Doe",
"text": "Alice, with all these technical modifications, it seems that adapting the OpenAPI specifications has been challenging but rewarding. Can you tell us about the role of open and clear communication in the success of the MEAN project?"
},
{
"name": "Alice Smith",
"text": "Absolutely, John. Communicating our progress and challenges was crucial. We've reported our processes and findings in a research paper, and we've made our source code and Jupyter notebooks publicly available on GitHub. This transparency not only facilitated collaboration but also allowed us to receive valuable feedback from the community."
},
{
"name": "John Doe",
"text": "That's commendable, Alice. It seems like these improvements have significant implications for practice. Bob, do you think the findings could transform how routine administrative tasks are approached, especially in high-stress environments like during cyber-attacks?"
},
{
"name": "Bob Johnson",
"text": "Certainly, John. The ability of LLM-powered agents to simplify complex tasks allows administrators to focus on their core responsibilities without getting bogged down by software development. This is particularly beneficial during high-pressure situations where quick decision-making is essential. However, the current limitations mean it's not fully mature for everyday tasks just yet."
},
{
"name": "John Doe",
"text": "Alice, it sounds like enabling administrators to use natural language inputs for Entra ID tasks without needing to learn coding is a major leap forward. In terms of future research, where do you see the next steps for the MEAN project?"
},
{
"name": "Alice Smith",
"text": "Moving forward, a promising direction is to explore how these agents could interface with PowerShell commands in addition to APIs. By doing so, we could potentially create a more versatile solution that isn't limited to cloud services and also leverages tasks on local systems."
},
{
"name": "John Doe",
"text": "Interesting. Bob, do you have thoughts on how exploring PowerShell integration could provide a broader application for these agents?"
},
{
"name": "Bob Johnson",
"text": "Integrating with PowerShell could allow agents to perform tasks that extend beyond cloud-based system administration, covering local environments as well. This could open doors to a generalized tool for admins who deal with hybrid IT infrastructures."
},
{
"name": "John Doe",
"text": "Thank you for such an insightful discussion on the MEAN project. To wrap up, we've explored the impressive capabilities of LLM-powered agents in simplifying complex tasks by utilizing natural language, the technical hurdles overcome in adapting OpenAPI specifications, and the potential for integrating PowerShell for broader applicability. Alice and Bob, your insights have been invaluable."
},
{
"name": "Alice Smith",
"text": "Thank you, John. It's been a pleasure discussing the project and sharing our journey with MEAN. The implications for simplifying administrative tasks are exciting, especially as we continue to evolve these capabilities."
},
{
"name": "Bob Johnson",
"text": "Absolutely, John. Exploring how MEAN addresses real-world administrative challenges underlines its potential impact, particularly in high-stress environments. I'm eager to see how future research will further break down barriers for non-technical users."
},
{
"name": "John Doe",
"text": "Thank you both for your contributions. It's clear that the work being done with MEAN is transformative and could pave the way for future innovations in cloud administration."
},
{
"name": "Alice Smith",
"text": "Thanks again, John. I look forward to further developments and encourage listeners to follow our updates on GitHub for the latest insights."
},
{
"name": "Bob Johnson",
"text": "And thank you, John, for the engaging conversation. It's always rewarding to share the exciting strides we're making in this field."
},
{
"name": "John Doe",
"text": "This concludes our episode on AI Explorations. Don't forget to check out the provided resources to delve deeper into the topics we've covered. Until next time, stay curious and keep exploring the world of AI."
},
{
"name": "Alice Smith",
"text": "Goodbye everyone, and thank you for tuning in!"
},
{
"name": "Bob Johnson",
"text": "Goodbye, and thank you for listening!"
}
]

View File

@ -0,0 +1,3 @@
dapr_agents
pydub
pypdf

View File

@ -0,0 +1,361 @@
from dapr_agents.document.reader.pdf.pypdf import PyPDFReader
from dapr_agents.types import DaprWorkflowContext
from dapr_agents import WorkflowApp
from urllib.parse import urlparse, unquote
from dotenv import load_dotenv
from typing import Dict, Any, List
from pydantic import BaseModel
from pathlib import Path
from dapr_agents import OpenAIAudioClient
from dapr_agents.types.llm import AudioSpeechRequest
from pydub import AudioSegment
import io
import requests
import logging
# Configure root logging once for the whole workflow process.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file
load_dotenv()
# Initialize the WorkflowApp that registers the workflow and tasks below
wfapp = WorkflowApp()
# Structured output models the LLM transcript response is parsed into.
class SpeakerEntry(BaseModel):
    # One dialogue turn: who speaks and what they say.
    name: str  # speaker's display name (host or participant)
    text: str  # spoken text for this turn

class PodcastDialogue(BaseModel):
    # Ordered list of dialogue turns for one generation round.
    participants: List[SpeakerEntry]
# Define Workflow logic
@wfapp.workflow(name='doc2podcast')
def doc2podcast(ctx: DaprWorkflowContext, input: Dict[str, Any]):
    # Orchestrates the whole pipeline: assign voices -> download/read PDF ->
    # per-page transcript generation -> write transcript -> synthesize audio.
    # Extract pre-validated input
    podcast_name = input["podcast_name"]
    host_config = input["host"]
    participant_configs = input["participants"]
    max_rounds = input["max_rounds"]
    file_input = input["pdf_url"]
    output_transcript_path = input["output_transcript_path"]
    output_audio_path = input["output_audio_path"]
    audio_model = input["audio_model"]

    # Step 1: Assign voices to the team
    team_config = yield ctx.call_activity(assign_podcast_voices, input={
        "host_config": host_config,
        "participant_configs": participant_configs,
    })

    # Step 2: Read PDF and get documents
    file_path = yield ctx.call_activity(download_pdf, input=file_input)
    documents = yield ctx.call_activity(read_pdf, input={"file_path": file_path})

    # Step 3: Initialize context and transcript parts
    accumulated_context = ""  # rolling record of everything said so far
    transcript_parts = []     # flat list of {"name", "text"} dialogue turns
    total_iterations = len(documents)  # one generation round per document chunk

    for chunk_index, document in enumerate(documents):
        # Generate the intermediate prompt for this chunk, carrying the
        # accumulated conversation so the dialogue stays coherent.
        document_with_context = {
            "text": document["text"],
            "iteration_index": chunk_index + 1,
            "total_iterations": total_iterations,
            "context": accumulated_context,
            "participants": [p["name"] for p in team_config["participants"]],
        }
        generated_prompt = yield ctx.call_activity(generate_prompt, input=document_with_context)

        # Use the prompt to generate the structured dialogue
        prompt_parameters = {
            "podcast_name": podcast_name,
            "host_name": team_config["host"]["name"],
            "prompt": generated_prompt,
            "max_rounds": max_rounds,
        }
        dialogue_entry = yield ctx.call_activity(generate_transcript, input=prompt_parameters)

        # Fold the new turns into the running context and the transcript
        conversations = dialogue_entry["participants"]
        for participant in conversations:
            accumulated_context += f" {participant['name']}: {participant['text']}"
            transcript_parts.append(participant)

    # Step 4: Write the final transcript to a file
    yield ctx.call_activity(write_transcript_to_file, input={"podcast_dialogue": transcript_parts, "output_path": output_transcript_path})

    # Step 5: Convert transcript to audio using team_config
    yield ctx.call_activity(convert_transcript_to_audio, input={
        "transcript_parts": transcript_parts,
        "output_path": output_audio_path,
        "voices": team_config,
        "model": audio_model,
    })
@wfapp.task
def assign_podcast_voices(host_config: Dict[str, Any], participant_configs: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Assign distinct TTS voices to the podcast host and participants.

    Voices already present in the input configs are kept (and reserved so no
    other speaker receives the same one); missing voices are filled from the
    allowed OpenAI voice list. The input dictionaries are not mutated.

    Args:
        host_config: Host configuration (``name`` and optionally ``voice``).
        participant_configs: Participant configurations (``name`` and optionally ``voice``).

    Returns:
        A dictionary with the updated ``host`` and ``participants``, including
        their assigned voices.

    Raises:
        ValueError: If there are more speakers than distinct available voices.
    """
    allowed_voices = ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
    assigned_voices = set()  # Track assigned voices to avoid duplication

    def _next_free_voice() -> str:
        # First allowed voice not yet taken; fail loudly instead of raising
        # an opaque StopIteration when the pool is exhausted.
        for voice in allowed_voices:
            if voice not in assigned_voices:
                return voice
        raise ValueError(
            f"Not enough distinct voices for all speakers: only {len(allowed_voices)} available."
        )

    # Work on copies so the caller's dictionaries are never mutated.
    host = dict(host_config)
    if "voice" not in host:
        host["voice"] = _next_free_voice()
    # Reserve the host voice even when it was preset, so participants
    # cannot be handed a duplicate (the original missed this case).
    assigned_voices.add(host["voice"])

    updated_participants = []
    for participant_config in participant_configs:
        participant = dict(participant_config)
        if "voice" not in participant:
            participant["voice"] = _next_free_voice()
        assigned_voices.add(participant["voice"])
        updated_participants.append(participant)

    # Return the updated host and participants
    return {
        "host": host,
        "participants": updated_participants,
    }
@wfapp.task
def download_pdf(pdf_url: str, local_directory: str = ".") -> str:
    """
    Download a PDF from a URL and save it locally, deriving the filename
    from the URL path (spaces replaced with underscores). Skips the
    download when the file already exists.

    Args:
        pdf_url: URL of the PDF to download.
        local_directory: Directory to store the file in (created if missing).

    Returns:
        Absolute path of the local PDF file as a string.

    Raises:
        ValueError: If no filename can be derived from the URL.
        requests.RequestException: If the download fails or times out.
    """
    try:
        parsed_url = urlparse(pdf_url)
        filename = unquote(Path(parsed_url.path).name)
        if not filename:
            raise ValueError("Invalid URL: Cannot determine filename from the URL.")

        filename = filename.replace(" ", "_")

        local_directory_path = Path(local_directory).resolve()
        local_directory_path.mkdir(parents=True, exist_ok=True)
        local_file_path = local_directory_path / filename

        if not local_file_path.exists():
            logger.info(f"Downloading PDF from {pdf_url}...")
            # Stream with a timeout: the original buffered the whole body in
            # memory and, with no timeout, could hang the workflow forever.
            with requests.get(pdf_url, stream=True, timeout=60) as response:
                response.raise_for_status()
                with open(local_file_path, "wb") as pdf_file:
                    for chunk in response.iter_content(chunk_size=8192):
                        pdf_file.write(chunk)
            logger.info(f"PDF saved to {local_file_path}")
        else:
            logger.info(f"PDF already exists at {local_file_path}")

        return str(local_file_path)
    except Exception as e:
        logger.error(f"Error downloading PDF: {e}")
        raise
@wfapp.task
def read_pdf(file_path: str) -> List[dict]:
    """
    Load a PDF from disk and return its extracted pages as plain dictionaries.

    Args:
        file_path: Path to the PDF file.

    Returns:
        One serialized (``model_dump``) dictionary per extracted document.
    """
    try:
        reader = PyPDFReader()
        return [page.model_dump() for page in reader.load(file_path)]
    except Exception as e:
        logger.error(f"Error reading document: {e}")
        raise
@wfapp.task
def generate_prompt(text: str, iteration_index: int, total_iterations: int, context: str, participants: List[str], podcast_name: str = "Podcast Name") -> str:
    """
    Build the LLM prompt for one chunk (page) of the source document.

    Args:
        text: Extracted text of the current chunk.
        iteration_index: 1-based index of this chunk.
        total_iterations: Total number of chunks in the document.
        context: Accumulated dialogue so far, used to keep continuity.
        participants: Names of non-host speakers (may be empty).
        podcast_name: Podcast title used in the opening instructions.
            Defaults to the literal placeholder "Podcast Name" so existing
            callers that do not pass it keep the original behavior.

    Returns:
        The assembled prompt string, ending with the chunk TEXT.
    """
    logger.info(f"Processing iteration {iteration_index} of {total_iterations}.")
    instructions = f"""
    CONTEXT:
    - Previous conversation: {context.strip() or "No prior context available."}
    - This is iteration {iteration_index} of {total_iterations}.
    """
    if participants:
        participant_names = ', '.join(participants)
        instructions += f"\nPARTICIPANTS: {participant_names}"
    else:
        instructions += "\nPARTICIPANTS: None (Host-only conversation)"
    if iteration_index == 1:
        # First chunk: open the show and introduce everyone.
        # The title is now interpolated instead of the hard-coded
        # 'Podcast Name' placeholder the original shipped with.
        instructions += f"""
        INSTRUCTIONS:
        - Begin with a warm welcome to the podcast titled '{podcast_name}'.
        - Introduce the host and the participants (if available).
        - Provide an overview of the topics to be discussed in this episode.
        """
    elif iteration_index == total_iterations:
        # Last chunk: wrap up and say goodbye.
        instructions += """
        INSTRUCTIONS:
        - Conclude the conversation with a summary of the discussion.
        - Include farewell messages from the host and participants.
        """
    else:
        # Middle chunks: keep the conversation flowing, no re-introductions.
        instructions += """
        INSTRUCTIONS:
        - Continue the conversation smoothly without re-introducing the podcast.
        - Follow up on the previous discussion points and introduce the next topic naturally.
        """
    # No placeholders here, so a plain (non-f) string suffices.
    instructions += """
    TASK:
    - Use the provided TEXT to guide this part of the conversation.
    - Alternate between speakers, ensuring a natural conversational flow.
    - Keep responses concise and aligned with the context.
    """
    return f"{instructions}\nTEXT:\n{text.strip()}"
@wfapp.task("""
Generate a structured podcast dialogue based on the context and text provided.
The podcast is titled '{podcast_name}' and is hosted by {host_name}.
If participants are available, each speaker is limited to a maximum of {max_rounds} turns per iteration.
A "round" is defined as one turn by the host followed by one turn by a participant.
The podcast should alternate between the host and participants.
If participants are not available, the host drives the conversation alone.
Keep the dialogue concise and ensure a natural conversational flow.
{prompt}
""")
def generate_transcript(podcast_name: str, host_name: str, prompt: str, max_rounds: int) -> PodcastDialogue:
    # LLM-backed task: the decorator's description string is the prompt template
    # executed by the workflow runtime, and the PodcastDialogue return type is the
    # structured output schema — so the body is intentionally empty.
    pass
@wfapp.task
def write_transcript_to_file(podcast_dialogue: List[Dict[str, Any]], output_path: str) -> None:
    """
    Write the final structured transcript to a file as pretty-printed JSON.

    Args:
        podcast_dialogue: Ordered list of dialogue entries (speaker name + text).
        output_path: Destination path for the JSON transcript file.

    Raises:
        Exception: Re-raises any I/O or serialization error after logging it.
    """
    # Hoisted out of the `with` block: imports should not hide inside
    # resource-management scopes (kept function-local because json is not
    # imported at module scope in this script).
    import json

    try:
        with open(output_path, "w", encoding="utf-8") as file:
            # ensure_ascii=False keeps non-ASCII speaker text readable on disk.
            json.dump(podcast_dialogue, file, ensure_ascii=False, indent=4)
        logger.info(f"Podcast dialogue successfully written to {output_path}")
    except Exception as e:
        logger.error(f"Error writing podcast dialogue to file: {e}")
        raise
@wfapp.task
def convert_transcript_to_audio(transcript_parts: List[Dict[str, Any]], output_path: str, voices: Dict[str, Any], model: str = "tts-1") -> None:
    """
    Render a structured transcript as a single MP3 via OpenAI TTS and pydub.

    Args:
        transcript_parts: List of dictionaries containing speaker and text.
        output_path: File path to save the final audio.
        voices: Dictionary containing "host" and "participants" with their assigned voices.
        model: TTS model to use (default: "tts-1").

    Raises:
        Exception: Re-raises any TTS or audio-processing error after logging it.
    """
    try:
        tts_client = OpenAIAudioClient()
        # Lead-in: half a second of silence before the first speaker.
        podcast_audio = AudioSegment.silent(duration=500)

        # Resolve each speaker name to a TTS voice; host first, then
        # participants (participant entries override a same-named host).
        speaker_voices = {voices["host"]["name"]: voices["host"]["voice"]}
        for participant in voices["participants"]:
            speaker_voices[participant["name"]] = participant["voice"]

        for segment in transcript_parts:
            speaker_name = segment["name"]
            spoken_text = segment["text"]
            # Unknown speakers fall back to the "alloy" voice.
            assigned_voice = speaker_voices.get(speaker_name, "alloy")
            logger.info(f"Generating audio for {speaker_name} using voice '{assigned_voice}'.")

            tts_request = AudioSpeechRequest(
                model=model,
                input=spoken_text,
                voice=assigned_voice,
                response_format="mp3"
            )
            speech_bytes = tts_client.create_speech(request=tts_request)
            speech_clip = AudioSegment.from_file(io.BytesIO(speech_bytes), format=tts_request.response_format)
            # Append the clip plus a short inter-turn pause.
            podcast_audio = podcast_audio + speech_clip + AudioSegment.silent(duration=300)

        podcast_audio.export(output_path, format="mp3")
        logger.info(f"Podcast audio successfully saved to {output_path}")
    except Exception as e:
        logger.error(f"Error during audio generation: {e}")
        raise
if __name__ == '__main__':
    import argparse
    import json
    import yaml

    def load_config(file_path: str) -> dict:
        """Load configuration from a JSON or YAML file (detected by extension)."""
        with open(file_path, 'r') as file:
            if file_path.endswith('.yaml') or file_path.endswith('.yml'):
                return yaml.safe_load(file)
            elif file_path.endswith('.json'):
                return json.load(file)
            else:
                raise ValueError("Unsupported file format. Use JSON or YAML.")

    # CLI Argument Parser.
    # NOTE: options that may also come from the config file default to None so
    # an argparse default cannot silently shadow a config-file value during the
    # merge below (previously --max_rounds=4 and --audio_model="tts-1" always
    # won over the config file).
    parser = argparse.ArgumentParser(description="Document to Podcast Workflow")
    parser.add_argument("--config", type=str, help="Path to a JSON/YAML config file.")
    parser.add_argument("--pdf_url", type=str, help="URL of the PDF document.")
    parser.add_argument("--podcast_name", type=str, help="Name of the podcast.")
    parser.add_argument("--host_name", type=str, help="Name of the host.")
    parser.add_argument("--host_voice", type=str, help="Voice for the host.")
    parser.add_argument("--participants", type=str, nargs='+', help="List of participant names.")
    parser.add_argument("--max_rounds", type=int, default=None, help="Maximum host/participant rounds per iteration (default: 4).")
    parser.add_argument("--output_transcript_path", type=str, help="Path to save the output transcript.")
    parser.add_argument("--output_audio_path", type=str, help="Path to save the final audio file.")
    parser.add_argument("--audio_model", type=str, default=None, help="Audio model for TTS (default: tts-1).")
    args = parser.parse_args()

    # Load config file if provided.
    config = load_config(args.config) if args.config else {}

    # Merge CLI and config inputs; explicit CLI values win over the config file.
    user_input = {
        "pdf_url": args.pdf_url or config.get("pdf_url"),
        "podcast_name": args.podcast_name or config.get("podcast_name", "Default Podcast"),
        "host": {
            "name": args.host_name or config.get("host", {}).get("name", "Host"),
            "voice": args.host_voice or config.get("host", {}).get("voice", "alloy"),
        },
        "participants": config.get("participants", []),
        "max_rounds": args.max_rounds if args.max_rounds is not None else config.get("max_rounds", 4),
        "output_transcript_path": args.output_transcript_path or config.get("output_transcript_path", "podcast_dialogue.json"),
        "output_audio_path": args.output_audio_path or config.get("output_audio_path", "final_podcast.mp3"),
        "audio_model": args.audio_model or config.get("audio_model", "tts-1"),
    }

    # Add participants from CLI if provided (appended after any config entries).
    if args.participants:
        user_input["participants"].extend({"name": name} for name in args.participants)

    # Validate inputs.
    if not user_input["pdf_url"]:
        raise ValueError("PDF URL must be provided via CLI or config file.")

    # Run the workflow.
    wfapp.run_and_monitor_workflow_sync(workflow=doc2podcast, input=user_input)

View File

@ -0,0 +1,60 @@
from dapr_agents import OpenAIChatClient, NVIDIAChatClient
from dapr_agents.types import DaprWorkflowContext
# Fixed: the dotted path previously contained a stray space ("dapr_agents. workflow").
from dapr_agents.workflow import WorkflowApp, task, workflow
from dotenv import load_dotenv
import os
import logging

# Pull API keys and endpoints from a local .env file into the process environment.
load_dotenv()

# NVIDIA-hosted Llama 3.1 8B Instruct chat client.
nvidia_llm = NVIDIAChatClient(
    model="meta/llama-3.1-8b-instruct",
    api_key=os.getenv("NVIDIA_API_KEY")
)

# OpenAI GPT-4o client; base_url allows pointing at a compatible proxy/gateway.
oai_llm = OpenAIChatClient(
    api_key=os.getenv("OPENAI_API_KEY"),
    model="gpt-4o",
    base_url=os.getenv("OPENAI_API_BASE_URL"),
)

# Azure OpenAI client targeting a "gpt-4o-mini" deployment.
azoai_llm = OpenAIChatClient(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    azure_deployment="gpt-4o-mini",
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    azure_api_version="2024-12-01-preview"
)
@workflow
def test_workflow(ctx: DaprWorkflowContext):
    """
    A simple multi-provider task chain: OpenAI -> Azure OpenAI -> NVIDIA.

    Each activity's output feeds the next one; the final NVIDIA response is
    the workflow result.
    """
    capital = yield ctx.call_activity(invoke_oai, input="Peru")
    famous_fact = yield ctx.call_activity(invoke_azoai, input=capital)
    chosen_activity = yield ctx.call_activity(invoke_nvidia, input=famous_fact)
    return chosen_activity
@task(description="What is the name of the capital of {country}?. Reply with just the name.", llm=oai_llm)
def invoke_oai(country: str) -> str:
    # LLM-backed task: the decorator's description is the prompt template and
    # the OpenAI client executes it, so the body is intentionally empty.
    pass
@task(description="What is a famous thing about {capital}?", llm=azoai_llm)
def invoke_azoai(capital: str) -> str:
    # LLM-backed task executed by the Azure OpenAI client; body intentionally empty.
    pass
@task(description="Context: {context}. From the previous context. Pick one thing to do.", llm=nvidia_llm)
def invoke_nvidia(context: str) -> str:
    # LLM-backed task executed by the NVIDIA client; body intentionally empty.
    pass
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Run the three-step chain synchronously and block until it completes.
    wfapp = WorkflowApp()
    results = wfapp.run_and_monitor_workflow_sync(workflow=test_workflow)
    logging.info("Workflow results: %s", results)
    logging.info("Workflow completed successfully.")

View File

@ -0,0 +1,151 @@
{
"instances": {
"22fb2349f9a742279ddbfae9da3330ac": {
"input": "What is 1 + 1?",
"output": "The task is currently in progress. The MathematicsAgent has successfully acknowledged the mathematical problem, identified the operands (both as 1), and set up the addition operation 1 + 1. The initial result of the addition operation has been completed, yielding 2.0.\n\nNext steps involve verifying the calculation result to ensure its accuracy and confirming the solution to conclude the task.",
"start_time": "2025-04-21T03:19:34.372003",
"end_time": "2025-04-21T03:19:53.991669",
"messages": [
{
"id": "61df6088-707b-4c39-aaad-a428f89f6007",
"role": "user",
"content": "## Mission Briefing\n\nWe have received the following task:\n\nWhat is 1 + 1?\n\n### Team of Agents\n- MathematicsAgent: Calculator Assistant (Goal: Assist Humans with calculation tasks.)\n\n### Execution Plan\nHere is the structured approach the team will follow to accomplish the task:\n\n[{'step': 1, 'description': 'Initiate the process by acknowledging the mathematical problem to solve: Determine the sum of 1 + 1.', 'status': 'not_started', 'substeps': None}, {'step': 2, 'description': \"Identify and note the operands involved in the calculation: The first number is '1', and the second number is '1'.\", 'status': 'not_started', 'substeps': [{'substep': 2.1, 'description': 'Record the first operand: 1', 'status': 'not_started'}, {'substep': 2.2, 'description': 'Record the second operand: 1', 'status': 'not_started'}]}, {'step': 3, 'description': 'Perform the addition of the identified numbers: Add the first number to the second number.', 'status': 'not_started', 'substeps': [{'substep': 3.1, 'description': 'Set up the addition operation: 1 + 1.', 'status': 'not_started'}, {'substep': 3.2, 'description': 'Execute the addition operation.', 'status': 'not_started'}]}, {'step': 4, 'description': 'Verify the calculation result to ensure accuracy of the addition process.', 'status': 'not_started', 'substeps': [{'substep': 4.1, 'description': 'Check if the operation result matches expected arithmetic principles.', 'status': 'not_started'}]}, {'step': 5, 'description': 'Conclude the task by recording and confirming the correct solution to the problem. Result: 2.', 'status': 'not_started', 'substeps': None}]\n",
"timestamp": "2025-04-21T03:19:38.416157",
"name": "LLMOrchestrator"
},
{
"id": "9b758573-9ec5-4d59-a3ac-d0cb9941b4eb",
"role": "user",
"content": "Initiate the process by acknowledging the mathematical problem to solve: Determine the sum of 1 + 1.",
"timestamp": "2025-04-21T03:19:39.479920",
"name": "LLMOrchestrator"
},
{
"id": "c29d839c-3a54-4c8f-bf33-88a228d4e902",
"role": "user",
"content": "Acknowledging the task: We need to determine the sum of 1 + 1. Let's proceed to the next step and identify the operands involved in this calculation.",
"timestamp": "2025-04-21T03:19:40.831663",
"name": "MathematicsAgent"
},
{
"id": "cb95ebd3-8eb8-48c6-aaa3-ba2507ee9a62",
"role": "user",
"content": "Please record the second operand: 1.",
"timestamp": "2025-04-21T03:19:43.579299",
"name": "LLMOrchestrator"
},
{
"id": "c3b61fe8-ea8b-48a8-a255-7d495d825727",
"role": "user",
"content": "The second operand involved in this calculation is recorded as: 1. Now, let's proceed to perform the addition of the identified numbers.",
"timestamp": "2025-04-21T03:19:44.589307",
"name": "MathematicsAgent"
},
{
"id": "7fd1f829-eeae-42f0-b6f9-cf3ef6464fff",
"role": "user",
"content": "Proceed to set up the addition operation with the recorded operands: 1 + 1.",
"timestamp": "2025-04-21T03:19:47.387450",
"name": "LLMOrchestrator"
},
{
"id": "035932ab-766e-43af-bab8-5fd1a8c6dd4b",
"role": "user",
"content": "The result of the addition operation 1 + 1 is 2.0. Let's verify the calculation result to ensure the accuracy of the addition process.",
"timestamp": "2025-04-21T03:19:50.046203",
"name": "MathematicsAgent"
}
],
"last_message": {
"id": "035932ab-766e-43af-bab8-5fd1a8c6dd4b",
"role": "user",
"content": "The result of the addition operation 1 + 1 is 2.0. Let's verify the calculation result to ensure the accuracy of the addition process.",
"timestamp": "2025-04-21T03:19:50.046203",
"name": "MathematicsAgent"
},
"plan": [
{
"step": 1,
"description": "Initiate the process by acknowledging the mathematical problem to solve: Determine the sum of 1 + 1.",
"status": "completed",
"substeps": null
},
{
"step": 2,
"description": "Identify and note the operands involved in the calculation: The first number is '1', and the second number is '1'.",
"status": "completed",
"substeps": [
{
"substep": 2.1,
"description": "Record the first operand: 1",
"status": "completed"
},
{
"substep": 2.2,
"description": "Record the second operand: 1",
"status": "completed"
}
]
},
{
"step": 3,
"description": "Perform the addition of the identified numbers: Add the first number to the second number.",
"status": "in_progress",
"substeps": [
{
"substep": 3.1,
"description": "Set up the addition operation: 1 + 1.",
"status": "completed"
},
{
"substep": 3.2,
"description": "Execute the addition operation.",
"status": "not_started"
}
]
},
{
"step": 4,
"description": "Verify the calculation result to ensure accuracy of the addition process.",
"status": "not_started",
"substeps": [
{
"substep": 4.1,
"description": "Check if the operation result matches expected arithmetic principles.",
"status": "not_started"
}
]
},
{
"step": 5,
"description": "Conclude the task by recording and confirming the correct solution to the problem. Result: 2.",
"status": "in_progress",
"substeps": null
}
],
"task_history": [
{
"agent": "MathematicsAgent",
"step": 1,
"substep": null,
"result": "Acknowledging the task: We need to determine the sum of 1 + 1. Let's proceed to the next step and identify the operands involved in this calculation.",
"timestamp": "2025-04-21T03:19:40.835007"
},
{
"agent": "MathematicsAgent",
"step": 2,
"substep": 2.2,
"result": "The second operand involved in this calculation is recorded as: 1. Now, let's proceed to perform the addition of the identified numbers.",
"timestamp": "2025-04-21T03:19:44.590818"
},
{
"agent": "MathematicsAgent",
"step": 3,
"substep": 3.1,
"result": "The result of the addition operation 1 + 1 is 2.0. Let's verify the calculation result to ensure the accuracy of the addition process.",
"timestamp": "2025-04-21T03:19:50.048520"
}
]
}
}
}

View File

@ -0,0 +1,413 @@
# Dapr Agents Calculator Demo
## Prerequisites
- Python 3.10 or later
- Dapr CLI (v1.15.x)
- Redis (for state storage and pub/sub)
- OpenAI API key
## Setup
1. Create and activate a virtual environment:
```bash
# Create a virtual environment
python3.10 -m venv .venv
# Activate the virtual environment
# On Windows:
.venv\Scripts\activate
# On macOS/Linux:
source .venv/bin/activate
```
2. Install dependencies:
```bash
pip install -r requirements.txt
```
3. Set up environment variables: Create a `.env` file to securely store your API keys and other sensitive information. For example:
```
OPENAI_API_KEY="your-api-key"
OPENAI_BASE_URL="https://api.openai.com/v1"
```
## Running the Application
Make sure Redis is running on your local machine (default port 6379).
### Running All Components with Dapr
1. Start the calculator agent:
```bash
dapr run --app-id CalculatorApp --app-port 8002 --resources-path ./components python calculator_agent.py
```
2. Start the LLM orchestrator:
```bash
dapr run --app-id OrchestratorApp --app-port 8004 --resources-path ./components python llm_orchestrator.py
```
3. Run the client:
```bash
dapr run --app-id ClientApp --dapr-http-port 3502 --resources-path ./components -- python client.py
```
## Expected Behavior
### LLM Orchestrator
```
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Workflow iteration 1 started (Instance ID: 22fb2349f9a742279ddbfae9da3330ac).
== APP == 2025-04-21 03:19:34.372 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.base:Started workflow with instance ID 22fb2349f9a742279ddbfae9da3330ac.
== APP == INFO:dapr_agents.workflow.base:Monitoring workflow '22fb2349f9a742279ddbfae9da3330ac'...
== APP == 2025-04-21 03:19:34.377 durabletask-client INFO: Waiting up to 300s for instance '22fb2349f9a742279ddbfae9da3330ac' to complete.
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Initial message from User -> LLMOrchestrator
== APP == 2025-04-21 03:19:34.383 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == 2025-04-21 03:19:38.396 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == 2025-04-21 03:19:38.410 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator broadcasting message to beacon_channel.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == 2025-04-21 03:19:38.427 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == 2025-04-21 03:19:39.462 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == 2025-04-21 03:19:39.476 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator broadcasting message to beacon_channel.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == 2025-04-21 03:19:39.490 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Triggering agent MathematicsAgent for step 1, substep None (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Marked step 1, substep None as 'in_progress'
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator sending message to agent 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'TriggerAction' to topic 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Waiting for MathematicsAgent's response...
== APP == 2025-04-21 03:19:39.502 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 1 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'AgentTaskResponse'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_agent_response' for event type 'AgentTaskResponse'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:LLMOrchestrator processing agent response for workflow instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Raising workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'
== APP == 2025-04-21 03:19:40.819 durabletask-client INFO: Raising event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Successfully raised workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'!
== APP == 2025-04-21 03:19:40.827 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac Event raised: agenttaskresponse
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:MathematicsAgent sent a response.
== APP == 2025-04-21 03:19:40.827 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating task history for MathematicsAgent at step 1, substep None (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == 2025-04-21 03:19:40.843 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Tracking Progress: {'verdict': 'continue', 'plan_needs_update': False, 'plan_status_update': [{'step': 1, 'substep': None, 'status': 'completed'}, {'step': 2, 'substep': None, 'status': 'in_progress'}, {'step': 2, 'substep': 2.1, 'status': 'in_progress'}], 'plan_restructure': None}
== APP == 2025-04-21 03:19:42.532 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating plan for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 1, substep None to 'completed'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 2, substep None to 'in_progress'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 2, substep 2.1 to 'in_progress'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Plan successfully updated for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Workflow iteration 2 started (Instance ID: 22fb2349f9a742279ddbfae9da3330ac).
== APP == 2025-04-21 03:19:42.543 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == 2025-04-21 03:19:42.552 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == 2025-04-21 03:19:43.561 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == 2025-04-21 03:19:43.574 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator broadcasting message to beacon_channel.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == 2025-04-21 03:19:43.593 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Triggering agent MathematicsAgent for step 2, substep 2.2 (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Marked step 2, substep 2.2 as 'in_progress'
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator sending message to agent 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'TriggerAction' to topic 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Waiting for MathematicsAgent's response...
== APP == 2025-04-21 03:19:43.605 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 1 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'AgentTaskResponse'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_agent_response' for event type 'AgentTaskResponse'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:LLMOrchestrator processing agent response for workflow instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Raising workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'
== APP == 2025-04-21 03:19:44.581 durabletask-client INFO: Raising event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Successfully raised workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'!
== APP == 2025-04-21 03:19:44.585 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac Event raised: agenttaskresponse
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:MathematicsAgent sent a response.
== APP == 2025-04-21 03:19:44.585 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating task history for MathematicsAgent at step 2, substep 2.2 (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == 2025-04-21 03:19:44.600 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Tracking Progress: {'verdict': 'continue', 'plan_needs_update': False, 'plan_status_update': [{'step': 2, 'substep': 2.1, 'status': 'completed'}, {'step': 2, 'substep': 2.2, 'status': 'completed'}, {'step': 2, 'substep': None, 'status': 'completed'}], 'plan_restructure': None}
== APP == 2025-04-21 03:19:46.130 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating plan for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 2, substep 2.1 to 'completed'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 2, substep 2.2 to 'completed'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 2, substep None to 'completed'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Plan successfully updated for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Workflow iteration 3 started (Instance ID: 22fb2349f9a742279ddbfae9da3330ac).
== APP == 2025-04-21 03:19:46.159 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == 2025-04-21 03:19:46.174 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == 2025-04-21 03:19:47.370 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == 2025-04-21 03:19:47.383 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator broadcasting message to beacon_channel.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == 2025-04-21 03:19:47.403 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Triggering agent MathematicsAgent for step 3, substep 3.1 (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Marked step 3, substep 3.1 as 'in_progress'
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator sending message to agent 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'TriggerAction' to topic 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Waiting for MathematicsAgent's response...
== APP == 2025-04-21 03:19:47.417 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 1 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'AgentTaskResponse'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_agent_response' for event type 'AgentTaskResponse'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:LLMOrchestrator processing agent response for workflow instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Raising workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'
== APP == 2025-04-21 03:19:50.031 durabletask-client INFO: Raising event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Successfully raised workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'!
== APP == 2025-04-21 03:19:50.038 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac Event raised: agenttaskresponse
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:MathematicsAgent sent a response.
== APP == 2025-04-21 03:19:50.039 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating task history for MathematicsAgent at step 3, substep 3.1 (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == 2025-04-21 03:19:50.055 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Tracking Progress: {'verdict': 'completed', 'plan_needs_update': False, 'plan_status_update': [{'step': 3, 'substep': 3.1, 'status': 'completed'}, {'step': 3, 'substep': 3.2, 'status': 'completed'}, {'step': 3, 'substep': None, 'status': 'completed'}, {'step': 4, 'substep': 4.1, 'status': 'completed'}, {'step': 4, 'substep': None, 'status': 'completed'}, {'step': 5, 'substep': None, 'status': 'completed'}], 'plan_restructure': None}
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Workflow ending with verdict: completed
== APP == 2025-04-21 03:19:52.263 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == 2025-04-21 03:19:53.984 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating plan for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 3, substep 3.1 to 'completed'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Plan successfully updated for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Workflow 22fb2349f9a742279ddbfae9da3330ac has been finalized with verdict: completed
== APP == 2025-04-21 03:19:53.998 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestration completed with status: COMPLETED
INFO[0044] 22fb2349f9a742279ddbfae9da3330ac: 'LLMWorkflow' completed with a COMPLETED status. app_id=OrchestratorApp instance=mac.lan scope=dapr.wfengine.durabletask.backend type=log ver=1.15.3
INFO[0044] Workflow Actor '22fb2349f9a742279ddbfae9da3330ac': workflow completed with status 'ORCHESTRATION_STATUS_COMPLETED' workflowName 'LLMWorkflow' app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.targets.workflow type=log ver=1.15.3
== APP == 2025-04-21 03:19:53.999 durabletask-client INFO: Instance '22fb2349f9a742279ddbfae9da3330ac' completed.
== APP == INFO:dapr_agents.workflow.base:Workflow 22fb2349f9a742279ddbfae9da3330ac completed with status: WorkflowStatus.COMPLETED.
== APP == INFO:dapr_agents.workflow.base:Workflow '22fb2349f9a742279ddbfae9da3330ac' completed successfully. Status: COMPLETED.
== APP == INFO:dapr_agents.workflow.base:Finished monitoring workflow '22fb2349f9a742279ddbfae9da3330ac'.
INFO[0076] Placement tables updated, version: 103 app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.placement type=log ver=1.15.3
INFO[0076] Running actor reminder migration from state store to scheduler app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.reminders.migration type=log ver=1.15.3
INFO[0076] Skipping migration, no missing scheduler reminders found app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.reminders.migration type=log ver=1.15.3
INFO[0076] Found 0 missing scheduler reminders from state store app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.reminders.migration type=log ver=1.15.3
INFO[0076] Migrated 0 reminders from state store to scheduler successfully app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.reminders.migration type=log ver=1.15.3
^C
terminated signal received: shutting down
INFO[0081] Received signal 'interrupt'; beginning shutdown app_id=OrchestratorApp instance=mac.lan scope=dapr.signals type=log ver=1.15.3
✅ Exited Dapr successfully
✅ Exited App successfully
```
### MathematicsAgent
```
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'LLMOrchestrator'.
== APP == INFO:dapr_agents.agent.actor.base:Activating actor with ID: MathematicsAgent
== APP == INFO:dapr_agents.agent.actor.base:Initializing state for MathematicsAgent
WARN[0021] Redis does not support transaction rollbacks and should not be used in production as an actor state store. app_id=CalculatorApp component="workflowstatestore (state.redis/v1)" instance=mac.lan scope=dapr.contrib type=log ver=1.15.3
== APP == INFO: 127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/AddMessage HTTP/1.1" 200 OK
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'LLMOrchestrator'.
== APP == INFO: 127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/AddMessage HTTP/1.1" 200 OK
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'TriggerAction'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_trigger_action' for event type 'TriggerAction'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received TriggerAction from LLMOrchestrator.
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent executing default task from memory.
== APP == INFO:dapr_agents.agent.actor.base:Actor MathematicsAgent invoking a task
== APP == INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 1/10 started.
== APP == INFO:dapr_agents.llm.utils.request:Tools are available in the request.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == user:
== APP == Initiate the process by acknowledging the mathematical problem to solve: Determine the sum of 1 + 1.
== APP ==
== APP == --------------------------------------------------------------------------------
== APP ==
== APP == assistant:
== APP == Acknowledging the task: We need to determine the sum of 1 + 1. Let's proceed to the next step and identify the operands involved in this calculation.
== APP ==
== APP == --------------------------------------------------------------------------------
== APP ==
== APP == INFO: 127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/InvokeTask HTTP/1.1" 200 OK
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent broadcasting message to selected agents.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent sending message to agent 'LLMOrchestrator'.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'MathematicsAgent'.
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent ignored its own broadcast message of type 'BroadcastMessage'.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'AgentTaskResponse' to topic 'LLMOrchestrator'.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'LLMOrchestrator'.
== APP == INFO: 127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/AddMessage HTTP/1.1" 200 OK
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'TriggerAction'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_trigger_action' for event type 'TriggerAction'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received TriggerAction from LLMOrchestrator.
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent executing default task from memory.
== APP == INFO:dapr_agents.agent.actor.base:Actor MathematicsAgent invoking a task
== APP == INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 1/10 started.
== APP == INFO:dapr_agents.llm.utils.request:Tools are available in the request.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == user:
== APP == Please record the second operand: 1.
== APP ==
== APP == --------------------------------------------------------------------------------
== APP ==
== APP == assistant:
== APP == The second operand involved in this calculation is recorded as: 1. Now, let's proceed to perform the addition of the identified numbers.
== APP ==
== APP == --------------------------------------------------------------------------------
== APP ==
== APP == INFO: 127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/InvokeTask HTTP/1.1" 200 OK
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent broadcasting message to selected agents.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent sending message to agent 'LLMOrchestrator'.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'MathematicsAgent'.
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent ignored its own broadcast message of type 'BroadcastMessage'.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'AgentTaskResponse' to topic 'LLMOrchestrator'.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'LLMOrchestrator'.
== APP == INFO: 127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/AddMessage HTTP/1.1" 200 OK
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'TriggerAction'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_trigger_action' for event type 'TriggerAction'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received TriggerAction from LLMOrchestrator.
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent executing default task from memory.
== APP == INFO:dapr_agents.agent.actor.base:Actor MathematicsAgent invoking a task
== APP == INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 1/10 started.
== APP == INFO:dapr_agents.llm.utils.request:Tools are available in the request.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.agent.patterns.toolcall.base:Executing Add with arguments {"a":1,"b":1}
== APP == INFO:dapr_agents.tool.executor:Running tool (auto): Add
== APP == INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 2/10 started.
== APP == INFO:dapr_agents.llm.utils.request:Tools are available in the request.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == user:
== APP == Proceed to set up the addition operation with the recorded operands: 1 + 1.
== APP ==
== APP == --------------------------------------------------------------------------------
== APP ==
== APP == assistant:
== APP == Function name: Add (Call Id: call_ac3Xlh4pn7tBFkrI2K9uOqvG)
== APP == Arguments: {"a":1,"b":1}
== APP ==
== APP == --------------------------------------------------------------------------------
== APP ==
== APP == Add(tool) (Id: call_ac3Xlh4pn7tBFkrI2K9uOqvG):
== APP == 2.0
== APP ==
== APP == --------------------------------------------------------------------------------
== APP ==
== APP == assistant:
== APP == The result of the addition operation 1 + 1 is 2.0. Let's verify the calculation result to ensure the accuracy of the addition process.
== APP ==
== APP == --------------------------------------------------------------------------------
== APP ==
== APP == INFO: 127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/InvokeTask HTTP/1.1" 200 OK
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent broadcasting message to selected agents.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent sending message to agent 'LLMOrchestrator'.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'MathematicsAgent'.
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent ignored its own broadcast message of type 'BroadcastMessage'.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'AgentTaskResponse' to topic 'LLMOrchestrator'.
^C
terminated signal received: shutting down
✅ Exited Dapr successfully
✅ Exited App successfully
```

View File

@ -0,0 +1,62 @@
from dapr_agents import tool
from dapr_agents import AgentActor
from pydantic import BaseModel, Field
from dapr_agents import Agent
from dotenv import load_dotenv
import logging
import asyncio
import os
class AddSchema(BaseModel):
    # Argument schema for the `add` tool; the Field descriptions surface in the
    # generated tool specification shown to the LLM, so keep them user-facing.
    a: float = Field(description="first number to add")
    b: float = Field(description="second number to add")
@tool(args_model=AddSchema)
def add(a: float, b: float) -> float:
    """Add two numbers."""
    # Plain float addition; the @tool wrapper exposes this callable (with
    # AddSchema as its argument contract) to the agent's tool executor.
    total = a + b
    return total
class SubSchema(BaseModel):
    # Argument schema for the `sub` tool; descriptions feed the tool spec.
    a: float = Field(description="first number to subtract")
    b: float = Field(description="second number to subtract")
@tool(args_model=SubSchema)
def sub(a: float, b: float) -> float:
    """Subtract two numbers."""
    # Computes a - b; exposed to the agent via the @tool decorator.
    difference = a - b
    return difference
async def main():
    """Configure the MathematicsAgent and run it as a Dapr actor service.

    Builds an Agent with the `add`/`sub` tools, wraps it in an AgentActor
    bound to the 'pubsub' message bus on port 8002, and serves it until
    shutdown.
    """
    calculator_agent = Agent(
        name="MathematicsAgent",
        role="Calculator Assistant",
        goal="Assist Humans with calculation tasks.",
        instructions=[
            "Get accurate calculation results",
            "Break down the calculation into smaller steps.",
        ],
        tools=[add, sub],
    )
    calculator_service = AgentActor(
        agent=calculator_agent,
        message_bus_name="pubsub",
        agents_registry_key="agents_registry",
        agents_registry_store_name="agentstatestore",
        service_port=8002,
    )
    try:
        # Consistent with the other agent services in this repo: report
        # startup failures instead of letting them escape asyncio.run().
        await calculator_service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,59 @@
#!/usr/bin/env python3
import json
import sys
import time
from dapr.clients import DaprClient
# Default Pub/Sub component
PUBSUB_NAME = "pubsub"
def main(orchestrator_topic, max_attempts=10, retry_delay=1):
    """
    Publishes a task to a specified Dapr Pub/Sub topic with retries.

    Exits the process with status 0 on the first successful publish, or
    status 1 once all attempts are exhausted.

    Args:
        orchestrator_topic (str): The name of the orchestrator topic.
        max_attempts (int): Maximum number of retry attempts.
        retry_delay (int): Delay in seconds between attempts.
    """
    task_message = {
        "task": "What is 1 + 1?",
    }

    # Give the orchestrator's sidecar time to come up before publishing.
    time.sleep(5)

    for attempt in range(1, max_attempts + 1):
        try:
            print(f"📢 Attempt {attempt}: Publishing to topic '{orchestrator_topic}'...")
            with DaprClient() as client:
                client.publish_event(
                    pubsub_name=PUBSUB_NAME,
                    topic_name=orchestrator_topic,
                    data=json.dumps(task_message),
                    data_content_type="application/json",
                    publish_metadata={
                        # Routed by the orchestrator's TriggerAction handler.
                        "cloudevent.type": "TriggerAction",
                    },
                )
            print(f"✅ Successfully published request to '{orchestrator_topic}'")
            sys.exit(0)
        except Exception as e:
            print(f"❌ Request failed: {e}")
            # Fix: only wait when another attempt remains; the original
            # slept for retry_delay even after the final failure.
            if attempt < max_attempts:
                print(f"⏳ Waiting {retry_delay}s before next attempt...")
                time.sleep(retry_delay)

    print(f"❌ Maximum attempts ({max_attempts}) reached without success.")
    sys.exit(1)


if __name__ == "__main__":
    orchestrator_topic = 'LLMOrchestrator'
    main(orchestrator_topic)

View File

@ -1,7 +1,7 @@
apiVersion: dapr.io/v1alpha1 apiVersion: dapr.io/v1alpha1
kind: Component kind: Component
metadata: metadata:
name: conversationstore name: agentstatestore
spec: spec:
type: state.redis type: state.redis
version: v1 version: v1

View File

@ -1,7 +1,7 @@
apiVersion: dapr.io/v1alpha1 apiVersion: dapr.io/v1alpha1
kind: Component kind: Component
metadata: metadata:
name: messagepubsub name: pubsub
spec: spec:
type: pubsub.redis type: pubsub.redis
version: v1 version: v1
@ -10,7 +10,3 @@ spec:
value: localhost:6379 value: localhost:6379
- name: redisPassword - name: redisPassword
value: "" value: ""
- name: consumerID
value: "travel-planner-group"
- name: enableTLS
value: "false"

View File

@ -0,0 +1,14 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: workflowstatestore
spec:
type: state.redis
version: v1
metadata:
- name: redisHost
value: localhost:6379
- name: redisPassword
value: ""
- name: actorStateStore
value: "true"

View File

@ -0,0 +1,31 @@
from dapr_agents import LLMOrchestrator
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Start the LLM orchestrator workflow as an HTTP service on port 8004."""
    try:
        orchestrator = LLMOrchestrator(
            name="LLMOrchestrator",
            message_bus_name="pubsub",
            state_store_name="workflowstatestore",
            state_key="workflow_state",
            agents_registry_store_name="agentstatestore",
            agents_registry_key="agents_registry",
            max_iterations=20,  # Increased from 3 to 20 to avoid potential issues
        )
        service = orchestrator.as_service(port=8004)
        await service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,2 @@
dapr-agents>=0.4.2
python-dotenv

View File

@ -0,0 +1,91 @@
# Multi-Agent LOTR: Agents as Actors
This guide shows you how to set up and run an event-driven agentic workflow using Dapr Agents. By leveraging [Dapr Pub/Sub](https://docs.dapr.io/developing-applications/building-blocks/pubsub/pubsub-overview/) and FastAPI, `Dapr Agents` enables agents to collaborate dynamically in decentralized systems.
## Prerequisites
Before you start, ensure you have the following:
* [Dapr Agents environment set up](https://github.com/dapr/dapr-agents), including Python 3.8 or higher and Dapr CLI.
* Docker installed and running.
* Basic understanding of microservices and event-driven architecture.
## Project Structure
The project is organized into multiple services, each representing an agent or a workflow. Here's the layout:
```
├── components/ # Dapr configuration files
│ ├── statestore.yaml # State store configuration
│ ├── pubsub.yaml # Pub/Sub configuration
├── services/ # Directory for services
│ ├── hobbit/ # Hobbit Agent Service
│ │ └── app.py # FastAPI app for Hobbit
│ ├── wizard/ # Wizard Agent Service
│ │ └── app.py # FastAPI app for Wizard
│ ├── elf/ # Elf Agent Service
│ │ └── app.py # FastAPI app for Elf
│ ├── workflow-roundrobin/ # Workflow Service
│ └── app.py # Orchestrator Workflow
├── dapr.yaml # Multi-App Run Template
```
## Running the Services
0. Set Up Environment Variables: Create an `.env` file to securely store your API keys and other sensitive information. For example:
```
OPENAI_API_KEY="your-api-key"
OPENAI_BASE_URL="https://api.openai.com/v1"
```
1. Multi-App Run: Use the dapr.yaml file to start all services simultaneously:
```bash
dapr run -f .
```
2. Verify console Logs: Each service outputs logs to confirm successful initialization.
3. Verify Redis entries: Access the Redis Insight interface at `http://localhost:5540/`
## Starting the Workflow
Send an HTTP POST request to the workflow service to start the workflow. Use curl or any API client:
```bash
curl -i -X POST http://localhost:8009/start-workflow \
-H "Content-Type: application/json" \
-d '{"task": "Lets solve the riddle to open the Doors of Durin and enter Moria."}'
```
```
HTTP/1.1 200 OK
date: Thu, 05 Dec 2024 07:46:19 GMT
server: uvicorn
content-length: 104
content-type: application/json
{"message":"Workflow initiated successfully.","workflow_instance_id":"422ab3c3f58f4221a36b36c05fefb99b"}
```
The workflow will trigger agents in a round-robin sequence to process the message.
## Monitoring Workflow Execution
1. Check console logs to trace activities in the workflow.
2. Verify Redis entries: Access the Redis Insight interface at `http://localhost:5540/`
3. As mentioned earlier, when we ran `dapr init`, Dapr initialized a `Zipkin` container instance, used for observability and tracing. Open `http://localhost:9411/zipkin/` in your browser to view traces > Find a Trace > Run Query.
4. Select the trace entry with multiple spans labeled `<workflow name>: /taskhubsidecarservice/startinstance.`. When you open this entry, you'll see details about how each task or activity in the workflow was executed. If any task failed, the error will also be visible here.
5. Check console logs to validate that the workflow was executed successfully.
### Reset Redis Database
1. Access the Redis Insight interface at `http://localhost:5540/`
2. In the search bar type `*` to select all items in the database.
3. Click on `Bulk Actions` > `Delete` > `Delete`

View File

@ -1,7 +1,7 @@
apiVersion: dapr.io/v1alpha1 apiVersion: dapr.io/v1alpha1
kind: Component kind: Component
metadata: metadata:
name: registrystatestore name: agentsregistrystore
spec: spec:
type: state.redis type: state.redis
version: v1 version: v1
@ -10,7 +10,7 @@ spec:
value: localhost:6379 value: localhost:6379
- name: redisPassword - name: redisPassword
value: "" value: ""
- name: enableTLS
value: "false"
- name: keyPrefix - name: keyPrefix
value: none value: none
- name: actorStateStore
value: "true"

View File

@ -1,7 +1,7 @@
apiVersion: dapr.io/v1alpha1 apiVersion: dapr.io/v1alpha1
kind: Component kind: Component
metadata: metadata:
name: conversationstore name: agenticworkflowstate
spec: spec:
type: state.redis type: state.redis
version: v1 version: v1

View File

@ -0,0 +1,28 @@
# https://docs.dapr.io/developing-applications/local-development/multi-app-dapr-run/multi-app-template/#template-properties
version: 1
common:
resourcesPath: ./components
logLevel: info
appLogDestination: console
daprdLogDestination: console
apps:
- appID: HobbitApp
appDirPath: ./services/hobbit/
appPort: 8001
command: ["python3", "app.py"]
- appID: WizardApp
appDirPath: ./services/wizard/
appPort: 8002
command: ["python3", "app.py"]
- appID: ElfApp
appDirPath: ./services/elf/
appPort: 8003
command: ["python3", "app.py"]
- appID: WorkflowApp
appDirPath: ./services/workflow-roundrobin/
command: ["python3", "app.py"]
appPort: 8004

View File

@ -0,0 +1,40 @@
from dapr_agents import Agent, AgentActor
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Run Legolas (Elf) as a Dapr actor-backed agent service on port 8003."""
    try:
        # Build the agent and wrap it as an actor service in a single step.
        service = AgentActor(
            agent=Agent(
                role="Elf",
                name="Legolas",
                goal="Act as a scout, marksman, and protector, using keen senses and deadly accuracy to ensure the success of the journey.",
                instructions=[
                    "Speak like Legolas, with grace, wisdom, and keen observation.",
                    "Be swift, silent, and precise, moving effortlessly across any terrain.",
                    "Use superior vision and heightened senses to scout ahead and detect threats.",
                    "Excel in ranged combat, delivering pinpoint arrow strikes from great distances.",
                    "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
                ],
            ),
            message_bus_name="messagepubsub",
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
            service_port=8003,
        )
        await service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,40 @@
from dapr_agents import Agent, AgentActor
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Run Frodo (Hobbit) as a Dapr actor-backed agent service on port 8001."""
    try:
        # Build the agent and wrap it as an actor service in a single step.
        service = AgentActor(
            agent=Agent(
                role="Hobbit",
                name="Frodo",
                goal="Carry the One Ring to Mount Doom, resisting its corruptive power while navigating danger and uncertainty.",
                instructions=[
                    "Speak like Frodo, with humility, determination, and a growing sense of resolve.",
                    "Endure hardships and temptations, staying true to the mission even when faced with doubt.",
                    "Seek guidance and trust allies, but bear the ultimate burden alone when necessary.",
                    "Move carefully through enemy-infested lands, avoiding unnecessary risks.",
                    "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
                ],
            ),
            message_bus_name="messagepubsub",
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
            service_port=8001,
        )
        await service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,40 @@
from dapr_agents import Agent, AgentActor
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Run Gandalf (Wizard) as a Dapr actor-backed agent service on port 8002."""
    try:
        # Build the agent and wrap it as an actor service in a single step.
        service = AgentActor(
            agent=Agent(
                role="Wizard",
                name="Gandalf",
                goal="Guide the Fellowship with wisdom and strategy, using magic and insight to ensure the downfall of Sauron.",
                instructions=[
                    "Speak like Gandalf, with wisdom, patience, and a touch of mystery.",
                    "Provide strategic counsel, always considering the long-term consequences of actions.",
                    "Use magic sparingly, applying it when necessary to guide or protect.",
                    "Encourage allies to find strength within themselves rather than relying solely on your power.",
                    "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
                ],
            ),
            message_bus_name="messagepubsub",
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
            service_port=8002,
        )
        await service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,27 @@
from dapr_agents import LLMOrchestrator
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Start the LLM-driven orchestrator as an HTTP service on port 8004."""
    # Collect the orchestrator configuration in one place for readability.
    settings = dict(
        name="Orchestrator",
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",
        state_key="workflow_state",
        agents_registry_store_name="agentsregistrystore",
        agents_registry_key="agents_registry",
        max_iterations=25,
    )
    try:
        await LLMOrchestrator(**settings).as_service(port=8004).start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,27 @@
from dapr_agents import RandomOrchestrator
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Start the random-selection orchestrator as an HTTP service on port 8004."""
    # Collect the orchestrator configuration in one place for readability.
    settings = dict(
        name="Orchestrator",
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",
        state_key="workflow_state",
        agents_registry_store_name="agentsregistrystore",
        agents_registry_key="agents_registry",
        max_iterations=3,
    )
    try:
        await RandomOrchestrator(**settings).as_service(port=8004).start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,27 @@
from dapr_agents import RoundRobinOrchestrator
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Start the round-robin orchestrator as an HTTP service on port 8004."""
    # Collect the orchestrator configuration in one place for readability.
    settings = dict(
        name="Orchestrator",
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",
        state_key="workflow_state",
        agents_registry_store_name="agentsregistrystore",
        agents_registry_key="agents_registry",
        max_iterations=3,
    )
    try:
        await RoundRobinOrchestrator(**settings).as_service(port=8004).start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -1,7 +1,7 @@
apiVersion: dapr.io/v1alpha1 apiVersion: dapr.io/v1alpha1
kind: Component kind: Component
metadata: metadata:
name: agentstatestore name: agentsregistrystore
spec: spec:
type: state.redis type: state.redis
version: v1 version: v1

View File

@ -0,0 +1,14 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: agenticworkflowstate
spec:
type: state.redis
version: v1
metadata:
- name: redisHost
value: localhost:6379
- name: redisPassword
value: ""
- name: actorStateStore
value: "true"

View File

@ -0,0 +1,48 @@
# https://docs.dapr.io/developing-applications/local-development/multi-app-dapr-run/multi-app-template/#template-properties
version: 1
common:
resourcesPath: ./components
logLevel: info
appLogDestination: console
daprdLogDestination: console
apps:
- appID: HobbitApp
appDirPath: ./services/hobbit/
command: ["python3", "app.py"]
- appID: WizardApp
appDirPath: ./services/wizard/
command: ["python3", "app.py"]
- appID: ElfApp
appDirPath: ./services/elf/
command: ["python3", "app.py"]
- appID: DwarfApp
appDirPath: ./services/dwarf/
command: ["python3", "app.py"]
- appID: RangerApp
appDirPath: ./services/ranger/
command: ["python3", "app.py"]
- appID: EagleApp
appDirPath: ./services/eagle/
command: ["python3", "app.py"]
- appID: LLMOrchestratorApp
appDirPath: ./services/orchestrator/
command: ["python3", "app.py"]
appPort: 8004
#- appID: RandomApp
# appDirPath: ./services/workflow-random/
# appPort: 8009
# command: ["python3", "app.py"]
#- appID: RoundRobinApp
# appDirPath: ./services/workflow-roundrobin/
# appPort: 8009
# command: ["python3", "app.py"]

View File

@ -0,0 +1,36 @@
from dapr_agents import AssistantAgent
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Run Gimli (Dwarf) as an AssistantAgent service."""
    # Gather the agent configuration in one mapping, then unpack it.
    config = dict(
        name="Gimli",
        role="Dwarf",
        goal="Fight fiercely in battle, protect allies, and expertly navigate underground realms and stonework.",
        instructions=[
            "Speak like Gimli, with boldness and a warrior's pride.",
            "Be strong-willed, fiercely loyal, and protective of companions.",
            "Excel in close combat and battlefield tactics, favoring axes and brute strength.",
            "Navigate caves, tunnels, and ancient stonework with expert knowledge.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",
        state_key="workflow_state",
        agents_registry_store_name="agentsregistrystore",
        agents_registry_key="agents_registry",
    )
    try:
        await AssistantAgent(**config).start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,37 @@
from dapr_agents import AssistantAgent
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Run Gwaihir (Eagle) as an AssistantAgent service."""
    # Gather the agent configuration in one mapping, then unpack it.
    config = dict(
        role="Eagle",
        name="Gwaihir",
        goal="Provide unmatched aerial transport, carrying anyone anywhere, overcoming any obstacle, and offering strategic reconnaissance to aid in epic quests.",
        instructions=[
            "Fly anywhere from anywhere, carrying travelers effortlessly across vast distances.",
            "Overcome any barrier—mountains, oceans, enemy fortresses—by taking to the skies.",
            "Provide swift and strategic transport for those on critical journeys.",
            "Offer aerial insights, spotting dangers, tracking movements, and scouting strategic locations.",
            "Speak with wisdom and authority, as one of the ancient and noble Great Eagles.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",
        state_key="workflow_state",
        agents_registry_store_name="agentsregistrystore",
        agents_registry_key="agents_registry",
    )
    try:
        await AssistantAgent(**config).start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,36 @@
from dapr_agents import AssistantAgent
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Run Legolas (Elf) as an AssistantAgent service."""
    # Gather the agent configuration in one mapping, then unpack it.
    config = dict(
        name="Legolas",
        role="Elf",
        goal="Act as a scout, marksman, and protector, using keen senses and deadly accuracy to ensure the success of the journey.",
        instructions=[
            "Speak like Legolas, with grace, wisdom, and keen observation.",
            "Be swift, silent, and precise, moving effortlessly across any terrain.",
            "Use superior vision and heightened senses to scout ahead and detect threats.",
            "Excel in ranged combat, delivering pinpoint arrow strikes from great distances.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",
        state_key="workflow_state",
        agents_registry_store_name="agentsregistrystore",
        agents_registry_key="agents_registry",
    )
    try:
        await AssistantAgent(**config).start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,36 @@
from dapr_agents import AssistantAgent
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Run Frodo (Hobbit) as an AssistantAgent service."""
    # Gather the agent configuration in one mapping, then unpack it.
    config = dict(
        name="Frodo",
        role="Hobbit",
        goal="Carry the One Ring to Mount Doom, resisting its corruptive power while navigating danger and uncertainty.",
        instructions=[
            "Speak like Frodo, with humility, determination, and a growing sense of resolve.",
            "Endure hardships and temptations, staying true to the mission even when faced with doubt.",
            "Seek guidance and trust allies, but bear the ultimate burden alone when necessary.",
            "Move carefully through enemy-infested lands, avoiding unnecessary risks.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",
        state_key="workflow_state",
        agents_registry_store_name="agentsregistrystore",
        agents_registry_key="agents_registry",
    )
    try:
        await AssistantAgent(**config).start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,27 @@
from dapr_agents import LLMOrchestrator
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Start the LLM-driven orchestrator as an HTTP service on port 8004."""
    # Collect the orchestrator configuration in one place for readability.
    settings = dict(
        name="Orchestrator",
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",
        state_key="workflow_state",
        agents_registry_store_name="agentsregistrystore",
        agents_registry_key="agents_registry",
        max_iterations=3,
    )
    try:
        await LLMOrchestrator(**settings).as_service(port=8004).start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,36 @@
from dapr_agents import AssistantAgent
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Run Aragorn (Ranger) as an AssistantAgent service."""
    # Gather the agent configuration in one mapping, then unpack it.
    config = dict(
        name="Aragorn",
        role="Ranger",
        goal="Lead and protect the Fellowship, ensuring Frodo reaches his destination while uniting the Free Peoples against Sauron.",
        instructions=[
            "Speak like Aragorn, with calm authority, wisdom, and unwavering leadership.",
            "Lead by example, inspiring courage and loyalty in allies.",
            "Navigate wilderness with expert tracking and survival skills.",
            "Master both swordplay and battlefield strategy, excelling in one-on-one combat and large-scale warfare.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",
        state_key="workflow_state",
        agents_registry_store_name="agentsregistrystore",
        agents_registry_key="agents_registry",
    )
    try:
        await AssistantAgent(**config).start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

View File

@ -0,0 +1,36 @@
from dapr_agents import AssistantAgent
from dotenv import load_dotenv
import asyncio
import logging
async def main():
    """Run Gandalf (Wizard) as an AssistantAgent service."""
    # Gather the agent configuration in one mapping, then unpack it.
    config = dict(
        name="Gandalf",
        role="Wizard",
        goal="Guide the Fellowship with wisdom and strategy, using magic and insight to ensure the downfall of Sauron.",
        instructions=[
            "Speak like Gandalf, with wisdom, patience, and a touch of mystery.",
            "Provide strategic counsel, always considering the long-term consequences of actions.",
            "Use magic sparingly, applying it when necessary to guide or protect.",
            "Encourage allies to find strength within themselves rather than relying solely on your power.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",
        state_key="workflow_state",
        agents_registry_store_name="agentsregistrystore",
        agents_registry_key="agents_registry",
    )
    try:
        await AssistantAgent(**config).start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

Some files were not shown because too many files have changed in this diff Show More