Mirror of https://github.com/dapr/dapr-agents.git
Compare commits
39 Commits

Author | SHA1 | Date
---|---|---
 | c2eff2b971 |
 | c4b1f7c441 |
 | f87e27f450 |
 | 1e5275834d |
 | b7b4a9891e |
 | 2fd44b3ecc |
 | 2757aab5b6 |
 | d86a4c5a70 |
 | 83fc449e39 |
 | 94bf5d2a38 |
 | 8741289e7d |
 | 41faa4f5b7 |
 | 76ad962b69 |
 | 28ac198055 |
 | e27f5befb0 |
 | 889b7bf7ef |
 | 4dce1c0300 |
 | 53c1c9ffde |
 | 6f20c0d9a0 |
 | 6823cd633d |
 | a878e76ec1 |
 | 75274ac607 |
 | f129754486 |
 | c31e985d81 |
 | f9eb48c02c |
 | 6f0cfc8818 |
 | fd28b02935 |
 | 356a25f281 |
 | bd0859d181 |
 | 099dc5d2fb |
 | b939d7d2f5 |
 | f870d35916 |
 | f5dc9372e7 |
 | 199fcf9d02 |
 | 3edbcf29c2 |
 | e8cb700652 |
 | 62d4cdbe02 |
 | c872c5a8bd |
 | d6fc2c89f0 |
@@ -0,0 +1,65 @@
name: Lint and Build

on:
  push:
    branches:
      - feature/*
      - feat/*
      - bugfix/*
      - hotfix/*
      - fix/*
  pull_request:
    branches:
      - main
      - feature/*
      - release-*
  workflow_dispatch:

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.10
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install setuptools wheel tox
      - name: Run Autoformatter
        run: |
          tox -e ruff
          statusResult=$(git status -u --porcelain)
          if [ -z "$statusResult" ]
          then
            exit 0
          else
            echo "Source files are not formatted correctly. Run 'tox -e ruff' to autoformat."
            exit 1
          fi
      - name: Run Linter
        run: |
          tox -e flake8

  build:
    needs: lint
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python_ver: ["3.10", "3.11", "3.12", "3.13"]
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python_ver }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python_ver }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install setuptools wheel tox
      - name: Check Typing
        run: |
          tox -e type
@@ -4,12 +4,12 @@ on:
     branches:
       - main
     paths:
-      - docs
+      - docs/**
   pull_request:
     branches:
       - main
     paths:
-      - docs
+      - docs/**
   workflow_dispatch:

 permissions:
   contents: write

@@ -18,7 +18,7 @@ jobs:
     runs-on: ubuntu-latest
     name: Review changed files
     outputs:
-      docs_any_changed: NaN
+      docs_any_changed: ${{ steps.changed-files.outputs.docs_any_changed }}
     steps:
       - uses: actions/checkout@v4
       - name: Get changed files
@@ -42,10 +42,16 @@ jobs:
-      - name: Remove plugins from mkdocs configuration
-        run: |
-          sed -i '/^plugins:/,/^[^ ]/d' mkdocs.yml
-      - name: Run MkDocs build
-        uses: Kjuly/mkdocs-page-builder@main
+      - name: Install Python dependencies
+        run: |
+          pip install mkdocs-material
+          pip install .[recommended,git,imaging]
+          pip install mkdocs-jupyter
+      - name: Validate build
+        run: mkdocs build

   deploy:
     if: github.ref == 'refs/heads/main'
     runs-on: ubuntu-latest
     needs: documentation_validation
     steps:

@@ -53,7 +59,7 @@ jobs:
       - uses: actions/setup-python@v5
         with:
           python-version: 3.x
-      - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
+      - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
       - uses: actions/cache@v4
         with:
           key: mkdocs-material-${{ env.cache_id }}
@@ -2,6 +2,7 @@
 .DS_Store
 secrets.json
 test
+.dapr

 # Byte-compiled / optimized / DLL files
 __pycache__/

@@ -164,3 +165,7 @@ cython_debug/
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/
 .idea
+
+.ruff_cache/
+
+quickstarts/05-multi-agent-workflow-dapr-workflows/services/**/*_state.json
@@ -1,86 +0,0 @@
# Code of Conduct

We are committed to fostering a welcoming, inclusive, and respectful environment for everyone involved in this project. This Code of Conduct outlines the expected behaviors within our community and the steps for reporting unacceptable actions. By participating, you agree to uphold these standards, helping to create a positive and collaborative space.

---

## Our Pledge

As members, contributors, and leaders of this community, we pledge to:

* Ensure participation in our project is free from harassment, discrimination, or exclusion.
* Treat everyone with respect and empathy, regardless of factors such as age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity or expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual orientation.
* Act in ways that contribute to a safe, welcoming, and supportive environment for all participants.

---

## Our Standards

We strive to create an environment where all members can thrive. Examples of positive behaviors include:

* Showing kindness, empathy, and consideration for others.
* Being respectful of differing opinions, experiences, and perspectives.
* Providing constructive feedback in a supportive manner.
* Taking responsibility for mistakes, apologizing when necessary, and learning from experiences.
* Prioritizing the success and well-being of the entire community over individual gains.

The following behaviors are considered unacceptable:

* Using sexualized language or imagery, or engaging in inappropriate sexual attention or advances.
* Making insulting, derogatory, or inflammatory comments, including trolling or personal attacks.
* Engaging in harassment, whether public or private.
* Publishing private or sensitive information about others without explicit consent.
* Engaging in behavior that disrupts discussions, events, or contributions in a negative way.
* Any conduct that could reasonably be deemed unprofessional or harmful to others.

---

## Scope

This Code of Conduct applies to all areas of interaction within the community, including but not limited to:

* Discussions on forums, repositories, or other official communication channels.
* Contributions made to the project, such as code, documentation, or issues.
* Public representation of the community, such as through official social media accounts or at events.

It also applies to actions outside these spaces if they negatively impact the health, safety, or inclusivity of the community.

---

## Enforcement Responsibilities

Community leaders are responsible for ensuring that this Code of Conduct is upheld. They may take appropriate and fair corrective actions in response to any behavior that violates these standards, including:

* Removing, editing, or rejecting comments, commits, issues, or other contributions not aligned with the Code of Conduct.
* Temporarily or permanently banning individuals for repeated or severe violations.

Leaders will always strive to communicate their decisions clearly and fairly.

---

## Reporting Issues

If you experience or witness unacceptable behavior, please report it to the project's owner [Roberto Rodriguez](https://www.linkedin.com/in/cyb3rward0g/). Your report will be handled with sensitivity, and we will respect your privacy and confidentiality while addressing the issue.

When reporting, please include:

* A description of the incident.
* When and where it occurred.
* Any additional context or supporting evidence, if available.

---

## Enforcement Process

We encourage resolving issues through dialogue when possible, but community leaders will intervene when necessary. Actions may include warnings, temporary bans, or permanent removal from the community, depending on the severity of the behavior.

---

## Attribution

This Code of Conduct is inspired by the [Contributor Covenant, version 2.0](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html) and has drawn inspiration from open source community guidelines by Microsoft, Mozilla, and others.

For further context on best practices for open source codes of conduct, see the [Contributor Covenant FAQ](https://www.contributor-covenant.org/faq).

---

Thank you for helping to create a positive environment! ❤️
README.md
@@ -1,5 +1,13 @@
 # Dapr Agents: A Framework for Agentic AI Systems

+[PyPI version](https://pypi.org/project/dapr-agents/)
+[PyPI downloads](https://pypi.org/project/dapr-agents/)
+[Build status](https://github.com/dapr/dapr-agents/actions/workflows/build.yaml)
+[License](https://github.com/dapr/dapr-agents/blob/main/LICENSE)
+[Discord](http://bit.ly/dapr-discord)
+[YouTube](https://youtube.com/@daprdev)
+[X (Twitter)](https://twitter.com/daprdev)

 Dapr Agents is a developer framework for building production-grade, resilient AI agent systems that operate at scale. Built on top of the battle-tested Dapr project, it enables developers to create AI agents that reason, act, and collaborate using Large Language Models (LLMs), while leveraging built-in observability and stateful workflow execution to guarantee that agentic workflows complete successfully, no matter how complex.
@@ -26,7 +34,7 @@ Dapr Agents builds on top of Dapr's Workflow API, which under the hood represent

 ### Data-Centric AI Agents

-With built-in connectivity to over 50 enterprise data sources, Dapr Agents efficiently handles structured and unstructured data. From basic [PDF extraction](./docs/concepts/arxiv_fetcher.md) to large-scale database interactions, it enables seamless data-driven AI workflows with minimal code changes. Dapr's [bindings](https://docs.dapr.io/reference/components-reference/supported-bindings/) and [state stores](https://docs.dapr.io/reference/components-reference/supported-state-stores/) provide access to a large number of data sources that can be used to ingest data to an agent. [MCP integration](https://docs.anthropic.com/en/docs/agents-and-tools/mcp) is coming soon.
+With built-in connectivity to over 50 enterprise data sources, Dapr Agents efficiently handles structured and unstructured data. From basic [PDF extraction](./docs/concepts/arxiv_fetcher.md) to large-scale database interactions, it enables seamless data-driven AI workflows with minimal code changes. Dapr's [bindings](https://docs.dapr.io/reference/components-reference/supported-bindings/) and [state stores](https://docs.dapr.io/reference/components-reference/supported-state-stores/) provide access to a large number of data sources that can be used to ingest data into an agent.
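The sketch below makes that ingestion path concrete. It follows the `AgentTool` shape used by the weather tools later in this diff (a `name`, a `description`, and a `_run` method); the `AgentTool` import path, the Dapr state-store component name, and the class itself are illustrative assumptions, not code from this repository.

```python
# Illustrative sketch only: mirrors the AgentTool pattern shown later in this diff.
# The import path for AgentTool and the "statestore" component name are assumptions.
from typing import Optional

from dapr.clients import DaprClient          # Dapr Python SDK
from dapr_agents.tool import AgentTool       # import path assumed


class CustomerRecordLookup(AgentTool):
    name: str = "CustomerRecordLookup"
    description: str = "Fetch a customer record from a Dapr state store by key."

    def _run(self, customer_id: str, store_name: Optional[str] = "statestore") -> dict:
        # Read the record straight from the configured Dapr state store component.
        with DaprClient() as client:
            item = client.get_state(store_name=store_name, key=customer_id)
            record = item.data.decode() if item.data else None
            return {"customer_id": customer_id, "record": record}
```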
 ### Accelerated Development

@@ -38,6 +46,7 @@ Dapr Agents provides a set of AI features that give developers a complete API surface:
 - Contextual memory
 - Flexible prompting
 - Intelligent tool selection
+- [MCP integration](https://docs.anthropic.com/en/docs/agents-and-tools/mcp)

 ### Integrated Security and Reliability
@@ -55,6 +64,35 @@ By building on top of Dapr, platform and infrastructure teams can apply Dapr's [

 As a part of **CNCF**, Dapr Agents is vendor-neutral, eliminating concerns about lock-in, intellectual property risks, or proprietary restrictions. Organizations gain full flexibility and control over their AI applications using open-source software they can audit and contribute to.

+## Roadmap
+
+Here are some of the major features we're working on for the current quarter:
+
+### Q2 2025
+- **MCP Support** - Integration with Anthropic's MCP platform ([#50](https://github.com/dapr/dapr-agents/issues/50) ✅)
+- **Agent Interaction Tracing** - Enhanced observability of agent interactions with LLMs and tools ([#79](https://github.com/dapr/dapr-agents/issues/79))
+- **Streaming LLM Output** - Real-time streaming capabilities for LLM responses ([#80](https://github.com/dapr/dapr-agents/issues/80))
+- **HTTP Endpoint Tools** - Support for using Dapr's HTTP endpoint capabilities for tool calling ([#81](https://github.com/dapr/dapr-agents/issues/81))
+- **DSL Cleanup** - Streamlining the domain-specific language and removing actor dependencies ([#65](https://github.com/dapr/dapr-agents/issues/65))
+- **Samples Registry** - A dedicated repository for Dapr Agents examples and use cases
+
+### Q3/Q4 2025
+- **Human-in-the-Loop Support**
+- **Conversation API Progressed to Beta**
+- **Vector API** - Vector operations support in Dapr and Dapr Agents
+
+For more details about these features and other planned work, please check out our [GitHub issues](https://github.com/dapr/dapr-agents/issues).
+
+### Language Support
+
+| Language | Current Status | Development Status | Stable Status |
+|----------|----------------|--------------------|---------------|
+| Python | In Development | Q2 2025 | Q3 2025 |
+| .NET | Planning | Q3 2025 | Q4 2025 |
+| Other Languages | Coming Soon | TBD | TBD |

 ## Getting Started

 Prerequisites:
@@ -62,6 +100,8 @@ Prerequisites:

- [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/)
- [Python 3.10](https://www.python.org/downloads/release/python-3100/)

### Install Dapr Agents

```bash
pip install dapr-agents
```
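As a quick post-install sanity check, the imports below are taken verbatim from the executor notebook later in this diff; nothing else is assumed:

```python
# Post-install smoke test; these import paths appear verbatim in the notebooks below.
from dapr_agents.executors.local import LocalCodeExecutor
from dapr_agents.types.executor import CodeSnippet, ExecutionRequest

print("dapr-agents imports OK:", LocalCodeExecutor, CodeSnippet, ExecutionRequest)
```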
@@ -111,7 +111,7 @@
 "INFO:dapr_agents.llm.openai.client.base:Initializing OpenAI client...\n",
 "INFO:dapr_agents.tool.executor:Tool registered: GetWeather\n",
 "INFO:dapr_agents.tool.executor:Tool registered: Jump\n",
-"INFO:dapr_agents.tool.executor:Tool Executor initialized with 2 registered tools.\n",
+"INFO:dapr_agents.tool.executor:Tool Executor initialized with 2 tool(s).\n",
 "INFO:dapr_agents.agent.base:Constructing system_prompt from agent attributes.\n",
 "INFO:dapr_agents.agent.base:Using system_prompt to create the prompt template.\n",
 "INFO:dapr_agents.agent.base:Pre-filled prompt template with attributes: ['name', 'role', 'goal']\n"
@@ -136,7 +136,7 @@
 {
  "data": {
   "text/plain": [
-   "ChatPromptTemplate( ... identical to the result below except that the prefilled system prompt begins '# Today\\'s date is: March 04, 2025' ... )"
+   "ChatPromptTemplate(input_variables=['chat_history'], pre_filled_variables={'name': 'Rob', 'role': 'Weather Assistant', 'goal': 'Help humans'}, messages=[('system', '# Today\\'s date is: April 05, 2025\\n\\n## Name\\nYour name is {{name}}.\\n\\n## Role\\nYour role is {{role}}.\\n\\n## Goal\\n{{goal}}.\\n\\n## Tools\\nYou have access ONLY to the following tools:\\nGetWeather: Get weather information for a specific location.. Args schema: {\\'location\\': {\\'description\\': \\'location to get weather for\\', \\'type\\': \\'string\\'}}\\nJump: Jump a specific distance.. Args schema: {\\'distance\\': {\\'description\\': \\'Distance for agent to jump\\', \\'type\\': \\'string\\'}}\\n\\nIf you think about using tool, it must use the correct tool JSON blob format as shown below:\\n```\\n{\\n \"name\": $TOOL_NAME,\\n \"arguments\": $INPUT\\n}\\n```\\n\\n## ReAct Format\\nThought: Reflect on the current state of the conversation or task. If additional information is needed, determine if using a tool is necessary. When a tool is required, briefly explain why it is needed for the specific step at hand, and immediately follow this with an `Action:` statement to address that specific requirement. Avoid combining multiple tool requests in a single `Thought`. If no tools are needed, proceed directly to an `Answer:` statement.\\nAction:\\n```\\n{\\n \"name\": $TOOL_NAME,\\n \"arguments\": $INPUT\\n}\\n```\\nObservation: Describe the result of the action taken.\\n... (repeat Thought/Action/Observation as needed, but **ALWAYS proceed to a final `Answer:` statement when you have enough information**)\\nThought: I now have sufficient information to answer the initial question.\\nAnswer: ALWAYS proceed to a final `Answer:` statement once enough information is gathered or if the tools do not provide the necessary data.\\n\\n### Providing a Final Answer\\nOnce you have enough information to answer the question OR if tools cannot provide the necessary data, respond using one of the following formats:\\n\\n1. **Direct Answer without Tools**:\\nThought: I can answer directly without using any tools. Answer: Direct answer based on previous interactions or current knowledge.\\n\\n2. **When All Needed Information is Gathered**:\\nThought: I now have sufficient information to answer the question. Answer: Complete final answer here.\\n\\n3. **If Tools Cannot Provide the Needed Information**:\\nThought: The available tools do not provide the necessary information. Answer: Explanation of limitation and relevant information if possible.\\n\\n### Key Guidelines\\n- Always Conclude with an `Answer:` statement.\\n- Ensure every response ends with an `Answer:` statement that summarizes the most recent findings or relevant information, avoiding incomplete thoughts.\\n- Direct Final Answer for Past or Known Information: If the user inquires about past interactions, respond directly with an Answer: based on the information in chat history.\\n- Avoid Repetitive Thought Statements: If the answer is ready, skip repetitive Thought steps and proceed directly to Answer.\\n- Minimize Redundant Steps: Use minimal Thought/Action/Observation cycles to arrive at a final Answer efficiently.\\n- Reference Past Information When Relevant: Use chat history accurately when answering questions about previous responses to avoid redundancy.\\n- Progressively Move Towards Finality: Reflect on the current step and avoid re-evaluating the entire user request each time. Aim to advance towards the final Answer in each cycle.\\n\\n## Chat History\\nThe chat history is provided to avoid repeating information and to ensure accurate references when summarizing past interactions.'), MessagePlaceHolder(variable_name=chat_history)], template_format='jinja2')"
   ]
  },
  "execution_count": 5,
@@ -184,7 +184,6 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
 "INFO:dapr_agents.agent.base:Pre-filled prompt template with variables: dict_keys(['chat_history'])\n",
 "INFO:dapr_agents.agent.patterns.react.base:Iteration 1/10 started.\n",
 "INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
 ]
@@ -215,7 +214,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Thought: Hello Roberto! How can I assist you today with the weather?\n"
+"Thought: Hello Roberto! How can I assist you today?\n"
 ]
 },
 {
@@ -231,18 +230,18 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Thought: Answer: Hello Roberto! How can I assist you today with the weather?\n",
+"Thought: Answer: Hello Roberto! How can I assist you today?\n",
 "\n",
 "--------------------------------------------------------------------------------\n",
 "\n",
 "assistant:\n",
-"Hello Roberto! How can I assist you today with the weather?\n"
+"Hello Roberto! How can I assist you today?\n"
 ]
 },
 {
 "data": {
  "text/plain": [
-  "'Hello Roberto! How can I assist you today with the weather?'"
+  "'Hello Roberto! How can I assist you today?'"
 ]
 },
 "execution_count": 7,
@@ -251,7 +250,7 @@
 }
 ],
 "source": [
-"AIAgent.run(\"Hi my name is Roberto\")"
+"await AIAgent.run(\"Hi my name is Roberto\")"
 ]
 },
 {
@@ -263,7 +262,7 @@
 "data": {
  "text/plain": [
   "[{'role': 'user', 'content': 'Hi my name is Roberto'},\n",
-  " {'content': 'Hello Roberto! How can I assist you today with the weather?',\n",
+  " {'content': 'Hello Roberto! How can I assist you today?',\n",
   "  'role': 'assistant'}]"
 ]
 },
@@ -285,7 +284,6 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
 "INFO:dapr_agents.agent.base:Pre-filled prompt template with variables: dict_keys(['chat_history'])\n",
 "INFO:dapr_agents.agent.patterns.react.base:Iteration 1/10 started.\n",
 "INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
 ]
@@ -308,11 +306,7 @@
 "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
 "INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
-"INFO:dapr_agents.agent.patterns.react.base:Executing GetWeather with arguments {'location': 'Virginia'}\n",
-"INFO:dapr_agents.tool.executor:Attempting to execute tool: GetWeather\n",
-"INFO:dapr_agents.tool.executor:Tool 'GetWeather' executed successfully.\n",
-"INFO:dapr_agents.agent.patterns.react.base:Thought:I will need to gather the current weather information for both Virginia and Washington, D.C. by using the GetWeather tool.\n",
-"Action:{'name': 'GetWeather', 'arguments': {'location': 'Virginia'}}\n",
-"Observation:Virginia: 74F.\n",
+"INFO:dapr_agents.tool.executor:Running tool (auto): GetWeather\n",
 "INFO:dapr_agents.agent.patterns.react.base:Iteration 2/10 started.\n",
 "INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
 ]
@@ -321,9 +315,9 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Thought: I will need to gather the current weather information for both Virginia and Washington, D.C. by using the GetWeather tool.\n",
+"Thought: I need to get the current weather information for Virginia, New York, and Washington DC. I will fetch the data for each location separately. Let's start with Virginia.\n",
 "Action: {\"name\": \"GetWeather\", \"arguments\": {\"location\": \"Virginia\"}}\n",
-"Observation: Virginia: 74F.\n"
+"Observation: Virginia: 77F.\n"
 ]
 },
 {
@@ -333,11 +327,7 @@
 "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
 "INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
-"INFO:dapr_agents.agent.patterns.react.base:Executing GetWeather with arguments {'location': 'New York'}\n",
-"INFO:dapr_agents.tool.executor:Attempting to execute tool: GetWeather\n",
-"INFO:dapr_agents.tool.executor:Tool 'GetWeather' executed successfully.\n",
-"INFO:dapr_agents.agent.patterns.react.base:Thought:\n",
-"Action:{'name': 'GetWeather', 'arguments': {'location': 'New York'}}\n",
-"Observation:New York: 65F.\n",
+"INFO:dapr_agents.tool.executor:Running tool (auto): GetWeather\n",
 "INFO:dapr_agents.agent.patterns.react.base:Iteration 3/10 started.\n",
 "INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
 ]
@@ -346,9 +336,9 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Thought: \n",
+"Thought: I now have the weather information for Virginia. Next, I will get the weather information for New York.\n",
 "Action: {\"name\": \"GetWeather\", \"arguments\": {\"location\": \"New York\"}}\n",
-"Observation: New York: 65F.\n"
+"Observation: New York: 68F.\n"
 ]
 },
 {
@@ -358,11 +348,7 @@
 "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
 "INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
-"INFO:dapr_agents.agent.patterns.react.base:Executing GetWeather with arguments {'location': 'Washington DC'}\n",
-"INFO:dapr_agents.tool.executor:Attempting to execute tool: GetWeather\n",
-"INFO:dapr_agents.tool.executor:Tool 'GetWeather' executed successfully.\n",
-"INFO:dapr_agents.agent.patterns.react.base:Thought:\n",
-"Action:{'name': 'GetWeather', 'arguments': {'location': 'Washington DC'}}\n",
-"Observation:Washington DC: 66F.\n",
+"INFO:dapr_agents.tool.executor:Running tool (auto): GetWeather\n",
 "INFO:dapr_agents.agent.patterns.react.base:Iteration 4/10 started.\n",
 "INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
 ]
@@ -371,9 +357,9 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Thought: \n",
+"Thought: I have the weather information for Virginia and New York. Next, I will get the weather information for Washington DC.\n",
 "Action: {\"name\": \"GetWeather\", \"arguments\": {\"location\": \"Washington DC\"}}\n",
-"Observation: Washington DC: 66F.\n"
+"Observation: Washington DC: 69F.\n"
 ]
 },
 {
@@ -389,26 +375,26 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Thought: I now have sufficient information to answer the question. \n",
+"Thought: I now have the weather information for all requested locations. \n",
 "\n",
 "Answer: The current weather is as follows:\n",
-"- Virginia: 74°F\n",
-"- New York: 65°F\n",
-"- Washington, D.C.: 66°F\n",
+"- Virginia: 77°F\n",
+"- New York: 68°F\n",
+"- Washington DC: 69°F\n",
 "\n",
 "--------------------------------------------------------------------------------\n",
 "\n",
 "assistant:\n",
 "The current weather is as follows:\n",
-"- Virginia: 74°F\n",
-"- New York: 65°F\n",
-"- Washington, D.C.: 66°F\n"
+"- Virginia: 77°F\n",
+"- New York: 68°F\n",
+"- Washington DC: 69°F\n"
 ]
 },
 {
 "data": {
  "text/plain": [
-  "'The current weather is as follows:\\n- Virginia: 74°F\\n- New York: 65°F\\n- Washington, D.C.: 66°F'"
+  "'The current weather is as follows:\\n- Virginia: 77°F\\n- New York: 68°F\\n- Washington DC: 69°F'"
 ]
 },
 "execution_count": 9,
@@ -417,124 +403,8 @@
 }
 ],
 "source": [
-"AIAgent.run(\"What is the weather in Virgina, New York and Washington DC?\")"
+"await AIAgent.run(\"What is the weather in Virgina, New York and Washington DC?\")"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": 10,
-"metadata": {},
-"outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"INFO:dapr_agents.agent.base:Pre-filled prompt template with variables: dict_keys(['chat_history'])\n",
-"INFO:dapr_agents.agent.patterns.react.base:Iteration 1/10 started.\n",
-"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
-]
-},
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"user:\n",
-"What places did you already help me with the weather?\n",
-"\n",
-"--------------------------------------------------------------------------------\n",
-"\n"
-]
-},
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
-"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
-"INFO:dapr_agents.agent.patterns.react.base:No action specified; continuing with further reasoning.\n",
-"INFO:dapr_agents.agent.patterns.react.base:Iteration 2/10 started.\n",
-"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
-]
-},
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Thought: You asked about the weather in Virginia, New York, and Washington, D.C., and I provided you with the current temperatures for those locations.\n"
-]
-},
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
-"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
-"INFO:dapr_agents.agent.patterns.react.base:Agent provided a direct final answer.\n"
-]
-},
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Thought: Answer: I helped you with the weather for Virginia, New York, and Washington, D.C.\n",
-"\n",
-"--------------------------------------------------------------------------------\n",
-"\n",
-"assistant:\n",
-"I helped you with the weather for Virginia, New York, and Washington, D.C.\n"
-]
-},
-{
-"data": {
-"text/plain": [
-"'I helped you with the weather for Virginia, New York, and Washington, D.C.'"
-]
-},
-"execution_count": 10,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
-"source": [
-"AIAgent.run(\"What places did you already help me with the weather?\")"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 11,
-"metadata": {},
-"outputs": [
-{
-"data": {
-"text/plain": [
-"[{'role': 'user', 'content': 'Hi my name is Roberto'},\n",
-" {'content': 'Hello Roberto! How can I assist you today with the weather?',\n",
-"  'role': 'assistant'},\n",
-" {'role': 'user',\n",
-"  'content': 'What is the weather in Virgina, New York and Washington DC?'},\n",
-" {'content': 'The current weather is as follows:\\n- Virginia: 74°F\\n- New York: 65°F\\n- Washington, D.C.: 66°F',\n",
-"  'role': 'assistant'},\n",
-" {'role': 'user',\n",
-"  'content': 'What places did you already help me with the weather?'},\n",
-" {'content': 'I helped you with the weather for Virginia, New York, and Washington, D.C.',\n",
-"  'role': 'assistant'}]"
-]
-},
-"execution_count": 11,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
-"source": [
-"AIAgent.chat_history"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": []
-}
 ],
 "metadata": {

@@ -553,7 +423,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.12.1"
+"version": "3.13.1"
 }
 },
 "nbformat": 4,
@@ -542,7 +542,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 18,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {

@@ -840,7 +840,7 @@
 ],
 "source": [
 "prompt = \"Get information about a user with ID da48bd32-94bd-4263-b23a-5b9820a67fab\"\n",
-"AIAgent.run(prompt)"
+"await AIAgent.run(prompt)"
 ]
 },
 {
@@ -114,7 +114,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {

@@ -156,7 +156,7 @@
 }
 ],
 "source": [
-"weather_agent.run(\"what will be the difference of temperature in Paris between 7 days ago and 7 from now?\")"
+"await weather_agent.run(\"what will be the difference of temperature in Paris between 7 days ago and 7 from now?\")"
 ]
 },
 {

@@ -188,7 +188,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"weather_agent.run(\"What was the weather like in Paris two days ago?\")"
+"await weather_agent.run(\"What was the weather like in Paris two days ago?\")"
 ]
 },
 {
@@ -113,7 +113,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {

@@ -155,12 +155,12 @@
 }
 ],
 "source": [
-"weather_agent.run(\"what is the weather in Paris?\")"
+"await weather_agent.run(\"what is the weather in Paris?\")"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {

@@ -229,7 +229,7 @@
 }
 ],
 "source": [
-"weather_agent.run(\"what was the weather like in Paris two days ago?\")"
+"await weather_agent.run(\"what was the weather like in Paris two days ago?\")"
 ]
 },
 {
@@ -4,9 +4,10 @@ from datetime import datetime
 import requests
 import time


 class WeatherForecast(AgentTool):
-    name: str = 'WeatherForecast'
-    description: str = 'A tool for retrieving the weather/temperature for a given city.'
+    name: str = "WeatherForecast"
+    description: str = "A tool for retrieving the weather/temperature for a given city."

     # Default user agent
     user_agent: str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15"

@@ -23,24 +24,26 @@ class WeatherForecast(AgentTool):
             f"No data found during {stage}. URL: {url}. Response: {response.text}"
         )

-    def _run(self, city: str, state: Optional[str] = None, country: Optional[str] = "usa") -> dict:
+    def _run(
+        self, city: str, state: Optional[str] = None, country: Optional[str] = "usa"
+    ) -> dict:
         """
         Retrieves weather data by first fetching geocode data for the city and then fetching weather data.

         Args:
             city (str): The name of the city to get weather for.
             state (Optional[str]): The two-letter state abbreviation (optional).
             country (Optional[str]): The two-letter country abbreviation. Defaults to 'usa'.

         Returns:
             dict: A dictionary containing the city, state, country, and current temperature.
         """
-        headers = {
-            "User-Agent": self.user_agent
-        }
+        headers = {"User-Agent": self.user_agent}

         # Construct the geocode URL, conditionally including the state if it's provided
-        geocode_url = f"https://nominatim.openstreetmap.org/search?city={city}&country={country}"
+        geocode_url = (
+            f"https://nominatim.openstreetmap.org/search?city={city}&country={country}"
+        )
         if state:
             geocode_url += f"&state={state}"
         geocode_url += "&limit=1&format=jsonv2"

@@ -64,7 +67,7 @@ class WeatherForecast(AgentTool):

         # Add delay between requests
         time.sleep(2)

         weather_data = weather_response.json()
         forecast_url = weather_data["properties"]["forecast"]

@@ -81,7 +84,7 @@ class WeatherForecast(AgentTool):
             "state": state,
             "country": country,
             "temperature": today_forecast["temperature"],
-            "unit": "Fahrenheit"
+            "unit": "Fahrenheit",
         }

         else:

@@ -91,8 +94,12 @@ class WeatherForecast(AgentTool):
             self.handle_error(weather_response, met_no_url, "Met.no weather lookup")

             weather_data = weather_response.json()
-            temperature_unit = weather_data["properties"]["meta"]["units"]["air_temperature"]
-            today_forecast = weather_data["properties"]["timeseries"][0]["data"]["instant"]["details"]["air_temperature"]
+            temperature_unit = weather_data["properties"]["meta"]["units"][
+                "air_temperature"
+            ]
+            today_forecast = weather_data["properties"]["timeseries"][0]["data"][
+                "instant"
+            ]["details"]["air_temperature"]

             # Return the weather data along with the city, state, and country
             return {

@@ -100,12 +107,15 @@ class WeatherForecast(AgentTool):
             "state": state,
             "country": country,
             "temperature": today_forecast,
-            "unit": temperature_unit
+            "unit": temperature_unit,
         }


 class HistoricalWeather(AgentTool):
-    name: str = 'HistoricalWeather'
-    description: str = 'A tool for retrieving historical weather data (temperature) for a given city.'
+    name: str = "HistoricalWeather"
+    description: str = (
+        "A tool for retrieving historical weather data (temperature) for a given city."
+    )

     # Default user agent
     user_agent: str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15"

@@ -122,34 +132,48 @@ class HistoricalWeather(AgentTool):
             f"No data found during {stage}. URL: {url}. Response: {response.text}"
         )

-    def _run(self, city: str, state: Optional[str] = None, country: Optional[str] = "usa", start_date: Optional[str] = None, end_date: Optional[str] = None) -> dict:
+    def _run(
+        self,
+        city: str,
+        state: Optional[str] = None,
+        country: Optional[str] = "usa",
+        start_date: Optional[str] = None,
+        end_date: Optional[str] = None,
+    ) -> dict:
         """
         Retrieves historical weather data for the city by first fetching geocode data and then historical weather data.

         Args:
             city (str): The name of the city to get weather for.
             state (Optional[str]): The two-letter state abbreviation (optional).
             country (Optional[str]): The two-letter country abbreviation. Defaults to 'usa'.
             start_date (Optional[str]): Start date for historical data (YYYY-MM-DD format).
             end_date (Optional[str]): End date for historical data (YYYY-MM-DD format).

         Returns:
             dict: A dictionary containing the city, state, country, and historical temperature data.
         """
-        headers = {
-            "User-Agent": self.user_agent
-        }
+        headers = {"User-Agent": self.user_agent}

         # Validate dates
-        current_date = datetime.now().strftime('%Y-%m-%d')
+        current_date = datetime.now().strftime("%Y-%m-%d")
         if start_date >= current_date or end_date >= current_date:
-            raise ValueError("Both start_date and end_date must be earlier than the current date.")
-
-        if (datetime.strptime(end_date, "%Y-%m-%d") - datetime.strptime(start_date, "%Y-%m-%d")).days > 30:
-            raise ValueError("The time span between start_date and end_date cannot exceed 30 days.")
+            raise ValueError(
+                "Both start_date and end_date must be earlier than the current date."
+            )
+
+        if (
+            datetime.strptime(end_date, "%Y-%m-%d")
+            - datetime.strptime(start_date, "%Y-%m-%d")
+        ).days > 30:
+            raise ValueError(
+                "The time span between start_date and end_date cannot exceed 30 days."
+            )

         # Construct the geocode URL, conditionally including the state if it's provided
-        geocode_url = f"https://nominatim.openstreetmap.org/search?city={city}&country={country}"
+        geocode_url = (
+            f"https://nominatim.openstreetmap.org/search?city={city}&country={country}"
+        )
         if state:
             geocode_url += f"&state={state}"
         geocode_url += "&limit=1&format=jsonv2"

@@ -167,7 +191,9 @@ class HistoricalWeather(AgentTool):
         # Historical weather request
         historical_weather_url = f"https://archive-api.open-meteo.com/v1/archive?latitude={lat}&longitude={lon}&start_date={start_date}&end_date={end_date}&hourly=temperature_2m"
         weather_response = requests.get(historical_weather_url, headers=headers)
-        self.handle_error(weather_response, historical_weather_url, "historical weather lookup")
+        self.handle_error(
+            weather_response, historical_weather_url, "historical weather lookup"
+        )

         weather_data = weather_response.json()

@@ -177,7 +203,9 @@ class HistoricalWeather(AgentTool):
         temperature_unit = weather_data["hourly_units"]["temperature_2m"]

         # Combine timestamps and temperatures into a dictionary
-        temperature_data = {timestamps[i]: temperatures[i] for i in range(len(timestamps))}
+        temperature_data = {
+            timestamps[i]: temperatures[i] for i in range(len(timestamps))
+        }

         # Return the structured weather data along with the city, state, country
         return {

@@ -187,5 +215,5 @@ class HistoricalWeather(AgentTool):
         "start_date": start_date,
         "end_date": end_date,
         "temperature_data": temperature_data,
-        "unit": temperature_unit
-        }
+        "unit": temperature_unit,
+    }
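For orientation, here is a minimal sketch of exercising these tools directly, outside an agent loop. Only the `_run` signatures shown above are taken from the source; the module name `weather_tools` and the direct `_run` calls are assumptions for illustration.

```python
# Hypothetical usage sketch: the module name "weather_tools" is assumed, and
# calling _run() directly bypasses whatever dispatch AgentTool normally performs.
from weather_tools import WeatherForecast, HistoricalWeather

forecast = WeatherForecast()
# Signature from the diff above: _run(city, state=None, country="usa") -> dict
print(forecast._run(city="New York", state="NY"))

history = HistoricalWeather()
# Per the validation above: both dates must be in the past, spanning at most 30 days.
print(history._run(city="Paris", country="france",
                   start_date="2025-03-01", end_date="2025-03-07"))
```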
@@ -0,0 +1,501 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "39c2dcc0",
   "metadata": {},
   "source": [
    "# Executor: LocalCodeExecutor Basic Examples\n",
    "\n",
    "This notebook shows how to execute Python and shell snippets in **isolated, cached virtual environments**."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c4ff4b2b",
   "metadata": {},
   "source": [
    "## Install Required Libraries\n",
    "Before starting, ensure the required libraries are installed:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5b41a66a",
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install dapr-agents"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a9c01be3",
   "metadata": {},
   "source": [
    "## Setup"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "508fd446",
   "metadata": {},
   "outputs": [],
   "source": [
    "import logging\n",
    "\n",
    "from dapr_agents.executors.local import LocalCodeExecutor\n",
    "from dapr_agents.types.executor import CodeSnippet, ExecutionRequest\n",
    "from rich.console import Console\n",
    "from rich.ansi import AnsiDecoder\n",
    "import shutil"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "27594072",
   "metadata": {},
   "outputs": [],
   "source": [
    "logging.basicConfig(level=logging.INFO)\n",
    "\n",
    "executor = LocalCodeExecutor()\n",
    "console = Console()\n",
    "decoder = AnsiDecoder()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4d663475",
   "metadata": {},
   "source": [
    "## Running a basic Python Code Snippet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ba45ddc8",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:dapr_agents.executors.local:Sandbox backend enabled: seatbelt\n",
      "INFO:dapr_agents.executors.local:Created a new virtual environment\n",
      "INFO:dapr_agents.executors.local:Installing print, rich\n",
      "INFO:dapr_agents.executors.local:Snippet 1 finished in 2.442s\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #008000; text-decoration-color: #008000; font-weight: bold\">Hello executor!</span>\n",
       "</pre>\n"
      ],
      "text/plain": [
       "\u001b[1;32mHello executor!\u001b[0m\n"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "code = \"\"\"\n",
    "from rich import print\n",
    "print(\"[bold green]Hello executor![/bold green]\")\n",
    "\"\"\"\n",
    "\n",
    "request = ExecutionRequest(snippets=[\n",
    "    CodeSnippet(language='python', code=code, timeout=10)\n",
    "])\n",
    "\n",
    "results = await executor.execute(request)\n",
    "results[0]  # raw result\n",
    "\n",
    "# pretty-print with Rich\n",
    "console.print(*decoder.decode(results[0].output))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d28c7531",
   "metadata": {},
   "source": [
    "## Run a Shell Snippet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "4ea89b85",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:dapr_agents.executors.local:Sandbox backend enabled: seatbelt\n",
      "INFO:dapr_agents.executors.local:Snippet 1 finished in 0.019s\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[ExecutionResult(status='success', output='4\\n', exit_code=0)]"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "shell_request = ExecutionRequest(snippets=[\n",
    "    CodeSnippet(language='sh', code='echo $((2+2))', timeout=5)\n",
    "])\n",
    "\n",
    "await executor.execute(shell_request)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "da281b6e",
   "metadata": {},
   "source": [
    "## Reuse the cached virtual environment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "3e9e7e9b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:dapr_agents.executors.local:Sandbox backend enabled: seatbelt\n",
      "INFO:dapr_agents.executors.local:Reusing cached virtual environment.\n",
      "INFO:dapr_agents.executors.local:Installing print, rich\n",
      "INFO:dapr_agents.executors.local:Snippet 1 finished in 0.297s\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[ExecutionResult(status='success', output='\\x1b[1;32mHello executor!\\x1b[0m\\n', exit_code=0)]"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Re-running the same Python request will reuse the cached venv, so it is faster\n",
    "await executor.execute(request)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "14dc3e4c",
   "metadata": {},
   "source": [
    "## Inject Helper Functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "82f9a168",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:dapr_agents.executors.local:Sandbox backend enabled: seatbelt\n",
      "INFO:dapr_agents.executors.local:Created a new virtual environment\n",
      "INFO:dapr_agents.executors.local:Snippet 1 finished in 1.408s\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[ExecutionResult(status='success', output='42\\n', exit_code=0)]"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def fancy_sum(a: int, b: int) -> int:\n",
    "    return a + b\n",
    "\n",
    "executor.user_functions.append(fancy_sum)\n",
    "\n",
    "helper_request = ExecutionRequest(snippets=[\n",
    "    CodeSnippet(language='python', code='print(fancy_sum(40, 2))', timeout=5)\n",
    "])\n",
    "\n",
    "await executor.execute(helper_request)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "25f9718c",
   "metadata": {},
   "source": [
    "## Clean Up"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "b09059f1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Cache directory removed ✅\n"
     ]
    }
   ],
   "source": [
    "shutil.rmtree(executor.cache_dir, ignore_errors=True)\n",
    "print(\"Cache directory removed ✅\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2c93cdef",
   "metadata": {},
   "source": [
    "## Package-manager detection & automatic bootstrap"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "8691f3e3",
   "metadata": {},
   "outputs": [],
   "source": [
    "from dapr_agents.executors.utils import package_manager as pm\n",
    "import pathlib, tempfile"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e9e08d81",
   "metadata": {},
   "source": [
    "### Create a throw-away project"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "4c7dd9c3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tmp project: /var/folders/9z/8xhqw8x1611fcbhzl339yrs40000gn/T/tmpmssk0m2b\n"
     ]
    }
   ],
   "source": [
    "tmp_proj = pathlib.Path(tempfile.mkdtemp())\n",
    "(tmp_proj / \"requirements.txt\").write_text(\"rich==13.7.0\\n\")\n",
    "print(\"tmp project:\", tmp_proj)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "03558a95",
   "metadata": {},
   "source": [
    "### Show what the helper detects"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "3b5acbfb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "detect_package_managers -> [<PackageManagerType.PIP: 'pip'>]\n",
      "get_install_command -> pip install -r requirements.txt\n"
     ]
    }
   ],
   "source": [
    "print(\"detect_package_managers ->\",\n",
    "      [m.name for m in pm.detect_package_managers(tmp_proj)])\n",
    "print(\"get_install_command ->\",\n",
    "      pm.get_install_command(tmp_proj))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "42f1ae7c",
   "metadata": {},
   "source": [
    "### Point the executor at that directory"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "81e53cf4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from contextlib import contextmanager, ExitStack\n",
    "\n",
    "@contextmanager\n",
    "def chdir(path):\n",
    "    \"\"\"\n",
    "    Temporarily change the process CWD to *path*.\n",
    "\n",
    "    Works on every CPython >= 3.6 (and PyPy) and restores the old directory\n",
    "    even if an exception is raised inside the block.\n",
    "    \"\"\"\n",
    "    old_cwd = os.getcwd()\n",
    "    os.chdir(path)\n",
    "    try:\n",
    "        yield\n",
    "    finally:\n",
    "        os.chdir(old_cwd)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "fb2f5052",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:dapr_agents.executors.local:bootstrapping python project with 'pip install -r requirements.txt'\n",
      "INFO:dapr_agents.executors.local:Sandbox backend enabled: seatbelt\n",
      "INFO:dapr_agents.executors.local:Created a new virtual environment\n",
      "INFO:dapr_agents.executors.local:Snippet 1 finished in 1.433s\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">bootstrap OK\n",
       "\n",
       "</pre>\n"
      ],
      "text/plain": [
       "bootstrap OK\n",
       "\n"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "with ExitStack() as stack:\n",
    "    # keep a directory handle open (optional but handy if you'll delete tmp_proj later)\n",
    "    stack.enter_context(os.scandir(tmp_proj))\n",
    "\n",
    "    # portable replacement for contextlib.chdir()\n",
    "    stack.enter_context(chdir(tmp_proj))\n",
    "\n",
    "    # run a trivial snippet; the executor bootstraps because it now \"sees\"\n",
    "    # requirements.txt in the current working directory\n",
    "    out = await executor.execute(\n",
    "        ExecutionRequest(snippets=[\n",
    "            CodeSnippet(language=\"python\", code=\"print('bootstrap OK')\", timeout=5)\n",
    "        ])\n",
    "    )\n",
    "    console.print(out[0].output)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45de2386",
   "metadata": {},
   "source": [
    "### Clean Up the throw-away project"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "0c7aa010",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Cache directory removed ✅\n",
      "temporary project removed ✅\n"
     ]
    }
   ],
   "source": [
    "shutil.rmtree(executor.cache_dir, ignore_errors=True)\n",
    "print(\"Cache directory removed ✅\")\n",
|
||||
"shutil.rmtree(tmp_proj, ignore_errors=True)\n",
|
||||
"print(\"temporary project removed ✅\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "36ea4010",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.13.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
|
@ -109,7 +109,7 @@
|
|||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content='One famous dog is Lassie, a fictional Rough Collie known from movies, television series, and books for her intelligence and bravery.', role='assistant'), logprobs=None)], created=1741085078, id='chatcmpl-B7K3KbzErY3CMSoknZyDUSAN52xzL', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 27, 'prompt_tokens': 12, 'total_tokens': 39, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
|
||||
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content='One famous dog is Lassie, the fictional Rough Collie from the \"Lassie\" television series and movies. Lassie is known for her intelligence, loyalty, and the ability to help her human companions out of tricky situations.', role='assistant'), logprobs=None)], created=1743846818, id='chatcmpl-BIuVWArM8Lzqug16s43O9M8BLaFkZ', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 48, 'prompt_tokens': 12, 'total_tokens': 60, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
|
@ -135,7 +135,7 @@
|
|||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'content': 'One famous dog is Lassie, a fictional Rough Collie known from movies, television series, and books for her intelligence and bravery.', 'role': 'assistant'}\n"
|
||||
"{'content': 'One famous dog is Lassie, the fictional Rough Collie from the \"Lassie\" television series and movies. Lassie is known for her intelligence, loyalty, and the ability to help her human companions out of tricky situations.', 'role': 'assistant'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
@ -189,7 +189,7 @@
|
|||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content=\"I am an AI assistant and don't have a personal name, but you can call me Assistant.\", role='assistant'), logprobs=None)], created=1741085084, id='chatcmpl-B7K3QXh8FWH8odMdwUI61eXieb0zk', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 19, 'prompt_tokens': 39, 'total_tokens': 58, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
|
||||
"ChatCompletion(choices=[Choice(finish_reason='stop', index=0, message=MessageContent(content=\"I am an AI assistant and don't have a personal name, but you can call me Assistant.\", role='assistant'), logprobs=None)], created=1743846828, id='chatcmpl-BIuVgBC6I3w1TFn15pmuCBGu6VZQM', model='gpt-4o-2024-08-06', object='chat.completion', usage={'completion_tokens': 20, 'prompt_tokens': 39, 'total_tokens': 59, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
|
@ -278,7 +278,7 @@
|
|||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.1"
|
||||
"version": "3.13.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
|
|
@ -0,0 +1,88 @@
|
|||
# 🧪 Basic MCP Agent Playground
|
||||
|
||||
This demo shows how to use a **lightweight agent** to call tools served via the [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction). The agent uses a simple pattern from `dapr_agents`, but **without running inside Dapr**.
|
||||
|
||||
It’s a minimal, Python-based setup for:
|
||||
|
||||
- Exploring how MCP tools work
|
||||
- Testing stdio and SSE transport
|
||||
- Running tool-calling agents (like ToolCallingAgent or ReActAgent)
|
||||
- Experimenting **without** durable workflows or Dapr dependencies
|
||||
|
||||
> 🧠 Looking for something more robust?
|
||||
> Check out the full `dapr_agents` repo to see how we run these agents inside Dapr workflows with durable task orchestration and state management.
|
||||
|
||||
---
|
||||
|
||||
## 🛠️ Project Structure
|
||||
|
||||
```text
|
||||
.
|
||||
├── tools.py # Registers two tools via FastMCP
|
||||
├── server.py # Starts the MCP server in stdio or SSE mode
|
||||
├── stdio.ipynb # Example using ToolCallingAgent over stdio
|
||||
├── sse.ipynb # Example using ToolCallingAgent over SSE
|
||||
├── requirements.txt
|
||||
└── README.md
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
Before running anything, make sure to install the dependencies:
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
## 🚀 Starting the MCP Tool Server
|
||||
|
||||
The server exposes two tools via MCP:
|
||||
|
||||
* `get_weather(location: str) → str`
|
||||
* `jump(distance: str) → str`
|
||||
|
||||
Defined in `tools.py`, these tools are registered using FastMCP.
|
||||
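For reference, the registration in `tools.py` boils down to two `@mcp.tool()`-decorated async functions; the docstrings become the tool descriptions the agent sees:

```python
from mcp.server.fastmcp import FastMCP
import random

mcp = FastMCP("TestServer")


@mcp.tool()
async def get_weather(location: str) -> str:
    """Get weather information for a specific location."""
    return f"{location}: {random.randint(60, 80)}F."


@mcp.tool()
async def jump(distance: str) -> str:
    """Simulate a jump of a given distance."""
    return f"I jumped the following distance: {distance}"
```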
|
||||
You can run the server in two modes:
|
||||
|
||||
### ▶️ 1. STDIO Mode
|
||||
|
||||
This runs inside the notebook. It's useful for quick tests because the MCP server doesn't need to be running in a separate terminal.
|
||||
|
||||
* This is used in `stdio.ipynb`
|
||||
* The agent communicates with the tool server via stdio transport
|
||||
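The notebook connection is a sketch along these lines (abridged from `stdio.ipynb`):

```python
from dapr_agents.tool.mcp.client import MCPClient

client = MCPClient()

# The client spawns server.py as a child process and talks MCP over stdio
await client.connect_stdio(
    server_name="local",
    command="python",
    args=["server.py", "--server_type", "stdio"],
)

tools = client.get_all_tools()
print("🔧 Tools:", [t.name for t in tools])
```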
|
||||
### 🌐 2. SSE Mode (Starlette + Uvicorn)
|
||||
This mode requires running the server outside the notebook (in a terminal).
|
||||
|
||||
```bash
|
||||
python server.py --server_type sse --host 127.0.0.1 --port 8000
|
||||
```
|
||||
|
||||
The server exposes:
|
||||
|
||||
* `/sse` for the SSE connection
|
||||
* `/messages/` to receive tool calls
|
||||
|
||||
Used by `sse.ipynb`
|
||||
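With the server running, the notebook connects over HTTP (abridged from `sse.ipynb`):

```python
from dapr_agents.tool.mcp.client import MCPClient

client = MCPClient()

# Connect to the already-running SSE server
await client.connect_sse(
    server_name="local",              # unique name you assign to this server
    url="http://localhost:8000/sse",  # MCP SSE endpoint
)

tools = client.get_all_tools()
print("🔧 Tools:", [t.name for t in tools])  # ['LocalGetWeather', 'LocalJump']
```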
|
||||
📌 You can change the host and port using the `--host` and `--port` flags.
|
||||
|
||||
## 📓 Notebooks
|
||||
There are two notebooks in this repo that show basic agent behavior using MCP tools:
|
||||
|
||||
| Notebook | Description | Transport |
|
||||
| --- | --- | --- |
|
||||
| stdio.ipynb | Uses ToolCallingAgent via mcp.run("stdio") | STDIO |
|
||||
| sse.ipynb | Uses ToolCallingAgent with SSE tool server | SSE |
|
||||
|
||||
Each notebook runs a basic `ToolCallingAgent` using tools served via MCP. These agents are not managed via Dapr or durable workflows; it's pure Python execution with async support.
|
||||
|
||||
## 🔄 What’s Next?
|
||||
|
||||
After testing these lightweight agents, you can try:
|
||||
|
||||
* Running the full `dapr_agents` workflow system
|
||||
* Registering more complex MCP tools
|
||||
* Using other agent types (e.g., ReActAgent, AssistantAgent)
|
||||
* Testing stateful, durable workflows using Dapr + MCP tools
|
|
@ -0,0 +1,4 @@
|
|||
dapr-agents
|
||||
python-dotenv
|
||||
mcp
|
||||
starlette
|
|
@ -0,0 +1,83 @@
|
|||
import argparse
|
||||
import logging
|
||||
import uvicorn
|
||||
from starlette.applications import Starlette
|
||||
from starlette.requests import Request
|
||||
from starlette.routing import Mount, Route
|
||||
|
||||
from mcp.server.sse import SseServerTransport
|
||||
from tools import mcp
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Logging Configuration
|
||||
# ─────────────────────────────────────────────
|
||||
logging.basicConfig(
|
||||
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
|
||||
)
|
||||
logger = logging.getLogger("mcp-server")
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Starlette App Factory
|
||||
# ─────────────────────────────────────────────
|
||||
def create_starlette_app():
|
||||
"""
|
||||
Create a Starlette app wired with the MCP server over SSE transport.
|
||||
"""
|
||||
logger.debug("Creating Starlette app with SSE transport")
|
||||
sse = SseServerTransport("/messages/")
|
||||
|
||||
async def handle_sse(request: Request) -> None:
|
||||
logger.info("🔌 SSE connection established")
|
||||
async with sse.connect_sse(request.scope, request.receive, request._send) as (
|
||||
read_stream,
|
||||
write_stream,
|
||||
):
|
||||
logger.debug("Starting MCP server run loop over SSE")
|
||||
await mcp._mcp_server.run(
|
||||
read_stream,
|
||||
write_stream,
|
||||
mcp._mcp_server.create_initialization_options(),
|
||||
)
|
||||
logger.debug("MCP run loop completed")
|
||||
|
||||
return Starlette(
|
||||
debug=False,
|
||||
routes=[
|
||||
Route("/sse", endpoint=handle_sse),
|
||||
Mount("/messages/", app=sse.handle_post_message),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# CLI Entrypoint
|
||||
# ─────────────────────────────────────────────
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Run an MCP tool server.")
|
||||
parser.add_argument(
|
||||
"--server_type",
|
||||
choices=["stdio", "sse"],
|
||||
default="stdio",
|
||||
help="Transport to use",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--host", default="127.0.0.1", help="Host to bind to (SSE only)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--port", type=int, default=8000, help="Port to bind to (SSE only)"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
logger.info(f"🚀 Starting MCP server in {args.server_type.upper()} mode")
|
||||
|
||||
if args.server_type == "stdio":
|
||||
mcp.run("stdio")
|
||||
else:
|
||||
app = create_starlette_app()
|
||||
logger.info(f"🌐 Running SSE server on {args.host}:{args.port}")
|
||||
uvicorn.run(app, host=args.host, port=args.port)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -0,0 +1,298 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Basic Weather Agent with MCP Support (SSE Transport)\n",
|
||||
"\n",
|
||||
"* Collaborator: Roberto Rodriguez @Cyb3rWard0g"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv mcp starlette"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import Environment Variables"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv() # take environment variables from .env."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Enable Logging"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.INFO)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Connect to MCP Server and Get Tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.tool.mcp.client:Connecting to MCP server 'local' via SSE: http://localhost:8000/sse\n",
|
||||
"INFO:mcp.client.sse:Connecting to SSE endpoint: http://localhost:8000/sse\n",
|
||||
"INFO:httpx:HTTP Request: GET http://localhost:8000/sse \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:mcp.client.sse:Received endpoint URL: http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc\n",
|
||||
"INFO:mcp.client.sse:Starting post writer with endpoint URL: http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc\n",
|
||||
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc \"HTTP/1.1 202 Accepted\"\n",
|
||||
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc \"HTTP/1.1 202 Accepted\"\n",
|
||||
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc \"HTTP/1.1 202 Accepted\"\n",
|
||||
"INFO:dapr_agents.tool.mcp.client:Loaded 2 tools from server 'local'\n",
|
||||
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=916bc6e1fb514b3e814e6a980ce20bbc \"HTTP/1.1 202 Accepted\"\n",
|
||||
"INFO:dapr_agents.tool.mcp.client:Loaded 0 prompts from server 'local': \n",
|
||||
"INFO:dapr_agents.tool.mcp.client:Successfully connected to MCP server 'local'\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"🔧 Tools: ['LocalGetWeather', 'LocalJump']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents.tool.mcp.client import MCPClient\n",
|
||||
"\n",
|
||||
"client = MCPClient()\n",
|
||||
"\n",
|
||||
"await client.connect_sse(\n",
|
||||
" server_name=\"local\", # Unique name you assign to this server\n",
|
||||
" url=\"http://localhost:8000/sse\", # MCP SSE endpoint\n",
|
||||
" headers=None # Optional HTTP headers if needed\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# See what tools were loaded\n",
|
||||
"tools = client.get_all_tools()\n",
|
||||
"print(\"🔧 Tools:\", [t.name for t in tools])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.llm.openai.client.base:Initializing OpenAI client...\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool registered: LocalGetWeather\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool registered: LocalJump\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool Executor initialized with 2 tool(s).\n",
|
||||
"INFO:dapr_agents.agent.base:Constructing system_prompt from agent attributes.\n",
|
||||
"INFO:dapr_agents.agent.base:Using system_prompt to create the prompt template.\n",
|
||||
"INFO:dapr_agents.agent.base:Pre-filled prompt template with attributes: ['name', 'role', 'goal']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents import Agent\n",
|
||||
"\n",
|
||||
"agent = Agent(\n",
|
||||
" name=\"Rob\",\n",
|
||||
" role= \"Weather Assistant\",\n",
|
||||
" tools=tools\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 1/10 started.\n",
|
||||
"INFO:dapr_agents.llm.utils.request:Tools are available in the request.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
|
||||
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mWhat is the weather in New York?\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.toolcall.base:Executing LocalGetWeather with arguments {\"location\":\"New York\"}\n",
|
||||
"INFO:dapr_agents.tool.executor:Running tool (auto): LocalGetWeather\n",
|
||||
"INFO:dapr_agents.tool.mcp.client:[MCP] Executing tool 'get_weather' with args: {'location': 'New York'}\n",
|
||||
"INFO:mcp.client.sse:Connecting to SSE endpoint: http://localhost:8000/sse\n",
|
||||
"INFO:httpx:HTTP Request: GET http://localhost:8000/sse \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:mcp.client.sse:Received endpoint URL: http://localhost:8000/messages/?session_id=b47ef10b57dd471aac4c5d7aaeadbf5b\n",
|
||||
"INFO:mcp.client.sse:Starting post writer with endpoint URL: http://localhost:8000/messages/?session_id=b47ef10b57dd471aac4c5d7aaeadbf5b\n",
|
||||
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=b47ef10b57dd471aac4c5d7aaeadbf5b \"HTTP/1.1 202 Accepted\"\n",
|
||||
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=b47ef10b57dd471aac4c5d7aaeadbf5b \"HTTP/1.1 202 Accepted\"\n",
|
||||
"INFO:httpx:HTTP Request: POST http://localhost:8000/messages/?session_id=b47ef10b57dd471aac4c5d7aaeadbf5b \"HTTP/1.1 202 Accepted\"\n",
|
||||
"INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 2/10 started.\n",
|
||||
"INFO:dapr_agents.llm.utils.request:Tools are available in the request.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;217;95;118massistant:\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mFunction name: LocalGetWeather (Call Id: call_lBVZIV7seOsWttLnfZaLSwS3)\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mArguments: {\"location\":\"New York\"}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126mLocalGetWeather(tool) (Id: call_lBVZIV7seOsWttLnfZaLSwS3):\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126m\u001b[0m\u001b[38;2;191;69;126mNew York: 65F.\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mThe current weather in New York is 65°F. If you need more information, feel free to ask!\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The current weather in New York is 65°F. If you need more information, feel free to ask!'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await agent.run(\"What is the weather in New York?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.13.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -0,0 +1,296 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Basic Weather Agent with MCP Support (Stdio Transport)\n",
|
||||
"\n",
|
||||
"* Collaborator: Roberto Rodriguez @Cyb3rWard0g"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Required Libraries\n",
|
||||
"Before starting, ensure the required libraries are installed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install dapr-agents python-dotenv mcp starlette"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import Environment Variables"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"load_dotenv() # take environment variables from .env."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Enable Logging"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.INFO)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Connect to MCP Server and Get Tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.tool.mcp.client:Connecting to MCP server 'local' via stdio: python ['server.py', '--server_type', 'stdio']\n",
|
||||
"INFO:dapr_agents.tool.mcp.client:Loaded 2 tools from server 'local'\n",
|
||||
"INFO:dapr_agents.tool.mcp.client:Loaded 0 prompts from server 'local': \n",
|
||||
"INFO:dapr_agents.tool.mcp.client:Successfully connected to MCP server 'local'\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"🔧 Tools: ['LocalGetWeather', 'LocalJump']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents.tool.mcp.client import MCPClient\n",
|
||||
"\n",
|
||||
"client = MCPClient()\n",
|
||||
"\n",
|
||||
"# Connect to your test server\n",
|
||||
"await client.connect_stdio(\n",
|
||||
" server_name=\"local\",\n",
|
||||
" command=\"python\",\n",
|
||||
" args=[\"server.py\", \"--server_type\", \"stdio\"]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Test tools\n",
|
||||
"tools = client.get_all_tools()\n",
|
||||
"print(\"🔧 Tools:\", [t.name for t in tools])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.llm.openai.client.base:Initializing OpenAI client...\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool registered: LocalGetWeather\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool registered: LocalJump\n",
|
||||
"INFO:dapr_agents.tool.executor:Tool Executor initialized with 2 tool(s).\n",
|
||||
"INFO:dapr_agents.agent.base:Constructing system_prompt from agent attributes.\n",
|
||||
"INFO:dapr_agents.agent.base:Using system_prompt to create the prompt template.\n",
|
||||
"INFO:dapr_agents.agent.base:Pre-filled prompt template with attributes: ['name', 'role', 'goal']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from dapr_agents import Agent\n",
|
||||
"\n",
|
||||
"agent = Agent(\n",
|
||||
" name=\"Rob\",\n",
|
||||
" role= \"Weather Assistant\",\n",
|
||||
" tools=tools\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 1/10 started.\n",
|
||||
"INFO:dapr_agents.llm.utils.request:Tools are available in the request.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;242;182;128muser:\u001b[0m\n",
|
||||
"\u001b[38;2;242;182;128m\u001b[0m\u001b[38;2;242;182;128mWhat is the weather in New York?\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n",
|
||||
"INFO:dapr_agents.agent.patterns.toolcall.base:Executing LocalGetWeather with arguments {\"location\":\"New York\"}\n",
|
||||
"INFO:dapr_agents.tool.executor:Running tool (auto): LocalGetWeather\n",
|
||||
"INFO:dapr_agents.tool.mcp.client:[MCP] Executing tool 'get_weather' with args: {'location': 'New York'}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;217;95;118massistant:\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mFunction name: LocalGetWeather (Call Id: call_l8KuS39PvriksogjGN71rzCm)\u001b[0m\n",
|
||||
"\u001b[38;2;217;95;118m\u001b[0m\u001b[38;2;217;95;118mArguments: {\"location\":\"New York\"}\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 2/10 started.\n",
|
||||
"INFO:dapr_agents.llm.utils.request:Tools are available in the request.\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;191;69;126mLocalGetWeather(tool) (Id: call_l8KuS39PvriksogjGN71rzCm):\u001b[0m\n",
|
||||
"\u001b[38;2;191;69;126m\u001b[0m\u001b[38;2;191;69;126mNew York: 60F.\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
||||
"INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[38;2;147;191;183massistant:\u001b[0m\n",
|
||||
"\u001b[38;2;147;191;183m\u001b[0m\u001b[38;2;147;191;183mThe current temperature in New York is 60°F.\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0m--------------------------------------------------------------------------------\u001b[0m\n",
|
||||
"\u001b[0m\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The current temperature in New York is 60°F.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await agent.run(\"What is the weather in New York?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.13.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
from mcp.server.fastmcp import FastMCP
|
||||
import random
|
||||
|
||||
mcp = FastMCP("TestServer")
|
||||
|
||||
|
||||
@mcp.tool()
|
||||
async def get_weather(location: str) -> str:
|
||||
"""Get weather information for a specific location."""
|
||||
temperature = random.randint(60, 80)
|
||||
return f"{location}: {temperature}F."
|
||||
|
||||
|
||||
@mcp.tool()
|
||||
async def jump(distance: str) -> str:
|
||||
"""Simulate a jump of a given distance."""
|
||||
return f"I jumped the following distance: {distance}"
|
|
@ -0,0 +1,146 @@
|
|||
# MCP Agent with Dapr Workflows
|
||||
|
||||
This demo shows how to run an AI agent inside a Dapr Workflow, calling tools exposed via the [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction).
|
||||
|
||||
Unlike the lightweight notebook-based examples, this setup runs a full Dapr agent using:
|
||||
|
||||
✅ Durable task orchestration with Dapr Workflows
|
||||
✅ Tools served via MCP (stdio or SSE)
|
||||
✅ Full integration with the Dapr ecosystem
|
||||
|
||||
## 🛠️ Project Structure
|
||||
|
||||
```text
|
||||
.
|
||||
├── app.py # Main entrypoint: runs a Dapr Agent and workflow on port 8001
|
||||
├── tools.py # MCP tool definitions (get_weather, jump)
|
||||
├── server.py # Starlette-based SSE server
|
||||
├── client.py # Script to send an HTTP request to the agent on port 8001
|
||||
├── components/ # Dapr pubsub + state components (Redis, etc.)
|
||||
├── requirements.txt
|
||||
└── README.md
|
||||
```
|
||||
|
||||
## 📦 Installation
|
||||
|
||||
Install dependencies:
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
Make sure you have Dapr installed and initialized:
|
||||
|
||||
```bash
|
||||
dapr init
|
||||
```
|
||||
|
||||
## 🧰 MCP Tool Server
|
||||
|
||||
Your agent will call tools defined in `tools.py`, served via FastMCP:
|
||||
|
||||
```python
|
||||
@mcp.tool()
|
||||
async def get_weather(location: str) -> str:
|
||||
...
|
||||
|
||||
@mcp.tool()
|
||||
async def jump(distance: str) -> str:
|
||||
...
|
||||
```
|
||||
|
||||
These tools can be served in one of two modes:
|
||||
|
||||
### STDIO Mode (local execution)
|
||||
|
||||
No external server needed — the agent runs the MCP server in-process.
|
||||
|
||||
✅ Best for internal experiments or testing
|
||||
🚫 Not supported for agents that rely on external workflows (e.g., Dapr orchestration)
|
||||
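In stdio mode, `server.py` simply hands control to FastMCP's stdio runner:

```python
from tools import mcp

# FastMCP speaks MCP over stdin/stdout, so the client spawns this
# script as a child process instead of connecting over the network.
mcp.run("stdio")
```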
|
||||
### SSE Mode (recommended for Dapr workflows)
|
||||
|
||||
In this demo, we run the MCP server as a separate Starlette + Uvicorn app:
|
||||
|
||||
```bash
|
||||
python server.py --server_type sse --host 127.0.0.1 --port 8000
|
||||
```
|
||||
|
||||
This exposes:
|
||||
|
||||
* `/sse` for the SSE stream
|
||||
* `/messages/` for tool execution
|
||||
|
||||
Used by the Dapr agent in this repo.
|
||||
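Inside the agent process, the wiring is roughly as follows (condensed from `app.py` in this folder; see the full file for the instructions list and error handling):

```python
from dapr_agents import AssistantAgent
from dapr_agents.tool.mcp import MCPClient

# Load tools from the SSE server started above
client = MCPClient()
await client.connect_sse("local", url="http://localhost:8000/sse")

# Durable agent backed by the Dapr pubsub and state store components
weather_agent = AssistantAgent(
    name="Stevie",
    role="Weather Assistant",
    goal="Help humans get weather and location info using smart tools.",
    tools=client.get_all_tools(),
    message_bus_name="messagepubsub",
    state_store_name="workflowstatestore",
    state_key="workflow_state",
    agents_registry_store_name="agentstatestore",
    agents_registry_key="agents_registry",
).as_service(port=8001)

await weather_agent.start()
```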
|
||||
## 🚀 Running the Dapr Agent
|
||||
|
||||
Start the MCP server in SSE mode:
|
||||
|
||||
```bash
|
||||
python server.py --server_type sse --port 8000
|
||||
```
|
||||
|
||||
Then in a separate terminal, run the agent workflow:
|
||||
|
||||
```bash
|
||||
dapr run --app-id weatherappmcp --resources-path components/ -- python app.py
|
||||
```
|
||||
|
||||
Once the agent is ready, run the `client.py` script to send a message to it.
|
||||
|
||||
```bash
|
||||
python3 client.py
|
||||
```
|
||||
|
||||
You will see the state of the agent in a JSON file in the same directory:
|
||||
|
||||
```json
|
||||
{
|
||||
"instances": {
|
||||
"e098e5b85d544c84a26250be80316152": {
|
||||
"input": "What is the weather in New York?",
|
||||
"output": "The current temperature in New York, USA, is 66\u00b0F.",
|
||||
"start_time": "2025-04-05T05:37:50.496005",
|
||||
"end_time": "2025-04-05T05:37:52.501630",
|
||||
"messages": [
|
||||
{
|
||||
"id": "e8ccc9d2-1674-47cc-afd2-8e68b91ff791",
|
||||
"role": "user",
|
||||
"content": "What is the weather in New York?",
|
||||
"timestamp": "2025-04-05T05:37:50.516572",
|
||||
"name": null
|
||||
},
|
||||
{
|
||||
"id": "47b8db93-558c-46ed-80bb-8cb599c4272b",
|
||||
"role": "assistant",
|
||||
"content": "The current temperature in New York, USA, is 66\u00b0F.",
|
||||
"timestamp": "2025-04-05T05:37:52.499945",
|
||||
"name": null
|
||||
}
|
||||
],
|
||||
"last_message": {
|
||||
"id": "47b8db93-558c-46ed-80bb-8cb599c4272b",
|
||||
"role": "assistant",
|
||||
"content": "The current temperature in New York, USA, is 66\u00b0F.",
|
||||
"timestamp": "2025-04-05T05:37:52.499945",
|
||||
"name": null
|
||||
},
|
||||
"tool_history": [
|
||||
{
|
||||
"content": "New York, USA: 66F.",
|
||||
"role": "tool",
|
||||
"tool_call_id": "call_LTDMHvt05e1tvbWBe0kVvnUM",
|
||||
"id": "2c1535fe-c43a-42c1-be7e-25c71b43c32e",
|
||||
"function_name": "LocalGetWeather",
|
||||
"function_args": "{\"location\":\"New York, USA\"}",
|
||||
"timestamp": "2025-04-05T05:37:51.609087"
|
||||
}
|
||||
],
|
||||
"source": null,
|
||||
"source_workflow_instance_id": null
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
import asyncio
|
||||
import logging
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from dapr_agents import AssistantAgent
|
||||
from dapr_agents.tool.mcp import MCPClient
|
||||
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Load MCP tools from server (stdio or sse)
|
||||
client = MCPClient()
|
||||
await client.connect_sse("local", url="http://localhost:8000/sse")
|
||||
|
||||
# Convert MCP tools to AgentTool list
|
||||
tools = client.get_all_tools()
|
||||
|
||||
# Create the Weather Agent using those tools
|
||||
weather_agent = AssistantAgent(
|
||||
role="Weather Assistant",
|
||||
name="Stevie",
|
||||
goal="Help humans get weather and location info using smart tools.",
|
||||
instructions=[
|
||||
"Respond clearly and helpfully to weather-related questions.",
|
||||
"Use tools when appropriate to fetch or simulate weather data.",
|
||||
"You may sometimes jump after answering the weather question.",
|
||||
],
|
||||
tools=tools,
|
||||
message_bus_name="messagepubsub",
|
||||
state_store_name="workflowstatestore",
|
||||
state_key="workflow_state",
|
||||
agents_registry_store_name="agentstatestore",
|
||||
agents_registry_key="agents_registry",
|
||||
).as_service(port=8001)
|
||||
|
||||
# Start the FastAPI agent service
|
||||
await weather_agent.start()
|
||||
|
||||
except Exception as e:
|
||||
logging.exception("Error starting weather agent service", exc_info=e)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
load_dotenv()
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
asyncio.run(main())
|
|
@ -0,0 +1,57 @@
|
|||
#!/usr/bin/env python3
|
||||
import requests
|
||||
import time
|
||||
import sys
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
status_url = "http://localhost:8001/status"
|
||||
healthy = False
|
||||
for attempt in range(1, 11):
|
||||
try:
|
||||
print(f"Attempt {attempt}...")
|
||||
response = requests.get(status_url, timeout=5)
|
||||
|
||||
if response.status_code == 200:
|
||||
print("Workflow app is healthy!")
|
||||
healthy = True
|
||||
break
|
||||
else:
|
||||
print(f"Received status code {response.status_code}: {response.text}")
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"Request failed: {e}")
|
||||
|
||||
print("Waiting 5 seconds before next health check attempt...")
|
||||
time.sleep(5)
|
||||
|
||||
if not healthy:
|
||||
print("Workflow app is not healthy!")
|
||||
sys.exit(1)
|
||||
|
||||
workflow_url = "http://localhost:8001/start-workflow"
|
||||
task_payload = {"task": "What is the weather in New York?"}
|
||||
|
||||
for attempt in range(1, 11):
|
||||
try:
|
||||
print(f"Attempt {attempt}...")
|
||||
response = requests.post(workflow_url, json=task_payload, timeout=5)
|
||||
|
||||
if response.status_code == 202:
|
||||
print("Workflow started successfully!")
|
||||
sys.exit(0)
|
||||
else:
|
||||
print(f"Received status code {response.status_code}: {response.text}")
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"Request failed: {e}")
|
||||
|
||||
print("Waiting 1 second before next attempt...")
|
||||
time.sleep(1)
|
||||
|
||||
print("Maximum attempts (10) reached without success.")
|
||||
|
||||
print("Failed to get successful response")
|
||||
sys.exit(1)
|
|
@ -0,0 +1,12 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: messagepubsub
|
||||
spec:
|
||||
type: pubsub.redis
|
||||
version: v1
|
||||
metadata:
|
||||
- name: redisHost
|
||||
value: localhost:6379
|
||||
- name: redisPassword
|
||||
value: ""
|
|
@ -0,0 +1,16 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: agentstatestore
|
||||
spec:
|
||||
type: state.redis
|
||||
version: v1
|
||||
metadata:
|
||||
- name: redisHost
|
||||
value: localhost:6379
|
||||
- name: redisPassword
|
||||
value: ""
|
||||
- name: keyPrefix
|
||||
value: none
|
||||
- name: actorStateStore
|
||||
value: "true"
|
|
@ -0,0 +1,12 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: workflowstatestore
|
||||
spec:
|
||||
type: state.redis
|
||||
version: v1
|
||||
metadata:
|
||||
- name: redisHost
|
||||
value: localhost:6379
|
||||
- name: redisPassword
|
||||
value: ""
|
|
@ -0,0 +1,4 @@
|
|||
dapr-agents
|
||||
python-dotenv
|
||||
mcp
|
||||
starlette
|
|
@ -0,0 +1,83 @@
|
|||
import argparse
|
||||
import logging
|
||||
import uvicorn
|
||||
from starlette.applications import Starlette
|
||||
from starlette.requests import Request
|
||||
from starlette.routing import Mount, Route
|
||||
|
||||
from mcp.server.sse import SseServerTransport
|
||||
from tools import mcp
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Logging Configuration
|
||||
# ─────────────────────────────────────────────
|
||||
logging.basicConfig(
|
||||
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
|
||||
)
|
||||
logger = logging.getLogger("mcp-server")
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Starlette App Factory
|
||||
# ─────────────────────────────────────────────
|
||||
def create_starlette_app():
|
||||
"""
|
||||
Create a Starlette app wired with the MCP server over SSE transport.
|
||||
"""
|
||||
logger.debug("Creating Starlette app with SSE transport")
|
||||
sse = SseServerTransport("/messages/")
|
||||
|
||||
async def handle_sse(request: Request) -> None:
|
||||
logger.info("🔌 SSE connection established")
|
||||
async with sse.connect_sse(request.scope, request.receive, request._send) as (
|
||||
read_stream,
|
||||
write_stream,
|
||||
):
|
||||
logger.debug("Starting MCP server run loop over SSE")
|
||||
await mcp._mcp_server.run(
|
||||
read_stream,
|
||||
write_stream,
|
||||
mcp._mcp_server.create_initialization_options(),
|
||||
)
|
||||
logger.debug("MCP run loop completed")
|
||||
|
||||
return Starlette(
|
||||
debug=False,
|
||||
routes=[
|
||||
Route("/sse", endpoint=handle_sse),
|
||||
Mount("/messages/", app=sse.handle_post_message),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# CLI Entrypoint
|
||||
# ─────────────────────────────────────────────
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Run an MCP tool server.")
|
||||
parser.add_argument(
|
||||
"--server_type",
|
||||
choices=["stdio", "sse"],
|
||||
default="stdio",
|
||||
help="Transport to use",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--host", default="127.0.0.1", help="Host to bind to (SSE only)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--port", type=int, default=8000, help="Port to bind to (SSE only)"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
logger.info(f"🚀 Starting MCP server in {args.server_type.upper()} mode")
|
||||
|
||||
if args.server_type == "stdio":
|
||||
mcp.run("stdio")
|
||||
else:
|
||||
app = create_starlette_app()
|
||||
logger.info(f"🌐 Running SSE server on {args.host}:{args.port}")
|
||||
uvicorn.run(app, host=args.host, port=args.port)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -0,0 +1,17 @@
|
|||
from mcp.server.fastmcp import FastMCP
|
||||
import random
|
||||
|
||||
mcp = FastMCP("TestServer")
|
||||
|
||||
|
||||
@mcp.tool()
|
||||
async def get_weather(location: str) -> str:
|
||||
"""Get weather information for a specific location."""
|
||||
temperature = random.randint(60, 80)
|
||||
return f"{location}: {temperature}F."
|
||||
|
||||
|
||||
@mcp.tool()
|
||||
async def jump(distance: str) -> str:
|
||||
"""Simulate a jump of a given distance."""
|
||||
return f"I jumped the following distance: {distance}"
|
|
@ -3,39 +3,46 @@ import dapr.ext.workflow as wf
|
|||
|
||||
wfr = wf.WorkflowRuntime()
|
||||
|
||||
@wfr.workflow(name='random_workflow')
|
||||
|
||||
@wfr.workflow(name="random_workflow")
|
||||
def task_chain_workflow(ctx: wf.DaprWorkflowContext, x: int):
|
||||
result1 = yield ctx.call_activity(step1, input=x)
|
||||
result2 = yield ctx.call_activity(step2, input=result1)
|
||||
result3 = yield ctx.call_activity(step3, input=result2)
|
||||
return [result1, result2, result3]
|
||||
|
||||
|
||||
@wfr.activity
|
||||
def step1(ctx, activity_input):
|
||||
print(f'Step 1: Received input: {activity_input}.')
|
||||
print(f"Step 1: Received input: {activity_input}.")
|
||||
# Do some work
|
||||
return activity_input + 1
|
||||
|
||||
|
||||
@wfr.activity
|
||||
def step2(ctx, activity_input):
|
||||
print(f'Step 2: Received input: {activity_input}.')
|
||||
print(f"Step 2: Received input: {activity_input}.")
|
||||
# Do some work
|
||||
return activity_input * 2
|
||||
|
||||
|
||||
@wfr.activity
|
||||
def step3(ctx, activity_input):
|
||||
print(f'Step 3: Received input: {activity_input}.')
|
||||
print(f"Step 3: Received input: {activity_input}.")
|
||||
# Do some work
|
||||
return activity_input ^ 2
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
if __name__ == "__main__":
|
||||
wfr.start()
|
||||
sleep(5) # wait for workflow runtime to start
|
||||
|
||||
wf_client = wf.DaprWorkflowClient()
|
||||
instance_id = wf_client.schedule_new_workflow(workflow=task_chain_workflow, input=10)
|
||||
print(f'Workflow started. Instance ID: {instance_id}')
|
||||
instance_id = wf_client.schedule_new_workflow(
|
||||
workflow=task_chain_workflow, input=10
|
||||
)
|
||||
print(f"Workflow started. Instance ID: {instance_id}")
|
||||
state = wf_client.wait_for_workflow_completion(instance_id)
|
||||
print(f'Workflow completed! Status: {state.runtime_status}')
|
||||
print(f"Workflow completed! Status: {state.runtime_status}")
|
||||
|
||||
wfr.shutdown()
|
||||
wfr.shutdown()
|
||||
|
|
|
@ -1,37 +1,43 @@
|
|||
from dapr_agents.workflow import WorkflowApp, workflow, task
|
||||
from dapr_agents.types import DaprWorkflowContext
|
||||
import logging
|
||||
|
||||
@workflow(name='random_workflow')
|
||||
def task_chain_workflow(ctx:DaprWorkflowContext, input: int):
|
||||
from dapr_agents.workflow import WorkflowApp, workflow, task
|
||||
from dapr.ext.workflow import DaprWorkflowContext
|
||||
|
||||
|
||||
@workflow(name="random_workflow")
|
||||
def task_chain_workflow(ctx: DaprWorkflowContext, input: int):
|
||||
result1 = yield ctx.call_activity(step1, input=input)
|
||||
result2 = yield ctx.call_activity(step2, input=result1)
|
||||
result3 = yield ctx.call_activity(step3, input=result2)
|
||||
return [result1, result2, result3]
|
||||
|
||||
|
||||
@task
|
||||
def step1(activity_input):
|
||||
print(f'Step 1: Received input: {activity_input}.')
|
||||
print(f"Step 1: Received input: {activity_input}.")
|
||||
# Do some work
|
||||
return activity_input + 1
|
||||
|
||||
|
||||
@task
|
||||
def step2(activity_input):
|
||||
print(f'Step 2: Received input: {activity_input}.')
|
||||
print(f"Step 2: Received input: {activity_input}.")
|
||||
# Do some work
|
||||
return activity_input * 2
|
||||
|
||||
|
||||
@task
|
||||
def step3(activity_input):
|
||||
print(f'Step 3: Received input: {activity_input}.')
|
||||
print(f"Step 3: Received input: {activity_input}.")
|
||||
# Do some work
|
||||
return activity_input ^ 2
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
wfapp = WorkflowApp()
|
||||
|
||||
results = wfapp.run_and_monitor_workflow(task_chain_workflow, input=10)
|
||||
results = wfapp.run_and_monitor_workflow_sync(task_chain_workflow, input=10)
|
||||
|
||||
print(f"Results: {results}")
|
||||
print(f"Results: {results}")
|
|
@ -0,0 +1,43 @@
|
|||
import asyncio
|
||||
import logging
|
||||
|
||||
from dapr_agents.workflow import WorkflowApp, workflow, task
|
||||
from dapr.ext.workflow import DaprWorkflowContext
|
||||
|
||||
|
||||
@workflow(name="random_workflow")
|
||||
def task_chain_workflow(ctx: DaprWorkflowContext, input: int):
|
||||
result1 = yield ctx.call_activity(step1, input=input)
|
||||
result2 = yield ctx.call_activity(step2, input=result1)
|
||||
result3 = yield ctx.call_activity(step3, input=result2)
|
||||
return [result1, result2, result3]
|
||||
|
||||
|
||||
@task
|
||||
def step1(activity_input: int) -> int:
|
||||
print(f"Step 1: Received input: {activity_input}.")
|
||||
return activity_input + 1
|
||||
|
||||
|
||||
@task
|
||||
def step2(activity_input: int) -> int:
|
||||
print(f"Step 2: Received input: {activity_input}.")
|
||||
return activity_input * 2
|
||||
|
||||
|
||||
@task
|
||||
def step3(activity_input: int) -> int:
|
||||
print(f"Step 3: Received input: {activity_input}.")
|
||||
return activity_input ^ 2
|
||||
|
||||
|
||||
async def main():
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
wfapp = WorkflowApp()
|
||||
|
||||
result = await wfapp.run_and_monitor_workflow_async(task_chain_workflow, input=10)
|
||||
print(f"Results: {result}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
|
@ -1,27 +1,35 @@
|
|||
from dapr_agents.workflow import WorkflowApp, workflow, task
|
||||
from dapr_agents.types import DaprWorkflowContext
|
||||
from dapr.ext.workflow import DaprWorkflowContext
|
||||
from dotenv import load_dotenv
|
||||
import logging
|
||||
|
||||
|
||||
# Define Workflow logic
|
||||
@workflow(name='lotr_workflow')
|
||||
@workflow(name="lotr_workflow")
|
||||
def task_chain_workflow(ctx: DaprWorkflowContext):
|
||||
result1 = yield ctx.call_activity(get_character)
|
||||
result2 = yield ctx.call_activity(get_line, input={"character": result1})
|
||||
return result2
|
||||
|
||||
@task(description="""
|
||||
|
||||
@task(
|
||||
description="""
|
||||
Pick a random character from The Lord of the Rings\n
|
||||
and respond with the character's name ONLY
|
||||
""")
|
||||
"""
|
||||
)
|
||||
def get_character() -> str:
|
||||
pass
|
||||
|
||||
@task(description="What is a famous line by {character}",)
|
||||
|
||||
@task(
|
||||
description="What is a famous line by {character}",
|
||||
)
|
||||
def get_line(character: str) -> str:
|
||||
pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
# Load environment variables
|
||||
|
@ -31,5 +39,5 @@ if __name__ == '__main__':
|
|||
wfapp = WorkflowApp()
|
||||
|
||||
# Run workflow
|
||||
results = wfapp.run_and_monitor_workflow(task_chain_workflow)
|
||||
print(results)
|
||||
results = wfapp.run_and_monitor_workflow_sync(task_chain_workflow)
|
||||
print(results)
|
|
@ -0,0 +1,49 @@
|
|||
import asyncio
|
||||
import logging
|
||||
|
||||
from dapr_agents.workflow import WorkflowApp, workflow, task
|
||||
from dapr.ext.workflow import DaprWorkflowContext
|
||||
from dotenv import load_dotenv
|
||||
|
||||
|
||||
# Define Workflow logic
|
||||
@workflow(name="lotr_workflow")
|
||||
def task_chain_workflow(ctx: DaprWorkflowContext):
|
||||
result1 = yield ctx.call_activity(get_character)
|
||||
result2 = yield ctx.call_activity(get_line, input={"character": result1})
|
||||
return result2
|
||||
|
||||
|
||||
@task(
|
||||
description="""
|
||||
Pick a random character from The Lord of the Rings\n
|
||||
and respond with the character's name ONLY
|
||||
"""
|
||||
)
|
||||
def get_character() -> str:
|
||||
pass
|
||||
|
||||
|
||||
@task(
|
||||
description="What is a famous line by {character}",
|
||||
)
|
||||
def get_line(character: str) -> str:
|
||||
pass
|
||||
|
||||
|
||||
async def main():
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
# Initialize the WorkflowApp
|
||||
wfapp = WorkflowApp()
|
||||
|
||||
# Run workflow
|
||||
result = await wfapp.run_and_monitor_workflow_async(task_chain_workflow)
|
||||
print(f"Results: {result}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
|
@ -1,29 +1,34 @@
|
|||
import logging
|
||||
from dapr_agents.workflow import WorkflowApp, workflow, task
|
||||
from dapr_agents.types import DaprWorkflowContext
|
||||
from dapr.ext.workflow import DaprWorkflowContext
|
||||
from pydantic import BaseModel
|
||||
from dotenv import load_dotenv
|
||||
import logging
|
||||
|
||||
|
||||
@workflow
|
||||
def question(ctx:DaprWorkflowContext, input:int):
|
||||
def question(ctx: DaprWorkflowContext, input: int):
|
||||
step1 = yield ctx.call_activity(ask, input=input)
|
||||
return step1
|
||||
|
||||
|
||||
class Dog(BaseModel):
|
||||
name: str
|
||||
bio: str
|
||||
breed: str
|
||||
|
||||
|
||||
@task("Who was {name}?")
|
||||
def ask(name:str) -> Dog:
|
||||
def ask(name: str) -> Dog:
|
||||
pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
load_dotenv()
|
||||
|
||||
wfapp = WorkflowApp()
|
||||
|
||||
results = wfapp.run_and_monitor_workflow(workflow=question, input="Scooby Doo")
|
||||
print(results)
|
||||
results = wfapp.run_and_monitor_workflow_sync(workflow=question, input="Scooby Doo")
|
||||
|
||||
print(results)
|
|
@ -0,0 +1,44 @@
import asyncio
import logging

from dapr_agents.workflow import WorkflowApp, workflow, task
from dapr.ext.workflow import DaprWorkflowContext
from pydantic import BaseModel
from dotenv import load_dotenv


@workflow
def question(ctx: DaprWorkflowContext, input: int):
    step1 = yield ctx.call_activity(ask, input=input)
    return step1


class Dog(BaseModel):
    name: str
    bio: str
    breed: str


@task("Who was {name}?")
def ask(name: str) -> Dog:
    pass


async def main():
    logging.basicConfig(level=logging.INFO)

    # Load environment variables
    load_dotenv()

    # Initialize the WorkflowApp
    wfapp = WorkflowApp()

    # Run workflow
    result = await wfapp.run_and_monitor_workflow_async(
        workflow=question, input="Scooby Doo"
    )
    print(f"Results: {result}")


if __name__ == "__main__":
    asyncio.run(main())
@ -9,55 +9,54 @@ load_dotenv()
# Initialize Workflow Instance
wfr = wf.WorkflowRuntime()


# Define Workflow logic
@wfr.workflow(name='lotr_workflow')
@wfr.workflow(name="lotr_workflow")
def task_chain_workflow(ctx: wf.DaprWorkflowContext):
    result1 = yield ctx.call_activity(get_character)
    result2 = yield ctx.call_activity(get_line, input=result1)
    return result2


# Activity 1
@wfr.activity(name='step1')
@wfr.activity(name="step1")
def get_character(ctx):
    client = OpenAI()
    response = client.chat.completions.create(
        messages = [
        messages=[
            {
                "role": "user",
                "content": "Pick a random character from The Lord of the Rings and respond with the character name only"
                "content": "Pick a random character from The Lord of the Rings and respond with the character name only",
            }
        ],
        model = 'gpt-4o'
        model="gpt-4o",
    )
    character = response.choices[0].message.content
    print(f"Character: {character}")
    return character


# Activity 2
@wfr.activity(name='step2')
@wfr.activity(name="step2")
def get_line(ctx, character: str):
    client = OpenAI()
    response = client.chat.completions.create(
        messages = [
            {
                "role": "user",
                "content": f"What is a famous line by {character}"
            }
        ],
        model = 'gpt-4o'
        messages=[{"role": "user", "content": f"What is a famous line by {character}"}],
        model="gpt-4o",
    )
    line = response.choices[0].message.content
    print(f"Line: {line}")
    return line


if __name__ == '__main__':

if __name__ == "__main__":
    wfr.start()
    sleep(5)  # wait for workflow runtime to start

    wf_client = wf.DaprWorkflowClient()
    instance_id = wf_client.schedule_new_workflow(workflow=task_chain_workflow)
    print(f'Workflow started. Instance ID: {instance_id}')
    print(f"Workflow started. Instance ID: {instance_id}")
    state = wf_client.wait_for_workflow_completion(instance_id)
    print(f'Workflow completed! Status: {state.runtime_status}')
    print(f"Workflow completed! Status: {state.runtime_status}")

    wfr.shutdown()
    wfr.shutdown()
@ -1,5 +1,5 @@
from dapr_agents.document.reader.pdf.pypdf import PyPDFReader
from dapr_agents.types import DaprWorkflowContext
from dapr.ext.workflow import DaprWorkflowContext
from dapr_agents import WorkflowApp
from urllib.parse import urlparse, unquote
from dotenv import load_dotenv
@ -22,16 +22,19 @@ load_dotenv()
# Initialize the WorkflowApp
wfapp = WorkflowApp()


# Define structured output models
class SpeakerEntry(BaseModel):
    name: str
    text: str


class PodcastDialogue(BaseModel):
    participants: List[SpeakerEntry]


# Define Workflow logic
@wfapp.workflow(name='doc2podcast')
@wfapp.workflow(name="doc2podcast")
def doc2podcast(ctx: DaprWorkflowContext, input: Dict[str, Any]):
    # Extract pre-validated input
    podcast_name = input["podcast_name"]

@ -44,10 +47,13 @@ def doc2podcast(ctx: DaprWorkflowContext, input: Dict[str, Any]):
    audio_model = input["audio_model"]

    # Step 1: Assign voices to the team
    team_config = yield ctx.call_activity(assign_podcast_voices, input={
        "host_config": host_config,
        "participant_configs": participant_configs,
    })
    team_config = yield ctx.call_activity(
        assign_podcast_voices,
        input={
            "host_config": host_config,
            "participant_configs": participant_configs,
        },
    )

    # Step 2: Read PDF and get documents
    file_path = yield ctx.call_activity(download_pdf, input=file_input)

@ -67,7 +73,9 @@ def doc2podcast(ctx: DaprWorkflowContext, input: Dict[str, Any]):
            "context": accumulated_context,
            "participants": [p["name"] for p in team_config["participants"]],
        }
        generated_prompt = yield ctx.call_activity(generate_prompt, input=document_with_context)
        generated_prompt = yield ctx.call_activity(
            generate_prompt, input=document_with_context
        )

        # Use the prompt to generate the structured dialogue
        prompt_parameters = {

@ -76,7 +84,9 @@ def doc2podcast(ctx: DaprWorkflowContext, input: Dict[str, Any]):
            "prompt": generated_prompt,
            "max_rounds": max_rounds,
        }
        dialogue_entry = yield ctx.call_activity(generate_transcript, input=prompt_parameters)
        dialogue_entry = yield ctx.call_activity(
            generate_transcript, input=prompt_parameters
        )

        # Update context and transcript parts
        conversations = dialogue_entry["participants"]

@ -85,18 +95,30 @@ def doc2podcast(ctx: DaprWorkflowContext, input: Dict[str, Any]):
            transcript_parts.append(participant)

    # Step 4: Write the final transcript to a file
    yield ctx.call_activity(write_transcript_to_file, input={"podcast_dialogue": transcript_parts, "output_path": output_transcript_path})
    yield ctx.call_activity(
        write_transcript_to_file,
        input={
            "podcast_dialogue": transcript_parts,
            "output_path": output_transcript_path,
        },
    )

    # Step 5: Convert transcript to audio using team_config
    yield ctx.call_activity(convert_transcript_to_audio, input={
        "transcript_parts": transcript_parts,
        "output_path": output_audio_path,
        "voices": team_config,
        "model": audio_model,
    })
    yield ctx.call_activity(
        convert_transcript_to_audio,
        input={
            "transcript_parts": transcript_parts,
            "output_path": output_audio_path,
            "voices": team_config,
            "model": audio_model,
        },
    )


@wfapp.task
def assign_podcast_voices(host_config: Dict[str, Any], participant_configs: List[Dict[str, Any]]) -> Dict[str, Any]:
def assign_podcast_voices(
    host_config: Dict[str, Any], participant_configs: List[Dict[str, Any]]
) -> Dict[str, Any]:
    """
    Assign voices to the podcast host and participants.

@ -112,7 +134,9 @@ def assign_podcast_voices(host_config: Dict[str, Any], participant_configs: List
    # Assign voice to the host if not already specified
    if "voice" not in host_config:
        host_config["voice"] = next(voice for voice in allowed_voices if voice not in assigned_voices)
        host_config["voice"] = next(
            voice for voice in allowed_voices if voice not in assigned_voices
        )
    assigned_voices.add(host_config["voice"])

    # Assign voices to participants, ensuring no duplicates

@ -131,6 +155,7 @@ def assign_podcast_voices(host_config: Dict[str, Any], participant_configs: List
        "participants": updated_participants,
    }


@wfapp.task
def download_pdf(pdf_url: str, local_directory: str = ".") -> str:
    """

@ -142,7 +167,7 @@ def download_pdf(pdf_url: str, local_directory: str = ".") -> str:
    if not filename:
        raise ValueError("Invalid URL: Cannot determine filename from the URL.")

    filename = filename.replace(" ", "_")
    local_directory_path = Path(local_directory).resolve()
    local_directory_path.mkdir(parents=True, exist_ok=True)

@ -163,6 +188,7 @@ def download_pdf(pdf_url: str, local_directory: str = ".") -> str:
        logger.error(f"Error downloading PDF: {e}")
        raise


@wfapp.task
def read_pdf(file_path: str) -> List[dict]:
    """

@ -176,8 +202,15 @@ def read_pdf(file_path: str) -> List[dict]:
        logger.error(f"Error reading document: {e}")
        raise


@wfapp.task
def generate_prompt(text: str, iteration_index: int, total_iterations: int, context: str, participants: List[str]) -> str:
def generate_prompt(
    text: str,
    iteration_index: int,
    total_iterations: int,
    context: str,
    participants: List[str],
) -> str:
    """
    Generate a prompt dynamically for the chunk.
    """

@ -189,7 +222,7 @@ def generate_prompt(text: str, iteration_index: int, total_iterations: int, cont
    """

    if participants:
        participant_names = ', '.join(participants)
        participant_names = ", ".join(participants)
        instructions += f"\nPARTICIPANTS: {participant_names}"
    else:
        instructions += "\nPARTICIPANTS: None (Host-only conversation)"

@ -214,7 +247,7 @@ def generate_prompt(text: str, iteration_index: int, total_iterations: int, cont
    - Follow up on the previous discussion points and introduce the next topic naturally.
    """

    instructions += f"""
    instructions += """
    TASK:
    - Use the provided TEXT to guide this part of the conversation.
    - Alternate between speakers, ensuring a natural conversational flow.

@ -222,7 +255,9 @@ def generate_prompt(text: str, iteration_index: int, total_iterations: int, cont
    """
    return f"{instructions}\nTEXT:\n{text.strip()}"


@wfapp.task("""
@wfapp.task(
    """
    Generate a structured podcast dialogue based on the context and text provided.
    The podcast is titled '{podcast_name}' and is hosted by {host_name}.
    If participants are available, each speaker is limited to a maximum of {max_rounds} turns per iteration.

@ -231,26 +266,39 @@ def generate_prompt(text: str, iteration_index: int, total_iterations: int, cont
    If participants are not available, the host drives the conversation alone.
    Keep the dialogue concise and ensure a natural conversational flow.
    {prompt}
""")
def generate_transcript(podcast_name: str, host_name: str, prompt: str, max_rounds: int) -> PodcastDialogue:
    """
    """
)
def generate_transcript(
    podcast_name: str, host_name: str, prompt: str, max_rounds: int
) -> PodcastDialogue:
    pass


@wfapp.task
def write_transcript_to_file(podcast_dialogue: List[Dict[str, Any]], output_path: str) -> None:
def write_transcript_to_file(
    podcast_dialogue: List[Dict[str, Any]], output_path: str
) -> None:
    """
    Write the final structured transcript to a file.
    """
    try:
        with open(output_path, "w", encoding="utf-8") as file:
            import json

            json.dump(podcast_dialogue, file, ensure_ascii=False, indent=4)
        logger.info(f"Podcast dialogue successfully written to {output_path}")
    except Exception as e:
        logger.error(f"Error writing podcast dialogue to file: {e}")
        raise


@wfapp.task
def convert_transcript_to_audio(transcript_parts: List[Dict[str, Any]], output_path: str, voices: Dict[str, Any], model: str = "tts-1") -> None:
def convert_transcript_to_audio(
    transcript_parts: List[Dict[str, Any]],
    output_path: str,
    voices: Dict[str, Any],
    model: str = "tts-1",
) -> None:
    """
    Converts a transcript into a single audio file using the OpenAI Audio Client and pydub for concatenation.

@ -271,24 +319,30 @@ def convert_transcript_to_audio(transcript_parts: List[Dict[str, Any]], output_p
        for part in transcript_parts:
            speaker_name = part["name"]
            speaker_text = part["text"]
            assigned_voice = voice_mapping.get(speaker_name, "alloy")  # Default to "alloy" if not found
            assigned_voice = voice_mapping.get(
                speaker_name, "alloy"
            )  # Default to "alloy" if not found

            # Log assigned voice for debugging
            logger.info(f"Generating audio for {speaker_name} using voice '{assigned_voice}'.")
            logger.info(
                f"Generating audio for {speaker_name} using voice '{assigned_voice}'."
            )

            # Create TTS request
            tts_request = AudioSpeechRequest(
                model=model,
                input=speaker_text,
                voice=assigned_voice,
                response_format="mp3"
                response_format="mp3",
            )

            # Generate the audio
            audio_bytes = client.create_speech(request=tts_request)

            # Create an AudioSegment from the audio bytes
            audio_chunk = AudioSegment.from_file(io.BytesIO(audio_bytes), format=tts_request.response_format)
            audio_chunk = AudioSegment.from_file(
                io.BytesIO(audio_bytes), format=tts_request.response_format
            )

            # Append the audio to the combined segment
            combined_audio += audio_chunk + AudioSegment.silent(duration=300)

@ -301,17 +355,18 @@ def convert_transcript_to_audio(transcript_parts: List[Dict[str, Any]], output_p
        logger.error(f"Error during audio generation: {e}")
        raise

if __name__ == '__main__':

if __name__ == "__main__":
    import argparse
    import json
    import yaml

    def load_config(file_path: str) -> dict:
        """Load configuration from a JSON or YAML file."""
        with open(file_path, 'r') as file:
            if file_path.endswith('.yaml') or file_path.endswith('.yml'):
        with open(file_path, "r") as file:
            if file_path.endswith(".yaml") or file_path.endswith(".yml"):
                return yaml.safe_load(file)
            elif file_path.endswith('.json'):
            elif file_path.endswith(".json"):
                return json.load(file)
            else:
                raise ValueError("Unsupported file format. Use JSON or YAML.")

@ -323,11 +378,21 @@ if __name__ == '__main__':
    parser.add_argument("--podcast_name", type=str, help="Name of the podcast.")
    parser.add_argument("--host_name", type=str, help="Name of the host.")
    parser.add_argument("--host_voice", type=str, help="Voice for the host.")
    parser.add_argument("--participants", type=str, nargs='+', help="List of participant names.")
    parser.add_argument("--max_rounds", type=int, default=4, help="Number of turns per round.")
    parser.add_argument("--output_transcript_path", type=str, help="Path to save the output transcript.")
    parser.add_argument("--output_audio_path", type=str, help="Path to save the final audio file.")
    parser.add_argument("--audio_model", type=str, default="tts-1", help="Audio model for TTS.")
    parser.add_argument(
        "--participants", type=str, nargs="+", help="List of participant names."
    )
    parser.add_argument(
        "--max_rounds", type=int, default=4, help="Number of turns per round."
    )
    parser.add_argument(
        "--output_transcript_path", type=str, help="Path to save the output transcript."
    )
    parser.add_argument(
        "--output_audio_path", type=str, help="Path to save the final audio file."
    )
    parser.add_argument(
        "--audio_model", type=str, default="tts-1", help="Audio model for TTS."
    )

    args = parser.parse_args()

@ -337,15 +402,18 @@ if __name__ == '__main__':
    # Merge CLI and Config inputs
    user_input = {
        "pdf_url": args.pdf_url or config.get("pdf_url"),
        "podcast_name": args.podcast_name or config.get("podcast_name", "Default Podcast"),
        "podcast_name": args.podcast_name
        or config.get("podcast_name", "Default Podcast"),
        "host": {
            "name": args.host_name or config.get("host", {}).get("name", "Host"),
            "voice": args.host_voice or config.get("host", {}).get("voice", "alloy"),
        },
        "participants": config.get("participants", []),
        "max_rounds": args.max_rounds or config.get("max_rounds", 4),
        "output_transcript_path": args.output_transcript_path or config.get("output_transcript_path", "podcast_dialogue.json"),
        "output_audio_path": args.output_audio_path or config.get("output_audio_path", "final_podcast.mp3"),
        "output_transcript_path": args.output_transcript_path
        or config.get("output_transcript_path", "podcast_dialogue.json"),
        "output_audio_path": args.output_audio_path
        or config.get("output_audio_path", "final_podcast.mp3"),
        "audio_model": args.audio_model or config.get("audio_model", "tts-1"),
    }

@ -356,6 +424,6 @@ if __name__ == '__main__':
    # Validate inputs
    if not user_input["pdf_url"]:
        raise ValueError("PDF URL must be provided via CLI or config file.")

    # Run the workflow
    wfapp.run_and_monitor_workflow(workflow=doc2podcast, input=user_input)
    wfapp.run_and_monitor_workflow_sync(workflow=doc2podcast, input=user_input)
@ -0,0 +1,68 @@
from dapr_agents import OpenAIChatClient, NVIDIAChatClient
from dapr.ext.workflow import DaprWorkflowContext
from dapr_agents.workflow import WorkflowApp, task, workflow
from dotenv import load_dotenv
import os
import logging

load_dotenv()

nvidia_llm = NVIDIAChatClient(
    model="meta/llama-3.1-8b-instruct", api_key=os.getenv("NVIDIA_API_KEY")
)

oai_llm = OpenAIChatClient(
    api_key=os.getenv("OPENAI_API_KEY"),
    model="gpt-4o",
    base_url=os.getenv("OPENAI_API_BASE_URL"),
)

azoai_llm = OpenAIChatClient(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    azure_deployment="gpt-4o-mini",
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    azure_api_version="2024-12-01-preview",
)


@workflow
def test_workflow(ctx: DaprWorkflowContext):
    """
    A simple workflow that uses a multi-modal task chain.
    """
    oai_results = yield ctx.call_activity(invoke_oai, input="Peru")
    azoai_results = yield ctx.call_activity(invoke_azoai, input=oai_results)
    nvidia_results = yield ctx.call_activity(invoke_nvidia, input=azoai_results)
    return nvidia_results


@task(
    description="What is the name of the capital of {country}?. Reply with just the name.",
    llm=oai_llm,
)
def invoke_oai(country: str) -> str:
    pass


@task(description="What is a famous thing about {capital}?", llm=azoai_llm)
def invoke_azoai(capital: str) -> str:
    pass


@task(
    description="Context: {context}. From the previous context. Pick one thing to do.",
    llm=nvidia_llm,
)
def invoke_nvidia(context: str) -> str:
    pass


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    wfapp = WorkflowApp()

    results = wfapp.run_and_monitor_workflow_sync(workflow=test_workflow)

    logging.info("Workflow results: %s", results)
    logging.info("Workflow completed successfully.")
@ -0,0 +1,151 @@
{
  "instances": {
    "22fb2349f9a742279ddbfae9da3330ac": {
      "input": "What is 1 + 1?",
      "output": "The task is currently in progress. The MathematicsAgent has successfully acknowledged the mathematical problem, identified the operands (both as 1), and set up the addition operation 1 + 1. The initial result of the addition operation has been completed, yielding 2.0.\n\nNext steps involve verifying the calculation result to ensure its accuracy and confirming the solution to conclude the task.",
      "start_time": "2025-04-21T03:19:34.372003",
      "end_time": "2025-04-21T03:19:53.991669",
      "messages": [
        {
          "id": "61df6088-707b-4c39-aaad-a428f89f6007",
          "role": "user",
          "content": "## Mission Briefing\n\nWe have received the following task:\n\nWhat is 1 + 1?\n\n### Team of Agents\n- MathematicsAgent: Calculator Assistant (Goal: Assist Humans with calculation tasks.)\n\n### Execution Plan\nHere is the structured approach the team will follow to accomplish the task:\n\n[{'step': 1, 'description': 'Initiate the process by acknowledging the mathematical problem to solve: Determine the sum of 1 + 1.', 'status': 'not_started', 'substeps': None}, {'step': 2, 'description': \"Identify and note the operands involved in the calculation: The first number is '1', and the second number is '1'.\", 'status': 'not_started', 'substeps': [{'substep': 2.1, 'description': 'Record the first operand: 1', 'status': 'not_started'}, {'substep': 2.2, 'description': 'Record the second operand: 1', 'status': 'not_started'}]}, {'step': 3, 'description': 'Perform the addition of the identified numbers: Add the first number to the second number.', 'status': 'not_started', 'substeps': [{'substep': 3.1, 'description': 'Set up the addition operation: 1 + 1.', 'status': 'not_started'}, {'substep': 3.2, 'description': 'Execute the addition operation.', 'status': 'not_started'}]}, {'step': 4, 'description': 'Verify the calculation result to ensure accuracy of the addition process.', 'status': 'not_started', 'substeps': [{'substep': 4.1, 'description': 'Check if the operation result matches expected arithmetic principles.', 'status': 'not_started'}]}, {'step': 5, 'description': 'Conclude the task by recording and confirming the correct solution to the problem. Result: 2.', 'status': 'not_started', 'substeps': None}]\n",
          "timestamp": "2025-04-21T03:19:38.416157",
          "name": "LLMOrchestrator"
        },
        {
          "id": "9b758573-9ec5-4d59-a3ac-d0cb9941b4eb",
          "role": "user",
          "content": "Initiate the process by acknowledging the mathematical problem to solve: Determine the sum of 1 + 1.",
          "timestamp": "2025-04-21T03:19:39.479920",
          "name": "LLMOrchestrator"
        },
        {
          "id": "c29d839c-3a54-4c8f-bf33-88a228d4e902",
          "role": "user",
          "content": "Acknowledging the task: We need to determine the sum of 1 + 1. Let's proceed to the next step and identify the operands involved in this calculation.",
          "timestamp": "2025-04-21T03:19:40.831663",
          "name": "MathematicsAgent"
        },
        {
          "id": "cb95ebd3-8eb8-48c6-aaa3-ba2507ee9a62",
          "role": "user",
          "content": "Please record the second operand: 1.",
          "timestamp": "2025-04-21T03:19:43.579299",
          "name": "LLMOrchestrator"
        },
        {
          "id": "c3b61fe8-ea8b-48a8-a255-7d495d825727",
          "role": "user",
          "content": "The second operand involved in this calculation is recorded as: 1. Now, let's proceed to perform the addition of the identified numbers.",
          "timestamp": "2025-04-21T03:19:44.589307",
          "name": "MathematicsAgent"
        },
        {
          "id": "7fd1f829-eeae-42f0-b6f9-cf3ef6464fff",
          "role": "user",
          "content": "Proceed to set up the addition operation with the recorded operands: 1 + 1.",
          "timestamp": "2025-04-21T03:19:47.387450",
          "name": "LLMOrchestrator"
        },
        {
          "id": "035932ab-766e-43af-bab8-5fd1a8c6dd4b",
          "role": "user",
          "content": "The result of the addition operation 1 + 1 is 2.0. Let's verify the calculation result to ensure the accuracy of the addition process.",
          "timestamp": "2025-04-21T03:19:50.046203",
          "name": "MathematicsAgent"
        }
      ],
      "last_message": {
        "id": "035932ab-766e-43af-bab8-5fd1a8c6dd4b",
        "role": "user",
        "content": "The result of the addition operation 1 + 1 is 2.0. Let's verify the calculation result to ensure the accuracy of the addition process.",
        "timestamp": "2025-04-21T03:19:50.046203",
        "name": "MathematicsAgent"
      },
      "plan": [
        {
          "step": 1,
          "description": "Initiate the process by acknowledging the mathematical problem to solve: Determine the sum of 1 + 1.",
          "status": "completed",
          "substeps": null
        },
        {
          "step": 2,
          "description": "Identify and note the operands involved in the calculation: The first number is '1', and the second number is '1'.",
          "status": "completed",
          "substeps": [
            {
              "substep": 2.1,
              "description": "Record the first operand: 1",
              "status": "completed"
            },
            {
              "substep": 2.2,
              "description": "Record the second operand: 1",
              "status": "completed"
            }
          ]
        },
        {
          "step": 3,
          "description": "Perform the addition of the identified numbers: Add the first number to the second number.",
          "status": "in_progress",
          "substeps": [
            {
              "substep": 3.1,
              "description": "Set up the addition operation: 1 + 1.",
              "status": "completed"
            },
            {
              "substep": 3.2,
              "description": "Execute the addition operation.",
              "status": "not_started"
            }
          ]
        },
        {
          "step": 4,
          "description": "Verify the calculation result to ensure accuracy of the addition process.",
          "status": "not_started",
          "substeps": [
            {
              "substep": 4.1,
              "description": "Check if the operation result matches expected arithmetic principles.",
              "status": "not_started"
            }
          ]
        },
        {
          "step": 5,
          "description": "Conclude the task by recording and confirming the correct solution to the problem. Result: 2.",
          "status": "in_progress",
          "substeps": null
        }
      ],
      "task_history": [
        {
          "agent": "MathematicsAgent",
          "step": 1,
          "substep": null,
          "result": "Acknowledging the task: We need to determine the sum of 1 + 1. Let's proceed to the next step and identify the operands involved in this calculation.",
          "timestamp": "2025-04-21T03:19:40.835007"
        },
        {
          "agent": "MathematicsAgent",
          "step": 2,
          "substep": 2.2,
          "result": "The second operand involved in this calculation is recorded as: 1. Now, let's proceed to perform the addition of the identified numbers.",
          "timestamp": "2025-04-21T03:19:44.590818"
        },
        {
          "agent": "MathematicsAgent",
          "step": 3,
          "substep": 3.1,
          "result": "The result of the addition operation 1 + 1 is 2.0. Let's verify the calculation result to ensure the accuracy of the addition process.",
          "timestamp": "2025-04-21T03:19:50.048520"
        }
      ]
    }
  }
}
@ -0,0 +1,413 @@
# Dapr Agents Calculator Demo

## Prerequisites

- Python 3.10 or later
- Dapr CLI (v1.15.x)
- Redis (for state storage and pub/sub)
- Azure OpenAI API key
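A quick way to sanity-check these prerequisites from a shell before continuing (each command assumes the corresponding CLI is already on your PATH):

```bash
python3 --version   # expect Python 3.10 or later
dapr --version      # expect CLI version 1.15.x
redis-cli ping      # expect PONG from a local Redis
```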
## Setup

1. Create and activate a virtual environment:

```bash
# Create a virtual environment
python3.10 -m venv .venv

# Activate the virtual environment
# On Windows:
.venv\Scripts\activate
# On macOS/Linux:
source .venv/bin/activate
```

2. Install dependencies:

```bash
pip install -r requirements.txt
```

3. Set up environment variables: create a `.env` file to securely store your API keys and other sensitive information. For example:

```
OPENAI_API_KEY="your-api-key"
OPENAI_BASE_URL="https://api.openai.com/v1"
```
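The scripts in this demo read these values with `python-dotenv`, the same pattern used by the workflow examples earlier in this compare. A minimal sketch of that pattern (the error message is illustrative):

```python
import os

from dotenv import load_dotenv

# Reads the .env file from the current working directory into os.environ
load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("OPENAI_API_KEY is not set; check your .env file")
```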
## Running the Application

Make sure Redis is running on your local machine (default port 6379). The `./components` folder referenced by the commands below holds the Dapr component definitions; a sketch of one follows the steps.

### Running All Components with Dapr

1. Start the calculator agent:

```bash
dapr run --app-id CalculatorApp --app-port 8002 --resources-path ./components python calculator_agent.py
```

2. Start the LLM orchestrator:

```bash
dapr run --app-id OrchestratorApp --app-port 8004 --resources-path ./components python llm_orchestrator.py
```

3. Run the client:

```bash
dapr run --app-id ClientApp --dapr-http-port 3502 --resources-path ./components -- python client.py
```
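The `--resources-path ./components` flag tells each `dapr run` invocation where to load those component definitions from. A minimal Redis pub/sub component of the kind these commands expect looks like the sketch below; the component name `messagepubsub` and the file name are assumptions for illustration, so match them to what the code in this demo actually references:

```yaml
# components/pubsub.yaml (illustrative name)
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: messagepubsub
spec:
  type: pubsub.redis
  version: v1
  metadata:
    - name: redisHost
      value: localhost:6379
    - name: redisPassword
      value: ""
```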
## Expected Behavior

### LLM Orchestrator

```
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Workflow iteration 1 started (Instance ID: 22fb2349f9a742279ddbfae9da3330ac).
== APP == 2025-04-21 03:19:34.372 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.base:Started workflow with instance ID 22fb2349f9a742279ddbfae9da3330ac.
== APP == INFO:dapr_agents.workflow.base:Monitoring workflow '22fb2349f9a742279ddbfae9da3330ac'...
== APP == 2025-04-21 03:19:34.377 durabletask-client INFO: Waiting up to 300s for instance '22fb2349f9a742279ddbfae9da3330ac' to complete.
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Initial message from User -> LLMOrchestrator
== APP == 2025-04-21 03:19:34.383 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == 2025-04-21 03:19:38.396 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == 2025-04-21 03:19:38.410 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator broadcasting message to beacon_channel.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == 2025-04-21 03:19:38.427 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == 2025-04-21 03:19:39.462 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == 2025-04-21 03:19:39.476 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator broadcasting message to beacon_channel.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == 2025-04-21 03:19:39.490 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Triggering agent MathematicsAgent for step 1, substep None (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Marked step 1, substep None as 'in_progress'
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator sending message to agent 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'TriggerAction' to topic 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Waiting for MathematicsAgent's response...
== APP == 2025-04-21 03:19:39.502 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 1 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'AgentTaskResponse'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_agent_response' for event type 'AgentTaskResponse'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:LLMOrchestrator processing agent response for workflow instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Raising workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'
== APP == 2025-04-21 03:19:40.819 durabletask-client INFO: Raising event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Successfully raised workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'!
== APP == 2025-04-21 03:19:40.827 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac Event raised: agenttaskresponse
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:MathematicsAgent sent a response.
== APP == 2025-04-21 03:19:40.827 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating task history for MathematicsAgent at step 1, substep None (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == 2025-04-21 03:19:40.843 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Tracking Progress: {'verdict': 'continue', 'plan_needs_update': False, 'plan_status_update': [{'step': 1, 'substep': None, 'status': 'completed'}, {'step': 2, 'substep': None, 'status': 'in_progress'}, {'step': 2, 'substep': 2.1, 'status': 'in_progress'}], 'plan_restructure': None}
== APP == 2025-04-21 03:19:42.532 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating plan for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 1, substep None to 'completed'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 2, substep None to 'in_progress'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 2, substep 2.1 to 'in_progress'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Plan successfully updated for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Workflow iteration 2 started (Instance ID: 22fb2349f9a742279ddbfae9da3330ac).
== APP == 2025-04-21 03:19:42.543 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == 2025-04-21 03:19:42.552 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == 2025-04-21 03:19:43.561 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == 2025-04-21 03:19:43.574 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator broadcasting message to beacon_channel.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == 2025-04-21 03:19:43.593 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Triggering agent MathematicsAgent for step 2, substep 2.2 (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Marked step 2, substep 2.2 as 'in_progress'
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator sending message to agent 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'TriggerAction' to topic 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Waiting for MathematicsAgent's response...
== APP == 2025-04-21 03:19:43.605 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 1 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'AgentTaskResponse'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_agent_response' for event type 'AgentTaskResponse'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:LLMOrchestrator processing agent response for workflow instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Raising workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'
== APP == 2025-04-21 03:19:44.581 durabletask-client INFO: Raising event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Successfully raised workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'!
== APP == 2025-04-21 03:19:44.585 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac Event raised: agenttaskresponse
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:MathematicsAgent sent a response.
== APP == 2025-04-21 03:19:44.585 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating task history for MathematicsAgent at step 2, substep 2.2 (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == 2025-04-21 03:19:44.600 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Tracking Progress: {'verdict': 'continue', 'plan_needs_update': False, 'plan_status_update': [{'step': 2, 'substep': 2.1, 'status': 'completed'}, {'step': 2, 'substep': 2.2, 'status': 'completed'}, {'step': 2, 'substep': None, 'status': 'completed'}], 'plan_restructure': None}
== APP == 2025-04-21 03:19:46.130 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating plan for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 2, substep 2.1 to 'completed'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 2, substep 2.2 to 'completed'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 2, substep None to 'completed'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Plan successfully updated for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Workflow iteration 3 started (Instance ID: 22fb2349f9a742279ddbfae9da3330ac).
== APP == 2025-04-21 03:19:46.159 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == 2025-04-21 03:19:46.174 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == 2025-04-21 03:19:47.370 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == 2025-04-21 03:19:47.383 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator broadcasting message to beacon_channel.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == 2025-04-21 03:19:47.403 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Triggering agent MathematicsAgent for step 3, substep 3.1 (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Marked step 3, substep 3.1 as 'in_progress'
== APP == INFO:dapr_agents.workflow.agentic:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.workflow.agentic:LLMOrchestrator sending message to agent 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:LLMOrchestrator published 'TriggerAction' to topic 'MathematicsAgent'.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Waiting for MathematicsAgent's response...
== APP == 2025-04-21 03:19:47.417 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 1 task(s) and 1 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'AgentTaskResponse'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_agent_response' for event type 'AgentTaskResponse'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:LLMOrchestrator processing agent response for workflow instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Raising workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'
== APP == 2025-04-21 03:19:50.031 durabletask-client INFO: Raising event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'.
== APP == INFO:dapr_agents.workflow.base:Successfully raised workflow event 'AgentTaskResponse' for instance '22fb2349f9a742279ddbfae9da3330ac'!
== APP == 2025-04-21 03:19:50.038 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac Event raised: agenttaskresponse
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:MathematicsAgent sent a response.
== APP == 2025-04-21 03:19:50.039 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating task history for MathematicsAgent at step 3, substep 3.1 (Instance ID: 22fb2349f9a742279ddbfae9da3330ac)
== APP == 2025-04-21 03:19:50.055 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.utils.request:Structured Mode Activated! Mode=json.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == INFO:dapr_agents.llm.utils.response:Structured output was successfully validated.
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Tracking Progress: {'verdict': 'completed', 'plan_needs_update': False, 'plan_status_update': [{'step': 3, 'substep': 3.1, 'status': 'completed'}, {'step': 3, 'substep': 3.2, 'status': 'completed'}, {'step': 3, 'substep': None, 'status': 'completed'}, {'step': 4, 'substep': 4.1, 'status': 'completed'}, {'step': 4, 'substep': None, 'status': 'completed'}, {'step': 5, 'substep': None, 'status': 'completed'}], 'plan_restructure': None}
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Workflow ending with verdict: completed
== APP == 2025-04-21 03:19:52.263 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Task with LLM...
== APP == INFO:dapr_agents.workflow.task:Retrieving conversation history...
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == 2025-04-21 03:19:53.984 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestrator yielded with 2 task(s) and 0 event(s) outstanding.
== APP == INFO:dapr_agents.workflow.task:Invoking Regular Task
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updating plan for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Updated status of step 3, substep 3.1 to 'completed'
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Plan successfully updated for instance 22fb2349f9a742279ddbfae9da3330ac
== APP == INFO:dapr_agents.workflow.orchestrators.llm.orchestrator:Workflow 22fb2349f9a742279ddbfae9da3330ac has been finalized with verdict: completed
== APP == 2025-04-21 03:19:53.998 durabletask-worker INFO: 22fb2349f9a742279ddbfae9da3330ac: Orchestration completed with status: COMPLETED
INFO[0044] 22fb2349f9a742279ddbfae9da3330ac: 'LLMWorkflow' completed with a COMPLETED status. app_id=OrchestratorApp instance=mac.lan scope=dapr.wfengine.durabletask.backend type=log ver=1.15.3
INFO[0044] Workflow Actor '22fb2349f9a742279ddbfae9da3330ac': workflow completed with status 'ORCHESTRATION_STATUS_COMPLETED' workflowName 'LLMWorkflow' app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.targets.workflow type=log ver=1.15.3
== APP == 2025-04-21 03:19:53.999 durabletask-client INFO: Instance '22fb2349f9a742279ddbfae9da3330ac' completed.
== APP == INFO:dapr_agents.workflow.base:Workflow 22fb2349f9a742279ddbfae9da3330ac completed with status: WorkflowStatus.COMPLETED.
== APP == INFO:dapr_agents.workflow.base:Workflow '22fb2349f9a742279ddbfae9da3330ac' completed successfully. Status: COMPLETED.
== APP == INFO:dapr_agents.workflow.base:Finished monitoring workflow '22fb2349f9a742279ddbfae9da3330ac'.
INFO[0076] Placement tables updated, version: 103 app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.placement type=log ver=1.15.3
INFO[0076] Running actor reminder migration from state store to scheduler app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.reminders.migration type=log ver=1.15.3
INFO[0076] Skipping migration, no missing scheduler reminders found app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.reminders.migration type=log ver=1.15.3
INFO[0076] Found 0 missing scheduler reminders from state store app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.reminders.migration type=log ver=1.15.3
INFO[0076] Migrated 0 reminders from state store to scheduler successfully app_id=OrchestratorApp instance=mac.lan scope=dapr.runtime.actors.reminders.migration type=log ver=1.15.3
^Cℹ️  terminated signal received: shutting down
INFO[0081] Received signal 'interrupt'; beginning shutdown app_id=OrchestratorApp instance=mac.lan scope=dapr.signals type=log ver=1.15.3
✅ Exited Dapr successfully
✅ Exited App successfully
```
### MathematicsAgent

```
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'LLMOrchestrator'.
== APP == INFO:dapr_agents.agent.actor.base:Activating actor with ID: MathematicsAgent
== APP == INFO:dapr_agents.agent.actor.base:Initializing state for MathematicsAgent
WARN[0021] Redis does not support transaction rollbacks and should not be used in production as an actor state store. app_id=CalculatorApp component="workflowstatestore (state.redis/v1)" instance=mac.lan scope=dapr.contrib type=log ver=1.15.3
== APP == INFO:     127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/AddMessage HTTP/1.1" 200 OK
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'LLMOrchestrator'.
== APP == INFO:     127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/AddMessage HTTP/1.1" 200 OK
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'TriggerAction'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_trigger_action' for event type 'TriggerAction'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received TriggerAction from LLMOrchestrator.
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent executing default task from memory.
== APP == INFO:dapr_agents.agent.actor.base:Actor MathematicsAgent invoking a task
== APP == INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 1/10 started.
== APP == INFO:dapr_agents.llm.utils.request:Tools are available in the request.
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
== APP == user:
== APP == Initiate the process by acknowledging the mathematical problem to solve: Determine the sum of 1 + 1.
== APP ==
== APP == --------------------------------------------------------------------------------
== APP ==
== APP == assistant:
== APP == Acknowledging the task: We need to determine the sum of 1 + 1. Let's proceed to the next step and identify the operands involved in this calculation.
== APP ==
== APP == --------------------------------------------------------------------------------
== APP ==
== APP == INFO:     127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/InvokeTask HTTP/1.1" 200 OK
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent broadcasting message to selected agents.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'BroadcastMessage' to topic 'beacon_channel'.
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent sending message to agent 'LLMOrchestrator'.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'MathematicsAgent'.
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent ignored its own broadcast message of type 'BroadcastMessage'.
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'AgentTaskResponse' to topic 'LLMOrchestrator'.
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'LLMOrchestrator'.
== APP == INFO:     127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/AddMessage HTTP/1.1" 200 OK
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'TriggerAction'...
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_trigger_action' for event type 'TriggerAction'
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received TriggerAction from LLMOrchestrator.
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent executing default task from memory.
== APP == INFO:dapr_agents.agent.actor.base:Actor MathematicsAgent invoking a task
== APP == INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 1/10 started.
|
||||
== APP == INFO:dapr_agents.llm.utils.request:Tools are available in the request.
|
||||
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
|
||||
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
|
||||
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
|
||||
== APP == user:
|
||||
== APP == Please record the second operand: 1.
|
||||
== APP ==
|
||||
== APP == --------------------------------------------------------------------------------
|
||||
== APP ==
|
||||
== APP == assistant:
|
||||
== APP == The second operand involved in this calculation is recorded as: 1. Now, let's proceed to perform the addition of the identified numbers.
|
||||
== APP ==
|
||||
== APP == --------------------------------------------------------------------------------
|
||||
== APP ==
|
||||
== APP == INFO: 127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/InvokeTask HTTP/1.1" 200 OK
|
||||
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
|
||||
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent broadcasting message to selected agents.
|
||||
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'BroadcastMessage' to topic 'beacon_channel'.
|
||||
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
|
||||
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent sending message to agent 'LLMOrchestrator'.
|
||||
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
|
||||
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
|
||||
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'MathematicsAgent'.
|
||||
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent ignored its own broadcast message of type 'BroadcastMessage'.
|
||||
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'AgentTaskResponse' to topic 'LLMOrchestrator'.
|
||||
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
|
||||
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
|
||||
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'LLMOrchestrator'.
|
||||
== APP == INFO: 127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/AddMessage HTTP/1.1" 200 OK
|
||||
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'TriggerAction'...
|
||||
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_trigger_action' for event type 'TriggerAction'
|
||||
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received TriggerAction from LLMOrchestrator.
|
||||
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent executing default task from memory.
|
||||
== APP == INFO:dapr_agents.agent.actor.base:Actor MathematicsAgent invoking a task
|
||||
== APP == INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 1/10 started.
|
||||
== APP == INFO:dapr_agents.llm.utils.request:Tools are available in the request.
|
||||
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
|
||||
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
|
||||
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
|
||||
== APP == INFO:dapr_agents.agent.patterns.toolcall.base:Executing Add with arguments {"a":1,"b":1}
|
||||
== APP == INFO:dapr_agents.tool.executor:Running tool (auto): Add
|
||||
== APP == INFO:dapr_agents.agent.patterns.toolcall.base:Iteration 2/10 started.
|
||||
== APP == INFO:dapr_agents.llm.utils.request:Tools are available in the request.
|
||||
== APP == INFO:dapr_agents.llm.openai.chat:Invoking ChatCompletion API.
|
||||
== APP == INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
|
||||
== APP == INFO:dapr_agents.llm.openai.chat:Chat completion retrieved successfully.
|
||||
== APP == user:
|
||||
== APP == Proceed to set up the addition operation with the recorded operands: 1 + 1.
|
||||
== APP ==
|
||||
== APP == --------------------------------------------------------------------------------
|
||||
== APP ==
|
||||
== APP == assistant:
|
||||
== APP == Function name: Add (Call Id: call_ac3Xlh4pn7tBFkrI2K9uOqvG)
|
||||
== APP == Arguments: {"a":1,"b":1}
|
||||
== APP ==
|
||||
== APP == --------------------------------------------------------------------------------
|
||||
== APP ==
|
||||
== APP == Add(tool) (Id: call_ac3Xlh4pn7tBFkrI2K9uOqvG):
|
||||
== APP == 2.0
|
||||
== APP ==
|
||||
== APP == --------------------------------------------------------------------------------
|
||||
== APP ==
|
||||
== APP == assistant:
|
||||
== APP == The result of the addition operation 1 + 1 is 2.0. Let's verify the calculation result to ensure the accuracy of the addition process.
|
||||
== APP ==
|
||||
== APP == --------------------------------------------------------------------------------
|
||||
== APP ==
|
||||
== APP == INFO: 127.0.0.1:59669 - "PUT /actors/MathematicsAgentActor/MathematicsAgent/method/InvokeTask HTTP/1.1" 200 OK
|
||||
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
|
||||
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent broadcasting message to selected agents.
|
||||
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'BroadcastMessage' to topic 'beacon_channel'.
|
||||
== APP == INFO:dapr_agents.agent.actor.service:Agents found in 'agentstatestore' for key 'agents_registry'.
|
||||
== APP == INFO:dapr_agents.agent.actor.service:MathematicsAgent sending message to agent 'LLMOrchestrator'.
|
||||
== APP == INFO:dapr_agents.workflow.messaging.parser:Validating payload with model 'BroadcastMessage'...
|
||||
== APP == INFO:dapr_agents.workflow.messaging.routing:Dispatched to handler 'process_broadcast_message' for event type 'BroadcastMessage'
|
||||
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent received broadcast message of type 'BroadcastMessage' from 'MathematicsAgent'.
|
||||
== APP == INFO:dapr_agents.agent.actor.agent:MathematicsAgent ignored its own broadcast message of type 'BroadcastMessage'.
|
||||
== APP == INFO:dapr_agents.workflow.messaging.pubsub:MathematicsAgent published 'AgentTaskResponse' to topic 'LLMOrchestrator'.
|
||||
^Cℹ️
|
||||
terminated signal received: shutting down
|
||||
✅ Exited Dapr successfully
|
||||
✅ Exited App successfully
|
||||
```
|
|
@ -0,0 +1,59 @@
from dapr_agents import tool
from dapr_agents import AgentActor
from pydantic import BaseModel, Field
from dapr_agents import Agent
from dotenv import load_dotenv
import logging
import asyncio
import os


class AddSchema(BaseModel):
    a: float = Field(description="first number to add")
    b: float = Field(description="second number to add")


@tool(args_model=AddSchema)
def add(a: float, b: float) -> float:
    """Add two numbers."""
    return a + b


class SubSchema(BaseModel):
    a: float = Field(description="first number to subtract")
    b: float = Field(description="second number to subtract")


@tool(args_model=SubSchema)
def sub(a: float, b: float) -> float:
    """Subtract two numbers."""
    return a - b


async def main():
    calculator_agent = Agent(
        name="MathematicsAgent",
        role="Calculator Assistant",
        goal="Assist Humans with calculation tasks.",
        instructions=[
            "Get accurate calculation results",
            "Break down the calculation into smaller steps.",
        ],
        tools=[add, sub],
    )

    calculator_service = AgentActor(
        agent=calculator_agent,
        message_bus_name="pubsub",
        agents_registry_key="agents_registry",
        agents_registry_store_name="agentstatestore",
        service_port=8002,
    )

    await calculator_service.start()


if __name__ == "__main__":
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())

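Worth noting: each tool pairs a plain function with a Pydantic `args_model`, and that schema is what validates the `{"a":1,"b":1}` argument payloads visible in the run above. A quick sanity-check sketch of that validation layer (standard Pydantic behavior; the inputs are illustrative):

```python
# Minimal sketch: exercising AddSchema (defined above), roughly the check
# the tool layer performs before the add() function runs.
from pydantic import ValidationError

args = AddSchema(a=1, b=1)   # ints are coerced to float
print(args.a + args.b)       # 2.0, matching the Add tool output in the logs

try:
    AddSchema(a="one", b=1)  # rejected: not a number
except ValidationError as err:
    print(err.errors()[0]["loc"])  # ('a',)
```
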
@ -0,0 +1,62 @@
#!/usr/bin/env python3
import json
import sys
import time
from dapr.clients import DaprClient

# Default Pub/Sub component
PUBSUB_NAME = "pubsub"


def main(orchestrator_topic, max_attempts=10, retry_delay=1):
    """
    Publishes a task to a specified Dapr Pub/Sub topic with retries.

    Args:
        orchestrator_topic (str): The name of the orchestrator topic.
        max_attempts (int): Maximum number of retry attempts.
        retry_delay (int): Delay in seconds between attempts.
    """
    task_message = {
        "task": "What is 1 + 1?",
    }

    time.sleep(5)

    attempt = 1

    while attempt <= max_attempts:
        try:
            print(
                f"📢 Attempt {attempt}: Publishing to topic '{orchestrator_topic}'..."
            )

            with DaprClient() as client:
                client.publish_event(
                    pubsub_name=PUBSUB_NAME,
                    topic_name=orchestrator_topic,
                    data=json.dumps(task_message),
                    data_content_type="application/json",
                    publish_metadata={
                        "cloudevent.type": "TriggerAction",
                    },
                )

            print(f"✅ Successfully published request to '{orchestrator_topic}'")
            sys.exit(0)

        except Exception as e:
            print(f"❌ Request failed: {e}")

        attempt += 1
        print(f"⏳ Waiting {retry_delay}s before next attempt...")
        time.sleep(retry_delay)

    print(f"❌ Maximum attempts ({max_attempts}) reached without success.")
    sys.exit(1)


if __name__ == "__main__":
    orchestrator_topic = "LLMOrchestrator"

    main(orchestrator_topic)

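The `cloudevent.type` metadata is doing the routing work here: the messaging layer matches it against the model type a handler declares, which is why the logs show `Dispatched to handler 'process_trigger_action' for event type 'TriggerAction'`. A sketch of the receiving shape (the `message_router` decorator and `TriggerAction` schema appear in the library code later in this changeset; the class and body below are illustrative only):

```python
from dapr_agents.agent.actor.schemas import TriggerAction
from dapr_agents.workflow.messaging.decorator import message_router


class OrchestratorStub:
    @message_router
    async def process_trigger_action(self, message: TriggerAction):
        # Reached only for CloudEvents whose type is 'TriggerAction';
        # the JSON body has already been validated against the model.
        print(f"Task received: {message.task}")
```
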
@ -0,0 +1,14 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: agentstatestore
spec:
  type: state.redis
  version: v1
  metadata:
  - name: redisHost
    value: localhost:6379
  - name: redisPassword
    value: ""
  - name: keyPrefix
    value: none

@ -0,0 +1,12 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: pubsub
spec:
  type: pubsub.redis
  version: v1
  metadata:
  - name: redisHost
    value: localhost:6379
  - name: redisPassword
    value: ""

@ -0,0 +1,14 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: workflowstatestore
spec:
  type: state.redis
  version: v1
  metadata:
  - name: redisHost
    value: localhost:6379
  - name: redisPassword
    value: ""
  - name: actorStateStore
    value: "true"

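Only `workflowstatestore` sets `actorStateStore: "true"`: Dapr allows exactly one actor-enabled state store per app, and the actor/workflow runtime uses it, while `agentstatestore` holds the shared agent registry. A minimal connectivity check against these components — a sketch using the Dapr Python SDK, assuming a sidecar is running with the files above loaded:

```python
from dapr.clients import DaprClient

# Smoke test: write and read a key through the 'agentstatestore' component.
with DaprClient() as client:
    client.save_state(store_name="agentstatestore", key="smoke-test", value="ok")
    item = client.get_state(store_name="agentstatestore", key="smoke-test")
    print(item.data)  # b'ok'
```
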
@ -0,0 +1,29 @@
from dapr_agents import LLMOrchestrator
from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        workflow_service = LLMOrchestrator(
            name="LLMOrchestrator",
            message_bus_name="pubsub",
            state_store_name="workflowstatestore",
            state_key="workflow_state",
            agents_registry_store_name="agentstatestore",
            agents_registry_key="agents_registry",
            max_iterations=20,  # Increased from 3 to 20 to avoid potential issues
        ).as_service(port=8004)

        await workflow_service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

@ -0,0 +1,2 @@
dapr-agents>=0.4.2
python-dotenv

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        # Define Agent

@ -15,8 +16,8 @@ async def main():
            "Be swift, silent, and precise, moving effortlessly across any terrain.",
            "Use superior vision and heightened senses to scout ahead and detect threats.",
            "Excel in ranged combat, delivering pinpoint arrow strikes from great distances.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
        ]
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task.",
        ],
    )

        # Expose Agent as an Actor over a Service

@ -32,9 +33,10 @@ async def main():
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())
    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        # Define Agent

@ -15,26 +16,27 @@ async def main():
            "Endure hardships and temptations, staying true to the mission even when faced with doubt.",
            "Seek guidance and trust allies, but bear the ultimate burden alone when necessary.",
            "Move carefully through enemy-infested lands, avoiding unnecessary risks.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
        ]
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task.",
        ],
    )

        # Expose Agent as an Actor over a Service
        hobbit_service = AgentActor(
            agent=hobbit_agent,
            message_bus_name="messagepubsub",
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
            service_port=8001
            service_port=8001,
        )

        await hobbit_service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())
    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        # Define Agent

@ -15,8 +16,8 @@ async def main():
            "Provide strategic counsel, always considering the long-term consequences of actions.",
            "Use magic sparingly, applying it when necessary to guide or protect.",
            "Encourage allies to find strength within themselves rather than relying solely on your power.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
        ]
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task.",
        ],
    )

        # Expose Agent as an Actor over a Service

@ -25,16 +26,17 @@ async def main():
            message_bus_name="messagepubsub",
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
            service_port=8002
            service_port=8002,
        )

        await wizard_service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        agentic_orchestrator = LLMOrchestrator(

@ -12,16 +13,17 @@ async def main():
            state_key="workflow_state",
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
            max_iterations=25
            max_iterations=25,
        ).as_service(port=8004)

        await agentic_orchestrator.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        random_workflow_service = RandomOrchestrator(

@ -12,16 +13,17 @@ async def main():
            state_key="workflow_state",
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
            max_iterations=3
            max_iterations=3,
        ).as_service(port=8004)

        await random_workflow_service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        roundrobin_workflow_service = RoundRobinOrchestrator(

@ -12,16 +13,17 @@ async def main():
            state_key="workflow_state",
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
            max_iterations=3
            max_iterations=3,
        ).as_service(port=8004)

        await roundrobin_workflow_service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        # Define Agent

@ -15,7 +16,7 @@ async def main():
            "Be strong-willed, fiercely loyal, and protective of companions.",
            "Excel in close combat and battlefield tactics, favoring axes and brute strength.",
            "Navigate caves, tunnels, and ancient stonework with expert knowledge.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task.",
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",

@ -28,9 +29,10 @@ async def main():
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        # Define Eagle Agent

@ -16,7 +17,7 @@ async def main():
            "Provide swift and strategic transport for those on critical journeys.",
            "Offer aerial insights, spotting dangers, tracking movements, and scouting strategic locations.",
            "Speak with wisdom and authority, as one of the ancient and noble Great Eagles.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task.",
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",

@ -29,9 +30,10 @@ async def main():
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        # Define Agent

@ -15,7 +16,7 @@ async def main():
            "Be swift, silent, and precise, moving effortlessly across any terrain.",
            "Use superior vision and heightened senses to scout ahead and detect threats.",
            "Excel in ranged combat, delivering pinpoint arrow strikes from great distances.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task.",
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",

@ -28,9 +29,10 @@ async def main():
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())
    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        # Define Agent

@ -15,7 +16,7 @@ async def main():
            "Endure hardships and temptations, staying true to the mission even when faced with doubt.",
            "Seek guidance and trust allies, but bear the ultimate burden alone when necessary.",
            "Move carefully through enemy-infested lands, avoiding unnecessary risks.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task.",
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",

@ -23,14 +24,15 @@ async def main():
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
        )

        await hobbit_agent.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        agentic_orchestrator = LLMOrchestrator(

@ -12,16 +13,17 @@ async def main():
            state_key="workflow_state",
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
            max_iterations=3
            max_iterations=3,
        ).as_service(port=8004)

        await agentic_orchestrator.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        # Define Agent

@ -15,7 +16,7 @@ async def main():
            "Lead by example, inspiring courage and loyalty in allies.",
            "Navigate wilderness with expert tracking and survival skills.",
            "Master both swordplay and battlefield strategy, excelling in one-on-one combat and large-scale warfare.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task.",
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",

@ -28,9 +29,10 @@ async def main():
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        # Define Agent

@ -15,7 +16,7 @@ async def main():
            "Provide strategic counsel, always considering the long-term consequences of actions.",
            "Use magic sparingly, applying it when necessary to guide or protect.",
            "Encourage allies to find strength within themselves rather than relying solely on your power.",
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task."
            "Respond concisely, accurately, and relevantly, ensuring clarity and strict alignment with the task.",
        ],
        message_bus_name="messagepubsub",
        state_store_name="agenticworkflowstate",

@ -28,9 +29,10 @@ async def main():
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        random_workflow_service = RandomOrchestrator(

@ -12,16 +13,17 @@ async def main():
            state_key="workflow_state",
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
            max_iterations=3
            max_iterations=3,
        ).as_service(port=8004)

        await random_workflow_service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -3,6 +3,7 @@ from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        roundrobin_workflow_service = RoundRobinOrchestrator(

@ -12,16 +13,17 @@ async def main():
            state_key="workflow_state",
            agents_registry_store_name="agentsregistrystore",
            agents_registry_key="agents_registry",
            max_iterations=3
            max_iterations=3,
        ).as_service(port=8004)

        await roundrobin_workflow_service.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

    asyncio.run(main())

@ -0,0 +1,37 @@
from dapr_agents import AssistantAgent
from dotenv import load_dotenv
import asyncio
import logging


async def main():
    try:
        # Create the Weather Agent using those tools
        weather_agent = AssistantAgent(
            role="Weather Assistant",
            name="Stevie",
            goal="Help humans get weather and location info using smart tools.",
            instructions=[
                "Respond clearly and helpfully to weather-related questions.",
                "Use tools when appropriate to fetch or simulate weather data.",
                "You may sometimes jump after answering the weather question.",
            ],
            message_bus_name="messagepubsub",
            state_store_name="workflowstatestore",
            state_key="workflow_state",
            agents_registry_store_name="agentstatestore",
            agents_registry_key="agents_registry",
        ).as_service(port=8001)

        # Start the FastAPI agent service
        await weather_agent.start()
    except Exception as e:
        print(f"Error starting service: {e}")


if __name__ == "__main__":
    load_dotenv()

    logging.basicConfig(level=logging.INFO)

    asyncio.run(main())

@ -0,0 +1,57 @@
#!/usr/bin/env python3
import requests
import time
import sys


if __name__ == "__main__":
    status_url = "http://localhost:8001/status"
    healthy = False
    for attempt in range(1, 11):
        try:
            print(f"Attempt {attempt}...")
            response = requests.get(status_url, timeout=5)

            if response.status_code == 200:
                print("Workflow app is healthy!")
                healthy = True
                break
            else:
                print(f"Received status code {response.status_code}: {response.text}")

        except requests.exceptions.RequestException as e:
            print(f"Request failed: {e}")

        print("Waiting 5s before next health check attempt...")
        time.sleep(5)

    if not healthy:
        print("Workflow app is not healthy!")
        sys.exit(1)

    workflow_url = "http://localhost:8001/start-workflow"
    task_payload = {"task": "What is the weather in New York?"}

    for attempt in range(1, 11):
        try:
            print(f"Attempt {attempt}...")
            response = requests.post(workflow_url, json=task_payload, timeout=5)

            if response.status_code == 202:
                print("Workflow started successfully!")
                sys.exit(0)
            else:
                print(f"Received status code {response.status_code}: {response.text}")

        except requests.exceptions.RequestException as e:
            print(f"Request failed: {e}")

        print("Waiting 1s before next attempt...")
        time.sleep(1)

    print("Maximum attempts (10) reached without success.")

    print("Failed to get successful response")
    sys.exit(1)

@ -0,0 +1,12 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: messagepubsub
spec:
  type: pubsub.redis
  version: v1
  metadata:
  - name: redisHost
    value: localhost:6379
  - name: redisPassword
    value: ""

@ -0,0 +1,16 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: agentstatestore
spec:
  type: state.redis
  version: v1
  metadata:
  - name: redisHost
    value: localhost:6379
  - name: redisPassword
    value: ""
  - name: keyPrefix
    value: none
  - name: actorStateStore
    value: "true"

@ -0,0 +1,12 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: workflowstatestore
spec:
  type: state.redis
  version: v1
  metadata:
  - name: redisHost
    value: localhost:6379
  - name: redisPassword
    value: ""

@ -1,12 +1,25 @@
from dapr_agents.agent import Agent, AgentActor, ReActAgent, ToolCallAgent, OpenAPIReActAgent
from dapr_agents.llm.openai import OpenAIChatClient, OpenAIAudioClient, OpenAIEmbeddingClient
from dapr_agents.agent import (
    Agent,
    AgentActor,
    ReActAgent,
    ToolCallAgent,
    OpenAPIReActAgent,
)
from dapr_agents.llm.openai import (
    OpenAIChatClient,
    OpenAIAudioClient,
    OpenAIEmbeddingClient,
)
from dapr_agents.llm.huggingface import HFHubChatClient
from dapr_agents.llm.nvidia import NVIDIAChatClient, NVIDIAEmbeddingClient
from dapr_agents.llm.elevenlabs import ElevenLabsSpeechClient
from dapr_agents.tool import AgentTool, tool
from dapr_agents.workflow import (
    WorkflowApp, AgenticWorkflow,
    LLMOrchestrator, RandomOrchestrator, RoundRobinOrchestrator,
    AssistantAgent
    WorkflowApp,
    AgenticWorkflow,
    LLMOrchestrator,
    RandomOrchestrator,
    RoundRobinOrchestrator,
    AssistantAgent,
)
from dapr_agents.executors import LocalCodeExecutor, DockerCodeExecutor
from dapr_agents.executors import LocalCodeExecutor, DockerCodeExecutor

@ -1,4 +1,4 @@
from .base import AgentBase
from .utils.factory import Agent
from .actor import AgentActor
from .patterns import ReActAgent, ToolCallAgent, OpenAPIReActAgent
from .patterns import ReActAgent, ToolCallAgent, OpenAPIReActAgent

@ -1,4 +1,4 @@
from .base import AgentActorBase
from .interface import AgentActorInterface
from .service import AgentActorService
from .agent import AgentActor
from .agent import AgentActor

@ -1,16 +1,21 @@
import logging
from dapr_agents.agent.actor.schemas import AgentTaskResponse, TriggerAction, BroadcastMessage
from dapr_agents.agent.actor.schemas import (
    AgentTaskResponse,
    TriggerAction,
    BroadcastMessage,
)
from dapr_agents.agent.actor.service import AgentActorService
from dapr_agents.types.agent import AgentActorMessage
from dapr_agents.workflow.messaging.decorator import message_router

logger = logging.getLogger(__name__)


class AgentActor(AgentActorService):
    """
    A Pydantic-based class for managing services and exposing FastAPI routes with Dapr pub/sub and actor support.
    """

    @message_router
    async def process_trigger_action(self, message: TriggerAction):
        """

@ -35,17 +40,23 @@ class AgentActor(AgentActorService):
            response = await self.invoke_task(task)

            # Check if the response exists
            content = response.body.decode() if response and response.body else "Task completed but no response generated."
            content = (
                response.body.decode()
                if response and response.body
                else "Task completed but no response generated."
            )

            # Broadcast result
            response_message = BroadcastMessage(name=self.agent.name, role="user", content=content)
            response_message = BroadcastMessage(
                name=self.agent.name, role="user", content=content
            )
            await self.broadcast_message(message=response_message)

            # Update response
            response_message = response_message.model_dump()
            response_message["workflow_instance_id"] = workflow_instance_id
            agent_response = AgentTaskResponse(**response_message)

            # Send the message to the target agent
            await self.send_message_to_agent(name=source, message=agent_response)
        except Exception as e:

@ -60,22 +71,30 @@ class AgentActor(AgentActorService):
        metadata = message.pop("_message_metadata", {})

        if not isinstance(metadata, dict):
            logger.warning(f"{getattr(self, 'name', 'agent')} received a broadcast with invalid metadata. Ignoring.")
            logger.warning(
                f"{getattr(self, 'name', 'agent')} received a broadcast with invalid metadata. Ignoring."
            )
            return

        source = metadata.get("source", "unknown_source")
        message_type = metadata.get("type", "unknown_type")
        message_content = message.get("content", "No content")

        logger.info(f"{self.agent.name} received broadcast message of type '{message_type}' from '{source}'.")
        logger.info(
            f"{self.agent.name} received broadcast message of type '{message_type}' from '{source}'."
        )

        # Ignore messages sent by this agent
        if source == self.agent.name:
            logger.info(f"{self.agent.name} ignored its own broadcast message of type '{message_type}'.")
            logger.info(
                f"{self.agent.name} ignored its own broadcast message of type '{message_type}'."
            )
            return

        # Log and process the valid broadcast message
        logger.debug(f"{self.agent.name} is processing broadcast message of type '{message_type}' from '{source}'.")
        logger.debug(
            f"{self.agent.name} is processing broadcast message of type '{message_type}' from '{source}'."
        )
        logger.debug(f"Message content: {message_content}")

        # Add the message to the agent's memory

@ -86,4 +105,4 @@ class AgentActor(AgentActorService):
            await self.add_message(actor_message)

        except Exception as e:
            logger.error(f"Error processing broadcast message: {e}", exc_info=True)
            logger.error(f"Error processing broadcast message: {e}", exc_info=True)

@ -16,6 +16,7 @@ from pydantic import ValidationError

logger = logging.getLogger(__name__)


class AgentActorBase(Actor, AgentActorInterface):
    """Base class for all agent actors, including task execution and agent state management."""

@ -24,19 +25,23 @@ class AgentActorBase(Actor, AgentActorInterface):
        self.actor_id = actor_id
        self.agent: AgentBase
        self.agent_state_key = "agent_state"

    async def _on_activate(self) -> None:
        """
        Called when the actor is activated. Initializes the agent's state if not present.
        """
        logger.info(f"Activating actor with ID: {self.actor_id}")
        has_state, state_data = await self._state_manager.try_get_state(self.agent_state_key)
        has_state, state_data = await self._state_manager.try_get_state(
            self.agent_state_key
        )

        if not has_state:
            # Initialize state with default values if it doesn't exist
            logger.info(f"Initializing state for {self.actor_id}")
            self.state = AgentActorState(overall_status=AgentStatus.IDLE)
            await self._state_manager.set_state(self.agent_state_key, self.state.model_dump())
            await self._state_manager.set_state(
                self.agent_state_key, self.state.model_dump()
            )
            await self._state_manager.save_state()
        else:
            # Load existing state

@ -48,16 +53,20 @@ class AgentActorBase(Actor, AgentActorInterface):
        """
        Called when the actor is deactivated.
        """
        logger.info(f"Deactivate {self.__class__.__name__} actor with ID: {self.actor_id}.")
        logger.info(
            f"Deactivate {self.__class__.__name__} actor with ID: {self.actor_id}."
        )

    async def set_status(self, status: AgentStatus) -> None:
        """
        Sets the current operational status of the agent and saves the state.
        """
        self.state.overall_status = status
        await self._state_manager.set_state(self.agent_state_key, self.state.model_dump())
        await self._state_manager.set_state(
            self.agent_state_key, self.state.model_dump()
        )
        await self._state_manager.save_state()

    async def invoke_task(self, task: Optional[str] = None) -> str:
        """
        Execute the agent's main task, log the input/output in the task history,

@ -76,7 +85,9 @@ class AgentActorBase(Actor, AgentActorInterface):
            # Look for the last message in the conversation history
            last_message = messages[-1]
            default_task = last_message.get("content")
            logger.debug(f"Default task entry input derived from last message: {default_task}")
            logger.debug(
                f"Default task entry input derived from last message: {default_task}"
            )

        # Prepare the input for task entry
        task_entry_input = task or default_task or "Triggered without a specific task"

@ -93,12 +104,15 @@ class AgentActorBase(Actor, AgentActorInterface):
        self.state.task_history.append(task_entry)

        # Save initial task state with IN_PROGRESS status
        await self._state_manager.set_state(self.agent_state_key, self.state.model_dump())
        await self._state_manager.set_state(
            self.agent_state_key, self.state.model_dump()
        )
        await self._state_manager.save_state()

        try:
            # Run the task if provided, or fallback to agent.run() if no task
            result = self.agent.run(task) if task else self.agent.run()
            task_input = task or None
            result = await self.agent.run(task_input)

            # Update the task entry with the result and mark as COMPLETE
            task_entry.output = result

@ -119,11 +133,13 @@ class AgentActorBase(Actor, AgentActorInterface):

        finally:
            # Ensure the final state of the task is saved
            await self._state_manager.set_state(self.agent_state_key, self.state.model_dump())
            await self._state_manager.set_state(
                self.agent_state_key, self.state.model_dump()
            )
            await self._state_manager.save_state()
            # Revert the agent's status to idle
            await self.set_status(AgentStatus.IDLE)

    async def add_message(self, message: Union[AgentActorMessage, dict]) -> None:
        """
        Adds a message to the conversation history in the actor's state.

@ -134,21 +150,25 @@ class AgentActorBase(Actor, AgentActorInterface):
        # Convert dictionary to AgentActorMessage if necessary
        if isinstance(message, dict):
            message = AgentActorMessage(**message)

        # Add the new message to the state
        self.state.messages.append(message)
        self.state.message_count += 1

        # Save state back to Dapr
        await self._state_manager.set_state(self.agent_state_key, self.state.model_dump())
        await self._state_manager.set_state(
            self.agent_state_key, self.state.model_dump()
        )
        await self._state_manager.save_state()

    async def get_messages(self) -> List[dict]:
        """
        Retrieves the messages from the actor's state, validates it using Pydantic,
        Retrieves the messages from the actor's state, validates it using Pydantic,
        and returns a list of dictionaries if valid.
        """
        has_state, state_data = await self._state_manager.try_get_state(self.agent_state_key)
        has_state, state_data = await self._state_manager.try_get_state(
            self.agent_state_key
        )

        if has_state:
            try:

@ -161,4 +181,4 @@ class AgentActorBase(Actor, AgentActorInterface):
            # Handle validation errors
            print(f"Validation error: {e}")
            return []
        return []
        return []

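The recurring pattern throughout this class is a read-modify-write round trip against the actor state manager: `try_get_state`, mutate the model, `set_state`, then `save_state` to persist. A condensed sketch of that cycle (the method, state key, and plain-dict payload here are illustrative; `self` is assumed to be a Dapr `Actor` with a `_state_manager`, as above):

```python
# Illustrative only: the same try_get_state / set_state / save_state cycle
# AgentActorBase uses, reduced to a counter on a hypothetical actor method.
async def bump_counter(self) -> int:
    has_state, data = await self._state_manager.try_get_state("agent_state")
    state = dict(data) if has_state else {"message_count": 0}
    state["message_count"] += 1
    await self._state_manager.set_state("agent_state", state)
    await self._state_manager.save_state()  # flushes to the actor state store
    return state["message_count"]
```
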
@ -3,9 +3,10 @@ from typing import List, Optional, Union
from dapr.actor import ActorInterface, actormethod
from dapr_agents.types.agent import AgentActorMessage, AgentStatus


class AgentActorInterface(ActorInterface):
    @abstractmethod
    @actormethod(name='InvokeTask')
    @actormethod(name="InvokeTask")
    async def invoke_task(self, task: Optional[str] = None) -> str:
        """
        Invoke a task and return the result as a string.

@ -13,7 +14,7 @@ class AgentActorInterface(ActorInterface):
        pass

    @abstractmethod
    @actormethod(name='AddMessage')
    @actormethod(name="AddMessage")
    async def add_message(self, message: Union[AgentActorMessage, dict]) -> None:
        """
        Adds a message to the conversation history in the actor's state.

@ -21,7 +22,7 @@ class AgentActorInterface(ActorInterface):
        pass

    @abstractmethod
    @actormethod(name='GetMessages')
    @actormethod(name="GetMessages")
    async def get_messages(self) -> List[dict]:
        """
        Retrieves the conversation history from the actor's state.

@ -29,9 +30,9 @@ class AgentActorInterface(ActorInterface):
        pass

    @abstractmethod
    @actormethod(name='SetStatus')
    @actormethod(name="SetStatus")
    async def set_status(self, status: AgentStatus) -> None:
        """
        Sets the current operational status of the agent.
        """
        pass
        pass

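The `@actormethod` names above (`InvokeTask`, `AddMessage`, and so on) are exactly what appears in the sidecar's actor routes — compare the `PUT /actors/MathematicsAgentActor/MathematicsAgent/method/InvokeTask` calls in the logs earlier. A minimal sketch of invoking one of them through a typed proxy, with the actor type and ID borrowed from those logs (the import path for the interface is assumed from this changeset's layout):

```python
from dapr.actor import ActorId, ActorProxy
from dapr_agents.agent.actor.interface import AgentActorInterface


async def invoke_math_actor() -> str:
    # The typed proxy resolves method names via the @actormethod decorators.
    proxy = ActorProxy.create(
        "MathematicsAgentActor", ActorId("MathematicsAgent"), AgentActorInterface
    )
    return await proxy.InvokeTask("Determine the sum of 1 + 1.")
```
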
@ -2,21 +2,33 @@ from typing import Optional
from pydantic import BaseModel, Field
from dapr_agents.types.message import BaseMessage


class AgentTaskResponse(BaseMessage):
    """
    Represents a response message from an agent after completing a task.
    """
    workflow_instance_id: Optional[str] = Field(default=None, description="Dapr workflow instance id from source if available")

    workflow_instance_id: Optional[str] = Field(
        default=None, description="Dapr workflow instance id from source if available"
    )


class TriggerAction(BaseModel):
    """
    Represents a message used to trigger an agent's activity within the workflow.
    """
    task: Optional[str] = Field(None, description="The specific task to execute. If not provided, the agent will act based on its memory or predefined behavior.")

    task: Optional[str] = Field(
        None,
        description="The specific task to execute. If not provided, the agent will act based on its memory or predefined behavior.",
    )
    iteration: Optional[int] = Field(0, description="")
    workflow_instance_id: Optional[str] = Field(default=None, description="Dapr workflow instance id from source if available")
    workflow_instance_id: Optional[str] = Field(
        default=None, description="Dapr workflow instance id from source if available"
    )


class BroadcastMessage(BaseMessage):
    """
    Represents a broadcast message from an agent
    """
    """

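These schemas are the payloads behind the pub/sub traffic in the logs: the trigger script's CloudEvent deserializes into `TriggerAction`, and agents reply with `AgentTaskResponse`. A small sketch of round-tripping one of the models (standard Pydantic; the field values are illustrative):

```python
from dapr_agents.agent.actor.schemas import TriggerAction

event = TriggerAction(task="What is 1 + 1?")
payload = event.model_dump()         # what gets published as the event body
assert payload["iteration"] == 0     # defaults are filled in
restored = TriggerAction(**payload)  # what the receiving handler validates
print(restored.task)
```
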
@ -4,6 +4,7 @@ import logging
|
|||
from contextlib import asynccontextmanager
|
||||
from datetime import timedelta
|
||||
from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
|
||||
import time
|
||||
|
||||
from fastapi import FastAPI, HTTPException, Response, status
|
||||
from fastapi.encoders import jsonable_encoder
|
||||
|
@ -17,7 +18,12 @@ from dapr.actor.runtime.config import (
|
|||
)
|
||||
from dapr.actor.runtime.runtime import ActorRuntime
|
||||
from dapr.clients import DaprClient
|
||||
from dapr.clients.grpc._request import (
|
||||
TransactionOperationType,
|
||||
TransactionalStateOperation,
|
||||
)
|
||||
from dapr.clients.grpc._response import StateResponse
|
||||
from dapr.clients.grpc._state import Concurrency, Consistency, StateOptions
|
||||
from dapr.ext.fastapi import DaprActor
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, model_validator
|
||||
|
@ -31,22 +37,56 @@ from dapr_agents.workflow.messaging.routing import MessageRoutingMixin
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentActorService(DaprPubSub, MessageRoutingMixin):
|
||||
agent: AgentBase
|
||||
name: Optional[str] = Field(default=None, description="Name of the agent actor, derived from the agent if not provided.")
|
||||
agent_topic_name: Optional[str] = Field(None, description="The topic name dedicated to this specific agent, derived from the agent's name if not provided.")
|
||||
broadcast_topic_name: str = Field("beacon_channel", description="The default topic used for broadcasting messages to all agents.")
|
||||
agents_registry_store_name: str = Field(..., description="The name of the Dapr state store component used to store and share agent metadata centrally.")
|
||||
agents_registry_key: str = Field(default="agents_registry", description="Dapr state store key for agentic workflow state.")
|
||||
service_port: Optional[int] = Field(default=None, description="The port number to run the API server on.")
|
||||
service_host: Optional[str] = Field(default="0.0.0.0", description="Host address for the API server.")
|
||||
name: Optional[str] = Field(
|
||||
default=None,
|
||||
description="Name of the agent actor, derived from the agent if not provided.",
|
||||
)
|
||||
agent_topic_name: Optional[str] = Field(
|
||||
None,
|
||||
description="The topic name dedicated to this specific agent, derived from the agent's name if not provided.",
|
||||
)
|
||||
broadcast_topic_name: str = Field(
|
||||
"beacon_channel",
|
||||
description="The default topic used for broadcasting messages to all agents.",
|
||||
)
|
||||
agents_registry_store_name: str = Field(
|
||||
...,
|
||||
description="The name of the Dapr state store component used to store and share agent metadata centrally.",
|
||||
)
|
||||
agents_registry_key: str = Field(
|
||||
default="agents_registry",
|
||||
description="Dapr state store key for agentic workflow state.",
|
||||
)
|
||||
service_port: Optional[int] = Field(
|
||||
default=None, description="The port number to run the API server on."
|
||||
)
|
||||
service_host: Optional[str] = Field(
|
||||
default="0.0.0.0", description="Host address for the API server."
|
||||
)
|
||||
|
||||
# Fields initialized in model_post_init
|
||||
-    actor: Optional[DaprActor] = Field(default=None, init=False, description="DaprActor for actor lifecycle support.")
-    actor_name: Optional[str] = Field(default=None, init=False, description="Actor name")
-    actor_proxy: Optional[ActorProxy] = Field(default=None, init=False, description="Proxy for invoking methods on the agent's actor.")
-    actor_class: Optional[type] = Field(default=None, init=False, description="Dynamically created actor class for the agent")
-    agent_metadata: Optional[dict] = Field(default=None, init=False, description="Agent's metadata")
+    actor: Optional[DaprActor] = Field(
+        default=None, init=False, description="DaprActor for actor lifecycle support."
+    )
+    actor_name: Optional[str] = Field(
+        default=None, init=False, description="Actor name"
+    )
+    actor_proxy: Optional[ActorProxy] = Field(
+        default=None,
+        init=False,
+        description="Proxy for invoking methods on the agent's actor.",
+    )
+    actor_class: Optional[type] = Field(
+        default=None,
+        init=False,
+        description="Dynamically created actor class for the agent",
+    )
+    agent_metadata: Optional[dict] = Field(
+        default=None, init=False, description="Agent's metadata"
+    )

     # Private internal attributes (not schema/validated)
     _http_server: Optional[Any] = PrivateAttr(default=None)

@@ -54,7 +94,9 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
     _dapr_client: Optional[DaprClient] = PrivateAttr(default=None)
     _is_running: bool = PrivateAttr(default=False)
     _subscriptions: Dict[str, Callable] = PrivateAttr(default_factory=dict)
-    _topic_handlers: Dict[Tuple[str, str], Dict[Type[BaseModel], Callable]] = PrivateAttr(default_factory=dict)
+    _topic_handlers: Dict[
+        Tuple[str, str], Dict[Type[BaseModel], Callable]
+    ] = PrivateAttr(default_factory=dict)

     model_config = ConfigDict(arbitrary_types_allowed=True)

@@ -68,19 +110,25 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
         if not values.get("name") and agent:
             values["name"] = agent.name or agent.role
         return values

     def model_post_init(self, __context: Any) -> None:
         # Proceed with base model setup
         super().model_post_init(__context)

         # Dynamically create the actor class based on the agent's name
         actor_class_name = f"{self.agent.name}Actor"

         # Create the actor class dynamically using the 'type' function
-        self.actor_class = type(actor_class_name, (AgentActorBase,), {
-            '__init__': lambda self, ctx, actor_id: AgentActorBase.__init__(self, ctx, actor_id),
-            'agent': self.agent
-        })
+        self.actor_class = type(
+            actor_class_name,
+            (AgentActorBase,),
+            {
+                "__init__": lambda self, ctx, actor_id: AgentActorBase.__init__(
+                    self, ctx, actor_id
+                ),
+                "agent": self.agent,
+            },
+        )

         # Prepare agent metadata
         self.agent_metadata = {
@@ -89,12 +137,14 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
             "goal": self.agent.goal,
             "topic_name": self.agent_topic_name,
             "pubsub_name": self.message_bus_name,
-            "orchestrator": False
+            "orchestrator": False,
         }

         # Proxy for actor methods
         self.actor_name = self.actor_class.__name__
-        self.actor_proxy = ActorProxy.create(self.actor_name, ActorId(self.agent.name), AgentActorInterface)
+        self.actor_proxy = ActorProxy.create(
+            self.actor_name, ActorId(self.agent.name), AgentActorInterface
+        )

         # Initialize Sync Dapr Client
         self._dapr_client = DaprClient()

@@ -103,13 +153,13 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
         self._http_server: FastAPIServerBase = FastAPIServerBase(
             service_name=self.agent.name,
             service_port=self.service_port,
-            service_host=self.service_host
+            service_host=self.service_host,
         )
         self._http_server.app.router.lifespan_context = self.lifespan

         # Create DaprActor using FastAPI app
         self.actor = DaprActor(self.app)

         self.app.add_api_route("/GetMessages", self.get_messages, methods=["GET"])

         logger.info(f"Dapr Actor class {self.actor_class.__name__} initialized.")
@@ -125,20 +175,23 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
         if self._http_server:
             return self._http_server.app
         raise RuntimeError("FastAPI server not initialized.")

     @asynccontextmanager
     async def lifespan(self, app: FastAPI):
         # Register actor
         actor_runtime_config = ActorRuntimeConfig()
-        actor_runtime_config.update_actor_type_configs([
-            ActorTypeConfig(
-                actor_type=self.actor_class.__name__,
-                actor_idle_timeout=timedelta(hours=1),
-                actor_scan_interval=timedelta(seconds=30),
-                drain_ongoing_call_timeout=timedelta(minutes=1),
-                drain_rebalanced_actors=True,
-                reentrancy=ActorReentrancyConfig(enabled=True))
-        ])
+        actor_runtime_config.update_actor_type_configs(
+            [
+                ActorTypeConfig(
+                    actor_type=self.actor_class.__name__,
+                    actor_idle_timeout=timedelta(hours=1),
+                    actor_scan_interval=timedelta(seconds=30),
+                    drain_ongoing_call_timeout=timedelta(minutes=1),
+                    drain_rebalanced_actors=True,
+                    reentrancy=ActorReentrancyConfig(enabled=True),
+                )
+            ]
+        )
         ActorRuntime.set_actor_config(actor_runtime_config)

         await self.actor.register_actor(self.actor_class)

@@ -155,7 +208,9 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):

     async def start(self):
         if self._is_running:
-            logger.warning("Service is already running. Ignoring duplicate start request.")
+            logger.warning(
+                "Service is already running. Ignoring duplicate start request."
+            )
             return

         logger.info("Starting Agent Actor Service...")

@@ -173,7 +228,9 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):

         for (pubsub_name, topic_name), close_fn in self._subscriptions.items():
             try:
-                logger.info(f"Unsubscribing from pubsub '{pubsub_name}' topic '{topic_name}'")
+                logger.info(
+                    f"Unsubscribing from pubsub '{pubsub_name}' topic '{topic_name}'"
+                )
                 close_fn()
             except Exception as e:
                 logger.error(f"Failed to unsubscribe from topic '{topic_name}': {e}")

@@ -181,7 +238,7 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
         self._subscriptions.clear()
         self._is_running = False
         logger.info("Agent Actor Service stopped.")

     def get_data_from_store(self, store_name: str, key: str) -> Optional[dict]:
         """
         Retrieve data from a specified Dapr state store using a provided key.

@@ -194,15 +251,21 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
             Optional[dict]: The data stored under the specified key if found; otherwise, None.
         """
         try:
-            response: StateResponse = self._dapr_client.get_state(store_name=store_name, key=key)
+            response: StateResponse = self._dapr_client.get_state(
+                store_name=store_name, key=key
+            )
             data = response.data

             return json.loads(data) if data else None
-        except Exception as e:
-            logger.warning(f"Error retrieving data for key '{key}' from store '{store_name}'")
+        except Exception:
+            logger.warning(
+                f"Error retrieving data for key '{key}' from store '{store_name}'"
+            )
             return None
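Note: `get_data_from_store` is a thin wrapper over the Dapr state API. A hedged sketch of the same read done directly with the SDK client (assumes a running Dapr sidecar; the store name `agentsregistrystore` is illustrative):

```python
# Hypothetical direct usage; requires the dapr Python SDK and a running sidecar.
import json

from dapr.clients import DaprClient

with DaprClient() as client:
    resp = client.get_state(store_name="agentsregistrystore", key="agents_metadata")
    # resp.data is raw bytes; empty data means the key does not exist yet.
    registry = json.loads(resp.data) if resp.data else None
    print(registry)
```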

-    def get_agents_metadata(self, exclude_self: bool = True, exclude_orchestrator: bool = False) -> dict:
+    def get_agents_metadata(
+        self, exclude_self: bool = True, exclude_orchestrator: bool = False
+    ) -> dict:
         """
         Retrieves metadata for all registered agents while ensuring orchestrators do not interact with other orchestrators.

@@ -218,17 +281,28 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
         """
         try:
             # Fetch agent metadata from the registry
-            agents_metadata = self.get_data_from_store(self.agents_registry_store_name, self.agents_registry_key) or {}
+            agents_metadata = (
+                self.get_data_from_store(
+                    self.agents_registry_store_name, self.agents_registry_key
+                )
+                or {}
+            )

             if agents_metadata:
-                logger.info(f"Agents found in '{self.agents_registry_store_name}' for key '{self.agents_registry_key}'.")
+                logger.info(
+                    f"Agents found in '{self.agents_registry_store_name}' for key '{self.agents_registry_key}'."
+                )

                 # Filter based on exclusion rules
                 filtered_metadata = {
                     name: metadata
                     for name, metadata in agents_metadata.items()
-                    if not (exclude_self and name == self.agent.name)  # Exclude self if requested
-                    and not (exclude_orchestrator and metadata.get("orchestrator", False))  # Exclude all orchestrators if exclude_orchestrator=True
+                    if not (
+                        exclude_self and name == self.agent.name
+                    )  # Exclude self if requested
+                    and not (
+                        exclude_orchestrator and metadata.get("orchestrator", False)
+                    )  # Exclude all orchestrators if exclude_orchestrator=True
                 }

                 if not filtered_metadata:

@@ -236,34 +310,99 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):

                 return filtered_metadata

-            logger.info(f"No agents found in '{self.agents_registry_store_name}' for key '{self.agents_registry_key}'.")
+            logger.info(
+                f"No agents found in '{self.agents_registry_store_name}' for key '{self.agents_registry_key}'."
+            )
             return {}
         except Exception as e:
             logger.error(f"Failed to retrieve agents metadata: {e}", exc_info=True)
             return {}
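Note: the exclusion rules above are plain dict-comprehension logic. A standalone illustration with made-up metadata shows the effect of both flags:

```python
# Standalone illustration of the exclusion rules; the agent data is made up.
agents = {
    "alice": {"role": "Weather Expert", "orchestrator": False},
    "bob": {"role": "Travel Planner", "orchestrator": False},
    "router": {"role": "Coordinator", "orchestrator": True},
}


def filter_agents(agents, self_name, exclude_self=True, exclude_orchestrator=False):
    return {
        name: meta
        for name, meta in agents.items()
        if not (exclude_self and name == self_name)
        and not (exclude_orchestrator and meta.get("orchestrator", False))
    }


print(filter_agents(agents, "alice"))                             # bob and router
print(filter_agents(agents, "alice", exclude_orchestrator=True))  # bob only
```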

     def register_agent_metadata(self) -> None:
         """
         Registers the agent's metadata in the Dapr state store under 'agents_metadata'.
         """
         try:
-            # Retrieve existing metadata or initialize as an empty dictionary
-            agents_metadata = self.get_agents_metadata()
-            agents_metadata[self.agent.name] = self.agent_metadata
-
-            # Save the updated metadata back to Dapr store
-            self._dapr_client.save_state(
-                store_name=self.agents_registry_store_name,
-                key=self.agents_registry_key,
-                value=json.dumps(agents_metadata),
-                state_metadata={"contentType": "application/json"}
-            )
-
-            logger.info(f"{self.agent.name} registered its metadata under key '{self.agents_registry_key}'")
+            # Update the agents registry store with the new agent metadata
+            self.register_agent(
+                store_name=self.agents_registry_store_name,
+                store_key=self.agents_registry_key,
+                agent_name=self.name,
+                agent_metadata=self.agent_metadata,
+            )
+            logger.info(
+                f"{self.name} registered its metadata under key '{self.agents_registry_key}'"
+            )
         except Exception as e:
-            logger.error(f"Failed to register metadata for agent {self.agent.name}: {e}")
+            logger.error(
+                f"Failed to register metadata for agent {self.agent.name}: {e}"
+            )
+            raise e
+
+    def register_agent(
+        self, store_name: str, store_key: str, agent_name: str, agent_metadata: dict
+    ) -> None:
+        """
+        Merges the existing data with the new data and updates the store.
+
+        Args:
+            store_name (str): The name of the Dapr state store component.
+            key (str): The key to update.
+            data (dict): The data to update the store with.
+        """
+        # retry the entire operation up to ten times sleeping 1 second between each attempt
+        for attempt in range(1, 11):
+            try:
+                response: StateResponse = self._dapr_client.get_state(
+                    store_name=store_name, key=store_key
+                )
+                if not response.etag:
+                    # if there is no etag the following transaction won't work as expected
+                    # so we need to save an empty object with a strong consistency to force the etag to be created
+                    self._dapr_client.save_state(
+                        store_name=store_name,
+                        key=store_key,
+                        value=json.dumps({}),
+                        state_metadata={"contentType": "application/json"},
+                        options=StateOptions(
+                            concurrency=Concurrency.first_write,
+                            consistency=Consistency.strong,
+                        ),
+                    )
+                    # raise an exception to retry the entire operation
+                    raise Exception(f"No etag found for key: {store_key}")
+                existing_data = json.loads(response.data) if response.data else {}
+                if (agent_name, agent_metadata) in existing_data.items():
+                    logger.debug(f"agent {agent_name} already registered.")
+                    return None
+                agent_data = {agent_name: agent_metadata}
+                merged_data = {**existing_data, **agent_data}
+                logger.debug(f"merged data: {merged_data} etag: {response.etag}")
+                try:
+                    # using the transactional API to be able to later support the Dapr outbox pattern
+                    self._dapr_client.execute_state_transaction(
+                        store_name=store_name,
+                        operations=[
+                            TransactionalStateOperation(
+                                key=store_key,
+                                data=json.dumps(merged_data),
+                                etag=response.etag,
+                                operation_type=TransactionOperationType.upsert,
+                            )
+                        ],
+                        transactional_metadata={"contentType": "application/json"},
+                    )
+                except Exception as e:
+                    raise e
+                return None
+            except Exception as e:
+                logger.debug(f"Error on transaction attempt: {attempt}: {e}")
+                logger.debug("Sleeping for 1 second before retrying transaction...")
+                time.sleep(1)
+        raise Exception(
+            f"Failed to update state store key: {store_key} after 10 attempts."
+        )
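Note: the retry loop above is first-write-wins optimistic concurrency: read the current etag, merge locally, then write transactionally with that etag so a concurrent writer invalidates the attempt and forces a retry. A minimal sketch of the same read-merge-write cycle against an in-memory stand-in (`FakeStore` is illustrative, not a Dapr API):

```python
# Read-merge-write with etags, in miniature; FakeStore stands in for a state store.
import time


class FakeStore:
    def __init__(self):
        self.data, self.etag = {}, 0

    def get(self):
        return dict(self.data), self.etag

    def set(self, value, etag):
        if etag != self.etag:  # stale etag -> a concurrent writer won the race
            raise RuntimeError("etag mismatch")
        self.data, self.etag = value, self.etag + 1


def register(store: FakeStore, name: str, meta: dict, attempts: int = 10):
    for _ in range(attempts):
        current, etag = store.get()
        if current.get(name) == meta:
            return  # already registered, nothing to do
        try:
            store.set({**current, name: meta}, etag)
            return
        except RuntimeError:
            time.sleep(0.1)  # lost the race; re-read and retry
    raise RuntimeError(f"failed to register {name} after {attempts} attempts")


s = FakeStore()
register(s, "alice", {"role": "Weather Expert"})
print(s.data)  # {'alice': {'role': 'Weather Expert'}}
```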

     async def invoke_task(self, task: Optional[str]) -> Response:
         """
         Use the actor to invoke a task by running the InvokeTask method through ActorProxy.

@@ -279,8 +418,10 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
             return Response(content=response, status_code=status.HTTP_200_OK)
         except Exception as e:
             logger.error(f"Failed to run task for {self.actor_name}: {e}")
-            raise HTTPException(status_code=500, detail=f"Error invoking task: {str(e)}")
+            raise HTTPException(
+                status_code=500, detail=f"Error invoking task: {str(e)}"
+            )

     async def add_message(self, message: AgentActorMessage) -> None:
         """
         Adds a message to the conversation history in the actor's state.

@@ -289,19 +430,28 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
             await self.actor_proxy.AddMessage(message.model_dump())
         except Exception as e:
             logger.error(f"Failed to add message to {self.actor_name}: {e}")

     async def get_messages(self) -> Response:
         """
         Retrieve the conversation history from the actor.
         """
         try:
             messages = await self.actor_proxy.GetMessages()
-            return JSONResponse(content=jsonable_encoder(messages), status_code=status.HTTP_200_OK)
+            return JSONResponse(
+                content=jsonable_encoder(messages), status_code=status.HTTP_200_OK
+            )
         except Exception as e:
             logger.error(f"Failed to retrieve messages for {self.actor_name}: {e}")
-            raise HTTPException(status_code=500, detail=f"Error retrieving messages: {str(e)}")
+            raise HTTPException(
+                status_code=500, detail=f"Error retrieving messages: {str(e)}"
+            )

-    async def broadcast_message(self, message: Union[BaseModel, dict], exclude_orchestrator: bool = False, **kwargs) -> None:
+    async def broadcast_message(
+        self,
+        message: Union[BaseModel, dict],
+        exclude_orchestrator: bool = False,
+        **kwargs,
+    ) -> None:
         """
         Sends a message to all agents (or only to non-orchestrator agents if exclude_orchestrator=True).

@@ -312,7 +462,9 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
         """
         try:
             # Retrieve agents metadata while respecting the exclude_orchestrator flag
-            agents_metadata = self.get_agents_metadata(exclude_orchestrator=exclude_orchestrator)
+            agents_metadata = self.get_agents_metadata(
+                exclude_orchestrator=exclude_orchestrator
+            )

             if not agents_metadata:
                 logger.warning("No agents available for broadcast.")

@@ -332,7 +484,9 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
         except Exception as e:
             logger.error(f"Failed to broadcast message: {e}", exc_info=True)

-    async def send_message_to_agent(self, name: str, message: Union[BaseModel, dict], **kwargs) -> None:
+    async def send_message_to_agent(
+        self, name: str, message: Union[BaseModel, dict], **kwargs
+    ) -> None:
         """
         Sends a message to a specific agent.

@@ -343,9 +497,11 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):
         """
         try:
             agents_metadata = self.get_agents_metadata()

             if name not in agents_metadata:
-                logger.warning(f"Target '{name}' is not registered as an agent. Skipping message send.")
+                logger.warning(
+                    f"Target '{name}' is not registered as an agent. Skipping message send."
+                )
                 return  # Do not raise an error—just warn and move on.

             agent_metadata = agents_metadata[name]

@@ -361,4 +517,6 @@ class AgentActorService(DaprPubSub, MessageRoutingMixin):

             logger.debug(f"{self.name} sent message to agent '{name}'.")
         except Exception as e:
-            logger.error(f"Failed to send message to agent '{name}': {e}", exc_info=True)
+            logger.error(
+                f"Failed to send message to agent '{name}': {e}", exc_info=True
+            )
@@ -1,4 +1,8 @@
-from dapr_agents.memory import MemoryBase, ConversationListMemory, ConversationVectorMemory
+from dapr_agents.memory import (
+    MemoryBase,
+    ConversationListMemory,
+    ConversationVectorMemory,
+)
 from dapr_agents.agent.utils.text_printer import ColorTextFormatter
 from dapr_agents.types import MessageContent, MessagePlaceHolder
 from dapr_agents.tool.executor import AgentToolExecutor

@@ -14,46 +18,79 @@ import logging

 logger = logging.getLogger(__name__)


 class AgentBase(BaseModel, ABC):
     """
     Base class for agents that interact with language models and manage tools for task execution.
     """

-    name: Optional[str] = Field(default=None, description="The agent's name, defaulting to the role if not provided.")
-    role: Optional[str] = Field(default="Assistant", description="The agent's role in the interaction (e.g., 'Weather Expert').")
-    goal: Optional[str] = Field(default="Help humans", description="The agent's main objective (e.g., 'Provide Weather information').")
-    instructions: Optional[List[str]] = Field(default=None, description="Instructions guiding the agent's tasks.")
-    system_prompt: Optional[str] = Field(default=None, description="A custom system prompt, overriding name, role, goal, and instructions.")
-    llm: LLMClientBase = Field(default_factory=OpenAIChatClient, description="Language model client for generating responses.")
-    prompt_template: Optional[PromptTemplateBase] = Field(default=None, description="The prompt template for the agent.")
-    tools: List[Union[AgentTool, Callable]] = Field(default_factory=list, description="Tools available for the agent to assist with tasks.")
-    max_iterations: int = Field(default=10, description="Max iterations for conversation cycles.")
-    memory: MemoryBase = Field(default_factory=ConversationListMemory, description="Handles conversation history and context storage.")
-    template_format: Literal["f-string", "jinja2"] = Field(default="jinja2", description="The format used for rendering the prompt template.")
+    name: Optional[str] = Field(
+        default=None,
+        description="The agent's name, defaulting to the role if not provided.",
+    )
+    role: Optional[str] = Field(
+        default="Assistant",
+        description="The agent's role in the interaction (e.g., 'Weather Expert').",
+    )
+    goal: Optional[str] = Field(
+        default="Help humans",
+        description="The agent's main objective (e.g., 'Provide Weather information').",
+    )
+    instructions: Optional[List[str]] = Field(
+        default=None, description="Instructions guiding the agent's tasks."
+    )
+    system_prompt: Optional[str] = Field(
+        default=None,
+        description="A custom system prompt, overriding name, role, goal, and instructions.",
+    )
+    llm: LLMClientBase = Field(
+        default_factory=OpenAIChatClient,
+        description="Language model client for generating responses.",
+    )
+    prompt_template: Optional[PromptTemplateBase] = Field(
+        default=None, description="The prompt template for the agent."
+    )
+    tools: List[Union[AgentTool, Callable]] = Field(
+        default_factory=list,
+        description="Tools available for the agent to assist with tasks.",
+    )
+    max_iterations: int = Field(
+        default=10, description="Max iterations for conversation cycles."
+    )
+    memory: MemoryBase = Field(
+        default_factory=ConversationListMemory,
+        description="Handles conversation history and context storage.",
+    )
+    template_format: Literal["f-string", "jinja2"] = Field(
+        default="jinja2",
+        description="The format used for rendering the prompt template.",
+    )

     # Private attributes
     _tool_executor: AgentToolExecutor = PrivateAttr()
-    _text_formatter: ColorTextFormatter = PrivateAttr(default_factory=ColorTextFormatter)
+    _text_formatter: ColorTextFormatter = PrivateAttr(
+        default_factory=ColorTextFormatter
+    )

     model_config = ConfigDict(arbitrary_types_allowed=True)

     @model_validator(mode="before")
     def set_name_from_role(cls, values: dict):
         # Set name to role if name is not provided
         if not values.get("name") and values.get("role"):
             values["name"] = values["role"]
         return values
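Note: a `model_validator(mode="before")` runs on the raw input dict before field validation, which is what lets `name` silently default to `role`. A minimal standalone sketch of the idiom (assumes Pydantic v2):

```python
# Minimal Pydantic v2 sketch: a before-validator defaulting one field to another.
from typing import Optional

from pydantic import BaseModel, model_validator


class Agent(BaseModel):
    name: Optional[str] = None
    role: Optional[str] = "Assistant"

    @model_validator(mode="before")
    @classmethod
    def set_name_from_role(cls, values: dict) -> dict:
        if isinstance(values, dict) and not values.get("name") and values.get("role"):
            values["name"] = values["role"]
        return values


print(Agent(role="Weather Expert").name)   # Weather Expert
print(Agent(name="Zee", role="Helper").name)  # Zee
```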

     @property
     def tool_executor(self) -> AgentToolExecutor:
         """Returns the tool executor, ensuring it's accessible but read-only."""
         return self._tool_executor

     @property
     def text_formatter(self) -> ColorTextFormatter:
         """Returns the text formatter for the agent."""
         return self._text_formatter

     @property
     def chat_history(self, task: str = None) -> List[MessageContent]:
         """

@@ -69,7 +106,7 @@ class AgentBase(BaseModel, ABC):
             query_embeddings = self.memory.vector_store.embed_documents([task])
             return self.memory.get_messages(query_embeddings=query_embeddings)
         return self.memory.get_messages()

     @abstractmethod
     def run(self, input_data: Union[str, Dict[str, Any]]) -> Any:
         """

@@ -97,7 +134,9 @@ class AgentBase(BaseModel, ABC):

         # If the agent's prompt_template is provided, use it and skip further configuration
         if self.prompt_template:
-            logger.info("Using the provided agent prompt_template. Skipping system prompt construction.")
+            logger.info(
+                "Using the provided agent prompt_template. Skipping system prompt construction."
+            )
             self.llm.prompt_template = self.prompt_template

         # If the LLM client already has a prompt template, sync it and prefill/validate as needed

@@ -112,7 +151,7 @@ class AgentBase(BaseModel, ABC):

         logger.info("Using system_prompt to create the prompt template.")
         self.prompt_template = self.construct_prompt_template()

         # Pre-fill Agent Attributes if needed
         self.prefill_agent_attributes()

@@ -145,28 +184,44 @@ class AgentBase(BaseModel, ABC):
             prefill_data["instructions"] = "\n".join(self.instructions)

         # Collect attributes set but not in input_variables for informational logging
-        set_attributes = {"name": self.name, "role": self.role, "goal": self.goal, "instructions": self.instructions}
+        set_attributes = {
+            "name": self.name,
+            "role": self.role,
+            "goal": self.goal,
+            "instructions": self.instructions,
+        }

         # Use Pydantic's model_fields_set to detect if attributes were user-set
-        user_set_attributes = {attr for attr in set_attributes if attr in self.model_fields_set}
+        user_set_attributes = {
+            attr for attr in set_attributes if attr in self.model_fields_set
+        }

         ignored_attributes = [
-            attr for attr in set_attributes
-            if attr not in self.prompt_template.input_variables and set_attributes[attr] is not None and attr in user_set_attributes
+            attr
+            for attr in set_attributes
+            if attr not in self.prompt_template.input_variables
+            and set_attributes[attr] is not None
+            and attr in user_set_attributes
         ]

         # Apply pre-filled data only for attributes that are in input_variables
         if prefill_data:
-            self.prompt_template = self.prompt_template.pre_fill_variables(**prefill_data)
-            logger.info(f"Pre-filled prompt template with attributes: {list(prefill_data.keys())}")
+            self.prompt_template = self.prompt_template.pre_fill_variables(
+                **prefill_data
+            )
+            logger.info(
+                f"Pre-filled prompt template with attributes: {list(prefill_data.keys())}"
+            )
         elif ignored_attributes:
             raise ValueError(
                 f"The following agent attributes were explicitly set by the user but are not considered by the prompt template: {', '.join(ignored_attributes)}. "
                 "Please ensure that these attributes are included in the prompt template's input variables if they are needed."
             )
         else:
-            logger.info("No agent attributes were pre-filled, as the template did not require any.")
+            logger.info(
+                "No agent attributes were pre-filled, as the template did not require any."
+            )

     def construct_system_prompt(self) -> str:
         """
         Constructs a system prompt with agent attributes like `name`, `role`, `goal`, and `instructions`.

@@ -191,7 +246,7 @@ class AgentBase(BaseModel, ABC):
             prompt_parts.append("## Instructions\n{{instructions}}")

         return "\n\n".join(prompt_parts)

     def construct_prompt_template(self) -> ChatPromptTemplate:
         """
         Constructs a ChatPromptTemplate that includes the system prompt and a placeholder for chat history.

@@ -206,19 +261,21 @@ class AgentBase(BaseModel, ABC):
         # Create the template with placeholders for system message and chat history
         return ChatPromptTemplate.from_messages(
             messages=[
-                ('system', system_prompt),
-                MessagePlaceHolder(variable_name="chat_history")
+                ("system", system_prompt),
+                MessagePlaceHolder(variable_name="chat_history"),
             ],
-            template_format=self.template_format
+            template_format=self.template_format,
         )

-    def construct_messages(self, input_data: Union[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
+    def construct_messages(
+        self, input_data: Union[str, Dict[str, Any]]
+    ) -> List[Dict[str, Any]]:
         """
         Constructs and formats initial messages based on input type, pre-filling chat history as needed.

         Args:
             input_data (Union[str, Dict[str, Any]]): User input, either as a string or dictionary.

         Returns:
             List[Dict[str, Any]]: List of formatted messages, including the user message if input_data is a string.
         """

@@ -244,7 +301,7 @@ class AgentBase(BaseModel, ABC):
     def reset_memory(self):
         """Clears all messages stored in the agent's memory."""
         self.memory.reset_memory()

     def get_last_message(self) -> Optional[MessageContent]:
         """
         Retrieves the last message from the chat history.

@@ -254,8 +311,10 @@ class AgentBase(BaseModel, ABC):
         """
         chat_history = self.chat_history
         return chat_history[-1] if chat_history else None

-    def get_last_user_message(self, messages: List[Dict[str, Any]]) -> Optional[MessageContent]:
+    def get_last_user_message(
+        self, messages: List[Dict[str, Any]]
+    ) -> Optional[MessageContent]:
         """
         Retrieves the last user message in a list of messages.

@@ -272,13 +331,13 @@ class AgentBase(BaseModel, ABC):
                 message["content"] = message["content"].strip()
                 return message
         return None

     def pre_fill_prompt_template(self, **kwargs: Union[str, Callable[[], str]]) -> None:
         """
         Pre-fills the prompt template with specified variables, updating input variables if applicable.

         Args:
-            **kwargs: Variables to pre-fill in the prompt template. These can be strings or callables
+            **kwargs: Variables to pre-fill in the prompt template. These can be strings or callables
                 that return strings.

         Notes:

@@ -286,7 +345,9 @@ class AgentBase(BaseModel, ABC):
         - This method does not affect the `chat_history` which is dynamically updated.
         """
         if not self.prompt_template:
-            raise ValueError("Prompt template must be initialized before pre-filling variables.")
+            raise ValueError(
+                "Prompt template must be initialized before pre-filling variables."
+            )

         self.prompt_template = self.prompt_template.pre_fill_variables(**kwargs)
-        logger.debug(f"Pre-filled prompt template with variables: {kwargs.keys()}")
\ No newline at end of file
+        logger.debug(f"Pre-filled prompt template with variables: {kwargs.keys()}")

@@ -1,3 +1,3 @@
 from .react import ReActAgent
 from .toolcall import ToolCallAgent
-from .openapi import OpenAPIReActAgent
\ No newline at end of file
+from .openapi import OpenAPIReActAgent

@@ -1 +1 @@
-from .react import OpenAPIReActAgent
\ No newline at end of file
+from .react import OpenAPIReActAgent

@@ -8,15 +8,18 @@ import logging

 logger = logging.getLogger(__name__)


 class OpenAPIReActAgent(ReActAgent):
     """
     Extends ReActAgent with OpenAPI handling capabilities, including tools for managing API calls.
     """

-    role: str = Field(default="OpenAPI Expert", description="The agent's role in the interaction.")
+    role: str = Field(
+        default="OpenAPI Expert", description="The agent's role in the interaction."
+    )
     goal: str = Field(
         default="Help users work with OpenAPI specifications and API integrations.",
-        description="The main objective of the agent."
+        description="The main objective of the agent.",
     )
     instructions: List[str] = Field(
         default=[

@@ -25,15 +28,23 @@ class OpenAPIReActAgent(ReActAgent):
             "You must first help users explore potential APIs by analyzing OpenAPI definitions, then assist in making authenticated API requests.",
             "Ensure that all API calls are executed with the correct parameters, authentication, and methods.",
             "Your responses should be concise, clear, and focus on guiding the user through the steps of working with APIs, including retrieving API definitions, understanding endpoint parameters, and handling errors.",
-            "You only respond to questions directly related to your role."
+            "You only respond to questions directly related to your role.",
         ],
-        description="Instructions to guide the agent's behavior."
+        description="Instructions to guide the agent's behavior.",
     )
-    spec_parser: OpenAPISpecParser = Field(..., description="Parser for handling OpenAPI specifications.")
-    api_vector_store: VectorStoreBase = Field(..., description="Vector store for storing API definitions.")
-    auth_header: Optional[Dict] = Field(None, description="Authentication headers for executing API calls.")
-
-    tool_vector_store: Optional[VectorToolStore] = Field(default=None, init=False, description="Internal vector store for OpenAPI tools.")
+    spec_parser: OpenAPISpecParser = Field(
+        ..., description="Parser for handling OpenAPI specifications."
+    )
+    api_vector_store: VectorStoreBase = Field(
+        ..., description="Vector store for storing API definitions."
+    )
+    auth_header: Optional[Dict] = Field(
+        None, description="Authentication headers for executing API calls."
+    )
+
+    tool_vector_store: Optional[VectorToolStore] = Field(
+        default=None, init=False, description="Internal vector store for OpenAPI tools."
+    )

     model_config = ConfigDict(arbitrary_types_allowed=True)

@@ -52,13 +63,14 @@ class OpenAPIReActAgent(ReActAgent):

         # Generate OpenAPI-specific tools
         from .tools import generate_api_call_executor, generate_get_openapi_definition

         openapi_tools = [
             generate_get_openapi_definition(self.tool_vector_store),
-            generate_api_call_executor(self.spec_parser, self.auth_header)
+            generate_api_call_executor(self.spec_parser, self.auth_header),
         ]

         # Extend tools with OpenAPI tools
         self.tools.extend(openapi_tools)

         # Call parent model_post_init for additional setup
-        super().model_post_init(__context)
\ No newline at end of file
+        super().model_post_init(__context)
@@ -1,107 +1,159 @@
-
-from dapr_agents.tool.utils.openapi import OpenAPISpecParser
-from dapr_agents.tool.storage import VectorToolStore
-from dapr_agents.tool.base import tool
-from pydantic import BaseModel ,Field, ConfigDict
-from typing import Optional, Any, Dict
-from urllib.parse import urlparse
 import json
 import logging
 import requests
+from urllib.parse import urlparse
+from typing import Any, Dict, Optional, List
+
+from pydantic import BaseModel, Field, ConfigDict
+
+from dapr_agents.tool.base import tool
+from dapr_agents.tool.storage import VectorToolStore
+from dapr_agents.tool.utils.openapi import OpenAPISpecParser
+
+logger = logging.getLogger(__name__)


-def extract_version(path: str) -> str:
+def _extract_version(path: str) -> str:
     """Extracts the version prefix from a path if it exists, assuming it starts with 'v' followed by digits."""
-    parts = path.strip('/').split('/')
-    if parts and parts[0].startswith('v') and parts[0][1:].isdigit():
-        return parts[0]
-    return ''
+    seg = path.lstrip("/").split("/", 1)[0]
+    return seg if seg.startswith("v") and seg[1:].isdigit() else ""


+def _join_url(base: str, path: str) -> str:
+    """
+    Join *base* and *path* while avoiding duplicated version segments
+    and double slashes. Assumes base already ends at the **/servers[0].url**.
+    """
+    parsed = urlparse(base)
+    origin = f"{parsed.scheme}://{parsed.netloc}"
+    base_path = parsed.path.strip("/")
+
+    b_ver, p_ver = _extract_version(base_path), _extract_version(path)
+    if b_ver and b_ver == p_ver:
+        path = path[len(f"/{p_ver}") :]
+
+    pieces = [p for p in (base_path, path.lstrip("/")) if p]
+    return f"{origin}/" + "/".join(pieces).replace("//", "/")

-def generate_get_openapi_definition(tool_vector_store: VectorToolStore):
-    @tool
-    def get_openapi_definition(user_input: str):
-        """
-        Get potential APIs for the user to use to accompish task.
-        You have to choose the right one after getting a response.
-        This tool MUST be used before calling any APIs.
-        """
-        similatiry_result = tool_vector_store.get_similar_tools(query_texts=[user_input], k=5)
-        documents = similatiry_result['documents'][0]
-        return documents
-
-    return get_openapi_definition
+def _fmt_candidate(doc: str, meta: Dict[str, Any]) -> str:
+    """Return a single nice, log-friendly candidate string."""
+    meta_line = f"url={meta.get('url')} | method={meta.get('method', '').upper()} | name={meta.get('name')}"
+    return f"{doc.strip()}\n{meta_line}"


+class GetDefinitionInput(BaseModel):
+    """Free-form query describing *one* desired operation (e.g. "multiply two numbers")."""
+
+    user_input: str = Field(
+        ..., description="Natural-language description of ONE desired API operation."
+    )


+def generate_get_openapi_definition(store: VectorToolStore):
+    @tool(args_model=GetDefinitionInput)
+    def get_openapi_definition(user_input: str) -> List[str]:
+        """
+        Search the vector store for OpenAPI *operation IDs / paths* most relevant
+        to **one** user task.
+
+        Always call this **once per new task** *before* attempting an
+        `open_api_call_executor`. Returns up to 5 candidate operations.
+        """
+        result = store.get_similar_tools(query_texts=[user_input], k=5)
+        docs: List[str] = result["documents"][0]
+        metas: List[Dict[str, Any]] = result["metadatas"][0]
+
+        return [_fmt_candidate(d, m) for d, m in zip(docs, metas)]
+
+    return get_openapi_definition


-def generate_api_call_executor(spec_parser: OpenAPISpecParser, auth_header: Dict = None):
-    base_url = spec_parser.spec.servers[0].url
-
-    class OpenAPIExecutorInput(BaseModel):
-        path_template: str = Field(description="Template of the API path that may include placeholders.")
-        method: str = Field(description="The HTTP method to be used for the API call (e.g., 'GET', 'POST').")
-        path_params: Dict[str, Any] = Field(default={}, description="Path parameters to be replaced in the path template.")
-        data: Dict[str, Any] = Field(default={}, description="Data to be sent in the body of the request, applicable for POST, PUT methods.")
-        headers: Optional[Dict[str, Any]] = Field(default=None, description="HTTP headers to send with the request.")
-        params: Optional[Dict[str, Any]] = Field(default=None, description="Query parameters to append to the URL.")
-
-        model_config = ConfigDict(extra="allow")
+class OpenAPIExecutorInput(BaseModel):
+    path_template: str = Field(
+        ..., description="Path template, may contain `{placeholder}` segments."
+    )
+    method: str = Field(..., description="HTTP verb, upper‑case.")
+    path_params: Dict[str, Any] = Field(
+        default_factory=dict, description="Replacements for path placeholders."
+    )
+    data: Dict[str, Any] = Field(
+        default_factory=dict, description="JSON body for POST/PUT/PATCH."
+    )
+    headers: Optional[Dict[str, Any]] = Field(
+        default=None, description="Extra request headers."
+    )
+    params: Optional[Dict[str, Any]] = Field(
+        default=None, description="Query params (?key=value)."
+    )
+
+    model_config = ConfigDict(extra="allow")
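Note: one detail in the rewritten input model is `default_factory=dict` in place of `default={}`. Pydantic copies defaults per instance, so the old form usually behaved, but `default_factory` states the intent explicitly and avoids the classic shared-mutable-default trap that plain Python has:

```python
# The shared-mutable-default hazard that default_factory avoids.
def add_item_bad(item, bucket=[]):  # one list object shared across all calls
    bucket.append(item)
    return bucket


def add_item_good(item, bucket=None):  # fresh list per call
    bucket = [] if bucket is None else bucket
    bucket.append(item)
    return bucket


print(add_item_bad("a"), add_item_bad("b"))    # ['a', 'b'] ['a', 'b'] -- surprise
print(add_item_good("a"), add_item_good("b"))  # ['a'] ['b']
```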


+def generate_api_call_executor(
+    spec: OpenAPISpecParser, auth_header: Optional[Dict[str, str]] = None
+):
+    base_url = spec.spec.servers[0].url  # assumes at least one server entry

     @tool(args_model=OpenAPIExecutorInput)
     def open_api_call_executor(
         *,
         path_template: str,
         method: str,
         path_params: Dict[str, Any],
         data: Dict[str, Any],
         headers: Optional[Dict[str, Any]] = None,
         params: Optional[Dict[str, Any]] = None,
-        **kwargs: Any
+        **req_kwargs: Any,
     ) -> Any:
         """
-        Execute an API call based on provided parameters and configuration.
-        It MUST be used after the get_openapi_definition to call APIs.
-        Make sure to include the right header values to authenticate to the API if needed.
-        """
-
-        # Format the path with path_params
-        formatted_path = path_template.format(**path_params)
-
-        # Parse the base_url and extract the version
-        parsed_url = urlparse(base_url)
-        origin = f"{parsed_url.scheme}://{parsed_url.netloc}"
-        base_path = parsed_url.path.strip('/')
-
-        base_version = extract_version(base_path)
-        path_version = extract_version(formatted_path)
-
-        # Avoid duplication of the version in the final URL
-        if base_version and path_version == base_version:
-            formatted_path = formatted_path[len(f"/{path_version}"):]
-
-        # Ensure there is a single slash between origin, base_path, and formatted_path
-        final_url = f"{origin}/{base_path}/{formatted_path}".replace('//', '/')
-        # Fix the issue by ensuring the correct scheme with double slashes
-        if not final_url.startswith('https://') and parsed_url.scheme == 'https':
-            final_url = final_url.replace('https:/', 'https://')
-
-        # Initialize the headers with auth_header if provided
-        final_headers = auth_header if auth_header else {}
-        # Update the final_headers with additional headers passed to the function
+        Execute **one** REST call described by an OpenAPI operation.
+
+        Use this only *after* `get_openapi_definition` has returned a matching
+        `path_template`/`method`.
+
+        Authentication: merge `auth_header` given at agent-init time with
+        any per-call `headers` argument (per-call overrides duplicates).
+        """
+
+        url = _join_url(base_url, path_template.format(**path_params))
+
+        final_headers = (auth_header or {}).copy()
         if headers:
             final_headers.update(headers)

-        if data:
-            data = json.dumps(data)  # Convert data to JSON string if not empty
-
-        request_kwargs = {
-            "headers": final_headers,
-            "params": params,
-            "data": data,
-            **kwargs
-        }
+        # redact auth key in debug logs
+        safe_hdrs = {
+            k: ("***" if "auth" in k.lower() or "key" in k.lower() else v)
+            for k, v in final_headers.items()
+        }
+
+        # Only convert data to JSON if we're doing a request that requires a body
+        # and there's actually data to send
+        body = None
+        if method.upper() in ["POST", "PUT", "PATCH"] and data:
+            body = json.dumps(data)
+
+        # Add more detailed logging similar to old implementation
+        logger.debug(
+            "→ %s %s | headers=%s params=%s data=%s",
+            method,
+            url,
+            safe_hdrs,
+            params,
+            "***" if body else None,
+        )

+        # For debugging purposes, similar to the old implementation
         print(f"Base Url: {base_url}")
-        print(f"Requested Url: {final_url}")
+        print(f"Requested Url: {url}")
         print(f"Requested Method: {method}")
         print(f"Requested Parameters: {params}")

-        # Filter out None values to avoid sending them to requests
-        request_kwargs = {k: v for k, v in request_kwargs.items() if v is not None}
-
-        response = requests.request(method, final_url, **request_kwargs)
-        return response.json()
+        resp = requests.request(
+            method, url, headers=final_headers, params=params, data=body, **req_kwargs
+        )
+        resp.raise_for_status()
+        return resp.json()

-    return open_api_call_executor
\ No newline at end of file
+    return open_api_call_executor
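Note: the intended flow pairs the two factories: discover a candidate operation first, then execute it. A hedged sketch of that wiring (assumes `spec_parser` and `tool_store` were built elsewhere; the `@tool` wrapper may expose a different invocation interface than the plain calls shown here):

```python
# Hypothetical wiring of the two factories; names and call style are assumptions.
find_api = generate_get_openapi_definition(tool_store)
call_api = generate_api_call_executor(
    spec_parser, auth_header={"Authorization": "Bearer <token>"}
)

# Step 1: search the indexed OpenAPI operations for one task.
candidates = find_api(user_input="get the current weather for a city")

# Step 2: execute the chosen operation with concrete parameters.
result = call_api(
    path_template="/weather/{city}",
    method="GET",
    path_params={"city": "Seattle"},
    data={},
    params={"units": "metric"},
)
```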

@@ -1 +1 @@
-from .base import ReActAgent
\ No newline at end of file
+from .base import ReActAgent

@@ -1,29 +1,45 @@
-from dapr_agents.types import AgentError, AssistantMessage, ChatCompletion, FunctionCall
+import json
+import logging
+import textwrap
+from datetime import datetime
+
+import regex
+from pydantic import ConfigDict, Field
+
+from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
+
 from dapr_agents.agent import AgentBase
 from dapr_agents.tool import AgentTool
-from typing import List, Dict, Any, Union, Callable, Literal, Optional, Tuple
-from datetime import datetime
-from pydantic import Field, ConfigDict
-import regex, json, textwrap, logging
+from dapr_agents.types import AgentError, AssistantMessage, ChatCompletion

 logger = logging.getLogger(__name__)


 class ReActAgent(AgentBase):
     """
     Agent implementing the ReAct (Reasoning-Action) framework for dynamic, few-shot problem-solving by leveraging
     contextual reasoning, actions, and observations in a conversation flow.
     """

-    stop_at_token: List[str] = Field(default=["\nObservation:"], description="Token(s) signaling the LLM to stop generation.")
-    tools: List[Union[AgentTool, Callable]] = Field(default_factory=list, description="Tools available for the agent, including final_answer.")
-    template_format: Literal["f-string", "jinja2"] = Field(default="jinja2", description="The format used for rendering the prompt template.")
+    stop_at_token: List[str] = Field(
+        default=["\nObservation:"],
+        description="Token(s) signaling the LLM to stop generation.",
+    )
+    tools: List[Union[AgentTool, Callable]] = Field(
+        default_factory=list,
+        description="Tools available for the agent, including final_answer.",
+    )
+    template_format: Literal["f-string", "jinja2"] = Field(
+        default="jinja2",
+        description="The format used for rendering the prompt template.",
+    )

     model_config = ConfigDict(arbitrary_types_allowed=True)

     def construct_system_prompt(self) -> str:
         """
         Constructs a system prompt in the ReAct reasoning-action format based on the agent's attributes and tools.

         Returns:
             str: The structured system message content.
         """

@@ -45,11 +61,16 @@ class ReActAgent(AgentBase):
         # Tools section with schema details
         tools_section = "## Tools\nYou have access ONLY to the following tools:\n"
         for tool in self.tools:
-            tools_section += f"{tool.name}: {tool.description}. Args schema: {tool.args_schema}\n"
-        prompt_parts.append(tools_section.rstrip())  # Trim any trailing newlines from tools_section
+            tools_section += (
+                f"{tool.name}: {tool.description}. Args schema: {tool.args_schema}\n"
+            )
+        prompt_parts.append(
+            tools_section.rstrip()
+        )  # Trim any trailing newlines from tools_section

         # Additional Guidelines
-        additional_guidelines = textwrap.dedent("""
+        additional_guidelines = textwrap.dedent(
+            """
         If you think about using tool, it must use the correct tool JSON blob format as shown below:
         ```
         {

@@ -57,11 +78,13 @@ class ReActAgent(AgentBase):
           "arguments": $INPUT
         }
         ```
-        """).strip()
+        """
+        ).strip()
         prompt_parts.append(additional_guidelines)

         # ReAct specific guidelines
-        react_guidelines = textwrap.dedent("""
+        react_guidelines = textwrap.dedent(
+            """
         ## ReAct Format
         Thought: Reflect on the current state of the conversation or task. If additional information is needed, determine if using a tool is necessary. When a tool is required, briefly explain why it is needed for the specific step at hand, and immediately follow this with an `Action:` statement to address that specific requirement. Avoid combining multiple tool requests in a single `Thought`. If no tools are needed, proceed directly to an `Answer:` statement.
         Action:

@@ -75,19 +98,19 @@ class ReActAgent(AgentBase):
         ... (repeat Thought/Action/Observation as needed, but **ALWAYS proceed to a final `Answer:` statement when you have enough information**)
         Thought: I now have sufficient information to answer the initial question.
         Answer: ALWAYS proceed to a final `Answer:` statement once enough information is gathered or if the tools do not provide the necessary data.

         ### Providing a Final Answer
         Once you have enough information to answer the question OR if tools cannot provide the necessary data, respond using one of the following formats:

         1. **Direct Answer without Tools**:
         Thought: I can answer directly without using any tools. Answer: Direct answer based on previous interactions or current knowledge.

         2. **When All Needed Information is Gathered**:
         Thought: I now have sufficient information to answer the question. Answer: Complete final answer here.

         3. **If Tools Cannot Provide the Needed Information**:
         Thought: The available tools do not provide the necessary information. Answer: Explanation of limitation and relevant information if possible.

         ### Key Guidelines
         - Always Conclude with an `Answer:` statement.
         - Ensure every response ends with an `Answer:` statement that summarizes the most recent findings or relevant information, avoiding incomplete thoughts.

@@ -98,37 +121,37 @@ class ReActAgent(AgentBase):
         - Progressively Move Towards Finality: Reflect on the current step and avoid re-evaluating the entire user request each time. Aim to advance towards the final Answer in each cycle.

         ## Chat History
-        The chat history is provided to avoid repeating information and to ensure accurate references when summarizing past interactions.
-        """).strip()
+        The chat history is provided to avoid repeating information and to ensure accurate references when summarizing past interactions.
+        """
+        ).strip()
         prompt_parts.append(react_guidelines)

         return "\n\n".join(prompt_parts)
-    def run(self, input_data: Optional[Union[str, Dict[str, Any]]] = None) -> Any:
+    async def run(self, input_data: Optional[Union[str, Dict[str, Any]]] = None) -> Any:
         """
-        Runs the main logic loop for processing the task and executing actions until a result is reached.
+        Runs the agent in a ReAct-style loop until it generates a final answer or reaches max iterations.

         Args:
-            input_data (Optional[Union[str, Dict[str, Any]]]): The task or data for the agent to process. If None, relies on memory.
+            input_data (Optional[Union[str, Dict[str, Any]]]): Initial task or message input.

         Returns:
-            Any: Final response after processing the task or reaching a final answer.
+            Any: The agent's final answer.

         Raises:
-            AgentError: On errors during chat message processing or action execution.
+            AgentError: If LLM fails or tool execution encounters issues.
         """
-        logger.debug(f"Agent run started with input: {input_data if input_data else 'Using memory context'}")
+        logger.debug(
+            f"Agent run started with input: {input_data or 'Using memory context'}"
+        )

         # Format messages; construct_messages already includes chat history.
         messages = self.construct_messages(input_data or {})

         # Get Last User Message
         user_message = self.get_last_user_message(messages)

-        if input_data:
-            # Add the new user message to memory only if input_data is provided
-            if user_message:  # Ensure a user message exists before adding to memory
-                self.memory.add_message(user_message)
+        # Add the new user message to memory only if input_data is provided and user message exists.
+        if input_data and user_message:
+            self.memory.add_message(user_message)

         # Always print the last user message for context, even if no input_data is provided
         if user_message:

@@ -139,7 +162,7 @@ class ReActAgent(AgentBase):

         # Initialize react_loop for iterative reasoning
         react_loop = ""

         for iteration in range(self.max_iterations):
             logger.info(f"Iteration {iteration + 1}/{self.max_iterations} started.")

@@ -158,107 +181,148 @@ class ReActAgent(AgentBase):
                     break
                 else:
                     # Append react_loop to the last message if no user message is found
-                    logger.warning("No user message found in the current messages; appending react_loop to the last message.")
-                    iteration_messages[-1]["content"] += f"\n{react_loop}"  # Append react_loop to the last message
+                    logger.warning(
+                        "No user message found in the current messages; appending react_loop to the last message."
+                    )
+                    iteration_messages[-1][
+                        "content"
+                    ] += f"\n{react_loop}"  # Append react_loop to the last message

             try:
-                response: ChatCompletion = self.llm.generate(messages=iteration_messages, stop=self.stop_at_token)
+                response: ChatCompletion = self.llm.generate(
+                    messages=iteration_messages, stop=self.stop_at_token
+                )

                 # Parse response into thought, action, and potential final answer
                 thought_action, action, final_answer = self.parse_response(response)

                 # Print Thought immediately
                 self.text_formatter.print_react_part("Thought", thought_action)

-                if final_answer:  # Direct final answer provided
-                    assistant_final_message = AssistantMessage(final_answer)
-                    self.memory.add_message(assistant_final_message)
+                if final_answer:
+                    assistant_final = AssistantMessage(final_answer)
+                    self.memory.add_message(assistant_final)
                     self.text_formatter.print_separator()
-                    self.text_formatter.print_message(assistant_final_message, include_separator=False)
+                    self.text_formatter.print_message(
+                        assistant_final, include_separator=False
+                    )
                     logger.info("Agent provided a direct final answer.")
                     return final_answer

                 # If there's no action, update the loop and continue reasoning
-                if action is None:
-                    logger.info("No action specified; continuing with further reasoning.")
+                if not action:
+                    logger.info(
+                        "No action specified; continuing with further reasoning."
+                    )
                     react_loop += f"Thought:{thought_action}\n"
                     continue  # Proceed to the next iteration

                 action_name = action["name"]
                 action_args = action["arguments"]

                 # Print Action
                 self.text_formatter.print_react_part("Action", json.dumps(action))

-                if action_name in available_tools:
-                    logger.info(f"Executing {action_name} with arguments {action_args}")
-                    function_call = FunctionCall(**action)
-                    execution_results = self.tool_executor.execute(action_name, **function_call.arguments_dict)
-
-                    # Print Observation
-                    self.text_formatter.print_react_part("Observation", execution_results)
-
-                    # Update react_loop with the current execution
-                    new_content = f"Thought:{thought_action}\nAction:{action}\nObservation:{execution_results}"
-                    react_loop += new_content
-                    logger.info(new_content)
-                else:
+                if action_name not in available_tools:
                     raise AgentError(f"Unknown tool specified: {action_name}")

+                logger.info(f"Executing {action_name} with arguments {action_args}")
+                result = await self.tool_executor.run_tool(action_name, **action_args)
+
+                # Print Observation
+                self.text_formatter.print_react_part("Observation", result)
+                react_loop += f"Thought:{thought_action}\nAction:{json.dumps(action)}\nObservation:{result}\n"

             except Exception as e:
-                logger.error(f"Failed during chat generation: {e}")
-                raise AgentError(f"Failed during chat generation: {e}") from e
+                logger.error(f"Error during ReAct agent loop: {e}")
+                raise AgentError(f"ReActAgent failed: {e}") from e

-        logger.info("Max iterations completed. Agent has stopped.")
+        logger.info("Max iterations reached. Agent has stopped.")

-    def parse_response(self, response: ChatCompletion) -> Tuple[str, Optional[dict], Optional[str]]:
-        """
-        Extracts the thought, action, and final answer (if present) from the language model response.
-
-        Args:
-            response (ChatCompletion): The language model's response message.
-
-        Returns:
-            tuple: (thought content, action dictionary if present, final answer if present)
-
-        Raises:
-            ValueError: If the action details cannot be decoded from the response.
-        """
-        pattern = r'\{(?:[^{}]|(?R))*\}'  # Pattern to match JSON blobs
-        message_content = response.get_content()
-
-        # Use regex to find the start of "Action" or "Final Answer" (case insensitive)
-        action_split_regex = regex.compile(r'(?i)action:\s*', regex.IGNORECASE)
-        final_answer_regex = regex.compile(r'(?i)answer:\s*(.*)', regex.IGNORECASE | regex.DOTALL)
-        thought_label_regex = regex.compile(r'(?i)thought:\s*', regex.IGNORECASE)
-
-        # Clean up any repeated or prefixed "Thought:" labels
-        message_content = thought_label_regex.sub('', message_content).strip()
-
-        # Check for "Final Answer" directly in the thought
-        final_answer_match = final_answer_regex.search(message_content)
-        if final_answer_match:
-            final_answer = final_answer_match.group(1).strip() if final_answer_match.group(1) else None
-            return message_content, None, final_answer
-
-        # Split the content into "thought" and "action" parts
-        if action_split_regex.search(message_content):
-            parts = action_split_regex.split(message_content, 1)
-            thought_part = parts[0].strip()  # Everything before "Action" is the thought part
-            action_part = parts[1] if len(parts) > 1 else None  # Everything after "Action" is the action part
-        else:
-            thought_part = message_content
-            action_part = None
-
-        # If there's an action part, attempt to extract the JSON blob
-        if action_part:
-            matches = regex.finditer(pattern, action_part, regex.DOTALL)
-            for match in matches:
-                try:
-                    action_dict = json.loads(match.group())
-                    return thought_part, action_dict, None  # Return thought and action directly
-                except json.JSONDecodeError:
-                    continue
-
-        # If no action is found, just return the thought part with None for action and final answer
-        return thought_part, None, None
+    def parse_response(
+        self, response: ChatCompletion
+    ) -> Tuple[str, Optional[dict], Optional[str]]:
+        """
+        Parses a ReAct-style LLM response into a Thought, optional Action (JSON blob), and optional Final Answer.
+
+        Args:
+            response (ChatCompletion): The LLM response object containing the message content.
+
+        Returns:
+            Tuple[str, Optional[dict], Optional[str]]:
+                - Thought string.
+                - Parsed Action dictionary, if present.
+                - Final Answer string, if present.
+        """
+        pattern = r"\{(?:[^{}]|(?R))*\}"  # Recursive pattern to match nested JSON blobs
+        content = response.get_content()
+
+        # Compile reusable regex patterns
+        action_split_regex = regex.compile(r"action:\s*", flags=regex.IGNORECASE)
+        final_answer_regex = regex.compile(
+            r"answer:\s*(.*)", flags=regex.IGNORECASE | regex.DOTALL
+        )
+        thought_label_regex = regex.compile(r"thought:\s*", flags=regex.IGNORECASE)
+
+        # Strip leading "Thought:" labels (they get repeated a lot)
+        content = thought_label_regex.sub("", content).strip()
+
+        # Check if there's a final answer present
+        if final_match := final_answer_regex.search(content):
+            final_answer = final_match.group(1).strip()
+            logger.debug(f"[parse_response] Final answer detected: {final_answer}")
+            return content, None, final_answer
+
+        # Split on first "Action:" to separate Thought and Action
+        if action_split_regex.search(content):
+            thought_part, action_block = action_split_regex.split(content, 1)
+            thought_part = thought_part.strip()
+            logger.debug(f"[parse_response] Thought extracted: {thought_part}")
+            logger.debug(
+                f"[parse_response] Action block to parse: {action_block.strip()}"
+            )
+        else:
+            logger.debug(
+                f"[parse_response] No action or answer found. Returning content as Thought: {content}"
+            )
+            return content, None, None
+
+        # Attempt to extract the first valid JSON blob from the action block
+        for match in regex.finditer(pattern, action_block, flags=regex.DOTALL):
+            try:
+                action_dict = json.loads(match.group())
+                logger.debug(
+                    f"[parse_response] Successfully parsed action: {action_dict}"
+                )
+                return thought_part, action_dict, None
+            except json.JSONDecodeError as e:
+                logger.debug(
+                    f"[parse_response] Failed to parse action JSON blob: {match.group()} — {e}"
+                )
+                continue

+        logger.debug(
+            "[parse_response] No valid action JSON found. Returning Thought only."
+        )
+        return thought_part, None, None
+
+    async def run_tool(self, tool_name: str, *args, **kwargs) -> Any:
+        """
+        Executes a tool by name, resolving async or sync tools automatically.
+
+        Args:
+            tool_name (str): The name of the registered tool.
+            *args: Positional arguments.
+            **kwargs: Keyword arguments.
+
+        Returns:
+            Any: The tool result.
+
+        Raises:
+            AgentError: If execution fails.
+        """
+        try:
+            return await self.tool_executor.run_tool(tool_name, *args, **kwargs)
+        except Exception as e:
+            logger.error(f"Failed to run tool '{tool_name}' via ReActAgent: {e}")
+            raise AgentError(f"Error running tool '{tool_name}': {e}") from e
|
||||
|
|
|
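The new parser is easy to exercise in isolation. A minimal sketch follows, assuming a hand-written ReAct-style string in place of a real `ChatCompletion`; the sample text and variable names are invented for illustration:

import json
import regex

# Hypothetical model output in ReAct format (not a real ChatCompletion object).
sample = (
    "Thought: I should look up the forecast.\n"
    'Action: {"name": "get_weather", "arguments": {"city": "Paris"}}'
)

pattern = r"\{(?:[^{}]|(?R))*\}"  # same recursive pattern: balanced, nested JSON braces
thought, action_block = regex.split(r"action:\s*", sample, flags=regex.IGNORECASE)
action = json.loads(next(regex.finditer(pattern, action_block, flags=regex.DOTALL)).group())

print(thought.strip())   # Thought: I should look up the forecast.
print(action["name"])    # get_weather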
@@ -1 +1 @@
from .base import ToolCallAgent
from .base import ToolCallAgent
@@ -6,14 +6,20 @@ import logging

logger = logging.getLogger(__name__)


class ToolCallAgent(AgentBase):
    """
    Agent that manages tool calls and conversations using a language model.
    It integrates tools and processes them based on user inputs and task orchestration.
    """

    tool_history: List[ToolMessage] = Field(default_factory=list, description="Executed tool calls during the conversation.")
    tool_choice: Optional[str] = Field(default=None, description="Strategy for selecting tools ('auto', 'required', 'none'). Defaults to 'auto' if tools are provided.")
    tool_history: List[ToolMessage] = Field(
        default_factory=list, description="Executed tool calls during the conversation."
    )
    tool_choice: Optional[str] = Field(
        default=None,
        description="Strategy for selecting tools ('auto', 'required', 'none'). Defaults to 'auto' if tools are provided.",
    )

    model_config = ConfigDict(arbitrary_types_allowed=True)

@@ -22,100 +28,128 @@ class ToolCallAgent(AgentBase):
        Initialize the agent's settings, such as tool choice and parent setup.
        Sets the tool choice strategy based on provided tools.
        """
        self.tool_choice = self.tool_choice or ('auto' if self.tools else None)
        self.tool_choice = self.tool_choice or ("auto" if self.tools else None)

        # Proceed with base model setup
        super().model_post_init(__context)

    def run(self, input_data: Optional[Union[str, Dict[str, Any]]] = None) -> Any:
    async def run(self, input_data: Optional[Union[str, Dict[str, Any]]] = None) -> Any:
        """
        Executes the agent's main task using the provided input or memory context.
        Asynchronously executes the agent's main task using the provided input or memory context.

        Args:
            input_data (Optional[Union[str, Dict[str, Any]]]): User's input, either as a string, a dictionary, or `None` to use memory context.
            input_data (Optional[Union[str, Dict[str, Any]]]): User input as string or dict.

        Returns:
            Any: The agent's response after processing the input.
            Any: The agent's final output.

        Raises:
            AgentError: If the input data is invalid or if a user message is missing.
            AgentError: If user input is invalid or tool execution fails.
        """
        logger.debug(f"Agent run started with input: {input_data if input_data else 'Using memory context'}")
        logger.debug(
            f"Agent run started with input: {input_data if input_data else 'Using memory context'}"
        )

        # Format messages; construct_messages already includes chat history.
        messages = self.construct_messages(input_data or {})

        # Get Last User Message
        user_message = self.get_last_user_message(messages)

        if input_data:
            # Add the new user message to memory only if input_data is provided
            if user_message:  # Ensure a user message exists before adding to memory
                self.memory.add_message(user_message)
        if input_data and user_message:
            # Add the new user message to memory only if input_data is provided and user message exists
            self.memory.add_message(user_message)

        # Always print the last user message for context, even if no input_data is provided
        if user_message:
            self.text_formatter.print_message(user_message)

        # Process conversation iterations
        return self.process_iterations(messages)
        return await self.process_iterations(messages)

    def process_response(self, tool_calls: List[dict]) -> None:
    async def process_response(self, tool_calls: List[dict]) -> None:
        """
        Execute tool calls and log their results in the tool history.
        Asynchronously executes tool calls and appends tool results to memory.

        Args:
            tool_calls (List[dict]): Definitions of tool calls from the response.
            tool_calls (List[dict]): Tool calls returned by the LLM.

        Raises:
            AgentError: If an error occurs during tool execution.
            AgentError: If a tool execution fails.
        """
        for tool in tool_calls:
            function_name = tool.function.name
            try:
                logger.info(f"Executing {function_name} with arguments {tool.function.arguments}")
                result = self.tool_executor.execute(function_name, **tool.function.arguments_dict)
                tool_message = ToolMessage(tool_call_id=tool.id, name=function_name, content=str(result))
                logger.info(
                    f"Executing {function_name} with arguments {tool.function.arguments}"
                )
                result = await self.tool_executor.run_tool(
                    function_name, **tool.function.arguments_dict
                )
                tool_message = ToolMessage(
                    tool_call_id=tool.id, name=function_name, content=str(result)
                )
                self.text_formatter.print_message(tool_message)
                self.tool_history.append(tool_message)
            except Exception as e:
                logger.error(f"Error executing tool {function_name}: {e}")
                raise AgentError(f"Error executing tool '{function_name}': {e}") from e

    def process_iterations(self, messages: List[Dict[str, Any]]) -> Any:
    async def process_iterations(self, messages: List[Dict[str, Any]]) -> Any:
        """
        Processes conversation iterations, invoking tool calls as needed.
        Iteratively drives the agent conversation until a final answer or max iterations.

        Args:
            messages (List[Dict[str, Any]]): Initial conversation messages.

        Returns:
            Any: The final response content after processing all iterations.
            Any: The final assistant message.

        Raises:
            AgentError: If an error occurs during chat generation or if maximum iterations are reached.
            AgentError: On chat failure or tool issues.
        """
        for iteration in range(self.max_iterations):
            logger.info(f"Iteration {iteration + 1}/{self.max_iterations} started.")

            messages += self.tool_history

            try:
                response: ChatCompletion = self.llm.generate(messages=messages, tools=self.tools, tool_choice=self.tool_choice)
                response: ChatCompletion = self.llm.generate(
                    messages=messages,
                    tools=self.tools,
                    tool_choice=self.tool_choice,
                )
                response_message = response.get_message()
                self.text_formatter.print_message(response_message)

                if response.get_reason() == "tool_calls":
                    self.tool_history.append(response_message)
                    self.process_response(response.get_tool_calls())
                    await self.process_response(response.get_tool_calls())
                else:
                    self.memory.add_message(AssistantMessage(response.get_content()))
                    self.tool_history.clear()
                    return response.get_content()

            except Exception as e:
                logger.error(f"Error during chat generation: {e}")
                raise AgentError(f"Failed during chat generation: {e}") from e

        logger.info("Max iterations reached. Agent has stopped.")
        logger.info("Max iterations reached. Agent has stopped.")

    async def run_tool(self, tool_name: str, *args, **kwargs) -> Any:
        """
        Executes a registered tool by name, automatically handling sync or async tools.

        Args:
            tool_name (str): Name of the tool to run.
            *args: Positional arguments passed to the tool.
            **kwargs: Keyword arguments passed to the tool.

        Returns:
            Any: Result from the tool execution.

        Raises:
            AgentError: If the tool is not found or execution fails.
        """
        try:
            return await self.tool_executor.run_tool(tool_name, *args, **kwargs)
        except Exception as e:
            logger.error(f"Agent failed to run tool '{tool_name}': {e}")
            raise AgentError(f"Failed to run tool '{tool_name}': {e}") from e
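Because `run`, `process_response`, and `process_iterations` are now coroutines, callers must drive the agent from an event loop. A hedged usage sketch; the import path and constructor arguments are assumptions, not taken from this diff:

import asyncio

from dapr_agents import Agent  # assumed import path

async def main() -> None:
    # 'pattern' selects ToolCallAgent via the factory shown later in this diff.
    agent = Agent(pattern="toolcalling", role="Assistant", name="Helper", tools=[])
    answer = await agent.run("What is Dapr?")  # run() is now awaitable
    print(answer)

asyncio.run(main())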
@@ -0,0 +1 @@
from .otel import DaprAgentsOTel

@@ -0,0 +1,144 @@
from logging import Logger
from typing import Union

from opentelemetry._logs import set_logger_provider
from opentelemetry.metrics import set_meter_provider
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
from opentelemetry.sdk.resources import Resource, SERVICE_NAME
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace import set_tracer_provider
from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter


class DaprAgentsOTel:
    """
    OpenTelemetry configuration for Dapr agents.
    """

    def __init__(self, service_name: str = "", otlp_endpoint: str = ""):
        # Configure OpenTelemetry
        self.service_name = service_name
        self.otlp_endpoint = otlp_endpoint

        self.setup_resources()

    def setup_resources(self):
        """
        Set up the resource for OpenTelemetry.
        """
        self._resource = Resource.create(
            attributes={
                SERVICE_NAME: str(self.service_name),
            }
        )

    def create_and_instrument_meter_provider(
        self,
        otlp_endpoint: str = "",
    ) -> MeterProvider:
        """
        Returns a `MeterProvider` that is configured to export metrics using the `PeriodicExportingMetricReader`,
        which means that metrics are exported periodically in the background. The interval can be set by
        the environment variable `OTEL_METRIC_EXPORT_INTERVAL`. The default value is 60000ms (1 minute).

        Also sets the global OpenTelemetry meter provider to the returned meter provider.
        """
        # Ensure the endpoint is set correctly
        endpoint = self._endpoint_validator(
            endpoint=self.otlp_endpoint if otlp_endpoint == "" else otlp_endpoint,
            telemetry_type="metrics",
        )

        metric_exporter = OTLPMetricExporter(endpoint=str(endpoint))
        metric_reader = PeriodicExportingMetricReader(metric_exporter)
        meter_provider = MeterProvider(
            resource=self._resource, metric_readers=[metric_reader]
        )
        set_meter_provider(meter_provider)
        return meter_provider

    def create_and_instrument_tracer_provider(
        self,
        otlp_endpoint: str = "",
    ) -> TracerProvider:
        """
        Returns a `TracerProvider` that is configured to export traces using the `BatchSpanProcessor`,
        which means that traces are exported in batches. The batch size can be set by
        the environment variable `OTEL_TRACES_EXPORT_BATCH_SIZE`. The default value is 512.
        Also sets the global OpenTelemetry tracer provider to the returned tracer provider.
        """
        # Ensure the endpoint is set correctly
        endpoint = self._endpoint_validator(
            endpoint=self.otlp_endpoint if otlp_endpoint == "" else otlp_endpoint,
            telemetry_type="traces",
        )

        trace_exporter = OTLPSpanExporter(endpoint=str(endpoint))
        tracer_processor = BatchSpanProcessor(trace_exporter)
        tracer_provider = TracerProvider(resource=self._resource)
        tracer_provider.add_span_processor(tracer_processor)
        set_tracer_provider(tracer_provider)
        return tracer_provider

    def create_and_instrument_logging_provider(
        self,
        logger: Logger,
        otlp_endpoint: str = "",
    ) -> LoggerProvider:
        """
        Returns a `LoggerProvider` that is configured to export logs using the `BatchLogRecordProcessor`,
        which means that logs are exported in batches. The batch size can be set by
        the environment variable `OTEL_LOGS_EXPORT_BATCH_SIZE`. The default value is 512.
        Also sets the global OpenTelemetry logger provider to the returned logging provider.
        """
        # Ensure the endpoint is set correctly
        endpoint = self._endpoint_validator(
            endpoint=self.otlp_endpoint if otlp_endpoint == "" else otlp_endpoint,
            telemetry_type="logs",
        )

        log_exporter = OTLPLogExporter(endpoint=str(endpoint))
        logging_provider = LoggerProvider(resource=self._resource)
        logging_provider.add_log_record_processor(BatchLogRecordProcessor(log_exporter))
        set_logger_provider(logging_provider)

        handler = LoggingHandler(logger_provider=logging_provider)
        logger.addHandler(handler)
        return logging_provider

    def _endpoint_validator(
        self,
        endpoint: str,
        telemetry_type: str,
    ) -> Union[str, Exception]:
        """
        Validates the endpoint and method.
        """
        if endpoint == "":
            raise ValueError(
                "OTLP endpoint must be set either in the environment variable OTEL_EXPORTER_OTLP_ENDPOINT or in the constructor."
            )
        if endpoint.startswith("https://"):
            raise NotImplementedError(
                "OTLP over HTTPS is not supported. Please use HTTP."
            )

        endpoint = (
            endpoint
            if endpoint.endswith(f"/v1/{telemetry_type}")
            else f"{endpoint}/v1/{telemetry_type}"
        )
        endpoint = endpoint if endpoint.startswith("http://") else f"http://{endpoint}"

        return endpoint
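A short wiring sketch for the class above. The package path is an assumption (the `__init__.py` hunk only shows a relative import), and `localhost:4318` is the conventional OTLP/HTTP port; `_endpoint_validator` prepends `http://` and appends the `/v1/<type>` suffix automatically:

import logging

from dapr_agents.observability import DaprAgentsOTel  # assumed package path

otel = DaprAgentsOTel(service_name="weather-agent", otlp_endpoint="localhost:4318")
otel.create_and_instrument_tracer_provider()   # traces  -> http://localhost:4318/v1/traces
otel.create_and_instrument_meter_provider()    # metrics -> http://localhost:4318/v1/metrics
otel.create_and_instrument_logging_provider(logger=logging.getLogger("weather-agent"))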
@@ -1,28 +1,33 @@
import requests
import os

def construct_auth_headers(auth_url, grant_type='client_credentials', **kwargs):

def construct_auth_headers(auth_url, grant_type="client_credentials", **kwargs):
    """
    Construct authorization headers for API requests.

    :param auth_url: The authorization URL.
    :param grant_type: The type of OAuth grant (default is 'client_credentials').
    :param kwargs: Additional parameters for the POST request body.

    :return: A dictionary containing the Authorization header.
    """
    # Define default parameters based on the grant_type
    data = {
        'grant_type': grant_type,
        "grant_type": grant_type,
    }

    # Defaults for client_credentials grant type
    if grant_type == 'client_credentials':
        data.update({
            'client_id': kwargs.get('client_id', os.getenv('CLIENT_ID')),
            'client_secret': kwargs.get('client_secret', os.getenv('CLIENT_SECRET')),
        })
    if grant_type == "client_credentials":
        data.update(
            {
                "client_id": kwargs.get("client_id", os.getenv("CLIENT_ID")),
                "client_secret": kwargs.get(
                    "client_secret", os.getenv("CLIENT_SECRET")
                ),
            }
        )

    # Add any additional data passed in kwargs
    data.update(kwargs)

@@ -37,9 +42,9 @@ def construct_auth_headers(auth_url, grant_type='client_credentials', **kwargs):
    auth_response_data = auth_response.json()

    # Extract the access token
    access_token = auth_response_data.get('access_token')
    access_token = auth_response_data.get("access_token")

    if not access_token:
        raise ValueError("No access token found in the response")

    return {"Authorization": f"Bearer {access_token}"}
    return {"Authorization": f"Bearer {access_token}"}
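Usage is a single call. A sketch with a placeholder token URL; credentials fall back to the `CLIENT_ID` and `CLIENT_SECRET` environment variables when not passed explicitly:

# Sketch: obtain a bearer token via the client_credentials flow.
headers = construct_auth_headers(
    "https://auth.example.com/oauth/token",  # placeholder endpoint
    client_id="my-client",
    client_secret="my-secret",
)
# -> {"Authorization": "Bearer <access_token>"}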
@@ -8,16 +8,18 @@ from dapr_agents.memory import MemoryBase
from dapr_agents.tool import AgentTool
from typing import Optional, List, Union, Type, TypeVar

T = TypeVar('T', ToolCallAgent, ReActAgent, OpenAPIReActAgent)
T = TypeVar("T", ToolCallAgent, ReActAgent, OpenAPIReActAgent)


class AgentFactory:
    """
    Returns agent classes based on the provided pattern.
    """

    AGENT_PATTERNS = {
        "react": ReActAgent,
        "toolcalling": ToolCallAgent,
        "openapireact": OpenAPIReActAgent
        "openapireact": OpenAPIReActAgent,
    }

    @staticmethod

@@ -54,7 +56,7 @@ class Agent(AgentBase):
        llm: Optional[LLMClientBase] = None,
        memory: Optional[MemoryBase] = None,
        tools: Optional[List[AgentTool]] = [],
        **kwargs
        **kwargs,
    ) -> Union[ToolCallAgent, ReActAgent, OpenAPIReActAgent]:
        """
        Creates and returns an instance of the selected agent class.

@@ -77,11 +79,21 @@ class Agent(AgentBase):
        memory = memory or ConversationListMemory()

        if pattern == "openapireact":
            kwargs.update({
                "spec_parser": kwargs.get('spec_parser', OpenAPISpecParser()),
                "auth_header": kwargs.get('auth_header', {})
            })
            kwargs.update(
                {
                    "spec_parser": kwargs.get("spec_parser", OpenAPISpecParser()),
                    "auth_header": kwargs.get("auth_header", {}),
                }
            )

        instance = super().__new__(agent_class)
        agent_class.__init__(instance, role=role, name=name, llm=llm, memory=memory, tools=tools, **kwargs)
        return instance
        agent_class.__init__(
            instance,
            role=role,
            name=name,
            llm=llm,
            memory=memory,
            tools=tools,
            **kwargs,
        )
        return instance
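The factory keys on `AGENT_PATTERNS`, so selecting an implementation is just a string. A sketch under the assumption that the `pattern` keyword is consumed by `Agent.__new__`, whose full signature is elided from this hunk:

# Sketch: pattern-based construction; "react" maps to ReActAgent above.
agent = Agent(
    pattern="react",
    role="Research Assistant",
    name="Ava",
    tools=[],
)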
@@ -2,6 +2,7 @@ from dapr_agents.types import BaseMessage
from typing import List
from pydantic import ValidationError


def messages_to_string(messages: List[BaseMessage]) -> str:
    """
    Converts messages into a single string with roles and content.

@@ -36,4 +37,4 @@ def messages_to_string(messages: List[BaseMessage]) -> str:
    except (ValidationError, ValueError) as e:
        raise ValueError(f"Invalid message in chat history. Error: {e}")

    return "\n".join(formatted_history)
    return "\n".join(formatted_history)
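This helper flattens chat history into a prompt-friendly transcript. A sketch, assuming `BaseMessage` accepts `role` and `content` and that each line renders as `role: content`; the exact format lives in the elided hunk body:

from dapr_agents.types import BaseMessage

history = [
    BaseMessage(role="user", content="Hi"),
    BaseMessage(role="assistant", content="Hello! How can I help?"),
]
print(messages_to_string(history))
# user: Hi
# assistant: Hello! How can I help?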
@@ -4,14 +4,15 @@ from colorama import Style

# Define your custom colors as a dictionary
COLORS = {
    "dapr_agents_teal": '\033[38;2;147;191;183m',
    "dapr_agents_mustard": '\033[38;2;242;182;128m',
    "dapr_agents_red": '\033[38;2;217;95;118m',
    "dapr_agents_pink": '\033[38;2;191;69;126m',
    "dapr_agents_purple": '\033[38;2;146;94;130m',
    "reset": Style.RESET_ALL
    "dapr_agents_teal": "\033[38;2;147;191;183m",
    "dapr_agents_mustard": "\033[38;2;242;182;128m",
    "dapr_agents_red": "\033[38;2;217;95;118m",
    "dapr_agents_pink": "\033[38;2;191;69;126m",
    "dapr_agents_purple": "\033[38;2;146;94;130m",
    "reset": Style.RESET_ALL,
}


class ColorTextFormatter:
    """
    A flexible text formatter class to print colored text dynamically.

@@ -40,7 +41,7 @@ class ColorTextFormatter:
        """
        color_code = COLORS.get(color, self.default_color)
        return f"{color_code}{text}{COLORS['reset']}"

    def print_colored_text(self, text_blocks: list[tuple[str, Optional[str]]]):
        """
        Print multiple blocks of text in specified colors dynamically, ensuring that newlines

@@ -55,9 +56,9 @@ class ColorTextFormatter:
        for i, line in enumerate(lines):
            formatted_line = self.format_text(line, color)
            print(formatted_line, end="\n" if i < len(lines) - 1 else "")

        print(COLORS['reset'])  # Ensure terminal color is reset at the end
        print(COLORS["reset"])  # Ensure terminal color is reset at the end

    def print_separator(self):
        """
        Prints a separator line.

@@ -65,13 +66,17 @@ class ColorTextFormatter:
        separator = "-" * 80
        self.print_colored_text([(f"\n{separator}\n", "reset")])

    def print_message(self, message: Union[BaseMessage, Dict[str, Any]], include_separator: bool = True):
    def print_message(
        self,
        message: Union[BaseMessage, Dict[str, Any]],
        include_separator: bool = True,
    ):
        """
        Prints messages with colored formatting based on the role and message content.

        Args:
            message (Union[BaseMessage, Dict[str, Any]]): The message content, either as a BaseMessage object or
                a dictionary. If a BaseMessage is provided, it will be
                converted to a dictionary using its `model_dump` method.
            include_separator (bool): Whether to include a separator line after the message. Defaults to True.
        """

@@ -86,14 +91,14 @@ class ColorTextFormatter:
        formatted_role = f"{name}({role})" if name else role

        content = message.get("content", "")

        color_map = {
            "user": "dapr_agents_mustard",
            "assistant": "dapr_agents_teal",
            "tool_calls": "dapr_agents_red",
            "tool": "dapr_agents_pink"
            "tool": "dapr_agents_pink",
        }

        # Handle tool calls
        if "tool_calls" in message and message["tool_calls"]:
            tool_calls = message["tool_calls"]

@@ -103,13 +108,16 @@ class ColorTextFormatter:
                tool_id = tool_call["id"]
                tool_call_text = [
                    (f"{formatted_role}:\n", color_map["tool_calls"]),
                    (f"Function name: {function_name} (Call Id: {tool_id})\n", color_map["tool_calls"]),
                    (
                        f"Function name: {function_name} (Call Id: {tool_id})\n",
                        color_map["tool_calls"],
                    ),
                    (f"Arguments: {arguments}", color_map["tool_calls"]),
                ]
                self.print_colored_text(tool_call_text)
                if include_separator:
                    self.print_separator()

        elif role == "tool":
            # Handle tool messages
            tool_call_id = message.get("tool_call_id", "Unknown")

@@ -130,7 +138,7 @@ class ColorTextFormatter:
            self.print_colored_text(regular_message_text)
            if include_separator:
                self.print_separator()

    def print_react_part(self, part_type: str, content: str):
        """
        Prints a part of the ReAct loop (Thought, Action, Observation) with the corresponding color.

@@ -142,11 +150,11 @@ class ColorTextFormatter:
        color_map = {
            "Thought": "dapr_agents_red",
            "Action": "dapr_agents_pink",
            "Observation": "dapr_agents_purple"
            "Observation": "dapr_agents_purple",
        }

        # Get the color for the part type, defaulting to reset if not found
        color = color_map.get(part_type, "reset")

        # Print the part with the specified color
        self.print_colored_text([(f"{part_type}: {content}", color)])
        self.print_colored_text([(f"{part_type}: {content}", color)])
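A quick sketch of the formatter in action. The no-argument constructor is an assumption, since the `default_color` initialization sits outside this hunk:

formatter = ColorTextFormatter()
formatter.print_colored_text(
    [
        ("Thought: ", "dapr_agents_red"),
        ("check the forecast first", "reset"),
    ]
)
formatter.print_react_part("Action", '{"name": "get_weather"}')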
@@ -1,4 +1,4 @@
from .fetcher import ArxivFetcher
from .reader import PyMuPDFReader, PyPDFReader
from .splitter import TextSplitter
from .embedder import OpenAIEmbedder, SentenceTransformerEmbedder, NVIDIAEmbedder
from .embedder import OpenAIEmbedder, SentenceTransformerEmbedder, NVIDIAEmbedder
@@ -1,3 +1,3 @@
from .openai import OpenAIEmbedder
from .sentence import SentenceTransformerEmbedder
from .nvidia import NVIDIAEmbedder
from .nvidia import NVIDIAEmbedder
@@ -2,6 +2,7 @@ from abc import ABC, abstractmethod
from pydantic import BaseModel
from typing import List, Any


class EmbedderBase(BaseModel, ABC):
    """
    Abstract base class for Embedders.

@@ -19,4 +20,4 @@ class EmbedderBase(BaseModel, ABC):
        Returns:
            List[Any]: A list of results.
        """
        pass
        pass
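Concrete embedders subclass this base and implement the abstract entry point. A toy subclass, purely illustrative and not part of the library; the abstract method name `embed` is inferred from the concrete embedders that follow:

from typing import Any, List

class ToyEmbedder(EmbedderBase):
    """Not part of the library; shows the minimal contract only."""

    def embed(self, input: str) -> List[Any]:
        # Deterministic pseudo-embedding: one float per character.
        return [float(ord(c)) for c in input[:8]]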
@@ -7,6 +7,7 @@ import logging

logger = logging.getLogger(__name__)


class NVIDIAEmbedder(NVIDIAEmbeddingClient, EmbedderBase):
    """
    NVIDIA-based embedder for generating text embeddings with support for indexing (passage) and querying.

@@ -17,10 +18,16 @@ class NVIDIAEmbedder(NVIDIAEmbeddingClient, EmbedderBase):
        normalize (bool): Whether to normalize embeddings. Defaults to True.
    """

    chunk_size: int = Field(default=1000, description="Batch size for embedding requests.")
    normalize: bool = Field(default=True, description="Whether to normalize embeddings.")
    chunk_size: int = Field(
        default=1000, description="Batch size for embedding requests."
    )
    normalize: bool = Field(
        default=True, description="Whether to normalize embeddings."
    )

    def embed(self, input: Union[str, List[str]]) -> Union[List[float], List[List[float]]]:
    def embed(
        self, input: Union[str, List[str]]
    ) -> Union[List[float], List[List[float]]]:
        """
        Embeds input text(s) for indexing with default input_type set to 'passage'.

@@ -37,7 +44,9 @@ class NVIDIAEmbedder(NVIDIAEmbeddingClient, EmbedderBase):
        """
        return self._generate_embeddings(input, input_type="passage")

    def embed_query(self, input: Union[str, List[str]]) -> Union[List[float], List[List[float]]]:
    def embed_query(
        self, input: Union[str, List[str]]
    ) -> Union[List[float], List[List[float]]]:
        """
        Embeds input text(s) for querying with input_type set to 'query'.

@@ -54,7 +63,9 @@ class NVIDIAEmbedder(NVIDIAEmbeddingClient, EmbedderBase):
        """
        return self._generate_embeddings(input, input_type="query")

    def _generate_embeddings(self, input: Union[str, List[str]], input_type: str) -> Union[List[float], List[List[float]]]:
    def _generate_embeddings(
        self, input: Union[str, List[str]], input_type: str
    ) -> Union[List[float], List[List[float]]]:
        """
        Helper function to generate embeddings for given input text(s) with specified input_type.

@@ -75,14 +86,15 @@ class NVIDIAEmbedder(NVIDIAEmbeddingClient, EmbedderBase):
        # Process input in chunks for efficiency
        chunk_embeddings = []
        for i in range(0, len(input_list), self.chunk_size):
            batch = input_list[i:i + self.chunk_size]
            batch = input_list[i : i + self.chunk_size]
            response = self.create_embedding(input=batch, input_type=input_type)
            chunk_embeddings.extend(r.embedding for r in response.data)

        # Normalize embeddings if required
        if self.normalize:
            normalized_embeddings = [
                (embedding / np.linalg.norm(embedding)).tolist() for embedding in chunk_embeddings
                (embedding / np.linalg.norm(embedding)).tolist()
                for embedding in chunk_embeddings
            ]
        else:
            normalized_embeddings = chunk_embeddings

@@ -90,7 +102,9 @@ class NVIDIAEmbedder(NVIDIAEmbeddingClient, EmbedderBase):
        # Return a single embedding if the input was a single string; otherwise, return a list
        return normalized_embeddings[0] if single_input else normalized_embeddings

    def __call__(self, input: Union[str, List[str]], query: bool = False) -> Union[List[float], List[List[float]]]:
    def __call__(
        self, input: Union[str, List[str]], query: bool = False
    ) -> Union[List[float], List[List[float]]]:
        """
        Allows the instance to be called directly to embed text(s).

@@ -103,4 +117,4 @@ class NVIDIAEmbedder(NVIDIAEmbeddingClient, EmbedderBase):
        """
        if query:
            return self.embed_query(input)
        return self.embed(input)
        return self.embed(input)
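The passage/query split matters for retrieval-tuned models. A sketch; the model name is illustrative, and credentials are assumed to come from the inherited `NVIDIAEmbeddingClient` configuration:

embedder = NVIDIAEmbedder(model="nvidia/nv-embedqa-e5-v5")  # illustrative model name

doc_vectors = embedder(["first passage", "second passage"])  # input_type="passage"
query_vector = embedder("what is dapr?", query=True)         # input_type="query"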
@@ -7,20 +7,31 @@ import logging

logger = logging.getLogger(__name__)


class OpenAIEmbedder(OpenAIEmbeddingClient, EmbedderBase):
    """
    OpenAI-based embedder for generating text embeddings with handling for long inputs.
    Inherits functionality from OpenAIEmbeddingClient for API interactions.
    """

    max_tokens: int = Field(default=8191, description="Maximum tokens allowed per input.")
    chunk_size: int = Field(default=1000, description="Batch size for embedding requests.")
    normalize: bool = Field(default=True, description="Whether to normalize embeddings.")
    encoding_name: Optional[str] = Field(default=None, description="Token encoding name (if provided).")
    encoder: Optional[Any] = Field(default=None, init=False, description="TikToken Encoder")
    max_tokens: int = Field(
        default=8191, description="Maximum tokens allowed per input."
    )
    chunk_size: int = Field(
        default=1000, description="Batch size for embedding requests."
    )
    normalize: bool = Field(
        default=True, description="Whether to normalize embeddings."
    )
    encoding_name: Optional[str] = Field(
        default=None, description="Token encoding name (if provided)."
    )
    encoder: Optional[Any] = Field(
        default=None, init=False, description="TikToken Encoder"
    )

    model_config = ConfigDict(arbitrary_types_allowed=True)

    def model_post_init(self, __context: Any) -> None:
        """
        Initialize attributes after model validation.

@@ -59,9 +70,13 @@ class OpenAIEmbedder(OpenAIEmbeddingClient, EmbedderBase):

    def _chunk_tokens(self, tokens: List[int], chunk_length: int) -> List[List[int]]:
        """Splits tokens into chunks of the specified length."""
        return [tokens[i:i + chunk_length] for i in range(0, len(tokens), chunk_length)]
        return [
            tokens[i : i + chunk_length] for i in range(0, len(tokens), chunk_length)
        ]

    def _process_embeddings(self, embeddings: List[List[float]], weights: List[int]) -> List[float]:
    def _process_embeddings(
        self, embeddings: List[List[float]], weights: List[int]
    ) -> List[float]:
        """Combines embeddings using weighted averaging."""
        weighted_avg = np.average(embeddings, axis=0, weights=weights)
        if self.normalize:

@@ -69,7 +84,9 @@ class OpenAIEmbedder(OpenAIEmbeddingClient, EmbedderBase):
            return (weighted_avg / norm).tolist()
        return weighted_avg.tolist()

    def embed(self, input: Union[str, List[str]]) -> Union[List[float], List[List[float]]]:
    def embed(
        self, input: Union[str, List[str]]
    ) -> Union[List[float], List[List[float]]]:
        """
        Embeds input text(s) with support for both single and multiple inputs, handling long texts via chunking and batching.

@@ -116,7 +133,7 @@ class OpenAIEmbedder(OpenAIEmbeddingClient, EmbedderBase):
        chunk_embeddings = []  # Holds embeddings for all chunks

        for i in range(0, len(chunks), batch_size):
            batch = chunks[i:i + batch_size]
            batch = chunks[i : i + batch_size]
            response = self.create_embedding(input=batch)  # Batch API call
            chunk_embeddings.extend(r.embedding for r in response.data)

@@ -133,19 +150,23 @@ class OpenAIEmbedder(OpenAIEmbeddingClient, EmbedderBase):
                results.append(embeddings[0])
            else:
                # Combine chunk embeddings using weighted averaging
                weights = [len(chunk) for chunk in self._chunk_tokens(tokens, self.max_tokens)]
                weights = [
                    len(chunk) for chunk in self._chunk_tokens(tokens, self.max_tokens)
                ]
                results.append(self._process_embeddings(embeddings, weights))

        # Return a single embedding if the input was a single string; otherwise, return a list
        return results[0] if single_input else results

    def __call__(self, input: Union[str, List[str]]) -> Union[List[float], List[List[float]]]:
    def __call__(
        self, input: Union[str, List[str]]
    ) -> Union[List[float], List[List[float]]]:
        """
        Allows the instance to be called directly to embed text(s).

        Args:
            input (Union[str, List[str]]): The input text(s) to embed.

        Returns:
            Union[List[float], List[List[float]]]: Embedding vector(s) for the input(s).
        """
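The long-input path deserves a worked example: an over-length text is token-chunked, each chunk embedded, and the chunk vectors combined by a token-count-weighted average, then unit-normalized when `normalize=True`. A self-contained numeric sketch of that combine step:

import numpy as np

# Two chunk embeddings for one long input; weights are the chunk token counts.
chunk_vecs = np.array([[0.1, 0.9], [0.5, 0.5]])
weights = [8191, 1200]

weighted_avg = np.average(chunk_vecs, axis=0, weights=weights)
combined = (weighted_avg / np.linalg.norm(weighted_avg)).tolist()  # normalize=True path
print(combined)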
@@ -6,19 +6,33 @@ import os

logger = logging.getLogger(__name__)


class SentenceTransformerEmbedder(EmbedderBase):
    """
    SentenceTransformer-based embedder for generating text embeddings.
    Supports multi-process encoding for large datasets.
    """

    model: str = Field(default="all-MiniLM-L6-v2", description="Name of the SentenceTransformer model to use.")
    device: Literal["cpu", "cuda", "mps", "npu"] = Field(default="cpu", description="Device for computation.")
    normalize_embeddings: bool = Field(default=False, description="Whether to normalize embeddings.")
    multi_process: bool = Field(default=False, description="Whether to use multi-process encoding.")
    cache_dir: Optional[str] = Field(default=None, description="Directory to cache or load the model.")

    client: Optional[Any] = Field(default=None, init=False, description="Loaded SentenceTransformer model.")
    model: str = Field(
        default="all-MiniLM-L6-v2",
        description="Name of the SentenceTransformer model to use.",
    )
    device: Literal["cpu", "cuda", "mps", "npu"] = Field(
        default="cpu", description="Device for computation."
    )
    normalize_embeddings: bool = Field(
        default=False, description="Whether to normalize embeddings."
    )
    multi_process: bool = Field(
        default=False, description="Whether to use multi-process encoding."
    )
    cache_dir: Optional[str] = Field(
        default=None, description="Directory to cache or load the model."
    )

    client: Optional[Any] = Field(
        default=None, init=False, description="Loaded SentenceTransformer model."
    )

    def model_post_init(self, __context: Any) -> None:
        """

@@ -35,26 +49,40 @@ class SentenceTransformerEmbedder(EmbedderBase):
        )

        # Determine whether to load from cache or download
        model_path = self.cache_dir if self.cache_dir and os.path.exists(self.cache_dir) else self.model
        model_path = (
            self.cache_dir
            if self.cache_dir and os.path.exists(self.cache_dir)
            else self.model
        )
        # Attempt to load the model
        try:
            if os.path.exists(model_path):
                logger.info(f"Loading SentenceTransformer model from local path: {model_path}")
                logger.info(
                    f"Loading SentenceTransformer model from local path: {model_path}"
                )
            else:
                logger.info(f"Downloading SentenceTransformer model: {self.model}")
                if self.cache_dir:
                    logger.info(f"Model will be cached to: {self.cache_dir}")
            self.client: SentenceTransformer = SentenceTransformer(model_name_or_path=model_path, device=self.device)
            self.client: SentenceTransformer = SentenceTransformer(
                model_name_or_path=model_path, device=self.device
            )
            logger.info("Model loaded successfully.")
        except Exception as e:
            logger.error(f"Failed to load SentenceTransformer model: {e}")
            raise
        # Save to cache directory if downloaded
        if model_path == self.model and self.cache_dir and not os.path.exists(self.cache_dir):
        if (
            model_path == self.model
            and self.cache_dir
            and not os.path.exists(self.cache_dir)
        ):
            logger.info(f"Saving the downloaded model to: {self.cache_dir}")
            self.client.save(self.cache_dir)

    def embed(self, input: Union[str, List[str]]) -> Union[List[float], List[List[float]]]:
    def embed(
        self, input: Union[str, List[str]]
    ) -> Union[List[float], List[List[float]]]:
        """
        Generate embeddings for input text(s).

@@ -82,7 +110,7 @@ class SentenceTransformerEmbedder(EmbedderBase):
            embeddings = self.client.encode_multi_process(
                input_strings,
                pool=pool,
                normalize_embeddings=self.normalize_embeddings
                normalize_embeddings=self.normalize_embeddings,
            )
        finally:
            logger.info("Stopping multi-process pool.")

@@ -91,14 +119,16 @@ class SentenceTransformerEmbedder(EmbedderBase):
            embeddings = self.client.encode(
                input_strings,
                convert_to_numpy=True,
                normalize_embeddings=self.normalize_embeddings
                normalize_embeddings=self.normalize_embeddings,
            )

        if single_input:
            return embeddings[0].tolist()
        return embeddings.tolist()

    def __call__(self, input: Union[str, List[str]]) -> Union[List[float], List[List[float]]]:
    def __call__(
        self, input: Union[str, List[str]]
    ) -> Union[List[float], List[List[float]]]:
        """
        Allows the instance to be called directly to embed text(s).

@@ -108,4 +138,4 @@ class SentenceTransformerEmbedder(EmbedderBase):
        Returns:
            Union[List[float], List[List[float]]]: Embedding vector(s) for the input(s).
        """
        return self.embed(input)
        return self.embed(input)
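Local usage is a one-liner once the model is cached. A sketch; the cache path is illustrative, and the model downloads on first use (requires `pip install sentence-transformers`):

embedder = SentenceTransformerEmbedder(
    model="all-MiniLM-L6-v2",
    normalize_embeddings=True,
    cache_dir="./models/minilm",  # illustrative path
)

single = embedder("hello world")      # -> List[float]
batch = embedder(["alpha", "beta"])   # -> List[List[float]]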
@@ -1 +1 @@
from .arxiv import ArxivFetcher
from .arxiv import ArxivFetcher
@@ -16,7 +16,7 @@ class ArxivFetcher(FetcherBase):

    max_results: int = 10
    include_full_metadata: bool = False

    def search(
        self,
        query: str,

@@ -25,7 +25,7 @@ class ArxivFetcher(FetcherBase):
        download: bool = False,
        dirpath: Path = Path("./"),
        include_summary: bool = False,
        **kwargs
        **kwargs,
    ) -> Union[List[Dict], List["Document"]]:
        """
        Search for papers on arXiv and optionally download them.

@@ -64,12 +64,14 @@ class ArxivFetcher(FetcherBase):
                "The `arxiv` library is required to use the ArxivFetcher. "
                "Install it with `pip install arxiv`."
            )

        logger.info(f"Searching for query: {query}")

        # Enforce that both from_date and to_date are provided if one is specified
        if (from_date and not to_date) or (to_date and not from_date):
            raise ValueError("Both 'from_date' and 'to_date' must be specified if one is provided.")
            raise ValueError(
                "Both 'from_date' and 'to_date' must be specified if one is provided."
            )

        # Add date filter if both from_date and to_date are provided
        if from_date and to_date:

@@ -94,7 +96,7 @@ class ArxivFetcher(FetcherBase):
        content_id: str,
        download: bool = False,
        dirpath: Path = Path("./"),
        include_summary: bool = False
        include_summary: bool = False,
    ) -> Union[Optional[Dict], Optional[Document]]:
        """
        Search for a specific paper by its arXiv ID and optionally download it.

@@ -124,7 +126,7 @@ class ArxivFetcher(FetcherBase):
                "The `arxiv` library is required to use the ArxivFetcher. "
                "Install it with `pip install arxiv`."
            )

        logger.info(f"Searching for paper by ID: {content_id}")
        try:
            search = arxiv.Search(id_list=[content_id])

@@ -133,17 +135,15 @@ class ArxivFetcher(FetcherBase):
                logger.warning(f"No result found for ID: {content_id}")
                return None

            return self._process_results([result], download, dirpath, include_summary)[0]
            return self._process_results([result], download, dirpath, include_summary)[
                0
            ]
        except Exception as e:
            logger.error(f"Error fetching result for ID {content_id}: {e}")
            return None

    def _process_results(
        self,
        results: List[Any],
        download: bool,
        dirpath: Path,
        include_summary: bool
        self, results: List[Any], download: bool, dirpath: Path, include_summary: bool
    ) -> Union[List[Dict], List["Document"]]:
        """
        Process arXiv search results.

@@ -162,16 +162,22 @@ class ArxivFetcher(FetcherBase):
            metadata_list = []
            for result in results:
                file_path = self._download_result(result, dirpath)
                metadata_list.append(self._format_result_metadata(result, file_path=file_path, include_summary=include_summary))
                metadata_list.append(
                    self._format_result_metadata(
                        result, file_path=file_path, include_summary=include_summary
                    )
                )
            return metadata_list
        else:
            documents = []
            for result in results:
                metadata = self._format_result_metadata(result, include_summary=include_summary)
                metadata = self._format_result_metadata(
                    result, include_summary=include_summary
                )
                text = result.summary.strip()
                documents.append(Document(text=text, metadata=metadata))
            return documents

    def _download_result(self, result: Any, dirpath: Path) -> Optional[str]:
        """
        Download a paper from an arXiv result object.

@@ -194,7 +200,12 @@ class ArxivFetcher(FetcherBase):
            logger.error(f"Failed to download paper {result.title}: {e}")
            return None

    def _format_result_metadata(self, result: Any, file_path: Optional[str] = None, include_summary: bool = False) -> Dict:
    def _format_result_metadata(
        self,
        result: Any,
        file_path: Optional[str] = None,
        include_summary: bool = False,
    ) -> Dict:
        """
        Format metadata from an arXiv result, optionally including file path and summary.

@@ -219,24 +230,26 @@ class ArxivFetcher(FetcherBase):
        }

        if self.include_full_metadata:
            metadata.update({
                "links": result.links,
                "authors_comment": result.comment,
                "DOI": result.doi,
                "journal_reference": result.journal_ref,
            })
            metadata.update(
                {
                    "links": result.links,
                    "authors_comment": result.comment,
                    "DOI": result.doi,
                    "journal_reference": result.journal_ref,
                }
            )

        if include_summary:
            metadata["summary"] = result.summary.strip()

        return {key: value for key, value in metadata.items() if value is not None}

    def _format_date(self, date: Union[str, datetime]) -> str:
        """
        Format a date into the 'YYYYMMDDHHMM' format required by the arXiv API.

        Args:
            date (Union[str, datetime]): The date to format. Can be a string in 'YYYYMMDD' or
                'YYYYMMDDHHMM' format, or a datetime object.

        Returns:

@@ -262,7 +275,9 @@ class ArxivFetcher(FetcherBase):
        if isinstance(date, str):
            # Check if the string matches the basic format
            if not re.fullmatch(r"^\d{8}(\d{4})?$", date):
                raise ValueError(f"Invalid date format: {date}. Use 'YYYYMMDD' or 'YYYYMMDDHHMM'.")
                raise ValueError(
                    f"Invalid date format: {date}. Use 'YYYYMMDD' or 'YYYYMMDDHHMM'."
                )

            # Validate that it is a real date
            try:

@@ -277,4 +292,6 @@ class ArxivFetcher(FetcherBase):
        elif isinstance(date, datetime):
            return date.strftime("%Y%m%d%H%M")
        else:
            raise ValueError("Invalid date input. Provide a string in 'YYYYMMDD', 'YYYYMMDDHHMM' format, or a datetime object.")
            raise ValueError(
                "Invalid date input. Provide a string in 'YYYYMMDD', 'YYYYMMDDHHMM' format, or a datetime object."
            )
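End to end, the fetcher validates the date window, queries arXiv, and optionally downloads PDFs. A hedged sketch; the `from_date` and `to_date` keyword names are inferred from the validation code above, and the query string is made up:

from pathlib import Path

fetcher = ArxivFetcher(max_results=5)
docs = fetcher.search(
    query="retrieval augmented generation",
    from_date="20240101",
    to_date="20241231",
    download=True,
    dirpath=Path("./papers"),
    include_summary=True,
)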
@@ -2,6 +2,7 @@ from abc import ABC, abstractmethod
from pydantic import BaseModel
from typing import List, Any


class FetcherBase(BaseModel, ABC):
    """
    Abstract base class for fetchers.

@@ -19,4 +20,4 @@
        Returns:
            List[Any]: A list of results.
        """
        pass
        pass
@@ -1 +1 @@
from .pdf import PyMuPDFReader, PyPDFReader
from .pdf import PyMuPDFReader, PyPDFReader
@@ -4,6 +4,7 @@ from pydantic import BaseModel
from pathlib import Path
from typing import List


class ReaderBase(BaseModel, ABC):
    """
    Abstract base class for file readers.

@@ -20,4 +21,4 @@
        Returns:
            List[Document]: A list of Document objects.
        """
        pass
        pass
Some files were not shown because too many files have changed in this diff.