mirror of https://github.com/dapr/dapr-agents.git
				
				
				
			Update Workflow Task to use Agent instances without a task description
Signed-off-by: Roberto Rodriguez <9653181+Cyb3rWard0g@users.noreply.github.com>
This commit is contained in:
		
							parent
							
								
									201f831b9a
								
							
						
					
					
						commit
						b7e05f5dbc
					
				|  | @ -106,9 +106,19 @@ class WorkflowTask(BaseModel): | ||||||
|         try: |         try: | ||||||
|             executor = self._choose_executor() |             executor = self._choose_executor() | ||||||
|             if executor in ("agent", "llm"): |             if executor in ("agent", "llm"): | ||||||
|                 if not self.description: |                 if executor == "llm" and not self.description: | ||||||
|                     raise ValueError("LLM/agent tasks require a description template") |                     raise ValueError("LLM tasks require a description template") | ||||||
|                 prompt = self.format_description(self.description, data) |                 elif executor == "agent": | ||||||
|  |                     # For agents, prefer string input for natural conversation | ||||||
|  |                     if self.description: | ||||||
|  |                         # Use description template with parameter substitution | ||||||
|  |                         prompt = self.format_description(self.description, data) | ||||||
|  |                     else: | ||||||
|  |                         # Pass string input naturally for direct agent conversation | ||||||
|  |                         prompt = self._format_natural_agent_input(payload, data) | ||||||
|  |                 else: | ||||||
|  |                     # LLM with description | ||||||
|  |                     prompt = self.format_description(self.description, data) | ||||||
|                 raw = await self._run_via_ai(prompt, executor) |                 raw = await self._run_via_ai(prompt, executor) | ||||||
|             else: |             else: | ||||||
|                 raw = await self._run_python(data) |                 raw = await self._run_python(data) | ||||||
|  | @ -154,13 +164,13 @@ class WorkflowTask(BaseModel): | ||||||
|         else: |         else: | ||||||
|             return self.func(**data) |             return self.func(**data) | ||||||
| 
 | 
 | ||||||
|     async def _run_via_ai(self, prompt: str, executor: Literal["agent", "llm"]) -> Any: |     async def _run_via_ai(self, prompt: Any, executor: Literal["agent", "llm"]) -> Any: | ||||||
|         """ |         """ | ||||||
|         Run the prompt through an Agent or LLM. |         Run the prompt through an Agent or LLM. | ||||||
| 
 | 
 | ||||||
|         Args: |         Args: | ||||||
|             prompt: The fully formatted prompt string. |             prompt: The prompt data - string for LLM, string/dict/Any for agent. | ||||||
|             kind: "agent" or "llm". |             executor: "agent" or "llm". | ||||||
| 
 | 
 | ||||||
|         Returns: |         Returns: | ||||||
|             Raw result from the AI path. |             Raw result from the AI path. | ||||||
|  | @ -168,8 +178,12 @@ class WorkflowTask(BaseModel): | ||||||
|         logger.debug(f"Invoking task via {executor.upper()}") |         logger.debug(f"Invoking task via {executor.upper()}") | ||||||
|         logger.debug(f"Invoking task with prompt: {prompt!r}") |         logger.debug(f"Invoking task with prompt: {prompt!r}") | ||||||
|         if executor == "agent": |         if executor == "agent": | ||||||
|  |             # Agents can handle string, dict, or other input types | ||||||
|             result = await self.agent.run(prompt) |             result = await self.agent.run(prompt) | ||||||
|         else: |         else: | ||||||
|  |             # LLM expects a string prompt | ||||||
|  |             if not isinstance(prompt, str): | ||||||
|  |                 raise ValueError(f"LLM executor requires string prompt, got {type(prompt)}") | ||||||
|             result = await self._invoke_llm(prompt) |             result = await self._invoke_llm(prompt) | ||||||
|         return self._convert_result(result) |         return self._convert_result(result) | ||||||
| 
 | 
 | ||||||
|  | @ -227,8 +241,9 @@ class WorkflowTask(BaseModel): | ||||||
|             return vars(raw_input) |             return vars(raw_input) | ||||||
|         if not isinstance(raw_input, dict): |         if not isinstance(raw_input, dict): | ||||||
|             # wrap single argument |             # wrap single argument | ||||||
|             if not self.signature: |             if not self.signature or len(self.signature.parameters) == 0: | ||||||
|                 raise ValueError("Cannot infer param name without signature") |                 # No signature or no parameters - return empty dict for consistency | ||||||
|  |                 return {} | ||||||
|             name = next(iter(self.signature.parameters)) |             name = next(iter(self.signature.parameters)) | ||||||
|             return {name: raw_input} |             return {name: raw_input} | ||||||
|         return raw_input |         return raw_input | ||||||
|  | @ -310,6 +325,41 @@ class WorkflowTask(BaseModel): | ||||||
|             return template.format(**bound.arguments) |             return template.format(**bound.arguments) | ||||||
|         return template.format(**data) |         return template.format(**data) | ||||||
| 
 | 
 | ||||||
|  |     def _format_natural_agent_input(self, payload: Any, data: dict) -> str: | ||||||
|  |         """ | ||||||
|  |         Format input for natural agent conversation. | ||||||
|  |         Favors string input over dictionary for better agent interaction. | ||||||
|  |          | ||||||
|  |         Args: | ||||||
|  |             payload: The original raw payload from the workflow | ||||||
|  |             data: The normalized dictionary version | ||||||
|  |              | ||||||
|  |         Returns: | ||||||
|  |             String input for natural agent conversation | ||||||
|  |         """ | ||||||
|  |         if payload is None: | ||||||
|  |             return "" | ||||||
|  |              | ||||||
|  |         # If payload is already a simple string/number, use it directly | ||||||
|  |         if isinstance(payload, (str, int, float, bool)): | ||||||
|  |             return str(payload) | ||||||
|  |              | ||||||
|  |         # If we have function parameters, format them naturally | ||||||
|  |         if data and len(data) == 1: | ||||||
|  |             # Single parameter: extract the value | ||||||
|  |             value = next(iter(data.values())) | ||||||
|  |             return str(value) if value is not None else "" | ||||||
|  |         elif data: | ||||||
|  |             # Multiple parameters: format as natural text | ||||||
|  |             parts = [] | ||||||
|  |             for key, value in data.items(): | ||||||
|  |                 if value is not None: | ||||||
|  |                     parts.append(f"{key}: {value}") | ||||||
|  |             return "\n".join(parts) | ||||||
|  |         else: | ||||||
|  |             # Fallback to string representation of payload | ||||||
|  |             return str(payload) | ||||||
|  | 
 | ||||||
| 
 | 
 | ||||||
| class TaskWrapper: | class TaskWrapper: | ||||||
|     """ |     """ | ||||||
|  |  | ||||||
		Loading…
	
		Reference in New Issue