import json
from typing import Any
from uuid import UUID

import api.context as ctx
from api.agent import MayaInversionistas
from banortegpt.database.mongo_memory import crud
from langfuse.decorators import langfuse_context, observe
from pydantic import BaseModel


class Response(BaseModel):
    content: str
    urls: list[str]


@observe(capture_input=False, capture_output=False)
async def generate(
    agent: MayaInversionistas,
    prompt: str,
    conversation_id: UUID,
    system_prompt: str | None = None,
) -> Response:
    """Run one conversation turn: store the user prompt, let the agent answer
    (executing at most one tool call), and return the reply with any shareable URLs."""
    conversation = await crud.get_or_create_conversation(
        conversation_id, system_prompt or agent.system_prompt
    )
    conversation.add(role="user", content=prompt)

    # First pass: the agent either answers directly or requests a tool call.
    response = await agent.generate(conversation.to_openai_format(agent.message_limit))

    reference_urls, image_urls = [], []
    if call := response.tool_calls:
        if id := call[0].id:
            ctx.tool_id.set(id)
        if name := call[0].function.name:
            ctx.tool_name.set(name)
        ctx.tool_buffer.set(call[0].function.arguments)
    else:
        ctx.buffer.set(response.content)

    buffer = ctx.buffer.get()
    tool_buffer = ctx.tool_buffer.get()
    tool_id = ctx.tool_id.get()
    tool_name = ctx.tool_name.get()

    if tool_id is not None:
        # If tool_buffer is a JSON string, convert it to a dictionary.
        if isinstance(tool_buffer, str):
            try:
                tool_args = json.loads(tool_buffer)
            except json.JSONDecodeError:
                tool_args = {"question": tool_buffer}
        else:
            tool_args = tool_buffer

        # Execute the requested tool and record both the tool call and its result
        # in the conversation before asking the agent for the final answer.
        response, payloads = await agent.tool_map[tool_name](**tool_args)  # type: ignore
        tool_call: dict[str, Any] = agent.llm.build_tool_call(
            tool_id, tool_name, tool_buffer
        )
        tool_call_id: dict[str, Any] = agent.llm.build_tool_call_id(tool_id)
        conversation.add("assistant", **tool_call)
        conversation.add("tool", content=response, **tool_call_id)

        # Second pass with tools disabled so the agent produces the final reply.
        response = await agent.generate(
            conversation.to_openai_format(agent.message_limit), {"tools": None}
        )
        ctx.buffer.set(response.content)
        reference_urls, image_urls = await agent.get_shareable_urls(payloads)  # type: ignore

    buffer = ctx.buffer.get()
    if buffer is None:
        raise ValueError("No buffer found")

    conversation.add(role="assistant", content=buffer)

    langfuse_context.update_current_trace(
        name=str(conversation_id),
        session_id=str(conversation_id),
        input=prompt,
        output=buffer,
    )
    return Response(content=buffer, urls=reference_urls + image_urls)
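

# --- Usage sketch (illustrative only, not part of the module) ---
# A minimal example of how `generate` might be driven, assuming
# `MayaInversionistas` can be constructed without arguments (hypothetical;
# adjust to the real constructor) and that no event loop is already running.
#
#     import asyncio
#     from uuid import uuid4
#
#     async def main() -> None:
#         agent = MayaInversionistas()  # assumption: no-arg constructor
#         result = await generate(agent, "What investment funds do you offer?", uuid4())
#         print(result.content)
#         print(result.urls)
#
#     asyncio.run(main())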