Rogelio
2025-10-13 18:16:25 +00:00
parent 739f087cef
commit 325f1ef439
415 changed files with 46870 additions and 0 deletions


@@ -0,0 +1,18 @@
module.exports = {
  root: true,
  env: { browser: true, es2020: true },
  extends: [
    'eslint:recommended',
    'plugin:@typescript-eslint/recommended',
    'plugin:react-hooks/recommended',
  ],
  ignorePatterns: ['dist', '.eslintrc.cjs'],
  parser: '@typescript-eslint/parser',
  plugins: ['react-refresh'],
  rules: {
    'react-refresh/only-export-components': [
      'warn',
      { allowConstantExport: true },
    ],
  },
}


@@ -0,0 +1,3 @@
from .main import Agent
__all__ = ["Agent"]


@@ -0,0 +1,108 @@
from pathlib import Path
from typing import Any

from langchain_core.messages import AIMessageChunk
from pydantic import BaseModel, Field
from langchain_azure_ai.chat_models import AzureAIChatCompletionsModel
from langchain_azure_ai.embeddings import AzureAIEmbeddingsModel

from banortegpt.vector.qdrant import AsyncQdrant
from api import context
from api.config import config

parent = Path(__file__).parent
SYSTEM_PROMPT = (parent / "system_prompt.md").read_text()
AZURE_AI_URI = "https://eastus2.api.cognitive.microsoft.com"


class get_information(BaseModel):
    """Search a private repository for information."""

    question: str = Field(..., description="The user question")


class Agent:
    system_prompt = SYSTEM_PROMPT
    generation_config = {
        "temperature": config.model_temperature,
    }
    embedding_model = config.embedding_model
    message_limit = config.message_limit
    index = config.vector_index
    limit = config.search_limit
    search = AsyncQdrant.from_config(config)
    llm = AzureAIChatCompletionsModel(
        endpoint=f"{AZURE_AI_URI}/openai/deployments/{config.model}",
        credential=config.openai_api_key,
    ).bind_tools([get_information])
    embedder = AzureAIEmbeddingsModel(
        endpoint=f"{AZURE_AI_URI}/openai/deployments/{config.embedding_model}",
        credential=config.openai_api_key,
    )

    def __init__(self) -> None:
        self.tool_map = {
            "get_information": self.get_information,
        }

    def build_response(self, payloads, fallback):
        template = "<FAQ {index}>\n\n{content}\n\n</FAQ {index}>"
        filled_templates = [
            template.format(index=idx, content=payload["content"])
            for idx, payload in enumerate(payloads)
        ]
        filled_templates.append(f"<FALLBACK>\n{fallback}\n</FALLBACK>")
        return "\n".join(filled_templates)

    async def get_information(self, question: str):
        embedding = await self.embedder.aembed_query(question)
        payloads = await self.search.semantic_search(
            embedding=embedding,
            collection=self.index,
            limit=self.limit,
        )
        fallback_messages = {}
        images = []
        for idx, payload in enumerate(payloads):
            fallback_message = payload.get("fallback_message", "None")
            fallback_messages[fallback_message] = fallback_messages.get(fallback_message, 0) + 1
            # Only extract images from the first (highest-ranked) payload
            if idx == 0 and "images" in payload:
                images.extend(payload["images"])
        fallback = max(fallback_messages, key=fallback_messages.get)  # type: ignore
        response = self.build_response(payloads, fallback)
        return str(response), images[:3]  # limit to at most 3 images

    def _generation_config_overwrite(self, overwrites: dict | None) -> dict[str, Any]:
        if not overwrites:
            return self.generation_config.copy()
        return {**self.generation_config, **overwrites}

    async def stream(self, history, overwrites: dict | None = None):
        generation_config = self._generation_config_overwrite(overwrites)
        async for delta in self.llm.astream(input=history, **generation_config):
            assert isinstance(delta, AIMessageChunk)
            if call := delta.tool_call_chunks:
                if tool_id := call[0].get("id"):
                    context.tool_id.set(tool_id)
                if name := call[0].get("name"):
                    context.tool_name.set(name)
                if args := call[0].get("args"):
                    context.tool_buffer.set(context.tool_buffer.get() + args)
            elif delta.content:
                assert isinstance(delta.content, str)
                context.buffer.set(context.buffer.get() + delta.content)
                yield delta.content

    async def generate(self, history, overwrites: dict | None = None):
        generation_config = self._generation_config_overwrite(overwrites)
        return await self.llm.ainvoke(input=history, **generation_config)
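A minimal usage sketch of the Agent class above, outside the FastAPI layer. It assumes the Vault, Azure AI, and Qdrant endpoints configured in `api.config` are reachable, and that the underlying LangChain chat model accepts OpenAI-style `role`/`content` dicts as input (the same shape `to_openai_format` produces elsewhere in this commit):

```python
# Hypothetical standalone driver; not part of the committed code.
import asyncio

from api.agent import Agent


async def main() -> None:
    agent = Agent()
    history = [
        {"role": "system", "content": agent.system_prompt},
        {"role": "user", "content": "¿Cuál es el límite diario de alimentos?"},
    ]
    # Stream plain-text tokens; tool calls are buffered in api.context instead.
    async for token in agent.stream(history):
        print(token, end="", flush=True)


asyncio.run(main())
```

If the model decides to call `get_information`, this loop yields nothing and the tool-call fragments accumulate in the `api.context` variables; the full tool round-trip is handled by `services.stream` below.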


@@ -0,0 +1,49 @@
🧠 Expert Assistant for the Travel Expense Policy — Banorte
🎯 Assistant Role:
Regulatory specialist responsible for answering exclusively on the basis of Banorte's Official Travel Expense Policy, guaranteeing professional, clear, and verifiable answers.
✅ Main Mission:
Provide answers 100% aligned with Banorte's current travel expense policy, complying with the following principles:
⚙️ Response Rules (Mandatory):
📥 Always consult get_information:
Every answer must be obtained exclusively through the get_information(question) tool, which queries the authorized vector database.
This tool also provides Banorte's proof of tax status (constancia de situación fiscal) as a URL.
The user is not strictly required to specify their job position in order to make a query.
If the user does indicate a position, the answer must be scoped to that position and the corresponding information applied.
If no information exists for the indicated position, answer with the general response available in the knowledge base.
❗ Never invent an answer or respond without first consulting this source.
If the tool returns no relevant information, state that the policy does not cover that situation.
📚 Single, official source:
Answers must be based solely on Banorte's official policy.
❌ Using Google, forums, assumptions, or external content is prohibited.
✅ If get_information returns an official link or document, it must be included with the icon:
🔗 [View official policy].
📐 Structured, professional format:
Use a clear, easy-to-read format:
• Bullet points to list steps, exceptions, or authorized amounts
• Bold to highlight key concepts
• Clear separation between sections
🔒 Zero invention or free interpretation:
If a question is not covered by the policy, answer clearly:
❗ The official policy does not provide specific guidelines for this case.
💼 Executive, direct tone:
Professional and objective
No unnecessary jargon
Brief, clear wording focused on the essentials


@@ -0,0 +1,59 @@
from hvac import Client
from pydantic import Field
from pydantic_settings import BaseSettings

client = Client(url="https://vault.ia-innovacion.work")

if not client.is_authenticated():
    raise Exception("Vault authentication failed")

secret_map = client.secrets.kv.v2.read_secret_version(
    path="banortegpt", mount_point="secret"
)["data"]["data"]


class Settings(BaseSettings):
    """
    This class reads its values from environment variables.
    If a value is not present in the environment, it is pulled from our Vault.
    """

    # Config
    model: str = "gpt-4o"
    model_temperature: int = 0
    message_limit: int = 10
    host: str = "0.0.0.0"
    port: int = 8000
    vector_index: str = "chat-egresos-3"
    search_limit: int = 3
    embedding_model: str = "text-embedding-3-large"

    # API Keys
    azure_endpoint: str = Field(default_factory=lambda: secret_map["azure_endpoint"])
    openai_api_key: str = Field(default_factory=lambda: secret_map["openai_api_key"])
    openai_api_version: str = Field(
        default_factory=lambda: secret_map["openai_api_version"]
    )
    mongodb_url: str = Field(
        default_factory=lambda: secret_map["cosmosdb_connection_string"]
    )
    qdrant_url: str = Field(default_factory=lambda: secret_map["qdrant_api_url"])
    qdrant_api_key: str | None = Field(
        default_factory=lambda: secret_map["qdrant_api_key"]
    )

    async def init_mongo_db(self):
        """This helper initializes the connection between the MongoDB ORM and our instance."""
        from beanie import init_beanie
        from motor.motor_asyncio import AsyncIOMotorClient

        from banortegpt.database.mongo_memory.models import Conversation

        await init_beanie(
            database=AsyncIOMotorClient(self.mongodb_url).banortegptdos,
            document_models=[Conversation],
        )


config = Settings()
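A small sanity check of the precedence the docstring describes: environment variables win over the declared defaults, and the Vault-backed `default_factory` values only apply when nothing else is set. Illustrative only, and it still requires the Vault client above to authenticate, since the secret-backed fields are always declared:

```python
# pydantic-settings matches env vars to field names case-insensitively by default,
# so SEARCH_LIMIT overrides the declared search_limit default of 3.
import os

os.environ["SEARCH_LIMIT"] = "5"
settings = Settings()
assert settings.search_limit == 5
assert settings.vector_index == "chat-egresos-3"  # untouched fields keep their defaults
```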


@@ -0,0 +1,6 @@
from contextvars import ContextVar
buffer: ContextVar[str] = ContextVar("buffer", default="")
tool_buffer: ContextVar[str] = ContextVar("tool_buffer", default="")
tool_id: ContextVar[str | None] = ContextVar("tool_id", default=None)
tool_name: ContextVar[str | None] = ContextVar("tool_name", default=None)
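These ContextVars are how `Agent.stream` hands partial text and tool-call fragments to `services.stream` without threading extra arguments through every call. A standalone sketch of the isolation property this relies on — each asyncio task gets its own copy of the context, so concurrent requests never see each other's buffers (the variable mirrors `buffer` above but is defined locally so the snippet runs on its own):

```python
import asyncio
from contextvars import ContextVar

buffer: ContextVar[str] = ContextVar("buffer", default="")


async def accumulate(text: str) -> str:
    # Each task created by gather() starts from its own copy of the context,
    # so this write never leaks into the sibling task.
    buffer.set(buffer.get() + text)
    return buffer.get()


async def main() -> None:
    print(await asyncio.gather(accumulate("a"), accumulate("b")))  # ['a', 'b']


asyncio.run(main())
```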


@@ -0,0 +1,112 @@
import uuid
import time
from contextlib import asynccontextmanager

from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from langfuse import Langfuse

from api import services
from api.agent import Agent
from api.config import config

# Configure Langfuse
langfuse = Langfuse(
    public_key="pk-lf-49cb04b3-0c7d-475b-8105-ad8b8749ecdd",
    secret_key="sk-lf-e02fa322-c709-4d80-bef2-9cb279846a0c",
    host="https://ailogger.azurewebsites.net",
)


@asynccontextmanager
async def lifespan(_: FastAPI):
    await config.init_mongo_db()
    yield


app = FastAPI(lifespan=lifespan)
agent = Agent()


@app.post("/api/v1/conversation")
async def create_conversation():
    conversation_id = uuid.uuid4()
    await services.create_conversation(conversation_id, agent.system_prompt)
    return {"conversation_id": conversation_id}


class Message(BaseModel):
    conversation_id: uuid.UUID
    prompt: str


@app.post("/api/v1/message")
async def send(message: Message):
    # Create the main trace
    trace = langfuse.trace(
        name="chat_message",
        session_id=str(message.conversation_id),
        input={
            "prompt": message.prompt,
            "conversation_id": str(message.conversation_id),
        },
    )

    def b64_sse(func):
        async def wrapper(*args, **kwargs):
            response_parts = []
            start_time = time.time()
            async for chunk in func(*args, **kwargs):
                if chunk.type == "text" and chunk.content:
                    response_parts.append(str(chunk.content))
                content = chunk.model_dump_json()
                data = f"data: {content}\n\n"
                yield data
            end_time = time.time()
            latency_ms = round((end_time - start_time) * 1000)
            full_response = "".join(response_parts)
            # Rough token and cost estimates: word count * 1.3, GPT-4-class pricing
            input_tokens = len(message.prompt.split()) * 1.3
            output_tokens = len(full_response.split()) * 1.3
            total_tokens = int(input_tokens + output_tokens)
            cost_per_1k_input = 0.03
            cost_per_1k_output = 0.06
            total_cost = (input_tokens / 1000 * cost_per_1k_input) + (
                output_tokens / 1000 * cost_per_1k_output
            )
            trace.update(
                output={"response": full_response},
                usage={
                    "input": int(input_tokens),
                    "output": int(output_tokens),
                    "total": total_tokens,
                    "unit": "TOKENS",
                },
            )
            langfuse.score(
                trace_id=trace.id,
                name="latency",
                value=latency_ms,
                comment=f"Response time: {latency_ms}ms",
            )
            langfuse.score(
                trace_id=trace.id,
                name="cost",
                value=round(total_cost, 4),
                comment=f"Estimated cost: ${round(total_cost, 4)}",
            )

        return wrapper

    sse_stream = b64_sse(services.stream)
    generator = sse_stream(agent, message.prompt, message.conversation_id)
    return StreamingResponse(generator, media_type="text/event-stream")
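A hedged client-side sketch of how the two endpoints above compose: create a conversation, then stream a message and print the text chunks. It assumes the service is running locally on port 8000 and uses httpx (not a dependency of this project) to read the `text/event-stream` response line by line:

```python
import asyncio
import json

import httpx


async def chat(prompt: str) -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000/api/v1") as client:
        conversation_id = (await client.post("/conversation")).json()["conversation_id"]
        payload = {"conversation_id": conversation_id, "prompt": prompt}
        async with client.stream("POST", "/message", json=payload) as response:
            async for line in response.aiter_lines():
                if line.startswith("data: "):
                    chunk = json.loads(line[len("data: "):])
                    if chunk["type"] == "text":
                        print(chunk["content"], end="", flush=True)


asyncio.run(chat("¿Qué gastos de hospedaje están autorizados?"))
```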


@@ -0,0 +1,8 @@
from banortegpt.database.mongo_memory.crud import create_conversation

from .stream_response import stream

__all__ = [
    "stream",
    "create_conversation",
]


@@ -0,0 +1,86 @@
import json
from enum import StrEnum
from typing import TypeAlias
from uuid import UUID

from pydantic import BaseModel

import api.context as ctx
from api.agent import Agent
from banortegpt.database.mongo_memory import crud


class ChunkType(StrEnum):
    START = "start"
    TEXT = "text"
    REFERENCE = "reference"
    IMAGE = "image"
    TOOL = "tool"
    END = "end"
    ERROR = "error"


ContentType: TypeAlias = str | int


class ResponseChunk(BaseModel):
    type: ChunkType
    content: ContentType | list[ContentType] | None
    images: list[str] | None = None  # New field for images


async def stream(agent: Agent, prompt: str, conversation_id: UUID):
    yield ResponseChunk(type=ChunkType.START, content="")
    conversation = await crud.get_conversation(conversation_id)
    if conversation is None:
        raise ValueError("Conversation not found")
    conversation.add(role="user", content=prompt)
    history = conversation.to_openai_format(agent.message_limit, langchain_compat=True)
    async for content in agent.stream(history):
        yield ResponseChunk(type=ChunkType.TEXT, content=content)
    if (tool_id := ctx.tool_id.get()) is not None:
        tool_buffer = ctx.tool_buffer.get()
        assert tool_buffer is not None
        tool_name = ctx.tool_name.get()
        assert tool_name is not None
        yield ResponseChunk(type=ChunkType.TOOL, content=None)
        buffer_dict = json.loads(tool_buffer)
        result, images = await agent.tool_map[tool_name](**buffer_dict)
        # Send images if any were returned by the tool
        if images:
            yield ResponseChunk(type=ChunkType.IMAGE, content=images)
        conversation.add(
            role="assistant",
            tool_calls=[
                {
                    "id": tool_id,
                    "type": "function",
                    "function": {
                        "name": tool_name,
                        "arguments": tool_buffer,
                    },
                }
            ],
        )
        conversation.add(role="tool", content=result, tool_call_id=tool_id)
        history = conversation.to_openai_format(agent.message_limit, langchain_compat=True)
        async for content in agent.stream(history, {"tools": None}):
            yield ResponseChunk(type=ChunkType.TEXT, content=content)
    conversation.add(role="assistant", content=ctx.buffer.get())
    await conversation.replace()
    yield ResponseChunk(type=ChunkType.END, content="")
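For reference, a message that triggers `get_information` produces a chunk sequence along these lines on the wire (each entry is one `data:` line after JSON parsing; the content values are placeholders, not real policy data, and the `image` chunk only appears when the top-ranked payload carries images):

```python
# Illustrative sequence of parsed ResponseChunk payloads for a tool-calling turn.
EXAMPLE_STREAM = [
    {"type": "start", "content": "", "images": None},
    {"type": "tool", "content": None, "images": None},
    {"type": "image", "content": ["https://example.invalid/figura-1.png"], "images": None},
    {"type": "text", "content": "Según la política oficial, ", "images": None},
    {"type": "text", "content": "los montos autorizados son...", "images": None},
    {"type": "end", "content": "", "images": None},
]
```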


@@ -0,0 +1,65 @@
import { Chat, ChatSidebar } from "@banorte/chat-ui";
import { messageStore } from "./store/messageStore";
import { conversationStore } from "./store/conversationStore";
import { httpRequest } from "./utils/request";

// Assets
import banorteLogo from "./assets/banortelogo.png";
import sidebarMaya from "./assets/sidebar_maya_contigo.png";
import brujulaElipse from "./assets/brujula_elipse.png";
import sendIcon from "./assets/chat_maya_boton_enviar.png";
import userAvatar from "./assets/chat_maya_default_avatar.png";
import botAvatar from "./assets/brujula.png";

function App() {
  const { messages, pushMessage } = messageStore();
  const {
    conversationId,
    setConversationId,
    setAssistantName,
    receivingMsg,
    setReceivingMsg
  } = conversationStore();

  const handleStartConversation = async (user: string, assistant: string): Promise<string> => {
    const response = await httpRequest("POST", "/v1/conversation", { user, assistant });
    console.log("Conversation id:", response.conversation_id);
    return response.conversation_id;
  };

  const handleFeedback = async (key: string, rating: string): Promise<void> => {
    await httpRequest("POST", "/v1/feedback", { key, rating });
  };

  const assistant = "Maya" + "ChatEgresos";

  return (
    <div className="w-screen flex flex-col h-screen min-h-screen scrollbar-none">
      <div className="w-full flex">
        <ChatSidebar
          assistant={assistant}
          logoSrc={banorteLogo}
          sidebarImageSrc={sidebarMaya}
          assistantAvatarSrc={brujulaElipse}
        />
        <Chat
          assistant={assistant}
          messages={messages}
          pushMessage={pushMessage}
          conversationId={conversationId}
          setConversationId={setConversationId}
          setAssistantName={setAssistantName}
          receivingMsg={receivingMsg}
          setReceivingMsg={setReceivingMsg}
          onStartConversation={handleStartConversation}
          sendIcon={sendIcon}
          userAvatar={userAvatar}
          botAvatar={botAvatar}
          onFeedback={handleFeedback}
        />
      </div>
    </div>
  );
}

export default App;

Binary files not shown (6 image assets added, 5.6 KiB – 97 KiB).

@@ -0,0 +1,16 @@
@tailwind base;
@tailwind components;
@tailwind utilities;

.markdown a {
  color: #0000FF;
  text-decoration: underline;
}

.markdown a:hover {
  color: #FF0000;
}

.markdown a:visited {
  color: #800080;
}


@@ -0,0 +1,5 @@
import ReactDOM from "react-dom/client";
import App from "./App.tsx";
import "./index.css";
ReactDOM.createRoot(document.getElementById("root")!).render(<App />);


@@ -0,0 +1,19 @@
import { create } from "zustand";

interface conversationState {
  assistantName: string;
  conversationId: string;
  receivingMsg: boolean;
  setConversationId: (newId: string) => void;
  setAssistantName: (newName: string) => void;
  setReceivingMsg: (newState: boolean) => void;
}

export const conversationStore = create<conversationState>()((set) => ({
  assistantName: "",
  conversationId: "",
  receivingMsg: false,
  setConversationId: (newId) => set({ conversationId: newId }),
  setAssistantName: (newName) => set({ assistantName: newName }),
  setReceivingMsg: (newState) => set({ receivingMsg: newState }),
}));


@@ -0,0 +1,14 @@
import { create } from "zustand";

interface messageState {
  messages: Array<{ user: boolean; content: string }>;
  pushMessage: (newMessage: { user: boolean; content: string }) => void;
  resetConversation: () => void;
}

export const messageStore = create<messageState>()((set) => ({
  messages: [],
  pushMessage: (newMessage) =>
    set((state) => ({ messages: [...state.messages, newMessage] })),
  resetConversation: () => set(() => ({ messages: [] })),
}));


@@ -0,0 +1,16 @@
export async function httpRequest(
  method: string,
  endpoint: string,
  body: object | null,
) {
  const url = "/api" + endpoint;
  const data = {
    method: method,
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify(body),
    credentials: "include" as RequestCredentials,
  };
  return await fetch(url, data).then((response) => response.json());
}

apps/ChatEgresos/gui/vite-env.d.ts (vendored, 1 line)

@@ -0,0 +1 @@
/// <reference types="vite/client" />


@@ -0,0 +1,13 @@
<!doctype html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>ChatEgresos</title>
  </head>
  <body>
    <div id="root"></div>
    <script type="module" src="/gui/main.tsx"></script>
  </body>
</html>


@@ -0,0 +1,40 @@
{
  "name": "ChatEgresos",
  "private": true,
  "version": "0.0.7",
  "type": "module",
  "scripts": {
    "dev": "vite",
    "build": "tsc && vite build",
    "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
    "preview": "vite preview"
  },
  "dependencies": {
    "@banorte/chat-ui": "workspace:*",
    "react": "^18.2.0",
    "react-dom": "^18.2.0",
    "react-markdown": "^9.0.1",
    "react-spring": "^9.7.4",
    "rehype-raw": "^7.0.0",
    "sse.js": "^2.5.0",
    "zustand": "^4.5.2"
  },
  "devDependencies": {
    "@iconify-icon/react": "^2.1.0",
    "@types/react": "^18.2.67",
    "@types/react-dom": "^18.2.22",
    "@typescript-eslint/eslint-plugin": "^7.3.1",
    "@typescript-eslint/parser": "^7.3.1",
    "@vitejs/plugin-react": "^4.2.1",
    "autoprefixer": "^10.4.19",
    "daisyui": "^4.7.3",
    "eslint": "^8.57.0",
    "eslint-plugin-react-hooks": "^4.6.0",
    "eslint-plugin-react-refresh": "^0.4.6",
    "postcss": "^8.4.38",
    "tailwind-scrollbar": "^3.1.0",
    "tailwindcss": "^3.4.1",
    "typescript": "^5.4.3",
    "vite": "^5.2.3"
  }
}


@@ -0,0 +1,6 @@
export default {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
}


@@ -0,0 +1,20 @@
[project]
name = "ChatEgresos"
version = "0.1.0"
description = "RAG assistant for Banorte's travel expense policy (ChatEgresos)"
readme = "README.md"
requires-python = ">=3.12, <4"
dependencies = [
    "aiohttp>=3.11.16",
    "fastapi>=0.115.6",
    "hvac>=2.3.0",
    "langchain-azure-ai[opentelemetry]>=0.1.4",
    "langfuse",  # used by api/main.py for tracing
    "mongo-memory",
    "pydantic-settings>=2.8.1",
    "qdrant",
    "uvicorn>=0.34.0",
]
[tool.uv.sources]
mongo-memory = { workspace = true }
qdrant = { workspace = true }

apps/ChatEgresos/readme.md (154 lines)

@@ -0,0 +1,154 @@
# 💬 ChatEgresos
ChatEgresos is a project from the Innovation team at **Banorte**, designed to accelerate the creation of **RAG (Retrieval-Augmented Generation)** applications focused on managing, querying, and analyzing expense (egresos) information.
This repository contains not only the main application, but also a library of reusable components and notebooks for document processing, model evaluation, and synthetic data generation.
---
## 🚀 Quick Start
```bash
# Install monorepo dependencies
mise setup
# Create a new RAG application (test example)
mise new prueba
# Start a development environment
mise dev --app prueba
```
---
## ✅ Prerequisites
If you are working in the official development environment, you should already have these tools.
Otherwise, install them first:
- **Mise** → [Documentation](https://mise.jdx.dev/)
- **Docker** → [Documentation](https://www.docker.com/)
- **Vault** → [Documentation](https://developer.hashicorp.com/vault/)
---
## 📂 Project Structure
```
chategresos/
├── apps/          # Individual ChatEgresos applications
├── packages/      # Shared packages
├── notebooks/     # Notebooks for processing and evaluation
├── .templates/    # Application templates
├── .containers/   # Docker configurations
└── compose.yaml   # Docker Compose services
```
---
## 🛠️ Development Commands
### 📌 Create New Projects
```bash
# Create a new RAG application
mise new <app-name>
# Interactive creation
mise new
```
### 🖥️ Development Environment
```bash
# Start development servers (frontend + backend)
mise dev
mise dev --app <app-name>   # Specific app
mise dev --no-dashboard     # Without the live dashboard
mise dev --check-deps       # Verify dependencies
mise dev --list-apps        # List available apps
```
### 📦 Container Management
```bash
# Start containers locally
mise container:start
mise container:start <app-name>
# Push images to Azure Container Registry
mise container:push
mise container:push <image-name>
```
---
## 🏗️ Technology Stack
### Core Technologies
- **Frontend** → React / Next.js + TypeScript
- **Backend** → Python + FastAPI / Uvicorn
- **Package Management** → pnpm (Node.js), uv (Python)
- **Containers** → Docker & Docker Compose
### Infrastructure
- **Secrets Management** → HashiCorp Vault
- **Container Registry** → Azure Container Registry
- **Observability** → OpenTelemetry
- **Reverse Proxy** → Traefik
---
## 🎯 Your First ChatEgresos App
1. **Generate from a template**
```bash
mise new mi-app-chategresos
```
2. **Start the environment**
```bash
mise dev --app mi-app-chategresos
```
3. **Access your application**
- 🌐 Frontend: [http://localhost:3000](http://localhost:3000)
- ⚙️ Backend API: [http://localhost:8000](http://localhost:8000)
---
## 🔧 Configuration
### Local Development
- Frontend → Port `3000`
- Backend APIs → Port `8000`
- Containers → Auto-assigned ports (8001+)
### Debugging
- Use `--no-dashboard` for cleaner log output
- Run `mise dev --check-deps` to verify dependencies
- Container logs:
```bash
docker logs <container-name>
```
---
## 🤝 Contributing
1. Create new applications using the available templates
2. Follow the monorepo structure
3. Use the recommended development commands
4. Verify dependencies and run tests before opening PRs
---
## 📖 Additional Resources
- 📁 **Templates** → `.templates/`
- 🐳 **Docker Config** → `.containers/`
- ⚡ **Automated Tasks** → `.mise/tasks/`
---
*ChatEgresos: AI-powered innovation for expense management* 🚀


@@ -0,0 +1,27 @@
/** @type {import('tailwindcss').Config} */
export default {
  content: ["./index.html", "./gui/**/*.{js,ts,jsx,tsx}"],
  theme: {
    extend: {
      backgroundImage: {
        "navigation-pattern": "url('./assets/navigation.webp')",
      },
    },
  },
  plugins: [
    require("daisyui"),
    require("tailwind-scrollbar"),
    require("@banorte/chat-ui/tailwind"),
  ],
  daisyui: {
    themes: [
      {
        light: {
          ...require("daisyui/src/theming/themes")["light"],
          primary: "red",
          secondary: "teal",
        },
      },
    ],
  },
};


@@ -0,0 +1,25 @@
{
  "compilerOptions": {
    "target": "ES2023",
    "useDefineForClassFields": true,
    "lib": ["ES2023", "DOM", "DOM.Iterable", "ES2021.String"],
    "module": "ESNext",
    "skipLibCheck": true,

    /* Bundler mode */
    "moduleResolution": "bundler",
    "allowImportingTsExtensions": true,
    "resolveJsonModule": true,
    "isolatedModules": true,
    "noEmit": true,
    "jsx": "react-jsx",

    /* Linting */
    "strict": true,
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "noFallthroughCasesInSwitch": true
  },
  "include": ["gui"],
  "references": [{ "path": "./tsconfig.node.json" }]
}


@@ -0,0 +1,11 @@
{
  "compilerOptions": {
    "composite": true,
    "skipLibCheck": true,
    "module": "ESNext",
    "moduleResolution": "bundler",
    "allowSyntheticDefaultImports": true,
    "strict": true
  },
  "include": ["vite.config.ts"]
}


@@ -0,0 +1,17 @@
import { defineConfig } from "vite";
import react from "@vitejs/plugin-react";

// https://vitejs.dev/config/
export default defineConfig({
  plugins: [react()],
  server: {
    host: "0.0.0.0",
    port: 3000,
    proxy: {
      "/api": {
        target: "http://localhost:8000",
      },
    },
    allowedHosts: true,
  },
});