"""Main application entry point and FastAPI app configuration."""
|
|
|
|
import logging
|
|
from collections.abc import AsyncIterator
|
|
from contextlib import asynccontextmanager
|
|
|
|
import uvicorn
|
|
from fastapi import FastAPI
|
|
from fastapi.middleware.cors import CORSMiddleware
|
|
|
|
from .config import settings
|
|
from .dependencies import init_services, shutdown_services, startup_services
|
|
from .routers import conversation_router, notification_router, quick_replies_router
|
|
|
|
# Configure logging
|
|
logging.basicConfig(
|
|
level=logging.INFO,
|
|
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
|
)
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
@asynccontextmanager
async def lifespan(_: FastAPI) -> AsyncIterator[None]:
    """Application lifespan manager.

    Runs service initialization before the app starts accepting requests
    (everything before ``yield``) and tears services down after the server
    stops (everything after ``yield``). Passed to ``FastAPI(lifespan=...)``.

    The unused parameter is the ``FastAPI`` instance supplied by the
    framework.
    """
    # Startup: register services with the current settings, then run their
    # async startup hooks.
    logger.info("Initializing services...")
    init_services(settings)
    await startup_services()
    logger.info("Application started successfully")

    # Control returns to the server here for the app's whole lifetime.
    yield

    # Shutdown.
    # NOTE(review): not wrapped in try/finally, so if startup raises before
    # the yield these shutdown hooks never run — confirm that is intended.
    logger.info("Shutting down services...")
    await shutdown_services()
    logger.info("Application shutdown complete")
|
|
|
|
|
|
# ASGI application instance; service startup/shutdown is delegated to the
# ``lifespan`` context manager defined above.
app = FastAPI(
    title="Capa de Integración - Orchestrator Service",
    description="Conversational AI orchestrator for Dialogflow CX, Gemini, and Vertex AI",
    version="0.1.0",
    lifespan=lifespan,
)
|
|
|
|
# CORS middleware
# Note: Type checker reports false positive for CORSMiddleware
# This is the correct FastAPI pattern per official documentation
# NOTE(review): per the CORS spec, browsers reject credentialed responses
# whose Access-Control-Allow-Origin is "*" — wildcard origins together with
# allow_credentials=True likely won't work from a browser; confirm and
# replace with an explicit origin list for production.
app.add_middleware(
    CORSMiddleware,  # ty: ignore
    allow_origins=["*"],  # Configure appropriately for production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
|
|
|
# Mount every feature router on the application.
for _router in (conversation_router, notification_router, quick_replies_router):
    app.include_router(_router)
|
|
|
|
|
|
@app.get("/health")
async def health_check() -> dict[str, str]:
    """Liveness probe.

    Returns a constant payload identifying the service, suitable for
    container/load-balancer health checks.
    """
    payload = {"status": "healthy", "service": "capa-de-integracion"}
    return payload
|
|
|
|
|
|
def main() -> None:
    """CLI entry point: parse server options and launch uvicorn.

    Each flag maps one-to-one onto a ``uvicorn.run`` setting; defaults suit
    a single-process container deployment.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Capa de Integración server")
    add = parser.add_argument
    add("--host", default="0.0.0.0", help="Bind host (default: 0.0.0.0)")  # noqa: S104
    add("--port", type=int, default=8080, help="Bind port (default: 8080)")
    add("--workers", type=int, default=1, help="Number of worker processes (default: 1)")
    add("--limit-concurrency", type=int, default=None, help="Max concurrent connections per worker")
    add("--backlog", type=int, default=2048, help="TCP listen backlog (default: 2048)")
    add("--reload", action="store_true", help="Enable auto-reload (dev only)")
    opts = parser.parse_args()

    # The app is given as an import string so uvicorn can re-import it in
    # each worker process and on auto-reload.
    uvicorn.run(
        "capa_de_integracion.main:app",
        host=opts.host,
        port=opts.port,
        workers=opts.workers,
        limit_concurrency=opts.limit_concurrency,
        backlog=opts.backlog,
        reload=opts.reload,
    )
|
|
|
|
|
|
# Allow running the module directly (e.g. `python -m capa_de_integracion.main`).
if __name__ == "__main__":
    main()
|