"""FastAPI application entry point.

Wires up the application lifespan (logging session + AI model loading),
CORS middleware, the temp-upload directory, and all API routers.
"""

import asyncio
import os
from contextlib import asynccontextmanager

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from src.core.config import MAX_CONCURRENT_INFERENCES
from src.core.logging import log, init_logging_session, close_logging_session
from src.api import danger, explorer, search, system, upload


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: set up logging and AI models on startup,
    tear down the logging session on shutdown.

    The AI manager is constructed in a thread-pool executor because model
    loading is blocking and would otherwise stall the event loop.
    """
    await init_logging_session()
    log("INFO", "server.startup", message="Loading AI models...")

    # Imported lazily so the (heavy) AI stack is only pulled in at startup,
    # not at module import time.
    from src.services.ai_manager import AIModelManager

    # get_running_loop() is the correct call inside a coroutine;
    # get_event_loop() is deprecated here since Python 3.10 and may
    # bind an unintended loop.
    loop = asyncio.get_running_loop()
    app.state.ai = await loop.run_in_executor(None, AIModelManager)
    # Bound concurrent inference requests so the model isn't oversubscribed.
    app.state.ai_semaphore = asyncio.Semaphore(MAX_CONCURRENT_INFERENCES)

    log("INFO", "server.ready", message="All models loaded. API ready.")
    yield
    log("INFO", "server.shutdown", message="API shutting down.")
    await close_logging_session()


app = FastAPI(lifespan=lifespan)

# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# an invalid CORS combination — browsers refuse a wildcard origin when
# credentials are allowed, and Starlette will not emit one. Replace "*" with
# the explicit frontend origin(s) in production for credentialed requests
# to work.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Update this to your frontend URL in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Ensure the upload staging directory exists before any request handler
# tries to write into it.
os.makedirs("temp_uploads", exist_ok=True)

app.include_router(system.router)
app.include_router(upload.router)
app.include_router(search.router)
app.include_router(explorer.router)
app.include_router(danger.router)