# ==============================================================================
# EATHVISION API SERVER (Production Version)
# Description: High-performance FastAPI server running native YOLO (.pt) models.
#              Integrates dual USDA databases for macros and ingredients.
# ==============================================================================
import os
import io
import warnings

import pandas as pd
from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from fastapi.responses import JSONResponse, FileResponse
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
from ultralytics import YOLO
from PIL import Image

warnings.filterwarnings('ignore')

# --- 1. Server Configuration ---
app = FastAPI(
    title="eath API",
    description="Production endpoint for dish classification and advanced nutrition tracking.",
    version="1.0.0"
)

# Enable CORS for external frontend requests if needed
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount the static directory to serve HTML, CSS, JS, and Images
app.mount("/static", StaticFiles(directory="static"), name="static")

# --- 2. Global Variables & Resource Loading ---
MODELS_DIR = "models"
DATA_DIR = "data"

# File paths for both databases
MACROS_CSV_PATH = os.path.join(DATA_DIR, "Dishes Information and DV values V1.csv")
INGREDIENTS_CSV_PATH = os.path.join(DATA_DIR, "Nutrition Ingredient Database- Version 1.csv")

loaded_models = {}       # name -> YOLO model instance ("yolo_x", "yolo_s")
macros_db = None         # pandas DataFrame of per-100g macros, or None if load failed
ingredients_db = None    # pandas DataFrame of dish -> ingredient strings, or None


def load_resources():
    """Load YOLO .pt models into memory and parse both nutrition CSVs.

    Populates the module-level ``loaded_models``, ``macros_db`` and
    ``ingredients_db`` globals. Missing model files are skipped silently
    (the /api/predict endpoint reports them as unavailable); CSV load
    failures are logged but do not abort startup.
    """
    global loaded_models, macros_db, ingredients_db

    print("[INIT] Loading AI Models into memory...")

    # Load YOLO-X (High Precision)
    yolo_x_path = os.path.join(MODELS_DIR, "best_yolo_x.pt")
    if os.path.exists(yolo_x_path):
        loaded_models["yolo_x"] = YOLO(yolo_x_path, task="classify")
        print(" -> YOLO-X Loaded Successfully.")

    # Load YOLO-S (High Speed)
    yolo_s_path = os.path.join(MODELS_DIR, "best_yolo_s.pt")
    if os.path.exists(yolo_s_path):
        loaded_models["yolo_s"] = YOLO(yolo_s_path, task="classify")
        print(" -> YOLO-S Loaded Successfully.")

    # Load Databases
    print("[INIT] Loading Databases...")
    try:
        macros_db = pd.read_csv(MACROS_CSV_PATH)
        # Strip stray whitespace so lookups by exact column name succeed.
        macros_db.columns = macros_db.columns.str.strip()
        print(f" -> Macros DB Loaded: {len(macros_db)} entries.")
    except Exception as e:
        print(f"[ERROR] Failed to load Macros CSV: {e}")

    try:
        ingredients_db = pd.read_csv(INGREDIENTS_CSV_PATH)
        ingredients_db.columns = ingredients_db.columns.str.strip()
        print(f" -> Ingredients DB Loaded: {len(ingredients_db)} entries.")
    except Exception as e:
        print(f"[ERROR] Failed to load Ingredients CSV: {e}")


# Initialize resources on startup
load_resources()


# --- 3. API Endpoints ---
@app.get("/")
async def serve_frontend():
    """Serves the main HTML interface from the root URL."""
    return FileResponse("static/index.html")


@app.post("/api/predict")
async def predict_dish(
    file: UploadFile = File(...),
    model_type: str = Form("yolo_x"),
    portion_g: int = Form(100)
):
    """
    Core prediction endpoint.
    Returns AI prediction, calculated macros (scaled to ``portion_g`` grams,
    assuming the CSV stores per-100g values), and ingredient list.

    Raises 400 for an unavailable model or unreadable image, 500 if
    inference itself fails.
    """
    if model_type not in loaded_models:
        raise HTTPException(status_code=400, detail=f"Model '{model_type}' is currently unavailable.")

    # Read and convert image
    try:
        image_bytes = await file.read()
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
    except Exception:
        raise HTTPException(status_code=400, detail="Invalid image file format.")

    # 1. AI Inference
    try:
        model = loaded_models[model_type]
        results = model(image, imgsz=320, verbose=False)
        top1_idx = results[0].probs.top1
        raw_class_name = results[0].names[top1_idx]
        confidence = float(results[0].probs.top1conf)
        # Class labels use underscores (e.g. "chicken_curry") -> "Chicken Curry"
        formatted_dish_name = raw_class_name.replace('_', ' ').title()
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Inference failed: {str(e)}")

    # 2. Extract Macros
    nutrition_data = {"status": "not_found", "message": "Macros not found."}
    if macros_db is not None:
        # First column is assumed to hold the dish name. Case-insensitive
        # substring match; regex=False so dish names containing regex
        # metacharacters (parentheses, '+', '.') cannot raise or mis-match.
        match = macros_db[macros_db.iloc[:, 0].astype(str).str.contains(
            formatted_dish_name, case=False, na=False, regex=False)]
        if not match.empty:
            base_data = match.iloc[0]
            multiplier = portion_g / 100.0

            def safe_calc(col_name):
                """Scale a per-100g value to the requested portion; 0.0 if missing/non-numeric."""
                try:
                    # Fetch by exact name if the column exists, else treat as 0
                    val = float(base_data[col_name]) if col_name in base_data else 0.0
                    return round(val * multiplier, 1)
                except (TypeError, ValueError):
                    # Non-numeric cell (e.g. "N/A") -- report as unavailable.
                    return 0.0

            nutrition_data = {
                "status": "success",
                "portion_g": portion_g,
                "base_data": {
                    "energy_kcal": safe_calc("Energy_kcal"),
                    "protein_g": safe_calc("Protein_g"),
                    "fat_g": safe_calc("Fat_g"),
                    "carbs_g": safe_calc("Carbs_g")
                }
            }

    # 3. Extract Ingredients
    ingredients_list = []
    if ingredients_db is not None:
        # Assuming first column is the dish name and the second contains
        # a comma-separated ingredient string.
        ing_match = ingredients_db[ingredients_db.iloc[:, 0].astype(str).str.contains(
            formatted_dish_name, case=False, na=False, regex=False)]
        if not ing_match.empty:
            # Convert the matched row's ingredient column to a string
            raw_ingredients = str(ing_match.iloc[0, 1])
            # Split by comma to create a clean list for the frontend
            ingredients_list = [ing.strip() for ing in raw_ingredients.split(',') if ing.strip()]

    # 4. Final Response Payload
    return JSONResponse(content={
        "ai_prediction": {
            "model_used": model_type,
            "dish_name": formatted_dish_name,
            "confidence": round(confidence * 100, 2)
        },
        "nutrition_insights": nutrition_data,
        "ingredients": ingredients_list
    })


if __name__ == "__main__":
    import uvicorn
    uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)