Spaces:
Sleeping
Sleeping
"""Pre-warm the LocalAI model before starting the backend.

Run this once before launching the backend so the first real request does
not pay the model-download / model-load cost. Exits with status 1 on any
failure so it can gate a startup script.
"""
import os
import sys

# Point Hugging Face caches at the project-local cache directory.
# NOTE: TRANSFORMERS_CACHE is deprecated in newer transformers releases;
# it is kept alongside HF_HOME for backward compatibility.
os.environ['HF_HOME'] = 'd:/VSC Codes/Bild/.cache/hf'
os.environ['TRANSFORMERS_CACHE'] = 'd:/VSC Codes/Bild/.cache/hf'

# Make the project package importable when this script is run directly.
sys.path.insert(0, 'd:/VSC Codes/Bild/imageforge')

print("Pre-warming LocalAI model...")
print("This will take 30-60 seconds the first time.\n")

try:
    from backend.app.local_ai.engine import LocalAIEngine, LocalAIRequest
    print("[OK] Imports successful")

    print("Creating engine...")
    engine = LocalAIEngine()
    if not engine.is_available():
        print("[FAIL] Engine not available!")
        sys.exit(1)
    print("[OK] Engine available")
    print(f"  Model: {engine.model_id}")

    print("\nLoading model (this is the slow part)...")
    # A minimal single-step generation forces the weights to load and
    # populate the cache, so subsequent backend startups are fast.
    image = engine.generate(LocalAIRequest(
        prompt="test",
        negative_prompt="",
        width=512,
        height=512,
        steps=1,  # 1 step only: we exercise loading, not image quality
        guidance=7.5,
        seed=42,
    ))
    print("\n[OK] Model loaded successfully!")
    print(f"Generated test image: {image.size}")
    print("\nThe backend should now start quickly.\n")
except Exception as e:  # broad catch is intentional: report any failure and abort
    print(f"\n[FAIL] Failed to pre-warm model: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)