"""
Alpha Factory β€” Entry Point v2
Run: python -m alpha_factory.run [--dry-run] [--batch-size N] [--interactive]
[--proven] [--enable-brain]
"""
import os
import asyncio
import argparse
from typing import Optional
# Load .env file FIRST before anything else reads env vars
try:
    from dotenv import load_dotenv
    load_dotenv()  # Reads .env from current directory
except ImportError:
    pass  # python-dotenv not installed; rely on system env vars
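
# Expected .env keys, judging from the flags and messages in this module, are
# roughly the following (see .env.example in the repository for the real template):
#   HF_TOKEN=...              # HuggingFace API token, needed for cloud models
#   BRAIN_SESSION_TOKEN=...   # needed for live BRAIN submissions (--enable-brain)
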
from rich.console import Console
from .config import load_config
from .infra import ModelManager, interactive_model_select, LLMClient
from .orchestration import AlphaPipeline
console = Console()

async def setup_models(interactive: bool = False, hf_token: Optional[str] = None) -> ModelManager:
"""Discover models and optionally let user pick interactively."""
manager = ModelManager(hf_token=hf_token)
console.print("\n[bold]πŸ” Discovering available models...[/]")
await manager.discover_all()
if interactive:
selections = interactive_model_select(manager)
for tier, model in selections.items():
manager.select_model(tier, model)
else:
manager.auto_assign_defaults()
manager.print_status()
return manager
def main():
    parser = argparse.ArgumentParser(description="Alpha Factory - LLM-Driven Alpha Generation Pipeline")
    parser.add_argument("--dry-run", action="store_true", help="Run without BRAIN submissions")
    parser.add_argument("--batch-size", type=int, default=10, help="Number of candidates per batch")
    parser.add_argument("--interactive", action="store_true", help="Interactively select models")
    parser.add_argument("--hf-token", type=str, default=None, help="HuggingFace API token (or set HF_TOKEN env)")
    parser.add_argument("--ollama-url", type=str, default="http://localhost:11434", help="Ollama server URL")
    parser.add_argument("--proven", action="store_true", help="Use proven templates (no LLM, deterministic generation)")
    parser.add_argument("--enable-brain", action="store_true", help="Enable live BRAIN submission (requires BRAIN_SESSION_TOKEN)")
    args = parser.parse_args()
    config = load_config()
    config.batch_size = args.batch_size
    config.use_proven_templates = args.proven
    config.enable_brain_client = args.enable_brain
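    # --dry-run always wins over --enable-brain: a dry run never submits to BRAIN.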
    if args.dry_run:
        config.enable_brain_client = False
    # Resolve HF token: CLI arg > env var (loaded from .env)
    hf_token = args.hf_token or os.getenv("HF_TOKEN")
    mode_str = "PROVEN TEMPLATES" if args.proven else "LLM GENERATION"
    brain_str = "LIVE (BRAIN submissions)" if config.enable_brain_client else "DRY RUN"
    console.print(f"""
[bold green]━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[/]
[bold green] ALPHA FACTORY v0.2.0[/]
[bold green] Open-Source LLM-Driven Pipeline for WorldQuant BRAIN[/]
[bold green]━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[/]
Mode: {mode_str}
Brain: {brain_str}
Batch size: {args.batch_size}
Ollama: {args.ollama_url}
HF Token: {"✓ Set" if hf_token else "✗ Not set (cloud models unavailable)"}
[yellow]NOTE: This is v0.2.0 with real personas wired, but BRAIN integration
requires a valid BRAIN_SESSION_TOKEN. See .env.example for setup.[/]
""")
    # Discover and select models (only needed for LLM mode)
    if not args.proven:
        manager = asyncio.run(setup_models(
            interactive=args.interactive,
            hf_token=hf_token,
        ))
    else:
        manager = None
        console.print(" [green]Proven template mode - no LLM model discovery needed[/]")
    # Update LLM config with Ollama URL
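    # The /v1 suffix targets Ollama's OpenAI-compatible endpoint, which LLMClient is
    # assumed to speak here.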
    config.llm.base_url = f"{args.ollama_url}/v1"
    # Create pipeline
    pipeline = AlphaPipeline(config)
    if manager:
        pipeline.llm = LLMClient(config.llm, model_manager=manager)
    # Initialize the BRAIN client (if enabled) and run the batch inside a single event
    # loop, so the aiohttp session is created, used, and closed on the same loop.
    async def run_pipeline():
        session = None
        if config.enable_brain_client:
            try:
                import aiohttp
            except ImportError:
                console.print("[red]aiohttp required for BRAIN client. pip install aiohttp[/]")
                config.enable_brain_client = False
            else:
                session = aiohttp.ClientSession()
                await pipeline.init_brain_client(session)
        try:
            return await pipeline.run_batch(args.batch_size)
        finally:
            if session is not None:
                await session.close()

    try:
        result = asyncio.run(run_pipeline())
        console.print(f"\n[bold]Final: {result}[/]")
    except KeyboardInterrupt:
        console.print("\n[yellow]Interrupted by user[/]")
    finally:
        pipeline.close()

if __name__ == "__main__":
    main()