"""
Alpha Factory — Entry Point v2
Run: python -m alpha_factory.run [--dry-run] [--batch-size N] [--interactive]
     [--proven] [--enable-brain]
"""
import argparse
import asyncio
import os
from typing import Optional

# Load .env file FIRST before anything else reads env vars
try:
    from dotenv import load_dotenv
    load_dotenv()  # Reads .env from current directory
except ImportError:
    pass  # python-dotenv not installed; rely on system env vars

from rich.console import Console

from .config import load_config
from .infra import ModelManager, interactive_model_select, LLMClient
from .orchestration import AlphaPipeline
# Shared Rich console used for all terminal output in this entry point.
console = Console()
async def setup_models(interactive: bool = False, hf_token: Optional[str] = None) -> ModelManager:
    """Discover available models and assign one per tier.

    Args:
        interactive: When True, prompt the user to pick a model for each
            tier; otherwise automatic defaults are assigned.
        hf_token: HuggingFace API token used during discovery. None means
            cloud-hosted models are unavailable.

    Returns:
        A ModelManager with a model selected for every tier.
    """
    manager = ModelManager(hf_token=hf_token)
    console.print("\n[bold]π Discovering available models...[/]")
    await manager.discover_all()
    if interactive:
        # Let the user override the default per-tier assignment.
        selections = interactive_model_select(manager)
        for tier, model in selections.items():
            manager.select_model(tier, model)
    else:
        manager.auto_assign_defaults()
    manager.print_status()
    return manager
def main():
    """CLI entry point: parse arguments, configure, and run one pipeline batch."""
    parser = argparse.ArgumentParser(description="Alpha Factory β LLM-Driven Alpha Generation Pipeline")
    parser.add_argument("--dry-run", action="store_true", help="Run without BRAIN submissions")
    parser.add_argument("--batch-size", type=int, default=10, help="Number of candidates per batch")
    parser.add_argument("--interactive", action="store_true", help="Interactively select models")
    parser.add_argument("--hf-token", type=str, default=None, help="HuggingFace API token (or set HF_TOKEN env)")
    parser.add_argument("--ollama-url", type=str, default="http://localhost:11434", help="Ollama server URL")
    parser.add_argument("--proven", action="store_true", help="Use proven templates (no LLM, deterministic generation)")
    parser.add_argument("--enable-brain", action="store_true", help="Enable live BRAIN submission (requires BRAIN_SESSION_TOKEN)")
    args = parser.parse_args()

    config = load_config()
    config.batch_size = args.batch_size
    config.use_proven_templates = args.proven
    config.enable_brain_client = args.enable_brain
    if args.dry_run:
        # --dry-run always wins over --enable-brain.
        config.enable_brain_client = False

    # Resolve HF token: CLI arg > env var (loaded from .env)
    hf_token = args.hf_token or os.getenv("HF_TOKEN")

    mode_str = "PROVEN TEMPLATES" if args.proven else "LLM GENERATION"
    brain_str = "LIVE (BRAIN submissions)" if config.enable_brain_client else "DRY RUN"
    console.print(f"""
[bold green]ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ[/]
[bold green] ALPHA FACTORY v0.2.0[/]
[bold green] Open-Source LLM-Driven Pipeline for WorldQuant BRAIN[/]
[bold green]ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ[/]
Mode: {mode_str}
Brain: {brain_str}
Batch size: {args.batch_size}
Ollama: {args.ollama_url}
HF Token: {"β Set" if hf_token else "β Not set (cloud models unavailable)"}
[yellow]NOTE: This is v0.2.0 with real personas wired, but BRAIN integration
requires a valid BRAIN_SESSION_TOKEN. See .env.example for setup.[/]
""")

    # Discover and select models (only needed for LLM mode)
    if not args.proven:
        manager = asyncio.run(setup_models(
            interactive=args.interactive,
            hf_token=hf_token,
        ))
    else:
        manager = None
        console.print(" [green]Proven template mode β no LLM model discovery needed[/]")

    # Update LLM config with Ollama URL
    config.llm.base_url = f"{args.ollama_url}/v1"

    # Create pipeline
    pipeline = AlphaPipeline(config)
    if manager:
        pipeline.llm = LLMClient(config.llm, model_manager=manager)

    try:
        result = asyncio.run(_run_batch(pipeline, config, args.batch_size))
        console.print(f"\n[bold]Final: {result}[/]")
    except KeyboardInterrupt:
        console.print("\n[yellow]Interrupted by user[/]")
    finally:
        pipeline.close()


async def _run_batch(pipeline, config, batch_size):
    """Run one batch inside a single event loop, owning the BRAIN HTTP session.

    Fixes two defects in the previous flow: the aiohttp.ClientSession was
    created outside any running loop and shared across two separate
    asyncio.run() calls (so run_batch used a session bound to an
    already-closed loop), and the session was never closed. Creating,
    using, and closing it inside one coroutine resolves both.
    """
    session = None
    if config.enable_brain_client:
        # Initialize BRAIN client if enabled
        try:
            import aiohttp  # optional dependency; only needed for live submissions
            session = aiohttp.ClientSession()
            await pipeline.init_brain_client(session)
        except ImportError:
            console.print("[red]aiohttp required for BRAIN client. pip install aiohttp[/]")
            config.enable_brain_client = False
    try:
        return await pipeline.run_batch(batch_size)
    finally:
        if session is not None:
            await session.close()
# Script entry guard: run the CLI only when executed, not when imported.
if __name__ == "__main__":
    main()
|