gaurv007 committed
Commit d221ea2 · verified · Parent: 10c2948

Upload alpha_factory/run.py with huggingface_hub
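Per the commit message, the file was pushed programmatically with huggingface_hub rather than through a git checkout. A minimal sketch of such an upload, assuming a hypothetical repo id and a token supplied via the environment (neither appears in this commit), would be:

# Hypothetical reproduction of the upload; repo id is a placeholder.
from huggingface_hub import HfApi

api = HfApi()  # picks up HF_TOKEN from the environment or cached login
api.upload_file(
    path_or_fileobj="alpha_factory/run.py",
    path_in_repo="alpha_factory/run.py",
    repo_id="gaurv007/alpha-factory",  # placeholder, not from this commit
    commit_message="Upload alpha_factory/run.py with huggingface_hub",
)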

Files changed (1):
  alpha_factory/run.py  +40 -8
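In summary, the diff below replaces the static --config override with runtime model discovery: three new flags (--interactive, --hf-token, --ollama-url) and a setup_models() helper that drives a ModelManager from alpha_factory.infra. After this change the entry point can be launched as, for example:

python -m alpha_factory.run --dry-run --batch-size 5 --interactive

(The flag values here are illustrative; the defaults in the diff are a batch size of 10 and http://localhost:11434 for Ollama.)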
alpha_factory/run.py CHANGED
@@ -1,41 +1,73 @@
 """
 Alpha Factory — Entry Point
-Run: python -m alpha_factory.run [--dry-run] [--batch-size N]
+Run: python -m alpha_factory.run [--dry-run] [--batch-size N] [--interactive]
 """
 import asyncio
 import argparse
 from rich.console import Console
 from .config import load_config
+from .infra import ModelManager, interactive_model_select, LLMClient
 from .orchestration import AlphaPipeline
 
 console = Console()
 
 
+async def setup_models(interactive: bool = False, hf_token: str = None) -> ModelManager:
+    """Discover models and optionally let user pick interactively."""
+    manager = ModelManager(hf_token=hf_token)
+
+    console.print("\n[bold]🔍 Discovering available models...[/]")
+    await manager.discover_all()
+
+    if interactive:
+        # Let user pick models for each tier
+        selections = interactive_model_select(manager)
+        for tier, model in selections.items():
+            manager.select_model(tier, model)
+    else:
+        # Auto-assign best available
+        manager.auto_assign_defaults()
+
+    manager.print_status()
+    return manager
+
+
 def main():
     parser = argparse.ArgumentParser(description="Alpha Factory — LLM-Driven Alpha Generation Pipeline")
     parser.add_argument("--dry-run", action="store_true", help="Run without BRAIN submissions")
     parser.add_argument("--batch-size", type=int, default=10, help="Number of candidates per batch")
-    parser.add_argument("--config", type=str, default=None, help="Path to config override file")
+    parser.add_argument("--interactive", action="store_true", help="Interactively select models")
+    parser.add_argument("--hf-token", type=str, default=None, help="HuggingFace API token (or set HF_TOKEN env)")
+    parser.add_argument("--ollama-url", type=str, default="http://localhost:11434", help="Ollama server URL")
     args = parser.parse_args()
 
     config = load_config()
-    if args.batch_size:
-        config.batch_size = args.batch_size
+    config.batch_size = args.batch_size
 
     console.print(f"""
 [bold green]━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[/]
-[bold green] ALPHA FACTORY v{config.__class__.__module__.split('.')[0]}[/]
+[bold green] ALPHA FACTORY v0.1.0[/]
 [bold green] Open-Source LLM-Driven Pipeline for WorldQuant BRAIN[/]
 [bold green]━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[/]
 
 Mode: {"DRY RUN (no BRAIN submissions)" if args.dry_run else "LIVE"}
 Batch size: {args.batch_size}
-LLM: {config.llm.base_url}
-Model: {config.llm.microfish_model} (generation)
-       {config.llm.mediumfish_model} (critique)
+Ollama: {args.ollama_url}
+HF Token: {"✓ Set" if args.hf_token or __import__('os').getenv('HF_TOKEN') else "✗ Not set (cloud models unavailable)"}
 """)
 
+    # Discover and select models
+    manager = asyncio.run(setup_models(
+        interactive=args.interactive,
+        hf_token=args.hf_token,
+    ))
+
+    # Update LLM config with Ollama URL
+    config.llm.base_url = f"{args.ollama_url}/v1"
+
+    # Create pipeline with model manager
     pipeline = AlphaPipeline(config)
+    pipeline.llm = LLMClient(config.llm, model_manager=manager)
 
     try:
        result = asyncio.run(pipeline.run_batch(args.batch_size))
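Note that the new imports from .infra (ModelManager, interactive_model_select, LLMClient) are not part of this diff, so their interfaces can only be inferred from the call sites above. A hypothetical sketch of the surface setup_models() appears to rely on; only the names come from the diff, every body here is an assumption:

# Hypothetical sketch of the alpha_factory.infra surface used by run.py.
from dataclasses import dataclass, field
from typing import Dict, List, Optional


@dataclass
class ModelManager:
    hf_token: Optional[str] = None
    # tier name (e.g. "generation", "critique") -> discovered model ids
    available: Dict[str, List[str]] = field(default_factory=dict)
    selected: Dict[str, str] = field(default_factory=dict)

    async def discover_all(self) -> None:
        # Assumption: probe the local Ollama server (and, when hf_token
        # is set, the Hugging Face Hub) to populate self.available.
        ...

    def select_model(self, tier: str, model: str) -> None:
        # Pin one model to a tier.
        self.selected[tier] = model

    def auto_assign_defaults(self) -> None:
        # Assumption: take the first discovered model for each tier.
        self.selected = {t: ms[0] for t, ms in self.available.items() if ms}

    def print_status(self) -> None:
        # Report the tier -> model assignment.
        for tier, model in self.selected.items():
            print(f"{tier}: {model}")


def interactive_model_select(manager: ModelManager) -> Dict[str, str]:
    # Assumption: prompt the user once per tier; here we just return
    # the first discovered model per tier as a stand-in.
    return {tier: models[0] for tier, models in manager.available.items() if models}

LLMClient(config.llm, model_manager=manager) then presumably routes each tier's requests to its selected model via the Ollama-compatible base_url set above; the actual implementation lives in alpha_factory/infra and is not shown in this commit.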