gaurv007 committed on
Commit
4fc0527
·
verified ·
1 Parent(s): d91ab53

fix: ui.py — fix dry run button by loading .env before subprocess and handling async properly

Browse files
Files changed (1) hide show
  1. alpha_factory/ui.py +66 -22
alpha_factory/ui.py CHANGED
@@ -4,11 +4,19 @@ View generated alphas, copy expressions, run new batches.
4
 
5
  Run: uv run python -m alpha_factory.ui
6
  """
 
 
 
7
  import duckdb
8
  import gradio as gr
9
  from pathlib import Path
10
- from datetime import datetime
11
 
 
 
 
 
 
 
12
 
13
  DB_PATH = Path("factor_store/alphas.duckdb")
14
 
@@ -44,35 +52,66 @@ def get_alphas_from_db(limit=50):
44
  def get_alpha_cards():
45
  rows = get_alphas_from_db()
46
  if not rows:
47
- return []
48
  data = []
49
  for row in rows:
50
  alpha_id, submitted_at, expression, theme, archetype, tag, neutral, decay, fields, verdict = row
51
  timestamp = submitted_at.strftime("%Y-%m-%d %H:%M") if submitted_at else "?"
52
  verdict_str = {"promote": "PASS", "iterate": "PENDING", "kill": "FAIL"}.get(verdict or "", "NEW")
53
- expr_preview = (expression[:100] + "...") if expression and len(expression) > 100 else (expression or "")
54
- data.append([timestamp, alpha_id[:10], theme or "", archetype or "", tag or "", decay or 0, verdict_str, expr_preview])
55
  return data
56
 
57
 
58
  def get_full_expression(evt: gr.SelectData):
59
  rows = get_alphas_from_db()
60
  if not rows or evt.index is None:
61
- return ""
62
  row_idx = evt.index[0] if isinstance(evt.index, (list, tuple)) else evt.index
63
  if row_idx < len(rows):
64
  return rows[row_idx][2] or ""
65
  return ""
66
 
67
 
68
- def run_batch_and_refresh(batch_size):
69
- import subprocess, sys
70
- result = subprocess.run(
71
- [sys.executable, "-m", "alpha_factory.run", "--dry-run", "--batch-size", str(int(batch_size))],
72
- capture_output=True, text=True, encoding="utf-8", errors="replace"
73
- )
74
- log = result.stdout[-3000:] if result.stdout else result.stderr[-3000:]
75
- return get_alpha_cards(), log
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
 
77
 
78
  def build_ui():
@@ -84,13 +123,14 @@ def build_ui():
84
 
85
  with gr.Row():
86
  with gr.Column(scale=1):
87
- batch_size_input = gr.Number(value=5, label="Batch Size", minimum=1, maximum=20)
88
- generate_btn = gr.Button("🚀 Generate New Batch (Dry Run)", variant="primary")
89
  refresh_btn = gr.Button("🔄 Refresh Table")
 
90
  with gr.Column(scale=3):
91
- gr.Markdown("Click any row below to see the **full expression** with copy support.")
92
 
93
- gr.Markdown("### 📋 Generated Alphas")
94
 
95
  alpha_table = gr.Dataframe(
96
  value=get_alpha_cards(),
@@ -99,20 +139,24 @@ def build_ui():
99
  wrap=True,
100
  )
101
 
102
- gr.Markdown("### 📝 Full Expression — select all & copy (Ctrl+A, Ctrl+C)")
103
  full_expr = gr.Textbox(
104
  label="Full Expression",
105
  lines=6,
106
- interactive=False,
107
  )
108
 
109
  gr.Markdown("### 📜 Pipeline Log")
110
- pipeline_log = gr.Textbox(label="Last Run Output", lines=12, interactive=False)
111
 
112
  # Events
113
  alpha_table.select(get_full_expression, outputs=[full_expr])
114
- refresh_btn.click(lambda: get_alpha_cards(), outputs=[alpha_table])
115
- generate_btn.click(run_batch_and_refresh, inputs=[batch_size_input], outputs=[alpha_table, pipeline_log])
 
 
 
 
116
 
117
  return app
118
 
 
4
 
5
  Run: uv run python -m alpha_factory.ui
6
  """
7
+ import os
8
+ import sys
9
+ import subprocess
10
  import duckdb
11
  import gradio as gr
12
  from pathlib import Path
 
13
 
14
+ # Load .env so HF_TOKEN is available
15
+ try:
16
+ from dotenv import load_dotenv
17
+ load_dotenv()
18
+ except ImportError:
19
+ pass
20
 
21
  DB_PATH = Path("factor_store/alphas.duckdb")
22
 
 
52
def get_alpha_cards():
    """Build display rows for the alpha table.

    Returns a list of 8-column rows:
    [timestamp, short id, theme, archetype, tag, decay, verdict label,
    truncated expression]. When the store is empty, returns a single
    placeholder row so the Dataframe is never blank.
    """
    verdict_labels = {"promote": "PASS", "iterate": "PENDING", "kill": "FAIL"}

    def _as_card(record):
        # Column order mirrors the SELECT in get_alphas_from_db.
        alpha_id, submitted_at, expression, theme, archetype, tag, neutral, decay, fields, verdict = record
        when = submitted_at.strftime("%Y-%m-%d %H:%M") if submitted_at else "?"
        status = verdict_labels.get(verdict or "", "NEW")
        expr = expression or ""
        if len(expr) > 80:
            expr = expr[:80] + "..."
        return [when, alpha_id[:10], theme or "", archetype or "", tag or "", str(decay or 0), status, expr]

    rows = get_alphas_from_db()
    if not rows:
        return [["No alphas yet", "", "", "", "", "", "", "Run a batch first"]]
    return [_as_card(row) for row in rows]
64
 
65
 
66
def get_full_expression(evt: gr.SelectData):
    """Return the full expression for the table row the user selected.

    Shows a hint when nothing is selectable; empty string when the index
    falls outside the current result set.
    """
    rows = get_alphas_from_db()
    if not rows or evt.index is None:
        return "Click a row above to see the full expression"
    # Gradio may report (row, col) or a bare row index depending on version.
    idx = evt.index
    if isinstance(idx, (list, tuple)):
        idx = idx[0]
    if idx >= len(rows):
        return ""
    return rows[idx][2] or ""
74
 
75
 
76
def run_batch(batch_size):
    """Run the alpha-factory pipeline as a dry-run subprocess.

    Args:
        batch_size: Number of alphas to generate; coerced to int before
            being passed on the command line.

    Returns:
        A human-readable log string: the tail of stdout, plus the tail of
        stderr when the process failed, or an error message on timeout or
        spawn failure. Never raises.
    """
    # os.environ.copy() already includes HF_TOKEN once load_dotenv() has run
    # at import time, so no extra token back-fill is needed (the previous
    # fallback via os.getenv could only ever re-read the same missing value).
    env = os.environ.copy()
    try:
        result = subprocess.run(
            [sys.executable, "-m", "alpha_factory.run",
             "--dry-run", "--batch-size", str(int(batch_size))],
            capture_output=True,
            text=True,
            encoding="utf-8",
            errors="replace",  # never crash on odd bytes in pipeline output
            env=env,
            timeout=120,  # keep the UI responsive if the pipeline hangs
        )
    except subprocess.TimeoutExpired:
        return "ERROR: Pipeline timed out after 120 seconds"
    except Exception as e:  # surface any spawn failure in the UI log box
        return f"ERROR: {e}"

    # Keep only the tails so the log textbox stays readable.
    log = result.stdout[-3000:] if result.stdout else ""
    if result.returncode != 0 and result.stderr:
        log += "\n\n--- ERRORS ---\n" + result.stderr[-2000:]
    if not log.strip():
        log = f"Process exited with code {result.returncode}"
    return log
108
+
109
+
110
def generate_and_refresh(batch_size):
    """Run a dry-run batch, then return (refreshed table rows, run log).

    The batch must finish before the table is rebuilt so newly generated
    alphas appear in the refreshed view.
    """
    run_log = run_batch(batch_size)
    return get_alpha_cards(), run_log
115
 
116
 
117
  def build_ui():
 
123
 
124
  with gr.Row():
125
  with gr.Column(scale=1):
126
+ batch_size_input = gr.Number(value=3, label="Batch Size", minimum=1, maximum=20)
127
+ generate_btn = gr.Button("🚀 Generate New Batch", variant="primary")
128
  refresh_btn = gr.Button("🔄 Refresh Table")
129
+ gr.Markdown("*Dry run mode — no BRAIN submissions*")
130
  with gr.Column(scale=3):
131
+ stats_md = gr.Markdown(f"**Alphas in store:** {len(get_alphas_from_db())}")
132
 
133
+ gr.Markdown("### 📋 Click any row to see full expression")
134
 
135
  alpha_table = gr.Dataframe(
136
  value=get_alpha_cards(),
 
139
  wrap=True,
140
  )
141
 
142
+ gr.Markdown("### 📝 Full Expression — Ctrl+A then Ctrl+C to copy")
143
  full_expr = gr.Textbox(
144
  label="Full Expression",
145
  lines=6,
146
+ interactive=True, # So user can select & copy
147
  )
148
 
149
  gr.Markdown("### 📜 Pipeline Log")
150
+ pipeline_log = gr.Textbox(label="Output", lines=15, interactive=False)
151
 
152
  # Events
153
  alpha_table.select(get_full_expression, outputs=[full_expr])
154
+ refresh_btn.click(get_alpha_cards, outputs=[alpha_table])
155
+ generate_btn.click(
156
+ generate_and_refresh,
157
+ inputs=[batch_size_input],
158
+ outputs=[alpha_table, pipeline_log],
159
+ )
160
 
161
  return app
162