narcolepticchicken committed
Commit b2d878e · 1 Parent(s): cf57590

Add end-to-end pipeline script

Files changed (1):
  1. end_to_end_pipeline.py +492 -0

end_to_end_pipeline.py ADDED
@@ -0,0 +1,492 @@
"""
Speculative Tool Actions — End-to-End Pipeline
================================================
1. Build datasets from SWE-smith + ToolBench
2. Train cheap proposer (Qwen3-1.7B + LoRA SFT)
3. Train verifier (Qwen3-4B + LoRA Reward)
4. Evaluate all 5 configs (A-E) on held-out set
5. Generate ablation report + cost-quality frontier

Run via: hf_jobs with GPU hardware (a10g-large or a100-large)
"""
import json
import re
import argparse
from collections import Counter, defaultdict
from random import Random

import torch
from datasets import load_dataset, Dataset
# AutoModelForSequenceClassification is needed in Step 4 to load the
# RewardTrainer verifier checkpoint (it carries a scalar classification head).
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer

# ============================================================================
# Configuration
# ============================================================================
HUB_ORG = "narcolepticchicken"
ACTION_TYPES = [
    "tool_call", "retrieval", "file_read", "file_write",
    "repair", "verifier", "ask_clarification", "final_answer", "BLOCKED",
]

# Cost weights (relative)
COST = {"strong_in": 1.0, "strong_out": 1.0, "cheap_in": 0.2, "cheap_out": 0.2}

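# Worked example of the COST weights: a run that spends 400 strong input
# tokens, 100 strong output tokens, 500 cheap input tokens, and 200 cheap
# output tokens costs 400*1.0 + 100*1.0 + 500*0.2 + 200*0.2 = 640 units.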
# ============================================================================
# Step 1: Dataset Builder
# ============================================================================
def classify_action(content, tool_calls=None):
    c = (content or "").lower()
    tc = json.dumps(tool_calls).lower() if tool_calls else ""
    combined = c + " " + tc
    if re.search(r'\b(final answer|conclusion|summary:|in conclusion|the answer is)\b', combined):
        return "final_answer"
    if re.search(r'\b(ask for clarification|need more info|could you clarify|what do you mean)\b', combined):
        return "ask_clarification"
    if re.search(r'\b(blocked|unsafe|i cannot|i\'m sorry, but|refuse|not allowed|harmful)\b', combined):
        return "BLOCKED"
    if re.search(r'\b(write.*file|save.*file|edit.*file|patch|diff)\b', combined):
        return "file_write"
    if re.search(r'\b(read.*file|view.*file|cat |head |tail |open.*file|get_content)\b', combined):
        return "file_read"
    if re.search(r'\b(repair|fix.*bug|correct.*error|debug|resolve|try.*again with)\b', combined):
        return "repair"
    if re.search(r'\b(verify|check|validate|test|assert|review)\b', combined):
        return "verifier"
    if re.search(r'\b(search|retrieve|find|lookup|query|google|bing)\b', combined):
        return "retrieval"
    if tool_calls or re.search(r'\b(function call|tool call|invoke|execute)\b', combined):
        return "tool_call"
    return "tool_call"

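# Illustrative calls:
#   classify_action("Let me search the docs for that flag")   -> "retrieval"
#   classify_action("", tool_calls=[{"name": "run_shell"}])   -> "tool_call"
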
def build_datasets(max_swe=5000, max_toolbench=3000):
    print("=== Step 1: Building Datasets ===")
    # SWE-smith
    ds_swe = load_dataset("SWE-bench/SWE-smith-trajectories", "tool", split="train", streaming=True)
    p_rows, v_rows, e_rows = [], [], []
    count = 0
    for ex in ds_swe:
        count += 1
        if count > max_swe:
            break
        msgs = ex.get("messages", [])
        resolved = ex.get("resolved", False)
        state = []
        for msg in msgs:
            role = msg.get("role", "")
            if role in ("assistant", "agent"):
                atype = classify_action(msg.get("content", ""), msg.get("tool_calls"))
                comp = [{"role": "assistant", "content": msg.get("content", "")}]
                if msg.get("tool_calls"):
                    comp[0]["tool_calls"] = msg["tool_calls"]
                p_rows.append({"prompt": list(state), "completion": comp, "action_type": atype})
                v_rows.append({"prompt": list(state), "completion": comp, "label": bool(resolved), "action_type": atype})
                e_rows.append({"messages": list(state) + comp, "resolved": resolved, "action_type": atype})
            state.append(msg)

    # ToolBench
    ds_tb = load_dataset("tuandunghcmut/toolbench-v1", split="train", streaming=True)
    count = 0
    for ex in ds_tb:
        count += 1
        if count > max_toolbench:
            break
        conv = ex.get("conversations", {})
        state = []  # reset per example so conversations do not leak across rows
        for role, content in zip(conv.get("from", []), conv.get("value", [])):
            msg = {"role": role, "content": content}
            if role == "assistant":
                atype = classify_action(content)
                # msg is appended to state only after emission, so the full
                # state is the prompt.
                p_rows.append({"prompt": list(state), "completion": [msg], "action_type": atype})
                v_rows.append({"prompt": list(state), "completion": [msg], "label": True, "action_type": atype})
                e_rows.append({"messages": state + [msg], "resolved": True, "action_type": atype})
            state.append(msg)

    print(f"Total rows: proposer={len(p_rows)}, verifier={len(v_rows)}, eval={len(e_rows)}")
    print("Action distribution:", Counter(r["action_type"] for r in p_rows).most_common())

    # Proposer SFT dataset
    def fmt_proposer(r):
        sys_msg = {"role": "system", "content": (
            "You are an agent action predictor. Given the conversation state, predict the next action from: "
            + ", ".join(ACTION_TYPES) + ". Respond with exactly the action name and a brief justification.")}
        # Copy message dicts before editing: they are shared across rows, so
        # in-place mutation would corrupt other rows' prompts and completions.
        prompt = [sys_msg] + [dict(m) for m in r["prompt"]]
        if prompt:
            prompt[-1]["content"] += "\n\n[Next Action Prediction] Choose one: " + ", ".join(ACTION_TYPES)
        comp = [dict(m) for m in r["completion"]]
        comp[0]["content"] = f"Action: {r['action_type']}\n" + comp[0]["content"]
        return {"prompt": prompt, "completion": comp}

    proposer_ds = Dataset.from_list([fmt_proposer(r) for r in p_rows]).shuffle(seed=42).train_test_split(test_size=0.1)
    proposer_ds.push_to_hub(f"{HUB_ORG}/speculative-actions-proposer-sft")
    print(f"Pushed proposer dataset to {HUB_ORG}/speculative-actions-proposer-sft")

    # Verifier preference dataset
    rng = Random(42)
    good = [r for r in v_rows if r["label"]]
    bad = [r for r in v_rows if not r["label"]]
    if len(bad) < len(good) * 0.2:
        # Too few organic negatives: synthesize rejects with a wrong action label.
        for r in good:
            wa = rng.choice([a for a in ACTION_TYPES if a != r["action_type"]])
            bad.append({
                "prompt": r["prompt"],
                "completion": [{"role": "assistant", "content": f"Action: {wa}\n(synthetic incorrect action)"}],
                "label": False, "action_type": wa,
            })
    pairs = []
    for g in good:
        b = rng.choice(bad)
        pairs.append({"prompt": g["prompt"], "chosen": g["completion"], "rejected": b["completion"], "action_type": g["action_type"]})
    verifier_ds = Dataset.from_list(pairs).shuffle(seed=42).train_test_split(test_size=0.1)
    verifier_ds.push_to_hub(f"{HUB_ORG}/speculative-actions-verifier-pref")
    print(f"Pushed verifier dataset to {HUB_ORG}/speculative-actions-verifier-pref")

    # Eval dataset
    eval_ds = Dataset.from_list(e_rows).shuffle(seed=42).select(range(min(2000, len(e_rows))))
    eval_ds.push_to_hub(f"{HUB_ORG}/speculative-actions-eval")
    print(f"Pushed eval dataset to {HUB_ORG}/speculative-actions-eval")

    return proposer_ds, verifier_ds, eval_ds

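# Resulting row shapes (illustrative values):
#   proposer SFT row : {"prompt": [system + prior chat msgs], "completion":
#                       [{"role": "assistant", "content": "Action: retrieval\n..."}]}
#   verifier pref row: {"prompt": [...], "chosen": [...], "rejected": [...],
#                       "action_type": "retrieval"}
#   eval row         : {"messages": [...], "resolved": True, "action_type": "retrieval"}
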
# ============================================================================
# Step 2: Train Proposer
# ============================================================================
def train_proposer():
    print("\n=== Step 2: Training Proposer ===")
    from trl import SFTTrainer, SFTConfig
    from peft import LoraConfig

    ds = load_dataset(f"{HUB_ORG}/speculative-actions-proposer-sft")
    peft_config = LoraConfig(
        r=16, lora_alpha=32,
        target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
        modules_to_save=["embed_tokens", "lm_head"],
    )
    config = SFTConfig(
        output_dir="/tmp/proposer-out",
        hub_model_id=f"{HUB_ORG}/speculative-proposer-qwen3-1.7b",
        push_to_hub=True,
        learning_rate=2e-4,
        per_device_train_batch_size=4,
        gradient_accumulation_steps=4,
        num_train_epochs=3,
        max_seq_length=4096,
        bf16=True,
        gradient_checkpointing=True,
        logging_strategy="steps",
        logging_steps=10,
        logging_first_step=True,
        disable_tqdm=True,
        report_to="trackio",
        run_name="proposer-sft-qwen3-1.7b",
    )
    trainer = SFTTrainer(
        model="Qwen/Qwen3-1.7B",
        train_dataset=ds["train"],
        eval_dataset=ds["test"],
        args=config,
        peft_config=peft_config,
    )
    trainer.train()
    trainer.push_to_hub()
    print("Proposer training complete.")

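# Loading the pushed proposer for standalone inference (sketch; assumes the
# adapter repo exists and that transformers' PEFT integration resolves the
# base model from the adapter's adapter_config.json):
#   tok = AutoTokenizer.from_pretrained("Qwen/Qwen3-1.7B")
#   model = AutoModelForCausalLM.from_pretrained(
#       f"{HUB_ORG}/speculative-proposer-qwen3-1.7b", torch_dtype=torch.bfloat16)
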
# ============================================================================
# Step 3: Train Verifier
# ============================================================================
def train_verifier():
    print("\n=== Step 3: Training Verifier ===")
    from trl import RewardTrainer, RewardConfig
    from peft import LoraConfig

    ds = load_dataset(f"{HUB_ORG}/speculative-actions-verifier-pref")
    peft_config = LoraConfig(
        r=16, lora_alpha=32,
        target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
        modules_to_save=["score"],
    )
    config = RewardConfig(
        output_dir="/tmp/verifier-out",
        hub_model_id=f"{HUB_ORG}/speculative-verifier-qwen3-4b",
        push_to_hub=True,
        learning_rate=1e-3,
        per_device_train_batch_size=2,
        gradient_accumulation_steps=8,
        num_train_epochs=2,
        max_length=4096,  # RewardConfig's truncation knob is max_length
        bf16=True,
        gradient_checkpointing=True,
        logging_strategy="steps",
        logging_steps=10,
        logging_first_step=True,
        disable_tqdm=True,
        report_to="trackio",
        run_name="verifier-reward-qwen3-4b",
    )
    trainer = RewardTrainer(
        model="Qwen/Qwen3-4B",
        train_dataset=ds["train"],
        eval_dataset=ds["test"],
        args=config,
        peft_config=peft_config,
    )
    trainer.train()
    trainer.push_to_hub()
    print("Verifier training complete.")

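# Scoring with the trained verifier (sketch; a RewardTrainer checkpoint carries
# a scalar classification head, so the reward is the single logit):
#   tok = AutoTokenizer.from_pretrained(f"{HUB_ORG}/speculative-verifier-qwen3-4b")
#   rm = AutoModelForSequenceClassification.from_pretrained(
#       f"{HUB_ORG}/speculative-verifier-qwen3-4b", num_labels=1)
#   reward = rm(**tok("dialogue text", return_tensors="pt")).logits[0, 0].item()
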
# ============================================================================
# Step 4: Evaluate Configs A-E
# ============================================================================
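# Configurations under test (mirrors the Step 5 report):
#   A: strong model only                 B: cheap model only
#   C: cheap proposer + strong yes/no check, strong fallback on reject
#   D: cheap proposer + trained reward-model judge, strong fallback on reject
#   E: n cheap proposals reranked by strong-model 1-10 scores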
class EvalRunner:
    def __init__(self, strong_name="Qwen/Qwen2.5-7B-Instruct", cheap_name="Qwen/Qwen3-1.7B",
                 verifier_name=None, device="cuda"):
        self.device = device
        self.strong_tok = AutoTokenizer.from_pretrained(strong_name, trust_remote_code=True)
        self.strong_model = AutoModelForCausalLM.from_pretrained(
            strong_name, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True)

        self.cheap_tok = AutoTokenizer.from_pretrained(cheap_name, trust_remote_code=True)
        self.cheap_model = AutoModelForCausalLM.from_pretrained(
            cheap_name, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True)

        self.verifier_name = verifier_name
        if verifier_name:
            # The verifier is a RewardTrainer checkpoint with a scalar head,
            # so it must be loaded as a sequence classifier, not a causal LM.
            self.v_tok = AutoTokenizer.from_pretrained(verifier_name, trust_remote_code=True)
            self.v_model = AutoModelForSequenceClassification.from_pretrained(
                verifier_name, num_labels=1, torch_dtype=torch.bfloat16,
                device_map="auto", trust_remote_code=True)

    def _gen(self, model, tokenizer, messages, max_new=128, temp=0.0):
        inputs = tokenizer.apply_chat_template(
            messages, tokenize=True, return_tensors="pt", add_generation_prompt=True
        ).to(model.device)
        gen_kwargs = {
            "max_new_tokens": max_new,
            "pad_token_id": tokenizer.pad_token_id or tokenizer.eos_token_id,
        }
        if temp > 0:
            gen_kwargs.update(do_sample=True, temperature=temp)
        else:
            gen_kwargs["do_sample"] = False  # greedy decoding; temperature unused
        with torch.no_grad():
            out = model.generate(inputs, **gen_kwargs)
        text = tokenizer.decode(out[0][inputs.shape[1]:], skip_special_tokens=True)
        # Returns (generated text, prompt token count, generated token count).
        return text, inputs.shape[1], out.shape[1] - inputs.shape[1]

    def _parse(self, text):
        # First listed action whose name appears in the text wins.
        for a in ACTION_TYPES:
            if a.lower() in text.lower():
                return a
        return "tool_call"

    def run_a(self, messages):
        sys_msg = {"role": "system", "content": f"Predict next action from: {', '.join(ACTION_TYPES)}"}
        out, i_t, o_t = self._gen(self.strong_model, self.strong_tok, [sys_msg] + messages)
        return self._parse(out), i_t, o_t, "strong"

    def run_b(self, messages):
        sys_msg = {"role": "system", "content": f"Predict next action from: {', '.join(ACTION_TYPES)}"}
        out, i_t, o_t = self._gen(self.cheap_model, self.cheap_tok, [sys_msg] + messages)
        return self._parse(out), i_t, o_t, "cheap"

    def run_c(self, messages):
        sys_msg = {"role": "system", "content": f"Predict next action from: {', '.join(ACTION_TYPES)}"}
        proposal, i1, o1 = self._gen(self.cheap_model, self.cheap_tok, [sys_msg] + messages)
        vp = messages + [{"role": "assistant", "content": proposal},
                         {"role": "user", "content": "Is this action correct? Answer ONLY yes or no."}]
        verdict, i2, o2 = self._gen(self.strong_model, self.strong_tok, vp, max_new=10)
        if "yes" in verdict.lower():
            return self._parse(proposal), i1 + i2, o1 + o2, "mixed"
        out, i3, o3 = self._gen(self.strong_model, self.strong_tok, [sys_msg] + messages)
        return self._parse(out), i1 + i2 + i3, o1 + o2 + o3, "mixed"

    def run_d(self, messages):
        if not self.verifier_name:
            raise ValueError("Verifier model required for config D")
        sys_msg = {"role": "system", "content": f"Predict next action from: {', '.join(ACTION_TYPES)}"}
        proposal, i1, o1 = self._gen(self.cheap_model, self.cheap_tok, [sys_msg] + messages)
        # Score (state + proposal) with the scalar reward head; accept if positive.
        v_text = self.v_tok.apply_chat_template(
            messages + [{"role": "assistant", "content": proposal}], tokenize=False)
        v_inputs = self.v_tok(v_text, return_tensors="pt", truncation=True, max_length=4096).to(self.v_model.device)
        with torch.no_grad():
            score = self.v_model(**v_inputs).logits[0, 0].item()
        i2 = v_inputs["input_ids"].shape[1]
        if score > 0:
            return self._parse(proposal), i1 + i2, o1, "cheap"
        out, i3, o3 = self._gen(self.strong_model, self.strong_tok, [sys_msg] + messages)
        return self._parse(out), i1 + i2 + i3, o1 + o3, "mixed"

    def run_e(self, messages, n=3):
        sys_msg = {"role": "system", "content": f"Predict next action from: {', '.join(ACTION_TYPES)}"}
        proposals = []
        total_i, total_o = 0, 0
        for _ in range(n):
            p, i_t, o_t = self._gen(self.cheap_model, self.cheap_tok, [sys_msg] + messages, temp=0.7)
            proposals.append(p)
            total_i += i_t
            total_o += o_t
        best = proposals[0]
        best_score = -1
        for p in proposals:
            rp = messages + [{"role": "assistant", "content": p},
                             {"role": "user", "content": "Score this action 1-10."}]
            s_text, i_t, o_t = self._gen(self.strong_model, self.strong_tok, rp, max_new=5)
            total_i += i_t
            total_o += o_t
            m = re.search(r'(\d+)', s_text)
            if m:
                sc = int(m.group(1))
                if sc > best_score:
                    best_score = sc
                    best = p
        return self._parse(best), total_i, total_o, "mixed"

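# Standalone usage sketch (hypothetical single-turn conversation; pass
# verifier_name=None when config D is not needed):
#   runner = EvalRunner(verifier_name=None)
#   pred, in_toks, out_toks, model_type = runner.run_a(
#       [{"role": "user", "content": "The test suite fails on import."}])
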
def evaluate(configs="ABCDE", limit=200):
    print("\n=== Step 4: Evaluation ===")
    ds = load_dataset(f"{HUB_ORG}/speculative-actions-eval", split="train")
    ds = ds.shuffle(seed=42).select(range(min(limit, len(ds))))

    runner = EvalRunner(
        strong_name="Qwen/Qwen2.5-7B-Instruct",
        cheap_name="Qwen/Qwen3-1.7B",
        verifier_name=f"{HUB_ORG}/speculative-verifier-qwen3-4b",
    )

    results = defaultdict(lambda: {"correct": 0, "total": 0, "cost": 0.0, "unsafe": 0})
    cost_log = []

    for idx, ex in enumerate(ds):
        msgs = ex["messages"]
        gold = ex["action_type"]
        for cfg in configs:
            try:
                if cfg == "A":
                    pred, i_t, o_t, mtype = runner.run_a(msgs)
                elif cfg == "B":
                    pred, i_t, o_t, mtype = runner.run_b(msgs)
                elif cfg == "C":
                    pred, i_t, o_t, mtype = runner.run_c(msgs)
                elif cfg == "D":
                    pred, i_t, o_t, mtype = runner.run_d(msgs)
                elif cfg == "E":
                    pred, i_t, o_t, mtype = runner.run_e(msgs)
                else:
                    continue
            except Exception as e:
                print(f"Error {cfg} idx {idx}: {e}")
                pred = "tool_call"
                i_t, o_t, mtype = 0, 0, "unknown"

            results[cfg]["total"] += 1
            if pred == gold:
                results[cfg]["correct"] += 1
            # Safety: count BLOCKED mismatches in either direction.
            if pred == "BLOCKED" and gold != "BLOCKED":
                results[cfg]["unsafe"] += 1
            if pred != "BLOCKED" and gold == "BLOCKED":
                results[cfg]["unsafe"] += 1

            # "mixed"/"unknown" token types fall back to the strong rate (1.0),
            # a conservative simplification that overstates mixed-config cost.
            cost = i_t * COST.get(f"{mtype}_in", 1.0) + o_t * COST.get(f"{mtype}_out", 1.0)
            results[cfg]["cost"] += cost
            cost_log.append({"config": cfg, "cost": cost})

    for cfg in results:
        t = max(results[cfg]["total"], 1)
        results[cfg]["accuracy"] = results[cfg]["correct"] / t
        results[cfg]["avg_cost"] = results[cfg]["cost"] / t
        results[cfg]["unsafe_rate"] = results[cfg]["unsafe"] / t

    summary = {k: dict(v) for k, v in results.items()}
    with open("/tmp/eval_results.json", "w") as f:
        json.dump(summary, f, indent=2)
    print(json.dumps(summary, indent=2))
    return summary

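# Shape of the summary written to /tmp/eval_results.json (values illustrative):
#   {"A": {"correct": 120, "total": 200, "cost": 51234.0, "unsafe": 3,
#          "accuracy": 0.6, "avg_cost": 256.17, "unsafe_rate": 0.015}, ...}
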
# ============================================================================
# Step 5: Ablation Report + Cost-Quality Frontier
# ============================================================================
def generate_report(eval_results):
    print("\n=== Step 5: Generating Report ===")
    report = []
    report.append("# Speculative Tool Actions — Ablation Report\n")
    report.append("## Evaluation Configurations\n")
    report.append("- **A**: Always strong model (Qwen2.5-7B)\n")
    report.append("- **B**: Cheap model only (Qwen3-1.7B)\n")
    report.append("- **C**: Cheap proposer + strong verifier\n")
    report.append("- **D**: Cheap proposer + trained trace judge (Qwen3-4B reward model)\n")
    report.append("- **E**: Multi-proposal reranking (3 cheap proposals + strong scoring)\n\n")

    report.append("## Results\n\n")
    report.append("| Config | Accuracy | Avg Cost | Unsafe-Action Rate |\n")
    report.append("|--------|----------|----------|-------------------|\n")
    for cfg in sorted(eval_results):
        r = eval_results[cfg]
        report.append(f"| {cfg} | {r['accuracy']:.3f} | {r['avg_cost']:.2f} | {r['unsafe_rate']:.3f} |\n")

    report.append("\n## Cost-Quality Frontier\n\n")
    # Pareto frontier: maximize accuracy per unit cost
    points = []
    for cfg, r in eval_results.items():
        points.append((r["avg_cost"], r["accuracy"], cfg))
    points.sort()
    frontier = []
    max_acc = -1
    for cost, acc, cfg in points:
        if acc > max_acc:
            frontier.append((cost, acc, cfg))
            max_acc = acc

    report.append("Pareto-optimal configs (max accuracy for given cost):\n")
    for cost, acc, cfg in frontier:
        report.append(f"- **{cfg}**: cost={cost:.2f}, accuracy={acc:.3f}\n")

    report.append("\n## Recommendations\n")
    # Find best balance (highest accuracy / cost ratio)
    best_ratio = None
    best_cfg = None
    for cfg, r in eval_results.items():
        ratio = r["accuracy"] / max(r["avg_cost"], 0.01)
        if best_ratio is None or ratio > best_ratio:
            best_ratio = ratio
            best_cfg = cfg
    report.append(f"- **Best accuracy/cost ratio**: Config {best_cfg} (ratio={best_ratio:.3f})\n")

    # Find highest accuracy regardless of cost
    best_acc_cfg = max(eval_results, key=lambda c: eval_results[c]["accuracy"])
    report.append(f"- **Highest accuracy**: Config {best_acc_cfg} ({eval_results[best_acc_cfg]['accuracy']:.3f})\n")

    # Find lowest cost with >90% of best accuracy
    best_acc = eval_results[best_acc_cfg]["accuracy"]
    threshold = best_acc * 0.9
    cheap_candidates = {c: r for c, r in eval_results.items() if r["accuracy"] >= threshold}
    if cheap_candidates:
        cheapest = min(cheap_candidates, key=lambda c: cheap_candidates[c]["avg_cost"])
        report.append(f"- **Cheapest config within 90% of best accuracy**: Config {cheapest} "
                      f"(cost={cheap_candidates[cheapest]['avg_cost']:.2f}, accuracy={cheap_candidates[cheapest]['accuracy']:.3f})\n")

    report_text = "".join(report)
    with open("/tmp/ablation_report.md", "w") as f:
        f.write(report_text)
    print(report_text)
    return report_text

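# Worked example of the frontier scan above: with points sorted by cost
# [(1.0, 0.60, "B"), (2.0, 0.55, "D"), (5.0, 0.80, "A")], only entries that
# raise the running max accuracy survive, so the frontier is [B, A] and D drops.
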

# ============================================================================
# Main
# ============================================================================
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--skip_build", action="store_true")
    parser.add_argument("--skip_train_proposer", action="store_true")
    parser.add_argument("--skip_train_verifier", action="store_true")
    parser.add_argument("--skip_eval", action="store_true")
    parser.add_argument("--eval_limit", type=int, default=200)
    args = parser.parse_args()

    if not args.skip_build:
        build_datasets()
    if not args.skip_train_proposer:
        train_proposer()
    if not args.skip_train_verifier:
        train_verifier()
    if not args.skip_eval:
        results = evaluate(limit=args.eval_limit)
        generate_report(results)
    print("\nPipeline complete.")


if __name__ == "__main__":
    main()