narcolepticchicken committed on
Commit 996a6e0 · verified · 1 Parent(s): 6dd90f4

Upload training/router_v5_calibrated.py with huggingface_hub

Files changed (1)
  1. training/router_v5_calibrated.py +393 -0
training/router_v5_calibrated.py ADDED
@@ -0,0 +1,393 @@
#!/usr/bin/env python3
"""Trained Router v5: Calibrated + per-task thresholds + oversampled training.

Key improvements over v4:
1. Probability calibration (isotonic regression) on held-out data
2. Per-task-type escalation thresholds
3. Oversampled easy-task successes in training data
"""
import json, os, random
import numpy as np
from collections import defaultdict

TASK_TYPES = ["quick_answer","coding","research","document_drafting",
              "legal_regulated","tool_heavy","retrieval_heavy",
              "long_horizon","unknown_ambiguous"]
TT2IDX = {t:i for i,t in enumerate(TASK_TYPES)}

CODE_KW = ["python","javascript","code","function","bug","debug","refactor",
           "implement","test","compile","runtime","class","module","async","thread"]
LEGAL_KW = ["contract","legal","compliance","gdpr","privacy","policy","regulatory","liability"]
RESEARCH_KW = ["research","find sources","literature","investigate","compare","analyze","survey"]
TOOL_KW = ["search","fetch","retrieve","query","api","database","scrape","aggregate"]
LONG_KW = ["plan","project","roadmap","orchestrate","multi-step","migrate","pipeline","deploy"]
MATH_KW = ["calculate","compute","solve","equation","formula","optimize","probability"]

TIER_STR = {1:0.35,2:0.55,3:0.80,4:0.93,5:0.97}
TIER_COST = {1:0.05,2:0.15,3:0.75,4:1.0,5:1.5}
TASK_FLOOR = {"legal_regulated":4,"long_horizon":3,"research":3,"coding":3,
              "unknown_ambiguous":3,"quick_answer":1,"document_drafting":2,
              "tool_heavy":2,"retrieval_heavy":2}

# Per-task-type escalation thresholds (lower = more aggressive cost savings)
TASK_THRESHOLDS = {
    "quick_answer": 0.35,       # Easy tasks: low threshold, use cheap models
    "document_drafting": 0.45,  # Medium-easy tasks
    "tool_heavy": 0.45,         # Tool orchestration, not deep reasoning
    "retrieval_heavy": 0.45,    # Retrieval-heavy, moderate reasoning
    "coding": 0.55,             # Coding needs decent models
    "research": 0.55,           # Research needs good models
    "unknown_ambiguous": 0.60,  # Unknown = be careful
    "long_horizon": 0.60,       # Long horizon = be careful
    "legal_regulated": 0.75,    # Legal = always verify, escalate aggressively
}

TASK_TEMPLATES = {
    "quick_answer":["What is the capital of France?","Explain quantum computing briefly.",
                    "What is 237*452?","Define photosynthesis.","Who wrote Hamlet?",
                    "What is the speed of light?","List the primary colors.","What is GDP?",
                    "What is 2+2?","Name the planets in the solar system."],
    "coding":["Write a Python function to reverse a linked list.",
              "Fix the bug in this React component.","Refactor auth module to JWT.",
              "Implement LRU cache in Go.","Debug segfault in C++ thread pool.",
              "Add unit tests for the payment module.","Optimize this SQL query.",
              "Create a REST API for user management.","Implement binary search in Rust.",
              "Write a fibonacci function with memoization."],
    "research":["Research latest transformer advances.",
                "Find sources comparing LoRA and full FT.",
                "Investigate data center climate impact.",
                "Survey privacy-preserving ML techniques.",
                "Compare reinforcement learning algorithms for robotics.",
                "Analyze recent papers on mixture of experts."],
    "document_drafting":["Draft project proposal for ML pipeline.",
                         "Write email to team about deployment.","Create technical report on performance.",
                         "Write a project brief for the migration.","Draft meeting agenda."],
    "legal_regulated":["Review this contract for liability clauses.",
                       "Check GDPR compliance for data pipeline.","Draft privacy policy section.",
                       "Verify regulatory compliance for medical device software.",
                       "Analyze indemnification clause in vendor agreement."],
    "tool_heavy":["Search open issues and create summary.",
                  "Fetch API docs and generate client code.","Query Q3 sales and produce chart.",
                  "Aggregate logs from 3 services."],
    "retrieval_heavy":["Answer based on 50-page document.",
                       "Find all payment processing mentions.","Retrieve relevant cases for legal query.",
                       "Summarize findings from quarterly report."],
    "long_horizon":["Plan 3-month roadmap.","Orchestrate multi-region deployment.",
                    "Redesign data architecture end-to-end.","Migrate monolith to microservices."],
    "unknown_ambiguous":["Help me with this thing.",
                         "I need something about the server.","Can you look into that issue?",
                         "There's a problem with the data."],
}

def tsp(tier, diff):
    return TIER_STR[tier] ** (diff * 0.6)
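
# Worked example of the synthetic tier-success model above (illustrative
# arithmetic from TIER_STR, not measured data):
#   tsp(1, 3) = 0.35 ** 1.8 ≈ 0.15   -> a tier-1 model rarely solves a difficulty-3 task
#   tsp(3, 3) = 0.80 ** 1.8 ≈ 0.67
#   tsp(4, 3) = 0.93 ** 1.8 ≈ 0.88
#   tsp(5, 5) = 0.97 ** 3.0 ≈ 0.91   -> even the top tier can fail the hardest tasks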

def extract_features(request, task_type, difficulty=3):
    r = request.lower()
    f = {"req_len":len(request),"num_words":len(request.split()),
         "has_code":int(any(k in r for k in CODE_KW)),
         "n_code":sum(1 for k in CODE_KW if k in r),
         "has_legal":int(any(k in r for k in LEGAL_KW)),
         "n_legal":sum(1 for k in LEGAL_KW if k in r),
         "has_research":int(any(k in r for k in RESEARCH_KW)),
         "n_research":sum(1 for k in RESEARCH_KW if k in r),
         "has_tool":int(any(k in r for k in TOOL_KW)),
         "n_tool":sum(1 for k in TOOL_KW if k in r),
         "has_long":int(any(k in r for k in LONG_KW)),
         "has_math":int(any(k in r for k in MATH_KW)),
         "tt_idx":TT2IDX.get(task_type,8),"difficulty":difficulty}
    for tt in TASK_TYPES:
        f[f"tt_{tt}"] = int(task_type == tt)
    return f
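
# Note: keyword features are plain substring matches on the lowercased request,
# so e.g. "debugging" hits both "bug" and "debug"; the tt_* one-hot features
# duplicate tt_idx, giving the trees two encodings of the task type.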

def gen_trace(idx, rng, oversample_easy=False):
    tt = rng.choice(list(TASK_TEMPLATES.keys()))
    diff = {"quick_answer":1,"document_drafting":2,"tool_heavy":2,"retrieval_heavy":2,
            "research":3,"coding":3,"unknown_ambiguous":3,"long_horizon":4,"legal_regulated":5}[tt]
    tier_out = {t: rng.random() < tsp(t, diff) for t in range(1,6)}
    opt = 5
    for t in range(1,6):
        if tier_out[t]: opt = t; break

    # When oversampling: bias actual_tier toward optimal to create more success examples
    if oversample_easy and opt <= 2:
        actual = rng.choices([1,2,3,4,5], weights=[2,5,2,1,0.5])[0]
    elif oversample_easy and opt <= 3:
        actual = rng.choices([1,2,3,4,5], weights=[0.5,1,5,2,0.5])[0]
    else:
        if diff <= 2:
            actual = rng.choices([1,2,3,4,5],weights=[3,4,2,1,0.5])[0]
        elif diff == 3:
            actual = rng.choices([1,2,3,4,5],weights=[1,2,4,2,1])[0]
        elif diff == 4:
            actual = rng.choices([1,2,3,4,5],weights=[0.5,1,2,4,2])[0]
        else:
            actual = rng.choices([1,2,3,4,5],weights=[0.2,0.5,1,3,4])[0]

    outcome = "success" if tier_out[actual] else "failure"
    req = rng.choice(TASK_TEMPLATES[tt])
    feats = extract_features(req, tt, diff)
    return {"feats":feats,"opt":opt,"actual":actual,"outcome":outcome,
            "tier_out":tier_out,"tt":tt,"diff":diff,"req":req}

print("="*80)
print("ACO TRAINED ROUTER v5: CALIBRATED + PER-TASK THRESHOLDS")
print("="*80)

# ─── Generate Training Data with Oversampling ────────────────────────
print("\n[1] Generating 60K training traces (with easy-task oversampling)...")
rng = random.Random(42)

# Base 40K traces
traces = [gen_trace(i, rng, oversample_easy=False) for i in range(40000)]
# Add 20K oversampled easy-task traces
traces += [gen_trace(i+40000, rng, oversample_easy=True) for i in range(20000)]

print(f" Total: {len(traces)} traces")

# Check success rate per tier
for tier in range(1, 6):
    succ = sum(1 for t in traces if t["tier_out"].get(tier, False))
    print(f" Tier {tier}: success rate = {succ/len(traces):.3f}")

# ─── Build Feature Matrix ────────────────────────────────────────────
FEAT_KEYS = sorted(traces[0]["feats"].keys())
def f2v(feats):
    return np.array([float(feats.get(k, 0.0)) for k in FEAT_KEYS], dtype=np.float32)

X_all = np.array([f2v(t["feats"]) for t in traces])
y_opt = np.array([t["opt"] for t in traces])

per_tier_labels = {}
for tier in range(1, 6):
    per_tier_labels[tier] = np.array([1 if t["tier_out"].get(tier, False) else 0 for t in traces])

from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, brier_score_loss
import xgboost as xgb

X_train, X_test, idx_train, idx_test = train_test_split(
    X_all, np.arange(len(traces)), test_size=0.2, random_state=42, stratify=y_opt
)
print(f"\n Train: {len(X_train)}, Test: {len(X_test)}")

# ─── Train + Calibrate Per-Tier Classifiers ──────────────────────────
print("\n[2] Training + calibrating per-tier P(success) classifiers...")
tier_clfs = {}
tier_calibrators = {}

for tier in range(1, 6):
    y_tr = per_tier_labels[tier][idx_train]
    y_te = per_tier_labels[tier][idx_test]

    neg = (y_tr == 0).sum()
    pos = (y_tr == 1).sum()
    spw = neg / max(pos, 1)

    # Train XGBoost
    clf = xgb.XGBClassifier(
        n_estimators=200, max_depth=5, learning_rate=0.1,
        subsample=0.8, colsample_bytree=0.8,
        scale_pos_weight=min(spw, 5.0),
        objective="binary:logistic", eval_metric="logloss",
        random_state=42, verbosity=0,
    )
    clf.fit(X_train, y_tr)

    # Probability calibration
    from sklearn.isotonic import IsotonicRegression

    # Get raw probabilities on the held-out set for calibration
    y_prob_raw = clf.predict_proba(X_test)[:, 1]

    # Use isotonic regression for calibration (works better than Platt for small datasets)
    iso_reg = IsotonicRegression(out_of_bounds="clip")
    iso_reg.fit(y_prob_raw, y_te)

    # Evaluate calibration
    y_prob_cal = iso_reg.transform(y_prob_raw)
    brier_raw = brier_score_loss(y_te, y_prob_raw)
    brier_cal = brier_score_loss(y_te, y_prob_cal)

    acc = accuracy_score(y_te, clf.predict(X_test))
    f1 = f1_score(y_te, clf.predict(X_test), zero_division=0)

    tier_clfs[tier] = clf
    tier_calibrators[tier] = iso_reg
    print(f" Tier {tier}: acc={acc:.3f}, f1={f1:.3f}, brier_raw={brier_raw:.3f}, brier_cal={brier_cal:.3f}")
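
# Note: each tier's isotonic calibrator is fit on the same held-out split that
# brier_cal is reported on, so the calibrated Brier score is in-sample for the
# calibrator and will look somewhat optimistic.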

# ─── Calibrated Router ────────────────────────────────────────────────
print("\n[3] Building calibrated per-task-threshold router...")

def route_calibrated(request, task_type, difficulty):
    """Calibrated router with per-task thresholds."""
    base_tier = min(difficulty + 1, 5)
    floor = TASK_FLOOR.get(task_type, 2)
    base_tier = max(base_tier, floor)

    feats = extract_features(request, task_type, difficulty)
    x = f2v(feats).reshape(1, -1)

    # Get CALIBRATED P(success) at base_tier
    p_raw = tier_clfs[base_tier].predict_proba(x)[0, 1]
    p_success = float(tier_calibrators[base_tier].transform([p_raw])[0])

    # Per-task threshold
    threshold = TASK_THRESHOLDS.get(task_type, 0.55)

    # Escalate if calibrated probability too low
    while p_success < threshold and base_tier < 5:
        base_tier += 1
        p_raw = tier_clfs[base_tier].predict_proba(x)[0, 1]
        p_success = float(tier_calibrators[base_tier].transform([p_raw])[0])

    return base_tier
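
# Illustrative starting points implied by the constants above (no model calls,
# just the difficulty/floor arithmetic and TASK_THRESHOLDS):
#   quick_answer, difficulty 1    -> base_tier = max(min(2,5), 1) = 2, threshold 0.35
#   coding, difficulty 3          -> base_tier = max(min(4,5), 3) = 4, threshold 0.55
#   legal_regulated, difficulty 5 -> base_tier = max(min(6,5), 4) = 5, threshold 0.75 (already top tier)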

# ─── Evaluate ─────────────────────────────────────────────────────────
print("\n[4] Evaluating all routers on 2K eval traces (seed=999)...")

eval_rng = random.Random(999)
eval_traces = []
for i in range(2000):
    tt = eval_rng.choice(list(TASK_TEMPLATES.keys()))
    diff = {"quick_answer":1,"document_drafting":2,"tool_heavy":2,"retrieval_heavy":2,
            "research":3,"coding":3,"unknown_ambiguous":3,"long_horizon":4,"legal_regulated":5}[tt]
    tier_out = {t: eval_rng.random() < tsp(t, diff) for t in range(1,6)}
    opt = 5
    for t in range(1,6):
        if tier_out[t]: opt = t; break
    req = eval_rng.choice(TASK_TEMPLATES[tt])
    eval_traces.append({"tt":tt,"diff":diff,"opt":opt,"tier_out":tier_out,"req":req})

print(f" Generated {len(eval_traces)} eval traces")

def eval_router(name, route_fn):
    succ=0; cost=0.0; unsafe=0; fd=0; td=defaultdict(int)
    for t in eval_traces:
        pred = route_fn(t)
        td[pred] += 1
        if t["tier_out"].get(pred, False): succ += 1
        elif pred < t["opt"]: unsafe += 1
        else: fd += 1
        cost += TIER_COST[pred]
    n = len(eval_traces)
    return {"success":succ/n, "avg_cost":cost/n, "unsafe_rate":unsafe/n,
            "false_done":fd/n, "tier_dist":dict(td)}

results = {}
results["always_frontier"] = eval_router("always_frontier", lambda t: 4)
results["always_cheap"] = eval_router("always_cheap", lambda t: 1)
results["heuristic_diff+1"] = eval_router("heuristic_diff+1", lambda t: min(t["diff"]+1, 5))
results["heuristic_floor"] = eval_router("heuristic_floor", lambda t: TASK_FLOOR.get(t["tt"], 2))
results["oracle"] = eval_router("oracle", lambda t: t["opt"])
# results["v4_prod_t0.55"] = eval_router("v4_prod_t0.55",
#                                        lambda t: route_v4(t, 0.55))
results["v5_calibrated"] = eval_router("v5_calibrated",
                                       lambda t: route_calibrated(t["req"], t["tt"], t["diff"]))

# v4 router for comparison
def route_v4(t, threshold):
    base = min(t["diff"]+1, 5)
    floor = TASK_FLOOR.get(t["tt"], 2)
    base = max(base, floor)
    feats = extract_features(t["req"], t["tt"], t["diff"])
    x = f2v(feats).reshape(1, -1)
    ps = tier_clfs[base].predict_proba(x)[0, 1]
    while ps < threshold and base < 5:
        base += 1
        ps = tier_clfs[base].predict_proba(x)[0, 1]
    return base

# Print
print(f"\n{'Router':<25} {'Success':>10} {'AvgCost':>10} {'CostRed':>10} {'Unsafe':>10} {'F-DONE':>10}")
print("-"*75)
fc = results["always_frontier"]["avg_cost"]
for name, r in sorted(results.items(), key=lambda x: (-x[1]["success"], x[1]["avg_cost"])):
    cr = (1 - r["avg_cost"]/fc)*100
    print(f"{name:<25} {r['success']:>10.3f} {r['avg_cost']:>10.4f} {cr:>9.1f}% {r['unsafe_rate']:>10.3f} {r['false_done']:>10.3f}")

# Per-task breakdown
print(f"\n\n[5] Per-task-type breakdown (calibrated v5 vs frontier vs heuristic)...")
for tt in sorted(set(t["tt"] for t in eval_traces)):
    tt_traces = [t for t in eval_traces if t["tt"] == tt]
    n_tt = len(tt_traces)
    if n_tt == 0: continue
    print(f"\n {tt} (n={n_tt}):")
    for rname, rfn in [("frontier", lambda t:4),
                       ("heuristic", lambda t:min(t["diff"]+1,5)),
                       ("calibrated", lambda t:route_calibrated(t["req"],t["tt"],t["diff"])),
                       ("oracle", lambda t:t["opt"])]:
        succ = sum(1 for t in tt_traces if t["tier_out"].get(rfn(t), False))
        cost = sum(TIER_COST[rfn(t)] for t in tt_traces)
        sr = succ/n_tt; ac = cost/n_tt
        cr = (1 - ac/fc)*100
        print(f" {rname:<12} success={sr:.3f} cost={ac:.4f} costRed={cr:.1f}%")

# ─── Pareto Frontier ──────────────────────────────────────────────────
print(f"\n\n[6] Pareto frontier analysis...")
pareto = []
for name, r in results.items():
    if name == "always_cheap": continue
    dominated = False
    for name2, r2 in results.items():
        if name == name2: continue
        if r2["success"] >= r["success"] and r2["avg_cost"] <= r["avg_cost"]:
            if r2["success"] > r["success"] or r2["avg_cost"] < r["avg_cost"]:
                dominated = True; break
    if not dominated:
        pareto.append((name, r))
        cr = (1 - r["avg_cost"]/fc)*100
        print(f" {name:<25} success={r['success']:.3f} cost={r['avg_cost']:.4f} costRed={cr:.1f}% unsafe={r['unsafe_rate']:.3f}")

# ─── Save Final Production Model ──────────────────────────────────────
print("\n\n[7] Saving final production model bundle...")
os.makedirs("/app/router_models", exist_ok=True)

import pickle

bundle = {
    "tier_clfs": {str(k): v for k, v in tier_clfs.items()},
    "tier_calibrators": {str(k): v for k, v in tier_calibrators.items()},
    "feat_keys": FEAT_KEYS,
    "tier_config": {
        "tier_cost": TIER_COST,
        "tier_str": TIER_STR,
        "task_floor": TASK_FLOOR,
        "task_thresholds": TASK_THRESHOLDS,
    },
    "version": "5.0",
    "description": "ACO Production Router v5: calibrated + per-task thresholds + oversampled",
}

with open("/app/router_models/router_bundle_v5.pkl", "wb") as f:
    pickle.dump(bundle, f)
print(f" Saved router_bundle_v5.pkl ({os.path.getsize('/app/router_models/router_bundle_v5.pkl')/1024:.0f} KB)")

# Also save individual files
for tier in range(1, 6):
    tier_clfs[tier].save_model(f"/app/router_models/v5_tier_{tier}_success.json")
with open("/app/router_models/v5_feat_keys.json","w") as f:
    json.dump(FEAT_KEYS, f)
with open("/app/router_models/v5_tier_config.json","w") as f:
    json.dump(bundle["tier_config"], f, indent=2)
with open("/app/router_models/v5_calibrators.pkl","wb") as f:
    pickle.dump(tier_calibrators, f)

# Save eval results
with open("/app/router_models/v5_eval_results.json","w") as f:
    json.dump(results, f, indent=2, default=str)
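
# ─── Usage Sketch (illustrative) ──────────────────────────────────────
# A minimal sketch of how the saved bundle could be consumed at inference time.
# `demo_route` is a hypothetical helper added here for illustration; it mirrors
# route_calibrated above but works from the reloaded pickle (string tier keys).
with open("/app/router_models/router_bundle_v5.pkl", "rb") as f:
    demo_bundle = pickle.load(f)

def demo_route(request, task_type, difficulty):
    clfs = demo_bundle["tier_clfs"]
    cals = demo_bundle["tier_calibrators"]
    cfg = demo_bundle["tier_config"]
    # Same base-tier heuristic and per-task threshold as route_calibrated
    tier = max(min(difficulty + 1, 5), cfg["task_floor"].get(task_type, 2))
    thr = cfg["task_thresholds"].get(task_type, 0.55)
    x = f2v(extract_features(request, task_type, difficulty)).reshape(1, -1)
    p = float(cals[str(tier)].transform([clfs[str(tier)].predict_proba(x)[0, 1]])[0])
    while p < thr and tier < 5:
        tier += 1
        p = float(cals[str(tier)].transform([clfs[str(tier)].predict_proba(x)[0, 1]])[0])
    return tier, p

demo_tier, demo_p = demo_route("Write a Python function to reverse a linked list.", "coding", 3)
print(f"\nUsage sketch: demo coding request routed to tier {demo_tier} (calibrated P(success)={demo_p:.2f})")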

print(f"\n\n{'='*80}")
print("FINAL v5 COMPARISON")
print(f"{'='*80}")
print(f"\n{'Router':<25} {'Success':>10} {'AvgCost':>10} {'CostRed':>10} {'Unsafe':>10} {'F-DONE':>10}")
print("-"*75)
for name, r in sorted(results.items(), key=lambda x: (-x[1]["success"], x[1]["avg_cost"])):
    cr = (1 - r["avg_cost"]/fc)*100
    print(f"{name:<25} {r['success']:>10.3f} {r['avg_cost']:>10.4f} {cr:>9.1f}% {r['unsafe_rate']:>10.3f} {r['false_done']:>10.3f}")

print(f"\nDONE!")