#!/usr/bin/env python3
"""Train a learned model router for Agent Cost Optimizer."""
import json
import os
import random
from collections import defaultdict

import numpy as np

TASK_TYPES = ["quick_answer", "coding", "research", "document_drafting",
              "legal_regulated", "tool_heavy", "retrieval_heavy",
              "long_horizon", "unknown_ambiguous"]
TT2IDX = {t: i for i, t in enumerate(TASK_TYPES)}

# Keyword lists used by the featurizer below.
CODE_KW = ["python", "javascript", "code", "function", "bug", "debug", "refactor",
           "implement", "test", "compile", "runtime", "class", "module", "async", "thread"]
LEGAL_KW = ["contract", "legal", "compliance", "gdpr", "privacy", "policy",
            "regulatory", "liability"]
RESEARCH_KW = ["research", "find sources", "literature", "investigate", "compare",
               "analyze", "survey"]
TOOL_KW = ["search", "fetch", "retrieve", "query", "api", "database", "scrape", "aggregate"]
LONG_KW = ["plan", "project", "roadmap", "orchestrate", "multi-step", "migrate",
           "pipeline", "deploy"]
MATH_KW = ["calculate", "compute", "solve", "equation", "formula", "optimize", "probability"]

# Per-tier base strength (capability) and relative cost.
TIER_STR = {1: 0.35, 2: 0.55, 3: 0.80, 4: 0.93, 5: 0.97}
TIER_COST = {1: 0.05, 2: 0.15, 3: 0.75, 4: 1.0, 5: 1.5}

TASK_TEMPLATES = {
    "quick_answer": ["What is the capital of France?", "Explain quantum computing briefly.",
                     "What is 237*452?", "Define photosynthesis.", "Who wrote Hamlet?",
                     "What is the speed of light?", "List the primary colors.", "What is GDP?"],
    "coding": ["Write a Python function to reverse a linked list.",
               "Fix the bug in this React component.", "Refactor auth module to JWT.",
               "Implement LRU cache in Go.", "Debug segfault in C++ thread pool.",
               "Add unit tests for the payment module.", "Optimize this SQL query.",
               "Create a REST API for user management.", "Implement binary search in Rust."],
    "research": ["Research latest transformer advances.",
                 "Find sources comparing LoRA and full FT.",
                 "Investigate data center climate impact.",
                 "Survey privacy-preserving ML techniques.",
                 "Compare reinforcement learning algorithms for robotics."],
    "document_drafting": ["Draft project proposal for ML pipeline.",
                          "Write email to team about deployment.",
                          "Create technical report on performance."],
    "legal_regulated": ["Review this contract for liability clauses.",
                        "Check GDPR compliance for data pipeline.",
                        "Draft privacy policy section.",
                        "Verify regulatory compliance for medical device software."],
    "tool_heavy": ["Search open issues and create summary.",
                   "Fetch API docs and generate client code.",
                   "Query Q3 sales and produce chart."],
    "retrieval_heavy": ["Answer based on 50-page document.",
                        "Find all payment processing mentions.",
                        "Retrieve relevant cases for legal query."],
    "long_horizon": ["Plan 3-month roadmap.", "Orchestrate multi-region deployment.",
                     "Redesign data architecture end-to-end.",
                     "Migrate monolith to microservices."],
    "unknown_ambiguous": ["Help me with this thing.",
                          "I need something about the server.",
                          "Can you look into that issue?"],
}

def tsp(tier, diff):
    """Synthetic tier success probability: base strength decays with task difficulty."""
    return TIER_STR[tier] ** (diff * 0.6)
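# Illustrative values of this synthetic success model (an assumed curve for
# trace generation, not measured data): a weak tier collapses on hard tasks
# while the frontier tier stays strong.
#   tsp(1, 1) = 0.35 ** 0.6 ≈ 0.53   # cheap tier, easy task
#   tsp(1, 5) = 0.35 ** 3.0 ≈ 0.04   # cheap tier, hard task
#   tsp(5, 5) = 0.97 ** 3.0 ≈ 0.91   # frontier tier, hard task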
def extract_features(request, task_type, difficulty=3):
    """Keyword and length features plus a one-hot encoding of the task type."""
    r = request.lower()
    f = {
        "req_len": len(request),
        "num_words": len(request.split()),
        "has_code": int(any(k in r for k in CODE_KW)),
        "n_code": sum(1 for k in CODE_KW if k in r),
        "has_legal": int(any(k in r for k in LEGAL_KW)),
        "n_legal": sum(1 for k in LEGAL_KW if k in r),
        "has_research": int(any(k in r for k in RESEARCH_KW)),
        "n_research": sum(1 for k in RESEARCH_KW if k in r),
        "has_tool": int(any(k in r for k in TOOL_KW)),
        "n_tool": sum(1 for k in TOOL_KW if k in r),
        "has_long": int(any(k in r for k in LONG_KW)),
        "has_math": int(any(k in r for k in MATH_KW)),
        "tt_idx": TT2IDX.get(task_type, 8),
        "difficulty": difficulty,
    }
    for tt in TASK_TYPES:
        f[f"tt_{tt}"] = int(task_type == tt)
    return f

def gen_trace(rng):
    """Sample one synthetic routing trace: request, per-tier outcomes, labels."""
    tt = rng.choice(list(TASK_TEMPLATES.keys()))
    diff = {"quick_answer": 1, "document_drafting": 2, "tool_heavy": 2,
            "retrieval_heavy": 2, "research": 3, "coding": 3, "unknown_ambiguous": 3,
            "long_horizon": 4, "legal_regulated": 5}[tt]
    # Simulate whether each tier would succeed on this task.
    tier_out = {t: rng.random() < tsp(t, diff) for t in range(1, 6)}
    # Optimal tier = cheapest tier that succeeds (tier 5 if none do).
    opt = 5
    for t in range(1, 6):
        if tier_out[t]:
            opt = t
            break
    # "Actual" tier mimics an imperfect historical routing policy.
    if diff <= 2:
        actual = rng.choices([1, 2, 3, 4, 5], weights=[3, 4, 2, 1, 0.5])[0]
    elif diff == 3:
        actual = rng.choices([1, 2, 3, 4, 5], weights=[1, 2, 4, 2, 1])[0]
    elif diff == 4:
        actual = rng.choices([1, 2, 3, 4, 5], weights=[0.5, 1, 2, 4, 2])[0]
    else:
        actual = rng.choices([1, 2, 3, 4, 5], weights=[0.2, 0.5, 1, 3, 4])[0]
    outcome = "success" if tier_out[actual] else "failure"
    req = rng.choice(TASK_TEMPLATES[tt])
    feats = extract_features(req, tt, diff)
    return {"feats": feats, "opt": opt, "actual": actual, "outcome": outcome,
            "tier_out": tier_out, "tt": tt, "diff": diff, "req": req}

print("=" * 80)
print("AGENT COST OPTIMIZER - TRAINED ROUTER TRAINING")
print("=" * 80)

# ─── Generate Training Data ──────────────────────────────────────────
print("\n[1] Generating 50K training traces...")
rng = random.Random(42)
traces = [gen_trace(rng) for _ in range(50000)]
print(f"  Generated {len(traces)} traces")

opt_dist = defaultdict(int)
for t in traces:
    opt_dist[t["opt"]] += 1
for k in sorted(opt_dist):
    print(f"  opt_tier={k}: {opt_dist[k]} ({opt_dist[k] / len(traces) * 100:.1f}%)")

# ─── Build Feature Matrix ────────────────────────────────────────────
print("\n[2] Building feature matrix...")

# Freeze the feature key order from the first trace; every vector must use it.
FEAT_KEYS = sorted(traces[0]["feats"].keys())
NUM_FEATURES = len(FEAT_KEYS)
print(f"  Feature count: {NUM_FEATURES}")
print(f"  Features: {FEAT_KEYS}")

def feats_to_vec(feats):
    """Convert a feature dict to a fixed-order vector; missing keys default to 0."""
    return np.array([float(feats.get(k, 0.0)) for k in FEAT_KEYS], dtype=np.float32)

X_all = np.array([feats_to_vec(t["feats"]) for t in traces])
y_opt = np.array([t["opt"] for t in traces])
# Historical-policy labels, kept for reference; not used by the routers below.
y_actual = np.array([t["actual"] for t in traces])
y_outcome = np.array([1 if t["outcome"] == "success" else 0 for t in traces])
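# Quick sanity check of the featurizer and the frozen key order. The sample
# request reuses one of the coding templates above; this only verifies wiring
# ("debug" and "thread" are CODE_KW hits), not model quality.
_demo_feats = extract_features("Debug segfault in C++ thread pool.", "coding")
assert feats_to_vec(_demo_feats).shape == (NUM_FEATURES,)
assert _demo_feats["has_code"] == 1 and _demo_feats["tt_coding"] == 1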
# ─── Per-Tier Success Classifiers ────────────────────────────────────
print("\n[3] Training per-tier P(success|query) classifiers...")
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score

# For each tier, build a binary label: did this tier succeed on the trace?
per_tier_labels = {}
for tier in range(1, 6):
    per_tier_labels[tier] = np.array(
        [1 if t["tier_out"].get(tier, False) else 0 for t in traces]
    )
    succ_rate = per_tier_labels[tier].mean()
    print(f"  Tier {tier}: success rate = {succ_rate:.3f}")

# Split train/test, carrying trace indices along so outcomes can be looked up later.
X_train, X_test, idx_train, idx_test = train_test_split(
    X_all, np.arange(len(traces)), test_size=0.2, random_state=42, stratify=y_opt
)
print(f"\n  Train: {len(X_train)}, Test: {len(X_test)}")

# ─── XGBoost Per-Tier Classifiers ────────────────────────────────────
print("\n[4] Training XGBoost per-tier classifiers...")
import xgboost as xgb

tier_classifiers = {}
for tier in range(1, 6):
    y_train_tier = per_tier_labels[tier][idx_train]
    y_test_tier = per_tier_labels[tier][idx_test]
    clf = xgb.XGBClassifier(
        n_estimators=100,
        max_depth=4,
        learning_rate=0.1,
        subsample=0.8,
        colsample_bytree=0.8,
        objective="binary:logistic",
        eval_metric="logloss",
        random_state=42,
        verbosity=0,
    )
    clf.fit(X_train, y_train_tier)
    y_pred = clf.predict(X_test)
    acc = accuracy_score(y_test_tier, y_pred)
    f1 = f1_score(y_test_tier, y_pred, zero_division=0)
    tier_classifiers[tier] = clf
    print(f"  Tier {tier}: accuracy={acc:.3f}, f1={f1:.3f}")

# ─── CARROT-Style Router Decision ────────────────────────────────────
print("\n[5] Building CARROT-style router...")

def route_carrot(features_vec, tier_clfs, mu=0.7):
    """Route to argmin_tier [mu*(1 - P_success_tier) + (1-mu)*cost_tier].

    mu controls the quality-vs-cost tradeoff:
        mu=1.0: maximize quality only (always frontier)
        mu=0.0: minimize cost only (always cheapest)
        mu=0.7: 70% quality, 30% cost (our default)
    """
    if features_vec.ndim == 1:
        features_vec = features_vec.reshape(1, -1)
    best_tier = 3
    best_score = float("inf")
    for tier in range(1, 6):
        p_success = tier_clfs[tier].predict_proba(features_vec)[0, 1]
        cost_norm = TIER_COST[tier] / TIER_COST[5]  # normalize to [0, 1]
        score = mu * (1.0 - p_success) + (1.0 - mu) * cost_norm
        if score < best_score:
            best_score = score
            best_tier = tier
    return best_tier
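# One-off routing illustration (hedged: the chosen tier depends on the fitted
# classifiers and the RNG seed, so this is a demo, not an expected value).
_demo_x = feats_to_vec(extract_features("What is GDP?", "quick_answer", 1))
print(f"  Demo: route_carrot sends 'What is GDP?' to tier "
      f"{route_carrot(_demo_x, tier_classifiers, mu=0.7)}")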
# Evaluate on test set
print("\n[6] Evaluating CARROT router on test set...")
n_test = len(idx_test)

# The heuristic and frontier baselines do not depend on mu; compute them once.
# Heuristic: route by task type/difficulty (from classifier.py): tier = difficulty + 1.
heuristic_correct = 0
heuristic_cost = 0.0
for i in idx_test:
    t = traces[i]
    h_tier = min(t["diff"] + 1, 5)  # simple: difficulty + 1
    if t["tier_out"].get(h_tier, False):
        heuristic_correct += 1
    heuristic_cost += TIER_COST[h_tier]
h_success = heuristic_correct / n_test
h_avg_cost = heuristic_cost / n_test

# Frontier baseline: always route to tier 4, so success means tier 4 succeeded.
frontier_correct = sum(1 for i in idx_test if traces[i]["tier_out"].get(4, False))
frontier_rate = frontier_correct / n_test
frontier_avg_cost = TIER_COST[4]

# Precompute per-tier success probabilities once; sweeping mu then only
# reweights them (equivalent to calling route_carrot on every test sample).
p_succ = np.stack(
    [tier_classifiers[t].predict_proba(X_test)[:, 1] for t in range(1, 6)], axis=1
)  # shape (n_test, 5)
cost_norms = np.array([TIER_COST[t] / TIER_COST[5] for t in range(1, 6)])

mu_values = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
for mu in mu_values:
    scores = mu * (1.0 - p_succ) + (1.0 - mu) * cost_norms
    pred_tiers = scores.argmin(axis=1) + 1  # first-min ties go to the cheaper tier

    correct = 0
    total_cost = 0.0
    unsafe_misses = 0
    for pred_tier, i in zip(pred_tiers, idx_test):
        t = traces[i]
        pred_tier = int(pred_tier)
        # Check if the predicted tier would succeed on this trace
        would_succeed = t["tier_out"].get(pred_tier, False)
        if would_succeed:
            correct += 1
        total_cost += TIER_COST[pred_tier]
        # Unsafe miss: predicted a cheap tier for a hard task, and it failed
        if pred_tier < t["opt"] and not would_succeed:
            unsafe_misses += 1

    success_rate = correct / n_test
    avg_cost = total_cost / n_test
    unsafe_rate = unsafe_misses / n_test

    print(f"\n  mu={mu:.1f}:")
    print(f"    CARROT:    success={success_rate:.3f}, avg_cost={avg_cost:.4f}, "
          f"unsafe_miss={unsafe_rate:.3f}")
    print(f"    Heuristic: success={h_success:.3f}, avg_cost={h_avg_cost:.4f}")
    print(f"    Frontier:  success={frontier_rate:.3f}, avg_cost={frontier_avg_cost:.4f}")
    print(f"    Cost reduction vs frontier:  {(1 - avg_cost / frontier_avg_cost) * 100:.1f}%")
    print(f"    Cost reduction vs heuristic: {(1 - avg_cost / h_avg_cost) * 100:.1f}%")

# ─── XGBoost Direct Optimal-Tier Classifier ──────────────────────────
print("\n\n[7] Training XGBoost direct optimal-tier classifier...")
y_train_opt = y_opt[idx_train] - 1  # XGBoost expects 0-indexed class labels
y_test_opt = y_opt[idx_test] - 1

direct_clf = xgb.XGBClassifier(
    n_estimators=200,
    max_depth=5,
    learning_rate=0.1,
    subsample=0.8,
    colsample_bytree=0.8,
    objective="multi:softmax",
    num_class=5,  # five tiers, labeled 0-4
    eval_metric="mlogloss",
    random_state=42,
    verbosity=0,
)
direct_clf.fit(X_train, y_train_opt)
y_pred_opt = direct_clf.predict(X_test)
acc_direct = accuracy_score(y_test_opt, y_pred_opt)
print(f"  Direct classifier accuracy: {acc_direct:.3f}")

# Detailed confusion matrix over the 0-indexed tier labels
cm = confusion_matrix(y_test_opt, y_pred_opt, labels=[0, 1, 2, 3, 4])
print("\n  Confusion Matrix (rows=true, cols=predicted):")
print(f"  {'':>10} {'T1':>6} {'T2':>6} {'T3':>6} {'T4':>6} {'T5':>6}")
for i, tier in enumerate([1, 2, 3, 4, 5]):
    row = f"  True T{tier:>1}:"
    for j in range(5):
        row += f" {cm[i][j]:>6}"
    print(row)

# Evaluate direct classifier on test set
print("\n[8] Evaluating direct optimal-tier classifier...")
direct_correct = 0
direct_cost = 0.0
direct_unsafe = 0
direct_underkill = 0
direct_overkill = 0
pred_tiers_direct = direct_clf.predict(X_test).astype(int) + 1  # back to 1-indexed
for pred_tier, test_idx in zip(pred_tiers_direct, idx_test):
    t = traces[test_idx]
    pred_tier = int(pred_tier)
    opt_tier = t["opt"]
    would_succeed = t["tier_out"].get(pred_tier, False)
    if would_succeed:
        direct_correct += 1
    direct_cost += TIER_COST[pred_tier]
    if pred_tier < opt_tier:
        direct_underkill += 1
        if not would_succeed:
            direct_unsafe += 1
    elif pred_tier > opt_tier:
        direct_overkill += 1

print(f"  Success rate: {direct_correct / n_test:.3f}")
print(f"  Avg cost: {direct_cost / n_test:.4f}")
print(f"  Underkill (pred < optimal): {direct_underkill / n_test:.3f}")
print(f"  Overkill  (pred > optimal): {direct_overkill / n_test:.3f}")
print(f"  Unsafe misses: {direct_unsafe / n_test:.3f}")
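# Matching illustration for the direct classifier (hedged: demo output, not a
# fixed expectation; the request string is one of the legal_regulated templates).
_legal_x = feats_to_vec(extract_features(
    "Check GDPR compliance for data pipeline.", "legal_regulated", 5)).reshape(1, -1)
print(f"  Demo: direct classifier routes the GDPR request to tier "
      f"{int(direct_clf.predict(_legal_x)[0]) + 1}")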
# ─── Comparison: All Routers ─────────────────────────────────────────
print("\n\n" + "=" * 80)
print("FINAL COMPARISON: ALL ROUTERS ON TEST SET")
print("=" * 80)

# 1. Always frontier: route every request to tier 4
f_succ = sum(1 for i in idx_test if traces[i]["tier_out"].get(4, False))
f_cost = TIER_COST[4] * n_test

# 2. Always cheapest: route every request to tier 1
c_succ = sum(1 for i in idx_test if traces[i]["tier_out"].get(1, False))
c_cost = TIER_COST[1] * n_test

# 3. Heuristic (difficulty + 1)
h_succ = 0
h_cost = 0.0
for i in idx_test:
    t = traces[i]
    h_tier = min(t["diff"] + 1, 5)
    if t["tier_out"].get(h_tier, False):
        h_succ += 1
    h_cost += TIER_COST[h_tier]

# 4. CARROT (best mu)
best_mu = 0.7
carrot_succ = 0
carrot_cost = 0.0
carrot_unsafe = 0
for i in idx_test:
    t = traces[i]
    pred = route_carrot(feats_to_vec(t["feats"]), tier_classifiers, mu=best_mu)
    would_succeed = t["tier_out"].get(pred, False)
    if would_succeed:
        carrot_succ += 1
    carrot_cost += TIER_COST[pred]
    if pred < t["opt"] and not would_succeed:
        carrot_unsafe += 1

# 5. Direct XGB (reuse the section [8] tallies)
d_succ = direct_correct
d_cost = direct_cost
d_unsafe = direct_unsafe

# 6. Oracle (always picks the optimal tier)
o_succ = sum(1 for i in idx_test if traces[i]["tier_out"].get(traces[i]["opt"], False))
o_cost = sum(TIER_COST[traces[i]["opt"]] for i in idx_test)

print(f"\n{'Router':<20} {'Success':>10} {'AvgCost':>10} {'CostRed':>10} {'Unsafe':>10}")
print("-" * 64)
for name, succ, cost, unsafe in [
    ("always_frontier", f_succ, f_cost, 0),
    ("always_cheap", c_succ, c_cost, 0),
    ("heuristic", h_succ, h_cost, 0),
    (f"CARROT(mu={best_mu})", carrot_succ, carrot_cost, carrot_unsafe),
    ("direct_xgb", d_succ, d_cost, d_unsafe),
    ("oracle", o_succ, o_cost, 0),
]:
    sr = succ / n_test
    ac = cost / n_test
    cr = (1 - cost / f_cost) * 100
    um = unsafe / n_test
    print(f"{name:<20} {sr:>10.3f} {ac:>10.4f} {cr:>9.1f}% {um:>10.3f}")

# ─── Save Models ──────────────────────────────────────────────────────
print("\n\n[9] Saving models...")
os.makedirs("/app/router_models", exist_ok=True)

# Per-tier P(success) classifiers
for tier, clf in tier_classifiers.items():
    clf.save_model(f"/app/router_models/tier_{tier}_success.json")
    print(f"  Saved tier_{tier}_success.json")

# Direct optimal-tier classifier
direct_clf.save_model("/app/router_models/direct_optimal_tier.json")
print("  Saved direct_optimal_tier.json")

# Feature key order (must match at inference time)
with open("/app/router_models/feat_keys.json", "w") as f:
    json.dump(FEAT_KEYS, f)
print(f"  Saved feat_keys.json ({len(FEAT_KEYS)} features)")

# Tier cost/strength config
with open("/app/router_models/tier_config.json", "w") as f:
    json.dump({"tier_cost": TIER_COST, "tier_str": TIER_STR}, f)
print("  Saved tier_config.json")

print("\n\nDONE! Models saved to /app/router_models/")
print("Next step: integrate trained router into ACO ModelCascadeRouter._route_learned()")
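# A minimal sketch of what _route_learned() could do at inference time with the
# artifacts saved above (hedged: ModelCascadeRouter lives in ACO and its actual
# signature is assumed here; extract_features/route_carrot would need to be
# ported or imported alongside FEAT_KEYS):
#
#     feat_keys = json.load(open("/app/router_models/feat_keys.json"))
#     clfs = {}
#     for tier in range(1, 6):
#         c = xgb.XGBClassifier()
#         c.load_model(f"/app/router_models/tier_{tier}_success.json")
#         clfs[tier] = c
#     feats = extract_features(request, task_type, difficulty)
#     x = np.array([[float(feats.get(k, 0.0)) for k in feat_keys]], dtype=np.float32)
#     chosen_tier = route_carrot(x, clfs, mu=0.7)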