"""Gradio Space for Agent Cost Optimizer Dashboard.
This app visualizes cost-quality frontiers from ACO benchmark runs.
If no benchmark data exists, it runs the benchmark on first load.
"""

import json
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict

import gradio as gr


def ensure_data_exists():
    """Run the benchmark if its output files don't exist yet."""
    results_path = Path("eval_results_v2/baseline_results.json")
    report_path = Path("eval_results_v2/report.txt")
    if not results_path.exists() or not report_path.exists():
        print("Benchmark data not found. Running benchmark...")
        try:
            # Run the benchmark generator as a child process.
            result = subprocess.run(
                [sys.executable, "standalone_eval_v2.py", "--tasks", "2000", "--output", "eval_results_v2"],
                capture_output=True, text=True, timeout=120,
            )
            if result.returncode == 0:
                print("Benchmark complete.")
            else:
                print(f"Benchmark exited with code {result.returncode}: {result.stderr}")
        except Exception as e:
            print(f"Benchmark failed: {e}")
    return results_path, report_path


def load_results(path: str) -> Dict[str, Any]:
    with open(path) as f:
        return json.load(f)


def parse_report(report_path: str) -> str:
    with open(report_path) as f:
        return f.read()
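

# Note on input data: the dashboard below assumes each entry of
# baseline_results.json looks roughly like the sketch here. This is
# illustrative only, inferred from the keys this file reads; the actual
# schema is defined by standalone_eval_v2.py.
#
#   "full_optimizer": {
#       "num_tasks": 2000, "num_success": 1500, "num_partial": 200,
#       "avg_cost_success": 0.0123, "total_cost": 24.60,
#       "cost_reduction_vs_frontier": 0.35, "false_done_rate": 0.02,
#       "unsafe_cheap_miss_rate": 0.01, "regression_rate": 0.03,
#       "per_scenario_stats": {"<scenario>": {"count": 100, "success": 80, "cost": 1.23}},
#   }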


def create_frontier_plot(results: Dict[str, Any]):
    """Return (all points, Pareto-frontier points) as rows for the dashboard tables."""
    points = []
    for name, data in results.items():
        success = (data.get("num_success", 0) + data.get("num_partial", 0)) / max(data.get("num_tasks", 1), 1)
        cost = data.get("avg_cost_success", 0)
        points.append({"baseline": name, "success_rate": success, "cost_per_success": cost})
    # Sort by success rate (descending), then cost (ascending), and keep every
    # point that is at least as cheap as all points with a higher success rate.
    points.sort(key=lambda p: (-p["success_rate"], p["cost_per_success"]))
    frontier = []
    min_cost = float("inf")
    for p in points:
        if p["cost_per_success"] <= min_cost:
            frontier.append(p)
            min_cost = p["cost_per_success"]
    return points, frontier
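
# Worked example for create_frontier_plot (illustrative numbers only): given
#   A: 90% / $0.05,  B: 85% / $0.03,  C: 80% / $0.04
# the sort order is [A, B, C]; A and B are kept on the frontier (each is
# cheaper than every point with a higher success rate), while C is dropped
# because B already achieves a higher success rate at lower cost.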


def build_dashboard():
    results_path, report_path = ensure_data_exists()
    if not results_path.exists() or not report_path.exists():
        # Fallback UI when the benchmark could not produce any data.
        with gr.Blocks(title="Agent Cost Optimizer Dashboard") as demo:
            gr.Markdown("# Agent Cost Optimizer Dashboard")
            gr.Markdown("## Benchmark data not available")
            gr.Markdown("Run `python standalone_eval_v2.py --tasks 2000 --output eval_results_v2` to generate data.")
        return demo

    results = load_results(str(results_path))
    report_text = parse_report(str(report_path))
    points, frontier = create_frontier_plot(results)

    with gr.Blocks(title="Agent Cost Optimizer Dashboard") as demo:
        gr.Markdown("# Agent Cost Optimizer - Cost-Quality Dashboard")
        gr.Markdown("Visualize cost-quality tradeoffs across routing strategies and ablations.")

        with gr.Row():
            with gr.Column(scale=2):
                gr.Markdown("## Cost-Quality Frontier")
                gr.Markdown("Success rate (higher is better) vs. average cost per successful task (lower is better).")
                scatter_data = [
                    [p["baseline"], f"{p['success_rate']:.1%}", f"${p['cost_per_success']:.4f}"]
                    for p in points
                ]
                gr.Dataframe(
                    headers=["Baseline", "Success Rate", "Cost per Success"],
                    value=scatter_data,
                    label="All Baselines",
                )
                frontier_data = [
                    [p["baseline"], f"{p['success_rate']:.1%}", f"${p['cost_per_success']:.4f}"]
                    for p in frontier
                ]
                gr.Dataframe(
                    headers=["Baseline", "Success Rate", "Cost per Success"],
                    value=frontier_data,
                    label="Pareto Frontier",
                )
            with gr.Column(scale=1):
                gr.Markdown("## Pareto Frontier Baselines")
                pareto_names = [p["baseline"] for p in frontier]
                for name in pareto_names:
                    gr.Markdown(f"- **{name}**")

        with gr.Row():
            with gr.Column():
                gr.Markdown("## Baseline Comparison")
                comparison_data = []
                for name, data in results.items():
                    comparison_data.append([
                        name,
                        f"{(data.get('num_success', 0) + data.get('num_partial', 0)) / max(data.get('num_tasks', 1), 1):.1%}",
                        f"${data.get('avg_cost_success', 0):.4f}",
                        f"${data.get('total_cost', 0):.2f}",
                        f"{data.get('cost_reduction_vs_frontier', 0):.1%}",
                        f"{data.get('false_done_rate', 0):.1%}",
                        f"{data.get('unsafe_cheap_miss_rate', 0):.1%}",
                        f"{data.get('regression_rate', 0):.1%}",
                    ])
                gr.Dataframe(
                    headers=["Baseline", "Success", "Cost/Success", "Total Cost", "Cost Reduction", "False-DONE", "Cheap Miss", "Regression"],
                    value=comparison_data,
                )

        with gr.Row():
            with gr.Column():
                gr.Markdown("## Per-Scenario Breakdown (Full Optimizer)")
                full_data = results.get("full_optimizer", {})
                scenario_stats = full_data.get("per_scenario_stats", {})
                if scenario_stats:
                    scenario_data = []
                    for scenario, stats in scenario_stats.items():
                        count = stats.get("count", 0)
                        success = stats.get("success", 0)
                        cost = stats.get("cost", 0)
                        scenario_data.append([
                            scenario,
                            str(count),
                            f"{success / max(count, 1):.1%}",
                            f"${cost:.2f}",
                        ])
                    gr.Dataframe(
                        headers=["Scenario", "Count", "Success Rate", "Total Cost"],
                        value=scenario_data,
                    )

        with gr.Row():
            with gr.Column():
                gr.Markdown("## Ablation Impact")
                gr.Markdown("Cost impact when removing each module (vs. full_optimizer).")
                full_cost = results.get("full_optimizer", {}).get("total_cost", 0)
                ablation_data = []
                for name, data in results.items():
                    if name.startswith("no_"):
                        delta = data.get("total_cost", 0) - full_cost
                        pct = (delta / max(full_cost, 0.001)) * 100
                        ablation_data.append([name, f"${delta:.2f}", f"{pct:.1f}%"])
                if ablation_data:
                    # Show the largest cost increase first.
                    ablation_data.sort(key=lambda x: float(x[1].replace("$", "")), reverse=True)
                    gr.Dataframe(
                        headers=["Module Removed", "Cost Increase", "% Increase"],
                        value=ablation_data,
                    )

        with gr.Row():
            with gr.Column():
                gr.Markdown("## Full Report")
                gr.Textbox(report_text, lines=40, label="Benchmark Report", interactive=False)

    return demo
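

# Note (assumption, not from the original source): binding to 0.0.0.0 on port
# 7860 matches the defaults a Hugging Face Space container expects for a Gradio
# app; for a local run, demo.launch() with no arguments also works.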
if __name__ == "__main__":
    demo = build_dashboard()
    demo.launch(server_name="0.0.0.0", server_port=7860)