"""Main evaluation runner for P2P StableToolBench experiments. Orchestrates: load queries -> load P2P data -> run ReAct inference -> save results. Usage: python run_eval.py --condition baseline --groups G1_instruction --max_queries 5 python run_eval.py --condition all --groups all python run_eval.py --condition p2p_desc --p2p_desc_dir /path/to/your/descriptions """ import os, sys, json, time, argparse from typing import Dict, List, Any, Optional from config import (TOOL_ROOT_DIR, SOLVABLE_QUERIES_DIR, OUTPUT_DIR, P2P_DESCRIPTIONS_DIR, P2P_EXAMPLES_DIR, TASK_MODEL, TEMPERATURE, ALL_GROUPS, CONDITION_NAMES, API_SERVER_URL, API_SERVER_PORT) from tool_utils import load_query_data, load_p2p_descriptions, load_p2p_examples from prompt_builder import build_initial_messages, get_condition_config, gather_examples_for_query from llm_client import LLMClient from react_loop import ReActRunner def run_condition(condition, group, llm, max_queries=None, output_dir=OUTPUT_DIR, p2p_descriptions=None, p2p_examples=None, service_url=None): config = get_condition_config(condition, p2p_descriptions, p2p_examples) query_path = os.path.join(SOLVABLE_QUERIES_DIR, f"{group}.json") if not os.path.exists(query_path): print(f"Query file not found: {query_path}"); return {} custom_descs = config["custom_descriptions"] if config["use_custom_descriptions"] else None queries = load_query_data(query_path, TOOL_ROOT_DIR, custom_descriptions=custom_descs) if max_queries: queries = queries[:max_queries] print(f"\n{'='*60}\nCondition: {condition} | Group: {group} | Queries: {len(queries)}\n{'='*60}") condition_dir = os.path.join(output_dir, condition, group) os.makedirs(condition_dir, exist_ok=True) results = [] for i, query_data in enumerate(queries): query_id = query_data["query_id"] output_file = os.path.join(condition_dir, f"{query_id}_CoT@1.json") if os.path.exists(output_file): print(f" [{i+1}/{len(queries)}] Query {query_id}: already done, skipping") with open(output_file) as f: result = json.load(f) results.append(result); continue print(f" [{i+1}/{len(queries)}] Query {query_id}: {query_data['query'][:80]}...") examples = None if config["use_examples"] and config["examples"]: examples = gather_examples_for_query(query_data["tool_names"], query_data["api_name_reflect"], query_data["functions"], config["examples"], max_per_tool=1) messages = build_initial_messages(query=query_data["query"], tool_descriptions=query_data["tool_descriptions"], examples=examples) runner = ReActRunner(llm=llm, functions=query_data["functions"], tool_descriptions=query_data["tool_descriptions"], api_name_reflect=query_data["api_name_reflect"], tool_names=query_data["tool_names"], cate_names=query_data["cate_names"], service_url=service_url) result = runner.run(messages) result["query"], result["query_id"], result["condition"], result["group"] = query_data["query"], query_id, condition, group with open(output_file, "w") as f: save_data = {"query_id": query_id, "query": query_data["query"], "condition": condition, "group": group, "success": result["success"], "final_answer": result["final_answer"], "give_up": result["give_up"], "total_tokens": result["total_tokens"], "query_count": result["query_count"], "steps": result["steps"], "trajectory": result["trajectory"], "available_tools": [f_["function"]["name"] for f_ in query_data["functions"]]} json.dump(save_data, f, indent=2) results.append(result) status = "solved" if result["success"] else ("gave up" if result["give_up"] else "failed") print(f" {status} | steps={result['steps']} | 
def main():
    parser = argparse.ArgumentParser(description="P2P StableToolBench Evaluation")
    parser.add_argument("--condition", type=str, default="baseline",
                        choices=CONDITION_NAMES + ["all"])
    parser.add_argument("--groups", type=str, nargs="+", default=["G1_instruction"])
    parser.add_argument("--max_queries", type=int, default=None)
    parser.add_argument("--model", type=str, default=TASK_MODEL)
    parser.add_argument("--vllm_url", type=str, default="http://localhost:8000/v1")
    parser.add_argument("--api_server_url", type=str, default=None)
    parser.add_argument("--temperature", type=float, default=TEMPERATURE)
    parser.add_argument("--output_dir", type=str, default=OUTPUT_DIR)
    parser.add_argument("--p2p_desc_dir", type=str, default=P2P_DESCRIPTIONS_DIR)
    parser.add_argument("--p2p_examples_dir", type=str, default=P2P_EXAMPLES_DIR)
    args = parser.parse_args()

    groups = ALL_GROUPS if "all" in args.groups else args.groups
    conditions = CONDITION_NAMES if args.condition == "all" else [args.condition]
    service_url = args.api_server_url or f"{API_SERVER_URL}:{API_SERVER_PORT}/virtual"

    print(f"Connecting to vLLM at {args.vllm_url}\nModel: {args.model}")
    llm = LLMClient(model=args.model, base_url=args.vllm_url, temperature=args.temperature)

    # P2P artifacts are only loaded for the conditions that use them.
    p2p_descriptions, p2p_examples = None, None
    if any(c in conditions for c in ["p2p_desc", "p2p_full"]):
        p2p_descriptions = load_p2p_descriptions(args.p2p_desc_dir)
        print(f"  Loaded {len(p2p_descriptions)} descriptions")
    if any(c in conditions for c in ["p2p_demo", "p2p_full"]):
        p2p_examples = load_p2p_examples(args.p2p_examples_dir)
        print(f"  Loaded examples for {len(p2p_examples)} tools")

    all_summaries = []
    for condition in conditions:
        for group in groups:
            summary = run_condition(condition=condition, group=group, llm=llm,
                                    max_queries=args.max_queries,
                                    output_dir=args.output_dir,
                                    p2p_descriptions=p2p_descriptions,
                                    p2p_examples=p2p_examples,
                                    service_url=service_url)
            all_summaries.append(summary)

    print("\n" + "=" * 80 + "\nFINAL RESULTS\n" + "=" * 80)
    print(f"{'Condition':<15} {'Group':<20} {'Solved':>8} {'Total':>8} {'Pass Rate':>10}")
    print("-" * 65)
    for s in all_summaries:
        if s:  # skip empty summaries from missing query files
            print(f"{s['condition']:<15} {s['group']:<20} {s['solved']:>8} "
                  f"{s['total_queries']:>8} {s['pass_rate']:>9.1f}%")

    os.makedirs(args.output_dir, exist_ok=True)
    with open(os.path.join(args.output_dir, "all_summaries.json"), "w") as f:
        json.dump(all_summaries, f, indent=2)
    print(f"\nResults saved to {args.output_dir}")


if __name__ == "__main__":
    main()
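

# --- Illustrative post-processing sketch (not invoked by this script) ---
# all_summaries.json, written above, is a flat list of per-(condition, group)
# summaries. The sketch below shows one stdlib-only way to pivot it into a
# condition x group pass-rate table; `print_pass_rate_table` is a name
# introduced here for illustration. It sits below the __main__ guard so script
# behavior is unchanged; use it via:
#   python -c "import run_eval; run_eval.print_pass_rate_table('output')"
def print_pass_rate_table(output_dir: str = OUTPUT_DIR) -> None:
    """Print pass rates as a condition x group table from all_summaries.json."""
    with open(os.path.join(output_dir, "all_summaries.json")) as f:
        summaries = [s for s in json.load(f) if s]  # drop empty entries
    group_names = sorted({s["group"] for s in summaries})
    print(f"{'Condition':<15}" + "".join(f"{g:>20}" for g in group_names))
    for cond in sorted({s["condition"] for s in summaries}):
        by_group = {s["group"]: s["pass_rate"] for s in summaries if s["condition"] == cond}
        cells = "".join(f"{by_group[g]:>19.1f}%" if g in by_group else f"{'-':>20}"
                        for g in group_names)
        print(f"{cond:<15}" + cells)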