Upload run_eval.py
Browse files- run_eval.py +21 -0
run_eval.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Standalone evaluation runner that works without pip install."""

import argparse
import os
import sys
from pathlib import Path

# Add the directory containing this script to sys.path so that
# `eval_runner` can be imported without the package being pip-installed.
sys.path.insert(0, str(Path(__file__).parent))

from eval_runner import run_evaluation


def main() -> None:
    """Parse command-line arguments and launch the evaluation run.

    Flags:
        --tasks/-n   number of tasks to evaluate (default 1000)
        --seed/-s    random seed for reproducibility (default 42)
        --output/-o  directory where results are written (default ./eval_results)
    """
    parser = argparse.ArgumentParser(description="ACO Evaluation Runner")
    parser.add_argument("--tasks", "-n", type=int, default=1000, help="Number of tasks")
    parser.add_argument("--seed", "-s", type=int, default=42, help="Random seed")
    parser.add_argument("--output", "-o", default="./eval_results", help="Output directory")
    args = parser.parse_args()

    # Positional call order matches run_evaluation(tasks, seed, output)
    # as used by the original script.
    run_evaluation(args.tasks, args.seed, args.output)


if __name__ == "__main__":
    main()
|