# Source: lfj-code / transfer / code / prompt_selection / aggregate_results.py
# Uploaded via huggingface_hub (ethan1115), revision 0161e74 (verified).
#!/usr/bin/env python3
"""Aggregate cell-eval comparison results across all perturbation conditions.
Reads per-perturbation comparison_mean.csv files and produces:
1. all_comparison.csv — full table with perturbation column
2. Summary statistics printed to stdout
Usage:
python code/prompt_selection/aggregate_results.py
"""
from __future__ import annotations
import logging
import sys
from pathlib import Path

# Make the package root importable when this file is executed directly as a
# script (python code/prompt_selection/aggregate_results.py) rather than as an
# installed package — otherwise `from prompt_selection import ...` would fail.
_THIS_DIR = Path(__file__).resolve().parent
if str(_THIS_DIR.parent) not in sys.path:
    sys.path.insert(0, str(_THIS_DIR.parent))

# These imports rely on the sys.path adjustment above, so they must come after it.
import pandas as pd
from prompt_selection import config as cfg

# Timestamped, leveled log lines for this script and any libraries using logging.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
)
LOGGER = logging.getLogger("aggregate_results")
def main():
all_dfs = []
for pert_name in cfg.ALL_PERTURBATIONS:
pcfg = cfg.get_pert_config(pert_name)
csv_path = pcfg.eval_dir / "comparison_mean.csv"
if not csv_path.exists():
LOGGER.warning("No comparison_mean.csv for %s, skipping.", pert_name)
continue
df = pd.read_csv(csv_path)
df["perturbation"] = pert_name
all_dfs.append(df)
LOGGER.info("Loaded %s (%d metrics)", pert_name, len(df))
if not all_dfs:
LOGGER.error("No comparison results found. Run evaluation first.")
return
combined = pd.concat(all_dfs, ignore_index=True)
# Save full table
output_path = cfg.EVAL_DIR / "all_comparison.csv"
cfg.EVAL_DIR.mkdir(parents=True, exist_ok=True)
combined.to_csv(output_path, index=False)
LOGGER.info("Saved aggregated results: %s (%d rows)", output_path, len(combined))
# Summary: mean across perturbations per metric
print("\n" + "=" * 80)
print("SUMMARY: Mean across all perturbations")
print("=" * 80)
summary = combined.groupby("metric")[["prompt_selection", "random_baseline", "diff"]].agg(
["mean", "std"]
)
summary.columns = [f"{col}_{stat}" for col, stat in summary.columns]
summary = summary.sort_values("diff_mean", ascending=False)
print(summary.to_string())
summary_path = cfg.EVAL_DIR / "summary_statistics.csv"
summary.to_csv(summary_path)
LOGGER.info("Saved summary statistics: %s", summary_path)
# Count wins
print("\n" + "=" * 80)
print("WIN COUNTS (per metric, across perturbations)")
print("=" * 80)
lower_is_better = {"mse", "mae", "mse_delta", "mae_delta"}
for metric_name, group in combined.groupby("metric"):
ps_wins = 0
bl_wins = 0
ties = 0
for _, row in group.iterrows():
diff = row["diff"]
if metric_name in lower_is_better:
diff = -diff
if abs(diff) < 1e-12:
ties += 1
elif diff > 0:
ps_wins += 1
else:
bl_wins += 1
total = len(group)
print(f" {metric_name:35s} PS wins: {ps_wins}/{total} BL wins: {bl_wins}/{total} Ties: {ties}/{total}")
# Script entry point: run aggregation only when executed directly, not on import.
if __name__ == "__main__":
    main()