"""Summarize shared-vision ablation runs (sgl/tools/summarize_shared_vision_ablation.py).

Aggregates ``*.summary.json`` files produced by ablation runs into a single
Markdown-style table, with per-prune-ratio deltas against the baseline run.
"""
import argparse
import json
from pathlib import Path
from typing import Dict, List
def infer_mode(summary: Dict[str, object]) -> str:
    """Derive a short mode label for a run from its summary dict.

    Labels distinguish baseline runs, single-source attention runs
    (question/answer only) and weighted combinations, for both the plain
    and the ``two_pass_explicit`` reasoning configurations.
    """
    reasoning = summary.get("guide_reasoning_mode", "none")
    q_w = float(summary.get("guide_question_attention_weight", 1.0))
    a_w = float(summary.get("guide_answer_attention_weight", 1.0))

    if reasoning != "two_pass_explicit":
        # Single-pass runs: classify purely by the (question, answer) weights.
        if q_w == 1.0 and a_w == 1.0:
            return "baseline"
        if q_w > 0 and a_w == 0.0:
            return "question_only"
        if q_w == 0.0 and a_w > 0:
            return "answer_only"
        return f"combined_{q_w:g}_{a_w:g}"

    # Two-pass runs: classify by which attention source fed the guide.
    source = summary.get("guide_attention_source", "default")
    r_w = float(summary.get("guide_reasoning_attention_weight", 0.0))
    if source == "reasoning":
        return "question_only"
    if source == "answer":
        return "answer_only"
    if source != "combined":
        return f"custom_{source}"
    # Unit weights on both sources collapse to the plain "combined" label.
    if r_w == 1.0 and a_w == 1.0:
        return "combined"
    return f"combined_{r_w:g}_{a_w:g}"
def load_summaries(input_dir: Path, pattern: str) -> List[Dict[str, object]]:
    """Load every summary JSON under *input_dir* matching *pattern* into flat rows.

    Each row carries the raw metrics plus a derived ``mode`` label; rows are
    returned in sorted-path order.
    """
    collected: List[Dict[str, object]] = []
    for summary_path in sorted(input_dir.rglob(pattern)):
        with summary_path.open() as handle:
            summary = json.load(handle)
        # NOTE(review): the answer-weight default here (0.0) differs from the
        # 1.0 default infer_mode uses — confirm the asymmetry is intended.
        collected.append(
            {
                "summary_path": str(summary_path),
                "run_name": summary_path.name.replace(".summary.json", ""),
                "count": int(summary.get("count", 0)),
                "accuracy": float(summary["accuracy"]),
                "prune_ratio": float(summary.get("large_model_prune_ratio", -1)),
                "prune_layer": float(summary.get("large_model_prune_layer", 0.0)),
                "reasoning_mode": summary.get("guide_reasoning_mode", "none"),
                "attention_source": summary.get("guide_attention_source", "default"),
                "question_weight": float(summary.get("guide_question_attention_weight", 1.0)),
                "reasoning_weight": float(summary.get("guide_reasoning_attention_weight", 0.0)),
                "answer_weight": float(summary.get("guide_answer_attention_weight", 0.0)),
                "results_file": summary.get("results_file", ""),
                "mode": infer_mode(summary),
            }
        )
    return collected
def add_baseline_deltas(rows: List[Dict[str, object]]) -> None:
    """Annotate each row in place with its baseline accuracy and delta.

    Baselines are matched per prune ratio; a row whose ratio has no baseline
    run gets ``None`` for both ``baseline_accuracy`` and ``delta_vs_baseline``.
    """
    baselines = {
        row["prune_ratio"]: row["accuracy"]
        for row in rows
        if row["mode"] == "baseline"
    }
    for row in rows:
        base = baselines.get(row["prune_ratio"])
        row["baseline_accuracy"] = base
        row["delta_vs_baseline"] = row["accuracy"] - base if base is not None else None
def print_table(rows: List[Dict[str, object]]) -> None:
    """Render the rows as a Markdown-style pipe table on stdout."""
    columns = [
        "prune_ratio",
        "mode",
        "accuracy",
        "delta_vs_baseline",
        "reasoning_mode",
        "attention_source",
        "weights(q,a)",
        "count",
        "run_name",
    ]
    print(" | ".join(columns))
    print(" | ".join("---" for _ in columns))
    for row in rows:
        delta = row["delta_vs_baseline"]
        cells = [
            f"{row['prune_ratio']:.3f}",
            row["mode"],
            f"{row['accuracy']:.6f}",
            # Rows whose prune ratio had no baseline run show "n/a".
            "n/a" if delta is None else f"{delta:+.6f}",
            row["reasoning_mode"],
            row["attention_source"],
            f"{row['question_weight']:.3f},{row['answer_weight']:.3f}",
            str(row["count"]),
            row["run_name"],
        ]
        print(" | ".join(cells))
def main() -> None:
    """CLI entry point: aggregate summary JSONs, print the ablation table, and
    optionally dump the rows as JSON."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input-dir", type=Path, required=True)
    parser.add_argument("--pattern", type=str, default="*.summary.json")
    parser.add_argument("--output-json", type=Path, default=None)
    args = parser.parse_args()

    rows = load_summaries(args.input_dir, args.pattern)
    if not rows:
        raise SystemExit(f"No summary files found under {args.input_dir} matching {args.pattern}")

    # Sort before computing deltas so the printed table is grouped by prune
    # ratio with modes alphabetized inside each group.
    rows.sort(key=lambda item: (item["prune_ratio"], item["mode"], item["run_name"]))
    add_baseline_deltas(rows)
    print_table(rows)

    if args.output_json is not None:
        args.output_json.parent.mkdir(parents=True, exist_ok=True)
        with args.output_json.open("w") as sink:
            json.dump(rows, sink, ensure_ascii=False, indent=2)


if __name__ == "__main__":
    main()