| """Generate Anonymous-Hard subset from Qwen per-law eval results. |
| |
| For each prompt, computes a physics micro-avg from per-law scores |
| (all 13 laws), averaged across all models. |
| Keeps prompts with cross-model avg < threshold. |
| |
| Physical laws are sourced from the canonical prompt JSONs (not from eval |
| JSONs, which may contain stale vocabulary). |
| |
| Usage: |
| # Default: threshold 1.75, overwrite existing hard subset |
| python -m dataprocessing.refine.gen_hard_subset |
| |
| # Custom threshold, write to a different file |
| python -m dataprocessing.refine.gen_hard_subset --threshold 1.50 \ |
| --output data/prompts/anonymous_hard_subset_150.json |
| |
| # Dry run: print stats without writing |
| python -m dataprocessing.refine.gen_hard_subset --dry-run |
| |
| # Strict mode: fail on data quality issues |
| python -m dataprocessing.refine.gen_hard_subset --strict |
| """ |
|
|
import argparse
import json
import logging
import sys
from collections import Counter, defaultdict
from pathlib import Path

from dataprocessing.common.pipeline import PipelineCheck
from dataprocessing.common.video_id import PROMPT_SOURCES, load_source_laws

logger = logging.getLogger(__name__)

ROOT = Path(__file__).resolve().parents[2]
VIDEOS_DIR = ROOT / "data/videos"
OUTPUT_PATH = ROOT / "data/prompts/anonymous_hard_subset.json"

DATASET_SUFFIXES = ["video_phy_2", "physics_iq", "openvid", "wmb"]

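# Assumed on-disk layout (inferred from parse_model_dataset/find_latest_eval
# below, not enforced here): one directory per (model, dataset) pair, e.g.
#
#   data/videos/ltx-2-video_phy_2/eval_qwen_20xx....json
#
# where the eval filename embeds a sortable timestamp.
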
def parse_model_dataset(dirname: str) -> tuple[str, str] | None:
    """Extract (model, dataset) from a directory name like 'ltx-2-video_phy_2'."""
    for ds in DATASET_SUFFIXES:
        if dirname.endswith(f"-{ds}"):
            model = dirname[:-(len(ds) + 1)]
            return model, ds
    return None

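# Illustration of the suffix rule above (names other than the first, which
# comes from the docstring, are hypothetical):
#   parse_model_dataset("ltx-2-video_phy_2")  -> ("ltx-2", "video_phy_2")
#   parse_model_dataset("somemodel-openvid")  -> ("somemodel", "openvid")
#   parse_model_dataset("notes")              -> None (no known dataset suffix)
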
def find_latest_eval(dirpath: Path, evaluator: str = "qwen") -> Path | None:
    """Find the latest batched eval JSON for the given evaluator, falling back to Gemini."""
    evals = sorted(dirpath.glob(f"eval_{evaluator}_2*.json"))
    if evals:
        return evals[-1]
    if evaluator != "gemini":
        gemini = sorted(dirpath.glob("eval_gemini*_2*.json"))
        if gemini:
            return gemini[-1]
    return None

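# "Latest" relies on lexicographic filename order, which is correct when the
# names embed an ISO-like timestamp, e.g. (hypothetical names)
# eval_qwen_20250102.json sorts after eval_qwen_20241130.json.
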
def load_eval_scores(eval_path: Path) -> list[dict]:
    """Load eval JSON and extract per-video physics micro-avg.

    Computes micro-avg from per-law scores (all laws).
    Follows the same scoring approach as score_histogram / rank.md.

    Returns a list of dicts with keys: video, prompt, phys_micro_avg, n_laws_scored.
    """
    with open(eval_path) as f:
        data = json.load(f)

    entries = []
    for r in data.get("results", []):
        video = r.get("video", "")
        prompt = r.get("prompt", "")
        if not video:
            continue

        phys = r.get("physical", {})
        if not isinstance(phys, dict):
            continue
        laws = phys.get("laws", {})

        # A law contributes to the micro-avg only if it was actually scored.
        scored_vals = []
        for law_data in laws.values():
            if not isinstance(law_data, dict):
                continue
            score = law_data.get("score")
            is_scored = (law_data.get("status") == "scored"
                         or law_data.get("valid", False))
            if is_scored and score is not None:
                scored_vals.append(score)

        if not scored_vals:
            continue

        entries.append({
            "video": video,
            "prompt": prompt,
            "phys_micro_avg": sum(scored_vals) / len(scored_vals),
            "n_laws_scored": len(scored_vals),
        })
    return entries

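# Shape assumed by load_eval_scores, sketched with hypothetical values:
#   {"results": [{"video": "clip_0001", "prompt": "...",
#                 "physical": {"laws": {"gravity": {"status": "scored",
#                                                   "score": 3.0}}}}]}
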
def main(argv: list[str] | None = None):
    parser = argparse.ArgumentParser(
        description="Generate Anonymous-Hard subset from Qwen eval scores")
    parser.add_argument("--threshold", type=float, default=3.00,
                        help="Physics micro-avg threshold (default: 3.00)")
    parser.add_argument("--output", type=str, default=None,
                        help="Output path (default: data/prompts/anonymous_hard_subset.json)")
    parser.add_argument("--dry-run", action="store_true",
                        help="Print stats without writing")
    parser.add_argument("--strict", action="store_true",
                        help="Fail on data quality issues (for CI)")
    args = parser.parse_args(argv)

    logging.basicConfig(
        level=logging.INFO,
        format="[%(asctime)s] %(message)s",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    checker = PipelineCheck(strict=args.strict)

    # Per-video bookkeeping: scores keyed by model, plus prompt text and the
    # dataset each eval came from.
    video_scores: dict[str, dict] = defaultdict(dict)
    video_prompts: dict[str, str] = {}
    video_eval_ds: dict[str, str] = {}
    eval_paths_by_ds: dict[str, Path] = {}

    for d in sorted(VIDEOS_DIR.iterdir()):
        if not d.is_dir():
            continue
        parsed = parse_model_dataset(d.name)
        if parsed is None:
            continue
        model, dataset = parsed
        # Skip real-world reference directories (not model generations).
        if "real_world" in model:
            continue

        eval_path = find_latest_eval(d)
        if eval_path is None:
            continue

        eval_paths_by_ds[dataset] = eval_path
        entries = load_eval_scores(eval_path)
        logger.info("Loaded %d entries from %s", len(entries), eval_path.name)

        for e in entries:
            vid = e["video"]
            video_scores[vid][model] = e["phys_micro_avg"]
            video_prompts[vid] = e["prompt"]
            video_eval_ds[vid] = dataset

    logger.info("Total unique videos with scores: %d", len(video_scores))

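    # At this point video_scores maps video id -> {model: micro-avg}, e.g.
    # (hypothetical ids/names): {"clip_0001": {"ltx-2": 2.4, "modelB": 3.1}}
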
    # Cross-model difficulty: average each video's physics micro-avg over all
    # models that scored it.
    video_difficulty = {}
    for vid, model_scores in video_scores.items():
        vals = list(model_scores.values())
        avg = sum(vals) / len(vals)
        video_difficulty[vid] = {
            "phys_micro_avg": round(avg, 3),
            "n_models": len(model_scores),
        }

    # Hard = videos whose cross-model average falls below the threshold,
    # sorted hardest-first.
    hard_vids = [
        vid for vid, diff in video_difficulty.items()
        if diff["phys_micro_avg"] < args.threshold
    ]
    hard_vids.sort(key=lambda v: video_difficulty[v]["phys_micro_avg"])

    logger.info("Threshold < %.2f: %d / %d videos",
                args.threshold, len(hard_vids), len(video_difficulty))

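    # Worked example (hypothetical scores): a video scored 2.0, 3.5 and 4.0 by
    # three models averages (2.0 + 3.5 + 4.0) / 3 = 3.167, so it is kept only
    # when --threshold is above 3.167.
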
    # Laws come from the canonical prompt sources, not from the eval JSONs.
    source = load_source_laws()

    # Map dataset names to eval-dir suffixes (currently an identity map).
    DS_TO_EVAL_SUFFIX = {
        "wmb": "wmb", "video_phy_2": "video_phy_2", "physics_iq": "physics_iq",
        "openvid": "openvid",
    }
    for ds_name, src_path in PROMPT_SOURCES:
        eval_suffix = DS_TO_EVAL_SUFFIX.get(ds_name)
        if eval_suffix and eval_suffix in eval_paths_by_ds:
            checker.check_staleness(src_path, eval_paths_by_ds[eval_suffix])

    missing = 0
    prompts_out = []
    seen_disk_vids: dict[str, int] = {}
    for eval_vid in hard_vids:
        eval_ds = video_eval_ds.get(eval_vid, "")
        matched = source.resolve_eval(eval_vid, eval_ds)

        if matched:
            cvid, entry = matched
            laws = entry["laws"]
            dataset = entry["dataset"]
            prompt = entry["prompt"] or video_prompts.get(eval_vid, "")
            legacy_ids = source.cvid_to_legacies.get(cvid, set())
            disk_vid = max(legacy_ids, key=len) if legacy_ids else eval_vid
        else:
            # Unresolvable eval IDs are counted and reported via the checker.
            missing += 1
            continue

        # Several eval IDs can resolve to the same on-disk video; keep the
        # entry backed by the most model scores.
        if disk_vid in seen_disk_vids:
            idx = seen_disk_vids[disk_vid]
            existing = prompts_out[idx]
            if video_difficulty[eval_vid]["n_models"] > existing["difficulty"]["n_models"]:
                prompts_out[idx] = {
                    "video": disk_vid,
                    "dataset": dataset,
                    "prompt": prompt,
                    "physical_laws": laws,
                    "difficulty": video_difficulty[eval_vid],
                    "per_model_scores": dict(video_scores[eval_vid]),
                }
            continue

        checker.check_empty_laws(disk_vid, laws, dataset,
                                 resolved=matched is not None)

        seen_disk_vids[disk_vid] = len(prompts_out)
        prompts_out.append({
            "video": disk_vid,
            "dataset": dataset,
            "prompt": prompt,
            "physical_laws": laws,
            "difficulty": video_difficulty[eval_vid],
            "per_model_scores": dict(video_scores[eval_vid]),
        })

    checker.check_missing_ratio(missing, len(hard_vids))

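    # Dedup illustration (hypothetical IDs): if "clip_7" and "clip_7_long"
    # both resolve to the same canonical video, the one scored by more models
    # wins the slot in prompts_out.
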
    by_dataset = Counter(p["dataset"] for p in prompts_out)
    law_counts = Counter()
    for p in prompts_out:
        for law in p["physical_laws"]:
            law_counts[law] += 1

    output = {
        "description": (
            f"Anonymous-Hard: prompts where cross-model physics micro-avg < {args.threshold} "
            "(Qwen, per-law scores, all 13 laws)"
        ),
        "threshold": args.threshold,
        "scoring_mode": "phys_micro_avg",
        "judge": "qwen",
        "num_prompts": len(prompts_out),
        "by_dataset": dict(by_dataset.most_common()),
        "prompts": prompts_out,
    }

    logger.info("=" * 60)
    logger.info("Hard subset: %d prompts", len(prompts_out))
    logger.info("By dataset:")
    for ds, cnt in by_dataset.most_common():
        logger.info("  %s: %d", ds, cnt)
    logger.info("Physical law counts:")
    for law, cnt in law_counts.most_common():
        logger.info("  %s: %d", law, cnt)

    checker.report()

    if args.dry_run:
        logger.info("(dry-run: no file written)")
        checker.finalize()
        return

    out_path = Path(args.output) if args.output else OUTPUT_PATH
    with open(out_path, "w") as f:
        json.dump(output, f, indent=2, ensure_ascii=False)
    logger.info("Saved → %s", out_path)

    checker.finalize()

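# main() takes an argv list, so the script can also be exercised without a
# real CLI, e.g. (illustrative): main(["--dry-run", "--threshold", "2.5"])
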
if __name__ == "__main__":
    main()