""" Edit video frames based on detect_results_all.json from detect_marker_sample_vllm.py. For every frame in every segment: - has_human_marker=False, no parse_error → copy original image to output path - has_human_marker=True OR parse_error → run QwenImageEdit, save edited image Output path mirrors the source tree, replacing the `frames` folder with `qwen_edited`: src: /llava178k_merged//frames/seg_000/000001.png dst: /llava178k_merged//qwen_edited/seg_000/000001.png Supports checkpoint/resume (segment-level) and --rank/--split_num for multi-GPU. Usage (single GPU): python edit_sample_qwen_vllm_detect.py \ --detect_json_path outputs/detect_marker_sample_vllm/detect_results_all.json \ --model_path /path/to/QwenImageEdit \ --data_root /private/data_group/weijiawu/code/project/dataset/markers Usage (8 GPUs, one process each): for rank in {0..7}; do CUDA_VISIBLE_DEVICES=$rank python edit_sample_qwen_vllm_detect.py \ --detect_json_path ... --model_path ... \ --rank $rank --split_num 8 & done wait """ import argparse import json import shutil import sys from pathlib import Path import torch from PIL import Image from diffusers import QwenImageEditPipeline from tqdm import tqdm if hasattr(sys.stdout, "reconfigure"): sys.stdout.reconfigure(encoding="utf-8", errors="replace") if hasattr(sys.stderr, "reconfigure"): sys.stderr.reconfigure(encoding="utf-8", errors="replace") # ─────────────────────────── args ─────────────────────────── def parse_args(): parser = argparse.ArgumentParser( description="Edit frames based on vLLM detect results (copy clean / edit marker)" ) parser.add_argument("--detect_json_path", type=str, required=True, help="Path to detect_results_all.json") parser.add_argument("--model_path", type=str, required=True, help="Path to QwenImageEdit model") parser.add_argument("--data_root", type=str, default="/private/data_group/code/project/dataset/markers", help="Root that contains llava178k_merged/; used to resolve relative paths") parser.add_argument("--prompt_mode", type=str, default="detailed", choices=["detailed", "symbols"]) parser.add_argument("--true_cfg_scale", type=float, default=4.0) parser.add_argument("--num_inference_steps", type=int, default=50) parser.add_argument("--seed", type=int, default=0) parser.add_argument("--rank", type=int, default=0) parser.add_argument("--split_num", type=int, default=1) return parser.parse_args() # ─────────────────────────── prompt ─────────────────────────── def build_prompt(meta: dict, mode: str) -> str: if mode == "symbols": symbols = meta.get("symbols", []) text = " and ".join(symbols).strip() else: details = meta.get("symbols_detailed", []) text = " and ".join(details).strip() return ( f"Remove the artificial visual {text} in the image. " "These are human-added overlay annotations. " "Restore the original appearance of the underlying image content " "without altering the real objects, colors, textures, or scene layout. " "Do NOT remove, modify, blur, distort, or hallucinate any natural text, " "captions, subtitles, on-screen words, numbers, labels, or watermarks " "that are part of the original image content." ) # ─────────────────────────── path helpers ─────────────────────────── def resolve_frame_path(raw_path: str, data_root: str) -> Path: """Convert relative/stale meta path to absolute path under data_root.""" p = Path(raw_path) if p.is_absolute() and p.exists(): return p parts = p.parts if parts and parts[0] == "outputs": p = Path(*parts[1:]) return Path(data_root) / p def output_frame_path(src_path: Path) -> Path: """Replace the `frames` component in the path with `qwen_edited`. src: .../llava178k_merged/ytb_xxx/frames/seg_000/000001.png dst: .../llava178k_merged/ytb_xxx/qwen_edited/seg_000/000001.png """ parts = src_path.parts # Find the index of 'frames' component for i, part in enumerate(parts): if part == "frames": new_parts = parts[:i] + ("qwen_edited",) + parts[i + 1:] return Path(*new_parts) # Fallback: place qwen_edited as sibling of seg_XXX folder seg_dir = src_path.parent return seg_dir.parent.parent / "qwen_edited" / seg_dir.name / src_path.name # ─────────────────────────── checkpoint ─────────────────────────── def save_json(path: Path, data) -> None: path.write_text(json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8") def load_checkpoint(path: Path) -> set: if not path.exists(): return set() try: data = json.loads(path.read_text(encoding="utf-8")) done = {(s["video_name"], s["seg_idx"]) for s in data} print(f"[RESUME] Checkpoint: {len(done)} segments already done.") return done except Exception as e: print(f"[WARN] Could not load checkpoint ({e}), starting fresh.") return set() # ─────────────────────────── main ─────────────────────────── def main(): args = parse_args() with open(args.detect_json_path, "r", encoding="utf-8") as f: data = json.load(f) print(f"Total segments: {len(data)}") data = data[args.rank:: args.split_num] print(f"Rank {args.rank}/{args.split_num}: {len(data)} segments assigned") # Checkpoint lives next to the detect json detect_dir = Path(args.detect_json_path).parent ckpt_path = detect_dir / f"edit_checkpoint_rank{args.rank}.json" done_set = load_checkpoint(ckpt_path) ckpt_data = [{"video_name": vn, "seg_idx": si} for vn, si in done_set] todo = [m for m in data if (m["video_name"], m["seg_idx"]) not in done_set] print(f"Skipping {len(data) - len(todo)} done, {len(todo)} remaining.") if not todo: print("[DONE] Nothing to process.") return # Lazy-load pipeline (only if there are frames to edit) pipeline = None stats = {"copied": 0, "edited": 0, "skipped": 0, "missing": 0} for meta in tqdm(todo, desc=f"rank{args.rank}", unit="seg"): video_name = meta["video_name"] seg_idx = meta["seg_idx"] frame_records = meta["frames"] # all frames in this segment prompt = build_prompt(meta, args.prompt_mode) needs_edit = [ fr for fr in frame_records if fr.get("has_human_marker") is True or fr.get("parse_error") is True ] needs_copy = [ fr for fr in frame_records if fr not in needs_edit ] tqdm.write( f"\n[{video_name}] seg_{seg_idx:03d} | " f"{len(frame_records)} frames " f"(edit={len(needs_edit)}, copy={len(needs_copy)})" ) # Load pipeline on first actual edit job if needs_edit and pipeline is None: print("Loading QwenImageEditPipeline ...") pipeline = QwenImageEditPipeline.from_pretrained( args.model_path, torch_dtype=torch.bfloat16, ) pipeline.to("cuda") pipeline.set_progress_bar_config(disable=True) print("Pipeline loaded.") # ── copy clean frames ── for fr in needs_copy: src = resolve_frame_path(fr["path"], args.data_root) dst = output_frame_path(src) if dst.exists(): stats["skipped"] += 1 continue if not src.exists(): tqdm.write(f" [WARN] Missing src: {src}") stats["missing"] += 1 continue dst.parent.mkdir(parents=True, exist_ok=True) shutil.copy2(src, dst) stats["copied"] += 1 # ── edit marker frames (batched) ── # Collect frames that still need processing edit_todo = [] for fr in needs_edit: src = resolve_frame_path(fr["path"], args.data_root) dst = output_frame_path(src) if dst.exists(): stats["skipped"] += 1 continue if not src.exists(): tqdm.write(f" [WARN] Missing src: {src}") stats["missing"] += 1 continue edit_todo.append((fr, src, dst)) for i, (fr, src, dst) in enumerate(edit_todo): dst.parent.mkdir(parents=True, exist_ok=True) image = Image.open(src).convert("RGB") generator = torch.Generator(device="cuda").manual_seed(args.seed + i) with torch.inference_mode(): output = pipeline( image=image, prompt=prompt, generator=generator, true_cfg_scale=args.true_cfg_scale, negative_prompt=" ", num_inference_steps=args.num_inference_steps, ) output.images[0].save(dst) orig_idx = fr.get("original_frame_idx", "?") tqdm.write(f" [EDIT] {dst.name} (orig_frame={orig_idx})") stats["edited"] += 1 ckpt_data.append({"video_name": video_name, "seg_idx": seg_idx}) save_json(ckpt_path, ckpt_data) print(f"\n=== Rank {args.rank} Done ===") print(f" Edited : {stats['edited']}") print(f" Copied : {stats['copied']}") print(f" Skipped: {stats['skipped']} (already existed)") print(f" Missing: {stats['missing']} (src not found)") if __name__ == "__main__": main()