"""
Generate rejected responses for ActivityNet DPO using the SFT or vanilla base model.
8-GPU parallel inference for maximum speed.

Usage:
    # Stage 1: generate rejected responses (8-GPU parallel)
    python generate_activitynet_rejected.py --stage 1

    # Stage 2: assemble the final DPO json
    python generate_activitynet_rejected.py --stage 2

    # Both stages
    python generate_activitynet_rejected.py --stage all

    # Choose which base model produces the rejected responses (default: both)
    python generate_activitynet_rejected.py --stage all --model sft
"""

import argparse
import json
import logging
import multiprocessing as mp
import os
from pathlib import Path

import torch

LOG = logging.getLogger("activitynet_rejected")
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(process)d] %(message)s")
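# The "[%(process)d]" PID field keeps interleaved logs from the parallel GPU
# workers attributable, e.g. "2024-01-01 12:00:00,000 [12345] GPU 0: finished"
# (timestamp and PID illustrative).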

VIDEO_DIR = "/opt/dlami/nvme/sharegpt_activitynet_raw/activitynet/videos"
AUDIO_DIR = "/opt/dlami/nvme/sharegpt_activitynet_audio"
GPT_CHOSEN_FILE = "/opt/dlami/nvme/activitynet_gpt_chosen.jsonl"
RAW_REJECTED_DIR = "/opt/dlami/nvme/activitynet_rejected_shards"
FINAL_OUTPUT = "/home/ubuntu/LlamaFactory/data/dpo_activitynet_gpt_chosen.json"

SFT_MODEL_PATH = "Rakancorle11/qwen3omni_full_sft_revised_thinker_key"
VANILLA_MODEL_PATH = "Qwen/Qwen3-Omni-30B-A3B-Instruct"
NUM_GPUS = len(os.environ.get("CUDA_VISIBLE_DEVICES", "0,1,2,3,4,5,6,7").split(","))
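# NUM_GPUS is inferred from CUDA_VISIBLE_DEVICES (defaulting to 8 devices);
# each spawned worker re-pins itself to exactly one of those devices below.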


def worker_fn(gpu_id: int, shard: list[dict], model_path: str, output_dir: str, model_type: str):
    """Each worker loads the model on one GPU and processes its shard."""
    # Pin this process to a single device *before* any CUDA work happens,
    # so device_map="auto" below only sees that one GPU.
    visible_gpus = os.environ.get("CUDA_VISIBLE_DEVICES", "0,1,2,3,4,5,6,7").split(",")
    os.environ["CUDA_VISIBLE_DEVICES"] = visible_gpus[gpu_id]

    # Heavy imports stay inside the worker so each spawned process initializes
    # transformers/CUDA only after the device pinning above.
    from transformers import AutoConfig, AutoProcessor, Qwen3OmniMoeForConditionalGeneration
    from qwen_omni_utils import process_mm_info

    LOG.info("GPU %d: loading model %s (type=%s), shard size=%d", gpu_id, model_path, model_type, len(shard))
    processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)

    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
    detected_type = getattr(config, "model_type", "")

    if detected_type == "qwen3_omni_moe_thinker":
        # Thinker-only checkpoint (no talker weights): load the thinker class directly.
        from transformers import Qwen3OmniMoeThinkerConfig, Qwen3OmniMoeThinkerForConditionalGeneration
        thinker_config = Qwen3OmniMoeThinkerConfig.from_pretrained(model_path)
        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
            model_path, config=thinker_config, torch_dtype=torch.bfloat16,
            device_map="auto", attn_implementation="sdpa",
        )
        is_thinker = True
    else:
        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
            model_path, config=config, torch_dtype=torch.bfloat16,
            device_map="auto", attn_implementation="sdpa",
        )
        # Only text generation is needed here, so skip the talker entirely.
        if hasattr(model, "disable_talker"):
            model.disable_talker()
        is_thinker = False
    model.eval()

    output_file = os.path.join(output_dir, f"shard_{gpu_id}.jsonl")

    # Resume support: entries already written to this shard file are skipped.
    # The key truncates the prompt to 50 chars, which assumes prompts for the
    # same video differ within their first 50 characters.
    done_keys = set()
    if os.path.exists(output_file):
        with open(output_file) as f:
            for line in f:
                d = json.loads(line)
                done_keys.add(f"{d['video']}_{d['prompt'][:50]}")
        LOG.info("GPU %d: resuming, %d already done", gpu_id, len(done_keys))

    todo = [d for d in shard if f"{d['video']}_{d['prompt'][:50]}" not in done_keys]
    LOG.info("GPU %d: %d to process", gpu_id, len(todo))

    with open(output_file, "a") as out_f:
        for i, d in enumerate(todo):
            video_path = os.path.join(VIDEO_DIR, f"{d['video']}.mp4")
            audio_path = os.path.join(AUDIO_DIR, f"{d['video']}.wav")

            if not os.path.exists(video_path):
                continue

            try:
                # Audio is optional: attach the extracted wav only when it exists.
                messages = [
                    {
                        "role": "user",
                        "content": [
                            {"type": "video", "video": video_path},
                            *(
                                [{"type": "audio", "audio": audio_path}]
                                if os.path.exists(audio_path)
                                else []
                            ),
                            {"type": "text", "text": d["prompt"]},
                        ],
                    },
                ]

                text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
                audios, images, videos = process_mm_info(messages, use_audio_in_video=False)
                inputs = processor(
                    text=[text], audio=audios, images=images, videos=videos,
                    return_tensors="pt", padding=True,
                )

                # Move tensors to the model's device and cast floating-point
                # features (pixel values, audio features) to its dtype.
                model_dtype = next(model.parameters()).dtype
                for k, v in list(inputs.items()):
                    if hasattr(v, "to"):
                        v = v.to(model.device)
                        if torch.is_floating_point(v):
                            v = v.to(model_dtype)
                        inputs[k] = v
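                # Without the cast above, the processor typically returns
                # float32 feature tensors, which can clash with the bf16
                # weights inside some attention kernels.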

                with torch.no_grad():
                    if is_thinker:
                        # Thinker-only model: a plain text generate() call.
                        gen_kwargs = {
                            "max_new_tokens": 512,
                            "do_sample": True,
                            "temperature": 0.7,
                            "top_p": 0.9,
                        }
                    else:
                        # Full Omni model: route the token budget to the
                        # thinker and disable audio output entirely.
                        gen_kwargs = {
                            "thinker_max_new_tokens": 512,
                            "return_audio": False,
                            "use_audio_in_video": False,
                            "do_sample": True,
                            "temperature": 0.7,
                            "top_p": 0.9,
                        }
                    output_ids = model.generate(**inputs, **gen_kwargs)

                # The full model may return a (text_ids, audio) tuple even with
                # return_audio=False; keep only the token ids.
                if isinstance(output_ids, tuple):
                    output_ids = output_ids[0]

                # Decode only the newly generated tokens, not the prompt.
                input_len = inputs["input_ids"].shape[-1]
                response = processor.decode(output_ids[0][input_len:], skip_special_tokens=True).strip()

                result = {
                    "video": d["video"],
                    "prompt": d["prompt"],
                    "rejected_base": response,
                }
                # Flush after every entry so an interrupted run can resume.
                out_f.write(json.dumps(result, ensure_ascii=False) + "\n")
                out_f.flush()

            except Exception as e:
                LOG.error("GPU %d: failed %s: %s", gpu_id, d["video"], e)
                continue

            if (i + 1) % 50 == 0:
                LOG.info("GPU %d: %d/%d done", gpu_id, i + 1, len(todo))

    LOG.info("GPU %d: finished", gpu_id)


def stage1_generate_rejected(model_type: str):
    """Split data across NUM_GPUS GPUs and run generation in parallel."""
    if model_type == "sft":
        model_path = SFT_MODEL_PATH
        output_dir = RAW_REJECTED_DIR + "_sft"
    else:
        model_path = VANILLA_MODEL_PATH
        output_dir = RAW_REJECTED_DIR + "_vanilla"

    os.makedirs(output_dir, exist_ok=True)

    gpt_data = []
    with open(GPT_CHOSEN_FILE) as f:
        for line in f:
            d = json.loads(line)
            if "error" not in d:
                gpt_data.append(d)

    LOG.info("[%s] Total entries: %d, splitting across %d GPUs", model_type, len(gpt_data), NUM_GPUS)

    # Round-robin sharding keeps the per-GPU workloads balanced.
    shards = [[] for _ in range(NUM_GPUS)]
    for i, d in enumerate(gpt_data):
        shards[i % NUM_GPUS].append(d)

    for i, s in enumerate(shards):
        LOG.info(" GPU %d: %d entries", i, len(s))

    # "spawn" gives every worker a clean process with no inherited CUDA state.
    processes = []
    mp.set_start_method("spawn", force=True)
    for gpu_id in range(NUM_GPUS):
        p = mp.Process(target=worker_fn, args=(gpu_id, shards[gpu_id], model_path, output_dir, model_type))
        p.start()
        processes.append(p)

    for p in processes:
        p.join()

    LOG.info("All workers finished")


def stage2_assemble(model_type: str):
    """Merge shard outputs + GPT chosen into the final DPO data."""
    if model_type == "sft":
        rejected_dir = RAW_REJECTED_DIR + "_sft"
        output_file = FINAL_OUTPUT.replace(".json", "_sft.json")
        dataset_name = "dpo_activitynet_gpt_chosen_sft"
    else:
        rejected_dir = RAW_REJECTED_DIR + "_vanilla"
        output_file = FINAL_OUTPUT.replace(".json", "_vanilla.json")
        dataset_name = "dpo_activitynet_gpt_chosen_vanilla"

    gpt_map = {}
    with open(GPT_CHOSEN_FILE) as f:
        for line in f:
            d = json.loads(line)
            if "error" not in d:
                key = f"{d['video']}_{d['prompt']}"
                gpt_map[key] = d["chosen_gpt"]

    rej_map = {}
    for shard_file in sorted(Path(rejected_dir).glob("shard_*.jsonl")):
        with open(shard_file) as f:
            for line in f:
                d = json.loads(line)
                key = f"{d['video']}_{d['prompt']}"
                rej_map[key] = d["rejected_base"]

    overlap_keys = set(gpt_map) & set(rej_map)
    LOG.info("GPT chosen: %d, Base rejected: %d, Overlap: %d", len(gpt_map), len(rej_map), len(overlap_keys))

    results = []
    skipped_similar = 0
    skipped_no_rejected = 0

    with open(GPT_CHOSEN_FILE) as f:
        for line in f:
            d = json.loads(line)
            if "error" in d:
                continue

            key = f"{d['video']}_{d['prompt']}"
            if key not in rej_map:
                skipped_no_rejected += 1
                continue

            chosen = d["chosen_gpt"]
            rejected = rej_map[key]

            # Drop near-duplicate pairs: Jaccard similarity over lowercased
            # word sets above 0.7 gives DPO too weak a preference signal.
            c_words = set(chosen.lower().split())
            r_words = set(rejected.lower().split())
            word_overlap = len(c_words & r_words) / len(c_words | r_words) if c_words | r_words else 0
            if word_overlap > 0.7:
                skipped_similar += 1
                continue

            video_path = os.path.join(VIDEO_DIR, f"{d['video']}.mp4")
            audio_path = os.path.join(AUDIO_DIR, f"{d['video']}.wav")
            has_audio = os.path.exists(audio_path)

            # Keep the <audio> placeholder consistent with the audios list:
            # only emit it when a wav file is actually attached.
            media_prefix = "<video><audio>" if has_audio else "<video>"
            results.append({
                "messages": [{"role": "user", "content": f"{media_prefix}{d['prompt']}"}],
                "chosen": {"role": "assistant", "content": chosen},
                "rejected": {"role": "assistant", "content": rejected},
                "videos": [video_path],
                "audios": [audio_path] if has_audio else [],
            })
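
            # A resulting entry looks roughly like this (ids/paths illustrative):
            # {
            #   "messages": [{"role": "user", "content": "<video><audio>Describe ..."}],
            #   "chosen":   {"role": "assistant", "content": "...GPT response..."},
            #   "rejected": {"role": "assistant", "content": "...base-model response..."},
            #   "videos": [".../videos/v_abc123.mp4"],
            #   "audios": [".../sharegpt_activitynet_audio/v_abc123.wav"]
            # }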

    with open(output_file, "w") as f:
        json.dump(results, f, indent=2, ensure_ascii=False)

    LOG.info("Final DPO data: %d entries -> %s", len(results), output_file)
    LOG.info("Skipped (too similar): %d", skipped_similar)
    LOG.info("Skipped (no rejected): %d", skipped_no_rejected)

    # Register the dataset in LLaMA-Factory's dataset_info.json so it can be
    # referenced by name in training configs.
    info_path = "/home/ubuntu/LlamaFactory/data/dataset_info.json"
    with open(info_path) as f:
        info = json.load(f)
    info[dataset_name] = {
        "file_name": os.path.basename(output_file),
        "formatting": "sharegpt",
        "ranking": True,
        "columns": {
            "messages": "messages",
            "chosen": "chosen",
            "rejected": "rejected",
            "videos": "videos",
            "audios": "audios",
        },
        "tags": {
            "role_tag": "role",
            "content_tag": "content",
            "user_tag": "user",
            "assistant_tag": "assistant",
        },
    }
    with open(info_path, "w") as f:
        json.dump(info, f, indent=2, ensure_ascii=False)
    LOG.info("Registered %s in dataset_info.json", dataset_name)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--stage", choices=["1", "2", "all"], default="all")
    parser.add_argument("--model", choices=["sft", "vanilla", "both"], default="both",
                        help="Which model to use for rejected generation")
    args = parser.parse_args()

    models = ["sft", "vanilla"] if args.model == "both" else [args.model]

    for model_type in models:
        LOG.info("===== Processing with %s model =====", model_type)
        if args.stage in ("1", "all"):
            stage1_generate_rejected(model_type)
        if args.stage in ("2", "all"):
            stage2_assemble(model_type)


if __name__ == "__main__":
    main()