"""
Generate chosen responses for ActivityNet DPO data using GPT-5.4.

Usage:
    export OPENAI_API_KEY="sk-..."
    python generate_activitynet_chosen.py
"""
|
|
import base64
import io
import json
import os
import sys
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path

import av
import numpy as np
from openai import OpenAI
|
|
| |
| VIDEO_DIR = "/opt/dlami/nvme/sharegpt_activitynet_raw/activitynet/videos" |
| DPO_SOURCE = "/opt/dlami/nvme/sharegpt_instructions/video_instruction/train/dpo/sft_dpo_17k.jsonl" |
| OUTPUT_FILE = "/opt/dlami/nvme/activitynet_gpt_chosen.jsonl" |
| NUM_FRAMES = 10 |
| MAX_WORKERS = 8 |
| MODEL = "gpt-5.4-2026-03-05" |
|
|
| client = OpenAI() |
|
|
|
|
def extract_frames(video_path: str, num_frames: int = 10) -> list[bytes]:
    """Extract up to *num_frames* evenly-spaced frames from a video as JPEG bytes.

    Args:
        video_path: Path to the video file (any format PyAV can demux).
        num_frames: Number of frames to sample across the video's duration.

    Returns:
        JPEG-encoded frames (quality 85) in decode order. May contain fewer
        than *num_frames* entries when the container under-reports its frame
        count or the stream ends early.

    Raises:
        av.AVError (and friends) if the file cannot be opened or decoded.
    """
    container = av.open(video_path)
    try:
        stream = next(s for s in container.streams if s.type == "video")
        total_frames = stream.frames
        if total_frames == 0:
            # Some containers don't report a frame count; fall back to a
            # rough guess (~10 s at 30 fps) so linspace still spreads indices.
            total_frames = 300

        # Use a set: linspace can repeat indices when total_frames < num_frames
        # (the set dedupes them), and membership tests in the decode loop become
        # O(1) instead of scanning an ndarray per frame.
        wanted = set(np.linspace(0, total_frames - 1, num_frames).astype(int).tolist())

        frames: list[bytes] = []
        for i, frame in enumerate(container.decode(stream)):
            if i in wanted:
                buf = io.BytesIO()
                frame.to_image().save(buf, format="JPEG", quality=85)
                frames.append(buf.getvalue())
                if len(frames) >= num_frames:
                    break
    finally:
        # Close even when decoding raises, so the demuxer handle never leaks.
        container.close()
    return frames
|
|
|
|
def generate_chosen(video_id: str, prompt: str) -> dict:
    """Call GPT with sampled video frames + question; return the chosen response.

    Args:
        video_id: ActivityNet video id; the file is expected at
            ``{VIDEO_DIR}/{video_id}.mp4``.
        prompt: The question to answer about the video.

    Returns:
        On success: ``{"video", "prompt", "chosen_gpt", "tokens_used"}``.
        On failure: ``{"video", "error"}`` — callers distinguish the two by
        the presence of the ``"error"`` key.
    """
    video_path = os.path.join(VIDEO_DIR, f"{video_id}.mp4")
    if not os.path.exists(video_path):
        return {"video": video_id, "error": f"video not found: {video_path}"}

    try:
        frames = extract_frames(video_path, NUM_FRAMES)
    except Exception as e:
        return {"video": video_id, "error": f"frame extraction failed: {e}"}

    # Encode each JPEG frame as a low-detail data-URL image part.
    image_messages = []
    for i, frame_bytes in enumerate(frames):
        b64 = base64.b64encode(frame_bytes).decode("utf-8")
        image_messages.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{b64}", "detail": "low"}
        })

    system_prompt = (
        "You are watching a video. The following images are evenly-spaced frames extracted from the video. "
        "Answer the question based on what you observe in these frames. "
        "Be specific, accurate, and detailed. Describe visual details you actually see. "
        "Do not hallucinate or guess about things not visible in the frames."
    )

    user_content = [
        {"type": "text", "text": f"These are {len(frames)} frames from a video:"},
        *image_messages,
        {"type": "text", "text": f"\nQuestion: {prompt}\n\nPlease provide a detailed and accurate answer."},
    ]

    # Up to 3 attempts with exponential backoff (2s, 4s) between retries.
    for attempt in range(3):
        try:
            response = client.chat.completions.create(
                model=MODEL,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_content},
                ],
                max_completion_tokens=512,
                temperature=0.3,
            )
            # message.content can be None (e.g. refusal / filtered output);
            # the original `.strip()` on None raised AttributeError, which the
            # broad except below then misreported as an API failure and
            # needlessly consumed retries.
            answer = (response.choices[0].message.content or "").strip()
            return {
                "video": video_id,
                "prompt": prompt,
                "chosen_gpt": answer,
                "tokens_used": response.usage.total_tokens if response.usage else 0,
            }
        except Exception as e:
            if attempt < 2:
                time.sleep(2 ** (attempt + 1))
            else:
                return {"video": video_id, "error": f"API failed after 3 retries: {e}"}
|
|
|
|
def main():
    """Driver: load ActivityNet DPO rows, skip completed ones, fan out API calls."""
    # ActivityNet entries are those whose video id starts with "v_".
    with open(DPO_SOURCE) as src:
        records = [
            row for row in map(json.loads, src)
            if row.get("video", "").startswith("v_")
        ]

    print(f"ActivityNet DPO entries: {len(records)}")

    # Resume support: any (video, prompt-prefix) pair already in the output
    # file is considered finished and skipped on this run.
    finished: set[str] = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE) as prev:
            for raw in prev:
                row = json.loads(raw)
                finished.add(f"{row['video']}_{row.get('prompt', '')[:50]}")
        print(f"Resuming: {len(finished)} already done")

    todo = [
        row for row in records
        if f"{row['video']}_{row['prompt'][:50]}" not in finished
    ]

    print(f"To process: {len(todo)}")
    if not todo:
        print("All done!")
        return

    total_tokens = 0
    success = 0
    errors = 0

    # Append mode + flush-per-record keeps the output resumable after a crash.
    with open(OUTPUT_FILE, "a") as out_f, \
            ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool:
        pending = {
            pool.submit(generate_chosen, row["video"], row["prompt"]): row
            for row in todo
        }

        for i, fut in enumerate(as_completed(pending), 1):
            result = fut.result()

            if "error" in result:
                errors += 1
                print(f" [{i}/{len(todo)}] ERROR {result['video']}: {result['error']}")
            else:
                success += 1
                total_tokens += result.get("tokens_used", 0)
                out_f.write(json.dumps(result, ensure_ascii=False) + "\n")
                out_f.flush()

            # Periodic progress line every 100 completions.
            if i % 100 == 0:
                print(f" [{i}/{len(todo)}] success={success}, errors={errors}, tokens={total_tokens:,}")

    print(f"\nDone! success={success}, errors={errors}, total_tokens={total_tokens:,}")
    print(f"Output: {OUTPUT_FILE}")
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|